Python nibabel.load() Examples

The following are 30 code examples of nibabel.load(), collected from open-source projects; the source file, project, and license for each example are listed above it. You may also want to check out all available functions and classes of the nibabel module, or try the search function.
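As a quick reference before the project examples, here is a minimal sketch of the typical nibabel.load() pattern; the file name example.nii.gz is a placeholder, not a file from any of the projects below. Note that nib.load() returns an image object rather than an array, and that get_fdata() is the current data accessor (get_data(), which appears in several of the older examples, is deprecated).

import numpy as np
import nibabel as nib

# Load a NIfTI image; this returns an image object, not the voxel array.
img = nib.load('example.nii.gz')  # placeholder path

# get_fdata() returns the voxel data as a floating-point numpy array.
data = img.get_fdata()

# The 4x4 affine maps voxel indices to world (scanner) coordinates.
affine = img.affine

print(data.shape, data.dtype)
print(affine)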
Example #1
Source File: PP.py    From pytorch-mri-segmentation-3D with MIT License
def generateImgSlicesFolder(data_folder = '../Data/MS2017a/scans/'):
	scan_folders = glob.glob(data_folder + '*')

	for sf in scan_folders:
		slice_dir_path = os.path.join(sf, 'slices/')
		if not os.path.exists(slice_dir_path):
			print('Creating directory at:' , slice_dir_path)
			os.makedirs(slice_dir_path)

		img = nib.load(os.path.join(sf, 'pre/FLAIR.nii.gz'))
		img_np = img.get_data()
		img_affine = img.affine
		print(sf)
		print('The img shape', img_np.shape[2])
		for i in range(img_np.shape[2]):
			slice_img_np = img_np[:,:,i]
			nft_img = nib.Nifti1Image(slice_img_np, img_affine)
			nib.save(nft_img, slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz')

			if os.path.basename(sf) == '0':
				slice_img = nib.load(slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz').get_data() / 5
				print('DID I GET HERE?')
				print('Writing to', str(i) + '.jpg') 
Example #2
Source File: utils.py    From Brats2019 with MIT License
def save_slice_img(self, volume_path, output_path):
        file_name = os.path.basename(volume_path)
        output_dir  = os.path.join(output_path, file_name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        else:
            pass
        input_volume = nib.load(volume_path).get_data()
        # mapping to 0-1
        vol_max = np.max(input_volume)
        vol_min = np.min(input_volume)
        input_unit = (input_volume-vol_min)/(vol_max - vol_min)
        width, height, depth= input_unit.shape
        for i in range(0, depth):
            slice_path = os.path.join(output_dir, str(i)+'.png')
            img_i = input_unit[:, :, i]
            # normalize to 0-255
            img_i = (img_i*255).astype('uint8')
            # cv.imwrite(slice_path, img_i)
        return input_unit 
Example #3
Source File: PP.py    From pytorch-mri-segmentation-3D with MIT License
def generateGTSlicesFolder(data_folder = '../Data/MS2017a/scans/'):
	scan_folders = glob.glob(data_folder + '*')

	for sf in scan_folders:
		slice_dir_path = os.path.join(sf, 'gt_slices/')
		if not os.path.exists(slice_dir_path):
			print('Creating directory at:' , slice_dir_path)
			os.makedirs(slice_dir_path)

		img = nib.load(os.path.join(sf, 'wmh.nii.gz'))
		img_np = img.get_data()
		img_affine = img.affine
		print(sf)
		print('The img shape', img_np.shape[2])
		for i in range(img_np.shape[2]):
			slice_img_np = img_np[:,:,i]
			nft_img = nib.Nifti1Image(slice_img_np, img_affine)
			nib.save(nft_img, slice_dir_path + 'wmh_' + str(i) + '.nii.gz')

			if os.path.basename(sf) == '0':
				slice_img = nib.load(slice_dir_path + 'wmh_' + str(i) + '.nii.gz').get_data() * 256
				#cv2.imwrite('temp/' + str(i) + '.jpg', slice_img) 
Example #4
Source File: test_t2smap.py    From tedana with GNU Lesser General Public License v2.1
def test_basic_t2smap3(self):
        """
        A very simple test, to confirm that t2smap creates output
        files when combmode is set to 'paid'.
        """
        data_dir = get_test_data_path()
        data = [op.join(data_dir, 'echo1.nii.gz'),
                op.join(data_dir, 'echo2.nii.gz'),
                op.join(data_dir, 'echo3.nii.gz')]
        out_dir = 'TED.echo1.t2smap'
        workflows.t2smap_workflow(data, [14.5, 38.5, 62.5], combmode='paid',
                                  fitmode='all', out_dir=out_dir)

        # Check outputs
        assert op.isfile(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        img = nib.load(op.join(out_dir, 'T2starmap.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'S0map.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'desc-full_T2starmap.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'desc-full_S0map.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        assert len(img.shape) == 4 
Example #5
Source File: test_nilearn.py    From NiBetaSeries with MIT License
def test_censor_volumes(tmp_path, betaseries_file, brainmask_file):
    outlier_file = tmp_path / 'betaseries_outlier.nii.gz'

    # make an outlier volume
    outlier_idx = 6
    beta_img = nib.load(str(betaseries_file))
    beta_data = beta_img.get_fdata()
    beta_data[..., outlier_idx] += 1000

    beta_img.__class__(
        beta_data, beta_img.affine, beta_img.header).to_filename(str(outlier_file))

    censor_volumes = CensorVolumes(timeseries_file=str(outlier_file),
                                   mask_file=str(brainmask_file))

    res = censor_volumes.run()

    assert nib.load(res.outputs.censored_file).shape[-1] == beta_img.shape[-1] - 1
    assert res.outputs.outliers[outlier_idx] 
Example #6
Source File: nilearn.py    From NiBetaSeries with MIT License
def _run_interface(self, runtime):
        import nibabel as nib
        from nipype.utils.filemanip import fname_presuffix

        bold_img = nib.load(self.inputs.timeseries_file)
        bold_mask_img = nib.load(self.inputs.mask_file)

        bold_data = bold_img.get_fdata()
        bold_mask = bold_mask_img.get_fdata().astype(bool)

        outliers = is_outlier(bold_data[bold_mask].T, thresh=self.inputs.threshold)

        out = fname_presuffix(self.inputs.timeseries_file, suffix='_censored')

        bold_img.__class__(bold_data[..., ~outliers],
                           bold_img.affine, bold_img.header).to_filename(out)

        self._results['censored_file'] = out
        self._results['outliers'] = outliers

        return runtime 
Example #7
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def forget_importer(name):
    '''
    forget_importer(name) yields True if an importer of type name was successfully forgotten from
      the neuropythy importers list and false otherwise. This function must be called before an
      importer can be replaced.
    '''
    global importers
    name = name.lower()
    if name in importers:
        importers = importers.discard(name)
        delattr(load, name)
        return True
    else:
        return False



# The list of exporter types we understand 
Example #8
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def load_json(filename, to='auto'):
    '''
    load_json(filename) yields the object represented by the json file or stream object filename.
    
    The optional argument to may be set to None to indicate that the JSON data should be returned
    verbatim rather than parsed by neuropythy's denormalize system.
    '''
    from neuropythy.util import denormalize as denorm
    if pimms.is_str(filename):
        try:
            with gzip.open(filename, 'rt') as fl: dat = json.load(fl)
        except Exception:
            with open(filename, 'rt') as fl: dat = json.load(fl)
    else:
        dat = json.load(filename)
        filename = '<stream>'
    if to is None: return dat
    elif to == 'auto': return denorm(dat)
    else: raise ValueError('unrecognized to option: %s' % to) 
Example #9
Source File: mesh_io.py    From simnibs with GNU General Public License v3.0
def read_gifti_surface(fn):
    ''' Reads a gifti surface

    Parameters
    -----------
    fn: str
        File name

    Returns
    ---------
    msh: Msh()
        mesh structure with geometrical information
    '''
    s = nibabel.load(fn)
    faces = s.get_arrays_from_intent('NIFTI_INTENT_TRIANGLE')[0].data
    nodes = s.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0].data
    msh = Msh()
    msh.elm = Elements(triangles=np.array(faces + 1, dtype=int))
    msh.nodes = Nodes(np.array(nodes, dtype=float))
    return msh 
Example #10
Source File: utils.py    From Brats2019 with MIT License
def get_brain_region(volume_data):
    # volume = nib.load(volume_path)
    # volume_data = volume.get_data()
    # get the brain region
    indice_list = np.where(volume_data > 0)
    # calculate the min and max of the indices; the volume has 3 spatial dimensions
    channel_0_min = min(indice_list[0])
    channel_0_max = max(indice_list[0])

    channel_1_min = min(indice_list[1])
    channel_1_max = max(indice_list[1])

    channel_2_min = min(indice_list[2])
    channel_2_max = max(indice_list[2])

    brain_volume = volume_data[channel_0_min:channel_0_max, channel_1_min:channel_1_max, channel_2_min:channel_2_max]

    return (channel_0_min, channel_0_max, channel_1_min, channel_1_max, channel_2_min, channel_2_max) 
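One possible way to call get_brain_region() above: load a volume, take the returned bounds, and crop. The file path is a placeholder, and the bounds are applied exactly as the function computes them.

import nibabel as nib

vol = nib.load('brain.nii.gz').get_fdata()  # placeholder path
x0, x1, y0, y1, z0, z1 = get_brain_region(vol)
cropped = vol[x0:x1, y0:y1, z0:z1]  # keep only the non-zero (brain) region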
Example #11
Source File: niio.py    From ciftify with MIT License
def load_surfaces(filename, suppress_echo = False):
    '''
    separate a cifti file into surfaces,
    then loads the surface data
    '''
    ## separate the cifti file into left and right surfaces
    with TempDir() as tempdir:
        L_data_surf=os.path.join(tempdir, 'Ldata.func.gii')
        R_data_surf=os.path.join(tempdir, 'Rdata.func.gii')
        run(['wb_command','-cifti-separate', filename, 'COLUMN',
            '-metric', 'CORTEX_LEFT', L_data_surf,
            '-metric', 'CORTEX_RIGHT', R_data_surf],
            suppress_echo = suppress_echo)

        ## load both surfaces and concatenate them together
        Ldata = load_gii_data(L_data_surf)
        Rdata = load_gii_data(R_data_surf)

    return Ldata, Rdata 
Example #12
Source File: ciftify_clean_img.py    From ciftify with MIT License
def __get_tr(self, tr_arg):
        '''read TR from the func file if not indicated'''
        if tr_arg:
            tr = float(tr_arg)
        else:
            if self.func.type == "nifti":
                tr_ms = nib.load(self.func.path).header.get_zooms()[3]
                if tr_ms > 150:
                    '''knowing that a TR is never between 20 and 200, we assume ms rather than seconds'''
                    logger.info('The TR is greater than 150, we believe this is in ms, dividing by 1000')
                    tr = float(tr_ms) / 1000
                else:
                    tr = tr_ms
            if self.func.type == "cifti":
                tr_out = ciftify.utils.get_stdout(['wb_command','-file-information',
                        '-only-step-interval', self.func.path])
                tr = float(tr_out.strip())
        if tr > 150:
            logger.warning("TR should be specified in seconds, improbable value {} given".format(tr))
        return tr 
Example #13
Source File: sim_struct.py    From simnibs with GNU General Public License v3.0
def _get_vol_info(self):
        if self.anisotropy_vol is not None:
            if self.anisotropy_affine is not None:
                return self.anisotropy_vol, self.anisotropy_affine

        if not self.fn_tensor_nifti:
            raise ValueError('could not get anisotropy information: '
                             'fn_tensor_nifti not set')

        fn_nifti = \
            os.path.abspath(os.path.expanduser(self.fn_tensor_nifti))
        if not os.path.isfile(fn_nifti):
            raise ValueError(
                'Could not find file \'{0}\' to get anisotropy '
                'information'.format(self.fn_tensor_nifti))

        # Load the nifti and interpolate the conductivities
        image = nibabel.load(fn_nifti)
        affine = image.affine
        return image.dataobj, affine 
Example #14
Source File: test_t2smap.py    From tedana with GNU Lesser General Public License v2.1
def test_basic_t2smap4(self):
        """
        A very simple test, to confirm that t2smap creates output
        files when combmode is set to 'paid' and fitmode is set to 'ts'.
        """
        data_dir = get_test_data_path()
        data = [op.join(data_dir, 'echo1.nii.gz'),
                op.join(data_dir, 'echo2.nii.gz'),
                op.join(data_dir, 'echo3.nii.gz')]
        out_dir = 'TED.echo1.t2smap'
        workflows.t2smap_workflow(data, [14.5, 38.5, 62.5], combmode='paid',
                                  fitmode='ts', out_dir=out_dir)

        # Check outputs
        assert op.isfile(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        img = nib.load(op.join(out_dir, 'T2starmap.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'S0map.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'desc-full_T2starmap.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'desc-full_S0map.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        assert len(img.shape) == 4 
Example #15
Source File: test_t2smap.py    From tedana with GNU Lesser General Public License v2.1
def test_basic_t2smap2(self):
        """
        A very simple test, to confirm that t2smap creates output
        files when fitmode is set to ts.
        """
        data_dir = get_test_data_path()
        data = [op.join(data_dir, 'echo1.nii.gz'),
                op.join(data_dir, 'echo2.nii.gz'),
                op.join(data_dir, 'echo3.nii.gz')]
        out_dir = 'TED.echo1.t2smap'
        workflows.t2smap_workflow(data, [14.5, 38.5, 62.5], combmode='t2s',
                                  fitmode='ts', out_dir=out_dir)

        # Check outputs
        assert op.isfile(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        img = nib.load(op.join(out_dir, 'T2starmap.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'S0map.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'desc-full_T2starmap.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'desc-full_S0map.nii.gz'))
        assert len(img.shape) == 4
        img = nib.load(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        assert len(img.shape) == 4 
Example #16
Source File: test_t2smap.py    From tedana with GNU Lesser General Public License v2.1
def test_basic_t2smap1(self):
        """
        A very simple test, to confirm that t2smap creates output
        files.
        """
        data_dir = get_test_data_path()
        data = [op.join(data_dir, 'echo1.nii.gz'),
                op.join(data_dir, 'echo2.nii.gz'),
                op.join(data_dir, 'echo3.nii.gz')]
        out_dir = 'TED.echo1.t2smap'
        workflows.t2smap_workflow(data, [14.5, 38.5, 62.5], combmode='t2s',
                                  fitmode='all', out_dir=out_dir)

        # Check outputs
        assert op.isfile(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        img = nib.load(op.join(out_dir, 'T2starmap.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'S0map.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'desc-full_T2starmap.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'desc-full_S0map.nii.gz'))
        assert len(img.shape) == 3
        img = nib.load(op.join(out_dir, 'desc-optcom_bold.nii.gz'))
        assert len(img.shape) == 4 
Example #17
Source File: misc.py    From smriprep with Apache License 2.0
def apply_lut(in_dseg, lut, newpath=None):
    """Map the input discrete segmentation to a new label set (lookup table, LUT)."""
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    if newpath is None:
        from os import getcwd
        newpath = getcwd()

    out_file = fname_presuffix(in_dseg, suffix='_dseg', newpath=newpath)
    lut = np.array(lut, dtype='int16')

    segm = nb.load(in_dseg)
    hdr = segm.header.copy()
    hdr.set_data_dtype('int16')
    segm.__class__(lut[np.asanyarray(segm.dataobj, dtype=int)].astype('int16'),
                   segm.affine, hdr).to_filename(out_file)

    return out_file 
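One possible way to call apply_lut() above, with a made-up lookup table; the dseg path and the label mapping are illustrative only, not smriprep's own LUT.

import numpy as np

# Map FreeSurfer-style labels 2 (left cerebral WM) and 41 (right cerebral WM)
# to class 1 and everything else to 0. The LUT must be at least as long as the
# largest label value present in the input dseg.
lut = np.zeros(256, dtype='int16')
lut[2] = lut[41] = 1

out_file = apply_lut('sub-01_dseg.nii.gz', lut)  # placeholder input file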
Example #18
Source File: anatomical.py    From smriprep with Apache License 2.0
def _split_segments(in_file):
    from pathlib import Path
    import numpy as np
    import nibabel as nb

    segimg = nb.load(in_file)
    data = np.int16(segimg.dataobj)
    hdr = segimg.header.copy()
    hdr.set_data_dtype('uint8')

    out_files = []
    for i, label in enumerate(("GM", "WM", "CSF"), 1):
        out_fname = str(Path.cwd() / f"aseg_label-{label}_mask.nii.gz")
        segimg.__class__(data == i, segimg.affine, hdr).to_filename(out_fname)
        out_files.append(out_fname)

    return out_files 
Example #19
Source File: image_utils.py    From ukbb_cardiac with Apache License 2.0
def auto_crop_image(input_name, output_name, reserve):
    nim = nib.load(input_name)
    image = nim.get_data()
    X, Y, Z = image.shape[:3]

    # Detect the bounding box of the foreground
    idx = np.nonzero(image > 0)
    x1, x2 = idx[0].min() - reserve, idx[0].max() + reserve + 1
    y1, y2 = idx[1].min() - reserve, idx[1].max() + reserve + 1
    z1, z2 = idx[2].min() - reserve, idx[2].max() + reserve + 1
    x1, x2 = max(x1, 0), min(x2, X)
    y1, y2 = max(y1, 0), min(y2, Y)
    z1, z2 = max(z1, 0), min(z2, Z)
    print('Bounding box')
    print('  bottom-left corner = ({},{},{})'.format(x1, y1, z1))
    print('  top-right corner = ({},{},{})'.format(x2, y2, z2))

    # Crop the image
    image = image[x1:x2, y1:y2, z1:z2]

    # Update the affine matrix
    affine = nim.affine
    affine[:3, 3] = np.dot(affine, np.array([x1, y1, z1, 1]))[:3]
    nim2 = nib.Nifti1Image(image, affine)
    nib.save(nim2, output_name) 
Example #20
Source File: utils.py    From Attention-Gated-Networks with MIT License
def load_nifti_img(filepath, dtype):
    '''
    NIFTI Image Loader
    :param filepath: path to the input NIFTI image
    :param dtype: dataio type of the nifti numpy array
    :return: return numpy array
    '''
    nim = nib.load(filepath)
    out_nii_array = np.array(nim.get_data(),dtype=dtype)
    out_nii_array = np.squeeze(out_nii_array) # drop singleton dim in case temporal dim exists
    meta = {'affine': nim.get_affine(),
            'dim': nim.header['dim'],
            'pixdim': nim.header['pixdim'],
            'name': os.path.basename(filepath)
            }

    return out_nii_array, meta 
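The loader above relies on the deprecated get_data() and get_affine() accessors; a sketch of the equivalent reads on current nibabel versions (the path is a placeholder):

import numpy as np
import nibabel as nib

nim = nib.load('image.nii.gz')  # placeholder path
arr = np.squeeze(nim.get_fdata().astype(np.float32))  # replaces nim.get_data()
meta = {'affine': nim.affine,                          # replaces nim.get_affine()
        'dim': nim.header['dim'],
        'pixdim': nim.header['pixdim']}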
Example #21
Source File: coil_numpy.py    From simnibs with GNU General Public License v3.0
def _calculate_dadt_nifti(msh, nifti_image, coil_matrix, didt, geo_fn):
    """ auxiliary function that interpolates the dA/dt field from a nifti file """
    if isinstance(nifti_image, str):
        nifti_image = nib.load(nifti_image)
    elif isinstance(nifti_image, nib.nifti1.Nifti1Image):
        pass
    else:
        raise NameError('Failed to parse input volume (not string or nibabel nifti1 volume)')
    coords = msh.nodes.node_coord

    out = _get_field(nifti_image, coords, coil_matrix)
    out = out * didt

    node_data = mesh_io.NodeData(out.T)

    if geo_fn is not None:
        y_axis = np.arange(1, 10, dtype=float)[:, None] * (0, 1, 0)
        z_axis = np.arange(1, 30, dtype=float)[:, None] * (0, 0, 1)
        pos = np.vstack((((0, 0, 0)), y_axis, z_axis))
        pos = (coil_matrix[:3, :3].dot(pos.T) + coil_matrix[:3, 3][:, None]).T
        mesh_io.write_geo_spheres(pos, geo_fn, name='coil_directions')

    return node_data 
Example #22
Source File: conftest.py    From NiBetaSeries with MIT License
def preproc_file(deriv_dir, sub_metadata, deriv_bold_fname=deriv_bold_fname):
    deriv_bold = deriv_dir.ensure(deriv_bold_fname)
    with open(str(sub_metadata), 'r') as md:
        bold_metadata = json.load(md)
    tr = bold_metadata["RepetitionTime"]
    # time_points
    tp = 200
    ix = np.arange(tp)
    # create voxel timeseries
    task_onsets = np.zeros(tp)
    # add activations at every 40 time points
    # waffles
    task_onsets[0::40] = 1
    # fries
    task_onsets[3::40] = 1.5
    # milkshakes
    task_onsets[6::40] = 2
    signal = np.convolve(task_onsets, spm_hrf(tr))[0:len(task_onsets)]
    # csf
    csf = np.cos(2*np.pi*ix*(50/tp)) * 0.1
    # white matter
    wm = np.sin(2*np.pi*ix*(22/tp)) * 0.1
    # voxel time series (signal and noise)
    voxel_ts = signal + csf + wm
    # a 4d matrix with 2 identical timeseries
    img_data = np.array([[[voxel_ts, voxel_ts]]])
    # make a nifti image
    img = nib.Nifti1Image(img_data, np.eye(4))
    # save the nifti image
    img.to_filename(str(deriv_bold))

    return deriv_bold 
Example #23
Source File: register_retinotopy.py    From neuropythy with GNU Affero General Public License v3.0
def _guess_surf_file(fl):
    # MGH/MGZ files
    try: return np.asarray(fsmgh.load(fl).dataobj).flatten()
    except Exception: pass
    # FreeSurfer Curv files
    try: return fsio.read_morph_data(fl)
    except Exception: pass
    # Nifti files
    try: return np.squeeze(nib.load(fl).dataobj)
    except Exception: pass
    raise ValueError('Could not determine filetype for: %s' % fl) 
Example #24
Source File: reports.py    From smriprep with Apache License 2.0
def _run_interface(self, runtime):
        from niworkflows.viz.utils import plot_registration, cuts_from_bbox, compose_view
        from nibabel import load

        rootdir = Path(self.inputs.subjects_dir) / self.inputs.subject_id
        _anat_file = str(rootdir / 'mri' / 'brain.mgz')
        _contour_file = str(rootdir / 'mri' / 'ribbon.mgz')

        anat = load(_anat_file)
        contour_nii = load(_contour_file)

        n_cuts = 7
        cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)

        self._results['out_report'] = str(Path(runtime.cwd) / self.inputs.out_report)

        # Call composer
        compose_view(
            plot_registration(anat, 'fixed-image',
                              estimate_brightness=True,
                              cuts=cuts,
                              contour=contour_nii,
                              compress=self.inputs.compress_report),
            [],
            out_file=self._results['out_report']
        )
        return runtime 
Example #25
Source File: conftest.py    From NiBetaSeries with MIT License
def sub_events(bids_dir, sub_metadata, preproc_file,
               bids_events_fname=bids_events_fname):
    events_file = bids_dir.ensure(bids_events_fname)
    # read in subject metadata to get the TR
    with open(str(sub_metadata), 'r') as md:
        bold_metadata = json.load(md)
    tr = bold_metadata["RepetitionTime"]
    # time_points
    tp = nib.load(str(preproc_file)).shape[-1]
    # create voxel timeseries
    task_onsets = np.zeros(tp)
    # add waffles at every 40 time points
    task_onsets[0::40] = 1
    # add fries at every 40 time points starting at 3
    task_onsets[3::40] = 1
    # add milkshakes at every 40 time points starting at 6
    task_onsets[6::40] = 1
    # create event tsv
    num_trials = np.where(task_onsets == 1)[0].shape[0]
    onsets = np.multiply(np.where(task_onsets == 1), tr).reshape(num_trials)
    durations = [1] * num_trials
    num_conds = 3
    trial_types = ['waffle', 'fry', 'milkshake'] * int((num_trials / num_conds))
    events_df = pd.DataFrame.from_dict({'onset': onsets,
                                        'duration': durations,
                                        'trial_type': trial_types})
    # reorder columns
    events_df = events_df[['onset', 'duration', 'trial_type']]
    # save the events_df to file
    events_df.to_csv(str(events_file), index=False, sep='\t')
    return events_file 
Example #26
Source File: outputs.py    From smriprep with Apache License 2.0
def _rpt_masks(mask_file, before, after, after_mask=None):
    from os.path import abspath
    import nibabel as nb
    msk = nb.load(mask_file).get_fdata() > 0
    bnii = nb.load(before)
    nb.Nifti1Image(bnii.get_fdata() * msk,
                   bnii.affine, bnii.header).to_filename('before.nii.gz')
    if after_mask is not None:
        msk = nb.load(after_mask).get_fdata() > 0

    anii = nb.load(after)
    nb.Nifti1Image(anii.get_fdata() * msk,
                   anii.affine, anii.header).to_filename('after.nii.gz')
    return abspath('before.nii.gz'), abspath('after.nii.gz') 
Example #27
Source File: conftest.py    From NiBetaSeries with MIT License
def confounds_file(deriv_dir, preproc_file,
                   deriv_regressor_fname=deriv_regressor_fname):
    confounds_file = deriv_dir.ensure(deriv_regressor_fname)
    confound_dict = {}
    tp = nib.load(str(preproc_file)).shape[-1]
    ix = np.arange(tp)
    # csf
    confound_dict['csf'] = np.cos(2*np.pi*ix*(50/tp)) * 0.1
    # white matter
    confound_dict['white_matter'] = np.sin(2*np.pi*ix*(22/tp)) * 0.1
    # framewise_displacement
    confound_dict['framewise_displacement'] = np.random.random_sample(tp)
    confound_dict['framewise_displacement'][0] = np.nan
    # motion outliers
    for motion_outlier in range(0, 5):
        mo_name = 'motion_outlier0{}'.format(motion_outlier)
        confound_dict[mo_name] = np.zeros(tp)
        confound_dict[mo_name][motion_outlier] = 1
    # derivatives
    derive1 = [
        'csf_derivative1',
        'csf_derivative1_power2',
        'global_signal_derivative1_power2',
        'trans_x_derivative1',
        'trans_y_derivative1',
        'trans_z_derivative1',
        'trans_x_derivative1_power2',
        'trans_y_derivative1_power2',
        'trans_z_derivative1_power2',
    ]
    for d in derive1:
        confound_dict[d] = np.random.random_sample(tp)
        confound_dict[d][0] = np.nan

    # transformations
    for dir in ["trans_x", "trans_y", "trans_z"]:
        confound_dict[dir] = np.random.random_sample(tp)

    confounds_df = pd.DataFrame(confound_dict)
    confounds_df.to_csv(str(confounds_file), index=False, sep='\t', na_rep='n/a')
    return confounds_file 
Example #28
Source File: image_utils.py    From ukbb_cardiac with Apache License 2.0
def padding(input_A_name, input_B_name, output_name, value_in_B, value_output):
    nim = nib.load(input_A_name)
    image_A = nim.get_data()
    image_B = nib.load(input_B_name).get_data()
    image_A[image_B == value_in_B] = value_output
    nim2 = nib.Nifti1Image(image_A, nim.affine)
    nib.save(nim2, output_name) 
Example #29
Source File: medical.py    From tensorflow-u-net with GNU General Public License v3.0
def load_volume(source):
    """
    Loads the medical volumes specified by the provided paths.

    Parameters
    ----------
    source : iterable
        An iterable over a number of datapoints where each datapoint is a tuple of a list of file paths and a parameter dictionary.

    Returns
    -------
    gen : generator
        A generator that yields each transformed datapoint as a tuple of a list of inputs and a parameter dictionary.
    """

    def transformation(input_tuple):
        inputs, parameters = input_tuple

        outputs = []
        niftis = [nibabel.load(inpt) for inpt in inputs]
        outputs = [np.asarray(nifti.dataobj).astype(np.float32) for nifti in niftis]

        nifti = niftis[0]

        parameters["spacing"] = nifti.header.get_zooms()[:3]  # the last value is the time between scans, no need to keep it
        parameters["original_spacing"] = parameters["spacing"]

        parameters["size"] = nifti.header.get_data_shape()
        parameters["original_size"] = parameters["size"]

        parameters["nifti_header"] = nifti.header

        return (outputs, parameters)

    return helper.apply(source, transformation) 
Example #30
Source File: propagate_aortic_annotation.py    From ukbb_cardiac with Apache License 2.0
def infer_time_frame(image_name, image_fr_name):
    """ Infer which time frame the annotation is at. """
    nim = nib.load(image_name)
    T = nim.header['dim'][4]
    image = nim.get_data()
    nim_fr = nib.load(image_fr_name)
    image_fr = nim_fr.get_data()

    diff = np.zeros(T)
    for t in range(T):
        diff[t] = np.sum(np.abs(image[:, :, :, t] - image_fr))
    k = np.argmin(diff)
    return k