Python pydicom.read_file() Examples
The following are 30 code examples of pydicom.read_file(), collected from open-source projects.
The source project, file, and license for each example are noted above it. You may also want to check out all of the other available functions and classes of the pydicom module.
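Before looking at the examples, here is a minimal usage sketch; it is not taken from any of the projects below, and the file path is a placeholder. Note that in recent pydicom releases read_file() is a thin, deprecated alias for dcmread(), and the newest releases remove it entirely, so new code should call dcmread() directly.

import pydicom

# Read a DICOM file into a Dataset; header values become attributes.
ds = pydicom.read_file("image.dcm", stop_before_pixels=False, force=False)
print(ds.PatientID)
pixels = ds.pixel_array  # decoded pixel data as a numpy array

# Equivalent call in current pydicom versions:
# ds = pydicom.dcmread("image.dcm")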
Example #1
Source File: compressed_dicom.py From dicom2nifti with MIT License | 6 votes |
def is_dicom_file(filename):
    """
    Util function to check if file is a dicom file
    the first 128 bytes are preamble
    the next 4 bytes should contain DICM otherwise it is not a dicom

    :param filename: file to check for the DICM header block
    :type filename: str
    :returns: True if it is a dicom file
    """
    file_stream = open(filename, 'rb')
    file_stream.seek(128)
    data = file_stream.read(4)
    file_stream.close()
    if data == b'DICM':
        return True
    if settings.pydicom_read_force:
        try:
            dicom_headers = pydicom.read_file(filename,
                                              defer_size="1 KB",
                                              stop_before_pixels=True,
                                              force=True)
            if dicom_headers is not None:
                return True
        except:
            pass
    return False
Example #2
Source File: clean.py From deid with MIT License | 6 votes |
def save_dicom(self, output_folder=None, image_type="cleaned"):
    """save a cleaned dicom to disk. We expose an option to save an
    original (change image_type to "original" to be consistent, although
    this is not incredibly useful given it would duplicate the original data.
    """
    # Having clean also means has dicom image
    if hasattr(self, image_type):
        dicom_name = self._get_clean_name(output_folder)
        dicom = read_file(self.dicom_file, force=True)

        # If going from compressed, change TransferSyntax
        if dicom.file_meta.TransferSyntaxUID.is_compressed is True:
            dicom.decompress()

        dicom.PixelData = self.cleaned.tostring()
        dicom.save_as(dicom_name)
        return dicom_name
    else:
        bot.warning("use detect() --> clean() before saving is possible.")
Example #3
Source File: parser.py From deid with MIT License | 6 votes |
def load(self, dicom_file, force=True):
    """Ensure that the dicom file exists, and use full path. Here we
    load the file, and save the dicom, dicom_file, and dicom_name.
    """
    # Reset seen, which is generated when we parse
    self.seen = []

    # The user might already have provided a dataset
    if isinstance(dicom_file, Dataset):
        self.dicom = dicom_file
    else:
        # If we must read the file, the path must exist
        if not os.path.exists(dicom_file):
            bot.exit("%s does not exist." % dicom_file)
        self.dicom = read_file(dicom_file, force=force)

    # Set class variables that might be helpful later
    self.dicom_file = os.path.abspath(self.dicom.filename)
    self.dicom_name = os.path.basename(self.dicom_file)
Example #4
Source File: 01_preprocess.py From kaggle-lung-cancer with Apache License 2.0 | 6 votes |
def load_study(instance_filepaths):
    """Loads a study with pydicom and sorts slices in z-axis.
    Calculates slice thickness and writes it in the read dicom file.
    """
    slices = [pydicom.read_file(fp) for fp in instance_filepaths]
    slices.sort(key=lambda s: float(s.ImagePositionPatient[2]))
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    if slice_thickness == 0:
        for i in range(1, len(slices) - 2):
            try:
                slice_thickness = np.abs(slices[i].ImagePositionPatient[2] - slices[i+1].ImagePositionPatient[2])
            except:
                slice_thickness = np.abs(slices[i].SliceLocation - slices[i+1].SliceLocation)
            if slice_thickness > 0:
                break
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
Example #5
Source File: 01_preprocess.py From kaggle-lung-cancer with Apache License 2.0 | 6 votes |
def load_study(instance_filepaths):
    """Loads a study with pydicom and sorts slices in z-axis.
    Calculates slice thickness and writes it in the read dicom file.
    """
    slices = [pydicom.read_file(fp) for fp in instance_filepaths]
    slices.sort(key=lambda s: float(s.ImagePositionPatient[2]))
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    if slice_thickness == 0:
        for i in range(1, len(slices) - 2):
            try:
                slice_thickness = np.abs(slices[i].ImagePositionPatient[2] - slices[i+1].ImagePositionPatient[2])
            except:
                slice_thickness = np.abs(slices[i].SliceLocation - slices[i+1].SliceLocation)
            if slice_thickness > 0:
                break
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
Example #6
Source File: Xtest_dicom_header.py From deid with MIT License | 6 votes |
def test_replace_identifiers(self):
    print("Testing deid.dicom replace_identifiers")
    from deid.dicom import replace_identifiers
    from deid.dicom import get_identifiers
    from pydicom import read_file

    dicom_files = get_dicom(self.dataset, return_dir=True)
    ids = get_identifiers(dicom_files)

    # Before blanking, 28 fields don't have blanks
    notblanked = read_file(dicom_files[0])
    notblanked_fields = [
        x for x in notblanked.dir() if notblanked.get(x) != ""
    ]  # 28
    self.assertTrue(len(notblanked_fields) == 28)

    updated_files = replace_identifiers(dicom_files, ids, output_folder=self.tmpdir)

    # After replacing only 9 don't have blanks
    blanked = read_file(updated_files[0])
    blanked_fields = [x for x in blanked.dir() if blanked.get(x) != ""]
    self.assertTrue(len(blanked_fields) == 9)
Example #7
Source File: utils.py From datman with Apache License 2.0 | 6 votes |
def get_tarfile_headers(path, stop_after_first=False):
    """
    Get headers for dicom files within a tarball
    """
    tar = tarfile.open(path)
    members = tar.getmembers()

    manifest = {}
    # for each dir, we want to inspect files inside of it until we find a dicom
    # file that has header information
    for f in [x for x in members if x.isfile()]:
        dirname = os.path.dirname(f.name)
        if dirname in manifest:
            continue
        try:
            manifest[dirname] = dcm.read_file(tar.extractfile(f))
            if stop_after_first:
                break
        except dcm.filereader.InvalidDicomError:
            continue
    return manifest
Example #8
Source File: DataPreprocessing.py From CNNArt with Apache License 2.0 | 6 votes |
def create_DICOM_Array(PathDicom):
    filenames_list = []

    file_list = os.listdir(PathDicom)
    for file in file_list:
        filenames_list.append(PathDicom + file)
    datasets = [dicom.read_file(f)
                for f in filenames_list]
    try:
        voxel_ndarray, _ = dicom_numpy.combine_slices(datasets)
        voxel_ndarray = voxel_ndarray.astype(float)
        voxel_ndarray = np.swapaxes(voxel_ndarray, 0, 1)
        print(voxel_ndarray.dtype)
        # voxel_ndarray = voxel_ndarray[:-1:]
        # print(voxel_ndarray.shape)
    except dicom_numpy.DicomImportException:
        # invalid DICOM data
        raise
    print(voxel_ndarray.shape)
    return voxel_ndarray
Example #9
Source File: compressed_dicom.py From dicom2nifti with MIT License | 6 votes |
def _is_compressed(dicom_file, force=False):
    """ Check if dicoms are compressed or not """
    header = pydicom.read_file(dicom_file,
                               defer_size="1 KB",
                               stop_before_pixels=True,
                               force=force)

    uncompressed_types = ["1.2.840.10008.1.2",
                          "1.2.840.10008.1.2.1",
                          "1.2.840.10008.1.2.1.99",
                          "1.2.840.10008.1.2.2"]

    if 'TransferSyntaxUID' in header.file_meta and header.file_meta.TransferSyntaxUID in uncompressed_types:
        return False
    return True
Example #10
Source File: compressed_dicom.py From dicom2nifti with MIT License | 6 votes |
def read_file(dicom_file, defer_size=None, stop_before_pixels=False, force=False):
    if _is_compressed(dicom_file, force):
        # https://github.com/icometrix/dicom2nifti/issues/46 thanks to C-nit
        try:
            with tempfile.NamedTemporaryFile(delete=False) as fp:
                fp.close()
                _decompress_dicom(dicom_file, output_file=fp.name)
                return pydicom.read_file(fp.name,
                                         defer_size=None,  # We can't defer
                                         stop_before_pixels=stop_before_pixels,
                                         force=force)
        finally:
            os.remove(fp.name)

    dicom_header = pydicom.read_file(dicom_file,
                                     defer_size=defer_size,
                                     stop_before_pixels=stop_before_pixels,
                                     force=force)
    return dicom_header
Example #11
Source File: utils.py From grouped-ssd-pytorch with MIT License | 6 votes |
def read_dicom_series(directory, filepattern="P_*"):
    """ Reads the DICOM series files in the given directory.
    Only filenames matching filepattern will be considered. """
    if not os.path.exists(directory) or not os.path.isdir(directory):
        raise ValueError("Given directory does not exist or is a file : " + str(directory))
    # print('\tRead Dicom', directory)
    lstFilesDCM = natsort.natsorted(glob.glob(os.path.join(directory, filepattern)))
    # print('\tLength dicom series', len(lstFilesDCM))
    # Get ref file
    RefDs = dicom.read_file(lstFilesDCM[0])
    # Load dimensions based on the number of rows, columns, and slices (along the Z axis)
    ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))
    # The array is sized based on 'ConstPixelDims'
    ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)

    # loop through all the DICOM files
    for filenameDCM in lstFilesDCM:
        # read the file
        ds = dicom.read_file(filenameDCM)
        # store the raw image data
        ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array

    return ArrayDicom
Example #12
Source File: data_checker_year1_extended.py From grouped-ssd-pytorch with MIT License | 6 votes |
def read_dicom_series(directory, filepattern="P_*"):
    """ Reads the DICOM series files in the given directory.
    Only filenames matching filepattern will be considered. """
    if not os.path.exists(directory) or not os.path.isdir(directory):
        raise ValueError("Given directory does not exist or is a file : " + str(directory))
    # print('\tRead Dicom', directory)
    lstFilesDCM = natsort.natsorted(glob.glob(os.path.join(directory, filepattern)))
    # print('\tLength dicom series', len(lstFilesDCM))
    # Get ref file
    RefDs = dicom.read_file(lstFilesDCM[0])
    # Load dimensions based on the number of rows, columns, and slices (along the Z axis)
    ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))
    # The array is sized based on 'ConstPixelDims'
    ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)

    # loop through all the DICOM files
    for filenameDCM in lstFilesDCM:
        # read the file
        ds = dicom.read_file(filenameDCM)
        # store the raw image data
        ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array

    return ArrayDicom
Example #13
Source File: ct_to_jpg.py From grouped-ssd-pytorch with MIT License | 6 votes |
def read_dicom_series(directory, filepattern="P_*"):
    """ Reads the DICOM series files in the given directory.
    Only filenames matching filepattern will be considered. """
    if not os.path.exists(directory) or not os.path.isdir(directory):
        raise ValueError("Given directory does not exist or is a file : " + str(directory))
    # print('\tRead Dicom', directory)
    lstFilesDCM = natsort.natsorted(glob.glob(os.path.join(directory, filepattern)))
    # print('\tLength dicom series', len(lstFilesDCM))
    # Get ref file
    RefDs = dicom.read_file(lstFilesDCM[0])
    # Load dimensions based on the number of rows, columns, and slices (along the Z axis)
    ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))
    # The array is sized based on 'ConstPixelDims'
    ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)

    # loop through all the DICOM files
    for filenameDCM in lstFilesDCM:
        # read the file
        ds = dicom.read_file(filenameDCM)
        # store the raw image data
        ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array

    return ArrayDicom
Example #14
Source File: utils.py From datman with Apache License 2.0 | 6 votes |
def get_zipfile_headers(path, stop_after_first=False):
    """
    Get headers for a dicom file within a zipfile
    """
    zf = zipfile.ZipFile(path)

    manifest = {}
    for f in zf.namelist():
        dirname = os.path.dirname(f)
        if dirname in manifest:
            continue
        try:
            manifest[dirname] = dcm.read_file(io.BytesIO(zf.read(f)))
            if stop_after_first:
                break
        except dcm.filereader.InvalidDicomError:
            continue
        except zipfile.BadZipfile:
            logger.warning(f"Error in zipfile:{path}")
            break
    return manifest
Example #15
Source File: dicom_anonymizer_methods.py From DICAT with GNU General Public License v3.0 | 6 votes |
def is_file_a_dicom(file):
    """
    Check whether a given file is of type DICOM

    :param file: path to the file to identify
    :type file: str

    :return: True if the file is DICOM, False otherwise
    :rtype: bool
    """
    try:
        dicom.read_file(file)
    except InvalidDicomError:
        return False
    return True
Example #16
Source File: dicom_data.py From pydiq with MIT License | 6 votes |
def from_files(cls, files: List[str]) -> "DicomData":
    data = []
    modality = None
    for file_path in files:
        f = pydicom.read_file(file_path)
        print(f"Reading {file_path}...")

        # Get modality
        if modality:
            if modality != f.Modality:
                raise RuntimeError("Cannot mix images from different modalities")
        elif f.Modality not in cls.ALLOWED_MODALITIES:
            raise RuntimeError(f"{f.Modality} modality not supported.")
        else:
            modality = f.Modality

        data.append(cls._read_pixel_data(f))
    return cls(np.array(data), modality=modality)
Example #17
Source File: prepare_png.py From pneumothorax-segmentation with MIT License | 5 votes |
def save_test_file(f, out_path, img_size):
    img = pydicom.read_file(f).pixel_array
    name = f.split('/')[-1][:-4]
    img = resize(img, (img_size, img_size)) * 255
    cv2.imwrite('{}/test/{}.png'.format(out_path, name), img)
Example #18
Source File: utils.py From datman with Apache License 2.0 | 5 votes |
def get_folder_headers(path, stop_after_first=False):
    """
    Generate a dictionary of subfolders and dicom headers.
    """
    manifest = {}

    # for each dir, we want to inspect files inside of it until we find a dicom
    # file that has header information
    subdirs = []
    for filename in os.listdir(path):
        filepath = os.path.join(path, filename)
        try:
            if os.path.isdir(filepath):
                subdirs.append(filepath)
                continue
            manifest[path] = dcm.read_file(filepath)
            break
        except dcm.filereader.InvalidDicomError:
            pass

    if stop_after_first:
        return manifest

    # recurse
    for subdir in subdirs:
        manifest.update(get_folder_headers(subdir, stop_after_first))
    return manifest
Example #19
Source File: dm_xnat_extract.py From datman with Apache License 2.0 | 5 votes |
def is_valid_dicom(filename):
    try:
        dicom.read_file(filename)
    except IOError:
        return
    except dicom.errors.InvalidDicomError:
        return
    return True
Example #20
Source File: zip_archive.py From dicom-numpy with MIT License | 5 votes |
def dicom_datasets_from_zip(zip_file):
    datasets = []
    for entry in zip_file.namelist():
        if entry.endswith('/'):
            continue  # skip directories

        entry_pseudo_file = zip_file.open(entry)

        # the pseudo file does not support `seek`, which is required by
        # dicom's lazy loading mechanism; use temporary files to get around this;
        # relies on the temporary files not being removed until the temp
        # file is garbage collected, which should be the case because the
        # dicom datasets should retain a reference to the temp file
        temp_file = tempfile.TemporaryFile()
        temp_file.write(entry_pseudo_file.read())
        temp_file.flush()
        temp_file.seek(0)

        try:
            dataset = dicom.read_file(temp_file)
            datasets.append(dataset)
        except dicom.errors.InvalidDicomError as e:
            msg = 'Skipping invalid DICOM file "{}": {}'
            logger.info(msg.format(entry, e))

    if len(datasets) == 0:
        raise DicomImportException('Zipfile does not contain any valid DICOM files')

    return datasets
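As a usage sketch that is not part of the original example, the function above can be driven with Python's standard zipfile module; the archive name below is a hypothetical placeholder, and dicom_datasets_from_zip is assumed to be importable from the surrounding module.

import zipfile

# Hypothetical archive containing one or more .dcm files.
with zipfile.ZipFile("study.zip") as zf:
    datasets = dicom_datasets_from_zip(zf)
print(len(datasets), "valid DICOM datasets loaded")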
Example #21
Source File: utils.py From datman with Apache License 2.0 | 5 votes |
def is_dicom(fileobj):
    try:
        dcm.read_file(fileobj)
        return True
    except dcm.filereader.InvalidDicomError:
        return False
Example #22
Source File: utils.py From pydiq with MIT License | 5 votes |
def get_id(path: str) -> Tuple[str, str]:
    f = pydicom.read_file(path, stop_before_pixels=True)
    return f.StudyInstanceUID, f.SeriesInstanceUID
Example #23
Source File: 03_create_annotations.py From kaggle-lung-cancer with Apache License 2.0 | 5 votes |
def get_study_filenames_sorted(study_id):
    study_root_path = os.path.join(BASEPATH, SETTINGS['STAGE1_DATA_PATH'], study_id)
    instance_filepaths = sorted(list(get_files(study_root_path)))
    slices = [pydicom.read_file(fp) for fp in instance_filepaths]
    indices_sorted = [tup[0] for tup in sorted(enumerate(slices), key=lambda tup: int(tup[1].ImagePositionPatient[2]))]
    return [instance_filepaths[i].replace(os.path.join(BASEPATH, SETTINGS['STAGE1_DATA_PATH']), '') for i in indices_sorted]
Example #24
Source File: viewer.py From pydiq with MIT License | 5 votes |
def show_structure(self):
    if self.file_name:
        f = pydicom.read_file(self.file_name)
        l = QtWidgets.QLabel(str(f))
        l.show()
        # print(str(f))
Example #25
Source File: test_dicom_utils.py From deid with MIT License | 5 votes |
def get_dicom(dataset):
    """helper function to load a dicom
    """
    from deid.dicom import get_files
    from pydicom import read_file

    dicom_files = get_files(dataset)
    return read_file(next(dicom_files))
Example #26
Source File: prepare_png.py From pneumothorax-segmentation with MIT License | 5 votes |
def save_train_file(f, encode_df, out_path, img_size):
    img = pydicom.read_file(f).pixel_array
    name = f.split('/')[-1][:-4]
    encode = list(encode_df.loc[encode_df['ImageId'] == name, ' EncodedPixels'].values)
    encode = get_mask(encode, img.shape[1], img.shape[0])
    encode = resize(encode, (img_size, img_size))
    img = resize(img, (img_size, img_size))
    cv2.imwrite('{}/train/{}.png'.format(out_path, name), img * 255)
    cv2.imwrite('{}/mask/{}.png'.format(out_path, name), encode)
Example #27
Source File: biobank_utils.py From ukbb_cardiac with Apache License 2.0 | 5 votes |
def find_series(self, dir_name, T):
    """ In a few cases, there are two or three time sequences or series within
    each folder. We need to find which series to convert. """
    files = sorted(os.listdir(dir_name))
    if len(files) > T:
        # Sort the files according to their series UIDs
        series = {}
        for f in files:
            d = dicom.read_file(os.path.join(dir_name, f))
            suid = d.SeriesInstanceUID
            if suid in series:
                series[suid] += [f]
            else:
                series[suid] = [f]

        # Find the series which has been annotated, otherwise use the last series.
        if self.cvi42_dir:
            find_series = False
            for suid, suid_files in series.items():
                for f in suid_files:
                    contour_pickle = os.path.join(self.cvi42_dir,
                                                  os.path.splitext(f)[0] + '.pickle')
                    if os.path.exists(contour_pickle):
                        find_series = True
                        choose_suid = suid
                        break
            if not find_series:
                choose_suid = sorted(series.keys())[-1]
        else:
            choose_suid = sorted(series.keys())[-1]
        print('There are multiple series. Use series {0}.'.format(choose_suid))
        files = sorted(series[choose_suid])

    if len(files) < T:
        print('Warning: {0}: Number of files < CardiacNumberOfImages! '
              'We will fill the missing files using duplicate slices.'.format(dir_name))
    return(files)
Example #28
Source File: dicom_anonymizer_methods.py From DICAT with GNU General Public License v3.0 | 5 votes |
def read_dicom_with_pydicom(dicom_file, dicom_fields):
    """
    Read DICOM file using PyDICOM python library.

    :param dicom_file: DICOM file to read
    :type dicom_file: str
    :param dicom_fields: Dictionary containing DICOM fields and values
    :type dicom_fields: dict

    :return: updated dictionary of DICOM fields and values
    :rtype: dict
    """
    # Read DICOM file
    dicom_dataset = dicom.read_file(dicom_file)

    # Grep information from DICOM header and store them
    # into dicom_fields dictionary under flag Value
    # Dictionary of DICOM values to be returned
    for name in dicom_fields:
        try:
            description = dicom_fields[name]['Description']
            value = dicom_dataset.data_element(description).value
            dicom_fields[name]['Value'] = value
        except:
            continue

    return dicom_fields
Example #29
Source File: dicom_anonymizer_methods.py From DICAT with GNU General Public License v3.0 | 5 votes |
def pydicom_zapping(dicom_file, dicom_fields):
    """
    Actual zapping method for PyDICOM

    :param dicom_file: DICOM to de-identify
    :type dicom_file: str
    :param dicom_fields: Dictionary with DICOM fields & values to use
    :type dicom_fields: dict

    :return: None
    """
    dicom_dataset = dicom.read_file(dicom_file)

    for name in dicom_fields:
        new_val = ""
        if 'Value' in dicom_fields[name]:
            new_val = dicom_fields[name]['Value'].strip()
        if dicom_fields[name]['Editable'] is True:
            try:
                dicom_dataset.data_element(
                    dicom_fields[name]['Description']).value = new_val
            except:
                continue
        else:
            try:
                dicom_dataset.data_element(
                    dicom_fields[name]['Description']).value = ''
            except:
                continue
    dicom_dataset.save_as(dicom_file)
Example #30
Source File: __init__.py From mritopng with MIT License | 5 votes |
def extract_grayscale_image(mri_file):
    # Extracting data from the mri file
    plan = pydicom.read_file(mri_file)
    shape = plan.pixel_array.shape

    # Convert to float to avoid overflow or underflow losses.
    image_2d = plan.pixel_array.astype(float)

    # Rescaling grey scale between 0-255
    image_2d_scaled = (np.maximum(image_2d, 0) / image_2d.max()) * 255.0

    # Convert to uint
    image_2d_scaled = np.uint8(image_2d_scaled)

    return GrayscaleImage(image_2d_scaled, shape[1], shape[0])