Python scipy.ndimage.morphology.binary_dilation() Examples

The following are 25 code examples of scipy.ndimage.morphology.binary_dilation(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.ndimage.morphology, or try the search function.
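As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of how binary_dilation behaves with the structure and iterations arguments. Note that recent SciPy releases deprecate the scipy.ndimage.morphology namespace, so the sketch imports the same functions directly from scipy.ndimage.

import numpy as np
from scipy.ndimage import binary_dilation, generate_binary_structure

seed = np.zeros((5, 5), dtype=bool)
seed[2, 2] = True

# Default structuring element: a cross (4-connectivity in 2-D)
print(binary_dilation(seed).astype(int))

# Full 3x3 structuring element (8-connectivity), applied twice
struct = generate_binary_structure(2, 2)
print(binary_dilation(seed, structure=struct, iterations=2).astype(int))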
Example #1
Source File: audio.py    From Resemblyzer with Apache License 2.0
def trim_long_silences(wav):
    """
    Ensures that segments without voice in the waveform remain no longer than a 
    threshold determined by the VAD parameters in params.py.

    :param wav: the raw waveform as a numpy array of floats 
    :return: the same waveform with silences trimmed away (length <= original wav length)
    """
    # Compute the voice detection window size
    samples_per_window = (vad_window_length * sampling_rate) // 1000
    
    # Trim the end of the audio to have a multiple of the window size
    wav = wav[:len(wav) - (len(wav) % samples_per_window)]
    
    # Convert the float waveform to 16-bit mono PCM
    pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
    
    # Perform voice activation detection
    voice_flags = []
    vad = webrtcvad.Vad(mode=3)
    for window_start in range(0, len(wav), samples_per_window):
        window_end = window_start + samples_per_window
        voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
                                         sample_rate=sampling_rate))
    voice_flags = np.array(voice_flags)
    
    # Smooth the voice detection with a moving average
    def moving_average(array, width):
        array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
        ret = np.cumsum(array_padded, dtype=float)
        ret[width:] = ret[width:] - ret[:-width]
        return ret[width - 1:] / width
    
    audio_mask = moving_average(voice_flags, vad_moving_average_width)
    audio_mask = np.round(audio_mask).astype(bool)  # np.bool is removed in recent NumPy; the builtin bool is equivalent
    
    # Dilate the voiced regions
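    # A flat 1-D structuring element of length vad_max_silence_length + 1 keeps silent
    # windows that lie within roughly half that many windows of speech flagged as voiced,
    # so short pauses survive and only longer silences are trimmed.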
    audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
    audio_mask = np.repeat(audio_mask, samples_per_window)
    
    return wav[audio_mask] 
Example #2
Source File: __init__.py    From PyESAPI with MIT License
def validate_structure_mask(structure, mask, pts, margin=4):
    dilation_idx = np.where(binary_dilation(mask, iterations=margin))
    flat_pts = pts[dilation_idx]
    flat_mask = mask[dilation_idx]
    vv = VVector(0., 0., 0.)

    def tester(pt):
        vv.x = pt[0]
        vv.y = pt[1]
        vv.z = pt[2]
        return structure.IsPointInsideSegment(vv)

    mismatch_count = 0
    for i, p in enumerate(flat_pts):
        if flat_mask[i] != tester(p):
            mismatch_count += 1

    error = mismatch_count / len(flat_mask) * 100.0
    print("mask error (%):", error)
    assert error <= 0.05, "Masking error greater than 0.05 %" 
Example #3
Source File: peak_utils.py    From TractSeg with Apache License 2.0
def mask_and_normalize_peaks(peaks, tract_seg_path, bundles, dilation, nr_cpus=-1):
    """
    runtime TOM: 2min 40s  (~8.5GB)
    """
    def _process_bundle(idx, bundle):
        bundle_peaks = np.copy(peaks[:, :, :, idx * 3:idx * 3 + 3])  # [x, y, z, 3]
        img = nib.load(join(tract_seg_path, bundle + ".nii.gz"))
        mask, flip_axis = img_utils.flip_axis_to_match_MNI_space(img.get_data(), img.affine)
        mask = binary_dilation(mask, iterations=dilation).astype(np.uint8)  # [x, y, z]
        bundle_peaks[mask == 0] = 0
        bundle_peaks = normalize_peak_to_unit_length(bundle_peaks)
        return bundle_peaks

    nr_cpus = psutil.cpu_count() if nr_cpus == -1 else nr_cpus
    results_peaks = Parallel(n_jobs=nr_cpus)(delayed(_process_bundle)(idx, bundle)
                                             for idx, bundle in enumerate(bundles))

    results_peaks = np.array(results_peaks).transpose(1, 2, 3, 0, 4)
    s = results_peaks.shape
    results_peaks = results_peaks.reshape([s[0], s[1], s[2], s[3] * s[4]])
    return results_peaks 
Example #4
Source File: data_segmentation.py    From pytorch_connectomics with MIT License
def markInvalid(seg, iter_num=2, do_2d=True):
    # find invalid 
    # if do erosion(seg==0), then miss the border
    if do_2d:
        stel=np.array([[1,1,1], [1,1,1]]).astype(bool)
        if len(seg.shape)==2:
            out = binary_dilation(seg>0, structure=stel, iterations=iter_num)
            seg[out==0] = -1
        else: # save memory
            for z in range(seg.shape[0]):
                tmp = seg[z] # by reference
                out = binary_dilation(tmp>0, structure=stel, iterations=iter_num)
                tmp[out==0] = -1
    else:
        # the structuring element must have the same rank as seg (scipy raises otherwise),
        # so build it from seg.ndim rather than hard-coding a 2-D array
        stel = np.ones((3,) * seg.ndim, dtype=bool)
        out = binary_dilation(seg>0, structure=stel, iterations=iter_num)
        seg[out==0] = -1
    return seg 
Example #5
Source File: transforms.py    From novelty-detection with MIT License
def __call__(self, sample: tuple) -> tuple:
        X, Y, background = sample

        mask = np.uint8(np.sum(np.abs(np.int32(X) - background), axis=-1) > self.threshold)
        mask = np.expand_dims(mask, axis=-1)

        mask = np.stack([binary_dilation(mask_frame, iterations=5) for mask_frame in mask])

        X *= mask
        Y *= mask

        Y = np.concatenate((Y, mask), axis=-1)

        return X, Y 
Example #6
Source File: img_utils.py    From TractSeg with Apache License 2.0
def simple_brain_mask(data):
    """
    Simple brain mask (for peak image). Does not matter if has holes
    because for cropping anyways only take min and max.

    Args:
        data: peak image (x, y, z, 9)

    Returns:
        brain mask (x, y, z)
    """
    data_max = data.max(axis=3)
    mask = data_max > 0.01
    mask = binary_dilation(mask, iterations=1)
    return mask.astype(np.uint8) 
Example #7
Source File: trace.py    From rivuletpy with BSD 3-Clause "New" or "Revised" License
def _prep(self):
        self._nforeground = self._bimg.sum()        
        # Dilate bimg to make it less strict for the big gap criteria
        # It is needed since sometimes the tracing goes along the
        # boundary of the thin fibre in the binary img
        self._dilated_bimg = binary_dilation(self._bimg)

        if not self._silent:
            print('(2) --Boundary DT...')
        self._make_dt()
        if not self._silent:
            print('(3) --Fast Marching with %s quality...' % ('high' if self._quality else 'low'))
        self._fast_marching()
        if not self._silent:
            print('(4) --Compute Gradients...')
        self._make_grad()

        # Make copy of the timemap
        self._tt = self._t.copy()
        self._tt[self._bimg <= 0] = -2

        # Label all voxels of soma with -3
        self._tt[self._soma.mask > 0] = -3

        # For making a large tube to contain the last traced branch
        self._bb = np.zeros(shape=self._tt.shape) 
Example #8
Source File: prepare_for_submission.py    From brats17 with MIT License
def clean_contour(prob, c_input):
    # Smaller areas with lower prob are very likely to be false positives
    wt_mor = binary_dilation((c_input > 0).astype(np.float32), iterations=10)
    labels = measure.label(wt_mor)
    w_area = []
    for l in range(1, np.amax(labels) + 1):
        w_area.append(np.sum(prob[labels == l]))
    if len(w_area) > 0:
        max_area = np.amax(w_area)
        for l in range(len(w_area)):
            if w_area[l] < max_area / 2.0:
                c_input[labels == l + 1] = 0
    return c_input 
Example #9
Source File: img_utils.py    From TractSeg with Apache License 2.0
def dilate_binary_mask(file_in, file_out, dilation=2):
    img = nib.load(file_in)
    data = img.get_data()

    for i in range(dilation):
        data = binary_dilation(data)

    data = data > 0.5

    img_out = nib.Nifti1Image(data.astype(np.uint8), img.affine)
    nib.save(img_out, file_out) 
Example #10
Source File: prepare.py    From lung_nodule_detector with MIT License
def process_mask(mask):
    convex_mask = np.copy(mask)
    for i_layer in range(convex_mask.shape[0]):
        mask1  = np.ascontiguousarray(mask[i_layer])
        if np.sum(mask1)>0:
            mask2 = convex_hull_image(mask1)
            if np.sum(mask2)>1.5*np.sum(mask1):
                mask2 = mask1
        else:
            mask2 = mask1
        convex_mask[i_layer] = mask2
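    # generate_binary_structure(3, 1) is the 6-connected 3x3x3 cross, so 10 dilation
    # iterations grow the convex mask by roughly 10 voxels along each axis direction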
    struct = generate_binary_structure(3,1)  
    dilatedMask = binary_dilation(convex_mask,structure=struct,iterations=10) 
    return dilatedMask 
Example #11
Source File: prepare.py    From DeepLung with GNU General Public License v3.0
def process_mask(mask):
    convex_mask = np.copy(mask)
    for i_layer in range(convex_mask.shape[0]):
        mask1  = np.ascontiguousarray(mask[i_layer])
        if np.sum(mask1)>0:
            mask2 = convex_hull_image(mask1)
            if np.sum(mask2)>1.5*np.sum(mask1):
                mask2 = mask1
        else:
            mask2 = mask1
        convex_mask[i_layer] = mask2
    struct = generate_binary_structure(3,1)  
    dilatedMask = binary_dilation(convex_mask,structure=struct,iterations=10) 
    return dilatedMask 
Example #12
Source File: transforms.py    From novelty-detection with MIT License
def __call__(self, sample: tuple) -> tuple:
        X, Y, background = sample

        mask = np.uint8(np.sum(np.abs(np.int32(X) - background), axis=-1) > self.threshold)
        mask = np.expand_dims(mask, axis=-1)

        mask = np.stack([binary_dilation(mask_frame, iterations=5) for mask_frame in mask])

        X *= mask
        Y *= mask

        return X, Y 
Example #13
Source File: mesh.py    From plumo with BSD 3-Clause "New" or "Revised" License
def convex_hull (binary):
    swap_sequence = [(0, 1),  # 102
                     (0, 2),  # 201
                     (0, 2)]  # 102

    output = np.ndarray(binary.shape, dtype=binary.dtype)
    for swp1, swp2 in swap_sequence:
        N = binary.shape[0]
        print('shape', binary.shape)
        for i in range(N):
            contours = measure.find_contours(binary[i], 0.5)
            if len(contours) == 0:
                continue
            if len(contours) == 1:
                contour = contours[0]
            else:
                contour = np.vstack(contours)
            cc = np.zeros_like(contour, dtype=np.int32)
            cc[:,0] = contour[:, 1]
            cc[:,1] = contour[:, 0]
            hull = cv2.convexHull(cc)
            contour = hull.reshape((1, -1, 2)) 
            cv2.fillPoly(binary[i], contour, 1)
            #binary[i] = skimage.morphology.convex_hull_image(binary[i])
            pass
        print('swap', swp1, swp2)
        nb = np.swapaxes(binary, swp1, swp2)
        binary = np.ndarray(nb.shape, dtype=nb.dtype)
        binary[:,:] = nb[:,:]
        pass
    binary = np.swapaxes(binary, 0, 1)
    output[:,:] = binary[:,:]
    return output
    #binary = binary_dilation(output, iterations=dilate)
    #return binary 
Example #14
Source File: mesh.py    From plumo with BSD 3-Clause "New" or "Revised" License
def convex_hull (binary):
    swap_sequence = [(0, 1),  # 102
                     (0, 2),  # 201
                     (0, 2)]  # 102

    output = np.ndarray(binary.shape, dtype=binary.dtype)
    for swp1, swp2 in swap_sequence:
        N = binary.shape[0]
        print('shape', binary.shape)
        for i in range(N):
            contours = measure.find_contours(binary[i], 0.5)
            if len(contours) == 0:
                continue
            if len(contours) == 1:
                contour = contours[0]
            else:
                contour = np.vstack(contours)
            cc = np.zeros_like(contour, dtype=np.int32)
            cc[:,0] = contour[:, 1]
            cc[:,1] = contour[:, 0]
            hull = cv2.convexHull(cc)
            contour = hull.reshape((1, -1, 2)) 
            cv2.fillPoly(binary[i], contour, 1)
            #binary[i] = skimage.morphology.convex_hull_image(binary[i])
            pass
        print('swap', swp1, swp2)
        nb = np.swapaxes(binary, swp1, swp2)
        binary = np.ndarray(nb.shape, dtype=nb.dtype)
        binary[:,:] = nb[:,:]
        pass
    binary = np.swapaxes(binary, 0, 1)
    output[:,:] = binary[:,:]
    return output
    #binary = binary_dilation(output, iterations=dilate)
    #return binary 
Example #15
Source File: prepare.py    From DeepSEED-3D-ConvNets-for-Pulmonary-Nodule-Detection with MIT License
def process_mask(mask):
    convex_mask = np.copy(mask)
    for i_layer in range(convex_mask.shape[0]):
        mask1  = np.ascontiguousarray(mask[i_layer])
        if np.sum(mask1)>0:
            mask2 = convex_hull_image(mask1)
            if np.sum(mask2)>1.5*np.sum(mask1):
                mask2 = mask1
        else:
            mask2 = mask1
        convex_mask[i_layer] = mask2
    struct = generate_binary_structure(3,1)  
    dilatedMask = binary_dilation(convex_mask,structure=struct,iterations=10) 
    return dilatedMask 
Example #16
Source File: prepareLIDC.py    From DeepSEED-3D-ConvNets-for-Pulmonary-Nodule-Detection with MIT License
def process_mask(mask):
    convex_mask = np.copy(mask)
    for i_layer in range(convex_mask.shape[0]):
        mask1  = np.ascontiguousarray(mask[i_layer])
        if np.sum(mask1)>0:
            mask2 = convex_hull_image(mask1)
            if np.sum(mask2)>1.5*np.sum(mask1):
                mask2 = mask1
        else:
            mask2 = mask1
        convex_mask[i_layer] = mask2
    struct = generate_binary_structure(3,1)  
    dilatedMask = binary_dilation(convex_mask,structure=struct,iterations=10) 
    return dilatedMask 
Example #17
Source File: loss.py    From weakalign with MIT License
def __init__(self, geometric_model='affine', tps_grid_size=3, tps_reg_factor=0, h_matches=15, w_matches=15, use_conv_filter=False, dilation_filter=None, use_cuda=True, normalize_inlier_count=False, offset_factor=227/210):
        super(WeakInlierCount, self).__init__()
        self.normalize=normalize_inlier_count
        self.geometric_model = geometric_model
        self.geometricTnf = GeometricTnf(geometric_model=geometric_model,
                                         tps_grid_size=tps_grid_size,
                                         tps_reg_factor=tps_reg_factor,
                                         out_h=h_matches, out_w=w_matches,
                                         offset_factor = offset_factor,
                                         use_cuda=use_cuda)
        # define dilation filter
        if dilation_filter is None:
            dilation_filter = generate_binary_structure(2, 2)
        # define identity mask tensor (w,h are switched and will be permuted back later)
        mask_id = np.zeros((w_matches,h_matches,w_matches*h_matches))
        idx_list = list(range(0, mask_id.size, mask_id.shape[2]+1))
        mask_id.reshape((-1))[idx_list]=1
        mask_id = mask_id.swapaxes(0,1)
        # perform 2D dilation to each channel 
        if not use_conv_filter:
            if not (isinstance(dilation_filter,int) and dilation_filter==0):
                for i in range(mask_id.shape[2]):
                    mask_id[:,:,i] = binary_dilation(mask_id[:,:,i],structure=dilation_filter).astype(mask_id.dtype)
        else:
            for i in range(mask_id.shape[2]):
                flt = np.array([[1/16, 1/8, 1/16],
                                [1/8,  1/4, 1/8],
                                [1/16, 1/8, 1/16]])
                mask_id[:,:,i] = scipy.signal.convolve2d(mask_id[:,:,i], flt, mode='same', boundary='fill', fillvalue=0)
            
        # convert to PyTorch variable
        mask_id = Variable(torch.FloatTensor(mask_id).transpose(1,2).transpose(0,1).unsqueeze(0),requires_grad=False)
        self.mask_id = mask_id
        if use_cuda:
            self.mask_id = self.mask_id.cuda() 
Example #18
Source File: hmutils.py    From simnibs with GNU General Public License v3.0
def decouple_volumes(v1, v2, mode, se=None, iterations=1):
    """
    
    mode : {inner-from-outer, outer-from-inner, neighbors}
        inner-from-outer: this changes v1 by removing voxels
        outer-from-inner: this changes v2 by adding voxels
        neighbors: this changes v2 by removing voxels
    
    """
    assert mode in ["inner-from-outer","outer-from-inner","neighbors"]
    
    if isinstance(v1, str) and os.path.isfile(v1):
        v1 = nib.load(v1)
    assert isinstance(v1, nib.Nifti1Image) or isinstance(v1, nib.Nifti2Image)
    d1 = v1.get_data()
    if isinstance(v2, str) and os.path.isfile(v2):
        v2 = nib.load(v2)
    assert isinstance(v2, nib.Nifti1Image) or isinstance(v2, nib.Nifti2Image)
    d2 = v2.get_data()
    
    assert d1.ndim == d2.ndim
    
    
    if se is None:
        se = mrph.generate_binary_structure(d1.ndim,1)
    
    if mode == "inner-from-outer":
        # make v2/d2 the inner volume
        d1, d2 = d2, d1
        v1, v2 = v2, v1        
        d2 = d2 & mrph.binary_erosion(d1, se, iterations)
        
    if mode == "outer-from-inner":
        d2 = d2 | mrph.binary_dilation(d1, se, iterations)
        
    if mode == "neighbors":
        d2 = d2 & ~mrph.binary_dilation(d1, se, iterations)
    
    d2 = nib.Nifti1Image(d2, v2.affine, header=v2.header)
    d2.set_filename(v2.get_filename())
    return d2 
Example #19
Source File: missing_parts.py    From pytorch_connectomics with MIT License
def prepare_deform_slice(self, slice_shape, random_state):
        # grow slice shape by 2 x deformation strength
        grow_by = 2 * self.deformation_strength
        #print ('sliceshape: '+str(slice_shape[0])+' growby: '+str(grow_by)+ ' strength: '+str(deformation_strength))
        shape = (slice_shape[0] + grow_by, slice_shape[1] + grow_by)
        # randomly choose fixed x or fixed y with p = 1/2
        fixed_x = random_state.rand() < 0.5
        if fixed_x:
            x0, y0 = 0, np.random.randint(1, shape[1] - 2)
            x1, y1 = shape[0] - 1, np.random.randint(1, shape[1] - 2)
        else:
            x0, y0 = np.random.randint(1, shape[0] - 2), 0
            x1, y1 = np.random.randint(1, shape[0] - 2), shape[1] - 1

        ## generate the mask of the line that should be blacked out
        #print (shape)
        line_mask = np.zeros(shape, dtype='bool')
        rr, cc = line(x0, y0, x1, y1)
        line_mask[rr, cc] = 1

        # generate vectorfield pointing towards the line to compress the image
        # first we get the unit vector representing the line
        line_vector = np.array([x1 - x0, y1 - y0], dtype='float32')
        line_vector /= np.linalg.norm(line_vector)
        # next, we generate the normal to the line
        normal_vector = np.zeros_like(line_vector)
        normal_vector[0] = - line_vector[1]
        normal_vector[1] = line_vector[0]

        # make meshgrid
        x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
        # generate the vector field
        flow_x, flow_y = np.zeros(shape), np.zeros(shape)

        # find the 2 components where coordinates are bigger / smaller than the line
        # to apply normal vector in the correct direction
        components, n_components = label(np.logical_not(line_mask).view('uint8'))
        assert n_components == 2, "%i" % n_components
        neg_val = components[0, 0] if fixed_x else components[-1, -1]
        pos_val = components[-1, -1] if fixed_x else components[0, 0]

        flow_x[components == pos_val] = self.deformation_strength * normal_vector[1]
        flow_y[components == pos_val] = self.deformation_strength * normal_vector[0]
        flow_x[components == neg_val] = - self.deformation_strength * normal_vector[1]
        flow_y[components == neg_val] = - self.deformation_strength * normal_vector[0]

        # generate the flow fields
        flow_x, flow_y = (x + flow_x).reshape(-1, 1), (y + flow_y).reshape(-1, 1)

        # dilate the line mask
        line_mask = binary_dilation(line_mask, iterations=self.iterations) #default=10
        
        return flow_x, flow_y, line_mask 
Example #20
Source File: defect_augment.py    From gunpowder with MIT License
def __prepare_deform_slice(self, slice_shape):

        # grow slice shape by 2 x deformation strength
        grow_by = 2 * self.deformation_strength
        shape = (slice_shape[0] + grow_by, slice_shape[1] + grow_by)

        # randomly choose fixed x or fixed y with p = 1/2
        fixed_x = random.random() < .5
        if fixed_x:
            x0, y0 = 0, np.random.randint(1, shape[1] - 2)
            x1, y1 = shape[0] - 1, np.random.randint(1, shape[1] - 2)
        else:
            x0, y0 = np.random.randint(1, shape[0] - 2), 0
            x1, y1 = np.random.randint(1, shape[0] - 2), shape[1] - 1

        ## generate the mask of the line that should be blacked out
        line_mask = np.zeros(shape, dtype='bool')
        rr, cc = line(x0, y0, x1, y1)
        line_mask[rr, cc] = 1

        # generate vectorfield pointing towards the line to compress the image
        # first we get the unit vector representing the line
        line_vector = np.array([x1 - x0, y1 - y0], dtype='float32')
        line_vector /= np.linalg.norm(line_vector)
        # next, we generate the normal to the line
        normal_vector = np.zeros_like(line_vector)
        normal_vector[0] = - line_vector[1]
        normal_vector[1] = line_vector[0]

        # make meshgrid
        x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
        # generate the vector field
        flow_x, flow_y = np.zeros(shape), np.zeros(shape)

        # find the 2 components where coordinates are bigger / smaller than the line
        # to apply normal vector in the correct direction
        components, n_components = label(np.logical_not(line_mask).view('uint8'))
        assert n_components == 2, "%i" % n_components
        neg_val = components[0, 0] if fixed_x else components[-1, -1]
        pos_val = components[-1, -1] if fixed_x else components[0, 0]

        flow_x[components == pos_val] = self.deformation_strength * normal_vector[1]
        flow_y[components == pos_val] = self.deformation_strength * normal_vector[0]
        flow_x[components == neg_val] = - self.deformation_strength * normal_vector[1]
        flow_y[components == neg_val] = - self.deformation_strength * normal_vector[0]

        # generate the flow fields
        flow_x, flow_y = (x + flow_x).reshape(-1, 1), (y + flow_y).reshape(-1, 1)

        # dilate the line mask
        line_mask = binary_dilation(line_mask, iterations=10)

        return flow_x, flow_y, line_mask 
Example #21
Source File: compute_scores.py    From netwarp_public with BSD 3-Clause "New" or "Revised" License
def eval_seg_parallel(datatype, gt_label_folder, result_label_folder, imgname, trimap, count):
    max_label = 34
    ignore_label = 255

    tp = np.zeros((max_label))
    fp = np.zeros((max_label))
    fn = np.zeros((max_label))

    edge_tp = np.zeros((max_label))
    edge_fp = np.zeros((max_label))
    edge_fn = np.zeros((max_label))

    print(str(count) + ". Image Name: " + imgname)
    gt_file = gt_label_folder + imgname + '.png'
    gt_file = os.path.join(gt_label_folder, datatype.lower(), imgname.split('_')[0], imgname + '_gtFine_labelTrainIds.png')
    result_file = result_label_folder + imgname + '.png'

    gt_labels = np.array(Image.open(gt_file))
    gt_labels = get_reindexed_image(gt_labels)
    result_labels = np.array(Image.open(result_file))

    if (np.max(result_labels) > (max_label - 1) and np.max(result_labels)!=255):
        print('Result has invalid labels: ', np.max(result_labels))
    else:
        edge_mask = find_boundaries(gt_labels, connectivity=1)
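        # dilating the one-pixel boundary map with a trimap x trimap block of ones widens
        # it into a band roughly trimap pixels wide around every label edge; this band is
        # the evaluation region for the edge ("trimap") scores computed below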
        edge_mask = binary_dilation(edge_mask, np.ones((trimap, trimap)))
        edge_gt_labels = gt_labels.copy()
        edge_result_labels = result_labels.copy()
        edge_gt_labels[np.equal(edge_mask,0)] = ignore_label
        edge_result_labels[np.equal(edge_mask,0)] = ignore_label
        # For each class
        for class_id in range(0, max_label):
            class_gt = np.equal(gt_labels, class_id)
            class_result = np.equal(result_labels, class_id)
            # import pdb; pdb.set_trace();
            class_result[np.equal(gt_labels, ignore_label)] = 0
            tp[class_id] = tp[class_id] +\
                np.count_nonzero(class_gt & class_result)
            fp[class_id] = fp[class_id] +\
                np.count_nonzero(class_result & ~class_gt)
            fn[class_id] = fn[class_id] +\
                np.count_nonzero(~class_result & class_gt)

            edge_class_gt = np.equal(edge_gt_labels, class_id)
            edge_class_result = np.equal(edge_result_labels, class_id)
            # import pdb; pdb.set_trace();
            edge_class_result[np.equal(edge_gt_labels, ignore_label)] = 0
            edge_tp[class_id] = edge_tp[class_id] +\
                np.count_nonzero(edge_class_gt & edge_class_result)
            edge_fp[class_id] = edge_fp[class_id] +\
                np.count_nonzero(edge_class_result & ~edge_class_gt)
            edge_fn[class_id] = edge_fn[class_id] +\
                np.count_nonzero(~edge_class_result & edge_class_gt)
    return [tp, fp, fn, edge_tp,  edge_fp, edge_fn] 
Example #22
Source File: adsb3_cache_ft.py    From plumo with BSD 3-Clause "New" or "Revised" License
def extract (prob, fts, th=0.05, ext=2):
    if fts is not None:
        prob4 = np.reshape(prob, prob.shape + (1,))
        assert prob4.base is prob
        fts = np.clip(fts, 0, 6)
        fts *= prob4
    binary = prob > th
    k = int(round(ext / SPACING))
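    # assuming SPACING is the voxel size in millimetres, k dilation iterations grow the
    # thresholded region by roughly ext mm before connected components are extracted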
    binary = binary_dilation(binary, iterations=k)
    labels = measure.label(binary, background=0)
    boxes = measure.regionprops(labels)

    nodules = []
    dim = 2
    if fts is not None:
        dim = fts.shape[3]

    Z, Y, X = prob.shape
    for box in boxes:
        #print prob.shape, fts.shape
        z0, y0, x0, z1, y1, x1 = box.bbox
        #ft.append((z1-z0)*(y1-y0)*(x1-x0))
        prob_roi = prob[z0:z1,y0:y1,x0:x1]
        za, ya, xa, zz, zy, zx, yy, yx, xx = plumo.norm3d(prob_roi)
        zc = za + z0
        yc = ya + y0
        xc = xa + x0

        cov = np.array([[zz, zy, zx],
                        [zy, yy, yx],
                        [zx, yx, xx]], dtype=np.float32)
        eig, _ = np.linalg.eig(cov)
        #print zc, yc, xc, '------', (z0+z1)/2.0, (y0+y1)/2.0, (x0+x1)/2.0

        weight_sum = np.sum(prob_roi)
        UNIT = SPACING * SPACING * SPACING
        prob_sum = weight_sum * UNIT

        eig = sorted(list(eig), reverse=True)
        #print eig
        #print weight_sum, np.linalg.det(cov), eig
        #print za, ya, xa
        #print cov

        pos = (zc/Z, yc/Y, xc/X)

        if fts is None:
            one = [prob_sum, math.atan2(eig[0], eig[2])]
        else:
            fts_roi = fts[z0:z1,y0:y1,x0:x1,:]
            fts_sum = np.sum(fts_roi, axis=(0,1,2))
            one = list(fts_sum/weight_sum)
        nodules.append((prob_sum, pos, one))
        pass
    nodules = sorted(nodules, key=lambda x: -x[0])
    return dim, nodules 
Example #23
Source File: process.py    From plumo with BSD 3-Clause "New" or "Revised" License
def extract_nodules (prob, fts, th=0.05, ext=2):
    if fts is not None:
        prob4 = np.reshape(prob, prob.shape + (1,))
        assert prob4.base is prob
        fts = np.clip(fts, 0, 6)
        fts *= prob4
    binary = prob > th
    k = int(round(ext / SPACING))
    binary = binary_dilation(binary, iterations=k)
    labels = measure.label(binary, background=0)
    boxes = measure.regionprops(labels)

    nodules = []
    dim = 2
    if fts is not None:
        dim += fts.shape[3]

    Z, Y, X = prob.shape
    Z = 1.0 * Z
    Y = 1.0 * Y
    X = 1.0 * X
    for box in boxes:
        #print prob.shape, fts.shape
        z0, y0, x0, z1, y1, x1 = box.bbox
        #ft.append((z1-z0)*(y1-y0)*(x1-x0))
        prob_roi = prob[z0:z1,y0:y1,x0:x1]
        za, ya, xa, zz, zy, zx, yy, yx, xx = pyadsb3.norm3d(prob_roi)
        zc = za + z0
        yc = ya + y0
        xc = xa + x0

        cov = np.array([[zz, zy, zx],
                        [zy, yy, yx],
                        [zx, yx, xx]], dtype=np.float32)
        eig, _ = np.linalg.eig(cov)
        #print zc, yc, xc, '------', (z0+z1)/2.0, (y0+y1)/2.0, (x0+x1)/2.0

        weight_sum = np.sum(prob_roi)
        UNIT = SPACING * SPACING * SPACING
        prob_sum = weight_sum * UNIT

        eig = sorted(list(eig), reverse=True)

        pos = (zc/Z, yc/Y, xc/X)
        #box = (z0/Z, y0/Y, x0/X, z1/Z, y1/Y, x1/X)

        one = [prob_sum, math.atan2(eig[0], eig[2])]
        if fts is not None:
            fts_roi = fts[z0:z1,y0:y1,x0:x1,:]
            fts_sum = np.sum(fts_roi, axis=(0,1,2))
            one.extend(list(fts_sum/weight_sum))
        nodules.append((prob_sum, pos, one, box.bbox))
        pass
    nodules = sorted(nodules, key=lambda x: -x[0])
    return dim, nodules 
Example #24
Source File: api.py    From pyAFQ with BSD 2-Clause "Simplified" License
def _wm_mask(self, row, wm_fa_thresh=0.2):
        wm_mask_file = self._get_fname(row, '_wm_mask.nii.gz')
        if self.force_recompute or not op.exists(wm_mask_file):
            dwi_img = nib.load(row['dwi_file'])
            dwi_data = dwi_img.get_fdata()

            if 'seg_file' in row.index:
                # If we found a white matter segmentation in the
                # expected location:
                seg_img = nib.load(row['seg_file'])
                seg_data_orig = seg_img.get_fdata()
                # For different sets of labels, extract all the voxels that
                # have any of these values:
                wm_mask = np.sum(np.concatenate(
                    [(seg_data_orig == l)[..., None]
                     for l in self.wm_labels], -1), -1)

                # Resample to DWI data:
                wm_mask = np.round(reg.resample(wm_mask, dwi_data[..., 0],
                                                seg_img.affine,
                                                dwi_img.affine)).astype(int)
                meta = dict(source=row['seg_file'],
                            wm_labels=self.wm_labels)
            else:
                # Otherwise, we'll identify the white matter based on FA:
                fa_fname = self._dti_fa(row)
                dti_fa = nib.load(fa_fname).get_fdata()
                wm_mask = dti_fa > wm_fa_thresh
                meta = dict(source=fa_fname,
                            fa_threshold=wm_fa_thresh)

            # Dilate to be sure to reach the gray matter:
            wm_mask = binary_dilation(wm_mask) > 0

            self.log_and_save_nii(nib.Nifti1Image(wm_mask.astype(np.float32),
                                                  row['dwi_affine']),
                                  wm_mask_file)

            meta_fname = self._get_fname(row, '_wm_mask.json')
            afd.write_json(meta_fname, meta)

        return wm_mask_file 
Example #25
Source File: prepare.py    From DeepLung with GNU General Public License v3.0
def all_slice_analysis(bw, spacing, cut_num=0, vol_limit=[0.68, 8.2], area_th=6e3, dist_th=62):
    # in some cases, several top layers need to be removed first
    if cut_num > 0:
        bw0 = np.copy(bw)
        bw[-cut_num:] = False
    label = measure.label(bw, connectivity=1)
    # remove components access to corners
    mid = int(label.shape[2] / 2)
    bg_label = set([label[0, 0, 0], label[0, 0, -1], label[0, -1, 0], label[0, -1, -1], \
                    label[-1-cut_num, 0, 0], label[-1-cut_num, 0, -1], label[-1-cut_num, -1, 0], label[-1-cut_num, -1, -1], \
                    label[0, 0, mid], label[0, -1, mid], label[-1-cut_num, 0, mid], label[-1-cut_num, -1, mid]])
    for l in bg_label:
        label[label == l] = 0
        
    # select components based on volume
    properties = measure.regionprops(label)
    for prop in properties:
        if prop.area * spacing.prod() < vol_limit[0] * 1e6 or prop.area * spacing.prod() > vol_limit[1] * 1e6:
            label[label == prop.label] = 0
            
    # prepare a distance map for further analysis
    x_axis = np.linspace(-label.shape[1]/2+0.5, label.shape[1]/2-0.5, label.shape[1]) * spacing[1]
    y_axis = np.linspace(-label.shape[2]/2+0.5, label.shape[2]/2-0.5, label.shape[2]) * spacing[2]
    x, y = np.meshgrid(x_axis, y_axis)
    d = (x**2+y**2)**0.5
    vols = measure.regionprops(label)
    valid_label = set()
    # select components based on their area and distance to center axis on all slices
    for vol in vols:
        single_vol = label == vol.label
        slice_area = np.zeros(label.shape[0])
        min_distance = np.zeros(label.shape[0])
        for i in range(label.shape[0]):
            slice_area[i] = np.sum(single_vol[i]) * np.prod(spacing[1:3])
            min_distance[i] = np.min(single_vol[i] * d + (1 - single_vol[i]) * np.max(d))
        
        if np.average([min_distance[i] for i in range(label.shape[0]) if slice_area[i] > area_th]) < dist_th:
            valid_label.add(vol.label)
            
    bw = np.in1d(label, list(valid_label)).reshape(label.shape)
    
    # fill back the parts removed earlier
    if cut_num > 0:
        # bw1 is bw with removed slices, bw2 is a dilated version of bw, part of their intersection is returned as final mask
        bw1 = np.copy(bw)
        bw1[-cut_num:] = bw0[-cut_num:]
        bw2 = np.copy(bw)
        bw2 = scipy.ndimage.binary_dilation(bw2, iterations=cut_num)
        bw3 = bw1 & bw2
        label = measure.label(bw, connectivity=1)
        label3 = measure.label(bw3, connectivity=1)
        l_list = list(set(np.unique(label)) - {0})
        valid_l3 = set()
        for l in l_list:
            indices = np.nonzero(label==l)
            l3 = label3[indices[0][0], indices[1][0], indices[2][0]]
            if l3 > 0:
                valid_l3.add(l3)
        bw = np.in1d(label3, list(valid_l3)).reshape(label3.shape)
    
    return bw, len(valid_label)