Python skimage.measure.regionprops() Examples
The following are 30 code examples of skimage.measure.regionprops(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module skimage.measure, or try the search function.
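Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the array contents are made up) showing the typical label-then-regionprops pattern that most of these examples build on:

import numpy as np
from skimage import measure

# Tiny made-up binary image with two separate blobs.
image = np.zeros((8, 8), dtype=np.uint8)
image[1:3, 1:3] = 1
image[5:8, 4:7] = 1

# Label connected components, then measure per-region properties.
labels = measure.label(image, connectivity=1)
for region in measure.regionprops(labels):
    print(region.label, region.area, region.centroid, region.bbox)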
Example #1
Source File: cloudstatistics.py From typhon with MIT License | 10 votes |
def filter_cloudmask(cloudmask, threshold=1, connectivity=1):
    """Filter a given cloudmask for small cloud objects defined by their pixel number.

    Parameters:
        cloudmask (ndarray): 2d binary cloud mask (optional with NaNs).
        threshold (int): minimum pixel number of objects remaining in cloudmask.
        connectivity (int): Maximum number of orthogonal hops to consider
            a pixel/voxel as a neighbor (see :func:`skimage.measure.label`).

    Return:
        ndarray: filtered cloudmask without NaNs.
    """
    cloudmask[np.isnan(cloudmask)] = 0
    labels = measure.label(cloudmask, connectivity=connectivity)
    props = measure.regionprops(labels)
    area = [prop.area for prop in props]

    # Find objects < threshold pixel number, get their labels, set them to 0-clear.
    smallclouds = [t[0] for t in filter(lambda a: a[1] < threshold, enumerate(area, 1))]

    for label in smallclouds:
        cloudmask[labels == label] = 0

    return cloudmask
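For orientation, a hedged usage sketch with a made-up mask (assuming numpy and skimage.measure are imported as in the example):

import numpy as np

# Made-up mask: one isolated pixel, one 2x2 cloud, and a NaN pixel.
cloudmask = np.array([[1.0, 0.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0, 1.0],
                      [0.0, 0.0, 1.0, 1.0],
                      [np.nan, 0.0, 0.0, 0.0]])
filtered = filter_cloudmask(cloudmask, threshold=2, connectivity=1)
# The single-pixel cloud is removed; the 2x2 cloud is kept; the NaN becomes 0.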
Example #2
Source File: util.py From bnn with MIT License | 7 votes |
def centroids_of_connected_components(bitmap, threshold=0.05, rescale=1.0):
    # TODO: don't do raw (binary) threshold; instead use P(y) as weighting for centroid
    # e.g. https://arxiv.org/abs/1806.03413 sec 3.D
    # update: this didn't help much :/ centroid weighted by intensities moved only up
    # to a single pixel (guess centroids are already quite evenly dispersed)
    # see https://gist.github.com/matpalm/20a3974ceb7f632f935285262fac4e98
    # TODO: hunt down the x/y swap between PIL and label db :/

    # threshold
    mask = bitmap > threshold
    bitmap = np.zeros_like(bitmap)
    bitmap[mask] = 1.0
    # calc connected components
    all_labels = measure.label(bitmap)
    # return centroids
    centroids = []
    for region in measure.regionprops(label_image=all_labels):
        cx, cy = map(lambda p: int(p*rescale), (region.centroid[0], region.centroid[1]))
        centroids.append((cx, cy))
    return centroids
Example #3
Source File: mesh.py From plumo with BSD 3-Clause "New" or "Revised" License | 7 votes |
def segment_body(image, smooth=1, th=-300):
    blur = scipy.ndimage.filters.gaussian_filter(image, smooth, mode='constant')
    binary = np.array(blur < th, dtype=np.uint8)

    # body is a rough region covering human body
    body = np.zeros_like(binary)

    for i, sl in enumerate(binary):
        #H, W = sl.shape
        ll = measure.label(sl, background=1)   # connected components
        # biggest CC should be body
        pp = measure.regionprops(ll)
        boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0]  # label 0 is air
        boxes = sorted(boxes, key=lambda x: -x[0])
        if len(boxes) == 0:
            continue
        y0, x0, y1, x1 = boxes[0][1]
        body[i, y0:y1, x0:x1] = boxes[0][2]
        pass
    return body, None
Example #4
Source File: draw_ellipse.py From DeepVOG with GNU General Public License v3.0 | 6 votes |
def isolate_islands(prediction, threshold):
    bw = closing(prediction > threshold, square(3))
    labelled = label(bw)
    regions_properties = regionprops(labelled)
    max_region_area = 0
    select_region = 0
    for region in regions_properties:
        if region.area > max_region_area:
            max_region_area = region.area
            select_region = region
    output = np.zeros(labelled.shape)
    if select_region == 0:
        return output
    else:
        output[labelled == select_region.label] = 1
        return output

# input: output from bwperim -- 2D image with perimeter of the ellipse = 1
Example #5
Source File: geometric.py From vidaug with MIT License | 6 votes |
def _apply_segmentation(self, image, replace_samples, segments):
    nb_channels = image.shape[2]
    image_sp = np.copy(image)
    for c in range(nb_channels):
        # segments+1 here because otherwise regionprops always misses
        # the last label
        regions = measure.regionprops(segments + 1, intensity_image=image[..., c])
        for ridx, region in enumerate(regions):
            # with mod here, because slic can sometimes create more
            # superpixel than requested. replace_samples then does
            # not have enough values, so we just start over with the
            # first one again.
            if replace_samples[ridx % len(replace_samples)] == 1:
                mean_intensity = region.mean_intensity
                image_sp_c = image_sp[..., c]
                image_sp_c[segments == ridx] = mean_intensity

    return image_sp
Example #6
Source File: process_seg.py From spinalcordtoolbox with MIT License | 6 votes |
def _find_AP_and_RL_diameter(major_axis, minor_axis, orientation, dim):
    """
    This function checks the orientation of the fitted ellipse and assigns the major/minor axis to the
    appropriate dimension, right-left (RL) or antero-posterior (AP). It also multiplies by the pixel size in mm.
    :param major_axis: major ellipse axis length calculated by regionprops
    :param minor_axis: minor ellipse axis length calculated by regionprops
    :param orientation: orientation in degree. Ranges between [0, 90]
    :param dim: pixel size in mm.
    :return: diameter_AP, diameter_RL
    """
    if 0 <= orientation < 45.0:
        diameter_AP = minor_axis
        diameter_RL = major_axis
    else:
        diameter_AP = major_axis
        diameter_RL = minor_axis
    # Adjust with pixel size
    diameter_AP *= dim[0]
    diameter_RL *= dim[1]
    return diameter_AP, diameter_RL
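A short hedged illustration of calling this helper with made-up numbers (orientation below 45 degrees, 0.5 mm pixels):

# Made-up axis lengths in pixels and a 0.5 mm isotropic pixel size.
diameter_AP, diameter_RL = _find_AP_and_RL_diameter(
    major_axis=20.0, minor_axis=10.0, orientation=30.0, dim=(0.5, 0.5))
# orientation < 45, so AP = 10 * 0.5 = 5.0 mm and RL = 20 * 0.5 = 10.0 mm.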
Example #7
Source File: segmented_frame.py From pylot with Apache License 2.0 | 6 votes |
def get_traffic_sign_bounding_boxes(self, min_width=2, min_height=3):
    """Extracts traffic sign bounding boxes from the frame.

    Returns:
        list(:py:class:`~pylot.perception.detection.utils.BoundingBox2D`):
        Traffic sign bounding boxes.
    """
    assert self.encoding == 'carla', \
        'Not implemented on cityscapes encoding'
    # Set the pixels we are interested in to True.
    traffic_signs_frame = self._get_traffic_sign_pixels()
    # Extracts bounding box from frame.
    bboxes = []
    # Labels the connected segmented pixels.
    map_labeled = measure.label(traffic_signs_frame, connectivity=1)
    # Extract the regions out of the labeled frames.
    for region in measure.regionprops(map_labeled):
        x_min = region.bbox[1]
        x_max = region.bbox[3]
        y_min = region.bbox[0]
        y_max = region.bbox[2]
        # Filter the bboxes that are extremely small.
        if x_max - x_min > min_width and y_max - y_min > min_height:
            bboxes.append(BoundingBox2D(x_min, x_max, y_min, y_max))
    return bboxes
Example #8
Source File: analysis.py From gempy with GNU Lesser General Public License v3.0 | 6 votes |
def get_geobody_tops(rprops, geo_data=None):
    """Get the top vertical limit coordinate of geobodies (via bbox).

    Args:
        rprops (list): List of regionprops object for each unique region of the model.
            (skimage.measure._regionprops._RegionProperties object)
        geo_data (gempy.data_management.InputData):

    Returns:
        (dict): Dict with node labels as keys and geobody top coordinates as values.
    """
    if geo_data is None:
        return {rprop.label: rprop.bbox[5] for rprop in rprops}
    else:
        return {rprop.label: rprop.bbox[5] * geo_data.extent[5] / geo_data.resolution[2]
                for rprop in rprops}
Example #9
Source File: image_utils.py From acdc_segmenter with Apache License 2.0 | 6 votes |
def keep_largest_connected_components(mask):
    '''
    Keeps only the largest connected components of each label for a segmentation mask.
    '''
    out_img = np.zeros(mask.shape, dtype=np.uint8)

    for struc_id in [1, 2, 3]:
        binary_img = mask == struc_id
        blobs = measure.label(binary_img, connectivity=1)
        props = measure.regionprops(blobs)

        if not props:
            continue

        area = [ele.area for ele in props]
        largest_blob_ind = np.argmax(area)
        largest_blob_label = props[largest_blob_ind].label

        out_img[blobs == largest_blob_label] = struc_id

    return out_img
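A hedged usage sketch with a made-up mask (class labels 1-3, as the function assumes):

import numpy as np

# Made-up mask: class 1 appears as one speck and one larger blob.
mask = np.zeros((6, 6), dtype=np.uint8)
mask[0, 0] = 1        # spurious single-pixel blob of class 1
mask[2:5, 2:5] = 1    # larger blob of class 1
cleaned = keep_largest_connected_components(mask)
# Only the 3x3 blob of class 1 survives; the isolated pixel is removed.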
Example #10
Source File: mesh.py From plumo with BSD 3-Clause "New" or "Revised" License | 6 votes |
def segment_body(image, smooth=1, th=-300):
    blur = scipy.ndimage.filters.gaussian_filter(image, smooth, mode='constant')
    binary = np.array(blur < th, dtype=np.uint8)

    # body is a rough region covering human body
    body = np.zeros_like(binary)

    for i, sl in enumerate(binary):
        #H, W = sl.shape
        ll = measure.label(sl, background=1)   # connected components
        # biggest CC should be body
        pp = measure.regionprops(ll)
        boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0]  # label 0 is air
        boxes = sorted(boxes, key=lambda x: -x[0])
        if len(boxes) == 0:
            continue
        y0, x0, y1, x1 = boxes[0][1]
        body[i, y0:y1, x0:x1] = boxes[0][2]
        pass
    return body, None
Example #11
Source File: ct.py From pylinac with MIT License | 6 votes |
def _setup_geometry_rois(self):
    boxsize = self.geometry_roi_size_mm / self.mm_per_pixel
    xbounds = (int(self.phan_center.x - boxsize), int(self.phan_center.x + boxsize))
    ybounds = (int(self.phan_center.y - boxsize), int(self.phan_center.y + boxsize))
    geo_img = self.image[ybounds[0]:ybounds[1], xbounds[0]:xbounds[1]]
    larr, regionprops, num_roi = get_regions(geo_img, fill_holes=True, clear_borders=False)
    # check that there is at least 1 ROI
    if num_roi < 4:
        raise ValueError("Unable to locate the Geometric nodes")
    elif num_roi > 4:
        regionprops = sorted(regionprops, key=lambda x: x.filled_area, reverse=True)[:4]
    sorted_regions = sorted(regionprops, key=lambda x: (2*x.centroid[0] + x.centroid[1]))
    centers = [Point(r.weighted_centroid[1] + xbounds[0], r.weighted_centroid[0] + ybounds[0])
               for r in sorted_regions]
    # setup the geometric lines
    for name, order in self.geometry_roi_settings.items():
        self.lines[name] = GeometricLine(centers[order[0]], centers[order[1]],
                                         self.mm_per_pixel, self.scaling_tolerance)
Example #12
Source File: dsbowl_preprocess_2d.py From Kaggle-DSB with MIT License | 6 votes |
def generate_markers(image):
    #Creation of the internal Marker
    marker_internal = image < -400
    marker_internal = segmentation.clear_border(marker_internal)
    marker_internal_labels = measure.label(marker_internal)
    areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas.sort()
    if len(areas) > 2:
        for region in measure.regionprops(marker_internal_labels):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    marker_internal_labels[coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    #Creation of the external Marker
    external_a = ndimage.binary_dilation(marker_internal, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, iterations=55)
    marker_external = external_b ^ external_a
    #Creation of the Watershed Marker matrix
    marker_watershed = np.zeros(image.shape, dtype=np.int)
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    return marker_internal, marker_external, marker_watershed
Example #13
Source File: LUNA_3d_merge_preproc.py From Kaggle-DSB with MIT License | 6 votes |
def generate_markers(image):
    #Creation of the internal Marker
    marker_internal = image < -400
    marker_internal = segmentation.clear_border(marker_internal)
    marker_internal_labels = measure.label(marker_internal)
    areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas.sort()
    if len(areas) > 2:
        for region in measure.regionprops(marker_internal_labels):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    marker_internal_labels[coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    #Creation of the external Marker
    external_a = ndimage.binary_dilation(marker_internal, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, iterations=55)
    marker_external = external_b ^ external_a
    #Creation of the Watershed Marker matrix
    marker_watershed = np.zeros(image.shape, dtype=np.int)
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    return marker_internal, marker_external, marker_watershed
Example #14
Source File: submit.py From dsb2018_topcoders with MIT License | 6 votes |
def postprocess_victor(pred):
    av_pred = pred / 255.
    av_pred = av_pred[..., 2] * (1 - av_pred[..., 1])
    av_pred = 1 * (av_pred > 0.5)
    av_pred = av_pred.astype(np.uint8)

    y_pred = measure.label(av_pred, neighbors=8, background=0)
    props = measure.regionprops(y_pred)
    for i in range(len(props)):
        if props[i].area < 12:
            y_pred[y_pred == i + 1] = 0
    y_pred = measure.label(y_pred, neighbors=8, background=0)

    nucl_msk = (255 - pred[..., 2])
    nucl_msk = nucl_msk.astype('uint8')
    y_pred = watershed(nucl_msk, y_pred, mask=((pred[..., 2] > 80)), watershed_line=True)
    return y_pred

# test_dir = r'C:\dev\dsbowl\results_test\bowl_remap3\merged'
# borders_dir = r'C:\dev\dsbowl\results_test\bowl_remap_border2\merged'
Example #15
Source File: preproc_utils.py From Kaggle-DSB with MIT License | 6 votes |
def generate_markers(image):
    #Creation of the internal Marker
    marker_internal = image < -400
    marker_internal = segmentation.clear_border(marker_internal)
    marker_internal_labels = measure.label(marker_internal)
    areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas.sort()
    if len(areas) > 2:
        for region in measure.regionprops(marker_internal_labels):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    marker_internal_labels[coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    #Creation of the external Marker
    external_a = ndimage.binary_dilation(marker_internal, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, iterations=55)
    marker_external = external_b ^ external_a
    #Creation of the Watershed Marker matrix
    marker_watershed = np.zeros(image.shape, dtype=np.int)
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    return marker_internal, marker_external, marker_watershed
Example #16
Source File: generate_polygons.py From SpaceNet_Off_Nadir_Solutions with Apache License 2.0 | 6 votes |
def label_mask(pred, main_threshold=0.3, seed_threshold=0.7, w_pixel_t=20, pixel_t=100):
    av_pred = pred / 255.
    av_pred = av_pred[..., 0] * (1 - av_pred[..., 2])
    av_pred = 1 * (av_pred > seed_threshold)
    av_pred = av_pred.astype(np.uint8)

    y_pred = measure.label(av_pred, neighbors=8, background=0)
    props = measure.regionprops(y_pred)
    for i in range(len(props)):
        if props[i].area < w_pixel_t:
            y_pred[y_pred == i + 1] = 0
    y_pred = measure.label(y_pred, neighbors=8, background=0)

    nucl_msk = (255 - pred[..., 0])
    nucl_msk = nucl_msk.astype('uint8')
    y_pred = watershed(nucl_msk, y_pred, mask=(pred[..., 0] > main_threshold * 255),
                       watershed_line=True)

    props = measure.regionprops(y_pred)
    for i in range(len(props)):
        if props[i].area < pixel_t:
            y_pred[y_pred == i + 1] = 0
    y_pred = measure.label(y_pred, neighbors=8, background=0)
    return y_pred
Example #17
Source File: utils.py From PartiallyReversibleUnet with BSD 3-Clause "New" or "Revised" License | 6 votes |
def keep_largest_connected_components(mask):
    '''
    Keeps only the largest connected components of each label for a segmentation mask.
    '''
    out_img = np.zeros(mask.shape, dtype=np.uint8)

    for struc_id in [1, 2, 3]:
        binary_img = mask == struc_id
        blobs = measure.label(binary_img, connectivity=1)
        props = measure.regionprops(blobs)

        if not props:
            continue

        area = [ele.area for ele in props]
        largest_blob_ind = np.argmax(area)
        largest_blob_label = props[largest_blob_ind].label

        out_img[blobs == largest_blob_label] = struc_id

    return out_img
Example #18
Source File: data_loader.py From 3D-RU-Net with GNU General Public License v3.0 | 6 votes |
def MaxBodyBox(input):
    Otsu = filters.threshold_otsu(input[input.shape[0]//2])
    Seg = np.zeros(input.shape)
    Seg[input >= Otsu] = 255
    Seg = Seg.astype(np.int)
    ConnectMap = label(Seg, connectivity=2)
    Props = regionprops(ConnectMap)
    Area = np.zeros([len(Props)])
    Area = []
    Bbox = []
    for j in range(len(Props)):
        Area.append(Props[j]['area'])
        Bbox.append(Props[j]['bbox'])
    Area = np.array(Area)
    Bbox = np.array(Bbox)
    argsort = np.argsort(Area)
    Area = Area[argsort]
    Bbox = Bbox[argsort]
    Area = Area[::-1]
    Bbox = Bbox[::-1, :]
    MaximumBbox = Bbox[0]
    return Otsu, MaximumBbox
Example #19
Source File: preprocess.py From kaggle-aptos2019-blindness-detection with MIT License | 6 votes |
def scale_radius(src, img_size, padding=False):
    x = src[src.shape[0] // 2, ...].sum(axis=1)
    r = (x > x.mean() / 10).sum() // 2
    yx = src.sum(axis=2)
    region_props = measure.regionprops((yx > yx.mean() / 10).astype('uint8'))
    yc, xc = np.round(region_props[0].centroid).astype('int')
    x1 = max(xc - r, 0)
    x2 = min(xc + r, src.shape[1] - 1)
    y1 = max(yc - r, 0)
    y2 = min(yc + r, src.shape[0] - 1)
    dst = src[y1:y2, x1:x2]
    dst = cv2.resize(dst, dsize=None, fx=img_size/(2*r), fy=img_size/(2*r))
    if padding:
        pad_x = (img_size - dst.shape[1]) // 2
        pad_y = (img_size - dst.shape[0]) // 2
        dst = np.pad(dst, ((pad_y, pad_y), (pad_x, pad_x), (0, 0)), 'constant')
    return dst
Example #20
Source File: crop_transform.py From 3D-CNNs-for-Liver-Classification with Apache License 2.0 | 5 votes |
def save_max_objects(img):
    labels = measure.label(img)  # returns the labeled img array
    jj = measure.regionprops(labels)  # properties of each connected component; note that jj does not include the background component
    # is_del = False
    # print(len(jj))
    if len(jj) == 1:
        out = img
        # is_del = False
    else:
        # decide which region to keep (original comment: judge by the distance to the centroid)
        num = labels.max()  # number of connected components
        del_array = np.array([0] * (num + 1))  # array with one entry per component, marking regions to keep/delete (labels start at 0, so the count needs +1)
        for k in range(num):  # TODO: this raises an error if the image is completely black
            if k == 0:
                initial_area = jj[0].area
                save_index = 1  # initially keep the first connected component
            else:
                k_area = jj[k].area  # convert the tuple into an array
                if initial_area < k_area:
                    initial_area = k_area
                    save_index = k + 1
        del_array[save_index] = 1
        del_mask = del_array[labels]
        out = img * del_mask
        # is_del = True
    return out
Example #21
Source File: ct.py From pylinac with MIT License | 5 votes |
def phan_center(self):
    """Determine the location of the center of the phantom.

    The image is analyzed to see if:
    1) the CatPhan is even in the image (if there were any ROIs detected)
    2) an ROI is within the size criteria of the catphan
    3) the ROI area that is filled compared to the bounding box area is close to that of a circle

    Raises
    ------
    ValueError
        If any of the above conditions are not met.
    """
    # convert the slice to binary and label ROIs
    edges = filters.scharr(self.image.as_type(np.float))
    if np.max(edges) < 0.1:
        raise ValueError("Unable to locate Catphan")
    larr, regionprops, num_roi = get_regions(self, fill_holes=True, threshold='mean')
    # check that there is at least 1 ROI
    if num_roi < 1 or num_roi is None:
        raise ValueError("Unable to locate the CatPhan")
    catphan_region = sorted(regionprops, key=lambda x: np.abs(x.filled_area - self.catphan_size))[0]
    if (self.catphan_size * 1.2 < catphan_region.filled_area) or (catphan_region.filled_area < self.catphan_size / 1.2):
        raise ValueError("Unable to locate Catphan")
    center_pixel = catphan_region.centroid
    return Point(center_pixel[1], center_pixel[0])
Example #22
Source File: planar_imaging.py From pylinac with MIT License | 5 votes |
def _get_canny_regions(self, sigma=2, percentiles=(0.001, 0.01)):
    """Compute the canny edges of the image and return the connected regions found."""
    # copy, filter, and ground the image
    img_copy = copy.copy(self.image)
    img_copy.filter(kind='gaussian', size=sigma)
    img_copy.ground()

    # compute the canny edges with very low thresholds (detects nearly everything)
    lo_th, hi_th = np.percentile(img_copy, percentiles)
    c = feature.canny(img_copy, low_threshold=lo_th, high_threshold=hi_th)

    # label the canny edge regions
    labeled = measure.label(c)
    regions = measure.regionprops(labeled, intensity_image=img_copy)
    return regions
Example #23
Source File: ct.py From pylinac with MIT License | 5 votes |
def get_regions(slice_or_arr, fill_holes=False, clear_borders=True, threshold='otsu'):
    """Get the skimage regions of a black & white image."""
    if threshold == 'otsu':
        thresmeth = filters.threshold_otsu
    elif threshold == 'mean':
        thresmeth = np.mean
    if isinstance(slice_or_arr, Slice):
        edges = filters.scharr(slice_or_arr.image.array.astype(np.float))
        center = slice_or_arr.image.center
    elif isinstance(slice_or_arr, np.ndarray):
        edges = filters.scharr(slice_or_arr.astype(np.float))
        center = (int(edges.shape[1]/2), int(edges.shape[0]/2))
    edges = filters.gaussian(edges, sigma=1)
    if isinstance(slice_or_arr, Slice):
        box_size = 100/slice_or_arr.mm_per_pixel
        thres_img = edges[int(center.y-box_size):int(center.y+box_size),
                          int(center.x-box_size):int(center.x+box_size)]
        thres = thresmeth(thres_img)
    else:
        thres = thresmeth(edges)
    bw = edges > thres
    if clear_borders:
        segmentation.clear_border(bw, buffer_size=int(max(bw.shape)/50), in_place=True)
    if fill_holes:
        bw = ndimage.binary_fill_holes(bw)
    labeled_arr, num_roi = measure.label(bw, return_num=True)
    regionprops = measure.regionprops(labeled_arr, edges)
    return labeled_arr, regionprops, num_roi
Example #24
Source File: Utils.py From BEAL with MIT License | 5 votes |
def get_largest_fillhole(binary):
    label_image = label(binary)
    regions = regionprops(label_image)
    area_list = []
    for region in regions:
        area_list.append(region.area)
    if area_list:
        idx_max = np.argmax(area_list)
        binary[label_image != idx_max + 1] = 0
    return scipy.ndimage.binary_fill_holes(np.asarray(binary).astype(int))
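A hedged illustration with a made-up binary map (assuming numpy, scipy, and the skimage imports used by the example):

import numpy as np

# Made-up binary map: a large blob with an interior hole plus a stray speck.
binary = np.zeros((7, 7), dtype=np.uint8)
binary[1:6, 1:6] = 1
binary[3, 3] = 0      # hole inside the large blob
binary[0, 6] = 1      # stray speck that gets discarded
filled = get_largest_fillhole(binary)
# Only the largest region is kept, and its interior hole is filled.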
Example #25
Source File: demo.py From Recursive-Cascaded-Networks with MIT License | 5 votes |
def auto_liver_mask(vol, ths=[(80, 140), (110, 160), (70, 90), (60, 80), (50, 70), (40, 60), (30, 50), (20, 40), (10, 30), (140, 180), (160, 200)]):
    vol = filters.gaussian(vol, sigma=2, preserve_range=True)
    mask = np.zeros_like(vol, dtype=np.bool)
    max_area = 0
    for th_lo, th_hi in ths:
        print(th_lo, th_hi)
        bw = np.ones_like(vol, dtype=np.bool)
        bw[vol < th_lo] = 0
        bw[vol > th_hi] = 0
        if np.sum(bw) <= max_area:
            continue
        with concurrent.futures.ProcessPoolExecutor(8) as executor:
            jobs = list(range(bw.shape[-1]))
            args1 = [bw[:, :, z] for z in jobs]
            args2 = [morphology.disk(35) for z in jobs]
            for idx, ret in tqdm.tqdm(zip(jobs, executor.map(filters.median, args1, args2)), total=len(jobs)):
                bw[:, :, jobs[idx]] = ret
        # for z in range(bw.shape[-1]):
        #     bw[:, :, z] = filters.median(bw[:, :, z], morphology.disk(35))
        if np.sum(bw) <= max_area:
            continue
        labeled_seg = measure.label(bw, connectivity=1)
        regions = measure.regionprops(labeled_seg)
        max_region = max(regions, key=lambda x: x.area)
        if max_region.area <= max_area:
            continue
        max_area = max_region.area
        mask = labeled_seg == max_region.label
    assert max_area > 0, 'Failed to find the liver area!'
    return mask
Example #26
Source File: analysis.py From gempy with GNU Lesser General Public License v3.0 | 5 votes |
def get_geobody_volume(rprops, geo_data=None):
    """Compute voxel counts per unique integer body in given block model

    Args:
        rprops (list): List of regionprops object for each unique region of the model.
            (skimage.measure._regionprops._RegionProperties object)

    Returns:
        (dict): Dict with node labels as keys and geobody volume values.
    """
    if geo_data is None:
        return {rprop.label: rprop.area for rprop in rprops}
    else:
        return {rprop.label: rprop.area * np.product(geo_data.extent[1::2] / geo_data.resolution)
                for rprop in rprops}
Example #27
Source File: classify_nodes.py From PyTorch-Luna16 with Apache License 2.0 | 5 votes |
def getRegionFromMap(slice_npy):
    thr = np.where(slice_npy > np.mean(slice_npy), 0., 1.0)
    label_image = label(thr)
    labels = label_image.astype(int)
    regions = regionprops(labels)
    return regions
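A hedged usage sketch with a made-up slice (the function labels the pixels at or below the slice mean; numpy and the skimage label/regionprops imports are assumed as in the example's module):

import numpy as np

# Made-up slice: the darker (below-mean) pixels form one connected region.
slice_npy = np.array([[0.1, 0.9, 0.9],
                      [0.1, 0.1, 0.9],
                      [0.1, 0.1, 0.1]])
regions = getRegionFromMap(slice_npy)
print(len(regions), regions[0].area)   # one region of six pixels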
Example #28
Source File: prepare.py From DeepLung with GNU General Public License v3.0 | 5 votes |
def binarize_per_slice(image, spacing, intensity_th=-600, sigma=1, area_th=30, eccen_th=0.99, bg_patch_size=10):
    bw = np.zeros(image.shape, dtype=bool)

    # prepare a mask, with all corner values set to nan
    image_size = image.shape[1]
    grid_axis = np.linspace(-image_size/2+0.5, image_size/2-0.5, image_size)
    x, y = np.meshgrid(grid_axis, grid_axis)
    d = (x**2 + y**2)**0.5
    nan_mask = (d < image_size/2).astype(float)
    nan_mask[nan_mask == 0] = np.nan
    for i in range(image.shape[0]):
        # Check if corner pixels are identical; if so, apply the circular nan mask to the slice before Gaussian filtering
        if len(np.unique(image[i, 0:bg_patch_size, 0:bg_patch_size])) == 1:
            current_bw = scipy.ndimage.filters.gaussian_filter(np.multiply(image[i].astype('float32'), nan_mask), sigma, truncate=2.0) < intensity_th
        else:
            current_bw = scipy.ndimage.filters.gaussian_filter(image[i].astype('float32'), sigma, truncate=2.0) < intensity_th

        # select proper components
        label = measure.label(current_bw)
        properties = measure.regionprops(label)
        valid_label = set()
        for prop in properties:
            if prop.area * spacing[1] * spacing[2] > area_th and prop.eccentricity < eccen_th:
                valid_label.add(prop.label)
        current_bw = np.in1d(label, list(valid_label)).reshape(label.shape)
        bw[i] = current_bw

    return bw
Example #29
Source File: step1.py From DeepSEED-3D-ConvNets-for-Pulmonary-Nodule-Detection with MIT License | 5 votes |
def binarize_per_slice(image, spacing, intensity_th=-600, sigma=1, area_th=30, eccen_th=0.99, bg_patch_size=10):
    bw = np.zeros(image.shape, dtype=bool)

    # prepare a mask, with all corner values set to nan
    image_size = image.shape[1]
    grid_axis = np.linspace(-image_size/2+0.5, image_size/2-0.5, image_size)
    x, y = np.meshgrid(grid_axis, grid_axis)
    d = (x**2 + y**2)**0.5
    nan_mask = (d < image_size/2).astype(float)
    nan_mask[nan_mask == 0] = np.nan
    for i in range(image.shape[0]):
        # Check if corner pixels are identical; if so, apply the circular nan mask to the slice before Gaussian filtering
        if len(np.unique(image[i, 0:bg_patch_size, 0:bg_patch_size])) == 1:
            current_bw = scipy.ndimage.filters.gaussian_filter(np.multiply(image[i].astype('float32'), nan_mask), sigma, truncate=2.0) < intensity_th
        else:
            current_bw = scipy.ndimage.filters.gaussian_filter(image[i].astype('float32'), sigma, truncate=2.0) < intensity_th

        # select proper components
        label = measure.label(current_bw)
        properties = measure.regionprops(label)
        valid_label = set()
        for prop in properties:
            if prop.area * spacing[1] * spacing[2] > area_th and prop.eccentricity < eccen_th:
                valid_label.add(prop.label)
        current_bw = np.in1d(label, list(valid_label)).reshape(label.shape)
        bw[i] = current_bw

    return bw
Example #30
Source File: binary_mask.py From starfish with MIT License | 5 votes |
def from_label_image(cls, label_image: LabelImage) -> "BinaryMaskCollection":
    """Creates binary masks from a label image.  Masks are cropped to the smallest size that
    contains the non-zero values, but pixel and physical coordinates ticks are retained.  Masks
    extracted from BinaryMaskCollections will be cropped.  To extract masks sized to the
    original label image, use :py:meth:`starfish.BinaryMaskCollection.uncropped_mask`.

    Parameters
    ----------
    label_image : LabelImage
        LabelImage to extract binary masks from.

    Returns
    -------
    masks : BinaryMaskCollection
        Masks generated from the label image.
    """
    props = regionprops(label_image.xarray.data)

    pixel_ticks = {
        axis.value: label_image.xarray.coords[axis.value]
        for axis, _ in zip(*_get_axes_names(label_image.xarray.ndim))
        if axis.value in label_image.xarray.coords
    }
    physical_ticks = {
        coord.value: label_image.xarray.coords[coord.value]
        for _, coord in zip(*_get_axes_names(label_image.xarray.ndim))
        if coord.value in label_image.xarray.coords
    }
    masks: Sequence[MaskData] = [
        MaskData(prop.image, prop.bbox[:label_image.xarray.ndim], prop)
        for prop in props
    ]
    log = deepcopy(label_image.log)

    return cls(
        pixel_ticks,
        physical_ticks,
        masks,
        log,
    )