Python tables.File() Examples

The following are 30 code examples of tables.File(), drawn from the tierpsy-tracker project. The source file for each example is listed above its code. You may also want to check out the other available functions and classes of the tables module.
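All of the examples open an HDF5 file by passing a path and a mode to tables.File(), usually as a context manager, and then read or write datasets through get_node(). Below is a minimal sketch of that pattern; the file path and node name are placeholders, not taken from any of the examples.

import tables

# Placeholder file and node names, for illustration only.
with tables.File('example.hdf5', 'r') as fid:
    if '/mask' in fid:
        # read the whole dataset into memory as a numpy array
        data = fid.get_node('/mask')[:]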
Example #1
Source File: getFilteredSkels.py    From tierpsy-tracker with MIT License
def _addMissingFields(skeletons_file):
    '''
    Add missing fields that might have not been calculated before in older videos.
    '''
    with tables.File(skeletons_file, 'r+') as fid:
        if '/contour_area' not in fid:
            contour_side1 = fid.get_node('/contour_side1')[:]
            contour_side2 = fid.get_node('/contour_side2')[:]
            cnt_area = _h_calAreaArray(contour_side1, contour_side2)
            fid.create_carray('/', "contour_area", obj=cnt_area, filters=TABLE_FILTERS)

        if '/width_midbody' not in fid:
            midbody_ind = (17, 32)
            contour_width = fid.get_node('/contour_width')[:]
            width_midbody = np.median(contour_width[:, midbody_ind[0]:midbody_ind[1]+1], axis=1)
            fid.create_carray('/', "width_midbody", obj=width_midbody, filters=TABLE_FILTERS) 
Example #2
Source File: readVideoHDF5.py    From tierpsy-tracker with MIT License
def __init__(self, fileName, full_img_period=np.inf):
        # to be used when added to the plugin
        self.vid_frame_pos = []
        self.vid_time_pos = []

        try:
            self.fid = tables.File(fileName, 'r')
            self.dataset = self.fid.get_node('/mask')
        except Exception:
            raise OSError('Could not read the /mask dataset from {}'.format(fileName))

        self.tot_frames = self.dataset.shape[0]

        self.width = self.dataset.shape[2]
        self.height = self.dataset.shape[1]
        self.dtype = self.dataset.dtype
        
        self.tot_pix = self.height * self.width

        # initialize pointer for frames
        self.curr_frame = -1

        # how often we get a full frame
        self.full_img_period = full_img_period 
Example #3
Source File: findStageMovement.py    From tierpsy-tracker with MIT License
def getFrameDiffVar(masked_file, progress_refresh_rate_s=100):
    base_name = get_base_name(masked_file)
    progress_prefix = '{} Calculating variance of the difference between frames.'.format(base_name)
    
    with tables.File(masked_file, 'r') as fid:
        masks = fid.get_node('/mask')

        tot, w, h = masks.shape
        progress_time = TimeCounter(progress_prefix, tot)
        fps = read_fps(masked_file, dflt=25)
        progress_refresh_rate = int(round(fps*progress_refresh_rate_s))

        img_var_diff = np.zeros(tot-1)
        frame_prev = masks[0]
        for ii in range(1, tot):
            frame_current = masks[ii]
            img_var_diff[ii-1] = get_mask_diff_var(frame_current, frame_prev)
            frame_prev = frame_current

            if ii % progress_refresh_rate == 0:
                print_flush(progress_time.get_str(ii))

        if tot>1:
            print_flush(progress_time.get_str(ii))
    return img_var_diff 
Example #4
Source File: MWTrackerViewer.py    From tierpsy-tracker with MIT License
def updateSkelFile(self, skeletons_file):

        super().updateSkelFile(skeletons_file)
        if not self.skeletons_file or self.trajectories_data is None:
            self.food_coordinates = None
            return

        with tables.File(self.skeletons_file, 'r') as fid:
            if '/food_cnt_coord' not in fid:
                self.food_coordinates = None
                self.ui.checkBox_showFood.setEnabled(False)
            else:
                #change from microns to pixels
                self.food_coordinates = fid.get_node('/food_cnt_coord')[:]
                self.food_coordinates /= self.microns_per_pixel
                
                self.ui.checkBox_showFood.setEnabled(True) 
Example #5
Source File: check_default_attrs.py    From tierpsy-tracker with MIT License
def change_attrs(fname, field_name):
    print(os.path.basename(fname))
    read_unit_conversions(fname)
    with tables.File(fname, 'r+') as fid:
        group_to_save = fid.get_node(field_name)
        set_unit_conversions(group_to_save, 
                             expected_fps=expected_fps, 
                             microns_per_pixel=microns_per_pixel)
        
    read_unit_conversions(fname)


#for fname in masked_files:
#    change_attrs(fname, '/mask')
#for fname in skeletons_files:
#    change_attrs(fname, '/trajectories_data') 
Example #6
Source File: helperIterROI.py    From tierpsy-tracker with MIT License
def getROIfromInd(masked_file, trajectories_data, frame_number, worm_index, roi_size=-1):
    good = (trajectories_data['frame_number']==frame_number) & (trajectories_data['worm_index_joined']==worm_index)
    row = trajectories_data[good]
    if len(row) < 1:
        return None
    
    assert len(row) == 1
    
    row = row.iloc[0]
    
    with tables.File(masked_file, 'r') as fid:
        img_data = fid.get_node('/mask')
        img = img_data[frame_number]

    if roi_size <= 0:
        roi_size = row['roi_size']
    worm_roi, roi_corner = getWormROI(img, row['coord_x'], row['coord_y'], roi_size)
    return row, worm_roi, roi_corner 
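getROIfromInd returns None when no row matches the requested frame and worm index, so callers should check for that before unpacking. Below is a hypothetical usage sketch; the file names and indices are placeholders, and trajectories_data is loaded the same way as in Example #18.

import pandas as pd

# Placeholder paths and indices, for illustration only.
with pd.HDFStore('example_skeletons.hdf5', 'r') as store:
    trajectories_data = store['/trajectories_data']

result = getROIfromInd('example_mask.hdf5', trajectories_data,
                       frame_number=0, worm_index=1)
if result is not None:
    row, worm_roi, roi_corner = result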
Example #7
Source File: posture_features.py    From tierpsy-tracker with MIT License
def load_eigen_worms():
    """
    Load the eigen_worms, which are stored in a Matlab data file

    The eigenworms were computed by the Schafer lab based on N2 worms

    Returns
    ----------
    eigen_worms: [7 x 48]

    From http://stackoverflow.com/questions/50499/

    """

    eigen_worm_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        config.EIGENWORM_FILE)

    with tables.File(eigen_worm_file_path, 'r') as h:
        eigen_worms = h.get_node('/eigenWorms')[:]
    
    return np.transpose(eigen_worms) 
Example #8
Source File: getAdditionalData.py    From tierpsy-tracker with MIT License
def storeAdditionalDataSW(video_file, masked_image_file):
    assert(os.path.exists(video_file))
    assert(os.path.exists(masked_image_file))

    info_file, stage_file = getAdditionalFiles(video_file)

    assert(os.path.exists(video_file))
    assert(os.path.exists(stage_file))

    # store data
    storeXMLInfo(info_file, masked_image_file)
    storeStageData(stage_file, masked_image_file)

    with tables.File(masked_image_file, 'r+') as mask_fid:
        mask_fid.get_node('/mask').attrs['has_finished'] = 2


# DEPRECATED 
Example #9
Source File: file_processing.py    From tierpsy-tracker with MIT License
def save_modified_table(file_name, modified_table, table_name):
    tab_recarray = modified_table.to_records(index=False)
    with tables.File(file_name, "r+") as fid:
        dum_name = table_name + '_d'
        if '/' + dum_name in fid:
            fid.remove_node('/', dum_name)

        newT = fid.create_table(
            '/',
            dum_name,
            obj=tab_recarray,
            filters=TABLE_FILTERS)

        oldT = fid.get_node('/' + table_name)
        old_args = [x for x in dir(oldT._v_attrs) if not x.startswith('_')]
        for key in old_args:
            
            if key not in newT._v_attrs and not key.startswith('FIELD'):
                newT.attrs[key] = oldT.attrs[key]
                
        fid.remove_node('/', table_name)
        newT.rename(table_name) 
Example #10
Source File: getFilteredSkels.py    From tierpsy-tracker with MIT License
def _h_nodes2Array(skeletons_file, nodes4fit, valid_index=-1):
    '''
    Read the groups in skeletons file and save them as a matrix.
    Used by _h_readFeat2Check
    '''
    with tables.File(skeletons_file, 'r') as fid:
        assert all(node in fid for node in nodes4fit)

        if isinstance(valid_index, (float, int)) and valid_index < 0:
            valid_index = np.arange(fid.get_node(nodes4fit[0]).shape[0])

        n_samples = len(valid_index)
        n_features = len(nodes4fit)

        X = np.zeros((n_samples, n_features))

        if valid_index.size > 0:
            for ii, node in enumerate(nodes4fit):
                X[:, ii] = fid.get_node(node)[valid_index]

        return X 
Example #11
Source File: read_attrs.py    From tierpsy-tracker with MIT License
def fps_from_timestamp(file_name):
    #try to calculate the frames per second from the timestamp
    with tables.File(file_name, 'r') as fid:
        timestamp_time = fid.get_node('/timestamp/time')[:]
        if np.all(np.isnan(timestamp_time)):
            raise ValueError

        delT = np.nanmedian(np.diff(timestamp_time))
        if delT == 0:
            raise ValueError
        fps = 1 / delT

        if np.isnan(fps) or fps < 1:
            raise ValueError

        time_units = 'seconds'

    return fps, time_units 
Example #12
Source File: extract_poses.py    From tierpsy-tracker with MIT License
def read_images(mask_file, batch_size, frames2check = None):
    with tables.File(mask_file, 'r') as fid:
        masks = fid.get_node('/mask')
        
        if frames2check is None:
            frames2check = range(masks.shape[0])
        
        batch = []
        for frame in frames2check:
            img = masks[frame]
            
            batch.append((frame, img))
            if len(batch) == batch_size:
                yield batch
                batch = []
        if batch:
            yield batch 
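read_images is a generator that yields lists of (frame_number, image) tuples and flushes a partly filled batch at the end. Below is a hypothetical consumption sketch; the file name and batch size are placeholders.

# Placeholder file name and batch size, for illustration only.
for batch in read_images('example_mask.hdf5', batch_size=32):
    for frame_number, img in batch:
        pass  # process each (frame index, image) pair here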
Example #13
Source File: correctVentralDorsal.py    From tierpsy-tracker with MIT License
def _add_ventral_side(skeletons_file, ventral_side=''):
    #I am giving priority to a contour stored in experiments_info, rather than one read from the json file.
    #Currently I am only using the experiments_info in the re-analysis of the old Schafer database.
    try:
        ventral_side_f = single_db_ventral_side(skeletons_file)
    except (tables.exceptions.NoSuchNodeError, KeyError):
        ventral_side_f = ''

    if ventral_side_f in VALID_CNT:
        if not ventral_side or (ventral_side == ventral_side_f):
            ventral_side = ventral_side_f
        else:
            raise ValueError('The given contour orientation ({}) and the orientation stored in /experiments_info group ({}) differ. Change /experiments_info or the parameters file to solve this issue.'.format(ventral_side, ventral_side_f) )

    #add ventral side if given
    if ventral_side in VALID_CNT:
        with tables.File(skeletons_file, 'r+') as fid:
            fid.get_node('/trajectories_data').attrs['ventral_side'] = ventral_side
    return ventral_side 
Example #14
Source File: getAdditionalData.py    From tierpsy-tracker with MIT License
def storeStageData(stage_file, masked_image_file):
    # read motor data from csv
    with open(stage_file) as fid:
        reader = csv.reader(fid)
        data = [line for line in reader]

    # the csv must have more than one line (the header), otherwise it is an
    # empty file
    if len(data) <= 1:
        with tables.File(masked_image_file, 'r+') as fid:
            dtype = [('real_time', int), ('stage_time', int),
                     ('stage_x', float), ('stage_y', float)]
            fid.create_table('/', 'stage_log', obj=np.recarray(0, dtype))
            return

    #import pdb
    # pdb.set_trace()

    # filter, check and store the data into a recarray
    header, data = _getHeader(data)
    csv_dict = _data2dict(header, data)
    stage_recarray = _dict2recarray(csv_dict)

    with tables.File(masked_image_file, 'r+') as mask_fid:
        if '/stage_log' in mask_fid:
            mask_fid.remove_node('/', 'stage_log')
        mask_fid.create_table('/', 'stage_log', obj=stage_recarray)

    return csv_dict 
Example #15
Source File: getBlobTrajectories.py    From tierpsy-tracker with MIT License
def generateROIBuff(masked_image_file, buffer_size, **argkws):
    img_generator = generateImages(masked_image_file)
    
    with tables.File(masked_image_file, 'r') as mask_fid:
        tot_frames, im_h, im_w = mask_fid.get_node("/mask").shape
    
    for frame_number, image in img_generator:
        if frame_number % buffer_size == 0:
            if frame_number + buffer_size > tot_frames:
                buffer_size = tot_frames-frame_number #change this value, otherwise the buffer will not get full
            image_buffer = np.zeros((buffer_size, im_h, im_w), np.uint8)
            ini_frame = frame_number            
        
        
        image_buffer[frame_number-ini_frame] = image
        
        #compress if it is the last frame in the buffer
        if (frame_number+1) % buffer_size == 0 or (frame_number+1 == tot_frames):
            # z projection and select pixels as connected regions that were selected as worms at
            # least once in the masks
            main_mask = np.any(image_buffer, axis=0)
    
            # change from bool to uint since same datatype is required in
            # opencv
            main_mask = main_mask.astype(np.uint8)
    
            #calculate the contours, only keeping the external contours (no holes)
            ROI_cnts, _ = cv2.findContours(main_mask,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)[-2:]

    
            yield ROI_cnts, image_buffer, ini_frame 
Example #16
Source File: get_defaults.py    From tierpsy-tracker with MIT License
def _read_resampling_N(fname):
    with tables.File(fname, 'r') as fid:
        resampling_N = fid.get_node('/skeleton').shape[1]
        return resampling_N 
Example #17
Source File: read_attrs.py    From tierpsy-tracker with MIT License
def single_db_ventral_side(file_name):
    #this is for the Schafer lab's old database
    with tables.File(file_name, 'r') as fid:
        exp_info_b = fid.get_node('/experiment_info').read()
        exp_info = json.loads(exp_info_b.decode("utf-8"))
        ventral_side = exp_info['ventral_side']
    return ventral_side 
Example #18
Source File: getFilteredSkels.py    From tierpsy-tracker with MIT License
def getFilteredSkels(
        skeletons_file,
        min_num_skel=100,
        bad_seg_thresh=0.8,
        min_displacement=5,
        critical_alpha=0.01,
        max_width_ratio=2.25,
        max_area_ratio=6):

    min_num_skel = min_num_skel_defaults(skeletons_file, min_num_skel=min_num_skel)

    # check if the skeletonization finished successfully
    with tables.File(skeletons_file, "r") as ske_file_id:
        skeleton_table = ske_file_id.get_node('/skeleton')

    #eliminate skeletons that do not match a decent head, tail and body ratio. Likely to be coils. Taken from Segworm.
    filterPossibleCoils(
        skeletons_file,
        max_width_ratio=max_width_ratio,
        max_area_ratio=max_area_ratio)

    with pd.HDFStore(skeletons_file, 'r') as table_fid:
        trajectories_data = table_fid['/trajectories_data']

    # get valid rows using the trajectory displacement and the
    # skeletonization success. These indexes will be used to calculate statistics of what represents a valid skeleton.
    good_traj_index, good_skel_row = getValidIndexes(
        trajectories_data, min_num_skel=min_num_skel, bad_seg_thresh=bad_seg_thresh, min_displacement=min_displacement)

    #filter skeletons depending on the population morphology (area, width and length)
    filterByPopulationMorphology(
        skeletons_file,
        good_skel_row,
        critical_alpha=critical_alpha) 
Example #19
Source File: read_attrs.py    From tierpsy-tracker with MIT License
def _find_field(self):
        if os.path.exists(self.file_name):
            with tables.File(self.file_name, 'r') as fid:
                for field in VALID_FIELDS:
                    if field in fid:
                        return field
        #raise KeyError("Not valid field {} found in {}".format(VALID_FIELDS, self.file_name))
        return '' 
Example #20
Source File: read_attrs.py    From tierpsy-tracker with MIT License
def single_db_microns_per_pixel(file_name):
    #this is used in the single worm case, but it will be deprecated. I want to use this argument when I read the data from the original additional files
    with tables.File(file_name, 'r') as fid:
        microns_per_pixel_scale = fid.get_node('/stage_movement')._v_attrs['microns_per_pixel_scale']
        if microns_per_pixel_scale.size == 2:
            assert np.abs(
                microns_per_pixel_scale[0]) == np.abs(
                microns_per_pixel_scale[1])
            microns_per_pixel = np.abs(microns_per_pixel_scale[0])
    xy_units = 'micrometers'

    return microns_per_pixel, xy_units 
Example #21
Source File: smooth_skeletons_table.py    From tierpsy-tracker with MIT License
def read_food_contour(skeletons_file):
    try:
        with tables.File(skeletons_file, 'r') as fid:
            food_cnt_pix = fid.get_node('/food_cnt_coord')[:]
            
        #smooth contours
        microns_per_pixel = read_microns_per_pixel(skeletons_file)
        food_cnt = microns_per_pixel*food_cnt_pix
        food_cnt = _h_smooth_cnt(food_cnt)
        
    except tables.exceptions.NoSuchNodeError:
        food_cnt = None
    
    return food_cnt 
Example #22
Source File: WormClass.py    From tierpsy-tracker with MIT License
def getImage(self, masked_image_file, index, roi_size=128):
        # reading a video frame for one worm is very slow. Use it only in small
        # scale
        with tables.File(masked_image_file, 'r') as mask_fid:
            img = mask_fid.get_node("/mask")[self.frames[index], :, :]
        worm_img, roi_corner = getWormROI(
            img, self.coord_x[index], self.coord_y[index], roi_size)
        return worm_img, roi_corner 
Example #23
Source File: FOVMultiWellsSplitter.py    From tierpsy-tracker with MIT License
def constructor_from_fov_wells(self, filename):
        print('constructor from /fov_wells')
        with tables.File(filename, 'r') as fid:
            self.img_shape     = fid.get_node('/fov_wells')._v_attrs['img_shape']
            self.camera_serial = fid.get_node('/fov_wells')._v_attrs['camera_serial']
            self.px2um         = fid.get_node('/fov_wells')._v_attrs['px2um']
            self.channel       = fid.get_node('/fov_wells')._v_attrs['channel']
            self.n_wells       = fid.get_node('/fov_wells')._v_attrs['n_wells']
            self.whichsideup   = fid.get_node('/fov_wells')._v_attrs['whichsideup']
            self.well_shape    = fid.get_node('/fov_wells')._v_attrs['well_shape']
            
        # is this a masked file or a features file? doesn't matter
        self.img = None
        masked_image_file = filename.replace('_featuresN.hdf5','.hdf5')
        with tables.File(masked_image_file, 'r') as fid:
            if '/bgnd' in fid:
                self.img = fid.get_node('/bgnd')[0]
            else:
                # maybe bgnd was not in the masked video? 
                # for speed, let's just get the first full frame
                self.img = fid.get_node('/full_data')[0]

        # initialise the dataframe
        self.wells = pd.DataFrame(columns = ['x','y','r','row','col',
                                          'x_min','x_max','y_min','y_max',
                                          'well_name'])
        with pd.HDFStore(filename,'r') as fid:
            wells_table = fid['/fov_wells']
        for colname in ['x_min','x_max','y_min','y_max','well_name']:
            self.wells[colname] = wells_table[colname]
        self.wells['x'] = 0.5 * (self.wells['x_min'] + self.wells['x_max'])
        self.wells['y'] = 0.5 * (self.wells['y_min'] + self.wells['y_max'])
        self.wells['r'] = self.wells['x_max'] - self.wells['x']
        
        self.calculate_wells_dimensions()
        self.find_row_col_wells() 
Example #24
Source File: obtainFeaturesHelper.py    From tierpsy-tracker with MIT License
def _h_read_data(self):
        skel_table_id, timestamp_inds = self._h_get_table_indexes()
        
        if not np.array_equal(np.sort(timestamp_inds), timestamp_inds): #the time stamp must be sorted
            warnings.warn('{}: The timestamp is not sorted in worm_index {}'.format(self.file_name, self.worm_index))
        
        # use real frames to define the size of the object arrays
        first_frame = np.min(timestamp_inds)
        last_frame = np.max(timestamp_inds)
        n_frames = last_frame - first_frame + 1
        
        # get the appropriate index in the object array
        ind_ff = timestamp_inds - first_frame

        # get the number of segments from the normalized skeleton
        with tables.File(self.file_name, 'r') as ske_file_id:
            self.n_segments = ske_file_id.get_node('/skeleton').shape[1]
 
        # add the data from the skeleton_id's and timestamps used
        self.timestamp = np.arange(first_frame, last_frame + 1)
        
        self.skeleton_id = np.full(n_frames, -1, np.int32)
        self.skeleton_id[ind_ff] = skel_table_id
        
        # initialize the rest of the arrays
        self.skeleton = np.full((n_frames, self.n_segments, 2), np.nan)
        self.ventral_contour = np.full((n_frames, self.n_segments, 2), np.nan)
        self.dorsal_contour = np.full((n_frames, self.n_segments, 2), np.nan)
        self.widths = np.full((n_frames, self.n_segments), np.nan)

        # read data from the skeletons table
        with tables.File(self.file_name, 'r') as ske_file_id:
            self.skeleton[ind_ff] = \
                ske_file_id.get_node('/skeleton')[skel_table_id, :, :] * self.microns_per_pixel
            self.ventral_contour[ind_ff] = \
                ske_file_id.get_node('/contour_side1')[skel_table_id, :, :] * self.microns_per_pixel
            self.dorsal_contour[ind_ff] = \
                ske_file_id.get_node('/contour_side2')[skel_table_id, :, :] * self.microns_per_pixel
            self.widths[ind_ff] = \
                ske_file_id.get_node('/contour_width')[skel_table_id, :] * self.microns_per_pixel
Example #25
Source File: WormClass.py    From tierpsy-tracker with MIT License
def writeData(self):
        with tables.File(self.file_name, 'r+') as file_id:
            ini, end = self.rows_range
            for field in self.data_fields:
                file_id.get_node('/' + field)[ini:end + 1] = getattr(self, field)
Example #26
Source File: getFoodContour.py    From tierpsy-tracker with MIT License
def getFoodContour(mask_file, 
                skeletons_file,
                use_nn_food_cnt,
                model_path,
                solidity_th=0.98,
                _is_debug = False
                ):
    base_name = get_base_name(mask_file)
    
    progress_timer = TimeCounter('')
    print_flush("{} Calculating food contour {}".format(base_name, progress_timer.get_time_str()))
    
    
    food_cnt = calculate_food_cnt(mask_file,  
                                  use_nn_food_cnt = use_nn_food_cnt, 
                                  model_path = model_path,
                                  solidity_th=  solidity_th,
                                  _is_debug = _is_debug)
    
    #store the contour coordinates in both the skeletons file and the mask file
    for fname in [skeletons_file, mask_file]:
        with tables.File(fname, 'r+') as fid:
            if '/food_cnt_coord' in fid:
                fid.remove_node('/food_cnt_coord')
            
            #if it is a valid contour save it
            if food_cnt is not None and \
               food_cnt.size >= 2 and \
               food_cnt.ndim == 2 and \
               food_cnt.shape[1] == 2:
            
                tab = fid.create_array('/', 
                                       'food_cnt_coord', 
                                       obj=food_cnt)
                tab._v_attrs['use_nn_food_cnt'] = int(use_nn_food_cnt) 
Example #27
Source File: getFoodContourNN.py    From tierpsy-tracker with MIT License
def get_food_prob(mask_file, model, max_bgnd_images = 2, _is_debug = False, resizing_size = DFLT_RESIZING_SIZE):    
    '''
    Predict the food probability for each pixel using a pretrained u-net model.
    '''
    
    with tables.File(mask_file, 'r') as fid:
        if '/full_data' not in fid:
            raise ValueError('The mask file {} does not contain the /full_data dataset.'.format(mask_file))
            
        bgnd_o = fid.get_node('/full_data')[:max_bgnd_images]
        
        assert bgnd_o.ndim == 3
        if bgnd_o.shape[0] > 1:
            bgnd = [np.max(bgnd_o[i:i+1], axis=0) for i in range(bgnd_o.shape[0]-1)] 
        else:
            bgnd = [np.squeeze(bgnd_o)]
        
        min_size = min(bgnd[0].shape)
        resize_factor = min(resizing_size, min_size)/min_size
        dsize = tuple(int(x*resize_factor) for x in bgnd[0].shape[::-1])
        
        bgnd_s = [cv2.resize(x, dsize) for x in bgnd]
        for b_img in bgnd_s:
            Y_pred = get_unet_prediction(b_img, model, n_flips=1)
            
            if _is_debug:
                import matplotlib.pylab as plt
                plt.figure()
                plt.subplot(1,2,1)
                plt.imshow(b_img, cmap='gray')
                plt.subplot(1, 2,2)    
                plt.imshow(Y_pred, interpolation='none')
        
        original_size = bgnd[0].shape
        return Y_pred, original_size, bgnd_s 
Example #28
Source File: joinBlobsTrajectories.py    From tierpsy-tracker with MIT License
def assignBlobTraj(trajectories_file, max_allowed_dist=20, area_ratio_lim=(0.5, 2)):
    #loop, save data and display progress
    base_name = os.path.basename(trajectories_file).replace('_trajectories.hdf5', '').replace('_skeletons.hdf5', '')
    
    with pd.HDFStore(trajectories_file, 'r') as fid:
        plate_worms = fid['/plate_worms']
    
    traj_ind = assignBlobTrajDF(plate_worms, max_allowed_dist, area_ratio_lim, base_name=base_name)

    if traj_ind is not None:
        with tables.File(trajectories_file, 'r+') as fid:
            tbl = fid.get_node('/', 'plate_worms')
            tbl.modify_column(column=traj_ind, colname='worm_index_blob')

        #print_flush(progress_time.get_str(frame)) 
Example #29
Source File: correctVentralDorsal.py    From tierpsy-tracker with MIT License
def _switch_cnt(skeletons_file):
    with tables.File(skeletons_file, 'r+') as fid:
        # since here we are changing all the contours, let's just change
        # the name of the datasets
        side1 = fid.get_node('/contour_side1')
        side2 = fid.get_node('/contour_side2')

        side1.rename('contour_side1_bkp')
        side2.rename('contour_side1')
        side1.rename('contour_side2') 
Example #30
Source File: correctVentralDorsal.py    From tierpsy-tracker with MIT License
def isBadVentralOrient(skeletons_file, ventral_side=''):
    print(ventral_side)
    ventral_side = _add_ventral_side(skeletons_file, ventral_side) 
    if ventral_side not in VALID_CNT:
        return True

    elif ventral_side == 'unknown':
        is_bad = False
    
    elif ventral_side in ['clockwise', 'anticlockwise']:
        with tables.File(skeletons_file, 'r') as fid:
            has_skeletons = fid.get_node('/trajectories_data').col('has_skeleton')

            # let's use the first valid skeleton, it seems like a waste to use all the other skeletons.
            # I checked earlier to make sure they have the same orientation.

            valid_ind = np.where(has_skeletons)[0]
            if valid_ind.size == 0:
                #no valid skeletons, nothing to do here.
                is_bad = True
            else:
                cnt_side1 = fid.get_node('/contour_side1')[valid_ind[0], :, :]
                cnt_side2 = fid.get_node('/contour_side2')[valid_ind[0], :, :]
                A_sign = _h_calAreaSignedArray(cnt_side1, cnt_side2)
                
                # if not (np.all(A_sign > 0) or np.all(A_sign < 0)):
                #    raise ValueError('There is a problem. All the contours should have the same orientation.')
                if ventral_side == 'clockwise':
                    is_bad = A_sign[0] < 0
                elif ventral_side == 'anticlockwise':
                    is_bad = A_sign[0] > 0
                else:
                    raise ValueError

        if is_bad:
            _switch_cnt(skeletons_file)
            is_bad = False


    return is_bad