Python scipy.io.loadmat() Examples

The following are 30 code examples of scipy.io.loadmat(), drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the scipy.io module.
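As a quick orientation before the project examples, here is a minimal sketch of the basic savemat()/loadmat() round trip; the file name example.mat and the variable names boxes and label are placeholders chosen for illustration, not taken from any of the projects below.

import numpy as np
from scipy.io import loadmat, savemat

# Write a .mat file holding two variables (file and variable names are illustrative).
savemat('example.mat', {'boxes': np.arange(12).reshape(3, 4), 'label': np.array([1.0])})

# loadmat() returns a dict keyed by variable name, plus '__header__',
# '__version__' and '__globals__' metadata entries.
data = loadmat('example.mat')
print(sorted(data.keys()))
print(data['boxes'].shape)   # (3, 4); MATLAB arrays come back as at-least-2-D NumPy arrays

# squeeze_me=True drops singleton dimensions, and struct_as_record=False returns
# MATLAB structs as attribute-style objects rather than structured record arrays.
data = loadmat('example.mat', squeeze_me=True, struct_as_record=False)
print(data['label'])         # 1.0 instead of array([[1.]])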
Example #1
Source File: grassdata.py    From grass_pytorch with Apache License 2.0
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+u'/box_data.mat')[u'boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+u'/op_data.mat')[u'ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+u'/sym_data.mat')[u'syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in range(len(op_data)):
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Example #2
Source File: pascal_voc.py    From cascade-rcnn_Pytorch with MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example #3
Source File: functions.py    From fast-MPN-COV with MIT License
def __init__(self, path, start_epoch):
        if start_epoch != 0:
           stats_ = sio.loadmat(os.path.join(path,'stats.mat'))
           data = stats_['data']
           content = data[0,0]
           self.trainObj = content['trainObj'][:,:start_epoch].squeeze().tolist()
           self.trainTop1 = content['trainTop1'][:,:start_epoch].squeeze().tolist()
           self.trainTop5 = content['trainTop5'][:,:start_epoch].squeeze().tolist()
           self.valObj = content['valObj'][:,:start_epoch].squeeze().tolist()
           self.valTop1 = content['valTop1'][:,:start_epoch].squeeze().tolist()
           self.valTop5 = content['valTop5'][:,:start_epoch].squeeze().tolist()
           if start_epoch == 1:  # a single epoch squeezes down to scalars, so wrap them back into lists
               self.trainObj = [self.trainObj]
               self.trainTop1 = [self.trainTop1]
               self.trainTop5 = [self.trainTop5]
               self.valObj = [self.valObj]
               self.valTop1 = [self.valTop1]
               self.valTop5 = [self.valTop5]
        else:
           self.trainObj = []
           self.trainTop1 = []
           self.trainTop5 = []
           self.valObj = []
           self.valTop1 = []
           self.valTop5 = [] 
Example #4
Source File: datasets.py    From face_classification with MIT License
def _load_imdb(self):
        face_score_threshold = 3
        dataset = loadmat(self.dataset_path)
        image_names_array = dataset['imdb']['full_path'][0, 0][0]
        gender_classes = dataset['imdb']['gender'][0, 0][0]
        face_score = dataset['imdb']['face_score'][0, 0][0]
        second_face_score = dataset['imdb']['second_face_score'][0, 0][0]
        face_score_mask = face_score > face_score_threshold
        second_face_score_mask = np.isnan(second_face_score)
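        # Note: despite its name, the mask below keeps samples whose gender label is known (not NaN).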
        unknown_gender_mask = np.logical_not(np.isnan(gender_classes))
        mask = np.logical_and(face_score_mask, second_face_score_mask)
        mask = np.logical_and(mask, unknown_gender_mask)
        image_names_array = image_names_array[mask]
        gender_classes = gender_classes[mask].tolist()
        image_names = []
        for image_name_arg in range(image_names_array.shape[0]):
            image_name = image_names_array[image_name_arg][0]
            image_names.append(image_name)
        return dict(zip(image_names, gender_classes)) 
Example #5
Source File: cumest.py    From spectrum with MIT License
def test():
  y = sio.loadmat(here(__file__) + '/demo/ma1.mat')['y']

  # The right results are:
  #           "biased": [-0.12250513  0.35963613  1.00586945  0.35963613 -0.12250513]
  #           "unbiased": [-0.12444965  0.36246791  1.00586945  0.36246791 -0.12444965]
  print(cum2est(y, 2, 128, 0, 'unbiased'))
  print(cum2est(y, 2, 128, 0, 'biased'))

  # For the 3rd cumulant:
  #           "biased": [-0.18203039  0.07751503  0.67113035  0.729953    0.07751503]
  #           "unbiased": [-0.18639911  0.07874543  0.67641484  0.74153955  0.07937539]
  print(cum3est(y, 2, 128, 0, 'biased', 1))
  print(cum3est(y, 2, 128, 0, 'unbiased', 1))

  # For testing the 4th-order cumulant
  # "biased": [-0.03642083  0.4755026   0.6352588   1.38975232  0.83791117  0.41641134 -0.97386322]
  # "unbiased": [-0.04011388  0.48736793  0.64948927  1.40734633  0.8445089   0.42303979 -0.99724968]
  print(cum4est(y, 3, 128, 0, 'biased', 1, 1))
  print(cum4est(y, 3, 128, 0, 'unbiased', 1, 1))
Example #6
Source File: confusionMatrix.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def get_predict_labels():
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    is_training = tf.placeholder("bool")
    prediction, _ = googlenet(inputs, is_training)
    predict_labels = tf.argmax(prediction, 1)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data = sio.loadmat("../data/dataset.mat")
    testdata = data["test"] / 127.5 - 1.0
    testlabel = data["testlabels"]
    saver.restore(sess, "../save_para/.\\model.ckpt")
    nums_test = testlabel.shape[1]
    PREDICT_LABELS = np.zeros([nums_test])
    for i in range(nums_test // BATCH_SIZE):
        PREDICT_LABELS[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE] = sess.run(predict_labels, feed_dict={inputs: testdata[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE], is_training: False})
    PREDICT_LABELS[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:] = sess.run(predict_labels, feed_dict={inputs: testdata[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:], is_training: False})
    np.savetxt("../data/predict_labels.txt", PREDICT_LABELS) 
Example #7
Source File: feature_distribution(t-sne).py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def get_feature():
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    is_training = tf.placeholder("bool")
    _, feature = googlenet(inputs, is_training)
    feature = tf.squeeze(feature, [1, 2])
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data = sio.loadmat("../data/dataset.mat")
    testdata = data["test"] / 127.5 - 1.0
    testlabels = data["testlabels"]
    saver.restore(sess, "../save_para/.\\model.ckpt")
    nums_test = testdata.shape[0]
    FEATURE = np.zeros([nums_test, 1024])
    for i in range(nums_test // BATCH_SIZE):
        FEATURE[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE] = sess.run(feature, feed_dict={inputs: testdata[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE], is_training: False})
    FEATURE[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:] = sess.run(feature, feed_dict={inputs: testdata[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:], is_training: False})
    sio.savemat("../data/feature.mat", {"feature": FEATURE, "testlabels": testlabels}) 
Example #8
Source File: E2FAR.py    From mxnet-E2FAR with Apache License 2.0
def __getitem__(self, idx):
        img_path = self.data_frame.iloc[idx, 0]
        img = cv2.imread(img_path, 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        x, y, w, h = self.data_frame.iloc[idx, 1:5]
        l, t, ww, hh = enlarge_bbox(x, y, w, h, self.enlarge_factor)
        r, b = l + ww, t + hh

        img = img[t: b, l:r, :]
        img = cv2.resize(img, (self.img_size, self.img_size))
        img = img.astype(np.float32) - 127.5

        img = nd.transpose(nd.array(img), (2, 0, 1))

        label_path = img_path.replace('.jpg', '.mat')

        label = sio.loadmat(label_path)

        params_shape = label['Shape_Para'].astype(np.float32).ravel()
        params_exp = label['Exp_Para'].astype(np.float32).ravel()

        return img, params_shape, params_exp 
Example #9
Source File: show_results_sunrgbd.py    From H3DNet with MIT License
def export_one_scan(scan_name):
    pt = np.load(os.path.join(DATA_DIR, scan_name+'_pc.npz'))['pc']
    np.savetxt(mode+'tmp.xyz', pt)
    os.system("mv {}tmp.xyz {}tmp.xyzrgb".format(mode, mode))
    point_cloud = o3d.io.read_point_cloud(mode+'tmp.xyzrgb')

    pred_proposals = np.load(os.path.join(PRED_PATH, 'center'+scan_name+'_nms.npy'))
    gt_bbox = sio.loadmat(os.path.join(PRED_PATH, 'center'+scan_name+'_gt.mat'))['gt']
    bb = []
    if mode == 'gt':
        boundingboxes = gt_bbox
    elif mode == 'pred':
        boundingboxes = pred_proposals
    else:
        print("mode must be 'gt' or 'pred'")
        return
    for i in range(boundingboxes.shape[0]):
        c = np.array(color_mapping[int(boundingboxes[i,-1])])/255.0
        for _ in range(2):
            bb.append(create_lineset(boundingboxes[i]+0.005*(np.random.rand()-0.5)*2, colors=c))
    load_view_point([point_cloud] + bb, './viewpoint.json', window_name=scan_name+'_'+mode) 
Example #10
Source File: pascal_voc.py    From LRP with MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example #11
Source File: suite_sparse.py    From pytorch_geometric with MIT License
def process(self):
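        # MATLAB sparse matrices are returned by loadmat() as scipy.sparse CSC matrices;
        # the chained indexing below unwraps the 'Problem' struct (loaded as a 1x1 record
        # array) to reach the matrix before converting it to COO format.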
        mat = loadmat(self.raw_paths[0])['Problem'][0][0][2].tocsr().tocoo()

        row = torch.from_numpy(mat.row).to(torch.long)
        col = torch.from_numpy(mat.col).to(torch.long)
        edge_index = torch.stack([row, col], dim=0)

        edge_attr = torch.from_numpy(mat.data).to(torch.float)
        if torch.all(edge_attr == 1.):
            edge_attr = None

        size = torch.Size(mat.shape)
        if mat.shape[0] == mat.shape[1]:
            size = None

        num_nodes = mat.shape[0]

        data = Data(edge_index=edge_index, edge_attr=edge_attr, size=size,
                    num_nodes=num_nodes)

        if self.pre_transform is not None:
            data = self.pre_transform(data)

        torch.save(self.collate([data]), self.processed_paths[0]) 
Example #12
Source File: utils.py    From timeception with GNU General Public License v3.0
def mat_load(path, m_dict=None):
    """
    Load mat files.
    :param path:
    :return:
    """
    if m_dict is None:
        data = sio.loadmat(path)
    else:
        data = sio.loadmat(path, m_dict)

    return data

# endregion

# region File/Folder Names/Paths
Example #13
Source File: pascal3d.py    From TFFRCNN with MIT License
def _load_selective_search_IJCV_roidb(self, gt_roidb):
        IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
                                                 'selective_search_IJCV_data',
                                                 'voc_' + self._year))
        assert os.path.exists(IJCV_path), \
               'Selective search IJCV data not found at: {}'.format(IJCV_path)

        top_k = self.config['top_k']
        box_list = []
        for i in range(self.num_images):
            filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
            raw_data = sio.loadmat(filename)
            box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    # evaluate detection results 
Example #14
Source File: pascal_voc.py    From TFFRCNN with MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example #15
Source File: kittivoc.py    From TFFRCNN with MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(self._data_path,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example #16
Source File: utils.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
def mat_load(path, m_dict=None):
    """
    Load mat files.
    :param path:
    :return:
    """
    if m_dict is None:
        data = sio.loadmat(path)
    else:
        data = sio.loadmat(path, m_dict)

    return data

# endregion

# region File/Folder Names/Paths
Example #17
Source File: transforms.py    From pase with MIT License
def load_IR(self, ir_file, ir_fmt):
        ir_file = os.path.join(self.data_root, ir_file)
        # print('loading ir_file: ', ir_file)
        if hasattr(self, 'cache') and ir_file in self.cache:
            return self.cache[ir_file]
        else:
            if ir_fmt == 'mat':
                IR = loadmat(ir_file, squeeze_me=True, struct_as_record=False)
                IR = IR['risp_imp']
            elif ir_fmt == 'imp' or ir_fmt == 'txt':
                IR = np.loadtxt(ir_file)
            elif ir_fmt == 'npy':
                IR = np.load(ir_file)
            elif ir_fmt == 'wav':
                IR, _ = sf.read(ir_file)
            else:
                raise TypeError('Unrecognized IR format: ', ir_fmt)
            IR = IR[:self.max_reverb_len]
            if np.max(IR)>0:
                IR = IR / np.abs(np.max(IR))
            p_max = np.argmax(np.abs(IR))
            if hasattr(self, 'cache'):
                self.cache[ir_file] = (IR, p_max)
            return IR, p_max 
Example #18
Source File: dataset_usrnet.py    From KAIR with MIT License
def __init__(self, opt):
        super(DataSetUSRNet, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
        self.sigma_max = self.opt['sigma_max'] if self.opt['sigma_max'] is not None else 25
        self.scales = opt['scales'] if opt['scales'] is not None else [1,2,3,4]
        self.sf_validation = opt['sf_validation'] if opt['sf_validation'] is not None else 3
        #self.kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
        self.kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']  # for validation

        # -------------------
        # get the path of H
        # -------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])  # return None if input is None
        self.count = 0 
Example #19
Source File: grassdata.py    From grass_pytorch with Apache License 2.0
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+'/box_data.mat')['boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+'/op_data.mat')['ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+'/sym_data.mat')['syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in range(len(op_data)):
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Example #20
Source File: extract_dictnet_weights.py    From reading-text-in-the-wild with GNU General Public License v3.0
def loadmat(filename):
    '''
    this function should be called instead of direct sio.loadmat
    as it cures the problem of not properly recovering python dictionaries
    from mat files. It calls the function check keys to cure all entries
    which are still mat-objects
    '''
    data = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)
    return _check_keys(data) 
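The _check_keys helper referenced in the docstring is not included in the snippet. A minimal sketch of the usual recipe is shown below, assuming SciPy 1.8 or newer where mat_struct is exposed as scipy.io.matlab.mat_struct (older releases keep it in scipy.io.matlab.mio5_params); it recursively converts mat_struct entries into nested Python dicts.

from scipy.io.matlab import mat_struct

def _check_keys(data):
    # Replace every top-level mat_struct entry with a nested dict.
    for key in data:
        if isinstance(data[key], mat_struct):
            data[key] = _todict(data[key])
    return data

def _todict(matobj):
    # Recursively convert a mat_struct object into a plain dict.
    out = {}
    for name in matobj._fieldnames:
        elem = getattr(matobj, name)
        out[name] = _todict(elem) if isinstance(elem, mat_struct) else elem
    return out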
Example #21
Source File: prob_model.py    From tf-pose with Apache License 2.0
def __init__(self, prob_model_path):
        model_param = sio.loadmat(prob_model_path)
        self.mu = np.reshape(
            model_param['mu'], (model_param['mu'].shape[0], 3, -1))
        self.e = np.reshape(model_param['e'], (model_param['e'].shape[
                            0], model_param['e'].shape[1], 3, -1))
        self.sigma = model_param['sigma']
        self.cam = np.array(
            [[1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 1.0, 0.0]]) 
Example #22
Source File: densepose_uv.py    From Parsing-R-CNN with MIT License
def flip_uv_featuremap(uvs_hf):
    # Invert the predicted soft uv
    uvs_inv = []
    label_index = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14]
    _index = [0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, 23]
    UV_symmetry_filename = os.path.join(
        os.path.dirname(__file__),
        '../../../../data/DensePoseData/UV_data/UV_symmetry_transforms.mat'
    )
    UV_sym = loadmat(UV_symmetry_filename)

    for i in range(len(uvs_hf)):
        uvs_hf[i] = uvs_hf[i][:, :, :, ::-1]

    uvs_inv.append(uvs_hf[0][:, label_index, :, :])
    uvs_inv.append(uvs_hf[1][:, _index, :, :])

    U_uv, V_uv = uvs_hf[2:]
    U_sym = np.zeros(U_uv.shape)
    V_sym = np.zeros(V_uv.shape)
    U_uv = np.where(U_uv < 0, 0, U_uv)
    V_uv = np.where(V_uv < 0, 0, V_uv)
    U_uv = np.where(U_uv > 1, 1, U_uv)
    V_uv = np.where(V_uv > 1, 1, V_uv)
    U_loc = (U_uv * 255).astype(np.int64)
    V_loc = (V_uv * 255).astype(np.int64)
    for i in range(1, 25):
        for j in range(len(V_sym)):
            V_sym[j, i] = UV_sym['V_transforms'][0, i - 1][V_loc[j, i], U_loc[j, i]]
            U_sym[j, i] = UV_sym['U_transforms'][0, i - 1][V_loc[j, i], U_loc[j, i]]

    uvs_inv.append(U_sym[:, _index, :, :])
    uvs_inv.append(V_sym[:, _index, :, :])

    return uvs_inv 
Example #23
Source File: densepose_cocoeval.py    From Parsing-R-CNN with MIT License
def _loadGEval(self):
        print('Loading densereg GT..')
        smplFpath = os.path.join(self.evalDataDir, 'SMPL_subdiv.mat')
        SMPL_subdiv = loadmat(smplFpath)
        pdistTransformFpath = os.path.join(self.evalDataDir, 'SMPL_SUBDIV_TRANSFORM.mat')
        self.PDIST_transform = loadmat(pdistTransformFpath)
        self.PDIST_transform = self.PDIST_transform['index'].squeeze()
        UV = np.array([
            SMPL_subdiv['U_subdiv'],
            SMPL_subdiv['V_subdiv']
        ]).squeeze()
        ClosestVertInds = np.arange(UV.shape[1]) + 1
        self.Part_UVs = []
        self.Part_ClosestVertInds = []
        for i in np.arange(24):
            self.Part_UVs.append(
                UV[:, SMPL_subdiv['Part_ID_subdiv'].squeeze() == (i + 1)]
            )
            self.Part_ClosestVertInds.append(
                ClosestVertInds[SMPL_subdiv['Part_ID_subdiv'].squeeze() == (i + 1)]
            )

        arrays = {}
        pdistMatrixFpath = os.path.join(self.evalDataDir, 'Pdist_matrix.mat')
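        # Pdist_matrix.mat is presumably stored in the MATLAB v7.3 (HDF5-based) format,
        # which scipy.io.loadmat cannot read, so it is opened with h5py instead.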
        f = h5py.File(pdistMatrixFpath)
        for k, v in f.items():
            arrays[k] = np.array(v)
        self.Pdist_matrix = arrays['Pdist_matrix']
        self.Part_ids = np.array(SMPL_subdiv['Part_ID_subdiv'].squeeze())
        # Mean geodesic distances for parts.
        self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150])
        self.CoarseParts = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
                                     6, 6, 6, 6, 7, 7, 7, 7, 8, 8])
        print('Loaded') 
Example #24
Source File: omniglot.py    From nsf with MIT License
def __init__(self, split='train', transform=None):
        self.transform = transform
        path = os.path.join(utils.get_data_root(), 'omniglot', 'omniglot.mat')
        rawdata = loadmat(path)

        if split == 'train':
            self.data = rawdata['data'].T.reshape(-1, 28, 28)
            self.targets = rawdata['target'].T
        elif split == 'test':
            self.data = rawdata['testdata'].T.reshape(-1, 28, 28)
            self.targets = rawdata['testtarget'].T
        else:
            raise ValueError("split must be 'train' or 'test'")
Example #25
Source File: ingest_flower102.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def collectdata(self,):
        print('Start Collect Data...')
        img_labels = sio.loadmat(self.input_dir + '/imagelabels.mat')['labels'][0]
        img_split = sio.loadmat(self.input_dir + '/setid.mat')
        img_train = img_split['trnid']
        img_val = img_split['valid']
        img_test = img_split['tstid']

        for idx in img_train[0]:
            img_name = 'image_%05d.jpg' % idx
            imgpath = os.path.join(self.input_img_dir, img_name)
            outpath = os.path.join(self.outimgdir, img_name)
            transform_and_save(img_path=imgpath, output_filename=outpath, target_size=self.target_size, skip=self.skipimg)
            self.trainpairlist[os.path.join('images', img_name)] = os.path.join('labels', str(img_labels[idx-1] - 1) + '.txt')

        for idx in img_val[0]:
            img_name = 'image_%05d.jpg' % idx
            imgpath = os.path.join(self.input_img_dir, img_name)
            outpath = os.path.join(self.outimgdir, img_name)
            transform_and_save(img_path=imgpath, output_filename=outpath, target_size=self.target_size,
                               skip=self.skipimg)
            self.valpairlist[os.path.join('images', img_name)] = os.path.join('labels', str(img_labels[idx-1] - 1) + '.txt')

        for idx in img_test[0]:
            img_name = 'image_%05d.jpg' % idx
            imgpath = os.path.join(self.input_img_dir, img_name)
            outpath = os.path.join(self.outimgdir, img_name)
            transform_and_save(img_path=imgpath, output_filename=outpath, target_size=self.target_size,
                               skip=self.skipimg)
            self.testpairlist[os.path.join('images', img_name)] = os.path.join('labels', str(img_labels[idx-1] - 1) + '.txt')

        print('Finished Collect Data...')
Example #26
Source File: pascal_pf.py    From pytorch_geometric with MIT License
def process(self):
        path = osp.join(self.raw_dir, 'Annotations', self.category, '*.mat')
        filenames = glob.glob(path)

        names = []
        data_list = []
        for filename in filenames:
            name = filename.split(os.sep)[-1].split('.')[0]

            pos = torch.from_numpy(loadmat(filename)['kps']).to(torch.float)
            mask = ~torch.isnan(pos[:, 0])
            pos = pos[mask]

            # Normalize points to unit sphere.
            pos = pos - pos.mean(dim=0, keepdim=True)
            pos = pos / pos.norm(dim=1).max()

            y = mask.nonzero().flatten()

            data = Data(pos=pos, y=y, name=name)

            if self.pre_filter is not None and not self.pre_filter(data):
                continue
            if self.pre_transform is not None:
                data = self.pre_transform(data)

            names.append(name)
            data_list.append(data)

        pairs = loadmat(osp.join(self.raw_dir, 'parsePascalVOC.mat'))
        pairs = pairs['PascalVOC']['pair'][0, 0][
            0, self.categories.index(self.category)]

        pairs = [(names.index(x[0][0]), names.index(x[1][0])) for x in pairs]

        torch.save(self.collate(data_list), self.processed_paths[0])
        torch.save(pairs, self.processed_paths[1]) 
Example #27
Source File: download.py    From SSGAN-Tensorflow with MIT License
def download_svhn(download_path):
    data_dir = os.path.join(download_path, 'svhn')

    import scipy.io as sio
    # svhn file loader
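    # SVHN .mat files store images in 'X' with shape (32, 32, 3, N) and labels in 'y'
    # with shape (N, 1), hence the transpose to (N, 32, 32, 3) before writing HDF5 below.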
    def svhn_loader(url, path):
        cmd = ['curl', url, '-o', path]
        subprocess.call(cmd)
        m = sio.loadmat(path)
        return m['X'], m['y']

    if check_file(data_dir):
        print('SVHN was downloaded.')
        return

    data_url = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
    train_image, train_label = svhn_loader(data_url, os.path.join(data_dir, 'train_32x32.mat'))

    data_url = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'
    test_image, test_label = svhn_loader(data_url, os.path.join(data_dir, 'test_32x32.mat'))

    prepare_h5py(np.transpose(train_image, (3, 0, 1, 2)), train_label,
                 np.transpose(test_image, (3, 0, 1, 2)), test_label, data_dir)

    cmd = ['rm', '-f', os.path.join(data_dir, '*.mat')]
    subprocess.call(cmd) 
Example #28
Source File: pascal3d.py    From TFFRCNN with MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(self.cache_path, '..',
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in range(raw_data.shape[0]):
            box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example #29
Source File: voc2012.py    From Pytorch-Project-Template with MIT License
def __getitem__(self, index):
        if self.mode == 'test':
            img_path, img_name = self.imgs[index]
            img = Image.open(os.path.join(img_path, img_name + '.jpg')).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
            return img_name, img

        img_path, mask_path = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        if self.mode == 'train':
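            # The .mat annotation stores the label map inside a MATLAB struct;
            # ['GTcls']['Segmentation'][0][0] unwraps the 1x1 struct array to the raw label array.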
            mask = sio.loadmat(mask_path)['GTcls']['Segmentation'][0][0]
            mask = Image.fromarray(mask.astype(np.uint8))
        else:
            mask = Image.open(mask_path)

        if self.joint_transform is not None:
            img, mask = self.joint_transform(img, mask)

        if self.sliding_crop is not None:
            img_slices, mask_slices, slices_info = self.sliding_crop(img, mask)
            if self.transform is not None:
                img_slices = [self.transform(e) for e in img_slices]
            if self.target_transform is not None:
                mask_slices = [self.target_transform(e) for e in mask_slices]
            img, mask = torch.stack(img_slices, 0), torch.stack(mask_slices, 0)
            return img, mask, torch.LongTensor(slices_info)
        else:
            if self.transform is not None:
                img = self.transform(img)
            if self.target_transform is not None:
                mask = self.target_transform(mask)
            return img, mask 
Example #30
Source File: ColorTextureTools.py    From laplacian-meshes with GNU General Public License v3.0
def getColorPickingTexture():
    J = sio.loadmat('colors.mat')
    J = J['J']
    texId = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, texId)
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexImage2D(GL_TEXTURE_2D, 0, 3, J.shape[0], J.shape[1], 0, GL_RGB, GL_UNSIGNED_BYTE, J)
    return texId