Python numpy.int32() Examples
The following are 30 code examples of numpy.int32(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
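
Before the examples, a minimal illustration of numpy.int32 itself: it is NumPy's 32-bit signed integer scalar type, which can be called directly to convert a value or passed as a dtype to array constructors:

import numpy as np

x = np.int32(7)                          # a numpy scalar, not a Python int
arr = np.zeros((2, 3), dtype=np.int32)   # array whose elements are 32-bit ints
print(type(x))                           # <class 'numpy.int32'>
print(arr.dtype)                         # int32
print(np.iinfo(np.int32).max)            # 2147483647, the largest representable value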
Example #1
Source File: ndarray.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 7 votes |
def asnumpy(self):
    """Returns a ``numpy.ndarray`` object with value copied from this array.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = x.asnumpy()
    >>> type(y)
    <type 'numpy.ndarray'>
    >>> y
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> z = mx.nd.ones((2,3), dtype='int32')
    >>> z.asnumpy()
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)
    """
    data = np.empty(self.shape, dtype=self.dtype)
    check_call(_LIB.MXNDArraySyncCopyToCPU(
        self.handle,
        data.ctypes.data_as(ctypes.c_void_p),
        ctypes.c_size_t(data.size)))
    return data
Example #2
Source File: test_symbol.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def test_zero_prop2():
    x = mx.sym.Variable('x')
    idx = mx.sym.Variable('idx')
    y = mx.sym.batch_take(x, idx)
    z = mx.sym.stop_gradient(y)
    exe = z.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
                        type_dict={'x': np.float32, 'idx': np.int32})
    exe.forward()
    exe.backward()

    # The following bind() should throw an exception. We discard the expected stderr
    # output for this operation only in order to keep the test logs clean.
    with discard_stderr():
        try:
            y.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
                          type_dict={'x': np.float32, 'idx': np.int32})
        except:
            return

    assert False
Example #3
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)),
                           sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS
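The same semantics sketched in plain NumPy (an illustration of what the Theano graph computes, not part of the original backend):

predictions = np.array([[0.1, 0.5, 0.4], [0.7, 0.2, 0.1]], dtype=np.float32)
targets = np.array([1, 2], dtype=np.int32)
k = 2
top_k = np.argsort(predictions, axis=1)[:, -k:]   # indices of the k largest entries per row
result = np.any(top_k == targets[:, None], axis=1).astype(np.int32)
print(result)                                     # [1 0]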
Example #4
Source File: create_dataset.py From cat-bbs with MIT License | 6 votes |
def load_keypoints(image_filepath, image_height, image_width):
    """Load facial keypoints of one image."""
    fp_keypoints = "%s.cat" % (image_filepath,)
    if not os.path.isfile(fp_keypoints):
        raise Exception("Could not find keypoint coordinates for image '%s'."
                        % (image_filepath,))
    else:
        coords_raw = open(fp_keypoints, "r").readlines()[0].strip().split(" ")
        coords_raw = [abs(int(coord)) for coord in coords_raw]
        keypoints = []
        #keypoints_arr = np.zeros((9*2,), dtype=np.int32)
        for i in range(1, len(coords_raw), 2):  # first element is the number of coords
            x = np.clip(coords_raw[i], 0, image_width-1)
            y = np.clip(coords_raw[i+1], 0, image_height-1)
            keypoints.append((x, y))

        return keypoints
Example #5
Source File: common.py From cat-bbs with MIT License | 6 votes |
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 \
        or (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix
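A hypothetical call, assuming the ia (imgaug) and plt (matplotlib) imports used by the source file are in scope: pass a uint8 image and a float heatmap scaled to [0, 1]:

img = np.zeros((64, 64, 3), dtype=np.uint8)          # blank RGB image
heatmap = np.random.rand(64, 64).astype(np.float32)  # activations in [0, 1]
overlay = draw_heatmap(img, heatmap, alpha=0.5)      # uint8 blend of image and jet-colored heatmap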
Example #6
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]],
        outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask
Example #7
Source File: imdb.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 6 votes |
def create_roidb_from_box_list(self, box_list, gt_roidb):
    assert len(box_list) == self.num_images, \
        'Number of boxes must match number of ground-truth images'

    roidb = []
    if gt_roidb is not None:
        for i in range(self.num_images):
            boxes = box_list[i]
            real_label = gt_roidb[i]['labels']
            roidb.append({'boxes': boxes,
                          'labels': np.array([real_label], dtype=np.int32),
                          'flipped': False})
    else:
        for i in range(self.num_images):
            boxes = box_list[i]
            roidb.append({'boxes': boxes,
                          'labels': np.zeros((1, 0), dtype=np.int32),
                          'flipped': False})
    return roidb
Example #8
Source File: snippets.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 6 votes |
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """ A wrapper function to generate anchors given different scales
    Also return the number of anchors in variable 'length'
    """
    anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
    A = anchors.shape[0]
    shift_x = np.arange(0, width) * feat_stride
    shift_y = np.arange(0, height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
    K = shifts.shape[0]
    # width changes faster, so here it is H, W, C
    anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
    length = np.int32(anchors.shape[0])

    return anchors, length
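To make the output size concrete: with the default 3 scales × 3 ratios there are A = 9 base anchors and K = height × width shift positions, so a 38×50 feature map at feat_stride=16 yields 9 × 1900 = 17100 anchors. A hypothetical sanity check, assuming the companion generate_anchors function from the same file is importable:

anchors, length = generate_anchors_pre(38, 50, 16)
assert anchors.shape == (17100, 4)   # K * A rows of (x1, y1, x2, y2)
assert length == 17100               # stored as np.int32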
Example #9
Source File: ggtnn_train.py From gated-graph-transformer-network with MIT License | 6 votes |
def assemble_batch(story_fns, num_answer_words, format_spec):
    stories = []
    for sfn in story_fns:
        with gzip.open(sfn, 'rb') as f:
            cvtd_story, _, _, _ = pickle.load(f)
        stories.append(cvtd_story)
    sents, graphs, queries, answers = zip(*stories)
    cvtd_sents = np.array(sents, np.int32)
    cvtd_queries = np.array(queries, np.int32)
    max_ans_len = max(len(a) for a in answers)
    cvtd_answers = np.stack([convert_answer(answer, num_answer_words, format_spec, max_ans_len)
                             for answer in answers])
    num_new_nodes, new_node_strengths, new_node_ids, next_edges = zip(*graphs)
    num_new_nodes = np.stack(num_new_nodes)
    new_node_strengths = np.stack(new_node_strengths)
    new_node_ids = np.stack(new_node_ids)
    next_edges = np.stack(next_edges)
    return cvtd_sents, cvtd_queries, cvtd_answers, num_new_nodes, new_node_strengths, new_node_ids, next_edges
Example #10
Source File: dataset_tool.py From disentangling_conditional_gans with MIT License | 6 votes |
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])

    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99

    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------
Example #11
Source File: dataset.py From disentangling_conditional_gans with MIT License | 6 votes |
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0, 255],
             label_size=0, label_dtype='float32'):
    self.resolution = resolution
    self.resolution_log2 = int(np.log2(resolution))
    self.shape = [num_channels, resolution, resolution]
    self.dtype = dtype
    self.dynamic_range = dynamic_range
    self.label_size = label_size
    self.label_dtype = label_dtype
    self._tf_minibatch_var = None
    self._tf_lod_var = None
    self._tf_minibatch_np = None
    self._tf_labels_np = None

    assert self.resolution == 2 ** self.resolution_log2
    with tf.name_scope('Dataset'):
        self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
        self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
Example #12
Source File: run_attacks_and_defenses.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _load_dataset_clipping(self, dataset_dir, epsilon):
    """Helper method which loads dataset and determines clipping range.

    Args:
        dataset_dir: location of the dataset.
        epsilon: maximum allowed size of adversarial perturbation.
    """
    self.dataset_max_clip = {}
    self.dataset_min_clip = {}
    self._dataset_image_count = 0
    for fname in os.listdir(dataset_dir):
        if not fname.endswith('.png'):
            continue
        image_id = fname[:-4]
        image = np.array(
            Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
        image = image.astype('int32')
        self._dataset_image_count += 1
        self.dataset_max_clip[image_id] = np.clip(image + epsilon, 0, 255).astype('uint8')
        self.dataset_min_clip[image_id] = np.clip(image - epsilon, 0, 255).astype('uint8')
Example #13
Source File: w2v_utils.py From deep-learning-note with MIT License | 6 votes |
def batch_gen(download_url, expected_byte, vocab_size, batch_size,
              skip_window, visual_fld):
    local_dest = 'data/w2v/text8.zip'
    utils.download_one_file(download_url, local_dest, expected_byte)
    words = read_data(local_dest)
    dictionary, _ = build_vocab(words, vocab_size, visual_fld)
    index_words = convert_words_to_index(words, dictionary)
    del words  # to save memory
    single_gen = generate_sample(index_words, skip_window)

    while True:
        center_batch = np.zeros(batch_size, dtype=np.int32)
        target_batch = np.zeros([batch_size, 1])
        for index in range(batch_size):
            center_batch[index], target_batch[index] = next(single_gen)
        yield center_batch, target_batch
Example #14
Source File: run.py From fullrmc with GNU Affero General Public License v3.0 | 6 votes |
def bonds_CH(ENGINE, rang=10, recur=10, refine=False, explore=True):
    groups = []
    for idx in range(0, ENGINE.pdb.numberOfAtoms, 13):
        groups.append( np.array([idx+1 ,idx+2 ], dtype=np.int32) )  # C1-H11
        groups.append( np.array([idx+1 ,idx+3 ], dtype=np.int32) )  # C1-H12
        groups.append( np.array([idx+4 ,idx+5 ], dtype=np.int32) )  # C2-H21
        groups.append( np.array([idx+4 ,idx+6 ], dtype=np.int32) )  # C2-H22
        groups.append( np.array([idx+7 ,idx+8 ], dtype=np.int32) )  # C3-H31
        groups.append( np.array([idx+7 ,idx+9 ], dtype=np.int32) )  # C3-H32
        groups.append( np.array([idx+10,idx+11], dtype=np.int32) )  # C4-H41
        groups.append( np.array([idx+10,idx+12], dtype=np.int32) )  # C4-H42
    ENGINE.set_groups(groups)
    [g.set_move_generator(DistanceAgitationGenerator(amplitude=0.2, agitate=(True, True))) for g in ENGINE.groups]
    # set selector
    if refine or explore:
        gs = RecursiveGroupSelector(RandomSelector(ENGINE), recur=recur, refine=refine, explore=explore)
        ENGINE.set_group_selector(gs)
    # number of steps
    nsteps = recur*len(ENGINE.groups)
    for stepIdx in range(rang):
        LOGGER.info("Running 'bonds_CH' mode step %i"%(stepIdx))
        ENGINE.run(numberOfSteps=nsteps, saveFrequency=nsteps)

# ############ RUN H-C-H ANGLES ############ #
Example #15
Source File: run.py From fullrmc with GNU Affero General Public License v3.0 | 6 votes |
def angles_HCH(ENGINE, rang=5, recur=10, refine=False, explore=True):
    groups = []
    for idx in range(0, ENGINE.pdb.numberOfAtoms, 13):
        groups.append( np.array([idx+1 ,idx+2, idx+3 ], dtype=np.int32) )  # H11-C1-H12
        groups.append( np.array([idx+4 ,idx+5, idx+6 ], dtype=np.int32) )  # H21-C2-H22
        groups.append( np.array([idx+7 ,idx+8, idx+9 ], dtype=np.int32) )  # H31-C3-H32
        groups.append( np.array([idx+10,idx+11,idx+12], dtype=np.int32) )  # H41-C4-H42
    ENGINE.set_groups(groups)
    [g.set_move_generator(AngleAgitationGenerator(amplitude=5)) for g in ENGINE.groups]
    # set selector
    if refine or explore:
        gs = RecursiveGroupSelector(RandomSelector(ENGINE), recur=recur, refine=refine, explore=explore)
        ENGINE.set_group_selector(gs)
    # number of steps
    nsteps = recur*len(ENGINE.groups)
    for stepIdx in range(rang):
        LOGGER.info("Running 'angles_HCH' mode step %i"%(stepIdx))
        ENGINE.run(numberOfSteps=nsteps, saveFrequency=nsteps)

# ############ RUN ATOMS ############ #
Example #16
Source File: data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def tokenize(self, path):
    """Tokenizes a text file."""
    assert os.path.exists(path)
    # Add words to the dictionary
    with open(path, 'r') as f:
        tokens = 0
        for line in f:
            words = line.split() + ['<eos>']
            tokens += len(words)
            for word in words:
                self.dictionary.add_word(word)

    # Tokenize file content
    with open(path, 'r') as f:
        ids = np.zeros((tokens,), dtype='int32')
        token = 0
        for line in f:
            words = line.split() + ['<eos>']
            for word in words:
                ids[token] = self.dictionary.word2idx[word]
                token += 1

    return mx.nd.array(ids, dtype='int32')
Example #17
Source File: maskiou_head.py From mmdetection with Apache License 2.0 | 6 votes |
def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
    """Compute area ratio of the gt mask inside the proposal and the gt
    mask of the corresponding instance."""
    num_pos = pos_proposals.size(0)
    if num_pos > 0:
        area_ratios = []
        proposals_np = pos_proposals.cpu().numpy()
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        # compute mask areas of gt instances (batch processing for speedup)
        gt_instance_mask_area = gt_masks.areas
        for i in range(num_pos):
            gt_mask = gt_masks[pos_assigned_gt_inds[i]]

            # crop the gt mask inside the proposal
            bbox = proposals_np[i, :].astype(np.int32)
            gt_mask_in_proposal = gt_mask.crop(bbox)

            ratio = gt_mask_in_proposal.areas[0] / (
                gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
            area_ratios.append(ratio)
        area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
            pos_proposals.device)
    else:
        area_ratios = pos_proposals.new_zeros((0, ))
    return area_ratios
Example #18
Source File: utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def sample_mog(prob, mean, var, rng):
    """Sample from independent mixture of gaussian (MoG) distributions

    Each batch is an independent MoG distribution.

    Parameters
    ----------
    prob : numpy.ndarray
        mixture probability of each gaussian. Shape --> (batch_num, center_num)
    mean : numpy.ndarray
        mean of each gaussian. Shape --> (batch_num, center_num, sample_dim)
    var : numpy.ndarray
        variance of each gaussian. Shape --> (batch_num, center_num, sample_dim)
    rng : numpy.random.RandomState

    Returns
    -------
    ret : numpy.ndarray
        sampling result. Shape --> (batch_num, sample_dim)
    """
    gaussian_inds = sample_categorical(prob, rng).astype(numpy.int32)
    mean = mean[numpy.arange(mean.shape[0]), gaussian_inds, :]
    var = var[numpy.arange(mean.shape[0]), gaussian_inds, :]
    ret = sample_normal(mean=mean, var=var, rng=rng)
    return ret
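A hypothetical call, assuming the sample_categorical and sample_normal helpers from the same module are in scope: one batch with two mixture components in two dimensions:

rng = numpy.random.RandomState(0)
prob = numpy.array([[0.2, 0.8]])            # (batch_num=1, center_num=2)
mean = numpy.array([[[0., 0.], [5., 5.]]])  # (1, 2, sample_dim=2)
var = numpy.full((1, 2, 2), 0.1)            # per-component diagonal variances
sample = sample_mog(prob, mean, var, rng)   # shape (1, 2), usually drawn near (5, 5)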
Example #19
Source File: ndarray.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def dtype(self):
    """Data-type of the array's elements.

    Returns
    -------
    numpy.dtype
        This NDArray's data type.

    Examples
    --------
    >>> x = mx.nd.zeros((2,3))
    >>> x.dtype
    <type 'numpy.float32'>
    >>> y = mx.nd.zeros((2,3), dtype='int32')
    >>> y.dtype
    <type 'numpy.int32'>
    """
    mx_dtype = ctypes.c_int()
    check_call(_LIB.MXNDArrayGetDType(
        self.handle, ctypes.byref(mx_dtype)))
    return _DTYPE_MX_TO_NP[mx_dtype.value]
Example #20
Source File: ndarray.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def asscalar(self):
    """Returns a scalar whose value is copied from this array.

    This function is equivalent to ``self.asnumpy()[0]``. This NDArray must have shape (1,).

    Examples
    --------
    >>> x = mx.nd.ones((1,), dtype='int32')
    >>> x.asscalar()
    1
    >>> type(x.asscalar())
    <type 'numpy.int32'>
    """
    if self.shape != (1,):
        raise ValueError("The current array is not a scalar")
    return self.asnumpy()[0]
Example #21
Source File: test_imagenet_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def load_images(input_dir, metadata_file_path, batch_shape):
    """Retrieve numpy arrays of images and labels, read from a directory."""
    num_images = batch_shape[0]
    with open(metadata_file_path) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)

    row_idx_image_id = header_row.index('ImageId')
    row_idx_true_label = header_row.index('TrueLabel')
    images = np.zeros(batch_shape)
    labels = np.zeros(num_images, dtype=np.int32)
    for idx in xrange(num_images):
        row = rows[idx]
        filepath = os.path.join(input_dir, row[row_idx_image_id] + '.png')

        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(
                Image.open(f).convert('RGB')).astype(np.float) / 255.0
        images[idx, :, :, :] = image
        labels[idx] = int(row[row_idx_true_label])
    return images, labels
Example #22
Source File: utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def get_int_list(values, expected_len=None):
    return get_numeric_list(values, numpy.int32, expected_len)
Example #23
Source File: util.py From DeepLab_v3_plus with MIT License | 5 votes |
def decode_segmap(label_mask, dataset='pascal', plot=False):
    """Decode a mask into a class-wise RGB image.

    Returns an OpenCV-style (h, w, 3) array; note the dtype is np.int32.

    Decode segmentation class labels into a color image
    Args:
        label_mask (np.ndarray): an (M,N) array of integer values denoting
            the class label at each spatial location.
        plot (bool, optional): whether to show the resulting color image
            in a figure.
    Returns:
        (np.ndarray, optional): the resulting decoded color image.
    """
    if dataset == 'pascal':
        n_classes = 21
        label_colours = get_pascal_labels()
    elif dataset == 'cityscapes':
        n_classes = 19
        label_colours = get_cityscapes_labels()
    else:
        raise NotImplementedError

    r = label_mask.copy()
    g = label_mask.copy()
    b = label_mask.copy()
    for ll in range(0, n_classes):
        r[label_mask == ll] = label_colours[ll, 0]
        g[label_mask == ll] = label_colours[ll, 1]
        b[label_mask == ll] = label_colours[ll, 2]
    rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
    rgb[:, :, 0] = r
    rgb[:, :, 1] = g
    rgb[:, :, 2] = b
    #rgb = rgb.transpose((2,0,1))
    if plot:
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb.astype(np.int32)
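A hypothetical call, assuming get_pascal_labels from the same module returns a (21, 3) color table:

mask = np.random.randint(0, 21, size=(256, 256))  # fake class-label map
rgb = decode_segmap(mask, dataset='pascal')       # (256, 256, 3) int32 color image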
Example #24
Source File: images.py From neuropythy with GNU Affero General Public License v3.0 | 5 votes |
def parse_type(self, hdat, dataobj=None):
    dtype = super(MGHImageType, self).parse_type(hdat, dataobj=dataobj)
    if   np.issubdtype(dtype, np.floating): dtype = np.float32
    elif np.issubdtype(dtype, np.int8):     dtype = np.int8
    elif np.issubdtype(dtype, np.int16):    dtype = np.int16
    elif np.issubdtype(dtype, np.integer):  dtype = np.int32
    else:
        raise ValueError('Could not deduce appropriate MGH type for dtype %s' % dtype)
    return dtype
Example #25
Source File: 18_basic_tfrecord.py From deep-learning-note with MIT License | 5 votes |
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)

    tfrecord_features = tf.parse_single_example(tfrecord_serialized,
                                                features={
                                                    'label': tf.FixedLenFeature([], tf.int64),
                                                    'shape': tf.FixedLenFeature([], tf.string),
                                                    'image': tf.FixedLenFeature([], tf.string),
                                                }, name='features')

    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)

    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image
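Because this is TF1 queue-based input, the returned tensors only yield data once queue runners are started. A hedged usage sketch, assuming a file data.tfrecord written with matching features:

label, shape, image = read_from_tfrecord(['data.tfrecord'])
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    label_val, shape_val, image_val = sess.run([label, shape, image])
    coord.request_stop()
    coord.join(threads)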
Example #26
Source File: model.py From Att-ChemdNER with Apache License 2.0 | 5 votes |
def modelScore(self, tag_ids, scores, s_len):
#{{{
    """
    ATTENTION: THIS FUNCTION IS SYMBOLIC PROGRAMMING

    This function returns the score of our model for a fixed sentence labeling.
    @param:
        scores: the score matrix, the output of our model
        tag_ids: a numpy array representing one sentence's labels
        s_len: a scalar, the length of the sentence. Because sentence
            labels are expanded to the maximum sentence length, we use
            this to recover the original sentence labels.
    @return:
        a scalar, the score
    """
    #{{{
    n_tags = self.output_dim
    transitions = self.transitions
    # score from tags_scores
    real_path_score = scores[T.arange(s_len), tag_ids].sum()

    # Score from transitions
    b_id = theano.shared(value=np.array([n_tags], dtype=np.int32))
    e_id = theano.shared(value=np.array([n_tags + 1], dtype=np.int32))
    padded_tags_ids = T.concatenate([b_id, tag_ids, e_id], axis=0)
    real_path_score += transitions[
        padded_tags_ids[T.arange(s_len + 1)],
        padded_tags_ids[T.arange(s_len + 1) + 1]
    ].sum()
    # to prevent T.exp(real_path_score) from becoming inf
    #return real_path_score;
    return real_path_score / s_len
    #}}}
#}}}
Example #27
Source File: data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def iter_next(self):
    data = self._iter.next()
    if data is None:
        return False
    self._next_data = mx.nd.array(data[0], dtype=np.int32)
    self._next_label = mx.nd.array(data[1])
    self._next_mask = mx.nd.array(data[2])
    return True
Example #28
Source File: data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def __init__(self, source, batch_size, bptt):
    super(CorpusIter, self).__init__()
    self.batch_size = batch_size
    self.provide_data = [('data', (bptt, batch_size), np.int32)]
    self.provide_label = [('label', (bptt, batch_size))]
    self._index = 0
    self._bptt = bptt
    self._source = batchify(source, batch_size)
Example #29
Source File: ctc_metrics.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def _lcs(p, l):
    """ Calculates the Longest Common Subsequence between p and l (both list of int) and returns its length"""
    # Dynamic Programming Finding LCS
    if len(p) == 0:
        return 0
    P = np.array(list(p)).reshape((1, len(p)))
    L = np.array(list(l)).reshape((len(l), 1))
    M = np.int32(P == L)
    for i in range(M.shape[0]):
        for j in range(M.shape[1]):
            up = 0 if i == 0 else M[i-1, j]
            left = 0 if j == 0 else M[i, j-1]
            M[i, j] = max(up, left, M[i, j] if (i == 0 or j == 0) else M[i, j] + M[i-1, j-1])
    return M.max()
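To make the recurrence concrete, a quick sanity check (hypothetical calls, verified by hand): the longest common subsequence of [1, 2, 3, 4] and [2, 4, 5] is [2, 4], so the length is 2:

assert _lcs([1, 2, 3, 4], [2, 4, 5]) == 2  # LCS is [2, 4]
assert _lcs([], [1, 2]) == 0               # empty first sequence short-circuits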
Example #30
Source File: data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def __init__(self, data_file, vocab, batch_size, bptt):
    super(MultiSentenceIter, self).__init__()
    self.batch_size = batch_size
    self.bptt = bptt
    self.provide_data = [('data', (batch_size, bptt), np.int32), ('mask', (batch_size, bptt))]
    self.provide_label = [('label', (batch_size, bptt))]
    self.vocab = vocab
    self.data_file = data_file
    self._dataset = Dataset(self.vocab, data_file, shuffle=True)
    self._iter = self._dataset.iterate_once(batch_size, bptt)