Python numpy.transpose() Examples
The following are 30 code examples of numpy.transpose(), drawn from open-source projects. Each example lists the source file, project, and license it comes from.
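Before the project examples, here is a minimal, self-contained sketch of the function itself (plain NumPy, nothing project-specific): with no axes argument np.transpose(a) reverses the axis order, and an explicit axes tuple gives an arbitrary permutation.

import numpy as np

a = np.arange(6).reshape(2, 3)             # shape (2, 3)
print(np.transpose(a).shape)               # (3, 2): no axes argument reverses the axis order

b = np.zeros((2, 3, 4))
print(np.transpose(b).shape)               # (4, 3, 2)
print(np.transpose(b, (1, 0, 2)).shape)    # (3, 2, 4): axes gives an explicit permutation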
Example #1
Source File: NLP.py From Financial-NLP with Apache License 2.0
def similarity_label(self, words, normalization=True):
    """ you can calculate more than one word at the same time. """
    if self.model==None:
        raise Exception('no model.')
    if isinstance(words, string_types):
        words=[words]
    vectors=np.transpose(self.model.wv.__getitem__(words))
    if normalization:
        unit_vector=unitvec(vectors,ax=0)  # written this way it runs twice as fast as the original loop
        #unit_vector=np.zeros((len(vectors),len(words)))
        #for i in range(len(words)):
        #    unit_vector[:,i]=matutils.unitvec(vectors[:,i])
        dists=np.dot(self.Label_vec_u, unit_vector)
    else:
        dists=np.dot(self.Label_vec, vectors)
    return dists
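A rough, hypothetical sketch of the pattern above with random data: word_vecs and label_vecs stand in for self.model.wv[words] and self.Label_vec_u, and the label vectors are assumed to already be unit length. Transposing the (n_words, dim) embedding matrix makes each column a word vector, so a single dot product scores every label against every word.

import numpy as np

rng = np.random.default_rng(0)
word_vecs = rng.normal(size=(5, 100))       # 5 words, 100-dimensional embeddings
label_vecs = rng.normal(size=(12, 100))     # 12 label vectors (assumed unit-normalized in the original)

vectors = np.transpose(word_vecs)           # (100, 5): one column per word
unit = vectors / np.linalg.norm(vectors, axis=0, keepdims=True)   # normalize each column
dists = np.dot(label_vecs, unit)            # (12, 5): one similarity score per label/word pair
print(dists.shape)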
Example #2
Source File: Collection.py From fullrmc with GNU Affero General Public License v3.0
def superpose_array(refArray, array, check=False):
    """
    Superpose arrays by calculating the rotation matrix and the translations
    that minimize the root mean square deviation between an array of vectors
    and a reference array.

    :Parameters:
        #. refArray (numpy.ndarray): the NX3 reference array to superpose to.
        #. array (numpy.ndarray): the NX3 array to calculate the transformation of.
        #. check (boolean): whether to check arguments before generating points.

    :Returns:
        #. superposedArray (numpy.ndarray): the superposed NX3 array.
    """
    rotationMatrix, _,_,_ = get_superposition_transformation(refArray=refArray, array=array, check=check)
    return np.dot( rotationMatrix, np.transpose(array).\
                   reshape(1,3,-1)).transpose().reshape(-1,3)
Example #3
Source File: ingest_stl10.py From ArtGAN with BSD 3-Clause "New" or "Revised" License
def collectdata(self,):
    print 'Start Collect Data...'
    train_x_path = os.path.join(self.input_dir, 'unlabeled_X.bin')
    train_xf = open(train_x_path, 'rb')
    train_x = np.fromfile(train_xf, dtype=np.uint8)
    train_x = np.reshape(train_x, (-1, 3, 96, 96))
    train_x = np.transpose(train_x, (0, 3, 2, 1))
    idx = 0
    for i in xrange(train_x.shape[0]):
        if not self.skipimg:
            transform_and_save(img_arr=train_x[i],
                               output_filename=os.path.join(self.unlabeldir, str(idx) + '.jpg'))
        self.trainpairlist[os.path.join('images', 'unlabeled', str(idx) + '.jpg')] = 'labels/11.txt'
        idx += 1
    print 'Finished Collect Data...'
Example #4
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))
    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)
    return values, labels, lr
Example #5
Source File: np_box_ops.py From object_detector_app with MIT License
def intersection(boxes1, boxes2):
    """Compute pairwise intersection areas between boxes.

    Args:
      boxes1: a numpy array with shape [N, 4] holding N boxes
      boxes2: a numpy array with shape [M, 4] holding M boxes

    Returns:
      a numpy array with shape [N*M] representing pairwise intersection area
    """
    [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
    [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

    all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
    all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
    intersect_heights = np.maximum(
        np.zeros(all_pairs_max_ymin.shape),
        all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
    all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
    intersect_widths = np.maximum(
        np.zeros(all_pairs_max_xmin.shape),
        all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths
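The transpose is what makes the coordinate columns broadcast against each other: y_max1 has shape (N, 1) and np.transpose(y_max2) has shape (1, M), so the elementwise minimum is an (N, M) matrix with one entry per box pair (the docstring's [N*M] refers to that N-by-M result). A tiny self-contained check of the broadcasting step:

import numpy as np

y_max1 = np.array([[4.0], [7.0]])           # (N, 1) with N = 2
y_max2 = np.array([[5.0], [6.0], [9.0]])    # (M, 1) with M = 3
print(np.minimum(y_max1, np.transpose(y_max2)))
# [[4. 4. 4.]
#  [5. 6. 7.]]   -> shape (2, 3), one value per box pair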
Example #6
Source File: prepare.py From DeepLung with GNU General Public License v3.0
def resample(imgs, spacing, new_spacing,order=2):
    if len(imgs.shape)==3:
        new_shape = np.round(imgs.shape * spacing / new_spacing)
        true_spacing = spacing * imgs.shape / new_shape
        resize_factor = new_shape / imgs.shape
        imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
        return imgs, true_spacing
    elif len(imgs.shape)==4:
        n = imgs.shape[-1]
        newimg = []
        for i in range(n):
            slice = imgs[:,:,:,i]
            newslice,true_spacing = resample(slice,spacing,new_spacing)
            newimg.append(newslice)
        newimg=np.transpose(np.array(newimg),[1,2,3,0])
        return newimg,true_spacing
    else:
        raise ValueError('wrong shape')
Example #7
Source File: utils.py From integrated-gradient-pytorch with MIT License
def pre_processing(obs, cuda):
    mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])
    std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])
    obs = obs / 255
    obs = (obs - mean) / std
    obs = np.transpose(obs, (2, 0, 1))
    obs = np.expand_dims(obs, 0)
    obs = np.array(obs)
    if cuda:
        torch_device = torch.device('cuda:0')
    else:
        torch_device = torch.device('cpu')
    obs_tensor = torch.tensor(obs, dtype=torch.float32, device=torch_device, requires_grad=True)
    return obs_tensor

# generate the entire images
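The (2, 0, 1) permutation converts the image from HWC (height, width, channel) layout to the CHW layout that PyTorch models expect; a quick shape check with a dummy RGB image (the 224x224 size is only illustrative):

import numpy as np

obs = np.zeros((224, 224, 3))              # HWC image
chw = np.transpose(obs, (2, 0, 1))         # CHW
batched = np.expand_dims(chw, 0)           # add batch dimension
print(chw.shape, batched.shape)            # (3, 224, 224) (1, 3, 224, 224)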
Example #8
Source File: nn.py From deep-learning-note with MIT License
def train(self, inputs_list, targets_list):
    inputs = np.array(inputs_list, ndmin=2).T
    targets = np.array(targets_list, ndmin=2).T
    hidden_inputs = np.dot(self.wih, inputs)
    hidden_outputs = self.activation_function(hidden_inputs)
    final_inputs = np.dot(self.who, hidden_outputs)
    final_outputs = self.activation_function(final_inputs)
    output_errors = targets - final_outputs
    hidden_errors = np.dot(self.who.T, output_errors)
    self.who += self.lr * np.dot((output_errors * final_outputs * (1.0 - final_outputs)),
                                 np.transpose(hidden_outputs))
    self.wih += self.lr * np.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
                                 np.transpose(inputs))
    pass

# query
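In the two weight updates, np.transpose turns a column of activations into a row, so np.dot produces an outer product with the same shape as the weight matrix being adjusted. A shape-only sketch with made-up layer sizes:

import numpy as np

delta = np.ones((10, 1))             # error term for 10 output nodes (column vector)
hidden_outputs = np.ones((200, 1))   # 200 hidden activations (column vector)
update = np.dot(delta, np.transpose(hidden_outputs))
print(update.shape)                  # (10, 200), matching a 10x200 weight matrix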
Example #9
Source File: np_box_ops.py From DOTA_models with Apache License 2.0
def intersection(boxes1, boxes2):
    """Compute pairwise intersection areas between boxes.

    Args:
      boxes1: a numpy array with shape [N, 4] holding N boxes
      boxes2: a numpy array with shape [M, 4] holding M boxes

    Returns:
      a numpy array with shape [N*M] representing pairwise intersection area
    """
    [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
    [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

    all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
    all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
    intersect_heights = np.maximum(
        np.zeros(all_pairs_max_ymin.shape),
        all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
    all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
    intersect_widths = np.maximum(
        np.zeros(all_pairs_max_xmin.shape),
        all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths
Example #10
Source File: seq2seq_attention_model.py From DOTA_models with Apache License 2.0
def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_states):
    """Return the topK results and new decoder states."""
    feed = {
        self._enc_top_states: enc_top_states,
        self._dec_in_state: np.squeeze(np.array(dec_init_states)),
        self._abstracts: np.transpose(np.array([latest_tokens])),
        self._abstract_lens: np.ones([len(dec_init_states)], np.int32)}

    results = sess.run(
        [self._topk_ids, self._topk_log_probs, self._dec_out_state],
        feed_dict=feed)

    ids, probs, states = results[0], results[1], results[2]
    new_states = [s for s in states]
    return ids, probs, new_states
Example #11
Source File: script_preprocess_annoations_S3DIS.py From DOTA_models with Apache License 2.0
def _write_map_files(b_in, b_out, transform):
    cats = get_categories()

    env = utils.Foo(padding=10, resolution=5, num_point_threshold=2,
                    valid_min=-10, valid_max=200, n_samples_per_face=200)
    robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120,
                      camera_elevation_degree=-15)

    building_loader = factory.get_dataset('sbpd')
    for flip in [False, True]:
        b = nav_env.Building(b_out, robot, env, flip=flip,
                             building_loader=building_loader)
        logging.info("building_in: %s, building_out: %s, transform: %d", b_in,
                     b_out, transform)
        maps = _get_semantic_maps(b_in, transform, b.map, flip, cats)
        maps = np.transpose(np.array(maps), axes=[1,2,0])

        # Load file from the cache.
        file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl'
        file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1],
                                     b.map.origin[0], b.map.origin[1],
                                     b.map.resolution, flip)
        out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name)
        logging.info('Writing semantic maps to %s.', out_file)
        save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True)
Example #12
Source File: dot.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def measure_cost(repeat, scipy_trans_lhs, scipy_dns_lhs, func_name, *args, **kwargs):
    """Measure time cost of running a function
    """
    mx.nd.waitall()
    args_list = []
    for arg in args:
        args_list.append(arg)
    start = time.time()
    if scipy_trans_lhs:
        args_list[0] = np.transpose(args_list[0]) if scipy_dns_lhs else sp.spmatrix.transpose(args_list[0])
    for _ in range(repeat):
        func_name(*args_list, **kwargs)
    mx.nd.waitall()
    end = time.time()
    diff = end - start
    return diff / repeat
Example #13
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def jacobian(self, p, into=None):
    # transpose to be 3 x 2 x n
    p = np.transpose(np.reshape(p, (-1, 3, 2)), (1,2,0))
    # First, get the two legs...
    (dx_ab, dy_ab) = p[1] - p[0]
    (dx_ac, dy_ac) = p[2] - p[0]
    (dx_bc, dy_bc) = p[2] - p[1]
    # now, the area is half the z-value of the cross-product...
    sarea0 = 0.5 * (dx_ab*dy_ac - dx_ac*dy_ab)
    # but we want to abs it
    dsarea0 = np.sign(sarea0)
    z = np.transpose([[-dy_bc,dx_bc], [dy_ac,-dx_ac], [-dy_ab,dx_ab]], (2,0,1))
    z = times(0.5*dsarea0, z)
    m = numel(p)
    n = p.shape[2]
    ii = (np.arange(n) * np.ones([6, n])).T.flatten()
    z = sps.csr_matrix((z.flatten(), (ii, np.arange(len(ii)))), shape=(n, m))
    return safe_into(into, z)
Example #14
Source File: retinotopy.py From neuropythy with GNU Affero General Public License v3.0
def from_logeccen(logecc, vmin=0, vmax=90, offset=0.75):
    '''
    from_logeccen(logecc) yields a rescaled linear-space version of the
      log-eccentricity value (or values) logecc.
    from_logeccen(logxy_matrix) rescales all the (x,y) points in the given
      matrix to have linearly-spaced eccentricity values.

    from_logeccen is the inverse of to_logeccen.
    '''
    if pimms.is_matrix(logecc):
        xy = np.asarray(logecc)
        trq = xy.shape[0] != 2
        xy = np.transpose(xy) if trq else np.asarray(xy)
        r = np.sqrt(np.sum(xy**2, axis=0))
        esc = from_logeccen(r, vmin=vmin, vmax=vmax, offset=offset)
        ecc = zinv(r)
        xy = xy * [ecc,ecc] * [esc,esc]
        return xy.T if trq else xy
    else:
        logecc = np.asarray(logecc)
        (vmin,vmax,offset) = [np.asarray(u) for u in (vmin,vmax,offset)]
        (vmin, vmax) = [np.log(u + offset) for u in (vmin, vmax)]
        logecc = logecc*(vmax - vmin) + vmin
        return np.exp(logecc) - offset
Example #15
Source File: convert_weights.py From Tensorflow-YOLOv3 with MIT License
def load_batch_norm(idx, variables, weights, assign_ops, offset):
    """Loads kernel, gamma, beta, mean, variance for Batch Normalization"""
    kernel = variables[idx]
    gamma, beta, mean, variance = variables[idx + 1:idx + 5]
    batch_norm_vars = [beta, gamma, mean, variance]

    for var in batch_norm_vars:
        shape = var.shape.as_list()
        num_params = np.prod(shape)
        var_weights = weights[offset:offset + num_params].reshape(shape)
        offset += num_params
        assign_ops.append(tf.assign(var, var_weights))

    shape = kernel.shape.as_list()
    num_params = np.prod(shape)
    var_weights = weights[offset:offset + num_params].reshape((shape[3], shape[2], shape[0], shape[1]))
    var_weights = np.transpose(var_weights, (2, 3, 1, 0))
    offset += num_params
    assign_ops.append(tf.assign(kernel, var_weights))
    return assign_ops, offset
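The reshape followed by the (2, 3, 1, 0) transpose converts Darknet's (out_channels, in_channels, height, width) kernel layout into the (height, width, in_channels, out_channels) layout TensorFlow expects. A shape-only illustration with arbitrary channel counts:

import numpy as np

darknet_kernel = np.zeros((32, 16, 3, 3))              # (out_c, in_c, h, w)
tf_kernel = np.transpose(darknet_kernel, (2, 3, 1, 0))
print(tf_kernel.shape)                                 # (3, 3, 16, 32) = (h, w, in_c, out_c)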
Example #16
Source File: cmag.py From neuropythy with GNU Affero General Public License v3.0
def __call__(self, x, y=None):
    if y is not None: x = (x,y)
    x = np.asarray(x)
    if len(x.shape) == 1: return self([x])[0]
    x = np.transpose(x) if x.shape[0] == 2 else x
    if not x.flags['WRITEABLE']: x = np.array(x)
    crd = self.coordinates
    sig = self.sigma
    wts = self._weight
    res = np.zeros(x.shape[0])
    for (sh, qd, bi) in zip(self.spatial_hashes, self.bin_query_distances, self.sigma_bins):
        neis = sh.query_ball_point(x, qd)
        res += [
            np.sum(w * np.exp(-0.5 * d2/s**2))
            for (ni,pt) in zip(neis,x)
            for ii in [bi[ni]]
            for (w,s,d2) in [(wts[ii], sig[ii], np.sum((crd[ii] - pt)**2, axis=1))]]
    return res
Example #17
Source File: dataset.py From neural-combinatorial-optimization-rl-tensorflow with MIT License
def visualize_sampling(self, permutations):
    max_length = len(permutations[0])
    grid = np.zeros([max_length,max_length])  # initialize heatmap grid to 0

    transposed_permutations = np.transpose(permutations)
    for t, cities_t in enumerate(transposed_permutations):  # step t, cities chosen at step t
        city_indices, counts = np.unique(cities_t,return_counts=True,axis=0)
        for u,v in zip(city_indices, counts):
            grid[t][u]+=v  # update grid with counts from the batch of permutations

    # plot heatmap
    fig = plt.figure()
    rcParams.update({'font.size': 22})
    ax = fig.add_subplot(1,1,1)
    ax.set_aspect('equal')
    plt.imshow(grid, interpolation='nearest', cmap='gray')
    plt.colorbar()
    plt.title('Sampled permutations')
    plt.ylabel('Time t')
    plt.xlabel('City i')
    plt.show()
Example #18
Source File: mxnet_export_test.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_spacetodepth():
    n, c, h, w = shape = (1, 1, 4, 6)
    input1 = np.random.rand(n, c, h, w).astype("float32")
    blocksize = 2

    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=shape)]
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 4, 2, 3))]
    nodes = [helper.make_node("SpaceToDepth", ["input1"], ["output"], block_size=blocksize)]

    graph = helper.make_graph(nodes, "spacetodepth_test", inputs, outputs)
    spacetodepth_model = helper.make_model(graph)

    bkd_rep = backend.prepare(spacetodepth_model)
    output = bkd_rep.run([input1])

    tmp = np.reshape(input1, [n, c, h // blocksize, blocksize, w // blocksize, blocksize])
    tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
    numpy_op = np.reshape(tmp, [n, c * (blocksize**2), h // blocksize, w // blocksize])

    npt.assert_almost_equal(output[0], numpy_op)
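The NumPy reference on its own, without the ONNX/MXNet machinery, in case you only want to run the reshape/transpose space-to-depth trick (same block size and input shape as the test above):

import numpy as np

n, c, h, w = 1, 1, 4, 6
bs = 2
x = np.arange(n * c * h * w, dtype=np.float32).reshape(n, c, h, w)
tmp = x.reshape(n, c, h // bs, bs, w // bs, bs)      # split each spatial axis into (blocks, block_size)
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])          # move the two block_size axes next to the channel axis
y = tmp.reshape(n, c * bs * bs, h // bs, w // bs)
print(y.shape)                                       # (1, 4, 2, 3)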
Example #19
Source File: synthetic_data_utils.py From DOTA_models with Apache License 2.0
def nparray_and_transpose(data_a_b_c):
    """Convert the list of items in data to a numpy array, and transpose it

    Args:
      data: data_asbsc: a nested, nested list of length a, with sublist length
        b, with sublist length c.

    Returns:
      a numpy 3-tensor with dimensions a x c x b
    """
    data_axbxc = np.array([datum_b_c for datum_b_c in data_a_b_c])
    data_axcxb = np.transpose(data_axbxc, axes=[0,2,1])
    return data_axcxb
Example #20
Source File: ptBEV.py From PolarSeg with BSD 3-Clause "New" or "Revised" License
def nb_greedy_FPS(xyz,K):
    start_element = 0
    sample_num = xyz.shape[0]
    sum_vec = np.zeros((sample_num,1),dtype = np.float32)
    xyz_sq = xyz**2
    for j in range(sample_num):
        sum_vec[j,0] = np.sum(xyz_sq[j,:])
    pairwise_distance = sum_vec + np.transpose(sum_vec) - 2*np.dot(xyz, np.transpose(xyz))

    candidates_ind = np.zeros((sample_num,),dtype = np.bool_)
    candidates_ind[start_element] = True
    remain_ind = np.ones((sample_num,),dtype = np.bool_)
    remain_ind[start_element] = False
    all_ind = np.arange(sample_num)

    for i in range(1,K):
        if i == 1:
            min_remain_pt_dis = pairwise_distance[:,start_element]
            min_remain_pt_dis = min_remain_pt_dis[remain_ind]
        else:
            cur_dis = pairwise_distance[remain_ind,:]
            cur_dis = cur_dis[:,candidates_ind]
            min_remain_pt_dis = np.zeros((cur_dis.shape[0],),dtype = np.float32)
            for j in range(cur_dis.shape[0]):
                min_remain_pt_dis[j] = np.min(cur_dis[j,:])
        next_ind_in_remain = np.argmax(min_remain_pt_dis)
        next_ind = all_ind[remain_ind][next_ind_in_remain]
        candidates_ind[next_ind] = True
        remain_ind[next_ind] = False

    return candidates_ind
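The pairwise-distance line uses the identity ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2*xi.xj, with np.transpose providing the row/column broadcast of the squared norms. A quick check of that identity against the direct computation:

import numpy as np

rng = np.random.default_rng(1)
xyz = rng.normal(size=(50, 3)).astype(np.float32)
sum_vec = np.sum(xyz**2, axis=1, keepdims=True)                             # (50, 1) squared norms
fast = sum_vec + np.transpose(sum_vec) - 2 * np.dot(xyz, np.transpose(xyz))
slow = np.sum((xyz[:, None, :] - xyz[None, :, :])**2, axis=-1)              # direct (50, 50) distances
print(np.allclose(fast, slow, atol=1e-4))                                   # True, up to float32 rounding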
Example #21
Source File: gen_synthetic_single.py From DOTA_models with Apache License 2.0
def GenerateSample(filename, code_shape, layer_depth):
    # {0, +1} binary codes.
    # No conversion since the output file is expected to store
    # codes using {0, +1} codes (and not {-1, +1}).
    code = synthetic_model.GenerateSingleCode(code_shape)
    code = np.round(code)

    # Reformat the code so as to be compatible with what is generated
    # by the image encoder.
    # The image encoder generates a tensor of size:
    # iteration_count x batch_size x height x width x iteration_depth.
    # Here: batch_size = 1
    if code_shape[-1] % layer_depth != 0:
        raise ValueError('Number of layers is not an integer')
    height = code_shape[0]
    width = code_shape[1]
    code = code.reshape([1, height, width, -1, layer_depth])
    code = np.transpose(code, [3, 0, 1, 2, 4])

    int_codes = code.astype(np.int8)
    exported_codes = np.packbits(int_codes.reshape(-1))

    output = io.BytesIO()
    np.savez_compressed(output, shape=int_codes.shape, codes=exported_codes)
    with tf.gfile.FastGFile(filename, 'wb') as code_file:
        code_file.write(output.getvalue())
Example #22
Source File: stt_utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
                          eps=1e-14, overwrite=False,
                          save_feature_as_csvfile=False):
    """ Calculate the log of linear spectrogram from FFT energy
    Params:
        filename (str): Path to the audio file
        step (int): Step size in milliseconds between windows
        window (int): FFT window size in milliseconds
        max_freq (int): Only FFT bins corresponding to frequencies between
            [0, max_freq] are returned
        eps (float): Small value to ensure numerical stability (for ln(x))
    """
    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype='float32')
            sample_rate = sound_file.samplerate
            if audio.ndim >= 2:
                audio = np.mean(audio, 1)
            if max_freq is None:
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)

            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)

            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename)
Example #23
Source File: bucket_io.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def make_data_iter_plan(self):
    "make a random data iteration plan"
    # truncate each bucket into multiple of batch-size
    bucket_n_batches = []
    for i in range(len(self.data)):
        bucket_n_batches.append(np.floor((self.data[i]) / self.batch_size))
        self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)]

    bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)])
    np.random.shuffle(bucket_plan)

    bucket_idx_all = [np.random.permutation(len(x)) for x in self.data]

    self.bucket_plan = bucket_plan
    self.bucket_idx_all = bucket_idx_all
    self.bucket_curr_idx = [0 for x in self.data]

    self.data_buffer = []
    self.label_buffer = []
    for i_bucket in range(len(self.data)):
        if not self.model_parallel:
            data = np.zeros((self.batch_size, self.buckets[i_bucket]))
            label = np.zeros((self.batch_size, self.buckets[i_bucket]))
            self.data_buffer.append(data)
            self.label_buffer.append(label)
        else:
            data = np.zeros((self.buckets[i_bucket], self.batch_size))
            self.data_buffer.append(data)

    if self.model_parallel:
        # Transpose data if model parallel
        for i in range(len(self.data)):
            bucket_data = self.data[i]
            self.data[i] = np.transpose(bucket_data)
Example #24
Source File: common_layers.py From fine-lm with MIT License
def cumsum(x, axis=0, exclusive=False):
    """TPU hack for tf.cumsum.

    This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
    the axis dimension is very large.

    Args:
      x: a Tensor
      axis: an integer
      exclusive: a boolean

    Returns:
      Tensor of the same shape as x.
    """
    if not is_on_tpu():
        return tf.cumsum(x, axis=axis, exclusive=exclusive)
    x_shape = shape_list(x)
    rank = len(x_shape)
    length = x_shape[axis]
    my_range = tf.range(length)
    comparator = tf.less if exclusive else tf.less_equal
    mask = tf.cast(
        comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
        x.dtype)
    ret = tf.tensordot(x, mask, axes=[[axis], [0]])
    if axis != rank - 1:
        ret = tf.transpose(
            ret, list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
    return ret
Example #25
Source File: PairDistributionConstraints.py From fullrmc with GNU Affero General Public License v3.0
def _codify__(self, engine, name='constraint', addDependencies=True):
    assert isinstance(name, basestring), LOGGER.error("name must be a string")
    assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
    klass        = self.__class__.__name__
    dependencies = ['import numpy as np','from fullrmc.Constraints import {klass}s'.format(klass=klass)]
    code         = []
    if addDependencies:
        code.extend(dependencies)
    x = list(self.experimentalData[:,0])
    y = list(self.experimentalData[:,1])
    code.append("x = {x}".format(x=x))
    code.append("y = {y}".format(y=y))
    code.append("d = np.transpose([x,y]).astype(np.float32)")
    dw = self.dataWeights
    if dw is not None:
        dw = list(dw)
    code.append("dw = {dw}".format(dw=dw))
    sfp = self._shapeFuncParams
    if isinstance(sfp, np.ndarray):
        sfp = list(sfp)
    code.append("sfp = {sfp}".format(sfp=sfp))
    wf = self.windowFunction
    if isinstance(wf, np.ndarray):
        code.append("wf = np.array({wf})".format(wf=list(wf)))
    else:
        code.append("wf = {wf}".format(wf=wf))
    code.append("{name} = {klass}s.{klass}\
(experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
shapeFuncParams=sfp, windowFunction=wf, limits={limits})".format(name=name, klass=klass,
        weighting=self.weighting, atomsWeight=self.atomsWeight,
        scaleFactor=self.scaleFactor, adjustScaleFactor=self.adjustScaleFactor,
        shapeFuncParams=sfp, limits=self.limits))
    code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
    # return
    return dependencies, '\n'.join(code)
Example #26
Source File: download.py From Generative-Latent-Optimization-Tensorflow with MIT License
def download_svhn(download_path):
    data_dir = osp.join(download_path, 'svhn')

    import scipy.io as sio

    # svhn file loader
    def svhn_loader(url, path):
        cmd = ['curl', url, '-o', path]
        subprocess.call(cmd)
        m = sio.loadmat(path)
        return m['X'], m['y']

    if check_file(data_dir):
        print('SVHN was downloaded.')
        return

    data_url = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
    train_image, train_label = svhn_loader(data_url, osp.join(data_dir, 'train_32x32.mat'))

    data_url = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'
    test_image, test_label = svhn_loader(data_url, osp.join(data_dir, 'test_32x32.mat'))

    prepare_h5py(np.transpose(train_image, (3, 0, 1, 2)),
                 np.transpose(test_image, (3, 0, 1, 2)), data_dir)

    cmd = ['rm', '-f', osp.join(data_dir, '*.mat')]
    subprocess.call(cmd)
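SVHN's .mat files store the images as a (32, 32, 3, num_images) array, so the (3, 0, 1, 2) permutation moves the sample axis to the front. A shape check on a dummy array with the same layout (the image count here is arbitrary):

import numpy as np

fake_svhn = np.zeros((32, 32, 3, 600), dtype=np.uint8)   # same layout as train_32x32.mat
images = np.transpose(fake_svhn, (3, 0, 1, 2))
print(images.shape)                                      # (600, 32, 32, 3)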
Example #27
Source File: Collection.py From fullrmc with GNU Affero General Public License v3.0
def __init__(self, engine, weighting="atomicNumber",
                   qmin=0.001, qmax=1, dq=0.005,
                   rmin=0.00, rmax=100, dr=1):
    # get qmin
    assert is_number(qmin), LOGGER.error("qmin must be a number")
    qmin = FLOAT_TYPE(qmin)
    assert qmin>0, LOGGER.error("qmin '%s' must be bigger than 0"%qmin)
    # get qmax
    assert is_number(qmax), LOGGER.error("qmax must be a number")
    qmax = FLOAT_TYPE(qmax)
    assert qmax>qmin, LOGGER.error("qmax '%s' must be bigger than qmin '%s'"%(qmin,qmax))
    # get dq
    assert is_number(dq), LOGGER.error("dq must be a number")
    dq = FLOAT_TYPE(dq)
    assert dq>0, LOGGER.error("dq '%s' must be bigger than 0"%dq)
    # import StructureFactorConstraint
    from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
    # overload constraint class
    class _ShapeFunctionStructureFactor(StructureFactorConstraint):
        """This overloading is needed to avoid the constraint being saved
        upon setting the different properties"""
        def _get_repository(self, *args, **kwargs):
            return None
        def _dump_to_repository(self, *args, **kwargs):
            return
    # create StructureFactorConstraint
    Q = np.arange(qmin, qmax, dq)
    D = np.transpose([Q, np.zeros(len(Q))]).astype(FLOAT_TYPE)
    #self._SFC = StructureFactorConstraint(rmin=rmin, rmax=rmax, dr=dr, experimentalData=D, weighting="atomicNumber")
    self._SFC = _ShapeFunctionStructureFactor(rmin=rmin, rmax=rmax, dr=dr, experimentalData=D, weighting="atomicNumber")
    self._SFC._set_engine(engine)
    self._SFC.listen(message="engine set")
    # set parameters
    self._rmin = FLOAT_TYPE(rmin)
    self._rmax = FLOAT_TYPE(rmax)
    self._dr   = FLOAT_TYPE(dr)
    self._qmin = FLOAT_TYPE(qmin)
    self._qmax = FLOAT_TYPE(qmax)
    self._dq   = FLOAT_TYPE(dq)
    self._weighting = weighting
Example #28
Source File: common_layers.py From fine-lm with MIT License
def diagonal_conv_gru(x, kernel_size, filters, dropout=0.0, name=None, reuse=None):
    """Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727."""

    # Let's make a shorthand for conv call first.
    def do_conv(args, name, bias_start):
        return conv(
            args,
            filters,
            kernel_size,
            padding="SAME",
            bias_initializer=tf.constant_initializer(bias_start),
            name=name)

    # Here comes the GRU gate.
    with tf.variable_scope(
        name, default_name="diagonal_conv_gru", values=[x], reuse=reuse):
        reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5))
        gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7))
        candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0))

        if dropout > 0.0:
            candidate = tf.nn.dropout(candidate, 1.0 - dropout)

        # Diagonal shift.
        shift_filters = filters // 3
        base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) +
                       [[1, 0, 0]] * shift_filters +
                       [[0, 0, 1]] * shift_filters)
        shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32)
        shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3)
        x_shifted = tf.nn.depthwise_conv2d(
            x, shift_filter, [1, 1, 1, 1], padding="SAME")

        # Return the gated result and cost.
        total_cost_avg = 0.5 * (reset_cost + gate_cost)
        return gate * x_shifted + (1 - gate) * candidate, total_cost_avg
Example #29
Source File: common_layers.py From fine-lm with MIT License
def running_global_pool_1d(inputs, pooling_type="MAX"):
    """Same global pool, but only for the elements up to the current element.

    Useful for outputs where the state of future elements is not known.
    Takes no mask as all elements up to the current element are assumed to exist.
    Currently only supports maximum. Equivalent to using a lower triangle bias.

    Args:
      inputs: A tensor of shape [batch_size, sequence_length, input_dims]
        containing the sequences of input vectors.
      pooling_type: Pooling type to use. Currently only supports 'MAX'.

    Returns:
      A tensor of shape [batch_size, sequence_length, input_dims] containing the
      running 'totals'.
    """
    del pooling_type
    with tf.name_scope("running_global_pool", values=[inputs]):
        scan_fct = tf.maximum
        # Permute inputs so seq_length is first.
        elems = tf.transpose(inputs, [1, 0, 2])
        # Perform scan.
        cumulatives = tf.scan(scan_fct, elems, swap_memory=True)
        # Permute output to get back to original order.
        output = tf.transpose(cumulatives, [1, 0, 2])
    return output
Example #30
Source File: VAE.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def decoder(model, z):
    params = model.arg_params
    decoder_n = np.shape(params['decoder_z_bias'].asnumpy())[0]
    decoder_z = np.dot(params['decoder_z_weight'].asnumpy(), np.transpose(z)) \
                + np.reshape(params['decoder_z_bias'].asnumpy(), (decoder_n, 1))
    act_z = np.tanh(decoder_z)
    decoder_x = np.transpose(np.dot(params['decoder_x_weight'].asnumpy(), act_z)) + params['decoder_x_bias'].asnumpy()
    reconstructed_x = 1/(1+np.exp(-decoder_x))
    return reconstructed_x
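Shape bookkeeping for the decoder above, with made-up layer sizes: z arrives as (batch, latent) while MXNet stores each fully-connected weight as (output_dim, input_dim), so transposing z makes the first matrix product line up, and the second transpose brings the result back to (batch, visible).

import numpy as np

batch, latent, hidden, visible = 4, 5, 400, 784          # hypothetical sizes
z = np.zeros((batch, latent))
W_z, b_z = np.zeros((hidden, latent)), np.zeros(hidden)
W_x, b_x = np.zeros((visible, hidden)), np.zeros(visible)
act_z = np.tanh(np.dot(W_z, np.transpose(z)) + b_z.reshape(hidden, 1))   # (hidden, batch)
decoder_x = np.transpose(np.dot(W_x, act_z)) + b_x                       # (batch, visible)
print(act_z.shape, decoder_x.shape)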