Python utils.flatten() Examples

The following are 12 code examples of utils.flatten(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
Example #1
Source File: model_v2.py    From FVTA_MemexQA with MIT License
def softmax(logits, scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits, 1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out, logits, 1)
		return out

# softmax selection
# return target * softmax(logits)
# target: [..., J, d]
# logits: [..., J]

# e.g. [N, M, dim] * [N, M] -> [N, dim], so [N, M] is the attention over each M

# return: [..., d] # the target vectors are weighted by the softmax of the logits
# [N, M, JX, JQ, 2d] * [N, M, JX, JQ] (each context-to-query mapping) -> [N, M, JX, 2d] # attends over the JQ dimension
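
These FVTA_MemexQA snippets rely on a flatten/reconstruct pair: flatten collapses all leading dimensions so a 2-D op (softmax, matmul) can run, and reconstruct restores the original shape afterwards. A minimal TF 1.x sketch of what such helpers could look like, inferred from how they are called here and not taken from the project's source:

from functools import reduce
from operator import mul

import tensorflow as tf  # TF 1.x API, matching the examples on this page

def flatten(tensor, keep):
	# collapse all but the last `keep` dims: [N, M, JX, JQ, 2d] -> [N*M*JX*JQ, 2d] for keep=1
	fixed_shape = tensor.get_shape().as_list()
	start = len(fixed_shape) - keep
	left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
	out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start, len(fixed_shape))]
	return tf.reshape(tensor, out_shape)

def reconstruct(tensor, ref, keep):
	# reshape `tensor` so its leading dims match `ref`, keeping `tensor`'s last `keep` dims
	ref_shape = ref.get_shape().as_list()
	tensor_shape = tensor.get_shape().as_list()
	pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(len(ref_shape) - keep)]
	keep_shape = [tensor_shape[i] or tf.shape(tensor)[i] for i in range(len(tensor_shape) - keep, len(tensor_shape))]
	return tf.reshape(tensor, pre_shape + keep_shape)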
Example #2
Source File: model_mcb.py    From FVTA_MemexQA with MIT License
def softmax(logits,scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits,1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out,logits,1)
		return out

# softmax selection
# return target * softmax(logits)
# target: [..., J, d]
# logits: [..., J]

# e.g. [N, M, dim] * [N, M] -> [N, dim], so [N, M] is the attention over each M

# return: [..., d] # the target vectors are weighted by the softmax of the logits
# [N, M, JX, JQ, 2d] * [N, M, JX, JQ] (each context-to-query mapping) -> [N, M, JX, 2d] # attends over the JQ dimension
Example #3
Source File: model.py    From FVTA_MemexQA with MIT License
def softmax(logits,scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits,1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out,logits,1)
		return out

# softmax selection
# return target * softmax(logits)
# target: [..., J, d]
# logits: [..., J]

# e.g. [N, M, dim] * [N, M] -> [N, dim], so [N, M] is the attention over each M

# return: [..., d] # the target vectors are weighted by the softmax of the logits
# [N, M, JX, JQ, 2d] * [N, M, JX, JQ] (each context-to-query mapping) -> [N, M, JX, 2d] # attends over the JQ dimension
Example #4
Source File: intra_blob_a.py    From CogAlg with MIT License
def scan_P__(P__):
    """Detect up_forks and down_forks per P."""

    for _P_, P_ in pairwise(P__):  # Iterate through pairs of lines.
        _iter_P_, iter_P_ = iter(_P_), iter(P_)  # Convert to iterators.
        try:
            _P, P = next(_iter_P_), next(iter_P_)  # First pair to check.
        except StopIteration:  # No more up_fork-down_fork pair.
            continue  # To next pair of _P_, P_.
        while True:
            isleft, olp = comp_edge(_P, P)  # Check for 4 different cases.
            if olp and _P['sign'] == P['sign']:
                _P['down_fork_'].append(P)
                P['up_fork_'].append(_P)
            try:  # Check for stopping:
                _P, P = (next(_iter_P_), P) if isleft else (_P, next(iter_P_))
            except StopIteration:  # No more up_fork - down_fork pair.
                break  # To next pair of _P_, P_.

    return [*flatten(P__)]  # Flatten P__ before return. 
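
In this CogAlg example, flatten() is applied to a nested list of P dicts rather than a tensor, so it presumably just removes one level of nesting. A plausible pure-Python equivalent (an assumption for illustration, not the project's actual helper):

from itertools import chain

def flatten(nested):
    # [[P, P], [P], ...] -> iterator over P, P, P, ...
    return chain.from_iterable(nested)

The [*flatten(P__)] in the return statement then unpacks that iterator into a flat list.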
Example #5
Source File: lut.py    From ice with GNU General Public License v3.0
def combine_lut_dicts(*args):
	ks = utils.flatten([x.keys() for x in args])
	d = {}
	for k in set(ks):
		d[k] = utils.flatten([x[k] for x in args if k in x.keys()])
	return d 
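
A hypothetical usage sketch for combine_lut_dicts, assuming utils.flatten() merges a list of lists into a single list (the values here are illustrative only):

a = {"red": [1], "green": [2]}
b = {"red": [3], "blue": [4]}

combine_lut_dicts(a, b)
# -> {"red": [1, 3], "green": [2], "blue": [4]}  (key order may vary)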
Example #6
Source File: model.py    From amdim-public with MIT License
def forward(self, ftr_1):
        '''
        Input:
          ftr_1 : features at 1x1 layer
        Output:
          lgt_glb_mlp: class logits from global features (MLP classifier head)
          lgt_glb_lin: class logits from global features (linear classifier head)
        '''
        # collect features to feed into classifiers
        # - always detach() -- send no grad into encoder!
        h_top_cls = flatten(ftr_1).detach()
        # compute predictions
        lgt_glb_mlp = self.block_glb_mlp(h_top_cls)
        lgt_glb_lin = self.block_glb_lin(h_top_cls)
        return lgt_glb_mlp, lgt_glb_lin 
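
In the amdim-public examples, flatten() collapses a feature map into one vector per sample before it is fed to the classifier heads. A minimal sketch of such a helper (an assumption based on how it is called here, not necessarily the repository's exact definition):

import torch

def flatten(x):
    # collapse everything after the batch dim: [N, C, 1, 1] -> [N, C]
    return x.reshape(x.size(0), -1)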
Example #7
Source File: vigenere.py    From CryptTools with MIT License
def kasiki(text):
    if args.verbose:
        print("Finding sequence duplicates and spacings...")
    utils.args = args
    min_length = 2 if (args.exhaustive or len(clean_text) < TEST_2_MAX_TEXT_LENGTH) else 3
    seqSpacings = utils.find_sequence_duplicates(clean_text, min_length)
    if args.verbose:
        if args.all:
            print(seqSpacings)
        print("Extracting spacing divisors...")
    divisors = useful_divisors(flatten(list(seqSpacings.values())))
    divisorsCount = utils.repetitions(divisors)
    if args.exhaustive:
        return [x[0] for x in divisorsCount]
    return [x[0] for x in divisorsCount if x[0] <= KEY_LENGTH_THRESHOLD] 
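
In kasiki(), flatten() merges the per-sequence spacing lists into one flat list before divisors are extracted and counted. For example (illustrative values only):

seqSpacings = {"THE": [20, 35], "ING": [20]}
flatten(list(seqSpacings.values()))
# -> [20, 35, 20]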
Example #8
Source File: model_v2.py    From FVTA_MemexQA with MIT License
def linear(x, output_size, scope, add_tanh=False, wd=None):
	with tf.variable_scope(scope):
		# the input may have rank > 2, so flatten it while keeping the last `keep` dims
		keep = 1
		#print x.get_shape().as_list()
		flat_x = flatten(x, keep) # keep the last dim # [N, M, JX, JQ, 2d] => [N*M*JX*JQ, 2d]
		#print flat_x.get_shape() # (?, 200) # wd+cwd
		bias_start = 0.0
		if not (type(output_size) == type(1)): # output_size may be a tf.Dimension from get_shape()[k]; use its .value
			output_size = output_size.value

		#print [flat_x.get_shape()[-1], output_size]

		W = tf.get_variable("W", dtype="float", initializer=tf.truncated_normal([flat_x.get_shape()[-1].value, output_size], stddev=0.1))
		bias = tf.get_variable("b", dtype="float", initializer=tf.constant(bias_start, shape=[output_size]))
		flat_out = tf.matmul(flat_x, W)+bias

		if add_tanh:
			flat_out = tf.tanh(flat_out, name="tanh")


		if wd is not None:
			add_wd(wd)

		out = reconstruct(flat_out, x, keep)
		return out 
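
A hypothetical call sketch (the scope name and dimensions are illustrative): applied to a rank-5 tensor, linear() flattens to 2-D, applies the affine map (optionally followed by tanh), and reconstruct() restores the leading dimensions:

# x: [N, M, JX, JQ, 2d] -> out: [N, M, JX, JQ, 1]
out = linear(x, output_size=1, scope="att_logit", add_tanh=True, wd=0.0001)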
Example #9
Source File: model_mcb.py    From FVTA_MemexQA with MIT License
def linear(x,output_size,scope,add_tanh=False,wd=None):
	with tf.variable_scope(scope):
		# the input may have rank > 2, so flatten it while keeping the last `keep` dims
		keep = 1
		#print x.get_shape().as_list()
		flat_x = flatten(x,keep) # keep the last dim # [N,M,JX,JQ,2d] => [N*M*JX*JQ,2d]
		#print flat_x.get_shape() # (?, 200) # wd+cwd
		bias_start = 0.0
		if not (type(output_size) == type(1)): # output_size may be a tf.Dimension from get_shape()[k]; use its .value
			output_size = output_size.value

		#print [flat_x.get_shape()[-1],output_size]

		W = tf.get_variable("W",dtype="float",initializer=tf.truncated_normal([flat_x.get_shape()[-1].value,output_size],stddev=0.1))
		bias = tf.get_variable("b",dtype="float",initializer=tf.constant(bias_start,shape=[output_size]))
		flat_out = tf.matmul(flat_x,W)+bias


		if add_tanh:
			flat_out = tf.tanh(flat_out,name="tanh")

		if wd is not None:
			add_wd(wd)

		out = reconstruct(flat_out,x,keep)
		return out 
Example #10
Source File: model_dmnplus.py    From FVTA_MemexQA with MIT License
def softmax(logits,scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits,1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out,logits,1)
		return out



# add the L2 loss of the current scope's variables to the loss collection
Example #11
Source File: model_dmnplus.py    From FVTA_MemexQA with MIT License
def linear(x,output_size,scope,add_tanh=False,wd=None):
	with tf.variable_scope(scope):
		# the input may have rank > 2, so flatten it while keeping the last `keep` dims
		keep = 1
		#print x.get_shape().as_list()
		flat_x = flatten(x,keep) # keep the last dim # [N,M,JX,JQ,2d] => [N*M*JX*JQ,2d]
		#print flat_x.get_shape() # (?, 200) # wd+cwd
		bias_start = 0.0
		if not (type(output_size) == type(1)): # output_size may be a tf.Dimension from get_shape()[k]; use its .value
			output_size = output_size.value

		#print [flat_x.get_shape()[-1],output_size]

		W = tf.get_variable("W",dtype="float",initializer=tf.truncated_normal([flat_x.get_shape()[-1].value,output_size],stddev=0.1))
		bias = tf.get_variable("b",dtype="float",initializer=tf.constant(bias_start,shape=[output_size]))
		flat_out = tf.matmul(flat_x,W)+bias

		if add_tanh:
			flat_out = tf.tanh(flat_out,name="tanh")


		if wd is not None:
			add_wd(wd)

		out = reconstruct(flat_out,x,keep)
		return out

# from https://github.com/barronalex/Dynamic-Memory-Networks-in-TensorFlow 
Example #12
Source File: model.py    From amdim-public with MIT License
def forward(self, x1, x2, class_only=False):
        '''
        Input:
          x1 : images from which to extract features -- x1 ~ A(x)
          x2 : images from which to extract features -- x2 ~ A(x)
          class_only : if True, skip the infomax outputs and only compute classifier logits
        Output:
          res_dict : various outputs depending on the task
        '''
        # dict for returning various values
        res_dict = {}
        if class_only:
            # shortcut to encode one image and evaluate classifier
            rkhs_1, _, _ = self.encode(x1, no_grad=True)
            lgt_glb_mlp, lgt_glb_lin = self.evaluator(rkhs_1)
            res_dict['class'] = [lgt_glb_mlp, lgt_glb_lin]
            res_dict['rkhs_glb'] = flatten(rkhs_1)
            return res_dict

        # hack for redistributing workload in highly multi-gpu setting
        # -- yeah, "highly-multi-gpu" is obviously subjective...
        if has_many_gpus():
            n_batch = x1.size(0)
            n_gpus = torch.cuda.device_count()
            assert (n_batch % (n_gpus - 1) == 0), 'n_batch: {}'.format(n_batch)
            # expand input with dummy chunks so cuda:0 can skip compute
            chunk_size = n_batch // (n_gpus - 1)
            dummy_chunk = torch.zeros_like(x1[:chunk_size])
            x1 = torch.cat([dummy_chunk, x1], dim=0)
            x2 = torch.cat([dummy_chunk, x2], dim=0)

        # run augmented image pairs through the encoder
        r1_x1, r5_x1, r7_x1 = self.encoder(x1)
        r1_x2, r5_x2, r7_x2 = self.encoder(x2)

        # hack for redistributing workload in highly-multi-gpu setting
        if has_many_gpus():
            # strip off dummy vals returned by cuda:0
            r1_x1, r5_x1, r7_x1 = r1_x1[1:], r5_x1[1:], r7_x1[1:]
            r1_x2, r5_x2, r7_x2 = r1_x2[1:], r5_x2[1:], r7_x2[1:]

        # compute NCE infomax objective at multiple scales
        loss_1t5, loss_1t7, loss_5t5, lgt_reg = \
            self.g2l_loss(r1_x1, r5_x1, r7_x1, r1_x2, r5_x2, r7_x2)
        res_dict['g2l_1t5'] = loss_1t5
        res_dict['g2l_1t7'] = loss_1t7
        res_dict['g2l_5t5'] = loss_5t5
        res_dict['lgt_reg'] = lgt_reg
        # grab global features for use elsewhere
        res_dict['rkhs_glb'] = flatten(r1_x1)

        # compute classifier logits for online eval during infomax training
        # - we do this for both images in each augmented pair...
        lgt_glb_mlp, lgt_glb_lin = self.evaluator(ftr_1=torch.cat([r1_x1, r1_x2]))
        res_dict['class'] = [lgt_glb_mlp, lgt_glb_lin]
        return res_dict


##############################
# Layers for use in model... #
##############################