Python sonnet.Embed() Examples

The following are 6 code examples of sonnet.Embed(), drawn from open-source projects. The source file, originating project, and license for each example are noted above it.
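All of the examples use the Sonnet 1 API on top of TensorFlow 1.x, imported as snt. In isolation, snt.Embed builds a trainable [vocab_size, embed_dim] lookup table and maps integer ids to embedding vectors; a minimal sketch (the ids and sizes below are illustrative):

import tensorflow as tf
import sonnet as snt

ids = tf.constant([[1, 4, 2]])                 # [batch, sequence] of integer ids
embed = snt.Embed(vocab_size=10, embed_dim=4)  # trainable [10, 4] lookup table
embeddings = embed(ids)                        # -> [batch, sequence, 4]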
Example #1
Source File: gmf.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def _construct_weights(self):
    """
    Constructs the user/item memories and user/item external memory/outputs.

    Also adds the embedding lookups.
    """
    self.user_memory = snt.Embed(self.config.user_count, self.config.embed_size,
                                 initializers=self._embedding_initializers,
                                 regularizers=self._embedding_regularizers,
                                 name='MemoryEmbed')

    self.item_memory = snt.Embed(self.config.item_count,
                                 self.config.embed_size,
                                 initializers=self._embedding_initializers,
                                 regularizers=self._embedding_regularizers,
                                 name='ItemMemory')

    # [batch, embedding size]
    self._cur_user = self.user_memory(self.input_users)

    # Item memories as a query
    self._cur_item = self.item_memory(self.input_items)
    self._cur_item_negative = self.item_memory(self.input_items_negative)
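As the [batch, embedding size] comment indicates, snt.Embed also accepts a rank-1 batch of ids; each lookup appends one embed_dim axis. A minimal sketch of that lookup, with illustrative sizes:

import tensorflow as tf
import sonnet as snt

user_ids = tf.constant([3, 0, 7])                     # [batch] of user ids
user_memory = snt.Embed(100, 16, name='MemoryEmbed')  # 100 users, 16-dim embeddings
cur_user = user_memory(user_ids)                      # -> [batch, 16]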
Example #2
Source File: experiment.py    From scalable_agent with Apache License 2.0
def _instruction(self, instruction):
    # Split string.
    splitted = tf.string_split(instruction)
    dense = tf.sparse_tensor_to_dense(splitted, default_value='')
    length = tf.reduce_sum(tf.to_int32(tf.not_equal(dense, '')), axis=1)

    # To int64 hash buckets. Small risk of having collisions. Alternatively, a
    # vocabulary can be used.
    num_hash_buckets = 1000
    buckets = tf.string_to_hash_bucket_fast(dense, num_hash_buckets)

    # Embed the instruction. Embedding size 20 seems to be enough.
    embedding_size = 20
    embedding = snt.Embed(num_hash_buckets, embedding_size)(buckets)

    # Pad to make sure there is at least one output.
    padding = tf.to_int32(tf.equal(tf.shape(embedding)[1], 0))
    embedding = tf.pad(embedding, [[0, 0], [0, padding], [0, 0]])

    core = tf.contrib.rnn.LSTMBlockCell(64, name='language_lstm')
    output, _ = tf.nn.dynamic_rnn(core, embedding, length, dtype=tf.float32)

    # Return last output.
    return tf.reverse_sequence(output, length, seq_axis=1)[:, 0] 
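The hash-bucket pattern above avoids building a vocabulary: each word is hashed into one of a fixed number of buckets, and the buckets are embedded directly. A stripped-down sketch of just that step (the strings and sizes are illustrative):

import tensorflow as tf
import sonnet as snt

words = tf.constant([['go', 'to', 'the', 'red', 'door']])   # [batch, words]
num_hash_buckets = 1000
# Hash each word to one of num_hash_buckets ids; collisions are possible.
buckets = tf.string_to_hash_bucket_fast(words, num_hash_buckets)
word_embeddings = snt.Embed(num_hash_buckets, 20)(buckets)  # [batch, words, 20]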
Example #3
Source File: model.py    From vae-seq with Apache License 2.0
def _make_encoder(self):
    """Constructs an encoding for a single character ID."""
    embed = snt.Embed(
        vocab_size=self.hparams.vocab_size + self.hparams.oov_buckets,
        embed_dim=self.hparams.embed_size)
    mlp = codec_mod.MLPObsEncoder(self.hparams)
    return codec_mod.EncoderSequence([embed, mlp], name="obs_encoder")
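Enlarging vocab_size by oov_buckets simply adds extra rows to the embedding table, so out-of-vocabulary characters bucketed past the known vocabulary still index valid embeddings. A minimal sketch of the sizing (the numbers are illustrative):

import sonnet as snt

vocab_size, oov_buckets, embed_size = 128, 4, 32
# Rows 0..127 cover known character ids; rows 128..131 absorb OOV buckets.
embed = snt.Embed(vocab_size=vocab_size + oov_buckets, embed_dim=embed_size)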
Example #4
Source File: attribute.py    From kglib with Apache License 2.0
def _build(self, attribute_value):
    # Categorical attribute values arrive as floats; cast them to integer ids.
    int_attribute_value = tf.cast(attribute_value, dtype=tf.int32)
    tf.summary.histogram('cat_attribute_value_histogram', int_attribute_value)
    embedding = snt.Embed(self._num_categories, self._attr_embedding_dim)(int_attribute_value)
    tf.summary.histogram('cat_embedding_histogram', embedding)
    # Drop the singleton id axis: [batch, 1, embed_dim] -> [batch, embed_dim].
    return tf.squeeze(embedding, axis=1)
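The squeeze on axis 1 implies attribute_value carries a singleton trailing axis, e.g. [batch, 1]; the lookup then yields [batch, 1, embed_dim], which is flattened back to [batch, embed_dim]. A minimal shape walk-through with illustrative sizes:

import tensorflow as tf
import sonnet as snt

attribute_value = tf.constant([[2.0], [0.0], [1.0]])  # [batch, 1], float input
ids = tf.cast(attribute_value, tf.int32)
embedding = snt.Embed(3, 5)(ids)                      # [batch, 1, 5]
embedding = tf.squeeze(embedding, axis=1)             # [batch, 5]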
Example #5
Source File: embedding.py    From kglib with Apache License 2.0
def embed_type(features, num_types, type_embedding_dim):
    # Column 0 flags whether the element pre-exists; keep it as a float feature.
    preexistence_feat = tf.expand_dims(tf.cast(features[:, 0], dtype=tf.float32), axis=1)
    type_embedder = snt.Embed(num_types, type_embedding_dim)
    norm = snt.LayerNorm()
    # Column 1 holds the type id; embed it and layer-normalise the result.
    type_embedding = norm(type_embedder(tf.cast(features[:, 1], tf.int32)))
    tf.summary.histogram('type_embedding_histogram', type_embedding)
    return tf.concat([preexistence_feat, type_embedding], axis=1)
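Here features packs a pre-existence flag in column 0 and a type id in column 1, and the result concatenates the flag with a layer-normalised type embedding, giving shape [batch, 1 + type_embedding_dim]. A hedged usage sketch (the feature values are illustrative):

import tensorflow as tf

# Column 0: pre-existence flag; column 1: type id.
features = tf.constant([[1.0, 0.0], [0.0, 2.0]])
embedded = embed_type(features, num_types=3, type_embedding_dim=4)  # [2, 5]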
Example #6
Source File: cmn.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def _construct_weights(self):
    """
    Constructs the user/item memories and user/item external memory/outputs.

    Also adds the embedding lookups.
    """
    self.user_memory = snt.Embed(self.config.user_count, self.config.embed_size,
                                 initializers=self._embedding_initializers,
                                 name='MemoryEmbed')

    self.user_output = snt.Embed(self.config.user_count, self.config.embed_size,
                                 initializers=self._embedding_initializers,
                                 name='MemoryOutput')

    self.item_memory = snt.Embed(self.config.item_count,
                                 self.config.embed_size,
                                 initializers=self._embedding_initializers,
                                 name='ItemMemory')

    self._mem_layer = VariableLengthMemoryLayer(self.config.hops,
                                                self.config.embed_size,
                                                tf.nn.relu,
                                                initializers=self._hops_init,
                                                regularizers=self._regularizers,
                                                name='UserMemoryLayer')

    self._output_module = snt.Sequential([
        DenseLayer(self.config.embed_size, True, tf.nn.relu,
                   initializers=self._initializers,
                   regularizers=self._regularizers,
                   name='Layer'),
        snt.Linear(1, False,
                   initializers=self._output_initializers,
                   regularizers=self._regularizers,
                   name='OutputVector'),
        tf.squeeze])

    # [batch, embedding size]
    self._cur_user = self.user_memory(self.input_users)
    self._cur_user_output = self.user_output(self.input_users)

    # Item memories as a query
    self._cur_item = self.item_memory(self.input_items)
    self._cur_item_negative = self.item_memory(self.input_items_negative)

    # Share embeddings between the item memory and item output
    self._cur_item_output = self._cur_item
    self._cur_item_output_negative = self._cur_item_negative
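In Sonnet 1, the initializers/regularizers dicts threaded through these examples are keyed by the module's variable name, which for snt.Embed is "embeddings". A hedged sketch of what self._embedding_initializers and self._embedding_regularizers might contain (the project's actual choices may differ):

import tensorflow as tf
import sonnet as snt

# Hypothetical initializer/regularizer choices, keyed by variable name.
embedding_initializers = {'embeddings': tf.truncated_normal_initializer(stddev=0.01)}
embedding_regularizers = {'embeddings': tf.contrib.layers.l2_regularizer(1e-4)}
user_memory = snt.Embed(100, 16,
                        initializers=embedding_initializers,
                        regularizers=embedding_regularizers,
                        name='MemoryEmbed')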