Python tensorflow.python.keras.layers.Reshape() Examples
The following are 2 code examples of tensorflow.python.keras.layers.Reshape().
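For reference, here is a minimal standalone sketch of the Reshape() API (the shapes are illustrative assumptions): the target shape passed to Reshape excludes the batch dimension, which is carried through unchanged.

from tensorflow.python.keras.layers import Input, Reshape
from tensorflow.python.keras.models import Model

inputs = Input(shape=(12,))          # batch_size x 12
outputs = Reshape((3, 4))(inputs)    # batch_size x 3 x 4
model = Model(inputs=inputs, outputs=outputs)
print(model.output_shape)            # (None, 3, 4)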
Example #1
Source File: gcn.py From GraphNeuralNetwork with MIT License
import tensorflow as tf
from tensorflow.python.keras.initializers import Identity
from tensorflow.python.keras.layers import Embedding, Input, Reshape
from tensorflow.python.keras.models import Model

# GraphConvolution is a custom layer defined elsewhere in this project.


def GCN(adj_dim, feature_dim, n_hidden, num_class, num_layers=2,
        activation=tf.nn.relu, dropout_rate=0.5, l2_reg=0, feature_less=True):
    Adj = Input(shape=(None,), sparse=True)
    if feature_less:
        # Featureless mode: node indices pass through a frozen identity
        # embedding, so every node starts as a one-hot feature vector.
        X_in = Input(shape=(1,))
        emb = Embedding(adj_dim, feature_dim,
                        embeddings_initializer=Identity(1.0), trainable=False)
        X_emb = emb(X_in)
        h = Reshape([X_emb.shape[-1]])(X_emb)  # drop the length-1 sequence axis
    else:
        X_in = Input(shape=(feature_dim,))
        h = X_in
    for i in range(num_layers):
        if i == num_layers - 1:
            # Final layer emits class probabilities.
            activation = tf.nn.softmax
            n_hidden = num_class
        h = GraphConvolution(n_hidden, activation=activation,
                             dropout_rate=dropout_rate,
                             l2_reg=l2_reg)([h, Adj])
    output = h
    model = Model(inputs=[X_in, Adj], outputs=output)
    return model
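A hedged usage sketch of GCN() follows; the dimensions are illustrative assumptions, not values from the original project (in featureless mode the identity-initialized embedding implies feature_dim == adj_dim):

# Hypothetical graph dimensions, chosen only for illustration.
model = GCN(adj_dim=2708, feature_dim=2708, n_hidden=16, num_class=7)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()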
Example #2
Source File: RTSNNet.py From alpha-zero-general with MIT License
from tensorflow.python.keras.layers import (Activation, BatchNormalization,
                                            Conv2D, Dense, Dropout, Flatten,
                                            Input, Reshape)
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam


class RTSNNet:
    def __init__(self, game, encoder):
        """
        NNet model, copied from Othello NNet, with reduced fully connected
        layers fc1 and fc2 and reduced nnet_args.num_channels

        :param game: game configuration
        :param encoder: Encoder, used to encode game boards
        """
        from rts.src.config_class import CONFIG

        # game params
        self.board_x, self.board_y, num_encoders = game.getBoardSize()
        self.action_size = game.getActionSize()
        # num_encoders = CONFIG.nnet_args.encoder.num_encoders
        num_encoders = encoder.num_encoders

        # Neural Net
        self.input_boards = Input(
            shape=(self.board_x, self.board_y, num_encoders))  # s: batch_size x board_x x board_y x num_encoders
        x_image = Reshape(
            (self.board_x, self.board_y, num_encoders))(self.input_boards)  # batch_size x board_x x board_y x num_encoders
        h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(
            CONFIG.nnet_args.num_channels, 3, padding='same',
            use_bias=False)(x_image)))  # batch_size x board_x x board_y x num_channels
        h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(
            CONFIG.nnet_args.num_channels, 3, padding='same',
            use_bias=False)(h_conv1)))  # batch_size x board_x x board_y x num_channels
        h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(
            CONFIG.nnet_args.num_channels, 3, padding='valid',
            use_bias=False)(h_conv2)))  # batch_size x (board_x-2) x (board_y-2) x num_channels
        h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(
            CONFIG.nnet_args.num_channels, 3, padding='valid',
            use_bias=False)(h_conv3)))  # batch_size x (board_x-4) x (board_y-4) x num_channels
        h_conv4_flat = Flatten()(h_conv4)
        s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(
            BatchNormalization(axis=1)(Dense(256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
        s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(
            BatchNormalization(axis=1)(Dense(128, use_bias=False)(s_fc1))))  # batch_size x 128
        self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)  # batch_size x self.action_size
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
        self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'],
                           optimizer=Adam(CONFIG.nnet_args.lr))
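Note that the Reshape call above maps the input tensor to its own shape, so it is effectively a no-op kept from the Othello NNet this model was copied from; in that network Reshape adds the trailing channel axis to a 2-D board input. A minimal sketch of that pattern, assuming a 6x6 board:

from tensorflow.python.keras.layers import Input, Reshape

board = Input(shape=(6, 6))          # batch_size x 6 x 6
x_image = Reshape((6, 6, 1))(board)  # batch_size x 6 x 6 x 1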