Python utils.init() Examples
The following are 17 code examples of utils.init(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
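
These snippets come from PPO/A2C-style PyTorch projects, where utils.init is typically a small helper that applies one initializer to a module's weight and another to its bias, then returns the module so the call can be inlined. The exact definition lives in each project's utils module; the sketch below is an assumption based on the common pytorch-a2c-ppo-acktr pattern, not copied from any one project listed here.

def init(module, weight_init, bias_init, gain=1):
    # Initialize weight and bias in place, then hand the module back
    # so layers can be wrapped at construction time.
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module

Every example below builds an init_ lambda on top of this helper, fixing the initializers and gain once and reusing the lambda across layers.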
Example #1
Source File: distributions.py from gym-miniworld (Apache License 2.0) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(Categorical, self).__init__()

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           gain=0.01)

    self.linear = init_(nn.Linear(num_inputs, num_outputs))
Example #2
Source File: distributions.py from carla-rl (MIT License) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(DiagGaussian, self).__init__()

    # init_ = lambda m: init(m,
    #                        init_normc_,
    #                        lambda x: nn.init.constant_(x, 0))
    # self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
    self.fc_mean = nn.Linear(num_inputs, num_outputs)
    self.fc_mean.bias.data[0] = 0.5  # Throttle
    self.fc_mean.bias.data[1] = 0.0  # Brake
    self.fc_mean.bias.data[2] = 0.0  # Steer
    self.fc_mean.weight.data.fill_(0)

    self.logstd = AddBias(-2.3 * torch.ones(num_outputs))
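
AddBias is not part of torch.nn; in these codebases it is usually a tiny module holding a learnable bias that is added to its input, so the Gaussian policy's log-std is a trainable parameter independent of the network input (here initialized to -2.3, i.e. an initial std of roughly 0.1). A sketch under that assumption, modeled on the pytorch-a2c-ppo-acktr version:

class AddBias(nn.Module):
    # Learnable additive bias, broadcast over the batch (and, for
    # convolutional inputs, spatial) dimensions.
    def __init__(self, bias):
        super(AddBias, self).__init__()
        self._bias = nn.Parameter(bias.unsqueeze(1))

    def forward(self, x):
        if x.dim() == 2:
            bias = self._bias.t().view(1, -1)
        else:
            bias = self._bias.t().view(1, -1, 1, 1)
        return x + bias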
Example #3
Source File: distributions.py from carla-rl (MIT License) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(Categorical, self).__init__()

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           gain=0.01)

    self.linear = init_(nn.Linear(num_inputs, num_outputs))
Example #4
Source File: model.py from carla-rl (MIT License) | 5 votes
def __init__(self, recurrent, recurrent_input_size, hidden_size):
    super(NNBase, self).__init__()

    self._hidden_size = hidden_size
    self._recurrent = recurrent

    if recurrent:
        self.gru = nn.GRU(recurrent_input_size, hidden_size)
        for name, param in self.gru.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0)
            elif 'weight' in name:
                nn.init.orthogonal_(param)
Example #5
Source File: distributions.py from marl_transfer (MIT License) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(Categorical, self).__init__()

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           gain=0.01)

    self.linear = init_(nn.Linear(num_inputs, num_outputs))
Example #6
Source File: distributions.py from pytorch-pommerman-rl (MIT License) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(DiagGaussian, self).__init__()

    init_ = lambda m: init(m,
                           init_normc_,
                           lambda x: nn.init.constant_(x, 0))

    self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
    self.logstd = AddBias(torch.zeros(num_outputs))
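
init_normc_ is likewise project-local rather than part of nn.init; it typically implements the normalized-columns initialization popular in A3C-style code: sample from a standard normal, then rescale each row to a fixed norm. A sketch under that assumption:

def init_normc_(weight, gain=1):
    # Normalized-columns init: N(0, 1) samples rescaled so every
    # output row has norm equal to gain.
    weight.normal_(0, 1)
    weight *= gain / torch.sqrt(weight.pow(2).sum(1, keepdim=True))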
Example #7
Source File: distributions.py from pytorch-pommerman-rl (MIT License) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(Categorical, self).__init__()

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           gain=0.01)

    self.linear = init_(nn.Linear(num_inputs, num_outputs))
Example #8
Source File: distributions.py from gym-miniworld (Apache License 2.0) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(DiagGaussian, self).__init__()

    init_ = lambda m: init(m,
                           init_normc_,
                           lambda x: nn.init.constant_(x, 0))

    self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
    self.logstd = AddBias(torch.zeros(num_outputs))
Example #9
Source File: model.py from dal (MIT License) | 5 votes
def __init__(self, recurrent, recurrent_input_size, hidden_size):
    super(NNBase, self).__init__()

    self._hidden_size = hidden_size
    self._recurrent = recurrent

    if recurrent:
        self.gru = nn.GRU(recurrent_input_size, hidden_size)
        for name, param in self.gru.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0)
            elif 'weight' in name:
                nn.init.orthogonal_(param)
Example #10
Source File: model.py from gym-miniworld (Apache License 2.0) | 5 votes
def __init__(self, num_inputs, recurrent=False, hidden_size=64):
    super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)

    if recurrent:
        num_inputs = hidden_size

    init_ = lambda m: init(m,
                           init_normc_,
                           lambda x: nn.init.constant_(x, 0))

    self.actor = nn.Sequential(
        init_(nn.Linear(num_inputs, hidden_size)),
        nn.Tanh(),
        init_(nn.Linear(hidden_size, hidden_size)),
        nn.Tanh()
    )

    self.critic = nn.Sequential(
        init_(nn.Linear(num_inputs, hidden_size)),
        nn.Tanh(),
        init_(nn.Linear(hidden_size, hidden_size)),
        nn.Tanh()
    )

    self.critic_linear = init_(nn.Linear(hidden_size, 1))

    self.train()
Example #11
Source File: model.py from gym-miniworld (Apache License 2.0) | 5 votes
def __init__(self, num_inputs, recurrent=False, hidden_size=128):
    super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           nn.init.calculate_gain('relu'))

    # For 80x60 input
    self.main = nn.Sequential(
        init_(nn.Conv2d(num_inputs, 32, kernel_size=5, stride=2)),
        nn.BatchNorm2d(32),
        nn.ReLU(),

        init_(nn.Conv2d(32, 32, kernel_size=5, stride=2)),
        nn.BatchNorm2d(32),
        nn.ReLU(),

        init_(nn.Conv2d(32, 32, kernel_size=4, stride=2)),
        nn.BatchNorm2d(32),
        nn.ReLU(),

        # Print(),
        Flatten(),

        # nn.Dropout(0.2),
        init_(nn.Linear(32 * 7 * 5, hidden_size)),
        nn.ReLU()
    )

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0))

    self.critic_linear = init_(nn.Linear(hidden_size, 1))

    self.train()
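
Flatten (like the commented-out Print) is a small helper module these projects define themselves, since the PyTorch versions they target predate nn.Flatten. The usual one-liner, hedged as an assumption about this repo's helpers:

class Flatten(nn.Module):
    # Collapse everything after the batch dimension so conv feature
    # maps can feed a linear layer.
    def forward(self, x):
        return x.view(x.size(0), -1)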
Example #12
Source File: model.py from gym-miniworld (Apache License 2.0) | 5 votes
def __init__(self, recurrent, recurrent_input_size, hidden_size):
    super(NNBase, self).__init__()

    self._hidden_size = hidden_size
    self._recurrent = recurrent

    if recurrent:
        self.gru = nn.GRUCell(recurrent_input_size, hidden_size)
        nn.init.orthogonal_(self.gru.weight_ih.data)
        nn.init.orthogonal_(self.gru.weight_hh.data)
        self.gru.bias_ih.data.fill_(0)
        self.gru.bias_hh.data.fill_(0)
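
Examples #4 and #9 perform the same orthogonal-weight/zero-bias initialization as this GRUCell variant, but via a named_parameters loop, which generalizes over nn.GRU's per-layer parameter names instead of spelling out each tensor. A quick way to see which parameters that loop matches:

gru = nn.GRU(10, 20)
for name, param in gru.named_parameters():
    print(name, tuple(param.shape))
# weight_ih_l0 (60, 10)   <- 3 gates x hidden_size rows
# weight_hh_l0 (60, 20)
# bias_ih_l0 (60,)
# bias_hh_l0 (60,)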
Example #13
Source File: distributions.py from dal (MIT License) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(DiagGaussian, self).__init__()

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0))

    self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
    self.logstd = AddBias(torch.zeros(num_outputs))
Example #14
Source File: distributions.py from dal (MIT License) | 5 votes
def __init__(self, num_inputs, num_outputs):
    super(Categorical, self).__init__()

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           gain=0.01)

    self.linear = init_(nn.Linear(num_inputs, num_outputs))
Example #15
Source File: model.py from dal (MIT License) | 5 votes
def __init__(self, num_inputs, recurrent=False, hidden_size=64):
    super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)

    if recurrent:
        num_inputs = hidden_size

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           np.sqrt(2))

    self.actor = nn.Sequential(
        init_(nn.Linear(num_inputs, hidden_size)),
        nn.Tanh(),
        init_(nn.Linear(hidden_size, hidden_size)),
        nn.Tanh()
    )

    self.critic = nn.Sequential(
        init_(nn.Linear(num_inputs, hidden_size)),
        nn.Tanh(),
        init_(nn.Linear(hidden_size, hidden_size)),
        nn.Tanh()
    )

    self.critic_linear = init_(nn.Linear(hidden_size, 1))

    self.train()
Example #16
Source File: model.py from carla-rl (MIT License) | 4 votes
def __init__(self, img_num_features, v_num_features, recurrent=False, hidden_size=512):
    super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           nn.init.calculate_gain('relu'))

    self.cnn_backbone = nn.Sequential(
        init_(nn.Conv2d(img_num_features, 32, 8, stride=4)),
        nn.ReLU(),
        init_(nn.Conv2d(32, 64, 4, stride=2)),
        nn.ReLU(),
        init_(nn.Conv2d(64, 32, 3, stride=1)),
        nn.ReLU(),
        Flatten(),
        init_(nn.Linear(32 * 7 * 7, hidden_size)),
        nn.ReLU()
    )

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0))

    self.fc_backbone = nn.Sequential(
        init_(nn.Linear(v_num_features, hidden_size // 4)),
        nn.ReLU(),
        init_(nn.Linear(hidden_size // 4, hidden_size // 2)),
        nn.ReLU(),
        init_(nn.Linear(hidden_size // 2, hidden_size)),
        nn.ReLU()
    )

    self.fc_joint = nn.Sequential(
        init_(nn.Linear(hidden_size * 2, hidden_size * 2)),
        nn.ReLU(),
        init_(nn.Linear(hidden_size * 2, hidden_size)),
        nn.ReLU(),
        init_(nn.Linear(hidden_size, hidden_size)),
        nn.ReLU()
    )

    self.critic_linear = init_(nn.Linear(hidden_size, 1))

    self.train()
Example #17
Source File: model.py from dal (MIT License) | 4 votes
def __init__(self, num_inputs, recurrent=False, hidden_size=512):
    super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           nn.init.calculate_gain('relu'))

    num_inputs = 6
    # hidden_size = 100

    # self.main = nn.Sequential(
    #     init_(nn.Conv2d(num_inputs, 32, 8, stride=4)),
    #     nn.ReLU(),
    #     init_(nn.Conv2d(32, 64, 4, stride=2)),
    #     nn.ReLU(),
    #     init_(nn.Conv2d(64, 32, 3, stride=1)),
    #     nn.ReLU(),
    #     Flatten(),
    #     init_(nn.Linear(32 * 7 * 7, hidden_size)),
    #     nn.ReLU()
    # )

    # self.main = nn.Sequential(
    #     init_(nn.Conv2d(num_inputs, 16, 3, padding=1, stride=1)),
    #     nn.ReLU(),
    #     nn.MaxPool2d(kernel_size=2),
    #     init_(nn.Conv2d(16, 32, 3, padding=1, stride=1)),
    #     nn.ReLU(),
    #     nn.MaxPool2d(kernel_size=2),
    #     init_(nn.Conv2d(32, 32, 3, padding=1, stride=1)),
    #     nn.ReLU(),
    #     nn.MaxPool2d(kernel_size=2),
    #     init_(nn.Conv2d(32, 16, 3, padding=1, stride=1)),
    #     nn.ReLU(),
    #     # nn.MaxPool2d(kernel_size=2),
    #     Flatten(),
    #     init_(nn.Linear(16 * 11 * 11, hidden_size)),
    #     nn.ReLU()
    # )

    self.main = nn.Sequential(
        init_(nn.Conv2d(num_inputs, 16, 3, padding=1, stride=1)),
        nn.ReLU(),
        init_(nn.Conv2d(16, 32, 3, padding=1, stride=1)),
        nn.ReLU(),
        init_(nn.Conv2d(32, 32, 3, padding=1, stride=1)),
        nn.ReLU(),
        init_(nn.Conv2d(32, 16, 3, padding=1, stride=1)),
        nn.ReLU(),
        Flatten(),
        init_(nn.Linear(16 * 11 * 11, hidden_size)),
        nn.ReLU()
    )

    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0))

    self.critic_linear = init_(nn.Linear(hidden_size, 1))

    self.train()