Python keras.optimizers.get() Examples

The following are 14 code examples of keras.optimizers.get(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.optimizers, or try the search function.
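
Before diving into the project examples, here is a minimal sketch of the three identifier forms keras.optimizers.get() accepts, assuming a standalone Keras 2.x install (the optimizer names and hyperparameter values are only illustrative):

from keras import optimizers

# By string name: returns an optimizer instance with default settings.
sgd = optimizers.get('sgd')

# By config dict (the serialized form, also used in Example #14 below).
adam = optimizers.get({'class_name': 'Adam', 'config': {'lr': 0.001}})

# Passing an existing Optimizer instance returns it unchanged.
rmsprop = optimizers.get(optimizers.RMSprop(lr=0.01))
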
Example #1
Source File: worker.py    From elephas with MIT License
def train(self, data_iterator):
        """Train a keras model on a worker
        """
        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss, metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

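        # Note: this second compile overrides the optimizer instance built above;
        # Keras resolves the raw master_optimizer identifier via optimizers.get() internally.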
        self.model.compile(optimizer=self.master_optimizer,
                           loss=self.master_loss,
                           metrics=self.master_metrics)

        weights_before_training = self.model.get_weights()
        if x_train.shape[0] > self.train_config.get('batch_size'):
            self.model.fit(x_train, y_train, **self.train_config)
        weights_after_training = self.model.get_weights()
        deltas = subtract_params(
            weights_before_training, weights_after_training)
        yield deltas 
Example #2
Source File: ml_model.py    From elephas with MIT License
def _fit(self, df):
        """Private fit method of the Estimator, which trains the model.
        """
        simple_rdd = df_to_simple_rdd(df, categorical=self.get_categorical_labels(), nb_classes=self.get_nb_classes(),
                                      features_col=self.getFeaturesCol(), label_col=self.getLabelCol())
        simple_rdd = simple_rdd.repartition(self.get_num_workers())
        keras_model = model_from_yaml(self.get_keras_model_config())
        metrics = self.get_metrics()
        loss = self.get_loss()
        optimizer = get_optimizer(self.get_optimizer_config())
        keras_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        spark_model = SparkModel(model=keras_model,
                                 mode=self.get_mode(),
                                 frequency=self.get_frequency(),
                                 num_workers=self.get_num_workers())
        spark_model.fit(simple_rdd,
                        epochs=self.get_epochs(),
                        batch_size=self.get_batch_size(),
                        verbose=self.get_verbosity(),
                        validation_split=self.get_validation_split())

        model_weights = spark_model.master_network.get_weights()
        weights = simple_rdd.ctx.broadcast(model_weights)
        return ElephasTransformer(labelCol=self.getLabelCol(),
                                  outputCol='prediction',
                                  keras_model_config=spark_model.master_network.to_yaml(),
                                  weights=weights) 
Example #3
Source File: models.py    From X with BSD 3-Clause "New" or "Revised" License
def compile(self, state_dim_values, lr=0.2, policy_rule="maxrand", init_value=None):
        """Build and initialize table with all possible state values.
           state_dim_values consists of a tuple of arrays or lists - each array
           gives every possible value for the corresponding dimension.
        """

        self.policy_rule = policies.get(policy_rule)

        if init_value is None:
            self.init_value = np.zeros(self.num_actions)
        else:
            self.init_value = init_value

        self.table = {key: np.array(self.init_value) for key in list(itertools.product(*state_dim_values))}
        self.lr = lr 
Example #4
Source File: models.py    From X with BSD 3-Clause "New" or "Revised" License
def values(self, observation):
        if observation.ndim == 1:
            vals = self.table[tuple(observation)]
        else:
            obs_tuple = tuple(map(tuple, observation))  # convert to tuple of tuples
            vals = list(map(self.table.__getitem__, obs_tuple))  # get values from dict as list of arrays (wrap in list() for Python 3)
        vals = np.asarray(vals)  # convert list of arrays to matrix (2-d array)
        return vals 
Example #5
Source File: abstract.py    From costar_plan with Apache License 2.0
def _numLabels(self):
        '''
        Use the taskdef to get total number of labels
        '''
        if self.taskdef is None:
            raise RuntimeError('must provide a task definition including ' + \
                               'all actions and descriptions.')
        return self.taskdef.numActions() 
Example #6
Source File: abstract.py    From costar_plan with Apache License 2.0
def getOptimizer(self):
        '''
        Set up a keras optimizer based on whatever settings you provided.
        '''
        optimizer = optimizers.get(self.optimizer)
        try:
            optimizer.lr = K.variable(self.lr, name='lr')
            optimizer.clipnorm = self.clipnorm
        except Exception:
            print('WARNING: could not set all optimizer flags')
        return optimizer 
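
As an aside, the lr and clipnorm settings patched onto the optimizer above could also be supplied up front through the config-dict form of optimizers.get(). A small sketch, assuming standalone Keras 2.x (the values are illustrative):

from keras import optimizers

# Build an optimizer with learning rate and gradient clipping set in one call.
opt = optimizers.get({'class_name': 'Adam',
                      'config': {'lr': 1e-4, 'clipnorm': 10.0}})
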
Example #7
Source File: _multigpu_with_nccl.py    From keras_experiments with The Unlicense
def compile(self, *args, **kwargs):
        '''Refer to the Model.compile docstring for parameters. Override
        functionality is documented below.

        :override compile: Overrides Model.compile to check that the
            optimizer is multi-GPU enabled, and to synchronize initial
            variables.
        '''
        initsync = self._initsync
        usenccl = self._usenccl

        opt = kwargs['optimizer']
        # if isinstance(opt, str):
        if not isinstance(opt, KO.Optimizer):
            opt = KO.get(opt)
            kwargs['optimizer'] = opt

        if self._syncopt and not getattr(opt, 'ismgpu', False):
            raise RuntimeError(
                'Multi-GPU synchronization model requires a multi-GPU '
                'optimizer. Instead got: {}'.format(opt))

        opt.usenccl = usenccl

        if self._enqueue_ops:
            # Produces a warning that kwargs are ignored for Tensorflow. Patch
            # Function in tensorflow_backend to use the enqueue_ops option.
            kwargs['fetches'] = self._enqueue_ops

        super(ModelMGPU, self).compile(*args, **kwargs)

        if initsync:
            self._run_initsync() 
Example #8
Source File: util.py    From keras-rl with MIT License
def clone_optimizer(optimizer):
    if type(optimizer) is str:
        return optimizers.get(optimizer)
    # Requires Keras 1.0.7 since get_config has breaking changes.
    params = dict([(k, v) for k, v in optimizer.get_config().items()])
    config = {
        'class_name': optimizer.__class__.__name__,
        'config': params,
    }
    if hasattr(optimizers, 'optimizer_from_config'):
        # COMPATIBILITY: Keras < 2.0
        clone = optimizers.optimizer_from_config(config)
    else:
        clone = optimizers.deserialize(config)
    return clone 
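
A quick usage sketch for the helper above, assuming standalone Keras and that clone_optimizer is in scope (the hyperparameters are illustrative):

from keras import optimizers

original = optimizers.Adam(lr=1e-3)
cloned = clone_optimizer(original)     # fresh Adam with the same configuration
by_name = clone_optimizer('rmsprop')   # strings are resolved via optimizers.get()
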
Example #9
Source File: dynamic_triad.py    From DynamicTriad with Apache License 2.0
def __emcoef_monitor(reportq):
        total_proccnt = {}
        while True:
            obj = reportq.get()
            if isinstance(obj, StopIteration):
                break
            pid, proccnt = obj
            total_proccnt[pid] = proccnt
            print("EM coefficients calculated for {} samples\r".format(sum(total_proccnt.values())), end='')
            sys.stdout.flush()
        print("EM coefficients calculated for {} samples".format(sum(total_proccnt.values()))) 
Example #10
Source File: models.py    From keras_extensions with MIT License
def compile(self, optimizer, loss):
        self.optimizer = optimizers.get(optimizer)

        self.loss = objectives.get(loss)

        # input of model
        self.X_train = self.get_input(train=True)
        self.X_test = self.get_input(train=False)

        train_loss = self.loss(self.X_train)
        test_loss = self.loss(self.X_test)

        train_loss.name = 'train_loss'
        test_loss.name = 'test_loss'

        for r in self.regularizers:
            train_loss = r(train_loss)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
        updates += self.updates

        if type(self.X_train) == list:
            train_ins = self.X_train
            test_ins = self.X_test
        else:
            train_ins = [self.X_train]
            test_ins = [self.X_test]

        self._train = K.function(train_ins, train_loss, updates=updates)
        self._test = K.function(test_ins, test_loss)

    # train model, adapted from keras.models.Sequential 
Example #11
Source File: worker.py    From elephas with MIT License
def train(self, data_iterator):
        """Train a keras model on a worker and send asynchronous updates
        to parameter server
        """
        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        if x_train.size == 0:
            return

        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss, metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        epochs = self.train_config['epochs']
        batch_size = self.train_config.get('batch_size')
        nb_train_sample = x_train.shape[0]
        nb_batch = int(np.ceil(nb_train_sample / float(batch_size)))
        index_array = np.arange(nb_train_sample)
        batches = [
            (i * batch_size, min(nb_train_sample, (i + 1) * batch_size))
            for i in range(0, nb_batch)
        ]

        if self.frequency == 'epoch':
            for epoch in range(epochs):
                weights_before_training = self.client.get_parameters()
                self.model.set_weights(weights_before_training)
                self.train_config['epochs'] = 1
                if x_train.shape[0] > batch_size:
                    self.model.fit(x_train, y_train, **self.train_config)
                self.train_config['epochs'] = epochs
                weights_after_training = self.model.get_weights()
                deltas = subtract_params(
                    weights_before_training, weights_after_training)
                self.client.update_parameters(deltas)
        elif self.frequency == 'batch':
            for epoch in range(epochs):
                if x_train.shape[0] > batch_size:
                    for (batch_start, batch_end) in batches:
                        weights_before_training = self.client.get_parameters()
                        self.model.set_weights(weights_before_training)
                        batch_ids = index_array[batch_start:batch_end]
                        x = slice_arrays(x_train, batch_ids)
                        y = slice_arrays(y_train, batch_ids)
                        self.model.train_on_batch(x, y)
                        weights_after_training = self.model.get_weights()
                        deltas = subtract_params(
                            weights_before_training, weights_after_training)
                        self.client.update_parameters(deltas)
        else:
            raise ValueError(
                'frequency parameter can be `epoch` or `batch`, got {}'.format(self.frequency))
        yield [] 
Example #12
Source File: abstract.py    From costar_plan with Apache License 2.0
def predict(self, world):
        '''
        This is the basic, "dumb" option. Compute the next option/policy to
        execute by evaluating the supervisor, then just call that model.
        '''
        if self.supervisor is None:
            raise RuntimeError('high level model is missing')
        features = world.initial_features  # getHistoryMatrix()
        if isinstance(features, list):
            assert len(features) == len(self.supervisor.inputs) - 1
        else:
            features = [features]
        features = [f.reshape((1,)+f.shape) for f in features]
        res = self.supervisor.predict(features +
                [self._makeOption1h(self.prev_option)])
        next_policy = np.argmax(res)

        print("Next policy = ", next_policy,)
        if self.taskdef is not None:
            print("taskdef =", self.taskdef.name(next_policy))
        one_hot = np.zeros((1,self._numLabels()))
        one_hot[0,next_policy] = 1.
        features2 = features + [one_hot]

        # ===============================================
        # INTERMEDIATE CODE PLEASE REMOVE
        res = self.predictor.predict(features2)
        import matplotlib.pyplot as plt
        plt.subplot(2,1,1)
        plt.imshow(features[0][0])
        plt.subplot(2,1,2)
        plt.imshow(res[0][0])
        plt.ion()
        plt.show(block=False)
        plt.pause(0.01)
        # ===============================================

        # Retrieve the next policy we want to execute
        policy = self.policies[next_policy]

        # Update previous option -- which one did we end up choosing, and which
        # policy did we execute?
        self.prev_option = next_policy

        # Evaluate this policy to get the next action out
        return policy.predict(features) 
Example #13
Source File: _multigpu_with_nccl.py    From keras_experiments with The Unlicense
def all_sync_params(tower_params, devices, usenccl=True):
    """Assigns the params from the first tower to all others"""
    if len(devices) == 1:
        return tf.no_op()
    sync_ops = []
    if have_nccl and usenccl:
        for param_on_devices in zip(*tower_params):
            # print('PARAM_ON_DEVICES: {}'.format(param_on_devices))  # DEBUG
            # Note: param_on_devices is [paramX_gpu0, paramX_gpu1, ...]
            param0 = param_on_devices[0]
            send_op, received_tensors = nccl.broadcast(param0, devices[1:])
            sync_ops.append(send_op)
            for device, param, received in zip(devices[1:],
                                               param_on_devices[1:],
                                               received_tensors):
                with tf.device(device):
                    sync_op = param.assign(received)
                    sync_ops.append(sync_op)
    else:
        params0 = tower_params[0]
        for device, params in zip(devices, tower_params):
            with tf.device(device):
                for param, param0 in zip(params, params0):
                    sync_op = param.assign(param0.read_value())
                    sync_ops.append(sync_op)

    return tf.group(*sync_ops)


# def stage(tensors):
#     """Stages the given tensors in a StagingArea for asynchronous put/get.
#     """
#     stage_area = data_flow_ops.StagingArea(
#         dtypes=[tensor.dtype for tensor in tensors],
#         shapes=[tensor.get_shape() for tensor in tensors])
#     put_op = stage_area.put(tensors)
#     get_tensors = stage_area.get()
#     if not isinstance(get_tensors, list):
#         get_tensors = [get_tensors]
#     # print('GET_TENSORS: {}'.format(get_tensors))  # DEBUG
#
#     get_tensors = [tf.reshape(gt, t.get_shape())
#                    for (gt, t) in zip(get_tensors, tensors)]
#     return put_op, get_tensors 
Example #14
Source File: dynamic_triad.py    From DynamicTriad with Apache License 2.0
def make_online(self):
        embedding = K.variable(np.random.uniform(0, 1, (self.dataset.nsize, self.flowargs['embdim'])))
        prevemb = K.placeholder(ndim=2, dtype='float32')  # (nsize, d)
        data = K.placeholder(ndim=2, dtype='int32')  # (batchsize, 5), [k, from_pos, to_pos, from_neg, to_neg]
        weight = K.placeholder(ndim=1, dtype='float32')  # (batchsize, )

        if K._BACKEND == 'theano':
            # (batchsize, d) => (batchsize, )
            # data[:, 0] should always be 0, so we simply ignore it
            # note: if you do want to use it, the data generation procedure means the actual data[:, 0] is not 0
            dist_pos = embedding[data[:, 1]] - embedding[data[:, 2]]
            dist_pos = K.sum(dist_pos * dist_pos, axis=-1)
            dist_neg = embedding[data[:, 3]] - embedding[data[:, 4]]
            dist_neg = K.sum(dist_neg * dist_neg, axis=-1)
        else:
            dist_pos = K.gather(embedding, K.squeeze(K.slice(data, [0, 1], [-1, 1]), axis=1)) - \
                       K.gather(embedding, K.squeeze(K.slice(data, [0, 2], [-1, 1]), axis=1))
            dist_pos = K.sum(dist_pos * dist_pos, axis=-1)
            dist_neg = K.gather(embedding, K.squeeze(K.slice(data, [0, 3], [-1, 1]), axis=1)) - \
                       K.gather(embedding, K.squeeze(K.slice(data, [0, 4], [-1, 1]), axis=1))
            dist_neg = K.sum(dist_neg * dist_neg, axis=-1)

        # (batchsize, )
        margin = 1
        lprox = K.maximum(margin + dist_pos - dist_neg, 0) * weight

        # (1, )
        lprox = K.mean(lprox)

        # lsmooth
        lsmooth = embedding - prevemb  # (nsize, d)
        lsmooth = K.sum(K.square(lsmooth), axis=-1)  # (nsize)
        lsmooth = K.mean(lsmooth)

        loss = lprox + self.flowargs['beta'][0] * lsmooth

        opt = optimizers.get({'class_name': 'Adagrad', 'config': {'lr': self.lr}})
        cstr = {embedding: constraints.get({'class_name': 'maxnorm', 'config': {'max_value': 1, 'axis': 1}})}
        upd = opt.get_updates([embedding], cstr, loss)
        lf = K.function([data, weight, prevemb], [loss], updates=upd)

        return lf, None, [embedding], {}