Python keras.utils.generic_utils.Progbar() Examples

The following are 16 code examples of keras.utils.generic_utils.Progbar(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the keras.utils.generic_utils module.
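Before the project examples, here is a minimal, self-contained sketch of the Progbar API those examples rely on. It assumes a Keras version that still exposes keras.utils.generic_utils.Progbar (newer releases expose the same class as keras.utils.Progbar); the loop, the sleep call, and the metric name are placeholders rather than code from any project below.

import time
from keras.utils.generic_utils import Progbar

n_steps = 50
progbar = Progbar(target=n_steps, width=30, verbose=1, interval=0.05)
for step in range(n_steps):
    time.sleep(0.01)                 # stand-in for real work
    loss = 1.0 / (step + 1)          # placeholder metric
    # update() sets the absolute position; add() advances by a delta.
    progbar.update(step + 1, values=[('loss', loss)])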
Example #1
Source File: language_model.py    From rnn_agreement with MIT License
def results(self):
        recs = []
        columns = ['gram_loss', 'ungram_loss', 'correct'] + dependency_fields
        self.model.model._make_test_function()
        progbar = Progbar(len(self.deps_test))
        for i, dep in enumerate(self.deps_test):
            inp = np.zeros((1, self.maxlen))
            v = int(dep['verb_index']) - 1
            tokens = dep[self.field].split()[:v+1]
            ints = [self.vocab_to_ints[x] for x in tokens]
            try:
                ungram = self.vocab_to_ints[self.inflect_verb[tokens[v]]]
            except KeyError:   # reinflected form not in vocabulary: ignore
                continue
            n = len(ints) - 1
            inp[0, -n:] = ints[:-1]
            gram_loss = self.model.test_on_batch(inp, np.array([ints[v]]))
            ungram_loss = self.model.test_on_batch(inp, np.array([ungram]))
            recs.append((gram_loss, ungram_loss, gram_loss < ungram_loss) +
                        tuple(dep[x] for x in dependency_fields))
            if i % 16 == 0:
                progbar.update(i)

        self.test_results = pd.DataFrame(recs, columns=columns) 
Example #2
Source File: generative_alg.py    From nli_generation with MIT License
def diversity(dev, gen_test, beam_size, hypo_len, noise_size, per_premise, samples):
    step = len(dev[0]) / samples
    sind = [i * step for i in range(samples)]
    p = Progbar(per_premise * samples)
    for i in sind:
        hypos = []
        unique_words = []
        hypo_list = []
        premise = dev[0][i]
        prem_list = set(cut_zeros(list(premise)))        
        while len(hypos) < per_premise:
            label = np.argmax(dev[2][i])
            words = single_generate(premise, label, gen_test, beam_size, hypo_len, noise_size)
            hypos += [str(ex) for ex in words]
            unique_words += [int(w) for ex in words for w in ex if w > 0]
            hypo_list += [set(cut_zeros(list(ex))) for ex in words]
        
        jacks = []  
        prem_jacks = []
        for u in range(len(hypo_list)):
            sim_prem = len(hypo_list[u] & prem_list)/float(len(hypo_list[u] | prem_list))
            prem_jacks.append(sim_prem)
            for v in range(u+1, len(hypo_list)):
                sim = len(hypo_list[u] & hypo_list[v])/float(len(hypo_list[u] | hypo_list[v]))
                jacks.append(sim)
        avg_dist_hypo = 1 -  np.mean(jacks)
        avg_dist_prem = 1 -  np.mean(prem_jacks)
        d = entropy(Counter(hypos).values()) 
        w = entropy(Counter(unique_words).values())
        p.add(len(hypos), [('diversity', d),('word_entropy', w),('avg_dist_hypo', avg_dist_hypo), ('avg_dist_prem', avg_dist_prem)])
    arrd = p.sum_values['diversity']
    arrw = p.sum_values['word_entropy']
    arrj = p.sum_values['avg_dist_hypo']
    arrp = p.sum_values['avg_dist_prem']
    
    return arrd[0] / arrd[1], arrw[0] / arrw[1], arrj[0] / arrj[1],  arrp[0] / arrp[1] 
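Example #2 reads the bar's internal sum_values dictionary to recover the running averages it has been displaying. In the older Keras releases these projects target, Progbar keeps each tracked metric as a [weighted_sum, count] pair (later versions store the same pairs under a private _values attribute), so the mean is the first element divided by the second. A hedged sketch of that pattern, with made-up numbers:

from keras.utils.generic_utils import Progbar

p = Progbar(target=100)
p.add(50, values=[('diversity', 0.8)])    # first 50 samples averaged 0.8
p.add(50, values=[('diversity', 0.6)])    # next 50 samples averaged 0.6

weighted_sum, count = p.sum_values['diversity']
print(weighted_sum / count)               # running mean over all 100 samples: 0.7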
Example #3
Source File: processing.py    From keras-text with MIT License
def build_vocab(self, texts, verbose=1, **kwargs):
        """Builds the internal vocabulary and computes various statistics.

        Args:
            texts: The list of text items to encode.
            verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
            **kwargs: The kwargs for `token_generator`.
        """
        if self.has_vocab:
            logger.warn("Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")

        progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
        count_tracker = _CountTracker()

        self._token_counts.clear()
        self._num_texts = len(texts)

        for token_data in self.token_generator(texts, **kwargs):
            indices, token = token_data[:-1], token_data[-1]
            count_tracker.update(indices)
            self._token_counts[token] += 1

            # Update progressbar per document level.
            progbar.update(indices[0])

        # Generate token2idx and idx2token.
        self.create_token_indices(self._token_counts.keys())

        # All done. Finalize progressbar update and count tracker.
        count_tracker.finalize()
        self._counts = count_tracker.counts
        progbar.update(len(texts), force=True) 
Example #4
Source File: processing.py    From keras-text with MIT License
def encode_texts(self, texts, include_oov=False, verbose=1, **kwargs):
        """Encodes the given texts using internal vocabulary with optionally applied encoding options. See
        `apply_encoding_options` to set various options.

        Args:
            texts: The list of text items to encode.
            include_oov: True to map unknown (out of vocab) tokens to 0. False to exclude the token.
            verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
            **kwargs: The kwargs for `token_generator`.

        Returns:
            The encoded texts.
        """
        if not self.has_vocab:
            raise ValueError("You need to build the vocabulary using `build_vocab` before using `encode_texts`")

        progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
        encoded_texts = []
        for token_data in self.token_generator(texts, **kwargs):
            indices, token = token_data[:-1], token_data[-1]

            token_idx = self._token2idx.get(token)
            if token_idx is None and include_oov:
                token_idx = 0

            if token_idx is not None:
                _append(encoded_texts, indices, token_idx)

            # Update progressbar per document level.
            progbar.update(indices[0])

        # All done. Finalize progressbar.
        progbar.update(len(texts), force=True)
        return encoded_texts 
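Examples #3 and #4 come from keras-text's tokenizer, where Progbar tracks per-document progress while a token generator walks the corpus. A hypothetical end-to-end use of that API might look like the sketch below; the WordTokenizer class name, the import path, and the sample texts are assumptions rather than code taken from the examples above.

# Hypothetical usage of the tokenizer shown in Examples #3 and #4.
# Assumes keras-text is installed and exposes WordTokenizer at this path;
# adjust the import if the project layout differs.
from keras_text.processing import WordTokenizer

texts = ['the quick brown fox', 'jumped over the lazy dog']
tokenizer = WordTokenizer()
tokenizer.build_vocab(texts, verbose=1)             # drives the first Progbar above
encoded = tokenizer.encode_texts(texts, verbose=1)  # drives the second Progbar above
print(encoded)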
Example #5
Source File: adversarial_learner.py    From unsupervised_detection with MIT License
def epoch_end_callback(self, sess, sv, epoch_num):
        # Evaluate val loss
        validation_iou = 0
        print("\nComputing Validation IoU")
        progbar = Progbar(target=self.val_steps_per_epoch)

        for i in range(self.val_steps_per_epoch):
            loss_iou = sess.run(self.val_iou,
                             feed_dict={self.is_training: False})
            validation_iou+= loss_iou
            progbar.update(i)
        validation_iou /= self.val_steps_per_epoch*self.config.batch_size

        # Log to Tensorflow board
        val_sum = sess.run(self.val_sum, feed_dict ={
                           self.val_iou_ph: validation_iou})

        sv.summary_writer.add_summary(val_sum, epoch_num)

        print("Epoch [{}] Validation IoU: {}".format(
            epoch_num, validation_iou))
        # Model Saving
        if validation_iou > self.min_val_iou:
            self.save(sess, self.config.checkpoint_dir, 'best')
            self.min_val_iou = validation_iou
        if epoch_num % self.config.save_freq == 0:
            self.save(sess, self.config.checkpoint_dir, epoch_num) 
Example #6
Source File: callbacks.py    From AirGym with MIT License
def reset(self):
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = [] 
Example #7
Source File: collect_agreement.py    From rnn_agreement with MIT License
def collect_agreement(self):
        n_deps = 0
        self.deps = []
        random.seed(1)

        if self.verbose and self.stop_after:
            from keras.utils.generic_utils import Progbar
            progbar = Progbar(self.stop_after)

        for i, sent in enumerate(tokenize_blanks(zread(self.infile)), 1):
            if self.stop_after is not None and n_deps >= self.stop_after:
                break
            if i % (self.skip + 1) != 0:
                continue

            # only one dependency per sentence
            deps = self.find_nsubj_agreement(sent)
            if len(deps) == 0:
                continue
            dep = random.choice(deps)
            if dep['subj_index'] > dep['verb_index']:
                continue
            if (dep['subj_pos'] == 'NN' and dep['verb_pos'] == 'VBP' or 
                dep['subj_pos'] == 'NNS' and dep['verb_pos'] == 'VBZ'):
                # ungrammatical dependency (parse error)
                continue

            n_deps += 1
            dep['sentence'] = self.represent_sentence(sent)
            dep['pos_sentence'] = ' '.join(x[POS] for x in sent)
            dep['orig_sentence'] = ' '.join(x[WORD] for x in sent)
            dep['all_nouns'] = self.only_nouns(sent, len(sent))
            dep['nouns_up_to_verb'] = self.only_nouns(sent, 
                                                      int(dep['verb_index']))
            self.deps.append(dep)

            if self.verbose and self.stop_after and n_deps % 10 == 0:
                progbar.update(n_deps) 
Example #8
Source File: language_model.py    From rnn_agreement with MIT License
def evaluate(self, howmany=1000):
        self.model.model._make_test_function()
        random.seed(0)
        shuffled = self.deps_test[:]
        random.shuffle(shuffled)
        shuffled = shuffled[:howmany]
        X_test = []
        Y_test = []

        for dep in shuffled:
            tokens = self.process_single_dependency(dep)
            ints = []
            for token in tokens:
                if token not in self.vocab_to_ints:
                    # zero is for pad
                    x = self.vocab_to_ints[token] = len(self.vocab_to_ints) + 1
                    self.ints_to_vocab[x] = token
                ints.append(self.vocab_to_ints[token])

            first = 1
            for i in range(first, len(ints) - 1):
                X_test.append(ints[:i])
                Y_test.append(ints[i])

        test_loss = []
        end = int(float(len(X_test) / self.batch_size))
        progbar = Progbar(end)
        for i in range(0, len(X_test), self.batch_size):
            inp = sequence.pad_sequences(X_test[i:i+self.batch_size],
                                         maxlen=self.maxlen)
            out = Y_test[i:i+self.batch_size]
            output = self.model.test_on_batch(inp, out)
            test_loss.append(output)
            j = int(float(i) / self.batch_size)
            if j % 16 == 0:
                progbar.update(j)
        progbar.update(end)

        return np.mean(test_loss) 
Example #9
Source File: callbacks.py    From keras-rl with MIT License
def reset(self):
        """ Reset statistics """
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = [] 
Example #10
Source File: visualize.py    From nli_generation with MIT License
def test_points(premises, labels, noises, gtest, cmodel, hypo_len):
    p = Progbar(len(premises))
    hypos = []
    bs = 64 
    for i in range(len(labels) / bs):
        words, _  = generative_predict_beam(gtest, premises[i * bs: (i+1)*bs], 
                          noises[i * bs: (i+1)*bs,None,:], labels[i * bs: (i+1)*bs], True, hypo_len)
        hypos.append(words)
        p.add(len(words))
    hypos = np.vstack(hypos)
    cpreds = cmodel.evaluate([premises[:len(hypos)], hypos], labels[:len(hypos)])
    print cpreds 
Example #11
Source File: agent.py    From X with BSD 3-Clause "New" or "Revised" License
def play(self, env, epoch=1, batch_size=1, visualize=None, verbose=1):
        print("Free play started!")
        frames = np.zeros((0, ) + env.observe_image().shape[1:])
        frames = frames.transpose(0, 2, 3, 1)
        progbar = Progbar(epoch)

        for e in xrange(epoch):
            # reset environment on each epoch
            env.reset()
            game_over = False
            loss = 0
            rewards = 0

            # get initial observation, start game
            obs_t = env.observe()
            while not game_over:
                obs_tm1 = obs_t

                # get next action
                action = self.policy(obs_tm1, train=False)

                # apply action, get rewards and new state
                obs_t, reward, game_over = env.update(action)
                rewards += reward

                frame_t = env.observe_image().transpose(0, 2, 3, 1)
                frames = np.concatenate([frames, frame_t], axis=0)

            if verbose == 1:
                progbar.add(1, values=[("loss", loss), ("rewards", rewards)])


        if visualize:
            from agnez.video import make_gif
            print("Making gif!")
            frames = np.repeat(frames, 3, axis=-1)
            make_gif(frames[:visualize['n_frames']],
                     filepath=visualize['filepath'], gray=visualize['gray'], interpolation='none')
            print("See your gif at {}".format(visualize['filepath'])) 
Example #12
Source File: generative_alg.py    From nli_generation with MIT License
def validate(dev, gen_test, beam_size, hypo_len, samples, noise_size, glove, cmodel = None, adverse = False, 
                 diverse = False):
    vgen = val_generator(dev, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        perplexity = np.mean(np.power(2, batch[2]))
        loss = np.mean(batch[2])
        losses = [('hypo_loss', loss), ('perplexity', perplexity)]
        if cmodel is not None:
            ceval = cmodel.evaluate([batch[0], batch[1]], batch[4], verbose = 0)
            losses += [('class_loss', ceval[0]), ('class_acc', ceval[1])]
            probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
            losses += [('class_entropy', np.mean(-np.sum(probs * np.log(probs), axis=1)))]
        
        p.add(len(batch[0]), losses)
        batchez.append(batch)
    batchez = merge_result_batches(batchez)
    
    res = {}
    if adverse:
        val_loss = adverse_validation(dev, batchez, glove)
        print 'adverse_loss:', val_loss
        res['adverse_loss'] = val_loss
    if diverse:
        div, _, _, _ = diversity(dev, gen_test, beam_size, hypo_len, noise_size, 64, 32)
        res['diversity'] = div
    print
    for val in p.unique_values:
        arr = p.sum_values[val]
        res[val] = arr[0] / arr[1]
    return res 
Example #13
Source File: augment.py    From nli_generation with MIT License
def new_generate_dataset(dataset, samples, gen_test, beam_size, hypo_len, noise_size, cmodel):

    vgen = val_generator(dataset, gen_test, beam_size, hypo_len, noise_size)
    p = Progbar(samples)
    batchez = []
    while p.seen_so_far < samples:
        batch = next(vgen)
        probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
        batch += (probs,)

        p.add(len(batch[0]))
        batchez.append(batch)
    return merge_result_batches(batchez) 
Example #14
Source File: generic_utils_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_progbar():
    values_s = [None,
                [['key1', 1], ['key2', 1e-4]],
                [['key3', 1], ['key2', 1e-4]]]

    for target in (len(values_s) - 1, None):
        for verbose in (0, 1, 2):
            bar = Progbar(target, width=30, verbose=verbose, interval=0.05)
            for current, values in enumerate(values_s):
                bar.update(current, values=values) 
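Note that the test above also passes target=None: in the Keras versions that ship this test, Progbar accepts an unknown total and simply reports how many steps have been completed so far. A tiny sketch, assuming the same import as the test:

from keras.utils.generic_utils import Progbar

# target=None means the total step count is unknown; the bar just counts steps.
bar = Progbar(target=None, verbose=1)
for step in range(10):
    bar.update(step + 1)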
Example #15
Source File: enhancer_gan.py    From ImageEnhancer with MIT License
def train_model(self, critic_updates=1):
        """ train the model """
        self.d_model.trainable = True
        self.d_model.compile(Adam(lr=self.learning_rate), loss=binary_crossentropy, metrics=['accuracy'])
        self.d_model.trainable = False
        self.gan.compile(Adam(lr=self.learning_rate), loss=binary_crossentropy, loss_weights=[10, 1])
        self.d_model.trainable = True

        cb_list = []
        cb_list.append(TensorBoard(self.graph_path))
        cb_list.append(LearningRateScheduler(lambda e: self.learning_rate * 0.99 ** (e / 10)))
        cb_list.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.best.hdf5', save_best_only=True))
        if not self.best_cp:
            cb_list.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.{epoch:02d}-{val_loss:.2f}.hdf5'))
        callback = CallBacks(cb_list)
        callback.set_model(self.gan)
        
        train_num = self.corrupted['train'].shape[0]
        valid_num = self.corrupted['valid'].shape[0]
        for itr in range(self.epoch):
            print('[Epoch %s/%s]' % (itr + 1, self.epoch))
            callback.on_epoch_begin(itr)
            d_acces = []
            gan_losses = []
            indexes = np.random.permutation(train_num)
            #progbar = Progbar(train_num)
            for idx in range(int(train_num / self.batch_size)):
                print('[Batch %s/%s]' % (idx + 1, int(train_num / self.batch_size)))
                batch_idx = indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
                crp_batch = self.corrupted['train'][batch_idx]
                raw_batch = self.source['train'][batch_idx]
                generated = self.g_model.predict(crp_batch, self.batch_size)
                for _ in range(critic_updates):
                    d_loss_real = self.d_model.train_on_batch(raw_batch, np.ones((self.batch_size, 1)))
                    d_loss_fake = self.d_model.train_on_batch(generated, np.zeros((self.batch_size, 1)))
                    d_acc = 0.5 * np.add(d_loss_real[1], d_loss_fake[1])
                    d_acces.append(d_acc)
                    print('D real loss/acc: %s, fake loss/acc: %s' % (d_loss_real, d_loss_fake))
                print('D acc: %s' % np.mean(d_acces))
                self.d_model.trainable = False
                gan_loss = self.gan.train_on_batch(crp_batch, [raw_batch, np.ones((self.batch_size, 1))])
                print('GAN loss: %s' % gan_loss)
                gan_losses.append(gan_loss)
                self.d_model.trainable = True
                print('loss: %s' % np.mean(gan_losses))
                #progbar.add(self.batch_size, [('loss', np.mean(gan_losses)), ('d_acc', 100 * np.mean(d_acces))])
            val_loss = self.gan.evaluate(self.corrupted['valid'], [self.source['valid'], np.ones((valid_num, 1))], self.batch_size, verbose=0)
            #progbar.update(train_num, [('val_loss', np.mean(val_loss))])
            print('val_loss: %s' % np.mean(val_loss))
            callback.on_epoch_end(itr, logs={'loss': np.mean(gan_losses), 'val_loss': np.mean(val_loss)})
            self.save_image('test.{e:02d}-{v:.2f}'.format(e=(itr + 1), v=np.mean(val_loss)))
        callback.on_train_end() 
Example #16
Source File: preprocessing.py    From keras-neural-graph-fingerprint with MIT License
def tensorise_smiles_mp(smiles, max_degree=5, max_atoms=None, workers=cpu_count()-1, chunksize=3000, verbose=True):
    ''' Multiprocess implementation of `tensorise_smiles`

    # Arguments:
        See `tensorise_smiles` documentation

    # Additional arguments:
        workers: int, num parallel processes
        chunksize: int, num molecules tensorised per worker, bigger chunksize is
            preferred as each process will preallocate np.arrays

    # Returns:
        See `tensorise_smiles` documentation

    # TODO:
        - fix python keyboardinterrupt bug:
          https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
        - replace progbar with proper logging
    '''

    pool = Pool(processes=workers)

    # Create an iterator
    #http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
    smiles_chunks = chunks(smiles, chunksize)

    # MAP: Tensorise in parallel
    map_function = partial(tensorise_smiles, max_degree=max_degree, max_atoms=max_atoms)
    if verbose:
        print('Tensorising molecules in batches...')
        pbar = Progbar(len(smiles), width=50)
        tensor_list = []
        for tensors in pool.imap(map_function, smiles_chunks):
            pbar.add(tensors[0].shape[0])
            tensor_list.append(tensors)
        print('Merging batch tensors...    ', end='')
    else:
        tensor_list = pool.map(map_function, smiles_chunks)
    if verbose:
        print('[DONE]')

    # REDUCE: Concatenate the obtained tensors
    pool.close()
    pool.join()
    return concat_mol_tensors(tensor_list, match_degree=max_degree!=None, match_max_atoms=max_atoms!=None)
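Because each worker in the example above returns a whole chunk of molecules, the bar is advanced with add(chunk_size) rather than update(absolute_position). The same delta-based pattern works for any variable-sized batches; a minimal sketch, in which the item list, chunk size, and processing step are illustrative and not taken from the project above:

from keras.utils.generic_utils import Progbar

items = list(range(10000))
chunks = [items[i:i + 3000] for i in range(0, len(items), 3000)]  # last chunk is smaller

pbar = Progbar(len(items), width=50)
for chunk in chunks:
    # ... tensorise / process the chunk here ...
    pbar.add(len(chunk))   # advance by however many items this chunk held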