Python load_data.load_data() Examples

The following are 3 code examples of load_data.load_data(), drawn from open-source projects. You can go to the original project or source file by following the link above each example, or check out all available functions/classes of the load_data module.
Example #1
Source File: commands.py    From autonomio with MIT License
def data(name, mode='default', sep=',', delimiter=None, header='infer'):

    '''Function for loading one of the Autonomio datasets.

    OPTIONS: Either set mode to 'file' or use name without mode parameter.

    FILENAMES:

    'election_in_twitter'
     Dataset consisting of 10 minute samples of 80 million tweets.

    'tweet_sentiment'
     Dataset with tweet text classified for sentiment using NLTK Vader.

    'sites_category_and_vec'
     4,000 sites with word vectors and 5 categories.

    'programmatic_ad_fraud'
     Data from both buy and sell side and over 10 other sources.

    'parties_and_employment'
     9 years of monthly poll and unemployment numbers.

    'random_tweets'
     20,000 randomly sampled tweets.

    '''

    out = load_data(name, mode, sep, delimiter, header)

    return out 
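The docstring above covers both ways of calling the wrapper: pass one of the bundled dataset names on its own, or pass a filename together with mode='file'. The lines below are a minimal usage sketch under that reading; the import path is an assumption and may differ in the installed package.

# Minimal usage sketch; the import path below is an assumption.
from autonomio.commands import data

# Load a bundled dataset by name (mode stays at its default).
tweets = data('random_tweets')

# Load a local CSV instead by switching mode to 'file'.
local_df = data('my_data.csv', mode='file', sep=',')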
Example #2
Source File: main.py    From Multilevel_Wavelet_Decomposition_Network_Pytorch with Apache License 2.0
def main():
    data_path = "./Data/GasPrice.csv"
    P = 12    # sequence length of each input window
    step = 1  # number of steps ahead to predict

    X_train, Y_train, X_test, Y_test, data_df_combined_clean = load_data(data_path, P=P, step=step)
    print(X_train.shape)
    print(Y_train.shape)
    
    model = Wavelet_LSTM(P, 32, 1)
    model = model.double()
    train(model, X_train, Y_train, epochs=20)
    test(model, X_test, Y_test, data_df_combined_clean)
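In this example load_data() takes a CSV path plus a sequence length P and a prediction horizon step, and returns windowed train/test arrays together with the cleaned combined dataframe. Its actual implementation lives in the project's own module and is not shown here; the sketch below only illustrates the sliding-window idea under that assumed signature, using pandas and NumPy stand-ins.

# Hypothetical sliding-window loader matching the call signature used above;
# an illustration only, not the project's implementation.
import numpy as np
import pandas as pd

def load_data_sketch(data_path, P=12, step=1, train_ratio=0.8):
    df = pd.read_csv(data_path).dropna()      # cleaned combined dataframe
    series = df.iloc[:, -1].values            # assume the target series is the last column
    X, Y = [], []
    for i in range(len(series) - P - step + 1):
        X.append(series[i:i + P])             # P past values as the input window
        Y.append(series[i + P + step - 1])    # value `step` steps ahead as the target
    X, Y = np.array(X), np.array(Y)
    split = int(len(X) * train_ratio)
    return X[:split], Y[:split], X[split:], Y[split:], df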
Example #3
Source File: model.py    From DeepHDR with MIT License
def build_model(self, config, train):
        
        if train:
            tfrecord_list = glob(os.path.join(config.dataset, '**', '*.tfrecords'), recursive=True)
            assert (tfrecord_list)
            shuffle(tfrecord_list)
            print('\n\n====================\ntfrecords list:')
            [print(f) for f in tfrecord_list]
            print('====================\n\n')
            
            with tf.device('/cpu:0'):
                filename_queue = tf.train.string_input_producer(tfrecord_list)
                self.in_LDRs, self.in_HDRs, self.ref_LDRs, self.ref_HDR, _, _ = load_data(filename_queue, config)

            self.G_HDR = self.generator(self.in_LDRs, self.in_HDRs, train=train)
            self.G_tonemapped = tonemap(self.G_HDR)
            self.G_sum = tf.summary.image("G", self.G_tonemapped)
            
            # l2 loss
            self.g_loss = tf.reduce_mean((self.G_tonemapped - tonemap(self.ref_HDR))**2) # after tonemapping
            self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
            
            t_vars = tf.trainable_variables()
            self.g_vars = [var for var in t_vars if 'g_' in var.name]

            with tf.device('/cpu:0'):
                sample_tfrecord_list = glob(os.path.join(
                    './dataset/tf_records', '**', '*.tfrecords'), recursive=True)
                shuffle(sample_tfrecord_list)
                filename_queue_sample = tf.train.string_input_producer(sample_tfrecord_list)
                self.in_LDRs_sample, self.in_HDRs_sample, self.ref_LDRs_sample, self.ref_HDR_sample, _, _ = \
                    load_data(filename_queue_sample, config)
            
            self.sampler_HDR = self.generator(self.in_LDRs_sample, self.in_HDRs_sample, train=False, reuse=True)
            self.sampler_tonemapped = tonemap(self.sampler_HDR)

        # testing
        else:
            self.in_LDRs_sample = tf.placeholder(
                tf.float32, [self.batch_size, config.test_h, config.test_w, self.c_dim*self.num_shots], name='input_LDR_sample')
            self.in_HDRs_sample = tf.placeholder(
                tf.float32, [self.batch_size, config.test_h, config.test_w, self.c_dim*self.num_shots], name='input_HDR_sample')
            
            self.sampler_HDR = self.generator(self.in_LDRs_sample, self.in_HDRs_sample, train=False, free_size=True)
            self.sampler_tonemapped = tonemap(self.sampler_HDR)

        self.saver = tf.train.Saver(max_to_keep=50)
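In this example load_data() receives a TF1 filename queue and returns batched LDR/HDR tensors decoded from the .tfrecords files. The project's reader is not shown on this page; the fragment below sketches the standard TF1 queue-based TFRecord pattern such a function typically follows. The feature keys and shapes are assumptions, not the DeepHDR schema.

# Sketch of a TF1 queue-based TFRecord reader; feature keys and shapes are assumed.
import tensorflow as tf

def load_data_sketch(filename_queue, batch_size=8, h=256, w=256, c=3):
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={'in_LDR': tf.FixedLenFeature([], tf.string),
                  'ref_HDR': tf.FixedLenFeature([], tf.string)})
    # Decode the raw byte strings back into float image tensors.
    in_ldr = tf.reshape(tf.decode_raw(features['in_LDR'], tf.float32), [h, w, c])
    ref_hdr = tf.reshape(tf.decode_raw(features['ref_HDR'], tf.float32), [h, w, c])
    # Shuffle-batch the decoded examples for training.
    return tf.train.shuffle_batch([in_ldr, ref_hdr], batch_size=batch_size,
                                  capacity=200, min_after_dequeue=50)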