Python data.load_data() Examples
The following are 5 code examples of data.load_data().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module data, or try the search function.
Example #1
Source File: chatbot.py From stanford-tensorflow-tutorials with MIT License | 6 votes |
def _get_buckets():
    """Load the dataset into buckets based on their lengths.

    Returns:
        test_buckets: test samples grouped by bucket.
        data_buckets: training samples grouped by bucket.
        train_buckets_scale: cumulative fractions in (0, 1] — the interval
            boundaries used to choose a random bucket later on.
    """
    # data and config are project modules imported elsewhere in this file.
    test_buckets = data.load_data('test_ids.enc', 'test_ids.dec')
    data_buckets = data.load_data('train_ids.enc', 'train_ids.dec')
    train_bucket_sizes = [len(data_buckets[b]) for b in range(len(config.BUCKETS))]
    print("Number of samples in each bucket:\n", train_bucket_sizes)
    train_total_size = sum(train_bucket_sizes)
    # List of increasing numbers from 0 to 1 used to select a bucket.
    # accumulate gives the running sum in O(n); the original computed each
    # prefix with sum(sizes[:i+1]), which is O(n^2) in the bucket count.
    from itertools import accumulate
    train_buckets_scale = [running / train_total_size
                           for running in accumulate(train_bucket_sizes)]
    print("Bucket scale:\n", train_buckets_scale)
    return test_buckets, data_buckets, train_buckets_scale
Example #2
Source File: run.py From DeepMRI with GNU General Public License v3.0 | 5 votes |
def main(argv=None):
    """Entry point: load the MRI dataset, then launch training.

    Args:
        argv: unused; kept for the conventional app-runner signature.
    """
    # load_data, train, data_path and BATCH_SIZE come from elsewhere in
    # this project.
    train_data, validate_data, test_data, mask = load_data(data_path, BATCH_SIZE)
    train(train_data, validate_data, test_data, mask)
Example #3
Source File: lstm.py From lstm-electric-load-forecast with MIT License | 5 votes |
def run_lstm(model, sequence_length, prediction_steps):
    """Train (or reuse) an LSTM on household power-consumption data.

    Args:
        model: a previously trained Keras model, or None to build and
            train a fresh one.
        sequence_length: length of the input sequences fed to the LSTM.
        prediction_steps: how many steps ahead to predict.

    Returns:
        (model, y_test, 0) if training was interrupted by the user,
        otherwise None after plotting the predictions.
    """
    data = None
    global_start_time = time.time()
    epochs = 1
    ratio_of_data = 1  # ratio of data to use from 2+ million data points
    path_to_dataset = 'data/household_power_consumption.txt'

    # data is always None here, so the else branch is effectively dead;
    # kept to preserve the original control flow.
    if data is None:
        print('Loading data... ')
        x_train, y_train, x_test, y_test, result_mean = load_data(
            path_to_dataset, sequence_length, prediction_steps, ratio_of_data)
    else:
        x_train, y_train, x_test, y_test = data

    print('\nData Loaded. Compiling...\n')

    if model is None:
        model = build_model(prediction_steps)
        try:
            model.fit(x_train, y_train, batch_size=128, epochs=epochs,
                      validation_split=0.05)
            predicted = model.predict(x_test)
            # predicted = np.reshape(predicted, (predicted.size,))
            model.save('LSTM_power_consumption_model.h5')  # save LSTM model
        except KeyboardInterrupt:
            # Save the partially trained model if the user interrupts.
            print('Duration of training (s) : ', time.time() - global_start_time)
            model.save('LSTM_power_consumption_model.h5')
            return model, y_test, 0
    else:
        # A previously trained model was given; just run inference.
        print('Loading model...')
        predicted = model.predict(x_test)

    plot_predictions(result_mean, prediction_steps, predicted, y_test,
                     global_start_time)
    return None
Example #4
Source File: chatbot.py From stanford-tensorflow-tutorials with MIT License | 5 votes |
def _get_buckets():
    """Load the dataset into buckets based on their lengths.

    train_buckets_scale is the interval that'll help us choose a random
    bucket later on.
    """
    # data and config are project modules imported elsewhere in this file.
    test_buckets = data.load_data('test_ids.enc', 'test_ids.dec')
    data_buckets = data.load_data('train_ids.enc', 'train_ids.dec')
    train_bucket_sizes = [len(data_buckets[b]) for b in range(len(config.BUCKETS))]
    print("Number of samples in each bucket:\n", train_bucket_sizes)
    train_total_size = sum(train_bucket_sizes)
    # Cumulative fractions from 0 to 1, later used to pick a bucket at random.
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                           for i in range(len(train_bucket_sizes))]
    print("Bucket scale:\n", train_buckets_scale)
    return test_buckets, data_buckets, train_buckets_scale
Example #5
Source File: BlackBoxAuditor.py From BlackBoxAuditing with Apache License 2.0 | 4 votes |
def main():
    """Audit a Weka SVM model on the "german" credit dataset."""
    # Format data (load_data, Auditor and Weka_SVM come from this project).
    dataset = load_data("german")
    # Set the auditor and the model under test.
    auditor = Auditor()
    auditor.model = Weka_SVM
    # Call the auditor on the two selected features, dumping all artifacts.
    auditor(dataset,
            output_dir="try",
            features_to_audit=["checking_status", "duration"],
            dump_all=True)