Python parameters.Parameters() Examples
The following are 12 code examples of parameters.Parameters().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module parameters, or try the search function.
Example #1
Source File: neural_programmer.py From DOTA_models with Apache License 2.0 | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the output/model directories, opens a TF
    session, and either trains or loops forever evaluating checkpoints
    found in the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #2
Source File: neural_programmer.py From yolo_v2 with Apache License 2.0 | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the output/model directories, opens a TF
    session, and either trains or loops forever evaluating checkpoints
    found in the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #3
Source File: neural_programmer.py From Gun-Detector with Apache License 2.0 | 4 votes |
def master(train_data, dev_data, utility):
    """Builds the TF graph and runs either the trainer or the evaluator loop."""
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Model parameters and their initializer.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    run_mode = "test" if FLAGS.evaluator_job else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes,
                        mode=run_mode)
    graph.create_graph(params, global_step)
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        saver = tf.train.Saver(params.copy(), max_to_keep=500)
        if FLAGS.evaluator_job:
            # Evaluator: repeatedly scan the model dir and score checkpoints.
            while True:
                step_to_file = {}
                for entry in tf.gfile.ListDirectory(model_dir):
                    if "checkpoint" in entry or "index" in entry or "meta" in entry:
                        continue
                    if "data" in entry:
                        entry = entry.split(".")[0]
                    step_to_file[int(entry.split("_")[-1])] = entry
                ordered = sorted(step_to_file.items(), key=lambda kv: kv[0])
                if ordered:
                    ordered = ordered[:-1]
                print("list of models: ", ordered)
                for _, ckpt_name in ordered:
                    print("restoring: ", ckpt_name)
                    saver.restore(sess, model_dir + "/" + ckpt_name)
                    step = int(ckpt_name.split("_")[-1])
                    print("evaluating on dev ", ckpt_name, step)
                    evaluate(sess, dev_data, batch_size, graph, step)
        else:
            # Trainer: make sure the directories exist, then train.
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if not tf.gfile.IsDirectory(utility.FLAGS.output_dir):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if not tf.gfile.IsDirectory(model_dir):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #4
Source File: neural_programmer.py From Action_Recognition_Zoo with MIT License | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the model directory, opens a TF session,
    and either trains or loops forever evaluating checkpoints found in
    the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            # NOTE(review): unlike other variants in this file, this version
            # does not create utility.FLAGS.output_dir first.
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #5
Source File: neural_programmer.py From ECO-pytorch with BSD 2-Clause "Simplified" License | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the model directory, opens a TF session,
    and either trains or loops forever evaluating checkpoints found in
    the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            # NOTE(review): unlike other variants in this file, this version
            # does not create utility.FLAGS.output_dir first.
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #6
Source File: neural_programmer.py From hands-detection with MIT License | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the output/model directories, opens a TF
    session, and either trains or loops forever evaluating checkpoints
    found in the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #7
Source File: neural_programmer.py From object_detection_kitti with Apache License 2.0 | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the output/model directories, opens a TF
    session, and either trains or loops forever evaluating checkpoints
    found in the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #8
Source File: neural_programmer.py From object_detection_with_tensorflow with MIT License | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the output/model directories, opens a TF
    session, and either trains or loops forever evaluating checkpoints
    found in the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #9
Source File: neural_programmer.py From HumanRecognition with MIT License | 4 votes |
def master(train_data, dev_data, utility):
    """Build the TF graph and dispatch to the trainer or the evaluator.

    Args:
      train_data: training examples passed through to Train().
      dev_data: dev-set examples passed through to evaluate().
      utility: project utility object carrying FLAGS and model config.

    Side effects: may create the output/model directories, opens a TF
    session, and either trains or loops forever evaluating checkpoints
    found in the model directory.
    """
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes, mode=key)
    graph.create_graph(params, global_step)
    # NOTE(review): unused in this function; presumably placeholders for
    # bookkeeping done elsewhere — confirm before removing.
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    # Start session.
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            # Evaluator: poll the model dir forever, scoring each checkpoint.
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file or
                            "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(selected_models.items(), key=lambda x: x[0])
                if (len(file_list) > 0):
                    # Drop the newest entry — presumably it may still be
                    # mid-write by the trainer; confirm.
                    file_list = file_list[0:len(file_list) - 1]
                # BUGFIX: original used Python 2 print statements (a
                # SyntaxError on Python 3); converted to print() calls.
                print("list of models: ", file_list)
                for model_file in file_list:
                    model_file = model_file[1]
                    print("restoring: ", model_file)
                    saver.restore(sess, model_dir + "/" + model_file)
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print("evaluating on dev ", model_file, model_step)
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #10
Source File: neural_programmer.py From g-tensorflow-models with Apache License 2.0 | 4 votes |
def master(train_data, dev_data, utility):
    """Builds the TF graph and runs either the trainer or the evaluator loop."""
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Model parameters and their initializer.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    run_mode = "test" if FLAGS.evaluator_job else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes,
                        mode=run_mode)
    graph.create_graph(params, global_step)
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        saver = tf.train.Saver(params.copy(), max_to_keep=500)
        if FLAGS.evaluator_job:
            # Evaluator: repeatedly scan the model dir and score checkpoints.
            while True:
                step_to_file = {}
                for entry in tf.gfile.ListDirectory(model_dir):
                    if "checkpoint" in entry or "index" in entry or "meta" in entry:
                        continue
                    if "data" in entry:
                        entry = entry.split(".")[0]
                    step_to_file[int(entry.split("_")[-1])] = entry
                ordered = sorted(step_to_file.items(), key=lambda kv: kv[0])
                if ordered:
                    ordered = ordered[:-1]
                print("list of models: ", ordered)
                for _, ckpt_name in ordered:
                    print("restoring: ", ckpt_name)
                    saver.restore(sess, model_dir + "/" + ckpt_name)
                    step = int(ckpt_name.split("_")[-1])
                    print("evaluating on dev ", ckpt_name, step)
                    evaluate(sess, dev_data, batch_size, graph, step)
        else:
            # Trainer: make sure the directories exist, then train.
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if not tf.gfile.IsDirectory(utility.FLAGS.output_dir):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if not tf.gfile.IsDirectory(model_dir):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #11
Source File: neural_programmer.py From models with Apache License 2.0 | 4 votes |
def master(train_data, dev_data, utility):
    """Builds the TF graph and runs either the trainer or the evaluator loop."""
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Model parameters and their initializer.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    run_mode = "test" if FLAGS.evaluator_job else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes,
                        mode=run_mode)
    graph.create_graph(params, global_step)
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        saver = tf.train.Saver(params.copy(), max_to_keep=500)
        if FLAGS.evaluator_job:
            # Evaluator: repeatedly scan the model dir and score checkpoints.
            while True:
                step_to_file = {}
                for entry in tf.gfile.ListDirectory(model_dir):
                    if "checkpoint" in entry or "index" in entry or "meta" in entry:
                        continue
                    if "data" in entry:
                        entry = entry.split(".")[0]
                    step_to_file[int(entry.split("_")[-1])] = entry
                ordered = sorted(step_to_file.items(), key=lambda kv: kv[0])
                if ordered:
                    ordered = ordered[:-1]
                print("list of models: ", ordered)
                for _, ckpt_name in ordered:
                    print("restoring: ", ckpt_name)
                    saver.restore(sess, model_dir + "/" + ckpt_name)
                    step = int(ckpt_name.split("_")[-1])
                    print("evaluating on dev ", ckpt_name, step)
                    evaluate(sess, dev_data, batch_size, graph, step)
        else:
            # Trainer: make sure the directories exist, then train.
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if not tf.gfile.IsDirectory(utility.FLAGS.output_dir):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if not tf.gfile.IsDirectory(model_dir):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)
Example #12
Source File: neural_programmer.py From multilabel-image-classification-tensorflow with MIT License | 4 votes |
def master(train_data, dev_data, utility):
    """Builds the TF graph and runs either the trainer or the evaluator loop."""
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Model parameters and their initializer.
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    run_mode = "test" if FLAGS.evaluator_job else "train"
    graph = model.Graph(utility, batch_size, utility.FLAGS.max_passes,
                        mode=run_mode)
    graph.create_graph(params, global_step)
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        saver = tf.train.Saver(params.copy(), max_to_keep=500)
        if FLAGS.evaluator_job:
            # Evaluator: repeatedly scan the model dir and score checkpoints.
            while True:
                step_to_file = {}
                for entry in tf.gfile.ListDirectory(model_dir):
                    if "checkpoint" in entry or "index" in entry or "meta" in entry:
                        continue
                    if "data" in entry:
                        entry = entry.split(".")[0]
                    step_to_file[int(entry.split("_")[-1])] = entry
                ordered = sorted(step_to_file.items(), key=lambda kv: kv[0])
                if ordered:
                    ordered = ordered[:-1]
                print("list of models: ", ordered)
                for _, ckpt_name in ordered:
                    print("restoring: ", ckpt_name)
                    saver.restore(sess, model_dir + "/" + ckpt_name)
                    step = int(ckpt_name.split("_")[-1])
                    print("evaluating on dev ", ckpt_name, step)
                    evaluate(sess, dev_data, batch_size, graph, step)
        else:
            # Trainer: make sure the directories exist, then train.
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print("model dir: ", model_dir)
            if not tf.gfile.IsDirectory(utility.FLAGS.output_dir):
                print("create dir: ", utility.FLAGS.output_dir)
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if not tf.gfile.IsDirectory(model_dir):
                print("create dir: ", model_dir)
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir, saver)