Python util.get_config() Examples
The following are 30 code examples of util.get_config().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module util,
or try the search function.
Example #1
Source File: run_summarization.py From TransferRL with MIT License | 6 votes |
def convert_to_coverage_model(self):
    """Load a non-coverage checkpoint, initialize the extra coverage
    variables, and save the result as a new checkpoint."""
    tf.logging.info("converting non-coverage model to coverage model..")

    # Fresh session; initialize every variable, including the new coverage ones.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Restore only the pre-coverage weights (skip coverage vars and
    # Adagrad accumulators, which did not exist in the old checkpoint).
    pre_coverage_vars = [
        v for v in tf.global_variables()
        if "coverage" not in v.name and "Adagrad" not in v.name
    ]
    saver = tf.train.Saver(pre_coverage_vars)
    print("restoring non-coverage variables...")
    curr_ckpt = util.load_ckpt(saver, sess)
    print("restored.")

    # Persist the combined model under a new name and stop.
    new_fname = curr_ckpt + '_cov_init'
    print("saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #2
Source File: run_summarization.py From pointer-generator with Apache License 2.0 | 6 votes |
def convert_to_coverage_model():
    """Load non-coverage checkpoint, add initialized extra variables for
    coverage, and save as new checkpoint.

    Fix: the original used Python 2 ``print`` statements; the ``print(...)``
    form below behaves identically on Python 2 and also runs on Python 3.
    """
    tf.logging.info("converting non-coverage model to coverage model..")

    # Initialize an entire coverage model from scratch.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Load all non-coverage weights from checkpoint (coverage vars and
    # Adagrad accumulators are new, so they are excluded).
    saver = tf.train.Saver([v for v in tf.global_variables()
                            if "coverage" not in v.name and "Adagrad" not in v.name])
    print("restoring non-coverage variables...")
    curr_ckpt = util.load_ckpt(saver, sess)
    print("restored.")

    # Save this model under a new name and quit.
    new_fname = curr_ckpt + '_cov_init'
    print("saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # this one will save all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #3
Source File: run_summarization.py From pointer-generator with Apache License 2.0 | 6 votes |
def restore_best_model():
    """Load bestmodel file from eval directory, add variables for adagrad,
    and save to train directory.

    Fix: the original used Python 2 ``print`` statements; the ``print(...)``
    form below behaves identically on Python 2 and also runs on Python 3.
    """
    tf.logging.info("Restoring bestmodel for training...")

    # Initialize all vars in the model (creates fresh Adagrad accumulators).
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore the best model from eval dir, excluding Adagrad slots.
    saver = tf.train.Saver([v for v in tf.all_variables() if "Adagrad" not in v.name])
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save this model to train dir (renamed bestmodel -> model) and quit.
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist, including Adagrad
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit()
Example #4
Source File: decode.py From unified-summarization with MIT License | 6 votes |
def __init__(self, model, batcher, vocab):
    """Initialize decoder.

    Args:
      model: a Seq2SeqAttentionModel object.
      batcher: a Batcher object.
      vocab: Vocabulary object.
    """
    self._model = model
    self._model.build_graph()
    self._batcher = batcher
    self._vocab = vocab
    # Saver used to load checkpoints for decoding.
    self._saver = tf.train.Saver(max_to_keep=3)
    self._sess = tf.Session(config=util.get_config())
    if FLAGS.mode == 'evalall':
        self.prepare_evaluate()
Example #5
Source File: evaluate.py From unified-summarization with MIT License | 6 votes |
def __init__(self, model, batcher, vocab):
    """Initialize decoder.

    Args:
      model: a Seq2SeqAttentionModel object.
      batcher: a Batcher object.
      vocab: Vocabulary object.
    """
    self._model = model
    self._model.build_graph()
    self._batcher = batcher
    self._vocab = vocab
    # Saver used to load checkpoints for decoding.
    self._saver = tf.train.Saver(max_to_keep=3)
    self._sess = tf.Session(config=util.get_config())
    if FLAGS.mode == 'evalall':
        self.prepare_evaluate()
Example #6
Source File: views.py From Adminset_Zabbix with Apache License 2.0 | 6 votes |
def zabbix_link_tem(request, arg1):
    """Link the template given in arg1['params']['groupid'] to every host id
    listed (comma-separated) in arg1['params']['hostids'].

    Hosts with no templates yet get the template linked directly; hosts that
    already have templates get it appended to their existing template list.
    Returns a JSON string {'code': 0, 'result': [...]}.

    Fixes: Python 2 ``print`` statements replaced by the py2/py3-compatible
    ``print(...)`` form; unused local ``tem`` removed.
    """
    work_dir = os.path.dirname(os.path.realpath(__file__))
    service_conf = os.path.join(work_dir, 'service.conf')
    zabbix_config = util.get_config(service_conf, 'zabbix')
    result = []
    template = {}
    data = arg1['params']
    print(data)  # e.g. {u'hostids': u'10157,10158', u'groupid': u'10001'}
    data_host = data['hostids'].split(',')
    print(data_host)
    for i in data_host:
        if len(zabbix_api.Zabbix(zabbix_config).hostid_get_template(i)[0]['parentTemplates']) == 0:
            # Host has no templates yet: link the new one directly.
            result.append(zabbix_api.Zabbix(zabbix_config).link_template(int(i), data['groupid']))
        else:
            # Host already has templates: append the new one to the list.
            template['templateid'] = data['groupid']
            data_mu = zabbix_api.Zabbix(zabbix_config).hostid_get_template(i)[0]['parentTemplates']
            data_mu.append(template)
            result.append(zabbix_api.Zabbix(zabbix_config).link_template(int(i), data_mu))
    return json.dumps({'code': 0, 'result': result})
Example #7
Source File: views.py From Adminset_Zabbix with Apache License 2.0 | 6 votes |
def zbhost_select(request):
    """Return (as JSON) the id/ip of every cmdb_host row that has no
    corresponding entry in the zbhost table.

    Fixes: service.conf was parsed up to three times per request — the config
    is now loaded once; the membership test uses a set instead of a list.
    """
    # Load the DB config once (the original re-read service.conf per query).
    service_conf = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'service.conf')
    api_config = util.get_config(service_conf, 'api')

    datadict = {}
    ret = []
    # zbhost table is joined against cmdb_host (fields per original schema).
    fields = ['id', 'cmdb_hostid', 'hostid', 'host', 'ip']
    zabbix_hosts = db.Cursor(api_config).get_results('zbhost', fields)
    # Set of cmdb ids already present in zbhost, for O(1) membership tests.
    known_ids = {str(zb["cmdb_hostid"]) for zb in zabbix_hosts}
    server_hosts = db.Cursor(api_config).get_results('cmdb_host', ["id"])
    for i in server_hosts:
        if str(i["id"]) not in known_ids:
            datadict["id"] = i["id"]
            get_ip = db.Cursor(api_config).get_where_results(
                'cmdb_host', ["id", "ip"], datadict)
            ret.append(get_ip[0])
    return json.dumps({'code': 0, 'result': ret})
Example #8
Source File: zabbix_api.py From Adminset_Zabbix with Apache License 2.0 | 6 votes |
def create_maintenance(name, start, stop, hostids, time):
    """Create a zabbix maintenance window named *name* for *hostids*,
    active from *start* to *stop*, with one timeperiod of length *time*."""
    data = {
        "name": name,
        "active_since": start,
        "active_till": stop,
        "hostids": hostids,
        "timeperiods": [{"timeperiod_type": 0, "period": time}],
    }
    conf_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'service.conf')
    client = zabbix_api.Zabbix(util.get_config(conf_path, 'zabbix'))
    ret = client.create_maintenance(data)
    return ret
Example #9
Source File: zabbix_api.py From Adminset_Zabbix with Apache License 2.0 | 6 votes |
def create_zabbix_host(hostid, groupid):
    """Create one zabbix host (agent interface on port 10050, useip=1) per
    address in *hostid* and attach each to host group *groupid*.

    Returns the list of per-host API results.

    Fix: the service.conf path/config lookup was loop-invariant and is now
    computed once instead of once per host.
    """
    # Resolve the zabbix section of service.conf once, outside the loop.
    zabbix_config = util.get_config(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), 'service.conf'),
        'zabbix')
    ret = []
    for host in hostid:
        data = {
            "host": host,
            "interfaces": [{
                "type": 1,      # agent interface
                "main": 1,
                "useip": 1,     # connect by IP, not DNS
                "ip": host,
                "dns": "",
                "port": "10050",
            }],
            "groups": [{"groupid": groupid}],
        }
        ret.append(zabbix_api.Zabbix(zabbix_config).create_host(data))
    return ret
Example #10
Source File: zabbix_api.py From Adminset_Zabbix with Apache License 2.0 | 6 votes |
def init_zabbix():
    """Pull every host from zabbix (with its interface IP) and insert the
    rows into the local ``zbhost`` table; returns "" if anything fails."""
    try:
        conf_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'service.conf')
        # Step 1: fetch all hosts (ip, host, id) from the zabbix API.
        zb_hosts = zabbix_api.Zabbix(
            util.get_config(conf_path, 'zabbix')).get_hosts()
        zb_hosts_interface = zabbix_api.Zabbix(
            util.get_config(conf_path, 'zabbix')).get_interface(
                [z['hostid'] for z in zb_hosts])
        data = []
        ret = []
        # Attach each host's interface IP before inserting.
        for h in zb_hosts:
            h['ip'] = zb_hosts_interface[h['hostid']]
            data.append(h)
        # Insert the merged rows into the database.
        for i in data:
            result = db.Cursor(
                util.get_config(conf_path, 'api')).execute_insert_sql('zbhost', i)
    except:  # NOTE(review): bare except swallows all errors; kept for parity
        return ""
Example #11
Source File: zabbix_api.py From Adminset_Zabbix with Apache License 2.0 | 6 votes |
def init_cmdb():
    """Back-fill zbhost.cmdb_hostid by matching each cmdb_host row's ip
    against zbhost; returns "" if anything fails."""
    try:
        conf_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'service.conf')
        # Fetch hosts from the cmdb_host table (only the columns we need;
        # the full column list in the original was commented out).
        fields = ['id', 'hostname', 'ip']
        hosts = db.Cursor(
            util.get_config(conf_path, 'api')).get_results('cmdb_host', fields)
        # For each cmdb host, stamp its id onto the zbhost row with the same ip.
        for h in hosts:
            data = {'cmdb_hostid': h['id']}
            where = {'ip': h['ip']}
            result = db.Cursor(
                util.get_config(conf_path, 'api')).execute_update_sql(
                    'zbhost', data, where)
    except:  # NOTE(review): bare except swallows all errors; kept for parity
        return ""
Example #12
Source File: main.py From docker with MIT License | 6 votes |
def setup_training_discriminator(model):
    """Build the discriminator graph, restore its latest checkpoint, and
    return (session, saver, train_dir)."""
    train_dir = os.path.join(FLAGS.log_root, "train-discriminator")
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)

    model.build_graph()  # build the graph
    # Saver used to load checkpoints for decoding.
    saver = tf.train.Saver(max_to_keep=20)
    sess = tf.Session(config=util.get_config())
    util.load_ckpt(saver, sess, ckpt_dir="train-discriminator")
    return sess, saver, train_dir
Example #13
Source File: main.py From docker with MIT License | 6 votes |
def setup_training_generator(model):
    """Build the generator graph, restore its latest checkpoint for
    decoding, and return (session, saver, train_dir)."""
    train_dir = os.path.join(FLAGS.log_root, "train-generator")
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)

    model.build_graph()  # build the graph
    # Saver used to load checkpoints for decoding.
    saver = tf.train.Saver(max_to_keep=20)
    sess = tf.Session(config=util.get_config())
    # Load an initial checkpoint to use for decoding.
    util.load_ckpt(saver, sess, ckpt_dir="train-generator")
    return sess, saver, train_dir
Example #14
Source File: run_summarization.py From long-summarization with Apache License 2.0 | 6 votes |
def convert_linear_attn_to_hier_model():
    """Load a linear-attention checkpoint, initialize the new hierarchical
    variables, and save the combined model."""
    tf.logging.info("converting linear model to hier model..")

    # Fresh session; initialize everything, including the new hier vars.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Restore all weights except the new hierarchical vars and Adagrad slots.
    keep = [v for v in tf.global_variables()
            if "Linear--Section-Features" not in v.name
            and "v_sec" not in v.name
            and "Adagrad" not in v.name]
    saver = tf.train.Saver(keep)
    print("restoring variables...")
    curr_ckpt = util.load_ckpt(saver, sess)
    print("restored.")

    # NOTE(review): unlike the sibling converters, no suffix is appended —
    # the model is saved back under the same checkpoint name.
    new_fname = curr_ckpt
    print("saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #15
Source File: run_summarization.py From long-summarization with Apache License 2.0 | 6 votes |
def restore_best_model():
    """Copy the eval-dir "bestmodel" checkpoint into the train dir, adding
    freshly initialized Adagrad variables, then exit."""
    tf.logging.info("Restoring bestmodel for training...")

    # Initialize all vars (this creates the Adagrad accumulators).
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore every non-Adagrad variable from the best model in eval dir.
    non_adagrad = [v for v in tf.all_variables() if "Adagrad" not in v.name]
    saver = tf.train.Saver(non_adagrad)
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save under the train dir with "bestmodel" renamed to "model", then quit.
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist, incl. Adagrad
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit()
Example #16
Source File: run_summarization.py From rotational-unit-of-memory with MIT License | 6 votes |
def convert_to_coverage_model():
    """Load a non-coverage checkpoint, initialize the extra coverage
    variables, and save the result as a new checkpoint."""
    tf.logging.info("converting non-coverage model to coverage model..")

    # Fresh session; initialize everything, including the coverage vars.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Restore only the pre-coverage weights (no coverage vars, no Adagrad).
    keep = [v for v in tf.global_variables()
            if "coverage" not in v.name and "Adagrad" not in v.name]
    saver = tf.train.Saver(keep)
    print("restoring non-coverage variables...")
    curr_ckpt = util.load_ckpt(saver, sess)
    print("restored.")

    # Save the combined model under a new name and stop.
    new_fname = curr_ckpt + '_cov_init'
    print("saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #17
Source File: run_summarization.py From TransferRL with MIT License | 6 votes |
def restore_best_model(self):
    """Copy the eval-dir "bestmodel" checkpoint into the train dir, adding
    freshly initialized Adagrad variables, then exit."""
    tf.logging.info("Restoring bestmodel for training...")

    # Initialize all vars (creates the Adagrad accumulators).
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore every non-Adagrad variable from the best model in eval dir.
    non_adagrad = [v for v in tf.all_variables() if "Adagrad" not in v.name]
    saver = tf.train.Saver(non_adagrad)
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save under the train dir with "bestmodel" renamed to "model", then quit.
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist, incl. Adagrad
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit()
Example #18
Source File: run_summarization.py From TransferRL with MIT License | 6 votes |
def convert_to_reinforce_model(self):
    """Load a non-reinforce checkpoint, initialize the extra reinforce
    variables, and save the result as a new checkpoint."""
    tf.logging.info("converting non-reinforce model to reinforce model..")

    # Fresh session; initialize everything, including the reinforce vars.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Restore only the pre-reinforce weights (no reinforce vars, no Adagrad).
    keep = [v for v in tf.global_variables()
            if "reinforce" not in v.name and "Adagrad" not in v.name]
    saver = tf.train.Saver(keep)
    print("restoring non-reinforce variables...")
    curr_ckpt = util.load_ckpt(saver, sess)
    print("restored.")

    # Save the combined model under a new name and stop.
    new_fname = curr_ckpt + '_rl_init'
    print("saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #19
Source File: run_summarization.py From RLSeq2Seq with MIT License | 6 votes |
def restore_best_model(self):
    """Copy the eval-dir "bestmodel" checkpoint into the train dir, adding
    freshly initialized Adagrad variables, then exit."""
    tf.logging.info("Restoring bestmodel for training...")

    # Initialize all vars (creates the Adagrad accumulators).
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore every non-Adagrad variable from the best model in eval dir.
    to_restore = [v for v in tf.all_variables() if "Adagrad" not in v.name]
    saver = tf.train.Saver(to_restore)
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save under the train dir with "bestmodel" renamed to "model", then quit.
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist, incl. Adagrad
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit()
Example #20
Source File: run_summarization.py From RLSeq2Seq with MIT License | 6 votes |
def convert_to_coverage_model(self):
    """Load a non-coverage checkpoint, initialize the extra coverage
    variables, and save the result as a new checkpoint."""
    tf.logging.info("converting non-coverage model to coverage model..")

    # Fresh session; initialize everything, including the coverage vars.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Restore only the pre-coverage weights (no coverage vars, no Adagrad).
    to_restore = [v for v in tf.global_variables()
                  if "coverage" not in v.name and "Adagrad" not in v.name]
    saver = tf.train.Saver(to_restore)
    print("restoring non-coverage variables...")
    curr_ckpt = util.load_ckpt(saver, sess)
    print("restored.")

    # Save the combined model under a new name and stop.
    new_fname = curr_ckpt + '_cov_init'
    print("saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #21
Source File: run_summarization.py From RLSeq2Seq with MIT License | 6 votes |
def convert_to_reinforce_model(self):
    """Load a non-reinforce checkpoint, initialize the extra reinforce
    variables, and save the result as a new checkpoint."""
    tf.logging.info("converting non-reinforce model to reinforce model..")

    # Fresh session; initialize everything, including the reinforce vars.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Restore only the pre-reinforce weights (no reinforce vars, no Adagrad).
    to_restore = [v for v in tf.global_variables()
                  if "reinforce" not in v.name and "Adagrad" not in v.name]
    saver = tf.train.Saver(to_restore)
    print("restoring non-reinforce variables...")
    curr_ckpt = util.load_ckpt(saver, sess)
    print("restored.")

    # Save the combined model under a new name and stop.
    new_fname = curr_ckpt + '_rl_init'
    print("saving model to %s..." % (new_fname))
    new_saver = tf.train.Saver()  # saves all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #22
Source File: run_summarization.py From MAX-Text-Summarizer with Apache License 2.0 | 6 votes |
def convert_to_coverage_model():
    """Load a non-coverage checkpoint from FLAGS.ckpt_dir, initialize the
    extra coverage variables, and save the result as a new checkpoint."""
    tf.logging.info("converting non-coverage model to coverage model..")

    # Fresh session; initialize everything, including the coverage vars.
    sess = tf.Session(config=util.get_config())
    print("initializing everything...")
    sess.run(tf.global_variables_initializer())

    # Restore only the pre-coverage weights (no coverage vars, no Adagrad).
    to_restore = [v for v in tf.global_variables()
                  if "coverage" not in v.name and "Adagrad" not in v.name]
    saver = tf.train.Saver(to_restore)
    print("restoring non-coverage variables...")
    curr_ckpt = util.load_ckpt(saver, sess, FLAGS.ckpt_dir)
    print("restored.")

    # Save the combined model under a new name and stop.
    new_fname = curr_ckpt + '_cov_init'
    print("saving model to %s..." % new_fname)
    new_saver = tf.train.Saver()  # saves all variables that now exist
    new_saver.save(sess, new_fname)
    print("saved.")
    exit()
Example #23
Source File: run_summarization.py From MAX-Text-Summarizer with Apache License 2.0 | 6 votes |
def restore_best_model():
    """Copy the eval-dir "bestmodel" checkpoint into the train dir, adding
    freshly initialized Adagrad variables, then exit."""
    tf.logging.info("Restoring best model for training...")

    # Initialize all vars (creates the Adagrad accumulators).
    sess = tf.Session(config=util.get_config())
    print("Initializing all variables...")
    sess.run(tf.initialize_all_variables())

    # Restore every non-Adagrad variable from the best model in eval dir.
    to_restore = [v for v in tf.all_variables() if "Adagrad" not in v.name]
    saver = tf.train.Saver(to_restore)
    print("Restoring all non-adagrad variables from best model in eval dir...")
    curr_ckpt = util.load_ckpt(saver, sess, "eval")
    print("Restored %s." % curr_ckpt)

    # Save under the train dir with "bestmodel" renamed to "model", then quit.
    new_model_name = curr_ckpt.split("/")[-1].replace("bestmodel", "model")
    new_fname = os.path.join(FLAGS.log_root, "train", new_model_name)
    print("Saving model to %s..." % new_fname)
    new_saver = tf.train.Saver()  # saves all variables that now exist, incl. Adagrad
    new_saver.save(sess, new_fname)
    print("Saved.")
    exit()
Example #24
Source File: views.py From Adminset_Zabbix with Apache License 2.0 | 5 votes |
def zabbix_unlink_tem(request, arg1):
    """Unlink template arg1['params']['templateid'] from every host id
    listed (comma-separated) in arg1['params']['hostids'].

    Returns a JSON string {'code': 0, 'result': [...]}.

    Fix: the original used a Python 2 ``print`` statement; ``print(...)``
    behaves identically on Python 2 and also runs on Python 3.
    """
    work_dir = os.path.dirname(os.path.realpath(__file__))
    service_conf = os.path.join(work_dir, 'service.conf')
    zabbix_config = util.get_config(service_conf, 'zabbix')
    result = []
    data = arg1['params']
    print(data)
    data_host = data['hostids'].split(',')
    for i in data_host:
        result.append(zabbix_api.Zabbix(zabbix_config).unlink_template(int(i), data['templateid']))
    return json.dumps({'code': 0, 'result': result})
Example #25
Source File: decode.py From pointer-generator with Apache License 2.0 | 5 votes |
def __init__(self, model, batcher, vocab):
    """Initialize decoder.

    Args:
      model: a Seq2SeqAttentionModel object.
      batcher: a Batcher object.
      vocab: Vocabulary object.
    """
    self._model = model
    self._model.build_graph()
    self._batcher = batcher
    self._vocab = vocab
    self._saver = tf.train.Saver()  # used to load checkpoints for decoding
    self._sess = tf.Session(config=util.get_config())

    # Load an initial checkpoint to use for decoding.
    ckpt_path = util.load_ckpt(self._saver, self._sess)

    if FLAGS.single_pass:
        # Descriptive decode dir name derived from e.g. "ckpt-123456";
        # single-pass runs must not reuse an existing directory.
        ckpt_name = "ckpt-" + ckpt_path.split('-')[-1]
        self._decode_dir = os.path.join(FLAGS.log_root, get_decode_dir_name(ckpt_name))
        if os.path.exists(self._decode_dir):
            raise Exception("single_pass decode directory %s should not already exist" % self._decode_dir)
    else:
        # Generic decode dir name.
        self._decode_dir = os.path.join(FLAGS.log_root, "decode")

    # Make the decode dir if necessary.
    if not os.path.exists(self._decode_dir):
        os.mkdir(self._decode_dir)

    if FLAGS.single_pass:
        # Dirs that hold output written in the format pyrouge expects.
        self._rouge_ref_dir = os.path.join(self._decode_dir, "reference")
        if not os.path.exists(self._rouge_ref_dir):
            os.mkdir(self._rouge_ref_dir)
        self._rouge_dec_dir = os.path.join(self._decode_dir, "decoded")
        if not os.path.exists(self._rouge_dec_dir):
            os.mkdir(self._rouge_dec_dir)
Example #26
Source File: run_summarization.py From pointer-generator with Apache License 2.0 | 5 votes |
def setup_training(model, batcher):
    """Does setup before starting training (run_training): builds the graph,
    optionally converts/restores checkpoints, then runs training under a
    Supervisor until interrupted."""
    train_dir = os.path.join(FLAGS.log_root, "train")
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)

    model.build_graph()  # build the graph
    if FLAGS.convert_to_coverage_model:
        assert FLAGS.coverage, "To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True"
        convert_to_coverage_model()
    if FLAGS.restore_best_model:
        restore_best_model()

    saver = tf.train.Saver(max_to_keep=3)  # keep 3 checkpoints at a time
    sv = tf.train.Supervisor(
        logdir=train_dir,
        is_chief=True,
        saver=saver,
        summary_op=None,
        save_summaries_secs=60,  # save summaries for tensorboard every 60 secs
        save_model_secs=60,      # checkpoint every 60 secs
        global_step=model.global_step)
    summary_writer = sv.summary_writer
    tf.logging.info("Preparing or waiting for session...")
    sess_context_manager = sv.prepare_or_wait_for_session(config=util.get_config())
    tf.logging.info("Created session.")
    try:
        # Infinite training loop until interrupted.
        run_training(model, batcher, sess_context_manager, sv, summary_writer)
    except KeyboardInterrupt:
        tf.logging.info("Caught keyboard interrupt on worker. Stopping supervisor...")
        sv.stop()
Example #27
Source File: run_rewriter.py From unified-summarization with MIT License | 5 votes |
def setup_training(model, batcher):
    """Does setup before starting training (run_training): builds the graph
    on /gpu:0, optionally converts to a coverage model, then runs training
    under a Supervisor until interrupted."""
    train_dir = os.path.join(FLAGS.log_root, "train")
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)

    # Build the graph pinned to the first GPU.
    default_device = tf.device('/gpu:0')
    with default_device:
        model.build_graph()
        if FLAGS.convert_to_coverage_model:
            assert FLAGS.coverage, "To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True"
            convert_to_coverage_model()

    saver = tf.train.Saver(max_to_keep=FLAGS.model_max_to_keep)
    sv = tf.train.Supervisor(
        logdir=train_dir,
        is_chief=True,
        saver=saver,
        summary_op=None,
        save_summaries_secs=60,  # save summaries for tensorboard every 60 secs
        save_model_secs=0,       # no time-based checkpointing
        global_step=model.global_step)
    summary_writer = sv.summary_writer
    tf.logging.info("Preparing or waiting for session...")
    sess_context_manager = sv.prepare_or_wait_for_session(config=util.get_config())
    tf.logging.info("Created session.")
    try:
        # Infinite training loop until interrupted.
        run_training(model, batcher, sess_context_manager, sv, summary_writer)
    except KeyboardInterrupt:
        tf.logging.info("Caught keyboard interrupt on worker. Stopping supervisor...")
        sv.stop()
Example #28
Source File: db.py From Adminset_Zabbix with Apache License 2.0 | 5 votes |
def getinfo(self, table_name, fields):
    """Query a single table and fold it into a {key: value} dict.

    *fields* is a two-element list; the first field becomes the (string)
    key of the returned dict. If the second field is one of
    r_id / p_id / user_all_perm, its comma-joined value is split into a
    list — e.g. user permissions {u'songpeng': [u'1', u'2']}; otherwise
    the value is kept as-is — e.g. id-to-name {'1': 'tom', '2': 'jerry'}.
    """
    conf_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'service.conf')
    rows = Cursor(util.get_config(conf_path, 'api')).get_results(table_name, fields)
    key_f, val_f = fields[0], fields[1]
    if val_f in ['r_id', 'p_id', 'user_all_perm']:
        # Second column stores a comma-joined list — split it out.
        result = dict((str(r[key_f]), r[val_f].split(',')) for r in rows)
    else:
        result = dict((str(r[key_f]), r[val_f]) for r in rows)
    return result
Example #29
Source File: views.py From Adminset_Zabbix with Apache License 2.0 | 5 votes |
def zabbix_gettem_select(request):
    """Fetch all host templates from zabbix and return them as a JSON string
    {'code': 0, 'result': [...]}.

    Fix: the original used a Python 2 ``print`` statement; ``print(...)``
    behaves identically on Python 2 and also runs on Python 3.
    """
    work_dir = os.path.dirname(os.path.realpath(__file__))
    service_conf = os.path.join(work_dir, 'service.conf')
    zabbix_config = util.get_config(service_conf, 'zabbix')
    tem = zabbix_api.Zabbix(zabbix_config).get_host_tem()
    print(json.dumps({'code': 0, 'result': tem}))
    return json.dumps({'code': 0, 'result': tem})
Example #30
Source File: views.py From Adminset_Zabbix with Apache License 2.0 | 5 votes |
def zbhost_allhost_select(request):
    """Fetch every host from zabbix and return them as a JSON string
    {'code': 0, 'result': [...]}.

    Fix: the original used a Python 2 ``print`` statement; ``print(...)``
    behaves identically on Python 2 and also runs on Python 3.
    """
    work_dir = os.path.dirname(os.path.realpath(__file__))
    service_conf = os.path.join(work_dir, 'service.conf')
    zabbix_config = util.get_config(service_conf, 'zabbix')
    data = zabbix_api.Zabbix(zabbix_config).get_hosts()
    print(json.dumps({'code': 0, 'result': data}))
    return json.dumps({'code': 0, 'result': data})