Python tensorflow.GIT_VERSION Examples
The following are 22 code examples of tensorflow.GIT_VERSION. Note that GIT_VERSION is a module-level string attribute, not a callable: it records the Git revision of the sources TensorFlow was built from. You can go to the original project or source file by following the reference above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
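Before the examples, a minimal sketch of how this attribute is typically read. tf.GIT_VERSION is the TensorFlow 1.x spelling; TensorFlow 2.x moved the same value to tf.version.GIT_VERSION, so the sketch below falls back with getattr to stay runnable on either major version.

import tensorflow as tf

# TF 1.x exposes the build's Git revision as tf.GIT_VERSION; TF 2.x
# moved it to tf.version.GIT_VERSION. getattr keeps this runnable on
# either major version.
git_version = getattr(tf, "GIT_VERSION", None) or tf.version.GIT_VERSION
release = getattr(tf, "VERSION", None) or tf.version.VERSION
print("TensorFlow %s (built from %s)" % (release, git_version))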
Example #1
Source File: logger.py From ml-on-gcp with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
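For context, a hedged sketch of how a helper like this is driven; the run_info dict and the printed output are illustrative assumptions, not part of the original logger.py.

import tensorflow as tf  # assumes a TF 1.x install, matching the example

run_info = {}  # metadata dict the benchmark logger accumulates
_collect_tensorflow_info(run_info)
# run_info now holds something like:
# {"tensorflow_version": {"version": "1.12.0",
#                         "git_hash": "v1.12.0-0-ga6d8ffae09"}}
print(run_info)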
Example #2
Source File: logger.py From training with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #3
Source File: logger.py From models with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #4
Source File: logger.py From models with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #5
Source File: logger.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #6
Source File: logger.py From models with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #7
Source File: logger.py From models with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #8
Source File: logger.py From ml-on-gcp with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #9
Source File: logger.py From ml-on-gcp with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #10
Source File: logger.py From ml-on-gcp with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #11
Source File: versions_test.py From deep_image_model with Apache License 2.0 | 5 votes |
def testGitAndCompilerVersion(self):
  self.assertEqual(type(tf.__git_version__), str)
  self.assertEqual(type(tf.__compiler_version__), str)
  self.assertEqual(type(tf.GIT_VERSION), str)
  self.assertEqual(type(tf.COMPILER_VERSION), str)
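The test above treats tf.GIT_VERSION and tf.__git_version__ as two public spellings of the same build string (likewise for the compiler version). A standalone check of that relationship could look like the following; the equality assertions are an assumption about the aliasing, though it holds for stock TF 1.x builds.

import tensorflow as tf

# Both spellings are populated from the same compiled version record,
# so the test guards the public aliases rather than distinct values.
assert tf.GIT_VERSION == tf.__git_version__
assert tf.COMPILER_VERSION == tf.__compiler_version__
print(tf.GIT_VERSION, "/", tf.COMPILER_VERSION)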
Example #12
Source File: logger.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #13
Source File: logger.py From Gun-Detector with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #14
Source File: logger.py From nsfw with Apache License 2.0 | 5 votes |
def _collect_tensorflow_info(run_info):
  run_info["tensorflow_version"] = {
      "version": tf.VERSION, "git_hash": tf.GIT_VERSION}
Example #15
Source File: reference_data.py From multilabel-image-classification-tensorflow with MIT License | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
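The final write in this function records provenance: tf_version.json holds the [tf.VERSION, tf.GIT_VERSION] pair of the build that generated the reference data. A hypothetical sketch of reading that pair back at test time follows; the file name comes from the code above, while the helper name and the warning logic are assumptions for illustration.

import json
import os

import tensorflow as tf

def _check_reference_version(data_dir):
  # tf_version.json stores [release_version, git_hash], as written above.
  with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "r") as f:
    ref_version, ref_git_hash = json.load(f)
  if ref_git_hash != tf.GIT_VERSION:
    tf.logging.warning(
        "Reference data generated by TF %s (%s); current build is %s (%s)."
        % (ref_version, ref_git_hash, tf.VERSION, tf.GIT_VERSION))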
Example #16
Source File: reference_data.py From g-tensorflow-models with Apache License 2.0 | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
Example #17
Source File: reference_data.py From training with Apache License 2.0 | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
Example #18
Source File: reference_data.py From models with Apache License 2.0 | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
Example #19
Source File: reference_data.py From models with Apache License 2.0 | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
Example #20
Source File: reference_data.py From models with Apache License 2.0 | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
Example #21
Source File: reference_data.py From Gun-Detector with Apache License 2.0 | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
Example #22
Source File: reference_data.py From nsfw with Apache License 2.0 | 4 votes |
def _construct_and_save_reference_files(
    self, name, graph, ops_to_eval, correctness_function):
  """Save reference data files.

  Constructs a serialized graph_def, layer weights, and computation
  results. It then saves them to files which are read at test time.

  Args:
    name: String defining the run. This will be used to define folder names
      and will be used for random seed construction.
    graph: The graph in which the test is conducted.
    ops_to_eval: Ops which the user wishes to be evaluated under a
      controlled session.
    correctness_function: This function accepts the evaluated results of
      ops_to_eval, and returns a list of values. This list must be JSON
      serializable; in particular it is up to the user to convert numpy
      dtypes into builtin dtypes.
  """
  data_dir = os.path.join(self.data_root, name)

  # Make sure there is a clean space for results.
  if os.path.exists(data_dir):
    shutil.rmtree(data_dir)
  os.makedirs(data_dir)

  # Serialize graph for comparison.
  graph_bytes = graph.as_graph_def().SerializeToString()
  expected_file = os.path.join(data_dir, "expected_graph")
  with tf.gfile.Open(expected_file, "wb") as f:
    f.write(graph_bytes)

  with graph.as_default():
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

  with self.test_session(graph=graph) as sess:
    sess.run(init)
    saver.save(sess=sess,
               save_path=os.path.join(data_dir, self.ckpt_prefix))

    # These files are not needed for this test.
    os.remove(os.path.join(data_dir, "checkpoint"))
    os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

    # ops are evaluated even if there is no correctness function to ensure
    # that they can be evaluated.
    eval_results = [op.eval() for op in ops_to_eval]

    if correctness_function is not None:
      results = correctness_function(*eval_results)
      with tf.gfile.Open(os.path.join(data_dir, "results.json"), "w") as f:
        json.dump(results, f)

    with tf.gfile.Open(os.path.join(data_dir, "tf_version.json"), "w") as f:
      json.dump([tf.VERSION, tf.GIT_VERSION], f)
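All of the examples above use the TF 1.x spellings tf.VERSION and tf.GIT_VERSION. Under TensorFlow 2.x the same pair lives in the tf.version namespace; a minimal sketch of the 2.x equivalent of the logger snippet:

import tensorflow as tf

# TF 2.x equivalent of the tf.VERSION / tf.GIT_VERSION pair used above.
build_info = {
    "version": tf.version.VERSION,
    "git_hash": tf.version.GIT_VERSION,
}
print(build_info)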