Python tensorflow.load_op_library() Examples

The following are 30 code examples of tensorflow.load_op_library(), collected from open-source projects. tf.load_op_library() loads a shared object (typically a compiled .so file) containing custom TensorFlow operators, registers those ops with the runtime, and returns a Python module that exposes a wrapper function for each registered op. The examples below illustrate common patterns: resolving the library path relative to the calling module or package, copying the library to a temporary file so a running program is not disturbed when the original file is updated, and falling back or raising a clear error when the library is missing or was built against an incompatible TensorFlow version.
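
A minimal sketch of the basic call, assuming the canonical ZeroOut example op from the TensorFlow custom-op guide has already been compiled into zero_out.so (the path below is hypothetical):

import tensorflow as tf

# Load the compiled shared object. TensorFlow registers every op defined in the
# library and returns a module exposing one generated Python wrapper per op.
zero_out_module = tf.load_op_library('./zero_out.so')

# Wrappers are named after the registered op, converted to snake_case
# ("ZeroOut" -> zero_out); this op copies its input with all but the
# first element set to zero.
print(zero_out_module.zero_out([[1, 2], [3, 4]]))  # -> [[1, 0], [0, 0]]

If the file does not exist, or its symbols do not match the installed TensorFlow, tf.load_op_library raises tf.errors.NotFoundError; several of the examples below catch this error to fall back gracefully or report a clearer message.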
Example #1
Source File: shuffle_op_test.py    From EnglishSpeechUpsampler with MIT License
def testShuffle(self):
        shuffle_module = tf.load_op_library('shuffle_op.so')
        shuffle = shuffle_module.shuffle

        input_tensor = np.arange(12).reshape((3, 4))
        desired_shape = np.array([6, -1])
        output_tensor = input_tensor.reshape((6, 2))
        with self.test_session():
            result = shuffle(input_tensor, desired_shape)
            self.assertAllEqual(result.eval(), output_tensor)

        input_tensor = np.arange(12).reshape((3, 4))
        desired_shape = np.array([5, -1])
        output_tensor = input_tensor.reshape((6, 2))[:-1]
        with self.test_session():
            result = shuffle(input_tensor, desired_shape)
            self.assertAllEqual(result.eval(), output_tensor) 
Example #2
Source File: tf.py    From deep500 with BSD 3-Clause "New" or "Revised" License
def _custom_cpp_op(op: CompilableOp, stateful, name):
    """ Compiles and registers a custom C++ Tensorflow operator """
    # Compile the .so file
    tf_path = os.path.abspath(os.path.dirname(tf.__file__))
    
    so_file = TFCompiler().compile_op(op.name, op.files, 
        op.inputs, op.outputs,
        any([f.endswith('.cu') for f in op.files]), op.live_output,  
        additional_cmake_options=['-DTENSORFLOW_PATH=' + tf_path] + op.cmake_options,
        additional_definitions=op.defs, output_folder=op.output_folder)

    # Load the compiled library into Tensorflow
    op_module = tf.load_op_library(so_file)
    op_func = getattr(op_module, 'tf_op' + op.name)
    op_grad_func = getattr(op_module, 'tf_op_grad' + op.name)
    
    # Create the deep500 custom op object
    lib = ctypes.CDLL(so_file)
    if not getattr(lib, 'create_new_op', False):
        raise ValueError('Invalid custom operator library file')
    lib.create_new_op.restype = ctypes.c_int64
    lib.is_cuda_supported.restype = ctypes.c_bool
    lib.report.restype = ctypes.c_int64

    return TFCompiledOp(op, op_func, op_grad_func, lib) 
Example #3
Source File: secure_random.py    From tf-encrypted with Apache License 2.0
def _try_load_secure_random_module():
    """
    Attempt to load and return secure random module; returns None if failed.
    """
    so_file = SO_PATH.format(dn=os.path.dirname(tfe.__file__), tfv=tf.__version__)
    if not os.path.exists(so_file):
        logger.warning(
            (
                "Falling back to insecure randomness since the required custom op "
                "could not be found for the installed version of TensorFlow. Fix "
                "this by compiling custom ops. Missing file was '%s'"
            ),
            so_file,
        )
        return None

    try:
        return tf.load_op_library(so_file)

    except NotFoundError as ex:
        logger.warning(
            (
                "Falling back to insecure randomness since the required custom op "
                "could not be found for the installed version of TensorFlow. Fix "
                "this by compiling custom ops. "
                "Missing file was '%s', error was \"%s\"."
            ),
            so_file,
            ex,
        )

    except Exception as ex:  # pylint: disable=broad-except
        logger.error(
            (
                "Falling back to insecure randomness since an error occurred "
                'loading the required custom op: "%s".'
            ),
            ex,
        )

    return None 
Example #4
Source File: register.py    From addons with Apache License 2.0
def register_custom_kernels() -> None:
    all_shared_objects = _get_all_shared_objects()
    if not all_shared_objects:
        raise FileNotFoundError(
            "No shared objects files were found in the custom ops "
            "directory in Tensorflow Addons, check your installation again,"
            "or, if you don't need custom ops, call `tfa.register_all(custom_kernels=False)`"
            " instead."
        )
    try:
        for shared_object in all_shared_objects:
            tf.load_op_library(shared_object)
    except tf.errors.NotFoundError as e:
        raise RuntimeError(
            "One of the shared objects ({}) could not be loaded. This may be "
            "due to a number of reasons (incompatible TensorFlow version, buiding from "
            "source with different flags, broken install of TensorFlow Addons...). If you"
            "wanted to register the shared objects because you needed them when loading your "
            "model, you should fix your install of TensorFlow Addons. If you don't "
            "use custom ops in your model, you can skip registering custom ops with "
            "`tfa.register_all(custom_kernels=False)`".format(shared_object)
        ) from e 
Example #5
Source File: __init__.py    From docker-python with Apache License 2.0
def _load_library(filename, lib="op"):
  """_load_library"""
  f = inspect.getfile(sys._getframe(1)) # pylint: disable=protected-access

  # Construct filename
  f = os.path.join(os.path.dirname(f), filename)
  filenames = [f]

  # Function to load the library, return True if file system library is loaded
  load_fn = tf.load_op_library if lib == "op" \
      else lambda f: tf.compat.v1.load_file_system_library(f) is None

  # Try to load all paths for file, fail if none succeed
  errs = []
  for f in filenames:
    try:
      l = load_fn(f)
      if l is not None:
        return l
    except errors.NotFoundError as e:
      errs.append(str(e))
  raise NotImplementedError(
      "unable to open file: " +
      "{}, from paths: {}\ncaused by: {}".format(filename, filenames, errs)) 
Example #6
Source File: model_fn.py    From cnn_lstm_ctc_ocr with GNU General Public License v3.0
def _get_lexicon_output( rnn_logits, sequence_length, lexicon ):
    """Create lexicon-restricted output ops
        prediction: Dense BxT tensor of predicted character indices
        seq_prob: Bx1 tensor of output sequence probabilities
    """
    # Note: TFWordBeamSearch.so must be in LD_LIBRARY_PATH (on *nix)
    # from github.com/weinman/CTCWordBeamSearch branch var_seq_len
    word_beam_search_module = tf.load_op_library('TFWordBeamSearch.so')
    beam_width = _ctc_beam_width
    with open(lexicon) as lexicon_fd:
        corpus = lexicon_fd.read().encode('utf8')

    rnn_probs = tf.nn.softmax(rnn_logits, axis=2) # decodes in expspace

    # CTCWordBeamSearch requires a non-word char. We hack this by
    # prepending a zero-prob " " entry to the rnn_probs
    rnn_probs = tf.pad( rnn_probs,
                        [[0,0],[0,0],[1,0]], # Add one slice of zeros
                        mode='CONSTANT',
                        constant_values=0.0 )
    chars = (' '+charset.out_charset).encode('utf8')

    # Assume words can be formed from all chars--if punctuation is added
    # or numbers (etc) are to be treated differently, more such 
    # categories should be added to the charset module
    wordChars = chars[1:]
            
    prediction,seq_prob = word_beam_search_module.word_beam_search(
        rnn_probs,
        sequence_length,
        beam_width,
        'Words', # Use No LM
        0.0, # Irrelevant: No LM to smooth
        corpus, # aka lexicon [are unigrams ignored?]
        chars,
        wordChars )
    prediction = prediction - 1 # Remove hacky prepended non-word char

    return prediction, seq_prob 
Example #7
Source File: MorseDecoder.py    From LSTM_morse with MIT License
def setupCTC(self):
        "create CTC loss and decoder and return them"
        # BxTxC -> TxBxC
        self.ctcIn3dTBC = tf.transpose(self.rnnOut3d, [1, 0, 2])
        # ground truth text as sparse tensor
        self.gtTexts = tf.SparseTensor(tf.compat.v1.placeholder(tf.int64, shape=[None, 2]) , tf.compat.v1.placeholder(tf.int32, [None]), tf.compat.v1.placeholder(tf.int64, [2]))

        # calc loss for batch
        self.seqLen = tf.compat.v1.placeholder(tf.int32, [None])
        self.loss = tf.reduce_mean(tf.compat.v1.nn.ctc_loss(labels=self.gtTexts, inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, ctc_merge_repeated=True))

        # calc loss for each element to compute label probability
        self.savedCtcInput = tf.compat.v1.placeholder(tf.float32, shape=[self.maxTextLen, None, len(self.charList) + 1])
        self.lossPerElement = tf.compat.v1.nn.ctc_loss(labels=self.gtTexts, inputs=self.savedCtcInput, sequence_length=self.seqLen, ctc_merge_repeated=True)

        # decoder: either best path decoding or beam search decoding
        if self.decoderType == DecoderType.BestPath:
            self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen)
        elif self.decoderType == DecoderType.BeamSearch:
            self.decoder = tf.nn.ctc_beam_search_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, beam_width=50, merge_repeated=False)
        elif self.decoderType == DecoderType.WordBeamSearch:
            # import compiled word beam search operation (see https://github.com/githubharald/CTCWordBeamSearch)
            print("Loading WordBeamSearch...")
            word_beam_search_module = tf.load_op_library('cpp/proj/TFWordBeamSearch.so')
            # prepare information about language (dictionary, characters in dataset, characters forming words) 
            chars = str().join(self.charList)
            wordChars = self.charList #open(self.modelDir+'wordCharList.txt').read().splitlines()[0]
            corpus = self.corpus
            
            # decode using the "Words" mode of word beam search
            self.decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(self.ctcIn3dTBC, dim=2), 50, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8')) 
Example #8
Source File: pipemode.py    From sagemaker-tensorflow-extensions with Apache License 2.0
def _load_plugin():
    tf_plugin_path = '/' + '/'.join(list(__file__.split('/'))[:-1] + ["libPipeModeOp.so"])
    return tf.load_op_library(tf_plugin_path) 
Example #9
Source File: test_op.py    From X-Detector with Apache License 2.0
def load_op_module(lib_name):
  """
  Load TensorFlow operator library.
  """
  # use absolute path so that ops.py can be called from other directory
  lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'build/lib{0}.so'.format(lib_name))
  # duplicate library with a random new name so that
  # a running program will not be interrupted when the original library is updated
  lib_copy_path = '/tmp/lib{0}_{1}.so'.format(str(uuid.uuid4())[:8], lib_name)
  shutil.copyfile(lib_path, lib_copy_path)
  oplib = tf.load_op_library(lib_copy_path)
  return oplib 
Example #10
Source File: ops.py    From aster with MIT License
def _load_oplib(lib_name):
  """
  Load TensorFlow operator library.
  """
  lib_path = join(dirname(realpath(__file__)), 'lib{0}{1}'.format(lib_name, FLAGS.oplib_suffix))
  assert exists(lib_path), '{0} not found'.format(lib_path)

  # duplicate library with a random new name so that
  # a running program will not be interrupted when the lib file is updated
  lib_copy_path = '/tmp/lib{0}_{1}{2}'.format(lib_name, str(uuid.uuid4())[:8], FLAGS.oplib_suffix)
  shutil.copyfile(lib_path, lib_copy_path)
  oplib = tf.load_op_library(lib_copy_path)
  return oplib 
Example #11
Source File: Model.py    From Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow with Apache License 2.0
def setupCTC(self):
        """ Create CTC loss and decoder and return them """
        # BxTxC -> TxBxC
        self.ctcIn3dTBC = tf.transpose(self.rnnOut3d, [1, 0, 2])

        # Ground truth text as sparse tensor
        with tf.name_scope('CTC_Loss'):
            self.gtTexts = tf.SparseTensor(tf.placeholder(tf.int64, shape=[
                                           None, 2]), tf.placeholder(tf.int32, [None]), tf.placeholder(tf.int64, [2]))
            # Calculate loss for batch
            self.seqLen = tf.placeholder(tf.int32, [None])
            self.loss = tf.reduce_mean(tf.nn.ctc_loss(labels=self.gtTexts, inputs=self.ctcIn3dTBC, sequence_length=self.seqLen,
                               ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=True))
        with tf.name_scope('CTC_Decoder'):
            # Decoder: Best path decoding or Word beam search decoding
            if self.decoderType == DecoderType.BestPath:
                self.decoder = tf.nn.ctc_greedy_decoder(
                    inputs=self.ctcIn3dTBC, sequence_length=self.seqLen)
            elif self.decoderType == DecoderType.BeamSearch:
                self.decoder = tf.nn.ctc_beam_search_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, beam_width=50, merge_repeated=True)
            elif self.decoderType == DecoderType.WordBeamSearch:
                # Import compiled word beam search operation (see https://github.com/githubharald/CTCWordBeamSearch)
                word_beam_search_module = tf.load_op_library(
                    './TFWordBeamSearch.so')

                # Prepare: dictionary, characters in dataset, characters forming words
                # fnCharList / fnCorpus attribute names are assumed here, matching
                # the fnWordCharList naming convention used just below
                chars = codecs.open(FilePaths.fnCharList, 'r').read()
                wordChars = codecs.open(
                    FilePaths.fnWordCharList, 'r').read()
                corpus = codecs.open(FilePaths.fnCorpus, 'r').read()

                # # Decoder using the "NGramsForecastAndSample": restrict number of (possible) next words to at most 20 words: O(W) mode of word beam search
                # decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(ctcIn3dTBC, dim=2), 25, 'NGramsForecastAndSample', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8'))

                # Decoder using the "Words": only use dictionary, no scoring: O(1) mode of word beam search
                self.decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(
                    self.ctcIn3dTBC, dim=2), 25, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8'))

        # Return a CTC operation to compute the loss and CTC operation to decode the RNN output
        return self.loss, self.decoder 
Example #12
Source File: Model.py    From SimpleHTR with MIT License
def setupCTC(self):
		"create CTC loss and decoder and return them"
		# BxTxC -> TxBxC
		self.ctcIn3dTBC = tf.transpose(self.rnnOut3d, [1, 0, 2])
		# ground truth text as sparse tensor
		self.gtTexts = tf.SparseTensor(tf.placeholder(tf.int64, shape=[None, 2]) , tf.placeholder(tf.int32, [None]), tf.placeholder(tf.int64, [2]))

		# calc loss for batch
		self.seqLen = tf.placeholder(tf.int32, [None])
		self.loss = tf.reduce_mean(tf.nn.ctc_loss(labels=self.gtTexts, inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, ctc_merge_repeated=True))

		# calc loss for each element to compute label probability
		self.savedCtcInput = tf.placeholder(tf.float32, shape=[Model.maxTextLen, None, len(self.charList) + 1])
		self.lossPerElement = tf.nn.ctc_loss(labels=self.gtTexts, inputs=self.savedCtcInput, sequence_length=self.seqLen, ctc_merge_repeated=True)

		# decoder: either best path decoding or beam search decoding
		if self.decoderType == DecoderType.BestPath:
			self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen)
		elif self.decoderType == DecoderType.BeamSearch:
			self.decoder = tf.nn.ctc_beam_search_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, beam_width=50, merge_repeated=False)
		elif self.decoderType == DecoderType.WordBeamSearch:
			# import compiled word beam search operation (see https://github.com/githubharald/CTCWordBeamSearch)
			word_beam_search_module = tf.load_op_library('TFWordBeamSearch.so')

			# prepare information about language (dictionary, characters in dataset, characters forming words) 
			chars = str().join(self.charList)
			wordChars = open('../model/wordCharList.txt').read().splitlines()[0]
			corpus = open('../data/corpus.txt').read()

			# decode using the "Words" mode of word beam search
			self.decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(self.ctcIn3dTBC, dim=2), 50, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8')) 
Example #13
Source File: render_sprites_ops.py    From auto_yolo with MIT License
def render_sprites_so():
    global _render_sprites_so
    if _render_sprites_so is None:
        loc = os.path.join(os.path.split(__file__)[0], "_render_sprites.so")
        print("\nLoading render_sprites library at {}.".format(loc))
        _render_sprites_so = tf.load_op_library(loc)
        print("Success.\n")

    return _render_sprites_so 
Example #14
Source File: resampler_edge_ops.py    From auto_yolo with MIT License
def resampler_edge_so():
    global _resampler_edge_so
    if _resampler_edge_so is None:
        loc = os.path.join(os.path.split(__file__)[0], "_resampler_edge.so")
        print("\nLoading resampler_edge library at {}.".format(loc))
        _resampler_edge_so = tf.load_op_library(loc)
        print("Success.\n")
    return _resampler_edge_so 
Example #15
Source File: register_test.py    From addons with Apache License 2.0
def test_get_all_shared_objects():
    if resource_loader.SKIP_CUSTOM_OPS:
        pytest.skip(
            "Skipping the test because a custom ops "
            "was being loaded while --skip-custom-ops was set."
        )
    all_shared_objects = _get_all_shared_objects()
    assert len(all_shared_objects) >= 4

    for file in all_shared_objects:
        tf.load_op_library(file) 
Example #16
Source File: light_head_rfcn_train.py    From X-Detector with Apache License 2.0
def load_op_module(lib_name):
  """
  Load TensorFlow operator library.
  """
  # use absolute path so that ops.py can be called from other directory
  if FLAGS.run_on_cloud:
      lib_path = os.path.join(FLAGS.data_dir, 'lib{0}.so'.format(lib_name))
      tf.gfile.Copy(lib_path, './' + 'lib{0}.so'.format(lib_name), overwrite=True)
  return tf.load_op_library('./' + 'lib{0}.so'.format(lib_name)) 
Example #17
Source File: topn_ops.py    From deep_image_model with Apache License 2.0
def Load():
  """Load the TopN ops library and return the loaded module."""
  with _ops_lock:
    global _topn_ops
    if not _topn_ops:
      ops_path = tf.resource_loader.get_path_to_datafile(TOPN_OPS_FILE)
      tf.logging.info('data path: %s', ops_path)
      _topn_ops = tf.load_op_library(ops_path)

      assert _topn_ops, 'Could not load topn_ops.so'
  return _topn_ops 
Example #18
Source File: zero_out_1_test.py    From deep_image_model with Apache License 2.0
def testLoadTwice(self):
    zero_out_loaded_again = tf.load_op_library(os.path.join(
        tf.resource_loader.get_data_files_path(), 'zero_out_op_kernel_1.so'))
    self.assertEqual(zero_out_loaded_again, zero_out_op_1._zero_out_module) 
Example #19
Source File: ackermann_test.py    From deep_image_model with Apache License 2.0
def testBasic(self):
    library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
                                    'ackermann_op.so')
    ackermann = tf.load_op_library(library_filename)

    self.assertEqual(len(ackermann.OP_LIST.op), 1)
    self.assertEqual(ackermann.OP_LIST.op[0].name, 'Ackermann')

    with self.test_session():
      self.assertEqual(ackermann.ackermann().eval(), b'A(m, 0) == A(m-1, 1)') 
Example #20
Source File: duplicate_op_test.py    From deep_image_model with Apache License 2.0
def testBasic(self):
    library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
                                    'duplicate_op.so')
    duplicate = tf.load_op_library(library_filename)

    self.assertEqual(len(duplicate.OP_LIST.op), 0)

    with self.test_session():
      self.assertEqual(tf.add(1, 41).eval(), 42) 
Example #21
Source File: invalid_op_test.py    From deep_image_model with Apache License 2.0
def testBasic(self):
    library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
                                    'invalid_op.so')
    with self.assertRaises(tf.errors.InvalidArgumentError):
      tf.load_op_library(library_filename) 
Example #22
Source File: graph_transform.py    From parallax with Apache License 2.0
def graph_transform_mpi(single_gpu_meta_graph_def, config,
                        op_library_path=None):
    if op_library_path is not None:
        tf.load_op_library(op_library_path)

    with tf.Graph().as_default() as replica:
        tf.train.import_meta_graph(single_gpu_meta_graph_def)

        tensor_or_op_name_to_replica_names = {}
        for op in replica.get_operations():
            tensor_or_op_name_to_replica_names[op.name] = [op.name]
            for output in op.outputs:
                tensor_or_op_name_to_replica_names[output.name] = [output.name]

        # Initialize horovod
        hvd.init()

        num_workers = hvd.size()
        worker_id = hvd.rank()
        update_shard_values_for_worker(num_workers, worker_id)

        op_to_control_consumer_ops = get_all_control_consumers(replica)
        trainable_variable_ops = [var.op for var in tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES)]

        for gradients_info in tf.get_collection(tf.GraphKeys.GRADIENTS_INFO):
            target_tensor = gradients_info._target
            if target_tensor.op not in trainable_variable_ops:
                parallax_log.debug(
                    "Gradient for non-trainable variable %s is created, ignore"
                    % target_tensor.op.name)
                continue

            _add_aggregation_ops(gradients_info, op_to_control_consumer_ops, config)
        _add_broadcast_ops()

    return tf.train.export_meta_graph(graph=replica), \
           tensor_or_op_name_to_replica_names 
Example #23
Source File: zero_out_1_test.py    From tensorflow-kr with Apache License 2.0
def testLoadTwice(self):
    zero_out_loaded_again = tf.load_op_library(os.path.join(
        tf.resource_loader.get_data_files_path(), 'zero_out_op_kernel_1.so'))
    self.assertEqual(zero_out_loaded_again, zero_out_op_1._zero_out_module) 
Example #24
Source File: __init__.py    From tf_kaldi_io with MIT License
def find_kaldi_io_library():
    """Check that libtf_kaldi_io.so can be found. If it can, ensure that
    Tensorflow's tf.load_op_library() can find it by potentially adding it to
    the LD_LIBRARY_PATH as necessary.

    If it is not found, raise a helpful and informative error."""
    try:
        libtf_kaldi_io = resource_filename(__package__, "libtf_kaldi_io.so")
        found = os.path.isfile(libtf_kaldi_io)
    except ImportError:
        # If we can't import tf_kaldi_io, definitely can't get its resources.
        found = False

    if found:
        # If we have a libtf_kaldi_io.so from the tf_kaldi_io Python package,
        # then ensure it gets on the path. We stick it on the front of the
        # path, because it would be confusing if a tf_kaldi_io package used a
        # libtf_kaldi_io.so that didn't correspond to it, just because the user
        # happened to have a custom LD_LIBRARY_PATH set.
        old_ld_library_path = os.environ.get("LD_LIBRARY_PATH", "")
        lib_dir = os.path.dirname(libtf_kaldi_io)
        os.environ["LD_LIBRARY_PATH"] = lib_dir + ":" + old_ld_library_path

    # Ensure that at this point, no matter what, Tensorflow should be able to
    # load libtf_kaldi_io.so as an op library.
    kaldi_io_lib_paths = find_shared_library("tf_kaldi_io")
    if kaldi_io_lib_paths:
        return kaldi_io_lib_paths["libtf_kaldi_io.so"]
    else:
        raise RuntimeError(MISSING_LIBRARY_ERROR)
Example #25
Source File: modellib.py    From rec-attend-public with MIT License
def f_segm_match(iou, s_gt):
  """Matching between segmentation output and groundtruth.
  Args:
    y_out: [B, T, H, W], output segmentations
    y_gt: [B, T, H, W], groundtruth segmentations
    s_gt: [B, T], groudtruth score sequence
  """
  global hungarian_module
  if hungarian_module is None:
    mod_name = './hungarian.so'
    hungarian_module = tf.load_op_library(mod_name)
    log.info('Loaded library "{}"'.format(mod_name))

  # Mask X, [B, M] => [B, 1, M]
  mask_x = tf.expand_dims(s_gt, dim=1)
  # Mask Y, [B, M] => [B, N, 1]
  mask_y = tf.expand_dims(s_gt, dim=2)
  iou_mask = iou * mask_x * mask_y

  # Keep certain precision so that we can get optimal matching within
  # reasonable time.
  eps = 1e-5
  precision = 1e6
  iou_mask = tf.round(iou_mask * precision) / precision
  match_eps = hungarian_module.hungarian(iou_mask + eps)[0]

  # [1, N, 1, 1]
  s_gt_shape = tf.shape(s_gt)
  num_segm_out = s_gt_shape[1]
  num_segm_out_mul = tf.pack([1, num_segm_out, 1])
  # Mask the graph algorithm output.
  match = match_eps * mask_x * mask_y

  return match 
Example #26
Source File: resource_loader.py    From addons with Apache License 2.0
def ops(self):
        if SKIP_CUSTOM_OPS:
            import pytest

            pytest.skip(
                "Skipping the test because a custom ops "
                "was being loaded while --skip-custom-ops was set."
            )
        if self._ops is None:
            self.display_warning_if_incompatible()
            self._ops = tf.load_op_library(get_path_to_datafile(self.relative_path))
        return self._ops 
Example #27
Source File: light_head_rfcn_eval.py    From X-Detector with Apache License 2.0
def load_op_module(lib_name):
  """
  Load TensorFlow operator library.
  """
  # use absolute path so that ops.py can be called from other directory
  if FLAGS.run_on_cloud:
      lib_path = os.path.join(FLAGS.data_dir, 'lib{0}.so'.format(lib_name))
      tf.gfile.Copy(lib_path, './' + 'lib{0}.so'.format(lib_name), overwrite=True)
  return tf.load_op_library('./' + 'lib{0}.so'.format(lib_name)) 
Example #28
Source File: modules.py    From PlaneNet with MIT License
def crfrnnModule(inputs, image_dims, num_classes, theta_alpha, theta_beta, theta_gamma, num_iterations):
    custom_module = tf.load_op_library('./cpp/high_dim_filter.so')
    import high_dim_filter_grad  # Register gradients for the custom op

    weights = np.load('weights.npy')
    weights = [weights[0], weights[1], weights[2]]
    spatial_ker_weights = tf.Variable(weights[0][:num_classes, :num_classes], name='spatial_ker_weights', trainable=True)
    bilateral_ker_weights = tf.Variable(weights[1][:num_classes, :num_classes], name='bilateral_ker_weights', trainable=True)
    compatibility_matrix = tf.Variable(weights[2][:num_classes, :num_classes], name='compatibility_matrix', trainable=True)
    

    batchSize = int(inputs[0].shape[0])
    c, h, w = num_classes, image_dims[0], image_dims[1]
    all_ones = np.ones((c, h, w), dtype=np.float32)

    outputs = []
    for batchIndex in xrange(batchSize):
        unaries = tf.transpose(inputs[0][batchIndex, :, :, :], perm=(2, 0, 1))
        rgb = tf.transpose(inputs[1][batchIndex, :, :, :], perm=(2, 0, 1))


        # Prepare filter normalization coefficients
        spatial_norm_vals = custom_module.high_dim_filter(all_ones, rgb, bilateral=False,
                                                          theta_gamma=theta_gamma)
        bilateral_norm_vals = custom_module.high_dim_filter(all_ones, rgb, bilateral=True,
                                                            theta_alpha=theta_alpha,
                                                            theta_beta=theta_beta)
        q_values = unaries

        for i in range(num_iterations):
            softmax_out = tf.nn.softmax(q_values, dim=0)

            # Spatial filtering
            spatial_out = custom_module.high_dim_filter(softmax_out, rgb, bilateral=False,
                                                        theta_gamma=theta_gamma)
            spatial_out = spatial_out / spatial_norm_vals

            # Bilateral filtering
            bilateral_out = custom_module.high_dim_filter(softmax_out, rgb, bilateral=True,
                                                          theta_alpha=theta_alpha,
                                                          theta_beta=theta_beta)
            bilateral_out = bilateral_out / bilateral_norm_vals

            # Weighting filter outputs
            message_passing = (tf.matmul(spatial_ker_weights,
                                         tf.reshape(spatial_out, (c, -1))) +
                               tf.matmul(bilateral_ker_weights,
                                         tf.reshape(bilateral_out, (c, -1))))

            # Compatibility transform
            pairwise = tf.matmul(compatibility_matrix, message_passing)

            # Adding unary potentials
            pairwise = tf.reshape(pairwise, (c, h, w))
            q_values = unaries - pairwise
            continue
        outputs.append(tf.transpose(tf.reshape(q_values, (1, c, h, w)), perm=(0, 2, 3, 1)))
        continue
    outputs = tf.concat(outputs, axis=0)
    return outputs 
Example #29
Source File: ctc-test.py    From OpenSeq2Seq with Apache License 2.0
def test_beam_decoders(self):
    '''
    Test on random data that custom decoder outputs the same transcript
    if its parameters are equal to zero: alpha = beta = trie_weight = 0.0
    '''
    np.random.seed(1234)
    logits = tf.constant(np.random.uniform(size=self.seq.shape).astype(np.float32))
    seq_len = tf.constant([self.seq.shape[0]])

    beam_search_decoded = tf.nn.ctc_beam_search_decoder(logits, seq_len,
        beam_width=self.beam_width,
        top_paths=1,
        merge_repeated=False)

    custom_op_module = tf.load_op_library('ctc_decoder_with_lm/libctc_decoder_with_kenlm.so')
    decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
        custom_op_module.ctc_beam_search_decoder_with_lm(
            logits, seq_len, beam_width=self.beam_width,
            model_path='ctc_decoder_with_lm/ctc-test-lm.binary',
            trie_path='ctc_decoder_with_lm/ctc-test-lm.trie',
            alphabet_path='open_seq2seq/test_utils/toy_speech_data/vocab.txt',
            alpha=0.0,
            beta=0.0,
            trie_weight=0.0,
            top_paths=1, merge_repeated=False
        )
    )

    with tf.Session() as sess:
      res_beam, res_ixs, res_vals, res_probs = sess.run([beam_search_decoded,
          decoded_ixs, decoded_vals, log_probabilities])

    decoded_beam, prob_beam = res_beam
    prob1 = prob_beam[0][0]
    decoded_text1 = ''.join([self.vocab[c] for c in decoded_beam[0].values])

    prob2 = res_probs[0][0]
    if tf.__version__ >= '1.11':
      # works for newer versions only (with CTC decoder fix)
      self.assertTrue( abs(prob1 - prob2) < self.tol )
    self.assertTrue( prob2 < 0 )
    decoded_text2 = ''.join([self.vocab[c] for c in res_vals[0]])

    self.assertTrue( decoded_text1 == decoded_text2 ) 
Example #30
Source File: ctc-test.py    From OpenSeq2Seq with Apache License 2.0
def test_decoders(self):
    '''
    Test all CTC decoders on a sample transcript ('ten seconds').
    Standard TF decoders should output 'then seconds'.
    Custom CTC decoder with LM rescoring should yield 'ten seconds'.
    '''
    logits = tf.constant(self.seq)
    seq_len = tf.constant([self.seq.shape[0]])

    greedy_decoded = tf.nn.ctc_greedy_decoder(logits, seq_len, 
        merge_repeated=True)

    beam_search_decoded = tf.nn.ctc_beam_search_decoder(logits, seq_len, 
        beam_width=self.beam_width, 
        top_paths=1, 
        merge_repeated=False)

    custom_op_module = tf.load_op_library('ctc_decoder_with_lm/libctc_decoder_with_kenlm.so')
    decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
        custom_op_module.ctc_beam_search_decoder_with_lm(
            logits, seq_len, beam_width=self.beam_width,
            model_path='ctc_decoder_with_lm/ctc-test-lm.binary', 
            trie_path='ctc_decoder_with_lm/ctc-test-lm.trie',
            alphabet_path='open_seq2seq/test_utils/toy_speech_data/vocab.txt',
            alpha=2.0,
            beta=0.5,
            trie_weight=0.1,
            top_paths=1, merge_repeated=False
        )
    )

    with tf.Session() as sess:
      res_greedy, res_beam, res_ixs, res_vals, res_probs = sess.run([greedy_decoded, 
          beam_search_decoded, decoded_ixs, decoded_vals, log_probabilities])

    decoded_greedy, prob_greedy = res_greedy
    decoded_text = ''.join([self.vocab[c] for c in decoded_greedy[0].values])
    self.assertTrue( abs(7079.117 + prob_greedy[0][0]) < self.tol )
    self.assertTrue( decoded_text == 'then seconds' )

    decoded_beam, prob_beam = res_beam
    decoded_text = ''.join([self.vocab[c] for c in decoded_beam[0].values])
    if tf.__version__ >= '1.11':
      # works for newer versions only (with CTC decoder fix)
      self.assertTrue( abs(1.1842575 + prob_beam[0][0]) < self.tol )
    self.assertTrue( decoded_text == 'then seconds' )

    self.assertTrue( abs(4.619581 + res_probs[0][0]) < self.tol )
    decoded_text = ''.join([self.vocab[c] for c in res_vals[0]])
    self.assertTrue( decoded_text == self.label )