Python tensorflow.string_to_number() Examples
The following are 29 code examples of tensorflow.string_to_number(), drawn from open-source projects. The original project and source file are noted above each example.
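Before the project examples, here is a minimal, self-contained sketch of the op itself (an illustration written for this page, not taken from any project below). tf.string_to_number converts each element of a string tensor to the requested numeric dtype; strings that fail to parse raise an error at run time, as Example #4 demonstrates. The sketch assumes TensorFlow 1.x graph mode; in TensorFlow 2.x the same op is exposed as tf.strings.to_number.

import tensorflow as tf

# Element-wise conversion; out_type defaults to tf.float32 and may be
# tf.float32, tf.float64, tf.int32, or tf.int64.
strings = tf.constant(["1", "2.5", "-7"])
numbers = tf.string_to_number(strings, out_type=tf.float32)

with tf.Session() as sess:
    print(sess.run(numbers))  # -> [ 1.   2.5 -7. ]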
Example #1
Source File: clock_data.py From deep-time-reading with MIT License
def read_image_and_label(image_label_q):
    # Returns three Tensors: the decoded PNG image, the hour, and the minute.
    filename, hour_str, minute_str = tf.decode_csv(
        image_label_q.dequeue(), [[""], [""], [""]], " ")
    file_contents = tf.read_file(filename)

    # Decode image from PNG, and cast it to a float.
    example = tf.image.decode_png(file_contents, channels=image_channels)
    image = tf.cast(example, tf.float32)

    # Set the tensor size manually from the image.
    image.set_shape([image_size, image_size, image_channels])

    # Do per-image whitening (zero mean, unit standard deviation). Without this,
    # the learning algorithm diverges almost immediately because the gradient is
    # too big.
    image = tf.image.per_image_whitening(image)

    # The label should be an integer.
    hour = tf.string_to_number(hour_str, out_type=tf.int32)
    minute = tf.string_to_number(minute_str, out_type=tf.int32)

    return image, hour, minute
Example #2
Source File: data_decoders.py From Counterfactual-StoryRW with MIT License
def decode(self, data, items):
    """Decodes the data to return the tensors specified by the list of items.

    Args:
        data: The scalar data to decode.
        items: A list of strings, each of which is the name of the resulting
            tensors to retrieve.

    Returns:
        A list of tensors, each of which corresponds to each item.
    """
    data = tf.reshape(data, shape=[])
    if data.dtype is tf.string:
        decoded_data = tf.string_to_number(data, out_type=self._dtype)
    else:
        decoded_data = tf.cast(data, self._dtype)
    outputs = {
        self._data_name: decoded_data
    }
    return [outputs[item] for item in items]
Example #3
Source File: data_util.py From reading_comprehension_tf with Apache License 2.0
def create_trg_dataset(input_dataset, input_data_type, word_vocab_index,
                       word_max_length, word_pad, word_sos, word_eos,
                       word_placeholder_enable, num_parallel):
    """create dataset for input target data"""
    dataset = input_dataset
    if input_data_type == "span":
        dataset = dataset.map(
            lambda span: tf.string_split([span], delimiter='|').values,
            num_parallel_calls=num_parallel)
        dataset = dataset.map(
            lambda span: tf.string_to_number(span, out_type=tf.int32),
            num_parallel_calls=num_parallel)
        dataset = dataset.map(
            lambda span: tf.expand_dims(span, axis=-1),
            num_parallel_calls=num_parallel)
    elif input_data_type == "text":
        dataset = dataset.map(
            lambda sent: generate_word_feat(sent, word_vocab_index,
                                            word_max_length, word_pad,
                                            word_sos, word_eos,
                                            word_placeholder_enable),
            num_parallel_calls=num_parallel)

    return dataset
Example #4
Source File: string_to_number_op_test.py From deep_image_model with Apache License 2.0
def testToInt32(self):
    with self.test_session():
        input_string = tf.placeholder(tf.string)
        output = tf.string_to_number(input_string, out_type=tf.int32)
        result = output.eval(feed_dict={
            input_string: ["0", "3", "-1", " -10",
                           "-2147483648", "2147483647"]
        })
        self.assertAllEqual([0, 3, -1, -10, -2147483648, 2147483647], result)

        with self.assertRaisesOpError(_ERROR_MESSAGE + "2.9"):
            output.eval(feed_dict={input_string: ["2.9"]})

        # The next two exceed maximum value of int32.
        for in_string in ["-2147483649", "2147483648"]:
            with self.assertRaisesOpError(_ERROR_MESSAGE + in_string):
                output.eval(feed_dict={input_string: [in_string]})
Example #5
Source File: fbms_data_utils.py From unsupervised_detection with MIT License
def test_dataset_map(self, input_queue):
    fname_1, fname_2, annotation_fname, samples_per_cat = \
        input_queue[0], input_queue[1], input_queue[2], input_queue[3]
    samples_per_cat = tf.string_to_number(samples_per_cat)

    file_content = tf.read_file(fname_1)
    image_1 = tf.image.decode_jpeg(file_content, channels=3)
    image_1 = self.preprocess_image(image_1)

    file_content = tf.read_file(fname_2)
    image_2 = tf.image.decode_jpeg(file_content, channels=3)
    image_2 = self.preprocess_image(image_2)

    file_content = tf.read_file(annotation_fname)
    seg_1 = tf.image.decode_jpeg(file_content, channels=1)
    seg_1 = self.preprocess_mask(seg_1)

    # Cropping preprocess
    image_1 = self.central_cropping(image_1, self.test_crop)
    image_2 = self.central_cropping(image_2, self.test_crop)
    seg_1 = self.central_cropping(seg_1, self.test_crop)

    return image_1, image_2, seg_1, fname_1, samples_per_cat
Example #6
Source File: datasets.py From self-supervision with BSD 3-Clause "New" or "Revised" License
def _imagenet_load_file(path, epochs=None, shuffle=True, seed=0,
                        subset='train', prepare_path=True):
    IMAGENET_ROOT = os.environ.get('IMAGENET_DIR', '')
    if not isinstance(path, list):
        path = [path]
    filename_queue = tf.train.string_input_producer(
        path, num_epochs=epochs, shuffle=shuffle, seed=seed)
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    image_path, label_str = tf.decode_csv(
        value, record_defaults=[[''], ['']], field_delim=' ')

    if prepare_path:
        image_abspath = IMAGENET_ROOT + '/images/' + subset + image_path
    else:
        image_abspath = image_path

    image_content = tf.read_file(image_abspath)
    image = decode_image(image_content, channels=3)
    image.set_shape([None, None, 3])

    imgshape = tf.shape(image)[:2]
    label = tf.string_to_number(label_str, out_type=tf.int32)

    return image, label, imgshape, image_path
Example #7
Source File: data_decoders.py From texar with Apache License 2.0
def decode(self, data, items):
    """Decodes the data to return the tensors specified by the list of items.

    Args:
        data: The scalar data to decode.
        items: A list of strings, each of which is the name of the resulting
            tensors to retrieve.

    Returns:
        A list of tensors, each of which corresponds to each item.
    """
    data = tf.reshape(data, shape=[])
    if data.dtype is tf.string:
        decoded_data = tf.string_to_number(data, out_type=self._dtype)
    else:
        decoded_data = tf.cast(data, self._dtype)
    outputs = {
        self._data_name: decoded_data
    }
    return [outputs[item] for item in items]
Example #8
Source File: MapillaryLike_instance.py From PReMVOS with MIT License
def postproc_annotation(self, ann_filename, ann):
    id_str = tf.string_split([ann_filename], ':').values[1]
    id_ = tf.string_to_number(id_str, out_type=tf.int32)
    ann_postproc = tf.cast(tf.equal(tf.cast(ann, tf.int32), id_), tf.uint8)
    return ann_postproc
Example #9
Source File: main.py From NAO with GNU General Public License v3.0
def predict_input_fn(predict_from_file):
    dataset = tf.data.TextLineDataset(predict_from_file)

    def decode_record(record):
        src = tf.string_split([record]).values
        src = tf.string_to_number(src, out_type=tf.int32)
        return src, tf.constant([SOS], dtype=tf.int32)

    dataset = dataset.map(decode_record)
    dataset = dataset.batch(FLAGS.batch_size)
    iterator = dataset.make_one_shot_iterator()
    inputs, targets_inputs = iterator.get_next()
    assert inputs.shape.ndims == 2
    return inputs, targets_inputs
Example #10
Source File: decoder_main.py From NAO with GNU General Public License v3.0
def predict_from_file(estimator, batch_size, decode_from_file, decode_to_file=None):

    def infer_input_fn():
        sos_id = tf.constant([SOS], dtype=tf.int32)
        dataset = tf.data.TextLineDataset(decode_from_file)

        def decode_record(record):
            src = tf.string_split([record]).values
            src = tf.string_to_number(src, out_type=tf.float32)
            return src, tf.constant([SOS], dtype=tf.int32)

        dataset = dataset.map(decode_record)
        dataset = dataset.batch(FLAGS.batch_size)
        iterator = dataset.make_one_shot_iterator()
        inputs, targets_inputs = iterator.get_next()
        assert inputs.shape.ndims == 2
        # assert targets_inputs.shape.ndims == 2
        return {
            'inputs': inputs,
            'targets_inputs': targets_inputs,
            'targets': None,
        }, None

    results = []
    result_iter = estimator.predict(infer_input_fn)
    for result in result_iter:
        output = result['output'].flatten()
        output = ' '.join(map(str, output))
        tf.logging.info('Inference results OUTPUT: %s' % output)
        results.append(output)

    if decode_to_file:
        output_filename = decode_to_file
    else:
        output_filename = '%s.result' % decode_from_file

    tf.logging.info('Writing results into {0}'.format(output_filename))
    with tf.gfile.Open(output_filename, 'w') as f:
        for res in results:
            f.write('%s\n' % res)
Example #11
Source File: file_util.py From multi-label-classification with MIT License
def _parse_string_line(string_line, root_path):
    """Parse one line of the text file into an image path (joined with the
    image root directory) and its labels.

    :param string_line: one line of text, formatted as
        "image_name label0 label1 label2 label3 ..."
    :param root_path: root directory of the images
    :return: DatasetV1Adapter<(image path Tensor(shape=(), dtype=string),
        label Tensor(shape=(?,), dtype=float32))>
    """
    strings = tf.string_split([string_line], delimiter=' ').values
    image_path = tf.string_join([root_path, strings[0]], separator=os.sep)
    labels = tf.string_to_number(strings[1:])
    return image_path, labels
Example #12
Source File: dataset_builder_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
                                           config)
    dataset = dataset.map(decode_func)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #13
Source File: dataset_util_test.py From Elphas with Apache License 2.0
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_util.read_dataset(
        tf.data.TextLineDataset, decode_func, files, config)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #14
Source File: dataset_builder_test.py From multilabel-image-classification-tensorflow with MIT License
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
                                           config)
    dataset = dataset.map(decode_func)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #15
Source File: dataset_util_test.py From AniSeg with Apache License 2.0
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_util.read_dataset(
        tf.data.TextLineDataset, decode_func, files, config)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #16
Source File: LoadDemo.py From deep_learning with MIT License
def input_fn(filenames="./data/knowledge.txt", batch_size=32, epoch_num=None,
             shuffle_size=256):
    dataset = tf.data.TextLineDataset(filenames)

    def clean_data(line):
        columns_data = tf.string_split([line], '\t')
        # Convert the label column to a tensor of floats.
        labels = tf.string_to_number(columns_data.values[1], out_type=tf.float32)
        splits_data = columns_data.values[2]

        def split_word(text):
            text = text.decode()
            print(text)
            text = rules.sub("", text)
            text = text.strip()
            tokens = jieba.lcut(text)
            print(tokens)
            if len(tokens) == 0:
                tokens = ["未知空"]  # fallback token for empty results
            # The dataset requires elements of equal length.
            return tokens[:1]

        # tf.py_func applies an ordinary Python function to tensors.
        result = tf.py_func(split_word, [splits_data], [tf.string])
        return {"context": result}, labels

    dataset = dataset.map(lambda line: clean_data(line))

    # Shuffling randomizes the data; the larger shuffle_size is, the more
    # thorough the mixing.
    if shuffle_size > 0:
        if epoch_num:
            # repeat() cycles through the dataset the given number of times.
            dataset = dataset.shuffle(shuffle_size).repeat(epoch_num)
        else:
            dataset = dataset.shuffle(shuffle_size).repeat()

    # Emit batch_size rows in order; the final batch may be smaller.
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
Example #17
Source File: DeepFM.py From deep_learning with MIT License
def input_fn(filenames, batch_size=64, num_epochs=None, shuffle_buffer_size=None):

    def decode_data(line):
        # line = "1 1:0.5 2:0.03519 3:1 4:0.02567 7:0.03708 8:0.01705 9:0.06296
        #         10:0.18185 11:0.02497 12:1 14:0.02565 15:0.03267 17:0.0247
        #         18:0.03158 20:1 22:1 23:0.13169 24:0.02933 27:0.18159 31:0.0177
        #         34:0.02888 38:1 51:1 63:1 132:1 164:1 236:1"
        # Split the line into the label and the feature fields.
        columns_data = tf.string_split([line], ' ')
        labels = tf.string_to_number(columns_data.values[0], out_type=tf.float32)
        splits_data = tf.string_split(columns_data.values[1:], ':')
        id_vals = tf.reshape(splits_data.values, splits_data.dense_shape)
        feat_ids, feat_vals = tf.split(id_vals, num_or_size_splits=2, axis=1)
        feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)
        feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)
        return {"feat_ids": feat_ids, "feat_vals": feat_vals}, labels

    dataset = tf.data.TextLineDataset(filenames).map(
        decode_data, num_parallel_calls=10).prefetch(1000)
    # Guard against the default of None before comparing with 0.
    if shuffle_buffer_size and shuffle_buffer_size > 0:
        dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
    # num_epochs=None repeats the dataset indefinitely.
    dataset = dataset.repeat(num_epochs)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels
Example #18
Source File: dataset_builder_test.py From MAX-Object-Detector with Apache License 2.0
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
                                           config)
    dataset = dataset.map(decode_func)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #19
Source File: tf_utils.py From deepsignal with GNU General Public License v3.0
def parse_a_line(line):

    def _kmer2code(kmer_bytes):
        return np.array([base2code_dna[x] for x in kmer_bytes.decode("utf-8")],
                        np.int32)

    words = tf.decode_csv(line, [[""]] * 12, "\t")

    kmer = tf.py_func(_kmer2code, [words[6]], tf.int32)
    base_mean = tf.string_to_number(
        tf.string_split([words[7]], ",").values, tf.float32)
    base_std = tf.string_to_number(
        tf.string_split([words[8]], ",").values, tf.float32)
    base_signal_len = tf.string_to_number(
        tf.string_split([words[9]], ",").values, tf.int32)
    cent_signals = tf.string_to_number(
        tf.string_split([words[10]], ",").values, tf.float32)
    label = tf.string_to_number(words[11], tf.int32)

    return kmer, base_mean, base_std, base_signal_len, cent_signals, label
Example #20
Source File: dataset_builder_test.py From g-tensorflow-models with Apache License 2.0
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
                                           config)
    dataset = dataset.map(decode_func)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #21
Source File: MapillaryLike_instance.py From TrackR-CNN with MIT License
def postproc_annotation(self, ann_filename, ann):
    id_str = tf.string_split([ann_filename], ':').values[1]
    id_ = tf.string_to_number(id_str, out_type=tf.int32)
    ann_postproc = tf.cast(tf.equal(tf.cast(ann, tf.int32), id_), tf.uint8)
    return ann_postproc
Example #22
Source File: 2_adanet_avazu.py From deep-learning-note with MIT License
def generator(ln):
    splits = tf.string_split([ln], delimiter=',')
    label = splits.values[0]
    # Parse the dense features I1..I13.
    features = {}
    for i in range(1, 14):
        features['I' + str(i)] = tf.string_to_number(splits.values[i], tf.int64)
    return features, label
Example #23
Source File: logistic_regression.py From tf-encrypted with Apache License 2.0
def provide_data(self):

    def decode(line):
        fields = tf.string_split([line], self.field_delim).values
        if self.index:  # Skip index
            fields = fields[1:]
        fields = tf.regex_replace(fields, "|".join(self.na_values), "nan")
        fields = tf.string_to_number(fields, tf.float32)
        return fields

    def fill_na(fields, fill_values):
        fields = tf.where(tf.is_nan(fields), fill_values, fields)
        return fields

    dataset = tf.data.TextLineDataset(self.local_data_file)
    if self.header:  # Skip header
        dataset = dataset.skip(1)
    dataset = (
        dataset.map(decode)
        .map(lambda x: fill_na(x, self.data_schema.field_defaults))
        .repeat()
        .batch(self.batch_size)
    )
    iterator = dataset.make_one_shot_iterator()
    batch = iterator.get_next()
    batch = tf.reshape(batch, [self.batch_size, self.data_schema.field_num])
    return batch
Example #24
Source File: dataset_builder_test.py From vehicle_counting_tensorflow with MIT License
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
                                           config)
    dataset = dataset.map(decode_func)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #25
Source File: dataset_util_test.py From ros_people_object_detection_tensorflow with Apache License 2.0
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_util.read_dataset(
        tf.data.TextLineDataset, decode_func, files, config)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #26
Source File: model_dummies.py From nucleus7 with Mozilla Public License 2.0
def read(self, **data):
    return {k: tf.string_to_number(v, tf.float32) for k, v in data.items()}
Example #27
Source File: dataset_util_test.py From Person-Detection-and-Tracking with MIT License
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_util.read_dataset(
        tf.data.TextLineDataset, decode_func, files, config)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example #28
Source File: ops.py From tfdeploy with MIT License
def test_StringToNumber(self):
    t = tf.string_to_number(list("0123456789"))
    self.check(t)

#
# shapes and shaping
#
Example #29
Source File: NFM.py From ML_CIA with MIT License
def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
    print('Parsing', filenames)

    def decode_libsvm(line):
        columns = tf.string_split([line], ' ')
        labels = tf.string_to_number(columns.values[0], out_type=tf.float32)
        splits = tf.string_split(columns.values[1:], ':')
        id_vals = tf.reshape(splits.values, splits.dense_shape)
        feat_ids, feat_vals = tf.split(id_vals, num_or_size_splits=2, axis=1)
        feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)
        feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)
        return {"feat_ids": feat_ids, "feat_vals": feat_vals}, labels

    # Extract lines from the input files using the Dataset API; accepts a
    # single filename or a list of filenames.
    dataset = tf.data.TextLineDataset(filenames).map(
        decode_libsvm, num_parallel_calls=10).prefetch(1000)

    # Randomizes input using a window of 256 elements (read into memory).
    if perform_shuffle:
        dataset = dataset.shuffle(buffer_size=256)

    # Repeat num_epochs times; this keeps separate epochs from blending together.
    dataset = dataset.repeat(num_epochs)
    dataset = dataset.batch(batch_size)  # Batch size to use
    iterator = dataset.make_one_shot_iterator()
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels