Python onnxruntime.InferenceSession() Examples
The following are 30 code examples of onnxruntime.InferenceSession(), drawn from open-source projects; the source file, project, and license are listed above each example.
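Before the individual examples, here is a minimal sketch of the pattern they all share: build an InferenceSession from a serialized model (a file path or the bytes of a ModelProto), inspect the input metadata, and call run(). The file name "model.onnx", the input shape, and the provider list are placeholder assumptions for illustration, not taken from any example below.

import numpy as np
import onnxruntime

# "model.onnx" is a placeholder path; recent onnxruntime releases may also
# expect an explicit providers list such as the one shown here.
sess = onnxruntime.InferenceSession("model.onnx",
                                    providers=["CPUExecutionProvider"])

# Query the model for its expected input name, shape and element type.
input_meta = sess.get_inputs()[0]
print(input_meta.name, input_meta.shape, input_meta.type)

# run() takes a list of output names (None means "all outputs") and a dict
# mapping input names to numpy arrays; the (1, 4) shape is a placeholder.
x = np.random.rand(1, 4).astype(np.float32)
outputs = sess.run(None, {input_meta.name: x})
print(outputs[0])

The examples that follow use this same pattern; they differ mainly in where the model comes from, either converted in memory (for instance with convert_sklearn or to_onnx and passed as SerializeToString() bytes) or loaded from an .onnx file on disk.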
Example #1
Source File: test_sklearn_tfidf_vectorizer_converter.py From sklearn-onnx with MIT License
def test_model_tfidf_vectorizer11(self):
    corpus = numpy.array([
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]).reshape((4, 1))
    vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
    vect.fit(corpus.ravel())
    model_onnx = convert_sklearn(vect, "TfidfVectorizer",
                                 [("input", StringTensorType())],
                                 options=self.get_options())
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        corpus, vect, model_onnx,
        basename="SklearnTfidfVectorizer11-OneOff-SklCol",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.4.0')",
    )
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': corpus.ravel()})[0]
    assert res.shape == (4, 9)
Example #2
Source File: adaptive_model.py From FARM with Apache License 2.0
def load(cls, load_dir, device, **kwargs):
    import onnxruntime
    sess_options = onnxruntime.SessionOptions()
    # Set graph optimization level to ORT_ENABLE_EXTENDED to enable bert optimization.
    sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
    # Use OpenMP optimizations. Only useful for CPU, has little impact for GPUs.
    sess_options.intra_op_num_threads = multiprocessing.cpu_count()
    onnx_session = onnxruntime.InferenceSession(str(load_dir / "model.onnx"), sess_options)

    # Prediction heads
    _, ph_config_files = cls._get_prediction_head_files(load_dir, strict=False)
    prediction_heads = []
    ph_output_type = []
    for config_file in ph_config_files:
        # An ONNX model doesn't need to have a separate neural network for PredictionHead. It only
        # uses the instance methods of the PredictionHead class, so we load with load_weights=False.
        head = PredictionHead.load(config_file, load_weights=False)
        prediction_heads.append(head)
        ph_output_type.append(head.ph_output_type)

    with open(load_dir / "model_config.json") as f:
        model_config = json.load(f)
        language = model_config["language"]

    return cls(onnx_session, prediction_heads, language, device)
Example #3
Source File: test_resnet.py From caffe-onnx with MIT License
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_shape', help="model input shape, e.g. 224 224",
                        nargs='+', default=(224, 224))
    parser.add_argument('--img_path', help="test image path",
                        type=str, default="./onnxmodel/airplane.jpg")
    parser.add_argument('--onnx_path', help="onnx model file path",
                        type=str, default="./onnxmodel/resnet50.onnx")
    args = parser.parse_args()

    input_shape = [int(x) for x in args.input_shape]  # model input size
    img_path = args.img_path
    onnx_path = args.onnx_path
    print("image path:", img_path)
    print("onnx model path:", onnx_path)

    data_input = process_image(img_path, input_shape)
    session = onnxruntime.InferenceSession(onnx_path)
    inname = [input.name for input in session.get_inputs()]
    outname = [output.name for output in session.get_outputs()]
    print("inputs name:", inname, "|| outputs name:", outname)
    data_output = session.run(outname, {inname[0]: data_input})
    output = data_output[0]
    print("Label predict: ", output.argmax())
Example #4
Source File: services.py From mead-baseline with Apache License 2.0
def load(cls, bundle, **kwargs):
    """Load a model from a bundle.

    This can be either a local model or a remote, exported model.

    :returns a Service implementation
    """
    import onnxruntime as ort
    if os.path.isdir(bundle):
        directory = bundle
    else:
        directory = unzip_files(bundle)
    model_basename = find_model_basename(directory)
    model_name = f"{model_basename}.onnx"
    vocabs = load_vocabs(directory)
    vectorizers = load_vectorizers(directory)
    # Currently nothing to do here
    labels = read_json(model_basename + '.labels')
    model = ort.InferenceSession(model_name)
    return cls(vocabs, vectorizers, model, labels)
Example #5
Source File: onnxruntime_SUT.py From inference with Apache License 2.0
def __init__(self, args):
    self.profile = args.profile
    self.options = onnxruntime.SessionOptions()
    self.options.enable_profiling = args.profile

    print("Loading ONNX model...")
    self.quantized = args.quantized
    if self.quantized:
        model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/bert_large_v1_1_fake_quant.onnx"
    else:
        model_path = "build/data/bert_tf_v1_1_large_fp32_384_v2/model.onnx"
    self.sess = onnxruntime.InferenceSession(model_path, self.options)

    print("Constructing SUT...")
    self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
    print("Finished constructing SUT.")

    self.qsl = get_squad_QSL()
Example #6
Source File: test_sklearn_nearest_neighbour_converter.py From sklearn-onnx with MIT License
def test_model_knn_regressor_double(self):
    model, X = self._fit_model(KNeighborsRegressor(n_neighbors=2))
    model_onnx = convert_sklearn(
        model, "KNN regressor",
        [("input", DoubleTensorType([None, 4]))],
        target_opset=TARGET_OPSET,
        options={id(model): {'optim': 'cdist'}},
        dtype=numpy.float64)
    self.assertIsNotNone(model_onnx)
    try:
        InferenceSession(model_onnx.SerializeToString())
    except OrtImpl as e:
        if ("Could not find an implementation for the node "
                "To_TopK:TopK(11)") in str(e):
            # onnxruntime does not declare TopK(11) for double
            return
        raise e
    dump_data_and_model(
        X.astype(numpy.float64)[:7],
        model, model_onnx,
        basename="SklearnKNeighborsRegressor64")
Example #7
Source File: test_sklearn_nearest_neighbour_converter.py From sklearn-onnx with MIT License
def test_onnx_test_knn_transform(self):
    iris = datasets.load_iris()
    X, _ = iris.data, iris.target
    X_train, X_test = train_test_split(X, random_state=11)
    clr = NearestNeighbors(n_neighbors=3, radius=None)
    clr.fit(X_train)

    for to in (9, 10, 11):
        if to > onnx_opset_version():
            break
        model_def = to_onnx(clr, X_train.astype(numpy.float32),
                            target_opset=to)
        oinf = InferenceSession(model_def.SerializeToString())

        X_test = X_test[:3]
        y = oinf.run(None, {'X': X_test.astype(numpy.float32)})
        dist, ind = clr.kneighbors(X_test)

        assert_almost_equal(dist, DataFrame(y[1]).values, decimal=5)
        assert_almost_equal(ind, y[0])
Example #8
Source File: test_algebra_symbolic.py From sklearn-onnx with MIT License
def test_algebra_normalizer_shape(self):
    op = OnnxNormalizer('I0', norm='L1', op_version=1, output_names=['O0'])
    onx = op.to_onnx({'I0': numpy.ones((1, 2), dtype=numpy.float32)},
                     outputs=[('O0', FloatTensorType((None, 2)))])
    assert onx is not None
    sonx = str(onx)
    assert "ai.onnx.ml" in sonx
    assert "version: 1" in sonx

    import onnxruntime as ort
    sess = ort.InferenceSession(onx.SerializeToString())
    X = numpy.array([[0, 2], [0, -2]])
    exp = numpy.array([[0, 1], [0, -1]])
    Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
    assert_almost_equal(exp, Y)
Example #9
Source File: test_sklearn_concat.py From sklearn-onnx with MIT License
def _predict(session: rt.InferenceSession, data: pd.DataFrame) -> pd.Series:
    def _correctly_typed_column(column: pd.Series) -> pd.Series:
        if column.dtype in ['float64']:
            return column.astype(np.float32)
        return column

    def _correctly_shaped_values(values):
        return values.reshape((values.shape[0], 1))

    inputs = {
        c: _correctly_shaped_values(_correctly_typed_column(data[c]).values)
        for c in data.columns
    }

    return pd.Series(
        session.run(None, inputs)[0].reshape(-1),
        index=data.index
    )
Example #10
Source File: test_sklearn_glm_classifier_converter.py From sklearn-onnx with MIT License
def test_model_logistic_regression_binary_class(self):
    model, X = fit_classification_model(
        linear_model.LogisticRegression(max_iter=100), 2)
    model_onnx = convert_sklearn(
        model, "logistic regression",
        [("input", FloatTensorType([None, X.shape[1]]))])
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X, model, model_onnx,
        basename="SklearnLogitisticRegressionBinary",
        # Operator cast-1 is not implemented in onnxruntime
        allow_failure="StrictVersion(onnx.__version__)"
                      " < StrictVersion('1.3') or "
                      "StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
    )
    if StrictVersion(ort_version) >= StrictVersion("1.0.0"):
        sess = InferenceSession(model_onnx.SerializeToString())
        out = sess.get_outputs()
        lb = out[0].type
        sh = out[0].shape
        self.assertEqual(str(lb), "tensor(int64)")
        self.assertEqual(sh, [None])
Example #11
Source File: test_algebra_symbolic.py From sklearn-onnx with MIT License
def test_algebra_split(self):
    op = OnnxSplit('I0', axis=0, output_names=['O1', 'O2'],
                   op_version=TARGET_OPSET)
    onx = op.to_onnx({'I0': numpy.arange(6, dtype=numpy.float32)})
    assert onx is not None
    sonx = str(onx)
    assert len(sonx) > 0

    import onnxruntime as ort
    sess = ort.InferenceSession(onx.SerializeToString())
    X = numpy.arange(6)
    exp = [numpy.array([0, 1, 2]), numpy.array([3, 4, 5])]
    Y = sess.run(None, {'I0': X.astype(numpy.float32)})
    assert len(Y) == len(exp)
    assert_almost_equal(exp[0], Y[0])
    assert_almost_equal(exp[1], Y[1])
Example #12
Source File: test_algebra_symbolic.py From sklearn-onnx with MIT License
def test_algebra_normalizer(self):
    op = OnnxNormalizer('I0', norm='L1', op_version=1, output_names=['Y'])
    onx = op.to_onnx({'I0': numpy.ones((1, 2), dtype=numpy.float32)},
                     outputs=[('Y', FloatTensorType())],
                     target_opset={'': 10})
    assert onx is not None
    sonx = str(onx)
    assert "ai.onnx.ml" in sonx
    assert "version: 1" in sonx

    import onnxruntime as ort
    sess = ort.InferenceSession(onx.SerializeToString())
    X = numpy.array([[0, 2], [0, -2]])
    exp = numpy.array([[0, 1], [0, -1]])
    Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
    assert_almost_equal(exp, Y)
Example #13
Source File: test_sklearn_tfidf_vectorizer_converter.py From sklearn-onnx with MIT License
def test_model_tfidf_vectorizer11_compose(self):
    corpus = numpy.array([
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]).reshape((4, 1))
    corpus = numpy.hstack([corpus, corpus])
    y = numpy.array([0, 1, 0, 1])
    model = ColumnTransformer([
        ('a', TfidfVectorizer(), 0),
        ('b', TfidfVectorizer(), 1),
    ])
    model.fit(corpus, y)
    model_onnx = convert_sklearn(model, "TfIdfcomp",
                                 [("input", StringTensorType([4, 2]))],
                                 options=self.get_options())
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': corpus})[0]
    exp = model.transform(corpus)
    assert_almost_equal(res, exp)
Example #14
Source File: test_algebra_onnx_operators_opset.py From sklearn-onnx with MIT License
def test_pad_opset_10(self):
    pad = OnnxPad('X', output_names=['Y'], mode='constant', value=1.5,
                  pads=[0, 1, 0, 1], op_version=2)
    X = np.array([[0, 1]], dtype=np.float32)
    model_def = pad.to_onnx({'X': X}, target_opset=10)
    onnx.checker.check_model(model_def)

    def predict_with_onnxruntime(model_def, *inputs):
        sess = ort.InferenceSession(model_def.SerializeToString())
        names = [i.name for i in sess.get_inputs()]
        dinputs = {name: input for name, input in zip(names, inputs)}
        res = sess.run(None, dinputs)
        names = [o.name for o in sess.get_outputs()]
        return {name: output for name, output in zip(names, res)}

    Y = predict_with_onnxruntime(model_def, X)
    assert_almost_equal(
        np.array([[1.5, 0., 1., 1.5]], dtype=np.float32), Y['Y'])
Example #15
Source File: test_parsing_options.py From sklearn-onnx with MIT License
def test_kmeans(self):
    model = KMeans()
    X, y = make_regression(n_features=4, random_state=42)
    model.fit(X, y)
    initial_types = [('input', FloatTensorType((None, X.shape[1])))]

    with self.assertRaises(RuntimeError):
        convert_sklearn(model, initial_types=initial_types,
                        final_types=[('output4', None)])
    with self.assertRaises(RuntimeError):
        convert_sklearn(model, initial_types=initial_types,
                        final_types=[('dup1', None), ('dup1', None)],
                        target_opset=TARGET_OPSET)

    model_onnx = convert_sklearn(
        model, initial_types=initial_types,
        final_types=[('output4', None), ('output5', None)],
        target_opset=TARGET_OPSET)
    assert model_onnx is not None

    sess = InferenceSession(model_onnx.SerializeToString())
    assert sess.get_outputs()[0].name == 'output4'
    assert sess.get_outputs()[1].name == 'output5'
Example #16
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_ker2_def(self):
    ker = Sum(
        C(0.1, (1e-3, 1e3)) * RBF(length_scale=10,
                                  length_scale_bounds=(1e-3, 1e3)),
        C(0.1, (1e-3, 1e3)) * RBF(length_scale=1,
                                  length_scale_bounds=(1e-3, 1e3))
    )
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=0)
Example #17
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_ker2_exp_sine_squared(self):
    ker = ExpSineSquared()
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=4)

    onx = convert_kernel(ker, 'X', output_names=['Z'],
                         x_train=(Xtest_ * 2).astype(np.float32),
                         dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_, Xtest_ * 2)
    assert_almost_equal(m1, m2, decimal=4)
Example #18
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_dot_product(self):
    ker = DotProduct()
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1 / 1000, m2 / 1000, decimal=5)

    onx = convert_kernel(ker, 'X', output_names=['Z'],
                         x_train=(Xtest_ * 2).astype(np.float32),
                         dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_, Xtest_ * 2)
    assert_almost_equal(m1 / 1000, m2 / 1000, decimal=5)
Example #19
Source File: test_algebra_symbolic.py From sklearn-onnx with MIT License
def test_algebra_abs(self):
    op = OnnxAbs('I0', op_version=TARGET_OPSET)
    onx = op.to_onnx({'I0': numpy.empty((1, 2), dtype=numpy.float32)})
    assert onx is not None

    import onnxruntime as ort
    try:
        sess = ort.InferenceSession(onx.SerializeToString())
    except RuntimeError as e:
        raise RuntimeError("Unable to read\n{}".format(onx)) from e
    X = numpy.array([[0, 1], [-1, -2]])
    try:
        Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
    except RuntimeError as e:
        raise RuntimeError("Unable to run\n{}".format(onx)) from e
    assert_almost_equal(Y, numpy.abs(X))
Example #20
Source File: test_algebra_symbolic.py From sklearn-onnx with MIT License
def test_algebra_normalizer_argmin(self):
    op = OnnxArgMin(
        OnnxNormalizer('I0', norm='L1'),
        op_version=TARGET_OPSET)
    onx = op.to_onnx({'I0': numpy.ones((1, 2), dtype=numpy.float32)})
    assert onx is not None
    sonx = str(onx)
    assert len(sonx) > 0

    import onnxruntime as ort
    sess = ort.InferenceSession(onx.SerializeToString())
    X = numpy.array([[0, 2], [0, -2]])
    exp = numpy.array([[0, 1]])
    Y = sess.run(None, {'I0': X.astype(numpy.float32)})[0]
    assert_almost_equal(exp, Y)
Example #21
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_exp_sine_squared_diag(self):
    ker = ExpSineSquared()
    onx = convert_kernel_diag(
        ker, 'X', output_names=['Y'], dtype=np.float32,
        op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker.diag(Xtest_)
    assert_almost_equal(m1, m2, decimal=4)
Example #22
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_ker1_def(self):
    ker = (C(1.0, (1e-3, 1e3)) *
           RBF(length_scale=10, length_scale_bounds=(1e-3, 1e3)))
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=5)
Example #23
Source File: test_investigate.py From sklearn-onnx with MIT License
def test_missing_converter(self):
    data = numpy.array([[0, 0], [0, 0], [2, 1], [2, 1]],
                       dtype=numpy.float32)
    model = Pipeline([("scaler1", StandardScaler()),
                      ("scaler2", StandardScaler()),
                      ("scaler3", MyScaler())])
    model.fit(data)
    all_models = list(enumerate_pipeline_models(model))

    try:
        collect_intermediate_steps(
            model, "pipeline",
            [("input", FloatTensorType([None, 2]))])
    except MissingShapeCalculator as e:
        assert "MyScaler" in str(e)
        assert "gallery" in str(e)

    _alter_model_for_debugging(model, recursive=True)
    model.transform(data)
    all_models = list(enumerate_pipeline_models(model))

    for ind, step, last in all_models:
        if ind == (0,):
            # whole pipeline
            continue
        step_model = step
        data_in = step_model._debug.inputs['transform']
        t = guess_data_type(data_in)
        try:
            onnx_step = convert_sklearn(step_model, initial_types=t)
        except MissingShapeCalculator as e:
            if "MyScaler" in str(e):
                continue
            raise
        sess = onnxruntime.InferenceSession(onnx_step.SerializeToString())
        onnx_outputs = sess.run(None, {'input': data_in})
        onnx_output = onnx_outputs[0]
        skl_outputs = step_model._debug.outputs['transform']
        assert_almost_equal(onnx_output, skl_outputs)
        compare_objects(onnx_output, skl_outputs)
Example #24
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_rbf_mul(self):
    ker = (C(1.0, constant_value_bounds="fixed") *
           RBF(1.0, length_scale_bounds="fixed"))
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=5)
Example #25
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_constant1(self):
    ker = C(5.)
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=5)
Example #26
Source File: bidaf_model_runtime.py From botbuilder-python with MIT License
def __init__(self, targets: List[str], queries: Dict[str, str], model_dir: str):
    self.queries = queries
    self.targets = targets

    bidaf_model = os.path.abspath(os.path.join(model_dir, "bidaf.onnx"))
    print(f"Loading Inference session from {bidaf_model}..", file=sys.stderr)
    self.session = InferenceSession(bidaf_model)
    print("Inference session loaded..", file=sys.stderr)

    self.processed_queries = self._process_queries()
    print("Processed queries..", file=sys.stderr)
Example #27
Source File: test_sklearn_tfidf_vectorizer_converter.py From sklearn-onnx with MIT License
def test_model_tfidf_vectorizer11_64(self):
    corpus = numpy.array([
        "This is the first document.",
        "This document is the second document.",
        "And this is the third one.",
        "Is this the first document?",
    ]).reshape((4, 1))
    vect = TfidfVectorizer(ngram_range=(1, 1), norm=None)
    vect.fit(corpus.ravel())
    model_onnx = convert_sklearn(vect, "TfidfVectorizer",
                                 [("input", StringTensorType())],
                                 options=self.get_options(),
                                 dtype=numpy.float64)
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        corpus, vect, model_onnx,
        basename="SklearnTfidfVectorizer1164-OneOff-SklCol",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.4.0')",
    )
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'input': corpus.ravel()})[0]
    assert res.shape == (4, 9)
Example #28
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def check_outputs(self, model, model_onnx, Xtest,
                  predict_attributes, decimal=5,
                  skip_if_float32=False):
    if predict_attributes is None:
        predict_attributes = {}
    exp = model.predict(Xtest, **predict_attributes)
    sess = InferenceSession(model_onnx.SerializeToString())
    got = sess.run(None, {'X': Xtest})
    if isinstance(exp, tuple):
        if len(exp) != len(got):
            raise AssertionError("Mismatched number of outputs.")
        for i, (e, g) in enumerate(zip(exp, got)):
            if skip_if_float32 and g.dtype == np.float32:
                continue
            try:
                assert_almost_equal(self.remove_dim1(e),
                                    self.remove_dim1(g),
                                    decimal=decimal)
            except AssertionError as e:  # noqa
                raise AssertionError(
                    "Mismatch for output {} and attributes {}"
                    ".".format(i, predict_attributes)) from e
    else:
        if skip_if_float32 and Xtest.dtype == np.float32:
            return
        assert_almost_equal(np.squeeze(exp),
                            np.squeeze(got),
                            decimal=decimal)
Example #29
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_rbf2(self):
    ker = RBF(length_scale=1, length_scale_bounds="fixed")
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))], dtype=np.float32)
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=5)
Example #30
Source File: test_sklearn_gaussian_process.py From sklearn-onnx with MIT License
def test_kernel_rbf1(self):
    ker = RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=_TARGET_OPSET_)
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))])
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=5)