Python grpc.beta.implementations.insecure_channel() Examples
The following are 28 code examples of grpc.beta.implementations.insecure_channel().
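Every example below follows the same three-step pattern: open a channel with implementations.insecure_channel(host, port) (unlike the GA-API grpc.insecure_channel('host:port'), the beta API takes host and port as two separate arguments), wrap the channel in a generated beta_create_*_stub, and invoke an RPC method with a deadline in seconds. Here is a minimal sketch of that pattern against a TensorFlow Serving endpoint; the host, port, model name, and input key are placeholders, and it assumes the TF 1.x-era tensorflow-serving-api package that still ships the beta stubs (the grpc.beta API has since been deprecated in favor of the GA API).

from grpc.beta import implementations
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2

def make_prediction(host='localhost', port=9000, model_name='example_model', timeout=10.0):
    # Beta API: host and port are two separate arguments, not one "host:port" string.
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Build a PredictRequest with a dummy float input under a placeholder key.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto([1.0, 2.0, 3.0], dtype=tf.float32))

    # The second positional argument is the per-call timeout (deadline) in seconds.
    return stub.Predict(request, timeout)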
Example #1
Source File: test_grpc_serving.py From BERT with Apache License 2.0
def run(host, port, test_json, model_name, signature_name):
    # channel = grpc.insecure_channel('%s:%d' % (host, port))
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    with open(test_json, "r") as frobj:
        content = json.load(frobj)
    print(len(content), "======")
    start = time.time()
    for i, input_dict in enumerate(content):
        request = prepare_grpc_request(model_name, signature_name, input_dict)
        result = stub.Predict(request, 10.0)
        print(result, i)
    end = time.time()
    time_diff = end - start
    print('time elapsed: {}'.format(time_diff))
Example #2
Source File: chicago_taxi_client.py From code-snippets with Apache License 2.0
def _do_local_inference(host, port, serialized_examples, model_name):
    """Performs inference on a model hosted by the host:port server."""
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    # request.model_spec.name = 'chicago_taxi'
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'predict'

    tfproto = tf.contrib.util.make_tensor_proto([serialized_examples],
                                                shape=[len(serialized_examples)],
                                                dtype=tf.string)
    # The name of the input tensor is 'examples' based on
    # https://github.com/tensorflow/tensorflow/blob/r1.9/tensorflow/python/estimator/export/export.py#L290
    request.inputs['examples'].CopyFrom(tfproto)
    print(stub.Predict(request, _LOCAL_INFERENCE_TIMEOUT_SECONDS))
Example #3
Source File: grpc_proxy_util.py From python-compat-runtime with Apache License 2.0
def create_stub(grpc_apiserver_host):
    """Creates a grpc_service.CallHandler stub.

    Args:
      grpc_apiserver_host: String, the host that CallHandler service listens on.
        Should be in the format of hostname:port.

    Returns:
      A CallHandler stub.
    """
    # See http://www.grpc.io/grpc/python/_modules/grpc/beta/implementations.html:
    # the method insecure_channel explicitly requires two parameters (host, port);
    # here our host already contains the port number, so the second parameter is None.
    prefix = 'http://'
    if grpc_apiserver_host.startswith(prefix):
        grpc_apiserver_host = grpc_apiserver_host[len(prefix):]
    channel = implementations.insecure_channel(grpc_apiserver_host, None)
    return grpc_service_pb2.beta_create_CallHandler_stub(channel)
Example #4
Source File: predict_client.py From deep_q with Apache License 2.0
def main():
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout

    # Generate inference data
    features = numpy.asarray([[1, 2, 3, 4], [5, 6, 7, 8]])
    features_tensor_proto = tf.contrib.util.make_tensor_proto(features,
                                                              dtype=tf.float32)

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    if model_version > 0:
        request.model_spec.version.value = model_version
    request.inputs['state'].CopyFrom(features_tensor_proto)

    # Send request
    result = stub.Predict(request, request_timeout)
    print(result)
Example #5
Source File: predict_client.py From tensorflow_template_application with Apache License 2.0
def main():
    # Generate inference data
    keys = numpy.asarray([1, 2, 3, 4])
    keys_tensor_proto = tf.contrib.util.make_tensor_proto(keys, dtype=tf.int32)
    features = numpy.asarray(
        [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1],
         [9, 8, 7, 6, 5, 4, 3, 2, 1], [9, 9, 9, 9, 9, 9, 9, 9, 9]])
    features_tensor_proto = tf.contrib.util.make_tensor_proto(
        features, dtype=tf.float32)

    # Create gRPC client
    channel = implementations.insecure_channel(FLAGS.host, FLAGS.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model_name
    if FLAGS.model_version > 0:
        request.model_spec.version.value = FLAGS.model_version
    if FLAGS.signature_name != "":
        request.model_spec.signature_name = FLAGS.signature_name
    request.inputs["keys"].CopyFrom(keys_tensor_proto)
    request.inputs["features"].CopyFrom(features_tensor_proto)

    # Send request
    result = stub.Predict(request, FLAGS.request_timeout)
    print(result)
Example #6
Source File: predict_client.py From tensorflow_template_application with Apache License 2.0
def main():
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout

    # Generate inference data
    features = numpy.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9])
    features_tensor_proto = tf.contrib.util.make_tensor_proto(features,
                                                              dtype=tf.float32)

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.version.value = model_version
    request.inputs['features'].CopyFrom(features_tensor_proto)

    # Send request
    result = stub.Predict(request, request_timeout)
    print(result)
Example #7
Source File: generic_predict_client.py From cloud-ml-sdk with Apache License 2.0
def predict(server, model, data, timeout=10.0):
    """Request generic gRPC server with specified data.

    Args:
      server: The address of server. Example: "localhost:9000".
      model: The name of the model. Example: "mnist".
      data: The json data to request. Example: {"keys_dtype": "int32", "keys": [[1], [2]]}.

    Returns:
      The predict result in dictionary format. Example: {"keys": [1, 2]}.
    """
    host, port = server.split(":")
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model
    for k, v in data.items():
        if not k.endswith("_dtype"):
            numpy_data = np.array(v)
            dtype = data[k + "_dtype"]
            request.inputs[k].CopyFrom(
                tensor_util.make_tensor_proto(numpy_data, dtype=dtype))

    result = stub.Predict(request, timeout)
    result_dict = {}
    for k, v in result.outputs.items():
        result_dict[k] = get_tensor_values(v)
    return result_dict
Example #8
Source File: serving_utils.py From fine-lm with MIT License
def _create_stub(server):
    host, port = server.split(":")
    channel = implementations.insecure_channel(host, int(port))
    # TODO(bgb): Migrate to GA API.
    return prediction_service_pb2.beta_create_PredictionService_stub(channel)
Example #9
Source File: mnist_client.py From kubeflow-introduction with Apache License 2.0
def get_prediction(image, server_host='127.0.0.1', server_port=9000,
                   server_name="server", timeout=10.0):
    """
    Retrieve a prediction from a TensorFlow model server

    :param image:       a MNIST image represented as a 1x784 array
    :param server_host: the address of the TensorFlow server
    :param server_port: the port used by the server
    :param server_name: the name of the server
    :param timeout:     the amount of time to wait for a prediction to complete
    :return 0:          the integer predicted in the MNIST image
    :return 1:          the confidence scores for all classes
    :return 2:          the version number of the model handling the request
    """
    print("connecting to:%s:%i" % (server_host, server_port))

    # initialize the server connection
    channel = implementations.insecure_channel(server_host, server_port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # build request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = server_name
    request.model_spec.signature_name = 'predict_images'
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(image, shape=image.shape))

    # retrieve results
    result = stub.Predict(request, timeout)
    resultVal = result.outputs['prediction'].int64_val
    scores = result.outputs['scores'].float_val
    version = result.outputs['model-version'].string_val
    return resultVal[0], scores, version[0]
Example #10
Source File: tester_client.py From code-for-blog with The Unlicense
def main():
    channel = implementations.insecure_channel('localhost', PORT)
    stub = stringdb_pb2.beta_create_StringDb_stub(channel)

    # some sample data for testing (note: this snippet uses Python 2 print syntax)
    print 'Running sample data...'
    set_value(stub, 'foo', 'bar')
    set_value(stub, 'baz', 'anaconda is here')
    print get_value(stub, 'foo')
    print count_value(stub, 'baz')
Example #11
Source File: inception_client.py From models with Apache License 2.0
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Send request
    with open(FLAGS.image, 'rb') as f:
        # See prediction_service.proto for gRPC request/response details.
        data = f.read()
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'inception'
        request.model_spec.signature_name = 'predict_images'
        request.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(data, shape=[1]))
        result = stub.Predict(request, 10.0)  # 10 secs timeout
        print(result)
Example #12
Source File: predict.py From -Learn-Artificial-Intelligence-with-TensorFlow with MIT License
def get_prediction_service_stub(host, port):
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    return stub
Example #13
Source File: client.py From Machine-Learning-with-TensorFlow-1.x with MIT License
def process_image(path, label_data, top_k=3):
    start_time = datetime.now()
    img = imread(path)

    host, port = "0.0.0.0:9000".split(":")
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = "pet-model"
    request.model_spec.signature_name = "predict_images"

    request.inputs["images"].CopyFrom(
        tf.contrib.util.make_tensor_proto(
            img.astype(dtype=float),
            shape=img.shape,
            dtype=tf.float32))

    result = stub.Predict(request, 20.)
    scores = tf.contrib.util.make_ndarray(result.outputs["scores"])[0]
    probs = softmax(scores)
    index = sorted(range(len(probs)), key=lambda x: probs[x], reverse=True)

    outputs = []
    for i in range(top_k):
        outputs.append(Output(score=float(probs[index[i]]),
                              label=label_data[index[i]]))

    print(outputs)
    print("total time", (datetime.now() - start_time).total_seconds())
    return outputs
Example #14
Source File: gym_agent.py From deep_q with Apache License 2.0
def main():
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    if model_version > 0:
        request.model_spec.version.value = model_version

    env = gym.make(FLAGS.gym_env)
    state = env.reset()
    total_reward = 0

    while True:
        if FLAGS.render_game:
            time.sleep(0.1)
            env.render()

        # Generate inference data
        features = numpy.asarray([state])
        features_tensor_proto = tf.contrib.util.make_tensor_proto(
            features, dtype=tf.float32)
        request.inputs['states'].CopyFrom(features_tensor_proto)

        # Send request
        result = stub.Predict(request, request_timeout)
        action = int(result.outputs.get("actions").int64_val[0])
        next_state, reward, done, info = env.step(action)
        total_reward += reward
        state = next_state

        if done:
            print("End of the game, reward: {}".format(total_reward))
            break
Example #15
Source File: benchmark_qps.py From tensorflow_template_application with Apache License 2.0
def test_one_process(i):
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout
    request_batch = FLAGS.benchmark_batch_size
    request_data = [i for i in range(request_batch)]

    # Generate inference data
    features = numpy.asarray(request_data)
    features_tensor_proto = tf.contrib.util.make_tensor_proto(features,
                                                              dtype=tf.float32)

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.version.value = model_version
    request.inputs['features'].CopyFrom(features_tensor_proto)

    # Send request
    request_number = FLAGS.benchmark_test_number
    #start_time = time.time()
    events = []
    for i in range(request_number):
        event = threading.Event()
        result_future = stub.Predict.future(request, request_timeout)
        #result_future = stub.Predict.future(request, 0.00000001)
        result_future.add_done_callback(_create_rpc_callback(event))
        events.append(event)
    #result = stub.Predict(request, request_timeout)
    #end_time = time.time()
    #print("Average latency is: {} ms".format((end_time - start_time) * 1000 / request_number))
    #print("Average qps is: {}".format(request_number / (end_time - start_time)))

    for event in events:
        event.wait()
Example #16
Source File: benchmark_latency.py From tensorflow_template_application with Apache License 2.0
def main():
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout
    request_batch = FLAGS.benchmark_batch_size
    request_data = [i for i in range(request_batch)]

    # Generate inference data
    features = numpy.asarray(request_data)
    features_tensor_proto = tf.contrib.util.make_tensor_proto(features,
                                                              dtype=tf.float32)

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.version.value = model_version
    request.inputs['features'].CopyFrom(features_tensor_proto)

    # Send request
    request_number = FLAGS.benchmark_test_number
    start_time = time.time()
    for i in range(request_number):
        result = stub.Predict(request, request_timeout)
    end_time = time.time()
    print("Average latency is: {} ms".format(
        (end_time - start_time) * 1000 / request_number))
    #print(result)
Example #17
Source File: tfserver.py From tf_classification with MIT License
def predict(image_data, model_name='inception', host='localhost', port=9000, timeout=10):
    """
    Arguments:
      image_data (list): A list of image data. The image data should either be
        the image bytes or float arrays.
      model_name (str): The name of the model to query (specified when you
        started the Server).
      host (str): The machine host identifier that the classifier is running on.
      port (int): The port that the classifier is listening on.
      timeout (int): Time in seconds before timing out.

    Returns:
      PredictResponse protocol buffer. See here:
      https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/predict.proto
    """
    if len(image_data) <= 0:
        return None

    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name

    if type(image_data[0]) == str:
        request.model_spec.signature_name = 'predict_image_bytes'
        request.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(image_data,
                                              shape=[len(image_data)]))
    else:
        request.model_spec.signature_name = 'predict_image_array'
        request.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(
                image_data, shape=[len(image_data), len(image_data[1])]))

    result = stub.Predict(request, timeout)
    return result
Example #18
Source File: client.py From wide_deep with MIT License
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model
    request.model_spec.signature_name = 'serving_default'

    # feature_dict = {'age': _float_feature(value=25),
    #                 'capital_gain': _float_feature(value=0),
    #                 'capital_loss': _float_feature(value=0),
    #                 'education': _bytes_feature(value='11th'.encode()),
    #                 'education_num': _float_feature(value=7),
    #                 'gender': _bytes_feature(value='Male'.encode()),
    #                 'hours_per_week': _float_feature(value=40),
    #                 'native_country': _bytes_feature(value='United-States'.encode()),
    #                 'occupation': _bytes_feature(value='Machine-op-inspct'.encode()),
    #                 'relationship': _bytes_feature(value='Own-child'.encode()),
    #                 'workclass': _bytes_feature(value='Private'.encode())}
    # label = 0

    data = _read_test_input()
    feature_dict = pred_input_fn(data)

    example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
    serialized = example.SerializeToString()

    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(serialized, shape=[1]))

    result_future = stub.Predict.future(request, 5.0)
    prediction = result_future.result().outputs['scores']

    # print('True label: ' + str(label))
    print('Prediction: ' + str(np.argmax(prediction.float_val)))
Example #19
Source File: client.py From wide_deep with MIT License
def do_inference(hostport, work_dir, concurrency, num_tests):
    """Tests PredictionService with concurrent requests.

    Args:
      hostport: Host:port address of the PredictionService.
      work_dir: The full path of working directory for test data set.
      concurrency: Maximum number of concurrent requests.
      num_tests: Number of test images to use.

    Returns:
      The classification error rate.

    Raises:
      IOError: An error occurred processing test data set.
    """
    test_data_set = mnist_input_data.read_data_sets(work_dir).test
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result_counter = _ResultCounter(num_tests, concurrency)
    for _ in range(num_tests):
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'mnist'
        request.model_spec.signature_name = 'predict_images'
        image, label = test_data_set.next_batch(1)
        request.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(image[0], shape=[1, image[0].size]))
        result_counter.throttle()
        result_future = stub.Predict.future(request, 5.0)  # 5 seconds
        result_future.add_done_callback(
            _create_rpc_callback(label[0], result_counter))
    return result_counter.get_error_rate()
Example #20
Source File: client.py From c3po-grpc-gateway with MIT License
def connect(self):
    for i in range(self.pool_size):
        channel = implementations.insecure_channel(self.host, self.port)
        stub = server_pb2.beta_create_SimpleService_stub(channel)
        # we need to make channels[i] == stubs[i]->channel
        self.channels.append(channel)
        self.stubs.append(stub)
Example #21
Source File: tf_serving_client.py From kryptoflow with GNU General Public License v3.0
def _open_tf_server_channel(server_name, server_port):
    channel = implementations.insecure_channel(
        server_name, int(server_port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    return stub
Example #22
Source File: client.py From MMdnn with MIT License
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Send request
    image = tf.gfile.FastGFile(FLAGS.image, 'rb').read()
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'tensorflow-serving'
    request.model_spec.signature_name = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
    request.inputs['image'].CopyFrom(tf.contrib.util.make_tensor_proto(image))
    #request.inputs['input'].CopyFrom()
    result = stub.Predict(request, 10.0)  # 10 secs timeout
    print(result)
Example #23
Source File: client.py From voice-vector with MIT License
def do_inference(num_tests, concurrency=1):
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    coord = _Coordinator(num_tests, concurrency)

    for _ in range(num_tests):
        # dummy audio
        duration, sr, n_fft, win_length, hop_length, n_mels, max_db, min_db = \
            4, 16000, 512, 512, 128, 80, 35, -55
        filename = librosa.util.example_audio_file()
        wav = read_wav(filename, sr=sr, duration=duration)
        mel = wav2melspec_db(wav, sr, n_fft, win_length, hop_length, n_mels)
        mel = normalize_db(mel, max_db=max_db, min_db=min_db)
        mel = mel.astype(np.float32)
        mel = np.expand_dims(mel, axis=0)  # single batch
        n_timesteps = sr / hop_length * duration + 1

        # build request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'voice_vector'
        request.model_spec.signature_name = 'predict'
        request.inputs['x'].CopyFrom(
            tf.contrib.util.make_tensor_proto(mel, shape=[1, n_timesteps, n_mels]))

        coord.throttle()

        # send asynchronous response (recommended. use this.)
        result_future = stub.Predict.future(request, 10.0)  # timeout
        result_future.add_done_callback(_create_rpc_callback(coord))

        # send synchronous response (NOT recommended)
        # result = stub.Predict(request, 5.0)

    coord.wait_all_done()
Example #24
Source File: greeter_client.py From learning-python with MIT License
def run():
    channel = implementations.insecure_channel('localhost', 50051)
    stub = helloworld_pb2.beta_create_Greeter_stub(channel)
    response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'),
                             _TIMEOUT_SECONDS)
    # note: this snippet uses Python 2 print syntax
    print "Greeter client received: " + response.message
Example #25
Source File: predictor_client.py From tensorflow_fasttext with MIT License
def main(_):
    if not FLAGS.text:
        raise ValueError("No --text provided")

    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = Request(FLAGS.text, FLAGS.ngrams)
    result = stub.Classify(request, 10.0)  # 10 secs timeout
    print(result)
Example #26
Source File: mnist_client.py From Make_Money_with_Tensorflow with GNU General Public License v3.0
def do_inference(hostport, work_dir, concurrency, num_tests):
    """Tests PredictionService with concurrent requests.

    Args:
      hostport: Host:port address of the PredictionService.
      work_dir: The full path of working directory for test data set.
      concurrency: Maximum number of concurrent requests.
      num_tests: Number of test images to use.

    Returns:
      The classification error rate.

    Raises:
      IOError: An error occurred processing test data set.
    """
    test_data_set = mnist_input_data.read_data_sets(work_dir).test
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result_counter = _ResultCounter(num_tests, concurrency)
    for _ in range(num_tests):
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'mnist'
        request.model_spec.signature_name = 'predict'
        image, label = test_data_set.next_batch(1)
        request.inputs['inputs'].CopyFrom(
            tf.contrib.util.make_tensor_proto(image[0], shape=[1, 28, 28, 1]))
        result_counter.throttle()
        result_future = stub.Predict.future(request, 5.0)  # 5 seconds
        result_future.add_done_callback(
            _create_rpc_callback(label[0], result_counter))
    return result_counter.get_error_rate()
Example #27
Source File: sample_client.py From Make_Money_with_Tensorflow with GNU General Public License v3.0
def main():
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout

    # Generate inference data
    keys = numpy.asarray([1, 2, 3])
    keys_tensor_proto = tf.contrib.util.make_tensor_proto(keys, dtype=tf.int32)
    # `img` is defined elsewhere in the original script
    features_tensor_proto = tf.contrib.util.make_tensor_proto(img, dtype=tf.float32)

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    if model_version > 0:
        request.model_spec.version.value = model_version
    request.inputs['inputs'].CopyFrom(features_tensor_proto)
    request.model_spec.signature_name = 'predict'
    #request.inputs['features'].CopyFrom(features_tensor_proto)

    # Send request
    result = stub.Predict(request, request_timeout)
    response = numpy.array(result.outputs['outputs'].float_val)
    prediction = numpy.argmax(response)
    print(prediction)
Example #28
Source File: sparse_predict_client.py From tensorflow_template_application with Apache License 2.0
def main():
    host = FLAGS.host
    port = FLAGS.port
    model_name = FLAGS.model_name
    model_version = FLAGS.model_version
    request_timeout = FLAGS.request_timeout

    '''
    Example data:
    0 5:1 6:1 17:1 21:1 35:1 40:1 53:1 63:1 71:1 73:1 74:1 76:1 80:1 83:1
    1 5:1 7:1 17:1 22:1 36:1 40:1 51:1 63:1 67:1 73:1 74:1 76:1 81:1 83:1
    '''

    # Generate keys TensorProto
    keys = numpy.asarray([1, 2])
    keys_tensor_proto = tf.contrib.util.make_tensor_proto(keys, dtype=tf.int32)

    # Generate indexs TensorProto
    indexs = numpy.asarray([[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5],
                            [0, 6], [0, 7], [0, 8], [0, 9], [0, 10], [0, 11],
                            [0, 12], [0, 13], [1, 0], [1, 1], [1, 2], [1, 3],
                            [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [1, 9],
                            [1, 10], [1, 11], [1, 12], [1, 13]])
    indexs_tensor_proto = tf.contrib.util.make_tensor_proto(indexs,
                                                            dtype=tf.int64)

    # Generate ids TensorProto
    ids = numpy.asarray([5, 6, 17, 21, 35, 40, 53, 63, 71, 73, 74, 76, 80, 83,
                         5, 7, 17, 22, 36, 40, 51, 63, 67, 73, 74, 76, 81, 83])
    ids_tensor_proto = tf.contrib.util.make_tensor_proto(ids, dtype=tf.int64)

    # Generate values TensorProto
    values = numpy.asarray([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    values_tensor_proto = tf.contrib.util.make_tensor_proto(values,
                                                            dtype=tf.float32)

    # Generate shape TensorProto
    shape = numpy.asarray([2, 124])
    shape_tensor_proto = tf.contrib.util.make_tensor_proto(shape,
                                                           dtype=tf.int64)

    # Create gRPC client and request
    channel = implementations.insecure_channel(host, port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    if model_version > 0:
        request.model_spec.version.value = model_version
    request.inputs["keys"].CopyFrom(keys_tensor_proto)
    request.inputs["indexs"].CopyFrom(indexs_tensor_proto)
    request.inputs["ids"].CopyFrom(ids_tensor_proto)
    request.inputs["values"].CopyFrom(values_tensor_proto)
    request.inputs["shape"].CopyFrom(shape_tensor_proto)

    # Send request
    result = stub.Predict(request, request_timeout)
    print(result)