Python cntk.load_model() Examples

The following are 22 code examples of cntk.load_model(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the cntk module, or try the search function.
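
All of the examples below follow the same basic pattern: load a saved model from disk, then evaluate it on NumPy inputs. As a minimal, hedged sketch (the model path and input shape are placeholders, and a single output is assumed):

import numpy as np
import cntk as C

# Load a previously saved CNTK model (placeholder path).
model = C.load_model("my_model.dnn")

# Build a dummy batch matching the model's first input and evaluate it.
x = np.zeros((1,) + model.arguments[0].shape, dtype=np.float32)
output = model.eval({model.arguments[0]: x})
print(output.shape)  # assumes the model has a single output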
Example #1
Source File: cart_pole_dqn_cntk.py    From ai-gym with MIT License
def test(model_path, num_episodes=10):

    root = cntk.load_model(model_path)
    observation = env.reset()  # reset environment for new episode
    for episode in range(num_episodes):
        done = False
        print(episode)
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass

            action = np.argmax(root.eval([observation.astype(np.float32)]))
            observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()  # reset environment for new episode 
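
This snippet (like Example #2 below) relies on module-level globals from its source file: env, np, and cntk. A plausible setup, with the environment name inferred from the file name rather than taken from the project, would be:

import gym
import numpy as np
import cntk

env = gym.make("CartPole-v0")  # hypothetical; inferred from cart_pole_dqn_cntk.py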
Example #2
Source File: atari_breakout_dqn_cntk.py    From ai-gym with MIT License
def test(model_path, num_episodes=10):

    root = cntk.load_model(model_path)
    observation = env.reset()  # reset environment for new episode
    for episode in range(num_episodes):
        done = False
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass

            observation = preprocess_image(observation)
            action = np.argmax(root.eval(observation.astype(np.float32)))
            observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()  # reset environment for new episode 
Example #3
Source File: eval_speech.py    From end2end_AU_speech with MIT License
def test_one_seq(visualizer):
    # directory to store output video. It will be created if it doesn't exist
    save_dir = "H:/Speech_data/test_output_single"
    model_file = "H:/Speech_data/model_audio2exp_2018-08-01-05-14/model_audio2exp_2018-08-01-05-14.dnn"
    # video directory holding separate frames of the video. Each image should be square.
    video_dir = "H:/FrontalFaceData/RAVDESS/Actor_21/01-01-07-02-01-01-21"
    # spectrogram sequence is stored in a .csv file
    audio_file = "H:/Speech_data/RAVDESS_feat/Actor_21/01-01-07-02-01-01-21/dbspectrogram.csv"
    # AU labels are stored in an .npy file
    exp_file = "H:/Training_data_image/ExpLabels/RAVDESS/Actor_21/01-01-07-02-01-01-21.npy"

    video_list = get_items(video_dir, "full") # set to None if video_dir does not exist
    model = C.load_model(model_file)

    visualize_one_audio_seq(model, video_list, audio_file, exp_file, visualizer, save_dir)

#---------------------------------------------------------------------------------- 
Example #4
Source File: driver.py    From DevOps-For-AI-Apps with MIT License
def init():
    """ Initialise ResNet 152 model
    """
    global trainedModel, labelLookup, mem_after_init

    start = t.default_timer()

    # Load the model and labels from disk
    with open(LABEL_FILE, 'r') as f:
        labelLookup = [l.rstrip() for l in f]

    # Load the model from disk and select the BrainScript output node (index 3)
    trainedModel = load_model(MODEL_FILE)
    trainedModel = combine([trainedModel.outputs[3].owner])
    end = t.default_timer()

    loadTimeMsg = "Model loading time: {0} ms".format(round((end - start) * 1000, 2))
    logger.info(loadTimeMsg) 
Example #5
Source File: evaluate.py    From cntk-python-web-service-on-azure with MIT License
def evaluateimage(file_path, mode, eval_model=None):

    #from plot_helpers import eval_and_plot_faster_rcnn
    if eval_model is None:
        print("Loading existing model from %s" % model_path)
        eval_model = load_model(model_path)
    img_shape = (num_channels, image_height, image_width)
    results_folder = globalvars['temppath']
    results = eval_faster_rcnn(eval_model, file_path, img_shape,
                              results_folder, feature_node_name, globalvars['classes'], mode,
                              drawUnregressedRois=cfg["CNTK"].DRAW_UNREGRESSED_ROIS,
                              drawNegativeRois=cfg["CNTK"].DRAW_NEGATIVE_ROIS,
                              nmsThreshold=cfg["CNTK"].RESULTS_NMS_THRESHOLD,
                              nmsConfThreshold=cfg["CNTK"].RESULTS_NMS_CONF_THRESHOLD,
                              bgrPlotThreshold=cfg["CNTK"].RESULTS_BGR_PLOT_THRESHOLD)
    return results 
Example #6
Source File: helpers_cntk.py    From ImageSimilarityUsingCntk with MIT License
def create_model(base_model_file, input_features, num_classes,  dropout_rate = 0.5, freeze_weights = False):
    # Load the pretrained classification net and find nodes
    base_model   = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    #graph.plot(base_model, filename="base_model.pdf") # Write graph visualization

    # Clone the model until right before the pooling layer, i.e. up to and including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name = "poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="prediction")(avgPoolDrop)
    return finalModel


# Trains a transfer learning model 
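
A hedged usage sketch for the create_model helper above; the base-model file, input dimensions, and class count are assumptions, not values from the project:

import cntk as C

# Hypothetical values: a 224x224 RGB base model and 10 target classes.
input_features = C.input_variable((3, 224, 224), name="input")
new_model = create_model("ResNet18_ImageNet_CNTK.model", input_features,
                         num_classes=10, dropout_rate=0.5, freeze_weights=True)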
Example #7
Source File: models_setup.py    From dnn-model-services with MIT License
def force_training(base_model, set_model, set_data, max_training_epochs):
    # Print out all layers in the model
    print("Loading {} and printing all layers:".format(base_model["model_file"]))
    node_outputs = cntk.logging.get_node_outputs(cntk.load_model(base_model["model_file"]))
    for l in node_outputs:
        print("  {0} {1}".format(l.name, l.shape))

    learning_params = {
        "max_epochs": max_training_epochs,
        "mb_size": 50,
        "lr_per_mb": [0.2] * 10 + [0.1],
        "momentum_per_mb": 0.9,
        "l2_reg_weight": 0.0005,
        "freeze_weights": True,
    }

    print("Force Retraining or Model file NOT FOUND...")
    start_time = time.time()
    trained_model = train_model(base_model, set_model["num_classes"], set_data["full_map"], learning_params)
    trained_model.save(set_model["model_file"])
    print("Stored trained model at %s" % set_model["model_file"])

    # Evaluate the test set
    _, _, predict_accuracy = eval_test_images(
        trained_model,
        set_model["results_file"],
        set_data["testing_map"],
        base_model["image_dims"],
    )
    print("Done. Wrote output to %s" % set_model["results_file"])

    # Test: Accuracy on flower data
    print("Prediction accuracy: {0:.2%}".format(float(predict_accuracy)))

    delta_time = time.time() - start_time
    print("Delta Time: {0:.2f}".format(delta_time)) 
Example #8
Source File: models_setup.py    From dnn-model-services with MIT License
def create_model(model_details, num_classes, input_features, new_prediction_node_name="prediction", freeze=False):
    # Load the pre-trained classification net and find nodes
    base_model = cntk.load_model(model_details["model_file"])

    feature_node = cntk.logging.find_by_name(base_model, model_details["feature_node_name"])
    last_node = cntk.logging.find_by_name(base_model, model_details["last_hidden_node_name"])

    if model_details["inception"]:
        node_outputs = cntk.logging.get_node_outputs(base_model)
        last_node = node_outputs[5]
        feature_node = cntk.logging.find_all_with_name(base_model, "")[-5]
    if model_details["vgg"]:
        last_node = cntk.logging.find_by_name(base_model, "prob")
        feature_node = cntk.logging.find_by_name(base_model, "data")

    # Clone the desired layers with fixed weights
    cloned_layers = cntk.combine([last_node.owner]).clone(
        cntk.CloneMethod.freeze if freeze else cntk.CloneMethod.clone,
        {feature_node: cntk.placeholder(name="features")},
    )

    # Add new dense layer for class prediction
    feat_norm = input_features - cntk.Constant(114)
    cloned_out = cloned_layers(feat_norm)
    z = cntk.layers.Dense(num_classes, activation=None, name=new_prediction_node_name)(cloned_out)
    return z


# Trains a transfer learning model 
Example #9
Source File: check_model.py    From OLive with MIT License
def kerasRunner(model_path, inputs_path):
    import keras
    # Load your Keras model
    keras_model = keras.models.load_model(model_path)
    input_list = gen_input_list(inputs_path)
    output = keras_model.predict(input_list)
    return output 
Example #10
Source File: check_model.py    From OLive with MIT License
def cntkRunner(model_path, inputs_path):
    import cntk as C
    model = C.load_model(model_path, device=C.device.cpu())
    input_dict = gen_io_dict(inputs_path, model.arguments, True)
    output = model.eval(input_dict)
    return output 
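
The gen_io_dict helper is project-specific; a minimal stand-in that builds the same kind of input dictionary by hand might look like this (the path and dummy data are placeholders):

import numpy as np
import cntk as C

model = C.load_model("model.cntk", device=C.device.cpu())  # placeholder path
# Map each model argument to a dummy batch of matching shape.
input_dict = {arg: np.zeros((1,) + arg.shape, dtype=np.float32)
              for arg in model.arguments}
output = model.eval(input_dict)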
Example #11
Source File: frcnn_detector.py    From CNTK-FastRCNNDetector with MIT License
def ensure_model_is_loaded(self):
        if not self.__model:
            self.load_model() 
Example #12
Source File: lung_cancer_utils.py    From sql_python_deep_learning with MIT License
def get_cntk_model(model_name):
    node_name = "z.x"
    loaded_model  = load_model(model_name)
    node_in_graph = loaded_model.find_by_name(node_name)
    output_nodes  = combine([node_in_graph.owner])
    return output_nodes 
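
Once the intermediate "z.x" node has been wrapped with combine(), it can be evaluated like any other CNTK function to obtain penultimate-layer features; a sketch with a placeholder model file and dummy input:

import numpy as np

features_fn = get_cntk_model("lung_cancer.model")  # hypothetical model file
dummy = np.zeros((1,) + features_fn.arguments[0].shape, dtype=np.float32)
features = features_fn.eval({features_fn.arguments[0]: dummy})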
Example #13
Source File: lung_cancer_utils.py    From sql_python_deep_learning with MIT License
def load_cntk_model_from_binary(model_bin, verbose=False):
    model_file = "tmp.model"
    with open(model_file, "wb") as file:
        file.write(model_bin)
    loaded_model = load_model(model_file)
    if verbose:
        print(len(loaded_model.constants))
        node_outputs = get_node_outputs(loaded_model)
        for out in node_outputs:
            print("{0} {1}".format(out.name, out.shape))
    return loaded_model 
Example #14
Source File: FasterRCNN.py    From raster-deep-learning with Apache License 2.0
def load_model(self, model_path):
        '''
        Fill this method with your own model-loading Python code;
        save the model on the self object if you would like to reference it later.

        Tips: you can access emd information through self.json_info.
        '''
        #Todo: fill in this method to load your model
        self.model = C.load_model(model_path) 
Example #15
Source File: AzurePixelLevelLandClassification.py    From raster-deep-learning with Apache License 2.0
def inference(self, batch, **kwargs):
        '''
        Fill this method with your own inference Python code. You can refer to the model instance
        created in the load_model method. The expected result format is described under :return:
        below.

        Tips: you can access emd information through self.json_info.

        :param batch: numpy array of shape (B, H, W, D), where B is the batch size, H and W are
                      equal to ImageHeight and ImageWidth in the emd file, and D is the number of
                      bands, equal to the length of ExtractBands in the emd.
        :param kwargs: inference parameters, accessed by parameter name, e.g.
                       score_threshold=float(kwargs['score_threshold']). To add more inference
                       parameters, extend the list in the getParameterInfo method.
        :return: semantic segmentation: numpy array of shape [B, 1, H, W] and type np.uint8, where B
                 is the batch size and H and W are the tile size, equal to ImageHeight and
                 ImageWidth in the emd file respectively if Padding is not set
        '''
        # Todo: fill in this method to run inference with your model and return the predictions
        batch = batch.astype(np.float32)

        output = self.model.eval(
            {
                self.model.arguments[0]: batch
            }
        )
        semantic_predictions = np.argmax(output, axis=1)
        semantic_predictions = np.expand_dims(semantic_predictions, axis=1)

        return semantic_predictions 
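
A quick sanity check of the shape contract described in the docstring — a (B, H, W, D) batch in, a [B, 1, H, W] array out — using made-up tile dimensions and a hypothetical instance of this class:

import numpy as np

B, H, W, D = 4, 256, 256, 3  # hypothetical; must match the emd settings
batch = np.random.rand(B, H, W, D).astype(np.float32)
preds = classifier.inference(batch)  # classifier: an instance of this class
assert preds.shape == (B, 1, H, W)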
Example #16
Source File: AzurePixelLevelLandClassification.py    From raster-deep-learning with Apache License 2.0
def load_model(self, model_path):
        '''
        Fill this method with your own model-loading Python code;
        save the model on the self object if you would like to reference it later.

        Tips: you can access emd information through self.json_info.
        '''
        # Todo: fill in this method to load your model
        self.model = C.load_model(model_path) 
Example #17
Source File: eval_speech.py    From end2end_AU_speech with MIT License
def estimate_one_audio_seq(model, audio_seq, small_mem=False):
    if isinstance(model, str):
        model = C.load_model(model)
    # handle two cases: the model may be recurrent or static
    if is_recurrent(model):
        n = audio_seq.shape[0]
        NNN = 125  # maximum sub-sequence length per eval call
        if n > NNN and small_mem:
            # split the sequence into chunks of at most NNN frames to limit memory use
            nseqs = n//NNN + 1
            indices = []
            for i in range(nseqs-1):
                indices.append(NNN*i + NNN)
            input_seqs = np.vsplit(audio_seq, indices)
            outputs = []
            for seq in input_seqs:
                output = model.eval({model.arguments[0]:[seq]})[0]
                outputs.append(output)
            output = np.concatenate(outputs)
        else:
            output = model.eval({model.arguments[0]:[audio_seq]})[0]
    else:
        output = model.eval({model.arguments[0]: audio_seq})
    return output


#----------------------- feed sequence ------------------------- 
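
The chunking above relies on np.vsplit with a list of cut indices; a tiny standalone illustration of the same arithmetic (chunk length chosen arbitrarily):

import numpy as np

seq = np.arange(10).reshape(10, 1)  # a 10-frame "sequence"
NNN = 4                             # chunk length, analogous to 125 above
indices = [NNN*i + NNN for i in range(10//NNN)]
chunks = np.vsplit(seq, indices)
print([c.shape[0] for c in chunks])  # [4, 4, 2]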
Example #18
Source File: train_end2end.py    From end2end_AU_speech with MIT License
def audio_encoder_3(input, model_file, cloning=False):
    # Load and freeze pre-trained encoder
    last_layer_name = "t_conv3"
    model = C.load_model(model_file)
    input_node = model.find_by_name("input")
    last_conv = model.find_by_name(last_layer_name)
    if not last_conv:
        raise ValueError("the layer does not exist")
    h = C.combine([last_conv.owner]).clone(C.CloneMethod.clone if cloning else C.CloneMethod.freeze, {input_node: input})
    return h 
Example #19
Source File: helpers_cntk.py    From MachineLearningSamples-ImageClassificationUsingCntk with MIT License
def create_model(base_model_file, input_features, num_classes,  dropout_rate = 0.5, freeze_weights = False):
    # Load the pretrained classification net and find nodes
    base_model   = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    #graph.plot(base_model, filename="base_model.pdf") # Write graph visualization

    # Clone the model until right before the pooling layer, i.e. up to and including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name = "poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="prediction")(avgPoolDrop)
    return finalModel


# Trains a transfer learning model 
Example #20
Source File: frcnn_detector.py    From CNTK-FastRCNNDetector with MIT License
def load_model(self):
        if self.__model:
            raise Exception("Model already loaded")
        
        trained_frcnn_model = load_model(self.__model_path)
        self.__is_python_model = len(trained_frcnn_model.arguments) < 3

        if self.__is_python_model:
            self.__args_indices = {"features" : 0, "rois" : 1}
            self.__nr_rois = trained_frcnn_model.arguments[self.__args_indices["rois"]].shape[0]
            self.__resize_width = trained_frcnn_model.arguments[self.__args_indices["features"]].shape[1]
            self.__resize_height = trained_frcnn_model.arguments[self.__args_indices["features"]].shape[2]
            self.labels_count = trained_frcnn_model.arguments[self.__args_indices["rois"]].shape[1] 
            self.__model = trained_frcnn_model

        else:
            # cache indices of the model arguments
            args_indices = {}
            for i, arg in enumerate(trained_frcnn_model.arguments):
                args_indices[arg.name] = i

            self.__nr_rois = trained_frcnn_model.arguments[args_indices["rois"]].shape[0]
            self.__resize_width = trained_frcnn_model.arguments[args_indices["features"]].shape[1]
            self.__resize_height = trained_frcnn_model.arguments[args_indices["features"]].shape[2]
            self.labels_count = trained_frcnn_model.arguments[args_indices["roiLabels"]].shape[1]
            
            # Next, we clone the model and create input nodes for just the features (image) and
            # ROIs. This ensures that only the computations needed to evaluate images are
            # performed at test time.
            # find the original features and rois input nodes
            features_node = find_by_name(trained_frcnn_model, "features")
            rois_node = find_by_name(trained_frcnn_model, "rois")

            #  find the output "z" node
            z_node = find_by_name(trained_frcnn_model, 'z')

            # define new input nodes for the features (image) and rois
            image_input = input_variable(features_node.shape, name='features')
            roi_input = input_variable(rois_node.shape, name='rois')

            # Clone the desired layers with fixed weights and placeholders for the new input nodes
            cloned_nodes = combine([z_node.owner]).clone(
                CloneMethod.freeze,
                {features_node: placeholder(name='features'), rois_node: placeholder(name='rois')})

            # apply the cloned nodes to the input nodes to obtain the model for evaluation
            self.__model = cloned_nodes(image_input, roi_input)

            # cache the indices of the input nodes
            self.__args_indices = {}

            for i, arg in enumerate(self.__model.arguments):
                self.__args_indices[arg.name] = i
Example #21
Source File: eval_speech.py    From end2end_AU_speech with MIT License
def visualize_one_audio_seq(model, video_frame_list, audio_csv_file, exp_npy_file, visualizer, save_dir):
    if isinstance(model, str):
        model = C.load_model(model)
    # evaluate model with given audio data
    audio = np.loadtxt(audio_csv_file, dtype=np.float32, delimiter=",")
    audio_seq = np.reshape(audio, (audio.shape[0], 1, 128, 32))
    e_fake = estimate_one_audio_seq(model, audio_seq)
    if e_fake.shape[1] != 46:
        if e_fake.shape[1] == 49:
            e_fake = e_fake[:,3:]
        else:
            raise ValueError("unsupported output of audio model")
    # load true labels with optional median filter to smooth it (not used in training)
    e_real = load_exp_sequence(exp_npy_file, use_medfilt=True)

    if e_real.shape[0] != e_fake.shape[0]:
        raise ValueError("number of true labels and number of outputs do not match")
    
    # create directory to store output frames
    if video_frame_list:
        video = load_image_stack(video_frame_list)
        if video.shape[0] != e_real.shape[0]:
            print("number of frames and number of labels do not match. Not using video")
            video = None
    else:
        video = None
    # make folder to store generated frames
    make_dir(save_dir)

    n = e_real.shape[0]
    for i in range(n):
        if video is not None:
            img = video[i,:,:,:]
        else:
            img = None  # do not include the input video in the output
        ef = e_fake[i,:]
        er = e_real[i,:]
        ret = visualizer.visualize(img, er, ef)
        # draw plot
        plot = SU.draw_error_bar_plot(er, ef, (ret.shape[1],200))
        ret = np.concatenate([ret, plot], axis=0)
        save_path = save_dir + "/result{:06d}.jpg".format(i)
        cv2.imwrite(save_path, ret)
        # can call cv2.imshow() here


#---------------------------------------------------------------------------------------- 
Example #22
Source File: image_recon.py    From dnn-model-services with MIT License
def image_recognition(method, model, map_names, img_path, image_dims):
    try:
        tmp_img_file = generate_uid() + ".jpg"
        # Link
        if "http://" in img_path or "https://" in img_path:
            header = {'User-Agent': 'Mozilla/5.0 (Windows NT x.y; Win64; x64; rv:9.0) Gecko/20100101 Firefox/10.0'}
            r = requests.get(img_path, headers=header, allow_redirects=True)
            with open(tmp_img_file, "wb") as my_f:
                my_f.write(r.content)
                img_path = tmp_img_file

        # Base64
        elif len(img_path) > 500:
            img_data = base64.b64decode(img_path)
            with open(tmp_img_file, "wb") as f:
                f.write(img_data)
                img_path = tmp_img_file

        model_file = os.path.join(resources_root, "Models", "{}_{}_20.model".format(method, model))

        if model == "AlexNet":
            image_dims = (3, 227, 227)
        elif model == "InceptionV3":
            image_dims = (3, 299, 299)

        start_time = time.time()
        trained_model = cntk.load_model(model_file)
        probs = eval_single_image(trained_model, img_path, image_dims)
        top_5_dict = {}
        p_array = probs.argsort()[-5:][::-1]
        for i, prob in enumerate(p_array):
            perc = probs[prob] * 100
            top_5_dict[i + 1] = "{0:05.2f}%: {1}".format(perc, map_names[int(prob)])

        delta_time = time.time() - start_time
        if os.path.exists(tmp_img_file):
            os.remove(tmp_img_file)
        return {"delta_time": "{:.4f}".format(delta_time), "top_5": top_5_dict}

    except Exception as e:
        log.error(e)
        traceback.print_exc()
        return {"delta_time": "Fail", "top_5": "Fail", "error": str(e)}