Python predict.predict() Examples

The following are 13 code examples of predict.predict(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module predict, or try the search function.
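All of these snippets call into a project-specific predict module (or a Predict class) that hides a trained model behind one or two calls. As a rough orientation only, a minimal sketch of that kind of interface could look like the following; the class and method names are hypothetical, and each project below defines its own.

# Hypothetical sketch of the interface the examples below assume.
# Real projects each ship their own predict module / Predict class.
class Predict:
    def __init__(self, model):
        self.model = model  # any object exposing a .predict() method

    def predict(self, sample):
        # Run the wrapped model on one preprocessed sample and return its output.
        return self.model.predict(sample)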
Example #1
Source File: main.py    From xgboost-operator with Apache License 2.0 6 votes
def main(args):

    model_storage_type = args.model_storage_type
    if model_storage_type in ("local", "oss"):
        print("The storage type is " + model_storage_type)
    else:
        raise Exception("Only local and OSS storage types are supported")

    if args.job_type == "Predict":
        logging.info("starting the predict job")
        predict(args)

    elif args.job_type == "Train":
        logging.info("starting the train job")
        model = train(args)

        if model is not None:
            logging.info("finish the model training, and start to dump model ")
            model_path = args.model_path
            dump_model(model, model_storage_type, model_path, args)

    elif args.job_type == "All":
        logging.info("starting the train and predict job")

    logging.info("Finish distributed XGBoost job") 
Example #2
Source File: deep_quant.py    From deep-quant with MIT License 6 votes
def main(_):
    config = get_configs()

    # Check if Uncertainty Quantification mode
    if config.UQ:
        assert (config.UQ_model_type in ['MVE', 'PIE'])
        # Check to see if we are in training or testing mode
        if config.train is True:
            train_model_uq(config)
        else:
            predict_uq(config)

    else:
        # Check to see if we are in training or testing mode
        if config.train is True:
            train_model(config)
        else:
            predict(config) 
Example #3
Source File: train.py    From torch-light with MIT License 6 votes
def test(i, predict):
    model.eval()
    t = pre = ground = 0  # true positives, predicted triple count, gold triple count
    inf = open("data/dev_data.json", encoding="utf8")
    for line in inf:
        line = json.loads(line)
        text = line["text"]
        g_triples = set()
        for trip in line["spo_list"]:
            g_triples.add((trip["subject"], trip["predicate"], trip["object"]))

        p_triples = predict.predict(text)
        pre += len(p_triples)
        ground += len(g_triples)
        t += len(p_triples.intersection(g_triples))

    print(
        f"test epoch {i+1}/{args.epochs} precision: {t/(pre+0.001):.4f} recall: {t/ground:.4f} f1: {2*t/(pre+ground):.4f}")
    return 2*t/(pre+ground) 
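The loop above only requires predict.predict(text) to return a set of (subject, predicate, object) triples so it can be intersected with the gold triples. The metric computation, shown standalone with dummy sets (not model output), works out as:

# Standalone sketch of the precision/recall/F1 computation used in test();
# the triples below are made up, not output of the torch-light model.
predicted = {("A", "born_in", "B"), ("A", "works_at", "C")}
gold = {("A", "born_in", "B")}

t = len(predicted & gold)
precision = t / (len(predicted) + 0.001)   # same smoothing term as above
recall = t / len(gold)
f1 = 2 * t / (len(predicted) + len(gold))
print(f"precision {precision:.4f} recall {recall:.4f} f1 {f1:.4f}")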
Example #4
Source File: songs.py    From crnn-lid with GNU General Public License v3.0 6 votes
def predict(input_file):

    config = {"pixel_per_second": 50, "input_shape": [129, 500, 1], "num_classes": 4}
    data_generator = SpectrogramGenerator(input_file, config, shuffle=False, run_only_once=True).get_generator()
    data = [np.divide(image, 255.0) for image in data_generator]
    data = np.stack(data)

    # Run the model on every spectrogram window
    probabilities = model.predict(data)
    probabilities = probabilities[3:-5] # ignore first 30 sec and last 50 sec

    classes = np.argmax(probabilities, axis=1)
    average_prob = np.mean(probabilities, axis=0)
    average_class = np.argmax(average_prob)

    print(classes, class_labels[average_class], average_prob)
    return average_class 
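predict() relies on module-level model and class_labels globals. A hedged sketch of how they might be prepared before calling it; the weights path and label names are assumptions, not the crnn-lid defaults.

from keras.models import load_model

# Hypothetical setup; crnn-lid loads its own trained model and label list.
model = load_model("weights.h5")
class_labels = ["english", "german", "french", "spanish"]

average_class = predict("some_song.mp3")
print(class_labels[average_class])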
Example #5
Source File: songs.py    From crnn-lid with GNU General Public License v3.0 6 votes
def eval(root_dir):
    languages = get_immediate_subdirectories(root_dir)

    # Evaluate all files for each language
    for lang in languages:
        print(lang)
        files = list(recursive_glob(os.path.join(root_dir, lang), "*.mp3"))
        classes = []

        for file in files:
            print(file)
            average_class = predict(file)
            classes.append(average_class)

        y_true = np.full((len(classes)), LABELS[lang])

        print(lang)
        print(accuracy_score(y_true, classes))
        print(classification_report(y_true, classes)) 
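eval() assumes a LABELS dict mapping language directory names to integer class ids. With such a mapping (values below are made up), scikit-learn compares the per-file predictions against a constant true label per language:

import numpy as np
from sklearn.metrics import accuracy_score, classification_report

# Hypothetical label map and dummy predictions, just to show the metric calls.
LABELS = {"english": 0, "german": 1, "french": 2, "spanish": 3}
classes = [0, 0, 1, 0]                       # per-file predicted classes
y_true = np.full(len(classes), LABELS["english"])

print(accuracy_score(y_true, classes))
print(classification_report(y_true, classes))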
Example #6
Source File: demo.py    From TensorflowHandwritingRecognition with GNU General Public License v3.0 5 votes
def main():
    global image
    cv2.namedWindow("Input")
    cv2.setMouseCallback("Input", click)
    output = np.ones((512, 512, 1))
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (1, 511)
    fontScale = 23
    fontColor = (0, 0, 0)
    lineType = 2
    while True:
        cv2.imshow("Input", image)
        cv2.imshow("Output", output)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("f"):
            cv2.destroyAllWindows()
            break
        if key == ord("r"):
            image = np.ones((640, 640, 1))
        if key == ord("p"):
            clone = image.copy()
            clone = cv2.resize(clone, (32,32))
            final = np.zeros((32, 32, 1))
            for x in range(len(clone)):
                for y in range(len(clone[x])):
                    final[x][y][0] = clone[x][y]  # copy grayscale pixels into a (32, 32, 1) tensor
            pred = p.predict(final)
            print("Predicted", pred)
            output = np.ones((512, 512, 1))
            cv2.putText(output, str(pred), (10, 500), font, fontScale, fontColor, 10, 2) 
Example #7
Source File: api.py    From crvi with MIT License 5 votes
def get_tasks():
    # get the uploaded file from the request
    # url = request.form['url']
    url = request.files['url']

    # send the file for prediction
    sender = predict.predict(url)

    # get the predicted value
    rec = sender.predict_only()

    # #list of out values
    # outputlist=[rec]

    # #for multiple json apis
    # tasks = []

    # tasks1 = [
    #     {
    #         'value': outputlist[0],

    #     },

    # ]
    # tasks.append(tasks1)
    # return jsonify({'tasks': tasks})
    return jsonify({'cash': rec}) 
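request.files and jsonify mark this handler as a Flask view. A hedged sketch of the route registration around it is shown below; the URL rule and app wiring are assumptions, not the crvi project's actual routing.

from flask import Flask, request, jsonify
import predict

app = Flask(__name__)

# Hypothetical route; the crvi project defines its own URL rules.
@app.route("/predict", methods=["POST"])
def get_tasks():
    url = request.files['url']               # uploaded image file
    rec = predict.predict(url).predict_only()
    return jsonify({'cash': rec})

if __name__ == "__main__":
    app.run()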
Example #8
Source File: server.py    From iLID with MIT License 5 votes
def get_prediction(file_path):

    LABEL_MAP = {
        0 : "English",
        1 : "German",
        2 : "French",
        3 : "Spanish"
    }

    # TODO remove this for production
    # predictions = [[0.3, 0.7]]
    predictions = predict(file_path, app.config["PROTOTXT"], app.config["MODEL"], app.config["UPLOAD_FOLDER"])
    predictions = np.mean(predictions, axis=0).tolist()

    print(predictions)

    pred_with_label = {LABEL_MAP[index] : prob for index, prob in enumerate(predictions)}

    file_path = file_path + "?cachebuster=%s" % time.time()
    result = {
        "audio" : {
            "url" : "%s" % file_path,
        },
        "predictions" : pred_with_label
    }

    return result 
Example #9
Source File: predict-from-video.py    From facial-expression-recognition-using-cnn with GNU General Public License v3.0 5 votes
def predict_emotion(self, image):
    image.resize([NETWORK.input_size, NETWORK.input_size], refcheck=False)
    emotion, confidence = predict(image, self.model, self.shape_predictor)
    return emotion, confidence 
Example #10
Source File: interactive_predict.py    From keyphrase-generation-rl with MIT License 5 votes
def main(opt):
    # load vocab
    word2idx, idx2word, vocab = load_vocab(opt)
    # load data
    # read tokenized text file and convert them to 2d list of words
    src_file = opt.src_file
    #trg_file = opt.trg_file
    #tokenized_train_pairs = read_src_and_trg_files(src_file, trg_file, is_train=False, remove_eos=opt.remove_title_eos)  # 2d list of word
    if opt.title_guided:
        tokenized_src, tokenized_title = read_tokenized_src_file(src_file, remove_eos=opt.remove_title_eos, title_guided=True)
    else:
        tokenized_src = read_tokenized_src_file(src_file, remove_eos=opt.remove_title_eos, title_guided=False)
        tokenized_title = None
    # convert the 2d list of words to a list of dictionary, with keys 'src', 'src_oov', 'trg', 'trg_copy', 'src_str', 'trg_str', 'oov_dict', 'oov_list'
    # since we don't need the targets during testing, 'trg' and 'trg_copy' are some dummy variables
    #test_one2many = build_dataset(tokenized_train_pairs, word2idx, idx2word, opt, mode="one2many", include_original=True)
    test_one2many = build_interactive_predict_dataset(tokenized_src, word2idx, idx2word, opt, tokenized_title)
    # build the data loader
    test_one2many_dataset = KeyphraseDataset(test_one2many, word2idx=word2idx, idx2word=idx2word,
                                             type='one2many', delimiter_type=opt.delimiter_type, load_train=False, remove_src_eos=opt.remove_src_eos, title_guided=opt.title_guided)
    test_loader = DataLoader(dataset=test_one2many_dataset,
                             collate_fn=test_one2many_dataset.collate_fn_one2many,
                             num_workers=opt.batch_workers, batch_size=opt.batch_size, pin_memory=True,
                             shuffle=False)
    # init the pretrained model
    model = predict.init_pretrained_model(opt)

    # Print out predict path
    print("Prediction path: %s" % opt.pred_path)

    # predict the keyphrases of the src file and output it to opt.pred_path/predictions.txt
    predict.predict(test_loader, model, opt) 
Example #11
Source File: ai.py    From Game-Bot with Apache License 2.0 4 votes
def main():
    # Get Model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    model.load_weights("Data/Model/weights.h5")

    print('AI start now!')

    while 1:
        # Get screenshot:
        screen = ImageGrab.grab()
        # Image to numpy array:
        screen = np.array(screen)
        # 4 channel(PNG) to 3 channel(JPG)
        Y = predict(model, screen)
        if Y == [0, 0, 0, 0]:
            # No action
            continue
        elif Y[0] == -1 and Y[1] == -1:
            # Only keyboard action.
            key = get_key(Y[3])
            if Y[2] == 1:
                # Press:
                press(key)
            else:
                # Release:
                release(key)
        elif Y[2] == 0 and Y[3] == 0:
            # Only mouse action.
            click(Y[0], Y[1])
        else:
            # Mouse and keyboard action.
            # Mouse:
            click(Y[0], Y[1])
            # Keyboard:
            key = get_key(Y[3])
            if Y[2] == 1:
                # Press:
                press(key)
            else:
                # Release:
                release(key) 
Example #12
Source File: interactive_predict.py    From keyphrase-generation-rl with MIT License 4 votes
def process_opt(opt):
    if opt.seed > 0:
        torch.manual_seed(opt.seed)

    if torch.cuda.is_available():
        if not opt.gpuid:
            opt.gpuid = 0
        opt.device = torch.device("cuda:%d" % opt.gpuid)
    else:
        opt.device = torch.device("cpu")
        opt.gpuid = -1
        print("CUDA is not available, fall back to CPU.")

    opt.exp = 'predict.' + opt.exp
    if opt.one2many:
        opt.exp += '.one2many'

    if opt.one2many_mode == 1:
        opt.exp += '.cat'

    if opt.copy_attention:
        opt.exp += '.copy'

    if opt.coverage_attn:
        opt.exp += '.coverage'

    if opt.review_attn:
        opt.exp += '.review'

    if opt.orthogonal_loss:
        opt.exp += '.orthogonal'

    if opt.use_target_encoder:
        opt.exp += '.target_encode'

    if hasattr(opt, 'bidirectional') and opt.bidirectional:
        opt.exp += '.bi-directional'
    else:
        opt.exp += '.uni-directional'

    # fill the experiment name and timestamp into the prediction path
    if opt.pred_path.find('%s') > 0:
        opt.pred_path = opt.pred_path % (opt.exp, opt.timemark)

    if not os.path.exists(opt.pred_path):
        os.makedirs(opt.pred_path)

    if not opt.one2many and opt.one2many_mode > 0:
        raise ValueError("You cannot choose one2many mode without the -one2many options.")

    if opt.one2many and opt.one2many_mode == 0:
        raise ValueError("If you choose one2many, you must specify the one2many mode.")

    #if opt.greedy and not opt.one2many:
    #    raise ValueError("Greedy sampling can only be used in one2many mode.")
    return opt 
Example #13
Source File: live.py    From Vocalize-Sign-Language with Apache License 2.0 4 votes
def main():
    # Getting model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    # Getting weights
    model.load_weights("Data/Model/weights.h5")
    
    print('Press "ESC" button for exit.')

    # Get an image from the camera, run prediction, and speak the result in another process; repeat.
    cap = cv2.VideoCapture(0)
    old_char = ''
    while 1:
        ret, img = cap.read()
        
        # Cropping image:
        img_height, img_width = img.shape[:2]
        side_width = int((img_width-img_height)/2)
        img = img[0:img_height, side_width:side_width+img_height]
        
        # Show window:
        cv2.imshow('VSL', cv2.flip(img,1)) # cv2.flip(img,1) : Flip(mirror effect) for easy handling.
        
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = imresize(img, (img_size, img_size, channel_size))
        img = 1-np.array(img).astype('float32')/255.
        img = img.reshape(1, img_size, img_size, channel_size)
        
        Y_string, Y_possibility = predict(model, img)
        
        if Y_possibility < 0.4: # low confidence: reset so the same character can be vocalized again
            old_char = ''
        
        if platform.system() == 'Darwin' and old_char != Y_string and Y_possibility > 0.6:
            print(Y_string, Y_possibility)
            arg = 'say {0}'.format(Y_string)
            # Speak the prediction in a separate process
            Process(target=os.system, args=(arg,)).start()
            old_char = Y_string
        if cv2.waitKey(200) == 27: # Decimal 27 = Esc
            break
    cap.release()
    cv2.destroyAllWindows()
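Examples #11 and #13 both call a project-level helper predict(model, X) that wraps a Keras model. A minimal sketch of such a helper for the sign-language case follows; the label set and return format are assumptions, not the Vocalize-Sign-Language implementation.

import string
import numpy as np

# Hypothetical helper; the real project defines its own predict(model, img).
def predict(model, img):
    probs = model.predict(img)[0]             # img shaped (1, img_size, img_size, channel_size)
    idx = int(np.argmax(probs))
    return string.ascii_uppercase[idx], float(probs[idx])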