Python tensorpack.utils.stats.RatioCounter() Examples

The following are 12 code examples of tensorpack.utils.stats.RatioCounter(), collected from open-source projects that use tensorpack. You may also want to check out all available functions and classes of the module tensorpack.utils.stats.
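As a quick orientation before the examples: RatioCounter keeps a running ratio. Each call to feed(cnt, tot) adds cnt to the numerator and tot to the denominator, and the ratio property returns their quotient. A minimal sketch follows; the variable names are illustrative only and not taken from any example below.

from tensorpack.utils.stats import RatioCounter

wrong = RatioCounter()
wrong.feed(2, 8)    # 2 wrong predictions in a batch of 8
wrong.feed(1, 8)    # 1 wrong prediction in another batch of 8
print(wrong.ratio)  # 3 / 16 = 0.1875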
Example #1
Source File: imagenet_utils.py    From benchmarks with The Unlicense
def eval_classification(model, sessinit, dataflow):
    """
    Eval a classification model on the dataset. It assumes the model inputs are
    named "input" and "label", and that the graph contains the "wrong-top1" and "wrong-top5" tensors.
    """
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Example #2
Source File: imagenet-resnet-transWino-prune.py    From sparse-winograd-cnn with MIT License
def eval_on_ILSVRC12(model_file, data_dir):
    ds = get_data('val')
    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(model_file),
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, ds)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for o in pred.get_result():
        batch_size = o[0].shape[0]
        acc1.feed(o[0].sum(), batch_size)
        acc5.feed(o[1].sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Example #3
Source File: tfgo.py    From tensorflow-recipes with Apache License 2.0
def eval(model_file, path, k, max_eval=None):
    df_val = get_data(os.path.join(path, 'go_val.lmdb'), shuffle=True, isTrain=False)
    if max_eval:
        df_val = FixedSizeData(df_val, max_eval)
    pred_config = PredictConfig(
        model=Model(k, add_wrong=True),
        session_init=get_model_loader(model_file),
        input_names=['feature_planes', 'labels', 'labels_2d'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, df_val)
    acc1, acc5 = RatioCounter(), RatioCounter()
    try:
        for o in pred.get_result():
            batch_size = o[0].shape[0]
            acc1.feed(o[0].sum(), batch_size)
            acc5.feed(o[1].sum(), batch_size)
    except Exception as e:
        print(e)
        from IPython import embed
        embed()
    err1 = (acc1.ratio) * 100
    err5 = (acc5.ratio) * 100
    print("Top1 Accuracy: {0:.2f}% Error: {1:.2f}% Random-Guess: ~0.44%".format(100 - err1, err1))
    print("Top5 Accuracy: {0:.2f}% Error: {1:.2f}% Random-Guess: ~2.00%".format(100 - err5, err5)) 
Example #4
Source File: train.py    From tensorflow-recipes with Apache License 2.0
def eval(model_file, path, k, max_eval=None):
    df_val = get_data(os.path.join(path, 'go_val.lmdb'), shuffle=True, isTrain=False)
    if max_eval:
        df_val = FixedSizeData(df_val, max_eval)
    pred_config = PredictConfig(
        model=Model(k, add_wrong=True),
        session_init=get_model_loader(model_file),
        input_names=['feature_planes', 'labels', 'labels_2d'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, df_val)
    acc1, acc5 = RatioCounter(), RatioCounter()
    try:
        for o in pred.get_result():
            batch_size = o[0].shape[0]
            acc1.feed(o[0].sum(), batch_size)
            acc5.feed(o[1].sum(), batch_size)
    except Exception as e:
        print(e)
        from IPython import embed
        embed()
    err1 = (acc1.ratio) * 100
    err5 = (acc5.ratio) * 100
    print("Top1 Accuracy: {0:.2f}% Error: {1:.2f}% Random-Guess: ~0.44%".format(100 - err1, err1))
    print("Top5 Accuracy: {0:.2f}% Error: {1:.2f}% Random-Guess: ~2.00%".format(100 - err5, err5)) 
Example #5
Source File: imagenet_utils.py    From GroupNorm-reproduce with Apache License 2.0
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Example #6
Source File: imagenet_utils.py    From adanet with MIT License
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label', 'input2', 'label2'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Example #7
Source File: imagenet_utils.py    From tensorpack with Apache License 2.0
def eval_classification(model, sessinit, dataflow):
    """
    Eval a classification model on the dataset. It assumes the model inputs are
    named "input" and "label", and that the graph contains the "wrong-top1" and "wrong-top5" tensors.
    """
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Example #8
Source File: imagenet_utils.py    From ghostnet with Apache License 2.0
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Example #9
Source File: imagenet_utils.py    From webvision-2.0-benchmarks with Apache License 2.0
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5', 'res-top5', 'label', 'logits']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    top5s = []
    labels = []
    logits = []
    # avoid shadowing the predictor `pred` with the per-batch top-5 predictions
    for top1, top5, top5_pred, label, logit in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
        top5s.extend(top5_pred.tolist())
        labels.extend(label.tolist())
        logits.extend(logit.tolist())
    with open("top5_resnet2x.json", "w") as f:
        json.dump(top5s, f)
    
    with open("labels_resnet2x.json", "w") as f:
        json.dump(labels, f)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
    return acc1.ratio, acc5.ratio 
Example #10
Source File: imagenet_utils.py    From LQ-Nets with MIT License
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Example #11
Source File: eval_tf.py    From imgclsmob with MIT License
def test(net,
         session_init,
         val_dataflow,
         do_calc_flops=False,
         extended_log=False):
    """
    Main test routine.

    Parameters
    ----------
    net : obj
        Model.
    session_init : SessionInit
        Session initializer.
    val_dataflow : DataFlow
        Validation dataset.
    do_calc_flops : bool, default False
        Whether to calculate the model complexity (FLOPs).
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    pred_config = PredictConfig(
        model=net,
        session_init=session_init,
        input_names=["input", "label"],
        output_names=["wrong-top1", "wrong-top5"]
    )
    err_top1 = RatioCounter()
    err_top5 = RatioCounter()

    tic = time.time()
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(val_dataflow), device="/gpu:0"))

    for _ in tqdm.trange(val_dataflow.size()):
        err_top1_val, err_top5_val = pred()
        batch_size = err_top1_val.shape[0]
        err_top1.feed(err_top1_val.sum(), batch_size)
        err_top5.feed(err_top5_val.sum(), batch_size)

    err_top1_val = err_top1.ratio
    err_top5_val = err_top5.ratio

    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))

    if do_calc_flops:
        calc_flops(model=net) 
Example #12
Source File: ann_app_utils.py    From petridishnn with MIT License
def evaluate_ilsvrc(args, subset, model_cls):
    ds = get_augmented_data.get_ilsvrc_augmented_data(subset, args, do_multiprocess=False)
    INPUT_SIZE = ILSVRC_DEFAULT_INPUT_SIZE
    model = model_cls(INPUT_SIZE, args)

    args.store_basename = None  # disabled for now; it was previously used for storing predictions

    output_names = []
    accs = []
    n_preds = 0
    if args.num_anytime_preds == 0:
        output_names.append('dummy_image_mean')
    else:
        for i, w in enumerate(model.weights):
            if w > 0:
                n_preds += 1
                scope_name = model.compute_scope_basename(i)
                scope_name = model.prediction_scope(scope_name) 
                output_names.append('{}/wrong-top1'.format(scope_name))
                output_names.append('{}/wrong-top5'.format(scope_name))
                accs.extend([stats.RatioCounter(), stats.RatioCounter()])
                #output_names.append('{}/linear/output:0'.format(scope_name))
            if args.num_anytime_preds > 0 and n_preds >= args.num_anytime_preds:
                break

    pred_config = PredictConfig(
        model=model,
        input_names=['input', 'label'],
        output_names=output_names
    )
    if args.load:
        pred_config.session_init = get_model_loader(args.load)
    pred = SimpleDatasetPredictor(pred_config, ds)

    if args.store_basename is not None:
        store_fn = args.store_basename + "_{}.bin".format(subset)
        f_store_out = open(store_fn, 'wb')

    n_batches = 0
    import time
    start_time = time.time() 
    for o in pred.get_result():
        n_batches += 1
        if args.num_anytime_preds == 0:
            continue
        if args.store_basename is not None:
            preds = o[0]
            f_store_out.write(preds)
        batch_size = o[0].shape[0] 
        for i, acc in enumerate(accs):
            acc.feed(o[i].sum(), batch_size)
    logger.info('Inference finished, time: {:.4f}sec'.format(time.time() - start_time))
    if args.num_anytime_preds != 0:
        for i, name in enumerate(output_names):
            logger.info("Name {}, RatioCount {}".format(name, accs[i].ratio))

    if args.store_basename is not None:
        f_store_out.close()