Python caffe.Classifier() Examples

The following are 27 code examples of caffe.Classifier(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module caffe, or try the search function.
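Most of the examples below follow the same basic pattern: build a caffe.Classifier from a deploy prototxt and a .caffemodel, pass the preprocessing options (mean, channel_swap, raw_scale, image_dims) as keyword arguments, and call predict() on images loaded with caffe.io.load_image(). A minimal sketch of that pattern (the file paths and mean values here are placeholders, not files from any particular project):

import numpy as np
import caffe

caffe.set_mode_cpu()

# Placeholder paths -- substitute your own deploy prototxt and trained weights.
net = caffe.Classifier(
    'deploy.prototxt', 'weights.caffemodel',
    mean=np.float32([104.0, 117.0, 123.0]),  # per-channel BGR mean (illustrative values)
    channel_swap=(2, 1, 0),                  # RGB from load_image -> BGR expected by the net
    raw_scale=255,                           # [0, 1] floats -> [0, 255]
    image_dims=(256, 256))                   # resize target before cropping

image = caffe.io.load_image('example.jpg')   # HxWx3 float32 in [0, 1], RGB
probs = net.predict([image], oversample=False)
print(probs[0].argmax())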
Example #1
Source File: caffe_i2v.py    From illustration2vec with MIT License 6 votes
def _extract(self, inputs, layername):
        # NOTE: the following code is adapted from caffe.Classifier
        shape = (
            len(inputs), self.net.image_dims[0],
            self.net.image_dims[1], inputs[0].shape[2])
        input_ = np.zeros(shape, dtype=np.float32)
        for ix, in_ in enumerate(inputs):
            input_[ix] = resize_image(in_, self.net.image_dims)
        # Take center crop.
        center = np.array(self.net.image_dims) / 2.0
        crop = np.tile(center, (1, 2))[0] + np.concatenate([
            -self.net.crop_dims / 2.0,
            self.net.crop_dims / 2.0
        ])
        input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
        # Classify
        caffe_in = np.zeros(
            np.array(input_.shape)[[0, 3, 1, 2]], dtype=np.float32)
        for ix, in_ in enumerate(input_):
            caffe_in[ix] = \
                self.net.transformer.preprocess(self.net.inputs[0], in_)
        out = self.net.forward_all(
            blobs=[layername], **{self.net.inputs[0]: caffe_in})[layername]
        return out 
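To make the center-crop arithmetic above concrete: with image_dims of (256, 256) and crop_dims of (224, 224) (illustrative values, not necessarily the ones illustration2vec uses), the crop window comes out as rows 16:240 and columns 16:240, i.e. a centered 224x224 region of the resized image:

import numpy as np

image_dims = np.array([256, 256])
crop_dims = np.array([224, 224])

center = image_dims / 2.0                       # [128., 128.]
crop = np.tile(center, (1, 2))[0] + np.concatenate([
    -crop_dims / 2.0,
    crop_dims / 2.0,
])
print(crop)                                     # [ 16.  16. 240. 240.]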
Example #2
Source File: test_quick.py    From icnn with MIT License 6 votes
def __load_net(self):
        # Load averaged image of ImageNet
        img_mean_file = './examples/data/ilsvrc_2012_mean.npy'
        img_mean = np.load(img_mean_file)
        img_mean = np.float32([img_mean[0].mean(), img_mean[1].mean(), img_mean[2].mean()])

        # Load CNN model
        model_file = './examples/net/VGG_ILSVRC_19_layers/VGG_ILSVRC_19_layers.caffemodel'
        prototxt_file = './examples/net/VGG_ILSVRC_19_layers/VGG_ILSVRC_19_layers.prototxt'
        channel_swap = (2, 1, 0)
        net = caffe.Classifier(prototxt_file, model_file,
                               mean=img_mean, channel_swap=channel_swap)
        h, w = net.blobs['data'].data.shape[-2:]
        net.blobs['data'].reshape(1, 3, h, w)

        return net 
Example #3
Source File: predict.py    From iLID with MIT License 6 votes
def predict(sound_file, prototxt, model, output_path):

  image_files = wav_to_images(sound_file, output_path)

  caffe.set_mode_cpu()
  net = caffe.Classifier(prototxt, model,
                         #image_dims=(224, 224)
                         #channel_swap=(2,1,0),
                         raw_scale=255  # rescale the [0, 1] floats from caffe.io.load_image to [0, 255]
                         #caffe.TEST
                        )

  input_images = np.array([caffe.io.load_image(image_file, color=False) for image_file in image_files["melfilter"]])
  #input_images = np.swapaxes(input_images, 1, 3)

  #prediction = net.forward_all(data=input_images)["prob"]

  prediction = net.predict(input_images, False)  # predict takes any number of images, and formats them for the Caffe net automatically

  print prediction
  print 'prediction shape:', prediction[0].shape
  print 'predicted class:', prediction[0].argmax()
  print image_files

  return prediction 
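The raw_scale=255 argument exists because caffe.io.load_image() returns floats in [0, 1]; the Classifier's transformer multiplies the input back up to [0, 255] before mean subtraction. A numpy-only sketch of what the standard preprocessing amounts to (the mean values are illustrative):

import numpy as np

rgb = np.random.rand(256, 256, 3).astype(np.float32)  # stand-in for caffe.io.load_image()
mean_bgr = np.float32([104.0, 117.0, 122.0])           # illustrative per-channel mean

x = rgb * 255.0                      # raw_scale=255: [0, 1] -> [0, 255]
x = x[:, :, (2, 1, 0)]               # channel_swap=(2, 1, 0): RGB -> BGR
x = x - mean_bgr                     # mean subtraction
x = x.transpose(2, 0, 1)             # HxWxC -> CxHxW, the layout Caffe blobs use
print(x.shape)                       # (3, 256, 256)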
Example #4
Source File: batcountry.py    From bat-country with MIT License 6 votes
def __init__(self, base_path, deploy_path=None, model_path=None,
		patch_model="./tmp.prototxt", mean=(104.0, 116.0, 122.0),
		channels=(2, 1, 0)):
		# if the deploy path is None, set the default
		if deploy_path is None:
			deploy_path = base_path + "/deploy.prototxt"

		# if the model path is None, set it to the default GoogLeNet model
		if model_path is None:
			model_path = base_path + "/bvlc_googlenet.caffemodel"

		# check to see if the model should be patched to compute gradients
		if patch_model:
			model = caffe.io.caffe_pb2.NetParameter()
			text_format.Merge(open(deploy_path).read(), model)
			model.force_backward = True
			f = open(patch_model, "w")
			f.write(str(model))
			f.close()

		# load the network and store the patched model path
		self.net = caffe.Classifier(patch_model, model_path, mean=np.float32(mean),
			channel_swap=channels)
		self.patch_model = patch_model 
Example #5
Source File: test.py    From icnn with MIT License 6 votes
def __load_net(self):
        # Load averaged image of ImageNet
        img_mean_file = './examples/data/ilsvrc_2012_mean.npy'
        img_mean = np.load(img_mean_file)
        img_mean = np.float32([img_mean[0].mean(), img_mean[1].mean(), img_mean[2].mean()])

        # Load CNN model
        model_file = './examples/net/VGG_ILSVRC_19_layers/VGG_ILSVRC_19_layers.caffemodel'
        prototxt_file = './examples/net/VGG_ILSVRC_19_layers/VGG_ILSVRC_19_layers.prototxt'
        channel_swap = (2, 1, 0)
        net = caffe.Classifier(prototxt_file, model_file,
                               mean=img_mean, channel_swap=channel_swap)
        h, w = net.blobs['data'].data.shape[-2:]
        net.blobs['data'].reshape(1, 3, h, w)

        return net 
Example #6
Source File: deep_extractor.py    From omgh with MIT License 6 votes
def __init__(self, storage, model_file=settings.BERKELEY_MODEL_FILE, pretrained_file=settings.BERKELEY_CROP_PRET, image_mean=settings.ILSVRC_MEAN, make_net=True, xDim=4096):
        super(CNN_Features_CAFFE_REFERENCE, self).__init__(storage)
        self.STORAGE_SUB_NAME = 'cnn_feature_berkeley'
        self.feature_layer = 'fc7'
        self.feature_crop_index = 0
        self.xDim = xDim

        self.sub_folder = self.storage.get_sub_folder(
            self.STORAGE_SUPER_NAME, self.STORAGE_SUB_NAME)
        self.storage.ensure_dir(self.sub_folder)

        self.model_file = model_file
        self.pretrained_file = pretrained_file
        self.image_mean = image_mean
        self.full = False

        if make_net:
            self.net = caffe.Classifier(self.model_file,
                                        self.pretrained_file,
                                        mean=np.load(self.image_mean),
                                        channel_swap=(2, 1, 0),
                                        raw_scale=255, gpu=True) 
Example #7
Source File: localization.py    From cloudless with Apache License 2.0 6 votes
def classify(images, config, weights):
    """ Classifies our region proposals. """
    print("Classifying: %d region images" % len(images))

    assert(os.path.isfile(config) and os.path.isfile(weights))

    # Caffe swaps RGB channels
    channel_swap = [2, 1, 0]

    # TODO: resizing on incoming config to make batching more efficient, predict
    # loops over each image, slow
    # Make classifier.
    classifier = caffe.Classifier(config,
                                  weights,
                                  raw_scale=255,
                                  channel_swap=channel_swap,
                                 )

    # Classify.
    return classifier.predict(images, oversample=False) 
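As the TODO above notes, Classifier.predict() preprocesses and crops one image at a time. A rough sketch of pushing the preprocessed images through forward_all() instead, using the transformer the Classifier already owns (this resizes each image straight to the net's input size rather than doing the resize-then-crop that predict() performs, and it assumes the deploy prototxt names its softmax output 'prob'):

import numpy as np

def classify_batched(classifier, images):
    # Preprocess every image into one NxCxHxW array; forward_all() then runs it
    # through the net in chunks of the input blob's batch size.
    data = np.asarray([
        classifier.transformer.preprocess(classifier.inputs[0], img)
        for img in images
    ])
    out = classifier.forward_all(**{classifier.inputs[0]: data})
    return out['prob']  # assumed output blob name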
Example #8
Source File: classify_caffe_server.py    From BerryNet with GNU General Public License v3.0 6 votes
def create_classifier(pretrained_model):
    """Creates a model from saved caffemodel file and returns a classifier."""
    # Creates model from saved .caffemodel.

    # The following file is shipped inside the caffe-doc Debian package
    model_def = os.path.join("/", "usr", "share", "doc", "caffe-doc",
                             "models","bvlc_reference_caffenet",
                             "deploy.prototxt")
    image_dims = [ 256, 256 ]
    # The following file is shipped inside the python3-caffe-cpu Debian package
    mean = np.load(os.path.join('/', 'usr', 'lib', 'python3',
                                'dist-packages', 'caffe', 'imagenet',
                                'ilsvrc_2012_mean.npy'))
    channel_swap = [2, 1, 0]
    raw_scale = 255.0

    caffe.set_mode_cpu()
    classifier = caffe.Classifier(model_def, pretrained_model,
                                  image_dims=image_dims, mean=mean,
                                  raw_scale=raw_scale,
                                  channel_swap=channel_swap)
    return classifier 
Example #9
Source File: caffe_i2v.py    From illustration2vec with MIT License 6 votes
def make_i2v_with_caffe(net_path, param_path, tag_path=None, threshold_path=None):
    mean = np.array([ 164.76139251,  167.47864617,  181.13838569])
    net = Classifier(
        net_path, param_path, mean=mean, channel_swap=(2, 1, 0))

    kwargs = {}
    if tag_path is not None:
        tags = json.loads(open(tag_path, 'r').read())
        assert(len(tags) == 1539)
        kwargs['tags'] = tags

    if threshold_path is not None:
        fscore_threshold = np.load(threshold_path)['threshold']
        kwargs['threshold'] = fscore_threshold

    return CaffeI2V(net, **kwargs) 
Example #10
Source File: cub_utils.py    From omgh with MIT License 5 votes
def get_custom_net(model_def, pretrained_file, test_phase=True, gpu_mode=True):
        net = caffe.Classifier(model_def, pretrained_file, mean=np.load(settings.ILSVRC_MEAN),  channel_swap=(2, 1, 0), raw_scale=255)
        if test_phase:
            net.set_phase_test()
        if gpu_mode:
            net.set_mode_gpu()

        return net 
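The net.set_phase_test() and net.set_mode_gpu() calls here are instance methods from an older pycaffe; in Caffe 1.0 they were replaced by module-level functions, and a Classifier built from a deploy prototxt is already in TEST phase. A rough equivalent under the newer API, taking the mean file path as an argument instead of settings.ILSVRC_MEAN:

import numpy as np
import caffe

def get_custom_net(model_def, pretrained_file, mean_npy_path, gpu_mode=True):
    # Newer pycaffe: the compute mode is set globally, not per net.
    if gpu_mode:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    return caffe.Classifier(model_def, pretrained_file,
                            mean=np.load(mean_npy_path),
                            channel_swap=(2, 1, 0), raw_scale=255)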
Example #11
Source File: loaders.py    From deep-visualization-toolbox with MIT License 5 votes
def load_trained_net(model_prototxt = None, model_weights = None):
    assert (model_prototxt is None) == (model_weights is None), 'Specify both model_prototxt and model_weights or neither'
    if model_prototxt is None:
        load_dir = '/home/jyosinsk/results/140311_234854_afadfd3_priv_netbase_upgraded/'
        model_prototxt = load_dir + 'deploy_1.prototxt'
        model_weights = load_dir + 'caffe_imagenet_train_iter_450000'

    print 'LOADER: loading net:'
    print '  ', model_prototxt
    print '  ', model_weights
    net = caffe.Classifier(model_prototxt, model_weights)
    #net.set_phase_test()

    return net 
Example #12
Source File: cub_utils.py    From omgh with MIT License 5 votes
def get_bvlc_net(test_phase=True, gpu_mode=True):
        net = caffe.Classifier(settings.DEFAULT_MODEL_FILE, settings.DEFAULT_PRETRAINED_FILE, mean=np.load(settings.ILSVRC_MEAN), channel_swap=(2, 1, 0), raw_scale=255)
        if test_phase:
            net.set_phase_test()
        if gpu_mode:
            net.set_mode_gpu()

        return net 
Example #13
Source File: caffe_engine.py    From BerryNet with GNU General Public License v3.0 5 votes
def __init__(self, model_def, pretrained_model, mean_file, label, image_dims = [256,256], channel_swap=[2,1,0], raw_scale=255.0, top_k=5):
        super(CaffeEngine, self).__init__()

        # Load model
        caffe.set_mode_cpu()
        self.classifier = caffe.Classifier(model_def, pretrained_model, image_dims=image_dims, mean=mean_file, raw_scale=raw_scale, channel_swap=channel_swap)
        
        # Load labels
        self.labels = [line.rstrip() for line in open(label)]

        self.top_k = top_k 
Example #14
Source File: deep_extractor.py    From omgh with MIT License 5 votes
def __init__(self, storage, model_file=settings.DEFAULT_MODEL_FILE, pretrained_file=settings.DEFAULT_PRETRAINED_FILE, image_mean=settings.ILSVRC_MEAN, full=False, make_net=True, feature_layer='fc7', crop_index=4, xDim=4096):
        super(CNN_Features_CAFFE_REFERENCE, self).__init__(storage)
        self.STORAGE_SUB_NAME = 'cnn_feature_caffe_reference'
        self.full = full
        if self.full:
            self.STORAGE_SUB_NAME = 'cnn_feature_caffe_reference_full'
        self.feature_layer = feature_layer
        self.feature_crop_index = crop_index
        self.xDim = xDim
        if self.full:
            self.full_length = settings.FULL_LENGTH

        self.sub_folder = self.storage.get_sub_folder(
            self.STORAGE_SUPER_NAME, self.STORAGE_SUB_NAME)
        self.storage.ensure_dir(self.sub_folder)

        self.model_file = model_file
        self.pretrained_file = pretrained_file
        self.image_mean = image_mean

        if make_net:
            self.net = caffe.Classifier(self.model_file,
                                        self.pretrained_file,
                                        mean=np.load(self.image_mean),
                                        channel_swap=(2, 1, 0),
                                        raw_scale=255)
            if self.full:
                self.net = caffe.Classifier(self.model_file,
                                            self.pretrained_file,
                                            mean=np.load(self.image_mean),
                                            channel_swap=(2, 1, 0),
                                            raw_scale=255,
                                            image_dims=(256, 256))
            self.net.set_mode_gpu() 
Example #15
Source File: pydemo.py    From omgh with MIT License 5 votes
def main(vid_file, layer, index):
    cap = cv2.VideoCapture()
    cap.open(vid_file)

    net = caffe.Classifier(
        '/home/ipl/installs/caffe-rc/models/bvlc_reference_caffenet/deploy.prototxt',
        '/home/ipl/installs/caffe-rc/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
        mean=np.load('/home/ipl/installs/caffe-rc/python/caffe/imagenet/ilsvrc_2012_mean.npy'),
        channel_swap=(2, 1, 0), raw_scale=255)
    net.set_phase_test()
    net.set_mode_gpu()

    fig_img = plt.figure(figsize=(15, 10))
    ax_img = fig_img.add_subplot(121)

    ax_feat = fig_img.add_subplot(122)
    fig_img.show()

    while True:
        _, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        net.predict([frame], oversample=False)
        feat = net.blobs[layer].data[0, :, :, :]

        ax_img.imshow(frame)
        # ax_feat.matshow(feat)
        vis_square(ax_feat, feat, padval=1)

        plt.draw()

        key = cv2.waitKey(1)
        if key >= 30:
            break

    cap.release() 
Example #16
Source File: SVMTraining.py    From MTCNN-VGG-face with MIT License 5 votes
def Training():
   # X=read_imagelist('/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/trainSVM.txt')
    caffe.set_mode_gpu()
    net = caffe.Classifier(
        '/home/kjin/caffe-master/examples/VGGNet/VGG_FACE_deploy.prototxt',
        '/home/kjin/caffe-master/examples/VGGNet/snapshot_iter_122714.caffemodel',
        mean=np.load('/home/kjin/caffe-master/examples/VGGNet/VGGNet_mean.npy'))
    Lists = os.listdir('/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/MyImage/')
    Feature = [None] * len(Lists)
    i=0
    for list in Lists:
        X1 = '/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/MyImage/' + list
        X = read_imagelist(X1)
        out = net.forward_all(data=X)
        Feature[i] = np.float64(out['fc7'])
        Feature[i] = np.reshape(Feature[i], 4096)
        i = i + 1

    clf = svm.SVC()

    # neigh = KNeighborsClassifier(n_neighbors=3, metric='euclidean')

    labels = read_labels('/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/Label.txt')

    clf.fit(Feature, labels)
    joblib.dump(clf, 'SVM.model') 
Example #17
Source File: KNNTraining.py    From MTCNN-VGG-face with MIT License 5 votes
def Training():
   # X=read_imagelist('/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/trainSVM.txt')
    caffe.set_mode_gpu()
    net = caffe.Classifier(
        '/home/kjin/caffe-master/examples/VGGNet/VGG_FACE_deploy.prototxt',
        '/home/kjin/caffe-master/examples/VGGNet/VGG_test_20170512/vgg_10575_iter_150000.caffemodel',
        mean=np.load('/home/kjin/caffe-master/examples/VGGNet/VGGNet_mean.npy'))
    Lists = os.listdir('/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/MyImageNew/')
    Feature = [None] * len(Lists)
    i=0
    for list in Lists:
        X1 = '/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/MyImage/' + list
        X = read_imagelist(X1)
        out = net.forward_all(data=X)
        Feature[i] = np.float64(out['fc7'])
        Feature[i] = np.reshape(Feature[i], 4096)
        i = i + 1

    # clf = svm.SVC(C=1.0, cache_size=200, class_weight='balanced', coef0=0.0,
    #               decision_function_shape=None, degree=3, gamma=0.0001, kernel='rbf',
    #               max_iter=-1, probability=False, random_state=None, shrinking=True,
    #               tol=0.001, verbose=False)

    neigh = KNeighborsClassifier(n_neighbors=3, metric='euclidean')

    labels = read_labels('/home/kjin/caffe-master/examples/VGGNet/pycaffe-mtcnn-master/NewLabel.txt')

    neigh.fit(Feature, labels)
    joblib.dump(neigh, 'knn.model') 
Example #18
Source File: app.py    From mix-and-match with MIT License 5 votes
def __init__(self, model_def_file, pretrained_model_file, mean_file,
                 raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
        logging.info('Loading net and associated files...')
        if gpu_mode:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()
        self.net = caffe.Classifier(
            model_def_file, pretrained_model_file,
            image_dims=(image_dim, image_dim), raw_scale=raw_scale,
            mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
        )

        with open(class_labels_file) as f:
            labels_df = pd.DataFrame([
                {
                    'synset_id': l.strip().split(' ')[0],
                    'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
                }
                for l in f.readlines()
            ])
        self.labels = labels_df.sort('synset_id')['name'].values

        self.bet = cPickle.load(open(bet_file))
        # A bias to prefer children nodes in single-chain paths
        # I am setting the value to 0.1 as a quick, simple model.
        # We could use better psychological models here...
        self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1 
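The mean=np.load(mean_file).mean(1).mean(1) expression collapses a full (3, H, W) mean image (e.g. ilsvrc_2012_mean.npy) into three per-channel means, which Classifier can broadcast over any input size. A small sketch of that reduction:

import numpy as np

mean_image = np.random.rand(3, 256, 256)   # stand-in for np.load('ilsvrc_2012_mean.npy')
per_channel = mean_image.mean(1).mean(1)   # average over height, then width
print(per_channel.shape)                   # (3,) -- one mean per (BGR) channel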
Example #19
Source File: deepdreamer.py    From deepdreamer with GNU General Public License v3.0 5 votes
def deepdream_video(
        video, iter_n=10, octave_n=4, octave_scale=1.4,
        end="inception_4c/output", clip=True, network="bvlc_googlenet",
        frame_rate=24):

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    print("Extracting video...")
    _extract_video(video)

    output_dir = _output_video_dir(video)
    images = listdir(output_dir)

    print("Dreaming...")
    for image in images:
        image = "{}/{}".format(output_dir, image)
        img = np.float32(img_open(image))
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(img)).save(image)

    print("Creating dream video...")
    _create_video(video, frame_rate)
    print("Dream video created.") 
Example #20
Source File: deepdreamer.py    From deepdreamer with GNU General Public License v3.0 5 votes
def list_layers(network="bvlc_googlenet"):
    # Load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)
    return net.blobs.keys()
Example #21
Source File: classifier.py    From thingscoop with MIT License 5 votes
def __init__(self, model, gpu_mode=False):
        self.model = model
        
        kwargs = {}

        if self.model.get("image_dims"):
            kwargs['image_dims'] = tuple(self.model.get("image_dims"))

        if self.model.get("channel_swap"):
            kwargs['channel_swap'] = tuple(self.model.get("channel_swap"))

        if self.model.get("raw_scale"):
            kwargs['raw_scale'] = float(self.model.get("raw_scale"))

        if self.model.get("mean"):
            kwargs['mean'] = numpy.array(self.model.get("mean"))
        
        self.net = caffe.Classifier(
            model.deploy_path(),
            model.model_path(),
            **kwargs
        )
        
        self.confidence_threshold = 0.1
        
        if gpu_mode:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()

        self.labels = numpy.array(model.labels())

        if self.model.bet_path():
            self.bet = cPickle.load(open(self.model.bet_path()))
            self.bet['words'] = map(lambda w: w.replace(' ', '_'), self.bet['words'])
        else:
            self.bet = None
        
        self.net.forward() 
Example #22
Source File: app.py    From Deep-Exemplar-based-Colorization with MIT License 5 votes
def __init__(self, model_def_file, pretrained_model_file, mean_file,
                 raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
        logging.info('Loading net and associated files...')
        if gpu_mode:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()
        self.net = caffe.Classifier(
            model_def_file, pretrained_model_file,
            image_dims=(image_dim, image_dim), raw_scale=raw_scale,
            mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
        )

        with open(class_labels_file) as f:
            labels_df = pd.DataFrame([
                {
                    'synset_id': l.strip().split(' ')[0],
                    'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
                }
                for l in f.readlines()
            ])
        self.labels = labels_df.sort('synset_id')['name'].values

        self.bet = cPickle.load(open(bet_file))
        # A bias to prefer children nodes in single-chain paths
        # I am setting the value to 0.1 as a quick, simple model.
        # We could use better psychological models here...
        self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1 
Example #23
Source File: caffe_classify.py    From CloudCV-Old with MIT License 4 votes
def caffe_classify_image(single_image):
    import operator
    import numpy as np

    import scipy.io as sio
    import caffe
    import os

    matWNID = sio.loadmat(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'WNID.mat'))
    WNID_cells = matWNID['wordsortWNID']

    CAFFE_DIR = os.path.normpath(os.path.join(os.path.dirname(caffe.__file__), "..", ".."))

    # Set the right path to your model file, pretrained model,
    # and the image you would like to classify.
    MODEL_FILE = os.path.join(CAFFE_DIR, 'models/bvlc_reference_caffenet/deploy.prototxt')
    PRETRAINED = os.path.join(CAFFE_DIR, 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')

    # caffe.set_phase_test()
    caffe.set_mode_cpu()

    net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                           mean=np.load(os.path.join(
                               CAFFE_DIR, 'python/caffe/imagenet/ilsvrc_2012_mean.npy')).mean(1).mean(1),
                           channel_swap=(2, 1, 0),
                           raw_scale=255,
                           image_dims=(256, 256))

    input_image = caffe.io.load_image(single_image)
    prediction = net.predict([input_image])
    map = {}
    for i, j in enumerate(prediction[0]):
        map[i] = j

    predsorted = sorted(map.iteritems(), key=operator.itemgetter(1), reverse=True)
    top5 = predsorted[0:5]
    topresults = []

    for i in top5:
        # topresults[str(WNID_cells[i, 0][0][0])] = str(i[1])
        topresults.append([str(WNID_cells[i, 0][0][0]), str(i[1])])
    return topresults 
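The dictionary-plus-sorted() dance above is just a top-5 selection by probability; with numpy it reduces to an argsort. A sketch (probs stands in for prediction[0]; the WNID lookup is left out):

import numpy as np

probs = np.random.rand(1000)            # stand-in for prediction[0]
top5_idx = probs.argsort()[::-1][:5]    # indices of the five largest probabilities
top5 = [(int(i), float(probs[i])) for i in top5_idx]
print(top5)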
Example #24
Source File: trainTask.py    From CloudCV-Old with MIT License 4 votes
def customClassifyImages(jobPath, socketid, result_path):

    import caffe
    import scipy.io as sio

    # Establishing connection to send results and write messages
    rs = redis.StrictRedis(host=config.REDIS_HOST, port=6379)

    try:
        ImagePath = os.path.join(jobPath, 'test')
        modelPath = os.path.join(jobPath, 'util')

        new_labels = sio.loadmat(os.path.join(modelPath, 'new_labels.mat'))
        new_labels_cells = new_labels['WNID']

        # Set the right path to your model file, pretrained model,
        # and the image you would like to classify.
        MODEL_FILE = os.path.join(modelPath, 'newCaffeModel.prototxt')
        PRETRAINED = os.path.join(modelPath, 'newCaffeModel.caffemodel')

        # caffe.set_phase_test()
        caffe.set_mode_cpu()

        CAFFE_DIR = os.path.normpath(os.path.join(os.path.dirname(caffe.__file__), "..", ".."))
        net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                               mean=np.load(os.path.join(
                                   CAFFE_DIR, 'python/caffe/imagenet/ilsvrc_2012_mean.npy')).mean(1).mean(1),
                               channel_swap=(2, 1, 0),
                               raw_scale=255,
                               image_dims=(256, 256))

        if os.path.isdir(ImagePath):
            for file_name in os.listdir(ImagePath):
                image_path = os.path.join(ImagePath, file_name)
                if os.path.isfile(image_path):
                    tags = caffe_classify_image(net, image_path, new_labels_cells)  # NOTE: UNDEFINED NAME caffe_classify_image
                    webResult = {}
                    webResult[os.path.join(result_path, file_name)] = tags
                    rs.publish('chat',
                               json.dumps({'web_result': json.dumps(webResult), 'socketid': str(socketid)}))

        rs.publish('chat', json.dumps(
            {'message': 'Classification completed. Thank you for using CloudCV', 'socketid': str(socketid)}))

    except:
        rs.publish('chat', json.dumps({'message': str(traceback.format_exc()), 'socketid': str(socketid)})) 
Example #25
Source File: deepdreamer.py    From deepdreamer with GNU General Public License v3.0 4 votes
def deepdream(
        img_path, zoom=True, scale_coefficient=0.05, irange=100, iter_n=10,
        octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True,
        network="bvlc_googlenet", gif=False, reverse=False, duration=0.1,
        loop=False, gpu=False, gpuid=0):
    img = np.float32(img_open(img_path))
    s = scale_coefficient
    h, w = img.shape[:2]

    if gpu:
        print("Enabling GPU {}...".format(gpuid))
        set_device(gpuid)
        set_mode_gpu()

    # Select, load DNN model
    NET_FN, PARAM_FN, CHANNEL_SWAP, CAFFE_MEAN = _select_network(network)
    net = Classifier(
        NET_FN, PARAM_FN, mean=CAFFE_MEAN, channel_swap=CHANNEL_SWAP)

    img_pool = [img_path]

    # Save settings used in a log file
    logging.info((
        "{} zoom={}, scale_coefficient={}, irange={}, iter_n={}, "
        "octave_n={}, octave_scale={}, end={}, clip={}, network={}, gif={}, "
        "reverse={}, duration={}, loop={}").format(
            img_path, zoom, scale_coefficient, irange, iter_n, octave_n,
            octave_scale, end, clip, network, gif, reverse, duration, loop))

    print("Dreaming...")
    for i in range(irange):
        img = _deepdream(
            net, img, iter_n=iter_n, octave_n=octave_n,
            octave_scale=octave_scale, end=end, clip=clip)
        img_fromarray(np.uint8(img)).save("{}_{}.jpg".format(
            img_path, i))
        if gif:
            img_pool.append("{}_{}.jpg".format(img_path, i))
        print("Dream {} saved.".format(i))
        if zoom:
            img = affine_transform(
                img, [1-s, 1-s, 1], [h*s/2, w*s/2, 0], order=1)
    if gif:
        print("Creating gif...")
        frames = None
        if reverse:
            frames = [img_open(f) for f in img_pool[::-1]]
        else:
            frames = [img_open(f) for f in img_pool]
        writeGif(
            "{}.gif".format(img_path), frames, duration=duration,
            repeat=loop)
        print("gif created.") 
Example #26
Source File: utils_classifiers.py    From DeepVis-PredDiff with MIT License 4 votes
def get_caffenet(netname):
    
    if netname=='googlenet':
     
        # caffemodel paths
        model_path = './Caffe_Models/googlenet/'
        net_fn   = model_path + 'deploy.prototxt'
        param_fn = model_path + 'bvlc_googlenet.caffemodel'
        
        # get the mean (googlenet doesn't do this per feature, but per channel, see train_val.prototxt)
        mean = np.float32([104.0, 117.0, 123.0]) 
        
        # define the neural network classifier
        net = caffe.Classifier(net_fn, param_fn, caffe.TEST, channel_swap = (2,1,0), mean = mean)

    elif netname=='alexnet':
            
        # caffemodel paths
        model_path = './Caffe_Models/bvlc_alexnet/'
        net_fn   = model_path + 'deploy.prototxt'
        param_fn = model_path + 'bvlc_alexnet.caffemodel'
        
        # get the mean
        mean = np.load('./Caffe_Models/ilsvrc_2012_mean.npy')
        # crop mean
        image_dims = (227,227) # see deploy.prototxt file
        excess_h = mean.shape[1] - image_dims[0]
        excess_w = mean.shape[2] - image_dims[1]
        mean = mean[:, excess_h:(excess_h+image_dims[0]), excess_w:(excess_w+image_dims[1])]
        
        # define the neural network classifier
        net = caffe.Classifier(net_fn, param_fn, caffe.TEST, channel_swap = (2,1,0), mean = mean)
        
    elif netname == 'vgg':
    
        # caffemodel paths
        model_path = './Caffe_Models/vgg network/'
        net_fn   = model_path + 'VGG_ILSVRC_16_layers_deploy.prototxt'
        param_fn = model_path + 'VGG_ILSVRC_16_layers.caffemodel'
        
        mean = np.float32([103.939, 116.779, 123.68])    
        
        # define the neural network classifier    
        net = caffe.Classifier(net_fn, param_fn, caffe.TEST, channel_swap = (2,1,0), mean = mean)
        
    else:
        
        print 'Provided netname unknown. Returning None.'
        net = None
    
    return net 
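For the alexnet branch, the mean-cropping arithmetic works out like this (a numeric sketch; note that offsetting by the full excess selects the lower-right 227x227 corner of the 256x256 mean image rather than a centered crop):

import numpy as np

mean = np.zeros((3, 256, 256))             # stand-in for ilsvrc_2012_mean.npy
image_dims = (227, 227)
excess_h = mean.shape[1] - image_dims[0]   # 29
excess_w = mean.shape[2] - image_dims[1]   # 29
mean = mean[:, excess_h:(excess_h + image_dims[0]), excess_w:(excess_w + image_dims[1])]
print(mean.shape)                          # (3, 227, 227)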
Example #27
Source File: evaluate_file_list.py    From iLID with MIT License 4 votes
def evaluate(input_csv, proto, model):
  ''' Run evaluation on a list of WAV, label files '''

  if not os.path.isdir("tmp"):
    os.mkdir("tmp")

  correct_files = []
  incorrect_files = []
  skipped_files = []

  caffe.set_mode_cpu()
  net = caffe.Classifier(proto, model, raw_scale=255)  # rescale the [0, 1] floats from caffe.io.load_image to [0, 255]

  reader = csv.reader(file(input_csv, 'rU'))
  for filename, label in reader:

    # Convert WAV to images
    image_files = wav_to_images(filename, "tmp")

    # some files fail during the conversion, so skip them
    if len(image_files["melfilter"]) == 0:
      skipped_files.append(filename)
      continue

    # Call Caffe and do predicition
    input_images = np.array([caffe.io.load_image(image_file, color=False) for image_file in image_files["melfilter"]])
    prediction = net.predict(input_images, False)
    mean_prediction = np.mean(prediction, axis=0)

    # Evaluation
    best_label = mean_prediction.argmax()
    if best_label == np.int64(label):
      correct_files.append(filename)
    else:
      incorrect_files.append(filename)

  shutil.rmtree("tmp")

  # Stats
  num_correct = len(correct_files)
  num_incorrect = len(incorrect_files)
  print "Correctly Classified: {0} ({1:.2f}%)".format(num_correct, num_correct / float(num_correct + num_incorrect) * 100)
  print "Incorrectly Classified: {0} ({1:.2f}%)".format(num_incorrect, num_incorrect / float(num_correct + num_incorrect) * 100)
  print "Skipped Files: {0}".format(len(skipped_files))

  # Save correct / incorrect filenames in txt file
  correct_out = open("correct_files.txt", "wb")
  incorrect_out = open("incorrect_files.txt", "wb")
  skipped_out = open("skipped_files.txt", "wb")

  correct_out.write("\n".join(correct_files))
  incorrect_out.write("\n".join(incorrect_files))
  skipped_out.write("\n".join(skipped_files))

  correct_out.close()
  incorrect_out.close()
  skipped_out.close()