Python matplotlib.offsetbox.AnnotationBbox() Examples

The following are 16 code examples of matplotlib.offsetbox.AnnotationBbox(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module matplotlib.offsetbox, or try the search function.
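Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: wrap an image array in an OffsetImage, anchor it at a data coordinate with AnnotationBbox, and attach it to the axes with add_artist. The random 8x8 thumbnail and the anchor point are purely illustrative.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox

fig, ax = plt.subplots()
ax.plot([0.0, 1.0], [0.0, 1.0], '.')

thumbnail = np.random.rand(8, 8)  # placeholder image data; any 2-D or RGB array works
imagebox = OffsetImage(thumbnail, cmap=plt.cm.gray_r, zoom=2.0)
ab = AnnotationBbox(imagebox, (0.5, 0.5), frameon=False)  # anchor the box at data point (0.5, 0.5)
ax.add_artist(ab)  # an AnnotationBbox is attached as an artist, not via ax.annotate
plt.show()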
Example #1
Source File: utils.py    From MagnetLoss-PyTorch with MIT License
def plot_embedding(X, y, imgs=None, title=None, name=None):
    # Adapted from http://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    # Plot colored numbers
    plt.figure(figsize=(10,10))
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        # plot colored number
        plt.text(X[i, 0], X[i, 1], str(y[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    # Add image overlays
    if imgs is not None and hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(X.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(imgs[i], cmap=plt.cm.gray_r), X[i])
            ax.add_artist(imagebox)

    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)

    plt.savefig("results/" + str(name) + '.svg') 
Example #2
Source File: test.py    From MomentumContrast.pytorch with MIT License
def show(mnist, targets, ret):
    target_ids = range(len(set(targets)))
    
    colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'violet', 'orange', 'purple']
    
    plt.figure(figsize=(12, 10))
    
    ax = plt.subplot(aspect='equal')
    for label in set(targets):
        idx = np.where(np.array(targets) == label)[0]
        plt.scatter(ret[idx, 0], ret[idx, 1], c=colors[label], label=label)
    
    for i in range(0, len(targets), 250):
        img = (mnist[i][0] * 0.3081 + 0.1307).numpy()[0]
        img = OffsetImage(img, cmap=plt.cm.gray_r, zoom=0.5) 
        ax.add_artist(AnnotationBbox(img, ret[i]))
    
    plt.legend()
    plt.show() 
Example #3
Source File: plot_tsne.py    From GroundedTranslation with BSD 3-Clause "New" or "Revised" License
def ab_plotter(xcoords, ycoords, images, labels):

    ax = plt.subplot(111)
    ax.set_xlim([-30, 30])
    ax.set_ylim([-30, 30])
    
    for x, y, i, l in zip(xcoords, ycoords, images, labels):
        arr_hand = i
        imagebox = OffsetImage(arr_hand, zoom=.1)
        xy = [x, y]               # coordinates to position this image
        
        ab = AnnotationBbox(imagebox, xy,
            xybox=(10., -10.),
            xycoords='data',
            boxcoords="offset points",
            pad=0.0)                                  
        ax.add_artist(ab)  # add the AnnotationBbox as an artist; ax.annotate expects a text string, not an offsetbox
    
    # rest is just standard matplotlib boilerplate
    ax.grid(True)
    plt.show() 
Example #4
Source File: tsne.py    From lightnet with MIT License
def tsne_plot(labels, tokens):
    "Creates and TSNE model and plots it"
    
    tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
    X_2d = tsne_model.fit_transform(tokens)
    X_2d -= X_2d.min(axis=0)
    X_2d /= X_2d.max(axis=0)

    width = 1200
    grid, to_plot = tsne_to_grid(X_2d)
    out_dim = int(width / np.sqrt(to_plot))
   
    fig, ax = plt.subplots(figsize=(width/100, width/100))
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    
    for pos, label in zip(grid, labels[0:to_plot]):
        ax.scatter(pos[0], pos[1])
        if False:  # set to True to also draw a text label next to each point
            ax.annotate(label,
                     xy=(pos[0], pos[1]),
                     xytext=(5, 2),
                     fontsize=9,
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
        ab = AnnotationBbox(getImage(label, new_size = out_dim / 2), (pos[0], pos[1]), frameon=False)
        ax.add_artist(ab)

    plt.show() 
Example #5
Source File: CV_plot_utils.py    From artificio with Apache License 2.0
def plot_tsne(X, imgs, outFile):

    def imscatter(x, y, images, ax=None, zoom=1.0):
        if ax is None:
            ax = plt.gca()
        x, y = np.atleast_1d(x, y)
        artists = []
        for x0, y0, img0 in zip(x, y, images):
            im = OffsetImage(img0, zoom=zoom)
            ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=True)
            artists.append(ax.add_artist(ab))
        ax.update_datalim(np.column_stack([x, y]))
        ax.autoscale()
        return artists

    def plot_embedding(X, imgs, title=None):
        x_min, x_max = np.min(X, 0), np.max(X, 0)
        X = (X - x_min) / (x_max - x_min)

        plt.figure()
        ax = plt.subplot(111)
        for i in range(X.shape[0]):
            plt.text(X[i, 0], X[i, 1], ".", fontdict={'weight': 'bold', 'size': 9})
        if hasattr(offsetbox, 'AnnotationBbox'):
            imscatter(X[:,0], X[:,1], imgs, zoom=0.3, ax=ax)

        plt.xticks([]), plt.yticks([])
        if title is not None:
            plt.title(title, fontsize=18)

    tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
    X_tsne = tsne.fit_transform(X)
    plot_embedding(X_tsne, imgs, "t-SNE embeddings")
    if outFile is None:
        plt.show()
    else:
        plt.savefig(outFile, bbox_inches='tight')
    plt.close()

# Plot image reconstructions 
Example #6
Source File: plot_lle_digits.py    From scikit-hubness with BSD 3-Clause "New" or "Revised" License
def plot_embedding(X, title=None):
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(y[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(X.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(imagebox)
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)


# ----------------------------------------------------------------------
# Plot images of the digits 
Example #7
Source File: plot_lle_digits.py    From scikit-hubness with BSD 3-Clause "New" or "Revised" License
def plot_embedding(X, title=None):
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(y[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(X.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(imagebox)
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)


# ----------------------------------------------------------------------
# Plot images of the digits 
Example #8
Source File: tsne.py    From Computer-Vision-with-Python-3 with MIT License
def plot_embedding(X, title=None):
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure()
    ax = plt.subplot(111)
    
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})
   
    '''
    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(digits.data.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(imagebox)
    '''
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)


#----------------------------------------------------------------------
# Plot images of the digits 
Example #9
Source File: plot_custom_model.py    From sklearn-onnx with MIT License
def plot_embedding(Xp, y, imgs, title=None, figsize=(12, 4)):
    x_min, x_max = numpy.min(Xp, 0), numpy.max(Xp, 0)
    X = (Xp - x_min) / (x_max - x_min)

    fig, ax = plt.subplots(1, 2, figsize=figsize)
    for i in range(X.shape[0]):
        ax[0].text(X[i, 0], X[i, 1], str(y[i]),
                   color=plt.cm.Set1(y[i] / 10.),
                   fontdict={'weight': 'bold', 'size': 9})

    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = numpy.array([[1., 1.]])  # just something big
        for i in range(X.shape[0]):
            dist = numpy.sum((X[i] - shown_images) ** 2, 1)
            if numpy.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = numpy.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(imgs[i], cmap=plt.cm.gray_r),
                X[i])
            ax[0].add_artist(imagebox)
    ax[0].set_xticks([]), ax[0].set_yticks([])
    ax[1].plot(Xp[:, 0], Xp[:, 1], '.')
    if title is not None:
        ax[0].set_title(title)
    return ax 
Example #10
Source File: ex3_digits.py    From PyCon2015 with BSD 3-Clause "New" or "Revised" License
def plot_embedding(X, title=None):
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    if hasattr(offsetbox, 'AnnotationBbox'):
        # only print thumbnails with matplotlib > 1.0
        shown_images = np.array([[1., 1.]])  # just something big
        for i in range(digits.data.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                # don't show points that are too close
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(imagebox)
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title) 
Example #11
Source File: __init__.py    From EDeN with MIT License
def plot_embedding(data_matrix, y,
                   labels=None,
                   image_file_name=None,
                   title=None,
                   cmap='rainbow',
                   density=False):
    """plot_embedding."""
    import matplotlib.pyplot as plt
    from matplotlib import offsetbox
    from PIL import Image
    from eden.embedding import embed_dat_matrix_two_dimensions

    if title is not None:
        plt.title(title)
    if density:
        embed_dat_matrix_two_dimensions(data_matrix,
                                        y=y,
                                        instance_colormap=cmap)
    else:
        plt.scatter(data_matrix[:, 0], data_matrix[:, 1],
                    c=y,
                    cmap=cmap,
                    alpha=.7,
                    s=30,
                    edgecolors='black')
        plt.xticks([])
        plt.yticks([])
        plt.axis('off')
    if image_file_name is not None:
        num_instances = data_matrix.shape[0]
        ax = plt.subplot(111)
        for i in range(num_instances):
            img = Image.open(image_file_name + str(i) + '.png')
            imagebox = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(img, zoom=1),
                data_matrix[i],
                pad=0,
                frameon=False)
            ax.add_artist(imagebox)
    if labels is not None:
        for id in range(data_matrix.shape[0]):
            label = str(labels[id])
            x = data_matrix[id, 0]
            y = data_matrix[id, 1]
            plt.annotate(label,
                         xy=(x, y),
                         xytext=(0, 0),
                         textcoords='offset points') 
Example #12
Source File: animation.py    From YAFS with MIT License
def update_coverage_regions(self):
        point_mobiles = []

        for ix,code_mobile in enumerate(self.sim.mobile_fog_entities.keys()):
            if code_mobile in self.track_code_last_position.keys():
                (lng, lat) = self.track_code_last_position[code_mobile]
                point_mobiles.append(np.array([lng, lat]))

        point_mobiles = np.array(point_mobiles)

        if len(point_mobiles)==0:
            self.pointsVOR = self.sim.endpoints
        else:
            self.pointsVOR = np.concatenate((self.sim.endpoints, point_mobiles), axis=0)

        self.sim.coverage.update_coverage_of_endpoints(self.sim.map, self.pointsVOR)
        self.axarr.clear()

        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.xlim(0, self.sim.map.w)
        plt.ylim(self.sim.map.h, 0)
        plt.axis('off')
        plt.tight_layout()

        self.axarr.imshow(self.sim.map.img)

        # self.axarr.add_collection(
        #     mpl.collections.PolyCollection(
        #         self.sim.coverage.cells, facecolors=self.sim.coverage.colors_cells,
        #         edgecolors='k', alpha=.25))

        # p = PatchCollection(self.sim.coverage.get_polygon_to_map(),facecolors=self.sim.coverage.get_polygon_colors(),alpha=.25)
        # p.set_array(self.sim.coverage.colors_cells)

        self.axarr.add_collection(self.sim.coverage.get_polygons_on_map())


        # self.ppix = [self.sim.map.to_pixels(vp[0], vp[1]) for vp in self.pointsVOR]
        # self.ppix = np.array(self.ppix)
        # for point in self.ppix:
        #     ab = AnnotationBbox(self.car_icon, (point[0], point[1]),frameon=False)
        #     self.axarr.add_artist(ab)

        # Endpoints of the network
        self.ppix = [self.sim.map.to_pixels(vp[0], vp[1]) for vp in self.sim.endpoints]
        for point in self.ppix:
            ab = AnnotationBbox(self.endpoint_icon, (point[0], point[1]), frameon=False)
            self.axarr.add_artist(ab)

        # self.axarr.scatter(self.ppix[:, 0], self.ppix[:, 1]) 
Example #13
Source File: animation.py    From YAFS with MIT License
def show_frequency(self,draw_connection_line=False):
        self.axarr.texts = []

        # Draw names
        for ix, vp in enumerate(self.ppix):
            t = plt.text(vp[0] - 3, vp[1] - 8, self.name_mobile[ix], dict(size=6, color='b'))

        # Draw last movement
        for code in self.track_code_last_position:

            (lng, lat) = self.track_code_last_position[code]
            new_point=[lng,lat]

            if code not in self.sim.mobile_fog_entities.keys():
                point_index = self.sim.coverage.connection(new_point)
                self.connection[point_index] += 1
                icon = self.car_icon
            else:
                icon = self.car_endpoint_icon

            lng, lat = self.sim.map.to_pixels(lng, lat)

            plt.annotate(str(code).replace("_0.0",""),
                        xy=(lng, lat),  # theta, radius
                        xytext=(lng-1, lat-5),  # fraction, fraction
                        # arrowprops=dict(facecolor='black', arrowstyle="-|>"),
                        horizontalalignment='center',
                        verticalalignment='bottom', size= 6)

            ab = AnnotationBbox(icon, (lng,lat), frameon=False)
            self.axarr.add_artist(ab)

            # if code not in self.sim.mobile_fog_entities and \
            #         draw_connection_line:
            #     pointA = self.ppix[point_index]
            #     pointB = [lng,lat]
            #
            #     plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], color="gray")

        # draw number of connections by node
        # for k in self.connection:
            # plt.text(20, 20 + (k * 30), "%s : %i" % (self.name_mobile[k], self.connection[k]), dict(size=10, color='black')) 
Example #14
Source File: tsne.py    From agent-trainer with MIT License
def _tsne_plot_embedding(self, x, y, inputs, path_result_image, title=""):
        x_min, x_max = np.min(x, 0), np.max(x, 0)
        x_normalized = (x - x_min) / (x_max - x_min)

        tableau20 = style.generate_tableau20_colors()
        figure = plt.figure()
        figure.set_size_inches(18.5, 10.5)
        ax = figure.add_subplot(111)
        ax.axis('off')
        for i in xrange(x.shape[0]):
            plt.text(x_normalized[i, 0], x_normalized[i, 1], str(y[i]),
                     color=tableau20[y[i]],
                     fontdict={'weight': 'bold', 'size': 12})

        labels = [mpatches.Patch(color=tableau20[output_descriptor.value],
                                 label="[{0}] {1}".format(output_descriptor.value, output_descriptor.name)) for output_descriptor in list(self.output_descriptor_enum)]
        legend = ax.legend(handles=labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., frameon=False)

        if hasattr(offsetbox, 'AnnotationBbox'): # only print thumbnails with matplotlib > 1.0
            shown_images = np.array([[1., 1.]])
            for i in xrange(len(x_normalized)):
                distance_between_points = np.sum((x_normalized[i] - shown_images) ** 2, 1)
                if np.min(distance_between_points) < self.MIN_DISTANCE_BETWEEN_IMAGES:
                    continue
                shown_images = np.r_[shown_images, [x_normalized[i]]]
                rendered_image = offsetbox.OffsetImage(self._state_into_grid_of_screenshots(inputs[i]),
                                                       cmap=plt.get_cmap('gray'))
                image_position = x_normalized[i]
                annotation_box_relative_position = (-70, 250) if x_normalized[i][1] > 0.5 else (-70, -250)
                imagebox = offsetbox.AnnotationBbox(rendered_image, image_position,
                                                    xybox=annotation_box_relative_position,
                                                    xycoords='data',
                                                    boxcoords="offset points",
                                                    arrowprops=dict(arrowstyle="->"))
                ax.add_artist(imagebox)

        plt.xticks([]), plt.yticks([])
        if title is not None:
            plt.title(title)

        plt.savefig(path_result_image, bbox_extra_artists=(legend,), bbox_inches='tight', pad_inches=4)
        print("Visualization written to {0}".format(path_result_image)) 
Example #15
Source File: plot_digits_datasets.py    From scipy_2015_sklearn_tutorial with Creative Commons Zero v1.0 Universal
def digits_plot():
    digits = datasets.load_digits(n_class=6)
    n_digits = 500
    X = digits.data[:n_digits]
    y = digits.target[:n_digits]
    n_samples, n_features = X.shape
    n_neighbors = 30

    def plot_embedding(X, title=None):
        x_min, x_max = np.min(X, 0), np.max(X, 0)
        X = (X - x_min) / (x_max - x_min)

        plt.figure()
        ax = plt.subplot(111)
        for i in range(X.shape[0]):
            plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
                    color=plt.cm.Set1(y[i] / 10.),
                    fontdict={'weight': 'bold', 'size': 9})

        if hasattr(offsetbox, 'AnnotationBbox'):
            # only print thumbnails with matplotlib > 1.0
            shown_images = np.array([[1., 1.]])  # just something big
            for i in range(X.shape[0]):
                dist = np.sum((X[i] - shown_images) ** 2, 1)
                if np.min(dist) < 1e5:
                    # don't show points that are too close
                    # set a high threshold to basically turn this off
                    continue
                shown_images = np.r_[shown_images, [X[i]]]
                imagebox = offsetbox.AnnotationBbox(
                    offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                    X[i])
                ax.add_artist(imagebox)
        plt.xticks([]), plt.yticks([])
        if title is not None:
            plt.title(title)

    n_img_per_row = 10
    img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
    for i in range(n_img_per_row):
        ix = 10 * i + 1
        for j in range(n_img_per_row):
            iy = 10 * j + 1
            img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))

    plt.imshow(img, cmap=plt.cm.binary)
    plt.xticks([])
    plt.yticks([])
    plt.title('A selection from the 64-dimensional digits dataset')
    print("Computing PCA projection")
    pca = decomposition.PCA(n_components=2).fit(X)
    X_pca = pca.transform(X)
    plot_embedding(X_pca, "Principal Components projection of the digits")
    plt.figure()
    plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
    plt.axis('off')
    plt.figure()
    plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
    plt.axis('off')
    plt.show() 
Example #16
Source File: mnist-embeddings.py    From tensorpack with Apache License 2.0
def visualize(model_path, model, algo_name):
    if not MATPLOTLIB_AVAIBLABLE:
        logger.error("visualize requires matplotlib package ...")
        return
    pred = OfflinePredictor(PredictConfig(
        session_init=SmartInit(model_path),
        model=model(),
        input_names=['input'],
        output_names=['emb']))

    NUM_BATCHES = 6
    BATCH_SIZE = 128
    images = np.zeros((BATCH_SIZE * NUM_BATCHES, 28, 28))  # the used digits
    embed = np.zeros((BATCH_SIZE * NUM_BATCHES, 2))  # the actual embeddings in 2-d

    # get only the embedding model data (MNIST test)
    ds = get_test_data()
    ds.reset_state()

    for offset, dp in enumerate(ds):
        digit, label = dp
        prediction = pred(digit)[0]
        embed[offset * BATCH_SIZE:offset * BATCH_SIZE + BATCH_SIZE, ...] = prediction
        images[offset * BATCH_SIZE:offset * BATCH_SIZE + BATCH_SIZE, ...] = digit
        offset += 1
        if offset == NUM_BATCHES:
            break

    plt.figure()
    ax = plt.subplot(111)
    ax_min = np.min(embed, 0)
    ax_max = np.max(embed, 0)

    ax_dist_sq = np.sum((ax_max - ax_min)**2)
    ax.axis('off')
    shown_images = np.array([[1., 1.]])
    for i in range(embed.shape[0]):
        dist = np.sum((embed[i] - shown_images)**2, 1)
        if np.min(dist) < 3e-4 * ax_dist_sq:     # don't show points that are too close
            continue
        shown_images = np.r_[shown_images, [embed[i]]]
        imagebox = offsetbox.AnnotationBbox(
            offsetbox.OffsetImage(np.reshape(images[i, ...], [28, 28]), zoom=0.6, cmap=plt.cm.gray_r),
            xy=embed[i], frameon=False)
        ax.add_artist(imagebox)

    plt.axis([ax_min[0], ax_max[0], ax_min[1], ax_max[1]])
    plt.xticks([]), plt.yticks([])
    plt.title('Embedding using %s-loss' % algo_name)
    plt.savefig('%s.jpg' % algo_name)