Python matplotlib.animation.ArtistAnimation() Examples

The following are 8 code examples of matplotlib.animation.ArtistAnimation(). Each example notes the project and source file it was taken from, so you can follow up on the original context. You may also want to check out all available functions/classes of the module matplotlib.animation.
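For orientation, here is a minimal, self-contained sketch of the general ArtistAnimation pattern that all of the examples below follow (this snippet is illustrative and not taken from any of the listed projects): build one list of artists per frame, collect those lists, and hand the whole collection to ArtistAnimation.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation

fig = plt.figure()
frames = []
for _ in range(20):
    # Each frame is a list of the artists to show for that frame.
    im = plt.imshow(np.random.random((10, 10)), animated=True)
    frames.append([im])

# interval is the delay between frames in milliseconds.
ani = ArtistAnimation(fig, frames, interval=100, blit=True, repeat_delay=1000)
plt.show()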
Example #1
Source File: test_visualization.py    From pychubby with MIT License
def test_overall(self, face_img):
        shape = (10, 11)

        delta_x = np.random.random(shape)
        delta_y = np.random.random(shape)

        df = DisplacementField(delta_x, delta_y)
        ani = create_animation(df, face_img, fps=2, n_seconds=1)

        assert isinstance(ani, ArtistAnimation) 
Example #2
Source File: pcnn.py    From pyclustering with GNU General Public License v3.0
def animate_spike_ensembles(pcnn_output_dynamic, image_size):
        """!
        @brief Shows animation of output dynamic (output of each oscillator) during simulation.
        
        @param[in] pcnn_output_dynamic (pcnn_dynamic): Output dynamic of the pulse-coupled neural network.
        @param[in] image_size (tuple): Image size represented as (height, width).
        
        """
        
        figure = plt.figure()
        
        time_signal = pcnn_output_dynamic.allocate_time_signal()
        spike_ensembles = pcnn_output_dynamic.allocate_spike_ensembles()
        
        spike_animation = []
        ensemble_index = 0
        for t in range(len(time_signal)):
            image_color_segments = [(255, 255, 255)] * (image_size[0] * image_size[1])
            
            if time_signal[t] > 0:
                for index_pixel in spike_ensembles[ensemble_index]:
                    image_color_segments[index_pixel] = (0, 0, 0)
                
                ensemble_index += 1

            stage = numpy.array(image_color_segments, numpy.uint8)
            stage = numpy.reshape(stage, image_size + (3,))  # (3,) adds the RGB channel as the third dimension.
            image_cluster = Image.fromarray(stage, 'RGB')
            
            spike_animation.append([plt.imshow(image_cluster, interpolation='none')])
            
        
        im_ani = animation.ArtistAnimation(figure, spike_animation, interval=75, repeat_delay=3000, blit=True)
        plt.show() 
Example #3
Source File: life.py    From PyGameofLife with MIT License
def animate_life(
    universe_size,
    seed,
    seed_position,
    quality=200,
    cmap="Purples",
    n_generations=50,
    interval=300,
    save=False,
):
    """
    Animate the Game of Life.

    :param universe_size: dimensions of the universe
    :type universe_size: tuple (int, int)
    :param seed: initial starting array
    :type seed: list of lists, np.ndarray
    :param seed_position: coordinates where the top-left corner of the seed array should
                          be pinned
    :type seed_position: tuple (int, int)
    :param quality: resolution (dpi) of the figure used for the animation, defaults to 200
    :type quality: int, optional
    :param cmap: the matplotlib cmap that should be used, defaults to "Purples"
    :type cmap: str, optional
    :param n_generations: number of universe iterations, defaults to 50
    :type n_generations: int, optional
    :param interval: time interval between updates (milliseconds), defaults to 300
    :type interval: int, optional
    :param save: whether the animation should be saved, defaults to False
    :type save: bool, optional
    """
    # Initialise the universe and seed
    universe = np.zeros(universe_size)
    x_start, y_start = seed_position[0], seed_position[1]
    seed_array = np.array(seeds[seed])
    x_end, y_end = x_start + seed_array.shape[0], y_start + seed_array.shape[1]
    universe[x_start:x_end, y_start:y_end] = seed_array

    # Animate
    fig = plt.figure(dpi=quality)
    plt.axis("off")
    ims = []
    for i in range(n_generations):
        ims.append((plt.imshow(universe, cmap=cmap),))
        universe = generation(universe)
    im_ani = animation.ArtistAnimation(
        fig, ims, interval=interval, repeat_delay=3000, blit=True
    )
    # Optional: save the animation, with a name based on the seed.
    if save:
        im_ani.save((str(seed) + ".gif"), writer="imagemagick") 
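Saving with writer="imagemagick" requires the external ImageMagick binary. If it is not installed, matplotlib's built-in PillowWriter (which only needs the Pillow package) can write the GIF instead; a minimal, self-contained sketch using placeholder random frames rather than the PyGameofLife universe:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

fig = plt.figure()
plt.axis("off")
# Placeholder frames: random 0/1 grids standing in for Game of Life generations.
ims = [(plt.imshow(np.random.randint(0, 2, (20, 20)), cmap="Purples"),) for _ in range(10)]
im_ani = animation.ArtistAnimation(fig, ims, interval=300, repeat_delay=3000, blit=True)
im_ani.save("random_frames.gif", writer=animation.PillowWriter(fps=3))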
Example #4
Source File: nav_world.py    From taco with GNU General Public License v3.0
def collect_data(num_traj,save_dir,animate=False,**kwargs):
    transition_noise = kwargs.get('transition_noise', 0.05)
    location_noise = kwargs.get('location_noise', 0.2)
    env = NavEnv(location_noise,transition_noise)
    dataset = {'states':[],'actions':[],'gt_onsets':[],'tasks':[],'params':kwargs}
    train_sketches = [[0,1,3],[1,2,3],[3,2,1],[2,3,0],[1,2,0],[1,0,2],[1,3,2],[0,3,2],[1,2,3],[3,2,0],[2,1,3],[1,3,0]]

    if animate:
        fig = plt.figure()
        ims = []

    for i in range(num_traj):
        g_state,r_state = env.reset()
        colours = ['b', 'r', 'g', 'y', 'k']
        # randomly sample a sketch (sequence of sub-task indices) from the training sketches
        sketch = train_sketches[np.random.randint(0,len(train_sketches),1)[0]]
        curr_idx = 0
        curr_subtask = sketch[curr_idx]
        sketch_traj = []
        # begin trajectory
        traj_states = []
        traj_actions = []
        while True:
            sketch_traj.append(curr_subtask) # This gives us ground truth about the task executed at each timestep
            #all_pos = np.array([agent_pos,red_pos,green_pos,yel_pos,black_pos])
            #state = np.ravel(get_state(all_pos[0],all_pos[1:],type = 'rel'))
            traj_states.append(r_state)
            action = get_action(g_state,curr_subtask)
            g_state,r_state,done = env.step(action,curr_subtask)
            traj_actions.append(action)
            if animate:
                ims.append((plt.scatter(g_state[:, 0], g_state[:, 1], c=colours),))
            if done:
                if curr_idx<len(sketch)-1:
                    curr_idx+=1

                    curr_subtask = sketch[curr_idx]
                else:
                    dataset['states'].append(traj_states)
                    dataset['actions'].append(traj_actions)
                    dataset['gt_onsets'].append(sketch_traj)
                    dataset['tasks'].append(sketch)
                    break
    save_dir = save_dir if save_dir[-2:] == '.p' else save_dir+'.p'
    pickle.dump(dataset,open(save_dir,'wb'))

    if animate:
        print('WRITING')
        Writer = animation.writers['ffmpeg']
        writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
        im_ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=3000,
                                           blit=True)
        im_ani.save(save_dir+'im_mod.mp4', writer=writer) 
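The animation.writers['ffmpeg'] lookup above resolves to animation.FFMpegWriter, which can also be instantiated directly. A minimal, self-contained sketch of the same save pattern with synthetic scatter frames (it assumes an ffmpeg binary is on the PATH and is not part of the taco project):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

fig = plt.figure()
ims = []
for _ in range(30):
    # One (scatter,) tuple per frame, mirroring the ims list built above.
    ims.append((plt.scatter(np.random.rand(5), np.random.rand(5)),))

writer = animation.FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800)
im_ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=3000, blit=True)
im_ani.save('scatter_demo.mp4', writer=writer)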
Example #5
Source File: nav_world.py    From taco with GNU General Public License v3.0
def evaluate_model(nb_traj,policy,save_dir= None,animate=False,**kwargs):
    transition_noise = kwargs.get('transition_noise', 0.02)
    location_noise = kwargs.get('location_noise', 0.1)
    env = NavEnv(location_noise, transition_noise,done_thresh=0.03)
    if kwargs['zero_shot']:
        test_sketches = [[0, 1, 3,0], [3, 2, 3,2], [0, 2, 1,0], [1, 3, 0,2], [1, 3, 0,3], [1, 3, 2,1], [1, 3, 2,3], [2, 3, 2,1], [1, 2, 3,1],
                      [3, 1, 0,1], [3, 1, 2,0], [2, 3, 0,1],[1,3,1,3],[2,3,2,3],[1,2,1,2],[0,1,0,1],[0,2,0,2],[0,3,0,3]]
    else:
        test_sketches = [[0, 1, 3], [1, 2, 3], [3, 2, 1], [2, 3, 0], [1, 2, 0], [1, 0, 2], [1, 3, 2], [0, 3, 2],
                          [1, 2, 3], [3, 2, 0], [2, 1, 3], [1, 3, 0]]

    score = []
    if animate:
        fig = plt.figure()
        ims = []
    for i in range(nb_traj):
        task_score = []
        g_state,r_state = env.reset()
        colours = ['b', 'r', 'g', 'y', 'k']
        # randomly sample a sketch (sequence of sub-task indices) from the test sketches
        sketch = test_sketches[np.random.randint(0,len(test_sketches),1)[0]]
        curr_idx = 0
        curr_subtask = sketch[curr_idx]
        counter=0
        while True:
            action,stop = policy.forward_full([[r_state]], curr_subtask, dropout=1.)
            g_state,r_state,done = env.step(action,curr_subtask)
            if stop == 1 or counter >100:
                if done:
                    task_score.append(1)
                else:
                    task_score.append(0)
                if curr_idx < len(sketch) - 1:
                    curr_idx += 1
                    curr_subtask = sketch[curr_idx]
                    counter = 0
                else:
                    score.append(task_score)
                    break

            if animate:
                ims.append((plt.scatter(g_state[:, 0], g_state[:, 1], c=colours),))
            counter+=1
    if animate:
        print('writing video at:',save_dir+'im_mod.mp4')
        Writer = animation.writers['ffmpeg']
        writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
        im_ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=3000,
                                           blit=True)
        im_ani.save(save_dir+'im_mod.mp4', writer=writer)
    acc = 0
    for s in score:
        if np.sum(sum(s)) == len(s):
            acc += 1
    acc /= float(len(score))
    # also return the accuracy so it does not have to be computed outside
    return score, acc
Example #6
Source File: visualization.py    From pychubby with MIT License
def create_animation(df, img, include_backwards=True, fps=24, n_seconds=2, figsize=(8, 8), repeat=True):
    """Create animation from a displacement field.

    Parameters
    ----------
    df : DisplacementField
        Instance of the ``DisplacementField`` representing the coordinate transformation.

    img : np.ndarray
        Image.

    include_backwards : bool
        If True, the animation is also played backwards after it is played forwards.

    fps : int
        Frames per second.

    n_seconds : int
        Number of seconds to play the animation forwards.

    figsize : tuple
        Size of the figure.

    repeat : bool
        If True, the animation loops when it reaches the end.

    Returns
    -------
    ani : matplotlib.animation.ArtistAnimation
        Animation showing the transformation.

    Notes
    -----
    To enable animation viewing in a Jupyter notebook, write:
    ```
    from matplotlib import rc
    rc('animation', html='html5')
    ```

    """
    n_frames = int(n_seconds * fps)
    interval = (1 / fps) * 1000
    frames = []

    fig = plt.figure(figsize=figsize)
    plt.axis('off')

    for i in range(n_frames + 1):
        df_new = df * (i / n_frames)
        warped_img = df_new.warp(img)
        frames.append([plt.imshow(warped_img, cmap='gray')])

    if include_backwards:
        frames += frames[::-1]

    ani = ArtistAnimation(fig, frames, interval=interval, repeat=repeat)

    return ani 
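As a hedged illustration of the Notes section above (using a stand-in animation rather than a real DisplacementField, and assuming IPython/Jupyter is available), an ArtistAnimation such as the one returned by create_animation can also be rendered inline with to_jshtml(), without setting the rc option:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation
from IPython.display import HTML

fig = plt.figure(figsize=(4, 4))
plt.axis('off')
# Stand-in frames instead of warped images from a DisplacementField.
frames = [[plt.imshow(np.random.random((8, 8)), cmap='gray')] for _ in range(10)]
ani = ArtistAnimation(fig, frames, interval=1000 / 24, repeat=True)

HTML(ani.to_jshtml())  # in a notebook cell, this displays the animation inline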
Example #7
Source File: celluloid.py    From celluloid with MIT License
def animate(self, *args, **kwargs) -> ArtistAnimation:
        """Animate the snapshots taken.

        Uses matplotlib.animation.ArtistAnimation

        Returns
        -------
        ArtistAnimation

        """
        return ArtistAnimation(self._figure, self._photos, *args, **kwargs) 
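For context, the method above belongs to celluloid's Camera class, whose snap() calls populate self._photos. A short usage sketch in the spirit of the celluloid README (the output file name and plotted data are illustrative):

import matplotlib.pyplot as plt
from celluloid import Camera

fig = plt.figure()
camera = Camera(fig)
for i in range(10):
    # Draw onto the same axes, then snapshot the artists created so far.
    plt.plot([0, i], [0, i], color='blue')
    camera.snap()

# animate() forwards its arguments straight to ArtistAnimation.
ani = camera.animate(interval=100, blit=True)
ani.save('celluloid_demo.gif', writer='pillow')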
Example #8
Source File: gif_shepard_metzler.py    From gqn-dataset-renderer with MIT License
def main():
    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_candidates.append((red, green, blue))

    renderer = OffscreenRenderer(
        viewport_width=args.image_size, viewport_height=args.image_size)

    rad_step = math.pi / 18
    total_frames = int(math.pi * 2 / rad_step)
    camera_distance = 2

    fig = plt.figure(figsize=(3, 3))
    ims = []

    for num_cubes in range(1, 8):
        scene = build_scene(num_cubes, color_candidates)[0]
        camera = OrthographicCamera(xmag=0.9, ymag=0.9)
        camera_node = Node(camera=camera)
        scene.add_node(camera_node)

        current_rad = 0
        
        for _ in range(total_frames):
            camera_position = np.array((math.sin(current_rad),
                                        math.sin(math.pi / 6),
                                        math.cos(current_rad)))
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            im = plt.imshow(image, interpolation="none", animated=True)
            ims.append([im])

            current_rad += rad_step

    renderer.delete()

    ani = animation.ArtistAnimation(
        fig, ims, interval=1000 / 24, blit=True, repeat_delay=0)  # interval is in milliseconds (~24 fps)
    ani.save("shepard_metzler.gif", writer="imagemagick")