Python pybullet.computeProjectionMatrixFOV() Examples

The following are 17 code examples of pybullet.computeProjectionMatrixFOV(). Each example lists its source file and the project it comes from; see the original projects for full context. You may also want to check out the other available functions and classes of the pybullet module.
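As a quick orientation before the examples: computeProjectionMatrixFOV() builds an OpenGL-style projection matrix from a vertical field of view (in degrees), an aspect ratio, and near/far clipping planes, and is almost always paired with a view matrix and getCameraImage(). Below is a minimal, self-contained sketch of that call pattern; the camera placement and image size are illustrative, not taken from any of the examples.

import numpy as np
import pybullet as p

p.connect(p.DIRECT)  # headless physics server; use p.GUI to open a window

# Projection: 60 degree vertical FOV, 4:3 aspect ratio, clip planes at 0.1/100.
proj_matrix = p.computeProjectionMatrixFOV(
    fov=60, aspect=640.0 / 480.0, nearVal=0.1, farVal=100.0)
# View: camera at (1, 1, 1) looking at the origin, with z up.
view_matrix = p.computeViewMatrix(
    cameraEyePosition=[1, 1, 1],
    cameraTargetPosition=[0, 0, 0],
    cameraUpVector=[0, 0, 1])
width, height, rgba, depth, seg = p.getCameraImage(
    640, 480, viewMatrix=view_matrix, projectionMatrix=proj_matrix,
    renderer=p.ER_TINY_RENDERER)
rgb = np.reshape(rgba, (height, width, 4))[:, :, :3]  # drop the alpha channel
p.disconnect()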
Example #1
Source File: env_bases.py    From midlevel-reps with MIT License
def render_map(self):
        base_pos=[0, 0, -3]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos[0] = self.robot.body_xyz[0]
                base_pos[1] = self.robot.body_xyz[1]
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=35,
            yaw=0,
            pitch=-89,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
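Note that this example indexes px directly, while the GtS copies of the same functions (Examples #5-#7 below) reshape it first. Depending on how pybullet was built (with or without NumPy support), getCameraImage() may return the RGBA pixels as an array or as a flat buffer. A version-robust conversion, as an illustrative helper (the name to_rgb is ours, not from the source):

import numpy as np

def to_rgb(px, width, height):
    # Pixel rows are height-major with 4 channels (RGBA); keep only RGB.
    rgba = np.reshape(np.array(px, dtype=np.uint8), (height, width, 4))
    return rgba[:, :, :3]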
Example #2
Source File: env_bases.py    From midlevel-reps with MIT License
def render_physics(self):
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(
                width=self._render_width, height=self._render_height,
                viewMatrix=view_matrix,
                projectionMatrix=proj_matrix,
                renderer=p.ER_TINY_RENDERER)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #3
Source File: env_bases.py    From midlevel-reps with MIT License
def _render(self, mode, close):
        base_pos=[0,0,0]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos = self.robot.body_xyz
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px)
        if close: return None
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #4
Source File: simulate.py    From obman_train with GNU General Public License v3.0
def take_picture(renderer, width=256, height=256, scale=0.001, conn_id=None):
    view_matrix = p.computeViewMatrix(
        [0, 0, -1], [0, 0, 0], [0, -1, 0], physicsClientId=conn_id
    )
    proj_matrix = p.computeProjectionMatrixFOV(
        20, 1, 0.05, 2, physicsClientId=conn_id
    )
    w, h, rgba, depth, mask = p.getCameraImage(
        width=width,
        height=height,
        projectionMatrix=proj_matrix,
        viewMatrix=view_matrix,
        renderer=renderer,
        physicsClientId=conn_id,
    )
    return rgba 
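A usage sketch for take_picture(): the conn_id argument routes every pybullet call to one specific physics server, which keeps the function safe when several simulations are connected at once. The setup below is illustrative, not from the original project.

conn_id = p.connect(p.DIRECT)
# ... load bodies into this client here ...
rgba = take_picture(p.ER_TINY_RENDERER, width=256, height=256, conn_id=conn_id)
p.disconnect(conn_id)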
Example #5
Source File: env_bases.py    From GtS with MIT License
def render_map(self):
        base_pos=[0, 0, -3]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos[0] = self.robot.body_xyz[0]
                base_pos[1] = self.robot.body_xyz[1]
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=35,
            yaw=0,
            pitch=-89,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        # The pixel buffer is height-major: reshape with height first.
        rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #6
Source File: env_bases.py    From GtS with MIT License
def render_physics(self):
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(
                width=self._render_width, height=self._render_height,
                viewMatrix=view_matrix,
                projectionMatrix=proj_matrix,
                renderer=p.ER_TINY_RENDERER)
        # The pixel buffer is height-major: reshape with height first.
        rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #7
Source File: env_bases.py    From GtS with MIT License
def _render(self, mode, close):
        base_pos=[0,0,0]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos = self.robot.body_xyz
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        # The pixel buffer is height-major: reshape with height first.
        rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
        if close: return None
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #8
Source File: bullet_robot_env.py    From Rainbow_ddpg with MIT License
def __init__(self,
                 n_actions,  # Dimension of action vector.
                 n_substeps,  # Number of simulation steps to do in every env step.
                 observation_type="low_dim",
                 done_after=float("inf"),
                 use_gui=False,
                 frame_memory_len=0):
        self.n_substeps = n_substeps
        self.metadata = {
            'render.modes': ['rgbd_array'],
            'video.frames_per_second': int(np.round(1.0 / self.dt))
        }
        self.numSteps = 0
        if use_gui:
            physics_client = p.connect(p.GUI)
        else:
            physics_client = p.connect(p.DIRECT)
        self.p = PhysClientWrapper(p, physics_client)
        self.p.setAdditionalSearchPath(pybullet_data.getDataPath())
        self.doneAfter = done_after
        self.observation_type = observation_type
        self.seed()
        self.frameMemoryLen = frame_memory_len
        if frame_memory_len:
            self.frameMemory = deque(maxlen=frame_memory_len)

        self.viewMatrix = p.computeViewMatrix([-1.05, 0, 0.68], [0.1, 0, 0],
                                              [-0.5, 0, 1])
        self.projMatrix = p.computeProjectionMatrixFOV(
            fov=45, aspect=4. / 3., nearVal=0.01, farVal=2.5)
        self.light = {
            "diffuse": 0.4,
            "ambient": 0.5,
            "spec": 0.2,
            "dir": [10, 10, 100],
            "col": [1, 1, 1]
        }
        self._env_setup(initial_qpos=None)

        self.action_space = spaces.Box(
            -1, 1, shape=(n_actions, ), dtype='float32')

        self.pixels_space = spaces.Box(
            -np.inf, np.inf, shape=(84, 84, 3), dtype='float32')
        if observation_type == "low_dim":
            self.observation_space = self.low_dim_space
        elif observation_type == "pixels":
            self.observation_space = self.pixels_space
        elif observation_type == "pixels_stacked":
            self.observation_space = spaces.Box(
                -np.inf, np.inf, shape=(84, 84, 12), dtype='float32')
        elif observation_type == "pixels_depth":
            self.observation_space = spaces.Box(
                -np.inf, np.inf, shape=(84, 84), dtype='float32')
        else:
            raise Exception("Unimplemented observation_type") 
Example #9
Source File: camera.py    From costar_plan with Apache License 2.0
def __init__(self, name, target,
                 distance,
                 roll, pitch, yaw,
                 up_idx=2,
                 image_width=1024 // 8,  # integer division: getCameraImage needs int sizes
                 image_height=768 // 8,
                 fov=45,
                 near_plane=0.1,
                 far_plane=10):
        '''
        Create camera matrices for a particular position in the simulation.
        Task definitions should produce these.
        '''
        self.name = name
        self.matrix = np.array(pb.computeViewMatrixFromYawPitchRoll(
            target, distance, yaw=yaw, pitch=pitch, roll=roll, upAxisIndex=up_idx))
        self.image_height = image_height
        self.image_width = image_width
        self.aspect_ratio = self.image_width / self.image_height
        self.fov = fov
        self.near_plane = near_plane
        self.far_plane = far_plane
        self.projection_matrix = np.array(pb.computeProjectionMatrixFOV(
            self.fov,
            self.aspect_ratio,
            self.near_plane,
            self.far_plane)) 
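A hypothetical usage of this class (the argument values are illustrative; only the two compute* calls above come from the source). Since the matrices are stored as NumPy arrays, they are converted back to flat lists for getCameraImage():

cam = Camera('overhead', target=[0, 0, 0], distance=2.0,
             roll=0, pitch=-90, yaw=0)
_, _, rgba, depth, seg = pb.getCameraImage(
    cam.image_width, cam.image_height,
    viewMatrix=list(cam.matrix),
    projectionMatrix=list(cam.projection_matrix))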
Example #10
Source File: pose_env.py    From tensor2robot with Apache License 2.0
def _reset_camera(self):
    look = [0., 0., 0.]
    distance = 3.
    pitch = -30 + np.random.uniform(-10, 10)
    yaw = np.random.uniform(-180, 180)
    roll = 0
    self._view_matrix = pybullet.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 30
    aspect = self._width / self._height
    near = 0.1
    far = 10
    self._proj_matrix = pybullet.computeProjectionMatrixFOV(
        fov, aspect, near, far) 
Example #11
Source File: renderer.py    From mvp_grasp with BSD 3-Clause "New" or "Revised" License
def __init__(self, im_width, im_height, fov, near_plane, far_plane, DEBUG=False):
        self.im_width = im_width
        self.im_height = im_height
        self.fov = fov
        self.near_plane = near_plane
        self.far_plane = far_plane
        aspect = self.im_width/self.im_height
        self.pm = pb.computeProjectionMatrixFOV(fov, aspect, near_plane, far_plane)
        self.camera_pos = np.array([0, 0, 0.5])
        self.camera_rot = self._rotation_matrix([0, np.pi, 0])

        self.objects = []

        if DEBUG:
            self.cid = pb.connect(pb.GUI)
        else:
            self.cid = pb.connect(pb.DIRECT)

        pb.setAdditionalSearchPath(pybullet_data.getDataPath())

        pb.setGravity(0, 0, -10)
        self.draw_camera_pos()

        self._rendered = None
        self._rendered_pos = None
        self._rendered_rot = None 
Example #12
Source File: bullet_physics_engine.py    From NTP-vat-release with MIT License
# NOTE: this appears as a bare fragment; the parameters below are placeholders
# so the snippet is self-contained (in the original they come from surrounding
# state).
def foo(focal_point, focal_dist, yaw, pitch, roll, aspect):
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
                cameraTargetPosition=focal_point,
                distance=focal_dist,
                yaw=yaw,
                pitch=pitch,
                roll=roll,
                upAxisIndex=2)
        projection_matrix = p.computeProjectionMatrixFOV(
                fov=60,
                aspect=aspect,
                nearVal=0.01,
                farVal=1000.0)
        return view_matrix, projection_matrix
Example #13
Source File: kuka_diverse_object_gym_env.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def _reset(self):
    """Environment reset called at the beginning of an episode.
    """
    # Set the camera settings.
    look = [0.23, 0.2, 0.54]
    distance = 1.
    pitch = -56 + self._cameraRandom*np.random.uniform(-3, 3)
    yaw = 245 + self._cameraRandom*np.random.uniform(-3, 3)
    roll = 0
    self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 20. + self._cameraRandom*np.random.uniform(-2, 2)
    aspect = self._width / self._height
    near = 0.01
    far = 10
    self._proj_matrix = p.computeProjectionMatrixFOV(
        fov, aspect, near, far)
    
    self._attempted_grasp = False
    self._env_step = 0
    self.terminated = 0

    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF(os.path.join(self._urdfRoot, "plane.urdf"), [0, 0, -1])
    p.loadURDF(os.path.join(self._urdfRoot, "table/table.urdf"),
               0.5, 0.0, -0.82, 0.0, 0.0, 0.0, 1.0)
    p.setGravity(0, 0, -10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
    self._envStepCounter = 0
    p.stepSimulation()

    # Choose the objects in the bin.
    urdfList = self._get_random_object(
      self._numObjects, self._isTest)
    self._objectUids = self._randomly_place_objects(urdfList)
    self._observation = self._get_observation()
    return np.array(self._observation) 
Example #14
Source File: mobile_robot_env.py    From robotics-rl-srl with MIT License
def render(self, mode='human', close=False):
        if mode != "rgb_array":
            return np.array([])
        camera_target_pos = self.camera_target_pos

        if self.debug:
            self._cam_dist = p.readUserDebugParameter(self.dist_slider)
            self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
            self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
            x = p.readUserDebugParameter(self.x_slider)
            y = p.readUserDebugParameter(self.y_slider)
            z = p.readUserDebugParameter(self.z_slider)
            camera_target_pos = (x, y, z)

        # TODO: recompute view_matrix and proj_matrix only in debug mode
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=camera_target_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=self.renderer)
        rgb_array = np.array(px1)

        rgb_array_res = rgb_array[:, :, :3]

        # if first person view, then stack the observation from the car camera
        if self.fpv:
            # move camera
            view_matrix = p.computeViewMatrixFromYawPitchRoll(
                cameraTargetPosition=(self.robot_pos[0]-0.25, self.robot_pos[1], 0.15),
                distance=0.3,
                yaw=self._cam_yaw,
                pitch=-17,
                roll=self._cam_roll,
                upAxisIndex=2)
            proj_matrix = p.computeProjectionMatrixFOV(
                fov=90, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
                nearVal=0.1, farVal=100.0)
            # get and stack image
            (_, _, px1, _, _) = p.getCameraImage(
                width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
                projectionMatrix=proj_matrix, renderer=self.renderer)
            rgb_array = np.array(px1)
            rgb_array_res = np.dstack([rgb_array_res, rgb_array[:, :, :3]])

        return rgb_array_res 
Example #15
Source File: kuka_button_gym_env.py    From robotics-rl-srl with MIT License
def render(self, mode='human', close=False):
        if mode != "rgb_array":
            return np.array([])
        camera_target_pos = self.camera_target_pos

        if self.debug:
            self._cam_dist = p.readUserDebugParameter(self.dist_slider)
            self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
            self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
            x = p.readUserDebugParameter(self.x_slider)
            y = p.readUserDebugParameter(self.y_slider)
            z = p.readUserDebugParameter(self.z_slider)
            camera_target_pos = (x, y, z)
            # self._cam_roll = p.readUserDebugParameter(self.roll_slider)

        # TODO: recompute view_matrix and proj_matrix only in debug mode
        view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=camera_target_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix1 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix1,
            projectionMatrix=proj_matrix1, renderer=self.renderer)
        rgb_array1 = np.array(px1)

        if self.multi_view:
            # adding a second camera on the other side of the robot
            view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
                cameraTargetPosition=(0.316, 0.316, -0.105),
                distance=1.05,
                yaw=32,
                pitch=-13,
                roll=0,
                upAxisIndex=2)
            proj_matrix2 = p.computeProjectionMatrixFOV(
                fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
                nearVal=0.1, farVal=100.0)
            (_, _, px2, _, _) = p.getCameraImage(
                width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix2,
                projectionMatrix=proj_matrix2, renderer=self.renderer)
            rgb_array2 = np.array(px2)
            rgb_array_res = np.concatenate((rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
        else:
            rgb_array_res = rgb_array1[:, :, :3]
        return rgb_array_res 
Example #16
Source File: bullet_world.py    From NTP-vat-release with MIT License
def __init__(self,
                 display=True,
                 data_dir='./data',
                 verbose=False,
                 key=None,
                 camera_params={}):

        self._pe = BulletPhysicsEngine()

        self._display = display
        self._data_dir = data_dir
        self._verbose = verbose

        self._bodies = None
        self._robots = None
        self._time_step = None
        self._ctrl_listeners = []

        self._key_dict = None
        self._key_act_dict = None
        self._modifier_dict = None

        # Camera Parameters
        fov = camera_params.get('fov', 60)
        aspect = camera_params.get('aspect', 1)
        near = camera_params.get('near', 0.02)
        far = camera_params.get('far', 1)
        view_matrix = camera_params.get(
            'view_matrix',
            [[0.0, -0.4, 1.4], [0, 0.0, 0], [1, 0, 0]]
        )


        self.view_matrix = p.computeViewMatrix(*view_matrix)
        self.projection_matrix = p.computeProjectionMatrixFOV(
            fov, aspect, near, far)

        self.video_log_key = 0

        # Connect to the simulation
        # TODO(Kuan): If VR
        #     p.connect(p.SHARED_MEMORY)
        if self._display:
            p.connect(p.GUI)
        else:
            if key is None:
                p.connect(p.DIRECT)
            else:
                p.connect(p.DIRECT, key=key) 
Example #17
Source File: turtlebot_pybullet.py    From SocialRobot with Apache License 2.0
def get_image(cam_pos, cam_orientation):
    """
    Arguments
        cam_pos: camera position
        cam_orientation: camera orientation in quaternion
    """
    width = 160
    height = 120
    fov = 90
    aspect = width / height
    near = 0.001
    far = 5

    if use_maximal_coordinates:
        # cam_orientation has problem when enable bt_rigid_body,
        # looking at 0.0, 0.0, 0.0 instead
        # this does not affect performance
        cam_pos_offset = cam_pos + np.array([0.0, 0.0, 0.3])
        target_pos = np.array([0.0, 0.0, 0.0])
    else:
        # camera pos, look at, camera up direction
        rot_matrix = p.getMatrixFromQuaternion(cam_orientation)
        # offset to base pos
        cam_pos_offset = cam_pos + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([0.1, 0.0, 0.3]))
        target_pos = cam_pos_offset + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([-1.0, 0.0, 0.0]))
    # compute view matrix
    view_matrix = p.computeViewMatrix(cam_pos_offset, target_pos, [0, 0, 1])
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)

    # Get depth values using the OpenGL renderer
    if enable_open_gl_rendering:
        w, h, rgb, depth, seg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            shadow=True,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
    else:
        w, h, rgb, depth, seg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            shadow=True,
            renderer=p.ER_TINY_RENDERER)

    # To recover metric depth and a normalized segmentation mask:
    # depth_buffer = np.reshape(depth, [height, width])
    # depth = far * near / (far - (far - near) * depth_buffer)
    # seg = np.reshape(seg, [height, width]) * 1. / 255.
    return rgb
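The commented-out lines above show the standard conversion from the non-linear depth buffer returned by getCameraImage() (values in [0, 1]) back to metric depth. As a self-contained sketch of that formula (function name is ours, not from the source):

import numpy as np

def depth_buffer_to_meters(depth_buffer, near, far, width, height):
    # Linearize a getCameraImage() depth buffer into distances in meters.
    z = np.reshape(depth_buffer, (height, width))
    return far * near / (far - (far - near) * z)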