Python pybullet.computeViewMatrixFromYawPitchRoll() Examples

The following are 14 code examples of pybullet.computeViewMatrixFromYawPitchRoll(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet. You may also want to check out all available functions/classes of the module pybullet, or try the search function.
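As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how computeViewMatrixFromYawPitchRoll() is typically paired with computeProjectionMatrixFOV() and getCameraImage() to render an RGB frame. The target position, distance, angles, and image size are arbitrary illustration values.

import numpy as np
import pybullet as p
import pybullet_data

# Headless physics server plus a simple scene to look at.
p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
p.loadURDF("r2d2.urdf", [0, 0, 0.5])

# View matrix from a target point, a camera distance, and yaw/pitch/roll
# in degrees; upAxisIndex=2 selects Z as the up axis.
view_matrix = p.computeViewMatrixFromYawPitchRoll(
    cameraTargetPosition=[0, 0, 0.5],
    distance=2.0,
    yaw=45,
    pitch=-30,
    roll=0,
    upAxisIndex=2)

# Matching perspective projection matrix.
proj_matrix = p.computeProjectionMatrixFOV(
    fov=60, aspect=320 / 240, nearVal=0.1, farVal=100.0)

# Render and keep only the RGB channels of the RGBA buffer
# (pixels come back as height rows of width pixels).
_, _, px, _, _ = p.getCameraImage(
    width=320, height=240,
    viewMatrix=view_matrix,
    projectionMatrix=proj_matrix,
    renderer=p.ER_TINY_RENDERER)
rgb = np.reshape(px, (240, 320, 4))[:, :, :3]

p.disconnect()

The examples below follow the same pattern inside gym-style environments, usually reading the camera target from the tracked robot and the camera parameters from environment attributes.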
Example #1
Source File: kukaCamGymEnv.py    From soccer-matlab with BSD 2-Clause "Simplified" License    6 votes
def _render(self, mode='human', close=False):
    if mode != "rgb_array":
      return np.array([])
    base_pos,orn = self._p.getBasePositionAndOrientation(self._racecar.racecarUniqueId)
    view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = self._p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH)/RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = self._p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array 
Example #2
Source File: env_bases.py    From GtS with MIT License    6 votes
def _render(self, mode, close):
        base_pos=[0,0,0]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos = self.robot.body_xyz
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        # getCameraImage returns height rows of width pixels, so reshape as (height, width, channels).
        rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
        if close: return None
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #3
Source File: env_bases.py    From GtS with MIT License    6 votes
def render_physics(self):
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(
                width=self._render_width, height=self._render_height,
                viewMatrix=view_matrix,
                projectionMatrix=proj_matrix,
                renderer=p.ER_TINY_RENDERER)
        # getCameraImage returns height rows of width pixels, so reshape as (height, width, channels).
        rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #4
Source File: env_bases.py    From GtS with MIT License    6 votes
def render_map(self):
        base_pos=[0, 0, -3]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos[0] = self.robot.body_xyz[0]
                base_pos[1] = self.robot.body_xyz[1]
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=35,
            yaw=0,
            pitch=-89,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        # getCameraImage returns height rows of width pixels, so reshape as (height, width, channels).
        rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #5
Source File: env_bases.py    From midlevel-reps with MIT License    6 votes
def _render(self, mode, close):
        base_pos=[0,0,0]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos = self.robot.body_xyz
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px)
        if close: return None
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #6
Source File: env_bases.py    From midlevel-reps with MIT License    6 votes
def render_physics(self):
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(
                width=self._render_width, height=self._render_height,
                viewMatrix=view_matrix,
                projectionMatrix=proj_matrix,
                renderer=p.ER_TINY_RENDERER)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #7
Source File: env_bases.py    From midlevel-reps with MIT License    6 votes
def render_map(self):
        base_pos=[0, 0, -3]
        if (hasattr(self,'robot')):
            if (hasattr(self.robot,'body_xyz')):
                base_pos[0] = self.robot.body_xyz[0]
                base_pos[1] = self.robot.body_xyz[1]
        
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=base_pos,
            distance=35,
            yaw=0,
            pitch=-89,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._render_width)/self._render_height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]
        return rgb_array 
Example #8
Source File: kukaCamGymEnv.py    From soccer-matlab with BSD 2-Clause "Simplified" License    5 votes
def getExtendedObservation(self):

     #camEyePos = [0.03,0.236,0.54]
     #distance = 1.06
     #pitch=-56
     #yaw = 258
     #roll=0
     #upAxisIndex = 2
     #camInfo = p.getDebugVisualizerCamera()
     #print("width,height")
     #print(camInfo[0])
     #print(camInfo[1])
     #print("viewMatrix")
     #print(camInfo[2])
     #print("projectionMatrix")
     #print(camInfo[3])
     #viewMat = camInfo[2]
     #viewMat = p.computeViewMatrixFromYawPitchRoll(camEyePos,distance,yaw, pitch,roll,upAxisIndex)
     viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0, -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0, 0.0, 0.5504802465438843, 0.8348482847213745, 0.0, 0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
     #projMatrix = camInfo[3]#[0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
     projMatrix = [0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]

     img_arr = p.getCameraImage(width=self._width,height=self._height,viewMatrix=viewMat,projectionMatrix=projMatrix)
     rgb=img_arr[2]
     np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
     self._observation = np_img_arr
     return self._observation 
Example #9
Source File: kuka_diverse_object_gym_env.py    From soccer-matlab with BSD 2-Clause "Simplified" License    5 votes
def _reset(self):
    """Environment reset called at the beginning of an episode.
    """
    # Set the camera settings.
    look = [0.23, 0.2, 0.54]
    distance = 1.
    pitch = -56 + self._cameraRandom*np.random.uniform(-3, 3)
    yaw = 245 + self._cameraRandom*np.random.uniform(-3, 3)
    roll = 0
    self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 20. + self._cameraRandom*np.random.uniform(-2, 2)
    aspect = self._width / self._height
    near = 0.01
    far = 10
    self._proj_matrix = p.computeProjectionMatrixFOV(
        fov, aspect, near, far)
    
    self._attempted_grasp = False
    self._env_step = 0
    self.terminated = 0

    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF(os.path.join(self._urdfRoot,"plane.urdf"),[0,0,-1])
    
    p.loadURDF(os.path.join(self._urdfRoot,"table/table.urdf"), 0.5000000,0.00000,-.820000,0.000000,0.000000,0.0,1.0)
            
    p.setGravity(0,0,-10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
    self._envStepCounter = 0
    p.stepSimulation()

    # Choose the objects in the bin.
    urdfList = self._get_random_object(
      self._numObjects, self._isTest)
    self._objectUids = self._randomly_place_objects(urdfList)
    self._observation = self._get_observation()
    return np.array(self._observation) 
Example #10
Source File: bullet_physics_engine.py    From NTP-vat-release with MIT License    5 votes
def foo(focal_point, focal_dist, yaw, pitch, roll, aspect):
        # Excerpt: builds the camera view and projection matrices; free variables
        # from the original context are taken as parameters here.
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
                cameraTargetPosition=focal_point,
                distance=focal_dist,
                yaw=yaw,
                pitch=pitch,
                roll=roll,
                upAxisIndex=2)
        projection_matrix = p.computeProjectionMatrixFOV(
                fov=60,
                aspect=aspect,
                nearVal=0.01,
                farVal=1000.0) 
Example #11
Source File: pose_env.py    From tensor2robot with Apache License 2.0    5 votes
def _reset_camera(self):
    look = [0., 0., 0.]
    distance = 3.
    pitch = -30 + np.random.uniform(-10, 10)
    yaw = np.random.uniform(-180, 180)
    roll = 0
    self._view_matrix = pybullet.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 30
    aspect = self._width / self._height
    near = 0.1
    far = 10
    self._proj_matrix = pybullet.computeProjectionMatrixFOV(
        fov, aspect, near, far) 
Example #12
Source File: camera.py    From costar_plan with Apache License 2.0    5 votes
def __init__(self, name, target,
                 distance,
                 roll, pitch, yaw,
                 up_idx=2,
                 image_width=1024 / 8,
                 image_height=768 / 8,
                 fov=45,
                 near_plane=0.1,
                 far_plane=10):
        '''
        Create camera matrix for a particular position in the simulation. Task
        definitions should produce these and
        '''
        self.name = name
        self.matrix = np.array(pb.computeViewMatrixFromYawPitchRoll(
            target, distance, yaw=yaw, pitch=pitch, roll=roll, upAxisIndex=up_idx))
        self.image_height = image_height
        self.image_width = image_width
        self.aspect_ratio = self.image_width / self.image_height
        self.fov = fov
        self.near_plane = near_plane
        self.far_plane = far_plane
        self.projection_matrix = np.array(pb.computeProjectionMatrixFOV(
            self.fov,
            self.aspect_ratio,
            self.near_plane,
            self.far_plane)) 
Example #13
Source File: kuka_button_gym_env.py    From robotics-rl-srl with MIT License    4 votes
def render(self, mode='human', close=False):
        if mode != "rgb_array":
            return np.array([])
        camera_target_pos = self.camera_target_pos

        if self.debug:
            self._cam_dist = p.readUserDebugParameter(self.dist_slider)
            self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
            self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
            x = p.readUserDebugParameter(self.x_slider)
            y = p.readUserDebugParameter(self.y_slider)
            z = p.readUserDebugParameter(self.z_slider)
            camera_target_pos = (x, y, z)
            # self._cam_roll = p.readUserDebugParameter(self.roll_slider)

        # TODO: recompute view_matrix and proj_matrix only in debug mode
        view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=camera_target_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix1 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix1,
            projectionMatrix=proj_matrix1, renderer=self.renderer)
        rgb_array1 = np.array(px1)

        if self.multi_view:
            # adding a second camera on the other side of the robot
            view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
                cameraTargetPosition=(0.316, 0.316, -0.105),
                distance=1.05,
                yaw=32,
                pitch=-13,
                roll=0,
                upAxisIndex=2)
            proj_matrix2 = p.computeProjectionMatrixFOV(
                fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
                nearVal=0.1, farVal=100.0)
            (_, _, px2, _, _) = p.getCameraImage(
                width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix2,
                projectionMatrix=proj_matrix2, renderer=self.renderer)
            rgb_array2 = np.array(px2)
            rgb_array_res = np.concatenate((rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
        else:
            rgb_array_res = rgb_array1[:, :, :3]
        return rgb_array_res 
Example #14
Source File: mobile_robot_env.py    From robotics-rl-srl with MIT License    4 votes
def render(self, mode='human', close=False):
        if mode != "rgb_array":
            return np.array([])
        camera_target_pos = self.camera_target_pos

        if self.debug:
            self._cam_dist = p.readUserDebugParameter(self.dist_slider)
            self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
            self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
            x = p.readUserDebugParameter(self.x_slider)
            y = p.readUserDebugParameter(self.y_slider)
            z = p.readUserDebugParameter(self.z_slider)
            camera_target_pos = (x, y, z)

        # TODO: recompute view_matrix and proj_matrix only in debug mode
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=camera_target_pos,
            distance=self._cam_dist,
            yaw=self._cam_yaw,
            pitch=self._cam_pitch,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=self.renderer)
        rgb_array = np.array(px1)

        rgb_array_res = rgb_array[:, :, :3]

        # if first person view, then stack the observation from the car camera
        if self.fpv:
            # move camera
            view_matrix = p.computeViewMatrixFromYawPitchRoll(
                cameraTargetPosition=(self.robot_pos[0]-0.25, self.robot_pos[1], 0.15),
                distance=0.3,
                yaw=self._cam_yaw,
                pitch=-17,
                roll=self._cam_roll,
                upAxisIndex=2)
            proj_matrix = p.computeProjectionMatrixFOV(
                fov=90, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
                nearVal=0.1, farVal=100.0)
            # get and stack image
            (_, _, px1, _, _) = p.getCameraImage(
                width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
                projectionMatrix=proj_matrix, renderer=self.renderer)
            rgb_array = np.array(px1)
            rgb_array_res = np.dstack([rgb_array_res, rgb_array[:, :, :3]])

        return rgb_array_res