Python pybullet.getCameraImage() Examples
The following are 22 code examples of pybullet.getCameraImage(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module pybullet, or try the search function.
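For orientation, here is a minimal, self-contained sketch of a typical call (the scene and camera placement are arbitrary choices, not taken from any example below). getCameraImage() renders the scene from explicit view and projection matrices and returns a five-tuple of width, height, RGBA pixels, a depth buffer, and a segmentation mask:

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)  # headless physics server; no GUI required
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")

view_matrix = p.computeViewMatrix(
    cameraEyePosition=[0, -2, 1],
    cameraTargetPosition=[0, 0, 0],
    cameraUpVector=[0, 0, 1])
proj_matrix = p.computeProjectionMatrixFOV(
    fov=60, aspect=1.0, nearVal=0.1, farVal=100.0)

# Returns (width, height, rgbPixels, depthPixels, segmentationMaskBuffer).
width, height, rgb, depth, seg = p.getCameraImage(
    128, 128, viewMatrix=view_matrix, projectionMatrix=proj_matrix)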
Example #1
Source File: kukaCamGymEnv.py From soccer-matlab with BSD 2-Clause "Simplified" License | 6 votes |
def _render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    base_pos, orn = self._p.getBasePositionAndOrientation(self._racecar.racecarUniqueId)
    view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = self._p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = self._p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]  # drop the alpha channel
    return rgb_array
Example #2
Source File: camera.py From qibullet with Apache License 2.0 | 6 votes |
def _waitForCorrectImageFormat(self):
    """
    INTERNAL METHOD, to be called after the launch of the extraction
    thread. Blocking method that will return when the array retrieved
    from the @getCameraImage is not None and corresponds to the current
    resolution
    """
    try:
        assert self.module_process.isAlive()

        while self.getFrame() is None:
            continue

        while True:
            image = self.getFrame()

            if image.shape[1] == self.resolution.width and\
               image.shape[0] == self.resolution.height:
                break

    except AssertionError:
        return
Example #3
Source File: simulate.py From obman_train with GNU General Public License v3.0 | 6 votes |
def take_picture(renderer, width=256, height=256, scale=0.001, conn_id=None):
    view_matrix = p.computeViewMatrix(
        [0, 0, -1], [0, 0, 0], [0, -1, 0], physicsClientId=conn_id
    )
    proj_matrix = p.computeProjectionMatrixFOV(
        20, 1, 0.05, 2, physicsClientId=conn_id
    )
    w, h, rgba, depth, mask = p.getCameraImage(
        width=width,
        height=height,
        projectionMatrix=proj_matrix,
        viewMatrix=view_matrix,
        renderer=renderer,
        physicsClientId=conn_id,
    )
    return rgba
Example #4
Source File: renderer.py From mvp_grasp with BSD 3-Clause "New" or "Revised" License | 6 votes |
def render(self):
    if np.all(self._rendered_pos == self.camera_pos) and np.all(self._rendered_rot == self.camera_rot):
        return self._rendered

    target = self.camera_pos + np.dot(self.camera_rot, [0, 0, 1.0, 1.0])[0:3]
    up = np.dot(self.camera_rot, [0, -1.0, 0, 1.0])[0:3]
    vm = pb.computeViewMatrix(self.camera_pos, target, up)

    i_arr = pb.getCameraImage(self.im_width, self.im_height, vm, self.pm,
                              shadow=0,
                              renderer=pb.ER_TINY_RENDERER)
                              # renderer=pb.ER_BULLET_HARDWARE_OPENGL)

    # Record the position of the camera, and don't re-render if it hasn't moved.
    self._rendered = i_arr
    self._rendered_pos = self.camera_pos.copy()
    self._rendered_rot = self.camera_rot.copy()

    return i_arr
Example #5
Source File: env_bases.py From midlevel-reps with MIT License | 6 votes |
def render_map(self):
    base_pos = [0, 0, -3]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=35,
        yaw=0,
        pitch=-89,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example #6
Source File: env_bases.py From midlevel-reps with MIT License | 6 votes |
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example #7
Source File: env_bases.py From midlevel-reps with MIT License | 6 votes |
def _render(self, mode, close):
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    if close:
        return None
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example #8
Source File: env_bases.py From GtS with MIT License | 6 votes |
def render_map(self):
    base_pos = [0, 0, -3]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=35,
        yaw=0,
        pitch=-89,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    # Caution: getCameraImage returns pixels in row-major (height, width)
    # order, so this width-first reshape is only shape-safe when the
    # render target is square.
    rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example #9
Source File: env_bases.py From GtS with MIT License | 6 votes |
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER)
    rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example #10
Source File: env_bases.py From GtS with MIT License | 6 votes |
def _render(self, mode, close):
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
    if close:
        return None
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example #11
Source File: camera.py From costar_plan with Apache License 2.0 | 5 votes |
def capture(self):
    _, _, rgb, depth, mask = pb.getCameraImage(
        self.image_width,
        self.image_height,
        viewMatrix=self.matrix,
        projectionMatrix=self.projection_matrix)
    return ImageData(self.name, rgb, depth, mask)
Example #12
Source File: helloworld_panda.py From pybullet-robot-envs with GNU Lesser General Public License v2.1 | 5 votes |
def render(robot):
    pos, rot, _, _, _, _ = p.getLinkState(robot.robot_id, linkIndex=robot.end_eff_idx,
                                          computeForwardKinematics=True)
    rot_matrix = p.getMatrixFromQuaternion(rot)
    rot_matrix = np.array(rot_matrix).reshape(3, 3)

    # camera params
    height = 640
    width = 480
    fx, fy = 596.6278076171875, 596.6278076171875
    cx, cy = 311.98663330078125, 236.76170349121094
    near, far = 0.1, 10

    camera_vector = rot_matrix.dot((0, 0, 1))
    up_vector = rot_matrix.dot((0, -1, 0))

    camera_eye_pos = np.array(pos)
    camera_target_position = camera_eye_pos + 0.2 * camera_vector

    view_matrix = p.computeViewMatrix(camera_eye_pos, camera_target_position, up_vector)

    proj_matrix = (2.0 * fx / width, 0.0, 0.0, 0.0,
                   0.0, 2.0 * fy / height, 0.0, 0.0,
                   1.0 - 2.0 * cx / width, 2.0 * cy / height - 1.0,
                   (far + near) / (near - far), -1.0,
                   0.0, 0.0, 2.0 * far * near / (near - far), 0.0)

    p.getCameraImage(width=width, height=height, viewMatrix=view_matrix,
                     projectionMatrix=proj_matrix,
                     renderer=p.ER_BULLET_HARDWARE_OPENGL)
                     # renderer=self._p.ER_TINY_RENDERER)
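A note on the hand-rolled proj_matrix above: pybullet accepts projection matrices as flattened 16-tuples in column-major (OpenGL) order, so this example maps pinhole intrinsics (fx, fy, cx, cy) directly onto the standard OpenGL perspective matrix instead of calling computeProjectionMatrixFOV. This is the usual route when the virtual camera has to match a real, calibrated camera.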
Example #13
Source File: camera.py From qibullet with Apache License 2.0 | 5 votes |
def _getCameraImage(self):
    """
    INTERNAL METHOD, Computes the OpenGL virtual camera image. The
    resolution and the projection matrix have to be computed before
    calling this method, or it will crash

    Returns:
        camera_image - The camera image of the OpenGL virtual camera
    """
    _, _, _, _, pos_world, q_world = pybullet.getLinkState(
        self.robot_model,
        self.camera_link.getParentIndex(),
        computeForwardKinematics=False,
        physicsClientId=self.physics_client)

    rotation = pybullet.getMatrixFromQuaternion(q_world)
    forward_vector = [rotation[0], rotation[3], rotation[6]]
    up_vector = [rotation[2], rotation[5], rotation[8]]

    camera_target = [
        pos_world[0] + forward_vector[0] * 10,
        pos_world[1] + forward_vector[1] * 10,
        pos_world[2] + forward_vector[2] * 10]

    view_matrix = pybullet.computeViewMatrix(
        pos_world,
        camera_target,
        up_vector,
        physicsClientId=self.physics_client)

    with self.resolution_lock:
        camera_image = pybullet.getCameraImage(
            self.resolution.width,
            self.resolution.height,
            view_matrix,
            self.projection_matrix,
            renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
            flags=pybullet.ER_NO_SEGMENTATION_MASK,
            physicsClientId=self.physics_client)

    return camera_image
Example #14
Source File: testMJCF.py From soccer-matlab with BSD 2-Clause "Simplified" License | 5 votes |
def test(args):
    p.connect(p.GUI)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    fileName = os.path.join("mjcf", args.mjcf)
    print("fileName")
    print(fileName)
    p.loadMJCF(fileName)
    while (1):
        p.stepSimulation()
        p.getCameraImage(320, 240)
        time.sleep(0.01)
Example #15
Source File: pose_env.py From tensor2robot with Apache License 2.0 | 5 votes |
def _get_image(self):
    img_arr = pybullet.getCameraImage(width=self._width,
                                      height=self._height,
                                      viewMatrix=self._view_matrix,
                                      projectionMatrix=self._proj_matrix,
                                      physicsClientId=self.cid)
    rgb = img_arr[2]
    np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
    return np_img_arr[:, :, :3]
Example #16
Source File: env.py From assistive-gym with MIT License | 5 votes |
def record_video_frame(self):
    if self.record_video and self.gui:
        frame = np.reshape(p.getCameraImage(width=self.width, height=self.height,
                                            renderer=p.ER_BULLET_HARDWARE_OPENGL,
                                            physicsClientId=self.id)[2],
                           (self.height, self.width, 4))[:, :, :3]
        # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        # self.video_writer.write(frame)
Example #17
Source File: bullet_world.py From NTP-vat-release with MIT License | 5 votes |
def capture_image(self):
    width, height, im, depth, seg = p.getCameraImage(
        64, 64,
        list(self.view_matrix), list(self.projection_matrix),
        renderer=p.ER_TINY_RENDERER)
    self.depth = depth
    im = np.array(im).reshape([height, width, -1])
    return im[:, :, :3]
Example #18
Source File: kuka_diverse_object_gym_env.py From soccer-matlab with BSD 2-Clause "Simplified" License | 5 votes |
def _get_observation(self):
    """Return the observation as an image.
    """
    img_arr = p.getCameraImage(width=self._width,
                               height=self._height,
                               viewMatrix=self._view_matrix,
                               projectionMatrix=self._proj_matrix)
    rgb = img_arr[2]
    np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
    return np_img_arr[:, :, :3]
Example #19
Source File: kukaCamGymEnv.py From soccer-matlab with BSD 2-Clause "Simplified" License | 5 votes |
def getExtendedObservation(self):
    # camEyePos = [0.03, 0.236, 0.54]
    # distance = 1.06
    # pitch = -56
    # yaw = 258
    # roll = 0
    # upAxisIndex = 2
    # camInfo = p.getDebugVisualizerCamera()
    # print("width,height")
    # print(camInfo[0])
    # print(camInfo[1])
    # print("viewMatrix")
    # print(camInfo[2])
    # print("projectionMatrix")
    # print(camInfo[3])
    # viewMat = camInfo[2]
    # viewMat = p.computeViewMatrixFromYawPitchRoll(camEyePos, distance, yaw, pitch, roll, upAxisIndex)
    viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0,
               -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0,
               0.0, 0.5504802465438843, 0.8348482847213745, 0.0,
               0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
    # projMatrix = camInfo[3]  # [0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
    projMatrix = [0.75, 0.0, 0.0, 0.0,
                  0.0, 1.0, 0.0, 0.0,
                  0.0, 0.0, -1.0000200271606445, -1.0,
                  0.0, 0.0, -0.02000020071864128, 0.0]
    img_arr = p.getCameraImage(width=self._width, height=self._height,
                               viewMatrix=viewMat, projectionMatrix=projMatrix)
    rgb = img_arr[2]
    np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
    self._observation = np_img_arr
    return self._observation
Example #20
Source File: mobile_robot_env.py From robotics-rl-srl with MIT License | 4 votes |
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos

    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)

    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=self.renderer)
    rgb_array = np.array(px1)
    rgb_array_res = rgb_array[:, :, :3]

    # if first person view, then stack the observation from the car camera
    if self.fpv:
        # move camera
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(self.robot_pos[0] - 0.25, self.robot_pos[1], 0.15),
            distance=0.3,
            yaw=self._cam_yaw,
            pitch=-17,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=90, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        # get and stack image
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=self.renderer)
        rgb_array = np.array(px1)
        rgb_array_res = np.dstack([rgb_array_res, rgb_array[:, :, :3]])
    return rgb_array_res
Example #21
Source File: kuka_button_gym_env.py From robotics-rl-srl with MIT License | 4 votes |
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos

    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
        # self._cam_roll = p.readUserDebugParameter(self.roll_slider)

    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix1 = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix1,
        projectionMatrix=proj_matrix1, renderer=self.renderer)
    rgb_array1 = np.array(px1)

    if self.multi_view:
        # adding a second camera on the other side of the robot
        view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.316, 0.316, -0.105),
            distance=1.05,
            yaw=32,
            pitch=-13,
            roll=0,
            upAxisIndex=2)
        proj_matrix2 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px2, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix2,
            projectionMatrix=proj_matrix2, renderer=self.renderer)
        rgb_array2 = np.array(px2)
        rgb_array_res = np.concatenate((rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
    else:
        rgb_array_res = rgb_array1[:, :, :3]
    return rgb_array_res
Example #22
Source File: turtlebot_pybullet.py From SocialRobot with Apache License 2.0 | 4 votes |
def get_image(cam_pos, cam_orientation):
    """
    Arguments
        cam_pos: camera position
        cam_orientation: camera orientation in quaternion
    """
    width = 160
    height = 120
    fov = 90
    aspect = width / height
    near = 0.001
    far = 5

    if use_maximal_coordinates:
        # cam_orientation has problem when enable bt_rigid_body,
        # looking at 0.0, 0.0, 0.0 instead
        # this does not affect performance
        cam_pos_offset = cam_pos + np.array([0.0, 0.0, 0.3])
        target_pos = np.array([0.0, 0.0, 0.0])
    else:
        # camera pos, look at, camera up direction
        rot_matrix = p.getMatrixFromQuaternion(cam_orientation)
        # offset to base pos
        cam_pos_offset = cam_pos + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([0.1, 0.0, 0.3]))
        target_pos = cam_pos_offset + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([-1.0, 0.0, 0.0]))
    # compute view matrix
    view_matrix = p.computeViewMatrix(cam_pos_offset, target_pos, [0, 0, 1])
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)

    # Get depth values using the OpenGL renderer
    if enable_open_gl_rendering:
        w, h, rgb, depth, seg = p.getCameraImage(
            width, height, view_matrix, projection_matrix,
            shadow=True,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
    else:
        w, h, rgb, depth, seg = p.getCameraImage(
            width, height, view_matrix, projection_matrix,
            shadow=True,
            renderer=p.ER_TINY_RENDERER)

    # depth_buffer = np.reshape(images[3], [width, height])
    # depth = far * near / (far - (far - near) * depth_buffer)
    # seg = np.reshape(images[4], [width, height]) * 1. / 255.
    return rgb
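The commented-out lines at the end of this example hint at a common post-processing step: the depth buffer returned by getCameraImage() is non-linear and normalized to [0, 1], so metric distances must be recovered using the near/far planes that were baked into the projection matrix. A minimal sketch, reusing the depth, height, width, near, and far names from the example above:

import numpy as np

# Undo the non-linear OpenGL depth encoding to get eye-space distances;
# near/far must match the values used to build the projection matrix.
depth_buffer = np.reshape(depth, [height, width])
metric_depth = far * near / (far - (far - near) * depth_buffer)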