Python pybullet.resetDebugVisualizerCamera() Examples
The following are 16 code examples of pybullet.resetDebugVisualizerCamera(). Each example notes the source file, project, and license it was taken from, so you can study it in its original context. You may also want to check out all available functions/classes of the module pybullet.
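Before digging into the project code below, here is a minimal, self-contained sketch (not taken from any of these projects) of how the call is typically used. resetDebugVisualizerCamera() affects only the GUI debug visualizer; it takes the camera distance in world units (usually meters), the yaw and pitch in degrees, the target position as an (x, y, z) sequence, and an optional physicsClientId.

import time

import pybullet as p
import pybullet_data

# The debug camera only exists in the GUI visualizer, so connect with p.GUI.
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")

# Place the camera 2 units from the origin, rotated 45 degrees in yaw and
# looking down at 30 degrees.
p.resetDebugVisualizerCamera(cameraDistance=2.0,
                             cameraYaw=45,
                             cameraPitch=-30,
                             cameraTargetPosition=[0, 0, 0])

# Step the simulation for one second so the view stays up.
for _ in range(240):
    p.stepSimulation()
    time.sleep(1. / 240.)

p.disconnect()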
Example #1
Source File: human_testing.py From assistive-gym with MIT License
def step(self, action):
    yaw = 0
    while True:
        yaw += -0.75
        p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=yaw, cameraPitch=-20, cameraTargetPosition=[0, 0, 1.0], physicsClientId=self.id)
        indices = [4, 5, 6]
        # indices = [14, 15, 16]
        deltas = [0.01, 0.01, -0.01]
        indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        deltas = [0, 0, 0, 0, 0.01, 0.01, -0.01, 0, 0, 0]
        # indices = []
        # deltas = []
        # indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 10
        # deltas = [0, 0, 0, 0, -0.01, 0.01, -0.01, 0, 0, 0]
        for i, d in zip(indices, deltas):
            joint_position = p.getJointState(self.human, jointIndex=i, physicsClientId=self.id)[0]
            if joint_position + d > self.human_lower_limits[i] and joint_position + d < self.human_upper_limits[i]:
                p.resetJointState(self.human, jointIndex=i, targetValue=joint_position+d, targetVelocity=0, physicsClientId=self.id)
        p.stepSimulation(physicsClientId=self.id)
        print('cameraYaw=%.2f, cameraPitch=%.2f, distance=%.2f' % p.getDebugVisualizerCamera(physicsClientId=self.id)[-4:-1])
        self.enforce_realistic_human_joint_limits()
        time.sleep(0.05)
    return [], None, None, None
Example #2
Source File: human_testing.py From assistive-gym with MIT License
def reset(self, robot_base_offset=[0, 0, 0], task='scratch_itch_pr2'):
    self.human, self.wheelchair, self.robot, self.robot_lower_limits, self.robot_upper_limits, self.human_lower_limits, self.human_upper_limits, self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices, self.gender = self.world_creation.create_new_world(furniture_type=None, static_human_base=True, human_impairment='none', print_joints=False, gender='random')
    p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=0, cameraPitch=-20, cameraTargetPosition=[0, 0, 1.0], physicsClientId=self.id)
    joints_positions = []
    # self.human_controllable_joint_indices = []
    self.human_controllable_joint_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # self.human_controllable_joint_indices = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    self.world_creation.setup_human_joints(self.human, joints_positions, self.human_controllable_joint_indices, use_static_joints=True, human_reactive_force=None)
    p.setGravity(0, 0, 0, physicsClientId=self.id)

    # Enable rendering
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
    return []
Example #3
Source File: env_modalities.py From midlevel-reps with MIT License
def step_physics(self, a):
    self.nframe += 1
    if not self.scene.multiplayer:
        self.robot.apply_action(a)
        self.scene.global_step()
    self.rewards = self._rewards(a)
    done = self._termination()
    self.reward += sum(self.rewards)
    self.eps_reward += sum(self.rewards)
    if self.gui:
        pos = self.robot._get_scaled_position()
        orn = self.robot.get_orientation()
        pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
        pos = np.array(pos)
        dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
        p.resetDebugVisualizerCamera(dist, self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)
    return {}, sum(self.rewards), done, {}
Example #4
Source File: simulation.py From onshape-to-robot with MIT License
def lookAt(self, target):
    """Control the look of the visualizer camera

    Arguments:
        target {tuple} -- target as (x,y,z) tuple
    """
    if self.gui:
        params = p.getDebugVisualizerCamera()
        p.resetDebugVisualizerCamera(params[10], params[8], params[9], target)
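The lookAt() helper above works because getDebugVisualizerCamera() returns a 12-tuple in which index 8 is the current yaw, index 9 the pitch, index 10 the distance, and index 11 the target position. A standalone sketch of the same idea, assuming a GUI client is already connected:

import pybullet as p

def look_at(target):
    """Re-aim the debug camera at `target` while keeping yaw, pitch and distance."""
    # Indices 8-10 of the getDebugVisualizerCamera() tuple are yaw, pitch, distance.
    yaw, pitch, dist = p.getDebugVisualizerCamera()[8:11]
    p.resetDebugVisualizerCamera(dist, yaw, pitch, target)

# Example usage (requires an existing p.connect(p.GUI) session):
# look_at([0.5, 0.0, 0.2])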
Example #5
Source File: bullet_physics_engine.py From NTP-vat-release with MIT License
def set_camera(focal_point, focal_dist, euler, width=500, height=540):
    """ """
    _euler = euler / np.pi * 180
    roll = _euler[0]
    pitch = _euler[1]
    yaw = _euler[2]
    aspect = float(height) / float(width)
    # TODO: Order of pitch and yaw for the camera?
    p.resetDebugVisualizerCamera(
        cameraDistance=focal_dist,
        cameraYaw=pitch,
        cameraPitch=yaw,
        cameraTargetPosition=focal_point)
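The TODO in Example #5 points at a genuine pitfall: resetDebugVisualizerCamera() expects cameraYaw and cameraPitch in degrees, and the snippet above passes the converted Euler pitch as the camera yaw and the Euler yaw as the camera pitch. The sketch below shows the mapping one might have expected instead; it is an assumption about the intended behaviour, not code from the NTP-vat-release project:

import numpy as np
import pybullet as p

def set_camera_direct(focal_point, focal_dist, euler_rad):
    """Hypothetical variant: map Euler angles (radians) straight onto the camera."""
    roll_deg, pitch_deg, yaw_deg = np.degrees(euler_rad)
    # Roll is ignored; the debug camera is parameterised only by yaw, pitch,
    # distance and target.
    p.resetDebugVisualizerCamera(cameraDistance=focal_dist,
                                 cameraYaw=yaw_deg,
                                 cameraPitch=pitch_deg,
                                 cameraTargetPosition=focal_point)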
Example #6
Source File: env_modalities.py From GtS with MIT License
def _reset(self):
    assert(self._robot_introduced)
    assert(self._scene_introduced)
    debugmode = 1
    if debugmode:
        print("Episode: steps:{} score:{}".format(self.nframe, self.reward))
        body_xyz = self.robot.body_xyz
        #print("[{}, {}, {}],".format(body_xyz[0], body_xyz[1], body_xyz[2]))
        print("Episode count: {}".format(self.eps_count))
    self.eps_count += 1
    self.nframe = 0
    self.eps_reward = 0
    BaseEnv._reset(self)
    if not self.ground_ids:
        self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene([])
        self.ground_ids = set(self.scene.scene_obj_list)
    ## Todo: (hzyjerry) this part is not working, robot_tracking_id = -1
    for i in range(p.getNumBodies()):
        if (p.getBodyInfo(i)[0].decode() == self.robot_body.get_name()):
            self.robot_tracking_id = i
        #print(p.getBodyInfo(i)[0].decode())
    i = 0
    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)
    pos = self.robot._get_scaled_position()
    orn = self.robot.get_orientation()
    pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
    p.resetDebugVisualizerCamera(self.tracking_camera['distance'], self.tracking_camera['yaw'],
                                 self.tracking_camera['pitch'], pos)
    return observations
Example #7
Source File: env_modalities.py From midlevel-reps with MIT License
def _reset(self):
    assert(self._robot_introduced)
    assert(self._scene_introduced)
    debugmode = 1
    if debugmode:
        print("Episode: steps:{} score:{}".format(self.nframe, self.reward))
        body_xyz = self.robot.body_xyz
        print("[{}, {}, {}],".format(body_xyz[0], body_xyz[1], body_xyz[2]))
        print("Episode count: {}".format(self.eps_count))
    self.eps_count += 1
    self.nframe = 0
    self.eps_reward = 0
    BaseEnv._reset(self)
    if not self.ground_ids:
        self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene([])
        self.ground_ids = set(self.scene.scene_obj_list)
    ## Todo: (hzyjerry) this part is not working, robot_tracking_id = -1
    for i in range(p.getNumBodies()):
        if (p.getBodyInfo(i)[0].decode() == self.robot_body.get_name()):
            self.robot_tracking_id = i
        #print(p.getBodyInfo(i)[0].decode())
    i = 0
    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)
    pos = self.robot._get_scaled_position()
    orn = self.robot.get_orientation()
    pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
    p.resetDebugVisualizerCamera(self.tracking_camera['distance'], self.tracking_camera['yaw'],
                                 self.tracking_camera['pitch'], pos)
    return observations
Example #8
Source File: gym_mujoco_xml_env.py From bullet-gym with MIT License
def move_and_look_at(self, i, j, k, x, y, z):
    lookat = [x, y, z]
    distance = 10
    yaw = 10
    p.resetDebugVisualizerCamera(distance, yaw, -20, lookat)
Example #9
Source File: pybullet_robot_interface.py From scikit-robot with MIT License
def main():
    # initialize robot
    robot = skrobot.models.Kuka()
    interface = skrobot.interfaces.PybulletRobotInterface(robot)
    pybullet.resetDebugVisualizerCamera(
        cameraDistance=1.5,
        cameraYaw=45,
        cameraPitch=-45,
        cameraTargetPosition=(0, 0, 0.5),
    )
    print('==> Initialized Kuka Robot on PyBullet')
    for _ in range(100):
        pybullet.stepSimulation()
    time.sleep(3)

    # reset pose
    print('==> Moving to Reset Pose')
    robot.reset_manip_pose()
    interface.angle_vector(robot.angle_vector(), realtime_simulation=True)
    interface.wait_interpolation()
    time.sleep(3)

    # ik
    print('==> Solving Inverse Kinematics')
    target_coords = skrobot.coordinates.Coordinates(
        pos=[0.5, 0, 0]
    ).rotate(np.pi / 2.0, 'y', 'local')
    skrobot.interfaces.pybullet.draw(target_coords)
    robot.inverse_kinematics(
        target_coords,
        link_list=robot.rarm.link_list,
        move_target=robot.rarm_end_coords,
        rotation_axis=True,
        stop=1000,
    )
    interface.angle_vector(robot.angle_vector(), realtime_simulation=True)
    interface.wait_interpolation()

    # wait
    while pybullet.isConnected():
        time.sleep(0.01)
Example #10
Source File: kukaGymEnv.py From soccer-matlab with BSD 2-Clause "Simplified" License
def __init__(self,
             urdfRoot=pybullet_data.getDataPath(),
             actionRepeat=1,
             isEnableSelfCollision=True,
             renders=False,
             isDiscrete=False,
             maxSteps=1000):
    #print("KukaGymEnv __init__")
    self._isDiscrete = isDiscrete
    self._timeStep = 1. / 240.
    self._urdfRoot = urdfRoot
    self._actionRepeat = actionRepeat
    self._isEnableSelfCollision = isEnableSelfCollision
    self._observation = []
    self._envStepCounter = 0
    self._renders = renders
    self._maxSteps = maxSteps
    self.terminated = 0
    self._cam_dist = 1.3
    self._cam_yaw = 180
    self._cam_pitch = -40
    self._p = p
    if self._renders:
        cid = p.connect(p.SHARED_MEMORY)
        if (cid < 0):
            cid = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(1.3, 180, -41, [0.52, -0.2, -0.33])
    else:
        p.connect(p.DIRECT)
    #timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
    self._seed()
    self.reset()
    observationDim = len(self.getExtendedObservation())
    #print("observationDim")
    #print(observationDim)

    observation_high = np.array([largeValObservation] * observationDim)
    if (self._isDiscrete):
        self.action_space = spaces.Discrete(7)
    else:
        action_dim = 3
        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
    self.observation_space = spaces.Box(-observation_high, observation_high)
    self.viewer = None
Example #11
Source File: kukaCamGymEnv.py From soccer-matlab with BSD 2-Clause "Simplified" License
def __init__(self,
             urdfRoot=pybullet_data.getDataPath(),
             actionRepeat=1,
             isEnableSelfCollision=True,
             renders=False,
             isDiscrete=False):
    self._timeStep = 1. / 240.
    self._urdfRoot = urdfRoot
    self._actionRepeat = actionRepeat
    self._isEnableSelfCollision = isEnableSelfCollision
    self._observation = []
    self._envStepCounter = 0
    self._renders = renders
    self._width = 341
    self._height = 256
    self._isDiscrete = isDiscrete
    self.terminated = 0
    self._p = p
    if self._renders:
        cid = p.connect(p.SHARED_MEMORY)
        if (cid < 0):
            p.connect(p.GUI)
        p.resetDebugVisualizerCamera(1.3, 180, -41, [0.52, -0.2, -0.33])
    else:
        p.connect(p.DIRECT)
    #timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
    self._seed()
    self.reset()
    observationDim = len(self.getExtendedObservation())
    #print("observationDim")
    #print(observationDim)

    observation_high = np.array([np.finfo(np.float32).max] * observationDim)
    if (self._isDiscrete):
        self.action_space = spaces.Discrete(7)
    else:
        action_dim = 3
        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
    self.observation_space = spaces.Box(low=0, high=255, shape=(self._height, self._width, 4))
    self.viewer = None
Example #12
Source File: env_modalities.py From GtS with MIT License
def _step(self, a):
    self.nframe += 1
    if not self.scene.multiplayer:
        # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
        for i in range(math.ceil(self.config['dt'] / self.config['gibson_dt'])):
            state = self.robot.apply_action(a)
            self.scene.global_step()

    self.rewards = self._rewards(a)
    done = self._termination()
    debugmode = 0
    if (debugmode):
        print("rewards=")
        print(self.rewards)
        print("sum rewards")
        print(sum(self.rewards))
    self.reward += sum(self.rewards)
    self.eps_reward += sum(self.rewards)

    debugmode = 0
    if debugmode:
        print("Eps frame {} reward {}".format(self.nframe, self.reward))
        print("position", self.robot.get_position())
    if self.gui:
        pos = self.robot._get_scaled_position()
        orn = self.robot.get_orientation()
        pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
        pos = np.array(pos)
        dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
        p.resetDebugVisualizerCamera(dist, self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)
    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)

    debugmode = 0
    if (debugmode):
        print("Camera env eye position", eye_pos)
        print("episode rewards", sum(self.rewards), "steps", self.nframe)

    episode = None
    if done:
        episode = {'r': self.reward, 'l': self.nframe}

    debugmode = 0
    if debugmode:
        print("return episode:", episode)

    return observations, sum(self.rewards), bool(done), dict(eye_pos=eye_pos, eye_quat=eye_quat, episode=episode,
                                                              x=state[0], y=state[1], yaw=state[2], height=state[3], speed=state[4])
Example #13
Source File: env_modalities.py From midlevel-reps with MIT License
def _step(self, a):
    self.nframe += 1
    if not self.scene.multiplayer:
        # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
        self.robot.apply_action(a)
        self.scene.global_step()

    self.rewards = self._rewards(a)
    done = self._termination()
    debugmode = 0
    if (debugmode):
        print("rewards=")
        print(self.rewards)
        print("sum rewards")
        print(sum(self.rewards))
    self.reward += sum(self.rewards)
    self.eps_reward += sum(self.rewards)

    debugmode = 0
    if debugmode:
        print("Eps frame {} reward {}".format(self.nframe, self.reward))
        print("position", self.robot.get_position())
    if self.gui:
        pos = self.robot._get_scaled_position()
        orn = self.robot.get_orientation()
        pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
        pos = np.array(pos)
        dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
        p.resetDebugVisualizerCamera(dist, self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)
    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)

    debugmode = 0
    if (debugmode):
        print("Camera env eye position", eye_pos)
        print("episode rewards", sum(self.rewards), "steps", self.nframe)

    episode = None
    if done:
        episode = {'r': self.reward, 'l': self.nframe}

    debugmode = 0
    if debugmode:
        print("return episode:", episode)

    return observations, sum(self.rewards), bool(done), dict(eye_pos=eye_pos, eye_quat=eye_quat, episode=episode)
Example #14
Source File: icub_reach_gym_env.py From pybullet-robot-envs with GNU Lesser General Public License v2.1
def __init__(self,
             action_repeat=1,
             use_IK=1,
             control_arm='l',
             control_orientation=0,
             obj_name=get_objects_list()[0],
             obj_pose_rnd_std=0,
             renders=False,
             max_steps=2000):
    self._time_step = 1. / 240.

    self._control_arm = control_arm
    self._use_IK = use_IK
    self._control_orientation = control_orientation
    self._action_repeat = action_repeat
    self._observation = []
    self._hand_pose = []
    self._env_step_counter = 0
    self._renders = renders
    self._max_steps = max_steps
    self._last_frame_time = 0
    self.terminated = 0
    self._target_dist_min = 0.03

    # Initialize PyBullet simulator
    self._p = p
    if self._renders:
        self._physics_client_id = p.connect(p.SHARED_MEMORY)
        if self._physics_client_id < 0:
            self._physics_client_id = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(2.5, 90, -60, [0.0, -0.0, -0.0], physicsClientId=self._physics_client_id)
        p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)
    else:
        self._physics_client_id = p.connect(p.DIRECT)

    # Load robot
    self._robot = iCubEnv(self._physics_client_id, use_IK=self._use_IK, control_arm=self._control_arm,
                          control_orientation=self._control_orientation)

    # Load world environment
    self._world = WorldEnv(self._physics_client_id, obj_name=obj_name, obj_pose_rnd_std=obj_pose_rnd_std,
                           workspace_lim=self._robot.get_workspace())

    # limit iCub workspace to table plane
    workspace = self._robot.get_workspace()
    workspace[2][0] = self._world.get_table_height()
    self._robot.set_workspace(workspace)

    # Define spaces
    self.observation_space, self.action_space = self.create_gym_spaces()

    # initialize simulation environment
    self.seed()
    # self.reset()
Example #15
Source File: panda_push_gym_env.py From pybullet-robot-envs with GNU Lesser General Public License v2.1
def __init__(self,
             numControlledJoints=7,
             use_IK=0,
             action_repeat=1,
             obj_name=get_objects_list()[1],
             renders=False,
             max_steps=1000,
             obj_pose_rnd_std=0.0,
             tg_pose_rnd_std=0.0,
             includeVelObs=True):
    self._timeStep = 1. / 240.
    self.action_dim = []
    self._use_IK = use_IK
    self._action_repeat = action_repeat
    self._observation = []
    self._env_step_counter = 0
    self._renders = renders
    self._max_steps = max_steps
    self.terminated = False

    self._target_pose = [0.0] * 3
    self._target_dist_max = 0.3
    self._target_dist_min = 0.1
    self._tg_pose_rnd_std = tg_pose_rnd_std
    self.includeVelObs = includeVelObs

    if self._renders:
        self._physics_client_id = p.connect(p.SHARED_MEMORY)
        if self._physics_client_id < 0:
            self._physics_client_id = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(2.5, 90, -60, [0.52, -0.2, -0.33], physicsClientId=self._physics_client_id)
    else:
        self._physics_client_id = p.connect(p.DIRECT)

    # Load robot
    self._robot = pandaEnv(self._physics_client_id, use_IK=self._use_IK, joint_action_space=numControlledJoints)

    # Load world environment
    self._world = WorldEnv(self._physics_client_id, obj_name=obj_name, obj_pose_rnd_std=obj_pose_rnd_std,
                           workspace_lim=self._robot.get_workspace())

    # limit robot workspace to table plane
    workspace = self._robot.get_workspace()
    workspace[2][0] = self._world.get_table_height() - 0.2
    self._robot.set_workspace(workspace)

    # Define spaces
    self.observation_space, self.action_space = self.create_gym_spaces()

    self.seed()
    # self.reset()
Example #16
Source File: panda_reach_gym_env.py From pybullet-robot-envs with GNU Lesser General Public License v2.1
def __init__(self,
             numControlledJoints=7,
             use_IK=0,
             action_repeat=1,
             obj_name=get_objects_list()[1],
             renders=False,
             max_steps=1000,
             obj_pose_rnd_std=0,
             includeVelObs=True):
    self._timeStep = 1. / 240.
    self.action_dim = []
    self._use_IK = use_IK
    self._action_repeat = action_repeat
    self._observation = []
    self._env_step_counter = 0
    self._renders = renders
    self._max_steps = max_steps
    self.terminated = 0
    self._target_dist_min = 0.03
    self.includeVelObs = includeVelObs

    if self._renders:
        self._physics_client_id = p.connect(p.SHARED_MEMORY)
        if self._physics_client_id < 0:
            self._physics_client_id = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(2.5, 90, -60, [0.52, -0.2, -0.33], physicsClientId=self._physics_client_id)
    else:
        self._physics_client_id = p.connect(p.DIRECT)

    # Load robot
    self._robot = pandaEnv(self._physics_client_id, use_IK=self._use_IK, joint_action_space=numControlledJoints)

    # Load world environment
    self._world = WorldEnv(self._physics_client_id, obj_name=obj_name, obj_pose_rnd_std=obj_pose_rnd_std,
                           workspace_lim=self._robot.get_workspace())

    # limit robot workspace to table plane
    workspace = self._robot.get_workspace()
    workspace[2][0] = self._world.get_table_height()
    self._robot.set_workspace(workspace)

    # Define spaces
    self.observation_space, self.action_space = self.create_gym_spaces()

    self.seed()
    # self.reset()