Python pybullet.stepSimulation() Examples
The following are 30 code examples of pybullet.stepSimulation(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out all of the available functions and classes of the pybullet module.
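Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them build on: connect to a physics server, load some bodies, then call pybullet.stepSimulation() in a loop, where each call advances the world by one fixed time step (1/240 s by default). The scene below is illustrative and not taken from any of the projects that follow.

import time
import pybullet as p
import pybullet_data

p.connect(p.DIRECT)  # use p.GUI instead for a visualization window
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -10)
p.loadURDF("plane.urdf")
cube = p.loadURDF("cube_small.urdf", [0, 0, 1])

# Each call advances the simulation by exactly one time step.
for _ in range(240):  # one simulated second at the default 240 Hz
    p.stepSimulation()
    time.sleep(1. / 240.)  # optional: pace the loop to roughly real time

print(p.getBasePositionAndOrientation(cube))  # the cube has fallen onto the plane
p.disconnect()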
Example #1
Source File: Motionv0Env.py From bullet-gym with MIT License
def _reset(self):  # reset your environment
    # reset state
    self.steps = 0
    self.done = False

    # reset morphology
    p.resetBasePositionAndOrientation(self.body, self.initPosition, self.initOrientation)  # reset body position and orientation
    resetPosition = 0
    for joint in xrange(self.num_joints):
        if self.random_initial_position:
            resetPosition = np.random.random() * 2 * np.pi - np.pi
        p.resetJointState(self.body, joint, resetPosition)  # reset joint position of joints
    for _ in xrange(100):
        p.stepSimulation()

    # bootstrap state by running for all repeats
    for i in xrange(self.repeats):
        self.set_state_element_for_repeat(i)

    # return this state
    return np.copy(self.state)
Example #2
Source File: minitaur_env.py From midlevel-reps with MIT License
def configure(self, args):
    self._args = args

#def _reset(self):
#  if self._env_randomizer is not None:
#    self._env_randomizer.randomize_env(self)
#  self._last_base_position = [0, 0, 0]
#  self._objectives = []
#  if not self._torque_control_enabled:
#    for _ in range(1 / self.timestep):
#      if self._pd_control_enabled or self._accurate_motor_model_enabled:
#        self.robot.ApplyAction([math.pi / 2] * 8)
#      pybullet.stepSimulation()
#  return self._noisy_observation()
Example #3
Source File: Detached2DCartPolev0Env.py From bullet-gym with MIT License
def _reset(self):
    # reset state
    self.steps = 0
    self.done = False

    # reset pole on cart in starting poses
    p.resetBasePositionAndOrientation(self.cart, (0,0,0.08), (0,0,0,1))
    p.resetBasePositionAndOrientation(self.pole, (0,0,0.35), (0,0,0,1))
    for _ in xrange(100):
        p.stepSimulation()

    # give a fixed force push in a random direction to get things going...
    theta = np.multiply(np.multiply((np.random.random(), np.random.random(), 0), 2) - (1,1,0), 5) if self.random_theta else (1,0,0)
    for _ in xrange(self.initial_force_steps):
        p.stepSimulation()
        p.applyExternalForce(self.pole, -1, theta, (0, 0, 0), p.WORLD_FRAME)
        if self.delay > 0:
            time.sleep(self.delay)

    # bootstrap state by running for all repeats
    for i in xrange(self.repeats):
        self.set_state_element_for_repeat(i)

    # return this state
    return np.copy(self.state)
Example #4
Source File: unittests.py From soccer-matlab with BSD 2-Clause "Simplified" License
def test_rolling_friction(self):
    import pybullet as p
    p.connect(p.DIRECT)
    p.loadURDF("plane.urdf")
    sphere = p.loadURDF("sphere2.urdf", [0, 0, 1])
    p.resetBaseVelocity(sphere, linearVelocity=[1, 0, 0])
    p.changeDynamics(sphere, -1, linearDamping=0, angularDamping=0)
    #p.changeDynamics(sphere,-1,rollingFriction=0)
    p.setGravity(0, 0, -10)
    for i in range(1000):
        p.stepSimulation()
    vel = p.getBaseVelocity(sphere)
    self.assertLess(vel[0][0], 1e-10)
    self.assertLess(vel[0][1], 1e-10)
    self.assertLess(vel[0][2], 1e-10)
    self.assertLess(vel[1][0], 1e-10)
    self.assertLess(vel[1][1], 1e-10)
    self.assertLess(vel[1][2], 1e-10)
    p.disconnect()
Example #5
Source File: kukaGymEnv.py From soccer-matlab with BSD 2-Clause "Simplified" License
def _reset(self):
    #print("KukaGymEnv _reset")
    self.terminated = 0
    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF(os.path.join(self._urdfRoot, "plane.urdf"), [0, 0, -1])

    p.loadURDF(os.path.join(self._urdfRoot, "table/table.urdf"), 0.5000000, 0.00000, -.820000, 0.000000, 0.000000, 0.0, 1.0)

    xpos = 0.55 + 0.12 * random.random()
    ypos = 0 + 0.2 * random.random()
    ang = 3.14 * 0.5 + 3.1415925438 * random.random()
    orn = p.getQuaternionFromEuler([0, 0, ang])
    self.blockUid = p.loadURDF(os.path.join(self._urdfRoot, "block.urdf"), xpos, ypos, -0.15, orn[0], orn[1], orn[2], orn[3])

    p.setGravity(0, 0, -10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
    self._envStepCounter = 0
    p.stepSimulation()
    self._observation = self.getExtendedObservation()
    return np.array(self._observation)
Example #6
Source File: MJCF2InvPendulumv0Env.py From bullet-gym with MIT License
def _reset(self):  # reset your environment
    # reset state
    self.steps = 0
    self.done = False

    # reset pole on cart in starting poses
    self.slider.set_state(0, 0)  # reset joint position of cart
    for _ in xrange(100):
        p.stepSimulation()

    # give a fixed force push in a random direction to get things going...
    theta = np.random.RandomState().uniform(low=-.1, high=.1, size=[2]) if self.random_theta else (0.0, 0.0)
    self.polejoint.set_state(float(theta[0]) if not self.swingup else np.pi + float(theta[0]), 0)  # reset joint position of pole
    self.pole2joint.set_state(float(theta[1]) if not self.swingup else np.pi + float(theta[1]), 0)  # reset joint position of pole2
    self.polejoint.set_state(float(theta[0]), 0)
    self.pole2joint.set_state(float(theta[1]), 0)

    # bootstrap state by running for all repeats
    for i in xrange(self.repeats):
        self.set_state_element_for_repeat(i)

    # return this state
    return np.copy(self.state)
Example #7
Source File: cartpole_bullet.py From soccer-matlab with BSD 2-Clause "Simplified" License
def _step(self, action):
    p.stepSimulation()
    # time.sleep(self.timeStep)
    self.state = p.getJointState(self.cartpole, 1)[0:2] + p.getJointState(self.cartpole, 0)[0:2]
    theta, theta_dot, x, x_dot = self.state

    dv = 0.1
    deltav = [-10.*dv, -5.*dv, -2.*dv, -0.1*dv, 0, 0.1*dv, 2.*dv, 5.*dv, 10.*dv][action]

    p.setJointMotorControl2(self.cartpole, 0, p.VELOCITY_CONTROL, targetVelocity=(deltav + self.state[3]))
    done = x < -self.x_threshold \
        or x > self.x_threshold \
        or theta < -self.theta_threshold_radians \
        or theta > self.theta_threshold_radians
    reward = 1.0
    return np.array(self.state), reward, done, {}
Example #8
Source File: kukaCamGymEnv.py From soccer-matlab with BSD 2-Clause "Simplified" License
def step2(self, action):
    for i in range(self._actionRepeat):
        self._kuka.applyAction(action)
        p.stepSimulation()
        if self._termination():
            break
        #self._observation = self.getExtendedObservation()
        self._envStepCounter += 1

    self._observation = self.getExtendedObservation()
    if self._renders:
        time.sleep(self._timeStep)

    #print("self._envStepCounter")
    #print(self._envStepCounter)

    done = self._termination()
    reward = self._reward()
    #print("len=%r" % len(self._observation))
    return np.array(self._observation), reward, done, {}
Example #9
Source File: human_testing.py From assistive-gym with MIT License
def step(self, action):
    yaw = 0
    while True:
        yaw += -0.75
        p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=yaw, cameraPitch=-20, cameraTargetPosition=[0, 0, 1.0], physicsClientId=self.id)
        indices = [4, 5, 6]
        # indices = [14, 15, 16]
        deltas = [0.01, 0.01, -0.01]
        indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        deltas = [0, 0, 0, 0, 0.01, 0.01, -0.01, 0, 0, 0]
        # indices = []
        # deltas = []
        # indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 10
        # deltas = [0, 0, 0, 0, -0.01, 0.01, -0.01, 0, 0, 0]
        for i, d in zip(indices, deltas):
            joint_position = p.getJointState(self.human, jointIndex=i, physicsClientId=self.id)[0]
            if joint_position + d > self.human_lower_limits[i] and joint_position + d < self.human_upper_limits[i]:
                p.resetJointState(self.human, jointIndex=i, targetValue=joint_position + d, targetVelocity=0, physicsClientId=self.id)
        p.stepSimulation(physicsClientId=self.id)
        print('cameraYaw=%.2f, cameraPitch=%.2f, distance=%.2f' % p.getDebugVisualizerCamera(physicsClientId=self.id)[-4:-1])
        self.enforce_realistic_human_joint_limits()
        time.sleep(0.05)
    return [], None, None, None
Example #10
Source File: world_creation.py From assistive-gym with MIT License
def setup_robot_joints(self, robot, robot_joint_indices, lower_limits, upper_limits, randomize_joint_positions=False, default_positions=[1, 1, 0, -1.75, 0, -1.1, -0.5], tool=None):
    if randomize_joint_positions:
        # Randomize arm joint positions
        # Keep trying random joint positions until the end effector is not colliding with anything
        retry = True
        while retry:
            for j, lower_limit, upper_limit in zip(robot_joint_indices, lower_limits, upper_limits):
                if lower_limit == -1e10:
                    lower_limit = -np.pi
                    upper_limit = np.pi
                joint_range = upper_limit - lower_limit
                p.resetJointState(robot, jointIndex=j, targetValue=self.np_random.uniform(lower_limit + joint_range/6.0, upper_limit - joint_range/6.0), targetVelocity=0, physicsClientId=self.id)
            p.stepSimulation(physicsClientId=self.id)
            retry = len(p.getContactPoints(bodyA=robot, physicsClientId=self.id)) > 0
            if tool is not None:
                retry = retry or (len(p.getContactPoints(bodyA=tool, physicsClientId=self.id)) > 0)
    else:
        default_positions[default_positions < lower_limits] = lower_limits[default_positions < lower_limits]
        default_positions[default_positions > upper_limits] = upper_limits[default_positions > upper_limits]
        for i, j in enumerate(robot_joint_indices):
            p.resetJointState(robot, jointIndex=j, targetValue=default_positions[i], targetVelocity=0, physicsClientId=self.id)
Example #11
Source File: util.py From assistive-gym with MIT License
def ik_jlwki(self, body, target_joint, target_pos, target_orient, world_creation, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, ik_indices=range(29, 29+7), max_iterations=100, success_threshold=0.03, half_range=False, step_sim=False, check_env_collisions=False):
    target_joint_positions = self.ik(body, target_joint, target_pos, target_orient, ik_indices=ik_indices, max_iterations=max_iterations, half_range=half_range)
    world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(target_joint_positions), tool=None)
    if step_sim:
        for _ in range(5):
            p.stepSimulation(physicsClientId=self.id)
        if len(p.getContactPoints(bodyA=body, bodyB=body, physicsClientId=self.id)) > 0:
            # The robot's arm is in contact with itself.
            return False, np.array(target_joint_positions)
    if check_env_collisions:
        for _ in range(25):
            p.stepSimulation(physicsClientId=self.id)
    gripper_pos, gripper_orient = p.getLinkState(body, target_joint, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    if np.linalg.norm(target_pos - np.array(gripper_pos)) < success_threshold and (target_orient is None or np.linalg.norm(target_orient - np.array(gripper_orient)) < success_threshold or np.isclose(np.linalg.norm(target_orient - np.array(gripper_orient)), 2, atol=success_threshold)):
        return True, np.array(target_joint_positions)
    return False, np.array(target_joint_positions)
Example #12
Source File: util.py From assistive-gym with MIT License
def ik_random_restarts(self, body, target_joint, target_pos, target_orient, world_creation, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, ik_indices=range(29, 29+7), max_iterations=1000, max_ik_random_restarts=50, random_restart_threshold=0.01, half_range=False, step_sim=False, check_env_collisions=False):
    orient_orig = target_orient
    best_ik_joints = None
    best_ik_distance = 0
    for r in range(max_ik_random_restarts):
        target_joint_positions = self.ik(body, target_joint, target_pos, target_orient, ik_indices=ik_indices, max_iterations=max_iterations, half_range=half_range)
        world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(target_joint_positions), tool=None)
        if step_sim:
            for _ in range(5):
                p.stepSimulation(physicsClientId=self.id)
            if len(p.getContactPoints(bodyA=body, bodyB=body, physicsClientId=self.id)) > 0 and orient_orig is not None:
                # The robot's arm is in contact with itself. Continually randomize end effector orientation until a solution is found
                target_orient = p.getQuaternionFromEuler(p.getEulerFromQuaternion(orient_orig, physicsClientId=self.id) + np.deg2rad(self.np_random.uniform(-45, 45, size=3)), physicsClientId=self.id)
        if check_env_collisions:
            for _ in range(25):
                p.stepSimulation(physicsClientId=self.id)
        gripper_pos, gripper_orient = p.getLinkState(body, target_joint, computeForwardKinematics=True, physicsClientId=self.id)[:2]
        if np.linalg.norm(target_pos - np.array(gripper_pos)) < random_restart_threshold and (target_orient is None or np.linalg.norm(target_orient - np.array(gripper_orient)) < random_restart_threshold or np.isclose(np.linalg.norm(target_orient - np.array(gripper_orient)), 2, atol=random_restart_threshold)):
            return True, np.array(target_joint_positions)
        if best_ik_joints is None or np.linalg.norm(target_pos - np.array(gripper_pos)) < best_ik_distance:
            best_ik_joints = target_joint_positions
            best_ik_distance = np.linalg.norm(target_pos - np.array(gripper_pos))
    world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(best_ik_joints), tool=None)
    return False, np.array(best_ik_joints)
Example #13
Source File: MJCFInvPendulumv0Env.py From bullet-gym with MIT License
def _reset(self):  # reset your environment
    # reset state
    self.steps = 0
    self.done = False

    # reset pole on cart in starting poses
    self.slider.set_state(0, 0)  # reset joint position of cart
    self.polejoint.set_state(0 if not self.swingup else np.pi, 0)  # reset joint position of pole
    for _ in xrange(100):
        p.stepSimulation()

    # give a fixed force push in a random direction to get things going...
    theta = (np.random.random() * 2 - 1) if self.random_theta else 0.0
    for _ in xrange(self.initial_force_steps):
        p.stepSimulation()
        self.polejoint.set_torque(theta)
        if self.delay > 0:
            time.sleep(self.delay)

    self.polejoint.disable_motor()  # after having applied some initial torque, turn off motor

    # bootstrap state by running for all repeats
    for i in xrange(self.repeats):
        self.set_state_element_for_repeat(i)

    # return this state
    return np.copy(self.state)
Example #14
Source File: world.py From costar_plan with Apache License 2.0
def _update_environment(self):
    '''
    Step the simulation forward after all actors have given their commands
    to associated simulated robots. Then update all actors' states.
    '''
    # Loop through the given number of steps
    for i in xrange(self.num_steps):
        pb.stepSimulation()

    # Update the states of all actors.
    for actor in self.actors:
        actor.state = actor.getState()
        actor.state.t = self.ticks * self.dt
Example #15
Source File: minitaur_env.py From midlevel-reps with MIT License
def _step(self, action): """Step forward the simulation, given the action. Args: action: A list of desired motor angles for eight motors. Returns: observations: The angles, velocities and torques of all motors. reward: The reward for the current state-action pair. done: Whether the episode has ended. info: A dictionary that stores diagnostic information. Raises: ValueError: The action dimension is not the same as the number of motors. ValueError: The magnitude of actions is out of bounds. """ #print("Env apply raw action", action) action = self._transform_action_to_motor_command(action) #print("Env apply action", action) #for _ in range(self._action_repeat): # self.robot.ApplyAction(action) # pybullet.stepSimulation() for i in range(len(self.Amax)): if action[i] > self.Amax[i]: self.Amax[i] = action[i] #print("Action max", self.Amax) for _ in range(self.action_repeat): state = CameraRobotEnv._step(self, action) return state
Example #16
Source File: scene_abstract.py From midlevel-reps with MIT License
def step(self, frame_skip):
    if self._is_render:
        # Sleep, otherwise the computation takes less time than real time,
        # which will make the visualization like a fast-forward video.
        time_spent = time.time() - self._last_frame_time
        self._last_frame_time = time.time()
        #time_to_sleep = self.timestep * self.frame_skip - time_spent
        #if time_to_sleep > 0:
        #    time.sleep(time_to_sleep)
    p.stepSimulation()
Example #17
Source File: scene_abstract.py From bullet-gym with MIT License
def step(self, frame_skip):
    p.stepSimulation()
Example #18
Source File: MJCFHopperv0Env.py From bullet-gym with MIT License
def _step(self, action):
    assert(np.isfinite(action).all())
    if self.done:
        print >>sys.stderr, "Why is step called when the episode is done?"
        return np.copy(self.state), 0, True, {}

    # choose your next action
    # do some out of bounds checks to reset the environment and agent
    # calculate the reward for your agent

    info = {}

    # step simulation forward. at the end of each repeat we set part of the step's
    # state by capturing parts' states in some form.
    for r in xrange(self.repeats):
        for _ in xrange(self.steps_per_repeat):
            p.stepSimulation()
            self.thigh_joint.set_torque(75 * float(np.clip(action[0], -1, +1)))
            self.leg_joint.set_torque(75 * float(np.clip(action[1], -1, +1)))
            self.foot_joint.set_torque(75 * float(np.clip(action[2], -1, +1)))
            if self.delay > 0:
                time.sleep(self.delay)
        self.set_state_element_for_repeat(r)
    self.steps += 1

    # check for end of episode (by length)
    if self.steps >= self.max_episode_len:
        info['done_reason'] = 'episode length'
        self.done = True

    self.speed = self.foot.current_position()[0] - self.last_position
    self.last_position = self.foot.current_position()[0]
    self.rewards = [self.speed]

    # return observation
    return self.state, self.speed, self.done, info
Example #19
Source File: MJCFReacherv0Env.py From bullet-gym with MIT License
def _step(self, action):
    assert(np.isfinite(action).all())
    if self.done:
        print >>sys.stderr, "Why is step called when the episode is done?"
        return np.copy(self.state), 0, True, {}

    # choose your next action
    # do some out of bounds checks to reset the environment and agent
    # calculate the reward for your agent

    info = {}

    # step simulation forward. at the end of each repeat we set part of the step's
    # state by capturing parts' states in some form.
    for r in xrange(self.repeats):
        for _ in xrange(self.steps_per_repeat):
            p.stepSimulation()
            self.central_joint.set_torque(0.07 * float(np.clip(action[0], -1, +1)))
            self.elbow_joint.set_torque(0.07 * float(np.clip(action[1], -1, +1)))
            if self.delay > 0:
                time.sleep(self.delay)
        self.set_state_element_for_repeat(r)
    self.steps += 1

    # check for end of episode (by length)
    if self.steps >= self.max_episode_len:
        info['done_reason'] = 'episode length'
        self.done = True

    self.to_target_vec = np.array(self.fingertip.current_position()) - np.array(self.target.current_position())
    reward_dist = -np.linalg.norm(self.to_target_vec)
    reward_ctrl = -np.square(action).sum()
    reward_rotation = -np.square(self.theta_dot * self.dt) - np.square(self.gamma_dot * self.dt)
    self.rewards = [reward_dist, reward_ctrl, reward_rotation]

    # return observation
    return self.state, reward_dist + reward_ctrl + reward_rotation, self.done, info
Example #20
Source File: balancebot_env.py From balance-bot with MIT License
def _step(self, action):
    self._assign_throttle(action)
    p.stepSimulation()
    self._observation = self._compute_observation()
    reward = self._compute_reward()
    done = self._compute_done()
    self._envStepCounter += 1
    return np.array(self._observation), reward, done, {}
Example #21
Source File: motorController.py From bipedal-robot-walking-simulation with MIT License
def setMotorsAngleInFixedTimestep(self, motorTargetAngles, motorTargetTime, delayTime):
    if motorTargetTime == 0:
        self._joint_targetPos = np.array(motorTargetAngles)
        for i in range(self._joint_number):
            p.setJointMotorControl2(
                bodyIndex=self._robot,
                jointIndex=self._joint_id[i],
                controlMode=p.POSITION_CONTROL,
                targetPosition=self._joint_targetPos[i],
                positionGain=self._kp,
                velocityGain=self._kd,
                force=self._torque,
                maxVelocity=self._max_velocity,
                physicsClientId=self._physicsClientId)
        p.stepSimulation(physicsClientId=self._physicsClientId)
    else:
        self._joint_currentPos = self._joint_targetPos
        self._joint_targetPos = np.array(motorTargetAngles)
        dydt = (self._joint_targetPos - self._joint_currentPos) / motorTargetTime
        internalTime = 0.0
        while internalTime < motorTargetTime:
            internalTime += self._timeStep
            for i in range(self._joint_number):
                p.setJointMotorControl2(
                    bodyIndex=self._robot,
                    jointIndex=self._joint_id[i],
                    controlMode=p.POSITION_CONTROL,
                    targetPosition=self._joint_currentPos[i] + dydt[i] * internalTime,
                    positionGain=self._kp,
                    velocityGain=self._kd,
                    force=self._torque,
                    maxVelocity=self._max_velocity,
                    physicsClientId=self._physicsClientId)
            p.stepSimulation(physicsClientId=self._physicsClientId)
    if delayTime != 0:
        for _ in range(int(delayTime / self._timeStep)):
            p.stepSimulation(physicsClientId=self._physicsClientId)
Example #22
Source File: motorController.py From Boston_Dynamics_Atlas_Explained with MIT License
def setMotorsAngleInFixedTimestep(self, motorTargetAngles, motorTargetTime, delayTime):
    if motorTargetTime == 0:
        self._joint_targetPos = np.array(motorTargetAngles)
        for i in range(self._joint_number):
            p.setJointMotorControl2(
                bodyIndex=self._robot,
                jointIndex=self._joint_id[i],
                controlMode=p.POSITION_CONTROL,
                targetPosition=self._joint_targetPos[i],
                positionGain=self._kp,
                velocityGain=self._kd,
                force=self._torque,
                maxVelocity=self._max_velocity,
                physicsClientId=self._physicsClientId)
        p.stepSimulation(physicsClientId=self._physicsClientId)
        time.sleep(self._timeStep)  # time.sleep() takes only a duration; it has no physicsClientId parameter
    else:
        self._joint_currentPos = self._joint_targetPos
        self._joint_targetPos = np.array(motorTargetAngles)
        dydt = (self._joint_targetPos - self._joint_currentPos) / motorTargetTime
        internalTime = 0.0
        while internalTime < motorTargetTime:
            internalTime += self._timeStep
            for i in range(self._joint_number):
                p.setJointMotorControl2(
                    bodyIndex=self._robot,
                    jointIndex=self._joint_id[i],
                    controlMode=p.POSITION_CONTROL,
                    targetPosition=self._joint_currentPos[i] + dydt[i] * internalTime,
                    positionGain=self._kp,
                    velocityGain=self._kd,
                    force=self._torque,
                    maxVelocity=self._max_velocity,
                    physicsClientId=self._physicsClientId)
            p.stepSimulation(physicsClientId=self._physicsClientId)
    if delayTime != 0:
        for _ in range(int(delayTime / self._timeStep)):
            p.stepSimulation(physicsClientId=self._physicsClientId)
Example #23
Source File: TemplateEnv.py From bullet-gym with MIT License
def _reset(self):
    pass  # reset your environment

#         # reset state
#         self.steps = 0
#         self.done = False
#
#         # reset pole on cart in starting poses
#         p.resetBasePositionAndOrientation(self.cartpole, (0,0,0.08), (0,0,0,1))  # reset cart position and orientation
#         p.resetJointState(self.cartpole, 0, 0)  # reset joint position of pole
#         for _ in xrange(100): p.stepSimulation()
#
#         # give a fixed force push in a random direction to get things going...
#         theta = (np.random.random()*2-1) if self.random_theta else 0.0
#         for _ in xrange(self.initial_force_steps):
#             p.stepSimulation()
#             p.applyExternalForce(self.cartpole, 0, (theta, 0, 0), (0, 0, 0), p.WORLD_FRAME)
#             if self.delay > 0:
#                 time.sleep(self.delay)
#
#         # bootstrap state by running for all repeats
#         for i in xrange(self.repeats):
#             self.set_state_element_for_repeat(i)
#
#         # return this state
#         return np.copy(self.state)
Example #24
Source File: simulation_manager.py From qibullet with Apache License 2.0
def _stepSimulation(self, physics_client):
    """
    INTERNAL METHOD: This method is only used for a simulation in DIRECT
    mode (without the gui).

    Parameters:
        physics_client - The id of the simulated instance to be stepped
    """
    try:
        initial_time = time.time()
        while True:
            pybullet.stepSimulation(physicsClientId=physics_client)
            time.sleep(1. / 10000.)
    except Exception:
        pass
Example #25
Source File: pybullet_robot_interface.py From scikit-robot with MIT License
def main():
    # initialize robot
    robot = skrobot.models.Kuka()
    interface = skrobot.interfaces.PybulletRobotInterface(robot)

    pybullet.resetDebugVisualizerCamera(
        cameraDistance=1.5,
        cameraYaw=45,
        cameraPitch=-45,
        cameraTargetPosition=(0, 0, 0.5),
    )
    print('==> Initialized Kuka Robot on PyBullet')
    for _ in range(100):
        pybullet.stepSimulation()
    time.sleep(3)

    # reset pose
    print('==> Moving to Reset Pose')
    robot.reset_manip_pose()
    interface.angle_vector(robot.angle_vector(), realtime_simulation=True)
    interface.wait_interpolation()
    time.sleep(3)

    # ik
    print('==> Solving Inverse Kinematics')
    target_coords = skrobot.coordinates.Coordinates(
        pos=[0.5, 0, 0]
    ).rotate(np.pi / 2.0, 'y', 'local')
    skrobot.interfaces.pybullet.draw(target_coords)
    robot.inverse_kinematics(
        target_coords,
        link_list=robot.rarm.link_list,
        move_target=robot.rarm_end_coords,
        rotation_axis=True,
        stop=1000,
    )
    interface.angle_vector(robot.angle_vector(), realtime_simulation=True)
    interface.wait_interpolation()

    # wait
    while pybullet.isConnected():
        time.sleep(0.01)
Example #26
Source File: kuka_button_gym_env.py From robotics-rl-srl with MIT License
def step2(self, action): """ :param action:([float]) """ # Apply force to the button p.setJointMotorControl2(self.button_uid, BUTTON_GLIDER_IDX, controlMode=p.POSITION_CONTROL, targetPosition=0.1) for i in range(self._action_repeat): self._kuka.applyAction(action) p.stepSimulation() if self._termination(): break self._env_step_counter += 1 self._observation = self.getExtendedObservation() if self._renders: time.sleep(self._timestep) reward = self._reward() done = self._termination() if self.saver is not None: self.saver.step(self._observation, self.action, reward, done, self.getGroundTruth()) if self.srl_model != "raw_pixels": return self.getSRLState(self._observation), reward, done, {} return np.array(self._observation), reward, done, {}
Example #27
Source File: minitaur_evaluate.py From soccer-matlab with BSD 2-Clause "Simplified" License
def evaluate_params(evaluateFunc, params, objectiveParams, urdfRoot='', timeStep=0.01, maxNumSteps=10000, sleepTime=0):
    print('start evaluation')
    beforeTime = time.time()
    p.resetSimulation()

    p.setTimeStep(timeStep)
    p.loadURDF("%s/plane.urdf" % urdfRoot)
    p.setGravity(0, 0, -10)

    global minitaur
    minitaur = Minitaur(urdfRoot)
    start_position = current_position()
    last_position = None  # for tracing line
    total_energy = 0

    for i in range(maxNumSteps):
        torques = minitaur.getMotorTorques()
        velocities = minitaur.getMotorVelocities()
        total_energy += np.dot(np.fabs(torques), np.fabs(velocities)) * timeStep

        joint_values = evaluate_func_map[evaluateFunc](i, params)
        minitaur.applyAction(joint_values)
        p.stepSimulation()
        if is_fallen():
            break

        if i % 100 == 0:
            sys.stdout.write('.')
            sys.stdout.flush()
        time.sleep(sleepTime)

    print(' ')

    alpha = objectiveParams[0]
    final_distance = np.linalg.norm(start_position - current_position())
    finalReturn = final_distance - alpha * total_energy

    elapsedTime = time.time() - beforeTime
    print("trial for ", params, " final_distance", final_distance, "total_energy", total_energy, "finalReturn", finalReturn, "elapsed_time", elapsedTime)
    return finalReturn
Example #28
Source File: saveRestoreStateTest.py From soccer-matlab with BSD 2-Clause "Simplified" License
def setupWorld(self):
    numObjects = 50
    maximalCoordinates = False

    p.resetSimulation()
    p.setPhysicsEngineParameter(deterministicOverlappingPairs=1)
    p.loadURDF("planeMesh.urdf", useMaximalCoordinates=maximalCoordinates)
    kukaId = p.loadURDF("kuka_iiwa/model_free_base.urdf", [0, 0, 10], useMaximalCoordinates=maximalCoordinates)
    for i in range(p.getNumJoints(kukaId)):
        p.setJointMotorControl2(kukaId, i, p.POSITION_CONTROL, force=0)
    for i in range(numObjects):
        cube = p.loadURDF("cube_small.urdf", [0, i * 0.02, (i + 1) * 0.2])
        #p.changeDynamics(cube,-1,mass=100)
    p.stepSimulation()
    p.setGravity(0, 0, -10)
Example #29
Source File: testMJCF.py From soccer-matlab with BSD 2-Clause "Simplified" License
def test(args):
    p.connect(p.GUI)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    fileName = os.path.join("mjcf", args.mjcf)
    print("fileName")
    print(fileName)
    p.loadMJCF(fileName)
    while (1):
        p.stepSimulation()
        p.getCameraImage(320, 240)
        time.sleep(0.01)
Example #30
Source File: kukaGymEnv.py From soccer-matlab with BSD 2-Clause "Simplified" License
def step2(self, action):
    for i in range(self._actionRepeat):
        self._kuka.applyAction(action)
        p.stepSimulation()
        if self._termination():
            break
        self._envStepCounter += 1

    if self._renders:
        time.sleep(self._timeStep)
    self._observation = self.getExtendedObservation()

    #print("self._envStepCounter")
    #print(self._envStepCounter)

    done = self._termination()
    npaction = np.array([action[3]])  # only penalize rotation until learning works well [action[0],action[1],action[3]])
    actionCost = np.linalg.norm(npaction) * 10.
    #print("actionCost")
    #print(actionCost)
    reward = self._reward() - actionCost
    #print("reward")
    #print(reward)
    #print("len=%r" % len(self._observation))
    return np.array(self._observation), reward, done, {}