Python pybullet.getBaseVelocity() Examples

The following are 17 code examples of pybullet.getBaseVelocity(), drawn from open source projects. Each example is preceded by a reference to its original project and source file. You may also want to check out all available functions/classes of the module pybullet, or try the search function.
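Before the project examples, a minimal sketch of the call itself may help; it assumes only that pybullet and pybullet_data are installed, and the cube asset and velocity values are illustrative. getBaseVelocity(bodyUniqueId) returns two 3-tuples: the linear velocity and the angular velocity of the body's base, both in world coordinates.

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
body = p.loadURDF("cube.urdf", [0, 0, 1])
# Give the base a known velocity, then read it back.
p.resetBaseVelocity(body, linearVelocity=[1, 0, 0], angularVelocity=[0, 0, 0.5])
linear, angular = p.getBaseVelocity(body)
print(linear)   # (1.0, 0.0, 0.0)
print(angular)  # (0.0, 0.0, 0.5)
p.disconnect()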
Example #1
Source File: unittests.py    From soccer-matlab with BSD 2-Clause "Simplified" License 6 votes
def test_rolling_friction(self):
        import pybullet as p
        p.connect(p.DIRECT)
        p.loadURDF("plane.urdf")
        sphere = p.loadURDF("sphere2.urdf",[0,0,1])
        p.resetBaseVelocity(sphere,linearVelocity=[1,0,0])
        p.changeDynamics(sphere,-1,linearDamping=0,angularDamping=0)
        #p.changeDynamics(sphere,-1,rollingFriction=0)
        p.setGravity(0,0,-10)
        for i in range (1000):
          p.stepSimulation()
        vel = p.getBaseVelocity(sphere)  # (linear velocity, angular velocity)
        self.assertLess(vel[0][0],1e-10)
        self.assertLess(vel[0][1],1e-10)
        self.assertLess(vel[0][2],1e-10)
        self.assertLess(vel[1][0],1e-10)
        self.assertLess(vel[1][1],1e-10)
        self.assertLess(vel[1][2],1e-10)
        p.disconnect() 
Example #2
Source File: feeding.py    From assistive-gym with MIT License 5 votes
def get_food_rewards(self):
        # Check all food particles to see if they have left the spoon or entered the person's mouth
        # Give the robot a reward or penalty depending on food particle status
        food_reward = 0
        food_hit_human_reward = 0
        food_mouth_velocities = []
        foods_to_remove = []
        for f in self.foods:
            food_pos, food_orient = p.getBasePositionAndOrientation(f, physicsClientId=self.id)
            distance_to_mouth = np.linalg.norm(self.target_pos - food_pos)
            if distance_to_mouth < 0.02:
                # Delete particle and give robot a reward
                food_reward += 20
                self.task_success += 1
                food_velocity = np.linalg.norm(p.getBaseVelocity(f, physicsClientId=self.id)[0])
                food_mouth_velocities.append(food_velocity)
                foods_to_remove.append(f)
                p.resetBasePositionAndOrientation(f, self.np_random.uniform(1000, 2000, size=3), [0, 0, 0, 1], physicsClientId=self.id)
                continue
            elif food_pos[-1] < 0.5 or len(p.getContactPoints(bodyA=f, bodyB=self.table, physicsClientId=self.id)) > 0 or len(p.getContactPoints(bodyA=f, bodyB=self.bowl, physicsClientId=self.id)) > 0:
                # Delete particle and give robot a penalty for spilling food
                food_reward -= 5
                foods_to_remove.append(f)
                continue
            if len(p.getContactPoints(bodyA=f, bodyB=self.human, physicsClientId=self.id)) > 0 and f not in self.foods_hit_person:
                # Record that this food particle just hit the person, so that we can penalize the robot
                self.foods_hit_person.append(f)
                food_hit_human_reward -= 1
        self.foods = [f for f in self.foods if f not in foods_to_remove]
        return food_reward, food_mouth_velocities, food_hit_human_reward 
Example #3
Source File: balancebot_env.py    From balance-bot with MIT License 5 votes
def _compute_observation(self):
        cubePos, cubeOrn = p.getBasePositionAndOrientation(self.botId)
        cubeEuler = p.getEulerFromQuaternion(cubeOrn)
        linear, angular = p.getBaseVelocity(self.botId)
        return [cubeEuler[0],angular[0],self.vt] 
Example #4
Source File: ros_wrapper.py    From qibullet with Apache License 2.0 5 votes
def _broadcastOdometry(self, odometry_publisher):
        """
        INTERNAL METHOD, computes an odometry message based on the robot's
        position and velocity, and broadcasts it

        Parameters:
            odometry_publisher - The ROS publisher for the odometry message
        """
        # Send Transform odom
        x, y, theta = self.robot.getPosition()
        odom_trans = TransformStamped()
        odom_trans.header.frame_id = "odom"
        odom_trans.child_frame_id = "base_link"
        odom_trans.header.stamp = rospy.get_rostime()
        odom_trans.transform.translation.x = x
        odom_trans.transform.translation.y = y
        odom_trans.transform.translation.z = 0
        quaternion = pybullet.getQuaternionFromEuler([0, 0, theta])
        odom_trans.transform.rotation.x = quaternion[0]
        odom_trans.transform.rotation.y = quaternion[1]
        odom_trans.transform.rotation.z = quaternion[2]
        odom_trans.transform.rotation.w = quaternion[3]
        self.transform_broadcaster.sendTransform(odom_trans)
        # Set up the odometry
        odom = Odometry()
        odom.header.stamp = rospy.get_rostime()
        odom.header.frame_id = "odom"
        odom.pose.pose.position.x = x
        odom.pose.pose.position.y = y
        odom.pose.pose.position.z = 0.0
        odom.pose.pose.orientation = odom_trans.transform.rotation
        odom.child_frame_id = "base_link"
        [vx, vy, vz], [wx, wy, wz] = pybullet.getBaseVelocity(
            self.robot.getRobotModel(),
            self.robot.getPhysicsClientId())
        odom.twist.twist.linear.x = vx
        odom.twist.twist.linear.y = vy
        odom.twist.twist.angular.z = wz
        odometry_publisher.publish(odom) 
Example #5
Source File: Motionv0Env.py    From bullet-gym with MIT License 5 votes
def state_fields_of_pv_of(body_id, vHelper, link_id=-1):
    # Return position, orientation (quaternion) and linear/angular velocity,
    # either for the base (link_id == -1) or for a specific link.
    if link_id == -1:
        (x,y,z), (a,b,c,d) = p.getBasePositionAndOrientation(body_id)
        (vx,vy,vz), (va,vb,vc) = p.getBaseVelocity(body_id, 0) # second argument is the physics client id
    else:
        (x,y,z), (a,b,c,d),_,_,_,_ = p.getLinkState(body_id, link_id)
        o = vHelper.getVelocities()
        (vx,vy,vz), (va,vb,vc) = (x-o[link_id+1][0],y-o[link_id+1][1],z-o[link_id+1][2]), (a-o[link_id+1][3],b-o[link_id+1][4],c-o[link_id+1][5])
    
    return np.array([x,y,z,a,b,c,d,vx,vy,vz,va,vb,vc]) 
Example #6
Source File: gym_mujoco_xml_env.py    From bullet-gym with MIT License 5 votes
def speed(self):
		if self.bodyPartIndex == -1:
			(vx, vy, vz), _ = p.getBaseVelocity(self.bodies[self.bodyIndex])
		else:
			(x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
		return np.array([vx, vy, vz]) 
Example #7
Source File: robot_bases.py    From midlevel-reps with MIT License 5 votes
def angular_speed(self):
        if self.bodyPartIndex == -1:
            _, (vr,vp,vyaw) = p.getBaseVelocity(self.bodies[self.bodyIndex])
        else:
            (x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
        return np.array([vr, vp, vyaw]) 
Example #8
Source File: robot_bases.py    From midlevel-reps with MIT License 5 votes
def speed(self):
        if self.bodyPartIndex == -1:
            (vx, vy, vz), _ = p.getBaseVelocity(self.bodies[self.bodyIndex])
        else:
            (x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
        return np.array([vx, vy, vz]) 
Example #9
Source File: robot_bases.py    From GtS with MIT License 5 votes
def angular_speed(self):
        if self.bodyPartIndex == -1:
            _, (vr,vp,vyaw) = p.getBaseVelocity(self.bodies[self.bodyIndex])
        else:
            (x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
        return np.array([vr, vp, vyaw]) 
Example #10
Source File: robot_bases.py    From GtS with MIT License 5 votes
def speed(self):
        if self.bodyPartIndex == -1:
            (vx, vy, vz), _ = p.getBaseVelocity(self.bodies[self.bodyIndex])
        else:
            (x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
        return np.array([vx, vy, vz]) 
Example #11
Source File: saveRestoreState.py    From soccer-matlab with BSD 2-Clause "Simplified" License 5 votes
def dumpStateToFile(file):
	# Write the base pose and velocity of every body to the given file
	for i in range(p.getNumBodies()):
		pos,orn = p.getBasePositionAndOrientation(i)
		linVel,angVel = p.getBaseVelocity(i)
		txtPos = "pos="+str(pos)+"\n"
		txtOrn = "orn="+str(orn)+"\n"
		txtLinVel = "linVel="+str(linVel)+"\n"
		txtAngVel = "angVel="+str(angVel)+"\n"
		file.write(txtPos)
		file.write(txtOrn)
		file.write(txtLinVel)
		file.write(txtAngVel)
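A helper like this is typically used to sanity-check a save/restore cycle: dump the world before and after p.restoreState() and compare the two dumps. A minimal usage sketch under that assumption (the file names and the perturbation step are illustrative, not taken from the original script):

state_id = p.saveState()                  # in-memory snapshot of the current world
with open("state_before.txt", "w") as f:
    dumpStateToFile(f)
p.stepSimulation()                        # perturb the simulation
p.restoreState(state_id)                  # roll back to the snapshot
with open("state_after.txt", "w") as f:
    dumpStateToFile(f)
# If the restore succeeded, the two dumps should match line for line.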
Example #12
Source File: feeding.py    From assistive-gym with MIT License 5 votes
def step(self, action):
        self.take_step(action, robot_arm='right', gains=self.config('robot_gains'), forces=self.config('robot_forces'), human_gains=0.0005)

        robot_force_on_human, spoon_force_on_human = self.get_total_force()
        total_force_on_human = robot_force_on_human + spoon_force_on_human
        reward_food, food_mouth_velocities, food_hit_human_reward = self.get_food_rewards()
        end_effector_velocity = np.linalg.norm(p.getBaseVelocity(self.spoon, physicsClientId=self.id)[0])
        obs = self._get_obs([spoon_force_on_human], [robot_force_on_human, spoon_force_on_human])

        # Get human preferences
        preferences_score = self.human_preferences(end_effector_velocity=end_effector_velocity, total_force_on_human=robot_force_on_human, tool_force_at_target=spoon_force_on_human, food_hit_human_reward=food_hit_human_reward, food_mouth_velocities=food_mouth_velocities)

        spoon_pos, spoon_orient = p.getBasePositionAndOrientation(self.spoon, physicsClientId=self.id)
        spoon_pos = np.array(spoon_pos)

        reward_distance_mouth_target = -np.linalg.norm(self.target_pos - spoon_pos) # Penalize robot for distance between the spoon and human mouth.
        reward_action = -np.sum(np.square(action)) # Penalize actions

        reward = self.config('distance_weight')*reward_distance_mouth_target + self.config('action_weight')*reward_action + self.config('food_reward_weight')*reward_food + preferences_score

        if self.gui and reward_food != 0:
            print('Task success:', self.task_success, 'Food reward:', reward_food)

        info = {'total_force_on_human': total_force_on_human, 'task_success': int(self.task_success >= self.total_food_count*self.config('task_success_threshold')), 'action_robot_len': self.action_robot_len, 'action_human_len': self.action_human_len, 'obs_robot_len': self.obs_robot_len, 'obs_human_len': self.obs_human_len}
        done = False

        return obs, reward, done, info 
Example #13
Source File: drinking.py    From assistive-gym with MIT License 5 votes
def step(self, action):
        self.take_step(action, robot_arm='right', gains=self.config('robot_gains'), forces=self.config('robot_forces'), human_gains=0.0005)

        robot_force_on_human, cup_force_on_human = self.get_total_force()
        total_force_on_human = robot_force_on_human + cup_force_on_human
        reward_water, water_mouth_velocities, water_hit_human_reward = self.get_water_rewards()
        end_effector_velocity = np.linalg.norm(p.getBaseVelocity(self.cup, physicsClientId=self.id)[0])
        obs = self._get_obs([cup_force_on_human], [robot_force_on_human, cup_force_on_human])

        # Get human preferences
        preferences_score = self.human_preferences(end_effector_velocity=end_effector_velocity, total_force_on_human=robot_force_on_human, tool_force_at_target=cup_force_on_human, food_hit_human_reward=water_hit_human_reward, food_mouth_velocities=water_mouth_velocities)

        cup_pos, cup_orient = p.getBasePositionAndOrientation(self.cup, physicsClientId=self.id)
        cup_pos, cup_orient = p.multiplyTransforms(cup_pos, cup_orient, [0, 0.06, 0], p.getQuaternionFromEuler([np.pi/2.0, 0, 0], physicsClientId=self.id), physicsClientId=self.id)
        cup_top_center_pos, _ = p.multiplyTransforms(cup_pos, cup_orient, self.cup_top_center_offset, [0, 0, 0, 1], physicsClientId=self.id)
        reward_distance = -np.linalg.norm(self.target_pos - np.array(cup_top_center_pos)) # Penalize distances between top of cup and mouth
        reward_action = -np.sum(np.square(action)) # Penalize actions
        # Encourage robot to have a tilted end effector / cup
        cup_euler = p.getEulerFromQuaternion(cup_orient, physicsClientId=self.id)
        reward_tilt = -abs(cup_euler[0] + np.pi/2) if self.robot_type == 'jaco' else -abs(cup_euler[0] - np.pi/2)

        reward = self.config('distance_weight')*reward_distance + self.config('action_weight')*reward_action + self.config('cup_tilt_weight')*reward_tilt + self.config('drinking_reward_weight')*reward_water + preferences_score

        if self.gui and reward_water != 0:
            print('Task success:', self.task_success, 'Water reward:', reward_water)

        info = {'total_force_on_human': total_force_on_human, 'task_success': int(self.task_success >= self.total_water_count*self.config('task_success_threshold')), 'action_robot_len': self.action_robot_len, 'action_human_len': self.action_human_len, 'obs_robot_len': self.obs_robot_len, 'obs_human_len': self.obs_human_len}
        done = False

        return obs, reward, done, info 
Example #14
Source File: bullet_physics_engine.py    From NTP-vat-release with MIT License 5 votes
def get_body_angvel(body):
        _, angvel = p.getBaseVelocity(body)
        return angvel 
Example #15
Source File: bullet_physics_engine.py    From NTP-vat-release with MIT License 5 votes
def get_body_linvel(body):
        linvel, _ = p.getBaseVelocity(body)
        return linvel 
Example #16
Source File: saveRestoreStateTest.py    From soccer-matlab with BSD 2-Clause "Simplified" License 5 votes
def dumpStateToFile(self, file):
		# Write the base pose and velocity of every body to the given file
		for i in range(p.getNumBodies()):
			pos,orn = p.getBasePositionAndOrientation(i)
			linVel,angVel = p.getBaseVelocity(i)
			txtPos = "pos="+str(pos)+"\n"
			txtOrn = "orn="+str(orn)+"\n"
			txtLinVel = "linVel="+str(linVel)+"\n"
			txtAngVel = "angVel="+str(angVel)+"\n"
			file.write(txtPos)
			file.write(txtOrn)
			file.write(txtLinVel)
			file.write(txtAngVel)
Example #17
Source File: drinking.py    From assistive-gym with MIT License 4 votes
def get_water_rewards(self):
        # Check all water particles to see if they have entered the person's mouth or have left the scene
        # Delete such particles and give the robot a reward or penalty depending on particle status
        cup_pos, cup_orient = p.getBasePositionAndOrientation(self.cup, physicsClientId=self.id)
        cup_pos, cup_orient = p.multiplyTransforms(cup_pos, cup_orient, [0, 0.06, 0], p.getQuaternionFromEuler([np.pi/2.0, 0, 0], physicsClientId=self.id), physicsClientId=self.id)
        top_center_pos, _ = p.multiplyTransforms(cup_pos, cup_orient, self.cup_top_center_offset, [0, 0, 0, 1], physicsClientId=self.id)
        bottom_center_pos, _ = p.multiplyTransforms(cup_pos, cup_orient, self.cup_bottom_center_offset, [0, 0, 0, 1], physicsClientId=self.id)
        top_center_pos = np.array(top_center_pos)
        bottom_center_pos = np.array(bottom_center_pos)
        if self.cup_top_center is not None:
            p.resetBasePositionAndOrientation(self.cup_top_center, top_center_pos, [0, 0, 0, 1], physicsClientId=self.id)
            p.resetBasePositionAndOrientation(self.cup_bottom_center, bottom_center_pos, [0, 0, 0, 1], physicsClientId=self.id)
            p.resetBasePositionAndOrientation(self.cup_cylinder, cup_pos, cup_orient, physicsClientId=self.id)
        water_reward = 0
        water_hit_human_reward = 0
        water_mouth_velocities = []
        waters_to_remove = []
        for w in self.waters:
            water_pos, water_orient = p.getBasePositionAndOrientation(w, physicsClientId=self.id)
            if not self.util.points_in_cylinder(top_center_pos, bottom_center_pos, 0.05, np.array(water_pos)):
                distance_to_mouth = np.linalg.norm(self.target_pos - water_pos)
                if distance_to_mouth < 0.03: # hard
                # if distance_to_mouth < 0.05: # easy
                    # Delete particle and give robot a reward
                    water_reward += 10
                    self.task_success += 1
                    p.resetBasePositionAndOrientation(w, self.np_random.uniform(1000, 2000, size=3), [0, 0, 0, 1], physicsClientId=self.id)
                    water_velocity = np.linalg.norm(p.getBaseVelocity(w, physicsClientId=self.id)[0])
                    water_mouth_velocities.append(water_velocity)
                    waters_to_remove.append(w)
                    continue
                elif water_pos[-1] < 0.5:
                    # Delete particle and give robot a penalty for spilling water
                    water_reward -= 1
                    waters_to_remove.append(w)
                    continue
                if len(p.getContactPoints(bodyA=w, bodyB=self.human, physicsClientId=self.id)) > 0:
                    # Record that this water particle just hit the person, so that we can penalize the robot
                    waters_to_remove.append(w)
                    water_hit_human_reward -= 1
        self.waters = [w for w in self.waters if w not in waters_to_remove]
        return water_reward, water_mouth_velocities, water_hit_human_reward