burlap.mdp.core.action.UniversalActionType Java Examples
The following examples show how to use
burlap.mdp.core.action.UniversalActionType.
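Before the examples, here is a minimal, self-contained sketch of how UniversalActionType itself behaves. It assumes the standard BURLAP 3 behavior in which the type wraps a single, state-independent action; the NullState placeholder is used only because the state argument is ignored for a universal action.

import burlap.mdp.core.action.Action;
import burlap.mdp.core.action.ActionType;
import burlap.mdp.core.action.UniversalActionType;
import burlap.mdp.core.state.NullState;

public class UniversalActionTypeSketch {
    public static void main(String[] args) {
        // A UniversalActionType wraps one unparameterized action that is
        // applicable in every state, so no precondition test is performed.
        ActionType north = new UniversalActionType("north");

        System.out.println(north.typeName());                  // north

        // associatedAction ignores its string argument for a simple action
        Action a = north.associatedAction("");
        System.out.println(a.actionName());                     // north

        // the state argument is ignored; the same one-element action list is
        // always returned, so a placeholder state suffices here
        System.out.println(north.allApplicableActions(NullState.instance).size()); // 1
    }
}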
Example #1
Source File: ExampleGridWorld.java From burlap_examples with MIT License
@Override
public SADomain generateDomain() {

    SADomain domain = new SADomain();

    domain.addActionTypes(
            new UniversalActionType(ACTION_NORTH),
            new UniversalActionType(ACTION_SOUTH),
            new UniversalActionType(ACTION_EAST),
            new UniversalActionType(ACTION_WEST));

    GridWorldStateModel smodel = new GridWorldStateModel();
    RewardFunction rf = new ExampleRF(this.goalx, this.goaly);
    TerminalFunction tf = new ExampleTF(this.goalx, this.goaly);

    domain.setModel(new FactoredModel(smodel, rf, tf));

    return domain;
}
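A short usage sketch for the domain produced above. It assumes the no-argument ExampleGridWorld constructor from the burlap_examples tutorial and SADomain's getActionTypes accessor; the loop simply lists the action types registered by generateDomain().

ExampleGridWorld gen = new ExampleGridWorld();
SADomain domain = gen.generateDomain();

// every UniversalActionType added in generateDomain() is now available on the domain
for(ActionType at : domain.getActionTypes()){
    System.out.println(at.typeName());   // north, south, east, west
}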
Example #2
Source File: SingleStageNormalFormGame.java From burlap with Apache License 2.0
@Override
public Domain generateDomain() {

    SGDomain domain = new SGDomain();

    for(int i = 0; i < this.actionNameToIndex.length; i++){
        for(Map.Entry<String, Integer> as : this.actionNameToIndex[i].namesToInd.entrySet()){
            domain.addActionType(
                    new UniversalActionType(i + "_" + as.getKey(),
                            new MatrixAction(as.getKey(), as.getValue())));
        }
    }

    domain.setJointActionModel(new StaticRepeatedGameModel());

    return domain;
}
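Example #2 uses the two-argument constructor: the first argument names the action type and the second supplies the Action object that the type returns (here a game-specific MatrixAction). A minimal fragment illustrating the same constructor with SimpleAction from the same package; the "player0_rock" and "rock" names are made up for illustration.

// "player0_rock" and "rock" are illustrative names, not from the example above
UniversalActionType rock = new UniversalActionType("player0_rock", new SimpleAction("rock"));
System.out.println(rock.typeName());                          // player0_rock
System.out.println(rock.associatedAction("").actionName());   // rock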
Example #3
Source File: SingleStageNormalFormGame.java From burlap with Apache License 2.0
public SGAgentType generateAgentType(int player){
    List<ActionType> actions = new ArrayList<ActionType>();
    for(Map.Entry<String, Integer> e : actionNameToIndex[player].namesToInd.entrySet()){
        actions.add(new UniversalActionType(player + e.getKey(),
                new MatrixAction(e.getKey(), e.getValue())));
    }
    SGAgentType type = new SGAgentType("player" + player, actions);
    return type;
}
Example #4
Source File: InvertedPendulum.java From burlap with Apache License 2.0
@Override
public SADomain generateDomain() {

    SADomain domain = new SADomain();

    IPPhysicsParams cphys = this.physParams.copy();
    IPModel smodel = new IPModel(cphys);

    RewardFunction rf = this.rf;
    TerminalFunction tf = this.tf;

    if(rf == null){
        rf = new InvertedPendulumRewardFunction();
    }
    if(tf == null){
        tf = new InvertedPendulumTerminalFunction();
    }

    FactoredModel model = new FactoredModel(smodel, rf, tf);
    domain.setModel(model);

    domain.addActionType(new UniversalActionType(ACTION_LEFT))
            .addActionType(new UniversalActionType(ACTION_RIGHT))
            .addActionType(new UniversalActionType(ACTION_NO_FORCE));

    return domain;
}
Example #5
Source File: ExampleOOGridWorld.java From burlap_examples with MIT License
@Override
public OOSADomain generateDomain() {

    OOSADomain domain = new OOSADomain();

    domain.addStateClass(CLASS_AGENT, ExGridAgent.class)
            .addStateClass(CLASS_LOCATION, EXGridLocation.class);

    domain.addActionTypes(
            new UniversalActionType(ACTION_NORTH),
            new UniversalActionType(ACTION_SOUTH),
            new UniversalActionType(ACTION_EAST),
            new UniversalActionType(ACTION_WEST));

    OODomain.Helper.addPfsToDomain(domain, this.generatePfs());

    OOGridWorldStateModel smodel = new OOGridWorldStateModel();
    RewardFunction rf = new SingleGoalPFRF(domain.propFunction(PF_AT), 100, -1);
    TerminalFunction tf = new SinglePFTF(domain.propFunction(PF_AT));

    domain.setModel(new FactoredModel(smodel, rf, tf));

    return domain;
}
Example #6
Source File: GridGame.java From burlap with Apache License 2.0
@Override
public OOSGDomain generateDomain() {

    OOSGDomain domain = new OOSGDomain();

    domain.addStateClass(CLASS_AGENT, GGAgent.class)
            .addStateClass(CLASS_GOAL, GGGoal.class)
            .addStateClass(CLASS_DIM_H_WALL, GGWall.GGHorizontalWall.class)
            .addStateClass(CLASS_DIM_V_WALL, GGWall.GGVerticalWall.class);

    domain.addActionType(new UniversalActionType(ACTION_NORTH))
            .addActionType(new UniversalActionType(ACTION_SOUTH))
            .addActionType(new UniversalActionType(ACTION_EAST))
            .addActionType(new UniversalActionType(ACTION_WEST))
            .addActionType(new UniversalActionType(ACTION_NOOP));

    OODomain.Helper.addPfsToDomain(domain, this.generatePFs());

    domain.setJointActionModel(new GridGameStandardMechanics(domain, this.semiWallProb));

    return domain;
}
Example #7
Source File: MountainCar.java From burlap with Apache License 2.0
@Override
public SADomain generateDomain() {

    SADomain domain = new SADomain();

    MCModel smodel = new MCModel(this.physParams.copy());

    if(tf == null){
        tf = new ClassicMCTF(physParams.xmax);
    }
    if(rf == null){
        rf = new GoalBasedRF(tf, 100, 0);
    }

    FactoredModel model = new FactoredModel(smodel, rf, tf);
    domain.setModel(model);

    domain.addActionType(new UniversalActionType(ACTION_FORWARD))
            .addActionType(new UniversalActionType(ACTION_BACKWARDS))
            .addActionType(new UniversalActionType(ACTION_COAST));

    return domain;
}
Example #8
Source File: LunarLanderDomain.java From burlap with Apache License 2.0
@Override
public OOSADomain generateDomain() {

    OOSADomain domain = new OOSADomain();

    List<Double> thrustValuesTemp = this.thrustValues;
    if(thrustValuesTemp.isEmpty()){
        thrustValuesTemp.add(0.32);
        thrustValuesTemp.add(-physParams.gravity);
    }

    domain.addStateClass(CLASS_AGENT, LLAgent.class)
            .addStateClass(CLASS_PAD, LLBlock.LLPad.class)
            .addStateClass(CLASS_OBSTACLE, LLBlock.LLObstacle.class);

    //make copy of physics parameters
    LLPhysicsParams cphys = this.physParams.copy();

    //add actions
    domain.addActionType(new UniversalActionType(ACTION_TURN_LEFT))
            .addActionType(new UniversalActionType(ACTION_TURN_RIGHT))
            .addActionType(new UniversalActionType(ACTION_IDLE))
            .addActionType(new ThrustType(thrustValues));

    OODomain.Helper.addPfsToDomain(domain, this.generatePfs());

    LunarLanderModel smodel = new LunarLanderModel(cphys);

    RewardFunction rf = this.rf;
    TerminalFunction tf = this.tf;
    if(rf == null){
        rf = new LunarLanderRF(domain);
    }
    if(tf == null){
        tf = new LunarLanderTF(domain);
    }

    FactoredModel model = new FactoredModel(smodel, rf, tf);
    domain.setModel(model);

    return domain;
}
Example #9
Source File: FrostbiteDomain.java From burlap with Apache License 2.0
/**
 * Creates a new frostbite domain.
 *
 * @return the generated domain object
 */
@Override
public OOSADomain generateDomain() {

    OOSADomain domain = new OOSADomain();

    domain.addStateClass(CLASS_AGENT, FrostbiteAgent.class)
            .addStateClass(CLASS_IGLOO, FrostbiteIgloo.class)
            .addStateClass(CLASS_PLATFORM, FrostbitePlatform.class);

    //add actions
    domain.addActionType(new UniversalActionType(ACTION_NORTH))
            .addActionType(new UniversalActionType(ACTION_SOUTH))
            .addActionType(new UniversalActionType(ACTION_EAST))
            .addActionType(new UniversalActionType(ACTION_WEST))
            .addActionType(new UniversalActionType(ACTION_IDLE));

    //add pfs
    List<PropositionalFunction> pfs = this.generatePFs();
    for(PropositionalFunction pf : pfs){
        domain.addPropFunction(pf);
    }

    FrostbiteModel smodel = new FrostbiteModel(scale);
    RewardFunction rf = this.rf;
    TerminalFunction tf = this.tf;
    if(rf == null){
        rf = new FrostbiteRF(domain);
    }
    if(tf == null){
        tf = new FrostbiteTF(domain);
    }

    FactoredModel model = new FactoredModel(smodel, rf, tf);
    domain.setModel(model);

    return domain;
}
Example #10
Source File: CartPoleDomain.java From burlap with Apache License 2.0
@Override
public SADomain generateDomain() {

    SADomain domain = new SADomain();

    CPPhysicsParams cphys = this.physParams.copy();

    RewardFunction rf = this.rf;
    TerminalFunction tf = this.tf;
    if(rf == null){
        rf = new CartPoleRewardFunction();
    }
    if(tf == null){
        tf = new CartPoleTerminalFunction();
    }

    //use the corrected physics model when requested, otherwise the classic model
    FullStateModel smodel = cphys.useCorrectModel ? new CPCorrectModel(cphys) : new CPClassicModel(cphys);

    FactoredModel model = new FactoredModel(smodel, rf, tf);
    domain.setModel(model);

    domain.addActionType(new UniversalActionType(ACTION_LEFT))
            .addActionType(new UniversalActionType(ACTION_RIGHT));

    return domain;
}
Example #11
Source File: TigerDomain.java From burlap with Apache License 2.0
@Override
public Domain generateDomain() {

    PODomain domain = new PODomain();

    domain.addActionType(new UniversalActionType(ACTION_LEFT))
            .addActionType(new UniversalActionType(ACTION_RIGHT))
            .addActionType(new UniversalActionType(ACTION_LISTEN));

    if(this.includeDoNothing){
        domain.addActionType(new UniversalActionType(ACTION_DO_NOTHING));
    }

    ObservationFunction of = new TigerObservations(this.listenAccuracy, this.includeDoNothing);
    domain.setObservationFunction(of);

    TigerModel model = new TigerModel(correctDoorReward, wrongDoorReward, listenReward, nothingReward);
    domain.setModel(model);

    StateEnumerator senum = new StateEnumerator(domain, new SimpleHashableStateFactory());
    senum.getEnumeratedID(new TigerState(VAL_LEFT));
    senum.getEnumeratedID(new TigerState(VAL_RIGHT));
    domain.setStateEnumerator(senum);

    return domain;
}