Class sdm::MPOMDPInterface

Class List > sdm > MPOMDPInterface

The interface for Multi-agent Partially Observable Markov Decision Processes (MPOMDPs).

  • #include <mpomdp_interface.hpp>

Inherits the following classes: sdm::MMDPInterface, sdm::POMDPInterface

Inherited by the following classes: sdm::MPOMDP, sdm::NetworkedDistributedPOMDPInterface, sdm::SerialMPOMDPInterface, sdm::TransformedMPOMDP

Public Functions

Type Name
virtual std::shared_ptr< Space > getObservationSpace (number agent_id, number t) const = 0
Get the observation space of agent i at timestep t.
virtual std::shared_ptr< Space > getObservationSpace (number t) const = 0
Get the observation space at timestep t.

Public Functions inherited from sdm::MMDPInterface

See sdm::MMDPInterface

Type Name
virtual std::shared_ptr< Space > getActionSpace (number agent_id, number t) const = 0
Get the action space of agent i at timestep t.
virtual std::shared_ptr< Space > getActionSpace (number t) const = 0
Get the action space at timestep t.

Public Functions inherited from sdm::MDPInterface

See sdm::MDPInterface

Type Name
virtual std::shared_ptr< Space > getActionSpace (number t) const = 0
Get the action space at timestep t.
virtual double getDiscount (number t) const = 0
Get the discount factor at timestep t.
virtual number getHorizon () const = 0
Get the planning horizon.
virtual std::shared_ptr< State > getInternalState () const = 0
virtual double getMaxReward (number t) const = 0
virtual double getMinReward (number t) const = 0
virtual number getNumAgents () const = 0
Get the number of agents.
virtual std::set< std::shared_ptr< State > > getReachableStates (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, number t) const = 0
Get reachable states.
virtual double getReward (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, number t) const = 0
Get the reward at timestep t when executing an action in a specific state.
virtual std::shared_ptr< Distribution< std::shared_ptr< State > > > getStartDistribution () const = 0
Get the initial distribution over states.
virtual std::shared_ptr< Space > getStateSpace (number t) const = 0
Get the state space at timestep t.
virtual double getTransitionProbability (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, const std::shared_ptr< State > & next_state, number t) const = 0
Get the transition probability, i.e. p(s' | s, a), at timestep t.
virtual void setInternalState (std::shared_ptr< State > state) = 0
virtual std::tuple< std::shared_ptr< Observation >, std::vector< double >, bool > step (std::shared_ptr< Action > action) = 0
Do a step on the environment.
virtual std::tuple< std::shared_ptr< Observation >, std::vector< double >, bool > step (std::shared_ptr< Action > action, bool increment_timestep) = 0

Public Functions inherited from sdm::GymInterface

See sdm::GymInterface

Type Name
virtual std::shared_ptr< Space > getActionSpaceAt (const std::shared_ptr< Observation > & observation, number t) = 0
Get the action space.
virtual std::shared_ptr< Action > getRandomAction (const std::shared_ptr< Observation > & observation, number t) = 0
Get random action.
virtual std::shared_ptr< Observation > reset () = 0
Reset the environment and return initial observation.
virtual std::tuple< std::shared_ptr< Observation >, std::vector< double >, bool > step (std::shared_ptr< Action > action) = 0
Do a step on the environment.

Public Functions inherited from sdm::POMDPInterface

See sdm::POMDPInterface

Type Name
virtual double getDynamics (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, const std::shared_ptr< State > & next_state, const std::shared_ptr< Observation > & observation, number t) const = 0
Get the dynamics, i.e. p(s', o | s, a), at timestep t.
virtual double getObservationProbability (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, const std::shared_ptr< State > & next_state, const std::shared_ptr< Observation > & observation, number t) const = 0
Get the observation probability, i.e. p(o | a, s'), at timestep t.
virtual std::shared_ptr< Space > getObservationSpace (number t) const = 0
Get the observation space at timestep t.
virtual std::set< std::shared_ptr< Observation > > getReachableObservations (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, const std::shared_ptr< State > & next_state, number t) const = 0
Get reachable observations.

Public Functions inherited from sdm::MDPInterface

See sdm::MDPInterface

Type Name
virtual std::shared_ptr< Space > getActionSpace (number t) const = 0
Get the action space at timestep t.
virtual double getDiscount (number t) const = 0
Get the discount factor at timestep t.
virtual number getHorizon () const = 0
Get the planning horizon.
virtual std::shared_ptr< State > getInternalState () const = 0
virtual double getMaxReward (number t) const = 0
virtual double getMinReward (number t) const = 0
virtual number getNumAgents () const = 0
Get the number of agents.
virtual std::set< std::shared_ptr< State > > getReachableStates (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, number t) const = 0
Get reachable states.
virtual double getReward (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, number t) const = 0
Get the reward at timestep t when executing an action in a specific state.
virtual std::shared_ptr< Distribution< std::shared_ptr< State > > > getStartDistribution () const = 0
Get the initial distribution over states.
virtual std::shared_ptr< Space > getStateSpace (number t) const = 0
Get the state space at timestep t.
virtual double getTransitionProbability (const std::shared_ptr< State > & state, const std::shared_ptr< Action > & action, const std::shared_ptr< State > & next_state, number t) const = 0
Get the transition probability, i.e. p(s' | s, a), at timestep t.
virtual void setInternalState (std::shared_ptr< State > state) = 0
virtual std::tuple< std::shared_ptr< Observation >, std::vector< double >, bool > step (std::shared_ptr< Action > action) = 0
Do a step on the environment.
virtual std::tuple< std::shared_ptr< Observation >, std::vector< double >, bool > step (std::shared_ptr< Action > action, bool increment_timestep) = 0

Public Functions inherited from sdm::GymInterface

See sdm::GymInterface

Type Name
virtual std::shared_ptr< Space > getActionSpaceAt (const std::shared_ptr< Observation > & observation, number t) = 0
Get the action space.
virtual std::shared_ptr< Action > getRandomAction (const std::shared_ptr< Observation > & observation, number t) = 0
Get random action.
virtual std::shared_ptr< Observation > reset () = 0
Reset the environment and return initial observation.
virtual std::tuple< std::shared_ptr< Observation >, std::vector< double >, bool > step (std::shared_ptr< Action > action) = 0
Do a step on the environment.

Public Functions Documentation

function getObservationSpace [1/2]

virtual std::shared_ptr< Space > sdm::MPOMDPInterface::getObservationSpace (
    number agent_id,
    number t
) const = 0

Parameters:

  • agent_id the identifier of the agent
  • t the timestep

Returns:

the observation space

function getObservationSpace [2/2]

virtual std::shared_ptr< Space > sdm::MPOMDPInterface::getObservationSpace (
    number t
) const = 0

Parameters:

  • t the timestep

Returns:

the observation space

Implements sdm::POMDPInterface::getObservationSpace


The documentation for this class was generated from the following file src/sdm/world/base/mpomdp_interface.hpp