cmontecarlo.h

// Copyright (C) 2003
// Gerhard Neumann (gerhard@igi.tu-graz.ac.at)

//
// This file is part of RL Toolbox.
// http://www.igi.tugraz.at/ril_toolbox
//
// All rights reserved.
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products
//    derived from this software without specific prior written permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef C_MONTECARLO__H
#define C_MONTECARLO__H

#include "cagent.h"
#include "ctransitionfunction.h"
#include "crewardfunction.h"
#include "ril_debug.h"

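// Abstract base class for Monte Carlo policy evaluation. As a
// CSemiMDPRewardListener it observes the rewards the agent receives while
// executing its current policy for nEpisodes episodes of at most
// nStepsPerEpisode steps each; evaluatePolicy() aggregates the per-episode
// values supplied by the getEpisodeValue() hook of the subclasses.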
class CPolicyEvaluator : public CSemiMDPRewardListener
{
protected:
	CAgent *agent;
	rlt_real policyValue;
	
	int nEpisodes;
	int nStepsPerEpisode;

	virtual rlt_real getEpisodeValue() = 0;
public:
	CPolicyEvaluator(CAgent *agent, CRewardFunction *rewardFunction, int nEpisodes, int nStepsPerEpisode);

	virtual rlt_real evaluatePolicy();
	
	virtual void nextStep(CStateCollection *oldState, CAction *action, rlt_real reward, CStateCollection *nextState) = 0;
};

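// Uses the average reward per step of an episode as the episode value.
// minReward (default -2.0) provides a pessimistic floor for the result;
// see the implementation for its exact role.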
class CAverageRewardCalculator : public CPolicyEvaluator
{
protected:
	int nSteps;
	rlt_real averageReward;
	rlt_real minReward;

	virtual rlt_real getEpisodeValue();
public:
	CAverageRewardCalculator(CAgent *agent, CRewardFunction *rewardFunction, int nEpisodes, int nStepsPerEpisode, rlt_real minReward = -2.0);

	virtual void nextStep(CStateCollection *oldState, CAction *action, rlt_real reward, CStateCollection *nextState);
	virtual void newEpisode();
};

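// Uses the discounted sum of rewards, accumulated with discount factor
// gamma over the course of an episode, as the episode value.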
class CValueCalculator : public CPolicyEvaluator
{
protected:
	int nSteps;
	rlt_real value;

	virtual rlt_real getEpisodeValue();
public:
	CValueCalculator(CAgent *agent, CRewardFunction *rewardFunction, int nEpisodes, int nStepsPerEpisode, rlt_real gamma);

	virtual void nextStep(CStateCollection *oldState, CAction *action, rlt_real reward, CStateCollection *nextState);
	virtual void newEpisode();
};

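// Policy evaluator that always starts its episodes from the same fixed
// list of start states, resetting the environment through a
// CTransitionFunctionEnvironment; this keeps successive evaluations of a
// changing policy comparable. The second constructor samples
// numStartStates start states itself; getNewStartStates() draws a fresh
// set from the environment.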
class CPolicySameStateEvaluator : public CPolicyEvaluator
{
protected:
	CStateList *startStates;
	CTransitionFunctionEnvironment *environment;

	virtual rlt_real getEpisodeValue() = 0;
public:
	CPolicySameStateEvaluator(CAgent *agent, CRewardFunction *rewardFunction, CTransitionFunctionEnvironment *environment, CStateList *startStates, int nStepsPerEpisode);

	CPolicySameStateEvaluator(CAgent *agent, CRewardFunction *rewardFunction, CTransitionFunctionEnvironment *environment, int numStartStates, int nStepsPerEpisode);

	virtual rlt_real evaluatePolicy();

	virtual void nextStep(CStateCollection *oldState, CAction *action, rlt_real reward, CStateCollection *nextState) = 0;

	virtual CStateList *getStartStates() { return startStates; }

	void getNewStartStates();
};

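// Average-reward episode value (as in CAverageRewardCalculator), evaluated
// from the fixed list of start states.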
class CAverageRewardSameStateCalculator : public CPolicySameStateEvaluator
{
protected:
	int nSteps;
	rlt_real averageReward;
	rlt_real minReward;

	virtual rlt_real getEpisodeValue();
public:
	CAverageRewardSameStateCalculator(CAgent *agent, CRewardFunction *rewardFunction, CTransitionFunctionEnvironment *environment, CStateList *startStates, int nStepsPerEpisode, rlt_real minReward = -2.0);

	virtual void nextStep(CStateCollection *oldState, CAction *action, rlt_real reward, CStateCollection *nextState);
	virtual void newEpisode();
};

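// Discounted-return episode value (as in CValueCalculator), evaluated from
// the fixed list of start states.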
class CValueSameStateCalculator : public CPolicySameStateEvaluator
{
protected:
	int nSteps;
	rlt_real value;

	virtual rlt_real getEpisodeValue();
public:
	CValueSameStateCalculator(CAgent *agent, CRewardFunction *rewardFunction, CTransitionFunctionEnvironment *environment, CStateList *startStates, int nStepsPerEpisode, rlt_real gamma);

	virtual void nextStep(CStateCollection *oldState, CAction *action, rlt_real reward, CStateCollection *nextState);
	virtual void newEpisode();
};

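// Measures how greedy the agent behaves: nGreedyActions counts how often
// the executed action coincides with the action proposed by greedyPolicy,
// and the episode value is derived from that count.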
class CPolicyGreedynessEvaluator : public CPolicyEvaluator
{
protected:
	CAgentController *greedyPolicy;
	CActionDataSet *actionDataSet;

	virtual rlt_real getEpisodeValue();

	int nGreedyActions;
public:
	CPolicyGreedynessEvaluator(CAgent *agent, CRewardFunction *reward, int nEpisodes, int nStepsPerEpisode, CAgentController *l_greedyPolicy);
	~CPolicyGreedynessEvaluator();

	virtual void nextStep(CStateCollection *oldState, CAction *action, rlt_real reward, CStateCollection *nextState);
	virtual void newEpisode();
};

#endif
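
A minimal usage sketch (not part of the original file): it assumes the surrounding RL Toolbox API, in particular that the agent exposes addSemiMDPListener() the way other toolbox listeners are registered, and that the agent and reward function have been constructed elsewhere. CValueCalculator is used here to estimate the discounted return of the agent's current policy:

#include <stdio.h>

#include "cmontecarlo.h"

void evaluateCurrentPolicy(CAgent *agent, CRewardFunction *rewardFunction)
{
	// Estimate the discounted return of the agent's current controller,
	// averaged over 20 episodes of at most 1000 steps, with gamma = 0.95.
	CValueCalculator evaluator(agent, rewardFunction, 20, 1000, 0.95);

	// The evaluator receives its rewards through the CSemiMDPRewardListener
	// interface. Registering it via addSemiMDPListener() follows the usual
	// RL Toolbox listener pattern; treat this as an assumption, since some
	// versions may register the evaluator inside evaluatePolicy() already.
	agent->addSemiMDPListener(&evaluator);

	rlt_real policyValue = evaluator.evaluatePolicy();
	printf("Estimated policy value: %f\n", (double)policyValue);
}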
