
📄 ccontinuoustime.cpp

📁 Reinforcement learning algorithms (R-Learning): a rare and valuable collection of material
💻 CPP
// Destructor body (its signature is not part of the excerpt shown here):
// release the policy's working buffers.
{
	delete derivationState;
	delete c;
	delete actionValues;
	delete derivationU;
	delete derivationX;
}

void CContinuousTimeAndActionSigmoidVMGradientPolicy::getNextContinuousAction(CStateCollection *state, CContinuousActionData *action)
{
	// Deterministic drive: dV/dx * df/du for the current state.
	model->getDerivationU(state->getState(model->getStateProperties()), derivationU);
	dVFunction->getInputDerivation(state, derivationX);

	derivationX->multMatrix(derivationU, actionValues);

	// Exploration noise, zero unless an internal random controller is used.
	noise->initVector(0.0);

	if (randomController && randomControllerMode == INTERN_RANDOM_CONTROLLER)
	{
		randomController->getNextContinuousAction(state, noise);
	}

	// Scale, add the noise and squash into the admissible action range.
	getActionValues(actionValues, noise);

	action->setVector(actionValues);
}


void CContinuousTimeAndActionSigmoidVMGradientPolicy::getActionValues(CMyVector *actionValues, CMyVector *noise)
{
	// Scale element-wise by c and by the global factor, then add the noise.
	actionValues->dotVector(c);
	actionValues->multScalar(getParameter("SigmoidPolicyCFactor"));
	actionValues->addVector(noise);

	for (unsigned int i = 0; i < actionValues->getNumDimensions(); i++)
	{
		rlt_real umax = getContinuousActionProperties()->getMaxActionValue(i);
		rlt_real umin = getContinuousActionProperties()->getMinActionValue(i);

		// Clamp very negative values to keep exp() below in a safe range.
		if (actionValues->getElement(i) < -400)
		{
			actionValues->setElement(i, -400);
		}

		// Sigmoid squashing into [umin, umax].
		rlt_real s = 1 / (1 + exp(-(actionValues->getElement(i))));
		actionValues->setElement(i, umin + s * (umax - umin));
	}
}
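
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the squashing performed
// by getActionValues() above, written as a plain function over doubles so it
// can be read in isolation.  The name sigmoidSquash and its parameter list are
// hypothetical; exp() is assumed to be available, as it is above.
static double sigmoidSquash(double rawValue, double c, double cFactor,
                            double noise, double umin, double umax)
{
	// Scale by the per-dimension factor c and the global SigmoidPolicyCFactor,
	// then add the exploration noise.
	double a = rawValue * c * cFactor + noise;

	// Same clamp as above, so exp(-a) stays in a safe range.
	if (a < -400)
	{
		a = -400;
	}

	double s = 1 / (1 + exp(-a));        // sigmoid, in (0, 1)
	return umin + s * (umax - umin);     // mapped into [umin, umax]
}
// ---------------------------------------------------------------------------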

void CContinuousTimeAndActionSigmoidVMGradientPolicy::getNoise(CStateCollection *state, CContinuousActionData *action, CContinuousActionData *l_noise)
{
	rlt_real generalC = getParameter("SigmoidPolicyCFactor");
	if (randomControllerMode == INTERN_RANDOM_CONTROLLER)
	{
		CMyVector tempVector(contAction->getNumDimensions());

		// Deterministic drive dV/dx * df/du, recomputed for the given state.
		model->getDerivationU(state->getState(model->getStateProperties()), derivationU);
		dVFunction->getInputDerivation(state, derivationX);

		derivationX->multMatrix(derivationU, l_noise);
		tempVector.setVector(action);

		// Invert the sigmoid squashing to recover pre-sigmoid values for the
		// executed action.
		for (unsigned int i = 0; i < tempVector.getNumDimensions(); i++)
		{
			rlt_real umax = getContinuousActionProperties()->getMaxActionValue(i);
			rlt_real umin = getContinuousActionProperties()->getMinActionValue(i);

			rlt_real actionValue = tempVector.getElement(i);
			actionValue = (actionValue - umin) / (umax - umin);

			actionValue = - log(1 / actionValue - 1) / generalC / c->getElement(i);

			tempVector.setElement(i, actionValue);
		}

		// Noise estimate = inverted action minus the deterministic drive.
		l_noise->multScalar(-1.0);
		l_noise->addVector(&tempVector);
	}
	else
	{
		CContinuousActionController::getNoise(state, action, l_noise);
	}
}
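
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the inversion used in
// getNoise() above.  For an action u in [umin, umax] this recovers a
// pre-sigmoid value, undoing the scaling by c and the global factor; getNoise()
// then subtracts the deterministic drive dV/dx * df/du from it to form its
// noise estimate.  The name inverseSquash is hypothetical; log() is assumed
// available, as above.
static double inverseSquash(double u, double c, double cFactor,
                            double umin, double umax)
{
	double s = (u - umin) / (umax - umin);       // back into (0, 1)
	return - log(1 / s - 1) / cFactor / c;       // inverse sigmoid, undo scaling
}
// ---------------------------------------------------------------------------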


void CContinuousTimeAndActionSigmoidVMGradientPolicy::setC(int index, rlt_real value)
{
	c->setElement(index, value);
}

rlt_real CContinuousTimeAndActionSigmoidVMGradientPolicy::getC(int index)
{
	return c->getElement(index);
}

int CContinuousTimeAndActionSigmoidVMGradientPolicy::getNumWeights()
{
	return vFunction->getNumWeights();
}

void CContinuousTimeAndActionSigmoidVMGradientPolicy::getWeights(rlt_real *parameters)
{
	vFunction->getWeights(parameters);
}

void CContinuousTimeAndActionSigmoidVMGradientPolicy::setWeights(rlt_real *parameters)
{
	vFunction->setWeights(parameters);
}

void CContinuousTimeAndActionSigmoidVMGradientPolicy::updateWeights(CFeatureList *dParams)
{
	vFunction->updateGradient(dParams);
}

void CContinuousTimeAndActionSigmoidVMGradientPolicy::getGradient(CStateCollection *currentState, int outputDimension, CFeatureList *gradientFeatures)
{
	gradientFeatures->clear();

	// Pre-sigmoid activation dV/dx * df/du for the current state.
	model->getDerivationU(currentState->getState(model->getStateProperties()), derivationU);
	dVFunction->getInputDerivation(currentState, derivationX);

	derivationX->multMatrix(derivationU, actionValues);

	// Derivative of the sigmoid at the activation of the requested dimension:
	// s'(a) = exp(-a) / (1 + exp(-a))^2, with a = dV/dx * df/du.
	rlt_real prodFactor = my_exp(- actionValues->getElement(outputDimension));
	rlt_real stepSize = 0.01;
	prodFactor = prodFactor / pow(1.0 + prodFactor, 2.0);

	// Differentiate the value-function gradient with respect to each continuous
	// state dimension by a central difference, stepping by 1% of that
	// dimension's range.
	CState *inputState = derivationState->getState(modelState);
	inputState->setState(currentState->getState(modelState));
	for (int x_i = 0; x_i < modelState->getNumContinuousStates(); x_i++)
	{
		gradient1->clear();
		gradient2->clear();
		rlt_real stepSize_i = (modelState->getMaxValue(x_i) - modelState->getMinValue(x_i)) * stepSize;

		// Gradient at x_i + stepSize_i ...
		inputState->setContinuousState(x_i, inputState->getContinuousState(x_i) + stepSize_i);
		derivationState->newModelState();
		vFunction->getGradient(derivationState, gradient1);

		// ... and at x_i - stepSize_i, then restore the original state value.
		inputState->setContinuousState(x_i, inputState->getContinuousState(x_i) - 2 * stepSize_i);
		derivationState->newModelState();
		vFunction->getGradient(derivationState, gradient2);

		inputState->setContinuousState(x_i, inputState->getContinuousState(x_i) + stepSize_i);

		gradient1->add(gradient2, -1.0);

		gradientFeatures->add(gradient1, prodFactor * derivationU->getElement(x_i, outputDimension) / (2 * stepSize_i));
	}
}
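
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the central-difference
// approximation that getGradient() above applies feature-wise to the value
// function's weight gradient, shown here in its generic scalar form.  The name
// centralDifference is hypothetical.
template <typename Function>
static double centralDifference(Function f, double x, double h)
{
	// O(h^2) approximation of f'(x); getGradient() uses the same pattern with
	// gradient1 and gradient2 in place of f(x + h) and f(x - h).
	return (f(x + h) - f(x - h)) / (2 * h);
}
// ---------------------------------------------------------------------------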

void CContinuousTimeAndActionSigmoidVMGradientPolicy::resetData()
{
	vFunction->resetData();
}


CContinuousTimeAndActionBangBangVMPolicy::CContinuousTimeAndActionBangBangVMPolicy(CContinuousAction *action, CVFunctionInputDerivationCalculator *vfunction, CTransitionFunction *model) : CContinuousTimeAndActionVMPolicy(action, vfunction, model)
{
	
}

void CContinuousTimeAndActionBangBangVMPolicy::getNoise(CStateCollection *state, CContinuousActionData *action, CContinuousActionData *l_noise)
{
	CContinuousActionController::getNoise(state, action, l_noise);
}


void CContinuousTimeAndActionBangBangVMPolicy::getActionValues(CMyVector *actionValues, CMyVector *noise)
{
	actionValues->addVector(noise);

	// Bang-bang control: saturate each dimension to umax or umin, depending on
	// the sign of dV/dx * df/du plus noise.
	for (unsigned int i = 0; i < actionValues->getNumDimensions(); i++)
	{
		rlt_real umax = getContinuousActionProperties()->getMaxActionValue(i);
		rlt_real umin = getContinuousActionProperties()->getMinActionValue(i);
		if (actionValues->getElement(i) > 0)
		{
			actionValues->setElement(i, umax);
		}
		else
		{
			actionValues->setElement(i, umin);
		}
	}
}

CContinuousActionSmoother::CContinuousActionSmoother(CContinuousAction *action, CContinuousActionController *policy, rlt_real alpha) : CContinuousActionController(action)
{
	this->policy = policy;
	this->alpha = alpha;

	this->actionValues = new rlt_real[contAction->getContinuousActionProperties()->getNumActionValues()];

	for (int i = 0; i < contAction->getNumDimensions(); i++)
	{
		actionValues[i] = 0.0;
	}
}

CContinuousActionSmoother::~CContinuousActionSmoother()
{
	delete [] actionValues;
}

void CContinuousActionSmoother::getNextContinuousAction(CStateCollection *state, CContinuousActionData *data)
{
	policy->getNextContinuousAction(state, data);

	// Blend the new action with the previously executed one (exponential
	// smoothing with factor alpha), then remember the result.
	for (int i = 0; i < contAction->getNumDimensions(); i++)
	{
		data->setElement(i, data->getElement(i) * (1 - getAlpha()) + getAlpha() * actionValues[i]);
		actionValues[i] = data->getElement(i);
	}
}

void CContinuousActionSmoother::setAlpha(rlt_real alpha)
{
	this->alpha = alpha;
}

rlt_real CContinuousActionSmoother::getAlpha()
{
	return alpha;
}
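
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: CContinuousActionSmoother
// low-pass filters the policy output, u_t = (1 - alpha) * policy(s_t)
// + alpha * u_{t-1}.  The hypothetical loop below shows the response to a
// constant policy output of 1.0: the executed action approaches 1.0
// geometrically at rate (1 - alpha).
static double smootherStepResponse()
{
	double alpha = 0.8;
	double smoothed = 0.0;               // plays the role of actionValues[i]

	for (int t = 0; t < 10; t++)
	{
		double raw = 1.0;                                  // policy output
		smoothed = raw * (1 - alpha) + alpha * smoothed;   // same update as above
		// after iteration t: smoothed = 1 - alpha^(t + 1)
	}

	return smoothed;
}
// ---------------------------------------------------------------------------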
