📄 stochasticadaptiverungekutta.cpp
字号:
#include "StochasticAdaptiveRungeKutta.hpp"

#include "boost/numeric/ublas/operation.hpp"

#include <assert.h>
#include <algorithm>
#include <vector>

#include <gsl/gsl_errno.h>

using namespace indii::ml::sde;

namespace aux = indii::ml::aux;
namespace ublas = boost::numeric::ublas;

/* embedded Runge-Kutta Prince-Dormand (8,9) method */
const gsl_odeiv_step_type* StochasticAdaptiveRungeKutta::gslStepType =
    gsl_odeiv_step_rk8pd;

StochasticAdaptiveRungeKutta::StochasticAdaptiveRungeKutta(
    StochasticDifferentialModel* model) :
    NumericalSolver(model->getDimensions()), model(model),
    W(model->getDimensions()), deltaW(model->getDimensions()) {
  /* NOTE(review): W and deltaW are sized with getDimensions(), but
     sampleNoise() works in getNoiseDimensions() -- confirm the two are
     always equal for supported models, otherwise these should be sized
     with getNoiseDimensions(). */
  setForwardFunction(gslForwardFunction);
  setBackwardFunction(gslBackwardFunction);
  setStepType(gslStepType);
}

StochasticAdaptiveRungeKutta::StochasticAdaptiveRungeKutta(
    StochasticDifferentialModel* model, const aux::vector& y0) :
    NumericalSolver(y0), model(model), W(model->getDimensions()),
    deltaW(model->getDimensions()) {
  /* pre-condition */
  assert (y0.size() == model->getDimensions());

  setForwardFunction(gslForwardFunction);
  setBackwardFunction(gslBackwardFunction);
  setStepType(gslStepType);
}

StochasticAdaptiveRungeKutta::~StochasticAdaptiveRungeKutta() {
  //
}

/**
 * Take one adaptive step forward, without passing the given upper time
 * bound. The Wiener process is (re)sampled for each attempted step size.
 *
 * @param upper Upper bound on the new time; must be greater than the
 * current time.
 * @return The new current time.
 */
double StochasticAdaptiveRungeKutta::step(double upper) {
  /* pre-condition */
  assert (upper > t);

  double h = stepSize;
  int err;

  /* make sure step size won't exceed upper bound */
  if (t + h > upper) {
    h = upper - t;
  }

  /* keep a restore point: gsl_odeiv_step_apply() updates y in place, so a
     rejected step must be retried from the ORIGINAL state. This mirrors
     what gsl_odeiv_evolve_apply() does internally; the previous code
     retried from the already-modified y. */
  std::vector<double> ySave(y, y + dimensions);

  /* solve */
  do {
    /* sample Wiener process for time step */
    sampleNoise(t + h);

    /* take proposed step */
    err = gsl_odeiv_step_apply(gslStep, t, h, y, gslEvolve->yerr,
        gslEvolve->dydt_in, gslEvolve->dydt_out, &gslForwardSystem);
    assert (err == GSL_SUCCESS);
    stepSize = h;

    /* adjust step size */
    err = gsl_odeiv_control_hadjust(gslControl, gslStep, y, gslEvolve->yerr,
        gslEvolve->dydt_out, &h);
    if (err == GSL_ODEIV_HADJ_DEC) {
      /* step rejected as too large; restore state and retry with smaller h */
      std::copy(ySave.begin(), ySave.end(), y);
    }
  } while (err == GSL_ODEIV_HADJ_DEC); // repeat while step size too large

  /* update time */
  t += stepSize;

  /* update proposed step size */
  stepSize = h;

  /* update derivatives: swap buffers so the derivative at the end of this
     step seeds the first stage of the next step */
  double* tmp = gslEvolve->dydt_in;
  gslEvolve->dydt_in = gslEvolve->dydt_out;
  gslEvolve->dydt_out = tmp;

  /* post-condition */
  assert (t <= upper);

  return t;
}

/**
 * Take one adaptive step backward; delegates to the base class, which
 * drives gslBackwardFunction().
 */
double StochasticAdaptiveRungeKutta::stepBack(double lower) {
  double t = NumericalSolver::stepBack(lower);
  return t;
}

/**
 * Reset the solver, discarding the sampled Wiener process path.
 */
void StochasticAdaptiveRungeKutta::reset() {
  NumericalSolver::reset();
  noisePath.clear();
}

/**
 * GSL right-hand side for the forward (increasing time) solve. Converts
 * the SDE into an ODE over the current step by spreading the sampled
 * Wiener increment deltaW uniformly over the step (the division by
 * delta below).
 */
int StochasticAdaptiveRungeKutta::gslForwardFunction(double ts,
    const double y[], double dydt[], void *params) {
  StochasticAdaptiveRungeKutta* m = (StochasticAdaptiveRungeKutta*)params;

  const unsigned int N = m->model->getDimensions();
  const unsigned int NW = m->model->getNoiseDimensions(); // was "W", which
                                                          // shadowed member W
  const double t = m->getTime();
  const double delta = ts - t;
  /* NOTE(review): delta == 0 would divide by zero below; this appears to
     rely on GSL reusing dydt_in for the c = 0 stage so the RHS is never
     evaluated at ts == t -- confirm dydt_in is always supplied. */

  aux::vector x(N);
  aux::arrayToVector(y, x);

  aux::vector a(m->model->calculateDrift(ts, x));
  assert (a.size() == N);

  aux::matrix B(m->model->calculateDiffusion(ts, x));
  assert (B.size1() == N && B.size2() == NW);

  std::vector<aux::matrix> dBdy;
  dBdy = m->model->calculateDiffusionDerivatives(ts, x);
  assert (dBdy.size() == 0 || dBdy.size() == N);
#ifndef NDEBUG
  /* was "#ifdef NDEBUG" (inverted, so the checks never ran) and indexed
     dBdy[i] with i uninitialised; both fixed */
  if (dBdy.size() == N) {
    for (unsigned int j = 0; j < N; j++) {
      assert (dBdy[j].size1() == N && dBdy[j].size2() == NW);
    }
  }
#endif

  /* first term: drift */
  aux::vector t1(a);

  /* second term: correction built from the diffusion derivatives */
  aux::vector t2(N);
  if (dBdy.size() > 0) {
    t2.clear();
    for (unsigned int i = 0; i < N; i++) {
      ublas::axpy_prod(dBdy[i], row(B,i), t2); // t2 += dBdy[i] * row(B,i)
    }
  }

  /* third term: diffusion applied to the sampled Wiener increment */
  aux::vector t3(prod(B, m->deltaW));

  aux::vector dxdt(N);
  if (dBdy.size() > 0) {
    noalias(dxdt) = t1 - t2 / 2.0 + t3 / delta;
  } else {
    noalias(dxdt) = t1 + t3 / delta;
  }
  aux::vectorToArray(dxdt, dydt);

  return GSL_SUCCESS;
}

/**
 * GSL right-hand side for the backward (decreasing time) solve: reflects
 * the requested time about the base time and negates the gradients.
 */
int StochasticAdaptiveRungeKutta::gslBackwardFunction(double t,
    const double y[], double dydt[], void *params) {
  StochasticAdaptiveRungeKutta* m = (StochasticAdaptiveRungeKutta*)params;

  /* convert the proposed step to a future time into a proposed step to a
     past time */
  int result = gslForwardFunction(2.0 * m->base - t, y, dydt, params);
  if (result == GSL_SUCCESS) {
    /* as we're moving backward in time, negate gradients */
    for (size_t i = 0; i < m->dimensions; i++) {
      dydt[i] *= -1.0;
    }
  }
  return result;
}

/**
 * Sample the Wiener process increment up to time ts, conditioned on any
 * increments already stored in noisePath (an increment stored at time tu
 * after ts is split so that the path stays consistent, see the sanity
 * check below). Records the sample in noisePath and deltaW.
 *
 * @param ts Time to sample at; must be greater than the current time.
 * @return The sampled increment.
 */
aux::vector StochasticAdaptiveRungeKutta::sampleNoise(const double ts) {
  /* pre-condition */
  assert (ts > t);

  /* renamed from "t", which shadowed the member t used by the assert above */
  const double tnow = getTime();
  double tl, tu;
  std::map<double,aux::vector>::iterator iter, lower, upper;

  /* determine tl and tu bounds: nearest stored path times either side of ts */
  iter = noisePath.upper_bound(ts);
  upper = iter;
  if (upper == noisePath.begin()) {
    lower = noisePath.end();
  } else {
    iter--;
    lower = iter;
  }
  const bool haveLower = lower != noisePath.end();
  const bool haveUpper = upper != noisePath.end();

  const unsigned int N = model->getNoiseDimensions();
  aux::vector dWtl(N), dWts(N), dWtu(N), delta(N);

  if (haveLower) {
    tl = lower->first;
    noalias(dWtl) = lower->second;
  } else {
    tl = tnow;
  }
  if (haveUpper) {
    /* tu is only set (and later read) when haveUpper */
    tu = upper->first;
    noalias(dWtu) = upper->second;
  }

  /* sample, conditioned on bounds */
  if (tl == ts) {
    noalias(dWts) = dWtl; // sampled on previous step
  } else {
    noalias(delta) = W.sample(ts - tl);
    if (!haveUpper) {
      noalias(dWts) = delta;
    } else {
      /* split the stored increment at tu between [tl,ts] and [ts,tu] */
      noalias(dWts) = (ts-tl)/(tu-tl) * dWtu + delta;
      dWtu = (tu-ts)/(tu-tl) * dWtu - delta;
      //assert (norm_2(upper->second - dWts - dWtu) == 0.0); // sanity check
    }
  }

  /* store path */
  if (ts != tl) {
    noisePath.insert(lower, std::make_pair(ts, dWts)); // lower is a valid hint
    if (haveUpper) {
      upper->second = dWtu;
    }
  }
  deltaW = dWts;

  return dWts;
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -