/*
 * calcjx.c
 *
 * From "nnToolKit": a neural-network toolkit built on top of the
 * MATLAB Neural Network Toolbox.  C source, 1,778 lines in total
 * (this file is page 1 of 5 of the original listing; the text below
 * begins mid-function in the mcc-generated feval interface for
 * "calcjx/reprow").
 */
        mlfError(
          mxCreateString(
            "Run-time Error: File: calcjx/reprow Line: 237 Colu"
            "mn: 1 The function \"calcjx/reprow\" was called wi"
            "th more than the declared number of inputs (2)."),
          NULL);
    }
    for (i = 0; i < 1; ++i) {
        mplhs[i] = NULL;
    }
    for (i = 0; i < 2 && i < nrhs; ++i) {
        mprhs[i] = prhs[i];
    }
    for (; i < 2; ++i) {
        mprhs[i] = NULL;
    }
    mlfEnterNewContext(0, 2, mprhs[0], mprhs[1]);
    mplhs[0] = Mcalcjx_reprow(nlhs, mprhs[0], mprhs[1]);
    mlfRestorePreviousContext(0, 2, mprhs[0], mprhs[1]);
    plhs[0] = mplhs[0];
}

/*
 * "mlfCalcjx_reprowint" is the normal (mlf) calling interface for the
 * "calcjx/reprowint" M-function compiled from
 * "d:\matlab6p5\toolbox\nnet\nnutils\calcjx.m" (lines 244-252).  It
 * brackets the compiled implementation (Mcalcjx_reprowint, defined
 * earlier in this file) with the MATLAB runtime context management and
 * returns the single output to the caller.
 */
static mxArray * mlfCalcjx_reprowint(mxArray * m_in, mxArray * n) {
    const int nargout = 1;      /* reprowint declares exactly one output */
    mxArray * result = NULL;

    /* Open a runtime context covering both inputs for the duration of
     * the call, then tear it down before handing back the result. */
    mlfEnterNewContext(0, 2, m_in, n);
    result = Mcalcjx_reprowint(nargout, m_in, n);
    mlfRestorePreviousContext(0, 2, m_in, n);
    return mlfReturnValue(result);
}

/*
 * "mlxCalcjx_reprowint" is the feval calling interface for the
 * "calcjx/reprowint" M-function compiled from
 * "d:\matlab6p5\toolbox\nnet\nnutils\calcjx.m" (lines 244-252).  It
 * validates the argument counts, marshals the right-hand-side
 * arguments, and dispatches to the compiled implementation
 * (Mcalcjx_reprowint, defined earlier in this file).
 */
static void mlxCalcjx_reprowint(int nlhs,
                                mxArray * plhs[],
                                int nrhs,
                                mxArray * prhs[]) {
    mxArray * in[2] = { NULL, NULL };   /* marshalled inputs (max 2)  */
    mxArray * out[1] = { NULL };        /* single declared output     */
    int i;

    /* reprowint declares at most 1 output and 2 inputs; anything more
     * is a runtime error, reported exactly as the MATLAB runtime would. */
    if (nlhs > 1) {
        mlfError(
          mxCreateString(
            "Run-time Error: File: calcjx/reprowint Line: 244 Col"
            "umn: 1 The function \"calcjx/reprowint\" was called "
            "with more than the declared number of outputs (1)."),
          NULL);
    }
    if (nrhs > 2) {
        mlfError(
          mxCreateString(
            "Run-time Error: File: calcjx/reprowint Line: 244 Col"
            "umn: 1 The function \"calcjx/reprowint\" was called "
            "with more than the declared number of inputs (2)."),
          NULL);
    }

    /* Copy however many inputs were supplied; the initializers above
     * already leave any missing trailing arguments as NULL. */
    for (i = 0; i < nrhs && i < 2; ++i) {
        in[i] = prhs[i];
    }

    mlfEnterNewContext(0, 2, in[0], in[1]);
    out[0] = Mcalcjx_reprowint(nlhs, in[0], in[1]);
    mlfRestorePreviousContext(0, 2, in[0], in[1]);
    plhs[0] = out[0];
}

/*
 * The function "Mcalcjx" is the implementation version of the "calcjx"
 * M-function from file "d:\matlab6p5\toolbox\nnet\nnutils\calcjx.m" (lines
 * 1-223). It contains the actual compiled code for that M-function. It is a
 * static function and must only be called from one of the interface functions,
 * appearing below.
 */
/*
 * function jx=calcjx(net,PD,BZ,IWZ,LWZ,N,Ac,Q,TS)
 */
static mxArray * Mcalcjx(int nargout_,
                         mxArray * net,
                         mxArray * PD,
                         mxArray * BZ,
                         mxArray * IWZ,
                         mxArray * LWZ,
                         mxArray * N,
                         mxArray * Ac,
                         mxArray * Q,
                         mxArray * TS) {
    mexLocalFunctionTable save_local_function_table_
      = mclSetCurrentLocalFunctionTable(&_local_function_table_calcjx);
    mxArray * jx = NULL;
    mxArray * biasInd = NULL;
    mxArray * layerWeightInd = NULL;
    mxArray * inputWeightInd = NULL;
    mxArray * biasLearn = NULL;
    mxArray * layerLearn = NULL;
    mxArray * inputLearn = NULL;
    mxArray * Ad = NULL;
    mxArray * sW = NULL;
    mxArray * layerWeightCols = NULL;
    mxArray * inputWeightCols = NULL;
    mxArray * ZeroDelayW = NULL;
    mxArray * k = NULL;
    mxArray * gLW = NULL;
    mxArray * gIW = NULL;
    mxArray * gB = NULL;
    mxArray * gLWZ = NULL;
    mxArray * gIWZ = NULL;
    mxArray * gBZ = NULL;
    mxArray * gN = NULL;
    mxArray * gA = NULL;
    mxArray * j = NULL;
    mxArray * ts = NULL;
    mxArray * ind = NULL;
    mxArray * siz = NULL;
    mxArray * i = NULL;
    mxArray * pos = NULL;
    mxArray * gE = NULL;
    mxArray * QNegEyes = NULL;
    mxArray * QS = NULL;
    mxArray * S = NULL;
    mxArray * LCT = NULL;
    mxArray * LCF = NULL;
    mxArray * ICF = NULL;
    mxArray * dLWF = NULL;
    mxArray * LWF = NULL;
    mxArray * dIWF = NULL;
    mxArray * IWF = NULL;
    mxArray * dNF = NULL;
    mxArray * NF = NULL;
    mxArray * dTF = NULL;
    mxArray * TF = NULL;
    mxArray * numLayerDelays = NULL;
    mclCopyArray(&net);
    mclCopyArray(&PD);
    mclCopyArray(&BZ);
    mclCopyArray(&IWZ);
    mclCopyArray(&LWZ);
    mclCopyArray(&N);
    mclCopyArray(&Ac);
    mclCopyArray(&Q);
    mclCopyArray(&TS);
    /*
     * %CALCJX Calculate weight and bias performance Jacobian as a single matrix.
     * %
     * %  Syntax
     * %
     * %    jx = calcjx(net,PD,BZ,IWZ,LWZ,N,Ac,Q,TS)
     * %
     * %  Description
     * %
     * %    This function calculates the Jacobian of a network's errors
     * %    with respect to its vector of weight and bias values X.
     * %
     * %    jX = CALCJX(NET,PD,BZ,IWZ,LWZ,N,Ac,Q,TS) takes,
     * %      NET    - Neural network.
     * %      PD     - Delayed inputs.
     * %      BZ     - Concurrent biases.
     * %      IWZ    - Weighted inputs.
     * %      LWZ    - Weighted layer outputs.
     * %      N      - Net inputs.
     * %      Ac     - Combined layer outputs.
     * %      Q      - Concurrent size.
     * %      TS     - Time steps.
     * %    and returns,
     * %      jX     - Jacobian of network errors with respect to X.
     * %
     * %  Examples
     * %
     * %    Here we create a linear network with a single input element
     * %    ranging from 0 to 1, two neurons, and a tap delay on the
     * %    input with taps at 0, 2, and 4 timesteps.  The network is
     * %    also given a recurrent connection from layer 1 to itself with
     * %    tap delays of [1 2].
     * %
     * %      net = newlin([0 1],2);
     * %      net.layerConnect(1,1) = 1;
     * %      net.layerWeights{1,1}.delays = [1 2];
     * %
     * %    Here is a single (Q = 1) input sequence P with 5 timesteps (TS = 5),
     * %    and the 4 initial input delay conditions Pi, combined inputs Pc,
     * %    and delayed inputs Pd.
     * %
     * %      P = {0 0.1 0.3 0.6 0.4};
     * %      Pi = {0.2 0.3 0.4 0.1};
     * %      Pc = [Pi P];
     * %      Pd = calcpd(net,5,1,Pc);
     * %
     * %    Here the two initial layer delay conditions for each of the
     * %    two neurons, and the layer targets for the two neurons over
     * %    five timesteps are defined.
     * %
     * %      Ai = {[0.5; 0.1] [0.6; 0.5]};
     * %      Tl = {[0.1;0.2] [0.3;0.1], [0.5;0.6] [0.8;0.9], [0.5;0.1]};
     * %
     * %    Here the network's weight and bias values are extracted, and
     * %    the network's performance and other signals are calculated.
     * %
     * %      [perf,El,Ac,N,BZ,IWZ,LWZ] = calcperf(net,X,Pd,Tl,Ai,1,5);
     * %
     * %    Finally we can use CALCJX to calculate the Jacobian.
     * %
     * %      jX = calcjx(net,Pd,BZ,IWZ,LWZ,N,Ac,1,5);
     * %
     * %  See also CALCGX, CALCJEJJ.
     * 
     * % Mark Beale, 11-31-97
     * % Mark Beale, Updated help, 5-25-98
     * % Copyright 1992-2002 The MathWorks, Inc.
     * % $Revision: 1.9 $ $Date: 2002/03/25 16:54:57 $
     * 
     * % Shortcuts
     * numLayerDelays = net.numLayerDelays;
     */
    mlfAssign(
      &numLayerDelays, mlfIndexRef(mclVa(net, "net"), ".numLayerDelays"));
    /*
     * TF = net.hint.transferFcn;
     */
    mlfAssign(&TF, mlfIndexRef(mclVa(net, "net"), ".hint.transferFcn"));
    /*
     * dTF = net.hint.dTransferFcn;
     */
    mlfAssign(&dTF, mlfIndexRef(mclVa(net, "net"), ".hint.dTransferFcn"));
    /*
     * NF = net.hint.netInputFcn;
     */
    mlfAssign(&NF, mlfIndexRef(mclVa(net, "net"), ".hint.netInputFcn"));
    /*
     * dNF = net.hint.dNetInputFcn;
     */
    mlfAssign(&dNF, mlfIndexRef(mclVa(net, "net"), ".hint.dNetInputFcn"));
    /*
     * IWF = net.hint.inputWeightFcn;
     */
    mlfAssign(&IWF, mlfIndexRef(mclVa(net, "net"), ".hint.inputWeightFcn"));
    /*
     * dIWF = net.hint.dInputWeightFcn;
     */
    mlfAssign(&dIWF, mlfIndexRef(mclVa(net, "net"), ".hint.dInputWeightFcn"));
    /*
     * LWF = net.hint.layerWeightFcn;
     */
    mlfAssign(&LWF, mlfIndexRef(mclVa(net, "net"), ".hint.layerWeightFcn"));
    /*
     * dLWF = net.hint.dLayerWeightFcn;
     */
    mlfAssign(&dLWF, mlfIndexRef(mclVa(net, "net"), ".hint.dLayerWeightFcn"));
    /*
     * ICF = net.hint.inputConnectFrom;
     */
    mlfAssign(&ICF, mlfIndexRef(mclVa(net, "net"), ".hint.inputConnectFrom"));
    /*
     * LCF = net.hint.layerConnectFrom;
     */
    mlfAssign(&LCF, mlfIndexRef(mclVa(net, "net"), ".hint.layerConnectFrom"));
    /*
     * LCT = net.hint.layerConnectTo;
     */
    mlfAssign(&LCT, mlfIndexRef(mclVa(net, "net"), ".hint.layerConnectTo"));
    /*
     * 
     * % CALCULATE ERROR SIZE
     * S = net.hint.totalTargetSize;
     */
    mlfAssign(&S, mlfIndexRef(mclVa(net, "net"), ".hint.totalTargetSize"));
    /*
     * QS = Q*S;
     */
    mlfAssign(&QS, mclMtimes(mclVa(Q, "Q"), mclVv(S, "S")));
    /*
     * 
     * % CALCULATE ERROR CONNECTIONS
     * QNegEyes =  repcol(-eye(S),Q);
     */
    mlfAssign(
      &QNegEyes,
      mlfCalcjx_repcol(mclUminus(mlfEye(mclVv(S, "S"), NULL)), mclVa(Q, "Q")));
    /*
     * gE = cell(net.numLayers,1);
     */
    mlfAssign(
      &gE,
      mlfCell(mlfIndexRef(mclVa(net, "net"), ".numLayers"), _mxarray0_, NULL));
    /*
     * pos = 0;
     */
    mlfAssign(&pos, _mxarray1_);
    /*
     * for i=net.hint.targetInd
     */
    {
        mclForLoopIterator viter__;
        for (mclForStart(
               &viter__,
               mlfIndexRef(mclVa(net, "net"), ".hint.targetInd"),
               NULL,
               NULL);
             mclForNext(&viter__, &i);
             ) {
            /*
             * siz = net.layers{i}.size;
             */
            mlfAssign(
              &siz,
              mlfIndexRef(mclVa(net, "net"), ".layers{?}.size", mclVv(i, "i")));
            /*
             * gE{i} = QNegEyes(pos+[1:siz],:);
             */
            mlfIndexAssign(
              &gE,
              "{?}",
              mclVv(i, "i"),
              mclArrayRef2(
                mclVv(QNegEyes, "QNegEyes"),
                mclPlus(
                  mclVv(pos, "pos"),
                  mlfColon(_mxarray0_, mclVv(siz, "siz"), NULL)),
                mlfCreateColonIndex()));
            /*
             * pos = pos + siz;
             */
            mlfAssign(&pos, mclPlus(mclVv(pos, "pos"), mclVv(siz, "siz")));
        /*
         * end
         */
        }
        mclDestroyForLoopIterator(viter__);
    }
    /*
     * 
     * % EXPAND SIGNALS
     * ind = floor([0:(QS-1)]/S)+1;
     */
    mlfAssign(
      &ind,
      mclPlus(
        mlfFloor(
          mclMrdivide(
            mlfColon(_mxarray1_, mclMinus(mclVv(QS, "QS"), _mxarray0_), NULL),

/*
 * (Page-viewer keyboard-shortcut help, preserved from the original web
 *  listing; the generated C source continues on page 2 of 5:
 *  Copy code: Ctrl+C - Search: Ctrl+F - Fullscreen: F11 -
 *  Larger font: Ctrl+= - Smaller font: Ctrl+- - Show shortcuts: ?)
 */