📄 corr_05.cc
      rr = 0.0;
      j_start = i + 1;
      for (j = j_start; j <= mod; j++) {
        kr = j - im1;
        for (k = j; k <= nterm; k += mod) {
          kl = kr;
          kr = k + im1;
          rr += input_a(k - 1) * (input_a(kl - 1) + input_a(kr - 1));
        }
      }

      // check if we are done
      //
      if (kr != n) {

        // compute new start/stop indices
        //
        if ((n - kr) > (long)i) {
          kstrt = n - mod + 1;
          kstop = kr;
        }
        else {
          kstrt = kl + i + 1;
          kstop = nterm;
        }

        // adjust rr
        //
        for (long k = kstrt - 1; k < kstop; k++) {
          kk = k + im1;
          rr += input_a(k) * input_a(kk);
        }
      }

      // save output value
      //
      output_a(i) = rr;
    }
  }

  // normalize:
  //  case: LENGTH
  //  divide by the number of terms used to compute each lag (note that for
  //  the tapered algorithm, this depends on the lag)
  //
  if (normalization_d == LENGTH) {
    if (N == 0) {
      return Error::handle(name(), L"computeAutoFactoredFromSignal",
                           Error::ARG, __FILE__, __LINE__);
    }
    if (algorithm_d == AUTO) {
      status = output_a.mult(1.0 / ((float)N - (float)order));
    }
    else if (algorithm_d == AUTO_TAPERED) {
      for (long k = 0; k < order; k++) {
        output_a(k) /= N - k;
      }
      status = true;
    }
  }

  // normalize:
  //  case: UNIT_ENERGY
  //  divide by the zeroth order lag
  //
  else if (normalization_d == UNIT_ENERGY) {
    if (output_a(0) == (float)0) {
      return Error::handle(name(), L"computeAutoFactoredFromSignal",
                           Error::ARG, __FILE__, __LINE__);
    }
    output_a.mult(1.0 / output_a(0));
  }

  // exit gracefully
  //
  return status;
}
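
// note: the two normalization modes above rescale the autocorrelation in
// place. LENGTH divides each lag by the number of terms that contributed
// to it, roughly r(k) <- r(k) / (N - order) for AUTO and
// r(k) <- r(k) / (N - k) for AUTO_TAPERED; UNIT_ENERGY divides every lag
// by the zeroth lag, r(k) <- r(k) / r(0), so that r(0) = 1.0 on output.
//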

// method: computeAutoUnfactoredFromSignal
//
// arguments:
//  VectorFloat& output: (output) Correlation coefficients
//  const VectorFloat& input: (input) input data
//
// return: a boolean value indicating status
//
// this method computes the correlation function using an unfactored
// computation (for sampled data). see:
//
//  J.D. Markel and A.H. Gray, Jr.,
//  Linear Prediction of Speech, Springer-Verlag Berlin Heidelberg,
//  New York, New York, USA, pp. 14, 1976.
//
// see equation 1.23b.
//
boolean Correlation::computeAutoUnfactoredFromSignal(VectorFloat& output_a,
                                                     const VectorFloat& input_a) {

  // declare local variables
  //
  boolean status = true;
  long order = order_d;
  long last_index = input_a.length();

  // check order
  //
  if (order_d < (long)1) {
    return Error::handle(name(), L"computeAutoUnfactoredFromSignal",
                         ERR_ORDER, __FILE__, __LINE__);
  }
  else if (last_index < (order + 1)) {
    return Error::handle(name(), L"computeAutoUnfactoredFromSignal",
                         ERR_INSUFD, __FILE__, __LINE__);
  }

  // create output space
  //
  if (!output_a.setLength((long)order_d + 1)) {
    return Error::handle(name(), L"computeAutoUnfactoredFromSignal",
                         ERR, __FILE__, __LINE__, Error::WARNING);
  }

  // loop over all lags
  //
  for (long i = 0; i <= order; i++) {

    // set the starting sample depending on the mode:
    //  AUTO: starting sample is always order_d
    //  AUTO_TAPERED: starting sample is dependent on the lag
    //
    double sum = 0.0;
    long first_index = order;
    if (algorithm_d == AUTO_TAPERED) {
      first_index = i;
    }
    for (long n = first_index; n < last_index; n++) {
      sum += (double)input_a(n) * (double)input_a(n - i);
    }

    // save the output value
    //
    output_a(i) = sum;
  }

  // normalize:
  //  case: LENGTH
  //  divide by the number of terms used to compute each lag (note that for
  //  the tapered algorithm, this depends on the lag)
  //
  if (normalization_d == LENGTH) {
    if (last_index == 0) {
      return Error::handle(name(), L"computeAutoUnfactoredFromSignal",
                           Error::ARG, __FILE__, __LINE__);
    }
    if (algorithm_d == AUTO) {
      status = output_a.mult(1.0 / ((float)last_index - (float)order));
    }
    else if (algorithm_d == AUTO_TAPERED) {
      for (long k = 0; k < order; k++) {
        output_a(k) /= last_index - k;
      }
      status = true;
    }
  }

  // normalize:
  //  case: UNIT_ENERGY
  //  divide by the zeroth order lag
  //
  else if (normalization_d == UNIT_ENERGY) {
    if (output_a(0) == (float)0) {
      return Error::handle(name(), L"computeAutoUnfactoredFromSignal",
                           Error::ARG, __FILE__, __LINE__);
    }
    output_a.mult(1.0 / output_a(0));
  }

  // exit gracefully
  //
  return status;
}
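
// note: the unfactored computation above evaluates the lag products
// directly,
//
//   r(i) = sum_{n = n0}^{N - 1} x(n) * x(n - i),    i = 0, ..., order,
//
// where N is the input length and n0 = order for the AUTO algorithm or
// n0 = i for the AUTO_TAPERED algorithm (cf. equation 1.23b in Markel
// and Gray).
//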

// method: computeAutoFromPrediction
//
// arguments:
//  VectorFloat& output: (output) Correlation coefficients
//  const VectorFloat& input: (input) input data
//
// return: a boolean value indicating status
//
// this method computes the autocorrelation coefficients from
// predictor coefficients using the factored algorithm (for linear
// predictor coefficients). see:
//
//  L. Rabiner and R. Schafer,
//  Digital Processing of Speech Signals,
//  Prentice-Hall, Englewood Cliffs, New Jersey, USA, pp. 443, 1976.
//
// see equation 8.128. this algorithm returns normalized autocorrelation
// coefficients (r(0) = 1.0).
//
boolean Correlation::computeAutoFromPrediction(VectorFloat& output_a,
                                               const VectorFloat& input_a) {

  // convert to reflection coefficients
  //
  VectorFloat rc;
  convertFromPrediction(rc, input_a);

  // convert to autocorrelation
  //
  return computeAutoFromReflection(output_a, rc);
}

// method: computeAutoFromReflection
//
// arguments:
//  VectorFloat& output: (output) Correlation coefficients
//  const VectorFloat& input: (input) input data
//
// return: a boolean value indicating status
//
// this method computes the autocorrelation coefficients from
// reflection coefficients. see:
//
//  L. Rabiner and R. Schafer,
//  Digital Processing of Speech Signals,
//  Prentice-Hall, Englewood Cliffs, New Jersey, USA, pp. 443, 1976.
//
// see equation 8.128. this algorithm returns normalized autocorrelation
// coefficients (r(0) = 1.0).
//
boolean Correlation::computeAutoFromReflection(VectorFloat& output_a,
                                               const VectorFloat& input_a) {

  // declare local variables
  //
  long i, j;                        // loop counters
  long lp_order = order_d;          // analysis order
  long j_bckwd;                     // a backwards index
  long middle_index;                // inner loop limit
  double sum;                       // accumulator
  double rc_reg;                    // register
  double egy_error;                 // normalized error
  VectorFloat pc(lp_order + 1);     // intermediate storage

  // create output space
  //
  if (!output_a.setLength(lp_order + 1)) {
    return Error::handle(name(), L"computeAutoFromReflection",
                         ERR, __FILE__, __LINE__, Error::WARNING);
  }

  // initialization
  //
  egy_error = 1.0;
  output_a(0) = egy_error;
  pc(0) = egy_error;

  // do the first step manually
  //
  sum = input_a(0);
  output_a(1) = -(float)input_a(0) * egy_error;
  pc(1) = sum;
  egy_error *= (1 - sum * sum);

  // recursion
  //
  for (i = 2; i <= lp_order; i++) {

    // compute the next reflection coefficient
    //
    sum = 0;
    for (j = 1; j < i; j++) {
      sum += pc(j) * output_a(i - j);
    }
    rc_reg = input_a(i - 1);
    output_a(i) = -rc_reg * egy_error - sum;

    // compute the new prediction coefficients:
    //  note that an algorithm implemented in
    //  Markel and Gray, Linear Prediction of Speech, pp. 219,
    //  is used here to minimize storage.
    //
    pc(i) = rc_reg;
    j_bckwd = i - 1;
    middle_index = i / 2;
    for (j = 1; j <= middle_index; j++) {
      sum = (float)pc(j_bckwd) + rc_reg * pc(j);
      pc(j) = (float)pc(j) + rc_reg * pc(i - j);
      pc(j_bckwd) = sum;
      j_bckwd--;
    }

    // compute new error
    //
    egy_error *= 1.0 - rc_reg * rc_reg;
  }

  // exit gracefully
  //
  return true;
}

// method: convertFromPrediction
//
// arguments:
//  VectorFloat& output: (output) LP reflection coefficients
//  const VectorFloat& input: (input) LP predictor coefficients
//
// return: a boolean value indicating status
//
// this method converts linear prediction predictor coefficients
// to reflection coefficients. see:
//
//  J.D. Markel and A.H. Gray, Jr.,
//  Linear Prediction of Speech, Springer-Verlag Berlin Heidelberg,
//  New York, New York, USA, pp. 96, 1976.
//
boolean Correlation::convertFromPrediction(VectorFloat& output_a,
                                           const VectorFloat& input_a) {

  // copy the input (so we don't destroy it)
  // and create space for the output data
  //
  VectorFloat a(input_a);
  output_a.setLength(order_d);

  // create additional scratch space
  //
  VectorFloat b(input_a.length());

  // declare some local counters
  //
  long m = order_d;
  long mr;
  long j, k;
  float d;
  float a_tmp;

  // loop over all orders
  //
  for (j = 0; j < m; j++) {

    // establish preliminary counters
    //
    mr = m - j;
    a_tmp = a(mr);
    d = 1.0 - a_tmp * a_tmp;

    // store a() in reverse order
    //
    for (k = 0; k < mr; k++) {
      b(k) = a(mr - k);
    }

    // compute the new predictor coefficients
    //
    for (k = 0; k < mr; k++) {

      // check for a divide by zero error
      //
      if (d == 0) {
        return Error::handle(name(), L"convertFromPrediction",
                             Error::ARG, __FILE__, __LINE__);
      }
      a(k) = (a(k) - a_tmp * b(k)) / d;
    }

    // save the output value
    //
    output_a(mr - 1) = a_tmp;

    // stability check
    //
    if ((a_tmp > 1.0) || (a_tmp < -1.0)) {
      return Error::handle(name(), L"convertFromPrediction",
                           ERR_STABLE, __FILE__, __LINE__, Error::WARNING);
    }
  }

  // exit gracefully
  //
  return true;
}
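
// note: each pass of the loop above removes the highest-order coefficient
// using the standard step-down recursion (written here in 1-indexed
// notation):
//
//   k_m = a_m(m)
//   a_{m-1}(j) = (a_m(j) - k_m * a_m(m - j)) / (1 - k_m * k_m),
//   j = 1, ..., m - 1,
//
// so that output_a ends up holding the reflection coefficients
// k_1, ..., k_M.
//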

// method: computeCross
//
// arguments:
//  VectorFloat& output: (output) Correlation coefficients
//  const VectorFloat& ref: (input) reference signal
//  const VectorFloat& tst: (input) test signal
//
// return: a boolean value indicating status
//
// this method performs crosscorrelation between two signals. note that
// this implementation is not symmetric - the first signal is taken as
// the reference and the length of the correlation window is taken
// as the length of the second signal.
//
boolean Correlation::computeCross(VectorFloat& output_a,
                                  const VectorFloat& ref_a,
                                  const VectorFloat& tst_a) {

  // declare local variables
  //
  long input_len = ref_a.length();
  long tst_len = tst_a.length();
  order_d = input_len;

  // check the lengths
  //
  if (tst_len > input_len) {
    return Error::handle(name(), L"computeCross",
                         ERR_INSUFD, __FILE__, __LINE__);
  }

  // create output space
  //
  if (!output_a.setLength(order_d)) {
    return Error::handle(name(), L"computeCross", ERR, __FILE__, __LINE__);
  }

  // calculate the output
  //
  for (long lag_index = 0; lag_index < order_d; lag_index++) {

    // declare local variables
    //
    double sum = 0.0;

    // loop over possible samples
    //
    long last_index = (long)Integral::min(tst_len, input_len - lag_index);
    for (long sample_index = 0; sample_index < last_index; sample_index++) {

      // accumulate the dot product
      //
      sum += (double)ref_a(lag_index + sample_index) *
        (double)tst_a(sample_index);
    }

    // output sum
    //
    output_a(lag_index) = sum;
  }

  // normalization type: LENGTH
  //
  if (normalization_d == LENGTH) {

    // check for a divide by zero error
    //
    if (tst_len == 0) {
      return Error::handle(name(), L"computeCross",
                           Error::ARG, __FILE__, __LINE__);
    }

    // perform LENGTH normalization and exit gracefully
    //
    float temp = (float)1 / ((float)tst_len);
    return output_a.mult(temp);
  }

  // normalization type: UNIT_ENERGY
  //
  else if (normalization_d == UNIT_ENERGY) {

    // check for a divide by zero error
    //
    float sum_t = Integral::sqrt(tst_a.sumSquare());
    float sum_r = Integral::sqrt(ref_a.sumSquare());
    if ((sum_t == 0) || (sum_r == 0)) {
      return output_a.mult(0.0);
    }

    // perform UNIT_ENERGY normalization and exit gracefully
    //
    float temp = (float)1 / Integral::sqrt(ref_a.sumSquare() *
                                           tst_a.sumSquare());
    return output_a.mult(temp);
  }

  // exit gracefully
  //
  return true;
}
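
// note: the cross correlation computed above is
//
//   r(l) = sum_{n = 0}^{min(Nt, Nr - l) - 1} ref(l + n) * tst(n),
//   l = 0, ..., Nr - 1,
//
// where Nr is the reference length and Nt is the test length, i.e. only
// non-negative lags of the reference relative to the test signal are
// produced.
//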

// method: computeConv
//
// arguments:
//  VectorFloat& output: (output) convolution output
//  const VectorFloat& input: (input) input data, input signal
//  const VectorFloat& filter: (input) input data, filter signal
//
// return: a boolean value indicating status
//
// this method computes the convolution between two signals. see:
//
//  John G. Proakis, Dimitris G. Manolakis,
//  Digital Signal Processing: Principles, Algorithms, and Applications,
//  Second Edition, Macmillan Publishing Company, New York, USA,
//  pp. 75-76, 1992.
//
// particularly equation 2.3.17.
//
boolean Correlation::computeConv(VectorFloat& output_a,
                                 const VectorFloat& input_a,
                                 const VectorFloat& filter_a) {

  // declare local variables
  //
  boolean status = true;
  long input_len = input_a.length();
  long filter_len = filter_a.length();

  // check order
  //
  if ((input_len <= 0) || (filter_len <= 0)) {
    return Error::handle(name(), L"computeConv",
                         ERR_INSUFD, __FILE__, __LINE__);
  }

  // create output space
  //
  if (!output_a.setLength(input_len)) {
    return Error::handle(name(), L"computeConv", ERR, __FILE__, __LINE__);
  }
  output_a.clear(Integral::RETAIN);

  // loop through all necessary input signal samples
  //
  for (long output_index = 0; output_index < input_len; output_index++) {

    // loop through all necessary filter signal samples
    //
    long last_filter_index = (long)Integral::min(filter_len, output_index + 1);
    for (long filter_index = 0; filter_index < last_filter_index;
         filter_index++) {

      // sum the product of the input signal and filter signal
      //
      output_a(output_index) += input_a(output_index - filter_index) *
        filter_a(filter_index);
    }
  }

  // normalization type: LENGTH
  //
  if (normalization_d == LENGTH) {

    // check for a divide by zero error
    //
    if (filter_len == 0) {
      return Error::handle(name(), L"computeConv",
                           Error::ARG, __FILE__, __LINE__);
    }

    // perform LENGTH normalization and exit gracefully
    //
    float temp = (float)1 / ((float)filter_len);
    return output_a.mult(temp);
  }

  // normalization type: UNIT_ENERGY
  //
  else if (normalization_d == UNIT_ENERGY) {

    // check for a divide by zero error
    //
    if ((Integral::sqrt(filter_a.sumSquare() * input_a.sumSquare())) == 0) {
      return Error::handle(name(), L"computeConv",
                           Error::ARG, __FILE__, __LINE__);
    }

    // perform UNIT_ENERGY normalization and exit gracefully
    //
    float temp = (float)1 / Integral::sqrt(filter_a.sumSquare() *
                                           input_a.sumSquare());
    status = output_a.mult(temp);
  }

  // exit gracefully
  //
  return status;
}
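
// note: the convolution computed above is the truncated convolution sum
//
//   y(n) = sum_{k = 0}^{min(Nf, n + 1) - 1} x(n - k) * h(k),
//   n = 0, ..., Nx - 1,
//
// where x is the input signal (length Nx) and h is the filter (length Nf),
// so the output has the same length as the input.
//
// a minimal usage sketch for the two-signal methods in this file (this
// assumes a default-constructed Correlation object supplies usable defaults
// for the normalization mode; the vector setup is only illustrative):
//
//   Correlation corr;
//   VectorFloat ref, tst, out;
//   ref.setLength(4);
//   ref(0) = 1.0; ref(1) = 2.0; ref(2) = 3.0; ref(3) = 4.0;
//   tst.setLength(2);
//   tst(0) = 1.0; tst(1) = 1.0;
//   corr.computeCross(out, ref, tst);    // out(l) = sum_n ref(l + n) * tst(n)
//   corr.computeConv(out, ref, tst);     // out(n) = sum_k ref(n - k) * tst(k)
//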