readme.txt

Mathematical Methods by Moon and Stirling
hmmnorm.m
% 
% Compute the branch norm for the HMM using the Viterbi approach
%
% function d = hmmnorm(branchweight,y,state,nextstate)
%
% branchweight= log transition probability 
% y = output
% state = current state in trellis
% nextstate = next state in trellis
%
% d = branch norm (log-likelihood)


hmmnotes.m
% Notes on data structures and functions for the HMM
% 


hmmtest2vb.m
% Test the HMM using both Viterbi and EM-algorithm based training methods


hmmupdaten.m
%
% Compute updated HMM model from observations
%
% function HMM = hmmupdaten(y,HMM) 
%
% y = output sequence
% HMM = current model parameters
%
% HMM = updated model parameters


hmmupdatev.m
% 
% Compute updated HMM model from observations y using Viterbi methods
% Assumes only a single observation sequence.
%
% function HMM = hmmupdatev(y,HMM) 
%
% y = sequence of observations
% HMM = old HMM (to be updated)
%
% HMM = updated HMM


hmmupfv.m
% 
% Compute an update to the distribution f based upon the data y
% and the (assumed) state assignment in statelist
%
% function fnew = hmmupfv(y,statelist,n,f)
%
% y = sequence of observations
% statelist = state assignments
% n = number of states
% f = distribution (cell) to update
%
% fnew = updated distribution


houseleft.m
%
% Apply the Householder transformation based on v to A on the left
%
% function A = houseleft(A,v)
%
% A = an mxn matrix
% v = a Householder vector
%
% A = H_v A


houseright.m
%
% Apply the Householder transformation based on v to A on the right
%
% function A = houseright(A,v)
%
% A = an mxn matrix
% v = a Householder vector
%
% A = A H_v


ifs3a.m
% Plot the logistic map and the orbit of a point
%


initcluster.m
%
% 
% Choose an initial cluster at random
% 
% function Y = initcluster(X,m)
%
% X = input data: each column is a training data vector
% m = number of clusters
% Y = initial cluster: each column is a point


initpoisson.m
% 
% Initialize the global variables for the Poisson generator
% 
% function initpoisson


initvit1.m
% 
% Initialize the data structures and pointers for the Viterbi algorithm
% 
% function initvit1(intrellis, inbranchweight, inpathlen, innormfunc)
%
% intrellis: a description of the successor nodes 
%    e.g. [1 3; 3 4; 1 2; 3 4]
% inbranchweight: weights of branches used for comparison, saved as
%    cells in branchweight{state_number, branch_number}
%    branchweight may be a vector
%    e.g.  branchweight{1,1} = 0; branchweight{1,2} = 6;
%          branchweight{2,1} = 3; branchweight{2,2} = 3;
%          branchweight{3,1} = 6; branchweight{3,2} = 0;
%          branchweight{4,1} = 3; branchweight{4,2} = 3;
% inpathlen: length of window over which to compute
% innormfunc: the norm function used to compute the branch cost


invwavetrans.m
%
% Compute the inverse discrete wavelet transform 
%
% function c = invwavetrans(C,ap,coeff)
%
% C = input data (whose inverse transform is to be found)
% ap = index for start of coefficients for the jth level
% coeff = wavelet coefficients
%
% c = inverse transformed data


invwavetransper.m
%
% Compute the periodized inverse discrete wavelet transform 
%
% function c = invwavetransper(C,coeff,J)
%
% C = input data
% coeff = wavelet coefficients
% J = (optional) number of levels of inverse transform to compute
%    If length(C) is not a power of 2, J must be specified.
%
% c = inverse discrete wavelet transform of C


irwls.m
% 
% Computes the minimum solution c to ||x-Ac||_p using
% iteratively reweighted least squares
%
% function c = irwls(A,x,p)
%
% A = system matrix
% x = rhs of equation
% p = L_p norm
%
% c = solution vector
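
A minimal usage sketch, assuming the three-argument signature above (with p passed explicitly); the data values are illustrative:

    A = [1 1; 2 1; 3 1; 4 1];      % overdetermined system
    x = [1.1; 1.9; 3.2; 10];       % right-hand side containing one outlier
    c = irwls(A, x, 1);            % approximate L_1 solution, more outlier-robust than p = 2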


jacobi.m
% 
% Produce an updated solution x to Ax = b using Jacobi iteration
%
% function x = jacobi(A,x,b)
%
% A = input matrix
% x = initial solution
% b = right-hand side
% 
% Output x= updated solution
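
A minimal usage sketch for the documented one-step update; the caller supplies the iteration loop, and the matrix and iteration count are illustrative:

    A = [4 1 0; 1 4 1; 0 1 4];     % diagonally dominant, so Jacobi converges
    b = [1; 2; 3];
    x = zeros(3,1);                % initial guess
    for k = 1:50
      x = jacobi(A, x, b);         % one Jacobi update per call
    end
    norm(A*x - b)                  % should be small after enough sweeps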


kalex1.m
% Kalman filter example 1
%


kalman1.m
% 
% Computes the Kalman filter estimate xhat(t+1|t+1)
% for the system x(t+1) = Ax(t) + w
%                y(t) = Cx(t) + v
% where cov(w) = Q  and cov(v) = R, 
% The prior estimate is x0, and the prior covariance is P0.
% 


karf.m
%
% Evaluate the potential function f(x,c)
% for Karmarkar's algorithm
%
% function f = karf(x,c)
% 
% x = value of x
% c = constraint vector
%
% f = potential function


karmarker.m
% 
% Implement a Karmarkar-type algorithm for linear programming
% to solve a problem in "Karmarkar standard form"
%  min       c'x
% subject to Ax=0,  sum(x)=1, x >=0
%
% function x = karmarker(A,c)
%
% A,c = system matrices
%
% x = solution
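
A small problem in the standard form described above; the data are illustrative and were chosen so the usual starting point x = ones(n,1)/n satisfies Ax = 0:

    A = [1 1 -2];                  % A*(ones(3,1)/3) = 0, so the center of the simplex is feasible
    c = [2; 3; 1];
    x = karmarker(A, c);           % minimizes c'*x subject to A*x = 0, sum(x) = 1, x >= 0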


kissarma.m
% 
% Determine the ARMA parameters a and b of order p based upon the data in y.
%
% function [a,b] = kissarma(y,p)
%
% y = sequence 
% p = order of AR part
%
% a = AR coefficients
% b = MA coefficients
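
A minimal usage sketch on synthetic ARMA data; the true coefficients and data length are illustrative:

    N = 2000;
    atrue = [1 -0.8 0.15];          % true AR part (stable)
    btrue = [1 0.4];                % true MA part
    y = filter(btrue, atrue, randn(N,1));
    [a, b] = kissarma(y, 2);        % estimate a 2nd-order ARMA model from y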


levinson.m
% 
% Given a vector r = (r_0,r_1,\ldots,r_{n-1}),
% and a vector b = (b_1,b_2,\ldots,b_n)
% solve the nxn Toeplitz system Tx = b
%
% function [y] = levinson(r,b)
%
% Since Matlab has no zero-based indexing, r(1) = r_0
%
% r = vector of coefficients for Toeplitz matrix
% b = right-hand side
% 
% y = solution to Tx = b
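
A minimal usage sketch, assuming the indexing convention above (r(1) = r_0); the numbers are illustrative. Note that if the Signal Processing Toolbox is installed, it ships its own levinson with a different interface, so path order determines which one is called.

    r = [4 2 1];                    % first row/column of the symmetric Toeplitz matrix
    b = [1; 2; 3];
    y = levinson(r, b);             % solves toeplitz(r)*y = b
    norm(toeplitz(r)*y - b)         % should be near machine precision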


lgb.m
% 
% Find m clusters on the data X
%
% function [Y,d] = lgb(X,m)
%
%
% X = input data: each column is a training data vector
% m = number of clusters to find
%
% Y = set of clusters: each column is a cluster centroid
% d = minimum total distortion
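
A minimal usage sketch on two well-separated Gaussian clouds; the data are illustrative:

    X = [randn(2,200), randn(2,200) + 5];   % each column is a training vector
    [Y, d] = lgb(X, 2);                     % find 2 cluster centroids and the total distortion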


lms.m
% 
% Given a (real) scalar input signal x and a desired scalar signal d,
% compute an LMS update of the weight vector h.
% This function must be initialized by lmsinit
%
% function [h,eap] = lms(x,d)
%
% x = input signal (scalar)
% d = desired signal (scalar)
%
% h = updated LMS filter coefficient vector
% eap = (optional) a-priori error


lmsinit.m
%
% Initialize the LMS filter
% 
% function lmsinit(m,mu)
%
% m = dimension of vector
% mu = lms stepsize
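
A minimal system-identification sketch combining lmsinit and lms as documented above; the "unknown" filter and signals are illustrative:

    htrue = [1; 0.5; -0.3; 0.1];    % unknown system to identify
    N = 5000;
    x = randn(N,1);
    d = filter(htrue, 1, x);        % desired signal = output of the unknown system
    lmsinit(4, 0.01);               % 4-tap LMS filter, step size mu = 0.01
    for n = 1:N
      h = lms(x(n), d(n));          % one scalar update per sample
    end
    h                                % should be close to htrue (up to ordering convention)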


logistic.m
% 
% Compute the logistic function y = lambda*x*(1-x)
%
% function y = logistic(x,lambda)
%
% x = input value (may be a vector)
% lambda = parameter of the map
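
A minimal usage sketch iterating the map; the seed and lambda are illustrative:

    x = 0.3;
    for k = 1:100
      x = logistic(x, 3.9);         % iterate x -> lambda*x*(1-x); chaotic for lambda near 4
    end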


lpfilt.m
% 
% Design an optimal linear-phase filter using linear programming
%
% function [h,delta] = lpfilt(fp,fs,n)
%
% fp = pass-band frequency  (0.5=Fs/2)
% fs = stop-band frequency
% n = number of coefficients (assumed odd here)
%
% h = filter coefficients
% delta = pass-band ripple
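
A minimal usage sketch, assuming the normalized-frequency convention above (0.5 = Fs/2); the band edges and filter length are illustrative:

    [h, delta] = lpfilt(0.20, 0.25, 21);   % 21-tap linear-phase lowpass: passband to 0.20, stopband from 0.25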


lsfilt.m
%
% Determine a least-squares filter h with m coefficients 
%
% function [h,X] = lsfilt(f,d,m,type)
%
% f = input data
% d = desired output data
% m = order of filter
% type = data matrix type
%     type=1: "covariance" method    2: "autocorrelation" method
%          3: prewindowing           4: postwindowing
%
% h = least-squares filter
% X = (optional) data matrix
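
A minimal channel-identification sketch; the column orientation of f and d is an assumption, and the test channel is illustrative:

    f = randn(1000,1);                    % input data
    d = filter([1 0.6 -0.2], 1, f);       % desired output of a short FIR channel
    h = lsfilt(f, d, 3, 2);               % 3rd-order LS filter, "autocorrelation" method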


makehankel.m
% 
% Form a Hankel matrix from the input data y
%
% [H] = makehankel(y,m,n)
%
% y = input data  = [y1 y2 ...] (a series of vectors in a _row_)
% m = number of block rows in H
% n = number of block columns in H
%
% H = Hankel matrix formed from y
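
A minimal usage sketch with scalar data, so each block is 1x1; the expected output is based on the usual Hankel structure and the values are illustrative:

    y = 1:6;                        % row of data, as documented
    H = makehankel(y, 3, 4);        % expected: [1 2 3 4; 2 3 4 5; 3 4 5 6]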


makehouse.m
%
% Make the Householder vector v such that Hx has zeros in 
% all but the first component
%
% function v = makehouse(x)
%
% x = vector to be transformed
%
% v = Householder vector
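
A minimal sketch combining makehouse with houseleft (listed above); the matrix is illustrative:

    A = [3 1; 4 2; 0 5];
    v = makehouse(A(:,1));          % Householder vector that zeros A(2:end,1)
    B = houseleft(A, v);            % B(2:3,1) should be numerically zero, B(1,1) = +/- norm(A(:,1))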


massey.m
%
% Return the shortest binary (GF(2)) LFSR consistent with the data sequence y
%
% function [c] = massey(y)
%
% y = input sequence 
%
% c = LFSR connections, c = 1 + c(2)D + c(3)D^2 + ... c(L+1)D^L
%     (Note: opposite from usual Matlab order)
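
A minimal usage sketch on a short binary sequence; the sequence is illustrative:

    y = [0 0 1 1 0 1 1 1 0 1];      % GF(2) data
    c = massey(y);                  % shortest LFSR; c(1) = 1, remaining entries are connection coefficients in increasing powers of D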


maxeig.m
%
% Compute the largest eigenvalue and associated eigenvector of 
% a matrix A using the power method
%
% function [lambda,x] = maxeig(A)
%
% A = matrix whose eigenvalue is sought
%
% lambda = largest eigenvalue
% x = corresponding eigenvector


mineig.m
% 
% Compute the smallest eigenvalue and associated eigenvector of 
% a matrix A using the power method
% function [lambda,x] = mineig(A)
%
% A = matrix whose eigenvalue is sought
%
% lambda = minimum eigenvalue
% x = corresponding eigenvector
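
A minimal sketch exercising maxeig (above) and mineig on the same matrix, checked against the built-in eig; the matrix is illustrative:

    A = [4 1 0; 1 3 1; 0 1 2];
    [lmax, xmax] = maxeig(A);       % largest eigenvalue and eigenvector
    [lmin, xmin] = mineig(A);       % smallest eigenvalue and eigenvector
    sort(eig(A))                    % lmin and lmax should match the extremes of this list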


musicfun.m
%
% Compute the "MUSIC spectrum" at a frequency f.
%
% function pf = musicfun(f,p,V)
%
% f = frequency (may be an array of frequencies)
% p = order of system
% V = eigenvectors of autocorrelation matrix
%
% pf = plotting value for spectrum


neweig.m
% 
% Compute the eigenvalues and eigenvectors of a real symmetric matrix A
%
% function [T,Q] = neweig(A)
%
% A = matrix whose eigendecomposition is sought
%
% T = diagonal matrix of eigenvalues
% Q = (optional) matrix of eigenvectors
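
A minimal usage sketch on a random symmetric matrix with a consistency check; the data are illustrative:

    B = randn(5);  A = B + B';      % real symmetric test matrix
    [T, Q] = neweig(A);
    norm(A*Q - Q*T)                 % should be small if T and Q are as documented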


newlu.m
%
% Compute the lu factorization of A
%
% function [lu,indx] = newlu(A)
%
% A = matrix to be factored
%
% lu = matrix containing L and U factors
% indx = index of pivot permutations


newsvd.m
% 
% Compute the singular value decomposition of the mxn matrix A, as A= u s v'.
% We assume here that m>n
% 
% [u,s,v] = newsvd(A)
% or
% s = newsvd(A)
%
% A = matrix to be factored
% 
% Output:
% s = singular values
% u,v = (optional) orthogonal matrices
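
A minimal usage sketch using the single-output form documented above; the data are illustrative, with m > n as assumed:

    A = randn(6,3);
    s = newsvd(A);                  % singular values of A
    svd(A)                          % built-in values, for comparison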


nn1.m
%
% Compute the output of a neural network with weights in w
%
% function [y,V,Y] = nn1(xn,w)
% 
% xn = input
% w = cell array of weights
%
% y = output layer output
% V = (optional) internal activity
% Y = (optional) neuron output
%    The optional arguments V and Y are used for training to store output for
%    each layer:
%    Y{1} = input, Y{2} = first hidden layer, etc.
%    V{1} = first hidden layer, etc.


nnrandw.m
% 
% Generate an initial set of weights for a neural network at random,
% based upon the list in m
%
% function w = nnrandw(m)
% m = list of layer sizes
%   m(1) = number of inputs, m(2) = first hidden layer, etc.
%
% w = random weights
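
A minimal sketch combining nnrandw with nn1 (listed above); the layer sizes and input vector are illustrative:

    m = [2 4 1];                    % 2 inputs, one hidden layer of 4 neurons, 1 output
    w = nnrandw(m);                 % random initial weights (cell array)
    xn = [0.5; -0.2];
    y = nn1(xn, w);                 % network output for this input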


nntrain1.m
%
% Train a neural network using the input/output training data [x,d]
%
% function w = nntrain1(x,d,m,ninput,mu)
%
% x = [x(1) x(2) ... x(N)] = input training data   
% d = [d(1) d(2) ... d(N)] = output training data
% nlayer = number of layers
% m = number of neurons on each layer, 
%     m(1) = input layer, ... m(nlayer+1) = output layer
% mu = steepest descent step size
% alpha = (optional) momentum constant
% maxiter = (optional) maximum number of iterations (w = no maximum)
% w = (optional) starting weights
%
% w = new weights
% err = (optional) total squared error from training


permutedata.m
% 
% Randomly permute the columns of the data x.
%
% function xp = permutedata(x,type)
%
% x = data to permute
% type=type of permutation
%   type=1: Choose a random starting point, and go sequentially
%   type=2: random selection without replacement (not really a permutation)
%
% xp = permuted x
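
A minimal usage sketch; the data are illustrative:

    x = [1:10; 11:20];              % each column is a data vector
    xp = permutedata(x, 2);         % type 2: random selection without replacement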


pisarenko.m
%
% Compute the modal frequencies using Pisarenko's method,
% then find the amplitudes
%
% function [f,P] = pisarenko(Ryy)
%
% Ryy = autocorrelation function from observed data
%
% f = vector of frequencies
% P = vector of amplitudes


pivottableau.m
% 
% Perform pivoting on an augmented tableau until 
% there are no negative entries on the last row
%
% function [tableau,basicptr] = pivottableau(intableau,inbasicptr)
%
% intableau = input tableau
% inbasicptr = a list of the basic variables, such as [1 3 4]
%
% tableau = pivoted tableau 
% basicptr = new list of basic variables


poisson.m
% 
% Generate a sample of a random variable x with mean lambda
% (Following Numerical Recipes in C, 2nd ed., p. 294)
% This function should be initialized by initpoisson.m
%
% function x = poisson(lambda)
%
% lambda = Poisson mean
%
% x = Poisson random variable
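
A minimal sketch using initpoisson (listed above) followed by poisson; the mean is illustrative:

    initpoisson;                    % set up the generator's global variables
    x = poisson(5);                 % one Poisson(5) sample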


ptls1.m
% 
% Compute the Partial Total Least Squares solution of Ax = b
% where the first k columns of A are not modified
%
% function [x,Ahat,bhat] = ptls1(A,b,k)
%
% A = system matrix
% b = right-hand side
% k = number of columns of A not modified
%
% x = ptls solution to Ax=b
% Ahat = modified A matrix
% bhat = modified b matrix


ptls2.m
% 
% Find the partial total least-squares solution to Ax = b,
% where k1 rows and k2 columns of A are unmodified
% 
% function [x] = ptls2(A,b,k1,k2)
%
% A = system matrix
% b = right-hand side
% k1 = number of rows of A not modified
% k2 = number of columns of A not modified
%
% x = PTLS solution to Ax=b


qf.m
% 
% Compute the Q function:
%
% function p = qf(x)
%   p = (1/sqrt(2*pi)) \int_x^\infty exp(-t^2/2) dt


qfinv.m
% 
% Compute the inverse of the q function
%
% function x = qfinv(q)
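
A minimal sketch checking that qf (above) and qfinv invert each other; the argument is illustrative:

    p = qf(2);                      % Gaussian tail probability, roughly 0.0228
    x = qfinv(p);                   % should return a value close to 2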


qrgivens.m
% 
% Compute the QR factorization of a matrix A without column pivoting
% using Givens rotations
%
% function [R,thetac,thetas] = qrgivens(A)
% 
% A = mxn matrix (assumed to have full column rank)
%
% R = upper triangular matrix
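
A minimal usage sketch with a consistency check; the data are illustrative:

    A = randn(5,3);                 % full column rank with probability 1
    R = qrgivens(A);
    norm(R'*R - A'*A)               % should be near machine precision, since A = QR with Q orthogonal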
