
📄 lattice.java

📁 This file is part of a speech recognition software package (CMU Sphinx-4); it is platform independent. To execute the source code,
💻 JAVA
📖 Page 1 of 3
/*
 * Copyright 1999-2002 Carnegie Mellon University.
 * Portions Copyright 2002 Sun Microsystems, Inc.
 * Portions Copyright 2002 Mitsubishi Electric Research Laboratories.
 * All Rights Reserved.  Use is subject to license terms.
 *
 * See the file "license.terms" for information on usage and
 * redistribution of this file, and for a DISCLAIMER OF ALL
 * WARRANTIES.
 *
 */

package edu.cmu.sphinx.result;

import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.PrintWriter;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.Vector;

import edu.cmu.sphinx.decoder.search.AlternateHypothesisManager;
import edu.cmu.sphinx.decoder.search.Token;
import edu.cmu.sphinx.linguist.WordSearchState;
import edu.cmu.sphinx.linguist.dictionary.Dictionary;
import edu.cmu.sphinx.linguist.dictionary.Pronunciation;
import edu.cmu.sphinx.linguist.dictionary.Word;
import edu.cmu.sphinx.util.LogMath;

/**
 * <p>
 * Provides recognition lattice results. Lattices are created from
 * {@link edu.cmu.sphinx.result.Result Results}
 * which can be partial or final.
 * </p>
 * <p>
 * Lattices describe all theories considered by the Recognizer that have not
 * been pruned out.  Lattices are a directed graph containing
 * {@link edu.cmu.sphinx.result.Node Nodes} and
 * {@link edu.cmu.sphinx.result.Edge Edges}.
 * A Node corresponds to a theory that a word was spoken over a particular
 * period of time.  An Edge corresponds to the score of one word following
 * another.  The usual result transcript is the sequence of Nodes through the
 * Lattice with the best scoring path. Lattices are a useful tool for
 * analyzing "alternate results".
 * </p>
 * <p>
 * A Lattice can be created from a Result that has a full token tree
 * (with its corresponding AlternateHypothesisManager).
 * Currently, only the
 * {@link edu.cmu.sphinx.decoder.search.WordPruningBreadthFirstSearchManager}
 * has an AlternateHypothesisManager. Furthermore, the lattice
 * construction code currently only works for linguists where the
 * {@link edu.cmu.sphinx.linguist.WordSearchState} returns false on the
 * <code>isWordStart</code> method, i.e., where the word states appear
 * at the end of the word in the linguist. <i>Therefore, lattices should
 * only be created from Results produced by the
 * {@link edu.cmu.sphinx.linguist.lextree.LexTreeLinguist} and the
 * {@link edu.cmu.sphinx.decoder.search.WordPruningBreadthFirstSearchManager}.
 * </i>
 * </p>
 * <p>
 * Lattices can also be created from a collapsed
 * {@link edu.cmu.sphinx.decoder.search.Token} tree and its
 * AlternateHypothesisManager. Here is what 'collapsed' means.
 * Normally, between two word tokens is a series of tokens for other types
 * of states, such as unit or HMM states. Using 'W' for word tokens,
 * 'U' for unit tokens, 'H' for HMM tokens, a token chain can look like:
 * </p>
 * <pre>
 * W - U - H - H - H - H - U - H - H - H - H - W
 * </pre>
 * <p>
 * Usually, HMM tokens contain acoustic scores, and word tokens contain
 * language scores. If we want to know the total acoustic and language
 * scores between any two words, it is unnecessary to keep around the
 * unit and HMM tokens. Therefore, all their acoustic and language scores
 * are 'collapsed' into one token, so that it will look like:
 * </p>
 * <pre>
 * W - P - W
 * </pre>
 * <p>
 * where 'P' is a token that represents the path between the two words,
 * and P contains the acoustic and language scores between the two words.
 * It is this type of collapsed token tree that the Lattice class is
 * expecting. Normally, the task of collapsing the token tree is done
 * by the
 * {@link edu.cmu.sphinx.decoder.search.WordPruningBreadthFirstSearchManager}.
 * A collapsed token tree can look like:
 * </p>
 * <pre>
 *                             "cat" - P - &lt;/s&gt;
 *                            /
 *                           P
 *                          /
 * &lt;s&gt; - P - "a" - P - "big"
 *                          \
 *                           P
 *                            \
 *                             "dog" - P - &lt;/s&gt;
 * </pre>
 * <p>
 * When a Lattice is constructed from a Result, the above collapsed token tree,
 * together with the alternate hypothesis of "all" instead of "a",
 * will be converted into a Lattice that looks like the following:
 * </p>
 * <pre>
 *       "a"           "cat"
 *     /     \        /     \
 * &lt;s&gt;          "big"         - &lt;/s&gt;
 *     \     /        \     /
 *      "all"          "dog"
 * </pre>
 * <p>
 * Initially, a lattice can have redundant nodes, i.e., nodes referring to
 * the same word and that originate from the same parent node. These
 * nodes can be collapsed using the {@link LatticeOptimizer}.
 * </p>
 */
public class Lattice {

    protected Node initialNode;
    protected Node terminalNode;
    protected Set edges;
    protected Map nodes;
    protected double logBase;
    protected LogMath logMath;

    private Set visitedWordTokens;
    private AlternateHypothesisManager loserManager;

    /**
     * Create an empty Lattice.
     */
    protected Lattice() {
        edges = new HashSet();
        nodes = new HashMap();
    }

    /**
     * Create an empty Lattice.
     */
    protected Lattice(LogMath logMath) {
        this();
        this.logMath = logMath;
    }

    /**
     * Create a Lattice from a Result.
     *
     * The Lattice is created from the Token tree referenced by the Result.
     * The Lattice is then optimized to collapse all equivalent paths.
     *
     * @param result the result to convert into a lattice
     */
    public Lattice(Result result) {
        this(result.getLogMath());
        visitedWordTokens = new HashSet();
        loserManager = result.getAlternateHypothesisManager();
        if (loserManager != null) {
            loserManager.purge();
        }
        for (Iterator i = result.getResultTokens().iterator(); i.hasNext();) {
            Token token = (Token) i.next();
            while (token != null && !token.isWord()) {
                token = token.getPredecessor();
            }
            assert token.getWord().isSentenceEndWord();
            if (terminalNode == null) {
                terminalNode = new Node(getNodeID(result.getBestToken()),
                                        token.getWord(), -1, -1);
                addNode(terminalNode);
            }
            collapseWordToken(token);
        }
    }

    /**
     * Returns the node corresponding to the given word token.
     *
     * @param token the token which we want a node of
     *
     * @return the node of the given token
     */
    private Node getNode(Token token) {
        if (token.getWord().isSentenceEndWord()) {
            return terminalNode;
        }
        Node node = (Node) nodes.get(getNodeID(token));
        if (node == null) {
            WordSearchState wordState =
                (WordSearchState) token.getSearchState();

            int startFrame = -1;
            int endFrame = -1;

            if (wordState.isWordStart()) {
                startFrame = token.getFrameNumber();
            } else {
                endFrame = token.getFrameNumber();
            }

            node = new Node(getNodeID(token), token.getWord(),
                            startFrame, endFrame);
            addNode(node);
        }
        return node;
    }

    /**
     * Collapse the given word-ending token. This means collapsing all
     * the unit and HMM tokens that correspond to the word represented
     * by this token into an edge of the lattice.
     *
     * @param token the word-ending token to collapse
     */
    private void collapseWordToken(Token token) {
        if (visitedWordTokens.contains(token)) {
            return;
        }
        visitedWordTokens.add(token);

        collapseWordPath(getNode(token), token.getPredecessor(),
                         token.getAcousticScore(), token.getLanguageScore());

        if (loserManager != null) {
            List list = loserManager.getAlternatePredecessors(token);
            if (list != null) {
                for (Iterator i = list.iterator(); i.hasNext();) {
                    Token loser = (Token) i.next();
                    collapseWordPath(getNode(token), loser,
                                     token.getAcousticScore(),
                                     token.getLanguageScore());
                }
            }
        }
    }

    /**
     * @param parentWordNode the 'toNode' of the created edge
     * @param token the predecessor token of the token represented by
     *              the parentWordNode
     * @param acousticScore the acoustic score until and including the
     *                      parent of token
     * @param languageScore the language score until and including the
     *                      parent of token
     */
    private void collapseWordPath(Node parentWordNode, Token token,
                                  float acousticScore, float languageScore) {
        if (token.isWord()) {
            /*
             * If this is a word, create a Node for it, and then create an
             * edge from the Node to the parentWordNode
             */
            Node fromNode = getNode(token);
            addEdge(fromNode, parentWordNode,
                    (double) acousticScore, (double) languageScore);

            if (token.getPredecessor() != null) {
                /* Collapse the token sequence ending in this token. */
                collapseWordToken(token);
            } else {
                /* we've reached the sentence start token */
                assert token.getWord().isSentenceStartWord();
                initialNode = fromNode;
            }
        } else {
            /*
             * If a non-word token, just add the acoustic and language
             * scores to the current totals, and then move on to the
             * predecessor token.
             */
            acousticScore += token.getAcousticScore();
            languageScore += token.getLanguageScore();
            collapseWordPath(parentWordNode, token.getPredecessor(),
                             acousticScore, languageScore);

            /* Traverse the path(s) for the loser token(s). */
            if (loserManager != null) {
                List list = loserManager.getAlternatePredecessors(token);
                if (list != null) {
                    for (Iterator i = list.iterator(); i.hasNext();) {
                        Token loser = (Token) i.next();
                        collapseWordPath(parentWordNode, loser,
                                         acousticScore, languageScore);
                    }
                }
            }
        }
    }

    /**
     * Returns an ID for the Node associated with the given token.
     *
     * @param token the token associated with the Node
     *
     * @return an ID for the Node
     */
    private String getNodeID(Token token) {
        return Integer.toString(token.hashCode());
    }

    /**
     * Create a Lattice from a LAT file.  LAT files are created by
     * the method Lattice.dump()
     *
     * @param fileName the name of the LAT file to load
     */
    public Lattice(String fileName) {
        try {
            System.err.println("Loading from " + fileName);

            // load the nodes
            LineNumberReader in = new LineNumberReader(new FileReader(fileName));
            String line;
            while ((line = in.readLine()) != null) {
                StringTokenizer tokens = new StringTokenizer(line);
                if (tokens.hasMoreTokens()) {
                    String type = tokens.nextToken();
                    if (type.equals("edge:")) {
                        Edge.load(this, tokens);
                    } else if (type.equals("node:")) {
                        Node.load(this, tokens);
                    } else if (type.equals("initialNode:")) {
                        setInitialNode(getNode(tokens.nextToken()));
                    } else if (type.equals("terminalNode:")) {
                        setTerminalNode(getNode(tokens.nextToken()));
                    } else if (type.equals("logBase:")) {
                        logBase = Double.parseDouble(tokens.nextToken());
                    } else {
                        throw new Error("SYNTAX ERROR: " + fileName +
                                "[" + in.getLineNumber() + "] " + line);
                    }
                }
            }
            in.close();
        } catch (Exception e) {
            throw new Error(e.toString());
        }
    }

    /**
     * Add an edge from fromNode to toNode.  This method creates the Edge
     * object and does all the connecting.
     *
     * @param fromNode the node the edge starts from
     * @param toNode the node the edge leads to
     * @param acousticScore the acoustic score of the edge
     * @param lmScore the language model score of the edge
     * @return the new Edge
     */
    public Edge addEdge(Node fromNode, Node toNode,
                        double acousticScore, double lmScore) {
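The class comment above describes the intended workflow: build a Lattice from a final Result whose token tree was collapsed by the WordPruningBreadthFirstSearchManager, then merge redundant nodes with the LatticeOptimizer. The following is a minimal sketch of that workflow, not part of lattice.java itself; it assumes the Result comes from a recognizer configured with the LexTreeLinguist and WordPruningBreadthFirstSearchManager, and it assumes LatticeOptimizer takes a Lattice and exposes an optimize() method, as the class comment implies.

import edu.cmu.sphinx.result.Lattice;
import edu.cmu.sphinx.result.LatticeOptimizer;
import edu.cmu.sphinx.result.Result;

public class LatticeUsageSketch {

    /**
     * Builds an optimized word lattice from a final recognition Result.
     * The Result must carry the collapsed token tree (and its
     * AlternateHypothesisManager) described in the Lattice class comment.
     */
    public static Lattice buildLattice(Result result) {
        // Construct the lattice directly from the Result's collapsed token tree.
        Lattice lattice = new Lattice(result);

        // Merge redundant nodes (same word, same parent node), as the
        // class comment suggests doing with the LatticeOptimizer.
        new LatticeOptimizer(lattice).optimize();

        return lattice;
    }
}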

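The file-based constructor reloads a lattice previously written with Lattice.dump(). Below is a small, hypothetical loading snippet; the file name is illustrative, and the comments summarize only the line types the parser above recognizes. The exact fields after "node:" and "edge:" are consumed by Node.load and Edge.load, which are not shown on this page.

import edu.cmu.sphinx.result.Lattice;

public class LatticeLoadSketch {
    public static void main(String[] args) {
        // A LAT file, as read by the constructor shown above, is line-oriented;
        // each line starts with one of the keywords the parser checks for:
        //   node:  edge:  initialNode:  terminalNode:  logBase:
        // "example.lat" is purely illustrative.
        Lattice lattice = new Lattice("example.lat");
        System.err.println("Lattice loaded from example.lat");
    }
}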