⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 antlrparser.java

📁 ANTLR (ANother Tool for Language Recognition) 它是这样的一种工具
💻 JAVA
📖 第 1 页 / 共 5 页
字号:
// $ANTLR 2.7.7 (2006-01-29): "antlr.g" -> "ANTLRParser.java"$/* [The "BSD licence"] Copyright (c) 2005-2006 Terence Parr All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright    notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright    notice, this list of conditions and the following disclaimer in the    documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products    derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/package org.antlr.tool;import java.util.*;import java.io.*;import org.antlr.analysis.*;import org.antlr.misc.*;import antlr.*;import antlr.TokenBuffer;import antlr.TokenStreamException;import antlr.TokenStreamIOException;import antlr.ANTLRException;import antlr.LLkParser;import antlr.Token;import antlr.TokenStream;import antlr.RecognitionException;import antlr.NoViableAltException;import antlr.MismatchedTokenException;import antlr.SemanticException;import antlr.ParserSharedInputState;import 
antlr.collections.impl.BitSet;import antlr.collections.AST;import java.util.Hashtable;import antlr.ASTFactory;import antlr.ASTPair;import antlr.collections.impl.ASTArray;/** Read in an ANTLR grammar and build an AST.  Try not to do *  any actions, just build the tree. * *  The phases are: * *		antlr.g (this file) *		assign.types.g *		define.g *		buildnfa.g *		antlr.print.g (optional) *		codegen.g * *  Terence Parr *  University of San Francisco *  2005 */public class ANTLRParser extends antlr.LLkParser       implements ANTLRTokenTypes {	Grammar grammar = null;	protected int gtype = 0;	protected String currentRuleName = null;	protected GrammarAST currentBlockAST = null;	/* this next stuff supports construction of the Tokens artificial rule.	   I hate having some partial functionality here, I like doing everything	   in future tree passes, but the Tokens rule is sensitive to filter mode.	   And if it adds syn preds, future tree passes will need to process the	   fragments defined in Tokens; a cyclic dependency.	   As of 1-17-06 then, Tokens is created for lexer grammars in the	   antlr grammar parser itself.	   This grammar is also sensitive to the backtrack grammar option that	   tells ANTLR to automatically backtrack when it can't compute a DFA.	   7-2-06 I moved all option processing to antlr.g from define.g as I	   need backtrack option etc... for blocks.  Got messy.	
*/	protected List lexerRuleNames = new ArrayList();	public List getLexerRuleNames() { return lexerRuleNames; }	protected GrammarAST setToBlockWithSet(GrammarAST b) {		GrammarAST alt = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(ALT,"ALT")).add(b).add((GrammarAST)astFactory.create(EOA,"<end-of-alt>")));		prefixWithSynPred(alt);		return (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK,"BLOCK")).add(alt).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));	}	/** Create a copy of the alt and make it into a BLOCK; all actions,	 *  labels, tree operators, rewrites are removed.	 */	protected GrammarAST createBlockFromDupAlt(GrammarAST alt) {		//GrammarAST nalt = (GrammarAST)astFactory.dupTree(alt);		GrammarAST nalt = GrammarAST.dupTreeNoActions(alt, null);		GrammarAST blk = (GrammarAST)astFactory.make( (new ASTArray(3)).add((GrammarAST)astFactory.create(BLOCK,"BLOCK")).add(nalt).add((GrammarAST)astFactory.create(EOB,"<end-of-block>")));		return blk;	}	/** Rewrite alt to have a synpred as first element;	 *  (xxx)=>xxx	 *  but only if they didn't specify one manually.	 */	protected void prefixWithSynPred(GrammarAST alt) {		// if they want backtracking and it's not a lexer rule in combined grammar		String autoBacktrack = (String)currentBlockAST.getOption("backtrack");		if ( autoBacktrack==null ) {			autoBacktrack = (String)grammar.getOption("backtrack");		}		if ( autoBacktrack!=null&&autoBacktrack.equals("true") &&			 !(gtype==COMBINED_GRAMMAR &&			 Character.isUpperCase(currentRuleName.charAt(0))) &&			 alt.getFirstChild().getType()!=SYN_SEMPRED )		{			// duplicate alt and make a synpred block around that dup'd alt			GrammarAST synpredBlockAST = createBlockFromDupAlt(alt);			// Create a SYN_SEMPRED node as if user had typed this in			// Effectively we replace (xxx)=>xxx with {synpredxxx}? 
xxx			GrammarAST synpredAST = createSynSemPredFromBlock(synpredBlockAST);			// insert SYN_SEMPRED as first element of alt			synpredAST.getLastSibling().setNextSibling(alt.getFirstChild());			alt.setFirstChild(synpredAST);		}	}	protected GrammarAST createSynSemPredFromBlock(GrammarAST synpredBlockAST) {		// add grammar fragment to a list so we can make fake rules for them		// later.		String predName = grammar.defineSyntacticPredicate(synpredBlockAST,currentRuleName);		// convert (alpha)=> into {synpredN}? where N is some pred count		// during code gen we convert to function call with templates		String synpredinvoke = predName;		GrammarAST p = (GrammarAST)astFactory.create(SYN_SEMPRED,synpredinvoke);		p.setEnclosingRule(currentRuleName);		// track how many decisions have synpreds		grammar.blocksWithSynPreds.add(currentBlockAST);		return p;	}	public GrammarAST createSimpleRuleAST(String name,										  GrammarAST block,										  boolean fragment)   {   		GrammarAST modifier = null;   		if ( fragment ) {   			modifier = (GrammarAST)astFactory.create(FRAGMENT,"fragment");   		}   		GrammarAST EORAST = (GrammarAST)astFactory.create(EOR,"<end-of-rule>");   		GrammarAST EOBAST = block.getLastChild();		EORAST.setLine(EOBAST.getLine());		EORAST.setColumn(EOBAST.getColumn());		GrammarAST ruleAST =		   (GrammarAST)astFactory.make( (new ASTArray(8)).add((GrammarAST)astFactory.create(RULE,"rule")).add((GrammarAST)astFactory.create(ID,name)).add(modifier).add((GrammarAST)astFactory.create(ARG,"ARG")).add((GrammarAST)astFactory.create(RET,"RET")).add((GrammarAST)astFactory.create(SCOPE,"scope")).add(block).add(EORAST));		ruleAST.setLine(block.getLine());		ruleAST.setColumn(block.getColumn());		return ruleAST;	}    public void reportError(RecognitionException ex) {		Token token = null;		try {			token = LT(1);		}		catch (TokenStreamException tse) {			ErrorManager.internalError("can't get token???", tse);		}		ErrorManager.syntaxError(			ErrorManager.MSG_SYNTAX_ERROR,			grammar,			
token,			"antlr: "+ex.toString(),			ex);    }    public void cleanup(GrammarAST root) {		if ( gtype==LEXER_GRAMMAR ) {			String filter = (String)grammar.getOption("filter");			GrammarAST tokensRuleAST =			    grammar.addArtificialMatchTokensRule(			    	root,			    	lexerRuleNames,			    	filter!=null&&filter.equals("true"));		}    }protected ANTLRParser(TokenBuffer tokenBuf, int k) {  super(tokenBuf,k);  tokenNames = _tokenNames;  buildTokenTypeASTClassMap();  astFactory = new ASTFactory(getTokenTypeToASTClassMap());}public ANTLRParser(TokenBuffer tokenBuf) {  this(tokenBuf,2);}protected ANTLRParser(TokenStream lexer, int k) {  super(lexer,k);  tokenNames = _tokenNames;  buildTokenTypeASTClassMap();  astFactory = new ASTFactory(getTokenTypeToASTClassMap());}public ANTLRParser(TokenStream lexer) {  this(lexer,2);}public ANTLRParser(ParserSharedInputState state) {  super(state,2);  tokenNames = _tokenNames;  buildTokenTypeASTClassMap();  astFactory = new ASTFactory(getTokenTypeToASTClassMap());}	public final void grammar(		Grammar g	) throws RecognitionException, TokenStreamException {				returnAST = null;		ASTPair currentAST = new ASTPair();		GrammarAST grammar_AST = null;		Token  cmt = null;		GrammarAST cmt_AST = null;		GrammarAST gr_AST = null;		GrammarAST gid_AST = null;		GrammarAST ts_AST = null;		GrammarAST scopes_AST = null;		GrammarAST a_AST = null;		GrammarAST r_AST = null;					this.grammar = g;			GrammarAST opt=null;			Token optionsStartToken = null;			Map opts;						try {      // for error handling			{			switch ( LA(1)) {			case ACTION:			{				GrammarAST tmp1_AST = null;				tmp1_AST = (GrammarAST)astFactory.create(LT(1));				match(ACTION);				break;			}			case PARSER:			case DOC_COMMENT:			case LITERAL_lexer:			case LITERAL_tree:			case LITERAL_grammar:			{				break;			}			default:			{				throw new NoViableAltException(LT(1), getFilename());			}			}			}			{			switch ( LA(1)) {			case DOC_COMMENT:			{				cmt = LT(1);				cmt_AST = 
(GrammarAST)astFactory.create(cmt);				match(DOC_COMMENT);				break;			}			case PARSER:			case LITERAL_lexer:			case LITERAL_tree:			case LITERAL_grammar:			{				break;			}			default:			{				throw new NoViableAltException(LT(1), getFilename());			}			}			}			grammarType();			gr_AST = (GrammarAST)returnAST;			id();			gid_AST = (GrammarAST)returnAST;			GrammarAST tmp2_AST = null;			tmp2_AST = (GrammarAST)astFactory.create(LT(1));			match(SEMI);			{			switch ( LA(1)) {			case OPTIONS:			{				if ( inputState.guessing==0 ) {					optionsStartToken=LT(1);				}				opts=optionsSpec();				if ( inputState.guessing==0 ) {					grammar.setOptions(opts, optionsStartToken);				}				if ( inputState.guessing==0 ) {					opt=(GrammarAST)returnAST;				}				break;			}			case TOKENS:			case SCOPE:			case FRAGMENT:			case DOC_COMMENT:			case AMPERSAND:			case TOKEN_REF:			case LITERAL_protected:			case LITERAL_public:			case LITERAL_private:			case RULE_REF:			{				break;			}			default:			{				throw new NoViableAltException(LT(1), getFilename());			}			}			}			{			switch ( LA(1)) {			case TOKENS:			{				tokensSpec();				ts_AST = (GrammarAST)returnAST;				break;			}			case SCOPE:			case FRAGMENT:			case DOC_COMMENT:			case AMPERSAND:			case TOKEN_REF:			case LITERAL_protected:			case LITERAL_public:			case LITERAL_private:			case RULE_REF:			{				break;			}			default:			{				throw new NoViableAltException(LT(1), getFilename());			}			}			}			attrScopes();			scopes_AST = (GrammarAST)returnAST;			{			switch ( LA(1)) {			case AMPERSAND:			{				actions();				a_AST = (GrammarAST)returnAST;				break;			}			case FRAGMENT:			case DOC_COMMENT:			case TOKEN_REF:			case LITERAL_protected:			case LITERAL_public:			case LITERAL_private:			case RULE_REF:			{				break;			}			default:			{				throw new NoViableAltException(LT(1), getFilename());			}			}			}			rules();			r_AST = (GrammarAST)returnAST;			GrammarAST tmp3_AST = null;			tmp3_AST = (GrammarAST)astFactory.create(LT(1));			match(Token.EOF_TYPE);			
if ( inputState.guessing==0 ) {				grammar_AST = (GrammarAST)currentAST.root;								grammar_AST = (GrammarAST)astFactory.make( (new ASTArray(2)).add(null).add((GrammarAST)astFactory.make( (new ASTArray(8)).add(gr_AST).add(gid_AST).add(cmt_AST).add(opt).add(ts_AST).add(scopes_AST).add(a_AST).add(r_AST))));				cleanup(grammar_AST);								currentAST.root = grammar_AST;				currentAST.child = grammar_AST!=null &&grammar_AST.getFirstChild()!=null ?					grammar_AST.getFirstChild() : grammar_AST;				currentAST.advanceChildToEnd();			}		}		catch (RecognitionException ex) {			if (inputState.guessing==0) {				reportError(ex);				recover(ex,_tokenSet_0);			} else {			  throw ex;			}		}		returnAST = grammar_AST;	}		public final void grammarType() throws RecognitionException, TokenStreamException {				returnAST = null;		ASTPair currentAST = new ASTPair();		GrammarAST grammarType_AST = null;		Token  gr = null;		GrammarAST gr_AST = null;				try {      // for error handling			{			switch ( LA(1)) {			case LITERAL_lexer:			{				match(LITERAL_lexer);				if ( inputState.guessing==0 ) {					gtype=LEXER_GRAMMAR;				}				break;			}			case PARSER:			{				match(PARSER);				if ( inputState.guessing==0 ) {					gtype=PARSER_GRAMMAR;				}				break;			}			case LITERAL_tree:			{				match(LITERAL_tree);				if ( inputState.guessing==0 ) {					gtype=TREE_GRAMMAR;				}				break;			}			case LITERAL_grammar:			{				if ( inputState.guessing==0 ) {					gtype=COMBINED_GRAMMAR;				}				break;			}			default:			{				throw new NoViableAltException(LT(1), getFilename());			}			}			}			gr = LT(1);			gr_AST = (GrammarAST)astFactory.create(gr);			astFactory.addASTChild(currentAST, gr_AST);			match(LITERAL_grammar);			if ( inputState.guessing==0 ) {				gr_AST.setType(gtype);			}			grammarType_AST = (GrammarAST)currentAST.root;		}		catch (RecognitionException ex) {			if (inputState.guessing==0) {				reportError(ex);				recover(ex,_tokenSet_1);			} else {			  throw ex;			}		}		returnAST = grammarType_AST;	}		
// NOTE(review): generated rule method for `id : TOKEN_REF | RULE_REF ;` — matches either
// token kind and retypes the matched AST node to ID (see the two setType(ID) calls).
// The text is TRUNCATED mid-catch here (page 1 of 5 of the original listing); the rest of
// this method continues on the next page, so it is left byte-for-byte as scraped.
public final void id() throws RecognitionException, TokenStreamException {				returnAST = null;		ASTPair currentAST = new ASTPair();		GrammarAST id_AST = null;				try {      // for error handling			switch ( LA(1)) {			case TOKEN_REF:			{				GrammarAST tmp7_AST = null;				tmp7_AST = (GrammarAST)astFactory.create(LT(1));				astFactory.addASTChild(currentAST, tmp7_AST);				match(TOKEN_REF);				if ( inputState.guessing==0 ) {					id_AST = (GrammarAST)currentAST.root;					id_AST.setType(ID);				}				id_AST = (GrammarAST)currentAST.root;				break;			}			case RULE_REF:			{				GrammarAST tmp8_AST = null;				tmp8_AST = (GrammarAST)astFactory.create(LT(1));				astFactory.addASTChild(currentAST, tmp8_AST);				match(RULE_REF);				if ( inputState.guessing==0 ) {					id_AST = (GrammarAST)currentAST.root;					id_AST.setType(ID);				}				id_AST = (GrammarAST)currentAST.root;				break;			}			default:			{				throw new NoViableAltException(LT(1), getFilename());			}			}		}		catch (RecognitionException ex) {			if (inputState.guessing==0) {				reportError(ex);				recover(ex,_tokenSet_2);			} else {			  throw ex;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -