
grammar.java

ANTLR (ANother Tool for Language Recognition) is a tool for language recognition.
Language: Java
Page 1 of 5
	 *  into a pure string of 16-bit char values.  Escapes and unicode \u0000
	 *  specs are converted to pure chars.  return in a buffer; people may
	 *  want to walk/manipulate further.
	 *
	 *  The NFA construction routine must know the actual char values.
	 */
	public static StringBuffer getUnescapedStringFromGrammarStringLiteral(String literal) {
		//System.out.println("escape: ["+literal+"]");
		StringBuffer buf = new StringBuffer();
		int last = literal.length()-1; // skip quotes on outside
		for (int i=1; i<last; i++) {
			char c = literal.charAt(i);
			if ( c=='\\' ) {
				i++;
				c = literal.charAt(i);
				if ( Character.toUpperCase(c)=='U' ) {
					// \u0000
					i++;
					String unicodeChars = literal.substring(i,i+4);
					// parse the unicode 16 bit hex value
					int val = Integer.parseInt(unicodeChars, 16);
					i+=4-1; // loop will inc by 1; only jump 3 then
					buf.append((char)val);
				}
				else {
					buf.append((char)ANTLRLiteralEscapedCharValue[c]); // normal \x escape
				}
			}
			else {
				buf.append(c); // simple char x
			}
		}
		//System.out.println("string: ["+buf.toString()+"]");
		return buf;
	}

	/** Pull your token definitions from an existing grammar in memory.
	 *  You must use Grammar() ctor then this method then setGrammarContent()
	 *  to make this work.  This is useful primarily for testing and
	 *  interpreting grammars.  Return the max token type found.
	 */
	public int importTokenVocabulary(Grammar importFromGr) {
		Set importedTokenIDs = importFromGr.getTokenIDs();
		for (Iterator it = importedTokenIDs.iterator(); it.hasNext();) {
			String tokenID = (String) it.next();
			int tokenType = importFromGr.getTokenType(tokenID);
			maxTokenType = Math.max(maxTokenType,tokenType);
			if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
				//System.out.println("import token from grammar "+tokenID+"="+tokenType);
				defineToken(tokenID, tokenType);
			}
		}
		return maxTokenType; // return max found
	}

	/** Load a vocab file <vocabName>.tokens and return max token type found.
	 */
	public int importTokenVocabulary(String vocabName) {
		String fullName = tool.getLibraryDirectory()+File.separator+vocabName+".tokens";
		try {
			BufferedReader br = tool.getLibraryFile(vocabName+".tokens");
			StreamTokenizer tokenizer = new StreamTokenizer(br);
			tokenizer.parseNumbers();
			tokenizer.wordChars('_', '_');
			tokenizer.eolIsSignificant(true);
			tokenizer.slashSlashComments(true);
			tokenizer.slashStarComments(true);
			tokenizer.ordinaryChar('=');
			tokenizer.quoteChar('\'');
			tokenizer.whitespaceChars(' ',' ');
			tokenizer.whitespaceChars('\t','\t');
			int lineNum = 1;
			int token = tokenizer.nextToken();
			while (token != StreamTokenizer.TT_EOF) {
				String tokenID;
				if ( token == StreamTokenizer.TT_WORD ) {
					tokenID = tokenizer.sval;
				}
				else if ( token == '\'' ) {
					tokenID = "'"+tokenizer.sval+"'";
				}
				else {
					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
									   vocabName+".tokens",
									   Utils.integer(lineNum));
					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
					token = tokenizer.nextToken();
					continue;
				}
				token = tokenizer.nextToken();
				if ( token != '=' ) {
					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
									   vocabName+".tokens",
									   Utils.integer(lineNum));
					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
					token = tokenizer.nextToken();
					continue;
				}
				token = tokenizer.nextToken(); // skip '='
				if ( token != StreamTokenizer.TT_NUMBER ) {
					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
									   vocabName+".tokens",
									   Utils.integer(lineNum));
					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
					token = tokenizer.nextToken();
					continue;
				}
				int tokenType = (int)tokenizer.nval;
				token = tokenizer.nextToken();
				//System.out.println("import "+tokenID+"="+tokenType);
				maxTokenType = Math.max(maxTokenType,tokenType);
				defineToken(tokenID, tokenType);
				lineNum++;
				if ( token != StreamTokenizer.TT_EOL ) {
					ErrorManager.error(ErrorManager.MSG_TOKENS_FILE_SYNTAX_ERROR,
									   vocabName+".tokens",
									   Utils.integer(lineNum));
					while ( tokenizer.nextToken() != StreamTokenizer.TT_EOL ) {;}
					token = tokenizer.nextToken();
					continue;
				}
				token = tokenizer.nextToken(); // skip newline
			}
			br.close();
		}
		catch (FileNotFoundException fnfe) {
			ErrorManager.error(ErrorManager.MSG_CANNOT_FIND_TOKENS_FILE,
							   fullName);
		}
		catch (IOException ioe) {
			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
							   fullName,
							   ioe);
		}
		catch (Exception e) {
			ErrorManager.error(ErrorManager.MSG_ERROR_READING_TOKENS_FILE,
							   fullName,
							   e);
		}
		return maxTokenType;
	}

	/** Given a token type, get a meaningful name for it such as the ID
	 *  or string literal.  If this is a lexer and the ttype is in the
	 *  char vocabulary, compute an ANTLR-valid (possibly escaped) char literal.
	 */
	public String getTokenDisplayName(int ttype) {
		String tokenName = null;
		int index=0;
		// inside any target's char range and is lexer grammar?
		if ( this.type==LEXER &&
			 ttype >= Label.MIN_CHAR_VALUE && ttype <= Label.MAX_CHAR_VALUE )
		{
			return getANTLRCharLiteralForChar(ttype);
		}
		// faux label?
		else if ( ttype<0 ) {
			tokenName = (String)typeToTokenList.get(Label.NUM_FAUX_LABELS+ttype);
		}
		else {
			// compute index in typeToTokenList for ttype
			index = ttype-1; // normalize to 0..n-1
			index += Label.NUM_FAUX_LABELS;     // jump over faux tokens
			if ( index<typeToTokenList.size() ) {
				tokenName = (String)typeToTokenList.get(index);
			}
			else {
				tokenName = String.valueOf(ttype);
			}
		}
		//System.out.println("getTokenDisplaYanme ttype="+ttype+", index="+index+", name="+tokenName);
		return tokenName;
	}

	/** Get the list of ANTLR String literals */
	public Set getStringLiterals() {
		return stringLiteralToTypeMap.keySet();
	}

	public int getGrammarMaxLookahead() {
		if ( global_k>=0 ) {
			return global_k;
		}
		/*
		Integer kI = (Integer)getOption("k");
		if ( kI!=null ) {
			global_k = kI.intValue();
		}
		else {
			global_k = 0;
		}
		*/
		Object k = getOption("k");
		if ( k==null ) {
			global_k = 0;
		}
		else if (k instanceof Integer) {
			Integer kI = (Integer)k;
			global_k = kI.intValue();
		}
		else {
			// must be String "*"
			if ( k.equals("*") ) {  // this the default anyway
				global_k = 0;
			}
		}
		return global_k;
	}

	/** Save the option key/value pair and process it; return the key
	 *  or null if invalid option.
	 */
	public String setOption(String key, Object value, antlr.Token optionsStartToken) {
		if ( !legalOptions.contains(key) ) {
			ErrorManager.grammarError(ErrorManager.MSG_ILLEGAL_OPTION,
									  this,
									  optionsStartToken,
									  key);
			return null;
		}
		if ( !optionIsValid(key, value) ) {
			return null;
		}
		if ( options==null ) {
			options = new HashMap();
		}
		options.put(key, value);
		return key;
	}

	public void setOptions(Map options, antlr.Token optionsStartToken) {
		if ( options==null ) {
			this.options = null;
			return;
		}
		Set keys = options.keySet();
		for (Iterator it = keys.iterator(); it.hasNext();) {
			String optionName = (String) it.next();
			Object optionValue = options.get(optionName);
			String stored=setOption(optionName, optionValue, optionsStartToken);
			if ( stored==null ) {
				it.remove();
			}
		}
	}

	public Object getOption(String key) {
		Object value = null;
		if ( options!=null ) {
			value = options.get(key);
		}
		if ( value==null ) {
			value = defaultOptions.get(key);
		}
		return value;
	}

	public boolean optionIsValid(String key, Object value) {
		return true;
	}

	public boolean buildAST() {
		String outputType = (String)getOption("output");
		if ( outputType!=null ) {
			return outputType.equals("AST");
		}
		return false;
	}

	public boolean isBuiltFromString() {
		return builtFromString;
	}

	public boolean buildTemplate() {
		String outputType = (String)getOption("output");
		if ( outputType!=null ) {
			return outputType.equals("template");
		}
		return false;
	}

	public Collection getRules() {
		return nameToRuleMap.values();
	}

	public void setRuleAST(String ruleName, GrammarAST t) {
		Rule r = (Rule)nameToRuleMap.get(ruleName);
		if ( r!=null ) {
			r.tree = t;
			r.EORNode = t.getLastChild();
		}
	}

	public void setRuleStartState(String ruleName, NFAState startState) {
		Rule r = (Rule)nameToRuleMap.get(ruleName);
		if ( r!=null ) {
			r.startState = startState;
		}
	}

	public void setRuleStopState(String ruleName, NFAState stopState) {
		Rule r = (Rule)nameToRuleMap.get(ruleName);
		if ( r!=null ) {
			r.stopState = stopState;
		}
	}

	public NFAState getRuleStartState(String ruleName) {
		Rule r = (Rule)nameToRuleMap.get(ruleName);
		if ( r!=null ) {
			return r.startState;
		}
		return null;
	}

	public String getRuleModifier(String ruleName) {
		Rule r = (Rule)nameToRuleMap.get(ruleName);
		if ( r!=null ) {
			return r.modifier;
		}
		return null;
	}

	public NFAState getRuleStopState(String ruleName) {
		Rule r = (Rule)nameToRuleMap.get(ruleName);
		if ( r!=null ) {
			return r.stopState;
		}
		return null;
	}

	public int assignDecisionNumber(NFAState state) {
		decisionNumber++;
		state.setDecisionNumber(decisionNumber);
		return decisionNumber;
	}

	protected Decision getDecision(int decision) {
		int index = decision-1;
		if ( index >= indexToDecision.size() ) {
			return null;
		}
		Decision d = (Decision)indexToDecision.get(index);
		return d;
	}

	protected Decision createDecision(int decision) {
		int index = decision-1;
		if ( index < indexToDecision.size() ) {
			return getDecision(decision); // don't recreate
		}
		Decision d = new Decision();
		d.decision = decision;
		indexToDecision.setSize(getNumberOfDecisions());
		indexToDecision.set(index, d);
		return d;
	}

	public List getDecisionNFAStartStateList() {
		List states = new ArrayList(100);
		for (int d = 0; d < indexToDecision.size(); d++) {
			Decision dec = (Decision) indexToDecision.elementAt(d);
			states.add(dec.startState);
		}
		return states;
	}

	public NFAState getDecisionNFAStartState(int decision) {
		Decision d = getDecision(decision);
		if ( d==null ) {
			return null;
		}
		return d.startState;
	}

	public DFA getLookaheadDFA(int decision) {
		Decision d = getDecision(decision);
		if ( d==null ) {
			return null;
		}
		return d.dfa;
	}

	public GrammarAST getDecisionBlockAST(int decision) {
		Decision d = getDecision(decision);
		if ( d==null ) {
			return null;
		}
		return d.blockAST;
	}

	/** returns a list of column numbers for all decisions
	 *  on a particular line so ANTLRWorks choose the decision
	 *  depending on the location of the cursor (otherwise,
	 *  ANTLR
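The methods above belong to ANTLR 3's Grammar class. As a rough illustration of how they fit together, here is a minimal, hypothetical sketch (not part of grammar.java) that follows the javadoc of importTokenVocabulary(Grammar): use the Grammar() ctor, import a token vocabulary, then call setGrammarContent(). The class name GrammarUsageSketch, the import path org.antlr.tool.Grammar, and the surrounding setup are assumptions and may differ between ANTLR 3 releases.

// Hypothetical usage sketch; assumes ANTLR 3's org.antlr.tool.Grammar is on the classpath.
import org.antlr.tool.Grammar;

public class GrammarUsageSketch {
	public static void main(String[] args) throws Exception {
		// Per the javadoc above: Grammar() ctor, then importTokenVocabulary(),
		// then setGrammarContent(); useful mainly for testing/interpreting grammars.
		Grammar lexerGrammar = new Grammar();   // assumed no-arg ctor per the javadoc
		// ... assume lexerGrammar has been populated elsewhere ...
		Grammar parserGrammar = new Grammar();

		// Pull token definitions from another grammar already in memory;
		// returns the maximum token type encountered.
		int maxType = parserGrammar.importTokenVocabulary(lexerGrammar);

		// Alternatively, load <vocabName>.tokens from the tool's library directory.
		// A .tokens file is plain text with one NAME=type (or 'literal'=type) pair
		// per line, which is what importTokenVocabulary(String) parses above.
		// int maxType = parserGrammar.importTokenVocabulary("MyLexer");

		// Static helper: resolve \x and \uXXXX escapes in a grammar string literal
		// (the outer quotes are skipped) into plain 16-bit chars.
		StringBuffer chars =
			Grammar.getUnescapedStringFromGrammarStringLiteral("'\\u0041\\n'");
		System.out.println("unescaped=" + chars + ", maxTokenType=" + maxType);
	}
}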
