⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 grammar.java

📁 ANTLR(ANother Tool for Language Recognition)它是这样的一种工具
💻 JAVA
📖 第 1 页 / 共 5 页
字号:
			// Tail of an enclosing DFA-construction method whose start is outside this
			// view (page is truncated); wall-clock timing of overall DFA creation.
			catch(InterruptedException e) {
				ErrorManager.internalError("what the hell? DFA interruptus", e);
			}
		}
		long stop = System.currentTimeMillis();
		DFACreationWallClockTimeInMS = stop - start;
		// indicate that we've finished building DFA (even if #decisions==0)
		allDecisionDFACreated = true;
	}

	/** Build the lookahead DFA for a single decision.  Skips decisions inside
	 *  syntactic-predicate rules whose predicates were never used by any DFA.
	 *  If full analysis aborts (or the decision is non-regular with
	 *  auto-backtracking on), retries with the k=1 option forced; records the
	 *  result via setLookaheadDFA and maps "line:col" to the DFA for ANTLRWorks.
	 */
	public void createLookaheadDFA(int decision) {
		Decision d = getDecision(decision);
		String enclosingRule = d.startState.getEnclosingRule();
		Rule r = getRule(enclosingRule);
		//System.out.println("createLookaheadDFA(): "+enclosingRule+" dec "+decision+"; synprednames prev used "+synPredNamesUsedInDFA);
		// unused synpred rule: no point building its DFA
		if ( r.isSynPred && !synPredNamesUsedInDFA.contains(enclosingRule) ) {
			return;
		}
		NFAState decisionStartState = getDecisionNFAStartState(decision);
		long startDFA=0,stopDFA=0;
		if ( watchNFAConversion ) {
			System.out.println("--------------------\nbuilding lookahead DFA (d="
							   +decisionStartState.getDecisionNumber()+") for "+
							   decisionStartState.getDescription());
			startDFA = System.currentTimeMillis();
		}
		DFA lookaheadDFA = new DFA(decision, decisionStartState);
		if ( (lookaheadDFA.analysisAborted() && // did analysis bug out?
			 lookaheadDFA.getUserMaxLookahead()!=1) || // either k=* or k>1
			 (lookaheadDFA.probe.nonRegularDecision() && // >1 alt recurses, k=*
		      lookaheadDFA.getAutoBacktrackMode()) )
		{
			// set k=1 option if not already k=1 and try again
			// clean up tracking stuff
			decisionsWhoseDFAsUsesSynPreds.remove(lookaheadDFA);
			// TODO: clean up synPredNamesUsedInDFA also (harder)
			lookaheadDFA = null; // make sure other memory is "free" before redoing
			d.blockAST.setOption(this, "k", Utils.integer(1));
			//System.out.println("trying decision "+decision+" again with k=1");
			lookaheadDFA = new DFA(decision, decisionStartState);
			if ( lookaheadDFA.analysisAborted() ) { // did analysis bug out?
				ErrorManager.internalError("could not even do k=1 for decision "+decision);
			}
		}
		setLookaheadDFA(decision, lookaheadDFA);

		// create map from line:col to decision DFA (for ANTLRWorks)
		GrammarAST decisionAST = nfa.grammar.getDecisionBlockAST(lookaheadDFA.decisionNumber);
		int line = decisionAST.getLine();
		int col = decisionAST.getColumn();
		lineColumnToLookaheadDFAMap.put(new StringBuffer().append(line + ":")
										.append(col).toString(), lookaheadDFA);

		if ( watchNFAConversion ) {
			stopDFA = System.currentTimeMillis();
			System.out.println("cost: "+lookaheadDFA.getNumberOfStates()+
							   " states, "+(int)(stopDFA-startDFA)+" ms");
		}
	}

	/** Terminate DFA creation (grammar analysis).  Happens on DFA creation
	 *  boundaries so it might take DFA.MAX_TIME_PER_DFA_CREATION ms.
	 */
	public void abortNFAToDFAConversion() {
		externalAnalysisAbort = true;
	}

	/** Return a new unique integer in the token type space */
	public int getNewTokenType() {
		maxTokenType++;
		return maxTokenType;
	}

	/** Define a token at a particular token type value.  Blast an
	 *  old value with a new one.  This is called directly during import vocab
     *  operation to set up tokens with specific values.
     */
    public void defineToken(String text, int tokenType) {
		if ( tokenIDToTypeMap.get(text)!=null ) {
			// already defined?  Must be predefined one like EOF;
			// do nothing
			return;
		}
		// the index in the typeToTokenList table is actually shifted to
		// hold faux labels as you cannot have negative indices.
		// Leading single-quote distinguishes string literals from token IDs.
        if ( text.charAt(0)=='\'' ) {
            stringLiteralToTypeMap.put(text, Utils.integer(tokenType));
        }
        else { // must be a label like ID
            tokenIDToTypeMap.put(text, Utils.integer(tokenType));
        }
		int index = Label.NUM_FAUX_LABELS+tokenType-1;
		//System.out.println("defining "+name+" token "+text+" at type="+tokenType+", index="+index);
		this.maxTokenType = Math.max(this.maxTokenType, tokenType);
        if ( index>=typeToTokenList.size() ) {
			typeToTokenList.setSize(index+1);
		}
		String prevToken = (String)typeToTokenList.get(index);
		if ( prevToken==null || prevToken.charAt(0)=='\'' ) {
			// only record if nothing there before or if thing before was a literal
			typeToTokenList.set(index, text);
		}
    }

	/** Define a new rule.  A new rule index is created by incrementing
     *  ruleIndex.
     */
	public void defineRule(antlr.Token ruleToken,
						   String modifier,
						   Map options,
						   GrammarAST tree,
						   GrammarAST argActionAST,
						   int numAlts)
	{
		String ruleName = ruleToken.getText();
		/*
		System.out.println("defineRule("+ruleName+",modifier="+modifier+
						   "): index="+ruleIndex);
		*/
		// report redefinition but fall through and overwrite the entry anyway
		if ( getRule(ruleName)!=null ) {
			ErrorManager.grammarError(ErrorManager.MSG_RULE_REDEFINITION,
									  this, ruleToken, ruleName);
        }
		Rule r = new Rule(this, ruleName, ruleIndex, numAlts);
		r.modifier = modifier;
        nameToRuleMap.put(ruleName, r);
		setRuleAST(ruleName, tree);
		r.setOptions(options, ruleToken);
		r.argActionAST = argActionAST;
        ruleIndexToRuleList.setSize(ruleIndex+1);
        ruleIndexToRuleList.set(ruleIndex, ruleName);
        ruleIndex++;
		// rules generated for syntactic predicates share a reserved name prefix
		if ( ruleName.startsWith(SYNPRED_RULE_PREFIX) ) {
			r.isSynPred = true;
		}
	}

	/** Define a new predicate and get back its name for use in building
	 *  a semantic predicate reference to the syn pred.
	 */
	public String defineSyntacticPredicate(GrammarAST blockAST,
										   String currentRuleName)
	{
		// lazily create map; LinkedHashMap preserves definition order
		if ( nameToSynpredASTMap==null ) {
			nameToSynpredASTMap = new LinkedHashMap();
		}
		String predName = null;
		// names are synpred1, synpred2, ... in definition order
		predName = SYNPRED_RULE_PREFIX+(nameToSynpredASTMap.size() + 1);
		nameToSynpredASTMap.put(predName, blockAST);
		return predName;
	}

	/** Map of synpred name to its block AST; null if no synpreds defined. */
	public LinkedHashMap getSyntacticPredicates() {
		return nameToSynpredASTMap;
	}

	/** Look up the block AST for a named syntactic predicate, or null. */
	public GrammarAST getSyntacticPredicate(String name) {
		if ( nameToSynpredASTMap==null ) {
			return null;
		}
		return (GrammarAST)nameToSynpredASTMap.get(name);
	}

	/** Record that a DFA uses a syntactic predicate in its semantic context. */
	public void synPredUsedInDFA(DFA dfa, SemanticContext semCtx) {
		decisionsWhoseDFAsUsesSynPreds.add(dfa);
		semCtx.trackUseOfSyntacticPredicates(this); // walk ctx looking for preds
		/*
		System.out.println("after tracking use for dec "+dfa.decisionNumber+": "+
		 synPredNamesUsedInDFA);*/
	}

	/** Given @scope::name {action} define it for this grammar.  Later,
	 *  the code generator will ask for the actions table.
	 */
	public void defineAction(GrammarAST ampersandAST,
							 String scope,
							 GrammarAST nameAST,
							 GrammarAST actionAST)
	{
		if ( scope==null ) {
			scope = getDefaultActionScope(type);
		}
		//System.out.println("@"+scope+"::"+nameAST.getText()+"{"+actionAST.getText()+"}");
		String actionName = nameAST.getText();
		Map scopeActions = (Map)actions.get(scope);
		if ( scopeActions==null ) {
			scopeActions = new HashMap();
			actions.put(scope, scopeActions);
		}
		GrammarAST a = (GrammarAST)scopeActions.get(actionName);
		// same action name defined twice in one scope is an error; first wins
		if ( a!=null ) {
			ErrorManager.grammarError(
				ErrorManager.MSG_ACTION_REDEFINITION,this,
				nameAST.getToken(),nameAST.getText());
		}
		else {
			scopeActions.put(actionName,actionAST);
		}
	}

	/** Table of scope name -> (action name -> action AST). */
	public Map getActions() {
		return actions;
	}

	/** Given a grammar type, what should be the default action scope?
	 *  If I say @members in a COMBINED grammar, for example, the
	 *  default scope should be "parser".
	 */
	public String getDefaultActionScope(int grammarType) {
		switch (grammarType) {
			case Grammar.LEXER :
				return "lexer";
			case Grammar.PARSER :
			case Grammar.COMBINED :
				return "parser";
			case Grammar.TREE_PARSER :
				return "treeparser";
		}
		// unknown grammar type
		return null;
	}

	/** Reconstruct the text of a lexer rule found in a combined grammar and
	 *  add it to the generated lexer grammar template, re-inserting the
	 *  delimiters ((, {...}, {...}?, [...]) that the lexer stripped.
	 */
	public void defineLexerRuleFoundInParser(antlr.Token ruleToken,
											 GrammarAST ruleAST)
	{
		//System.out.println("rule tree is:\n"+ruleAST.toStringTree());
		/*
		String ruleText = tokenBuffer.toOriginalString(ruleAST.ruleStartTokenIndex,
											   ruleAST.ruleStopTokenIndex);
		*/
		// first, create the text of the rule
		StringBuffer buf = new StringBuffer();
		// emit a src marker so errors map back to the original combined grammar
		buf.append("// $ANTLR src \"");
		buf.append(getFileName());
		buf.append("\" ");
		buf.append(ruleAST.getLine());
		buf.append("\n");
		for (int i=ruleAST.ruleStartTokenIndex;
			 i<=ruleAST.ruleStopTokenIndex && i<tokenBuffer.size();
			 i++)
		{
			TokenWithIndex t = (TokenWithIndex)tokenBuffer.getToken(i);
			// undo the text deletions done by the lexer (ugh)
			if ( t.getType()==ANTLRParser.BLOCK ) {
				buf.append("(");
			}
			else if ( t.getType()==ANTLRParser.ACTION ) {
				buf.append("{");
				buf.append(t.getText());
				buf.append("}");
			}
			else if ( t.getType()==ANTLRParser.SEMPRED ||
				t.getType()==ANTLRParser.SYN_SEMPRED ||
				t.getType()==ANTLRParser.GATED_SEMPRED )
			{
				buf.append("{");
				buf.append(t.getText());
				buf.append("}?");
			}
			else if ( t.getType()==ANTLRParser.ARG_ACTION ) {
				buf.append("[");
				buf.append(t.getText());
				buf.append("]");
			}
			else {
				buf.append(t.getText());
			}
		}
		String ruleText = buf.toString();
		//System.out.println("[["+ruleText+"]]");
		// now put the rule into the lexer grammar template
		lexerGrammarST.setAttribute("rules", ruleText);
		// track this lexer rule's name
		lexerRules.add(ruleToken.getText());
	}

	/** If someone does PLUS='+' in the parser, must make sure we get
	 *  "PLUS : '+' ;" in lexer not "T73 : '+';"
	 */
	public void defineLexerRuleForAliasedStringLiteral(String tokenID,
													   String literal,
													   int tokenType)
	{
		lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
									tokenID,
									Utils.integer(tokenType),
									literal);
		// track this lexer rule's name
		lexerRules.add(tokenID);
	}

	/** Add a lexer rule for an unaliased string literal; the rule name is
	 *  computed from the literal (e.g. T73-style names).
	 */
	public void defineLexerRuleForStringLiteral(String literal, int tokenType) {
		//System.out.println("defineLexerRuleForStringLiteral: "+literal+" "+tokenType);
		lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
									computeTokenNameFromLiteral(tokenType,literal),
									Utils.integer(tokenType),
									literal);
	}

	/*
	public void defineLexerRuleForCharLiteral(String literal, int tokenType) {
		lexerGrammarST.setAttribute("literals.{ruleName,type,literal}",
									computeTokenNameFromLiteral(tokenType,literal),
									Utils.integer(tokenType),
									literal);
	}
	*/

	/** Look up a rule by name; null if undefined. */
	public Rule getRule(String ruleName) {
		Rule r = (Rule)nameToRuleMap.get(ruleName);
		return r;
	}

	/** Rule name -> rule index, or INVALID_RULE_INDEX if unknown. */
    public int getRuleIndex(String ruleName) {
		Rule r = getRule(ruleName);
		if ( r!=null ) {
			return r.index;
		}
        return INVALID_RULE_INDEX;
    }

	/** Rule index -> rule name. */
    public String getRuleName(int ruleIndex) {
        return (String)ruleIndexToRuleList.get(ruleIndex);
    }

	/** Create and register a named global attribute scope. */
	public AttributeScope defineGlobalScope(String name, Token scopeAction) {
		AttributeScope scope = new AttributeScope(this, name, scopeAction);
		scopes.put(name,scope);
		return scope;
	}

	/** Create a return-value scope for a rule (not registered in scopes map). */
	public AttributeScope createReturnScope(String ruleName, Token retAction) {
		AttributeScope scope = new AttributeScope(this, ruleName, retAction);
		scope.isReturnScope = true;
		return scope;
	}

	/** Create a dynamic rule scope for a rule (not registered in scopes map). */
	public AttributeScope createRuleScope(String ruleName, Token scopeAction) {
		AttributeScope scope = new AttributeScope(this, ruleName, scopeAction);
		scope.isDynamicRuleScope = true;
		return scope;
	}

	/** Create a parameter scope for a rule's arguments (not registered in scopes map). */
	public AttributeScope createParameterScope(String ruleName, Token argAction) {
		AttributeScope scope = new AttributeScope(this, ruleName, argAction);
		scope.isParameterScope = true;
		return scope;
	}

	/** Get a global scope
	 */
	public AttributeScope getGlobalScope(String name) {
		return (AttributeScope)scopes.get(name);
	}

	/** Map of global scope name -> AttributeScope. */
	public Map getGlobalScopes() {
		return scopes;
	}

	/** Define a label defined in a rule r; check the validity then ask the
	 *  Rule object to actually define it.
	 */
	protected void defineLabel(Rule r, antlr.Token label, GrammarAST element, int type) {
        boolean err = nameSpaceChecker.checkForLabelTypeMismatch(r, label, type);
		if ( err ) {
			return;
		}
		r.defineLabel(label, element, type);
	}

	/** Define a label on a token reference (x=ID) in the named rule. */
	public void defineTokenRefLabel(String ruleName,
									antlr.Token label,
									GrammarAST tokenRef)
	{
        Rule r = getRule(ruleName);
		if ( r!=null ) {
			defineLabel(r, label, tokenRef, TOKEN_LABEL);
		}
	}

	/** Define a label on a rule reference (x=expr) in the named rule. */
	public void defineRuleRefLabel(String ruleName,
								   antlr.Token label,
								   GrammarAST ruleRef)
	{
		Rule r = getRule(ruleName);
		if ( r!=null ) {
			defineLabel(r, label, ruleRef, RULE_LABEL);
		}
	}

	/** Define a list label (x+=...) in the named rule.
	 *  NOTE(review): body continues on the next page of this paginated source;
	 *  only the first statement is visible here.
	 */
	public void defineTokenListLabel(String ruleName,
									 antlr.Token label,
									 GrammarAST element)
	{
		Rule r = getRule(ruleName);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -