📄 codegenerator.java
字号:
} } } // closes definitions that begin before this view (truncated above)

    /** Generate a token names table that maps token type to a printable
     *  name: either the label like INT or the literal like "begin".
     *  Each name is converted to a target-language string literal and
     *  appended to the template's multi-valued "tokenNames" attribute.
     */
    protected void genTokenTypeNames(StringTemplate code) {
        for (int t=Label.MIN_TOKEN_TYPE; t<=grammar.getMaxTokenType(); t++) {
            String tokenName = grammar.getTokenDisplayName(t);
            if ( tokenName!=null ) {
                // quote=true: emit as a quoted target-language string literal
                tokenName=target.getTargetStringLiteralFromString(tokenName, true);
                code.setAttribute("tokenNames", tokenName);
            }
        }
    }

    /** Get a meaningful name for a token type useful during code generation.
     *  Literals without associated names are converted to the string equivalent
     *  of their integer values. Used to generate x==ID and x==34 type comparisons
     *  etc...  Essentially we are looking for the most obvious way to refer
     *  to a token type in the generated code.  If in the lexer, return the
     *  char literal translated to the target language.  For example, ttype=10
     *  will yield '\n' from the getTokenDisplayName method.  That must
     *  be converted to the target languages literals.  For most C-derived
     *  languages no translation is needed.
     */
    public String getTokenTypeAsTargetLabel(int ttype) {
        if ( grammar.type==Grammar.LEXER ) {
            String name = grammar.getTokenDisplayName(ttype);
            return target.getTargetCharLiteralFromANTLRCharLiteral(this,name);
        }
        return target.getTokenTypeAsTargetLabel(this,ttype);
    }

    /** Generate a token vocab file with all the token names/types.
     *  For example:
     *      ID=7
     *      FOR=8
     *      'for'=8
     *
     *  This is independent of the target language; used by antlr internally.
     *  Returns the filled-in vocab-file template; token IDs are emitted
     *  first, then string literals (a literal may repeat a type, as above).
     */
    protected StringTemplate genTokenVocabOutput() {
        StringTemplate vocabFileST =
            new StringTemplate(vocabFilePattern, AngleBracketTemplateLexer.class);
        vocabFileST.setName("vocab-file");
        // make constants for the token names
        Iterator tokenIDs = grammar.getTokenIDs().iterator();
        while (tokenIDs.hasNext()) {
            String tokenID = (String) tokenIDs.next();
            int tokenType = grammar.getTokenType(tokenID);
            if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
                // tokens.{name,type} is ST's aggregate-attribute syntax:
                // one "tokens" element with fields name and type
                vocabFileST.setAttribute("tokens.{name,type}", tokenID,
                                         Utils.integer(tokenType));
            }
        }
        // now dump the strings
        Iterator literals = grammar.getStringLiterals().iterator();
        while (literals.hasNext()) {
            String literal = (String) literals.next();
            int tokenType = grammar.getTokenType(literal);
            if ( tokenType>=Label.MIN_TOKEN_TYPE ) {
                vocabFileST.setAttribute("tokens.{name,type}", literal,
                                         Utils.integer(tokenType));
            }
        }
        return vocabFileST;
    }

    /** Translate an action's text into a list of chunks (strings and
     *  templates).  ARG_ACTION subtrees are delegated to
     *  translateArgAction; everything else goes through the action
     *  translator lexer and then target-specific post-processing.
     */
    public List translateAction(String ruleName, GrammarAST actionTree) {
        if ( actionTree.getType()==ANTLRParser.ARG_ACTION ) {
            return translateArgAction(ruleName, actionTree);
        }
        ActionTranslatorLexer translator =
            new ActionTranslatorLexer(this,ruleName,actionTree);
        List chunks = translator.translateToChunks();
        chunks = target.postProcessAction(chunks, actionTree.token);
        return chunks;
    }

    /** Translate an action like [3,"foo",a[3]] and return a List of the
     *  translated actions.  Because actions are translated to a list of
     *  chunks, this returns List&lt;List&lt;String|StringTemplate&gt;&gt;.
     *
     *  Simple ',' separator is assumed.
     *  NOTE(review): StringTokenizer splits on every comma, so an argument
     *  whose text itself contains ',' (e.g. a nested call or string literal
     *  with a comma) would be split incorrectly — confirm callers never pass
     *  such args.  Returns null (not an empty list) when there are no args.
     */
    public List translateArgAction(String ruleName, GrammarAST actionTree) {
        String actionText = actionTree.token.getText();
        StringTokenizer argTokens = new StringTokenizer(actionText, ",");
        List args = new ArrayList();
        while ( argTokens.hasMoreTokens() ) {
            String arg = (String)argTokens.nextToken();
            // wrap each arg in its own ACTION token so the translator can
            // process it independently of its siblings
            antlr.Token actionToken =
                new antlr.CommonToken(ANTLRParser.ACTION,arg);
            ActionTranslatorLexer translator =
                new ActionTranslatorLexer(this,ruleName,
                                          actionToken,
                                          actionTree.outerAltNum);
            List chunks = translator.translateToChunks();
            chunks = target.postProcessAction(chunks, actionToken);
            args.add(chunks);
        }
        if ( args.size()==0 ) {
            return null;
        }
        return args;
    }

    /** Given a template constructor action like %foo(a={...}) in
     *  an action, translate it to the appropriate template constructor
     *  from the templateLib. This translates a *piece* of the action.
     *  Two-phase: first parse the text with the ANTLR grammar parser
     *  (rewrite_template rule), then walk the resulting AST with the
     *  code-gen tree walker to build the StringTemplate.  Returns null
     *  when the tree-walk phase fails (error already reported).
     */
    public StringTemplate translateTemplateConstructor(String ruleName,
                                                       int outerAltNum,
                                                       antlr.Token actionToken,
                                                       String templateActionText)
    {
        // first, parse with antlr.g
        //System.out.println("translate template: "+templateActionText);
        ANTLRLexer lexer = new ANTLRLexer(new StringReader(templateActionText));
        lexer.setFilename(grammar.getFileName());
        lexer.setTokenObjectClass("antlr.TokenWithIndex");
        TokenStreamRewriteEngine tokenBuffer = new TokenStreamRewriteEngine(lexer);
        // hide whitespace and comments from the parser
        tokenBuffer.discard(ANTLRParser.WS);
        tokenBuffer.discard(ANTLRParser.ML_COMMENT);
        tokenBuffer.discard(ANTLRParser.COMMENT);
        tokenBuffer.discard(ANTLRParser.SL_COMMENT);
        ANTLRParser parser = new ANTLRParser(tokenBuffer);
        parser.setFilename(grammar.getFileName());
        parser.setASTNodeClass("org.antlr.tool.GrammarAST");
        try {
            parser.rewrite_template();
        }
        catch (RecognitionException re) {
            // user-visible grammar error: bad template action syntax
            ErrorManager.grammarError(ErrorManager.MSG_INVALID_TEMPLATE_ACTION,
                                      grammar,
                                      actionToken,
                                      templateActionText);
        }
        catch (Exception tse) {
            ErrorManager.internalError("can't parse template action",tse);
        }
        GrammarAST rewriteTree = (GrammarAST)parser.getAST();

        // then translate via codegen.g
        CodeGenTreeWalker gen = new CodeGenTreeWalker();
        gen.init(grammar);
        gen.currentRuleName = ruleName;
        gen.outerAltNum = outerAltNum;
        StringTemplate st = null;
        try {
            st = gen.rewrite_template((AST)rewriteTree);
        }
        catch (RecognitionException re) {
            ErrorManager.error(ErrorManager.MSG_BAD_AST_STRUCTURE,
                               re);
        }
        return st;
    }

    /** Report an invalid $x::y dynamic-scope reference: either the scope x
     *  is unknown or the scope exists but has no attribute y.  Checks the
     *  global scopes first, then falls back to rule scope.
     */
    public void issueInvalidScopeError(String x,
                                       String y,
                                       Rule enclosingRule,
                                       antlr.Token actionToken,
                                       int outerAltNum)
    {
        //System.out.println("error $"+x+"::"+y);
        Rule r = grammar.getRule(x);
        AttributeScope scope = grammar.getGlobalScope(x);
        if ( scope==null ) {
            if ( r!=null ) {
                scope = r.ruleScope; // if not global, might be rule scope
            }
        }
        if ( scope==null ) {
            ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE,
                                      grammar,
                                      actionToken,
                                      x);
        }
        else if ( scope.getAttribute(y)==null ) {
            ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_DYNAMIC_SCOPE_ATTRIBUTE,
                                      grammar,
                                      actionToken,
                                      x,
                                      y);
        }
    }

    /** Report an invalid $x.y attribute reference found in an action.
     *  Distinguishes: action outside any rule; unknown attribute on a
     *  referenced rule or rule label; illegal reference to a parameter;
     *  and illegal reference to a dynamically-scoped attribute.
     */
    public void issueInvalidAttributeError(String x,
                                           String y,
                                           Rule enclosingRule,
                                           antlr.Token actionToken,
                                           int outerAltNum)
    {
        //System.out.println("error $"+x+"."+y);
        if ( enclosingRule==null ) {
            // action not in a rule
            ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
                                      grammar,
                                      actionToken,
                                      x,
                                      y);
            return;
        }

        // action is in a rule
        Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);

        if ( label!=null || enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ) {
            // $rulelabel.attr or $ruleref.attr; must be unknown attr
            String refdRuleName = x;
            if ( label!=null ) {
                refdRuleName = enclosingRule.getRuleLabel(x).referencedRuleName;
            }
            // NOTE(review): getRule(refdRuleName) is assumed non-null here
            // because the label/ref was resolved above — confirm, else NPE
            Rule refdRule = grammar.getRule(refdRuleName);
            AttributeScope scope = refdRule.getAttributeScope(y);
            if ( scope==null ) {
                ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_RULE_ATTRIBUTE,
                                          grammar,
                                          actionToken,
                                          refdRuleName,
                                          y);
            }
            else if ( scope.isParameterScope ) {
                ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_PARAMETER_REF,
                                          grammar,
                                          actionToken,
                                          refdRuleName,
                                          y);
            }
            else if ( scope.isDynamicRuleScope ) {
                ErrorManager.grammarError(ErrorManager.MSG_INVALID_RULE_SCOPE_ATTRIBUTE_REF,
                                          grammar,
                                          actionToken,
                                          refdRuleName,
                                          y);
            }
        }
    }

    /** Report an invalid isolated $x reference (no ".attr" part).
     *  Chooses among: isolated rule-scope ref, isolated dynamic rule
     *  attribute, or a plain unknown simple attribute.
     */
    public void issueInvalidAttributeError(String x,
                                           Rule enclosingRule,
                                           antlr.Token actionToken,
                                           int outerAltNum)
    {
        //System.out.println("error $"+x);
        if ( enclosingRule==null ) {
            // action not in a rule
            ErrorManager.grammarError(ErrorManager.MSG_ATTRIBUTE_REF_NOT_IN_RULE,
                                      grammar,
                                      actionToken,
                                      x);
            return;
        }

        // action is in a rule
        Grammar.LabelElementPair label = enclosingRule.getRuleLabel(x);
        AttributeScope scope = enclosingRule.getAttributeScope(x);

        if ( label!=null ||
             enclosingRule.getRuleRefsInAlt(x, outerAltNum)!=null ||
             enclosingRule.name.equals(x) )
        {
            ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_SCOPE,
                                      grammar,
                                      actionToken,
                                      x);
        }
        else if ( scope!=null && scope.isDynamicRuleScope ) {
            ErrorManager.grammarError(ErrorManager.MSG_ISOLATED_RULE_ATTRIBUTE,
                                      grammar,
                                      actionToken,
                                      x);
        }
        else {
            ErrorManager.grammarError(ErrorManager.MSG_UNKNOWN_SIMPLE_ATTRIBUTE,
                                      grammar,
                                      actionToken,
                                      x);
        }
    }

    // M I S C

    public StringTemplateGroup getTemplates() {
        return templates;
    }

    public StringTemplateGroup getBaseTemplates() {
        return baseTemplates;
    }

    public void setDebug(boolean debug) {
        this.debug = debug;
    }

    public void setTrace(boolean trace) {
        this.trace = trace;
    }

    public void setProfile(boolean profile) {
        this.profile = profile;
        if ( profile ) {
            setDebug(true); // requires debug events
        }
    }

    public StringTemplate getRecognizerST() {
        return outputFileST;
    }

    /** Build the recognizer output-file name: grammar name + the
     *  grammar-type suffix (e.g. Parser/Lexer) + the target's code file
     *  extension (from the "codeFileExtension" template).
     */
    public String getRecognizerFileName(String name, int type) {
        StringTemplate extST = templates.getInstanceOf("codeFileExtension");
        String suffix = Grammar.grammarTypeToFileNameSuffix[type];
        return name+suffix+extST.toString();
    }

    /** What is the name of the vocab file generated for this grammar?
     *  Returns null if no .tokens file should be generated.
     */
    public String getVocabFileName() {
        if ( grammar.isBuiltFromString() ) {
            return null;
        }
        return grammar.name+VOCAB_FILE_EXTENSION;
    }

    /** Render the template to the named output file via the target's
     *  StringTemplateWriter (which handles line wrapping at lineWidth).
     *  NOTE(review): w.close() is not in a finally block, so the Writer
     *  leaks if code.write(wr) throws — consider try/finally.
     */
    public void write(StringTemplate code, String fileName) throws IOException {
        long start = System.currentTimeMillis();
        Writer w = tool.getOutputFile(grammar, fileName);
        // Write the output to a StringWriter
        StringTemplateWriter wr = templates.getStringTemplateWriter(w);
        wr.setLineWidth(lineWidth);
        code.write(wr);
        w.close();
        long stop = System.currentTimeMillis();
        //System.out.println("render time for "+fileName+": "+(int)(stop-start)+"ms");
    }

    /** You can generate a switch rather than if-then-else for a DFA state
     *  if there are no semantic predicates and the number of edge label
     *  values is small enough; e.g., don't generate a switch for a state
     *  containing an edge label such as 20..52330 (the resulting byte codes
     *  would overflow the method 65k limit probably).
     */
    protected boolean canGenerateSwitch(DFAState s) {
        if ( !GENERATE_SWITCHES_WHEN_POSSIBLE ) {
            return false;
        }
        int size = 0;
        for (int i = 0; i < s.getNumberOfTransitions(); i++) {
            Transition edge = (Transition) s.transition(i);
            if ( edge.label.isSemanticPredicate() ) {
                return false;
            }
            // can't do a switch if the edges are going to require predicates
            if ( edge.label.getAtom()==Label.EOT ) {
                int EOTPredicts = ((DFAState)edge.target).getUniquelyPredictedAlt();
                if ( EOTPredicts==NFA.INVALID_ALT_NUMBER ) {
                    // EOT target has to be a predicate then; no unique alt
                    return false;
                }
            }
            // if target is a state with gated preds, we need to use preds on
            // this edge then to reach it.
            if ( ((DFAState)edge.target).getGatedPredicatesInNFAConfigurations()!=null ) {
                return false;
            }
            size += edge.label.getSet().size();
        }
        if ( s.getNumberOfTransitions()<MIN_SWITCH_ALTS ||
             size>MAX_SWITCH_CASE_LABELS ) {
            return false;
        }
        return true;
    }

    /** Create a label to track a token / rule reference's result.
     *  Technically, this is a place where I break model-view separation
     *  as I am creating a variable name that could be invalid in a
     *  target language, however, label ::= &lt;ID&gt;&lt;INT&gt; is probably ok in
     *  all languages we care about.
     */
    public String createUniqueLabel(String name) {
        return new StringBuffer()
            .append(name).append(uniqueLabelNumber++).toString();
    }
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -