diagnosticcodegenerator.java

来自「SRI international 发布的OAA框架软件」· Java 代码 · 共 892 行 · 第 1/2 页

JAVA
892
字号
		_println("");
	}
	/** Generate diagnostic output for a zero-or-more subrule.
	 * @param blk The (...)* block to generate
	 */
	public void gen(ZeroOrMoreBlock blk) {
		// Bug fix: the banner previously read "(...)+", which denotes a
		// one-or-more block; zero-or-more is written "(...)*" (and the
		// matching "End" banner below says ZERO-OR-MORE).
		println("Start ZERO-OR-MORE (...)* block:");
		tabs++;
		genBlockPreamble(blk);
		// Warn if the analyzer cannot make a deterministic decision for this block.
		boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
		if (!ok) {
			println("Warning: This zero-or-more block is non-deterministic");
		}
		genCommonBlock(blk);
		tabs--;
		println("End ZERO-OR-MORE block.");
	}
	/** Walk one alternative, asking each element to emit its diagnostics.
	 * Stops at the block-end sentinel element.
	 * @param alt The alternative whose elements are generated
	 */
	protected void genAlt(Alternative alt) {
		AlternativeElement e = alt.head;
		while (!(e instanceof BlockEndElement)) {
			e.generate();
			e = e.next;
		}
		// Report the tree-construction specifier, if the alternative has one.
		if (alt.getTreeSpecifier() != null) {
			println("AST will be built as: " + alt.getTreeSpecifier().getText());
		}
	}
	/** Generate the header for a block, which may be a RuleBlock or a
	 * plain AlternativeBlock.  This generates any variable declarations,
	 * init-actions, and syntactic-predicate-testing variables.
	 * @param blk The block for which the preamble is to be generated.
	 */
	protected void genBlockPreamble(AlternativeBlock blk) {
		// dump out init action, if the grammar author supplied one
		if ( blk.initAction!=null ) {
			printAction("Init action: " + blk.initAction);
		}
	}
	/** Generate common diagnostic output for a block of alternatives:
	 * the block-wide lookahead set, then for each alternative its
	 * lookahead set, semantic/syntactic predicates, and elements.
	 * @param blk The block of alternatives to describe
	 */
	public void genCommonBlock(AlternativeBlock blk) {
		boolean singleAlt = (blk.alternatives.size() == 1);

		println("Start of an alternative block.");
		tabs++;
		println("The lookahead set for this block is:");
		tabs++;
		genLookaheadSetForBlock(blk);
		tabs--;

		if (singleAlt) {
			println("This block has a single alternative");
			if (blk.getAlternativeAt(0).synPred != null)
			{
				// Generate a warning if there is one alt and it has a synPred
				println("Warning: you specified a syntactic predicate for this alternative,");
				println("and it is the only alternative of a block and will be ignored.");
			}
		}
		else {
			println("This block has multiple alternatives:");
			tabs++;
		}

		for (int i=0; i<blk.alternatives.size(); i++) {
			Alternative alt = blk.getAlternativeAt(i);

			// Print lookahead set for alternate
			println("");
			if (i != 0) {
				print("Otherwise, ");
			} else {
				print("");
			}
			_println("Alternate(" + (i+1) + ") will be taken IF:");
			println("The lookahead set: ");
			tabs++;
			genLookaheadSetForAlt(alt);
			tabs--;
			if ( alt.semPred != null || alt.synPred != null ) {
				print("is matched, AND ");
			} else {
				println("is matched.");
			}

			// Dump semantic predicates
			if ( alt.semPred != null ) {
				_println("the semantic predicate:");
				tabs++;
				println(alt.semPred);
				// Bug fix: this decrement was missing, so every semantic
				// predicate leaked one level of indentation into all
				// subsequent output.  (The syntactic-predicate section
				// below has the balanced tabs++/tabs-- pair.)
				tabs--;
				if ( alt.synPred != null ) {
					print("is true, AND ");
				} else {
					println("is true.");
				}
			}

			// Dump syntactic predicate
			if ( alt.synPred != null ) {
				_println("the syntactic predicate:");
				tabs++;
				genSynPred( alt.synPred );
				tabs--;
				println("is matched.");
			}

			// Dump the alternative
			genAlt(alt);
		}
		println("");
		println("OTHERWISE, a NoViableAlt exception will be thrown");
		println("");

		if (!singleAlt) {
			tabs--;
			println("End of alternatives");
		}
		tabs--;
		println("End of alternative block.");
	}
	/** Print a textual representation of the FOLLOW set of a rule.
	 * @param blk The rule block of interest
	 */
	public void genFollowSetForRuleBlock(RuleBlock blk)
	{
		// FOLLOW(1) computed at the rule's end node is the rule's follow set.
		Lookahead followSet = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
		printSet(grammar.maxk, 1, followSet);
	}
	/** Generate the banner header that is common to all diagnostic TXT files. */
	protected void genHeader()
	{
		// Identify the source grammar and the generating tool.
		println("ANTLR-generated file resulting from grammar " + tool.grammarFile);
		println("Diagnostic output");
		println("");
		println("Terence Parr, MageLang Institute");
		println("with John Lilley, Empathy Software");
		println("ANTLR Version " + Tool.version + "; 1996,1997");
		println("");
		// Echo the user-supplied header action, indented one level.
		println("*** Header Action.");
		println("This action will appear at the top of all generated files.");
		tabs++;
		printAction(behavior.getHeaderAction(""));
		tabs--;
		println("*** End of Header Action");
		println("");
	}
	/** Print the lookahead set for a single alternative. */
	protected void genLookaheadSetForAlt(Alternative alt) {
		// A lexer alternative whose k==1 set contains epsilon matches anything.
		if ( doingLexRules && alt.cache[1].containsEpsilon() ) {
			println("MATCHES ALL");
			return;
		}
		// A nondeterministic decision falls back to the grammar-wide max k;
		// any predicates that are around will be generated later.
		int k = alt.lookaheadDepth;
		if ( k == GrammarAnalyzer.NONDETERMINISTIC ) {
			k = grammar.maxk;
		}
		// Print one set per lookahead level, 1..k.
		for (int level = 1; level <= k; level++) {
			printSet(k, level, alt.cache[level]);
		}
	}
	/** Print a textual representation of the lookahead set for a block.
	 * @param blk The block of interest
	 */
	public void genLookaheadSetForBlock(AlternativeBlock blk)
	{
		// The block's depth is the maximum lookahead depth over all of its
		// alternatives; any nondeterministic alternative forces the
		// grammar-wide max k.
		int maxDepth = 0;
		for (int i = 0; i < blk.alternatives.size(); i++) {
			int d = blk.getAlternativeAt(i).lookaheadDepth;
			if (d == GrammarAnalyzer.NONDETERMINISTIC) {
				maxDepth = grammar.maxk;
				break;
			}
			if (d > maxDepth) {
				maxDepth = d;
			}
		}

		// Print the block's lookahead set at each level, 1..maxDepth.
		for (int k = 1; k <= maxDepth; k++) {
			printSet(maxDepth, k, grammar.theLLkAnalyzer.look(k, blk));
		}
	}
	/** Generate the nextToken rule.
	 * nextToken is a synthetic lexer rule that is the implicit OR of all
	 * user-defined lexer rules.
	 */
	public void genNextToken() {
		println("");
		println("*** Lexer nextToken rule:");
		println("The lexer nextToken rule is synthesized from all of the user-defined");
		println("lexer rules.  It logically consists of one big alternative block with");
		println("each user-defined rule being an alternative.");
		println("");

		// Build the synthesized rule block for nextToken: one alternative
		// block containing all the user-defined lexer rules.
		RuleBlock nextTokenBlk =
			MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");

		// Register the synthetic rule with the grammar under its mangled name.
		RuleSymbol rs = new RuleSymbol("mnextToken");
		rs.setDefined();
		rs.setBlock(nextTokenBlk);
		rs.access = "private";
		grammar.define(rs);

		// Analyze the synthesized block and report any ambiguity found.
		boolean deterministic = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
		if (!deterministic) {
			println("The grammar analyzer has determined that the synthesized");
			println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
			println("This means that there is some overlap of the character");
			println("lookahead for two or more of your lexer rules.");
		}

		genCommonBlock(nextTokenBlk);

		println("*** End of nextToken lexer rule.");
	}
	/** Generate diagnostic output for a named rule block: return values,
	 * arguments, init-action, alternatives, error handlers, and follow set.
	 * @param s The RuleSymbol describing the rule to generate
	 */
	public void genRule(RuleSymbol s) {
		println("");
		String ruleType = (doingLexRules ? "Lexer" : "Parser");
		println("*** " + ruleType + " Rule: " + s.getId());
		// A referenced-but-undefined rule gets an explanation and nothing else.
		if (!s.isDefined() ) {
			println("This rule is undefined.");
			println("This means that the rule was referenced somewhere in the grammar,");
			println("but a definition for the rule was not encountered.");
			println("It is also possible that syntax errors during the parse of");
			println("your grammar file prevented correct processing of the rule.");
			println("*** End " + ruleType + " Rule: " + s.getId());
			return;
		}
		tabs++;

		// Report a non-default access modifier, if any.
		if (s.access.length() != 0) {
			println("Access: " + s.access);
		}

		// Get rule return type and arguments
		RuleBlock rblk = s.getBlock();

		// Gen method return value(s).  Lexer rules may not declare their own:
		// they implicitly return a token type.
		if (rblk.returnAction != null) {
			println("Return value(s): " + rblk.returnAction);
			if ( doingLexRules ) {
				println("Error: you specified return value(s) for a lexical rule.");
				println("\tLexical rules have an implicit return type of 'int'.");
			}
		} else {
			if ( doingLexRules ) {
				println("Return value: lexical rule returns an implicit token type");
			} else {
				println("Return value: none");
			}
		}

		// Gen arguments
		if (rblk.argAction != null) 
		{
			println("Arguments: " + rblk.argAction);
		}

		// Dump any init-action
		genBlockPreamble(rblk);

		// Analyze the rule
		boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
		if (!ok) {
			println("Error: This rule is non-deterministic");
		}
	
		// Dump the alternates of the rule
		genCommonBlock(rblk);

		// Search for an unlabeled exception specification attached to the rule
		ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");

		// Generate user-defined or default catch phrases
		if (unlabeledUserSpec != null) {
			println("You specified error-handler(s) for this rule:");
			tabs++;
			for (int i = 0; i < unlabeledUserSpec.handlers.size(); i++)
			{
				// Blank line between successive handlers.
				if (i != 0) {
					println("");
				}

				ExceptionHandler handler = (ExceptionHandler)unlabeledUserSpec.handlers.elementAt(i);
				println("Error-handler(" + (i+1) + ") catches [" + handler.exceptionTypeAndName.getText() + "] and executes:");
				printAction(handler.action.getText());
			}
			tabs--;
			println("End error-handlers.");
		}
		else if (!doingLexRules) {
			println("Default error-handling will be generated, which catches all");
			println("parser exceptions and consumes tokens until the follow-set is seen.");
		}


		// Dump the follow set
		// Doesn't seem to work for lexical rules...
		if (!doingLexRules) {
			println("The follow set for this rule is:");
			tabs++;
			genFollowSetForRuleBlock(rblk);
			tabs--;
		}

		tabs--;
		println("*** End " + ruleType + " Rule: " + s.getId());
	}
	/** Generate the syntactic predicate.  This basically generates
	 * the alternative block, but tracks (via syntacticPredLevel) that we
	 * are inside a synPred while doing so.
	 * @param blk  The syntactic predicate block
	 */
	protected void genSynPred(SynPredBlock blk) {
		syntacticPredLevel++;
		gen((AlternativeBlock)blk);
		syntacticPredLevel--;
	}
	/** Generate the token types TXT file, listing each token's numeric
	 * value alongside its identifier (or double-quoted literal).
	 * @param tm The token manager supplying the vocabulary
	 * @throws IOException if the output file cannot be opened
	 */
	protected void genTokenTypes(TokenManager tm) throws IOException {
		// Open the token output TXT file and set the currentOutput stream
		System.out.println("Generating " + tm.getName() + TokenTypesFileSuffix+TokenTypesFileExt);
		currentOutput = antlr_oaa.Tool.openOutputFile(tm.getName() + TokenTypesFileSuffix+TokenTypesFileExt);
		//SAS: changed for proper text file io
		tabs = 0;

		// Bug fix: generation is wrapped in try/finally so the output file
		// is closed (and currentOutput cleared) even if an exception is
		// thrown part-way through; previously the stream would leak.
		try {
			// Generate the header common to all diagnostic files
			genHeader();

			// Generate a string for each token.  This creates a static
			// array of Strings indexed by token type.
			println("");
			println("*** Tokens used by the parser");
			println("This is a list of the token numeric values and the corresponding");
			println("token identifiers.  Some tokens are literals, and because of that");
			println("they have no identifiers.  Literals are double-quoted.");
			tabs++;

			// Enumerate all the valid token types
			Vector v = tm.getVocabulary();
			for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
				String s = (String)v.elementAt(i);
				if (s != null) {
					println(s + " = " + i);
				}
			}

			// Close the interface
			tabs--;
			println("*** End of tokens used by the parser");
		}
		finally {
			// Close the tokens output file
			currentOutput.close();
			currentOutput = null;
		}
	}
	/** Get a string for an expression to generate creation of an AST subtree.
	  * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
	  */
	public String getASTCreateString(Vector v) {
		// Diagnostic output performs no real AST construction; emit a
		// placeholder followed by the platform line terminator.
		String eol = System.getProperty("line.separator");
		return "***Create an AST from a vector here***" + eol;
	}
	/** Get a string for an expression to generate creating of an AST node.
	  * @param str The arguments to the AST constructor
	  */
	public String getASTCreateString(GrammarAtom atom, String str) {
		// Diagnostic form: just bracket the constructor arguments.
		StringBuffer sb = new StringBuffer();
		sb.append('[').append(str).append(']');
		return sb.toString();
	}
	/** Map an identifier to its corresponding tree-node variable.
	  * This is context-sensitive, depending on the rule and alternative
	  * being generated.
	  * @param id The identifier name to map
	  * @param tInfo Action translation info; unused here, since the
	  *        diagnostic generator performs no mapping.  NOTE(review): the
	  *        original doc described a "forInput" flag that does not match
	  *        this signature.
	  * @return The identifier, unchanged
	  */
	public String mapTreeId(String id, ActionTransInfo tInfo) {
		return id;
	}
	/** Format a lookahead or follow set, wrapping large sets across rows.
	 * @param depth The depth of the entire lookahead/follow
	 * @param k The lookahead level to print
	 * @param lookahead  The lookahead/follow set to print
	 */
	public void printSet(int depth, int k, Lookahead lookahead) {
		// Maximum number of set elements printed per row.
		int numCols = 5;

		int[] elems = lookahead.fset.toArray();

		// Label the set with its k level when the decision is deeper than 1.
		if (depth != 1) {
			print("k==" + k + ": {");
		} else {
			print("{ ");
		}
		// Large sets are printed multi-line, indented one extra level.
		if (elems.length > numCols) {
			_println("");
			tabs++;
			print("");
		}

		int column = 0;
		for (int i = 0; i < elems.length; i++)
		{
			column++;
			if (column > numCols) {
				_println("");
				print("");
				// Bug fix: restart the count at 1, because the element
				// emitted below is the first one on the new row.  It was
				// previously reset to 0, which made every row after the
				// first hold numCols+1 elements.
				column = 1;
			}
			// Lexer sets contain characters; parser sets contain token types.
			if (doingLexRules) {
				_print(charFormatter.literalChar(elems[i]));
			} else {
				_print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i]));
			}
			if (i != elems.length-1) {
				_print(", ");
			}
		}

		if (elems.length > numCols) {
			_println("");
			tabs--;
			print("");
		}
		_println(" }");
	}
}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?