cppcodegenerator.java
Java source from the OAA framework software released by SRI International (listing part 1 of 5).
		saveText = saveText && alt.getAutoGen();

		// Reset the variable name map for the alternative
		Hashtable saveMap = treeVariableMap;
		treeVariableMap = new Hashtable();

		// Generate try block around the alt for error handling
		if (alt.exceptionSpec != null) {
			println("try {      // for error handling");
			tabs++;
		}

		AlternativeElement elem = alt.head;
		while ( !(elem instanceof BlockEndElement) ) {
			elem.generate(); // alt can begin with anything. Ask target to gen.
			elem = elem.next;
		}

		if ( genAST) {
			if (blk instanceof RuleBlock) {
				// Set the AST return value for the rule
				RuleBlock rblk = (RuleBlock)blk;
				println(rblk.getRuleName() + "_AST = "+labeledElementASTType+"(currentAST.root);");
			}
			else if (blk.getLabel() != null) {
				// ### future: also set AST value for labeled subrules.
				// println(blk.getLabel() + "_AST = "+labeledElementASTType+"(currentAST.root);");
			}
		}

		if (alt.exceptionSpec != null) {
			// close try block
			tabs--;
			println("}");
			genErrorHandler(alt.exceptionSpec);
		}

		genAST = savegenAST;
		saveText = oldsaveTest;

		treeVariableMap = saveMap;
	}
	/** Generate all the bitsets to be used in the parser or lexer.
	 * Generates the raw bitset data, e.g. "const unsigned long _tokenSet1_data_[] = {...};",
	 * and the BitSet object declarations, e.g. "const BitSet _tokenSet1(_tokenSet1_data_,...);".
	 * Note that most languages do not support object initialization inside a
	 * class definition, so other code-generators may have to separate the
	 * bitset declarations from the initializations (e.g., put the initializations
	 * in the generated constructor instead).
	 * @param bitsetList The list of bitsets to generate.
	 * @param maxVocabulary Ensure that each generated bitset can contain at least this value.
	 * @param prefix The prefix prepended to each generated bitset name (typically the class name plus "::").
	 * @param dumpSets If true, also dump the token names contained in each bitset as comments
	 * (done only for parser grammars, not lexers).
	 */
	protected void genBitsets(
		Vector bitsetList,
		int maxVocabulary,
		String prefix,
		boolean dumpSets
	) {
		println("");
		for (int i = 0; i < bitsetList.size(); i++)
		{
			BitSet p = (BitSet)bitsetList.elementAt(i);
			// Ensure that generated BitSet is large enough for vocabulary
			p.growToInclude(maxVocabulary);
			// initialization data
			println(
				"const unsigned long " + prefix + getBitsetName(i) + "_data_" + "[] = { " +
				p.toStringOfHalfWords() +
				" };"
			);

			if( dumpSets )
			{
				// Dump the contents of the bitset in readable format...
				String t = "// ";
				for( int j = 0; j < p.size(); j++ )
				{
					if ( p.member( j ) )
					{
						t += grammar.tokenManager.getTokenStringAt(j)+" ";
						if( t.length() > 70 )
						{
							println(t);
							t = "// ";
						}
					}
				}
				// print any remaining token names that were appended but not yet flushed
				if ( !t.equals("// ") )
					println(t);
			}

			// BitSet object
			println(
				"const "+namespaceAntlr+"BitSet " + prefix + getBitsetName(i) + "(" +
				getBitsetName(i) + "_data_," + p.size()/32 +
				");"
			);
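			// Illustration (hypothetical names): assuming getBitsetName(i) yields "_tokenSet_1",
			// prefix is "MyParser::" and namespaceAntlr expands to "ANTLR_USE_NAMESPACE(antlr)",
			// the two generated C++ lines for this bitset would look roughly like:
			//   const unsigned long MyParser::_tokenSet_1_data_[] = { 2UL, 0UL, 0UL, 0UL };
			//   const ANTLR_USE_NAMESPACE(antlr)BitSet MyParser::_tokenSet_1(_tokenSet_1_data_,4);
			// where the final 4 is p.size()/32, the number of 32-bit entries in the data array.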
		}
	}
	protected void genBitsetsHeader(
		Vector bitsetList,
		int maxVocabulary
	) {
		println("");
		for (int i = 0; i < bitsetList.size(); i++)
		{
			BitSet p = (BitSet)bitsetList.elementAt(i);
			// Ensure that generated BitSet is large enough for vocabulary
			p.growToInclude(maxVocabulary);
			// initialization data
			println("static const unsigned long " + getBitsetName(i) + "_data_" + "[];");
			// BitSet object
			println("static const "+namespaceAntlr+"BitSet " + getBitsetName(i) + ";");
		}
	}
	/** Generate the finish of a block, using a combination of the info
	 * returned from genCommonBlock() and the action to perform when
	 * no alternative was taken.
	 * @param howToFinish The CppBlockFinishingInfo returned by genCommonBlock().
	 * @param noViableAction What to generate when no alternative is taken.
	 */
	private void genBlockFinish(CppBlockFinishingInfo howToFinish, String noViableAction)
	{
		if (howToFinish.needAnErrorClause &&
			 (howToFinish.generatedAnIf || howToFinish.generatedSwitch)) {
			if ( howToFinish.generatedAnIf ) {
				println("else {");
			}
			else {
				println("{");
			}
			tabs++;
			println(noViableAction);
			tabs--;
			println("}");
		}
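		// Illustration (hypothetical action text): with generatedAnIf set and a noViableAction of
		// "throw ANTLR_USE_NAMESPACE(antlr)NoViableAltException(LT(1), getFilename());",
		// the block above would emit roughly:
		//   else {
		//       throw ANTLR_USE_NAMESPACE(antlr)NoViableAltException(LT(1), getFilename());
		//   }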

		if ( howToFinish.postscript!=null ) {
			println(howToFinish.postscript);
		}
	}
	/** Generate the header for a block, which may be a RuleBlock or a
	 * plain AlternativeBlock.  This generates any variable declarations,
	 * init-actions, and syntactic-predicate-testing variables.
	 * @param blk The block for which the preamble is to be generated.
	 */
	protected void genBlockPreamble(AlternativeBlock blk) {
		// define labels for rule blocks.
		if ( blk instanceof RuleBlock ) {
			RuleBlock rblk = (RuleBlock)blk;
			if ( rblk.labeledElements!=null ) {
				for (int i=0; i<rblk.labeledElements.size(); i++) {

					AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
					//System.out.println("looking at labeled element: "+a);
					// Variables for labeled rule refs and subrules are different from
					// variables for grammar atoms.  This test is a little tricky because
					// we want to get all rule refs and ebnf, but not rule blocks or
					// syntactic predicates.
					if (
						a instanceof RuleRefElement ||
						( a instanceof AlternativeBlock &&
						  !(a instanceof RuleBlock) &&
						  !(a instanceof SynPredBlock) )
					) {

						if (
							!(a instanceof RuleRefElement) &&
							((AlternativeBlock)a).not &&
							analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
						) {
							// Special case for inverted subrules that will be inlined.
							// Treat these like token or char literal references
							println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
							if (grammar.buildAST) {
								println(labeledElementASTType+" " + a.getLabel() + "_AST = "+labeledElementASTInit+";");
							}
						}
						else {
							if (grammar.buildAST) {
								// Always gen AST variables for labeled elements, even if the
								// element itself is marked with !
								println(labeledElementASTType+" " + a.getLabel() + "_AST = "+labeledElementASTInit+";");
							}
							if ( grammar instanceof LexerGrammar ) {
								println(namespaceAntlr+"RefToken "+a.getLabel()+";");
							}
							if (grammar instanceof TreeWalkerGrammar) {
								// always generate rule-ref variables for tree walker
								println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
							}
						}
					}
					else {
						// It is a token or literal reference.  Generate the
						// correct variable type for this grammar
						println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
						// In addition, generate *_AST variables if building ASTs
						if (grammar.buildAST) {
							println(labeledElementASTType+" " + a.getLabel() + "_AST = "+labeledElementASTInit+";");
						}
					}
				}
			}
		}
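		// Illustration (identifiers hypothetical, namespace macros omitted): in a parser grammar
		// built with ASTs, a label "id" on a token reference would typically yield declarations like
		//   RefToken id = nullToken;
		//   RefAST id_AST = nullAST;
		// The exact type and initializer strings come from labeledElementType, labeledElementInit
		// and their AST counterparts, which are set elsewhere in this generator.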

		// dump out init action
		if ( blk.initAction!=null ) {
			genLineNo(blk);
			printAction(
				processActionForTreeSpecifiers(blk.initAction, 0, currentRule, null)
			);
			genLineNo2();
		}
	}
	public void genBody(LexerGrammar g) throws IOException {
		outputFile = grammar.getClassName() + ".cpp";
		outputLine = 1;
		currentOutput = antlr_oaa.Tool.openOutputFile(outputFile);
		//SAS: changed for proper text file io

		genAST = false;	// no way to gen trees.
		saveText = true;	// save consumed characters.

		tabs=0;

		// Generate header common to all C++ output files
		genHeader(outputFile);

		printHeaderAction(preIncludeCpp);
		// Generate header specific to lexer C++ file
		println("#include \"" + grammar.getClassName() + ".hpp\"");
		println("#include \"antlr/CharBuffer.hpp\"");
		println("#include \"antlr/TokenStreamException.hpp\"");
		println("#include \"antlr/TokenStreamIOException.hpp\"");
		println("#include \"antlr/TokenStreamRecognitionException.hpp\"");
		println("#include \"antlr/CharStreamException.hpp\"");
		println("#include \"antlr/CharStreamIOException.hpp\"");
		println("#include \"antlr/NoViableAltForCharException.hpp\"");
		if (grammar.debuggingOutput)
			println("#include \"antlr/DebuggingInputBuffer.hpp\"");
		println("");
		printHeaderAction(postIncludeCpp);

		if (nameSpace != null)
			nameSpace.emitDeclarations(currentOutput);

		// Generate user-defined lexer file preamble
		printAction(grammar.preambleAction);

		// Generate lexer class definition
		String sup=null;
		if ( grammar.superClass!=null ) {
			sup = grammar.superClass;
		}
		else {
			sup = grammar.getSuperClass();
			if (sup.lastIndexOf('.') != -1)
				sup = sup.substring(sup.lastIndexOf('.')+1);
			sup = namespaceAntlr + sup;
		}

		//
		// Generate the constructor from InputStream
		//
		println(grammar.getClassName() + "::" + grammar.getClassName() + "(" + namespaceStd + "istream& in)");
		tabs++;
		// if debugging, wrap the input buffer in a debugger
		if (grammar.debuggingOutput)
			println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(new "+namespaceAntlr+"CharBuffer(in)))");
		else
			println(": " + sup + "(new "+namespaceAntlr+"CharBuffer(in))");
		tabs--;
		println("{");
		tabs++;

		// if debugging, set up array variables and call user-overridable
		//   debugging setup method
		if ( grammar.debuggingOutput ) {
			println("setRuleNames(_ruleNames);");
			println("setSemPredNames(_semPredNames);");
			println("setupDebugging();");
		}

		println("setCaseSensitive("+g.caseSensitive+");");
		println("initLiterals();");
		tabs--;
		println("}");
		println("");

		// Generate the constructor from InputBuffer
		println(grammar.getClassName() + "::" + grammar.getClassName() + "("+namespaceAntlr+"InputBuffer& ib)");
		tabs++;
		// if debugging, wrap the input buffer in a debugger
		if (grammar.debuggingOutput)
			println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(ib))");
		else
			println(": " + sup + "(ib)");
		tabs--;
		println("{");
		tabs++;

		// if debugging, set up array variables and call user-overridable
		//   debugging setup method
		if ( grammar.debuggingOutput ) {
			println("setRuleNames(_ruleNames);");
			println("setSemPredNames(_semPredNames);");
			println("setupDebugging();");
		}

		println("setCaseSensitive("+g.caseSensitive+");");
		println("initLiterals();");
		tabs--;
		println("}");
		println("");

		// Generate the constructor from LexerSharedInputState
		println(grammar.getClassName() + "::" + grammar.getClassName() + "(const "+namespaceAntlr+"LexerSharedInputState& state)");
		tabs++;
		println(": " + sup + "(state)");
		tabs--;
		println("{");
		tabs++;

		// if debugging, set up array variables and call user-overridable
		//   debugging setup method
		if ( grammar.debuggingOutput ) {
			println("setRuleNames(_ruleNames);");
			println("setSemPredNames(_semPredNames);");
			println("setupDebugging();");
		}

		println("setCaseSensitive("+g.caseSensitive+");");
		println("initLiterals();");
		tabs--;
		println("}");
		println("");

		println("void " + grammar.getClassName() + "::initLiterals()");
		println("{");
		tabs++;
		// Generate the initialization of the map
		// containing the string literals used in the lexer
		// The literals variable itself is in CharScanner
		/* TJP: get keys now and check to make sure it's a literal not
		 * a label to a literal; was dup'ing string literals before
		 * change.
		 *
		Enumeration ids = grammar.tokenManager.getTokenSymbolElements();
		while ( ids.hasMoreElements() ) {
			TokenSymbol sym = (TokenSymbol)ids.nextElement();
			if ( sym instanceof StringLiteralSymbol ) {
				StringLiteralSymbol s = (StringLiteralSymbol)sym;
				println("literals["+s.getId()+"] = "+s.getTokenType()+";");
			}
		}
		*/
		// TJP changed it to following loop.
		Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
		while ( keys.hasMoreElements() ) {
			String key = (String)keys.nextElement();
			if ( key.charAt(0) != '"' ) {
				continue;
			}
			TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
			if ( sym instanceof StringLiteralSymbol ) {
				StringLiteralSymbol s = (StringLiteralSymbol)sym;
				println("literals["+s.getId()+"] = "+s.getTokenType()+";");
			}
		}
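		// Illustration (literal and token type hypothetical): for a string literal "begin" with
		// token type 4, the loop above emits a line such as
		//   literals["begin"] = 4;
		// filling the literals map declared in CharScanner.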

		// Generate the setting of various generated options.
		tabs--;
		println("}");

		// Generate getCaseSensitiveLiterals() method
		println("bool " + grammar.getClassName() + "::getCaseSensitiveLiterals() const");
		println("{");
		tabs++;
		println("return "+g.caseSensitiveLiterals + ";");
		tabs--;
		println("}");

		Enumeration ids;
		// generate the rule name array for debugging
		if (grammar.debuggingOutput) {
			println("const char* "+grammar.getClassName()+"::_ruleNames[] = {");
			tabs++;

			ids = grammar.rules.elements();
			int ruleNum=0;
			while ( ids.hasMoreElements() ) {
				GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
				if ( sym instanceof RuleSymbol)
					println("\""+((RuleSymbol)sym).getId()+"\",");
			}
			println("0");
			tabs--;
			println("};");
		}
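		// Illustration (class and rule names hypothetical): with debugging enabled and rules
		// ID and WS, the block above emits a null-terminated name table such as
		//   const char* MyLexer::_ruleNames[] = {
		//   "ID",
		//   "WS",
		//   0
		//   };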

		// Generate nextToken() rule.
		// nextToken() is a synthetic lexer rule that is the implicit OR of all
		// user-defined lexer rules.
		genNextToken();
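		// Note: the generated nextToken() body is produced by genNextToken() and is not shown
		// in this excerpt; in ANTLR's C++ output it typically loops, switches on LA(1),
		// dispatches to the generated rule methods (e.g. mID(true)), and converts stream errors
		// into the TokenStream*Exception types included at the top of this file.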
