📄 c.stg
字号:
/* =============================================================================
 * Start of recognizer
 */
<recognizer>
/* End of code
 * =============================================================================
 */
>>

// Extension used for the generated header file produced by this target.
headerFileExtension() ::= ".h"

// Emits the public C header (<name>.h) for the generated recognizer:
// doxygen banner, include guard, runtime includes, context struct typedef,
// constructor prototype and token #defines.
headerFile( LEXER,
            PARSER,
            TREE_PARSER,
            actionScope,
            actions,
            docComment,
            recognizer,
            name,
            tokens,
            tokenNames,
            rules,
            cyclicDFAs,
            bitsets,
            buildTemplate,
            buildAST,
            rewrite,
            profile,
            backtracking,
            synpreds,
            memoize,
            numRules,
            fileName,
            ANTLRVersion,
            generatedTimestamp,
            scopes,
            superClass,
            trace,
            literals
          ) ::=
<<
<leadIn("C header")>
<if(PARSER)>
 * The parser <mainName()>
<endif>
<if(LEXER)>
 * The lexer <mainName()>
<endif>
<if(TREE_PARSER)>
 * The tree parser <mainName()>
<endif>
has the callable functions (rules) shown below,
 * which will invoke the code for the associated rule in the source grammar
 * assuming that the input stream is pointing to a token/text stream that could begin
 * this rule.
 *
 * For instance if you call the first (topmost) rule in a parser grammar, you will
 * get the results of a full parse, but calling a rule half way through the grammar will
 * allow you to pass part of a full token stream to the parser, such as for syntax checking
 * in editors and so on.
 *
 * The parser entry points are called indirectly (by function pointer to function) via
 * a parser context typedef p<name>, which is returned from a call to <name>New().
 *
<if(LEXER)>
 * As this is a generated lexer, it is unlikely you will call it 'manually'. However
 * the entry points are provided anyway.
 *
<endif>
 * The entry points for <name> are as follows:
 *
 *  <rules: {r | <if(!r.ruleDescriptor.isSynPred)> - <headerReturnType(ruleDescriptor=r.ruleDescriptor,...)> p<name>-><r.ruleDescriptor.name>(p<name>)<endif>}; separator="\n * ">
 *
 * The return type for any particular rule is of course determined by the source
 * grammar file.
 */
#ifndef _<name>_H
#define _<name>_H
<actions.(actionScope).preincludes>

/* =============================================================================
 * Standard antlr3 C runtime definitions
 */
#include \<antlr3.h>

/* End of standard antlr 3 runtime definitions
 * =============================================================================
 */
<actions.(actionScope).includes>
<actions.(actionScope).header>

#ifdef WIN32
// Disable: Unreferenced parameter,                - Rules with parameters that are not used
//          constant conditional,                  - ANTLR realizes that a prediction is always true (synpred usually)
//          initialized but unused variable        - tree rewrite vairables declared but not needed
//          Unreferenced local variable            - lexer rulle decalres but does not always use _type
//          potentially unitialized variable used  - retval always returned from a rule
//
// These are only really displayed at warning level /W4 but that is the code ideal I am aiming at
// and the codegen must generate some of these warnings by necessity, apart from 4100, which is
// usually generated when a parser rule is given a parameter that it does not use. Mostly though
// this is a matter of orthogonality hence I disable that one.
//
#pragma warning( disable : 4100 )
#pragma warning( disable : 4101 )
#pragma warning( disable : 4127 )
#pragma warning( disable : 4189 )
#pragma warning( disable : 4701 )
#endif

<if(backtracking)>
/* ========================
 * BACKTRACKING IS ENABLED
 * ========================
 */
<endif>

<rules:{r |<headerReturnScope(ruleDescriptor=r.ruleDescriptor,...)>}>
<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeDecl(scope=it)><endif>}>
<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScopeFuncMacro(scope=it)><endif>}>
<rules:{r |<ruleAttributeScopeDecl(scope=r.ruleDescriptor.ruleScope)>}>
<rules:{r |<ruleAttributeScopeFuncMacro(scope=r.ruleDescriptor.ruleScope)>}>

/** Context tracking structure for <mainName()>
 */
typedef struct <name>_Ctx_struct
{
    /** Built in ANTLR3 context tracker contains all the generic elements
     *  required for context tracking.
     */
<if(PARSER)>
    pANTLR3_PARSER      pParser;
<endif>
<if(LEXER)>
    pANTLR3_LEXER       pLexer;
<endif>
<if(TREE_PARSER)>
    pANTLR3_TREE_PARSER pTreeParser;
<endif>

<scopes:{<if(it.isDynamicGlobalScope)>    <globalAttributeScopeDef(scope=it)><endif>}; separator="\n\n">
<rules: {r |<if(r.ruleDescriptor.ruleScope)>    <ruleAttributeScopeDef(scope=r.ruleDescriptor.ruleScope)><endif>}>

<if(LEXER)>
    <rules:{r | <if(!r.ruleDescriptor.isSynPred)><headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*m<r.ruleDescriptor.name>) (struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);<endif>}; separator="\n";>
<endif>
<if(PARSER)>
    <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>) (struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
<endif>
<if(TREE_PARSER)>
    <rules:{r | <headerReturnType(ruleDescriptor=r.ruleDescriptor)> (*<r.ruleDescriptor.name>) (struct <name>_Ctx_struct * ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
<endif>

    unsigned char * (*getGrammarFileName)();
    void            (*free) (struct <name>_Ctx_struct * ctx);
    <@members>
    <@end>
}
    <name>, * p<name>;

<if(LEXER)>
/* Function protoypes for the lexer functions that external translation units
 * may wish to call.
 */
ANTLR3_API p<name> <name>New (pANTLR3_INPUT_STREAM instream);
<endif>
<if(PARSER)>
/* Function protoypes for the parser functions that external translation units
 * may wish to call.
 */
ANTLR3_API p<name> <name>New (pANTLR3_COMMON_TOKEN_STREAM instream);
<endif>
<if(TREE_PARSER)>
/* Function protoypes for the treeparser functions that external translation units
 * may wish to call.
 */
ANTLR3_API p<name> <name>New (pANTLR3_COMMON_TREE_NODE_STREAM instream);
<endif>

/** Symbolic definitions of all the tokens that the <grammarType()> will work with.
 * \{
 *
 * Antlr will define EOF, but we can't use that as it it is too common in
 * in C header files and that would be confusing. There is no way to filter this out at the moment
 * so we just undef it here for now. That isn't the value we get back from C recognizers
 * anyway. We are looking for ANTLR3_TOKEN_EOF.
 */
#ifdef EOF
#undef EOF
#endif
#ifdef Tokens
#undef Tokens
#endif

<tokens:{#define <it.name> <it.type>}; separator="\n">

/* End of token definitions for <name>
 * =============================================================================
 */
/** \} */

#endif

/* END - Note:Keep extra linefeed to satisfy UNIX systems */
>>

// Human-readable name of the kind of recognizer being generated, used in
// the doxygen commentary of the emitted header.
grammarType() ::= <<
<if(PARSER)>parser<endif><if(LEXER)>lexer<endif><if(TREE_PARSER)>tree parser<endif>
>>

// Base name of the recognizer (same for all three recognizer kinds).
mainName() ::= <<
<if(PARSER)><name><endif><if(LEXER)><name><endif><if(TREE_PARSER)><name><endif>
>>

// Delegate to the standard return-scope/return-type templates when emitting
// rule prototypes into the header.
headerReturnScope(ruleDescriptor) ::= "<returnScope(...)>"
headerReturnType(ruleDescriptor) ::= "<returnType()>"

// Produce the lexer output
//
lexer(  grammar,
        name,
        tokens,
        scopes,
        rules,
        numRules,
        labelType="pANTLR3_COMMON_TOKEN",
        filterMode
     ) ::= <<

<if(filterMode)>
/* Forward declare implementation function for ANTLR3_TOKEN_SOURCE interface when
 * this is a fliter mode lexer.
 */
static pANTLR3_COMMON_TOKEN <name>NextToken (pANTLR3_TOKEN_SOURCE toksource);

/* Override the normal MEMOIZE and HAVEALREADYPARSED macros as this is a filtering
 * lexer. In filter mode, the memoizing and backtracking are gated at BACKTRACKING > 1 rather
 * than just BACKTRACKING. IN some cases this might generate code akin to:
 *   if (BACKTRACKING) if (BACKTRACKING > 1) memoize.
 * However, I assume that the C compilers/optimizers are smart enough to work this one out
 * these days - Jim
 */
#undef  MEMOIZE
#define MEMOIZE(ri,si)      if (BACKTRACKING>1) { RECOGNIZER->memoize(RECOGNIZER, ri, si) }
#undef  HAVEPARSEDRULE
#define HAVEPARSEDRULE(r)   if (BACKTRACKING>1) { RECOGNIZER->alreadyParsedRule(RECOGNIZER, r) }
<endif>

/* Forward declare the locally static matching functions we have generated and any predicate functions.
 */
<rules:{r | static <headerReturnType(ruleDescriptor=r.ruleDescriptor)> <if(!r.ruleDescriptor.isSynPred)>m<endif><r.ruleDescriptor.name> (p<name> ctx<if(r.ruleDescriptor.parameterScope)>, <endif><r.ruleDescriptor.parameterScope:parameterScope(scope=it)>);}; separator="\n";>
static void <name>Free(p<name> ctx);

/* =========================================================================
 * Lexer matching rules end.
 * =========================================================================
 */

<scopes:{<if(it.isDynamicGlobalScope)><globalAttributeScope(scope=it)><endif>}>

<actions.lexer.members>

static void
<name>Free  (p<name> ctx)
{
<if(memoize)>
    RULEMEMO->free(RULEMEMO);
<endif>
    LEXER->free(LEXER);

    ANTLR3_FREE(ctx);
}

/** \brief Name of the gramar file that generated this code
 */
static unsigned char fileName[] = "<fileName>";

/** \brief Return the name of the grammar file that generated this code.
 */
static unsigned char * getGrammarFileName()
{
    return fileName;
}

<if(filterMode)>
    <filteringNextToken()>
<endif>

/** \brief Create a new lexer called <name>
 *
 * \param[in] instream Pointer to an initialized input stream
 *
 * \return
 *     - Success p<name> initialized for the lex start
 *     - Fail (p<name>)(ANTLR3_ERR_NOMEM)
 */
ANTLR3_API p<name> <name>New (pANTLR3_INPUT_STREAM instream)
{
    p<name> lexCtx;     /* Context structure we will build and return */

    lexCtx = (p<name>) ANTLR3_MALLOC(sizeof(<name>));

    if (lexCtx == NULL)
    {
        /* Failed to allocate memory for lexer context */
        return (p<name>)ANTLR3_ERR_NOMEM;
    }

    /* -------------------------------------------------------------------
     * Memory for basic structure is allocated, now to fill in
     * in base ANTLR3 structures. We intialize the function pointers
     * for the standard ANTLR3 lexer function set, but upon return
     * from here, the programmer may set the pointers to provide custom
     * implementations of each function.
     *
     * We don't use the macros defined in <name>.h here so you can get a sense
     * of what goes where.
     */

    /* Create a base lexer, using the supplied input stream
     */
    lexCtx->pLexer = antlr3LexerNewStream(ANTLR3_SIZE_HINT, instream);

    /* Check that we allocated the memory correctly
     */
    if (lexCtx->pLexer == (pANTLR3_LEXER)ANTLR3_ERR_NOMEM)
    {
        ANTLR3_FREE(lexCtx);
        return (p<name>)ANTLR3_ERR_NOMEM;
    }
<if(memoize)>
    /* Create a LIST for recording rule memos.
     */
    lexCtx->pLexer->rec->ruleMemo = antlr3IntTrieNew(15);   /* 16 bit depth is enough for 32768 rules! */
<endif>

    /* Install the implementation of our <name> interface
     */
    <rules:{r | <if(!r.ruleDescriptor.isSynPred)>lexCtx->m<r.ruleDescriptor.name> = m<r.ruleDescriptor.name>;<endif>}; separator="\n";>

    /** When the nextToken() call is made to this lexer's pANTLR3_TOKEN_SOURCE
     * it will call mTokens() in this generated code, and will pass it the ctx
     * pointer of this lexer, not the context of the base lexer, so store that now.
     */
    lexCtx->pLexer->ctx = lexCtx;

    /** Install the token matching function
     */
    lexCtx->pLexer->mTokens = (void (*) (void *))(mTokens);

    lexCtx->getGrammarFileName = getGrammarFileName;
    lexCtx->free               = <name>Free;

<if(filterMode)>
    /* We have filter mode turned on, so install the filtering nextToken function
     */
    lexCtx->pLexer->tokSource->nextToken = <name>NextToken;
<endif>

    /* Return the newly built lexer to the caller
     */
    return lexCtx;
}

<if(cyclicDFAs)>

/* =========================================================================
 * DFA tables for the lexer
 */
<cyclicDFAs:cyclicDFA()> <! dump tables for all DFA !>

/* =========================================================================
 * End of DFA tables for the lexer
 */
<endif>

/* =========================================================================
 * Functions to match the lexer grammar defined tokens from the input stream
 */

<rules; separator="\n\n">

/* =========================================================================
 * Lexer matching rules end.
 * =========================================================================
 */
<if(synpreds)>

/* =========================================================================
 * Lexer syntactic predicates
 */
<synpreds:{p | <lexerSynpred(predname=p)>}>

/* =========================================================================
 * Lexer syntactic predicates end.
 * =========================================================================
 */
<endif>

/* End of Lexer code
 * ================================================
 * ================================================
 */

>>

filteringNextToken() ::= <<
/** An override of the lexer's nextToken() method that backtracks over mTokens() looking
 *  for matches in lexer filterMode. No error can be generated upon error; just rewind, consume
 *  a token and then try again. BACKTRACKING needs to be set as well.
 *  Make rule memoization happen only at levels above 1 as we start mTokens
 *  at BACKTRACKING==1.
 */
static pANTLR3_COMMON_TOKEN
<name>NextToken(pANTLR3_TOKEN_SOURCE toksource)
{
    pANTLR3_LEXER lexer;

    lexer = (pANTLR3_LEXER)(toksource->super);

    /* Get rid of any previous token (token factory takes care of
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -