⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 GGeneratorLexer.java

📁 UML设计测试工具
💻 JAVA
📖 第 1 页 / 共 4 页
字号:
// $ANTLR 2.7.4: "expandedgenerator.g" -> "GGeneratorLexer.java"$

// NOTE(review): this file is machine-generated by ANTLR 2.7.4 from
// expandedgenerator.g. Do not hand-edit the lexing logic — regenerate
// from the grammar instead. Only comments have been added here; all
// code tokens are unchanged. This chunk is page 1 of 4 of the file and
// is truncated inside mSL_COMMENT (see marker at the end).

package org.tzi.use.parser.generator;

import java.io.InputStream;
import antlr.TokenStreamException;
import antlr.TokenStreamIOException;
import antlr.TokenStreamRecognitionException;
import antlr.CharStreamException;
import antlr.CharStreamIOException;
import antlr.ANTLRException;
import java.io.Reader;
import java.util.Hashtable;
import antlr.CharScanner;
import antlr.InputBuffer;
import antlr.ByteBuffer;
import antlr.CharBuffer;
import antlr.Token;
import antlr.CommonToken;
import antlr.RecognitionException;
import antlr.NoViableAltForCharException;
import antlr.MismatchedCharException;
import antlr.TokenStream;
import antlr.ANTLRHashString;
import antlr.LexerSharedInputState;
import antlr.collections.impl.BitSet;
import antlr.SemanticException;
import org.tzi.use.parser.MyToken;
import org.tzi.use.parser.ParseErrorHandler;
import java.io.PrintWriter;
import org.tzi.use.util.Log;
import org.tzi.use.util.StringUtil;

/**
 * Generated lexer for the USE generator language. Produces {@link MyToken}
 * instances carrying filename, line, and start-column information, and
 * routes lexical errors through a {@link ParseErrorHandler} (set via
 * {@link #init}) instead of ANTLR's default error reporting.
 */
public class GGeneratorLexer extends antlr.CharScanner implements GGeneratorLexerTokenTypes, TokenStream {
    // Column at which the token currently being scanned started; captured
    // in consume() on the first character of each token.
    protected int fTokColumn = 1;
    // NOTE(review): fErr is never referenced in this visible chunk —
    // possibly used on a later page, or dead; confirm against pages 2-4.
    private PrintWriter fErr;
    // Supplies the filename and receives error reports; must be set with
    // init() before lexing, otherwise getFilename()/reportError() NPE.
    private ParseErrorHandler fParseErrorHandler;

    /**
     * Consumes one input character, first recording the start column of a
     * new token (only when the token text is still empty and we are not
     * inside a syntactic predicate, i.e. guessing == 0).
     */
    public void consume() throws CharStreamException {
        if (inputState.guessing == 0 ) {
            if (text.length() == 0 ) {
                // remember token start column
                fTokColumn = getColumn();
            }
        }
        super.consume();
    }

    /** Returns the name of the file being lexed, as known by the error handler. */
    public String getFilename() {
        return fParseErrorHandler.getFileName();
    }

    /**
     * Creates a MyToken annotated with filename, line, and the start column
     * remembered by consume(). EOF tokens get a readable text so error
     * messages can say "end of file or input" instead of nothing.
     */
    protected Token makeToken(int t) {
        MyToken token =
            new MyToken(getFilename(), getLine(), fTokColumn);
        token.setType(t);
        if (t == EOF )
            token.setText("end of file or input");
        return token;
    }

    /** Forwards a lexical error to the ParseErrorHandler as "line:column: message". */
    public void reportError(RecognitionException ex) {
        fParseErrorHandler.reportError(
                ex.getLine() + ":" + ex.getColumn() + ": " + ex.getMessage());
    }

    /**
     * Returns true if word is a reserved keyword.
     */
    public boolean isKeyword(String word) {
        ANTLRHashString s = new ANTLRHashString(word, this);
        // Membership in the generated literals table decides keyword-ness.
        boolean res = literals.get(s) != null;
        Log.trace(this, "keyword " + word + ": " + res);
        return res;
    }

    /** Rule-entry trace hook (active when ANTLR tracing is enabled). */
    public void traceIn(String rname) throws CharStreamException {
        traceIndent();
        traceDepth += 1;
        System.out.println("> lexer " + rname + ": c == '" +
                            StringUtil.escapeChar(LA(1), '\'') + "'");
    }

    /** Rule-exit trace hook (active when ANTLR tracing is enabled). */
    public void traceOut(String rname) throws CharStreamException {
        traceDepth -= 1;
        traceIndent();
        System.out.println("< lexer " + rname + ": c == '" +
                           StringUtil.escapeChar(LA(1), '\'') + "'");
    }

    /** Must be called before lexing; installs the error handler / filename source. */
    public void init(ParseErrorHandler handler) {
        fParseErrorHandler = handler;
    }

public GGeneratorLexer(InputStream in) {
	this(new ByteBuffer(in));
}
public GGeneratorLexer(Reader in) {
	this(new CharBuffer(in));
}
public GGeneratorLexer(InputBuffer ib) {
	this(new LexerSharedInputState(ib));
}
// Primary constructor: configures case sensitivity and fills the
// generated keyword table. The Integer values are the generated token
// type numbers from GGeneratorLexerTokenTypes — do not edit by hand.
public GGeneratorLexer(LexerSharedInputState state) {
	super(state);
	caseSensitiveLiterals = true;
	setCaseSensitive(true);
	literals = new Hashtable();
	literals.put(new ANTLRHashString("procedure", this), new Integer(80));
	literals.put(new ANTLRHashString("aggregation", this), new Integer(71));
	literals.put(new ANTLRHashString("for", this), new Integer(84));
	literals.put(new ANTLRHashString("class", this), new Integer(64));
	literals.put(new ANTLRHashString("Collection", this), new Integer(59));
	literals.put(new ANTLRHashString("constraints", this), new Integer(61));
	literals.put(new ANTLRHashString("false", this), new Integer(45));
	literals.put(new ANTLRHashString("associationclass", this), new Integer(69));
	literals.put(new ANTLRHashString("true", this), new Integer(44));
	literals.put(new ANTLRHashString("Sequence", this), new Integer(51));
	literals.put(new ANTLRHashString("let", this), new Integer(9));
	literals.put(new ANTLRHashString("and", this), new Integer(15));
	literals.put(new ANTLRHashString("operations", this), new Integer(66));
	literals.put(new ANTLRHashString("implies", this), new Integer(12));
	literals.put(new ANTLRHashString("associationClass", this), new Integer(68));
	literals.put(new ANTLRHashString("end", this), new Integer(67));
	literals.put(new ANTLRHashString("pre", this), new Integer(36));
	literals.put(new ANTLRHashString("Bag", this), new Integer(52));
	literals.put(new ANTLRHashString("oclIsKindOf", this), new Integer(38));
	literals.put(new ANTLRHashString("ordered", this), new Integer(75));
	literals.put(new ANTLRHashString("association", this), new Integer(73));
	literals.put(new ANTLRHashString("context", this), new Integer(76));
	literals.put(new ANTLRHashString("oclAsType", this), new Integer(37));
	literals.put(new ANTLRHashString("post", this), new Integer(79));
	literals.put(new ANTLRHashString("endif", this), new Integer(43));
	literals.put(new ANTLRHashString("begin", this), new Integer(82));
	literals.put(new ANTLRHashString("inv", this), new Integer(77));
	literals.put(new ANTLRHashString("in", this), new Integer(11));
	literals.put(new ANTLRHashString("xor", this), new Integer(14));
	literals.put(new ANTLRHashString("div", this), new Integer(25));
	literals.put(new ANTLRHashString("Set", this), new Integer(50));
	literals.put(new ANTLRHashString("oclEmpty", this), new Integer(56));
	literals.put(new ANTLRHashString("enum", this), new Integer(62));
	literals.put(new ANTLRHashString("allInstances", this), new Integer(29));
	literals.put(new ANTLRHashString("composition", this), new Integer(72));
	literals.put(new ANTLRHashString("between", this), new Integer(70));
	literals.put(new ANTLRHashString("or", this), new Integer(13));
	literals.put(new ANTLRHashString("abstract", this), new Integer(63));
	literals.put(new ANTLRHashString("model", this), new Integer(60));
	literals.put(new ANTLRHashString("oclUndefined", this), new Integer(57));
	literals.put(new ANTLRHashString("role", this), new Integer(74));
	literals.put(new ANTLRHashString("if", this), new Integer(40));
	literals.put(new ANTLRHashString("iterate", this), new Integer(31));
	literals.put(new ANTLRHashString("oclIsTypeOf", this), new Integer(39));
	literals.put(new ANTLRHashString("attributes", this), new Integer(65));
	literals.put(new ANTLRHashString("Tuple", this), new Integer(58));
	literals.put(new ANTLRHashString("else", this), new Integer(42));
	literals.put(new ANTLRHashString("var", this), new Integer(81));
	literals.put(new ANTLRHashString("not", this), new Integer(26));
	literals.put(new ANTLRHashString("then", this), new Integer(41));
}

/**
 * Generated token dispatch loop: predicts the next rule from one or two
 * characters of lookahead (LA(1)/LA(2)), invokes the matching mXXX rule,
 * and returns the token it built. SKIP tokens (whitespace, comments)
 * loop back via "continue tryAgain". Lexical errors are reported and one
 * character is consumed so scanning can resume.
 */
public Token nextToken() throws TokenStreamException {
	Token theRetToken=null;
tryAgain:
	for (;;) {
		Token _token = null;
		int _ttype = Token.INVALID_TYPE;
		resetText();
		try {   // for char stream error handling
			try {   // for lexical error handling
				switch ( LA(1)) {
				case '\t':  case '\n':  case '\u000c':  case '\r':
				case ' ':
				{
					mWS(true);
					theRetToken=_returnToken;
					break;
				}
				case '@':
				{
					mAT(true);
					theRetToken=_returnToken;
					break;
				}
				case '|':
				{
					mBAR(true);
					theRetToken=_returnToken;
					break;
				}
				case ',':
				{
					mCOMMA(true);
					theRetToken=_returnToken;
					break;
				}
				case '=':
				{
					mEQUAL(true);
					theRetToken=_returnToken;
					break;
				}
				case '#':
				{
					mHASH(true);
					theRetToken=_returnToken;
					break;
				}
				case '{':
				{
					mLBRACE(true);
					theRetToken=_returnToken;
					break;
				}
				case '[':
				{
					mLBRACK(true);
					theRetToken=_returnToken;
					break;
				}
				case '(':
				{
					mLPAREN(true);
					theRetToken=_returnToken;
					break;
				}
				case '+':
				{
					mPLUS(true);
					theRetToken=_returnToken;
					break;
				}
				case '}':
				{
					mRBRACE(true);
					theRetToken=_returnToken;
					break;
				}
				case ']':
				{
					mRBRACK(true);
					theRetToken=_returnToken;
					break;
				}
				case ')':
				{
					mRPAREN(true);
					theRetToken=_returnToken;
					break;
				}
				case ';':
				{
					mSEMI(true);
					theRetToken=_returnToken;
					break;
				}
				case '*':
				{
					mSTAR(true);
					theRetToken=_returnToken;
					break;
				}
				case '0':  case '1':  case '2':  case '3':
				case '4':  case '5':  case '6':  case '7':
				case '8':  case '9':
				{
					mRANGE_OR_INT(true);
					theRetToken=_returnToken;
					break;
				}
				case '\'':
				{
					mSTRING(true);
					theRetToken=_returnToken;
					break;
				}
				case 'A':  case 'B':  case 'C':  case 'D':
				case 'E':  case 'F':  case 'G':  case 'H':
				case 'I':  case 'J':  case 'K':  case 'L':
				case 'M':  case 'N':  case 'O':  case 'P':
				case 'Q':  case 'R':  case 'S':  case 'T':
				case 'U':  case 'V':  case 'W':  case 'X':
				case 'Y':  case 'Z':  case '_':  case 'a':
				case 'b':  case 'c':  case 'd':  case 'e':
				case 'f':  case 'g':  case 'h':  case 'i':
				case 'j':  case 'k':  case 'l':  case 'm':
				case 'n':  case 'o':  case 'p':  case 'q':
				case 'r':  case 's':  case 't':  case 'u':
				case 'v':  case 'w':  case 'x':  case 'y':
				case 'z':
				{
					mIDENT(true);
					theRetToken=_returnToken;
					break;
				}
				default:
					// Two-character lookahead disambiguation for operators
					// and comments. NOTE(review): the first predicate also
					// accepts "-/" and "/-" as SL_COMMENT starts; presumably
					// mSL_COMMENT then rejects them — generated behavior,
					// confirm against the grammar if it matters.
					if ((LA(1)=='-'||LA(1)=='/') && (LA(2)=='-'||LA(2)=='/')) {
						mSL_COMMENT(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='/') && (LA(2)=='*')) {
						mML_COMMENT(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='-') && (LA(2)=='>')) {
						mARROW(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)==':') && (LA(2)==':')) {
						mCOLON_COLON(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)==':') && (LA(2)=='=')) {
						mCOLON_EQUAL(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='.') && (LA(2)=='.')) {
						mDOTDOT(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='>') && (LA(2)=='=')) {
						mGREATER_EQUAL(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='<') && (LA(2)=='=')) {
						mLESS_EQUAL(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='<') && (LA(2)=='>')) {
						mNOT_EQUAL(true);
						theRetToken=_returnToken;
					}
					// Single-character fallbacks for prefixes of the pairs above.
					else if ((LA(1)==':') && (true)) {
						mCOLON(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='.') && (true)) {
						mDOT(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='>') && (true)) {
						mGREATER(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='<') && (true)) {
						mLESS(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='-') && (true)) {
						mMINUS(true);
						theRetToken=_returnToken;
					}
					else if ((LA(1)=='/') && (true)) {
						mSLASH(true);
						theRetToken=_returnToken;
					}
				else {
					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
				}
				}
				if ( _returnToken==null ) continue tryAgain; // found SKIP token
				_ttype = _returnToken.getType();
				_returnToken.setType(_ttype);
				return _returnToken;
			}
			catch (RecognitionException e) {
				// Report and skip one character so lexing can continue.
				reportError(e);
				consume();
			}
		}
		catch (CharStreamException cse) {
			if ( cse instanceof CharStreamIOException ) {
				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
			}
			else {
				throw new TokenStreamException(cse.getMessage());
			}
		}
	}
}

	/**
	 * Generated catch-all vocabulary rule: matches any single character in
	 * the range '\3'..'\377'. Not referenced from the dispatch switch above.
	 */
	protected final void mVOCAB(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
		int _ttype; Token _token=null; int _begin=text.length();
		_ttype = VOCAB;
		int _saveIndex;
		
		try {      // for error handling
			matchRange('\3','\377');
		}
		catch (RecognitionException ex) {
			if (inputState.guessing==0) {
				// Report, then resynchronize on the generated follow set.
				reportError(ex);
				consume();
				consumeUntil(_tokenSet_0);
			} else {
			  throw ex;
			}
		}
		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
			_token = makeToken(_ttype);
			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
		}
		_returnToken = _token;
	}
	
	/**
	 * Whitespace rule: space, tab, form feed, or a newline ("\r\n", "\r",
	 * or "\n" — newline() updates the line counter). The token type is set
	 * to Token.SKIP so nextToken() discards it and keeps scanning.
	 */
	public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
		int _ttype; Token _token=null; int _begin=text.length();
		_ttype = WS;
		int _saveIndex;
		
		try {      // for error handling
			{
			switch ( LA(1)) {
			case ' ':
			{
				match(' ');
				break;
			}
			case '\t':
			{
				match('\t');
				break;
			}
			case '\u000c':
			{
				match('\f');
				break;
			}
			case '\n':  case '\r':
			{
				{
				// Order matters: try "\r\n" before bare "\r".
				if ((LA(1)=='\r') && (LA(2)=='\n')) {
					match("\r\n");
				}
				else if ((LA(1)=='\r') && (true)) {
					match('\r');
				}
				else if ((LA(1)=='\n')) {
					match('\n');
				}
				else {
					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
				}
				
				}
				if ( inputState.guessing==0 ) {
					newline();
				}
				break;
			}
			default:
			{
				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
			}
			}
			}
			if ( inputState.guessing==0 ) {
				_ttype = Token.SKIP;
			}
		}
		catch (RecognitionException ex) {
			if (inputState.guessing==0) {
				reportError(ex);
				consume();
				consumeUntil(_tokenSet_0);
			} else {
			  throw ex;
			}
		}
		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
			_token = makeToken(_ttype);
			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
		}
		_returnToken = _token;
	}
	
	/**
	 * Single-line comment rule: starts with "//" or "--".
	 * NOTE(review): the source chunk ends here (page 1 of 4); the rest of
	 * this method and the remaining rules are on the following pages.
	 */
	public final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
		int _ttype; Token _token=null; int _begin=text.length();
		_ttype = SL_COMMENT;
		int _saveIndex;
		
		try {      // for error handling
			{
			switch ( LA(1)) {
			case '/':
			{
				match("//");
				break;
			}
			case '-':
			{
				match("--");
				break;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -