GOCLLexer.java
// $ANTLR 2.7.4: "ocl.g" -> "GOCLLexer.java"$

/*
 * USE - UML based specification environment
 * Copyright (C) 1999-2004 Mark Richters, University of Bremen
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

package org.tzi.use.parser.ocl;

import java.io.InputStream;
import java.io.PrintWriter;
import java.io.Reader;
import java.util.Hashtable;

import antlr.ANTLRException;
import antlr.ANTLRHashString;
import antlr.ByteBuffer;
import antlr.CharBuffer;
import antlr.CharScanner;
import antlr.CharStreamException;
import antlr.CharStreamIOException;
import antlr.CommonToken;
import antlr.InputBuffer;
import antlr.LexerSharedInputState;
import antlr.MismatchedCharException;
import antlr.NoViableAltForCharException;
import antlr.RecognitionException;
import antlr.SemanticException;
import antlr.Token;
import antlr.TokenStream;
import antlr.TokenStreamException;
import antlr.TokenStreamIOException;
import antlr.TokenStreamRecognitionException;
import antlr.collections.impl.BitSet;

import org.tzi.use.parser.MyToken;
import org.tzi.use.parser.ParseErrorHandler;
import org.tzi.use.util.Log;
import org.tzi.use.util.StringUtil;

public class GOCLLexer extends antlr.CharScanner
    implements GOCLLexerTokenTypes, TokenStream {

    protected int fTokColumn = 1;
    private PrintWriter fErr;
    private ParseErrorHandler fParseErrorHandler;

    public void consume() throws CharStreamException {
        if (inputState.guessing == 0) {
            if (text.length() == 0) {
                // remember token start column
                fTokColumn = getColumn();
            }
        }
        super.consume();
    }

    public String getFilename() {
        return fParseErrorHandler.getFileName();
    }

    protected Token makeToken(int t) {
        MyToken token = new MyToken(getFilename(), getLine(), fTokColumn);
        token.setType(t);
        if (t == EOF)
            token.setText("end of file or input");
        return token;
    }

    public void reportError(RecognitionException ex) {
        fParseErrorHandler.reportError(
            ex.getLine() + ":" + ex.getColumn() + ": " + ex.getMessage());
    }

    /**
     * Returns true if word is a reserved keyword.
     */
    public boolean isKeyword(String word) {
        ANTLRHashString s = new ANTLRHashString(word, this);
        boolean res = literals.get(s) != null;
        Log.trace(this, "keyword " + word + ": " + res);
        return res;
    }

    public void traceIn(String rname) throws CharStreamException {
        traceIndent();
        traceDepth += 1;
        System.out.println("> lexer " + rname + ": c == '"
            + StringUtil.escapeChar(LA(1), '\'') + "'");
    }

    public void traceOut(String rname) throws CharStreamException {
        traceDepth -= 1;
        traceIndent();
        System.out.println("< lexer " + rname + ": c == '"
            + StringUtil.escapeChar(LA(1), '\'') + "'");
    }

    public void init(ParseErrorHandler handler) {
        fParseErrorHandler = handler;
    }

    public GOCLLexer(InputStream in) {
        this(new ByteBuffer(in));
    }

    public GOCLLexer(Reader in) {
        this(new CharBuffer(in));
    }

    public GOCLLexer(InputBuffer ib) {
        this(new LexerSharedInputState(ib));
    }

    public GOCLLexer(LexerSharedInputState state) {
        super(state);
        caseSensitiveLiterals = true;
        setCaseSensitive(true);
        literals = new Hashtable();
        literals.put(new ANTLRHashString("oclIsTypeOf", this), new Integer(39));
        literals.put(new ANTLRHashString("oclUndefined", this), new Integer(57));
        literals.put(new ANTLRHashString("let", this), new Integer(9));
        literals.put(new ANTLRHashString("if", this), new Integer(40));
        literals.put(new ANTLRHashString("pre", this), new Integer(36));
        literals.put(new ANTLRHashString("endif", this), new Integer(43));
        literals.put(new ANTLRHashString("oclAsType", this), new Integer(37));
        literals.put(new ANTLRHashString("allInstances", this), new Integer(29));
        literals.put(new ANTLRHashString("then", this), new Integer(41));
        literals.put(new ANTLRHashString("in", this), new Integer(11));
        literals.put(new ANTLRHashString("or", this), new Integer(13));
        literals.put(new ANTLRHashString("xor", this), new Integer(14));
        literals.put(new ANTLRHashString("Tuple", this), new Integer(58));
        literals.put(new ANTLRHashString("Bag", this), new Integer(52));
        literals.put(new ANTLRHashString("else", this), new Integer(42));
        literals.put(new ANTLRHashString("Collection", this), new Integer(59));
        literals.put(new ANTLRHashString("true", this), new Integer(44));
        literals.put(new ANTLRHashString("div", this), new Integer(25));
        literals.put(new ANTLRHashString("Sequence", this), new Integer(51));
        literals.put(new ANTLRHashString("implies", this), new Integer(12));
        literals.put(new ANTLRHashString("and", this), new Integer(15));
        literals.put(new ANTLRHashString("not", this), new Integer(26));
        literals.put(new ANTLRHashString("oclEmpty", this), new Integer(56));
        literals.put(new ANTLRHashString("Set", this), new Integer(50));
        literals.put(new ANTLRHashString("false", this), new Integer(45));
        literals.put(new ANTLRHashString("oclIsKindOf", this), new Integer(38));
        literals.put(new ANTLRHashString("iterate", this), new Integer(31));
    }

    public Token nextToken() throws TokenStreamException {
        Token theRetToken = null;
    tryAgain:
        for (;;) {
            Token _token = null;
            int _ttype = Token.INVALID_TYPE;
            resetText();
            try {   // for char stream error handling
                try {   // for lexical error handling
                    switch (LA(1)) {
                    case '\t': case '\n': case '\u000c': case '\r': case ' ':
                        { mWS(true); theRetToken = _returnToken; break; }
                    case '@':
                        { mAT(true); theRetToken = _returnToken; break; }
                    case '|':
                        { mBAR(true); theRetToken = _returnToken; break; }
                    case ',':
                        { mCOMMA(true); theRetToken = _returnToken; break; }
                    case '=':
                        { mEQUAL(true); theRetToken = _returnToken; break; }
                    case '#':
                        { mHASH(true); theRetToken = _returnToken; break; }
                    case '{':
                        { mLBRACE(true); theRetToken = _returnToken; break; }
                    case '[':
                        { mLBRACK(true); theRetToken = _returnToken; break; }
                    case '(':
                        { mLPAREN(true); theRetToken = _returnToken; break; }
                    case '+':
                        { mPLUS(true); theRetToken = _returnToken; break; }
                    case '}':
                        { mRBRACE(true); theRetToken = _returnToken; break; }
                    case ']':
                        { mRBRACK(true); theRetToken = _returnToken; break; }
                    case ')':
                        { mRPAREN(true); theRetToken = _returnToken; break; }
                    case ';':
                        { mSEMI(true); theRetToken = _returnToken; break; }
                    case '*':
                        { mSTAR(true); theRetToken = _returnToken; break; }
                    case '0': case '1': case '2': case '3': case '4':
                    case '5': case '6': case '7': case '8': case '9':
                        { mRANGE_OR_INT(true); theRetToken = _returnToken; break; }
                    case '\'':
                        { mSTRING(true); theRetToken = _returnToken; break; }
                    case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
                    case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
                    case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
                    case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
                    case 'Y': case 'Z': case '_':
                    case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
                    case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
                    case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
                    case 's': case 't': case 'u': case 'v': case 'w': case 'x':
                    case 'y': case 'z':
                        { mIDENT(true); theRetToken = _returnToken; break; }
                    default:
                        if ((LA(1)=='-'||LA(1)=='/') && (LA(2)=='-'||LA(2)=='/')) {
                            mSL_COMMENT(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='/') && (LA(2)=='*')) {
                            mML_COMMENT(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='-') && (LA(2)=='>')) {
                            mARROW(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)==':') && (LA(2)==':')) {
                            mCOLON_COLON(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)==':') && (LA(2)=='=')) {
                            mCOLON_EQUAL(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='.') && (LA(2)=='.')) {
                            mDOTDOT(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='>') && (LA(2)=='=')) {
                            mGREATER_EQUAL(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='<') && (LA(2)=='=')) {
                            mLESS_EQUAL(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='<') && (LA(2)=='>')) {
                            mNOT_EQUAL(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)==':') && (true)) {
                            mCOLON(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='.') && (true)) {
                            mDOT(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='>') && (true)) {
                            mGREATER(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='<') && (true)) {
                            mLESS(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='-') && (true)) {
                            mMINUS(true); theRetToken = _returnToken;
                        }
                        else if ((LA(1)=='/') && (true)) {
                            mSLASH(true); theRetToken = _returnToken;
                        }
                        else {
                            if (LA(1)==EOF_CHAR) {
                                uponEOF();
                                _returnToken = makeToken(Token.EOF_TYPE);
                            }
                            else {
                                throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
                            }
                        }
                    }
                    if ( _returnToken==null ) continue tryAgain; // found SKIP token
                    _ttype = _returnToken.getType();
                    _returnToken.setType(_ttype);
                    return _returnToken;
                }
                catch (RecognitionException e) {
                    reportError(e);
                    consume();
                }
            }
            catch (CharStreamException cse) {
                if ( cse instanceof CharStreamIOException ) {
                    throw new TokenStreamIOException(((CharStreamIOException)cse).io);
                }
                else {
                    throw new TokenStreamException(cse.getMessage());
                }
            }
        }
    }

    public final void mWS(boolean _createToken)
        throws RecognitionException, CharStreamException, TokenStreamException {
        int _ttype; Token _token = null; int _begin = text.length();
        _ttype = WS;
        int _saveIndex;

        try {   // for error handling
            {
            switch (LA(1)) {
            case ' ':      { match(' ');  break; }
            case '\t':     { match('\t'); break; }
            case '\u000c': { match('\f'); break; }
            case '\n': case '\r':
            {
                {
                if ((LA(1)=='\r') && (LA(2)=='\n')) {
                    match("\r\n");
                }
                else if ((LA(1)=='\r') && (true)) {
                    match('\r');
                }
                else if ((LA(1)=='\n')) {
                    match('\n');
                }
                else {
                    throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
                }
                }
                if ( inputState.guessing==0 ) {
                    newline();
                }
                break;
            }
            default:
            {
                throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
            }
            }
            }
            if ( inputState.guessing==0 ) {
                _ttype = Token.SKIP;
            }
        }
        catch (RecognitionException ex) {
            if (inputState.guessing==0) {
                reportError(ex);
                consume();
                consumeUntil(_tokenSet_0);
            } else {
                throw ex;
            }
        }
        if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
            _token = makeToken(_ttype);
            _token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
        }
        _returnToken = _token;
    }

    public final void mSL_COMMENT(boolean _createToken)
        throws RecognitionException, CharStreamException, TokenStreamException {
        int _ttype; Token _token = null; int _begin = text.length();
        _ttype = SL_COMMENT;
        int _saveIndex;

        try {   // for error handling
            {
            switch (LA(1)) {
            case '/': { match("//"); break; }
            case '-': { match("--"); break; }
            default:
            {
                throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
            }
            }
            }
            {
            _loop96:
            do {
                if ((_tokenSet_1.member(LA(1)))) {
                    { match(_tokenSet_1); }
                }
                else {
                    break _loop96;
                }
            } while (true);
            }
            {
            switch (LA(1)) {
            case '\n': {
                match('\n');
                break;
                // ... (the remainder of the generated lexer is truncated in the original listing)
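
For readers who want to exercise the lexer outside the full USE front end, here is a minimal usage sketch. It is not part of the generated file and relies only on what the listing above shows: the Reader constructor, init(ParseErrorHandler), and nextToken(). The ParseErrorHandler instance is taken as a parameter because its construction is not part of this listing; in USE it is normally supplied by the surrounding parser front end.

import java.io.StringReader;

import antlr.Token;
import antlr.TokenStreamException;

import org.tzi.use.parser.ParseErrorHandler;
import org.tzi.use.parser.ocl.GOCLLexer;

// Sketch only: prints one line per token produced by GOCLLexer.
public final class GOCLLexerDemo {

    public static void dumpTokens(String oclText, ParseErrorHandler handler)
            throws TokenStreamException {
        GOCLLexer lexer = new GOCLLexer(new StringReader(oclText));
        // init() must run before the first token is produced, because
        // makeToken() asks the handler for the current file name.
        lexer.init(handler);

        // The WS and comment rules yield SKIP tokens, so only real tokens
        // (keywords, identifiers, literals, operators) reach this loop.
        for (Token t = lexer.nextToken();
                t.getType() != Token.EOF_TYPE;
                t = lexer.nextToken()) {
            System.out.println(t.getType() + "\t" + t.getText());
        }
    }
}

Because makeToken() builds MyToken objects, each token returned this way also carries the file name, line, and start column recorded by consume().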