# lex.py
            error = 1
        else:
            # Validate each (statename, 'exclusive'|'inclusive') pair; an
            # example 'states' declaration appears in the usage sketch at the
            # end of this file.
            for s in states:
                if not isinstance(s, types.TupleType) or len(s) != 2:
                    print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
                    error = 1
                    continue
                name, statetype = s
                if not isinstance(name, types.StringType):
                    print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
                    error = 1
                    continue
                if not (statetype == 'inclusive' or statetype == 'exclusive'):
                    print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
                    error = 1
                    continue
                if stateinfo.has_key(name):
                    print >>sys.stderr, "lex: state '%s' already defined." % name
                    error = 1
                    continue
                stateinfo[name] = statetype

    # Get a list of symbols with the t_ prefix
    tsymbols = [f for f in ldict.keys() if f[:2] == 't_']

    # Now build up a list of functions and a list of strings
    funcsym = {}         # Symbols defined as functions
    strsym = {}          # Symbols defined as strings
    toknames = {}        # Mapping of symbols to token names

    for s in stateinfo.keys():
        funcsym[s] = []
        strsym[s] = []

    ignore = {}          # Ignore strings by state
    errorf = {}          # Error functions by state

    if len(tsymbols) == 0:
        raise SyntaxError, "lex: no rules of the form t_rulename are defined."

    for f in tsymbols:
        t = ldict[f]
        states, tokname = _statetoken(f, stateinfo)
        toknames[f] = tokname

        if callable(t):
            for s in states:
                funcsym[s].append((f, t))
        elif isinstance(t, types.StringType) or isinstance(t, types.UnicodeType):
            for s in states:
                strsym[s].append((f, t))
        else:
            print >>sys.stderr, "lex: %s not defined as a function or string" % f
            error = 1

    # Sort the functions by line number
    for f in funcsym.values():
        f.sort(lambda x, y: cmp(x[1].func_code.co_firstlineno, y[1].func_code.co_firstlineno))

    # Sort the strings by regular expression length (longest first)
    for s in strsym.values():
        s.sort(lambda x, y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))

    regexs = {}

    # Build the master regular expressions
    for state in stateinfo.keys():
        regex_list = []

        # Add rules defined by functions first
        for fname, f in funcsym[state]:
            line = f.func_code.co_firstlineno
            file = f.func_code.co_filename
            files[file] = None
            tokname = toknames[fname]

            ismethod = isinstance(f, types.MethodType)

            if not optimize:
                nargs = f.func_code.co_argcount
                if ismethod:
                    reqargs = 2
                else:
                    reqargs = 1
                if nargs > reqargs:
                    print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file, line, f.__name__)
                    error = 1
                    continue

                if nargs < reqargs:
                    print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file, line, f.__name__)
                    error = 1
                    continue

                if tokname == 'ignore':
                    print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file, line, f.__name__)
                    error = 1
                    continue

            if tokname == 'error':
                errorf[state] = f
                continue

            if f.__doc__:
                if not optimize:
                    try:
                        c = re.compile("(?P<%s>%s)" % (f.__name__, f.__doc__), re.VERBOSE | reflags)
                        if c.match(""):
                            print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file, line, f.__name__)
                            error = 1
                            continue
                    except re.error, e:
                        print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file, line, f.__name__, e)
                        if '#' in f.__doc__:
                            print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file, line, f.__name__)
                        error = 1
                        continue

                    if debug:
                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__, f.__doc__, state)
                # Okay. The regular expression seemed okay. Let's append it to
                # the master regular expression we're building.
                regex_list.append("(?P<%s>%s)" % (f.__name__, f.__doc__))
            else:
                print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file, line, f.__name__)

        # Now add all of the simple rules
        for name, r in strsym[state]:
            tokname = toknames[name]

            if tokname == 'ignore':
                if "\\" in r:
                    print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
                ignore[state] = r
                continue

            if not optimize:
                if tokname == 'error':
                    raise SyntaxError, "lex: Rule '%s' must be defined as a function" % name

                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
                    print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name, tokname)
                    error = 1
                    continue

                try:
                    c = re.compile("(?P<%s>%s)" % (name, r), re.VERBOSE | reflags)
                    if c.match(""):
                        print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
                        error = 1
                        continue
                except re.error, e:
                    print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name, e)
                    if '#' in r:
                        print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
                    error = 1
                    continue

                if debug:
                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name, r, state)

            regex_list.append("(?P<%s>%s)" % (name, r))

        if not regex_list:
            print >>sys.stderr, "lex: No rules defined for state '%s'" % state
            error = 1

        regexs[state] = regex_list

    if not optimize:
        for f in files.keys():
            if not _validate_file(f):
                error = 1

    if error:
        raise SyntaxError, "lex: Unable to build lexer."

    # From this point forward, we're reasonably confident that we can build
    # the lexer. No more errors will be generated, but there might be some
    # warning messages.

    # Build the master regular expressions
    for state in regexs.keys():
        lexre, re_text = _form_master_re(regexs[state], reflags, ldict, toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        if debug:
            for i in range(len(re_text)):
                print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])

    # For inclusive states, we need to add the INITIAL state
    for state, type in stateinfo.items():
        if state != "INITIAL" and type == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]

    # Set up ignore variables
    lexobj.lexstateignore = ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL", "")

    # Set up error functions
    lexobj.lexstateerrorf = errorf
    lexobj.lexerrorf = errorf.get("INITIAL", None)
    if warn and not lexobj.lexerrorf:
        print >>sys.stderr, "lex: Warning. no t_error rule is defined."

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if warn and not errorf.has_key(s):
                print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
            if warn and not ignore.has_key(s) and lexobj.lexignore:
                print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
        elif stype == 'inclusive':
            if not errorf.has_key(s):
                errorf[s] = errorf.get("INITIAL", None)
            if not ignore.has_key(s):
                ignore[s] = ignore.get("INITIAL", "")

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab)

    return lexobj

# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------

def runmain(lexer=None, data=None):
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            print "Reading from standard input (type EOF to end):"
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)

    if lexer:
        _token = lexer.token
    else:
        _token = token

    while 1:
        tok = _token()
        if not tok:
            break
        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno, tok.lexpos)

# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator attaches a regular expression to a rule function, for cases
# where the docstring cannot be written directly (for example, when the
# pattern is built at runtime from other strings).
# -----------------------------------------------------------------------------

def TOKEN(r):
    def set_doc(f):
        f.__doc__ = r
        return f
    return set_doc

# Alternative spelling of the TOKEN decorator
Token = TOKEN
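
# A minimal usage sketch for @TOKEN, kept in a comment so this module stays
# import-clean. The 'identifier' pattern and the t_ID rule below are
# illustrative assumptions, not definitions made elsewhere in this file:
#
#     from ply.lex import TOKEN
#
#     digit      = r'([0-9])'
#     nondigit   = r'([_A-Za-z])'
#     identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t    # the regex came from @TOKEN, not from this docstring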
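
# -----------------------------------------------------------------------------
# Usage sketch (illustrative only -- commented out, and not part of PLY
# itself; the token names, patterns, and the 'ccode' state are invented for
# demonstration). It shows the kind of lexer module that lex() above
# consumes: a 'states' declaration validated by the state-handling code,
# string rules (sorted longest-pattern-first), function rules (tried in
# definition order), and the per-state ignore/error rules that exclusive
# states need to avoid the warnings above.
#
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS', 'LBRACE', 'RBRACE', 'CODE')
#
#     # Each state is a (statename, 'exclusive'|'inclusive') tuple
#     states = (
#         ('ccode', 'exclusive'),
#     )
#
#     # Simple rule: a string whose name gives the token type
#     t_PLUS = r'\+'
#
#     # Function rule: the docstring is the regular expression
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_LBRACE(t):
#         r'\{'
#         t.lexer.begin('ccode')      # switch to the exclusive state
#
#     # Rules prefixed with a state name apply only in that state
#     def t_ccode_CODE(t):
#         r'[^{}]+'
#         return t
#
#     def t_ccode_RBRACE(t):
#         r'\}'
#         t.lexer.begin('INITIAL')    # return to the default state
#
#     t_ignore = ' \t'
#     t_ccode_ignore = ' \t'          # exclusive states need their own ignore
#
#     def t_error(t):
#         print "Illegal character %r" % t.value[0]
#         t.lexer.skip(1)
#
#     def t_ccode_error(t):           # ...and their own error rule
#         t.lexer.skip(1)
#
#     lexer = lex.lex()
#     if __name__ == '__main__':
#         lex.runmain(lexer)          # tokenize sys.argv[1] or stdin
# -----------------------------------------------------------------------------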