
📄 TokenMarker.java

📁 An open-source text editor written in Java, with many useful features.
💻 JAVA
📖 Page 1 of 2
/*
 * TokenMarker.java - Tokenizes lines of text
 * :tabSize=8:indentSize=8:noTabs=false:
 * :folding=explicit:collapseFolds=1:
 *
 * Copyright (C) 1998, 2002 Slava Pestov
 * Copyright (C) 1999, 2000 mike dillon
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

package org.gjt.sp.jedit.syntax;

//{{{ Imports
import gnu.regexp.*;
import javax.swing.text.Segment;
import java.util.*;
import org.gjt.sp.jedit.search.CharIndexedSegment;
import org.gjt.sp.jedit.*;
import org.gjt.sp.util.Log;
//}}}

/**
 * A token marker splits lines of text into tokens. Each token carries
 * a length field and an identification tag that can be mapped to a color
 * or font style for painting that token.
 *
 * @author Slava Pestov, mike dillon
 * @version $Id: TokenMarker.java,v 1.47 2003/01/31 04:49:31 spestov Exp $
 *
 * @see org.gjt.sp.jedit.syntax.Token
 * @see org.gjt.sp.jedit.syntax.TokenHandler
 */
public class TokenMarker
{
	//{{{ TokenMarker constructor
	public TokenMarker()
	{
		ruleSets = new Hashtable(64);
	} //}}}

	//{{{ getName() method
	public String getName()
	{
		return name;
	} //}}}

	//{{{ setName() method
	public void setName(String name)
	{
		if (name == null)
			throw new NullPointerException();

		this.name = name;
		rulePfx = name.concat("::");
	} //}}}

	//{{{ addRuleSet() method
	public void addRuleSet(String setName, ParserRuleSet rules)
	{
		if (rules == null)
			return;

		if (setName == null)
			setName = "MAIN";

		ruleSets.put(rulePfx.concat(setName), rules);

		if (setName.equals("MAIN"))
			mainRuleSet = rules;
	} //}}}

	//{{{ getMainRuleSet() method
	public ParserRuleSet getMainRuleSet()
	{
		return mainRuleSet;
	} //}}}

	//{{{ getRuleSet() method
	public ParserRuleSet getRuleSet(String setName)
	{
		ParserRuleSet rules;

		rules = (ParserRuleSet) ruleSets.get(setName);

		if (rules == null && !setName.startsWith(rulePfx))
		{
			int delim = setName.indexOf("::");
			if(delim == -1)
			{
				byte id = Token.stringToToken(setName);
				rules = ParserRuleSet.getStandardRuleSet(id);
			}
			else
			{
				String modeName = setName.substring(0, delim);

				Mode mode = jEdit.getMode(modeName);
				if(mode == null)
				{
					Log.log(Log.ERROR,TokenMarker.class,
						"Unknown edit mode: " + modeName);
					rules = null;
				}
				else
				{
					TokenMarker marker = mode.getTokenMarker();
					rules = marker.getRuleSet(setName);
				}
			}

			// store external ParserRuleSet in the local hashtable
			// for faster lookups later
			ruleSets.put(setName, rules);
		}

		if (rules == null)
		{
			Log.log(Log.ERROR,this,"Unresolved delegate target: " + setName);
			return ParserRuleSet.getStandardRuleSet(Token.INVALID);
		}
		else
			return rules;
	} //}}}

	//{{{ markTokens() method
	/**
	 * Do not call this method directly; call Buffer.markTokens() instead.
	 */
	public LineContext markTokens(LineContext prevContext,
		TokenHandler tokenHandler, Segment line)
	{
		//{{{ Set up some instance variables
		// this is to avoid having to pass around lots and lots of
		// parameters.
		this.tokenHandler = tokenHandler;
		this.line = line;

		lastOffset = line.offset;
		lineLength = line.count + line.offset;

		context = new LineContext();

		if(prevContext == null)
			context.rules = getMainRuleSet();
		else
		{
			context.parent = prevContext.parent;
			context.inRule = prevContext.inRule;
			context.rules = prevContext.rules;
		}

		keywords = context.rules.getKeywords();
		escaped = false;

		seenWhitespaceEnd = false;
		whitespaceEnd = line.offset;
		//}}}

		//{{{ Main parser loop
		ParserRule rule;
		int terminateChar = context.rules.getTerminateChar();
		boolean terminated = false;

main_loop:	for(pos = line.offset; pos < lineLength; pos++)
		{
			//{{{ check if we have to stop parsing
			if(terminateChar >= 0 && pos - line.offset >= terminateChar
				&& !terminated)
			{
				terminated = true;
				context = new LineContext(ParserRuleSet
					.getStandardRuleSet(context.rules
					.getDefault()),context);
				keywords = context.rules.getKeywords();
			} //}}}

			//{{{ check for end of delegate
			if(context.parent != null)
			{
				rule = context.parent.inRule;
				if(rule != null)
				{
					if(checkDelegateEnd(rule))
					{
						seenWhitespaceEnd = true;
						continue main_loop;
					}
				}
			} //}}}

			//{{{ check every rule
			char ch = line.array[pos];

			rule = context.rules.getRules(ch);
			while(rule != null)
			{
				// stop checking rules if there was a match
				if (handleRule(rule,false))
				{
					seenWhitespaceEnd = true;
					continue main_loop;
				}

				rule = rule.next;
			} //}}}

			//{{{ check if current character is a word separator
			if(Character.isWhitespace(ch))
			{
				if(!seenWhitespaceEnd)
					whitespaceEnd = pos + 1;

				if(context.inRule != null)
					handleRule(context.inRule,true);

				handleNoWordBreak();

				markKeyword(false);

				if(lastOffset != pos)
				{
					tokenHandler.handleToken(
						context.rules.getDefault(),
						lastOffset - line.offset,
						pos - lastOffset,
						context);
				}

				tokenHandler.handleToken(
					(ch == '\t' ? Token.TAB
					: Token.WHITESPACE),pos - line.offset,1,
					context);
				lastOffset = pos + 1;

				escaped = false;
			}
			else
			{
				if(keywords != null || context.rules.getRuleCount() != 0)
				{
					String noWordSep = context.rules.getNoWordSep();

					if(!Character.isLetterOrDigit(ch)
						&& noWordSep.indexOf(ch) == -1)
					{
						if(context.inRule != null)
							handleRule(context.inRule,true);

						handleNoWordBreak();

						markKeyword(true);

						tokenHandler.handleToken(
							context.rules.getDefault(),
							lastOffset - line.offset,1,
							context);
						lastOffset = pos + 1;
					}
				}

				seenWhitespaceEnd = true;
				escaped = false;
			} //}}}
		} //}}}

		//{{{ Mark all remaining characters
		pos = lineLength;

		if(context.inRule != null)
			handleRule(context.inRule,true);

		handleNoWordBreak();

		markKeyword(true);

		if(context.parent != null)
		{
			rule = context.parent.inRule;
			if((rule != null && (context.parent.inRule.action
				& ParserRule.NO_LINE_BREAK) == ParserRule.NO_LINE_BREAK)
				|| terminated)
			{
				context = context.parent;
				keywords = context.rules.getKeywords();
				context.inRule = null;
			}
		} //}}}

		tokenHandler.handleToken(Token.END,pos - line.offset,0,context);

		return context.intern();
	} //}}}

	//{{{ Private members

	//{{{ Instance variables
	private Hashtable ruleSets;
	private String name;
	private String rulePfx;
	private ParserRuleSet mainRuleSet;

	// Instead of passing these around to each method, we just store them
	// as instance variables. Note that this is not thread-safe.
	private TokenHandler tokenHandler;
	private Segment line;
	private LineContext context;
	private KeywordMap keywords;
	private Segment pattern = new Segment();
	private int lastOffset;
	private int lineLength;
	private int pos;

	private boolean escaped;

	private int whitespaceEnd;
	private boolean seenWhitespaceEnd;
	//}}}

	//{{{ checkDelegateEnd() method
	private boolean checkDelegateEnd(ParserRule rule)
	{
		if(rule.end == null)
			return false;

		LineContext tempContext = context;
		context = context.parent;
		keywords = context.rules.getKeywords();
		boolean tempEscaped = escaped;
		boolean b = handleRule(rule,true);
		context = tempContext;
		keywords = context.rules.getKeywords();

		if(b && !tempEscaped)
		{
			if(context.inRule != null)
				handleRule(context.inRule,true);

			markKeyword(true);

			context = (LineContext)context.parent.clone();

			tokenHandler.handleToken(
				(context.inRule.action & ParserRule.EXCLUDE_MATCH)
				== ParserRule.EXCLUDE_MATCH
				? context.rules.getDefault()
				: context.inRule.token,
				pos - line.offset,pattern.count,context);

			keywords = context.rules.getKeywords();
			context.inRule = null;
			lastOffset = pos + pattern.count;

			// move pos to last character of match sequence
			pos += (pattern.count - 1);

			return true;
		}

		// check escape rule of parent
		rule = context.parent.rules.getEscapeRule();
		if(rule != null && handleRule(rule,false))
			return true;

		return false;
	} //}}}

	//{{{ handleRule() method
	/**
	 * Checks if the rule matches the line at the current position
	 * and handles the rule if it does match
	 */
	private boolean handleRule(ParserRule checkRule, boolean end)
	{
		//{{{ Some rules can only match in certain locations
		if(!end)
		{
			if(Character.toUpperCase(checkRule.hashChar)
				!= Character.toUpperCase(line.array[pos]))
			{
				return false;
			}

			if((checkRule.action & ParserRule.AT_LINE_START)
				== ParserRule.AT_LINE_START)
			{
				if((((checkRule.action & ParserRule.MARK_PREVIOUS) != 0) ?
					lastOffset : pos) != line.offset)
					return false;
			}
			else if((checkRule.action & ParserRule.AT_WHITESPACE_END)
				== ParserRule.AT_WHITESPACE_END)
			{
				if((((checkRule.action & ParserRule.MARK_PREVIOUS) != 0) ?
					lastOffset : pos) != whitespaceEnd)
					return false;
			}
			else if((checkRule.action & ParserRule.AT_WORD_START)
				== ParserRule.AT_WORD_START)
			{
				if((((checkRule.action & ParserRule.MARK_PREVIOUS) != 0) ?
					lastOffset : pos) != lastOffset)
					return false;
			}
		} //}}}

		int matchedChars = 1;

		//{{{ See if the rule's start or end sequence matches here
		if(!end || (checkRule.action & ParserRule.MARK_FOLLOWING) == 0)
		{
			// the end cannot be a regular expression
			if((checkRule.action & ParserRule.REGEXP) == 0 || end)
			{
				pattern.array = (end ? checkRule.end : checkRule.start);
				pattern.offset = 0;
				pattern.count = pattern.array.length;
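
A note on how this class is meant to be driven (the listing stops at the page break, partway through handleRule()): markTokens() never builds a token list of its own. It pushes each run of characters to the supplied TokenHandler through handleToken(id, offset, length, context) and returns only the interned LineContext, and that returned context is what a caller feeds back in as prevContext for the following line, which is how delegated rule sets and other multi-line constructs survive line boundaries. The sketch below is a minimal illustration of that contract only, and it rests on assumptions that go beyond this listing: that TokenHandler declares exactly the four-argument handleToken() called above (the real interface may declare more), and that LineContext is visible to callers as TokenMarker.LineContext. HighlightDriver and its members are hypothetical names, not part of jEdit, and the javadoc above says real code should go through Buffer.markTokens() rather than calling markTokens() directly.

import javax.swing.text.Segment;

import org.gjt.sp.jedit.syntax.Token;
import org.gjt.sp.jedit.syntax.TokenHandler;
import org.gjt.sp.jedit.syntax.TokenMarker;

// Hypothetical driver: shows how the LineContext returned for one line is
// threaded back in as prevContext for the next line.
public class HighlightDriver
{
	// Assumption: TokenHandler declares exactly the four-argument callback
	// that markTokens() invokes in the listing above.
	private final TokenHandler dumper = new TokenHandler()
	{
		public void handleToken(byte id, int offset, int length,
			TokenMarker.LineContext context)
		{
			// Token.END is the zero-length marker that closes every line
			if(id != Token.END)
			{
				System.out.println("token id=" + id + " offset=" + offset
					+ " length=" + length);
			}
		}
	};

	public void highlight(TokenMarker marker, String[] lines)
	{
		// null prevContext => the first line starts in the MAIN rule set
		TokenMarker.LineContext prev = null;
		Segment seg = new Segment();

		for(int i = 0; i < lines.length; i++)
		{
			char[] chars = lines[i].toCharArray();
			seg.array = chars;
			seg.offset = 0;
			seg.count = chars.length;

			// the context returned for line i carries open delegates and
			// unterminated rules into line i + 1
			prev = marker.markTokens(prev, dumper, seg);
		}
	}
}

With real Token ids, the same loop is essentially what a painter would do with the (offset, length) runs it is handed, mapping each id to a color or font style.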
