rpp.cpp
    const int identifier = cleanedLine.at(2); //skip "#" and "define"
    DefineDirective *defineDirective = 0;
    int replacementListStart;

    // check if this is a macro function
    if (cleanedLine.count() >= 4
        && m_tokenContainer.text(cleanedLine.at(3)) == "("
        && !isWhiteSpace(cleanedLine.at(3) - 1)) {
        MacroFunctionDefinition *macro;
        macro = createNode<MacroFunctionDefinition>(m_memoryPool, group);

        int tokenIndex = 4; //point to first argument or ')'
        QVector<int> macroParameterList;
        while (tokenIndex < cleanedLine.count()) {
            QByteArray currentText = m_tokenContainer.text(cleanedLine.at(tokenIndex));
            ++tokenIndex;
            if (currentText == ")")
                break;
            if (currentText == ",")
                continue;
            macroParameterList.append(cleanedLine.at(tokenIndex - 1));
        }
        macro->setParameters(TokenList(m_tokenContainer, macroParameterList));
        defineDirective = macro;
        replacementListStart = tokenIndex;
    } else {
        MacroDefinition *macro;
        macro = createNode<MacroDefinition>(m_memoryPool, group);
        defineDirective = macro;
        replacementListStart = 3;
    }

    Q_ASSERT(defineDirective);

    // This is a bit hackish.. we want the replacement list with whitespace
    // tokens, but cleanedLine() has already removed those. And we can't use
    // the original line, because that may contain escaped newline tokens.
    // So we remove the escaped newlines and search for the token number
    // given by cleanedLine.at(replacementListStart)
    QVector<int> replacementList;
    const QVector<int> noEscNewline = cleanEscapedNewLines(line);
    if (replacementListStart < cleanedLine.count()) {
        const int cleanedLineReplacementListStart = cleanedLine.at(replacementListStart);
        const int rListStart = noEscNewline.indexOf(cleanedLineReplacementListStart);
        if (rListStart != -1) {
            const int skipNewLineToken = 1;
            for (int i = rListStart; i < noEscNewline.count() - skipNewLineToken; ++i) {
                const int tokenContainerIndex = noEscNewline.at(i);
                const Type type = m_tokenTypeList.at(tokenContainerIndex);
                // Don't append comment tokens.
                if (type != Token_line_comment && type != Token_multiline_comment) {
                    replacementList.append(tokenContainerIndex);
                }
            }
        }
    }

    defineDirective->setTokenSection(line);
    defineDirective->setIdentifier(TokenList(m_tokenContainer, QVector<int>() << identifier));
    defineDirective->setReplacementList(TokenList(m_tokenContainer, replacementList));
    group->toItemComposite()->add(defineDirective);
    return true;
}
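/*
    For example, given the line

        #define MAX(a, b) ((a) > (b) ? (a) : (b))

    the '(' token follows the identifier with no whitespace token in between,
    so the #define parsing above creates a MacroFunctionDefinition with the
    parameters 'a' and 'b', and the replacement list starts after the ')'.
    By contrast, a line like

        #define MAX (a, b)

    has a whitespace token before the '(', so it becomes a plain
    MacroDefinition whose replacement list starts at the '(' token.
    (Token boundaries shown here are only an illustration; the exact split
    depends on the lexer.)
*/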
// # undef identifier newline
bool Preprocessor::parseUndefDirective(Item *group)
{
    Q_ASSERT(group->toItemComposite());
    const TokenSection tokenSection = readLine();
    const QVector<int> cleanedLine = cleanTokenRange(tokenSection);
    if (cleanedLine.count() < 3)
        return false;
    UndefDirective *undefDirective = createNode<UndefDirective>(m_memoryPool, group);
    group->toItemComposite()->add(undefDirective);
    undefDirective->setIdentifier(TokenList(m_tokenContainer, QVector<int>() << cleanedLine.at(2)));
    undefDirective->setTokenSection(tokenSection);
    return true;
}

//include pp-tokens new-line
bool Preprocessor::parseIncludeDirective(Item *group)
{
    // cout << "parseIncludeDirective" << endl;
    Q_ASSERT(group->toItemComposite());
    TokenSection tokenSection = readLine();
    if (tokenSection.count() == 0)
        return false;
    const TokenEngine::TokenContainer tokenContainer = tokenSection.tokenContainer(0);
    IncludeDirective *includeDirective = createNode<IncludeDirective>(m_memoryPool, group);
    group->toItemComposite()->add(includeDirective);
    includeDirective->setTokenSection(tokenSection);

    //remove whitespace and comment tokens
    TokenList tokenList(m_tokenContainer, cleanTokenRange(tokenSection));

    //iterate through the tokens, look for a string literal or a '<'.
    int tokenIndex = 0;
    const int endIndex = tokenList.count();
    while (tokenIndex < endIndex) {
        const int containerTokenIndex = tokenList.containerIndex(tokenIndex);
        if (m_tokenTypeList.at(containerTokenIndex) == Token_string_literal) {
            QByteArray tokenText = tokenList.text(tokenIndex);
            includeDirective->setFilename(tokenText.mid(1, tokenText.size() - 2)); //remove quotes
            includeDirective->setFilenameTokens(TokenEngine::TokenList(tokenContainer,
                                                QVector<int>() << containerTokenIndex));
            includeDirective->setIncludeType(IncludeDirective::QuoteInclude);
            break;
        } else if (tokenList.text(tokenIndex) == "<") {
            // We found a '<'; all following tokens until we read a
            // '>' are part of the file name.
            QByteArray filename;
            ++tokenIndex; //skip '<'
            QVector<int> filenameTokens;
            while (tokenIndex < endIndex) {
                const QByteArray tokenText = tokenList.text(tokenIndex);
                if (tokenText == ">")
                    break;
                filenameTokens.append(tokenList.containerIndex(tokenIndex));
                filename += tokenText;
                ++tokenIndex;
            }
            if (tokenIndex < endIndex) {
                includeDirective->setFilename(filename);
                includeDirective->setFilenameTokens(TokenEngine::TokenList(tokenContainer, filenameTokens));
                includeDirective->setIncludeType(IncludeDirective::AngleBracketInclude);
            }
            break;
        }
        ++tokenIndex;
    }
    return true;
}
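/*
    Example of the two include forms handled above: for

        #include "rpp.h"

    the cleaned token list contains a string literal token, so the quotes are
    stripped and the include type is set to QuoteInclude. For

        #include <QtCore/QString>

    there is no string literal; the loop collects every token between '<' and
    '>' (roughly "QtCore", "/" and "QString", depending on how the lexer splits
    them) and concatenates their text into the filename, with the include type
    set to AngleBracketInclude.
*/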
//# error pp-tokens[opt] new-line
bool Preprocessor::parseErrorDirective(Item *group)
{
    Q_ASSERT(group->toItemComposite());
    TokenSection tokenSection = readLine();
    if (tokenSection.count() == 0)
        return false;
    ErrorDirective *errorDirective = createNode<ErrorDirective>(m_memoryPool, group);
    group->toItemComposite()->add(errorDirective);
    errorDirective->setTokenSection(tokenSection);
    return true;
}

//# pragma pp-tokens[opt] new-line
bool Preprocessor::parsePragmaDirective(Item *group)
{
    Q_ASSERT(group->toItemComposite());
    TokenSection tokenSection = readLine();
    if (tokenSection.count() == 0)
        return false;
    PragmaDirective *pragmaDirective =
        createNode<PragmaDirective>(m_memoryPool, group);
    group->toItemComposite()->add(pragmaDirective);
    pragmaDirective->setTokenSection(tokenSection);
    return true;
}

/*
    Reads a preprocessor line from the source by advancing lexerTokenIndex
    and returning a TokenSection containing the read line. Text lines
    separated by an escaped newline are joined.
*/
TokenSection Preprocessor::readLine()
{
    const int startIndex = lexerTokenIndex;
    bool gotNewline = false;
    while (isValidIndex(lexerTokenIndex) && !gotNewline) {
        if (m_tokenTypeList.at(lexerTokenIndex) == Token_newline) {
            if (lexerTokenIndex == 0 || m_tokenTypeList.at(lexerTokenIndex - 1) != '\\') {
                gotNewline = true;
                break;
            }
        }
        ++lexerTokenIndex;
    }

    if (gotNewline)
        ++lexerTokenIndex; //include newline
    else
        emit error("Error", "Unexpected end of source");

    return TokenSection(m_tokenContainer, startIndex, lexerTokenIndex - startIndex);
}
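/*
    Note on readLine(): a newline token that is directly preceded by a '\'
    token does not terminate the line, so a directive continued over an
    escaped newline, for example

        #define LONG_MACRO(a) \
            do_something(a)

    is returned as one TokenSection covering both physical lines. The final
    (unescaped) newline token is included in the returned section.
*/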
/*
    Returns false if index is past the end of m_tokenContainer.
*/
inline bool Preprocessor::isValidIndex(const int index) const
{
    return (index < m_tokenContainer.count());
}

/*
    Returns true if the token at index is a whitespace token.
*/
inline bool Preprocessor::isWhiteSpace(const int index) const
{
    return (m_tokenTypeList.at(index) == Token_whitespaces);
}

/*
    Looks ahead from lexerTokenIndex, returns the token type found at the
    first token that is not a comment or whitespace token.
*/
Type Preprocessor::lookAhead() const
{
    const int index = skipWhiteSpaceAndComments();
    if (index == -1)
        return Token_eof;
    return m_tokenTypeList.at(index);
}

/*
    Looks ahead from lexerTokenIndex, returns the token type found at the
    first token that is not a comment, whitespace or '#' token.
*/
Type Preprocessor::lookAheadSkipHash() const
{
    const int index = skipWhiteSpaceCommentsHash();
    if (index == -1)
        return Token_eof;
    return m_tokenTypeList.at(index);
}

/*
    Returns the index for the first token after lexerTokenIndex that is not
    a whitespace or comment token.
*/
inline int Preprocessor::skipWhiteSpaceAndComments() const
{
    int index = lexerTokenIndex;
    if (!isValidIndex(index))
        return -1;
    while (m_tokenTypeList.at(index) == Token_whitespaces
           || m_tokenTypeList.at(index) == Token_comment
           || m_tokenTypeList.at(index) == Token_line_comment
           || m_tokenTypeList.at(index) == Token_multiline_comment) {
        ++index;
        if (!isValidIndex(index))
            return -1;
    }
    return index;
}

/*
    Returns the index for the first token after lexerTokenIndex that is not
    a whitespace, comment or '#' token.
*/
inline int Preprocessor::skipWhiteSpaceCommentsHash() const
{
    int index = lexerTokenIndex;
    if (!isValidIndex(index))
        return -1;
    while (m_tokenTypeList.at(index) == Token_whitespaces
           || m_tokenTypeList.at(index) == Token_comment
           || m_tokenTypeList.at(index) == Token_line_comment
           || m_tokenTypeList.at(index) == Token_multiline_comment
           || m_tokenTypeList.at(index) == Token_preproc) {
        ++index;
        if (!isValidIndex(index))
            return -1;
    }
    return index;
}

/*
    Removes escaped newlines from tokenSection. Both the escape token ('\')
    and the newline token ('\n') are removed.
*/
QVector<int> Preprocessor::cleanEscapedNewLines(const TokenSection &tokenSection) const
{
    QVector<int> indexList;
    int t = 0;
    const int numTokens = tokenSection.count();
    while (t < numTokens) {
        const int containerIndex = tokenSection.containerIndex(t);
        const int currentToken = t;
        ++t;

        //handle escaped newlines
        if (tokenSection.text(currentToken) == "\\"
            && currentToken + 1 < numTokens
            && m_tokenTypeList.at(containerIndex + 1) == Token_newline)
            continue;

        indexList.append(containerIndex);
    }
    return indexList;
}

/*
    Removes escaped newlines, whitespace and comment tokens from tokenSection.
*/
QVector<int> Preprocessor::cleanTokenRange(const TokenSection &tokenSection) const
{
    QVector<int> indexList;
    int t = 0;
    const int numTokens = tokenSection.count();
    while (t < numTokens) {
        const int containerIndex = tokenSection.containerIndex(t);
        const Type tokenType = m_tokenTypeList.at(containerIndex);
        const int currentToken = t;
        ++t;

        if (tokenType == Token_whitespaces
            || tokenType == Token_line_comment
            || tokenType == Token_multiline_comment)
            continue;

        //handle escaped newlines
        if (tokenSection.text(currentToken) == "\\"
            && currentToken + 1 < numTokens
            && m_tokenTypeList.at(containerIndex + 1) == Token_newline)
            continue;

        indexList.append(containerIndex);
    }
    return indexList;
}

/*
    Returns the text for an Item node and all its children.
*/
QByteArray visitGetText(Item *item)
{
    QByteArray text;
    text += item->text().fullText();

    if (item->toItemComposite()) {
        ItemComposite *composite = item->toItemComposite();
        for (int i = 0; i < composite->count(); ++i)
            text += visitGetText(composite->item(i));
    }

    return text;
}

void Source::setFileName(const QString &fileName)
{
    m_fileName = fileName;
}

} // namespace Rpp
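/*
    Worked example for the cleaning helpers above: for a directive line such as

        #define ANSWER 42   // the answer

    the raw token stream contains whitespace and comment tokens in addition to
    '#', 'define', 'ANSWER', '42' and the newline. cleanTokenRange() drops the
    whitespace and comment tokens, which is why the parse*Directive() functions
    can index the cleaned list directly (e.g. cleanedLine.at(2) refers to the
    macro identifier token), while cleanEscapedNewLines() keeps whitespace and
    comments and only deals with escaped newlines. (Token boundaries shown here
    are approximate; the exact split depends on the lexer.)
*/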