#include "clang/Lex/Lexer.h"
#include "UnicodeCharSets.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/Unicode.h"
#include "llvm/Support/UnicodeCharRanges.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <tuple>
#include <utility>
using namespace clang;
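/// Return true if this token is an identifier whose Objective-C keyword kind
/// (the word following an '@', e.g. the "interface" in "@interface") matches
/// the given kind.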
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
if (isAnnotation())
return false;
if (IdentifierInfo *II = getIdentifierInfo())
return II->getObjCKeywordID() == objcKey;
return false;
}
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
if (isAnnotation())
return tok::objc_not_keyword;
IdentifierInfo *specId = getIdentifierInfo();
return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}
void Lexer::anchor() {}
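/// Initialization shared by all constructors: record the buffer bounds (the
/// buffer must be NUL-terminated), skip a UTF-8 BOM when lexing starts at the
/// very beginning of the file, and reset the per-lexer state flags.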
void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
const char *BufEnd) {
BufferStart = BufStart;
BufferPtr = BufPtr;
BufferEnd = BufEnd;
assert(BufEnd[0] == 0 &&
"We assume that the input buffer has a null character at the end"
" to simplify lexing!");
if (BufferStart == BufferPtr) {
StringRef Buf(BufferStart, BufferEnd - BufferStart);
size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
.StartsWith("\xEF\xBB\xBF", 3)
.Default(0);
BufferPtr += BOMLength;
}
Is_PragmaLexer = false;
CurrentConflictMarkerState = CMK_None;
IsAtStartOfLine = true;
IsAtPhysicalStartOfLine = true;
HasLeadingSpace = false;
HasLeadingEmptyMacro = false;
ParsingPreprocessorDirective = false;
ParsingFilename = false;
LexingRawMode = false;
ExtendedTokenMode = 0;
NewLinePtr = nullptr;
}
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
Preprocessor &PP, bool IsFirstIncludeOfFile)
: PreprocessorLexer(&PP, FID),
FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
LangOpts(PP.getLangOpts()), LineComment(LangOpts.LineComment),
IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
InputFile.getBufferEnd());
resetExtendedTokenMode();
}
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
const char *BufStart, const char *BufPtr, const char *BufEnd,
bool IsFirstIncludeOfFile)
: FileLoc(fileloc), LangOpts(langOpts), LineComment(LangOpts.LineComment),
IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
InitLexer(BufStart, BufPtr, BufEnd);
LexingRawMode = true;
}
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
const SourceManager &SM, const LangOptions &langOpts,
bool IsFirstIncludeOfFile)
: Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
FromFile.getBufferStart(), FromFile.getBufferEnd(),
IsFirstIncludeOfFile) {}
void Lexer::resetExtendedTokenMode() {
assert(PP && "Cannot reset token mode without a preprocessor");
if (LangOpts.TraditionalCPP)
SetKeepWhitespaceMode(true);
else
SetCommentRetentionState(PP->getCommentRetentionState());
}
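/// Create a lexer for the string spelled by a _Pragma expansion: it lexes the
/// TokLen bytes starting at SpellingLoc as a preprocessor directive, with
/// token locations mapped back into the given expansion range.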
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
SourceLocation ExpansionLocStart,
SourceLocation ExpansionLocEnd,
unsigned TokLen, Preprocessor &PP) {
SourceManager &SM = PP.getSourceManager();
FileID SpellingFID = SM.getFileID(SpellingLoc);
llvm::MemoryBufferRef InputFile = SM.getBufferOrFake(SpellingFID);
Lexer *L = new Lexer(SpellingFID, InputFile, PP);
const char *StrData = SM.getCharacterData(SpellingLoc);
L->BufferPtr = StrData;
L->BufferEnd = StrData+TokLen;
assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");
L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
ExpansionLocStart,
ExpansionLocEnd, TokLen);
L->ParsingPreprocessorDirective = true;
L->Is_PragmaLexer = true;
return L;
}
void Lexer::seek(unsigned Offset, bool IsAtStartOfLine) {
this->IsAtPhysicalStartOfLine = IsAtStartOfLine;
this->IsAtStartOfLine = IsAtStartOfLine;
assert((BufferStart + Offset) <= BufferEnd);
BufferPtr = BufferStart + Offset;
}
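// Escape '\' and the quote character, and rewrite physical newlines as the
// two-character sequence "\n" (a CR-LF or LF-CR pair becomes a single "\n"),
// so the result can be embedded in a string or character literal.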
template <typename T> static void StringifyImpl(T &Str, char Quote) {
typename T::size_type i = 0, e = Str.size();
while (i < e) {
if (Str[i] == '\\' || Str[i] == Quote) {
Str.insert(Str.begin() + i, '\\');
i += 2;
++e;
} else if (Str[i] == '\n' || Str[i] == '\r') {
if ((i < e - 1) && (Str[i + 1] == '\n' || Str[i + 1] == '\r') &&
Str[i] != Str[i + 1]) {
Str[i] = '\\';
Str[i + 1] = 'n';
} else {
Str[i] = '\\';
Str.insert(Str.begin() + i + 1, 'n');
++e;
}
i += 2;
} else
++i;
}
}
std::string Lexer::Stringify(StringRef Str, bool Charify) {
std::string Result = std::string(Str);
char Quote = Charify ? '\'' : '"';
StringifyImpl(Result, Quote);
return Result;
}
void Lexer::Stringify(SmallVectorImpl<char> &Str) { StringifyImpl(Str, '"'); }
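// Slow path used when a token contains trigraphs or escaped newlines:
// re-read the token character by character, writing the cleaned spelling into
// Spelling and returning its length. The body of a raw string literal is
// copied verbatim, since it undergoes no cleaning.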
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
const LangOptions &LangOpts, char *Spelling) {
assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");
size_t Length = 0;
const char *BufEnd = BufPtr + Tok.getLength();
if (tok::isStringLiteral(Tok.getKind())) {
while (BufPtr < BufEnd) {
unsigned Size;
Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
BufPtr += Size;
if (Spelling[Length - 1] == '"')
break;
}
if (Length >= 2 &&
Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
const char *RawEnd = BufEnd;
do --RawEnd; while (*RawEnd != '"');
size_t RawLength = RawEnd - BufPtr + 1;
memcpy(Spelling + Length, BufPtr, RawLength);
Length += RawLength;
BufPtr += RawLength;
}
}
while (BufPtr < BufEnd) {
unsigned Size;
Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
BufPtr += Size;
}
assert(Length < Tok.getLength() &&
"NeedsCleaning flag set on token that didn't need cleaning!");
return Length;
}
StringRef Lexer::getSpelling(SourceLocation loc,
SmallVectorImpl<char> &buffer,
const SourceManager &SM,
const LangOptions &options,
bool *invalid) {
std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
bool invalidTemp = false;
StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
if (invalidTemp) {
if (invalid) *invalid = true;
return {};
}
const char *tokenBegin = file.data() + locInfo.second;
Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
file.begin(), tokenBegin, file.end());
Token token;
lexer.LexFromRawLexer(token);
unsigned length = token.getLength();
if (!token.needsCleaning())
return StringRef(tokenBegin, length);
buffer.resize(length);
buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
return StringRef(buffer.data(), buffer.size());
}
std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
const LangOptions &LangOpts, bool *Invalid) {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
bool CharDataInvalid = false;
const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
&CharDataInvalid);
if (Invalid)
*Invalid = CharDataInvalid;
if (CharDataInvalid)
return {};
if (!Tok.needsCleaning())
return std::string(TokStart, TokStart + Tok.getLength());
std::string Result;
Result.resize(Tok.getLength());
Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
return Result;
}
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
const SourceManager &SourceMgr,
const LangOptions &LangOpts, bool *Invalid) {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
const char *TokStart = nullptr;
if (Tok.is(tok::raw_identifier))
TokStart = Tok.getRawIdentifier().data();
else if (!Tok.hasUCN()) {
if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
Buffer = II->getNameStart();
return II->getLength();
}
}
if (Tok.isLiteral())
TokStart = Tok.getLiteralData();
if (!TokStart) {
bool CharDataInvalid = false;
TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
if (Invalid)
*Invalid = CharDataInvalid;
if (CharDataInvalid) {
Buffer = "";
return 0;
}
}
if (!Tok.needsCleaning()) {
Buffer = TokStart;
return Tok.getLength();
}
return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
Token TheTok;
if (getRawToken(Loc, TheTok, SM, LangOpts))
return 0;
return TheTok.getLength();
}
bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
const SourceManager &SM,
const LangOptions &LangOpts,
bool IgnoreWhiteSpace) {
Loc = SM.getExpansionLoc(Loc);
std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
bool Invalid = false;
StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
if (Invalid)
return true;
const char *StrData = Buffer.data()+LocInfo.second;
if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
return true;
Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
Buffer.begin(), StrData, Buffer.end());
TheLexer.SetCommentRetentionState(true);
TheLexer.LexFromRawLexer(Result);
return false;
}
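// Return a pointer to the first character of the line containing Offset, or
// nullptr if Offset is past the end of the buffer. An escaped newline does
// not terminate a line.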
static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
const char *BufStart = Buffer.data();
if (Offset >= Buffer.size())
return nullptr;
const char *LexStart = BufStart + Offset;
for (; LexStart != BufStart; --LexStart) {
if (isVerticalWhitespace(LexStart[0]) &&
!Lexer::isNewLineEscaped(BufStart, LexStart)) {
++LexStart;
break;
}
}
return LexStart;
}
static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
assert(Loc.isFileID());
std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
if (LocInfo.first.isInvalid())
return Loc;
bool Invalid = false;
StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
if (Invalid)
return Loc;
const char *StrData = Buffer.data() + LocInfo.second;
const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
if (!LexStart || LexStart == StrData)
return Loc;
SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
Buffer.end());
TheLexer.SetCommentRetentionState(true);
Token TheTok;
do {
TheLexer.LexFromRawLexer(TheTok);
if (TheLexer.getBufferLocation() > StrData) {
if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
return TheTok.getLocation();
break;
}
} while (TheTok.getKind() != tok::eof);
return Loc;
}
SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
if (Loc.isFileID())
return getBeginningOfFileToken(Loc, SM, LangOpts);
if (!SM.isMacroArgExpansion(Loc))
return Loc;
SourceLocation FileLoc = SM.getSpellingLoc(Loc);
SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
std::pair<FileID, unsigned> BeginFileLocInfo =
SM.getDecomposedLoc(BeginFileLoc);
assert(FileLocInfo.first == BeginFileLocInfo.first &&
FileLocInfo.second >= BeginFileLocInfo.second);
return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}
namespace {
enum PreambleDirectiveKind {
PDK_Skipped,
PDK_Unknown
};
}
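/// Scan the initial run of comments and preprocessor directives in Buffer and
/// return the size of that "preamble" region, along with whether it ends at
/// the start of a line. If MaxLines is non-zero, the preamble is not allowed
/// to extend past that many lines.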
PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
const LangOptions &LangOpts,
unsigned MaxLines) {
const SourceLocation::UIntTy StartOffset = 1;
SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
Buffer.end());
TheLexer.SetCommentRetentionState(true);
bool InPreprocessorDirective = false;
Token TheTok;
SourceLocation ActiveCommentLoc;
unsigned MaxLineOffset = 0;
if (MaxLines) {
const char *CurPtr = Buffer.begin();
unsigned CurLine = 0;
while (CurPtr != Buffer.end()) {
char ch = *CurPtr++;
if (ch == '\n') {
++CurLine;
if (CurLine == MaxLines)
break;
}
}
if (CurPtr != Buffer.end())
MaxLineOffset = CurPtr - Buffer.begin();
}
do {
TheLexer.LexFromRawLexer(TheTok);
if (InPreprocessorDirective) {
if (TheTok.getKind() == tok::eof) {
break;
}
if (!TheTok.isAtStartOfLine())
continue;
InPreprocessorDirective = false;
}
if (TheTok.isAtStartOfLine()) {
unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;
if (MaxLineOffset && TokOffset >= MaxLineOffset)
break;
}
if (TheTok.getKind() == tok::comment) {
if (ActiveCommentLoc.isInvalid())
ActiveCommentLoc = TheTok.getLocation();
continue;
}
if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
Token HashTok = TheTok;
InPreprocessorDirective = true;
ActiveCommentLoc = SourceLocation();
TheLexer.LexFromRawLexer(TheTok);
if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
StringRef Keyword = TheTok.getRawIdentifier();
PreambleDirectiveKind PDK
= llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
.Case("include", PDK_Skipped)
.Case("__include_macros", PDK_Skipped)
.Case("define", PDK_Skipped)
.Case("undef", PDK_Skipped)
.Case("line", PDK_Skipped)
.Case("error", PDK_Skipped)
.Case("pragma", PDK_Skipped)
.Case("import", PDK_Skipped)
.Case("include_next", PDK_Skipped)
.Case("warning", PDK_Skipped)
.Case("ident", PDK_Skipped)
.Case("sccs", PDK_Skipped)
.Case("assert", PDK_Skipped)
.Case("unassert", PDK_Skipped)
.Case("if", PDK_Skipped)
.Case("ifdef", PDK_Skipped)
.Case("ifndef", PDK_Skipped)
.Case("elif", PDK_Skipped)
.Case("elifdef", PDK_Skipped)
.Case("elifndef", PDK_Skipped)
.Case("else", PDK_Skipped)
.Case("endif", PDK_Skipped)
.Default(PDK_Unknown);
switch (PDK) {
case PDK_Skipped:
continue;
case PDK_Unknown:
break;
}
}
TheTok = HashTok;
}
break;
} while (true);
SourceLocation End;
if (ActiveCommentLoc.isValid())
End = ActiveCommentLoc;
else
End = TheTok.getLocation();
return PreambleBounds(End.getRawEncoding() - FileLoc.getRawEncoding(),
TheTok.isAtStartOfLine());
}
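/// Return the physical byte offset of the CharNo'th cleaned character of the
/// token starting at TokStart, stepping over trigraphs and escaped newlines.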
unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
const SourceManager &SM,
const LangOptions &LangOpts) {
bool Invalid = false;
const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);
if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
return 0;
unsigned PhysOffset = 0;
while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
if (CharNo == 0)
return PhysOffset;
++TokPtr;
--CharNo;
++PhysOffset;
}
for (; CharNo; --CharNo) {
unsigned Size;
Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
TokPtr += Size;
PhysOffset += Size;
}
if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;
return PhysOffset;
}
SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
const SourceManager &SM,
const LangOptions &LangOpts) {
if (Loc.isInvalid())
return {};
if (Loc.isMacroID()) {
if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
return {};
}
unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
if (Len > Offset)
Len = Len - Offset;
else
return Loc;
return Loc.getLocWithOffset(Len);
}
bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
const SourceManager &SM,
const LangOptions &LangOpts,
SourceLocation *MacroBegin) {
assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
SourceLocation expansionLoc;
if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
return false;
if (expansionLoc.isFileID()) {
if (MacroBegin)
*MacroBegin = expansionLoc;
return true;
}
return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}
bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
const SourceManager &SM,
const LangOptions &LangOpts,
SourceLocation *MacroEnd) {
assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
SourceLocation spellLoc = SM.getSpellingLoc(loc);
unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
if (tokLen == 0)
return false;
SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
SourceLocation expansionLoc;
if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
return false;
if (expansionLoc.isFileID()) {
if (MacroEnd)
*MacroEnd = expansionLoc;
return true;
}
return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
}
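// Both endpoints must be file locations. A token range is first converted to
// a character range by extending End past its token; an invalid range is
// returned if the endpoints are in different files or out of order.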
static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
const SourceManager &SM,
const LangOptions &LangOpts) {
SourceLocation Begin = Range.getBegin();
SourceLocation End = Range.getEnd();
assert(Begin.isFileID() && End.isFileID());
if (Range.isTokenRange()) {
End = Lexer::getLocForEndOfToken(End, 0, SM, LangOpts);
if (End.isInvalid())
return {};
}
FileID FID;
unsigned BeginOffs;
std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
if (FID.isInvalid())
return {};
unsigned EndOffs;
if (!SM.isInFileID(End, FID, &EndOffs) ||
BeginOffs > EndOffs)
return {};
return CharSourceRange::getCharRange(Begin, End);
}
static bool isInExpansionTokenRange(const SourceLocation Loc,
const SourceManager &SM) {
return SM.getSLocEntry(SM.getFileID(Loc))
.getExpansion()
.isExpansionTokenRange();
}
CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
const SourceManager &SM,
const LangOptions &LangOpts) {
SourceLocation Begin = Range.getBegin();
SourceLocation End = Range.getEnd();
if (Begin.isInvalid() || End.isInvalid())
return {};
if (Begin.isFileID() && End.isFileID())
return makeRangeFromFileLocs(Range, SM, LangOpts);
if (Begin.isMacroID() && End.isFileID()) {
if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
return {};
Range.setBegin(Begin);
return makeRangeFromFileLocs(Range, SM, LangOpts);
}
if (Begin.isFileID() && End.isMacroID()) {
if (Range.isTokenRange()) {
if (!isAtEndOfMacroExpansion(End, SM, LangOpts, &End))
return {};
Range.setTokenRange(isInExpansionTokenRange(Range.getEnd(), SM));
} else if (!isAtStartOfMacroExpansion(End, SM, LangOpts, &End))
return {};
Range.setEnd(End);
return makeRangeFromFileLocs(Range, SM, LangOpts);
}
assert(Begin.isMacroID() && End.isMacroID());
SourceLocation MacroBegin, MacroEnd;
if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
&MacroEnd)) ||
(Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
&MacroEnd)))) {
Range.setBegin(MacroBegin);
Range.setEnd(MacroEnd);
if (Range.isTokenRange())
Range.setTokenRange(isInExpansionTokenRange(End, SM));
return makeRangeFromFileLocs(Range, SM, LangOpts);
}
bool Invalid = false;
const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
&Invalid);
if (Invalid)
return {};
if (BeginEntry.getExpansion().isMacroArgExpansion()) {
const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
&Invalid);
if (Invalid)
return {};
if (EndEntry.getExpansion().isMacroArgExpansion() &&
BeginEntry.getExpansion().getExpansionLocStart() ==
EndEntry.getExpansion().getExpansionLocStart()) {
Range.setBegin(SM.getImmediateSpellingLoc(Begin));
Range.setEnd(SM.getImmediateSpellingLoc(End));
return makeFileCharRange(Range, SM, LangOpts);
}
}
return {};
}
StringRef Lexer::getSourceText(CharSourceRange Range,
const SourceManager &SM,
const LangOptions &LangOpts,
bool *Invalid) {
Range = makeFileCharRange(Range, SM, LangOpts);
if (Range.isInvalid()) {
if (Invalid) *Invalid = true;
return {};
}
std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
if (beginInfo.first.isInvalid()) {
if (Invalid) *Invalid = true;
return {};
}
unsigned EndOffs;
if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
beginInfo.second > EndOffs) {
if (Invalid) *Invalid = true;
return {};
}
bool invalidTemp = false;
StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
if (invalidTemp) {
if (Invalid) *Invalid = true;
return {};
}
if (Invalid) *Invalid = false;
return file.substr(beginInfo.second, EndOffs - beginInfo.second);
}
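/// Retrieve the name of the macro responsible for the immediate expansion of
/// Loc, looking through any intervening macro-argument expansions and reading
/// the spelling of the macro name token from the source buffer.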
StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
assert(Loc.isMacroID() && "Only reasonable to call this on macros");
while (true) {
FileID FID = SM.getFileID(Loc);
const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
Loc = Expansion.getExpansionLocStart();
if (!Expansion.isMacroArgExpansion())
break;
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
SourceLocation SpellLoc = Expansion.getSpellingLoc();
if (SpellLoc.isFileID())
break;
FileID MacroFID = SM.getFileID(Loc);
if (SM.isInFileID(SpellLoc, MacroFID))
break;
Loc = SpellLoc;
}
Loc = SM.getSpellingLoc(Loc);
std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
StringRef Lexer::getImmediateMacroNameForDiagnostics(
SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
assert(Loc.isMacroID() && "Only reasonable to call this on macros");
while (SM.isMacroArgExpansion(Loc))
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc))))
return {};
Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).getBegin());
std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
bool Lexer::isAsciiIdentifierContinueChar(char c, const LangOptions &LangOpts) {
return isAsciiIdentifierContinue(c, LangOpts.DollarIdents);
}
bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) {
assert(isVerticalWhitespace(Str[0]));
if (Str - 1 < BufferStart)
return false;
if ((Str[0] == '\n' && Str[-1] == '\r') ||
(Str[0] == '\r' && Str[-1] == '\n')) {
if (Str - 2 < BufferStart)
return false;
--Str;
}
--Str;
while (Str > BufferStart && isHorizontalWhitespace(*Str))
--Str;
return *Str == '\\';
}
StringRef Lexer::getIndentationForLine(SourceLocation Loc,
const SourceManager &SM) {
if (Loc.isInvalid() || Loc.isMacroID())
return {};
std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
if (LocInfo.first.isInvalid())
return {};
bool Invalid = false;
StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
if (Invalid)
return {};
const char *Line = findBeginningOfLine(Buffer, LocInfo.second);
if (!Line)
return {};
StringRef Rest = Buffer.substr(Line - Buffer.data());
size_t NumWhitespaceChars = Rest.find_first_not_of(" \t");
return NumWhitespaceChars == StringRef::npos
? ""
: Rest.take_front(NumWhitespaceChars);
}
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
SourceLocation FileLoc,
unsigned CharNo, unsigned TokLen) {
assert(FileLoc.isMacroID() && "Must be a macro expansion");
SourceManager &SM = PP.getSourceManager();
SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);
CharSourceRange II = SM.getImmediateExpansionRange(FileLoc);
return SM.createExpansionLoc(SpellingLoc, II.getBegin(), II.getEnd(), TokLen);
}
SourceLocation Lexer::getSourceLocation(const char *Loc,
unsigned TokLen) const {
assert(Loc >= BufferStart && Loc <= BufferEnd &&
"Location out of range for this buffer!");
unsigned CharNo = Loc-BufferStart;
if (FileLoc.isFileID())
return FileLoc.getLocWithOffset(CharNo);
assert(PP && "This doesn't work on raw lexers");
return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
return PP->Diag(getSourceLocation(Loc), DiagID);
}
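// Map the third character of a "??x" trigraph to the character it represents,
// or 0 if the letter does not form a valid trigraph.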
static char GetTrigraphCharForLetter(char Letter) {
switch (Letter) {
default: return 0;
case '=': return '#';
case ')': return ']';
case '(': return '[';
case '!': return '|';
case '\'': return '^';
case '>': return '}';
case '/': return '\\';
case '<': return '{';
case '-': return '~';
}
}
static char DecodeTrigraphChar(const char *CP, Lexer *L, bool Trigraphs) {
char Res = GetTrigraphCharForLetter(*CP);
if (!Res || !L) return Res;
if (!Trigraphs) {
if (!L->isLexingRawMode())
L->Diag(CP-2, diag::trigraph_ignored);
return 0;
}
if (!L->isLexingRawMode())
L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
return Res;
}
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
unsigned Size = 0;
while (isWhitespace(Ptr[Size])) {
++Size;
if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
continue;
if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
Ptr[Size-1] != Ptr[Size])
++Size;
return Size;
}
return 0;
}
const char *Lexer::SkipEscapedNewLines(const char *P) {
while (true) {
const char *AfterEscape;
if (*P == '\\') {
AfterEscape = P+1;
} else if (*P == '?') {
if (P[1] != '?' || P[2] != '/')
return P;
AfterEscape = P+3;
} else {
return P;
}
unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
if (NewLineSize == 0) return P;
P = AfterEscape+NewLineSize;
}
}
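/// Raw-lex and return the token that follows the token at Loc, or None when
/// Loc lies inside a macro expansion (other than at its end) or the buffer
/// cannot be read.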
Optional<Token> Lexer::findNextToken(SourceLocation Loc,
const SourceManager &SM,
const LangOptions &LangOpts) {
if (Loc.isMacroID()) {
if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
return None;
}
Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);
std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
bool InvalidTemp = false;
StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
if (InvalidTemp)
return None;
const char *TokenBegin = File.data() + LocInfo.second;
Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
TokenBegin, File.end());
Token Tok;
lexer.LexFromRawLexer(Tok);
return Tok;
}
SourceLocation Lexer::findLocationAfterToken(
SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
Optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
if (!Tok || Tok->isNot(TKind))
return {};
SourceLocation TokenLoc = Tok->getLocation();
unsigned NumWhitespaceChars = 0;
if (SkipTrailingWhitespaceAndNewLine) {
const char *TokenEnd = SM.getCharacterData(TokenLoc) + Tok->getLength();
unsigned char C = *TokenEnd;
while (isHorizontalWhitespace(C)) {
C = *(++TokenEnd);
NumWhitespaceChars++;
}
if (C == '\n' || C == '\r') {
char PrevC = C;
C = *(++TokenEnd);
NumWhitespaceChars++;
if ((C == '\n' || C == '\r') && C != PrevC)
NumWhitespaceChars++;
}
}
return TokenLoc.getLocWithOffset(Tok->getLength() + NumWhitespaceChars);
}
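/// Slow path of getCharAndSize: handle escaped newlines following a backslash
/// and decode trigraphs, accumulating the number of bytes consumed in Size,
/// emitting warnings, and marking the token as needing cleaning.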
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
Token *Tok) {
if (Ptr[0] == '\\') {
++Size;
++Ptr;
Slash:
if (!isWhitespace(Ptr[0])) return '\\';
if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
if (Tok) Tok->setFlag(Token::NeedsCleaning);
if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
Diag(Ptr, diag::backslash_newline_space);
Size += EscapedNewLineSize;
Ptr += EscapedNewLineSize;
return getCharAndSizeSlow(Ptr, Size, Tok);
}
return '\\';
}
if (Ptr[0] == '?' && Ptr[1] == '?') {
if (char C = DecodeTrigraphChar(Ptr + 2, Tok ? this : nullptr,
LangOpts.Trigraphs)) {
if (Tok) Tok->setFlag(Token::NeedsCleaning);
Ptr += 3;
Size += 3;
if (C == '\\') goto Slash;
return C;
}
}
++Size;
return *Ptr;
}
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
const LangOptions &LangOpts) {
if (Ptr[0] == '\\') {
++Size;
++Ptr;
Slash:
if (!isWhitespace(Ptr[0])) return '\\';
if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
Size += EscapedNewLineSize;
Ptr += EscapedNewLineSize;
return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
}
return '\\';
}
if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
if (char C = GetTrigraphCharForLetter(Ptr[2])) {
Ptr += 3;
Size += 3;
if (C == '\\') goto Slash;
return C;
}
}
++Size;
return *Ptr;
}
void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
BufferPtr = BufferStart + Offset;
if (BufferPtr > BufferEnd)
BufferPtr = BufferEnd;
IsAtStartOfLine = StartOfLine;
IsAtPhysicalStartOfLine = StartOfLine;
}
static bool isUnicodeWhitespace(uint32_t Codepoint) {
static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
UnicodeWhitespaceCharRanges);
return UnicodeWhitespaceChars.contains(Codepoint);
}
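// Determine whether code point C may appear in an identifier, based on the
// language mode: never for the assembler preprocessor, '$' only when
// DollarIdents is enabled, XID_Start/XID_Continue for C++ and C2x, and the
// C11 or C99 annex tables otherwise.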
static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
if (LangOpts.AsmPreprocessor) {
return false;
} else if (LangOpts.DollarIdents && '$' == C) {
return true;
} else if (LangOpts.CPlusPlus || LangOpts.C2x) {
static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
static const llvm::sys::UnicodeCharSet XIDContinueChars(XIDContinueRanges);
return C == '_' || XIDStartChars.contains(C) ||
XIDContinueChars.contains(C);
} else if (LangOpts.C11) {
static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
C11AllowedIDCharRanges);
return C11AllowedIDChars.contains(C);
} else {
static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
C99AllowedIDCharRanges);
return C99AllowedIDChars.contains(C);
}
}
static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
if (LangOpts.AsmPreprocessor) {
return false;
}
if (LangOpts.CPlusPlus || LangOpts.C2x) {
static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
return C == '_' || XIDStartChars.contains(C);
}
if (!isAllowedIDChar(C, LangOpts))
return false;
if (LangOpts.C11) {
static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
C11DisallowedInitialIDCharRanges);
return !C11DisallowedInitialIDChars.contains(C);
}
static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
C99DisallowedInitialIDCharRanges);
return !C99DisallowedInitialIDChars.contains(C);
}
static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
const char *End) {
return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
L.getSourceLocation(End));
}
static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
CharSourceRange Range, bool IsFirst) {
if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
enum {
CannotAppearInIdentifier = 0,
CannotStartIdentifier
};
static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
C99AllowedIDCharRanges);
static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
C99DisallowedInitialIDCharRanges);
if (!C99AllowedIDChars.contains(C)) {
Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
<< Range
<< CannotAppearInIdentifier;
} else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
<< Range
<< CannotStartIdentifier;
}
}
}
static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
CharSourceRange Range) {
struct HomoglyphPair {
uint32_t Character;
char LooksLike;
bool operator<(HomoglyphPair R) const { return Character < R.Character; }
};
static constexpr HomoglyphPair SortedHomoglyphs[] = {
{U'\u00ad', 0},   {U'\u01c3', '!'}, {U'\u037e', ';'}, {U'\u200b', 0},
{U'\u200c', 0},   {U'\u200d', 0},   {U'\u2060', 0},   {U'\u2061', 0},
{U'\u2062', 0},   {U'\u2063', 0},   {U'\u2064', 0},   {U'\u2212', '-'},
{U'\u2215', '/'}, {U'\u2216', '\\'}, {U'\u2217', '*'}, {U'\u2223', '|'},
{U'\u2227', '^'}, {U'\u2236', ':'}, {U'\u223c', '~'}, {U'\ua789', ':'},
{U'\ufeff', 0},   {U'\uff01', '!'}, {U'\uff03', '#'}, {U'\uff04', '$'},
{U'\uff05', '%'}, {U'\uff06', '&'}, {U'\uff08', '('}, {U'\uff09', ')'},
{U'\uff0a', '*'}, {U'\uff0b', '+'}, {U'\uff0c', ','}, {U'\uff0d', '-'},
{U'\uff0e', '.'}, {U'\uff0f', '/'}, {U'\uff1a', ':'}, {U'\uff1b', ';'},
{U'\uff1c', '<'}, {U'\uff1d', '='}, {U'\uff1e', '>'}, {U'\uff1f', '?'},
{U'\uff20', '@'}, {U'\uff3b', '['}, {U'\uff3c', '\\'}, {U'\uff3d', ']'},
{U'\uff3e', '^'}, {U'\uff5b', '{'}, {U'\uff5c', '|'}, {U'\uff5d', '}'},
{U'\uff5e', '~'}, {0, 0}
};
auto Homoglyph =
std::lower_bound(std::begin(SortedHomoglyphs),
std::end(SortedHomoglyphs) - 1, HomoglyphPair{C, '\0'});
if (Homoglyph->Character == C) {
llvm::SmallString<5> CharBuf;
{
llvm::raw_svector_ostream CharOS(CharBuf);
llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
}
if (Homoglyph->LooksLike) {
const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
<< Range << CharBuf << LooksLikeStr;
} else {
Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
<< Range << CharBuf;
}
}
}
static void diagnoseInvalidUnicodeCodepointInIdentifier(
DiagnosticsEngine &Diags, const LangOptions &LangOpts, uint32_t CodePoint,
CharSourceRange Range, bool IsFirst) {
if (isASCII(CodePoint))
return;
bool IsIDStart = isAllowedInitiallyIDChar(CodePoint, LangOpts);
bool IsIDContinue = IsIDStart || isAllowedIDChar(CodePoint, LangOpts);
if ((IsFirst && IsIDStart) || (!IsFirst && IsIDContinue))
return;
bool InvalidOnlyAtStart = IsFirst && !IsIDStart && IsIDContinue;
llvm::SmallString<5> CharBuf;
llvm::raw_svector_ostream CharOS(CharBuf);
llvm::write_hex(CharOS, CodePoint, llvm::HexPrintStyle::Upper, 4);
if (!IsFirst || InvalidOnlyAtStart) {
Diags.Report(Range.getBegin(), diag::err_character_not_allowed_identifier)
<< Range << CharBuf << int(InvalidOnlyAtStart)
<< FixItHint::CreateRemoval(Range);
} else {
Diags.Report(Range.getBegin(), diag::err_character_not_allowed)
<< Range << CharBuf << FixItHint::CreateRemoval(Range);
}
}
bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
Token &Result) {
const char *UCNPtr = CurPtr + Size;
uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, nullptr);
if (CodePoint == 0) {
return false;
}
if (!isAllowedIDChar(CodePoint, LangOpts)) {
if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
return false;
if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
!PP->isPreprocessedOutput())
diagnoseInvalidUnicodeCodepointInIdentifier(
PP->getDiagnostics(), LangOpts, CodePoint,
makeCharRange(*this, CurPtr, UCNPtr),
false);
} else if (!isLexingRawMode())
maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
makeCharRange(*this, CurPtr, UCNPtr),
false);
Result.setFlag(Token::HasUCN);
if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
(UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
CurPtr = UCNPtr;
else
while (CurPtr != UCNPtr)
(void)getAndAdvanceChar(CurPtr, Result);
return true;
}
bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
const char *UnicodePtr = CurPtr;
llvm::UTF32 CodePoint;
llvm::ConversionResult Result =
llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
(const llvm::UTF8 *)BufferEnd,
&CodePoint,
llvm::strictConversion);
if (Result != llvm::conversionOK)
return false;
if (!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts)) {
if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
return false;
if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
!PP->isPreprocessedOutput())
diagnoseInvalidUnicodeCodepointInIdentifier(
PP->getDiagnostics(), LangOpts, CodePoint,
makeCharRange(*this, CurPtr, UnicodePtr), false);
} else if (!isLexingRawMode()) {
maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
makeCharRange(*this, CurPtr, UnicodePtr),
false);
maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
makeCharRange(*this, CurPtr, UnicodePtr));
}
CurPtr = UnicodePtr;
return true;
}
bool Lexer::LexUnicodeIdentifierStart(Token &Result, uint32_t C,
const char *CurPtr) {
if (isAllowedInitiallyIDChar(C, LangOpts)) {
if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
!PP->isPreprocessedOutput()) {
maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
makeCharRange(*this, BufferPtr, CurPtr),
true);
maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
makeCharRange(*this, BufferPtr, CurPtr));
}
MIOpt.ReadToken();
return LexIdentifierContinue(Result, CurPtr);
}
if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
!PP->isPreprocessedOutput() && !isASCII(*BufferPtr) &&
!isAllowedInitiallyIDChar(C, LangOpts) && !isUnicodeWhitespace(C)) {
diagnoseInvalidUnicodeCodepointInIdentifier(
PP->getDiagnostics(), LangOpts, C,
makeCharRange(*this, BufferPtr, CurPtr), true);
BufferPtr = CurPtr;
return false;
}
MIOpt.ReadToken();
FormTokenWithChars(Result, CurPtr, tok::unknown);
return true;
}
bool Lexer::LexIdentifierContinue(Token &Result, const char *CurPtr) {
while (true) {
unsigned char C = *CurPtr;
if (isAsciiIdentifierContinue(C)) {
++CurPtr;
continue;
}
unsigned Size;
C = getCharAndSize(CurPtr, Size);
if (isAsciiIdentifierContinue(C)) {
CurPtr = ConsumeChar(CurPtr, Size, Result);
continue;
}
if (C == '$') {
if (!LangOpts.DollarIdents)
break;
if (!isLexingRawMode())
Diag(CurPtr, diag::ext_dollar_in_identifier);
CurPtr = ConsumeChar(CurPtr, Size, Result);
continue;
}
if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
continue;
if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
continue;
break;
}
const char *IdStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
Result.setRawIdentifierData(IdStart);
if (LexingRawMode)
return true;
IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
if (isCodeCompletionPoint(CurPtr)) {
Result.setKind(tok::code_completion);
assert(*CurPtr == 0 && "Completion character must be 0");
++CurPtr;
if (CurPtr < BufferEnd) {
while (isAsciiIdentifierContinue(*CurPtr))
++CurPtr;
}
BufferPtr = CurPtr;
return true;
}
if (II->isHandleIdentifierCase())
return PP->HandleIdentifier(Result);
return true;
}
bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
unsigned Size;
char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
if (C1 != '0')
return false;
char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
return (C2 == 'x' || C2 == 'X');
}
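/// Lex a preprocessing number (pp-number): digits and identifier characters,
/// exponent signs after 'e'/'E' and (for hex floats) 'p'/'P', C++14/C2x digit
/// separators, and UCNs or UTF-8 characters in a trailing suffix.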
bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
unsigned Size;
char C = getCharAndSize(CurPtr, Size);
char PrevCh = 0;
while (isPreprocessingNumberBody(C)) {
CurPtr = ConsumeChar(CurPtr, Size, Result);
PrevCh = C;
C = getCharAndSize(CurPtr, Size);
}
if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
}
if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
bool IsHexFloat = true;
if (!LangOpts.C99) {
if (!isHexaLiteral(BufferPtr, LangOpts))
IsHexFloat = false;
else if (!LangOpts.CPlusPlus17 &&
std::find(BufferPtr, CurPtr, '_') != CurPtr)
IsHexFloat = false;
}
if (IsHexFloat)
return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
}
if (C == '\'' && (LangOpts.CPlusPlus14 || LangOpts.C2x)) {
unsigned NextSize;
char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, LangOpts);
if (isAsciiIdentifierContinue(Next)) {
if (!isLexingRawMode())
Diag(CurPtr, LangOpts.CPlusPlus
? diag::warn_cxx11_compat_digit_separator
: diag::warn_c2x_compat_digit_separator);
CurPtr = ConsumeChar(CurPtr, Size, Result);
CurPtr = ConsumeChar(CurPtr, NextSize, Result);
return LexNumericConstant(Result, CurPtr);
}
}
if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
return LexNumericConstant(Result, CurPtr);
if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
return LexNumericConstant(Result, CurPtr);
const char *TokStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
Result.setLiteralData(TokStart);
return true;
}
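/// Lex the ud-suffix of a C++11 user-defined literal. Before C++11 this only
/// emits a compatibility warning; otherwise suffixes that do not begin with
/// '_' (and, for string literals in C++14, are not standard suffixes) are
/// diagnosed as reserved.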
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
bool IsStringLiteral) {
assert(LangOpts.CPlusPlus);
unsigned Size;
char C = getCharAndSize(CurPtr, Size);
bool Consumed = false;
if (!isAsciiIdentifierStart(C)) {
if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
Consumed = true;
else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
Consumed = true;
else
return CurPtr;
}
if (!LangOpts.CPlusPlus11) {
if (!isLexingRawMode())
Diag(CurPtr,
C == '_' ? diag::warn_cxx11_compat_user_defined_literal
: diag::warn_cxx11_compat_reserved_user_defined_literal)
<< FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
return CurPtr;
}
if (!Consumed) {
bool IsUDSuffix = false;
if (C == '_')
IsUDSuffix = true;
else if (IsStringLiteral && LangOpts.CPlusPlus14) {
const unsigned MaxStandardSuffixLength = 3;
char Buffer[MaxStandardSuffixLength] = { C };
unsigned Consumed = Size;
unsigned Chars = 1;
while (true) {
unsigned NextSize;
char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize, LangOpts);
if (!isAsciiIdentifierContinue(Next)) {
const StringRef CompleteSuffix(Buffer, Chars);
IsUDSuffix =
StringLiteralParser::isValidUDSuffix(LangOpts, CompleteSuffix);
break;
}
if (Chars == MaxStandardSuffixLength)
break;
Buffer[Chars++] = Next;
Consumed += NextSize;
}
}
if (!IsUDSuffix) {
if (!isLexingRawMode())
Diag(CurPtr, LangOpts.MSVCCompat
? diag::ext_ms_reserved_user_defined_literal
: diag::ext_reserved_user_defined_literal)
<< FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
return CurPtr;
}
CurPtr = ConsumeChar(CurPtr, Size, Result);
}
Result.setFlag(Token::HasUDSuffix);
while (true) {
C = getCharAndSize(CurPtr, Size);
if (isAsciiIdentifierContinue(C)) {
CurPtr = ConsumeChar(CurPtr, Size, Result);
} else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
} else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
} else
break;
}
return CurPtr;
}
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
tok::TokenKind Kind) {
const char *AfterQuote = CurPtr;
const char *NulCharacter = nullptr;
if (!isLexingRawMode() &&
(Kind == tok::utf8_string_literal ||
Kind == tok::utf16_string_literal ||
Kind == tok::utf32_string_literal))
Diag(BufferPtr, LangOpts.CPlusPlus ? diag::warn_cxx98_compat_unicode_literal
: diag::warn_c99_compat_unicode_literal);
char C = getAndAdvanceChar(CurPtr, Result);
while (C != '"') {
if (C == '\\')
C = getAndAdvanceChar(CurPtr, Result);
if (C == '\n' || C == '\r' || (C == 0 && CurPtr-1 == BufferEnd)) {
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return true;
}
if (C == 0) {
if (isCodeCompletionPoint(CurPtr-1)) {
if (ParsingFilename)
codeCompleteIncludedFile(AfterQuote, CurPtr - 1, false);
else
PP->CodeCompleteNaturalLanguage();
FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
cutOffLexing();
return true;
}
NulCharacter = CurPtr-1;
}
C = getAndAdvanceChar(CurPtr, Result);
}
if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, true);
if (NulCharacter && !isLexingRawMode())
Diag(NulCharacter, diag::null_in_char_or_string) << 1;
const char *TokStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, Kind);
Result.setLiteralData(TokStart);
return true;
}
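/// Lex a C++11 raw string literal: read the delimiter (at most 16 d-chars),
/// expect '(', then scan for the matching ")delimiter"" terminator without
/// interpreting escapes or trigraphs.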
bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
tok::TokenKind Kind) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);
unsigned PrefixLen = 0;
while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
++PrefixLen;
if (CurPtr[PrefixLen] != '(') {
if (!isLexingRawMode()) {
const char *PrefixEnd = &CurPtr[PrefixLen];
if (PrefixLen == 16) {
Diag(PrefixEnd, diag::err_raw_delim_too_long);
} else {
Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
<< StringRef(PrefixEnd, 1);
}
}
while (true) {
char C = *CurPtr++;
if (C == '"')
break;
if (C == 0 && CurPtr-1 == BufferEnd) {
--CurPtr;
break;
}
}
FormTokenWithChars(Result, CurPtr, tok::unknown);
return true;
}
const char *Prefix = CurPtr;
CurPtr += PrefixLen + 1;
while (true) {
char C = *CurPtr++;
if (C == ')') {
if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
CurPtr += PrefixLen + 1;
break;
}
} else if (C == 0 && CurPtr-1 == BufferEnd) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::err_unterminated_raw_string)
<< StringRef(Prefix, PrefixLen);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return true;
}
}
if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, true);
const char *TokStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, Kind);
Result.setLiteralData(TokStart);
return true;
}
bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
const char *NulCharacter = nullptr;
const char *AfterLessPos = CurPtr;
char C = getAndAdvanceChar(CurPtr, Result);
while (C != '>') {
if (C == '\\')
C = getAndAdvanceChar(CurPtr, Result);
if (isVerticalWhitespace(C) || (C == 0 && (CurPtr - 1 == BufferEnd))) {
FormTokenWithChars(Result, AfterLessPos, tok::less);
return true;
}
if (C == 0) {
if (isCodeCompletionPoint(CurPtr - 1)) {
codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, true);
cutOffLexing();
FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
return true;
}
NulCharacter = CurPtr-1;
}
C = getAndAdvanceChar(CurPtr, Result);
}
if (NulCharacter && !isLexingRawMode())
Diag(NulCharacter, diag::null_in_char_or_string) << 1;
const char *TokStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, tok::header_name);
Result.setLiteralData(TokStart);
return true;
}
void Lexer::codeCompleteIncludedFile(const char *PathStart,
const char *CompletionPoint,
bool IsAngled) {
StringRef PartialPath(PathStart, CompletionPoint - PathStart);
llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/";
auto Slash = PartialPath.find_last_of(SlashChars);
StringRef Dir =
(Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
const char *StartOfFilename =
(Slash == StringRef::npos) ? PathStart : PathStart + Slash + 1;
PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
while (CompletionPoint < BufferEnd) {
char Next = *(CompletionPoint + 1);
if (Next == 0 || Next == '\r' || Next == '\n')
break;
++CompletionPoint;
if (Next == (IsAngled ? '>' : '"'))
break;
if (llvm::is_contained(SlashChars, Next))
break;
}
PP->setCodeCompletionTokenRange(
FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
PP->CodeCompleteIncludedFile(Dir, IsAngled);
}
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
tok::TokenKind Kind) {
const char *NulCharacter = nullptr;
if (!isLexingRawMode()) {
if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
Diag(BufferPtr, LangOpts.CPlusPlus
? diag::warn_cxx98_compat_unicode_literal
: diag::warn_c99_compat_unicode_literal);
else if (Kind == tok::utf8_char_constant)
Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
}
char C = getAndAdvanceChar(CurPtr, Result);
if (C == '\'') {
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
Diag(BufferPtr, diag::ext_empty_character);
FormTokenWithChars(Result, CurPtr, tok::unknown);
return true;
}
while (C != '\'') {
if (C == '\\')
C = getAndAdvanceChar(CurPtr, Result);
if (C == '\n' || C == '\r' || (C == 0 && CurPtr-1 == BufferEnd)) {
if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return true;
}
if (C == 0) {
if (isCodeCompletionPoint(CurPtr-1)) {
PP->CodeCompleteNaturalLanguage();
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
cutOffLexing();
return true;
}
NulCharacter = CurPtr-1;
}
C = getAndAdvanceChar(CurPtr, Result);
}
if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, false);
if (NulCharacter && !isLexingRawMode())
Diag(NulCharacter, diag::null_in_char_or_string) << 0;
const char *TokStart = BufferPtr;
FormTokenWithChars(Result, CurPtr, Kind);
Result.setLiteralData(TokStart);
return true;
}
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
bool &TokAtPhysicalStartOfLine) {
bool SawNewline = isVerticalWhitespace(CurPtr[-1]);
unsigned char Char = *CurPtr;
const char *lastNewLine = nullptr;
auto setLastNewLine = [&](const char *Ptr) {
lastNewLine = Ptr;
if (!NewLinePtr)
NewLinePtr = Ptr;
};
if (SawNewline)
setLastNewLine(CurPtr - 1);
while (true) {
while (isHorizontalWhitespace(Char))
Char = *++CurPtr;
if (!isVerticalWhitespace(Char))
break;
if (ParsingPreprocessorDirective) {
BufferPtr = CurPtr;
return false;
}
if (*CurPtr == '\n')
setLastNewLine(CurPtr);
SawNewline = true;
Char = *++CurPtr;
}
if (isKeepWhitespaceMode()) {
FormTokenWithChars(Result, CurPtr, tok::unknown);
if (SawNewline) {
IsAtStartOfLine = true;
IsAtPhysicalStartOfLine = true;
}
return true;
}
char PrevChar = CurPtr[-1];
bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);
Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
if (SawNewline) {
Result.setFlag(Token::StartOfLine);
TokAtPhysicalStartOfLine = true;
if (NewLinePtr && lastNewLine && NewLinePtr != lastNewLine && PP) {
if (auto *Handler = PP->getEmptylineHandler())
Handler->HandleEmptyline(SourceRange(getSourceLocation(NewLinePtr + 1),
getSourceLocation(lastNewLine)));
}
}
BufferPtr = CurPtr;
return false;
}
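/// Skip over a "//" line comment, handling escaped newlines that continue the
/// comment onto the next line, invalid UTF-8, and code completion. Returns
/// true if the comment is saved as a token (or a comment handler requested a
/// token), false if lexing should simply continue after it.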
bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
bool &TokAtPhysicalStartOfLine) {
if (!LineComment) {
if (!isLexingRawMode()) Diag(BufferPtr, diag::ext_line_comment);
LineComment = true;
}
bool UnicodeDecodingAlreadyDiagnosed = false;
char C;
while (true) {
C = *CurPtr;
while (isASCII(C) && C != 0 && C != '\n' && C != '\r') {
C = *++CurPtr;
UnicodeDecodingAlreadyDiagnosed = false;
}
if (!isASCII(C)) {
unsigned Length = llvm::getUTF8SequenceSize(
(const llvm::UTF8 *)CurPtr, (const llvm::UTF8 *)BufferEnd);
if (Length == 0) {
if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode())
Diag(CurPtr, diag::warn_invalid_utf8_in_comment);
UnicodeDecodingAlreadyDiagnosed = true;
++CurPtr;
} else {
UnicodeDecodingAlreadyDiagnosed = false;
CurPtr += Length;
}
continue;
}
const char *NextLine = CurPtr;
if (C != 0) {
const char *EscapePtr = CurPtr-1;
bool HasSpace = false;
while (isHorizontalWhitespace(*EscapePtr)) {
--EscapePtr;
HasSpace = true;
}
if (*EscapePtr == '\\')
CurPtr = EscapePtr;
else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
EscapePtr[-2] == '?' && LangOpts.Trigraphs)
CurPtr = EscapePtr-2;
else
break;
if (HasSpace && !isLexingRawMode())
Diag(EscapePtr, diag::backslash_newline_space);
}
const char *OldPtr = CurPtr;
bool OldRawMode = isLexingRawMode();
LexingRawMode = true;
C = getAndAdvanceChar(CurPtr, Result);
LexingRawMode = OldRawMode;
if (C != 0 && CurPtr == OldPtr+1) {
CurPtr = NextLine;
break;
}
if (CurPtr != OldPtr + 1 && C != '/' &&
(CurPtr == BufferEnd + 1 || CurPtr[0] != '/')) {
for (; OldPtr != CurPtr; ++OldPtr)
if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
if (isWhitespace(C)) {
const char *ForwardPtr = CurPtr;
while (isWhitespace(*ForwardPtr)) ++ForwardPtr;
if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
break;
}
if (!isLexingRawMode())
Diag(OldPtr-1, diag::ext_multi_line_line_comment);
break;
}
}
if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
--CurPtr;
break;
}
if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
PP->CodeCompleteNaturalLanguage();
cutOffLexing();
return false;
}
}
if (PP && !isLexingRawMode() &&
PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
getSourceLocation(CurPtr)))) {
BufferPtr = CurPtr;
return true;
}
if (inKeepCommentMode())
return SaveLineComment(Result, CurPtr);
if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
BufferPtr = CurPtr;
return false;
}
NewLinePtr = CurPtr++;
Result.setFlag(Token::StartOfLine);
TokAtPhysicalStartOfLine = true;
Result.clearFlag(Token::LeadingSpace);
BufferPtr = CurPtr;
return false;
}
bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
FormTokenWithChars(Result, CurPtr, tok::comment);
if (!ParsingPreprocessorDirective || LexingRawMode)
return true;
bool Invalid = false;
std::string Spelling = PP->getSpelling(Result, &Invalid);
if (Invalid)
return true;
assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
Spelling[1] = '*'; // Change the "//" prefix into "/*".
Spelling += "*/";  // Add the closing "*/".
Result.setKind(tok::comment);
PP->CreateString(Spelling, Result,
Result.getLocation(), Result.getLocation());
return true;
}
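// We just read a '/' preceded by the newline that CurPtr points at. Walk
// backwards over escaped newlines, whitespace, and trigraph-spelled
// backslashes to check whether a '*' precedes them, in which case the '/'
// really does terminate the enclosing block comment; emit the relevant
// diagnostics along the way.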
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L,
bool Trigraphs) {
assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
const char *TrigraphPos = nullptr;
const char *SpacePos = nullptr;
while (true) {
--CurPtr;
if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
if (CurPtr[0] == CurPtr[1])
return false;
--CurPtr;
}
while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
SpacePos = CurPtr;
--CurPtr;
}
if (*CurPtr == '\\') {
--CurPtr;
} else if (CurPtr[0] == '/' && CurPtr[-1] == '?' && CurPtr[-2] == '?') {
TrigraphPos = CurPtr - 2;
CurPtr -= 3;
} else {
return false;
}
if (*CurPtr == '*')
break;
if (*CurPtr != '\n' && *CurPtr != '\r')
return false;
}
if (TrigraphPos) {
if (!Trigraphs) {
if (!L->isLexingRawMode())
L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
return false;
}
if (!L->isLexingRawMode())
L->Diag(TrigraphPos, diag::trigraph_ends_block_comment);
}
if (!L->isLexingRawMode())
L->Diag(CurPtr + 1, diag::escaped_newline_block_comment_end);
if (SpacePos && !L->isLexingRawMode())
L->Diag(SpacePos, diag::backslash_newline_space);
return true;
}
#ifdef __SSE2__
#include <emmintrin.h>
#elif __ALTIVEC__
#include <altivec.h>
#undef bool
#endif
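/// Skip a "/* */" block comment, diagnosing nested "/*" sequences and
/// unterminated comments. The hot loop scans 16 bytes at a time (with SSE2 or
/// Altivec when available) looking for '/' or for non-ASCII bytes that need
/// UTF-8 validation. Returns true if the comment should become a token.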
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
bool &TokAtPhysicalStartOfLine) {
unsigned CharSize;
unsigned char C = getCharAndSize(CurPtr, CharSize);
CurPtr += CharSize;
if (C == 0 && CurPtr == BufferEnd+1) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::err_unterminated_block_comment);
--CurPtr;
if (isKeepWhitespaceMode()) {
FormTokenWithChars(Result, CurPtr, tok::unknown);
return true;
}
BufferPtr = CurPtr;
return false;
}
if (C == '/')
C = *CurPtr++;
bool UnicodeDecodingAlreadyDiagnosed = false;
while (true) {
if (CurPtr + 24 < BufferEnd &&
!(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
while (C != '/' && (intptr_t)CurPtr % 16 != 0) {
if (!isASCII(C))
goto MultiByteUTF8;
C = *CurPtr++;
}
if (C == '/') goto FoundSlash;
#ifdef __SSE2__
__m128i Slashes = _mm_set1_epi8('/');
while (CurPtr + 16 < BufferEnd) {
int Mask = _mm_movemask_epi8(*(const __m128i *)CurPtr);
if (LLVM_UNLIKELY(Mask != 0)) {
goto MultiByteUTF8;
}
int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
Slashes));
if (cmp != 0) {
CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
goto FoundSlash;
}
CurPtr += 16;
}
#elif __ALTIVEC__
__vector unsigned char LongUTF = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80};
__vector unsigned char Slashes = {
'/', '/', '/', '/', '/', '/', '/', '/',
'/', '/', '/', '/', '/', '/', '/', '/'
};
while (CurPtr + 16 < BufferEnd) {
if (LLVM_UNLIKELY(
vec_any_ge(*(const __vector unsigned char *)CurPtr, LongUTF)))
goto MultiByteUTF8;
if (vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes)) {
break;
}
CurPtr += 16;
}
#else
while (CurPtr + 16 < BufferEnd) {
bool HasNonASCII = false;
for (unsigned I = 0; I < 16; ++I)
HasNonASCII |= !isASCII(CurPtr[I]);
if (LLVM_UNLIKELY(HasNonASCII))
goto MultiByteUTF8;
bool HasSlash = false;
for (unsigned I = 0; I < 16; ++I)
HasSlash |= CurPtr[I] == '/';
if (HasSlash)
break;
CurPtr += 16;
}
#endif
C = *CurPtr++;
}
while (C != '/' && C != '\0') {
if (isASCII(C)) {
UnicodeDecodingAlreadyDiagnosed = false;
C = *CurPtr++;
continue;
}
MultiByteUTF8:
unsigned Length = llvm::getUTF8SequenceSize(
(const llvm::UTF8 *)CurPtr - 1, (const llvm::UTF8 *)BufferEnd);
if (Length == 0) {
if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode())
Diag(CurPtr - 1, diag::warn_invalid_utf8_in_comment);
UnicodeDecodingAlreadyDiagnosed = true;
} else {
UnicodeDecodingAlreadyDiagnosed = false;
CurPtr += Length - 1;
}
C = *CurPtr++;
}
if (C == '/') {
FoundSlash:
if (CurPtr[-2] == '*') break;
if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
if (isEndOfBlockCommentWithEscapedNewLine(CurPtr - 2, this,
LangOpts.Trigraphs)) {
break;
}
}
if (CurPtr[0] == '*' && CurPtr[1] != '/') {
if (!isLexingRawMode())
Diag(CurPtr-1, diag::warn_nested_block_comment);
}
} else if (C == 0 && CurPtr == BufferEnd+1) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::err_unterminated_block_comment);
--CurPtr;
if (isKeepWhitespaceMode()) {
FormTokenWithChars(Result, CurPtr, tok::unknown);
return true;
}
BufferPtr = CurPtr;
return false;
} else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
PP->CodeCompleteNaturalLanguage();
cutOffLexing();
return false;
}
C = *CurPtr++;
}
if (PP && !isLexingRawMode() &&
PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
getSourceLocation(CurPtr)))) {
BufferPtr = CurPtr;
return true;
}
if (inKeepCommentMode()) {
FormTokenWithChars(Result, CurPtr, tok::comment);
return true;
}
if (isHorizontalWhitespace(*CurPtr)) {
SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine);
return false;
}
BufferPtr = CurPtr;
Result.setFlag(Token::LeadingSpace);
return false;
}
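/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. If \p Result is non-null, the characters are
/// appended to it; lexing stops once the terminating end of line (or a
/// code-completion point) has been handled.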
void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
assert(ParsingPreprocessorDirective && ParsingFilename == false &&
"Must be in a preprocessing directive!");
Token Tmp;
Tmp.startToken();
const char *CurPtr = BufferPtr;
while (true) {
char Char = getAndAdvanceChar(CurPtr, Tmp);
switch (Char) {
default:
if (Result)
Result->push_back(Char);
break;
case 0:
// A null character that is not at the end of the buffer may be a
// code-completion point or an embedded null; a null at the end of the
// buffer is treated like the end of the line below.
if (CurPtr-1 != BufferEnd) {
if (isCodeCompletionPoint(CurPtr-1)) {
PP->CodeCompleteNaturalLanguage();
cutOffLexing();
return;
}
if (Result)
Result->push_back(Char);
break;
}
LLVM_FALLTHROUGH;
case '\r':
case '\n':
assert(CurPtr[-1] == Char && "Trigraphs for newline?");
BufferPtr = CurPtr-1;
Lex(Tmp);
if (Tmp.is(tok::code_completion)) {
if (PP)
PP->CodeCompleteNaturalLanguage();
Lex(Tmp);
}
assert(Tmp.is(tok::eod) && "Unexpected token!");
return;
}
}
}
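/// LexEndOfFile - CurPtr points to the end of this file. Handle this
/// condition: terminate a pending directive with tok::eod, produce a plain
/// eof token in raw mode, diagnose unterminated conditionals and a missing
/// final newline, and otherwise hand control to Preprocessor::HandleEndOfFile.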
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
if (ParsingPreprocessorDirective) {
ParsingPreprocessorDirective = false;
FormTokenWithChars(Result, CurPtr, tok::eod);
if (PP)
resetExtendedTokenMode();
return true;
}
if (isLexingRawMode()) {
Result.startToken();
BufferPtr = BufferEnd;
FormTokenWithChars(Result, BufferEnd, tok::eof);
return true;
}
if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) {
PP->setRecordedPreambleConditionalStack(ConditionalStack);
if (!ConditionalStack.empty())
MIOpt.ExitTopLevelConditional();
ConditionalStack.clear();
}
while (!ConditionalStack.empty()) {
if (PP->getCodeCompletionFileLoc() != FileLoc)
PP->Diag(ConditionalStack.back().IfLoc,
diag::err_pp_unterminated_conditional);
ConditionalStack.pop_back();
}
if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
DiagnosticsEngine &Diags = PP->getDiagnostics();
SourceLocation EndLoc = getSourceLocation(BufferEnd);
unsigned DiagID;
if (LangOpts.CPlusPlus11) {
if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) {
DiagID = diag::warn_cxx98_compat_no_newline_eof;
} else {
DiagID = diag::warn_no_newline_eof;
}
} else {
DiagID = diag::ext_no_newline_eof;
}
Diag(BufferEnd, DiagID)
<< FixItHint::CreateInsertion(EndLoc, "\n");
}
BufferPtr = CurPtr;
return PP->HandleEndOfFile(Result, isPragmaLexer());
}
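/// isNextPPTokenLParen - Peek at the next token in this lexer without
/// consuming it. Returns 1 if it is a '(', 0 if it is not, and 2 if the end
/// of the buffer is reached first. The lexer state is saved and restored
/// around the probe.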
unsigned Lexer::isNextPPTokenLParen() {
assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
if (isDependencyDirectivesLexer()) {
if (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size())
return 2;
return DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
tok::l_paren);
}
LexingRawMode = true;
const char *TmpBufferPtr = BufferPtr;
bool inPPDirectiveMode = ParsingPreprocessorDirective;
bool atStartOfLine = IsAtStartOfLine;
bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
bool leadingSpace = HasLeadingSpace;
Token Tok;
Lex(Tok);
BufferPtr = TmpBufferPtr;
ParsingPreprocessorDirective = inPPDirectiveMode;
HasLeadingSpace = leadingSpace;
IsAtStartOfLine = atStartOfLine;
IsAtPhysicalStartOfLine = atPhysicalStartOfLine;
LexingRawMode = false;
if (Tok.is(tok::eof))
return 2;
return Tok.is(tok::l_paren);
}
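/// Find the end of a version-control conflict marker that begins at CurPtr.
/// The terminator is ">>>>>>>" for normal markers and "<<<<" followed by a
/// newline for Perforce-style markers, and must appear at the start of a
/// line. Returns a pointer to the terminator, or nullptr if none is found.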
static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
ConflictMarkerKind CMK) {
const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
auto RestOfBuffer = StringRef(CurPtr, BufferEnd - CurPtr).substr(TermLen);
size_t Pos = RestOfBuffer.find(Terminator);
while (Pos != StringRef::npos) {
if (Pos == 0 ||
(RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
Pos = RestOfBuffer.find(Terminator);
continue;
}
return RestOfBuffer.data()+Pos;
}
return nullptr;
}
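/// IsStartOfConflictMarker - If CurPtr is at the start of a line that begins
/// with "<<<<<<<" or ">>>> " and a matching terminator exists later in the
/// buffer, diagnose the conflict marker, remember its kind, skip the rest of
/// the line, and return true.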
bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
if (CurPtr != BufferStart &&
CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
return false;
if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") &&
!StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> "))
return false;
if (CurrentConflictMarkerState || isLexingRawMode())
return false;
ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;
if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
Diag(CurPtr, diag::err_conflict_marker);
CurrentConflictMarkerState = Kind;
while (*CurPtr != '\r' && *CurPtr != '\n') {
assert(CurPtr != BufferEnd && "Didn't find end of line");
++CurPtr;
}
BufferPtr = CurPtr;
return true;
}
return false;
}
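/// HandleEndOfConflictMarker - If we are currently inside a diagnosed
/// conflict marker and CurPtr is at the start of a line consisting of at
/// least four identical marker characters, skip to the end of the marker's
/// terminator line, clear the conflict state, and return true.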
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
if (CurPtr != BufferStart &&
CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
return false;
if (!CurrentConflictMarkerState || isLexingRawMode())
return false;
for (unsigned i = 1; i != 4; ++i)
if (CurPtr[i] != CurPtr[0])
return false;
if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
CurrentConflictMarkerState)) {
CurPtr = End;
while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
++CurPtr;
BufferPtr = CurPtr;
CurrentConflictMarkerState = CMK_None;
return true;
}
return false;
}
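/// Scan forward for the "#>" that closes an editor placeholder, returning a
/// pointer just past it, or nullptr if the end of the buffer is reached
/// first.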
static const char *findPlaceholderEnd(const char *CurPtr,
const char *BufferEnd) {
if (CurPtr == BufferEnd)
return nullptr;
// Stop one character before the end so that reading CurPtr[1] below stays
// in bounds.
BufferEnd -= 1;
for (; CurPtr != BufferEnd; ++CurPtr) {
if (CurPtr[0] == '#' && CurPtr[1] == '>')
return CurPtr + 2;
}
return nullptr;
}
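/// lexEditorPlaceholder - Lex an editor placeholder of the form <#...#> as a
/// single raw_identifier token flagged as IsEditorPlaceholder. Unless the
/// language options allow editor placeholders, it is also diagnosed as an
/// error in the source.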
bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
if (!PP || !PP->getPreprocessorOpts().LexEditorPlaceholders || LexingRawMode)
return false;
const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
if (!End)
return false;
const char *Start = CurPtr - 1;
if (!LangOpts.AllowEditorPlaceholders)
Diag(Start, diag::err_placeholder_in_source);
Result.startToken();
FormTokenWithChars(Result, End, tok::raw_identifier);
Result.setRawIdentifierData(Start);
PP->LookUpIdentifierInfo(Result);
Result.setFlag(Token::IsEditorPlaceholder);
BufferPtr = End;
return true;
}
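/// isCodeCompletionPoint - Return true if code completion is enabled and
/// CurPtr corresponds exactly to the code-completion location.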
bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
if (PP && PP->isCodeCompletionEnabled()) {
SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
return Loc == PP->getCodeCompletionLoc();
}
return false;
}
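/// tryReadNumericUCN - Try to read a numeric universal character name
/// (\uXXXX, \UXXXXXXXX, or the delimited form \u{...}) starting at StartPtr,
/// which points at the 'u' or 'U'. On success the code point is returned and
/// StartPtr is advanced past the escape; on failure the problem is diagnosed
/// (when not lexing in raw mode) and None is returned.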
llvm::Optional<uint32_t> Lexer::tryReadNumericUCN(const char *&StartPtr,
const char *SlashLoc,
Token *Result) {
unsigned CharSize;
char Kind = getCharAndSize(StartPtr, CharSize);
assert((Kind == 'u' || Kind == 'U') && "expected a UCN");
unsigned NumHexDigits;
if (Kind == 'u')
NumHexDigits = 4;
else if (Kind == 'U')
NumHexDigits = 8;
bool Delimited = false;
bool FoundEndDelimiter = false;
unsigned Count = 0;
bool Diagnose = Result && !isLexingRawMode();
if (!LangOpts.CPlusPlus && !LangOpts.C99) {
if (Diagnose)
Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
return llvm::None;
}
const char *CurPtr = StartPtr + CharSize;
const char *KindLoc = &CurPtr[-1];
uint32_t CodePoint = 0;
while (Count != NumHexDigits || Delimited) {
char C = getCharAndSize(CurPtr, CharSize);
if (!Delimited && C == '{') {
Delimited = true;
CurPtr += CharSize;
continue;
}
if (Delimited && C == '}') {
CurPtr += CharSize;
FoundEndDelimiter = true;
break;
}
unsigned Value = llvm::hexDigitValue(C);
if (Value == -1U) {
if (!Delimited)
break;
if (Diagnose)
Diag(BufferPtr, diag::warn_delimited_ucn_incomplete)
<< StringRef(KindLoc, 1);
return llvm::None;
}
if (CodePoint & 0xF000'0000) {
if (Diagnose)
Diag(KindLoc, diag::err_escape_too_large) << 0;
return llvm::None;
}
CodePoint <<= 4;
CodePoint |= Value;
CurPtr += CharSize;
Count++;
}
if (Count == 0) {
if (Diagnose)
Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
: diag::warn_ucn_escape_no_digits)
<< StringRef(KindLoc, 1);
return llvm::None;
}
if (Delimited && Kind == 'U') {
if (Diagnose)
Diag(StartPtr, diag::err_hex_escape_no_digits) << StringRef(KindLoc, 1);
return llvm::None;
}
if (!Delimited && Count != NumHexDigits) {
if (Diagnose) {
Diag(BufferPtr, diag::warn_ucn_escape_incomplete);
if (Count == 4 && NumHexDigits == 8) {
CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
Diag(KindLoc, diag::note_ucn_four_not_eight)
<< FixItHint::CreateReplacement(URange, "u");
}
}
return llvm::None;
}
if (Delimited && PP) {
Diag(BufferPtr, PP->getLangOpts().CPlusPlus2b
? diag::warn_cxx2b_delimited_escape_sequence
: diag::ext_delimited_escape_sequence)
<< 0 << (PP->getLangOpts().CPlusPlus ? 1 : 0);
}
if (Result) {
Result->setFlag(Token::HasUCN);
if (CurPtr - StartPtr == (ptrdiff_t)(Count + 2 + (Delimited ? 2 : 0)))
StartPtr = CurPtr;
else
while (StartPtr != CurPtr)
(void)getAndAdvanceChar(StartPtr, *Result);
} else {
StartPtr = CurPtr;
}
return CodePoint;
}
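/// tryReadNamedUCN - Try to read a named universal character, \N{NAME},
/// starting at StartPtr, which points at the 'N'. The name is looked up
/// strictly first; if that fails, a loosely matching name is suggested via a
/// fix-it note and used for recovery. Returns the resolved code point, or
/// None if the escape is malformed or no usable name is found.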
llvm::Optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr,
Token *Result) {
unsigned CharSize;
bool Diagnose = Result && !isLexingRawMode();
char C = getCharAndSize(StartPtr, CharSize);
assert(C == 'N' && "expected \\N{...}");
const char *CurPtr = StartPtr + CharSize;
const char *KindLoc = &CurPtr[-1];
C = getCharAndSize(CurPtr, CharSize);
if (C != '{') {
if (Diagnose)
Diag(StartPtr, diag::warn_ucn_escape_incomplete);
return llvm::None;
}
CurPtr += CharSize;
const char *StartName = CurPtr;
bool FoundEndDelimiter = false;
llvm::SmallVector<char, 30> Buffer;
while (C) {
C = getCharAndSize(CurPtr, CharSize);
CurPtr += CharSize;
if (C == '}') {
FoundEndDelimiter = true;
break;
}
if (!isAlphanumeric(C) && C != '_' && C != '-' && C != ' ')
break;
Buffer.push_back(C);
}
if (!FoundEndDelimiter || Buffer.empty()) {
if (Diagnose)
Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
: diag::warn_delimited_ucn_incomplete)
<< StringRef(KindLoc, 1);
return llvm::None;
}
StringRef Name(Buffer.data(), Buffer.size());
llvm::Optional<char32_t> Res =
llvm::sys::unicode::nameToCodepointStrict(Name);
llvm::Optional<llvm::sys::unicode::LooseMatchingResult> LooseMatch;
if (!Res) {
if (!isLexingRawMode()) {
Diag(StartPtr, diag::err_invalid_ucn_name)
<< StringRef(Buffer.data(), Buffer.size());
LooseMatch = llvm::sys::unicode::nameToCodepointLooseMatching(Name);
if (LooseMatch) {
Diag(StartName, diag::note_invalid_ucn_name_loose_matching)
<< FixItHint::CreateReplacement(
makeCharRange(*this, StartName, CurPtr - CharSize),
LooseMatch->Name);
}
}
if (!LooseMatch)
return llvm::None;
}
if (Diagnose && PP && !LooseMatch)
Diag(BufferPtr, PP->getLangOpts().CPlusPlus2b
? diag::warn_cxx2b_delimited_escape_sequence
: diag::ext_delimited_escape_sequence)
<< 1 << (PP->getLangOpts().CPlusPlus ? 1 : 0);
if (LooseMatch)
Res = LooseMatch->CodePoint;
if (Result) {
Result->setFlag(Token::HasUCN);
if (CurPtr - StartPtr == (ptrdiff_t)(Buffer.size() + 4))
StartPtr = CurPtr;
else
while (StartPtr != CurPtr)
(void)getAndAdvanceChar(StartPtr, *Result);
} else {
StartPtr = CurPtr;
}
return *Res;
}
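/// tryReadUCN - Read a universal character name following a backslash,
/// either numeric (\u, \U, \u{...}) or named (\N{...}). Returns the code
/// point, or 0 if no valid UCN was read or the value is not permitted here:
/// outside of assembler preprocessing, code points below U+00A0 (other than
/// '$', '@' and '`') and surrogate values are rejected.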
uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
Token *Result) {
unsigned CharSize;
llvm::Optional<uint32_t> CodePointOpt;
char Kind = getCharAndSize(StartPtr, CharSize);
if (Kind == 'u' || Kind == 'U')
CodePointOpt = tryReadNumericUCN(StartPtr, SlashLoc, Result);
else if (Kind == 'N')
CodePointOpt = tryReadNamedUCN(StartPtr, Result);
if (!CodePointOpt)
return 0;
uint32_t CodePoint = *CodePointOpt;
if (LangOpts.AsmPreprocessor)
return CodePoint;
if (CodePoint < 0xA0) {
if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
return CodePoint;
if (Result && PP) {
if (CodePoint < 0x20 || CodePoint >= 0x7F)
Diag(BufferPtr, diag::err_ucn_control_character);
else {
char C = static_cast<char>(CodePoint);
Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
}
}
return 0;
} else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
if (Result && PP) {
if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
else
Diag(BufferPtr, diag::err_ucn_escape_invalid);
}
return 0;
}
return CodePoint;
}
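/// CheckUnicodeWhitespace - If we are not lexing in raw mode or emitting
/// preprocessed output and the given code point is Unicode whitespace,
/// diagnose it as an extension, set the LeadingSpace flag on the token, and
/// return true.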
bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
const char *CurPtr) {
if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
isUnicodeWhitespace(C)) {
Diag(BufferPtr, diag::ext_unicode_whitespace)
<< makeCharRange(*this, BufferPtr, CurPtr);
Result.setFlag(Token::LeadingSpace);
return true;
}
return false;
}
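/// Copy the start-of-line, leading-space, and leading-empty-macro flags from
/// an already-formed token back into the lexer's pending state so that the
/// next token lexed inherits them.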
void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
IsAtStartOfLine = Result.isAtStartOfLine();
HasLeadingSpace = Result.hasLeadingSpace();
HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
}
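/// Lex - Return the next token from this buffer. Pending start-of-line,
/// leading-space, and leading-empty-macro state is transferred onto the
/// token before dispatching to LexTokenInternal. Returns true if a token was
/// produced for the caller, false if the caller should lex again (for
/// example after a directive was handled).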
bool Lexer::Lex(Token &Result) {
assert(!isDependencyDirectivesLexer());
Result.startToken();
if (IsAtStartOfLine) {
Result.setFlag(Token::StartOfLine);
IsAtStartOfLine = false;
}
if (HasLeadingSpace) {
Result.setFlag(Token::LeadingSpace);
HasLeadingSpace = false;
}
if (HasLeadingEmptyMacro) {
Result.setFlag(Token::LeadingEmptyMacro);
HasLeadingEmptyMacro = false;
}
bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
IsAtPhysicalStartOfLine = false;
bool isRawLex = isLexingRawMode();
(void) isRawLex;
bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
assert((returnedToken || !isRawLex) && "Raw lex must succeed");
return returnedToken;
}
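/// LexTokenInternal - The main lexing loop. Skips whitespace and comments,
/// then switches on the first character of the token to lex identifiers,
/// numeric/string/character literals, punctuators, digraphs, and
/// preprocessor directives. Returns true once Result has been filled in,
/// false when more lexing is required (e.g. after a '#' directive has been
/// given to the preprocessor).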
bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
LexNextToken:
Result.clearFlag(Token::NeedsCleaning);
Result.setIdentifierInfo(nullptr);
const char *CurPtr = BufferPtr;
if (isHorizontalWhitespace(*CurPtr)) {
do {
++CurPtr;
} while (isHorizontalWhitespace(*CurPtr));
if (isKeepWhitespaceMode()) {
FormTokenWithChars(Result, CurPtr, tok::unknown);
return true;
}
BufferPtr = CurPtr;
Result.setFlag(Token::LeadingSpace);
}
unsigned SizeTmp, SizeTmp2;
char Char = getAndAdvanceChar(CurPtr, Result);
tok::TokenKind Kind;
if (!isVerticalWhitespace(Char))
NewLinePtr = nullptr;
switch (Char) {
case 0: // Null.
// A null at the very end of the buffer marks the end of the file.
if (CurPtr-1 == BufferEnd)
return LexEndOfFile(Result, CurPtr-1);
if (isCodeCompletionPoint(CurPtr-1)) {
Result.startToken();
FormTokenWithChars(Result, CurPtr, tok::code_completion);
return true;
}
if (!isLexingRawMode())
Diag(CurPtr-1, diag::null_in_file);
Result.setFlag(Token::LeadingSpace);
if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
return true;
goto LexNextToken;
case 26: // DOS/CP-M end-of-file marker (^Z).
if (LangOpts.MicrosoftExt) {
if (!isLexingRawMode())
Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
return LexEndOfFile(Result, CurPtr-1);
}
Kind = tok::unknown;
break;
case '\r':
if (CurPtr[0] == '\n')
(void)getAndAdvanceChar(CurPtr, Result);
LLVM_FALLTHROUGH;
case '\n':
if (ParsingPreprocessorDirective) {
ParsingPreprocessorDirective = false;
if (PP)
resetExtendedTokenMode();
IsAtStartOfLine = true;
IsAtPhysicalStartOfLine = true;
NewLinePtr = CurPtr - 1;
Kind = tok::eod;
break;
}
Result.clearFlag(Token::LeadingSpace);
if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
return true;
goto LexNextToken;
case ' ':
case '\t':
case '\f':
case '\v':
SkipHorizontalWhitespace:
Result.setFlag(Token::LeadingSpace);
if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
return true;
SkipIgnoredUnits:
CurPtr = BufferPtr;
if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
return true;
goto SkipIgnoredUnits;
} else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
return true;
goto SkipIgnoredUnits;
} else if (isHorizontalWhitespace(*CurPtr)) {
goto SkipHorizontalWhitespace;
}
goto LexNextToken;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
MIOpt.ReadToken();
return LexNumericConstant(Result, CurPtr);
case 'u':
MIOpt.ReadToken();
if (LangOpts.CPlusPlus11 || LangOpts.C11) {
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '"')
return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
tok::utf16_string_literal);
if (Char == '\'')
return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
tok::utf16_char_constant);
if (Char == 'R' && LangOpts.CPlusPlus11 &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::utf16_string_literal);
if (Char == '8') {
char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);
if (Char2 == '"')
return LexStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::utf8_string_literal);
if (Char2 == '\'' && (LangOpts.CPlusPlus17 || LangOpts.C2x))
return LexCharConstant(
Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::utf8_char_constant);
if (Char2 == 'R' && LangOpts.CPlusPlus11) {
unsigned SizeTmp3;
char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
if (Char3 == '"') {
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
SizeTmp3, Result),
tok::utf8_string_literal);
}
}
}
}
return LexIdentifierContinue(Result, CurPtr);
case 'U': // Identifier or C11/C++11 UTF-32 literal (U"..." / U'x').
MIOpt.ReadToken();
if (LangOpts.CPlusPlus11 || LangOpts.C11) {
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '"')
return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
tok::utf32_string_literal);
if (Char == '\'')
return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
tok::utf32_char_constant);
if (Char == 'R' && LangOpts.CPlusPlus11 &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::utf32_string_literal);
}
return LexIdentifierContinue(Result, CurPtr);
case 'R': // Identifier or C++11 raw string literal (R"...").
MIOpt.ReadToken();
if (LangOpts.CPlusPlus11) {
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '"')
return LexRawStringLiteral(Result,
ConsumeChar(CurPtr, SizeTmp, Result),
tok::string_literal);
}
return LexIdentifierContinue(Result, CurPtr);
case 'L': // Identifier or wide literal (L"..." / L'x').
MIOpt.ReadToken();
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '"')
return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
tok::wide_string_literal);
if (LangOpts.CPlusPlus11 && Char == 'R' &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
return LexRawStringLiteral(Result,
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::wide_string_literal);
if (Char == '\'')
return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
tok::wide_char_constant);
LLVM_FALLTHROUGH;
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
case 'H': case 'I': case 'J': case 'K': case 'M': case 'N':
case 'O': case 'P': case 'Q': case 'S': case 'T':
case 'V': case 'W': case 'X': case 'Y': case 'Z':
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
case 'o': case 'p': case 'q': case 'r': case 's': case 't':
case 'v': case 'w': case 'x': case 'y': case 'z':
case '_':
MIOpt.ReadToken();
return LexIdentifierContinue(Result, CurPtr);
case '$': // '$' in identifiers, when enabled.
if (LangOpts.DollarIdents) {
if (!isLexingRawMode())
Diag(CurPtr-1, diag::ext_dollar_in_identifier);
MIOpt.ReadToken();
return LexIdentifierContinue(Result, CurPtr);
}
Kind = tok::unknown;
break;
case '\'':
MIOpt.ReadToken();
return LexCharConstant(Result, CurPtr, tok::char_constant);
case '"':
MIOpt.ReadToken();
return LexStringLiteral(Result, CurPtr,
ParsingFilename ? tok::header_name
: tok::string_literal);
case '?':
Kind = tok::question;
break;
case '[':
Kind = tok::l_square;
break;
case ']':
Kind = tok::r_square;
break;
case '(':
Kind = tok::l_paren;
break;
case ')':
Kind = tok::r_paren;
break;
case '{':
Kind = tok::l_brace;
break;
case '}':
Kind = tok::r_brace;
break;
case '.':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char >= '0' && Char <= '9') {
MIOpt.ReadToken();
return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
} else if (LangOpts.CPlusPlus && Char == '*') {
Kind = tok::periodstar;
CurPtr += SizeTmp;
} else if (Char == '.' &&
getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
Kind = tok::ellipsis;
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
} else {
Kind = tok::period;
}
break;
case '&':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '&') {
Kind = tok::ampamp;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else if (Char == '=') {
Kind = tok::ampequal;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
Kind = tok::amp;
}
break;
case '*':
if (getCharAndSize(CurPtr, SizeTmp) == '=') {
Kind = tok::starequal;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
Kind = tok::star;
}
break;
case '+':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '+') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::plusplus;
} else if (Char == '=') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::plusequal;
} else {
Kind = tok::plus;
}
break;
case '-':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '-') { // --
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::minusminus;
} else if (Char == '>' && LangOpts.CPlusPlus &&
getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
Kind = tok::arrowstar;
} else if (Char == '>') { // ->
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::arrow;
} else if (Char == '=') { // -=
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::minusequal;
} else {
Kind = tok::minus;
}
break;
case '~':
Kind = tok::tilde;
break;
case '!':
if (getCharAndSize(CurPtr, SizeTmp) == '=') {
Kind = tok::exclaimequal;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
Kind = tok::exclaim;
}
break;
case '/':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '/') { // Line comment.
// Even when line comments are disabled (e.g. plain C89), treat "//" as a
// comment unless the next character is '*' or we are emitting preprocessed
// output, where "//*" could legitimately start a block comment.
bool TreatAsComment =
LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
if (!TreatAsComment)
if (!(PP && PP->isPreprocessedOutput()))
TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';
if (TreatAsComment) {
if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
TokAtPhysicalStartOfLine))
return true;
goto SkipIgnoredUnits;
}
}
if (Char == '*') { // Block comment.
if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
TokAtPhysicalStartOfLine))
return true;
goto LexNextToken;
}
if (Char == '=') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::slashequal;
} else {
Kind = tok::slash;
}
break;
case '%':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '=') {
Kind = tok::percentequal;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else if (LangOpts.Digraphs && Char == '>') { // '%>' -> '}' digraph.
Kind = tok::r_brace;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else if (LangOpts.Digraphs && Char == ':') { // '%:' digraphs.
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
Kind = tok::hashhash; // '%:%:' -> '##'.
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
} else if (Char == '@' && LangOpts.MicrosoftExt) { // '%:@' -> '#@' (charize).
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
if (!isLexingRawMode())
Diag(BufferPtr, diag::ext_charize_microsoft);
Kind = tok::hashat;
} else { // '%:' -> '#'.
// A '#' at the physical start of a line begins a preprocessing directive;
// hand it to the preprocessor.
if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
goto HandleDirective;
Kind = tok::hash;
}
} else {
Kind = tok::percent;
}
break;
case '<':
Char = getCharAndSize(CurPtr, SizeTmp);
if (ParsingFilename) {
return LexAngledStringLiteral(Result, CurPtr);
} else if (Char == '<') {
char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
if (After == '=') {
Kind = tok::lesslessequal;
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
} else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
goto LexNextToken;
} else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
goto LexNextToken;
} else if (LangOpts.CUDA && After == '<') {
Kind = tok::lesslessless;
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
} else {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::lessless;
}
} else if (Char == '=') {
char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
if (After == '>') {
if (LangOpts.CPlusPlus20) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
Kind = tok::spaceship;
break;
}
if (LangOpts.CPlusPlus && !isLexingRawMode()) {
Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
<< FixItHint::CreateInsertion(
getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
}
}
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::lessequal;
} else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '[' digraph.
// In C++11, "<::" followed by something other than ':' or '>' lexes the
// '<' as its own token rather than as the start of the '<:' digraph.
if (LangOpts.CPlusPlus11 &&
getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
unsigned SizeTmp3;
char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
if (After != ':' && After != '>') {
Kind = tok::less;
if (!isLexingRawMode())
Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
break;
}
}
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::l_square;
} else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{' digraph.
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::l_brace;
} else if (Char == '#' && SizeTmp == 1 &&
lexEditorPlaceholder(Result, CurPtr)) {
return true;
} else {
Kind = tok::less;
}
break;
case '>':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '=') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::greaterequal;
} else if (Char == '>') {
char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
if (After == '=') {
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
Kind = tok::greatergreaterequal;
} else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
goto LexNextToken;
} else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
goto LexNextToken;
} else if (LangOpts.CUDA && After == '>') {
Kind = tok::greatergreatergreater;
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result);
} else {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::greatergreater;
}
} else {
Kind = tok::greater;
}
break;
case '^':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '=') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::caretequal;
} else if (LangOpts.OpenCL && Char == '^') {
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::caretcaret;
} else {
Kind = tok::caret;
}
break;
case '|':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '=') {
Kind = tok::pipeequal;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else if (Char == '|') {
if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
goto LexNextToken;
Kind = tok::pipepipe;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
Kind = tok::pipe;
}
break;
case ':':
Char = getCharAndSize(CurPtr, SizeTmp);
if (LangOpts.Digraphs && Char == '>') {
Kind = tok::r_square; // ':>' -> ']' digraph.
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else if ((LangOpts.CPlusPlus ||
LangOpts.DoubleSquareBracketAttributes) &&
Char == ':') {
Kind = tok::coloncolon;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
Kind = tok::colon;
}
break;
case ';':
Kind = tok::semi;
break;
case '=':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '=') {
if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
goto LexNextToken;
Kind = tok::equalequal;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
Kind = tok::equal;
}
break;
case ',':
Kind = tok::comma;
break;
case '#':
Char = getCharAndSize(CurPtr, SizeTmp);
if (Char == '#') {
Kind = tok::hashhash;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else if (Char == '@' && LangOpts.MicrosoftExt) { // '#@' -> Microsoft charize operator.
Kind = tok::hashat;
if (!isLexingRawMode())
Diag(BufferPtr, diag::ext_charize_microsoft);
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer)
goto HandleDirective;
Kind = tok::hash;
}
break;
case '@':
if (CurPtr[-1] == '@' && LangOpts.ObjC)
Kind = tok::at;
else
Kind = tok::unknown;
break;
case '\\':
if (!LangOpts.AsmPreprocessor) {
if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) {
if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
return true;
goto LexNextToken;
}
return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
}
}
Kind = tok::unknown;
break;
default: {
if (isASCII(Char)) {
Kind = tok::unknown;
break;
}
llvm::UTF32 CodePoint;
--CurPtr;
llvm::ConversionResult Status =
llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
(const llvm::UTF8 *)BufferEnd,
&CodePoint,
llvm::strictConversion);
if (Status == llvm::conversionOK) {
if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
return true;
goto LexNextToken;
}
return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
}
if (isLexingRawMode() || ParsingPreprocessorDirective ||
PP->isPreprocessedOutput()) {
++CurPtr;
Kind = tok::unknown;
break;
}
Diag(CurPtr, diag::err_invalid_utf8);
BufferPtr = CurPtr+1;
goto LexNextToken;
}
}
MIOpt.ReadToken();
FormTokenWithChars(Result, CurPtr, Kind);
return true;
HandleDirective:
FormTokenWithChars(Result, CurPtr, tok::hash);
PP->HandleDirective(Result);
if (PP->hadModuleLoaderFatalFailure()) {
assert(Result.is(tok::eof) && "Preprocessor did not set tok:eof");
return true;
}
return false;
}
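/// Convert a token produced by the dependency-directives scanner into a
/// regular Token: restore its location, kind, flags, and length, and advance
/// BufferPtr past it. Returns a pointer to the token's spelling in the
/// buffer.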
const char *Lexer::convertDependencyDirectiveToken(
const dependency_directives_scan::Token &DDTok, Token &Result) {
const char *TokPtr = BufferStart + DDTok.Offset;
Result.startToken();
Result.setLocation(getSourceLocation(TokPtr));
Result.setKind(DDTok.Kind);
Result.setFlag((Token::TokenFlags)DDTok.Flags);
Result.setLength(DDTok.Length);
BufferPtr = TokPtr + DDTok.Length;
return TokPtr;
}
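/// LexDependencyDirectiveToken - Lex the next token from the pre-scanned
/// dependency directives rather than from the raw buffer. '#' tokens at the
/// start of a line are handed to the preprocessor, raw identifiers are looked
/// up, adjacent ':' tokens are merged into '::' when appropriate, and pp_eof
/// is forwarded to LexEndOfFile.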
bool Lexer::LexDependencyDirectiveToken(Token &Result) {
assert(isDependencyDirectivesLexer());
using namespace dependency_directives_scan;
while (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size()) {
if (DepDirectives.front().Kind == pp_eof)
return LexEndOfFile(Result, BufferEnd);
NextDepDirectiveTokenIndex = 0;
DepDirectives = DepDirectives.drop_front();
}
const dependency_directives_scan::Token &DDTok =
DepDirectives.front().Tokens[NextDepDirectiveTokenIndex++];
if (NextDepDirectiveTokenIndex > 1 || DDTok.Kind != tok::hash) {
MIOpt.ReadToken();
}
const char *TokPtr = convertDependencyDirectiveToken(DDTok, Result);
if (Result.is(tok::hash) && Result.isAtStartOfLine()) {
PP->HandleDirective(Result);
return false;
}
if (Result.is(tok::raw_identifier)) {
Result.setRawIdentifierData(TokPtr);
if (!isLexingRawMode()) {
IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
if (II->isHandleIdentifierCase())
return PP->HandleIdentifier(Result);
}
return true;
}
if (Result.isLiteral()) {
Result.setLiteralData(TokPtr);
return true;
}
if (Result.is(tok::colon) &&
(LangOpts.CPlusPlus || LangOpts.DoubleSquareBracketAttributes)) {
if (*BufferPtr == ':') {
assert(DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
tok::colon));
++NextDepDirectiveTokenIndex;
Result.setKind(tok::coloncolon);
}
return true;
}
if (Result.is(tok::eod))
ParsingPreprocessorDirective = false;
return true;
}
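/// LexDependencyDirectiveTokenWhileSkipping - While skipping an excluded
/// preprocessor region in dependency-directives mode, fast-forward over the
/// recorded directives, tracking #if/#endif nesting, until reaching an
/// #else/#elif/#elifdef/#elifndef/#endif at the outer level (or end of
/// file), and return its leading '#' token in Result.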
bool Lexer::LexDependencyDirectiveTokenWhileSkipping(Token &Result) {
assert(isDependencyDirectivesLexer());
using namespace dependency_directives_scan;
bool Stop = false;
unsigned NestedIfs = 0;
do {
DepDirectives = DepDirectives.drop_front();
switch (DepDirectives.front().Kind) {
case pp_none:
llvm_unreachable("unexpected 'pp_none'");
case pp_include:
case pp___include_macros:
case pp_define:
case pp_undef:
case pp_import:
case pp_pragma_import:
case pp_pragma_once:
case pp_pragma_push_macro:
case pp_pragma_pop_macro:
case pp_pragma_include_alias:
case pp_include_next:
case decl_at_import:
case cxx_module_decl:
case cxx_import_decl:
case cxx_export_module_decl:
case cxx_export_import_decl:
break;
case pp_if:
case pp_ifdef:
case pp_ifndef:
++NestedIfs;
break;
case pp_elif:
case pp_elifdef:
case pp_elifndef:
case pp_else:
if (!NestedIfs) {
Stop = true;
}
break;
case pp_endif:
if (!NestedIfs) {
Stop = true;
} else {
--NestedIfs;
}
break;
case pp_eof:
NextDepDirectiveTokenIndex = 0;
return LexEndOfFile(Result, BufferEnd);
}
} while (!Stop);
const dependency_directives_scan::Token &DDTok =
DepDirectives.front().Tokens.front();
assert(DDTok.is(tok::hash));
NextDepDirectiveTokenIndex = 1;
convertDependencyDirectiveToken(DDTok, Result);
return false;
}