1//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the actions class which performs semantic analysis and
10// builds an AST out of a parse stream.
11//
12//===----------------------------------------------------------------------===//
13
14#include "UsedDeclVisitor.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/ASTDiagnostic.h"
17#include "clang/AST/Decl.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/DeclFriend.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/Expr.h"
22#include "clang/AST/ExprCXX.h"
23#include "clang/AST/PrettyDeclStackTrace.h"
24#include "clang/AST/StmtCXX.h"
25#include "clang/Basic/DarwinSDKInfo.h"
26#include "clang/Basic/DiagnosticOptions.h"
27#include "clang/Basic/PartialDiagnostic.h"
28#include "clang/Basic/SourceManager.h"
29#include "clang/Basic/Stack.h"
30#include "clang/Basic/TargetInfo.h"
31#include "clang/Lex/HeaderSearch.h"
32#include "clang/Lex/HeaderSearchOptions.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/CXXFieldCollector.h"
35#include "clang/Sema/DelayedDiagnostic.h"
36#include "clang/Sema/EnterExpressionEvaluationContext.h"
37#include "clang/Sema/ExternalSemaSource.h"
38#include "clang/Sema/Initialization.h"
39#include "clang/Sema/MultiplexExternalSemaSource.h"
40#include "clang/Sema/ObjCMethodList.h"
41#include "clang/Sema/RISCVIntrinsicManager.h"
42#include "clang/Sema/Scope.h"
43#include "clang/Sema/ScopeInfo.h"
44#include "clang/Sema/SemaConsumer.h"
45#include "clang/Sema/SemaInternal.h"
46#include "clang/Sema/TemplateDeduction.h"
47#include "clang/Sema/TemplateInstCallback.h"
48#include "clang/Sema/TypoCorrection.h"
49#include "llvm/ADT/DenseMap.h"
50#include "llvm/ADT/STLExtras.h"
51#include "llvm/ADT/SmallPtrSet.h"
52#include "llvm/Support/TimeProfiler.h"
53#include <optional>
54
55using namespace clang;
56using namespace sema;
57
58SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
59 return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
60}
61
62ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
63
64DarwinSDKInfo *
65Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
66 StringRef Platform) {
67 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
68 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
69 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
70 << Platform;
71 WarnedDarwinSDKInfoMissing = true;
72 }
73 return SDKInfo;
74}
75
76DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
77 if (CachedDarwinSDKInfo)
78 return CachedDarwinSDKInfo->get();
79 auto SDKInfo = parseDarwinSDKInfo(
80 PP.getFileManager().getVirtualFileSystem(),
81 PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
82 if (SDKInfo && *SDKInfo) {
83 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
84 return CachedDarwinSDKInfo->get();
85 }
86 if (!SDKInfo)
87 llvm::consumeError(SDKInfo.takeError());
88 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
89 return nullptr;
90}
91
92IdentifierInfo *
93Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
94 unsigned int Index) {
95 std::string InventedName;
96 llvm::raw_string_ostream OS(InventedName);
97
98 if (!ParamName)
99 OS << "auto:" << Index + 1;
100 else
101 OS << ParamName->getName() << ":auto";
102
  return &Context.Idents.get(InventedName);
105}
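// Illustrative example (not part of the implementation): for an abbreviated
// function template such as `void f(auto, auto named)`, the invented template
// parameters corresponding to the two 'auto's would be displayed as "auto:1"
// (unnamed parameter, Index 0) and "named:auto", per the scheme above.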
106
107PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
108 const Preprocessor &PP) {
109 PrintingPolicy Policy = Context.getPrintingPolicy();
110 // In diagnostics, we print _Bool as bool if the latter is defined as the
111 // former.
112 Policy.Bool = Context.getLangOpts().Bool;
113 if (!Policy.Bool) {
114 if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
115 Policy.Bool = BoolMacro->isObjectLike() &&
116 BoolMacro->getNumTokens() == 1 &&
117 BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
118 }
119 }
120
121 // Shorten the data output if needed
122 Policy.EntireContentsOfLargeArray = false;
123
124 return Policy;
125}
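// For example (illustrative): in a C translation unit that includes
// <stdbool.h>, which traditionally does `#define bool _Bool`, the macro is
// object-like with the single replacement token `_Bool`, so the check above
// sets Policy.Bool and diagnostics print `bool` rather than `_Bool`.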
126
127void Sema::ActOnTranslationUnitScope(Scope *S) {
128 TUScope = S;
129 PushDeclContext(S, Context.getTranslationUnitDecl());
130}
131
132namespace clang {
133namespace sema {
134
135class SemaPPCallbacks : public PPCallbacks {
136 Sema *S = nullptr;
137 llvm::SmallVector<SourceLocation, 8> IncludeStack;
138
139public:
140 void set(Sema &S) { this->S = &S; }
141
142 void reset() { S = nullptr; }
143
144 void FileChanged(SourceLocation Loc, FileChangeReason Reason,
145 SrcMgr::CharacteristicKind FileType,
146 FileID PrevFID) override {
147 if (!S)
148 return;
149 switch (Reason) {
150 case EnterFile: {
151 SourceManager &SM = S->getSourceManager();
152 SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
153 if (IncludeLoc.isValid()) {
154 if (llvm::timeTraceProfilerEnabled()) {
155 const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
156 llvm::timeTraceProfilerBegin(
157 "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>"));
158 }
159
160 IncludeStack.push_back(IncludeLoc);
161 S->DiagnoseNonDefaultPragmaAlignPack(
162 Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
163 IncludeLoc);
164 }
165 break;
166 }
167 case ExitFile:
168 if (!IncludeStack.empty()) {
169 if (llvm::timeTraceProfilerEnabled())
170 llvm::timeTraceProfilerEnd();
171
172 S->DiagnoseNonDefaultPragmaAlignPack(
173 Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
174 IncludeStack.pop_back_val());
175 }
176 break;
177 default:
178 break;
179 }
180 }
181};
182
183} // end namespace sema
184} // end namespace clang
185
186const unsigned Sema::MaxAlignmentExponent;
187const uint64_t Sema::MaximumAlignment;
188
189Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
190 TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
191 : ExternalSource(nullptr), CurFPFeatures(pp.getLangOpts()),
192 LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
193 Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
194 CollectStats(false), CodeCompleter(CodeCompleter), CurContext(nullptr),
195 OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
196 MSPointerToMemberRepresentationMethod(
197 LangOpts.getMSPointerToMemberRepresentationMethod()),
198 VtorDispStack(LangOpts.getVtorDispMode()),
199 AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
200 DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
201 CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
202 FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
203 VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
204 IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
205 LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
206 StdInitializerList(nullptr), StdCoroutineTraitsCache(nullptr),
207 CXXTypeInfoDecl(nullptr), StdSourceLocationImplDecl(nullptr),
208 NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
209 StringWithUTF8StringMethod(nullptr),
210 ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
211 ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
212 DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
213 TUKind(TUKind), NumSFINAEErrors(0),
214 FullyCheckedComparisonCategories(
215 static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
216 SatisfactionCache(Context), AccessCheckingSFINAE(false),
217 InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
218 ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
219 DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
220 ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
221 CurScope(nullptr), Ident_super(nullptr) {
222 assert(pp.TUKind == TUKind);
223 TUScope = nullptr;
224 isConstantEvaluatedOverride = false;
225
226 LoadedExternalKnownNamespaces = false;
227 for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
228 NSNumberLiteralMethods[I] = nullptr;
229
230 if (getLangOpts().ObjC)
231 NSAPIObj.reset(new NSAPI(Context));
232
233 if (getLangOpts().CPlusPlus)
234 FieldCollector.reset(new CXXFieldCollector());
235
236 // Tell diagnostics how to render things from the AST library.
237 Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
238
239 // This evaluation context exists to ensure that there's always at least one
240 // valid evaluation context available. It is never removed from the
241 // evaluation stack.
242 ExprEvalContexts.emplace_back(
243 ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
244 nullptr, ExpressionEvaluationContextRecord::EK_Other);
245
246 // Initialization of data sharing attributes stack for OpenMP
247 InitDataSharingAttributesStack();
248
249 std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
250 std::make_unique<sema::SemaPPCallbacks>();
251 SemaPPCallbackHandler = Callbacks.get();
252 PP.addPPCallbacks(std::move(Callbacks));
253 SemaPPCallbackHandler->set(*this);
254
255 CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
256}
257
258// Anchor Sema's type info to this TU.
259void Sema::anchor() {}
260
261void Sema::addImplicitTypedef(StringRef Name, QualType T) {
262 DeclarationName DN = &Context.Idents.get(Name);
263 if (IdResolver.begin(DN) == IdResolver.end())
264 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
265}
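// Example (illustrative): Initialize() below uses this helper to predeclare
// builtin typedefs such as
//   addImplicitTypedef("size_t", Context.getSizeType());
// which only takes effect if nothing named `size_t` has been declared yet.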
266
267void Sema::Initialize() {
268 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
269 SC->InitializeSema(*this);
270
271 // Tell the external Sema source about this Sema object.
272 if (ExternalSemaSource *ExternalSema
273 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
274 ExternalSema->InitializeSema(*this);
275
276 // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
277 // will not be able to merge any duplicate __va_list_tag decls correctly.
278 VAListTagName = PP.getIdentifierInfo("__va_list_tag");
279
280 if (!TUScope)
281 return;
282
283 // Initialize predefined 128-bit integer types, if needed.
284 if (Context.getTargetInfo().hasInt128Type() ||
285 (Context.getAuxTargetInfo() &&
286 Context.getAuxTargetInfo()->hasInt128Type())) {
287 // If either of the 128-bit integer types are unavailable to name lookup,
288 // define them now.
289 DeclarationName Int128 = &Context.Idents.get("__int128_t");
290 if (IdResolver.begin(Int128) == IdResolver.end())
291 PushOnScopeChains(Context.getInt128Decl(), TUScope);
292
293 DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
294 if (IdResolver.begin(UInt128) == IdResolver.end())
295 PushOnScopeChains(Context.getUInt128Decl(), TUScope);
296 }
297
298
299 // Initialize predefined Objective-C types:
300 if (getLangOpts().ObjC) {
301 // If 'SEL' does not yet refer to any declarations, make it refer to the
302 // predefined 'SEL'.
303 DeclarationName SEL = &Context.Idents.get("SEL");
304 if (IdResolver.begin(SEL) == IdResolver.end())
305 PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
306
307 // If 'id' does not yet refer to any declarations, make it refer to the
308 // predefined 'id'.
309 DeclarationName Id = &Context.Idents.get("id");
310 if (IdResolver.begin(Id) == IdResolver.end())
311 PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
312
313 // Create the built-in typedef for 'Class'.
314 DeclarationName Class = &Context.Idents.get("Class");
315 if (IdResolver.begin(Class) == IdResolver.end())
316 PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
317
    // Create the built-in forward declaration for 'Protocol'.
319 DeclarationName Protocol = &Context.Idents.get("Protocol");
320 if (IdResolver.begin(Protocol) == IdResolver.end())
321 PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
322 }
323
324 // Create the internal type for the *StringMakeConstantString builtins.
325 DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
326 if (IdResolver.begin(ConstantString) == IdResolver.end())
327 PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);
328
329 // Initialize Microsoft "predefined C++ types".
330 if (getLangOpts().MSVCCompat) {
331 if (getLangOpts().CPlusPlus &&
332 IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
333 PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
334 TUScope);
335
336 addImplicitTypedef("size_t", Context.getSizeType());
337 }
338
339 // Initialize predefined OpenCL types and supported extensions and (optional)
340 // core features.
341 if (getLangOpts().OpenCL) {
342 getOpenCLOptions().addSupport(
343 Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
344 addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
345 addImplicitTypedef("event_t", Context.OCLEventTy);
346 auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
347 if (OCLCompatibleVersion >= 200) {
348 if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
349 addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
350 addImplicitTypedef("queue_t", Context.OCLQueueTy);
351 }
352 if (getLangOpts().OpenCLPipes)
353 addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
354 addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
355 addImplicitTypedef("atomic_uint",
356 Context.getAtomicType(Context.UnsignedIntTy));
357 addImplicitTypedef("atomic_float",
358 Context.getAtomicType(Context.FloatTy));
      // OpenCL C v2.0 s6.13.11.6 requires that atomic_flag be implemented as a
      // 32-bit integer, and OpenCL C v2.0 s6.1.1 guarantees that int is always
      // 32 bits wide.
361 addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
362
363
364 // OpenCL v2.0 s6.13.11.6:
365 // - The atomic_long and atomic_ulong types are supported if the
366 // cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
367 // extensions are supported.
368 // - The atomic_double type is only supported if double precision
369 // is supported and the cl_khr_int64_base_atomics and
370 // cl_khr_int64_extended_atomics extensions are supported.
371 // - If the device address space is 64-bits, the data types
372 // atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
373 // atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
374 // cl_khr_int64_extended_atomics extensions are supported.
375
376 auto AddPointerSizeDependentTypes = [&]() {
377 auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
378 auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
379 auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
380 auto AtomicPtrDiffT =
381 Context.getAtomicType(Context.getPointerDiffType());
382 addImplicitTypedef("atomic_size_t", AtomicSizeT);
383 addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
384 addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
385 addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
386 };
387
388 if (Context.getTypeSize(Context.getSizeType()) == 32) {
389 AddPointerSizeDependentTypes();
390 }
391
392 if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
393 auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
394 addImplicitTypedef("atomic_half", AtomicHalfT);
395 }
396
397 std::vector<QualType> Atomic64BitTypes;
398 if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
399 getLangOpts()) &&
400 getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
401 getLangOpts())) {
402 if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
403 auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
404 addImplicitTypedef("atomic_double", AtomicDoubleT);
405 Atomic64BitTypes.push_back(AtomicDoubleT);
406 }
407 auto AtomicLongT = Context.getAtomicType(Context.LongTy);
408 auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
409 addImplicitTypedef("atomic_long", AtomicLongT);
410 addImplicitTypedef("atomic_ulong", AtomicULongT);
411
412
413 if (Context.getTypeSize(Context.getSizeType()) == 64) {
414 AddPointerSizeDependentTypes();
415 }
416 }
417 }
418
419#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
420 if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
421 addImplicitTypedef(#ExtType, Context.Id##Ty); \
422 }
423#include "clang/Basic/OpenCLExtensionTypes.def"
424 }
425
426 if (Context.getTargetInfo().hasAArch64SVETypes()) {
427#define SVE_TYPE(Name, Id, SingletonId) \
428 addImplicitTypedef(Name, Context.SingletonId);
429#include "clang/Basic/AArch64SVEACLETypes.def"
430 }
431
432 if (Context.getTargetInfo().getTriple().isPPC64()) {
433#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
434 addImplicitTypedef(#Name, Context.Id##Ty);
435#include "clang/Basic/PPCTypes.def"
436#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
437 addImplicitTypedef(#Name, Context.Id##Ty);
438#include "clang/Basic/PPCTypes.def"
439 }
440
441 if (Context.getTargetInfo().hasRISCVVTypes()) {
442#define RVV_TYPE(Name, Id, SingletonId) \
443 addImplicitTypedef(Name, Context.SingletonId);
444#include "clang/Basic/RISCVVTypes.def"
445 }
446
447 if (Context.getTargetInfo().getTriple().isWasm() &&
448 Context.getTargetInfo().hasFeature("reference-types")) {
449#define WASM_TYPE(Name, Id, SingletonId) \
450 addImplicitTypedef(Name, Context.SingletonId);
451#include "clang/Basic/WebAssemblyReferenceTypes.def"
452 }
453
454 if (Context.getTargetInfo().hasBuiltinMSVaList()) {
455 DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
456 if (IdResolver.begin(MSVaList) == IdResolver.end())
457 PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
458 }
459
460 DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
461 if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
462 PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
463}
464
465Sema::~Sema() {
466 assert(InstantiatingSpecializations.empty() &&
467 "failed to clean up an InstantiatingTemplate?");
468
469 if (VisContext) FreeVisContext();
470
471 // Kill all the active scopes.
472 for (sema::FunctionScopeInfo *FSI : FunctionScopes)
473 delete FSI;
474
475 // Tell the SemaConsumer to forget about us; we're going out of scope.
476 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
477 SC->ForgetSema();
478
479 // Detach from the external Sema source.
480 if (ExternalSemaSource *ExternalSema
481 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
482 ExternalSema->ForgetSema();
483
484 // Delete cached satisfactions.
485 std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
487 for (auto &Node : SatisfactionCache)
488 Satisfactions.push_back(&Node);
489 for (auto *Node : Satisfactions)
490 delete Node;
491
492 threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
493
494 // Destroys data sharing attributes stack for OpenMP
495 DestroyDataSharingAttributesStack();
496
497 // Detach from the PP callback handler which outlives Sema since it's owned
498 // by the preprocessor.
499 SemaPPCallbackHandler->reset();
500}
501
502void Sema::warnStackExhausted(SourceLocation Loc) {
503 // Only warn about this once.
504 if (!WarnedStackExhausted) {
505 Diag(Loc, diag::warn_stack_exhausted);
506 WarnedStackExhausted = true;
507 }
508}
509
510void Sema::runWithSufficientStackSpace(SourceLocation Loc,
511 llvm::function_ref<void()> Fn) {
512 clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
513}
514
515/// makeUnavailableInSystemHeader - There is an error in the current
516/// context. If we're still in a system header, and we can plausibly
517/// make the relevant declaration unavailable instead of erroring, do
518/// so and return true.
519bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
520 UnavailableAttr::ImplicitReason reason) {
521 // If we're not in a function, it's an error.
522 FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
523 if (!fn) return false;
524
525 // If we're in template instantiation, it's an error.
526 if (inTemplateInstantiation())
527 return false;
528
529 // If that function's not in a system header, it's an error.
530 if (!Context.getSourceManager().isInSystemHeader(loc))
531 return false;
532
533 // If the function is already unavailable, it's not an error.
534 if (fn->hasAttr<UnavailableAttr>()) return true;
535
536 fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
537 return true;
538}
539
540ASTMutationListener *Sema::getASTMutationListener() const {
541 return getASTConsumer().GetASTMutationListener();
542}
543
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
548///
549void Sema::addExternalSource(ExternalSemaSource *E) {
550 assert(E && "Cannot use with NULL ptr");
551
552 if (!ExternalSource) {
553 ExternalSource = E;
554 return;
555 }
556
557 if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
558 Ex->AddSource(E);
559 else
560 ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
561}
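// Usage sketch (hypothetical source names, for illustration only): the first
// registered source is stored directly; a second one causes both to be wrapped
// in a MultiplexExternalSemaSource, and any later sources are simply appended:
//   S.addExternalSource(ReaderSource); // ExternalSource = ReaderSource
//   S.addExternalSource(PluginSource); // now a multiplexer over both sources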
562
563/// Print out statistics about the semantic analysis.
564void Sema::PrintStats() const {
565 llvm::errs() << "\n*** Semantic Analysis Stats:\n";
566 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
567
568 BumpAlloc.PrintStats();
569 AnalysisWarnings.PrintStats();
570}
571
572void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
573 QualType SrcType,
574 SourceLocation Loc) {
575 std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
576 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
577 *ExprNullability != NullabilityKind::NullableResult))
578 return;
579
580 std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
581 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
582 return;
583
584 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
585}
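// Illustrative example of the pattern this diagnoses (only emitted when the
// corresponding warning group is enabled):
//   void take(int *_Nonnull p);
//   void pass(int *_Nullable q) { take(q); } // warn_nullability_lost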
586
587void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
588 // nullptr only exists from C++11 on, so don't warn on its absence earlier.
589 if (!getLangOpts().CPlusPlus11)
590 return;
591
592 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
593 return;
594 if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
595 return;
596
597 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
598 E->getBeginLoc()))
599 return;
600
601 // Don't diagnose the conversion from a 0 literal to a null pointer argument
602 // in a synthesized call to operator<=>.
603 if (!CodeSynthesisContexts.empty() &&
604 CodeSynthesisContexts.back().Kind ==
605 CodeSynthesisContext::RewritingOperatorAsSpaceship)
606 return;
607
608 // Ignore null pointers in defaulted comparison operators.
609 FunctionDecl *FD = getCurFunctionDecl();
610 if (FD && FD->isDefaulted()) {
611 return;
612 }
613
  // If it is a macro from a system header, and if the macro name is not
  // "NULL", do not warn.
616 SourceLocation MaybeMacroLoc = E->getBeginLoc();
617 if (Diags.getSuppressSystemWarnings() &&
618 SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
619 !findMacroSpelling(MaybeMacroLoc, "NULL"))
620 return;
621
622 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
623 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
624}
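// Illustrative example (C++11 or later, -Wzero-as-null-pointer-constant):
//   int *p = 0;    // warns, with a fix-it suggesting `nullptr`
//   int *q = NULL; // also warns: the system-macro suppression above is
//                  // bypassed when the macro spelling is exactly "NULL"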
625
626/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
627/// If there is already an implicit cast, merge into the existing one.
628/// The result is of the given category.
629ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
630 CastKind Kind, ExprValueKind VK,
631 const CXXCastPath *BasePath,
632 CheckedConversionKind CCK) {
633#ifndef NDEBUG
634 if (VK == VK_PRValue && !E->isPRValue()) {
635 switch (Kind) {
636 default:
637 llvm_unreachable(
638 ("can't implicitly cast glvalue to prvalue with this cast "
639 "kind: " +
640 std::string(CastExpr::getCastKindName(Kind)))
641 .c_str());
642 case CK_Dependent:
643 case CK_LValueToRValue:
644 case CK_ArrayToPointerDecay:
645 case CK_FunctionToPointerDecay:
646 case CK_ToVoid:
647 case CK_NonAtomicToAtomic:
648 break;
649 }
650 }
651 assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
652 "can't cast prvalue to glvalue");
653#endif
654
655 diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
656 diagnoseZeroToNullptrConversion(Kind, E);
657
658 QualType ExprTy = Context.getCanonicalType(E->getType());
659 QualType TypeTy = Context.getCanonicalType(Ty);
660
661 if (ExprTy == TypeTy)
662 return E;
663
664 if (Kind == CK_ArrayToPointerDecay) {
665 // C++1z [conv.array]: The temporary materialization conversion is applied.
666 // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
667 if (getLangOpts().CPlusPlus && E->isPRValue()) {
668 // The temporary is an lvalue in C++98 and an xvalue otherwise.
669 ExprResult Materialized = CreateMaterializeTemporaryExpr(
670 E->getType(), E, !getLangOpts().CPlusPlus11);
671 if (Materialized.isInvalid())
672 return ExprError();
673 E = Materialized.get();
674 }
675 // C17 6.7.1p6 footnote 124: The implementation can treat any register
676 // declaration simply as an auto declaration. However, whether or not
677 // addressable storage is actually used, the address of any part of an
678 // object declared with storage-class specifier register cannot be
    // computed, either explicitly (by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly (by converting an array name to a pointer as
    // discussed in 6.3.2.1). Thus, the only operator that can be applied to an
682 // array declared with storage-class specifier register is sizeof.
683 if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
684 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
685 if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
686 if (VD->getStorageClass() == SC_Register) {
687 Diag(E->getExprLoc(), diag::err_typecheck_address_of)
688 << /*register variable*/ 3 << E->getSourceRange();
689 return ExprError();
690 }
691 }
692 }
693 }
694 }
695
696 if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
697 if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
698 ImpCast->setType(Ty);
699 ImpCast->setValueKind(VK);
700 return E;
701 }
702 }
703
704 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
705 CurFPFeatureOverrides());
706}
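// Minimal usage sketch (illustrative; real callers are the conversion routines
// in SemaExpr.cpp and friends):
//   ExprResult R = S.ImpCastExprToType(E, S.Context.IntTy, CK_IntegralCast,
//                                      VK_PRValue);
//   // R is the original expression if the canonical types already match, the
//   // existing ImplicitCastExpr updated in place, or a fresh ImplicitCastExpr.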
707
708/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
709/// to the conversion from scalar type ScalarTy to the Boolean type.
710CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
711 switch (ScalarTy->getScalarTypeKind()) {
712 case Type::STK_Bool: return CK_NoOp;
713 case Type::STK_CPointer: return CK_PointerToBoolean;
714 case Type::STK_BlockPointer: return CK_PointerToBoolean;
715 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
716 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
717 case Type::STK_Integral: return CK_IntegralToBoolean;
718 case Type::STK_Floating: return CK_FloatingToBoolean;
719 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
720 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
721 case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
722 }
723 llvm_unreachable("unknown scalar type kind");
724}
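// For example, when an `int` condition is contextually converted to bool (as
// in `if (x)`), this returns CK_IntegralToBoolean; a pointer condition yields
// CK_PointerToBoolean, and a `_Complex float` condition yields
// CK_FloatingComplexToBoolean.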
725
726/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
727static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
728 if (D->getMostRecentDecl()->isUsed())
729 return true;
730
731 if (D->isExternallyVisible())
732 return true;
733
734 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
735 // If this is a function template and none of its specializations is used,
736 // we should warn.
737 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
738 for (const auto *Spec : Template->specializations())
739 if (ShouldRemoveFromUnused(SemaRef, Spec))
740 return true;
741
742 // UnusedFileScopedDecls stores the first declaration.
743 // The declaration may have become definition so check again.
744 const FunctionDecl *DeclToCheck;
745 if (FD->hasBody(DeclToCheck))
746 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
747
748 // Later redecls may add new information resulting in not having to warn,
749 // so check again.
750 DeclToCheck = FD->getMostRecentDecl();
751 if (DeclToCheck != FD)
752 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
753 }
754
755 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
756 // If a variable usable in constant expressions is referenced,
757 // don't warn if it isn't used: if the value of a variable is required
758 // for the computation of a constant expression, it doesn't make sense to
759 // warn even if the variable isn't odr-used. (isReferenced doesn't
760 // precisely reflect that, but it's a decent approximation.)
761 if (VD->isReferenced() &&
762 VD->mightBeUsableInConstantExpressions(SemaRef->Context))
763 return true;
764
765 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
766 // If this is a variable template and none of its specializations is used,
767 // we should warn.
768 for (const auto *Spec : Template->specializations())
769 if (ShouldRemoveFromUnused(SemaRef, Spec))
770 return true;
771
772 // UnusedFileScopedDecls stores the first declaration.
773 // The declaration may have become definition so check again.
774 const VarDecl *DeclToCheck = VD->getDefinition();
775 if (DeclToCheck)
776 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
777
778 // Later redecls may add new information resulting in not having to warn,
779 // so check again.
780 DeclToCheck = VD->getMostRecentDecl();
781 if (DeclToCheck != VD)
782 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
783 }
784
785 return false;
786}
787
788static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
789 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
790 return FD->isExternC();
791 return cast<VarDecl>(ND)->isExternC();
792}
793
794/// Determine whether ND is an external-linkage function or variable whose
795/// type has no linkage.
796bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
797 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
798 // because we also want to catch the case where its type has VisibleNoLinkage,
799 // which does not affect the linkage of VD.
800 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
801 !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
802 !isFunctionOrVarDeclExternC(VD);
803}
804
805/// Obtains a sorted list of functions and variables that are undefined but
806/// ODR-used.
807void Sema::getUndefinedButUsed(
808 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
809 for (const auto &UndefinedUse : UndefinedButUsed) {
810 NamedDecl *ND = UndefinedUse.first;
811
    // Ignore declarations that have become invalid.
813 if (ND->isInvalidDecl()) continue;
814
815 // __attribute__((weakref)) is basically a definition.
816 if (ND->hasAttr<WeakRefAttr>()) continue;
817
818 if (isa<CXXDeductionGuideDecl>(ND))
819 continue;
820
821 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
822 // An exported function will always be emitted when defined, so even if
823 // the function is inline, it doesn't have to be emitted in this TU. An
824 // imported function implies that it has been exported somewhere else.
825 continue;
826 }
827
828 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
829 if (FD->isDefined())
830 continue;
831 if (FD->isExternallyVisible() &&
832 !isExternalWithNoLinkageType(FD) &&
833 !FD->getMostRecentDecl()->isInlined() &&
834 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
835 continue;
836 if (FD->getBuiltinID())
837 continue;
838 } else {
839 auto *VD = cast<VarDecl>(ND);
840 if (VD->hasDefinition() != VarDecl::DeclarationOnly)
841 continue;
842 if (VD->isExternallyVisible() &&
843 !isExternalWithNoLinkageType(VD) &&
844 !VD->getMostRecentDecl()->isInline() &&
845 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
846 continue;
847
848 // Skip VarDecls that lack formal definitions but which we know are in
849 // fact defined somewhere.
850 if (VD->isKnownToBeDefined())
851 continue;
852 }
853
854 Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
855 }
856}
857
858/// checkUndefinedButUsed - Check for undefined objects with internal linkage
859/// or that are inline.
860static void checkUndefinedButUsed(Sema &S) {
861 if (S.UndefinedButUsed.empty()) return;
862
863 // Collect all the still-undefined entities with internal linkage.
864 SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
865 S.getUndefinedButUsed(Undefined);
866 if (Undefined.empty()) return;
867
868 for (const auto &Undef : Undefined) {
869 ValueDecl *VD = cast<ValueDecl>(Undef.first);
870 SourceLocation UseLoc = Undef.second;
871
872 if (S.isExternalWithNoLinkageType(VD)) {
873 // C++ [basic.link]p8:
874 // A type without linkage shall not be used as the type of a variable
875 // or function with external linkage unless
876 // -- the entity has C language linkage
877 // -- the entity is not odr-used or is defined in the same TU
878 //
879 // As an extension, accept this in cases where the type is externally
880 // visible, since the function or variable actually can be defined in
881 // another translation unit in that case.
882 S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
883 ? diag::ext_undefined_internal_type
884 : diag::err_undefined_internal_type)
885 << isa<VarDecl>(VD) << VD;
886 } else if (!VD->isExternallyVisible()) {
887 // FIXME: We can promote this to an error. The function or variable can't
888 // be defined anywhere else, so the program must necessarily violate the
889 // one definition rule.
890 bool IsImplicitBase = false;
891 if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
892 auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
893 if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
894 llvm::omp::TraitProperty::
895 implementation_extension_disable_implicit_base)) {
896 const auto *Func = cast<FunctionDecl>(
897 cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
898 IsImplicitBase = BaseD->isImplicit() &&
899 Func->getIdentifier()->isMangledOpenMPVariantName();
900 }
901 }
902 if (!S.getLangOpts().OpenMP || !IsImplicitBase)
903 S.Diag(VD->getLocation(), diag::warn_undefined_internal)
904 << isa<VarDecl>(VD) << VD;
905 } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
906 (void)FD;
907 assert(FD->getMostRecentDecl()->isInlined() &&
908 "used object requires definition but isn't inline or internal?");
909 // FIXME: This is ill-formed; we should reject.
910 S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
911 } else {
912 assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
913 "used var requires definition but isn't inline or internal?");
914 S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
915 }
916 if (UseLoc.isValid())
917 S.Diag(UseLoc, diag::note_used_here);
918 }
919
920 S.UndefinedButUsed.clear();
921}
922
923void Sema::LoadExternalWeakUndeclaredIdentifiers() {
924 if (!ExternalSource)
925 return;
926
927 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
928 ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
929 for (auto &WeakID : WeakIDs)
930 (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
931}
932
933
934typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
935
/// Returns true if all methods and nested classes of the given
937/// CXXRecordDecl are defined in this translation unit.
938///
939/// Should only be called from ActOnEndOfTranslationUnit so that all
940/// definitions are actually read.
941static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
942 RecordCompleteMap &MNCComplete) {
943 RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
944 if (Cache != MNCComplete.end())
945 return Cache->second;
946 if (!RD->isCompleteDefinition())
947 return false;
948 bool Complete = true;
949 for (DeclContext::decl_iterator I = RD->decls_begin(),
950 E = RD->decls_end();
951 I != E && Complete; ++I) {
952 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
953 Complete = M->isDefined() || M->isDefaulted() ||
954 (M->isPure() && !isa<CXXDestructorDecl>(M));
955 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
956 // If the template function is marked as late template parsed at this
957 // point, it has not been instantiated and therefore we have not
958 // performed semantic analysis on it yet, so we cannot know if the type
959 // can be considered complete.
960 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
961 F->getTemplatedDecl()->isDefined();
962 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
963 if (R->isInjectedClassName())
964 continue;
965 if (R->hasDefinition())
966 Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
967 MNCComplete);
968 else
969 Complete = false;
970 }
971 }
972 MNCComplete[RD] = Complete;
973 return Complete;
974}
975
/// Returns true if the given CXXRecordDecl is fully defined in this
977/// translation unit, i.e. all methods are defined or pure virtual and all
978/// friends, friend functions and nested classes are fully defined in this
979/// translation unit.
980///
981/// Should only be called from ActOnEndOfTranslationUnit so that all
982/// definitions are actually read.
983static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
984 RecordCompleteMap &RecordsComplete,
985 RecordCompleteMap &MNCComplete) {
986 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
987 if (Cache != RecordsComplete.end())
988 return Cache->second;
989 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
990 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
991 E = RD->friend_end();
992 I != E && Complete; ++I) {
993 // Check if friend classes and methods are complete.
994 if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
995 // Friend classes are available as the TypeSourceInfo of the FriendDecl.
996 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
997 Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
998 else
999 Complete = false;
1000 } else {
1001 // Friend functions are available through the NamedDecl of FriendDecl.
1002 if (const FunctionDecl *FD =
1003 dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
1004 Complete = FD->isDefined();
1005 else
1006 // This is a template friend, give up.
1007 Complete = false;
1008 }
1009 }
1010 RecordsComplete[RD] = Complete;
1011 return Complete;
1012}
1013
1014void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1015 if (ExternalSource)
1016 ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1017 UnusedLocalTypedefNameCandidates);
1018 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1019 if (TD->isReferenced())
1020 continue;
1021 Diag(TD->getLocation(), diag::warn_unused_local_typedef)
1022 << isa<TypeAliasDecl>(TD) << TD->getDeclName();
1023 }
1024 UnusedLocalTypedefNameCandidates.clear();
1025}
1026
1027/// This is called before the very first declaration in the translation unit
1028/// is parsed. Note that the ASTContext may have already injected some
1029/// declarations.
1030void Sema::ActOnStartOfTranslationUnit() {
1031 if (getLangOpts().CPlusPlusModules &&
1032 getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1033 HandleStartOfHeaderUnit();
1034}
1035
1036void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
1037 // No explicit actions are required at the end of the global module fragment.
1038 if (Kind == TUFragmentKind::Global)
1039 return;
1040
1041 // Transfer late parsed template instantiations over to the pending template
1042 // instantiation list. During normal compilation, the late template parser
1043 // will be installed and instantiating these templates will succeed.
1044 //
1045 // If we are building a TU prefix for serialization, it is also safe to
1046 // transfer these over, even though they are not parsed. The end of the TU
1047 // should be outside of any eager template instantiation scope, so when this
1048 // AST is deserialized, these templates will not be parsed until the end of
1049 // the combined TU.
1050 PendingInstantiations.insert(PendingInstantiations.end(),
1051 LateParsedInstantiations.begin(),
1052 LateParsedInstantiations.end());
1053 LateParsedInstantiations.clear();
1054
  // If DefineUsedVTables ends up marking any virtual member functions it
1056 // might lead to more pending template instantiations, which we then need
1057 // to instantiate.
1058 DefineUsedVTables();
1059
1060 // C++: Perform implicit template instantiations.
1061 //
1062 // FIXME: When we perform these implicit instantiations, we do not
1063 // carefully keep track of the point of instantiation (C++ [temp.point]).
1064 // This means that name lookup that occurs within the template
1065 // instantiation will always happen at the end of the translation unit,
1066 // so it will find some names that are not required to be found. This is
1067 // valid, but we could do better by diagnosing if an instantiation uses a
1068 // name that was not visible at its first point of instantiation.
1069 if (ExternalSource) {
1070 // Load pending instantiations from the external source.
1071 SmallVector<PendingImplicitInstantiation, 4> Pending;
1072 ExternalSource->ReadPendingInstantiations(Pending);
1073 for (auto PII : Pending)
1074 if (auto Func = dyn_cast<FunctionDecl>(PII.first))
1075 Func->setInstantiationIsPending(true);
1076 PendingInstantiations.insert(PendingInstantiations.begin(),
1077 Pending.begin(), Pending.end());
1078 }
1079
1080 {
1081 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1082 PerformPendingInstantiations();
1083 }
1084
1085 emitDeferredDiags();
1086
1087 assert(LateParsedInstantiations.empty() &&
1088 "end of TU template instantiation should not create more "
1089 "late-parsed templates");
1090
1091 // Report diagnostics for uncorrected delayed typos. Ideally all of them
1092 // should have been corrected by that time, but it is very hard to cover all
1093 // cases in practice.
1094 for (const auto &Typo : DelayedTypos) {
1095 // We pass an empty TypoCorrection to indicate no correction was performed.
1096 Typo.second.DiagHandler(TypoCorrection());
1097 }
1098 DelayedTypos.clear();
1099}
1100
1101/// ActOnEndOfTranslationUnit - This is called at the very end of the
1102/// translation unit when EOF is reached and all but the top-level scope is
1103/// popped.
1104void Sema::ActOnEndOfTranslationUnit() {
1105 assert(DelayedDiagnostics.getCurrentPool() == nullptr
1106 && "reached end of translation unit with a pool attached?");
1107
1108 // If code completion is enabled, don't perform any end-of-translation-unit
1109 // work.
1110 if (PP.isCodeCompletionEnabled())
1111 return;
1112
1113 // Complete translation units and modules define vtables and perform implicit
1114 // instantiations. PCH files do not.
1115 if (TUKind != TU_Prefix) {
1116 DiagnoseUseOfUnimplementedSelectors();
1117
1118 ActOnEndOfTranslationUnitFragment(
1119 !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1120 Module::PrivateModuleFragment
1121 ? TUFragmentKind::Private
1122 : TUFragmentKind::Normal);
1123
1124 if (LateTemplateParserCleanup)
1125 LateTemplateParserCleanup(OpaqueParser);
1126
1127 CheckDelayedMemberExceptionSpecs();
1128 } else {
1129 // If we are building a TU prefix for serialization, it is safe to transfer
1130 // these over, even though they are not parsed. The end of the TU should be
1131 // outside of any eager template instantiation scope, so when this AST is
1132 // deserialized, these templates will not be parsed until the end of the
1133 // combined TU.
1134 PendingInstantiations.insert(PendingInstantiations.end(),
1135 LateParsedInstantiations.begin(),
1136 LateParsedInstantiations.end());
1137 LateParsedInstantiations.clear();
1138
1139 if (LangOpts.PCHInstantiateTemplates) {
1140 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1141 PerformPendingInstantiations();
1142 }
1143 }
1144
1145 DiagnoseUnterminatedPragmaAlignPack();
1146 DiagnoseUnterminatedPragmaAttribute();
1147 DiagnoseUnterminatedOpenMPDeclareTarget();
1148
1149 // All delayed member exception specs should be checked or we end up accepting
1150 // incompatible declarations.
1151 assert(DelayedOverridingExceptionSpecChecks.empty());
1152 assert(DelayedEquivalentExceptionSpecChecks.empty());
1153
1154 // All dllexport classes should have been processed already.
1155 assert(DelayedDllExportClasses.empty());
1156 assert(DelayedDllExportMemberFunctions.empty());
1157
1158 // Remove file scoped decls that turned out to be used.
1159 UnusedFileScopedDecls.erase(
      std::remove_if(UnusedFileScopedDecls.begin(ExternalSource.get()),
1161 UnusedFileScopedDecls.end(),
1162 [this](const DeclaratorDecl *DD) {
1163 return ShouldRemoveFromUnused(this, DD);
1164 }),
1165 UnusedFileScopedDecls.end());
1166
1167 if (TUKind == TU_Prefix) {
1168 // Translation unit prefixes don't need any of the checking below.
1169 if (!PP.isIncrementalProcessingEnabled())
1170 TUScope = nullptr;
1171 return;
1172 }
1173
1174 // Check for #pragma weak identifiers that were never declared
1175 LoadExternalWeakUndeclaredIdentifiers();
1176 for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1177 if (WeakIDs.second.empty())
1178 continue;
1179
1180 Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
1181 LookupOrdinaryName);
1182 if (PrevDecl != nullptr &&
1183 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
1184 for (const auto &WI : WeakIDs.second)
1185 Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
1186 << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1187 else
1188 for (const auto &WI : WeakIDs.second)
1189 Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
1190 << WeakIDs.first;
1191 }
1192
1193 if (LangOpts.CPlusPlus11 &&
1194 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
1195 CheckDelegatingCtorCycles();
1196
1197 if (!Diags.hasErrorOccurred()) {
1198 if (ExternalSource)
1199 ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
1200 checkUndefinedButUsed(*this);
1201 }
1202
1203 // A global-module-fragment is only permitted within a module unit.
1204 bool DiagnosedMissingModuleDeclaration = false;
1205 if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1206 Module::ExplicitGlobalModuleFragment) {
1207 Diag(ModuleScopes.back().BeginLoc,
1208 diag::err_module_declaration_missing_after_global_module_introducer);
1209 DiagnosedMissingModuleDeclaration = true;
1210 }
1211
1212 if (TUKind == TU_Module) {
1213 // If we are building a module interface unit, we need to have seen the
1214 // module declaration by now.
1215 if (getLangOpts().getCompilingModule() ==
1216 LangOptions::CMK_ModuleInterface &&
1217 !isCurrentModulePurview() && !DiagnosedMissingModuleDeclaration) {
1218 // FIXME: Make a better guess as to where to put the module declaration.
1219 Diag(getSourceManager().getLocForStartOfFile(
1220 getSourceManager().getMainFileID()),
1221 diag::err_module_declaration_missing);
1222 }
1223
1224 // If we are building a module, resolve all of the exported declarations
1225 // now.
1226 if (Module *CurrentModule = PP.getCurrentModule()) {
1227 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1228
1229 SmallVector<Module *, 2> Stack;
1230 Stack.push_back(CurrentModule);
1231 while (!Stack.empty()) {
1232 Module *Mod = Stack.pop_back_val();
1233
1234 // Resolve the exported declarations and conflicts.
1235 // FIXME: Actually complain, once we figure out how to teach the
1236 // diagnostic client to deal with complaints in the module map at this
1237 // point.
1238 ModMap.resolveExports(Mod, /*Complain=*/false);
1239 ModMap.resolveUses(Mod, /*Complain=*/false);
1240 ModMap.resolveConflicts(Mod, /*Complain=*/false);
1241
1242 // Queue the submodules, so their exports will also be resolved.
1243 auto SubmodulesRange = Mod->submodules();
1244 Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
1245 }
1246 }
1247
1248 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1249 // modules when they are built, not every time they are used.
1250 emitAndClearUnusedLocalTypedefWarnings();
1251 }
1252
1253 // C++ standard modules. Diagnose cases where a function is declared inline
1254 // in the module purview but has no definition before the end of the TU or
1255 // the start of a Private Module Fragment (if one is present).
1256 if (!PendingInlineFuncDecls.empty()) {
1257 for (auto *D : PendingInlineFuncDecls) {
1258 if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1259 bool DefInPMF = false;
1260 if (auto *FDD = FD->getDefinition()) {
1261 assert(FDD->getOwningModule() &&
1262 FDD->getOwningModule()->isModulePurview());
1263 DefInPMF = FDD->getOwningModule()->isPrivateModule();
1264 if (!DefInPMF)
1265 continue;
1266 }
1267 Diag(FD->getLocation(), diag::err_export_inline_not_defined)
1268 << DefInPMF;
1269 // If we have a PMF it should be at the end of the ModuleScopes.
1270 if (DefInPMF &&
1271 ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1272 Diag(ModuleScopes.back().BeginLoc,
1273 diag::note_private_module_fragment);
1274 }
1275 }
1276 }
1277 PendingInlineFuncDecls.clear();
1278 }
1279
1280 // C99 6.9.2p2:
1281 // A declaration of an identifier for an object that has file
1282 // scope without an initializer, and without a storage-class
1283 // specifier or with the storage-class specifier static,
1284 // constitutes a tentative definition. If a translation unit
1285 // contains one or more tentative definitions for an identifier,
1286 // and the translation unit contains no external definition for
1287 // that identifier, then the behavior is exactly as if the
1288 // translation unit contains a file scope declaration of that
1289 // identifier, with the composite type as of the end of the
1290 // translation unit, with an initializer equal to 0.
1291 llvm::SmallSet<VarDecl *, 32> Seen;
1292 for (TentativeDefinitionsType::iterator
1293 T = TentativeDefinitions.begin(ExternalSource.get()),
1294 TEnd = TentativeDefinitions.end();
1295 T != TEnd; ++T) {
1296 VarDecl *VD = (*T)->getActingDefinition();
1297
1298 // If the tentative definition was completed, getActingDefinition() returns
1299 // null. If we've already seen this variable before, insert()'s second
1300 // return value is false.
1301 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
1302 continue;
1303
1304 if (const IncompleteArrayType *ArrayT
1305 = Context.getAsIncompleteArrayType(VD->getType())) {
1306 // Set the length of the array to 1 (C99 6.9.2p5).
1307 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
1308 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
1309 QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One,
1310 nullptr, ArrayType::Normal, 0);
1311 VD->setType(T);
1312 } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
1313 diag::err_tentative_def_incomplete_type))
1314 VD->setInvalidDecl();
1315
1316 // No initialization is performed for a tentative definition.
1317 CheckCompleteVariableDeclaration(VD);
1318
1319 // Notify the consumer that we've completed a tentative definition.
1320 if (!VD->isInvalidDecl())
1321 Consumer.CompleteTentativeDefinition(VD);
1322 }
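  // Worked example (C, illustrative): a file-scope `int n[];` with no later
  // external definition is a tentative definition with an incomplete array
  // type; the loop above warns (warn_tentative_incomplete_array), completes
  // the type to `int[1]` per C99 6.9.2p5, and notifies the consumer so the
  // object is emitted as if defined with an initializer equal to 0.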
1323
1324 for (auto *D : ExternalDeclarations) {
1325 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1326 continue;
1327
1328 Consumer.CompleteExternalDeclaration(D);
1329 }
1330
1331 // If there were errors, disable 'unused' warnings since they will mostly be
1332 // noise. Don't warn for a use from a module: either we should warn on all
1333 // file-scope declarations in modules or not at all, but whether the
1334 // declaration is used is immaterial.
1335 if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
1336 // Output warning for unused file scoped decls.
1337 for (UnusedFileScopedDeclsType::iterator
1338 I = UnusedFileScopedDecls.begin(ExternalSource.get()),
1339 E = UnusedFileScopedDecls.end();
1340 I != E; ++I) {
1341 if (ShouldRemoveFromUnused(this, *I))
1342 continue;
1343
1344 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
1345 const FunctionDecl *DiagD;
1346 if (!FD->hasBody(DiagD))
1347 DiagD = FD;
1348 if (DiagD->isDeleted())
1349 continue; // Deleted functions are supposed to be unused.
1350 SourceRange DiagRange = DiagD->getLocation();
1351 if (const ASTTemplateArgumentListInfo *ASTTAL =
1352 DiagD->getTemplateSpecializationArgsAsWritten())
1353 DiagRange.setEnd(ASTTAL->RAngleLoc);
1354 if (DiagD->isReferenced()) {
1355 if (isa<CXXMethodDecl>(DiagD))
1356 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
1357 << DiagD << DiagRange;
1358 else {
1359 if (FD->getStorageClass() == SC_Static &&
1360 !FD->isInlineSpecified() &&
1361 !SourceMgr.isInMainFile(
1362 SourceMgr.getExpansionLoc(FD->getLocation())))
1363 Diag(DiagD->getLocation(),
1364 diag::warn_unneeded_static_internal_decl)
1365 << DiagD << DiagRange;
1366 else
1367 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1368 << /*function=*/0 << DiagD << DiagRange;
1369 }
1370 } else {
1371 if (FD->getDescribedFunctionTemplate())
1372 Diag(DiagD->getLocation(), diag::warn_unused_template)
1373 << /*function=*/0 << DiagD << DiagRange;
1374 else
1375 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
1376 ? diag::warn_unused_member_function
1377 : diag::warn_unused_function)
1378 << DiagD << DiagRange;
1379 }
1380 } else {
1381 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
1382 if (!DiagD)
1383 DiagD = cast<VarDecl>(*I);
1384 SourceRange DiagRange = DiagD->getLocation();
1385 if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
1386 if (const ASTTemplateArgumentListInfo *ASTTAL =
1387 VTSD->getTemplateArgsInfo())
1388 DiagRange.setEnd(ASTTAL->RAngleLoc);
1389 }
1390 if (DiagD->isReferenced()) {
1391 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1392 << /*variable=*/1 << DiagD << DiagRange;
1393 } else if (DiagD->getDescribedVarTemplate()) {
1394 Diag(DiagD->getLocation(), diag::warn_unused_template)
1395 << /*variable=*/1 << DiagD << DiagRange;
1396 } else if (DiagD->getType().isConstQualified()) {
1397 const SourceManager &SM = SourceMgr;
1398 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
1399 !PP.getLangOpts().IsHeaderFile)
1400 Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
1401 << DiagD << DiagRange;
1402 } else {
1403 Diag(DiagD->getLocation(), diag::warn_unused_variable)
1404 << DiagD << DiagRange;
1405 }
1406 }
1407 }
1408
1409 emitAndClearUnusedLocalTypedefWarnings();
1410 }
1411
1412 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
1413 // FIXME: Load additional unused private field candidates from the external
1414 // source.
1415 RecordCompleteMap RecordsComplete;
1416 RecordCompleteMap MNCComplete;
1417 for (const NamedDecl *D : UnusedPrivateFields) {
1418 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
1419 if (RD && !RD->isUnion() &&
1420 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1421 Diag(D->getLocation(), diag::warn_unused_private_field)
1422 << D->getDeclName();
1423 }
1424 }
1425 }
1426
1427 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
1428 if (ExternalSource)
1429 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1430 for (const auto &DeletedFieldInfo : DeleteExprs) {
1431 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1432 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
1433 DeleteExprLoc.second);
1434 }
1435 }
1436 }
1437
1438 AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());
1439
1440 // Check we've noticed that we're no longer parsing the initializer for every
1441 // variable. If we miss cases, then at best we have a performance issue and
1442 // at worst a rejects-valid bug.
1443 assert(ParsingInitForAutoVars.empty() &&
1444 "Didn't unmark var as having its initializer parsed");
1445
1446 if (!PP.isIncrementalProcessingEnabled())
1447 TUScope = nullptr;
1448}
1449
1450
1451//===----------------------------------------------------------------------===//
1452// Helper functions.
1453//===----------------------------------------------------------------------===//
1454
1455DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1456 DeclContext *DC = CurContext;
1457
1458 while (true) {
1459 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
1460 isa<RequiresExprBodyDecl>(DC)) {
1461 DC = DC->getParent();
1462 } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
1463 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
1464 cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
1465 DC = DC->getParent()->getParent();
1466 } else break;
1467 }
1468
1469 return DC;
1470}
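
// For illustration (hypothetical user code, not part of this file): given
//   struct S { void m() { auto l = [] { /* parsing here */ }; } };
// while parsing the lambda body, getFunctionLevelDeclContext(/*AllowLambda=*/false)
// skips the lambda's call operator and returns the context of S::m, whereas
// passing AllowLambda=true returns the call operator itself.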
1471
1472/// getCurFunctionDecl - If inside of a function body, this returns a pointer
1473/// to the function decl for the function being parsed. If we're currently
1474/// in a 'block', this returns the containing context.
1475FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1476 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1477 return dyn_cast<FunctionDecl>(DC);
1478}
1479
1480ObjCMethodDecl *Sema::getCurMethodDecl() {
1481 DeclContext *DC = getFunctionLevelDeclContext();
1482 while (isa<RecordDecl>(DC))
1483 DC = DC->getParent();
1484 return dyn_cast<ObjCMethodDecl>(DC);
1485}
1486
1487NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1488 DeclContext *DC = getFunctionLevelDeclContext();
1489 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
1490 return cast<NamedDecl>(DC);
1491 return nullptr;
1492}
1493
1494LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1495 if (getLangOpts().OpenCL)
1496 return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1497 return LangAS::Default;
1498}
1499
1500void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
1501 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
1502 // and yet we also use the current diag ID on the DiagnosticsEngine. This has
1503 // been made more painfully obvious by the refactor that introduced this
1504 // function, but it is possible that the incoming argument can be
1505 // eliminated. If it truly cannot be (for example, there is some reentrancy
1506 // issue I am not seeing yet), then there should at least be a clarifying
1507 // comment somewhere.
1508 if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
1509 switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
1510 Diags.getCurrentDiagID())) {
1511 case DiagnosticIDs::SFINAE_Report:
1512 // We'll report the diagnostic below.
1513 break;
1514
1515 case DiagnosticIDs::SFINAE_SubstitutionFailure:
1516 // Count this failure so that we know that template argument deduction
1517 // has failed.
1518 ++NumSFINAEErrors;
1519
1520 // Make a copy of this suppressed diagnostic and store it with the
1521 // template-deduction information.
1522 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1523 Diagnostic DiagInfo(&Diags);
1524 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1525 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1526 }
1527
1528 Diags.setLastDiagnosticIgnored(true);
1529 Diags.Clear();
1530 return;
1531
1532 case DiagnosticIDs::SFINAE_AccessControl: {
1533 // Per C++ Core Issue 1170, access control is part of SFINAE.
1534 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
1535 // make access control a part of SFINAE for the purposes of checking
1536 // type traits.
1537 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
1538 break;
1539
1540 SourceLocation Loc = Diags.getCurrentDiagLoc();
1541
1542 // Suppress this diagnostic.
1543 ++NumSFINAEErrors;
1544
1545 // Make a copy of this suppressed diagnostic and store it with the
1546 // template-deduction information.
1547 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1548 Diagnostic DiagInfo(&Diags);
1549 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1550 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1551 }
1552
1553 Diags.setLastDiagnosticIgnored(true);
1554 Diags.Clear();
1555
1556 // Now the diagnostic state is clear, produce a C++98 compatibility
1557 // warning.
1558 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1559
1560 // The last diagnostic which Sema produced was ignored. Suppress any
1561 // notes attached to it.
1562 Diags.setLastDiagnosticIgnored(true);
1563 return;
1564 }
1565
1566 case DiagnosticIDs::SFINAE_Suppress:
1567 // Make a copy of this suppressed diagnostic and store it with the
1568      // template-deduction information.
1569 if (*Info) {
1570 Diagnostic DiagInfo(&Diags);
1571 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
1572 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1573 }
1574
1575 // Suppress this diagnostic.
1576 Diags.setLastDiagnosticIgnored(true);
1577 Diags.Clear();
1578 return;
1579 }
1580 }
1581
1582 // Copy the diagnostic printing policy over the ASTContext printing policy.
1583 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
1584 Context.setPrintingPolicy(getPrintingPolicy());
1585
1586 // Emit the diagnostic.
1587 if (!Diags.EmitCurrentDiagnostic())
1588 return;
1589
1590 // If this is not a note, and we're in a template instantiation
1591 // that is different from the last template instantiation where
1592 // we emitted an error, print a template instantiation
1593 // backtrace.
1594 if (!DiagnosticIDs::isBuiltinNote(DiagID))
1595 PrintContextStack();
1596}
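
// For illustration (hypothetical user code, not part of this file): in a
// SFINAE context such as
//
//   template <typename T> auto f(T t) -> decltype(t.foo(), void()) {}
//   void f(...) {}
//   void g() { f(0); }   // int has no member 'foo'
//
// the "no member named 'foo'" error reaches EmitCurrentDiagnostic while
// isSFINAEContext() is active, is classified as SFINAE_SubstitutionFailure,
// increments NumSFINAEErrors, and is stored with the template-deduction
// information (if any) instead of being emitted, so overload resolution
// quietly falls back to f(...).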
1597
1598Sema::SemaDiagnosticBuilder
1599Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
1600 return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
1601}
1602
1603bool Sema::hasUncompilableErrorOccurred() const {
1604 if (getDiagnostics().hasUncompilableErrorOccurred())
1605 return true;
1606 auto *FD = dyn_cast<FunctionDecl>(CurContext);
1607 if (!FD)
1608 return false;
1609 auto Loc = DeviceDeferredDiags.find(FD);
1610 if (Loc == DeviceDeferredDiags.end())
1611 return false;
1612 for (auto PDAt : Loc->second) {
1613 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
1614 return true;
1615 }
1616 return false;
1617}
1618
1619// Print notes showing how we can reach FD starting from an a priori
1620// known-emitted function.
1621static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1622 auto FnIt = S.DeviceKnownEmittedFns.find(FD);
1623 while (FnIt != S.DeviceKnownEmittedFns.end()) {
1624 // Respect error limit.
1625 if (S.Diags.hasFatalErrorOccurred())
1626 return;
1627 DiagnosticBuilder Builder(
1628 S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1629 Builder << FnIt->second.FD;
1630 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
1631 }
1632}
1633
1634namespace {
1635
1636/// Helper class that emits deferred diagnostic messages for an entity that
1637/// directly or indirectly uses a function containing deferred diagnostics,
1638/// once that entity is known to be emitted.
1639///
1640/// While the AST is parsed, certain diagnostic messages are recorded as
1641/// deferred diagnostics because it is not yet known whether the functions
1642/// containing them will be emitted. A list of functions and variables that
1643/// may trigger emission of other functions is also recorded.
1644/// DeferredDiagnosticsEmitter recursively visits the functions used by each
1645/// such function and emits their deferred diagnostics.
1646///
1647/// During the visit, certain OpenMP directives, or the initializers of
1648/// variables with certain OpenMP attributes, cause the subsequent visiting
1649/// of functions to enter a state called the OpenMP device context in this
1650/// implementation. The state is exited when the directive or initializer is
1651/// exited. This state can change the emission states of subsequent uses
1652/// of functions.
1653///
1654/// Conceptually, the functions and variables to be visited form a use graph
1655/// in which a parent node uses its child nodes. At any point during the
1656/// visit, the nodes traversed from the root to the current node form a use
1657/// stack. The emission state of the current node depends on two factors:
1658///   1. the emission state of the root node
1659///   2. whether the current node is in the OpenMP device context
1660/// If a function is determined to be emitted, its deferred diagnostics are
1661/// emitted, together with information about the use stack.
1662///
1663class DeferredDiagnosticsEmitter
1664 : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1665public:
1666 typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1667
1668 // Whether the function is already in the current use-path.
1669 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1670
1671 // The current use-path.
1672 llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1673
1674  // Whether a function has already been fully visited. DoneMap[0] is for the
1675  // case outside an OpenMP device context and DoneMap[1] for the case inside
1676  // one. We need two sets because diagnostic emission may differ depending
1677  // on whether we are in an OpenMP device context.
1678 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1679
1680 // Emission state of the root node of the current use graph.
1681 bool ShouldEmitRootNode;
1682
1683 // Current OpenMP device context level. It is initialized to 0 and each
1684 // entering of device context increases it by 1 and each exit decreases
1685 // it by 1. Non-zero value indicates it is currently in device context.
1686 unsigned InOMPDeviceContext;
1687
1688 DeferredDiagnosticsEmitter(Sema &S)
1689 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1690
1691 bool shouldVisitDiscardedStmt() const { return false; }
1692
1693 void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1694 ++InOMPDeviceContext;
1695 Inherited::VisitOMPTargetDirective(Node);
1696 --InOMPDeviceContext;
1697 }
1698
1699 void visitUsedDecl(SourceLocation Loc, Decl *D) {
1700 if (isa<VarDecl>(D))
1701 return;
1702 if (auto *FD = dyn_cast<FunctionDecl>(D))
1703 checkFunc(Loc, FD);
1704 else
1705 Inherited::visitUsedDecl(Loc, D);
1706 }
1707
1708 void checkVar(VarDecl *VD) {
1709 assert(VD->isFileVarDecl() &&
1710 "Should only check file-scope variables");
1711 if (auto *Init = VD->getInit()) {
1712 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1713 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1714 *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1715 if (IsDev)
1716 ++InOMPDeviceContext;
1717 this->Visit(Init);
1718 if (IsDev)
1719 --InOMPDeviceContext;
1720 }
1721 }
1722
1723 void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1724 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1725 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1726 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1727 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
1728 return;
1729 // Finalize analysis of OpenMP-specific constructs.
1730 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1731 (ShouldEmitRootNode || InOMPDeviceContext))
1732 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
1733 if (Caller)
1734 S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
1735    // Always emit deferred diagnostics for the direct users. This does not
1736    // lead to an explosion of diagnostics, since each user is visited at
1737    // most twice.
1738 if (ShouldEmitRootNode || InOMPDeviceContext)
1739 emitDeferredDiags(FD, Caller);
1740 // Do not revisit a function if the function body has been completely
1741 // visited before.
1742 if (!Done.insert(FD).second)
1743 return;
1744 InUsePath.insert(FD);
1745 UsePath.push_back(FD);
1746 if (auto *S = FD->getBody()) {
1747 this->Visit(S);
1748 }
1749 UsePath.pop_back();
1750 InUsePath.erase(FD);
1751 }
1752
1753 void checkRecordedDecl(Decl *D) {
1754 if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1755 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
1756 Sema::FunctionEmissionStatus::Emitted;
1757 checkFunc(SourceLocation(), FD);
1758 } else
1759 checkVar(cast<VarDecl>(D));
1760 }
1761
1762 // Emit any deferred diagnostics for FD
1763 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
1764 auto It = S.DeviceDeferredDiags.find(FD);
1765 if (It == S.DeviceDeferredDiags.end())
1766 return;
1767 bool HasWarningOrError = false;
1768 bool FirstDiag = true;
1769 for (PartialDiagnosticAt &PDAt : It->second) {
1770 // Respect error limit.
1771 if (S.Diags.hasFatalErrorOccurred())
1772 return;
1773 const SourceLocation &Loc = PDAt.first;
1774 const PartialDiagnostic &PD = PDAt.second;
1775 HasWarningOrError |=
1776 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
1777 DiagnosticsEngine::Warning;
1778 {
1779 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
1780 PD.Emit(Builder);
1781 }
1782      // Emit the note on the first diagnostic, in case too many diagnostics
1783      // would prevent the note from being emitted.
1784 if (FirstDiag && HasWarningOrError && ShowCallStack) {
1785 emitCallStackNotes(S, FD);
1786 FirstDiag = false;
1787 }
1788 }
1789 }
1790};
1791} // namespace
1792
1793void Sema::emitDeferredDiags() {
1794 if (ExternalSource)
1795 ExternalSource->ReadDeclsToCheckForDeferredDiags(
1796 DeclsToCheckForDeferredDiags);
1797
1798 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
1799 DeclsToCheckForDeferredDiags.empty())
1800 return;
1801
1802 DeferredDiagnosticsEmitter DDE(*this);
1803 for (auto *D : DeclsToCheckForDeferredDiags)
1804 DDE.checkRecordedDecl(D);
1805}
1806
1807// In CUDA, there are some constructs which may appear in semantically-valid
1808// code, but trigger errors if we ever generate code for the function in which
1809// they appear. Essentially every construct you're not allowed to use on the
1810// device falls into this category, because you are allowed to use these
1811// constructs in a __host__ __device__ function, but only if that function is
1812// never codegen'ed on the device.
1813//
1814// To handle semantic checking for these constructs, we keep track of the set of
1815// functions we know will be emitted, either because we could tell a priori that
1816// they would be emitted, or because they were transitively called by a
1817// known-emitted function.
1818//
1819// We also keep a partial call graph of which not-known-emitted functions call
1820// which other not-known-emitted functions.
1821//
1822// When we see something which is illegal if the current function is emitted
1823// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
1824// CheckCUDACall), we first check if the current function is known-emitted. If
1825// so, we immediately output the diagnostic.
1826//
1827// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
1828// until we discover that the function is known-emitted, at which point we take
1829// it out of this map and emit the diagnostic.
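//
// For illustration (hypothetical CUDA user code, not part of this file):
//
//   void host_only();                               // host-only function
//   __host__ __device__ void hd() { host_only(); }  // call is deferred
//   __global__ void kernel() { hd(); }              // makes hd() known-emitted
//
// The illegal device-side call inside hd() is diagnosed only once kernel()
// (or another known-emitted device function) uses hd(); if hd() is never
// codegen'ed for the device, the translation unit compiles cleanly.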
1830
1831Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
1832 unsigned DiagID,
1833 const FunctionDecl *Fn,
1834 Sema &S)
1835 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
1836 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
1837 switch (K) {
1838 case K_Nop:
1839 break;
1840 case K_Immediate:
1841 case K_ImmediateWithCallStack:
1842 ImmediateDiag.emplace(
1843 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
1844 break;
1845 case K_Deferred:
1846 assert(Fn && "Must have a function to attach the deferred diag to.");
1847 auto &Diags = S.DeviceDeferredDiags[Fn];
1848 PartialDiagId.emplace(Diags.size());
1849 Diags.emplace_back(Loc, S.PDiag(DiagID));
1850 break;
1851 }
1852}
1853
1854Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
1855 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
1856 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
1857 PartialDiagId(D.PartialDiagId) {
1858  // Reset the moved-from builder so it does not emit the diagnostic again.
1859 D.ShowCallStack = false;
1860 D.ImmediateDiag.reset();
1861 D.PartialDiagId.reset();
1862}
1863
1864Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
1865 if (ImmediateDiag) {
1866 // Emit our diagnostic and, if it was a warning or error, output a callstack
1867 // if Fn isn't a priori known-emitted.
1868 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
1869 DiagID, Loc) >= DiagnosticsEngine::Warning;
1870 ImmediateDiag.reset(); // Emit the immediate diag.
1871 if (IsWarningOrError && ShowCallStack)
1872 emitCallStackNotes(S, Fn);
1873 } else {
1874 assert((!PartialDiagId || ShowCallStack) &&
1875 "Must always show call stack for deferred diags.");
1876 }
1877}
1878
1879Sema::SemaDiagnosticBuilder
1880Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
1881 FD = FD ? FD : getCurFunctionDecl();
1882 if (LangOpts.OpenMP)
1883 return LangOpts.OpenMPIsTargetDevice
1884 ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1885 : diagIfOpenMPHostCode(Loc, DiagID, FD);
1886 if (getLangOpts().CUDA)
1887 return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
1888 : CUDADiagIfHostCode(Loc, DiagID);
1889
1890 if (getLangOpts().SYCLIsDevice)
1891 return SYCLDiagIfDeviceCode(Loc, DiagID);
1892
1893 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1894 FD, *this);
1895}
1896
1897Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
1898 bool DeferHint) {
1899 bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
1900 bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
1901 DiagnosticIDs::isDeferrable(DiagID) &&
1902 (DeferHint || DeferDiags || !IsError);
1903 auto SetIsLastErrorImmediate = [&](bool Flag) {
1904 if (IsError)
1905 IsLastErrorImmediate = Flag;
1906 };
1907 if (!ShouldDefer) {
1908 SetIsLastErrorImmediate(true);
1909 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
1910 DiagID, getCurFunctionDecl(), *this);
1911 }
1912
1913 SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
1914 ? CUDADiagIfDeviceCode(Loc, DiagID)
1915 : CUDADiagIfHostCode(Loc, DiagID);
1916 SetIsLastErrorImmediate(DB.isImmediate());
1917 return DB;
1918}
1919
1920void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1921 if (isUnevaluatedContext() || Ty.isNull())
1922 return;
1923
1924  // The original idea behind the checkTypeSupport function is that unused
1925  // declarations can be replaced with an array of bytes of the same size at
1926  // codegen time. Such a replacement is not possible for types without a
1927  // constant byte size, like zero-length arrays, so do a deep check for SYCL.
1928 if (D && LangOpts.SYCLIsDevice) {
1929 llvm::DenseSet<QualType> Visited;
1930 deepTypeCheckForSYCLDevice(Loc, Visited, D);
1931 }
1932
1933 Decl *C = cast<Decl>(getCurLexicalContext());
1934
1935 // Memcpy operations for structs containing a member with unsupported type
1936 // are ok, though.
1937 if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
1938 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
1939 MD->isTrivial())
1940 return;
1941
1942 if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
1943 if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
1944 return;
1945 }
1946
1947  // Try to associate errors with the lexical context if it is a function,
1948  // or with the value declaration otherwise.
1949 const FunctionDecl *FD = isa<FunctionDecl>(C)
1950 ? cast<FunctionDecl>(C)
1951 : dyn_cast_or_null<FunctionDecl>(D);
1952
1953 auto CheckDeviceType = [&](QualType Ty) {
1954 if (Ty->isDependentType())
1955 return;
1956
1957 if (Ty->isBitIntType()) {
1958 if (!Context.getTargetInfo().hasBitIntType()) {
1959 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
1960 if (D)
1961 PD << D;
1962 else
1963 PD << "expression";
1964 targetDiag(Loc, PD, FD)
1965 << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
1966 << Ty << Context.getTargetInfo().getTriple().str();
1967 }
1968 return;
1969 }
1970
1971 // Check if we are dealing with two 'long double' but with different
1972 // semantics.
1973 bool LongDoubleMismatched = false;
1974 if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
1975 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
1976 if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
1977 !Context.getTargetInfo().hasFloat128Type()) ||
1978 (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
1979 !Context.getTargetInfo().hasIbm128Type()))
1980 LongDoubleMismatched = true;
1981 }
1982
1983 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
1984 (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
1985 (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
1986 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
1987 !Context.getTargetInfo().hasInt128Type()) ||
1988 (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
1989 !LangOpts.CUDAIsDevice) ||
1990 LongDoubleMismatched) {
1991 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
1992 if (D)
1993 PD << D;
1994 else
1995 PD << "expression";
1996
1997 if (targetDiag(Loc, PD, FD)
1998 << true /*show bit size*/
1999 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
2000 << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
2001 if (D)
2002 D->setInvalidDecl();
2003 }
2004 if (D)
2005 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2006 }
2007 };
2008
2009 auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
2010 if (LangOpts.SYCLIsDevice ||
2011 (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
2012 LangOpts.CUDAIsDevice)
2013 CheckDeviceType(Ty);
2014
2015 QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
2016 const TargetInfo &TI = Context.getTargetInfo();
2017 if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
2018 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2019 if (D)
2020 PD << D;
2021 else
2022 PD << "expression";
2023
2024 if (Diag(Loc, PD, FD)
2025 << false /*show bit size*/ << 0 << Ty << false /*return*/
2026 << TI.getTriple().str()) {
2027 if (D)
2028 D->setInvalidDecl();
2029 }
2030 if (D)
2031 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2032 }
2033
2034 bool IsDouble = UnqualTy == Context.DoubleTy;
2035 bool IsFloat = UnqualTy == Context.FloatTy;
2036 if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
2037 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2038 if (D)
2039 PD << D;
2040 else
2041 PD << "expression";
2042
2043 if (Diag(Loc, PD, FD)
2044 << false /*show bit size*/ << 0 << Ty << true /*return*/
2045 << TI.getTriple().str()) {
2046 if (D)
2047 D->setInvalidDecl();
2048 }
2049 if (D)
2050 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2051 }
2052
2053 if (Ty->isRVVType())
2054 checkRVVTypeSupport(Ty, Loc, D);
2055
2056    // Don't allow SVE types in functions without an SVE target.
2057    if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
2058      llvm::StringMap<bool> CallerFeatureMap;
2059      Context.getFunctionFeatureMap(CallerFeatureMap, FD);
2060      if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap))
2061        Diag(D ? D->getLocation() : Loc,
2062             diag::err_sve_vector_in_non_sve_target) << Ty;
2063 }
2064 };
2065
2066 CheckType(Ty);
2067 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
2068 for (const auto &ParamTy : FPTy->param_types())
2069 CheckType(ParamTy);
2070 CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
2071 }
2072 if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
2073 CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2074}
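
// For illustration (hypothetical user code, not part of this file): when
// compiling for a device target whose TargetInfo reports
// hasFloat128Type() == false, a declaration such as
//
//   __float128 scale(__float128 x);
//
// reaches checkTypeSupport() and err_target_unsupported_type is reported via
// targetDiag() for the offending parameter and return types, so in CUDA or
// OpenMP device code the error can still be deferred until the function is
// known to be emitted for the device.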
2075
2076/// Looks through the macro-expansion chain for the given
2077/// location, looking for a macro expansion with the given name.
2078/// If one is found, returns true and sets the location to that
2079/// expansion loc.
2080bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2081 SourceLocation loc = locref;
2082 if (!loc.isMacroID()) return false;
2083
2084 // There's no good way right now to look at the intermediate
2085 // expansions, so just jump to the expansion location.
2086 loc = getSourceManager().getExpansionLoc(loc);
2087
2088 // If that's written with the name, stop here.
2089 SmallString<16> buffer;
2090 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2091 locref = loc;
2092 return true;
2093 }
2094 return false;
2095}
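
// For illustration (an assumed caller, not shown in this file): a diagnostic
// that wants to point at the macro the user actually wrote can do
//
//   SourceLocation Loc = E->getBeginLoc();
//   if (findMacroSpelling(Loc, "NULL"))
//     ; // Loc now refers to the expansion location of the NULL macro.
//
// If the location is not a macro expansion spelled with that name, Loc is
// left unchanged and the function returns false.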
2096
2097/// Determines the active Scope associated with the given declaration
2098/// context.
2099///
2100/// This routine maps a declaration context to the active Scope object that
2101/// represents that declaration context in the parser. It is typically used
2102/// from "scope-less" code (e.g., template instantiation, lazy creation of
2103/// declarations) that injects a name for name-lookup purposes and, therefore,
2104/// must update the Scope.
2105///
2106/// \returns The scope corresponding to the given declaration context, or NULL
2107/// if no such scope is open.
2108Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2109
2110 if (!Ctx)
2111 return nullptr;
2112
2113 Ctx = Ctx->getPrimaryContext();
2114 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2115 // Ignore scopes that cannot have declarations. This is important for
2116 // out-of-line definitions of static class members.
2117 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2118 if (DeclContext *Entity = S->getEntity())
2119 if (Ctx == Entity->getPrimaryContext())
2120 return S;
2121 }
2122
2123 return nullptr;
2124}
2125
2126/// Enter a new function scope
2127void Sema::PushFunctionScope() {
2128 if (FunctionScopes.empty() && CachedFunctionScope) {
2129 // Use CachedFunctionScope to avoid allocating memory when possible.
2130 CachedFunctionScope->Clear();
2131 FunctionScopes.push_back(CachedFunctionScope.release());
2132 } else {
2133 FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2134 }
2135 if (LangOpts.OpenMP)
2136 pushOpenMPFunctionRegion();
2137}
2138
2139void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2140 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2141 BlockScope, Block));
2142 CapturingFunctionScopes++;
2143}
2144
2145LambdaScopeInfo *Sema::PushLambdaScope() {
2146 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2147 FunctionScopes.push_back(LSI);
2148 CapturingFunctionScopes++;
2149 return LSI;
2150}
2151
2152void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2153 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2154 LSI->AutoTemplateParameterDepth = Depth;
2155 return;
2156 }
2157 llvm_unreachable(
2158 "Remove assertion if intentionally called in a non-lambda context.");
2159}
2160
2161// Check that the type of the VarDecl has an accessible copy constructor and
2162// resolve its destructor's exception specification.
2163// This also performs initialization of block variables when they are moved
2164// to the heap. It uses the same rules as applicable for implicit moves
2165// according to the C++ standard in effect ([class.copy.elision]p3).
2166static void checkEscapingByref(VarDecl *VD, Sema &S) {
2167 QualType T = VD->getType();
2168 EnterExpressionEvaluationContext scope(
2169 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
2170 SourceLocation Loc = VD->getLocation();
2171 Expr *VarRef =
2172 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
2173 ExprResult Result;
2174 auto IE = InitializedEntity::InitializeBlock(Loc, T);
2175 if (S.getLangOpts().CPlusPlus23) {
2176 auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
2177 VK_XValue, FPOptionsOverride());
2178 Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
2179 } else {
2180 Result = S.PerformMoveOrCopyInitialization(
2181 IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
2182 VarRef);
2183 }
2184
2185 if (!Result.isInvalid()) {
2186 Result = S.MaybeCreateExprWithCleanups(Result);
2187 Expr *Init = Result.getAs<Expr>();
2188 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
2189 }
2190
2191 // The destructor's exception specification is needed when IRGen generates
2192 // block copy/destroy functions. Resolve it here.
2193 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
2194 if (CXXDestructorDecl *DD = RD->getDestructor()) {
2195 auto *FPT = DD->getType()->getAs<FunctionProtoType>();
2196 S.ResolveExceptionSpec(Loc, FPT);
2197 }
2198}
2199
2200static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2201 // Set the EscapingByref flag of __block variables captured by
2202 // escaping blocks.
2203 for (const BlockDecl *BD : FSI.Blocks) {
2204 for (const BlockDecl::Capture &BC : BD->captures()) {
2205 VarDecl *VD = BC.getVariable();
2206 if (VD->hasAttr<BlocksAttr>()) {
2207 // Nothing to do if this is a __block variable captured by a
2208 // non-escaping block.
2209 if (BD->doesNotEscape())
2210 continue;
2211 VD->setEscapingByref();
2212 }
2213 // Check whether the captured variable is or contains an object of
2214 // non-trivial C union type.
2215 QualType CapType = BC.getVariable()->getType();
2216 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2217 CapType.hasNonTrivialToPrimitiveCopyCUnion())
2218 S.checkNonTrivialCUnion(BC.getVariable()->getType(),
2219 BD->getCaretLocation(),
2220 Sema::NTCUC_BlockCapture,
2221 Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
2222 }
2223 }
2224
2225 for (VarDecl *VD : FSI.ByrefBlockVars) {
2226 // __block variables might require us to capture a copy-initializer.
2227 if (!VD->isEscapingByref())
2228 continue;
2229 // It's currently invalid to ever have a __block variable with an
2230 // array type; should we diagnose that here?
2231 // Regardless, we don't want to ignore array nesting when
2232 // constructing this copy.
2233 if (VD->getType()->isStructureOrClassType())
2234 checkEscapingByref(VD, S);
2235 }
2236}
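
// For illustration (hypothetical Objective-C++/Blocks user code, not part of
// this file):
//
//   __block std::string S;
//   void (^B)(void) = ^{ S += "x"; };   // escaping block captures __block var
//
// Because the block may escape, S is marked as escaping-byref above, and
// checkEscapingByref() then builds the copy-initialization (and resolves
// the destructor's exception specification) needed when S is moved to the
// heap.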
2237
2238/// Pop a function (or block or lambda or captured region) scope from the stack.
2239///
2240/// \param WP The warning policy to use for CFG-based warnings, or null if such
2241/// warnings should not be produced.
2242/// \param D The declaration corresponding to this function scope, if producing
2243/// CFG-based warnings.
2244/// \param BlockType The type of the block expression, if D is a BlockDecl.
2245Sema::PoppedFunctionScopePtr
2246Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
2247 const Decl *D, QualType BlockType) {
2248 assert(!FunctionScopes.empty() && "mismatched push/pop!");
2249
2250 markEscapingByrefs(*FunctionScopes.back(), *this);
2251
2252 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
2253 PoppedFunctionScopeDeleter(this));
2254
2255 if (LangOpts.OpenMP)
2256 popOpenMPFunctionRegion(Scope.get());
2257
2258 // Issue any analysis-based warnings.
2259 if (WP && D)
2260 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
2261 else
2262 for (const auto &PUD : Scope->PossiblyUnreachableDiags)
2263 Diag(PUD.Loc, PUD.PD);
2264
2265 return Scope;
2266}
2267
2268void Sema::PoppedFunctionScopeDeleter::
2269operator()(sema::FunctionScopeInfo *Scope) const {
2270 if (!Scope->isPlainFunction())
2271 Self->CapturingFunctionScopes--;
2272 // Stash the function scope for later reuse if it's for a normal function.
2273 if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2274 Self->CachedFunctionScope.reset(Scope);
2275 else
2276 delete Scope;
2277}
2278
2279void Sema::PushCompoundScope(bool IsStmtExpr) {
2280 getCurFunction()->CompoundScopes.push_back(
2281 CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2282}
2283
2284void Sema::PopCompoundScope() {
2285 FunctionScopeInfo *CurFunction = getCurFunction();
2286 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2287
2288 CurFunction->CompoundScopes.pop_back();
2289}
2290
2291/// Determine whether any errors occurred within this function/method/
2292/// block.
2293bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2294 return getCurFunction()->hasUnrecoverableErrorOccurred();
2295}
2296
2297void Sema::setFunctionHasBranchIntoScope() {
2298 if (!FunctionScopes.empty())
2299 FunctionScopes.back()->setHasBranchIntoScope();
2300}
2301
2302void Sema::setFunctionHasBranchProtectedScope() {
2303 if (!FunctionScopes.empty())
2304 FunctionScopes.back()->setHasBranchProtectedScope();
2305}
2306
2307void Sema::setFunctionHasIndirectGoto() {
2308 if (!FunctionScopes.empty())
2309 FunctionScopes.back()->setHasIndirectGoto();
2310}
2311
2312void Sema::setFunctionHasMustTail() {
2313 if (!FunctionScopes.empty())
2314 FunctionScopes.back()->setHasMustTail();
2315}
2316
2317BlockScopeInfo *Sema::getCurBlock() {
2318 if (FunctionScopes.empty())
2319 return nullptr;
2320
2321 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
2322 if (CurBSI && CurBSI->TheDecl &&
2323 !CurBSI->TheDecl->Encloses(CurContext)) {
2324 // We have switched contexts due to template instantiation.
2325 assert(!CodeSynthesisContexts.empty());
2326 return nullptr;
2327 }
2328
2329 return CurBSI;
2330}
2331
2332FunctionScopeInfo *Sema::getEnclosingFunction() const {
2333 if (FunctionScopes.empty())
2334 return nullptr;
2335
2336 for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2337 if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
2338 continue;
2339 return FunctionScopes[e];
2340 }
2341 return nullptr;
2342}
2343
2344LambdaScopeInfo *Sema::getEnclosingLambda() const {
2345 for (auto *Scope : llvm::reverse(FunctionScopes)) {
2346 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
2347 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2348 LSI->AfterParameterList) {
2349 // We have switched contexts due to template instantiation.
2350 // FIXME: We should swap out the FunctionScopes during code synthesis
2351 // so that we don't need to check for this.
2352 assert(!CodeSynthesisContexts.empty());
2353 return nullptr;
2354 }
2355 return LSI;
2356 }
2357 }
2358 return nullptr;
2359}
2360
2361LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2362 if (FunctionScopes.empty())
2363 return nullptr;
2364
2365 auto I = FunctionScopes.rbegin();
2366 if (IgnoreNonLambdaCapturingScope) {
2367 auto E = FunctionScopes.rend();
2368 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
2369 ++I;
2370 if (I == E)
2371 return nullptr;
2372 }
2373 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
2374 if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
2375 !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
2376 // We have switched contexts due to template instantiation.
2377 assert(!CodeSynthesisContexts.empty());
2378 return nullptr;
2379 }
2380
2381 return CurLSI;
2382}
2383
2384// We have a generic lambda if we parsed auto parameters, or we have
2385// an associated template parameter list.
2386LambdaScopeInfo *Sema::getCurGenericLambda() {
2387 if (LambdaScopeInfo *LSI = getCurLambda()) {
2388 return (LSI->TemplateParams.size() ||
2389 LSI->GLTemplateParameterList) ? LSI : nullptr;
2390 }
2391 return nullptr;
2392}
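
// For illustration (hypothetical user code, not part of this file): both
//
//   auto A = [](auto x) { return x; };          // invented template parameter
//   auto B = []<typename T>(T x) { return x; }; // explicit template parameters
//
// count as generic lambdas here: A populates LSI->TemplateParams while its
// parameters are parsed, and B provides LSI->GLTemplateParameterList.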
2393
2394
2395void Sema::ActOnComment(SourceRange Comment) {
2396 if (!LangOpts.RetainCommentsFromSystemHeaders &&
2397 SourceMgr.isInSystemHeader(Comment.getBegin()))
2398 return;
2399 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
2400 if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
2401 SourceRange MagicMarkerRange(Comment.getBegin(),
2402 Comment.getBegin().getLocWithOffset(3));
2403 StringRef MagicMarkerText;
2404 switch (RC.getKind()) {
2405 case RawComment::RCK_OrdinaryBCPL:
2406 MagicMarkerText = "///<";
2407 break;
2408 case RawComment::RCK_OrdinaryC:
2409 MagicMarkerText = "/**<";
2410 break;
2411 case RawComment::RCK_Invalid:
2412 // FIXME: are there other scenarios that could produce an invalid
2413 // raw comment here?
2414 Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
2415 return;
2416 default:
2417 llvm_unreachable("if this is an almost Doxygen comment, "
2418 "it should be ordinary");
2419 }
2420    Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment)
2421        << FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
2422 }
2423 Context.addComment(RC);
2424}
2425
2426// Pin this vtable to this file.
2427ExternalSemaSource::~ExternalSemaSource() {}
2428char ExternalSemaSource::ID;
2429
2430void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
2431void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
2432
2433void ExternalSemaSource::ReadKnownNamespaces(
2434 SmallVectorImpl<NamespaceDecl *> &Namespaces) {
2435}
2436
2437void ExternalSemaSource::ReadUndefinedButUsed(
2438 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
2439
2440void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
2441 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2442
2443/// Figure out if an expression could be turned into a call.
2444///
2445/// Use this when trying to recover from an error where the programmer may have
2446/// written just the name of a function instead of actually calling it.
2447///
2448/// \param E - The expression to examine.
2449/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
2450/// with no arguments, this parameter is set to the type returned by such a
2451/// call; otherwise, it is set to an empty QualType.
2452/// \param OverloadSet - If the expression is an overloaded function
2453/// name, this parameter is populated with the decls of the various overloads.
2454bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
2455 UnresolvedSetImpl &OverloadSet) {
2456 ZeroArgCallReturnTy = QualType();
2457 OverloadSet.clear();
2458
2459 const OverloadExpr *Overloads = nullptr;
2460 bool IsMemExpr = false;
2461 if (E.getType() == Context.OverloadTy) {
2462 OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));
2463
2464 // Ignore overloads that are pointer-to-member constants.
2465 if (FR.HasFormOfMemberPointer)
2466 return false;
2467
2468 Overloads = FR.Expression;
2469 } else if (E.getType() == Context.BoundMemberTy) {
2470 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
2471 IsMemExpr = true;
2472 }
2473
2474 bool Ambiguous = false;
2475 bool IsMV = false;
2476
2477 if (Overloads) {
2478 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
2479 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
2480 OverloadSet.addDecl(*it);
2481
2482 // Check whether the function is a non-template, non-member which takes no
2483 // arguments.
2484 if (IsMemExpr)
2485 continue;
2486 if (const FunctionDecl *OverloadDecl
2487 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
2488 if (OverloadDecl->getMinRequiredArguments() == 0) {
2489 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
2490 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
2491 OverloadDecl->isCPUSpecificMultiVersion()))) {
2492 ZeroArgCallReturnTy = QualType();
2493 Ambiguous = true;
2494 } else {
2495 ZeroArgCallReturnTy = OverloadDecl->getReturnType();
2496 IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
2497 OverloadDecl->isCPUSpecificMultiVersion();
2498 }
2499 }
2500 }
2501 }
2502
2503 // If it's not a member, use better machinery to try to resolve the call
2504 if (!IsMemExpr)
2505 return !ZeroArgCallReturnTy.isNull();
2506 }
2507
2508 // Attempt to call the member with no arguments - this will correctly handle
2509 // member templates with defaults/deduction of template arguments, overloads
2510 // with default arguments, etc.
2511 if (IsMemExpr && !E.isTypeDependent()) {
2512 Sema::TentativeAnalysisScope Trap(*this);
2513 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
2514 std::nullopt, SourceLocation());
2515 if (R.isUsable()) {
2516 ZeroArgCallReturnTy = R.get()->getType();
2517 return true;
2518 }
2519 return false;
2520 }
2521
2522 if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
2523 if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
2524 if (Fun->getMinRequiredArguments() == 0)
2525 ZeroArgCallReturnTy = Fun->getReturnType();
2526 return true;
2527 }
2528 }
2529
2530 // We don't have an expression that's convenient to get a FunctionDecl from,
2531 // but we can at least check if the type is "function of 0 arguments".
2532 QualType ExprTy = E.getType();
2533 const FunctionType *FunTy = nullptr;
2534 QualType PointeeTy = ExprTy->getPointeeType();
2535 if (!PointeeTy.isNull())
2536 FunTy = PointeeTy->getAs<FunctionType>();
2537 if (!FunTy)
2538 FunTy = ExprTy->getAs<FunctionType>();
2539
2540 if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) {
2541 if (FPT->getNumParams() == 0)
2542 ZeroArgCallReturnTy = FunTy->getReturnType();
2543 return true;
2544 }
2545 return false;
2546}
2547
2548/// Give notes for a set of overloads.
2549///
2550/// A companion to tryExprAsCall. In cases when the name that the programmer
2551/// wrote was an overloaded function, we may be able to make some guesses about
2552/// plausible overloads based on their return types; such guesses can be handed
2553/// off to this method to be emitted as notes.
2554///
2555/// \param Overloads - The overloads to note.
2556/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2557/// -fshow-overloads=best, this is the location to attach to the note about too
2558/// many candidates. Typically this will be the location of the original
2559/// ill-formed expression.
2560static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2561 const SourceLocation FinalNoteLoc) {
2562 unsigned ShownOverloads = 0;
2563 unsigned SuppressedOverloads = 0;
2564 for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2565 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2566 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2567 ++SuppressedOverloads;
2568 continue;
2569 }
2570
2571 const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2572 // Don't print overloads for non-default multiversioned functions.
2573 if (const auto *FD = Fn->getAsFunction()) {
2574 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2575 !FD->getAttr<TargetAttr>()->isDefaultVersion())
2576 continue;
2577 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2578 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2579 continue;
2580 }
2581 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2582 ++ShownOverloads;
2583 }
2584
2585 S.Diags.overloadCandidatesShown(ShownOverloads);
2586
2587 if (SuppressedOverloads)
2588 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2589 << SuppressedOverloads;
2590}
2591
2592static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2593 const UnresolvedSetImpl &Overloads,
2594 bool (*IsPlausibleResult)(QualType)) {
2595 if (!IsPlausibleResult)
2596 return noteOverloads(S, Overloads, Loc);
2597
2598 UnresolvedSet<2> PlausibleOverloads;
2599 for (OverloadExpr::decls_iterator It = Overloads.begin(),
2600 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2601 const auto *OverloadDecl = cast<FunctionDecl>(*It);
2602 QualType OverloadResultTy = OverloadDecl->getReturnType();
2603 if (IsPlausibleResult(OverloadResultTy))
2604 PlausibleOverloads.addDecl(It.getDecl());
2605 }
2606 noteOverloads(S, PlausibleOverloads, Loc);
2607}
2608
2609/// Determine whether the given expression can be called by just
2610/// putting parentheses after it. Notably, expressions with unary
2611/// operators can't be because the unary operator will start parsing
2612/// outside the call.
2613static bool IsCallableWithAppend(const Expr *E) {
2614 E = E->IgnoreImplicit();
2615 return (!isa<CStyleCastExpr>(E) &&
2616 !isa<UnaryOperator>(E) &&
2617 !isa<BinaryOperator>(E) &&
2618 !isa<CXXOperatorCallExpr>(E));
2619}
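
// For illustration (hypothetical user code, not part of this file): for a bare
// reference like 'f' the recovery fix-it below can append "()" to form 'f()',
// but for '*fp' appending would produce '*fp()', which calls 'fp' and then
// dereferences the result rather than calling '(*fp)', so no fix-it is
// suggested for such expressions.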
2620
2621static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2622 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2623 E = UO->getSubExpr();
2624
2625 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
2626 if (ULE->getNumDecls() == 0)
2627 return false;
2628
2629 const NamedDecl *ND = *ULE->decls_begin();
2630 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2631 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2632 }
2633 return false;
2634}
2635
2636bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
2637 bool ForceComplain,
2638 bool (*IsPlausibleResult)(QualType)) {
2639 SourceLocation Loc = E.get()->getExprLoc();
2640 SourceRange Range = E.get()->getSourceRange();
2641 UnresolvedSet<4> Overloads;
2642
2643 // If this is a SFINAE context, don't try anything that might trigger ADL
2644 // prematurely.
2645 if (!isSFINAEContext()) {
2646 QualType ZeroArgCallTy;
2647 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
2648 !ZeroArgCallTy.isNull() &&
2649 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
2650 // At this point, we know E is potentially callable with 0
2651 // arguments and that it returns something of a reasonable type,
2652 // so we can emit a fixit and carry on pretending that E was
2653 // actually a CallExpr.
2654 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
2655 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2656 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
2657 << (IsCallableWithAppend(E.get())
2658 ? FixItHint::CreateInsertion(ParenInsertionLoc,
2659 "()")
2660 : FixItHint());
2661 if (!IsMV)
2662 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2663
2664 // FIXME: Try this before emitting the fixit, and suppress diagnostics
2665 // while doing so.
2666 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), std::nullopt,
2667 Range.getEnd().getLocWithOffset(1));
2668 return true;
2669 }
2670 }
2671 if (!ForceComplain) return false;
2672
2673 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2674 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
2675 if (!IsMV)
2676 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2677 E = ExprError();
2678 return true;
2679}
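
// For illustration (hypothetical user code, not part of this file): given
//
//   int answer();
//   int x = answer;   // forgot the call
//
// a caller of tryToRecoverWithCall() can diagnose the missing call, attach a
// fix-it inserting "()" after 'answer' (tryExprAsCall() found a plausible
// zero-argument call returning int), and rebuild the expression as the call
// 'answer()' so analysis can continue as if the user had written it.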
2680
2681IdentifierInfo *Sema::getSuperIdentifier() const {
2682 if (!Ident_super)
2683 Ident_super = &Context.Idents.get("super");
2684 return Ident_super;
2685}
2686
2687void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2688 CapturedRegionKind K,
2689 unsigned OpenMPCaptureLevel) {
2690 auto *CSI = new CapturedRegionScopeInfo(
2691 getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2692 (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
2693 OpenMPCaptureLevel);
2694 CSI->ReturnType = Context.VoidTy;
2695 FunctionScopes.push_back(CSI);
2696 CapturingFunctionScopes++;
2697}
2698
2699CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2700 if (FunctionScopes.empty())
2701 return nullptr;
2702
2703 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
2704}
2705
2706const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
2707Sema::getMismatchingDeleteExpressions() const {
2708 return DeleteExprs;
2709}
2710
2711Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
2712 : S(S), OldFPFeaturesState(S.CurFPFeatures),
2713 OldOverrides(S.FpPragmaStack.CurrentValue),
2714 OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
2715 OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2716
2717Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
2718 S.CurFPFeatures = OldFPFeaturesState;
2719 S.FpPragmaStack.CurrentValue = OldOverrides;
2720 S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
2721}
2722
2723bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2724 assert(D.getCXXScopeSpec().isSet() &&
2725 "can only be called for qualified names");
2726
2727 auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2728 LookupOrdinaryName, forRedeclarationInCurContext());
2729 DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(),
2730 !D.getDeclSpec().isFriendSpecified());
2731 if (!DC)
2732 return false;
2733
2734 LookupQualifiedName(LR, DC);
2735 bool Result = std::all_of(LR.begin(), LR.end(), [](Decl *Dcl) {
2736 if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) {
2737 ND = ND->getUnderlyingDecl();
2738 return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
2739 isa<UsingDecl>(ND);
2740 }
2741 return false;
2742 });
2743 return Result;
2744}
2745