clang 22.0.0git
ASTContext.cpp
Go to the documentation of this file.
1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
14#include "ByteCode/Context.h"
15#include "CXXABI.h"
16#include "clang/AST/APValue.h"
21#include "clang/AST/Attr.h"
23#include "clang/AST/CharUnits.h"
24#include "clang/AST/Comment.h"
25#include "clang/AST/Decl.h"
26#include "clang/AST/DeclBase.h"
27#include "clang/AST/DeclCXX.h"
29#include "clang/AST/DeclObjC.h"
34#include "clang/AST/Expr.h"
35#include "clang/AST/ExprCXX.h"
37#include "clang/AST/Mangle.h"
43#include "clang/AST/Stmt.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
55#include "clang/Basic/LLVM.h"
57#include "clang/Basic/Linkage.h"
58#include "clang/Basic/Module.h"
68#include "llvm/ADT/APFixedPoint.h"
69#include "llvm/ADT/APInt.h"
70#include "llvm/ADT/APSInt.h"
71#include "llvm/ADT/ArrayRef.h"
72#include "llvm/ADT/DenseMap.h"
73#include "llvm/ADT/DenseSet.h"
74#include "llvm/ADT/FoldingSet.h"
75#include "llvm/ADT/PointerUnion.h"
76#include "llvm/ADT/STLExtras.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/StringExtras.h"
80#include "llvm/ADT/StringRef.h"
81#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82#include "llvm/Support/Capacity.h"
83#include "llvm/Support/Casting.h"
84#include "llvm/Support/Compiler.h"
85#include "llvm/Support/ErrorHandling.h"
86#include "llvm/Support/MD5.h"
87#include "llvm/Support/MathExtras.h"
88#include "llvm/Support/SipHash.h"
89#include "llvm/Support/raw_ostream.h"
90#include "llvm/TargetParser/AArch64TargetParser.h"
91#include "llvm/TargetParser/Triple.h"
92#include <algorithm>
93#include <cassert>
94#include <cstddef>
95#include <cstdint>
96#include <cstdlib>
97#include <map>
98#include <memory>
99#include <optional>
100#include <string>
101#include <tuple>
102#include <utility>
103
104using namespace clang;
105
116
117/// \returns The locations that are relevant when searching for Doc comments
118/// related to \p D.
121 assert(D);
122
123 // User can not attach documentation to implicit declarations.
124 if (D->isImplicit())
125 return {};
126
127 // User can not attach documentation to implicit instantiations.
128 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
129 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
130 return {};
131 }
132
133 if (const auto *VD = dyn_cast<VarDecl>(D)) {
134 if (VD->isStaticDataMember() &&
135 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
136 return {};
137 }
138
139 if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
140 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
141 return {};
142 }
143
144 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
145 TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
146 if (TSK == TSK_ImplicitInstantiation ||
147 TSK == TSK_Undeclared)
148 return {};
149 }
150
151 if (const auto *ED = dyn_cast<EnumDecl>(D)) {
152 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
153 return {};
154 }
155 if (const auto *TD = dyn_cast<TagDecl>(D)) {
156 // When tag declaration (but not definition!) is part of the
157 // decl-specifier-seq of some other declaration, it doesn't get comment
158 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
159 return {};
160 }
161 // TODO: handle comments for function parameters properly.
162 if (isa<ParmVarDecl>(D))
163 return {};
164
165 // TODO: we could look up template parameter documentation in the template
166 // documentation.
170 return {};
171
173 // Find declaration location.
174 // For Objective-C declarations we generally don't expect to have multiple
175 // declarators, thus use declaration starting location as the "declaration
176 // location".
177 // For all other declarations multiple declarators are used quite frequently,
178 // so we use the location of the identifier as the "declaration location".
179 SourceLocation BaseLocation;
183 // Allow association with Y across {} in `typedef struct X {} Y`.
185 BaseLocation = D->getBeginLoc();
186 else
187 BaseLocation = D->getLocation();
188
189 if (!D->getLocation().isMacroID()) {
190 Locations.emplace_back(BaseLocation);
191 } else {
192 const auto *DeclCtx = D->getDeclContext();
193
194 // When encountering definitions generated from a macro (that are not
195 // contained by another declaration in the macro) we need to try and find
196 // the comment at the location of the expansion but if there is no comment
197 // there we should retry to see if there is a comment inside the macro as
198 // well. To this end we return first BaseLocation to first look at the
199 // expansion site, the second value is the spelling location of the
200 // beginning of the declaration defined inside the macro.
201 if (!(DeclCtx &&
202 Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
203 Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
204 }
205
206 // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
207 // we don't refer to the macro argument location at the expansion site (this
208 // can happen if the name's spelling is provided via macro argument), and
209 // always to the declaration itself.
210 Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
211 }
212
213 return Locations;
214}
215
217 const Decl *D, const SourceLocation RepresentativeLocForDecl,
218 const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
219 // If the declaration doesn't map directly to a location in a file, we
220 // can't find the comment.
221 if (RepresentativeLocForDecl.isInvalid() ||
222 !RepresentativeLocForDecl.isFileID())
223 return nullptr;
224
225 // If there are no comments anywhere, we won't find anything.
226 if (CommentsInTheFile.empty())
227 return nullptr;
228
229 // Decompose the location for the declaration and find the beginning of the
230 // file buffer.
231 const FileIDAndOffset DeclLocDecomp =
232 SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);
233
234 // Slow path.
235 auto OffsetCommentBehindDecl =
236 CommentsInTheFile.lower_bound(DeclLocDecomp.second);
237
238 // First check whether we have a trailing comment.
239 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
240 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
241 if ((CommentBehindDecl->isDocumentation() ||
242 LangOpts.CommentOpts.ParseAllComments) &&
243 CommentBehindDecl->isTrailingComment() &&
246
247 // Check that Doxygen trailing comment comes after the declaration, starts
248 // on the same line and in the same file as the declaration.
249 if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
250 Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
251 OffsetCommentBehindDecl->first)) {
252 return CommentBehindDecl;
253 }
254 }
255 }
256
257 // The comment just after the declaration was not a trailing comment.
258 // Let's look at the previous comment.
259 if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
260 return nullptr;
261
262 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
263 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
264
265 // Check that we actually have a non-member Doxygen comment.
266 if (!(CommentBeforeDecl->isDocumentation() ||
267 LangOpts.CommentOpts.ParseAllComments) ||
268 CommentBeforeDecl->isTrailingComment())
269 return nullptr;
270
271 // Decompose the end of the comment.
272 const unsigned CommentEndOffset =
273 Comments.getCommentEndOffset(CommentBeforeDecl);
274
275 // Get the corresponding buffer.
276 bool Invalid = false;
277 const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
278 &Invalid).data();
279 if (Invalid)
280 return nullptr;
281
282 // Extract text between the comment and declaration.
283 StringRef Text(Buffer + CommentEndOffset,
284 DeclLocDecomp.second - CommentEndOffset);
285
286 // There should be no other declarations or preprocessor directives between
287 // comment and declaration.
288 if (Text.find_last_of(";{}#@") != StringRef::npos)
289 return nullptr;
290
291 return CommentBeforeDecl;
292}
293
295 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
296
297 for (const auto DeclLoc : DeclLocs) {
298 // If the declaration doesn't map directly to a location in a file, we
299 // can't find the comment.
300 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
301 continue;
302
304 ExternalSource->ReadComments();
305 CommentsLoaded = true;
306 }
307
308 if (Comments.empty())
309 continue;
310
311 const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
312 if (!File.isValid())
313 continue;
314
315 const auto CommentsInThisFile = Comments.getCommentsInFile(File);
316 if (!CommentsInThisFile || CommentsInThisFile->empty())
317 continue;
318
319 if (RawComment *Comment =
320 getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
321 return Comment;
322 }
323
324 return nullptr;
325}
326
328 assert(LangOpts.RetainCommentsFromSystemHeaders ||
329 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
330 Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
331}
332
334 const Decl *D,
335 const Decl **OriginalDecl) const {
336 if (!D) {
337 if (OriginalDecl)
338 OriginalDecl = nullptr;
339 return nullptr;
340 }
341
342 D = &adjustDeclToTemplate(*D);
343
344 // Any comment directly attached to D?
345 {
346 auto DeclComment = DeclRawComments.find(D);
347 if (DeclComment != DeclRawComments.end()) {
348 if (OriginalDecl)
349 *OriginalDecl = D;
350 return DeclComment->second;
351 }
352 }
353
354 // Any comment attached to any redeclaration of D?
355 const Decl *CanonicalD = D->getCanonicalDecl();
356 if (!CanonicalD)
357 return nullptr;
358
359 {
360 auto RedeclComment = RedeclChainComments.find(CanonicalD);
361 if (RedeclComment != RedeclChainComments.end()) {
362 if (OriginalDecl)
363 *OriginalDecl = RedeclComment->second;
364 auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
365 assert(CommentAtRedecl != DeclRawComments.end() &&
366 "This decl is supposed to have comment attached.");
367 return CommentAtRedecl->second;
368 }
369 }
370
371 // Any redeclarations of D that we haven't checked for comments yet?
372 const Decl *LastCheckedRedecl = [&]() {
373 const Decl *LastChecked = CommentlessRedeclChains.lookup(CanonicalD);
374 bool CanUseCommentlessCache = false;
375 if (LastChecked) {
376 for (auto *Redecl : CanonicalD->redecls()) {
377 if (Redecl == D) {
378 CanUseCommentlessCache = true;
379 break;
380 }
381 if (Redecl == LastChecked)
382 break;
383 }
384 }
385 // FIXME: This could be improved so that even if CanUseCommentlessCache
386 // is false, once we've traversed past CanonicalD we still skip ahead
387 // LastChecked.
388 return CanUseCommentlessCache ? LastChecked : nullptr;
389 }();
390
391 for (const Decl *Redecl : D->redecls()) {
392 assert(Redecl);
393 // Skip all redeclarations that have been checked previously.
394 if (LastCheckedRedecl) {
395 if (LastCheckedRedecl == Redecl) {
396 LastCheckedRedecl = nullptr;
397 }
398 continue;
399 }
400 const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
401 if (RedeclComment) {
402 cacheRawCommentForDecl(*Redecl, *RedeclComment);
403 if (OriginalDecl)
404 *OriginalDecl = Redecl;
405 return RedeclComment;
406 }
407 CommentlessRedeclChains[CanonicalD] = Redecl;
408 }
409
410 if (OriginalDecl)
411 *OriginalDecl = nullptr;
412 return nullptr;
413}
414
416 const RawComment &Comment) const {
417 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
418 DeclRawComments.try_emplace(&OriginalD, &Comment);
419 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
420 RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
421 CommentlessRedeclChains.erase(CanonicalDecl);
422}
423
424static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
426 const DeclContext *DC = ObjCMethod->getDeclContext();
427 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
428 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
429 if (!ID)
430 return;
431 // Add redeclared method here.
432 for (const auto *Ext : ID->known_extensions()) {
433 if (ObjCMethodDecl *RedeclaredMethod =
434 Ext->getMethod(ObjCMethod->getSelector(),
435 ObjCMethod->isInstanceMethod()))
436 Redeclared.push_back(RedeclaredMethod);
437 }
438 }
439}
440
442 const Preprocessor *PP) {
443 if (Comments.empty() || Decls.empty())
444 return;
445
446 FileID File;
447 for (const Decl *D : Decls) {
448 if (D->isInvalidDecl())
449 continue;
450
451 D = &adjustDeclToTemplate(*D);
452 SourceLocation Loc = D->getLocation();
453 if (Loc.isValid()) {
454 // See if there are any new comments that are not attached to a decl.
455 // The location doesn't have to be precise - we care only about the file.
456 File = SourceMgr.getDecomposedLoc(Loc).first;
457 break;
458 }
459 }
460
461 if (File.isInvalid())
462 return;
463
464 auto CommentsInThisFile = Comments.getCommentsInFile(File);
465 if (!CommentsInThisFile || CommentsInThisFile->empty() ||
466 CommentsInThisFile->rbegin()->second->isAttached())
467 return;
468
469 // There is at least one comment not attached to a decl.
470 // Maybe it should be attached to one of Decls?
471 //
472 // Note that this way we pick up not only comments that precede the
473 // declaration, but also comments that *follow* the declaration -- thanks to
474 // the lookahead in the lexer: we've consumed the semicolon and looked
475 // ahead through comments.
476 for (const Decl *D : Decls) {
477 assert(D);
478 if (D->isInvalidDecl())
479 continue;
480
481 D = &adjustDeclToTemplate(*D);
482
483 if (DeclRawComments.count(D) > 0)
484 continue;
485
486 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
487
488 for (const auto DeclLoc : DeclLocs) {
489 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
490 continue;
491
492 if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
493 D, DeclLoc, *CommentsInThisFile)) {
494 cacheRawCommentForDecl(*D, *DocComment);
495 comments::FullComment *FC = DocComment->parse(*this, PP, D);
496 ParsedComments[D->getCanonicalDecl()] = FC;
497 break;
498 }
499 }
500 }
501}
502
504 const Decl *D) const {
505 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
506 ThisDeclInfo->CommentDecl = D;
507 ThisDeclInfo->IsFilled = false;
508 ThisDeclInfo->fill();
509 ThisDeclInfo->CommentDecl = FC->getDecl();
510 if (!ThisDeclInfo->TemplateParameters)
511 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
513 new (*this) comments::FullComment(FC->getBlocks(),
514 ThisDeclInfo);
515 return CFC;
516}
517
520 return RC ? RC->parse(*this, nullptr, D) : nullptr;
521}
522
524 const Decl *D,
525 const Preprocessor *PP) const {
526 if (!D || D->isInvalidDecl())
527 return nullptr;
528 D = &adjustDeclToTemplate(*D);
529
530 const Decl *Canonical = D->getCanonicalDecl();
531 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
532 ParsedComments.find(Canonical);
533
534 if (Pos != ParsedComments.end()) {
535 if (Canonical != D) {
536 comments::FullComment *FC = Pos->second;
538 return CFC;
539 }
540 return Pos->second;
541 }
542
543 const Decl *OriginalDecl = nullptr;
544
545 const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
546 if (!RC) {
549 const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
550 if (OMD && OMD->isPropertyAccessor())
551 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
552 if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
553 return cloneFullComment(FC, D);
554 if (OMD)
555 addRedeclaredMethods(OMD, Overridden);
556 getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
557 for (unsigned i = 0, e = Overridden.size(); i < e; i++)
558 if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
559 return cloneFullComment(FC, D);
560 }
561 else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
562 // Attach any tag type's documentation to its typedef if latter
563 // does not have one of its own.
564 QualType QT = TD->getUnderlyingType();
565 if (const auto *TT = QT->getAs<TagType>())
566 if (comments::FullComment *FC = getCommentForDecl(TT->getDecl(), PP))
567 return cloneFullComment(FC, D);
568 }
569 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
570 while (IC->getSuperClass()) {
571 IC = IC->getSuperClass();
573 return cloneFullComment(FC, D);
574 }
575 }
576 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
577 if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
579 return cloneFullComment(FC, D);
580 }
581 else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
582 if (!(RD = RD->getDefinition()))
583 return nullptr;
584 // Check non-virtual bases.
585 for (const auto &I : RD->bases()) {
586 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
587 continue;
588 QualType Ty = I.getType();
589 if (Ty.isNull())
590 continue;
592 if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
593 continue;
594
596 return cloneFullComment(FC, D);
597 }
598 }
599 // Check virtual bases.
600 for (const auto &I : RD->vbases()) {
601 if (I.getAccessSpecifier() != AS_public)
602 continue;
603 QualType Ty = I.getType();
604 if (Ty.isNull())
605 continue;
606 if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
607 if (!(VirtualBase= VirtualBase->getDefinition()))
608 continue;
610 return cloneFullComment(FC, D);
611 }
612 }
613 }
614 return nullptr;
615 }
616
617 // If the RawComment was attached to other redeclaration of this Decl, we
618 // should parse the comment in context of that other Decl. This is important
619 // because comments can contain references to parameter names which can be
620 // different across redeclarations.
621 if (D != OriginalDecl && OriginalDecl)
622 return getCommentForDecl(OriginalDecl, PP);
623
624 comments::FullComment *FC = RC->parse(*this, PP, D);
625 ParsedComments[Canonical] = FC;
626 return FC;
627}
628
629void ASTContext::CanonicalTemplateTemplateParm::Profile(
630 llvm::FoldingSetNodeID &ID, const ASTContext &C,
632 ID.AddInteger(Parm->getDepth());
633 ID.AddInteger(Parm->getPosition());
634 ID.AddBoolean(Parm->isParameterPack());
635 ID.AddInteger(Parm->templateParameterKind());
636
638 ID.AddInteger(Params->size());
640 PEnd = Params->end();
641 P != PEnd; ++P) {
642 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
643 ID.AddInteger(0);
644 ID.AddBoolean(TTP->isParameterPack());
645 ID.AddInteger(
646 TTP->getNumExpansionParameters().toInternalRepresentation());
647 continue;
648 }
649
650 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
651 ID.AddInteger(1);
652 ID.AddBoolean(NTTP->isParameterPack());
653 ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
654 .getAsOpaquePtr());
655 if (NTTP->isExpandedParameterPack()) {
656 ID.AddBoolean(true);
657 ID.AddInteger(NTTP->getNumExpansionTypes());
658 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
659 QualType T = NTTP->getExpansionType(I);
660 ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
661 }
662 } else
663 ID.AddBoolean(false);
664 continue;
665 }
666
667 auto *TTP = cast<TemplateTemplateParmDecl>(*P);
668 ID.AddInteger(2);
669 Profile(ID, C, TTP);
670 }
671}
672
673TemplateTemplateParmDecl *
675 TemplateTemplateParmDecl *TTP) const {
676 // Check if we already have a canonical template template parameter.
677 llvm::FoldingSetNodeID ID;
678 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
679 void *InsertPos = nullptr;
680 CanonicalTemplateTemplateParm *Canonical
681 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
682 if (Canonical)
683 return Canonical->getParam();
684
685 // Build a canonical template parameter list.
687 SmallVector<NamedDecl *, 4> CanonParams;
688 CanonParams.reserve(Params->size());
690 PEnd = Params->end();
691 P != PEnd; ++P) {
692 // Note that, per C++20 [temp.over.link]/6, when determining whether
693 // template-parameters are equivalent, constraints are ignored.
694 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
697 TTP->getDepth(), TTP->getIndex(), nullptr, false,
698 TTP->isParameterPack(), /*HasTypeConstraint=*/false,
699 TTP->getNumExpansionParameters());
700 CanonParams.push_back(NewTTP);
701 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
705 if (NTTP->isExpandedParameterPack()) {
706 SmallVector<QualType, 2> ExpandedTypes;
708 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
709 ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
710 ExpandedTInfos.push_back(
711 getTrivialTypeSourceInfo(ExpandedTypes.back()));
712 }
713
717 NTTP->getDepth(),
718 NTTP->getPosition(), nullptr,
719 T,
720 TInfo,
721 ExpandedTypes,
722 ExpandedTInfos);
723 } else {
727 NTTP->getDepth(),
728 NTTP->getPosition(), nullptr,
729 T,
730 NTTP->isParameterPack(),
731 TInfo);
732 }
733 CanonParams.push_back(Param);
734 } else
735 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
737 }
738
741 TTP->getPosition(), TTP->isParameterPack(), nullptr,
743 /*Typename=*/false,
745 CanonParams, SourceLocation(),
746 /*RequiresClause=*/nullptr));
747
748 // Get the new insert position for the node we care about.
749 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
750 assert(!Canonical && "Shouldn't be in the map!");
751 (void)Canonical;
752
753 // Create the canonical template template parameter entry.
754 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
755 CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
756 return CanonTTP;
757}
758
761 TemplateTemplateParmDecl *TTP) const {
762 llvm::FoldingSetNodeID ID;
763 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
764 void *InsertPos = nullptr;
765 CanonicalTemplateTemplateParm *Canonical =
766 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
767 return Canonical ? Canonical->getParam() : nullptr;
768}
769
772 TemplateTemplateParmDecl *CanonTTP) const {
773 llvm::FoldingSetNodeID ID;
774 CanonicalTemplateTemplateParm::Profile(ID, *this, CanonTTP);
775 void *InsertPos = nullptr;
776 if (auto *Existing =
777 CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos))
778 return Existing->getParam();
779 CanonTemplateTemplateParms.InsertNode(
780 new (*this) CanonicalTemplateTemplateParm(CanonTTP), InsertPos);
781 return CanonTTP;
782}
783
784/// Check if a type can have its sanitizer instrumentation elided based on its
785/// presence within an ignorelist.
787 const QualType &Ty) const {
788 std::string TyName = Ty.getUnqualifiedType().getAsString(getPrintingPolicy());
789 return NoSanitizeL->containsType(Mask, TyName);
790}
791
793 auto Kind = getTargetInfo().getCXXABI().getKind();
794 return getLangOpts().CXXABI.value_or(Kind);
795}
796
797CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
798 if (!LangOpts.CPlusPlus) return nullptr;
799
800 switch (getCXXABIKind()) {
801 case TargetCXXABI::AppleARM64:
802 case TargetCXXABI::Fuchsia:
803 case TargetCXXABI::GenericARM: // Same as Itanium at this level
804 case TargetCXXABI::iOS:
805 case TargetCXXABI::WatchOS:
806 case TargetCXXABI::GenericAArch64:
807 case TargetCXXABI::GenericMIPS:
808 case TargetCXXABI::GenericItanium:
809 case TargetCXXABI::WebAssembly:
810 case TargetCXXABI::XL:
811 return CreateItaniumCXXABI(*this);
812 case TargetCXXABI::Microsoft:
813 return CreateMicrosoftCXXABI(*this);
814 }
815 llvm_unreachable("Invalid CXXABI type!");
816}
817
819 if (!InterpContext) {
820 InterpContext.reset(new interp::Context(*this));
821 }
822 return *InterpContext;
823}
824
826 if (!ParentMapCtx)
827 ParentMapCtx.reset(new ParentMapContext(*this));
828 return *ParentMapCtx;
829}
830
832 const LangOptions &LangOpts) {
833 switch (LangOpts.getAddressSpaceMapMangling()) {
835 return TI.useAddressSpaceMapMangling();
837 return true;
839 return false;
840 }
841 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
842}
843
845 IdentifierTable &idents, SelectorTable &sels,
847 : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
848 DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
849 DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
850 DependentSizedMatrixTypes(this_()),
851 FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
852 DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
853 DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
854 DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
855 DeducedTemplates(this_()), ArrayParameterTypes(this_()),
856 CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
857 NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
858 XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
859 LangOpts.XRayNeverInstrumentFiles,
860 LangOpts.XRayAttrListFiles, SM)),
861 ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
862 PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
863 BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
864 Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
865 CompCategories(this_()), LastSDM(nullptr, 0) {
867}
868
870 // Release the DenseMaps associated with DeclContext objects.
871 // FIXME: Is this the ideal solution?
872 ReleaseDeclContextMaps();
873
874 // Call all of the deallocation functions on all of their targets.
875 for (auto &Pair : Deallocations)
876 (Pair.first)(Pair.second);
877 Deallocations.clear();
878
879 // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
880 // because they can contain DenseMaps.
881 for (llvm::DenseMap<const ObjCInterfaceDecl *,
883 I = ObjCLayouts.begin(),
884 E = ObjCLayouts.end();
885 I != E;)
886 // Increment in loop to prevent using deallocated memory.
887 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
888 R->Destroy(*this);
889 ObjCLayouts.clear();
890
891 for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
892 I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
893 // Increment in loop to prevent using deallocated memory.
894 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
895 R->Destroy(*this);
896 }
897 ASTRecordLayouts.clear();
898
899 for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
900 AEnd = DeclAttrs.end();
901 A != AEnd; ++A)
902 A->second->~AttrVec();
903 DeclAttrs.clear();
904
905 for (const auto &Value : ModuleInitializers)
906 Value.second->~PerModuleInitializers();
907 ModuleInitializers.clear();
908
909 XRayFilter.reset();
910 NoSanitizeL.reset();
911}
912
914
915void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
916 TraversalScope = TopLevelDecls;
918}
919
920void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
921 Deallocations.push_back({Callback, Data});
922}
923
924void
928
930 llvm::errs() << "\n*** AST Context Stats:\n";
931 llvm::errs() << " " << Types.size() << " types total.\n";
932
933 unsigned counts[] = {
934#define TYPE(Name, Parent) 0,
935#define ABSTRACT_TYPE(Name, Parent)
936#include "clang/AST/TypeNodes.inc"
937 0 // Extra
938 };
939
940 for (unsigned i = 0, e = Types.size(); i != e; ++i) {
941 Type *T = Types[i];
942 counts[(unsigned)T->getTypeClass()]++;
943 }
944
945 unsigned Idx = 0;
946 unsigned TotalBytes = 0;
947#define TYPE(Name, Parent) \
948 if (counts[Idx]) \
949 llvm::errs() << " " << counts[Idx] << " " << #Name \
950 << " types, " << sizeof(Name##Type) << " each " \
951 << "(" << counts[Idx] * sizeof(Name##Type) \
952 << " bytes)\n"; \
953 TotalBytes += counts[Idx] * sizeof(Name##Type); \
954 ++Idx;
955#define ABSTRACT_TYPE(Name, Parent)
956#include "clang/AST/TypeNodes.inc"
957
958 llvm::errs() << "Total bytes = " << TotalBytes << "\n";
959
960 // Implicit special member functions.
961 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
963 << " implicit default constructors created\n";
964 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
966 << " implicit copy constructors created\n";
968 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
970 << " implicit move constructors created\n";
971 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
973 << " implicit copy assignment operators created\n";
975 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
977 << " implicit move assignment operators created\n";
978 llvm::errs() << NumImplicitDestructorsDeclared << "/"
980 << " implicit destructors created\n";
981
982 if (ExternalSource) {
983 llvm::errs() << "\n";
984 ExternalSource->PrintStats();
985 }
986
987 BumpAlloc.PrintStats();
988}
989
991 bool NotifyListeners) {
992 if (NotifyListeners)
993 if (auto *Listener = getASTMutationListener();
995 Listener->RedefinedHiddenDefinition(ND, M);
996
997 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
998}
999
1001 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1002 if (It == MergedDefModules.end())
1003 return;
1004
1005 auto &Merged = It->second;
1006 llvm::DenseSet<Module*> Found;
1007 for (Module *&M : Merged)
1008 if (!Found.insert(M).second)
1009 M = nullptr;
1010 llvm::erase(Merged, nullptr);
1011}
1012
1015 auto MergedIt =
1016 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
1017 if (MergedIt == MergedDefModules.end())
1018 return {};
1019 return MergedIt->second;
1020}
1021
1022void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1023 if (LazyInitializers.empty())
1024 return;
1025
1026 auto *Source = Ctx.getExternalSource();
1027 assert(Source && "lazy initializers but no external source");
1028
1029 auto LazyInits = std::move(LazyInitializers);
1030 LazyInitializers.clear();
1031
1032 for (auto ID : LazyInits)
1033 Initializers.push_back(Source->GetExternalDecl(ID));
1034
1035 assert(LazyInitializers.empty() &&
1036 "GetExternalDecl for lazy module initializer added more inits");
1037}
1038
1040 // One special case: if we add a module initializer that imports another
1041 // module, and that module's only initializer is an ImportDecl, simplify.
1042 if (const auto *ID = dyn_cast<ImportDecl>(D)) {
1043 auto It = ModuleInitializers.find(ID->getImportedModule());
1044
1045 // Maybe the ImportDecl does nothing at all. (Common case.)
1046 if (It == ModuleInitializers.end())
1047 return;
1048
1049 // Maybe the ImportDecl only imports another ImportDecl.
1050 auto &Imported = *It->second;
1051 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1052 Imported.resolve(*this);
1053 auto *OnlyDecl = Imported.Initializers.front();
1054 if (isa<ImportDecl>(OnlyDecl))
1055 D = OnlyDecl;
1056 }
1057 }
1058
1059 auto *&Inits = ModuleInitializers[M];
1060 if (!Inits)
1061 Inits = new (*this) PerModuleInitializers;
1062 Inits->Initializers.push_back(D);
1063}
1064
1067 auto *&Inits = ModuleInitializers[M];
1068 if (!Inits)
1069 Inits = new (*this) PerModuleInitializers;
1070 Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
1071 IDs.begin(), IDs.end());
1072}
1073
1075 auto It = ModuleInitializers.find(M);
1076 if (It == ModuleInitializers.end())
1077 return {};
1078
1079 auto *Inits = It->second;
1080 Inits->resolve(*this);
1081 return Inits->Initializers;
1082}
1083
  // Record the named module this ASTContext is building; may be set at most
  // once per context.
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}
1090
1091bool ASTContext::isInSameModule(const Module *M1, const Module *M2) const {
1092 if (!M1 != !M2)
1093 return false;
1094
1095 /// Get the representative module for M. The representative module is the
1096 /// first module unit for a specific primary module name. So that the module
1097 /// units have the same representative module belongs to the same module.
1098 ///
1099 /// The process is helpful to reduce the expensive string operations.
1100 auto GetRepresentativeModule = [this](const Module *M) {
1101 auto Iter = SameModuleLookupSet.find(M);
1102 if (Iter != SameModuleLookupSet.end())
1103 return Iter->second;
1104
1105 const Module *RepresentativeModule =
1106 PrimaryModuleNameMap.try_emplace(M->getPrimaryModuleInterfaceName(), M)
1107 .first->second;
1108 SameModuleLookupSet[M] = RepresentativeModule;
1109 return RepresentativeModule;
1110 };
1111
1112 assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none.");
1113 return GetRepresentativeModule(M1) == GetRepresentativeModule(M2);
1114}
1115
  // Lazily create the implicit extern "C" declaration context, parented to
  // the translation unit, on first request.
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}
1122
1133
// Define one lazy accessor per builtin template listed in
// BuiltinTemplates.inc; each decl is created on first request and cached in
// its Decl##BTName member.
#define BuiltinTemplate(BTName)                                                \
  BuiltinTemplateDecl *ASTContext::get##BTName##Decl() const {                 \
    if (!Decl##BTName)                                                         \
      Decl##BTName =                                                           \
          buildBuiltinTemplateDecl(BTK##BTName, get##BTName##Name());          \
    return Decl##BTName;                                                       \
  }
#include "clang/Basic/BuiltinTemplates.inc"
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  // When compiling C++, build a CXXRecordDecl; otherwise a plain RecordDecl.
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  // The record is compiler-synthesized, not user-written.
  NewDecl->setImplicit();
  // Give the implicit record default type visibility.
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}
1158
                                           StringRef Name) const {
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  // The typedef is compiler-synthesized, not user-written.
  NewDecl->setImplicit();
  return NewDecl;
}
1168
  // Lazily create the implicit '__int128_t' typedef on first request.
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}
1174
  // Lazily create the implicit '__uint128_t' typedef on first request.
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}
1180
// Allocate the BuiltinType node for kind K in the context's arena, register
// it in the Types list, and bind it to the canonical-type slot R.
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  Types.push_back(Ty);
}
1186
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  // Create the C++ ABI object for this target and record whether address
  // spaces participate in mangling.
  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension (fixed-point types)
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for unresolved templates.
  InitBuiltinType(UnresolvedTemplateTy, BuiltinType::UnresolvedTemplate);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode,
  // don't bother, as we're just using the same type as OMP.
  if (LangOpts.OpenACC && !LangOpts.OpenMP) {
    InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  // Target- and language-specific builtin types below are gated on the
  // corresponding language option or target capability.
  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (LangOpts.HLSL) {
#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/HLSLIntangibleTypes.def"
  }

  if (Target.hasAArch64ACLETypes() ||
      (AuxTarget && AuxTarget->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64ACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Target.getTriple().isAMDGPU() ||
      (AuxTarget && AuxTarget->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \
  InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?

  // These ObjC types start out null here.
  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
  }
}
1433
  // Forward to the SourceManager's DiagnosticsEngine.
  return SourceMgr.getDiagnostics();
}
1437
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    // Allocate the attribute vector in the ASTContext's arena on first
    // access; it is destroyed explicitly in eraseDeclAttrs.
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}
1447
/// Erase the attributes corresponding to the given declaration.
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    // Run the destructor explicitly; the vector's storage came from the
    // ASTContext allocator (see getDeclAttrs) and is not freed individually.
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}
1456
// FIXME: Remove ?
// Retrieve the MemberSpecializationInfo recorded for a static data member,
// if any was noted via setInstantiatedFromStaticDataMember.
  assert(Var->isStaticDataMember() && "Not a static data member");
      .dyn_cast<MemberSpecializationInfo *>();
}
1464
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  // Return an empty value if nothing was recorded for this variable.
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}
1474
// Note that static data member 'Inst' was instantiated from 'Tmpl' with the
// given template specialization kind and point of instantiation.
void
                                           SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
      Tmpl, TSK, PointOfInstantiation));
}
1484
// Record the template-or-specialization info for 'Inst'; it must not have
// been recorded before.
void
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}
1492
NamedDecl *
  // DenseMap::lookup returns null when no pattern was recorded for UUD.
  return InstantiatedFromUsingDecl.lookup(UUD);
}
1497
// Note that using-declaration 'Inst' was instantiated from 'Pattern'; the
// mapping may be recorded at most once per instantiation.
void
  assert((isa<UsingDecl>(Pattern) ||
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}
1511
  // DenseMap::lookup returns null when no pattern was recorded for UUD.
  return InstantiatedFromUsingEnumDecl.lookup(UUD);
}
1516
                                                  UsingEnumDecl *Pattern) {
  // Record the instantiation pattern for a using-enum declaration, at most
  // once per instantiation.
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}
1522
  // DenseMap::lookup returns null when no pattern was recorded for Inst.
  return InstantiatedFromUsingShadowDecl.lookup(Inst);
}
1527
// Record the instantiation pattern for a using-shadow declaration, at most
// once per instantiation.
void
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}
1534
FieldDecl *
  // DenseMap::lookup returns null when no pattern was recorded for Field.
  return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}
1539
1541 FieldDecl *Tmpl) {
1542 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1543 "Instantiated field decl is not unnamed");
1544 assert((!Inst->getDeclName() || Inst->isPlaceholderVar(getLangOpts())) &&
1545 "Template field decl is not unnamed");
1546 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1547 "Already noted what unnamed field was instantiated from");
1548
1549 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1550}
1551
1556
1561
unsigned
  // The count is the length of the overridden-methods range for this method.
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}
1567
  // Overridden methods are keyed on the canonical declaration.
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  // Return an empty range when no overrides were recorded.
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}
1576
                                     const CXXMethodDecl *Overridden) {
  // The overridden-methods table is keyed on canonical declarations only.
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}
1582
    const NamedDecl *D,
    SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  // C++ methods: append the recorded overridden-methods range.
  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  // Otherwise only Objective-C methods are handled here.
  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  Method->getOverriddenMethods(OverDecls);
  Overridden.append(OverDecls.begin(), OverDecls.end());
}
1602
std::optional<ASTContext::CXXRecordDeclRelocationInfo>
  assert(RD);
  // Relocation info is keyed on the record's definition.
  CXXRecordDecl *D = RD->getDefinition();
  auto it = RelocatableClasses.find(D);
  if (it != RelocatableClasses.end())
    return it->getSecond();
  return std::nullopt;
}
1612
  assert(RD);
  // Key the relocation info on the record's definition; info may be set at
  // most once per record.
  CXXRecordDecl *D = RD->getDefinition();
  assert(RelocatableClasses.find(D) == RelocatableClasses.end());
  RelocatableClasses.insert({D, Info});
}
1620
    const ASTContext &Context, const CXXRecordDecl *Class) {
  // Only polymorphic classes carry a vtable pointer to authenticate.
  if (!Class->isPolymorphic())
    return false;
  // The authentication attribute is looked up on the base class that drives
  // vtable pointer authentication, not necessarily on Class itself.
  const CXXRecordDecl *BaseType = Context.baseForVTableAuthentication(Class);
  using AuthAttr = VTablePointerAuthenticationAttr;
  const AuthAttr *ExplicitAuth = BaseType->getAttr<AuthAttr>();
  // With no explicit attribute, fall back to the language-option default.
  if (!ExplicitAuth)
    return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
  AuthAttr::AddressDiscriminationMode AddressDiscrimination =
      ExplicitAuth->getAddressDiscrimination();
  // DefaultAddressDiscrimination also defers to the language option.
  if (AddressDiscrimination == AuthAttr::DefaultAddressDiscrimination)
    return Context.getLangOpts().PointerAuthVTPtrAddressDiscrimination;
  return AddressDiscrimination == AuthAttr::AddressDiscrimination;
}
1636
// Determine whether type T (transitively) contains address-discriminated
// pointer-authenticated content, memoizing per-record results.
ASTContext::PointerAuthContent
ASTContext::findPointerAuthContent(QualType T) const {
  assert(isPointerAuthenticationAvailable());

  T = T.getCanonicalType();
  if (T->isDependentType())
    return PointerAuthContent::None;

  if (T.hasAddressDiscriminatedPointerAuth())
    return PointerAuthContent::AddressDiscriminatedData;
  const RecordDecl *RD = T->getAsRecordDecl();
  if (!RD)
    return PointerAuthContent::None;

  if (RD->isInvalidDecl())
    return PointerAuthContent::None;

  // Return the memoized answer if this record was analyzed before.
  if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(RD);
      Existing != RecordContainsAddressDiscriminatedPointerAuth.end())
    return Existing->second;

  PointerAuthContent Result = PointerAuthContent::None;

  // Memoize the final result for RD before returning it.
  auto SaveResultAndReturn = [&]() -> PointerAuthContent {
    auto [ResultIter, DidAdd] =
        RecordContainsAddressDiscriminatedPointerAuth.try_emplace(RD, Result);
    (void)ResultIter;
    (void)DidAdd;
    assert(DidAdd);
    return Result;
  };
  // Raise Result to NewResult if it is stronger; report whether scanning can
  // stop because the maximum (AddressDiscriminatedData) was reached. The
  // static_asserts pin the ordering None < VTable < Data this relies on.
  auto ShouldContinueAfterUpdate = [&](PointerAuthContent NewResult) {
    static_assert(PointerAuthContent::None <
                  PointerAuthContent::AddressDiscriminatedVTable);
    static_assert(PointerAuthContent::AddressDiscriminatedVTable <
                  PointerAuthContent::AddressDiscriminatedData);
    if (NewResult > Result)
      Result = NewResult;
    return Result != PointerAuthContent::AddressDiscriminatedData;
  };
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        !ShouldContinueAfterUpdate(
            PointerAuthContent::AddressDiscriminatedVTable))
      return SaveResultAndReturn();
    // Base subobjects may themselves contain authenticated content.
    for (auto Base : CXXRD->bases()) {
      if (!ShouldContinueAfterUpdate(findPointerAuthContent(Base.getType())))
        return SaveResultAndReturn();
    }
  }
  // Scan field types recursively.
  for (auto *FieldDecl : RD->fields()) {
    if (!ShouldContinueAfterUpdate(
            findPointerAuthContent(FieldDecl->getType())))
      return SaveResultAndReturn();
  }
  return SaveResultAndReturn();
}
1694
  assert(!Import->getNextLocalImport() &&
         "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  // First local import: it becomes both head and tail of the chain.
  if (!FirstLocalImport) {
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  // Otherwise append to the singly-linked chain of local imports.
  LastLocalImport->setNextLocalImport(Import);
  LastLocalImport = Import;
}
1708
1709//===----------------------------------------------------------------------===//
1710// Type Sizing and Analysis
1711//===----------------------------------------------------------------------===//
1712
1713/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1714/// scalar floating point type.
1715const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1716 switch (T->castAs<BuiltinType>()->getKind()) {
1717 default:
1718 llvm_unreachable("Not a floating point type!");
1719 case BuiltinType::BFloat16:
1720 return Target->getBFloat16Format();
1721 case BuiltinType::Float16:
1722 return Target->getHalfFormat();
1723 case BuiltinType::Half:
1724 return Target->getHalfFormat();
1725 case BuiltinType::Float: return Target->getFloatFormat();
1726 case BuiltinType::Double: return Target->getDoubleFormat();
1727 case BuiltinType::Ibm128:
1728 return Target->getIbm128Format();
1729 case BuiltinType::LongDouble:
1730 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1731 return AuxTarget->getLongDoubleFormat();
1732 return Target->getLongDoubleFormat();
1733 case BuiltinType::Float128:
1734 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1735 return AuxTarget->getFloat128Format();
1736 return Target->getFloat128Format();
1737 }
1738}
1739
/// Compute the alignment of declaration D. \p ForAlignof indicates the value
/// is for alignof/__alignof, in which case reference types are not decayed to
/// pointers and the large-array/global-variable minimums are not applied.
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
            Align = std::max(Align, Target->getLargeArrayAlign());
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      // The 'unaligned' qualifier reduces alignment back to one char.
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
        Align = std::max(Align, getMinGlobalAlignOfVar(TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have hard limitation on the maximum requestable alignment in
  // aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}
1838
  // The alignment guaranteed for exception objects, as defined by the target.
  return toCharUnitsFromBits(Target->getExnObjectAlignment());
}
1842
// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (getLangOpts().CPlusPlus) {
    if (const auto *RD = T->getAsCXXRecordDecl(); RD && !RD->isInvalidDecl()) {
      const ASTRecordLayout &layout = getASTRecordLayout(RD);
      // Use the data size (size without reusable tail padding) for records.
      Info.Width = layout.getDataSize();
    }
  }

  return Info;
}
1862
/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
                                     const ConstantArrayType *CAT) {
  TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
  uint64_t Size = CAT->getZExtSize();
  assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
          (uint64_t)(-1)/Size) &&
         "Overflow in array type char size evaluation");
  uint64_t Width = EltInfo.Width.getQuantity() * Size;
  unsigned Align = EltInfo.Align.getQuantity();
  // Round the width up to the element alignment, except in the 32-bit
  // Microsoft ABI.
  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
      Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64)
    Width = llvm::alignTo(Width, Align);
      EltInfo.AlignRequirement);
}
1882
  // Constant-size arrays are computed directly in CharUnits to avoid
  // overflowing a uint64_t bit count for very large arrays.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
    return getConstantArrayInfoInChars(*this, CAT);
  TypeInfo Info = getTypeInfo(T);
}
1890
1894
  // HLSL doesn't promote all small integer types to int, it
  // just uses the rank-based promotion rules for all types.
  if (getLangOpts().HLSL)
    return false;

  // The builtin character and small integer types promote to 'int'.
  if (const auto *BT = T->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
  if (const auto *ED = T->getAsEnumDecl()) {
    // Scoped enumerations (enum class) never promote; enums without a
    // promotion type recorded are not promotable either.
    if (T->isDependentType() || ED->getPromotionType().isNull() ||
        ED->isScoped())
      return false;

    return true;
  }

  return false;
}
1932
1936
  // Delegate to the Type* overload.
  return isAlignmentRequired(T.getTypePtr());
}
1940
                                         bool NeedsPreferredAlignment) const {
  // An alignment on a typedef overrides anything else.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // If we have an (array of) complete type, we're done.
  if (!T->isIncompleteType())
    return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);

  // If we had an array type, its element type might be a typedef
  // type with an alignment attribute.
  if (const auto *TT = T->getAs<TypedefType>())
    if (unsigned Align = TT->getDecl()->getMaxAlignment())
      return Align;

  // Otherwise, see if the declaration of the type had an attribute.
  if (const auto *TD = T->getAsTagDecl())
    return TD->getMaxAlignment();

  // Alignment is unknown for this incomplete type.
  return 0;
}
1965
  // Fast path: return the memoized result if this type was sized before.
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}
1976
1977/// getTypeInfoImpl - Return the size of the specified type, in bits. This
1978/// method does not work on incomplete types.
1979///
1980/// FIXME: Pointers into different addr spaces could have different sizes and
1981/// alignment requirements: getPointerInfo should take an AddrSpace, this
1982/// should take a QualType, &c.
1983TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1984 uint64_t Width = 0;
1985 unsigned Align = 8;
1988 switch (T->getTypeClass()) {
1989#define TYPE(Class, Base)
1990#define ABSTRACT_TYPE(Class, Base)
1991#define NON_CANONICAL_TYPE(Class, Base)
1992#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1993#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1994 case Type::Class: \
1995 assert(!T->isDependentType() && "should not see dependent types here"); \
1996 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1997#include "clang/AST/TypeNodes.inc"
1998 llvm_unreachable("Should not see dependent types");
1999
2000 case Type::FunctionNoProto:
2001 case Type::FunctionProto:
2002 // GCC extension: alignof(function) = 32 bits
2003 Width = 0;
2004 Align = 32;
2005 break;
2006
2007 case Type::IncompleteArray:
2008 case Type::VariableArray:
2009 case Type::ConstantArray:
2010 case Type::ArrayParameter: {
2011 // Model non-constant sized arrays as size zero, but track the alignment.
2012 uint64_t Size = 0;
2013 if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
2014 Size = CAT->getZExtSize();
2015
2016 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
2017 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
2018 "Overflow in array type bit size evaluation");
2019 Width = EltInfo.Width * Size;
2020 Align = EltInfo.Align;
2021 AlignRequirement = EltInfo.AlignRequirement;
2022 if (!getTargetInfo().getCXXABI().isMicrosoft() ||
2023 getTargetInfo().getPointerWidth(LangAS::Default) == 64)
2024 Width = llvm::alignTo(Width, Align);
2025 break;
2026 }
2027
2028 case Type::ExtVector:
2029 case Type::Vector: {
2030 const auto *VT = cast<VectorType>(T);
2031 TypeInfo EltInfo = getTypeInfo(VT->getElementType());
2032 Width = VT->isPackedVectorBoolType(*this)
2033 ? VT->getNumElements()
2034 : EltInfo.Width * VT->getNumElements();
2035 // Enforce at least byte size and alignment.
2036 Width = std::max<unsigned>(8, Width);
2037 Align = std::max<unsigned>(8, Width);
2038
2039 // If the alignment is not a power of 2, round up to the next power of 2.
2040 // This happens for non-power-of-2 length vectors.
2041 if (Align & (Align-1)) {
2042 Align = llvm::bit_ceil(Align);
2043 Width = llvm::alignTo(Width, Align);
2044 }
2045 // Adjust the alignment based on the target max.
2046 uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
2047 if (TargetVectorAlign && TargetVectorAlign < Align)
2048 Align = TargetVectorAlign;
2049 if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
2050 // Adjust the alignment for fixed-length SVE vectors. This is important
2051 // for non-power-of-2 vector lengths.
2052 Align = 128;
2053 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
2054 // Adjust the alignment for fixed-length SVE predicates.
2055 Align = 16;
2056 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
2057 VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
2058 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
2059 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
2060 VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
2061 // Adjust the alignment for fixed-length RVV vectors.
2062 Align = std::min<unsigned>(64, Width);
2063 break;
2064 }
2065
2066 case Type::ConstantMatrix: {
2067 const auto *MT = cast<ConstantMatrixType>(T);
2068 TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
2069 // The internal layout of a matrix value is implementation defined.
2070 // Initially be ABI compatible with arrays with respect to alignment and
2071 // size.
2072 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
2073 Align = ElementInfo.Align;
2074 break;
2075 }
2076
2077 case Type::Builtin:
2078 switch (cast<BuiltinType>(T)->getKind()) {
2079 default: llvm_unreachable("Unknown builtin type!");
2080 case BuiltinType::Void:
2081 // GCC extension: alignof(void) = 8 bits.
2082 Width = 0;
2083 Align = 8;
2084 break;
2085 case BuiltinType::Bool:
2086 Width = Target->getBoolWidth();
2087 Align = Target->getBoolAlign();
2088 break;
2089 case BuiltinType::Char_S:
2090 case BuiltinType::Char_U:
2091 case BuiltinType::UChar:
2092 case BuiltinType::SChar:
2093 case BuiltinType::Char8:
2094 Width = Target->getCharWidth();
2095 Align = Target->getCharAlign();
2096 break;
2097 case BuiltinType::WChar_S:
2098 case BuiltinType::WChar_U:
2099 Width = Target->getWCharWidth();
2100 Align = Target->getWCharAlign();
2101 break;
2102 case BuiltinType::Char16:
2103 Width = Target->getChar16Width();
2104 Align = Target->getChar16Align();
2105 break;
2106 case BuiltinType::Char32:
2107 Width = Target->getChar32Width();
2108 Align = Target->getChar32Align();
2109 break;
2110 case BuiltinType::UShort:
2111 case BuiltinType::Short:
2112 Width = Target->getShortWidth();
2113 Align = Target->getShortAlign();
2114 break;
2115 case BuiltinType::UInt:
2116 case BuiltinType::Int:
2117 Width = Target->getIntWidth();
2118 Align = Target->getIntAlign();
2119 break;
2120 case BuiltinType::ULong:
2121 case BuiltinType::Long:
2122 Width = Target->getLongWidth();
2123 Align = Target->getLongAlign();
2124 break;
2125 case BuiltinType::ULongLong:
2126 case BuiltinType::LongLong:
2127 Width = Target->getLongLongWidth();
2128 Align = Target->getLongLongAlign();
2129 break;
2130 case BuiltinType::Int128:
2131 case BuiltinType::UInt128:
2132 Width = 128;
2133 Align = Target->getInt128Align();
2134 break;
2135 case BuiltinType::ShortAccum:
2136 case BuiltinType::UShortAccum:
2137 case BuiltinType::SatShortAccum:
2138 case BuiltinType::SatUShortAccum:
2139 Width = Target->getShortAccumWidth();
2140 Align = Target->getShortAccumAlign();
2141 break;
2142 case BuiltinType::Accum:
2143 case BuiltinType::UAccum:
2144 case BuiltinType::SatAccum:
2145 case BuiltinType::SatUAccum:
2146 Width = Target->getAccumWidth();
2147 Align = Target->getAccumAlign();
2148 break;
2149 case BuiltinType::LongAccum:
2150 case BuiltinType::ULongAccum:
2151 case BuiltinType::SatLongAccum:
2152 case BuiltinType::SatULongAccum:
2153 Width = Target->getLongAccumWidth();
2154 Align = Target->getLongAccumAlign();
2155 break;
2156 case BuiltinType::ShortFract:
2157 case BuiltinType::UShortFract:
2158 case BuiltinType::SatShortFract:
2159 case BuiltinType::SatUShortFract:
2160 Width = Target->getShortFractWidth();
2161 Align = Target->getShortFractAlign();
2162 break;
2163 case BuiltinType::Fract:
2164 case BuiltinType::UFract:
2165 case BuiltinType::SatFract:
2166 case BuiltinType::SatUFract:
2167 Width = Target->getFractWidth();
2168 Align = Target->getFractAlign();
2169 break;
2170 case BuiltinType::LongFract:
2171 case BuiltinType::ULongFract:
2172 case BuiltinType::SatLongFract:
2173 case BuiltinType::SatULongFract:
2174 Width = Target->getLongFractWidth();
2175 Align = Target->getLongFractAlign();
2176 break;
2177 case BuiltinType::BFloat16:
2178 if (Target->hasBFloat16Type()) {
2179 Width = Target->getBFloat16Width();
2180 Align = Target->getBFloat16Align();
2181 } else if ((getLangOpts().SYCLIsDevice ||
2182 (getLangOpts().OpenMP &&
2183 getLangOpts().OpenMPIsTargetDevice)) &&
2184 AuxTarget->hasBFloat16Type()) {
2185 Width = AuxTarget->getBFloat16Width();
2186 Align = AuxTarget->getBFloat16Align();
2187 }
2188 break;
2189 case BuiltinType::Float16:
2190 case BuiltinType::Half:
2191 if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2192 !getLangOpts().OpenMPIsTargetDevice) {
2193 Width = Target->getHalfWidth();
2194 Align = Target->getHalfAlign();
2195 } else {
2196 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2197 "Expected OpenMP device compilation.");
2198 Width = AuxTarget->getHalfWidth();
2199 Align = AuxTarget->getHalfAlign();
2200 }
2201 break;
2202 case BuiltinType::Float:
2203 Width = Target->getFloatWidth();
2204 Align = Target->getFloatAlign();
2205 break;
2206 case BuiltinType::Double:
2207 Width = Target->getDoubleWidth();
2208 Align = Target->getDoubleAlign();
2209 break;
2210 case BuiltinType::Ibm128:
2211 Width = Target->getIbm128Width();
2212 Align = Target->getIbm128Align();
2213 break;
2214 case BuiltinType::LongDouble:
2215 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2216 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2217 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2218 Width = AuxTarget->getLongDoubleWidth();
2219 Align = AuxTarget->getLongDoubleAlign();
2220 } else {
2221 Width = Target->getLongDoubleWidth();
2222 Align = Target->getLongDoubleAlign();
2223 }
2224 break;
2225 case BuiltinType::Float128:
2226 if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2227 !getLangOpts().OpenMPIsTargetDevice) {
2228 Width = Target->getFloat128Width();
2229 Align = Target->getFloat128Align();
2230 } else {
2231 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2232 "Expected OpenMP device compilation.");
2233 Width = AuxTarget->getFloat128Width();
2234 Align = AuxTarget->getFloat128Align();
2235 }
2236 break;
2237 case BuiltinType::NullPtr:
2238 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
2239 Width = Target->getPointerWidth(LangAS::Default);
2240 Align = Target->getPointerAlign(LangAS::Default);
2241 break;
2242 case BuiltinType::ObjCId:
2243 case BuiltinType::ObjCClass:
2244 case BuiltinType::ObjCSel:
2245 Width = Target->getPointerWidth(LangAS::Default);
2246 Align = Target->getPointerAlign(LangAS::Default);
2247 break;
2248 case BuiltinType::OCLSampler:
2249 case BuiltinType::OCLEvent:
2250 case BuiltinType::OCLClkEvent:
2251 case BuiltinType::OCLQueue:
2252 case BuiltinType::OCLReserveID:
2253#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2254 case BuiltinType::Id:
2255#include "clang/Basic/OpenCLImageTypes.def"
2256#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2257 case BuiltinType::Id:
2258#include "clang/Basic/OpenCLExtensionTypes.def"
2259 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
2260 Width = Target->getPointerWidth(AS);
2261 Align = Target->getPointerAlign(AS);
2262 break;
2263 // The SVE types are effectively target-specific. The length of an
2264 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2265 // of 128 bits. There is one predicate bit for each vector byte, so the
2266 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2267 //
2268 // Because the length is only known at runtime, we use a dummy value
2269 // of 0 for the static length. The alignment values are those defined
2270 // by the Procedure Call Standard for the Arm Architecture.
2271#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId) \
2272 case BuiltinType::Id: \
2273 Width = 0; \
2274 Align = 128; \
2275 break;
2276#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId) \
2277 case BuiltinType::Id: \
2278 Width = 0; \
2279 Align = 16; \
2280 break;
2281#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
2282 case BuiltinType::Id: \
2283 Width = 0; \
2284 Align = 16; \
2285 break;
2286#define SVE_SCALAR_TYPE(Name, MangledName, Id, SingletonId, Bits) \
2287 case BuiltinType::Id: \
2288 Width = Bits; \
2289 Align = Bits; \
2290 break;
2291#include "clang/Basic/AArch64ACLETypes.def"
2292#define PPC_VECTOR_TYPE(Name, Id, Size) \
2293 case BuiltinType::Id: \
2294 Width = Size; \
2295 Align = Size; \
2296 break;
2297#include "clang/Basic/PPCTypes.def"
2298#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2299 IsFP, IsBF) \
2300 case BuiltinType::Id: \
2301 Width = 0; \
2302 Align = ElBits; \
2303 break;
2304#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2305 case BuiltinType::Id: \
2306 Width = 0; \
2307 Align = 8; \
2308 break;
2309#include "clang/Basic/RISCVVTypes.def"
2310#define WASM_TYPE(Name, Id, SingletonId) \
2311 case BuiltinType::Id: \
2312 Width = 0; \
2313 Align = 8; \
2314 break;
2315#include "clang/Basic/WebAssemblyReferenceTypes.def"
2316#define AMDGPU_TYPE(NAME, ID, SINGLETONID, WIDTH, ALIGN) \
2317 case BuiltinType::ID: \
2318 Width = WIDTH; \
2319 Align = ALIGN; \
2320 break;
2321#include "clang/Basic/AMDGPUTypes.def"
2322#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
2323#include "clang/Basic/HLSLIntangibleTypes.def"
2324 Width = Target->getPointerWidth(LangAS::Default);
2325 Align = Target->getPointerAlign(LangAS::Default);
2326 break;
2327 }
2328 break;
2329 case Type::ObjCObjectPointer:
2330 Width = Target->getPointerWidth(LangAS::Default);
2331 Align = Target->getPointerAlign(LangAS::Default);
2332 break;
2333 case Type::BlockPointer:
2334 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
2335 Width = Target->getPointerWidth(AS);
2336 Align = Target->getPointerAlign(AS);
2337 break;
2338 case Type::LValueReference:
2339 case Type::RValueReference:
2340 // alignof and sizeof should never enter this code path here, so we go
2341 // the pointer route.
2342 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
2343 Width = Target->getPointerWidth(AS);
2344 Align = Target->getPointerAlign(AS);
2345 break;
2346 case Type::Pointer:
2347 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
2348 Width = Target->getPointerWidth(AS);
2349 Align = Target->getPointerAlign(AS);
2350 break;
2351 case Type::MemberPointer: {
2352 const auto *MPT = cast<MemberPointerType>(T);
2353 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT);
2354 Width = MPI.Width;
2355 Align = MPI.Align;
2356 break;
2357 }
2358 case Type::Complex: {
2359 // Complex types have the same alignment as their elements, but twice the
2360 // size.
2361 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2362 Width = EltInfo.Width * 2;
2363 Align = EltInfo.Align;
2364 break;
2365 }
2366 case Type::ObjCObject:
2367 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2368 case Type::Adjusted:
2369 case Type::Decayed:
2370 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2371 case Type::ObjCInterface: {
2372 const auto *ObjCI = cast<ObjCInterfaceType>(T);
2373 if (ObjCI->getDecl()->isInvalidDecl()) {
2374 Width = 8;
2375 Align = 8;
2376 break;
2377 }
2378 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2379 Width = toBits(Layout.getSize());
2380 Align = toBits(Layout.getAlignment());
2381 break;
2382 }
2383 case Type::BitInt: {
2384 const auto *EIT = cast<BitIntType>(T);
2385 Align = Target->getBitIntAlign(EIT->getNumBits());
2386 Width = Target->getBitIntWidth(EIT->getNumBits());
2387 break;
2388 }
2389 case Type::Record:
2390 case Type::Enum: {
2391 const auto *TT = cast<TagType>(T);
2392 const TagDecl *TD = TT->getDecl()->getDefinitionOrSelf();
2393
2394 if (TD->isInvalidDecl()) {
2395 Width = 8;
2396 Align = 8;
2397 break;
2398 }
2399
2400 if (isa<EnumType>(TT)) {
2401 const EnumDecl *ED = cast<EnumDecl>(TD);
2402 TypeInfo Info =
2404 if (unsigned AttrAlign = ED->getMaxAlignment()) {
2405 Info.Align = AttrAlign;
2407 }
2408 return Info;
2409 }
2410
2411 const auto *RD = cast<RecordDecl>(TD);
2412 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2413 Width = toBits(Layout.getSize());
2414 Align = toBits(Layout.getAlignment());
2415 AlignRequirement = RD->hasAttr<AlignedAttr>()
2417 : AlignRequirementKind::None;
2418 break;
2419 }
2420
2421 case Type::SubstTemplateTypeParm:
2423 getReplacementType().getTypePtr());
2424
2425 case Type::Auto:
2426 case Type::DeducedTemplateSpecialization: {
2427 const auto *A = cast<DeducedType>(T);
2428 assert(!A->getDeducedType().isNull() &&
2429 "cannot request the size of an undeduced or dependent auto type");
2430 return getTypeInfo(A->getDeducedType().getTypePtr());
2431 }
2432
2433 case Type::Paren:
2434 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2435
2436 case Type::MacroQualified:
2437 return getTypeInfo(
2439
2440 case Type::ObjCTypeParam:
2441 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2442
2443 case Type::Using:
2444 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
2445
2446 case Type::Typedef: {
2447 const auto *TT = cast<TypedefType>(T);
2448 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
2449 // If the typedef has an aligned attribute on it, it overrides any computed
2450 // alignment we have. This violates the GCC documentation (which says that
2451 // attribute(aligned) can only round up) but matches its implementation.
2452 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2453 Align = AttrAlign;
2454 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2455 } else {
2456 Align = Info.Align;
2457 AlignRequirement = Info.AlignRequirement;
2458 }
2459 Width = Info.Width;
2460 break;
2461 }
2462
2463 case Type::Attributed:
2464 return getTypeInfo(
2465 cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2466
2467 case Type::CountAttributed:
2468 return getTypeInfo(cast<CountAttributedType>(T)->desugar().getTypePtr());
2469
2470 case Type::BTFTagAttributed:
2471 return getTypeInfo(
2472 cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2473
2474 case Type::HLSLAttributedResource:
2475 return getTypeInfo(
2476 cast<HLSLAttributedResourceType>(T)->getWrappedType().getTypePtr());
2477
2478 case Type::HLSLInlineSpirv: {
2479 const auto *ST = cast<HLSLInlineSpirvType>(T);
2480 // Size is specified in bytes, convert to bits
2481 Width = ST->getSize() * 8;
2482 Align = ST->getAlignment();
2483 if (Width == 0 && Align == 0) {
2484 // We are defaulting to laying out opaque SPIR-V types as 32-bit ints.
2485 Width = 32;
2486 Align = 32;
2487 }
2488 break;
2489 }
2490
2491 case Type::Atomic: {
2492 // Start with the base type information.
2493 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2494 Width = Info.Width;
2495 Align = Info.Align;
2496
2497 if (!Width) {
2498 // An otherwise zero-sized type should still generate an
2499 // atomic operation.
2500 Width = Target->getCharWidth();
2501 assert(Align);
2502 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2503 // If the size of the type doesn't exceed the platform's max
2504 // atomic promotion width, make the size and alignment more
2505 // favorable to atomic operations:
2506
2507 // Round the size up to a power of 2.
2508 Width = llvm::bit_ceil(Width);
2509
2510 // Set the alignment equal to the size.
2511 Align = static_cast<unsigned>(Width);
2512 }
2513 }
2514 break;
2515
2516 case Type::PredefinedSugar:
2517 return getTypeInfo(cast<PredefinedSugarType>(T)->desugar().getTypePtr());
2518
2519 case Type::Pipe:
2520 Width = Target->getPointerWidth(LangAS::opencl_global);
2521 Align = Target->getPointerAlign(LangAS::opencl_global);
2522 break;
2523 }
2524
2525 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2526 return TypeInfo(Width, Align, AlignRequirement);
2527}
2528
2530 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2531 if (I != MemoizedUnadjustedAlign.end())
2532 return I->second;
2533
2534 unsigned UnadjustedAlign;
2535 if (const auto *RT = T->getAsCanonical<RecordType>()) {
2536 const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl());
2537 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2538 } else if (const auto *ObjCI = T->getAsCanonical<ObjCInterfaceType>()) {
2539 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2540 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2541 } else {
2542 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2543 }
2544
2545 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2546 return UnadjustedAlign;
2547}
2548
2550 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2551 getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2552 return SimdAlign;
2553}
2554
2555/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2557 return CharUnits::fromQuantity(BitSize / getCharWidth());
2558}
2559
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
2564
2565/// getTypeSizeInChars - Return the size of the specified type, in characters.
2566/// This method does not work on incomplete types.
2573
2574/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2575/// characters. This method does not work on incomplete types.
2582
2583/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2584/// type, in characters, before alignment adjustments. This method does
2585/// not work on incomplete types.
2592
2593/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2594/// type for the current target in bits. This can be different than the ABI
2595/// alignment in cases where it is beneficial for performance or backwards
2596/// compatibility preserving to overalign a data type. (Note: despite the name,
2597/// the preferred alignment is ABI-impacting, and not an optimization.)
2599 TypeInfo TI = getTypeInfo(T);
2600 unsigned ABIAlign = TI.Align;
2601
2602 T = T->getBaseElementTypeUnsafe();
2603
2604 // The preferred alignment of member pointers is that of a pointer.
2605 if (T->isMemberPointerType())
2606 return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2607
2608 if (!Target->allowsLargerPreferedTypeAlignment())
2609 return ABIAlign;
2610
2611 if (const auto *RD = T->getAsRecordDecl()) {
2612 // When used as part of a typedef, or together with a 'packed' attribute,
2613 // the 'aligned' attribute can be used to decrease alignment. Note that the
2614 // 'packed' case is already taken into consideration when computing the
2615 // alignment, we only need to handle the typedef case here.
2617 RD->isInvalidDecl())
2618 return ABIAlign;
2619
2620 unsigned PreferredAlign = static_cast<unsigned>(
2621 toBits(getASTRecordLayout(RD).PreferredAlignment));
2622 assert(PreferredAlign >= ABIAlign &&
2623 "PreferredAlign should be at least as large as ABIAlign.");
2624 return PreferredAlign;
2625 }
2626
2627 // Double (and, for targets supporting AIX `power` alignment, long double) and
2628 // long long should be naturally aligned (despite requiring less alignment) if
2629 // possible.
2630 if (const auto *CT = T->getAs<ComplexType>())
2631 T = CT->getElementType().getTypePtr();
2632 if (const auto *ED = T->getAsEnumDecl())
2633 T = ED->getIntegerType().getTypePtr();
2634 if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2635 T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2636 T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2637 (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2638 Target->defaultsToAIXPowerAlignment()))
2639 // Don't increase the alignment if an alignment attribute was specified on a
2640 // typedef declaration.
2641 if (!TI.isAlignRequired())
2642 return std::max(ABIAlign, (unsigned)getTypeSize(T));
2643
2644 return ABIAlign;
2645}
2646
2647/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2648/// for __attribute__((aligned)) on this target, to be used if no alignment
2649/// value is specified.
2653
2654/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2655/// to a global variable of the specified type.
2657 uint64_t TypeSize = getTypeSize(T.getTypePtr());
2658 return std::max(getPreferredTypeAlign(T),
2659 getMinGlobalAlignOfVar(TypeSize, VD));
2660}
2661
2662/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2663/// should be given to a global variable of the specified type.
2668
2670 const VarDecl *VD) const {
2671 // Make the default handling as that of a non-weak definition in the
2672 // current translation unit.
2673 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2674 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2675}
2676
2678 CharUnits Offset = CharUnits::Zero();
2679 const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2680 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2681 Offset += Layout->getBaseClassOffset(Base);
2682 Layout = &getASTRecordLayout(Base);
2683 }
2684 return Offset;
2685}
2686
2688 const ValueDecl *MPD = MP.getMemberPointerDecl();
2691 bool DerivedMember = MP.isMemberPointerToDerivedMember();
2693 for (unsigned I = 0, N = Path.size(); I != N; ++I) {
2694 const CXXRecordDecl *Base = RD;
2695 const CXXRecordDecl *Derived = Path[I];
2696 if (DerivedMember)
2697 std::swap(Base, Derived);
2699 RD = Path[I];
2700 }
2701 if (DerivedMember)
2703 return ThisAdjustment;
2704}
2705
2706/// DeepCollectObjCIvars -
2707/// This routine first collects all declared, but not synthesized, ivars in
2708/// super class and then collects all ivars, including those synthesized for
2709/// current class. This routine is used for implementation of current class
2710/// when all ivars, declared and synthesized are known.
2712 bool leafClass,
2714 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2715 DeepCollectObjCIvars(SuperClass, false, Ivars);
2716 if (!leafClass) {
2717 llvm::append_range(Ivars, OI->ivars());
2718 } else {
2719 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2720 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2721 Iv= Iv->getNextIvar())
2722 Ivars.push_back(Iv);
2723 }
2724}
2725
2726/// CollectInheritedProtocols - Collect all protocols in current class and
2727/// those inherited by it.
2730 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
2731 // We can use protocol_iterator here instead of
2732 // all_referenced_protocol_iterator since we are walking all categories.
2733 for (auto *Proto : OI->all_referenced_protocols()) {
2734 CollectInheritedProtocols(Proto, Protocols);
2735 }
2736
2737 // Categories of this Interface.
2738 for (const auto *Cat : OI->visible_categories())
2739 CollectInheritedProtocols(Cat, Protocols);
2740
2741 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2742 while (SD) {
2743 CollectInheritedProtocols(SD, Protocols);
2744 SD = SD->getSuperClass();
2745 }
2746 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
2747 for (auto *Proto : OC->protocols()) {
2748 CollectInheritedProtocols(Proto, Protocols);
2749 }
2750 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
2751 // Insert the protocol.
2752 if (!Protocols.insert(
2753 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2754 return;
2755
2756 for (auto *Proto : OP->protocols())
2757 CollectInheritedProtocols(Proto, Protocols);
2758 }
2759}
2760
2762 const RecordDecl *RD,
2763 bool CheckIfTriviallyCopyable) {
2764 assert(RD->isUnion() && "Must be union type");
2765 CharUnits UnionSize =
2766 Context.getTypeSizeInChars(Context.getCanonicalTagType(RD));
2767
2768 for (const auto *Field : RD->fields()) {
2769 if (!Context.hasUniqueObjectRepresentations(Field->getType(),
2770 CheckIfTriviallyCopyable))
2771 return false;
2772 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
2773 if (FieldSize != UnionSize)
2774 return false;
2775 }
2776 return !RD->field_empty();
2777}
2778
/// Return the bit offset of \p Field within its enclosing record.
///
/// The layout parameter is intentionally unused for fields: the offset is
/// looked up directly via the ASTContext's field-offset table. The parameter
/// exists only so this overload is interchangeable with the base-class
/// overload in the generic subobject-walking code.
static int64_t getSubobjectOffset(const FieldDecl *Field,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout & /*Layout*/) {
  return Context.getFieldOffset(Field);
}
2784
/// Return the bit offset of the base class \p RD within the derived class
/// whose layout is described by \p Layout.
///
/// getBaseClassOffset yields CharUnits, so convert to bits to match the
/// FieldDecl overload used by the generic subobject-walking code.
static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
                                  const ASTContext &Context,
                                  const clang::ASTRecordLayout &Layout) {
  return Context.toBits(Layout.getBaseClassOffset(RD));
}
2790
2791static std::optional<int64_t>
2793 const RecordDecl *RD,
2794 bool CheckIfTriviallyCopyable);
2795
/// Compute the number of bits of \p Field that participate in the object
/// representation, for the unique-object-representations check.
///
/// Returns std::nullopt if the field's type does not have unique object
/// representations (i.e. it may contain padding or otherwise allow two
/// distinct representations of the same value). Returns 0 for unnamed
/// bit-fields, which occupy storage but contribute nothing to the value
/// representation.
static std::optional<int64_t>
getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
                       bool CheckIfTriviallyCopyable) {
  // Non-union record fields are handled by the recursive struct walk, which
  // validates each of their subobjects and computes their occupied size.
  if (const auto *RD = Field->getType()->getAsRecordDecl();
      RD && !RD->isUnion())
    return structHasUniqueObjectRepresentations(Context, RD,
                                                CheckIfTriviallyCopyable);

  // A _BitInt type may not be unique if it has padding bits
  // but if it is a bitfield the padding bits are not used.
  bool IsBitIntType = Field->getType()->isBitIntType();
  // References and _BitInt get special treatment below; everything else must
  // itself have unique object representations or the whole field fails.
  if (!Field->getType()->isReferenceType() && !IsBitIntType &&
      !Context.hasUniqueObjectRepresentations(Field->getType(),
                                              CheckIfTriviallyCopyable))
    return std::nullopt;

  int64_t FieldSizeInBits =
      Context.toBits(Context.getTypeSizeInChars(Field->getType()));
  if (Field->isBitField()) {
    // If we have explicit padding bits, they don't contribute bits
    // to the actual object representation, so return 0.
    if (Field->isUnnamedBitField())
      return 0;

    int64_t BitfieldSize = Field->getBitWidthValue();
    if (IsBitIntType) {
      // A bit-field wider than the _BitInt's declared bit count would expose
      // the _BitInt's padding bits, which are not part of the value.
      if ((unsigned)BitfieldSize >
          cast<BitIntType>(Field->getType())->getNumBits())
        return std::nullopt;
    } else if (BitfieldSize > FieldSizeInBits) {
      // Width beyond the underlying type's size is pure padding.
      return std::nullopt;
    }
    // Only the declared bit-field width participates in the representation.
    FieldSizeInBits = BitfieldSize;
  } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
                                 Field->getType(), CheckIfTriviallyCopyable)) {
    // A non-bit-field _BitInt still exposes its padding bits, so it must pass
    // the uniqueness check on its own.
    return std::nullopt;
  }
  return FieldSizeInBits;
}
2835
2836static std::optional<int64_t>
2838 bool CheckIfTriviallyCopyable) {
2839 return structHasUniqueObjectRepresentations(Context, RD,
2840 CheckIfTriviallyCopyable);
2841}
2842
2843template <typename RangeT>
2845 const RangeT &Subobjects, int64_t CurOffsetInBits,
2846 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2847 bool CheckIfTriviallyCopyable) {
2848 for (const auto *Subobject : Subobjects) {
2849 std::optional<int64_t> SizeInBits =
2850 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2851 if (!SizeInBits)
2852 return std::nullopt;
2853 if (*SizeInBits != 0) {
2854 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2855 if (Offset != CurOffsetInBits)
2856 return std::nullopt;
2857 CurOffsetInBits += *SizeInBits;
2858 }
2859 }
2860 return CurOffsetInBits;
2861}
2862
2863static std::optional<int64_t>
2865 const RecordDecl *RD,
2866 bool CheckIfTriviallyCopyable) {
2867 assert(!RD->isUnion() && "Must be struct/class type");
2868 const auto &Layout = Context.getASTRecordLayout(RD);
2869
2870 int64_t CurOffsetInBits = 0;
2871 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
2872 if (ClassDecl->isDynamicClass())
2873 return std::nullopt;
2874
2876 for (const auto &Base : ClassDecl->bases()) {
2877 // Empty types can be inherited from, and non-empty types can potentially
2878 // have tail padding, so just make sure there isn't an error.
2879 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
2880 }
2881
2882 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
2883 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
2884 });
2885
2886 std::optional<int64_t> OffsetAfterBases =
2888 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
2889 if (!OffsetAfterBases)
2890 return std::nullopt;
2891 CurOffsetInBits = *OffsetAfterBases;
2892 }
2893
2894 std::optional<int64_t> OffsetAfterFields =
2896 RD->fields(), CurOffsetInBits, Context, Layout,
2897 CheckIfTriviallyCopyable);
2898 if (!OffsetAfterFields)
2899 return std::nullopt;
2900 CurOffsetInBits = *OffsetAfterFields;
2901
2902 return CurOffsetInBits;
2903}
2904
2906 QualType Ty, bool CheckIfTriviallyCopyable) const {
2907 // C++17 [meta.unary.prop]:
2908 // The predicate condition for a template specialization
2909 // has_unique_object_representations<T> shall be satisfied if and only if:
2910 // (9.1) - T is trivially copyable, and
2911 // (9.2) - any two objects of type T with the same value have the same
2912 // object representation, where:
2913 // - two objects of array or non-union class type are considered to have
2914 // the same value if their respective sequences of direct subobjects
2915 // have the same values, and
2916 // - two objects of union type are considered to have the same value if
2917 // they have the same active member and the corresponding members have
2918 // the same value.
2919 // The set of scalar types for which this condition holds is
2920 // implementation-defined. [ Note: If a type has padding bits, the condition
2921 // does not hold; otherwise, the condition holds true for unsigned integral
2922 // types. -- end note ]
2923 assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2924
2925 // Arrays are unique only if their element type is unique.
2926 if (Ty->isArrayType())
2928 CheckIfTriviallyCopyable);
2929
2930 assert((Ty->isVoidType() || !Ty->isIncompleteType()) &&
2931 "hasUniqueObjectRepresentations should not be called with an "
2932 "incomplete type");
2933
2934 // (9.1) - T is trivially copyable...
2935 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
2936 return false;
2937
2938 // All integrals and enums are unique.
2939 if (Ty->isIntegralOrEnumerationType()) {
2940 // Address discriminated integer types are not unique.
2942 return false;
2943 // Except _BitInt types that have padding bits.
2944 if (const auto *BIT = Ty->getAs<BitIntType>())
2945 return getTypeSize(BIT) == BIT->getNumBits();
2946
2947 return true;
2948 }
2949
2950 // All other pointers are unique.
2951 if (Ty->isPointerType())
2953
2954 if (const auto *MPT = Ty->getAs<MemberPointerType>())
2955 return !ABI->getMemberPointerInfo(MPT).HasPadding;
2956
2957 if (const auto *Record = Ty->getAsRecordDecl()) {
2958 if (Record->isInvalidDecl())
2959 return false;
2960
2961 if (Record->isUnion())
2963 CheckIfTriviallyCopyable);
2964
2965 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
2966 *this, Record, CheckIfTriviallyCopyable);
2967
2968 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
2969 }
2970
2971 // FIXME: More cases to handle here (list by rsmith):
2972 // vectors (careful about, eg, vector of 3 foo)
2973 // _Complex int and friends
2974 // _Atomic T
2975 // Obj-C block pointers
2976 // Obj-C object pointers
2977 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2978 // clk_event_t, queue_t, reserve_id_t)
2979 // There're also Obj-C class types and the Obj-C selector type, but I think it
2980 // makes sense for those to return false here.
2981
2982 return false;
2983}
2984
2986 unsigned count = 0;
2987 // Count ivars declared in class extension.
2988 for (const auto *Ext : OI->known_extensions())
2989 count += Ext->ivar_size();
2990
2991 // Count ivar defined in this class's implementation. This
2992 // includes synthesized ivars.
2993 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2994 count += ImplDecl->ivar_size();
2995
2996 return count;
2997}
2998
3000 if (!E)
3001 return false;
3002
3003 // nullptr_t is always treated as null.
3004 if (E->getType()->isNullPtrType()) return true;
3005
3006 if (E->getType()->isAnyPointerType() &&
3009 return true;
3010
3011 // Unfortunately, __null has type 'int'.
3012 if (isa<GNUNullExpr>(E)) return true;
3013
3014 return false;
3015}
3016
3017/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
3018/// exists.
3020 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3021 I = ObjCImpls.find(D);
3022 if (I != ObjCImpls.end())
3023 return cast<ObjCImplementationDecl>(I->second);
3024 return nullptr;
3025}
3026
3027/// Get the implementation of ObjCCategoryDecl, or nullptr if none
3028/// exists.
3030 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
3031 I = ObjCImpls.find(D);
3032 if (I != ObjCImpls.end())
3033 return cast<ObjCCategoryImplDecl>(I->second);
3034 return nullptr;
3035}
3036
3037/// Set the implementation of ObjCInterfaceDecl.
3039 ObjCImplementationDecl *ImplD) {
3040 assert(IFaceD && ImplD && "Passed null params");
3041 ObjCImpls[IFaceD] = ImplD;
3042}
3043
3044/// Set the implementation of ObjCCategoryDecl.
3046 ObjCCategoryImplDecl *ImplD) {
3047 assert(CatD && ImplD && "Passed null params");
3048 ObjCImpls[CatD] = ImplD;
3049}
3050
3051const ObjCMethodDecl *
3053 return ObjCMethodRedecls.lookup(MD);
3054}
3055
3057 const ObjCMethodDecl *Redecl) {
3058 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
3059 ObjCMethodRedecls[MD] = Redecl;
3060}
3061
3063 const NamedDecl *ND) const {
3064 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
3065 return ID;
3066 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
3067 return CD->getClassInterface();
3068 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
3069 return IMD->getClassInterface();
3070
3071 return nullptr;
3072}
3073
3074/// Get the copy initialization expression of VarDecl, or nullptr if
3075/// none exists.
3077 assert(VD && "Passed null params");
3078 assert(VD->hasAttr<BlocksAttr>() &&
3079 "getBlockVarCopyInits - not __block var");
3080 auto I = BlockVarCopyInits.find(VD);
3081 if (I != BlockVarCopyInits.end())
3082 return I->second;
3083 return {nullptr, false};
3084}
3085
3086/// Set the copy initialization expression of a block var decl.
3088 bool CanThrow) {
3089 assert(VD && CopyExpr && "Passed null params");
3090 assert(VD->hasAttr<BlocksAttr>() &&
3091 "setBlockVarCopyInits - not __block var");
3092 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
3093}
3094
3096 unsigned DataSize) const {
3097 if (!DataSize)
3099 else
3100 assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
3101 "incorrect data size provided to CreateTypeSourceInfo!");
3102
3103 auto *TInfo =
3104 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
3105 new (TInfo) TypeSourceInfo(T, DataSize);
3106 return TInfo;
3107}
3108
3110 SourceLocation L) const {
3112 TSI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
3113 return TSI;
3114}
3115
3116const ASTRecordLayout &
3118 return getObjCLayout(D);
3119}
3120
3123 bool &AnyNonCanonArgs) {
3124 SmallVector<TemplateArgument, 16> CanonArgs(Args);
3125 AnyNonCanonArgs |= C.canonicalizeTemplateArguments(CanonArgs);
3126 return CanonArgs;
3127}
3128
3131 bool AnyNonCanonArgs = false;
3132 for (auto &Arg : Args) {
3133 TemplateArgument OrigArg = Arg;
3135 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
3136 }
3137 return AnyNonCanonArgs;
3138}
3139
3140//===----------------------------------------------------------------------===//
3141// Type creation/memoization methods
3142//===----------------------------------------------------------------------===//
3143
3145ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
3146 unsigned fastQuals = quals.getFastQualifiers();
3147 quals.removeFastQualifiers();
3148
3149 // Check if we've already instantiated this type.
3150 llvm::FoldingSetNodeID ID;
3151 ExtQuals::Profile(ID, baseType, quals);
3152 void *insertPos = nullptr;
3153 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
3154 assert(eq->getQualifiers() == quals);
3155 return QualType(eq, fastQuals);
3156 }
3157
3158 // If the base type is not canonical, make the appropriate canonical type.
3159 QualType canon;
3160 if (!baseType->isCanonicalUnqualified()) {
3161 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
3162 canonSplit.Quals.addConsistentQualifiers(quals);
3163 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
3164
3165 // Re-find the insert position.
3166 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
3167 }
3168
3169 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
3170 ExtQualNodes.InsertNode(eq, insertPos);
3171 return QualType(eq, fastQuals);
3172}
3173
3175 LangAS AddressSpace) const {
3176 QualType CanT = getCanonicalType(T);
3177 if (CanT.getAddressSpace() == AddressSpace)
3178 return T;
3179
3180 // If we are composing extended qualifiers together, merge together
3181 // into one ExtQuals node.
3182 QualifierCollector Quals;
3183 const Type *TypeNode = Quals.strip(T);
3184
3185 // If this type already has an address space specified, it cannot get
3186 // another one.
3187 assert(!Quals.hasAddressSpace() &&
3188 "Type cannot be in multiple addr spaces!");
3189 Quals.addAddressSpace(AddressSpace);
3190
3191 return getExtQualType(TypeNode, Quals);
3192}
3193
3195 // If the type is not qualified with an address space, just return it
3196 // immediately.
3197 if (!T.hasAddressSpace())
3198 return T;
3199
3200 QualifierCollector Quals;
3201 const Type *TypeNode;
3202 // For arrays, strip the qualifier off the element type, then reconstruct the
3203 // array type
3204 if (T.getTypePtr()->isArrayType()) {
3205 T = getUnqualifiedArrayType(T, Quals);
3206 TypeNode = T.getTypePtr();
3207 } else {
3208 // If we are composing extended qualifiers together, merge together
3209 // into one ExtQuals node.
3210 while (T.hasAddressSpace()) {
3211 TypeNode = Quals.strip(T);
3212
3213 // If the type no longer has an address space after stripping qualifiers,
3214 // jump out.
3215 if (!QualType(TypeNode, 0).hasAddressSpace())
3216 break;
3217
3218 // There might be sugar in the way. Strip it and try again.
3219 T = T.getSingleStepDesugaredType(*this);
3220 }
3221 }
3222
3223 Quals.removeAddressSpace();
3224
3225 // Removal of the address space can mean there are no longer any
3226 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
3227 // or required.
3228 if (Quals.hasNonFastQualifiers())
3229 return getExtQualType(TypeNode, Quals);
3230 else
3231 return QualType(TypeNode, Quals.getFastQualifiers());
3232}
3233
3234uint16_t
3236 assert(RD->isPolymorphic() &&
3237 "Attempted to get vtable pointer discriminator on a monomorphic type");
3238 std::unique_ptr<MangleContext> MC(createMangleContext());
3239 SmallString<256> Str;
3240 llvm::raw_svector_ostream Out(Str);
3241 MC->mangleCXXVTable(RD, Out);
3242 return llvm::getPointerAuthStableSipHash(Str);
3243}
3244
3245/// Encode a function type for use in the discriminator of a function pointer
3246/// type. We can't use the itanium scheme for this since C has quite permissive
3247/// rules for type compatibility that we need to be compatible with.
3248///
3249/// Formally, this function associates every function pointer type T with an
3250/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as
3251/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type
3252/// compatibility requires equivalent treatment under the ABI, so
3253/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be
3254/// a subset of ~. Crucially, however, it must be a proper subset because
3255/// CCompatible is not an equivalence relation: for example, int[] is compatible
3256/// with both int[1] and int[2], but the latter are not compatible with each
3257/// other. Therefore this encoding function must be careful to only distinguish
3258/// types if there is no third type with which they are both required to be
3259/// compatible.
3261 raw_ostream &OS, QualType QT) {
3262 // FIXME: Consider address space qualifiers.
3263 const Type *T = QT.getCanonicalType().getTypePtr();
3264
3265 // FIXME: Consider using the C++ type mangling when we encounter a construct
3266 // that is incompatible with C.
3267
3268 switch (T->getTypeClass()) {
3269 case Type::Atomic:
3271 Ctx, OS, cast<AtomicType>(T)->getValueType());
3272
3273 case Type::LValueReference:
3274 OS << "R";
3277 return;
3278 case Type::RValueReference:
3279 OS << "O";
3282 return;
3283
3284 case Type::Pointer:
3285 // C11 6.7.6.1p2:
3286 // For two pointer types to be compatible, both shall be identically
3287 // qualified and both shall be pointers to compatible types.
3288 // FIXME: we should also consider pointee types.
3289 OS << "P";
3290 return;
3291
3292 case Type::ObjCObjectPointer:
3293 case Type::BlockPointer:
3294 OS << "P";
3295 return;
3296
3297 case Type::Complex:
3298 OS << "C";
3300 Ctx, OS, cast<ComplexType>(T)->getElementType());
3301
3302 case Type::VariableArray:
3303 case Type::ConstantArray:
3304 case Type::IncompleteArray:
3305 case Type::ArrayParameter:
3306 // C11 6.7.6.2p6:
3307 // For two array types to be compatible, both shall have compatible
3308 // element types, and if both size specifiers are present, and are integer
3309 // constant expressions, then both size specifiers shall have the same
3310 // constant value [...]
3311 //
3312 // So since ElemType[N] has to be compatible ElemType[], we can't encode the
3313 // width of the array.
3314 OS << "A";
3316 Ctx, OS, cast<ArrayType>(T)->getElementType());
3317
3318 case Type::ObjCInterface:
3319 case Type::ObjCObject:
3320 OS << "<objc_object>";
3321 return;
3322
3323 case Type::Enum: {
3324 // C11 6.7.2.2p4:
3325 // Each enumerated type shall be compatible with char, a signed integer
3326 // type, or an unsigned integer type.
3327 //
3328 // So we have to treat enum types as integers.
3329 QualType UnderlyingType = T->castAsEnumDecl()->getIntegerType();
3331 Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType);
3332 }
3333
3334 case Type::FunctionNoProto:
3335 case Type::FunctionProto: {
3336 // C11 6.7.6.3p15:
3337 // For two function types to be compatible, both shall specify compatible
3338 // return types. Moreover, the parameter type lists, if both are present,
3339 // shall agree in the number of parameters and in the use of the ellipsis
3340 // terminator; corresponding parameters shall have compatible types.
3341 //
3342 // That paragraph goes on to describe how unprototyped functions are to be
3343 // handled, which we ignore here. Unprototyped function pointers are hashed
3344 // as though they were prototyped nullary functions since thats probably
3345 // what the user meant. This behavior is non-conforming.
3346 // FIXME: If we add a "custom discriminator" function type attribute we
3347 // should encode functions as their discriminators.
3348 OS << "F";
3349 const auto *FuncType = cast<FunctionType>(T);
3350 encodeTypeForFunctionPointerAuth(Ctx, OS, FuncType->getReturnType());
3351 if (const auto *FPT = dyn_cast<FunctionProtoType>(FuncType)) {
3352 for (QualType Param : FPT->param_types()) {
3353 Param = Ctx.getSignatureParameterType(Param);
3354 encodeTypeForFunctionPointerAuth(Ctx, OS, Param);
3355 }
3356 if (FPT->isVariadic())
3357 OS << "z";
3358 }
3359 OS << "E";
3360 return;
3361 }
3362
3363 case Type::MemberPointer: {
3364 OS << "M";
3365 const auto *MPT = T->castAs<MemberPointerType>();
3367 Ctx, OS, QualType(MPT->getQualifier().getAsType(), 0));
3368 encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType());
3369 return;
3370 }
3371 case Type::ExtVector:
3372 case Type::Vector:
3373 OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity();
3374 break;
3375
3376 // Don't bother discriminating based on these types.
3377 case Type::Pipe:
3378 case Type::BitInt:
3379 case Type::ConstantMatrix:
3380 OS << "?";
3381 return;
3382
3383 case Type::Builtin: {
3384 const auto *BTy = T->castAs<BuiltinType>();
3385 switch (BTy->getKind()) {
3386#define SIGNED_TYPE(Id, SingletonId) \
3387 case BuiltinType::Id: \
3388 OS << "i"; \
3389 return;
3390#define UNSIGNED_TYPE(Id, SingletonId) \
3391 case BuiltinType::Id: \
3392 OS << "i"; \
3393 return;
3394#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
3395#define BUILTIN_TYPE(Id, SingletonId)
3396#include "clang/AST/BuiltinTypes.def"
3397 llvm_unreachable("placeholder types should not appear here.");
3398
3399 case BuiltinType::Half:
3400 OS << "Dh";
3401 return;
3402 case BuiltinType::Float:
3403 OS << "f";
3404 return;
3405 case BuiltinType::Double:
3406 OS << "d";
3407 return;
3408 case BuiltinType::LongDouble:
3409 OS << "e";
3410 return;
3411 case BuiltinType::Float16:
3412 OS << "DF16_";
3413 return;
3414 case BuiltinType::Float128:
3415 OS << "g";
3416 return;
3417
3418 case BuiltinType::Void:
3419 OS << "v";
3420 return;
3421
3422 case BuiltinType::ObjCId:
3423 case BuiltinType::ObjCClass:
3424 case BuiltinType::ObjCSel:
3425 case BuiltinType::NullPtr:
3426 OS << "P";
3427 return;
3428
3429 // Don't bother discriminating based on OpenCL types.
3430 case BuiltinType::OCLSampler:
3431 case BuiltinType::OCLEvent:
3432 case BuiltinType::OCLClkEvent:
3433 case BuiltinType::OCLQueue:
3434 case BuiltinType::OCLReserveID:
3435 case BuiltinType::BFloat16:
3436 case BuiltinType::VectorQuad:
3437 case BuiltinType::VectorPair:
3438 case BuiltinType::DMR1024:
3439 case BuiltinType::DMR2048:
3440 OS << "?";
3441 return;
3442
3443 // Don't bother discriminating based on these seldom-used types.
3444 case BuiltinType::Ibm128:
3445 return;
3446#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3447 case BuiltinType::Id: \
3448 return;
3449#include "clang/Basic/OpenCLImageTypes.def"
3450#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3451 case BuiltinType::Id: \
3452 return;
3453#include "clang/Basic/OpenCLExtensionTypes.def"
3454#define SVE_TYPE(Name, Id, SingletonId) \
3455 case BuiltinType::Id: \
3456 return;
3457#include "clang/Basic/AArch64ACLETypes.def"
3458#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) \
3459 case BuiltinType::Id: \
3460 return;
3461#include "clang/Basic/HLSLIntangibleTypes.def"
3462 case BuiltinType::Dependent:
3463 llvm_unreachable("should never get here");
3464#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
3465#include "clang/Basic/AMDGPUTypes.def"
3466 case BuiltinType::WasmExternRef:
3467#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3468#include "clang/Basic/RISCVVTypes.def"
3469 llvm_unreachable("not yet implemented");
3470 }
3471 llvm_unreachable("should never get here");
3472 }
3473 case Type::Record: {
3474 const RecordDecl *RD = T->castAsCanonical<RecordType>()->getDecl();
3475 const IdentifierInfo *II = RD->getIdentifier();
3476
3477 // In C++, an immediate typedef of an anonymous struct or union
3478 // is considered to name it for ODR purposes, but C's specification
3479 // of type compatibility does not have a similar rule. Using the typedef
3480 // name in function type discriminators anyway, as we do here,
3481 // therefore technically violates the C standard: two function pointer
3482 // types defined in terms of two typedef'd anonymous structs with
3483 // different names are formally still compatible, but we are assigning
3484 // them different discriminators and therefore incompatible ABIs.
3485 //
3486 // This is a relatively minor violation that significantly improves
3487 // discrimination in some cases and has not caused problems in
3488 // practice. Regardless, it is now part of the ABI in places where
3489 // function type discrimination is used, and it can no longer be
3490 // changed except on new platforms.
3491
3492 if (!II)
3493 if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl())
3494 II = Typedef->getDeclName().getAsIdentifierInfo();
3495
3496 if (!II) {
3497 OS << "<anonymous_record>";
3498 return;
3499 }
3500 OS << II->getLength() << II->getName();
3501 return;
3502 }
3503 case Type::HLSLAttributedResource:
3504 case Type::HLSLInlineSpirv:
3505 llvm_unreachable("should never get here");
3506 break;
3507 case Type::DeducedTemplateSpecialization:
3508 case Type::Auto:
3509#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3510#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3511#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3512#define ABSTRACT_TYPE(Class, Base)
3513#define TYPE(Class, Base)
3514#include "clang/AST/TypeNodes.inc"
3515 llvm_unreachable("unexpected non-canonical or dependent type!");
3516 return;
3517 }
3518}
3519
3521 assert(!T->isDependentType() &&
3522 "cannot compute type discriminator of a dependent type");
3523 SmallString<256> Str;
3524 llvm::raw_svector_ostream Out(Str);
3525
3526 if (T->isFunctionPointerType() || T->isFunctionReferenceType())
3527 T = T->getPointeeType();
3528
3529 if (T->isFunctionType()) {
3531 } else {
3532 T = T.getUnqualifiedType();
3533 // Calls to member function pointers don't need to worry about
3534 // language interop or the laxness of the C type compatibility rules.
3535 // We just mangle the member pointer type directly, which is
3536 // implicitly much stricter about type matching. However, we do
3537 // strip any top-level exception specification before this mangling.
3538 // C++23 requires calls to work when the function type is convertible
3539 // to the pointer type by a function pointer conversion, which can
3540 // change the exception specification. This does not technically
3541 // require the exception specification to not affect representation,
3542 // because the function pointer conversion is still always a direct
3543 // value conversion and therefore an opportunity to resign the
3544 // pointer. (This is in contrast to e.g. qualification conversions,
3545 // which can be applied in nested pointer positions, effectively
3546 // requiring qualified and unqualified representations to match.)
3547 // However, it is pragmatic to ignore exception specifications
3548 // because it allows a certain amount of `noexcept` mismatching
3549 // to not become a visible ODR problem. This also leaves some
3550 // room for the committee to add laxness to function pointer
3551 // conversions in future standards.
3552 if (auto *MPT = T->getAs<MemberPointerType>())
3553 if (MPT->isMemberFunctionPointer()) {
3554 QualType PointeeType = MPT->getPointeeType();
3555 if (PointeeType->castAs<FunctionProtoType>()->getExceptionSpecType() !=
3556 EST_None) {
3558 T = getMemberPointerType(FT, MPT->getQualifier(),
3559 MPT->getMostRecentCXXRecordDecl());
3560 }
3561 }
3562 std::unique_ptr<MangleContext> MC(createMangleContext());
3563 MC->mangleCanonicalTypeName(T, Out);
3564 }
3565
3566 return llvm::getPointerAuthStableSipHash(Str);
3567}
3568
3570 Qualifiers::GC GCAttr) const {
3571 QualType CanT = getCanonicalType(T);
3572 if (CanT.getObjCGCAttr() == GCAttr)
3573 return T;
3574
3575 if (const auto *ptr = T->getAs<PointerType>()) {
3576 QualType Pointee = ptr->getPointeeType();
3577 if (Pointee->isAnyPointerType()) {
3578 QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
3579 return getPointerType(ResultType);
3580 }
3581 }
3582
3583 // If we are composing extended qualifiers together, merge together
3584 // into one ExtQuals node.
3585 QualifierCollector Quals;
3586 const Type *TypeNode = Quals.strip(T);
3587
3588 // If this type already has an ObjCGC specified, it cannot get
3589 // another one.
3590 assert(!Quals.hasObjCGCAttr() &&
3591 "Type cannot have multiple ObjCGCs!");
3592 Quals.addObjCGCAttr(GCAttr);
3593
3594 return getExtQualType(TypeNode, Quals);
3595}
3596
3598 if (const PointerType *Ptr = T->getAs<PointerType>()) {
3599 QualType Pointee = Ptr->getPointeeType();
3600 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
3601 return getPointerType(removeAddrSpaceQualType(Pointee));
3602 }
3603 }
3604 return T;
3605}
3606
3608 QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull,
3609 ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const {
3610 assert(WrappedTy->isPointerType() || WrappedTy->isArrayType());
3611
3612 llvm::FoldingSetNodeID ID;
3613 CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, OrNull);
3614
3615 void *InsertPos = nullptr;
3616 CountAttributedType *CATy =
3617 CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
3618 if (CATy)
3619 return QualType(CATy, 0);
3620
3621 QualType CanonTy = getCanonicalType(WrappedTy);
3622 size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>(
3623 DependentDecls.size());
3625 new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes,
3626 OrNull, DependentDecls);
3627 Types.push_back(CATy);
3628 CountAttributedTypes.InsertNode(CATy, InsertPos);
3629
3630 return QualType(CATy, 0);
3631}
3632
3635 llvm::function_ref<QualType(QualType)> Adjust) const {
3636 switch (Orig->getTypeClass()) {
3637 case Type::Attributed: {
3638 const auto *AT = cast<AttributedType>(Orig);
3639 return getAttributedType(AT->getAttrKind(),
3640 adjustType(AT->getModifiedType(), Adjust),
3641 adjustType(AT->getEquivalentType(), Adjust),
3642 AT->getAttr());
3643 }
3644
3645 case Type::BTFTagAttributed: {
3646 const auto *BTFT = dyn_cast<BTFTagAttributedType>(Orig);
3647 return getBTFTagAttributedType(BTFT->getAttr(),
3648 adjustType(BTFT->getWrappedType(), Adjust));
3649 }
3650
3651 case Type::Paren:
3652 return getParenType(
3653 adjustType(cast<ParenType>(Orig)->getInnerType(), Adjust));
3654
3655 case Type::Adjusted: {
3656 const auto *AT = cast<AdjustedType>(Orig);
3657 return getAdjustedType(AT->getOriginalType(),
3658 adjustType(AT->getAdjustedType(), Adjust));
3659 }
3660
3661 case Type::MacroQualified: {
3662 const auto *MQT = cast<MacroQualifiedType>(Orig);
3663 return getMacroQualifiedType(adjustType(MQT->getUnderlyingType(), Adjust),
3664 MQT->getMacroIdentifier());
3665 }
3666
3667 default:
3668 return Adjust(Orig);
3669 }
3670}
3671
3673 FunctionType::ExtInfo Info) {
3674 if (T->getExtInfo() == Info)
3675 return T;
3676
3678 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
3679 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
3680 } else {
3681 const auto *FPT = cast<FunctionProtoType>(T);
3682 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
3683 EPI.ExtInfo = Info;
3684 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
3685 }
3686
3687 return cast<FunctionType>(Result.getTypePtr());
3688}
3689
3691 QualType ResultType) {
3692 return adjustType(FunctionType, [&](QualType Orig) {
3693 if (const auto *FNPT = Orig->getAs<FunctionNoProtoType>())
3694 return getFunctionNoProtoType(ResultType, FNPT->getExtInfo());
3695
3696 const auto *FPT = Orig->castAs<FunctionProtoType>();
3697 return getFunctionType(ResultType, FPT->getParamTypes(),
3698 FPT->getExtProtoInfo());
3699 });
3700}
3701
3703 QualType ResultType) {
3704 FD = FD->getMostRecentDecl();
3705 while (true) {
3706 FD->setType(adjustFunctionResultType(FD->getType(), ResultType));
3707 if (FunctionDecl *Next = FD->getPreviousDecl())
3708 FD = Next;
3709 else
3710 break;
3711 }
3713 L->DeducedReturnType(FD, ResultType);
3714}
3715
3716/// Get a function type and produce the equivalent function type with the
3717/// specified exception specification. Type sugar that can be present on a
3718/// declaration of a function with an exception specification is permitted
3719/// and preserved. Other type sugar (for instance, typedefs) is not.
3721 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
3722 return adjustType(Orig, [&](QualType Ty) {
3723 const auto *Proto = Ty->castAs<FunctionProtoType>();
3724 return getFunctionType(Proto->getReturnType(), Proto->getParamTypes(),
3725 Proto->getExtProtoInfo().withExceptionSpec(ESI));
3726 });
3727}
3728
3736
3738 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3739 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3740 SmallVector<QualType, 16> Args(Proto->param_types().size());
3741 for (unsigned i = 0, n = Args.size(); i != n; ++i)
3742 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
3743 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
3744 }
3745
3746 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
3747 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
3748 return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
3749 }
3750
3751 return T;
3752}
3753
3759
3761 if (const auto *Proto = T->getAs<FunctionProtoType>()) {
3762 FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
3763 EPI.ExtParameterInfos = nullptr;
3764 return getFunctionType(Proto->getReturnType(), Proto->param_types(), EPI);
3765 }
3766 return T;
3767}
3768
3774
3777 bool AsWritten) {
3778 // Update the type.
3779 QualType Updated =
3781 FD->setType(Updated);
3782
3783 if (!AsWritten)
3784 return;
3785
3786 // Update the type in the type source information too.
3787 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
3788 // If the type and the type-as-written differ, we may need to update
3789 // the type-as-written too.
3790 if (TSInfo->getType() != FD->getType())
3791 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);
3792
3793 // FIXME: When we get proper type location information for exceptions,
3794 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
3795 // up the TypeSourceInfo;
3796 assert(TypeLoc::getFullDataSizeForType(Updated) ==
3797 TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
3798 "TypeLoc size mismatch from updating exception specification");
3799 TSInfo->overrideType(Updated);
3800 }
3801}
3802
3803/// getComplexType - Return the uniqued reference to the type for a complex
3804/// number with the specified element type.
3806 // Unique pointers, to guarantee there is only one pointer of a particular
3807 // structure.
3808 llvm::FoldingSetNodeID ID;
3810
3811 void *InsertPos = nullptr;
3812 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
3813 return QualType(CT, 0);
3814
3815 // If the pointee type isn't canonical, this won't be a canonical type either,
3816 // so fill in the canonical type field.
3817 QualType Canonical;
3818 if (!T.isCanonical()) {
3819 Canonical = getComplexType(getCanonicalType(T));
3820
3821 // Get the new insert position for the node we care about.
3822 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
3823 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3824 }
3825 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
3826 Types.push_back(New);
3827 ComplexTypes.InsertNode(New, InsertPos);
3828 return QualType(New, 0);
3829}
3830
3831/// getPointerType - Return the uniqued reference to the type for a pointer to
3832/// the specified type.
3834 // Unique pointers, to guarantee there is only one pointer of a particular
3835 // structure.
3836 llvm::FoldingSetNodeID ID;
3838
3839 void *InsertPos = nullptr;
3840 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3841 return QualType(PT, 0);
3842
3843 // If the pointee type isn't canonical, this won't be a canonical type either,
3844 // so fill in the canonical type field.
3845 QualType Canonical;
3846 if (!T.isCanonical()) {
3847 Canonical = getPointerType(getCanonicalType(T));
3848
3849 // Get the new insert position for the node we care about.
3850 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
3851 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3852 }
3853 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
3854 Types.push_back(New);
3855 PointerTypes.InsertNode(New, InsertPos);
3856 return QualType(New, 0);
3857}
3858
3860 llvm::FoldingSetNodeID ID;
3861 AdjustedType::Profile(ID, Orig, New);
3862 void *InsertPos = nullptr;
3863 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3864 if (AT)
3865 return QualType(AT, 0);
3866
3867 QualType Canonical = getCanonicalType(New);
3868
3869 // Get the new insert position for the node we care about.
3870 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3871 assert(!AT && "Shouldn't be in the map!");
3872
3873 AT = new (*this, alignof(AdjustedType))
3874 AdjustedType(Type::Adjusted, Orig, New, Canonical);
3875 Types.push_back(AT);
3876 AdjustedTypes.InsertNode(AT, InsertPos);
3877 return QualType(AT, 0);
3878}
3879
3881 llvm::FoldingSetNodeID ID;
3882 AdjustedType::Profile(ID, Orig, Decayed);
3883 void *InsertPos = nullptr;
3884 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3885 if (AT)
3886 return QualType(AT, 0);
3887
3888 QualType Canonical = getCanonicalType(Decayed);
3889
3890 // Get the new insert position for the node we care about.
3891 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
3892 assert(!AT && "Shouldn't be in the map!");
3893
3894 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
3895 Types.push_back(AT);
3896 AdjustedTypes.InsertNode(AT, InsertPos);
3897 return QualType(AT, 0);
3898}
3899
3901 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
3902
3903 QualType Decayed;
3904
3905 // C99 6.7.5.3p7:
3906 // A declaration of a parameter as "array of type" shall be
3907 // adjusted to "qualified pointer to type", where the type
3908 // qualifiers (if any) are those specified within the [ and ] of
3909 // the array type derivation.
3910 if (T->isArrayType())
3911 Decayed = getArrayDecayedType(T);
3912
3913 // C99 6.7.5.3p8:
3914 // A declaration of a parameter as "function returning type"
3915 // shall be adjusted to "pointer to function returning type", as
3916 // in 6.3.2.1.
3917 if (T->isFunctionType())
3918 Decayed = getPointerType(T);
3919
3920 return getDecayedType(T, Decayed);
3921}
3922
3924 if (Ty->isArrayParameterType())
3925 return Ty;
3926 assert(Ty->isConstantArrayType() && "Ty must be an array type.");
3927 QualType DTy = Ty.getDesugaredType(*this);
3928 const auto *ATy = cast<ConstantArrayType>(DTy);
3929 llvm::FoldingSetNodeID ID;
3930 ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(),
3931 ATy->getSizeExpr(), ATy->getSizeModifier(),
3932 ATy->getIndexTypeQualifiers().getAsOpaqueValue());
3933 void *InsertPos = nullptr;
3934 ArrayParameterType *AT =
3935 ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
3936 if (AT)
3937 return QualType(AT, 0);
3938
3939 QualType Canonical;
3940 if (!DTy.isCanonical()) {
3941 Canonical = getArrayParameterType(getCanonicalType(Ty));
3942
3943 // Get the new insert position for the node we care about.
3944 AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos);
3945 assert(!AT && "Shouldn't be in the map!");
3946 }
3947
3948 AT = new (*this, alignof(ArrayParameterType))
3949 ArrayParameterType(ATy, Canonical);
3950 Types.push_back(AT);
3951 ArrayParameterTypes.InsertNode(AT, InsertPos);
3952 return QualType(AT, 0);
3953}
3954
3955/// getBlockPointerType - Return the uniqued reference to the type for
3956/// a pointer to the specified block.
// NOTE(review): the signature line (original 3957), the Profile call for the
// pointee (around original 3962), and the canonical-pointee computation
// (around original 3973) were dropped during extraction -- confirm against
// upstream ASTContext.cpp.
3958 assert(T->isFunctionType() && "block of function types only");
3959 // Unique pointers, to guarantee there is only one block of a particular
3960 // structure.
3961 llvm::FoldingSetNodeID ID;
3963
3964 void *InsertPos = nullptr;
3965 if (BlockPointerType *PT =
3966 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
3967 return QualType(PT, 0);
3968
3969 // If the block pointee type isn't canonical, this won't be a canonical
3970 // type either so fill in the canonical type field.
3971 QualType Canonical;
3972 if (!T.isCanonical()) {
3974
3975 // Get the new insert position for the node we care about.
3976 BlockPointerType *NewIP =
3977 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)
3978 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3979 }
3980 auto *New =
3981 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
3982 Types.push_back(New);
3983 BlockPointerTypes.InsertNode(New, InsertPos);
3984 return QualType(New, 0);
3985}
3986
3987/// getLValueReferenceType - Return the uniqued reference to the type for an
3988/// lvalue reference to the specified type.
// NOTE(review): the return-type line (original 3989, presumably "QualType")
// was dropped during extraction -- confirm against upstream.
3990ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
3991 assert((!T->isPlaceholderType() ||
3992 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
3993 "Unresolved placeholder type");
3994
3995 // Unique pointers, to guarantee there is only one pointer of a particular
3996 // structure.
3997 llvm::FoldingSetNodeID ID;
3998 ReferenceType::Profile(ID, T, SpelledAsLValue);
3999
4000 void *InsertPos = nullptr;
4001 if (LValueReferenceType *RT =
4002 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
4003 return QualType(RT, 0);
4004
4005 const auto *InnerRef = T->getAs<ReferenceType>();
4006
4007 // If the referencee type isn't canonical, this won't be a canonical type
4008 // either, so fill in the canonical type field.
// A reference-to-reference collapses: the canonical node refers to the inner
// reference's pointee. A non-spelled-as-lvalue reference is also sugar.
4009 QualType Canonical;
4010 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
4011 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
4012 Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
4013
4014 // Get the new insert position for the node we care about.
4015 LValueReferenceType *NewIP =
4016 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
4017 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4018 }
4019
4020 auto *New = new (*this, alignof(LValueReferenceType))
4021 LValueReferenceType(T, Canonical, SpelledAsLValue);
4022 Types.push_back(New);
4023 LValueReferenceTypes.InsertNode(New, InsertPos);
4024
4025 return QualType(New, 0);
4026}
4027
4028/// getRValueReferenceType - Return the uniqued reference to the type for an
4029/// rvalue reference to the specified type.
// NOTE(review): the signature line (original 4030) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4031 assert((!T->isPlaceholderType() ||
4032 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
4033 "Unresolved placeholder type");
4034
4035 // Unique pointers, to guarantee there is only one pointer of a particular
4036 // structure.
4037 llvm::FoldingSetNodeID ID;
4038 ReferenceType::Profile(ID, T, false);
4039
4040 void *InsertPos = nullptr;
4041 if (RValueReferenceType *RT =
4042 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
4043 return QualType(RT, 0);
4044
4045 const auto *InnerRef = T->getAs<ReferenceType>();
4046
4047 // If the referencee type isn't canonical, this won't be a canonical type
4048 // either, so fill in the canonical type field.
// Reference-to-reference collapses to a reference to the inner pointee.
4049 QualType Canonical;
4050 if (InnerRef || !T.isCanonical()) {
4051 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
4052 Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
4053
4054 // Get the new insert position for the node we care about.
4055 RValueReferenceType *NewIP =
4056 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
4057 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4058 }
4059
4060 auto *New = new (*this, alignof(RValueReferenceType))
4061 RValueReferenceType(T, Canonical);
4062 Types.push_back(New);
4063 RValueReferenceTypes.InsertNode(New, InsertPos);
4064 return QualType(New, 0);
4065}
4066
// Returns the uniqued member-pointer type "T Qualifier::*". At least one of
// Qualifier or Cls must be provided; the missing one is derived from the
// other.
// NOTE(review): the signature start (original 4067) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4068 NestedNameSpecifier Qualifier,
4069 const CXXRecordDecl *Cls) const {
4070 if (!Qualifier) {
4071 assert(Cls && "At least one of Qualifier or Cls must be provided");
4072 Qualifier = NestedNameSpecifier(getCanonicalTagType(Cls).getTypePtr());
4073 } else if (!Cls) {
4074 Cls = Qualifier.getAsRecordDecl();
4075 }
4076 // Unique pointers, to guarantee there is only one pointer of a particular
4077 // structure.
4078 llvm::FoldingSetNodeID ID;
4079 MemberPointerType::Profile(ID, T, Qualifier, Cls);
4080
4081 void *InsertPos = nullptr;
4082 if (MemberPointerType *PT =
4083 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
4084 return QualType(PT, 0);
4085
// Prefer the class declaration (when known) as the canonical qualifier;
// otherwise canonicalize whatever qualifier was supplied.
4086 NestedNameSpecifier CanonicalQualifier = [&] {
4087 if (!Cls)
4088 return Qualifier.getCanonical();
4089 NestedNameSpecifier R(getCanonicalTagType(Cls).getTypePtr());
4090 assert(R.isCanonical());
4091 return R;
4092 }();
4093 // If the pointee or class type isn't canonical, this won't be a canonical
4094 // type either, so fill in the canonical type field.
4095 QualType Canonical;
4096 if (!T.isCanonical() || Qualifier != CanonicalQualifier) {
4097 Canonical =
4098 getMemberPointerType(getCanonicalType(T), CanonicalQualifier, Cls);
4099 assert(!cast<MemberPointerType>(Canonical)->isSugared());
4100 // Get the new insert position for the node we care about.
4101 [[maybe_unused]] MemberPointerType *NewIP =
4102 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
4103 assert(!NewIP && "Shouldn't be in the map!");
4104 }
4105 auto *New = new (*this, alignof(MemberPointerType))
4106 MemberPointerType(T, Qualifier, Canonical);
4107 Types.push_back(New);
4108 MemberPointerTypes.InsertNode(New, InsertPos);
4109 return QualType(New, 0);
4110}
4111
4112/// getConstantArrayType - Return the unique reference to the type for an
4113/// array of the specified element type.
// NOTE(review): the signature start (original 4114) and the ArraySizeModifier
// parameter line (original 4117) were dropped during extraction -- confirm
// against upstream ASTContext.cpp.
4115 const llvm::APInt &ArySizeIn,
4116 const Expr *SizeExpr,
4118 unsigned IndexTypeQuals) const {
4119 assert((EltTy->isDependentType() ||
4120 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
4121 "Constant array of VLAs is illegal!");
4122
4123 // We only need the size as part of the type if it's instantiation-dependent.
4124 if (SizeExpr && !SizeExpr->isInstantiationDependent())
4125 SizeExpr = nullptr;
4126
4127 // Convert the array size into a canonical width matching the pointer size for
4128 // the target.
4129 llvm::APInt ArySize(ArySizeIn);
4130 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
4131
4132 llvm::FoldingSetNodeID ID;
4133 ConstantArrayType::Profile(ID, *this, EltTy, ArySize.getZExtValue(), SizeExpr,
4134 ASM, IndexTypeQuals);
4135
4136 void *InsertPos = nullptr;
4137 if (ConstantArrayType *ATP =
4138 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
4139 return QualType(ATP, 0);
4140
4141 // If the element type isn't canonical or has qualifiers, or the array bound
4142 // is instantiation-dependent, this won't be a canonical type either, so fill
4143 // in the canonical type field.
4144 QualType Canon;
4145 // FIXME: Check below should look for qualifiers behind sugar.
4146 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
// Qualifiers are pulled off the element type and re-applied to the array as a
// whole (array-of-qualified canonicalizes to qualified-array-of).
4147 SplitQualType canonSplit = getCanonicalType(EltTy).split();
4148 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
4149 ASM, IndexTypeQuals);
4150 Canon = getQualifiedType(Canon, canonSplit.Quals);
4151
4152 // Get the new insert position for the node we care about.
4153 ConstantArrayType *NewIP =
4154 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
4155 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4156 }
4157
4158 auto *New = ConstantArrayType::Create(*this, EltTy, Canon, ArySize, SizeExpr,
4159 ASM, IndexTypeQuals);
4160 ConstantArrayTypes.InsertNode(New, InsertPos);
4161 Types.push_back(New);
4162 return QualType(New, 0);
4163}
4164
4165/// getVariableArrayDecayedType - Turns the given type, which may be
4166/// variably-modified, into the corresponding type with all the known
4167/// sizes replaced with [*].
// NOTE(review): the signature line (original 4168) and several case-body
// statement lines (around originals 4234-4235, 4272, 4282, 4292) were dropped
// during extraction -- confirm the full case bodies against upstream.
4169 // Vastly most common case.
4170 if (!type->isVariablyModifiedType()) return type;
4171
4172 QualType result;
4173
// Strip top-level sugar and qualifiers; qualifiers are re-applied at the end.
4174 SplitQualType split = type.getSplitDesugaredType();
4175 const Type *ty = split.Ty;
4176 switch (ty->getTypeClass()) {
4177#define TYPE(Class, Base)
4178#define ABSTRACT_TYPE(Class, Base)
4179#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
4180#include "clang/AST/TypeNodes.inc"
4181 llvm_unreachable("didn't desugar past all non-canonical types?")
4182
4183 // These types should never be variably-modified.
4184 case Type::Builtin:
4185 case Type::Complex:
4186 case Type::Vector:
4187 case Type::DependentVector:
4188 case Type::ExtVector:
4189 case Type::DependentSizedExtVector:
4190 case Type::ConstantMatrix:
4191 case Type::DependentSizedMatrix:
4192 case Type::DependentAddressSpace:
4193 case Type::ObjCObject:
4194 case Type::ObjCInterface:
4195 case Type::ObjCObjectPointer:
4196 case Type::Record:
4197 case Type::Enum:
4198 case Type::UnresolvedUsing:
4199 case Type::TypeOfExpr:
4200 case Type::TypeOf:
4201 case Type::Decltype:
4202 case Type::UnaryTransform:
4203 case Type::DependentName:
4204 case Type::InjectedClassName:
4205 case Type::TemplateSpecialization:
4206 case Type::TemplateTypeParm:
4207 case Type::SubstTemplateTypeParmPack:
4208 case Type::SubstBuiltinTemplatePack:
4209 case Type::Auto:
4210 case Type::DeducedTemplateSpecialization:
4211 case Type::PackExpansion:
4212 case Type::PackIndexing:
4213 case Type::BitInt:
4214 case Type::DependentBitInt:
4215 case Type::ArrayParameter:
4216 case Type::HLSLAttributedResource:
4217 case Type::HLSLInlineSpirv:
4218 llvm_unreachable("type should never be variably-modified");
4219
4220 // These types can be variably-modified but should never need to
4221 // further decay.
4222 case Type::FunctionNoProto:
4223 case Type::FunctionProto:
4224 case Type::BlockPointer:
4225 case Type::MemberPointer:
4226 case Type::Pipe:
4227 return type;
4228
4229 // These types can be variably-modified. All these modifications
4230 // preserve structure except as noted by comments.
4231 // TODO: if we ever care about optimizing VLAs, there are no-op
4232 // optimizations available here.
4233 case Type::Pointer:
4236 break;
4237
4238 case Type::LValueReference: {
4239 const auto *lv = cast<LValueReferenceType>(ty);
4240 result = getLValueReferenceType(
4241 getVariableArrayDecayedType(lv->getPointeeType()),
4242 lv->isSpelledAsLValue());
4243 break;
4244 }
4245
4246 case Type::RValueReference: {
4247 const auto *lv = cast<RValueReferenceType>(ty);
4248 result = getRValueReferenceType(
4249 getVariableArrayDecayedType(lv->getPointeeType()));
4250 break;
4251 }
4252
4253 case Type::Atomic: {
4254 const auto *at = cast<AtomicType>(ty);
4255 result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
4256 break;
4257 }
4258
4259 case Type::ConstantArray: {
4260 const auto *cat = cast<ConstantArrayType>(ty);
4261 result = getConstantArrayType(
4262 getVariableArrayDecayedType(cat->getElementType()),
4263 cat->getSize(),
4264 cat->getSizeExpr(),
4265 cat->getSizeModifier(),
4266 cat->getIndexTypeCVRQualifiers());
4267 break;
4268 }
4269
4270 case Type::DependentSizedArray: {
4271 const auto *dat = cast<DependentSizedArrayType>(ty);
4273 getVariableArrayDecayedType(dat->getElementType()), dat->getSizeExpr(),
4274 dat->getSizeModifier(), dat->getIndexTypeCVRQualifiers());
4275 break;
4276 }
4277
4278 // Turn incomplete types into [*] types.
4279 case Type::IncompleteArray: {
4280 const auto *iat = cast<IncompleteArrayType>(ty);
4281 result =
4283 /*size*/ nullptr, ArraySizeModifier::Normal,
4284 iat->getIndexTypeCVRQualifiers());
4285 break;
4286 }
4287
4288 // Turn VLA types into [*] types.
4289 case Type::VariableArray: {
4290 const auto *vat = cast<VariableArrayType>(ty);
4291 result =
4293 /*size*/ nullptr, ArraySizeModifier::Star,
4294 vat->getIndexTypeCVRQualifiers());
4295 break;
4296 }
4297 }
4298
4299 // Apply the top-level qualifiers from the original.
4300 return getQualifiedType(result, split.Quals);
4301}
4302
4303/// getVariableArrayType - Returns a non-unique reference to the type for a
4304/// variable array of the specified element type.
// NOTE(review): the signature start (originals 4305-4306, presumably the
// element-type/size-expr parameters) was dropped during extraction -- confirm
// against upstream ASTContext.cpp.
4307 unsigned IndexTypeQuals) const {
4308 // Since we don't unique expressions, it isn't possible to unique VLA's
4309 // that have an expression provided for their size.
4310 QualType Canon;
4311
4312 // Be sure to pull qualifiers off the element type.
4313 // FIXME: Check below should look for qualifiers behind sugar.
4314 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
4315 SplitQualType canonSplit = getCanonicalType(EltTy).split();
4316 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
4317 IndexTypeQuals);
4318 Canon = getQualifiedType(Canon, canonSplit.Quals);
4319 }
4320
// Note: no folding set here -- VLAs are intentionally non-uniqued; the node
// is tracked in VariableArrayTypes for bookkeeping instead.
4321 auto *New = new (*this, alignof(VariableArrayType))
4322 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals);
4323
4324 VariableArrayTypes.push_back(New);
4325 Types.push_back(New);
4326 return QualType(New, 0);
4327}
4328
4329/// getDependentSizedArrayType - Returns a non-unique reference to
4330/// the type for a dependently-sized array of the specified element
4331/// type.
// NOTE(review): the signature start (originals 4332-4334) and the
// DependentSizedArrayType::Profile call head (original 4344) were dropped
// during extraction -- confirm against upstream ASTContext.cpp.
4335 unsigned elementTypeQuals) const {
4336 assert((!numElements || numElements->isTypeDependent() ||
4337 numElements->isValueDependent()) &&
4338 "Size must be type- or value-dependent!");
4339
4340 SplitQualType canonElementType = getCanonicalType(elementType).split();
4341
4342 void *insertPos = nullptr;
4343 llvm::FoldingSetNodeID ID;
4345 ID, *this, numElements ? QualType(canonElementType.Ty, 0) : elementType,
4346 ASM, elementTypeQuals, numElements);
4347
4348 // Look for an existing type with these properties.
4349 DependentSizedArrayType *canonTy =
4350 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
4351
4352 // Dependently-sized array types that do not have a specified number
4353 // of elements will have their sizes deduced from a dependent
4354 // initializer.
4355 if (!numElements) {
4356 if (canonTy)
4357 return QualType(canonTy, 0);
4358
4359 auto *newType = new (*this, alignof(DependentSizedArrayType))
4360 DependentSizedArrayType(elementType, QualType(), numElements, ASM,
4361 elementTypeQuals);
4362 DependentSizedArrayTypes.InsertNode(newType, insertPos);
4363 Types.push_back(newType);
4364 return QualType(newType, 0);
4365 }
4366
4367 // If we don't have one, build one.
4368 if (!canonTy) {
4369 canonTy = new (*this, alignof(DependentSizedArrayType))
4370 DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
4371 numElements, ASM, elementTypeQuals);
4372 DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
4373 Types.push_back(canonTy);
4374 }
4375
4376 // Apply qualifiers from the element type to the array.
4377 QualType canon = getQualifiedType(QualType(canonTy,0),
4378 canonElementType.Quals);
4379
4380 // If we didn't need extra canonicalization for the element type or the size
4381 // expression, then just use that as our result.
4382 if (QualType(canonElementType.Ty, 0) == elementType &&
4383 canonTy->getSizeExpr() == numElements)
4384 return canon;
4385
4386 // Otherwise, we need to build a type which follows the spelling
4387 // of the element type.
4388 auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
4389 DependentSizedArrayType(elementType, canon, numElements, ASM,
4390 elementTypeQuals);
4391 Types.push_back(sugaredType);
4392 return QualType(sugaredType, 0);
4393}
4394
// Returns the uniqued IncompleteArrayType (an array with unknown bound, e.g.
// "int[]") for the given element type, size modifier, and index qualifiers.
// NOTE(review): the signature start (originals 4395-4396) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4397 unsigned elementTypeQuals) const {
4398 llvm::FoldingSetNodeID ID;
4399 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
4400
4401 void *insertPos = nullptr;
4402 if (IncompleteArrayType *iat =
4403 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
4404 return QualType(iat, 0);
4405
4406 // If the element type isn't canonical, this won't be a canonical type
4407 // either, so fill in the canonical type field. We also have to pull
4408 // qualifiers off the element type.
4409 QualType canon;
4410
4411 // FIXME: Check below should look for qualifiers behind sugar.
4412 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
4413 SplitQualType canonSplit = getCanonicalType(elementType).split();
4414 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
4415 ASM, elementTypeQuals);
4416 canon = getQualifiedType(canon, canonSplit.Quals);
4417
4418 // Get the new insert position for the node we care about.
4419 IncompleteArrayType *existing =
4420 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
4421 assert(!existing && "Shouldn't be in the map!"); (void) existing;
4422 }
4423
4424 auto *newType = new (*this, alignof(IncompleteArrayType))
4425 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
4426
4427 IncompleteArrayTypes.InsertNode(newType, insertPos);
4428 Types.push_back(newType);
4429 return QualType(newType, 0);
4430}
4431
// Maps a builtin scalable-vector type (AArch64 SVE or RISC-V RVV) to its
// {element type, scalable element count, number of vectors} description by
// expanding the corresponding .def files.
// NOTE(review): the signature lines (originals 4432-4433, presumably
// ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty)) were dropped
// during extraction -- confirm against upstream ASTContext.cpp.
4434#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
4435 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
4436 NUMVECTORS};
4437
4438#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
4439 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};
4440
4441 switch (Ty->getKind()) {
4442 default:
4443 llvm_unreachable("Unsupported builtin vector type");
4444
4445#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls, \
4446 ElBits, NF, IsSigned) \
4447 case BuiltinType::Id: \
4448 return {getIntTypeForBitwidth(ElBits, IsSigned), \
4449 llvm::ElementCount::getScalable(NumEls), NF};
4450#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls, \
4451 ElBits, NF) \
4452 case BuiltinType::Id: \
4453 return {ElBits == 16 ? HalfTy : (ElBits == 32 ? FloatTy : DoubleTy), \
4454 llvm::ElementCount::getScalable(NumEls), NF};
4455#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
4456 ElBits, NF) \
4457 case BuiltinType::Id: \
4458 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
4459#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
4460 ElBits, NF) \
4461 case BuiltinType::Id: \
4462 return {MFloat8Ty, llvm::ElementCount::getScalable(NumEls), NF};
4463#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
4464 case BuiltinType::Id: \
4465 return {BoolTy, llvm::ElementCount::getScalable(NumEls), NF};
4466#include "clang/Basic/AArch64ACLETypes.def"
4467
4468#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
4469 IsSigned) \
4470 case BuiltinType::Id: \
4471 return {getIntTypeForBitwidth(ElBits, IsSigned), \
4472 llvm::ElementCount::getScalable(NumEls), NF};
4473#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
4474 case BuiltinType::Id: \
4475 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
4476 llvm::ElementCount::getScalable(NumEls), NF};
4477#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
4478 case BuiltinType::Id: \
4479 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
4480#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
4481 case BuiltinType::Id: \
4482 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
4483#include "clang/Basic/RISCVVTypes.def"
4484 }
4485}
4486
4487/// getExternrefType - Return a WebAssembly externref type, which represents an
4488/// opaque reference to a host value.
// NOTE(review): the signature line (original 4489) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4490 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
// Expand the .def file and return the singleton for WasmExternRef.
4491#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
4492 if (BuiltinType::Id == BuiltinType::WasmExternRef) \
4493 return SingletonId;
4494#include "clang/Basic/WebAssemblyReferenceTypes.def"
4495 }
4496 llvm_unreachable(
4497 "shouldn't try to generate type externref outside WebAssembly target");
4498}
4499
4500/// getScalableVectorType - Return the unique reference to a scalable vector
4501/// type of the specified element type and size. VectorType must be a built-in
4502/// type.
// NOTE(review): the signature start (original 4503) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4504 unsigned NumFields) const {
// Memoize lookups: the linear macro-expanded matching below is repeated
// otherwise for every query.
4505 auto K = llvm::ScalableVecTyKey{EltTy, NumElts, NumFields};
4506 if (auto It = ScalableVecTyMap.find(K); It != ScalableVecTyMap.end())
4507 return It->second;
4508
4509 if (Target->hasAArch64ACLETypes()) {
4510 uint64_t EltTySize = getTypeSize(EltTy);
4511
4512#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls, \
4513 ElBits, NF, IsSigned) \
4514 if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() && \
4515 EltTy->hasSignedIntegerRepresentation() == IsSigned && \
4516 EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \
4517 return ScalableVecTyMap[K] = SingletonId; \
4518 }
4519#define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls, \
4520 ElBits, NF) \
4521 if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
4522 EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \
4523 return ScalableVecTyMap[K] = SingletonId; \
4524 }
4525#define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
4526 ElBits, NF) \
4527 if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
4528 EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \
4529 return ScalableVecTyMap[K] = SingletonId; \
4530 }
4531#define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls, \
4532 ElBits, NF) \
4533 if (EltTy->isMFloat8Type() && EltTySize == ElBits && \
4534 NumElts == (NumEls * NF) && NumFields == 1) { \
4535 return ScalableVecTyMap[K] = SingletonId; \
4536 }
4537#define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \
4538 if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1) \
4539 return ScalableVecTyMap[K] = SingletonId;
4540#include "clang/Basic/AArch64ACLETypes.def"
4541 } else if (Target->hasRISCVVTypes()) {
4542 uint64_t EltTySize = getTypeSize(EltTy);
4543#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
4544 IsFP, IsBF) \
4545 if (!EltTy->isBooleanType() && \
4546 ((EltTy->hasIntegerRepresentation() && \
4547 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
4548 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
4549 IsFP && !IsBF) || \
4550 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
4551 IsBF && !IsFP)) && \
4552 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
4553 return ScalableVecTyMap[K] = SingletonId;
4554#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
4555 if (EltTy->isBooleanType() && NumElts == NumEls) \
4556 return ScalableVecTyMap[K] = SingletonId;
4557#include "clang/Basic/RISCVVTypes.def"
4558 }
// No matching builtin scalable type for this target/configuration.
4559 return QualType();
4560}
4561
4562/// getVectorType - Return the unique reference to a vector type of
4563/// the specified element type and size. VectorType must be a built-in type.
// NOTE(review): the signature start (original 4564) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4565 VectorKind VecKind) const {
4566 assert(vecType->isBuiltinType() ||
4567 (vecType->isBitIntType() &&
4568 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4569 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));
4570
4571 // Check if we've already instantiated a vector of this type.
4572 llvm::FoldingSetNodeID ID;
4573 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
4574
4575 void *InsertPos = nullptr;
4576 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4577 return QualType(VTP, 0);
4578
4579 // If the element type isn't canonical, this won't be a canonical type either,
4580 // so fill in the canonical type field.
4581 QualType Canonical;
4582 if (!vecType.isCanonical()) {
4583 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
4584
4585 // Get the new insert position for the node we care about.
4586 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4587 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4588 }
4589 auto *New = new (*this, alignof(VectorType))
4590 VectorType(vecType, NumElts, Canonical, VecKind);
4591 VectorTypes.InsertNode(New, InsertPos);
4592 Types.push_back(New);
4593 return QualType(New, 0);
4594}
4595
// Returns a DependentVectorType whose element count is the dependent
// expression SizeExpr. Only the canonical form is uniqued; sugared forms are
// freshly allocated with their canonical type filled in.
// NOTE(review): the signature start (original 4596) and the "New" declaration
// line (original 4605) were dropped during extraction -- confirm upstream.
4597 SourceLocation AttrLoc,
4598 VectorKind VecKind) const {
4599 llvm::FoldingSetNodeID ID;
4600 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
4601 VecKind);
4602 void *InsertPos = nullptr;
4603 DependentVectorType *Canon =
4604 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4606
4607 if (Canon) {
// A canonical node already exists; build a sugared node pointing at it.
4608 New = new (*this, alignof(DependentVectorType)) DependentVectorType(
4609 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
4610 } else {
4611 QualType CanonVecTy = getCanonicalType(VecType);
4612 if (CanonVecTy == VecType) {
// The request is itself canonical: create and register it.
4613 New = new (*this, alignof(DependentVectorType))
4614 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);
4615
4616 DependentVectorType *CanonCheck =
4617 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4618 assert(!CanonCheck &&
4619 "Dependent-sized vector_size canonical type broken");
4620 (void)CanonCheck;
4621 DependentVectorTypes.InsertNode(New, InsertPos);
4622 } else {
// Recurse to build/find the canonical node, then sugar over it.
4623 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
4624 SourceLocation(), VecKind);
4625 New = new (*this, alignof(DependentVectorType))
4626 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
4627 }
4628 }
4629
4630 Types.push_back(New);
4631 return QualType(New, 0);
4632}
4633
4634/// getExtVectorType - Return the unique reference to an extended vector type of
4635/// the specified element type and size. VectorType must be a built-in type.
// NOTE(review): the signature start (original 4636) and the trailing Profile
// argument line (original 4646) were dropped during extraction -- confirm
// against upstream ASTContext.cpp.
4637 unsigned NumElts) const {
4638 assert(vecType->isBuiltinType() || vecType->isDependentType() ||
4639 (vecType->isBitIntType() &&
4640 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4641 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits())));
4642
4643 // Check if we've already instantiated a vector of this type.
4644 llvm::FoldingSetNodeID ID;
4645 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
4647 void *InsertPos = nullptr;
4648 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4649 return QualType(VTP, 0);
4650
4651 // If the element type isn't canonical, this won't be a canonical type either,
4652 // so fill in the canonical type field.
4653 QualType Canonical;
4654 if (!vecType.isCanonical()) {
4655 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
4656
4657 // Get the new insert position for the node we care about.
4658 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4659 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4660 }
4661 auto *New = new (*this, alignof(ExtVectorType))
4662 ExtVectorType(vecType, NumElts, Canonical);
4663 VectorTypes.InsertNode(New, InsertPos);
4664 Types.push_back(New);
4665 return QualType(New, 0);
4666}
4667
// Returns a DependentSizedExtVectorType (ext_vector_type with a dependent
// size expression). Only the canonical form is uniqued in the folding set.
// NOTE(review): the signature start (originals 4668-4669), the Profile call
// head (original 4673), and the Canon/New declarations (originals 4677/4679)
// were dropped during extraction -- confirm against upstream ASTContext.cpp.
4670 Expr *SizeExpr,
4671 SourceLocation AttrLoc) const {
4672 llvm::FoldingSetNodeID ID;
4674 SizeExpr);
4675
4676 void *InsertPos = nullptr;
4678 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4680 if (Canon) {
4681 // We already have a canonical version of this array type; use it as
4682 // the canonical type for a newly-built type.
4683 New = new (*this, alignof(DependentSizedExtVectorType))
4684 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
4685 AttrLoc);
4686 } else {
4687 QualType CanonVecTy = getCanonicalType(vecType);
4688 if (CanonVecTy == vecType) {
// The request is itself canonical: create and register it.
4689 New = new (*this, alignof(DependentSizedExtVectorType))
4690 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);
4691
4692 DependentSizedExtVectorType *CanonCheck
4693 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4694 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
4695 (void)CanonCheck;
4696 DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
4697 } else {
// Recurse to build/find the canonical node, then sugar over it.
4698 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
4699 SourceLocation());
4700 New = new (*this, alignof(DependentSizedExtVectorType))
4701 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
4702 }
4703 }
4704
4705 Types.push_back(New);
4706 return QualType(New, 0);
4707}
4708
// Returns the uniqued ConstantMatrixType with the given element type and
// constant row/column dimensions (Clang matrix-types extension).
// NOTE(review): the signature start (original 4709) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4710 unsigned NumColumns) const {
4711 llvm::FoldingSetNodeID ID;
4712 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
4713 Type::ConstantMatrix);
4714
4715 assert(MatrixType::isValidElementType(ElementTy, getLangOpts()) &&
4716 "need a valid element type");
4717 assert(NumRows > 0 && NumRows <= LangOpts.MaxMatrixDimension &&
4718 NumColumns > 0 && NumColumns <= LangOpts.MaxMatrixDimension &&
4719 "need valid matrix dimensions");
4720 void *InsertPos = nullptr;
4721 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4722 return QualType(MTP, 0);
4723
// If the element type is sugar, build the canonical matrix first so the new
// node can point at it.
4724 QualType Canonical;
4725 if (!ElementTy.isCanonical()) {
4726 Canonical =
4727 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);
4728
4729 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4730 assert(!NewIP && "Matrix type shouldn't already exist in the map");
4731 (void)NewIP;
4732 }
4733
4734 auto *New = new (*this, alignof(ConstantMatrixType))
4735 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4736 MatrixTypes.InsertNode(New, InsertPos);
4737 Types.push_back(New);
4738 return QualType(New, 0);
4739}
4740
4742 Expr *RowExpr,
4743 Expr *ColumnExpr,
4744 SourceLocation AttrLoc) const {
4745 QualType CanonElementTy = getCanonicalType(ElementTy);
4746 llvm::FoldingSetNodeID ID;
4747 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
4748 ColumnExpr);
4749
4750 void *InsertPos = nullptr;
4752 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4753
4754 if (!Canon) {
4755 Canon = new (*this, alignof(DependentSizedMatrixType))
4756 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4757 ColumnExpr, AttrLoc);
4758#ifndef NDEBUG
4759 DependentSizedMatrixType *CanonCheck =
4760 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4761 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4762#endif
4763 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
4764 Types.push_back(Canon);
4765 }
4766
4767 // Already have a canonical version of the matrix type
4768 //
4769 // If it exactly matches the requested type, use it directly.
4770 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
4771 Canon->getRowExpr() == ColumnExpr)
4772 return QualType(Canon, 0);
4773
4774 // Use Canon as the canonical type for newly-built type.
4776 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4777 ColumnExpr, AttrLoc);
4778 Types.push_back(New);
4779 return QualType(New, 0);
4780}
4781
// Returns a DependentAddressSpaceType: a pointee type annotated with an
// address space given by a dependent expression. The canonical form (canonical
// pointee) is uniqued; a sugared node is built over it when needed.
// NOTE(review): the signature start (original 4782) was dropped during
// extraction -- confirm against upstream ASTContext.cpp.
4783 Expr *AddrSpaceExpr,
4784 SourceLocation AttrLoc) const {
4785 assert(AddrSpaceExpr->isInstantiationDependent());
4786
4787 QualType canonPointeeType = getCanonicalType(PointeeType);
4788
4789 void *insertPos = nullptr;
4790 llvm::FoldingSetNodeID ID;
4791 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
4792 AddrSpaceExpr);
4793
4794 DependentAddressSpaceType *canonTy =
4795 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
4796
4797 if (!canonTy) {
4798 canonTy = new (*this, alignof(DependentAddressSpaceType))
4799 DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
4800 AttrLoc);
4801 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
4802 Types.push_back(canonTy);
4803 }
4804
// Exact match with the canonical node: no sugar needed.
4805 if (canonPointeeType == PointeeType &&
4806 canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
4807 return QualType(canonTy, 0);
4808
4809 auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
4810 DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
4811 AddrSpaceExpr, AttrLoc);
4812 Types.push_back(sugaredType);
4813 return QualType(sugaredType, 0);
4814}
4815
4816/// Determine whether \p T is canonical as the result type of a function.
4818 return T.isCanonical() &&
4819 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4820 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4821}
4822
4823/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
4824QualType
4826 const FunctionType::ExtInfo &Info) const {
4827 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
4828 // functionality creates a function without a prototype regardless of
4829 // language mode (so it makes them even in C++). Once the rewriter has been
4830 // fixed, this assertion can be enabled again.
4831 //assert(!LangOpts.requiresStrictPrototypes() &&
4832 // "strict prototypes are disabled");
4833
4834 // Unique functions, to guarantee there is only one function of a particular
4835 // structure.
4836 llvm::FoldingSetNodeID ID;
4837 FunctionNoProtoType::Profile(ID, ResultTy, Info);
4838
4839 void *InsertPos = nullptr;
4840 if (FunctionNoProtoType *FT =
4841 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
4842 return QualType(FT, 0);
4843
4844 QualType Canonical;
4845 if (!isCanonicalResultType(ResultTy)) {
4846 Canonical =
4848
4849 // Get the new insert position for the node we care about.
4850 FunctionNoProtoType *NewIP =
4851 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4852 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4853 }
4854
4855 auto *New = new (*this, alignof(FunctionNoProtoType))
4856 FunctionNoProtoType(ResultTy, Canonical, Info);
4857 Types.push_back(New);
4858 FunctionNoProtoTypes.InsertNode(New, InsertPos);
4859 return QualType(New, 0);
4860}
4861
4864 CanQualType CanResultType = getCanonicalType(ResultType);
4865
4866 // Canonical result types do not have ARC lifetime qualifiers.
4867 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4868 Qualifiers Qs = CanResultType.getQualifiers();
4869 Qs.removeObjCLifetime();
4871 getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
4872 }
4873
4874 return CanResultType;
4875}
4876
4878 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4879 if (ESI.Type == EST_None)
4880 return true;
4881 if (!NoexceptInType)
4882 return false;
4883
4884 // C++17 onwards: exception specification is part of the type, as a simple
4885 // boolean "can this function type throw".
4886 if (ESI.Type == EST_BasicNoexcept)
4887 return true;
4888
4889 // A noexcept(expr) specification is (possibly) canonical if expr is
4890 // value-dependent.
4891 if (ESI.Type == EST_DependentNoexcept)
4892 return true;
4893
4894 // A dynamic exception specification is canonical if it only contains pack
4895 // expansions (so we can't tell whether it's non-throwing) and all its
4896 // contained types are canonical.
4897 if (ESI.Type == EST_Dynamic) {
4898 bool AnyPackExpansions = false;
4899 for (QualType ET : ESI.Exceptions) {
4900 if (!ET.isCanonical())
4901 return false;
4902 if (ET->getAs<PackExpansionType>())
4903 AnyPackExpansions = true;
4904 }
4905 return AnyPackExpansions;
4906 }
4907
4908 return false;
4909}
4910
4911QualType ASTContext::getFunctionTypeInternal(
4912 QualType ResultTy, ArrayRef<QualType> ArgArray,
4913 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
4914 size_t NumArgs = ArgArray.size();
4915
4916 // Unique functions, to guarantee there is only one function of a particular
4917 // structure.
4918 llvm::FoldingSetNodeID ID;
4919 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
4920 *this, true);
4921
4922 QualType Canonical;
4923 bool Unique = false;
4924
4925 void *InsertPos = nullptr;
4926 if (FunctionProtoType *FPT =
4927 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4928 QualType Existing = QualType(FPT, 0);
4929
4930 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
4931 // it so long as our exception specification doesn't contain a dependent
4932 // noexcept expression, or we're just looking for a canonical type.
4933 // Otherwise, we're going to need to create a type
4934 // sugar node to hold the concrete expression.
4935 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
4936 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
4937 return Existing;
4938
4939 // We need a new type sugar node for this one, to hold the new noexcept
4940 // expression. We do no canonicalization here, but that's OK since we don't
4941 // expect to see the same noexcept expression much more than once.
4942 Canonical = getCanonicalType(Existing);
4943 Unique = true;
4944 }
4945
4946 bool NoexceptInType = getLangOpts().CPlusPlus17;
4947 bool IsCanonicalExceptionSpec =
4949
4950 // Determine whether the type being created is already canonical or not.
4951 bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
4952 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
4953 for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
4954 if (!ArgArray[i].isCanonicalAsParam())
4955 isCanonical = false;
4956
4957 if (OnlyWantCanonical)
4958 assert(isCanonical &&
4959 "given non-canonical parameters constructing canonical type");
4960
4961 // If this type isn't canonical, get the canonical version of it if we don't
4962 // already have it. The exception spec is only partially part of the
4963 // canonical type, and only in C++17 onwards.
4964 if (!isCanonical && Canonical.isNull()) {
4965 SmallVector<QualType, 16> CanonicalArgs;
4966 CanonicalArgs.reserve(NumArgs);
4967 for (unsigned i = 0; i != NumArgs; ++i)
4968 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
4969
4970 llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
4971 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
4972 CanonicalEPI.HasTrailingReturn = false;
4973
4974 if (IsCanonicalExceptionSpec) {
4975 // Exception spec is already OK.
4976 } else if (NoexceptInType) {
4977 switch (EPI.ExceptionSpec.Type) {
4979 // We don't know yet. It shouldn't matter what we pick here; no-one
4980 // should ever look at this.
4981 [[fallthrough]];
4982 case EST_None: case EST_MSAny: case EST_NoexceptFalse:
4983 CanonicalEPI.ExceptionSpec.Type = EST_None;
4984 break;
4985
4986 // A dynamic exception specification is almost always "not noexcept",
4987 // with the exception that a pack expansion might expand to no types.
4988 case EST_Dynamic: {
4989 bool AnyPacks = false;
4990 for (QualType ET : EPI.ExceptionSpec.Exceptions) {
4991 if (ET->getAs<PackExpansionType>())
4992 AnyPacks = true;
4993 ExceptionTypeStorage.push_back(getCanonicalType(ET));
4994 }
4995 if (!AnyPacks)
4996 CanonicalEPI.ExceptionSpec.Type = EST_None;
4997 else {
4998 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
4999 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
5000 }
5001 break;
5002 }
5003
5004 case EST_DynamicNone:
5005 case EST_BasicNoexcept:
5006 case EST_NoexceptTrue:
5007 case EST_NoThrow:
5008 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
5009 break;
5010
5012 llvm_unreachable("dependent noexcept is already canonical");
5013 }
5014 } else {
5015 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
5016 }
5017
5018 // Adjust the canonical function result type.
5019 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
5020 Canonical =
5021 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
5022
5023 // Get the new insert position for the node we care about.
5024 FunctionProtoType *NewIP =
5025 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
5026 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
5027 }
5028
5029 // Compute the needed size to hold this FunctionProtoType and the
5030 // various trailing objects.
5031 auto ESH = FunctionProtoType::getExceptionSpecSize(
5032 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
5033 size_t Size = FunctionProtoType::totalSizeToAlloc<
5034 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
5035 FunctionType::FunctionTypeExtraAttributeInfo,
5036 FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
5037 Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers,
5038 FunctionEffect, EffectConditionExpr>(
5041 EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
5042 ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
5043 EPI.ExtParameterInfos ? NumArgs : 0,
5045 EPI.FunctionEffects.conditions().size());
5046
5047 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
5048 FunctionProtoType::ExtProtoInfo newEPI = EPI;
5049 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
5050 Types.push_back(FTP);
5051 if (!Unique)
5052 FunctionProtoTypes.InsertNode(FTP, InsertPos);
5053 if (!EPI.FunctionEffects.empty())
5054 AnyFunctionEffects = true;
5055 return QualType(FTP, 0);
5056}
5057
5058QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
5059 llvm::FoldingSetNodeID ID;
5060 PipeType::Profile(ID, T, ReadOnly);
5061
5062 void *InsertPos = nullptr;
5063 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
5064 return QualType(PT, 0);
5065
5066 // If the pipe element type isn't canonical, this won't be a canonical type
5067 // either, so fill in the canonical type field.
5068 QualType Canonical;
5069 if (!T.isCanonical()) {
5070 Canonical = getPipeType(getCanonicalType(T), ReadOnly);
5071
5072 // Get the new insert position for the node we care about.
5073 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
5074 assert(!NewIP && "Shouldn't be in the map!");
5075 (void)NewIP;
5076 }
5077 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
5078 Types.push_back(New);
5079 PipeTypes.InsertNode(New, InsertPos);
5080 return QualType(New, 0);
5081}
5082
5084 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
5085 return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
5086 : Ty;
5087}
5088
5090 return getPipeType(T, true);
5091}
5092
5094 return getPipeType(T, false);
5095}
5096
5097QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
5098 llvm::FoldingSetNodeID ID;
5099 BitIntType::Profile(ID, IsUnsigned, NumBits);
5100
5101 void *InsertPos = nullptr;
5102 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5103 return QualType(EIT, 0);
5104
5105 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
5106 BitIntTypes.InsertNode(New, InsertPos);
5107 Types.push_back(New);
5108 return QualType(New, 0);
5109}
5110
5112 Expr *NumBitsExpr) const {
5113 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
5114 llvm::FoldingSetNodeID ID;
5115 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
5116
5117 void *InsertPos = nullptr;
5118 if (DependentBitIntType *Existing =
5119 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
5120 return QualType(Existing, 0);
5121
5122 auto *New = new (*this, alignof(DependentBitIntType))
5123 DependentBitIntType(IsUnsigned, NumBitsExpr);
5124 DependentBitIntTypes.InsertNode(New, InsertPos);
5125
5126 Types.push_back(New);
5127 return QualType(New, 0);
5128}
5129
5132 using Kind = PredefinedSugarType::Kind;
5133
5134 if (auto *Target = PredefinedSugarTypes[llvm::to_underlying(KD)];
5135 Target != nullptr)
5136 return QualType(Target, 0);
5137
5138 auto getCanonicalType = [](const ASTContext &Ctx, Kind KDI) -> QualType {
5139 switch (KDI) {
5140 // size_t (C99TC3 6.5.3.4), signed size_t (C++23 5.13.2) and
5141 // ptrdiff_t (C99TC3 6.5.6) Although these types are not built-in, they
5142 // are part of the core language and are widely used. Using
5143 // PredefinedSugarType makes these types as named sugar types rather than
5144 // standard integer types, enabling better hints and diagnostics.
5145 case Kind::SizeT:
5146 return Ctx.getFromTargetType(Ctx.Target->getSizeType());
5147 case Kind::SignedSizeT:
5148 return Ctx.getFromTargetType(Ctx.Target->getSignedSizeType());
5149 case Kind::PtrdiffT:
5150 return Ctx.getFromTargetType(Ctx.Target->getPtrDiffType(LangAS::Default));
5151 }
5152 llvm_unreachable("unexpected kind");
5153 };
5154 auto *New = new (*this, alignof(PredefinedSugarType))
5155 PredefinedSugarType(KD, &Idents.get(PredefinedSugarType::getName(KD)),
5156 getCanonicalType(*this, static_cast<Kind>(KD)));
5157 Types.push_back(New);
5158 PredefinedSugarTypes[llvm::to_underlying(KD)] = New;
5159 return QualType(New, 0);
5160}
5161
5163 NestedNameSpecifier Qualifier,
5164 const TypeDecl *Decl) const {
5165 if (auto *Tag = dyn_cast<TagDecl>(Decl))
5166 return getTagType(Keyword, Qualifier, Tag,
5167 /*OwnsTag=*/false);
5168 if (auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
5169 return getTypedefType(Keyword, Qualifier, Typedef);
5170 if (auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(Decl))
5171 return getUnresolvedUsingType(Keyword, Qualifier, UD);
5172
5174 assert(!Qualifier);
5175 return QualType(Decl->TypeForDecl, 0);
5176}
5177
5179 if (auto *Tag = dyn_cast<TagDecl>(TD))
5180 return getCanonicalTagType(Tag);
5181 if (auto *TN = dyn_cast<TypedefNameDecl>(TD))
5182 return getCanonicalType(TN->getUnderlyingType());
5183 if (const auto *UD = dyn_cast<UnresolvedUsingTypenameDecl>(TD))
5185 assert(TD->TypeForDecl);
5186 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5187}
5188
5190 if (const auto *TD = dyn_cast<TagDecl>(Decl))
5191 return getCanonicalTagType(TD);
5192 if (const auto *TD = dyn_cast<TypedefNameDecl>(Decl);
5193 isa_and_nonnull<TypedefDecl, TypeAliasDecl>(TD))
5195 /*Qualifier=*/std::nullopt, TD);
5196 if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl))
5197 return getCanonicalUnresolvedUsingType(Using);
5198
5199 assert(Decl->TypeForDecl);
5200 return QualType(Decl->TypeForDecl, 0);
5201}
5202
5203/// getTypedefType - Return the unique reference to the type for the
5204/// specified typedef name decl.
5207 NestedNameSpecifier Qualifier,
5208 const TypedefNameDecl *Decl, QualType UnderlyingType,
5209 std::optional<bool> TypeMatchesDeclOrNone) const {
5210 if (!TypeMatchesDeclOrNone) {
5211 QualType DeclUnderlyingType = Decl->getUnderlyingType();
5212 assert(!DeclUnderlyingType.isNull());
5213 if (UnderlyingType.isNull())
5214 UnderlyingType = DeclUnderlyingType;
5215 else
5216 assert(hasSameType(UnderlyingType, DeclUnderlyingType));
5217 TypeMatchesDeclOrNone = UnderlyingType == DeclUnderlyingType;
5218 } else {
5219 // FIXME: This is a workaround for a serialization cycle: assume the decl
5220 // underlying type is not available; don't touch it.
5221 assert(!UnderlyingType.isNull());
5222 }
5223
5224 if (Keyword == ElaboratedTypeKeyword::None && !Qualifier &&
5225 *TypeMatchesDeclOrNone) {
5226 if (Decl->TypeForDecl)
5227 return QualType(Decl->TypeForDecl, 0);
5228
5229 auto *NewType = new (*this, alignof(TypedefType))
5230 TypedefType(Type::Typedef, Keyword, Qualifier, Decl, UnderlyingType,
5231 !*TypeMatchesDeclOrNone);
5232
5233 Types.push_back(NewType);
5234 Decl->TypeForDecl = NewType;
5235 return QualType(NewType, 0);
5236 }
5237
5238 llvm::FoldingSetNodeID ID;
5239 TypedefType::Profile(ID, Keyword, Qualifier, Decl,
5240 *TypeMatchesDeclOrNone ? QualType() : UnderlyingType);
5241
5242 void *InsertPos = nullptr;
5243 if (FoldingSetPlaceholder<TypedefType> *Placeholder =
5244 TypedefTypes.FindNodeOrInsertPos(ID, InsertPos))
5245 return QualType(Placeholder->getType(), 0);
5246
5247 void *Mem =
5248 Allocate(TypedefType::totalSizeToAlloc<FoldingSetPlaceholder<TypedefType>,
5250 1, !!Qualifier, !*TypeMatchesDeclOrNone),
5251 alignof(TypedefType));
5252 auto *NewType =
5253 new (Mem) TypedefType(Type::Typedef, Keyword, Qualifier, Decl,
5254 UnderlyingType, !*TypeMatchesDeclOrNone);
5255 auto *Placeholder = new (NewType->getFoldingSetPlaceholder())
5257 TypedefTypes.InsertNode(Placeholder, InsertPos);
5258 Types.push_back(NewType);
5259 return QualType(NewType, 0);
5260}
5261
5263 NestedNameSpecifier Qualifier,
5264 const UsingShadowDecl *D,
5265 QualType UnderlyingType) const {
5266 // FIXME: This is expensive to compute every time!
5267 if (UnderlyingType.isNull()) {
5268 const auto *UD = cast<UsingDecl>(D->getIntroducer());
5269 UnderlyingType =
5272 UD->getQualifier(), cast<TypeDecl>(D->getTargetDecl()));
5273 }
5274
5275 llvm::FoldingSetNodeID ID;
5276 UsingType::Profile(ID, Keyword, Qualifier, D, UnderlyingType);
5277
5278 void *InsertPos = nullptr;
5279 if (const UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
5280 return QualType(T, 0);
5281
5282 assert(!UnderlyingType.hasLocalQualifiers());
5283
5284 assert(
5286 UnderlyingType));
5287
5288 void *Mem =
5289 Allocate(UsingType::totalSizeToAlloc<NestedNameSpecifier>(!!Qualifier),
5290 alignof(UsingType));
5291 UsingType *T = new (Mem) UsingType(Keyword, Qualifier, D, UnderlyingType);
5292 Types.push_back(T);
5293 UsingTypes.InsertNode(T, InsertPos);
5294 return QualType(T, 0);
5295}
5296
/// Create (without any uniquing) a TagType node — an EnumType, RecordType or
/// InjectedClassNameType depending on the declaration kind — optionally
/// preceded in memory by a TagTypeFoldingSetPlaceholder and followed by a
/// tail-allocated qualifier.
///
/// \param CanonicalType the canonical type this node should link to, or null
///        if the node being built is itself canonical.
/// \param WithFoldingSetNode when true, co-allocate a
///        TagTypeFoldingSetPlaceholder in front of the type so the caller can
///        insert the result into the TagTypes folding set.
5297TagType *ASTContext::getTagTypeInternal(ElaboratedTypeKeyword Keyword,
5298 NestedNameSpecifier Qualifier,
5299 const TagDecl *TD, bool OwnsTag,
5300 bool IsInjected,
5301 const Type *CanonicalType,
5302 bool WithFoldingSetNode) const {
  // Select the dynamic type class and the allocation size from the decl kind.
  // The static_asserts guarantee all three node kinds share TagType alignment.
5303 auto [TC, Size] = [&] {
5304 switch (TD->getDeclKind()) {
5305 case Decl::Enum:
5306 static_assert(alignof(EnumType) == alignof(TagType));
5307 return std::make_tuple(Type::Enum, sizeof(EnumType));
5308 case Decl::ClassTemplatePartialSpecialization:
5309 case Decl::ClassTemplateSpecialization:
5310 case Decl::CXXRecord:
5311 static_assert(alignof(RecordType) == alignof(TagType));
5312 static_assert(alignof(InjectedClassNameType) == alignof(TagType));
5313 if (cast<CXXRecordDecl>(TD)->hasInjectedClassType())
5314 return std::make_tuple(Type::InjectedClassName,
5315 sizeof(InjectedClassNameType));
5316 [[fallthrough]];
5317 case Decl::Record:
5318 return std::make_tuple(Type::Record, sizeof(RecordType));
5319 default:
5320 llvm_unreachable("unexpected decl kind");
5321 }
5322 }();
5323
  // Reserve space to tail-allocate the qualifier after the type node, padding
  // the size up to the qualifier's alignment first.
5324 if (Qualifier) {
5325 static_assert(alignof(NestedNameSpecifier) <= alignof(TagType));
5326 Size = llvm::alignTo(Size, alignof(NestedNameSpecifier)) +
5327 sizeof(NestedNameSpecifier);
5328 }
5329 void *Mem;
5330 if (WithFoldingSetNode) {
5331 // FIXME: It would be more profitable to tail allocate the folding set node
5332 // from the type, instead of the other way around, due to the greater
5333 // alignment requirements of the type. But this makes it harder to deal with
5334 // the different type node sizes. This would require either uniquing from
5335 // different folding sets, or having the folding set accept a
5336 // contextual parameter which is not fixed at construction.
5337 Mem = Allocate(
5338 sizeof(TagTypeFoldingSetPlaceholder) +
5339 TagTypeFoldingSetPlaceholder::getOffset() + Size,
5340 std::max(alignof(TagTypeFoldingSetPlaceholder), alignof(TagType)));
5341 auto *T = new (Mem) TagTypeFoldingSetPlaceholder();
5342 Mem = T->getTagType();
5343 } else {
5344 Mem = Allocate(Size, alignof(TagType));
5345 }
5346
  // Placement-construct the concrete node. Each case asserts that TagType is
  // the first base of the concrete class, i.e. that the concrete object and
  // its TagType subobject share the same address.
5347 auto *T = [&, TC = TC]() -> TagType * {
5348 switch (TC) {
5349 case Type::Enum: {
5350 assert(isa<EnumDecl>(TD));
5351 auto *T = new (Mem) EnumType(TC, Keyword, Qualifier, TD, OwnsTag,
5352 IsInjected, CanonicalType);
5353 assert(reinterpret_cast<void *>(T) ==
5354 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
5355 "TagType must be the first base of EnumType");
5356 return T;
5357 }
5358 case Type::Record: {
5359 assert(isa<RecordDecl>(TD));
5360 auto *T = new (Mem) RecordType(TC, Keyword, Qualifier, TD, OwnsTag,
5361 IsInjected, CanonicalType);
5362 assert(reinterpret_cast<void *>(T) ==
5363 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
5364 "TagType must be the first base of RecordType");
5365 return T;
5366 }
5367 case Type::InjectedClassName: {
5368 auto *T = new (Mem) InjectedClassNameType(Keyword, Qualifier, TD,
5369 IsInjected, CanonicalType);
5370 assert(reinterpret_cast<void *>(T) ==
5371 reinterpret_cast<void *>(static_cast<TagType *>(T)) &&
5372 "TagType must be the first base of InjectedClassNameType");
5373 return T;
5374 }
5375 default:
5376 llvm_unreachable("unexpected type class");
5377 }
5378 }();
  // Sanity-check that the constructed node's accessors round-trip every
  // argument (these are stored in packed bitfields).
5379 assert(T->getKeyword() == Keyword);
5380 assert(T->getQualifier() == Qualifier);
5381 assert(T->getDecl() == TD);
5382 assert(T->isInjected() == IsInjected);
5383 assert(T->isTagOwned() == OwnsTag);
5384 assert((T->isCanonicalUnqualified()
5385 ? QualType()
5386 : T->getCanonicalTypeInternal()) == QualType(CanonicalType, 0));
5387 Types.push_back(T);
5388 return T;
5389}
5390
5391static const TagDecl *getNonInjectedClassName(const TagDecl *TD) {
5392 if (const auto *RD = dyn_cast<CXXRecordDecl>(TD);
5393 RD && RD->isInjectedClassName())
5394 return cast<TagDecl>(RD->getDeclContext());
5395 return TD;
5396}
5397
5400 if (TD->TypeForDecl)
5401 return TD->TypeForDecl->getCanonicalTypeUnqualified();
5402
5403 const Type *CanonicalType = getTagTypeInternal(
5405 /*Qualifier=*/std::nullopt, TD,
5406 /*OwnsTag=*/false, /*IsInjected=*/false, /*CanonicalType=*/nullptr,
5407 /*WithFoldingSetNode=*/false);
5408 TD->TypeForDecl = CanonicalType;
5409 return CanQualType::CreateUnsafe(QualType(CanonicalType, 0));
5410}
5411
5413 NestedNameSpecifier Qualifier,
5414 const TagDecl *TD, bool OwnsTag) const {
5415
5416 const TagDecl *NonInjectedTD = ::getNonInjectedClassName(TD);
5417 bool IsInjected = TD != NonInjectedTD;
5418
5419 ElaboratedTypeKeyword PreferredKeyword =
5422 NonInjectedTD->getTagKind());
5423
5424 if (Keyword == PreferredKeyword && !Qualifier && !OwnsTag) {
5425 if (const Type *T = TD->TypeForDecl; T && !T->isCanonicalUnqualified())
5426 return QualType(T, 0);
5427
5428 const Type *CanonicalType = getCanonicalTagType(NonInjectedTD).getTypePtr();
5429 const Type *T =
5430 getTagTypeInternal(Keyword,
5431 /*Qualifier=*/std::nullopt, NonInjectedTD,
5432 /*OwnsTag=*/false, IsInjected, CanonicalType,
5433 /*WithFoldingSetNode=*/false);
5434 TD->TypeForDecl = T;
5435 return QualType(T, 0);
5436 }
5437
5438 llvm::FoldingSetNodeID ID;
5439 TagTypeFoldingSetPlaceholder::Profile(ID, Keyword, Qualifier, NonInjectedTD,
5440 OwnsTag, IsInjected);
5441
5442 void *InsertPos = nullptr;
5443 if (TagTypeFoldingSetPlaceholder *T =
5444 TagTypes.FindNodeOrInsertPos(ID, InsertPos))
5445 return QualType(T->getTagType(), 0);
5446
5447 const Type *CanonicalType = getCanonicalTagType(NonInjectedTD).getTypePtr();
5448 TagType *T =
5449 getTagTypeInternal(Keyword, Qualifier, NonInjectedTD, OwnsTag, IsInjected,
5450 CanonicalType, /*WithFoldingSetNode=*/true);
5451 TagTypes.InsertNode(TagTypeFoldingSetPlaceholder::fromTagType(T), InsertPos);
5452 return QualType(T, 0);
5453}
5454
5455bool ASTContext::computeBestEnumTypes(bool IsPacked, unsigned NumNegativeBits,
5456 unsigned NumPositiveBits,
5457 QualType &BestType,
5458 QualType &BestPromotionType) {
5459 unsigned IntWidth = Target->getIntWidth();
5460 unsigned CharWidth = Target->getCharWidth();
5461 unsigned ShortWidth = Target->getShortWidth();
5462 bool EnumTooLarge = false;
5463 unsigned BestWidth;
5464 if (NumNegativeBits) {
5465 // If there is a negative value, figure out the smallest integer type (of
5466 // int/long/longlong) that fits.
5467 // If it's packed, check also if it fits a char or a short.
5468 if (IsPacked && NumNegativeBits <= CharWidth &&
5469 NumPositiveBits < CharWidth) {
5470 BestType = SignedCharTy;
5471 BestWidth = CharWidth;
5472 } else if (IsPacked && NumNegativeBits <= ShortWidth &&
5473 NumPositiveBits < ShortWidth) {
5474 BestType = ShortTy;
5475 BestWidth = ShortWidth;
5476 } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
5477 BestType = IntTy;
5478 BestWidth = IntWidth;
5479 } else {
5480 BestWidth = Target->getLongWidth();
5481
5482 if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
5483 BestType = LongTy;
5484 } else {
5485 BestWidth = Target->getLongLongWidth();
5486
5487 if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
5488 EnumTooLarge = true;
5489 BestType = LongLongTy;
5490 }
5491 }
5492 BestPromotionType = (BestWidth <= IntWidth ? IntTy : BestType);
5493 } else {
5494 // If there is no negative value, figure out the smallest type that fits
5495 // all of the enumerator values.
5496 // If it's packed, check also if it fits a char or a short.
5497 if (IsPacked && NumPositiveBits <= CharWidth) {
5498 BestType = UnsignedCharTy;
5499 BestPromotionType = IntTy;
5500 BestWidth = CharWidth;
5501 } else if (IsPacked && NumPositiveBits <= ShortWidth) {
5502 BestType = UnsignedShortTy;
5503 BestPromotionType = IntTy;
5504 BestWidth = ShortWidth;
5505 } else if (NumPositiveBits <= IntWidth) {
5506 BestType = UnsignedIntTy;
5507 BestWidth = IntWidth;
5508 BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
5510 : IntTy;
5511 } else if (NumPositiveBits <= (BestWidth = Target->getLongWidth())) {
5512 BestType = UnsignedLongTy;
5513 BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
5515 : LongTy;
5516 } else {
5517 BestWidth = Target->getLongLongWidth();
5518 if (NumPositiveBits > BestWidth) {
5519 // This can happen with bit-precise integer types, but those are not
5520 // allowed as the type for an enumerator per C23 6.7.2.2p4 and p12.
5521 // FIXME: GCC uses __int128_t and __uint128_t for cases that fit within
5522 // a 128-bit integer, we should consider doing the same.
5523 EnumTooLarge = true;
5524 }
5525 BestType = UnsignedLongLongTy;
5526 BestPromotionType = (NumPositiveBits == BestWidth || !LangOpts.CPlusPlus)
5528 : LongLongTy;
5529 }
5530 }
5531 return EnumTooLarge;
5532}
5533
5535 assert((T->isIntegralType(*this) || T->isEnumeralType()) &&
5536 "Integral type required!");
5537 unsigned BitWidth = getIntWidth(T);
5538
5539 if (Value.isUnsigned() || Value.isNonNegative()) {
5540 if (T->isSignedIntegerOrEnumerationType())
5541 --BitWidth;
5542 return Value.getActiveBits() <= BitWidth;
5543 }
5544 return Value.getSignificantBits() <= BitWidth;
5545}
5546
5547UnresolvedUsingType *ASTContext::getUnresolvedUsingTypeInternal(
5549 const UnresolvedUsingTypenameDecl *D, void *InsertPos,
5550 const Type *CanonicalType) const {
5551 void *Mem = Allocate(
5552 UnresolvedUsingType::totalSizeToAlloc<
5554 !!InsertPos, !!Qualifier),
5555 alignof(UnresolvedUsingType));
5556 auto *T = new (Mem) UnresolvedUsingType(Keyword, Qualifier, D, CanonicalType);
5557 if (InsertPos) {
5558 auto *Placeholder = new (T->getFoldingSetPlaceholder())
5560 TypedefTypes.InsertNode(Placeholder, InsertPos);
5561 }
5562 Types.push_back(T);
5563 return T;
5564}
5565
5567 const UnresolvedUsingTypenameDecl *D) const {
5568 D = D->getCanonicalDecl();
5569 if (D->TypeForDecl)
5570 return D->TypeForDecl->getCanonicalTypeUnqualified();
5571
5572 const Type *CanonicalType = getUnresolvedUsingTypeInternal(
5574 /*Qualifier=*/std::nullopt, D,
5575 /*InsertPos=*/nullptr, /*CanonicalType=*/nullptr);
5576 D->TypeForDecl = CanonicalType;
5577 return CanQualType::CreateUnsafe(QualType(CanonicalType, 0));
5578}
5579
5582 NestedNameSpecifier Qualifier,
5583 const UnresolvedUsingTypenameDecl *D) const {
5584 if (Keyword == ElaboratedTypeKeyword::None && !Qualifier) {
5585 if (const Type *T = D->TypeForDecl; T && !T->isCanonicalUnqualified())
5586 return QualType(T, 0);
5587
5588 const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
5589 const Type *T =
5590 getUnresolvedUsingTypeInternal(ElaboratedTypeKeyword::None,
5591 /*Qualifier=*/std::nullopt, D,
5592 /*InsertPos=*/nullptr, CanonicalType);
5593 D->TypeForDecl = T;
5594 return QualType(T, 0);
5595 }
5596
5597 llvm::FoldingSetNodeID ID;
5598 UnresolvedUsingType::Profile(ID, Keyword, Qualifier, D);
5599
5600 void *InsertPos = nullptr;
5602 UnresolvedUsingTypes.FindNodeOrInsertPos(ID, InsertPos))
5603 return QualType(Placeholder->getType(), 0);
5604 assert(InsertPos);
5605
5606 const Type *CanonicalType = getCanonicalUnresolvedUsingType(D).getTypePtr();
5607 const Type *T = getUnresolvedUsingTypeInternal(Keyword, Qualifier, D,
5608 InsertPos, CanonicalType);
5609 return QualType(T, 0);
5610}
5611
5613 QualType modifiedType,
5614 QualType equivalentType,
5615 const Attr *attr) const {
5616 llvm::FoldingSetNodeID id;
5617 AttributedType::Profile(id, attrKind, modifiedType, equivalentType, attr);
5618
5619 void *insertPos = nullptr;
5620 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
5621 if (type) return QualType(type, 0);
5622
5623 assert(!attr || attr->getKind() == attrKind);
5624
5625 QualType canon = getCanonicalType(equivalentType);
5626 type = new (*this, alignof(AttributedType))
5627 AttributedType(canon, attrKind, attr, modifiedType, equivalentType);
5628
5629 Types.push_back(type);
5630 AttributedTypes.InsertNode(type, insertPos);
5631
5632 return QualType(type, 0);
5633}
5634
5636 QualType equivalentType) const {
5637 return getAttributedType(attr->getKind(), modifiedType, equivalentType, attr);
5638}
5639
5641 QualType modifiedType,
5642 QualType equivalentType) {
5643 switch (nullability) {
5645 return getAttributedType(attr::TypeNonNull, modifiedType, equivalentType);
5646
5648 return getAttributedType(attr::TypeNullable, modifiedType, equivalentType);
5649
5651 return getAttributedType(attr::TypeNullableResult, modifiedType,
5652 equivalentType);
5653
5655 return getAttributedType(attr::TypeNullUnspecified, modifiedType,
5656 equivalentType);
5657 }
5658
5659 llvm_unreachable("Unknown nullability kind");
5660}
5661
5662QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
5663 QualType Wrapped) const {
5664 llvm::FoldingSetNodeID ID;
5665 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
5666
5667 void *InsertPos = nullptr;
5668 BTFTagAttributedType *Ty =
5669 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
5670 if (Ty)
5671 return QualType(Ty, 0);
5672
5673 QualType Canon = getCanonicalType(Wrapped);
5674 Ty = new (*this, alignof(BTFTagAttributedType))
5675 BTFTagAttributedType(Canon, Wrapped, BTFAttr);
5676
5677 Types.push_back(Ty);
5678 BTFTagAttributedTypes.InsertNode(Ty, InsertPos);
5679
5680 return QualType(Ty, 0);
5681}
5682
5684 QualType Wrapped, QualType Contained,
5685 const HLSLAttributedResourceType::Attributes &Attrs) {
5686
5687 llvm::FoldingSetNodeID ID;
5688 HLSLAttributedResourceType::Profile(ID, Wrapped, Contained, Attrs);
5689
5690 void *InsertPos = nullptr;
5691 HLSLAttributedResourceType *Ty =
5692 HLSLAttributedResourceTypes.FindNodeOrInsertPos(ID, InsertPos);
5693 if (Ty)
5694 return QualType(Ty, 0);
5695
5696 Ty = new (*this, alignof(HLSLAttributedResourceType))
5697 HLSLAttributedResourceType(Wrapped, Contained, Attrs);
5698
5699 Types.push_back(Ty);
5700 HLSLAttributedResourceTypes.InsertNode(Ty, InsertPos);
5701
5702 return QualType(Ty, 0);
5703}
5704
5705QualType ASTContext::getHLSLInlineSpirvType(uint32_t Opcode, uint32_t Size,
5706 uint32_t Alignment,
5707 ArrayRef<SpirvOperand> Operands) {
5708 llvm::FoldingSetNodeID ID;
5709 HLSLInlineSpirvType::Profile(ID, Opcode, Size, Alignment, Operands);
5710
5711 void *InsertPos = nullptr;
5712 HLSLInlineSpirvType *Ty =
5713 HLSLInlineSpirvTypes.FindNodeOrInsertPos(ID, InsertPos);
5714 if (Ty)
5715 return QualType(Ty, 0);
5716
5717 void *Mem = Allocate(
5718 HLSLInlineSpirvType::totalSizeToAlloc<SpirvOperand>(Operands.size()),
5719 alignof(HLSLInlineSpirvType));
5720
5721 Ty = new (Mem) HLSLInlineSpirvType(Opcode, Size, Alignment, Operands);
5722
5723 Types.push_back(Ty);
5724 HLSLInlineSpirvTypes.InsertNode(Ty, InsertPos);
5725
5726 return QualType(Ty, 0);
5727}
5728
5729/// Retrieve a substitution-result type.
5731 Decl *AssociatedDecl,
5732 unsigned Index,
5733 UnsignedOrNone PackIndex,
5734 bool Final) const {
5735 llvm::FoldingSetNodeID ID;
5736 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
5737 PackIndex, Final);
5738 void *InsertPos = nullptr;
5739 SubstTemplateTypeParmType *SubstParm =
5740 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5741
5742 if (!SubstParm) {
5743 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
5744 !Replacement.isCanonical()),
5745 alignof(SubstTemplateTypeParmType));
5746 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
5747 Index, PackIndex, Final);
5748 Types.push_back(SubstParm);
5749 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
5750 }
5751
5752 return QualType(SubstParm, 0);
5753}
5754
5757 unsigned Index, bool Final,
5758 const TemplateArgument &ArgPack) {
5759#ifndef NDEBUG
5760 for (const auto &P : ArgPack.pack_elements())
5761 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
5762#endif
5763
5764 llvm::FoldingSetNodeID ID;
5765 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
5766 ArgPack);
5767 void *InsertPos = nullptr;
5768 if (SubstTemplateTypeParmPackType *SubstParm =
5769 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
5770 return QualType(SubstParm, 0);
5771
5772 QualType Canon;
5773 {
5774 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
5775 if (!AssociatedDecl->isCanonicalDecl() ||
5776 !CanonArgPack.structurallyEquals(ArgPack)) {
5778 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack);
5779 [[maybe_unused]] const auto *Nothing =
5780 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
5781 assert(!Nothing);
5782 }
5783 }
5784
5785 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
5786 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
5787 ArgPack);
5788 Types.push_back(SubstParm);
5789 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
5790 return QualType(SubstParm, 0);
5791}
5792
5795 assert(llvm::all_of(ArgPack.pack_elements(),
5796 [](const auto &P) {
5797 return P.getKind() == TemplateArgument::Type;
5798 }) &&
5799 "Pack contains a non-type");
5800
5801 llvm::FoldingSetNodeID ID;
5802 SubstBuiltinTemplatePackType::Profile(ID, ArgPack);
5803
5804 void *InsertPos = nullptr;
5805 if (auto *T =
5806 SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos))
5807 return QualType(T, 0);
5808
5809 QualType Canon;
5810 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
5811 if (!CanonArgPack.structurallyEquals(ArgPack)) {
5812 Canon = getSubstBuiltinTemplatePack(CanonArgPack);
5813 // Refresh InsertPos, in case the recursive call above caused rehashing,
5814 // which would invalidate the bucket pointer.
5815 [[maybe_unused]] const auto *Nothing =
5816 SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos);
5817 assert(!Nothing);
5818 }
5819
5820 auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType))
5821 SubstBuiltinTemplatePackType(Canon, ArgPack);
5822 Types.push_back(PackType);
5823 SubstBuiltinTemplatePackTypes.InsertNode(PackType, InsertPos);
5824 return QualType(PackType, 0);
5825}
5826
5827/// Retrieve the template type parameter type for a template
5828/// parameter or parameter pack with the given depth, index, and (optionally)
5829/// name.
5830QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
5831 bool ParameterPack,
5832 TemplateTypeParmDecl *TTPDecl) const {
5833 llvm::FoldingSetNodeID ID;
5834 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
5835 void *InsertPos = nullptr;
5836 TemplateTypeParmType *TypeParm
5837 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5838
5839 if (TypeParm)
5840 return QualType(TypeParm, 0);
5841
5842 if (TTPDecl) {
5843 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
5844 TypeParm = new (*this, alignof(TemplateTypeParmType))
5845 TemplateTypeParmType(Depth, Index, ParameterPack, TTPDecl, Canon);
5846
5847 TemplateTypeParmType *TypeCheck
5848 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
5849 assert(!TypeCheck && "Template type parameter canonical type broken");
5850 (void)TypeCheck;
5851 } else
5852 TypeParm = new (*this, alignof(TemplateTypeParmType)) TemplateTypeParmType(
5853 Depth, Index, ParameterPack, /*TTPDecl=*/nullptr, /*Canon=*/QualType());
5854
5855 Types.push_back(TypeParm);
5856 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
5857
5858 return QualType(TypeParm, 0);
5859}
5860
5863 switch (Keyword) {
5864 // These are just themselves.
5870 return Keyword;
5871
5872 // These are equivalent.
5875
5876 // These are functionally equivalent, so relying on their equivalence is
5877 // IFNDR. By making them equivalent, we disallow overloading, which at least
5878 // can produce a diagnostic.
5881 }
5882 llvm_unreachable("unexpected keyword kind");
5883}
5884
5886 ElaboratedTypeKeyword Keyword, SourceLocation ElaboratedKeywordLoc,
5887 NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKeywordLoc,
5888 TemplateName Name, SourceLocation NameLoc,
5889 const TemplateArgumentListInfo &SpecifiedArgs,
5890 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5892 Keyword, Name, SpecifiedArgs.arguments(), CanonicalArgs, Underlying);
5893
5896 ElaboratedKeywordLoc, QualifierLoc, TemplateKeywordLoc, NameLoc,
5897 SpecifiedArgs);
5898 return TSI;
5899}
5900
5903 ArrayRef<TemplateArgumentLoc> SpecifiedArgs,
5904 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5905 SmallVector<TemplateArgument, 4> SpecifiedArgVec;
5906 SpecifiedArgVec.reserve(SpecifiedArgs.size());
5907 for (const TemplateArgumentLoc &Arg : SpecifiedArgs)
5908 SpecifiedArgVec.push_back(Arg.getArgument());
5909
5910 return getTemplateSpecializationType(Keyword, Template, SpecifiedArgVec,
5911 CanonicalArgs, Underlying);
5912}
5913
5914[[maybe_unused]] static bool
5916 for (const TemplateArgument &Arg : Args)
5917 if (Arg.isPackExpansion())
5918 return true;
5919 return false;
5920}
5921
    ArrayRef<TemplateArgument> Args) const {
  // Precondition: the template name is already canonical.
  assert(Template ==
         getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true));
  // NOTE(review): the opening line of a second assertion was lost in this
  // copy of the file (the continuation below is truncated) — restore it from
  // upstream; it appears to validate Keyword against the dependent-ness of
  // the template name.
         Template.getAsDependentTemplateName()));
#ifndef NDEBUG
  // Precondition: every argument is already canonical.
  for (const auto &Arg : Args)
    assert(Arg.structurallyEquals(getCanonicalTemplateArgument(Arg)));
#endif

  // Unique canonical specializations on (keyword, template, args).
  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, Keyword, Template, Args, QualType(),
                                      *this);
  void *InsertPos = nullptr;
  if (auto *T = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(T, 0);

  // The arguments are allocated inline, directly after the node.
  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
                       sizeof(TemplateArgument) * Args.size(),
                       alignof(TemplateSpecializationType));
  auto *Spec =
      new (Mem) TemplateSpecializationType(Keyword, Template,
                                           /*IsAlias=*/false, Args, QualType());
  assert(Spec->isDependentType() &&
         "canonical template specialization must be dependent");
  Types.push_back(Spec);
  TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
  return QualType(Spec, 0);
}
5953
5956 ArrayRef<TemplateArgument> SpecifiedArgs,
5957 ArrayRef<TemplateArgument> CanonicalArgs, QualType Underlying) const {
5958 const auto *TD = Template.getAsTemplateDecl(/*IgnoreDeduced=*/true);
5959 bool IsTypeAlias = TD && TD->isTypeAlias();
5960 if (Underlying.isNull()) {
5961 TemplateName CanonTemplate =
5962 getCanonicalTemplateName(Template, /*IgnoreDeduced=*/true);
5963 ElaboratedTypeKeyword CanonKeyword =
5964 CanonTemplate.getAsDependentTemplateName()
5967 bool NonCanonical = Template != CanonTemplate || Keyword != CanonKeyword;
5969 if (CanonicalArgs.empty()) {
5970 CanonArgsVec = SmallVector<TemplateArgument, 4>(SpecifiedArgs);
5971 NonCanonical |= canonicalizeTemplateArguments(CanonArgsVec);
5972 CanonicalArgs = CanonArgsVec;
5973 } else {
5974 NonCanonical |= !llvm::equal(
5975 SpecifiedArgs, CanonicalArgs,
5976 [](const TemplateArgument &A, const TemplateArgument &B) {
5977 return A.structurallyEquals(B);
5978 });
5979 }
5980
5981 // We can get here with an alias template when the specialization
5982 // contains a pack expansion that does not match up with a parameter
5983 // pack, or a builtin template which cannot be resolved due to dependency.
5984 assert((!isa_and_nonnull<TypeAliasTemplateDecl>(TD) ||
5985 hasAnyPackExpansions(CanonicalArgs)) &&
5986 "Caller must compute aliased type");
5987 IsTypeAlias = false;
5988
5990 CanonKeyword, CanonTemplate, CanonicalArgs);
5991 if (!NonCanonical)
5992 return Underlying;
5993 }
5994 void *Mem = Allocate(sizeof(TemplateSpecializationType) +
5995 sizeof(TemplateArgument) * SpecifiedArgs.size() +
5996 (IsTypeAlias ? sizeof(QualType) : 0),
5997 alignof(TemplateSpecializationType));
5998 auto *Spec = new (Mem) TemplateSpecializationType(
5999 Keyword, Template, IsTypeAlias, SpecifiedArgs, Underlying);
6000 Types.push_back(Spec);
6001 return QualType(Spec, 0);
6002}
6003
6006 llvm::FoldingSetNodeID ID;
6007 ParenType::Profile(ID, InnerType);
6008
6009 void *InsertPos = nullptr;
6010 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
6011 if (T)
6012 return QualType(T, 0);
6013
6014 QualType Canon = InnerType;
6015 if (!Canon.isCanonical()) {
6016 Canon = getCanonicalType(InnerType);
6017 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
6018 assert(!CheckT && "Paren canonical type broken");
6019 (void)CheckT;
6020 }
6021
6022 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
6023 Types.push_back(T);
6024 ParenTypes.InsertNode(T, InsertPos);
6025 return QualType(T, 0);
6026}
6027
6030 const IdentifierInfo *MacroII) const {
6031 QualType Canon = UnderlyingTy;
6032 if (!Canon.isCanonical())
6033 Canon = getCanonicalType(UnderlyingTy);
6034
6035 auto *newType = new (*this, alignof(MacroQualifiedType))
6036 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
6037 Types.push_back(newType);
6038 return QualType(newType, 0);
6039}
6040
6043 const IdentifierInfo *Name) const {
6044 llvm::FoldingSetNodeID ID;
6045 DependentNameType::Profile(ID, Keyword, NNS, Name);
6046
6047 void *InsertPos = nullptr;
6048 if (DependentNameType *T =
6049 DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos))
6050 return QualType(T, 0);
6051
6052 ElaboratedTypeKeyword CanonKeyword =
6054 NestedNameSpecifier CanonNNS = NNS.getCanonical();
6055
6056 QualType Canon;
6057 if (CanonKeyword != Keyword || CanonNNS != NNS) {
6058 Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
6059 [[maybe_unused]] DependentNameType *T =
6060 DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
6061 assert(!T && "broken canonicalization");
6062 assert(Canon.isCanonical());
6063 }
6064
6065 DependentNameType *T = new (*this, alignof(DependentNameType))
6066 DependentNameType(Keyword, NNS, Name, Canon);
6067 Types.push_back(T);
6068 DependentNameTypes.InsertNode(T, InsertPos);
6069 return QualType(T, 0);
6070}
6071
6073 TemplateArgument Arg;
6074 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
6076 if (TTP->isParameterPack())
6077 ArgType = getPackExpansionType(ArgType, std::nullopt);
6078
6080 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
6081 QualType T =
6082 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
6083 // For class NTTPs, ensure we include the 'const' so the type matches that
6084 // of a real template argument.
6085 // FIXME: It would be more faithful to model this as something like an
6086 // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
6088 if (T->isRecordType()) {
6089 // C++ [temp.param]p8: An id-expression naming a non-type
6090 // template-parameter of class type T denotes a static storage duration
6091 // object of type const T.
6092 T.addConst();
6093 VK = VK_LValue;
6094 } else {
6095 VK = Expr::getValueKindForType(NTTP->getType());
6096 }
6097 Expr *E = new (*this)
6098 DeclRefExpr(*this, NTTP, /*RefersToEnclosingVariableOrCapture=*/false,
6099 T, VK, NTTP->getLocation());
6100
6101 if (NTTP->isParameterPack())
6102 E = new (*this) PackExpansionExpr(E, NTTP->getLocation(), std::nullopt);
6103 Arg = TemplateArgument(E, /*IsCanonical=*/false);
6104 } else {
6105 auto *TTP = cast<TemplateTemplateParmDecl>(Param);
6107 /*Qualifier=*/std::nullopt, /*TemplateKeyword=*/false,
6108 TemplateName(TTP));
6109 if (TTP->isParameterPack())
6110 Arg = TemplateArgument(Name, /*NumExpansions=*/std::nullopt);
6111 else
6112 Arg = TemplateArgument(Name);
6113 }
6114
6115 if (Param->isTemplateParameterPack())
6116 Arg =
6117 TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), Arg);
6118
6119 return Arg;
6120}
6121
6123 UnsignedOrNone NumExpansions,
6124 bool ExpectPackInType) const {
6125 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
6126 "Pack expansions must expand one or more parameter packs");
6127
6128 llvm::FoldingSetNodeID ID;
6129 PackExpansionType::Profile(ID, Pattern, NumExpansions);
6130
6131 void *InsertPos = nullptr;
6132 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
6133 if (T)
6134 return QualType(T, 0);
6135
6136 QualType Canon;
6137 if (!Pattern.isCanonical()) {
6138 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
6139 /*ExpectPackInType=*/false);
6140
6141 // Find the insert position again, in case we inserted an element into
6142 // PackExpansionTypes and invalidated our insert position.
6143 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
6144 }
6145
6146 T = new (*this, alignof(PackExpansionType))
6147 PackExpansionType(Pattern, Canon, NumExpansions);
6148 Types.push_back(T);
6149 PackExpansionTypes.InsertNode(T, InsertPos);
6150 return QualType(T, 0);
6151}
6152
6153/// CmpProtocolNames - Comparison predicate for sorting protocols
6154/// alphabetically.
6155static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
6156 ObjCProtocolDecl *const *RHS) {
6157 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
6158}
6159
6161 if (Protocols.empty()) return true;
6162
6163 if (Protocols[0]->getCanonicalDecl() != Protocols[0])
6164 return false;
6165
6166 for (unsigned i = 1; i != Protocols.size(); ++i)
6167 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
6168 Protocols[i]->getCanonicalDecl() != Protocols[i])
6169 return false;
6170 return true;
6171}
6172
6173static void
6175 // Sort protocols, keyed by name.
6176 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);
6177
6178 // Canonicalize.
6179 for (ObjCProtocolDecl *&P : Protocols)
6180 P = P->getCanonicalDecl();
6181
6182 // Remove duplicates.
6183 auto ProtocolsEnd = llvm::unique(Protocols);
6184 Protocols.erase(ProtocolsEnd, Protocols.end());
6185}
6186
6188 ObjCProtocolDecl * const *Protocols,
6189 unsigned NumProtocols) const {
6190 return getObjCObjectType(BaseType, {}, ArrayRef(Protocols, NumProtocols),
6191 /*isKindOf=*/false);
6192}
6193
6195 QualType baseType,
6196 ArrayRef<QualType> typeArgs,
6198 bool isKindOf) const {
6199 // If the base type is an interface and there aren't any protocols or
6200 // type arguments to add, then the interface type will do just fine.
6201 if (typeArgs.empty() && protocols.empty() && !isKindOf &&
6202 isa<ObjCInterfaceType>(baseType))
6203 return baseType;
6204
6205 // Look in the folding set for an existing type.
6206 llvm::FoldingSetNodeID ID;
6207 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
6208 void *InsertPos = nullptr;
6209 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
6210 return QualType(QT, 0);
6211
6212 // Determine the type arguments to be used for canonicalization,
6213 // which may be explicitly specified here or written on the base
6214 // type.
6215 ArrayRef<QualType> effectiveTypeArgs = typeArgs;
6216 if (effectiveTypeArgs.empty()) {
6217 if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
6218 effectiveTypeArgs = baseObject->getTypeArgs();
6219 }
6220
6221 // Build the canonical type, which has the canonical base type and a
6222 // sorted-and-uniqued list of protocols and the type arguments
6223 // canonicalized.
6224 QualType canonical;
6225 bool typeArgsAreCanonical = llvm::all_of(
6226 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); });
6227 bool protocolsSorted = areSortedAndUniqued(protocols);
6228 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
6229 // Determine the canonical type arguments.
6230 ArrayRef<QualType> canonTypeArgs;
6231 SmallVector<QualType, 4> canonTypeArgsVec;
6232 if (!typeArgsAreCanonical) {
6233 canonTypeArgsVec.reserve(effectiveTypeArgs.size());
6234 for (auto typeArg : effectiveTypeArgs)
6235 canonTypeArgsVec.push_back(getCanonicalType(typeArg));
6236 canonTypeArgs = canonTypeArgsVec;
6237 } else {
6238 canonTypeArgs = effectiveTypeArgs;
6239 }
6240
6241 ArrayRef<ObjCProtocolDecl *> canonProtocols;
6242 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
6243 if (!protocolsSorted) {
6244 canonProtocolsVec.append(protocols.begin(), protocols.end());
6245 SortAndUniqueProtocols(canonProtocolsVec);
6246 canonProtocols = canonProtocolsVec;
6247 } else {
6248 canonProtocols = protocols;
6249 }
6250
6251 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
6252 canonProtocols, isKindOf);
6253
6254 // Regenerate InsertPos.
6255 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
6256 }
6257
6258 unsigned size = sizeof(ObjCObjectTypeImpl);
6259 size += typeArgs.size() * sizeof(QualType);
6260 size += protocols.size() * sizeof(ObjCProtocolDecl *);
6261 void *mem = Allocate(size, alignof(ObjCObjectTypeImpl));
6262 auto *T =
6263 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
6264 isKindOf);
6265
6266 Types.push_back(T);
6267 ObjCObjectTypes.InsertNode(T, InsertPos);
6268 return QualType(T, 0);
6269}
6270
6271/// Apply Objective-C protocol qualifiers to the given type.
6272/// If this is for the canonical type of a type parameter, we can apply
6273/// protocol qualifiers on the ObjCObjectPointerType.
6276 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
6277 bool allowOnPointerType) const {
6278 hasError = false;
6279
6280 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
6281 return getObjCTypeParamType(objT->getDecl(), protocols);
6282 }
6283
6284 // Apply protocol qualifiers to ObjCObjectPointerType.
6285 if (allowOnPointerType) {
6286 if (const auto *objPtr =
6287 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
6288 const ObjCObjectType *objT = objPtr->getObjectType();
6289 // Merge protocol lists and construct ObjCObjectType.
6291 protocolsVec.append(objT->qual_begin(),
6292 objT->qual_end());
6293 protocolsVec.append(protocols.begin(), protocols.end());
6294 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
6296 objT->getBaseType(),
6297 objT->getTypeArgsAsWritten(),
6298 protocols,
6299 objT->isKindOfTypeAsWritten());
6301 }
6302 }
6303
6304 // Apply protocol qualifiers to ObjCObjectType.
6305 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
6306 // FIXME: Check for protocols to which the class type is already
6307 // known to conform.
6308
6309 return getObjCObjectType(objT->getBaseType(),
6310 objT->getTypeArgsAsWritten(),
6311 protocols,
6312 objT->isKindOfTypeAsWritten());
6313 }
6314
6315 // If the canonical type is ObjCObjectType, ...
6316 if (type->isObjCObjectType()) {
6317 // Silently overwrite any existing protocol qualifiers.
6318 // TODO: determine whether that's the right thing to do.
6319
6320 // FIXME: Check for protocols to which the class type is already
6321 // known to conform.
6322 return getObjCObjectType(type, {}, protocols, false);
6323 }
6324
6325 // id<protocol-list>
6326 if (type->isObjCIdType()) {
6327 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
6328 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
6329 objPtr->isKindOfType());
6331 }
6332
6333 // Class<protocol-list>
6334 if (type->isObjCClassType()) {
6335 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
6336 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
6337 objPtr->isKindOfType());
6339 }
6340
6341 hasError = true;
6342 return type;
6343}
6344
6347 ArrayRef<ObjCProtocolDecl *> protocols) const {
6348 // Look in the folding set for an existing type.
6349 llvm::FoldingSetNodeID ID;
6350 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
6351 void *InsertPos = nullptr;
6352 if (ObjCTypeParamType *TypeParam =
6353 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
6354 return QualType(TypeParam, 0);
6355
6356 // We canonicalize to the underlying type.
6357 QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
6358 if (!protocols.empty()) {
6359 // Apply the protocol qualifers.
6360 bool hasError;
6362 Canonical, protocols, hasError, true /*allowOnPointerType*/));
6363 assert(!hasError && "Error when apply protocol qualifier to bound type");
6364 }
6365
6366 unsigned size = sizeof(ObjCTypeParamType);
6367 size += protocols.size() * sizeof(ObjCProtocolDecl *);
6368 void *mem = Allocate(size, alignof(ObjCTypeParamType));
6369 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
6370
6371 Types.push_back(newType);
6372 ObjCTypeParamTypes.InsertNode(newType, InsertPos);
6373 return QualType(newType, 0);
6374}
6375
6377 ObjCTypeParamDecl *New) const {
6378 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
6379 // Update TypeForDecl after updating TypeSourceInfo.
6380 auto *NewTypeParamTy = cast<ObjCTypeParamType>(New->TypeForDecl);
6382 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
6383 QualType UpdatedTy = getObjCTypeParamType(New, protocols);
6384 New->TypeForDecl = UpdatedTy.getTypePtr();
6385}
6386
6387/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
6388/// protocol list adopt all protocols in QT's qualified-id protocol
6389/// list.
6391 ObjCInterfaceDecl *IC) {
6392 if (!QT->isObjCQualifiedIdType())
6393 return false;
6394
6395 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
6396 // If both the right and left sides have qualifiers.
6397 for (auto *Proto : OPT->quals()) {
6398 if (!IC->ClassImplementsProtocol(Proto, false))
6399 return false;
6400 }
6401 return true;
6402 }
6403 return false;
6404}
6405
6406/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
6407/// QT's qualified-id protocol list adopt all protocols in IDecl's list
6408/// of protocols.
6410 ObjCInterfaceDecl *IDecl) {
6411 if (!QT->isObjCQualifiedIdType())
6412 return false;
6413 const auto *OPT = QT->getAs<ObjCObjectPointerType>();
6414 if (!OPT)
6415 return false;
6416 if (!IDecl->hasDefinition())
6417 return false;
6419 CollectInheritedProtocols(IDecl, InheritedProtocols);
6420 if (InheritedProtocols.empty())
6421 return false;
6422 // Check that if every protocol in list of id<plist> conforms to a protocol
6423 // of IDecl's, then bridge casting is ok.
6424 bool Conforms = false;
6425 for (auto *Proto : OPT->quals()) {
6426 Conforms = false;
6427 for (auto *PI : InheritedProtocols) {
6428 if (ProtocolCompatibleWithProtocol(Proto, PI)) {
6429 Conforms = true;
6430 break;
6431 }
6432 }
6433 if (!Conforms)
6434 break;
6435 }
6436 if (Conforms)
6437 return true;
6438
6439 for (auto *PI : InheritedProtocols) {
6440 // If both the right and left sides have qualifiers.
6441 bool Adopts = false;
6442 for (auto *Proto : OPT->quals()) {
6443 // return 'true' if 'PI' is in the inheritance hierarchy of Proto
6444 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
6445 break;
6446 }
6447 if (!Adopts)
6448 return false;
6449 }
6450 return true;
6451}
6452
6453/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
6454/// the given object type.
6456 llvm::FoldingSetNodeID ID;
6457 ObjCObjectPointerType::Profile(ID, ObjectT);
6458
6459 void *InsertPos = nullptr;
6460 if (ObjCObjectPointerType *QT =
6461 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
6462 return QualType(QT, 0);
6463
6464 // Find the canonical object type.
6465 QualType Canonical;
6466 if (!ObjectT.isCanonical()) {
6467 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
6468
6469 // Regenerate InsertPos.
6470 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
6471 }
6472
6473 // No match.
6474 void *Mem =
6476 auto *QType =
6477 new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
6478
6479 Types.push_back(QType);
6480 ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
6481 return QualType(QType, 0);
6482}
6483
6484/// getObjCInterfaceType - Return the unique reference to the type for the
6485/// specified ObjC interface decl. The list of protocols is optional.
6487 ObjCInterfaceDecl *PrevDecl) const {
6488 if (Decl->TypeForDecl)
6489 return QualType(Decl->TypeForDecl, 0);
6490
6491 if (PrevDecl) {
6492 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
6493 Decl->TypeForDecl = PrevDecl->TypeForDecl;
6494 return QualType(PrevDecl->TypeForDecl, 0);
6495 }
6496
6497 // Prefer the definition, if there is one.
6498 if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
6499 Decl = Def;
6500
6501 void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType));
6502 auto *T = new (Mem) ObjCInterfaceType(Decl);
6503 Decl->TypeForDecl = T;
6504 Types.push_back(T);
6505 return QualType(T, 0);
6506}
6507
6508/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
6509/// TypeOfExprType AST's (since expression's are never shared). For example,
6510/// multiple declarations that refer to "typeof(x)" all contain different
6511/// DeclRefExpr's. This doesn't effect the type checker, since it operates
6512/// on canonical type's (which are always unique).
6514 TypeOfExprType *toe;
6515 if (tofExpr->isTypeDependent()) {
6516 llvm::FoldingSetNodeID ID;
6517 DependentTypeOfExprType::Profile(ID, *this, tofExpr,
6518 Kind == TypeOfKind::Unqualified);
6519
6520 void *InsertPos = nullptr;
6522 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
6523 if (Canon) {
6524 // We already have a "canonical" version of an identical, dependent
6525 // typeof(expr) type. Use that as our canonical type.
6526 toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType(
6527 *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
6528 } else {
6529 // Build a new, canonical typeof(expr) type.
6530 Canon = new (*this, alignof(DependentTypeOfExprType))
6531 DependentTypeOfExprType(*this, tofExpr, Kind);
6532 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
6533 toe = Canon;
6534 }
6535 } else {
6536 QualType Canonical = getCanonicalType(tofExpr->getType());
6537 toe = new (*this, alignof(TypeOfExprType))
6538 TypeOfExprType(*this, tofExpr, Kind, Canonical);
6539 }
6540 Types.push_back(toe);
6541 return QualType(toe, 0);
6542}
6543
6544/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
6545/// TypeOfType nodes. The only motivation to unique these nodes would be
6546/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
6547/// an issue. This doesn't affect the type checker, since it operates
6548/// on canonical types (which are always unique).
6550 QualType Canonical = getCanonicalType(tofType);
6551 auto *tot = new (*this, alignof(TypeOfType))
6552 TypeOfType(*this, tofType, Canonical, Kind);
6553 Types.push_back(tot);
6554 return QualType(tot, 0);
6555}
6556
6557/// getReferenceQualifiedType - Given an expr, will return the type for
6558/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
6559/// and class member access into account.
6561 // C++11 [dcl.type.simple]p4:
6562 // [...]
6563 QualType T = E->getType();
6564 switch (E->getValueKind()) {
6565 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
6566 // type of e;
6567 case VK_XValue:
6568 return getRValueReferenceType(T);
6569 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
6570 // type of e;
6571 case VK_LValue:
6572 return getLValueReferenceType(T);
6573 // - otherwise, decltype(e) is the type of e.
6574 case VK_PRValue:
6575 return T;
6576 }
6577 llvm_unreachable("Unknown value kind");
6578}
6579
6580/// Unlike many "get<Type>" functions, we don't unique DecltypeType
6581/// nodes. This would never be helpful, since each such type has its own
6582/// expression, and would not give a significant memory saving, since there
6583/// is an Expr tree under each such type.
6585 // C++11 [temp.type]p2:
6586 // If an expression e involves a template parameter, decltype(e) denotes a
6587 // unique dependent type. Two such decltype-specifiers refer to the same
6588 // type only if their expressions are equivalent (14.5.6.1).
6589 QualType CanonType;
6590 if (!E->isInstantiationDependent()) {
6591 CanonType = getCanonicalType(UnderlyingType);
6592 } else if (!UnderlyingType.isNull()) {
6593 CanonType = getDecltypeType(E, QualType());
6594 } else {
6595 llvm::FoldingSetNodeID ID;
6596 DependentDecltypeType::Profile(ID, *this, E);
6597
6598 void *InsertPos = nullptr;
6599 if (DependentDecltypeType *Canon =
6600 DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos))
6601 return QualType(Canon, 0);
6602
6603 // Build a new, canonical decltype(expr) type.
6604 auto *DT =
6605 new (*this, alignof(DependentDecltypeType)) DependentDecltypeType(E);
6606 DependentDecltypeTypes.InsertNode(DT, InsertPos);
6607 Types.push_back(DT);
6608 return QualType(DT, 0);
6609 }
6610 auto *DT = new (*this, alignof(DecltypeType))
6611 DecltypeType(E, UnderlyingType, CanonType);
6612 Types.push_back(DT);
6613 return QualType(DT, 0);
6614}
6615
6617 bool FullySubstituted,
6618 ArrayRef<QualType> Expansions,
6619 UnsignedOrNone Index) const {
6620 QualType Canonical;
6621 if (FullySubstituted && Index) {
6622 Canonical = getCanonicalType(Expansions[*Index]);
6623 } else {
6624 llvm::FoldingSetNodeID ID;
6625 PackIndexingType::Profile(ID, *this, Pattern.getCanonicalType(), IndexExpr,
6626 FullySubstituted, Expansions);
6627 void *InsertPos = nullptr;
6628 PackIndexingType *Canon =
6629 DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
6630 if (!Canon) {
6631 void *Mem = Allocate(
6632 PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
6634 Canon =
6635 new (Mem) PackIndexingType(QualType(), Pattern.getCanonicalType(),
6636 IndexExpr, FullySubstituted, Expansions);
6637 DependentPackIndexingTypes.InsertNode(Canon, InsertPos);
6638 }
6639 Canonical = QualType(Canon, 0);
6640 }
6641
6642 void *Mem =
6643 Allocate(PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
6645 auto *T = new (Mem) PackIndexingType(Canonical, Pattern, IndexExpr,
6646 FullySubstituted, Expansions);
6647 Types.push_back(T);
6648 return QualType(T, 0);
6649}
6650
6651/// getUnaryTransformationType - Build the type for a unary type transform
6652/// (e.g. __underlying_type). Nodes are uniqued via a folding set.
  // NOTE(review): the function's declaration lines are missing from this
  // rendering; parameters are the base (operand) type, an optional
  // already-computed underlying type, and the transform kind — confirm
  // against the original source.
6655 UnaryTransformType::UTTKind Kind) const {
6656
  // Profile on (base, underlying, kind) and reuse an existing node if one
  // was already created.
6657 llvm::FoldingSetNodeID ID;
6658 UnaryTransformType::Profile(ID, BaseType, UnderlyingType, Kind);
6659
6660 void *InsertPos = nullptr;
6661 if (UnaryTransformType *UT =
6662 UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos))
6663 return QualType(UT, 0);
6664
6665 QualType CanonType;
6666 if (!BaseType->isDependentType()) {
  // Non-dependent: the transform result is known, so canonicalize it.
6667 CanonType = UnderlyingType.getCanonicalType();
6668 } else {
  // Dependent: the transform cannot be computed yet; canonicalize on the
  // base type alone, dropping any provided underlying type.
6669 assert(UnderlyingType.isNull() || BaseType == UnderlyingType);
6670 UnderlyingType = QualType();
6671 if (QualType CanonBase = BaseType.getCanonicalType();
6672 BaseType != CanonBase) {
  // Build (or find) the canonical node for the canonical base first.
6673 CanonType = getUnaryTransformType(CanonBase, QualType(), Kind);
6674 assert(CanonType.isCanonical());
6675
6676 // Find the insertion position again.
6677 [[maybe_unused]] UnaryTransformType *UT =
6678 UnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
6679 assert(!UT && "broken canonicalization");
6680 }
6681 }
6682
6683 auto *UT = new (*this, alignof(UnaryTransformType))
6684 UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
6685 UnaryTransformTypes.InsertNode(UT, InsertPos);
6686 Types.push_back(UT);
6687 return QualType(UT, 0);
6688}
6689
6690QualType ASTContext::getAutoTypeInternal(
6691 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
6692 bool IsPack, TemplateDecl *TypeConstraintConcept,
6693 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
6694 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
6695 !TypeConstraintConcept && !IsDependent)
6696 return getAutoDeductType();
6697
6698 // Look in the folding set for an existing type.
6699 llvm::FoldingSetNodeID ID;
6700 bool IsDeducedDependent =
6701 isa_and_nonnull<TemplateTemplateParmDecl>(TypeConstraintConcept) ||
6702 (!DeducedType.isNull() && DeducedType->isDependentType());
6703 AutoType::Profile(ID, *this, DeducedType, Keyword,
6704 IsDependent || IsDeducedDependent, TypeConstraintConcept,
6705 TypeConstraintArgs);
6706 if (auto const AT_iter = AutoTypes.find(ID); AT_iter != AutoTypes.end())
6707 return QualType(AT_iter->getSecond(), 0);
6708
6709 QualType Canon;
6710 if (!IsCanon) {
6711 if (!DeducedType.isNull()) {
6712 Canon = DeducedType.getCanonicalType();
6713 } else if (TypeConstraintConcept) {
6714 bool AnyNonCanonArgs = false;
6715 auto *CanonicalConcept =
6716 cast<TemplateDecl>(TypeConstraintConcept->getCanonicalDecl());
6717 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
6718 *this, TypeConstraintArgs, AnyNonCanonArgs);
6719 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
6720 Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack,
6721 CanonicalConcept, CanonicalConceptArgs,
6722 /*IsCanon=*/true);
6723 }
6724 }
6725 }
6726
6727 void *Mem = Allocate(sizeof(AutoType) +
6728 sizeof(TemplateArgument) * TypeConstraintArgs.size(),
6729 alignof(AutoType));
6730 auto *AT = new (Mem) AutoType(
6731 DeducedType, Keyword,
6732 (IsDependent ? TypeDependence::DependentInstantiation
6733 : TypeDependence::None) |
6734 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
6735 Canon, TypeConstraintConcept, TypeConstraintArgs);
6736#ifndef NDEBUG
6737 llvm::FoldingSetNodeID InsertedID;
6738 AT->Profile(InsertedID, *this);
6739 assert(InsertedID == ID && "ID does not match");
6740#endif
6741 Types.push_back(AT);
6742 AutoTypes.try_emplace(ID, AT);
6743 return QualType(AT, 0);
6744}
6745
6746/// getAutoType - Return the uniqued reference to the 'auto' type which has been
6747/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
6748/// canonical deduced-but-dependent 'auto' type.
  // Thin wrapper over getAutoTypeInternal that only validates the invariants
  // relating IsPack, IsDependent and DeducedType.
  // NOTE(review): the parameter-list line is missing from this rendering.
6749QualType
6751 bool IsDependent, bool IsPack,
6752 TemplateDecl *TypeConstraintConcept,
6753 ArrayRef<TemplateArgument> TypeConstraintArgs) const {
6754 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
6755 assert((!IsDependent || DeducedType.isNull()) &&
6756 "A dependent auto should be undeduced");
6757 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
6758 TypeConstraintConcept, TypeConstraintArgs);
6759}
6760
  // Strip a type-constraint from a top-level constrained 'auto' /
  // 'decltype(auto)' in T, preserving T's qualifiers; returns T unchanged
  // when there is nothing to strip. NOTE(review): the declaration line is
  // missing from this rendering — confirm the name against the original.
6762 QualType CanonT = T.getNonPackExpansionType().getCanonicalType();
6763
6764 // Remove a type-constraint from a top-level auto or decltype(auto).
6765 if (auto *AT = CanonT->getAs<AutoType>()) {
6766 if (!AT->isConstrained())
6767 return T;
  // Rebuild the same auto without the constraint, re-applying T's
  // qualifiers on top.
6768 return getQualifiedType(getAutoType(QualType(), AT->getKeyword(),
6769 AT->isDependentType(),
6770 AT->containsUnexpandedParameterPack()),
6771 T.getQualifiers());
6772 }
6773
6774 // FIXME: We only support constrained auto at the top level in the type of a
6775 // non-type template parameter at the moment. Once we lift that restriction,
6776 // we'll need to recursively build types containing auto here.
6777 assert(!CanonT->getContainedAutoType() ||
6778 !CanonT->getContainedAutoType()->isConstrained());
6779 return T;
6780}
6781
  // Build or retrieve the uniqued DeducedTemplateSpecializationType.
  // NOTE(review): the second declaration line (keyword/template/deduced-type
  // parameters) is missing from this rendering.
6782QualType ASTContext::getDeducedTemplateSpecializationTypeInternal(
6784 bool IsDependent, QualType Canon) const {
6785 // Look in the folding set for an existing type.
6786 void *InsertPos = nullptr;
6787 llvm::FoldingSetNodeID ID;
6788 DeducedTemplateSpecializationType::Profile(ID, Keyword, Template, DeducedType,
6789 IsDependent);
6790 if (DeducedTemplateSpecializationType *DTST =
6791 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
6792 return QualType(DTST, 0);
6793
6794 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
6795 DeducedTemplateSpecializationType(Keyword, Template, DeducedType,
6796 IsDependent, Canon);
6797
6798#ifndef NDEBUG
  // The new node must hash identically to the key used for the lookup above.
6799 llvm::FoldingSetNodeID TempID;
6800 DTST->Profile(TempID);
6801 assert(ID == TempID && "ID does not match");
6802#endif
6803 Types.push_back(DTST);
6804 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
6805 return QualType(DTST, 0);
6806}
6807
6808/// Return the uniqued reference to the deduced template specialization type
6809/// which has been deduced to the given type, or to the canonical undeduced
6810/// such type, or the canonical deduced-but-dependent such type.
  // NOTE(review): the declaration lines are missing from this rendering.
6813 bool IsDependent) const {
6814 // FIXME: This could save an extra hash table lookup if it handled all the
6815 // parameters already being canonical.
6816 // FIXME: Can this be formed from a DependentTemplateName, such that the
6817 // keyword should be part of the canonical type?
  // For an undeduced type, the canonical node is the undeduced/dependent
  // internal node; otherwise canonicalize the deduced type itself.
6818 QualType Canon =
6819 DeducedType.isNull()
6820 ? getDeducedTemplateSpecializationTypeInternal(
6822 QualType(), IsDependent, QualType())
6823 : DeducedType.getCanonicalType();
6824 return getDeducedTemplateSpecializationTypeInternal(
6825 Keyword, Template, DeducedType, IsDependent, Canon);
6826}
6827
6828/// getAtomicType - Return the uniqued reference to the atomic type for
6829/// the given value type.
  // NOTE(review): the declaration line and the AtomicType::Profile call are
  // missing from this rendering.
6831 // Unique atomic types, to guarantee there is only one node for a
6832 // particular value type.
6833 llvm::FoldingSetNodeID ID;
6835
6836 void *InsertPos = nullptr;
6837 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
6838 return QualType(AT, 0);
6839
6840 // If the atomic value type isn't canonical, this won't be a canonical type
6841 // either, so fill in the canonical type field.
6842 QualType Canonical;
6843 if (!T.isCanonical()) {
  // Recursing may have created intermediate nodes, so the insert position
  // must be refreshed below.
6844 Canonical = getAtomicType(getCanonicalType(T));
6845
6846 // Get the new insert position for the node we care about.
6847 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
6848 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
6849 }
6850 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
6851 Types.push_back(New);
6852 AtomicTypes.InsertNode(New, InsertPos);
6853 return QualType(New, 0);
6854}
6855
6856/// getAutoDeductType - Get type pattern for deducing against 'auto'.
  // Lazily builds a singleton undeduced, unconstrained AutoType; note it is
  // allocated directly rather than going through the AutoTypes map.
6858 if (AutoDeductTy.isNull())
6859 AutoDeductTy = QualType(new (*this, alignof(AutoType))
6860 AutoType(QualType(), AutoTypeKeyword::Auto,
6861 TypeDependence::None, QualType(),
6862 /*concept*/ nullptr, /*args*/ {}),
6863 0);
6864 return AutoDeductTy;
6865}
6866
6867/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
  // NOTE(review): the line that lazily initializes AutoRRefDeductTy (building
  // the rvalue-reference-to-auto pattern) is missing from this rendering.
6869 if (AutoRRefDeductTy.isNull())
6871 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
6872 return AutoRRefDeductTy;
6873}
6874
6875/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
6876/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
6877/// needs to agree with the definition in <stddef.h>.
  // NOTE(review): declaration line(s) missing from this rendering.
6881
6883 return getFromTargetType(Target->getSizeType());
6884}
6885
6886/// Return the unique signed counterpart of the integer type
6887/// corresponding to size_t.
  // NOTE(review): the bodies/declarations of the signed-size and
  // ptrdiff accessors are missing from this rendering.
6891
6892/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
6893/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
6897
6898/// Return the unique unsigned counterpart of "ptrdiff_t"
6899/// integer type. The standard (C11 7.21.6.1p7) refers to this type
6900/// in the definition of %tu format specifier.
6902 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default));
6903}
6904
6905/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
  // Target-derived; NOTE(review): declaration lines missing from this rendering.
6907 return getFromTargetType(Target->getIntMaxType());
6908}
6909
6910/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
6912 return getFromTargetType(Target->getUIntMaxType());
6913}
6914
6915/// getSignedWCharType - Return the type of "signed wchar_t".
6916/// Used when in C++, as a GCC extension.
  // NOTE(review): declaration lines missing from this rendering.
6918 // FIXME: derive from "Target" ?
6919 return WCharTy;
6920}
6921
6922/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
6923/// Used when in C++, as a GCC extension.
6925 // FIXME: derive from "Target" ?
6926 return UnsignedIntTy;
6927}
6928
  // Target-derived intptr_t accessor; NOTE(review): the declarations of the
  // intptr/uintptr accessors are missing from this rendering.
6930 return getFromTargetType(Target->getIntPtrType());
6931}
6932
6936
6937/// Return the unique type for "pid_t" defined in
6938/// <sys/types.h>. We need this to compute the correct type for vfork().
6940 return getFromTargetType(Target->getProcessIDType());
6941}
6942
6943//===----------------------------------------------------------------------===//
6944// Type Operators
6945//===----------------------------------------------------------------------===//
6946
6948 // Push qualifiers into arrays, and then discard any remaining
6949 // qualifiers.
  // NOTE(review): this function's declaration, the declaration of `Result`,
  // the first branch of the if-chain, and the final return statement are all
  // missing from this rendering — confirm against the original source.
6950 T = getCanonicalType(T);
6952 const Type *Ty = T.getTypePtr();
6956 } else if (isa<ArrayType>(Ty)) {
6958 } else if (isa<FunctionType>(Ty)) {
  // Function types decay to pointer-to-function.
6959 Result = getPointerType(QualType(Ty, 0));
6960 } else {
6961 Result = QualType(Ty, 0);
6962 }
6963
6965}
6966
  // Strip qualifiers from a type, looking through arrays: returns the
  // unqualified type and accumulates all removed qualifiers into `quals`.
  // NOTE(review): the declaration line is missing from this rendering.
6968 Qualifiers &quals) const {
6969 SplitQualType splitType = type.getSplitUnqualifiedType();
6970
6971 // FIXME: getSplitUnqualifiedType() actually walks all the way to
6972 // the unqualified desugared type and then drops it on the floor.
6973 // We then have to strip that sugar back off with
6974 // getUnqualifiedDesugaredType(), which is silly.
6975 const auto *AT =
6976 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
6977
6978 // If we don't have an array, just use the results in splitType.
6979 if (!AT) {
6980 quals = splitType.Quals;
6981 return QualType(splitType.Ty, 0);
6982 }
6983
6984 // Otherwise, recurse on the array's element type.
6985 QualType elementType = AT->getElementType();
6986 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
6987
6988 // If that didn't change the element type, AT has no qualifiers, so we
6989 // can just use the results in splitType.
6990 if (elementType == unqualElementType) {
6991 assert(quals.empty()); // from the recursive call
6992 quals = splitType.Quals;
6993 return QualType(splitType.Ty, 0);
6994 }
6995
6996 // Otherwise, add in the qualifiers from the outermost type, then
6997 // build the type back up.
6998 quals.addConsistentQualifiers(splitType.Quals);
6999
  // Rebuild the same kind of array over the now-unqualified element type.
7000 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
7001 return getConstantArrayType(unqualElementType, CAT->getSize(),
7002 CAT->getSizeExpr(), CAT->getSizeModifier(), 0);
7003 }
7004
7005 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
7006 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
7007 }
7008
7009 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
7010 return getVariableArrayType(unqualElementType, VAT->getSizeExpr(),
7011 VAT->getSizeModifier(),
7012 VAT->getIndexTypeCVRQualifiers());
7013 }
7014
7015 const auto *DSAT = cast<DependentSizedArrayType>(AT);
7016 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
7017 DSAT->getSizeModifier(), 0);
7018}
7019
7020/// Attempt to unwrap two types that may both be array types with the same bound
7021/// (or both be array types of unknown bound) for the purpose of comparing the
7022/// cv-decomposition of two types per C++ [conv.qual].
7023///
7024/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
7025/// C++20 [conv.qual], if permitted by the current language mode.
  // NOTE(review): the declaration line and the tails of the two bound-match
  // conditions are missing from this rendering.
7027 bool AllowPiMismatch) const {
7028 while (true) {
7029 auto *AT1 = getAsArrayType(T1);
7030 if (!AT1)
7031 return;
7032
7033 auto *AT2 = getAsArrayType(T2);
7034 if (!AT2)
7035 return;
7036
7037 // If we don't have two array types with the same constant bound nor two
7038 // incomplete array types, we've unwrapped everything we can.
7039 // C++20 also permits one type to be a constant array type and the other
7040 // to be an incomplete array type.
7041 // FIXME: Consider also unwrapping array of unknown bound and VLA.
7042 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
7043 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
7044 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
7045 (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
7047 return;
7048 } else if (isa<IncompleteArrayType>(AT1)) {
7049 if (!(isa<IncompleteArrayType>(AT2) ||
7050 (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
7052 return;
7053 } else {
7054 return;
7055 }
7056
  // Both sides matched: peel one array layer and loop again.
7057 T1 = AT1->getElementType();
7058 T2 = AT2->getElementType();
7059 }
7060}
7061
7062/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
7063///
7064/// If T1 and T2 are both pointer types of the same kind, or both array types
7065/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
7066/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
7067///
7068/// This function will typically be called in a loop that successively
7069/// "unwraps" pointer and pointer-to-member types to compare them at each
7070/// level.
7071///
7072/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
7073/// C++20 [conv.qual], if permitted by the current language mode.
7074///
7075/// \return \c true if a pointer type was unwrapped, \c false if we reached a
7076/// pair of types that can't be unwrapped further.
  // NOTE(review): the declaration line is missing from this rendering.
7078 bool AllowPiMismatch) const {
  // Arrays are unwrapped first so that pointer layers line up.
7079 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);
7080
7081 const auto *T1PtrType = T1->getAs<PointerType>();
7082 const auto *T2PtrType = T2->getAs<PointerType>();
7083 if (T1PtrType && T2PtrType) {
7084 T1 = T1PtrType->getPointeeType();
7085 T2 = T2PtrType->getPointeeType();
7086 return true;
7087 }
7088
  // Member pointers must agree on the class and on the qualifier spelling
  // (canonically) before their pointee layers may be unwrapped.
7089 if (const auto *T1MPType = T1->getAs<MemberPointerType>(),
7090 *T2MPType = T2->getAs<MemberPointerType>();
7091 T1MPType && T2MPType) {
7092 if (auto *RD1 = T1MPType->getMostRecentCXXRecordDecl(),
7093 *RD2 = T2MPType->getMostRecentCXXRecordDecl();
7094 RD1 != RD2 && RD1->getCanonicalDecl() != RD2->getCanonicalDecl())
7095 return false;
7096 if (T1MPType->getQualifier().getCanonical() !=
7097 T2MPType->getQualifier().getCanonical())
7098 return false;
7099 T1 = T1MPType->getPointeeType();
7100 T2 = T2MPType->getPointeeType();
7101 return true;
7102 }
7103
7104 if (getLangOpts().ObjC) {
7105 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
7106 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
7107 if (T1OPType && T2OPType) {
7108 T1 = T1OPType->getPointeeType();
7109 T2 = T2OPType->getPointeeType();
7110 return true;
7111 }
7112 }
7113
7114 // FIXME: Block pointers, too?
7115
7116 return false;
7117}
7118
  // Determine whether two types are similar per C++ [conv.qual]: equal after
  // discarding cv-qualifiers at every level. NOTE(review): the declaration
  // line is missing from this rendering.
7120 while (true) {
7121 Qualifiers Quals;
  // Qualifiers are deliberately discarded (same Quals reused for both).
7122 T1 = getUnqualifiedArrayType(T1, Quals);
7123 T2 = getUnqualifiedArrayType(T2, Quals);
7124 if (hasSameType(T1, T2))
7125 return true;
7126 if (!UnwrapSimilarTypes(T1, T2))
7127 return false;
7128 }
7129}
7130
  // Like similarity, but the non-CVR qualifiers at each level must match
  // exactly (only const/volatile/restrict may differ). NOTE(review): the
  // declaration line is missing from this rendering.
7132 while (true) {
7133 Qualifiers Quals1, Quals2;
7134 T1 = getUnqualifiedArrayType(T1, Quals1);
7135 T2 = getUnqualifiedArrayType(T2, Quals2);
7136
  // Ignore CVR differences, but any other qualifier mismatch is fatal.
7137 Quals1.removeCVRQualifiers();
7138 Quals2.removeCVRQualifiers();
7139 if (Quals1 != Quals2)
7140 return false;
7141
7142 if (hasSameType(T1, T2))
7143 return true;
7144
7145 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
7146 return false;
7147 }
7148}
7149
  // Map a TemplateName to the DeclarationNameInfo used to name it at the
  // given location. NOTE(review): the declaration line and most of the
  // switch's case labels are missing from this rendering — each group below
  // handles one TemplateName kind.
7152 SourceLocation NameLoc) const {
7153 switch (Name.getKind()) {
7156 // DNInfo work in progress: CHECKME: what about DNLoc?
7158 NameLoc);
7159
7162 // DNInfo work in progress: CHECKME: what about DNLoc?
7163 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
7164 }
7165
7168 return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
7169 }
7170
  // Dependent names: either an identifier or an operator name.
7174 DeclarationName DName;
7175 if (const IdentifierInfo *II = TN.getIdentifier()) {
7176 DName = DeclarationNames.getIdentifier(II);
7177 return DeclarationNameInfo(DName, NameLoc);
7178 } else {
7179 DName = DeclarationNames.getCXXOperatorName(TN.getOperator());
7180 // DNInfo work in progress: FIXME: source locations?
7181 DeclarationNameLoc DNLoc =
7183 return DeclarationNameInfo(DName, NameLoc, DNLoc);
7184 }
7185 }
7186
  // Substituted template template parameters are named by the parameter.
7190 return DeclarationNameInfo(subst->getParameter()->getDeclName(),
7191 NameLoc);
7192 }
7193
7198 NameLoc);
7199 }
7202 NameLoc);
  // Deduced template names delegate to their underlying template.
7205 return getNameForTemplate(DTS->getUnderlying(), NameLoc);
7206 }
7207 }
7208
7209 llvm_unreachable("bad template name kind!");
7210}
7211
  // Return the default argument of a template parameter declaration, or
  // nullptr if it has none. NOTE(review): the line naming this helper and
  // its parameter is missing from this rendering.
7212static const TemplateArgument *
  // Generic lambda: all three parameter kinds expose the same
  // hasDefaultArgument()/getDefaultArgument() shape.
7214 auto handleParam = [](auto *TP) -> const TemplateArgument * {
7215 if (!TP->hasDefaultArgument())
7216 return nullptr;
7217 return &TP->getDefaultArgument().getArgument();
7218 };
7219 switch (P->getKind()) {
7220 case NamedDecl::TemplateTypeParm:
7221 return handleParam(cast<TemplateTypeParmDecl>(P));
7222 case NamedDecl::NonTypeTemplateParm:
7223 return handleParam(cast<NonTypeTemplateParmDecl>(P));
7224 case NamedDecl::TemplateTemplateParm:
7225 return handleParam(cast<TemplateTemplateParmDecl>(P));
7226 default:
7227 llvm_unreachable("Unexpected template parameter kind");
7228 }
7229}
7230
  // Compute the canonical form of a TemplateName. NOTE(review): the
  // declaration line and several case labels of the switch are missing from
  // this rendering.
7232 bool IgnoreDeduced) const {
  // First strip all sugar (optionally including deduced names).
7233 while (std::optional<TemplateName> UnderlyingOrNone =
7234 Name.desugar(IgnoreDeduced))
7235 Name = *UnderlyingOrNone;
7236
7237 switch (Name.getKind()) {
7240 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
7242
7243 // The canonical template name is the canonical template declaration.
7244 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
7245 }
7246
7249 llvm_unreachable("cannot canonicalize unresolved template");
7250
  // Dependent template names canonicalize their qualifier and always carry
  // the 'template' keyword.
7253 assert(DTN && "Non-dependent template names must refer to template decls.");
7254 NestedNameSpecifier Qualifier = DTN->getQualifier();
7255 NestedNameSpecifier CanonQualifier = Qualifier.getCanonical();
7256 if (Qualifier != CanonQualifier || !DTN->hasTemplateKeyword())
7257 return getDependentTemplateName({CanonQualifier, DTN->getName(),
7258 /*HasTemplateKeyword=*/true});
7259 return Name;
7260 }
7261
7265 TemplateArgument canonArgPack =
7268 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(),
7269 subst->getIndex(), subst->getFinal());
7270 }
7272 assert(IgnoreDeduced == false);
7274 DefaultArguments DefArgs = DTS->getDefaultArguments();
7275 TemplateName Underlying = DTS->getUnderlying();
7276
7277 TemplateName CanonUnderlying =
7278 getCanonicalTemplateName(Underlying, /*IgnoreDeduced=*/true);
7279 bool NonCanonical = CanonUnderlying != Underlying;
7280 auto CanonArgs =
7281 getCanonicalTemplateArguments(*this, DefArgs.Args, NonCanonical);
7282
7283 ArrayRef<NamedDecl *> Params =
7284 CanonUnderlying.getAsTemplateDecl()->getTemplateParameters()->asArray();
7285 assert(CanonArgs.size() <= Params.size());
7286 // A deduced template name which deduces the same default arguments already
7287 // declared in the underlying template is the same template as the
7288 // underlying template. We need to note any arguments which differ from
7289 // the corresponding declaration. If any argument differs, we must build a
7290 // deduced template name.
7291 for (int I = CanonArgs.size() - 1; I >= 0; --I) {
7293 if (!A)
7294 break;
7295 auto CanonParamDefArg = getCanonicalTemplateArgument(*A);
7296 TemplateArgument &CanonDefArg = CanonArgs[I];
7297 if (CanonDefArg.structurallyEquals(CanonParamDefArg))
7298 continue;
7299 // Keep popping from the back any default arguments which are the same.
7300 if (I == int(CanonArgs.size() - 1))
7301 CanonArgs.pop_back();
7302 NonCanonical = true;
7303 }
7304 return NonCanonical ? getDeducedTemplateName(
7305 CanonUnderlying,
7306 /*DefaultArgs=*/{DefArgs.StartPos, CanonArgs})
7307 : Name;
7308 }
7312 llvm_unreachable("always sugar node");
7313 }
7314
7315 llvm_unreachable("bad template name!");
7316}
7317
  // Two template names are the same iff their canonical forms compare equal.
  // NOTE(review): the first declaration line is missing from this rendering.
7319 const TemplateName &Y,
7320 bool IgnoreDeduced) const {
7321 return getCanonicalTemplateName(X, IgnoreDeduced) ==
7322 getCanonicalTemplateName(Y, IgnoreDeduced);
7323}
7324
  // Compare two associated constraints: the pack substitution index must
  // match, and (on the line missing from this rendering) the constraint
  // expressions must compare the same.
7326 const AssociatedConstraint &ACX, const AssociatedConstraint &ACY) const {
7327 if (ACX.ArgPackSubstIndex != ACY.ArgPackSubstIndex)
7328 return false;
7330 return false;
7331 return true;
7332}
7333
7334bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
7335 if (!XCE != !YCE)
7336 return false;
7337
7338 if (!XCE)
7339 return true;
7340
7341 llvm::FoldingSetNodeID XCEID, YCEID;
7342 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7343 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
7344 return XCEID == YCEID;
7345}
7346
  // Compare two type-constraints across (possibly different) modules.
  // NOTE(review): the declaration line and several lines of the intermediate
  // checks/final profiling comparison are missing from this rendering.
7348 const TypeConstraint *YTC) const {
7349 if (!XTC != !YTC)
7350 return false;
7351
7352 if (!XTC)
7353 return true;
7354
7355 auto *NCX = XTC->getNamedConcept();
7356 auto *NCY = YTC->getNamedConcept();
7357 if (!NCX || !NCY || !isSameEntity(NCX, NCY))
7358 return false;
7361 return false;
  // Argument counts must agree before any deeper comparison.
7363 if (XTC->getConceptReference()
7365 ->NumTemplateArgs !=
7367 return false;
7368
7369 // Compare slowly by profiling.
7370 //
7371 // We couldn't compare the profiling result for the template
7372 // args here. Consider the following example in different modules:
7373 //
7374 // template <__integer_like _Tp, C<_Tp> Sentinel>
7375 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
7376 // return __t;
7377 // }
7378 //
7379 // When we compare the profiling result for `C<_Tp>` in different
7380 // modules, it will compare the type of `_Tp` in different modules.
7381 // However, the type of `_Tp` in different modules refer to different
7382 // types here naturally. So we couldn't compare the profiling result
7383 // for the template args directly.
7386}
7387
  // Determine whether two template parameters declare the same parameter:
  // same kind, same pack-ness, and equivalent constraints/types.
  // NOTE(review): the first declaration line and the dyn_cast opening the
  // template-template-parameter case are missing from this rendering.
7389 const NamedDecl *Y) const {
7390 if (X->getKind() != Y->getKind())
7391 return false;
7392
7393 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
7394 auto *TY = cast<TemplateTypeParmDecl>(Y);
7395 if (TX->isParameterPack() != TY->isParameterPack())
7396 return false;
7397 if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
7398 return false;
7399 return isSameTypeConstraint(TX->getTypeConstraint(),
7400 TY->getTypeConstraint());
7401 }
7402
7403 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
7404 auto *TY = cast<NonTypeTemplateParmDecl>(Y);
7405 return TX->isParameterPack() == TY->isParameterPack() &&
7406 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) &&
7407 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(),
7408 TY->getPlaceholderTypeConstraint());
7409 }
7410
  // Template template parameters compare their own parameter lists.
7412 auto *TY = cast<TemplateTemplateParmDecl>(Y);
7413 return TX->isParameterPack() == TY->isParameterPack() &&
7414 isSameTemplateParameterList(TX->getTemplateParameters(),
7415 TY->getTemplateParameters());
7416}
7417
  // Two parameter lists match when they have the same length, pairwise-same
  // parameters, and equivalent requires-clauses. NOTE(review): the first
  // declaration line is missing from this rendering.
7419 const TemplateParameterList *X, const TemplateParameterList *Y) const {
7420 if (X->size() != Y->size())
7421 return false;
7422
7423 for (unsigned I = 0, N = X->size(); I != N; ++I)
7424 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
7425 return false;
7426
7427 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause());
7428}
7429
  // Determine whether two template parameters have the same default argument.
  // Requires both to actually have a default. NOTE(review): the first
  // declaration line is missing from this rendering.
7431 const NamedDecl *Y) const {
7432 // If the type parameter isn't the same already, we don't need to check the
7433 // default argument further.
7434 if (!isSameTemplateParameter(X, Y))
7435 return false;
7436
7437 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) {
7438 auto *TTPY = cast<TemplateTypeParmDecl>(Y);
7439 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7440 return false;
7441
7442 return hasSameType(TTPX->getDefaultArgument().getArgument().getAsType(),
7443 TTPY->getDefaultArgument().getArgument().getAsType());
7444 }
7445
7446 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
7447 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y);
7448 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
7449 return false;
7450
  // Compare expression defaults structurally via canonical profiling,
  // ignoring implicit casts.
7451 Expr *DefaultArgumentX =
7452 NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7453 Expr *DefaultArgumentY =
7454 NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts();
7455 llvm::FoldingSetNodeID XID, YID;
7456 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true);
7457 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true);
7458 return XID == YID;
7459 }
7460
  // Remaining case: template template parameters compare default templates.
7461 auto *TTPX = cast<TemplateTemplateParmDecl>(X);
7462 auto *TTPY = cast<TemplateTemplateParmDecl>(Y);
7463
7464 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
7465 return false;
7466
7467 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
7468 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
7469 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate());
7470}
7471
  // Compare two nested-name-specifiers structurally, prefix by prefix.
  // NOTE(review): the first declaration line and some case labels of the
  // switch are missing from this rendering.
7473 const NestedNameSpecifier Y) {
7474 if (X == Y)
7475 return true;
7476 if (!X || !Y)
7477 return false;
7478
7479 auto Kind = X.getKind();
7480 if (Kind != Y.getKind())
7481 return false;
7482
7483 // FIXME: For namespaces and types, we're permitted to check that the entity
7484 // is named via the same tokens. We should probably do so.
7485 switch (Kind) {
7487 auto [NamespaceX, PrefixX] = X.getAsNamespaceAndPrefix();
7488 auto [NamespaceY, PrefixY] = Y.getAsNamespaceAndPrefix();
7489 if (!declaresSameEntity(NamespaceX->getNamespace(),
7490 NamespaceY->getNamespace()))
7491 return false;
  // Same namespace: recurse on the remaining prefix.
7492 return isSameQualifier(PrefixX, PrefixY);
7493 }
  // Type specifiers compare canonically, then recurse on their prefixes.
7495 const auto *TX = X.getAsType(), *TY = Y.getAsType();
7496 if (TX->getCanonicalTypeInternal() != TY->getCanonicalTypeInternal())
7497 return false;
7498 return isSameQualifier(TX->getPrefix(), TY->getPrefix());
7499 }
7503 return true;
7504 }
7505 llvm_unreachable("unhandled qualifier kind");
7506}
7507
7508static bool hasSameCudaAttrs(const FunctionDecl *A, const FunctionDecl *B) {
7509 if (!A->getASTContext().getLangOpts().CUDA)
7510 return true; // Target attributes are overloadable in CUDA compilation only.
7511 if (A->hasAttr<CUDADeviceAttr>() != B->hasAttr<CUDADeviceAttr>())
7512 return false;
7513 if (A->hasAttr<CUDADeviceAttr>() && B->hasAttr<CUDADeviceAttr>())
7514 return A->hasAttr<CUDAHostAttr>() == B->hasAttr<CUDAHostAttr>();
7515 return true; // unattributed and __host__ functions are the same.
7516}
7517
7518/// Determine whether the attributes we can overload on are identical for A and
7519/// B. Will ignore any overloadable attrs represented in the type of A and B.
  // NOTE(review): the first declaration line is missing from this rendering.
7521 const FunctionDecl *B) {
7522 // Note that pass_object_size attributes are represented in the function's
7523 // ExtParameterInfo, so we don't need to check them here.
7524
7525 llvm::FoldingSetNodeID Cand1ID, Cand2ID;
7526 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
7527 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
7528
  // zip_longest yields std::nullopt on the shorter side, which detects a
  // mismatched number of enable_if attributes.
7529 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
7530 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
7531 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
7532
7533 // Return false if the number of enable_if attributes is different.
7534 if (!Cand1A || !Cand2A)
7535 return false;
7536
  // Reuse the node IDs across iterations; clear before each profile.
7537 Cand1ID.clear();
7538 Cand2ID.clear();
7539
7540 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
7541 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
7542
7543 // Return false if any of the enable_if expressions of A and B are
7544 // different.
7545 if (Cand1ID != Cand2ID)
7546 return false;
7547 }
  // Finally, CUDA target attributes must also agree.
7548 return hasSameCudaAttrs(A, B);
7549}
7550
7551bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
7552 // Caution: this function is called by the AST reader during deserialization,
7553 // so it cannot rely on AST invariants being met. Non-trivial accessors
7554 // should be avoided, along with any traversal of redeclaration chains.
7555
7556 if (X == Y)
7557 return true;
7558
7559 if (X->getDeclName() != Y->getDeclName())
7560 return false;
7561
7562 // Must be in the same context.
7563 //
7564 // Note that we can't use DeclContext::Equals here, because the DeclContexts
7565 // could be two different declarations of the same function. (We will fix the
7566 // semantic DC to refer to the primary definition after merging.)
7567 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
7569 return false;
7570
7571 // If either X or Y are local to the owning module, they are only possible to
7572 // be the same entity if they are in the same module.
7573 if (X->isModuleLocal() || Y->isModuleLocal())
7574 if (!isInSameModule(X->getOwningModule(), Y->getOwningModule()))
7575 return false;
7576
7577 // Two typedefs refer to the same entity if they have the same underlying
7578 // type.
7579 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
7580 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
7581 return hasSameType(TypedefX->getUnderlyingType(),
7582 TypedefY->getUnderlyingType());
7583
7584 // Must have the same kind.
7585 if (X->getKind() != Y->getKind())
7586 return false;
7587
7588 // Objective-C classes and protocols with the same name always match.
7590 return true;
7591
7593 // No need to handle these here: we merge them when adding them to the
7594 // template.
7595 return false;
7596 }
7597
7598 // Compatible tags match.
7599 if (const auto *TagX = dyn_cast<TagDecl>(X)) {
7600 const auto *TagY = cast<TagDecl>(Y);
7601 return (TagX->getTagKind() == TagY->getTagKind()) ||
7602 ((TagX->getTagKind() == TagTypeKind::Struct ||
7603 TagX->getTagKind() == TagTypeKind::Class ||
7604 TagX->getTagKind() == TagTypeKind::Interface) &&
7605 (TagY->getTagKind() == TagTypeKind::Struct ||
7606 TagY->getTagKind() == TagTypeKind::Class ||
7607 TagY->getTagKind() == TagTypeKind::Interface));
7608 }
7609
7610 // Functions with the same type and linkage match.
7611 // FIXME: This needs to cope with merging of prototyped/non-prototyped
7612 // functions, etc.
7613 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
7614 const auto *FuncY = cast<FunctionDecl>(Y);
7615 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
7616 const auto *CtorY = cast<CXXConstructorDecl>(Y);
7617 if (CtorX->getInheritedConstructor() &&
7618 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
7619 CtorY->getInheritedConstructor().getConstructor()))
7620 return false;
7621 }
7622
7623 if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
7624 return false;
7625
7626 // Multiversioned functions with different feature strings are represented
7627 // as separate declarations.
7628 if (FuncX->isMultiVersion()) {
7629 const auto *TAX = FuncX->getAttr<TargetAttr>();
7630 const auto *TAY = FuncY->getAttr<TargetAttr>();
7631 assert(TAX && TAY && "Multiversion Function without target attribute");
7632
7633 if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
7634 return false;
7635 }
7636
7637 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
7638 // not the same entity if they are constrained.
7639 if ((FuncX->isMemberLikeConstrainedFriend() ||
7640 FuncY->isMemberLikeConstrainedFriend()) &&
7641 !FuncX->getLexicalDeclContext()->Equals(
7642 FuncY->getLexicalDeclContext())) {
7643 return false;
7644 }
7645
7646 if (!isSameAssociatedConstraint(FuncX->getTrailingRequiresClause(),
7647 FuncY->getTrailingRequiresClause()))
7648 return false;
7649
7650 auto GetTypeAsWritten = [](const FunctionDecl *FD) {
7651 // Map to the first declaration that we've already merged into this one.
7652 // The TSI of redeclarations might not match (due to calling conventions
7653 // being inherited onto the type but not the TSI), but the TSI type of
7654 // the first declaration of the function should match across modules.
7655 FD = FD->getCanonicalDecl();
7656 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
7657 : FD->getType();
7658 };
7659 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
7660 if (!hasSameType(XT, YT)) {
7661 // We can get functions with different types on the redecl chain in C++17
7662 // if they have differing exception specifications and at least one of
7663 // the excpetion specs is unresolved.
7664 auto *XFPT = XT->getAs<FunctionProtoType>();
7665 auto *YFPT = YT->getAs<FunctionProtoType>();
7666 if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
7667 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
7670 return true;
7671 return false;
7672 }
7673
7674 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
7675 hasSameOverloadableAttrs(FuncX, FuncY);
7676 }
7677
7678 // Variables with the same type and linkage match.
7679 if (const auto *VarX = dyn_cast<VarDecl>(X)) {
7680 const auto *VarY = cast<VarDecl>(Y);
7681 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
7682 // During deserialization, we might compare variables before we load
7683 // their types. Assume the types will end up being the same.
7684 if (VarX->getType().isNull() || VarY->getType().isNull())
7685 return true;
7686
7687 if (hasSameType(VarX->getType(), VarY->getType()))
7688 return true;
7689
7690 // We can get decls with different types on the redecl chain. Eg.
7691 // template <typename T> struct S { static T Var[]; }; // #1
7692 // template <typename T> T S<T>::Var[sizeof(T)]; // #2
7693 // Only? happens when completing an incomplete array type. In this case
7694 // when comparing #1 and #2 we should go through their element type.
7695 const ArrayType *VarXTy = getAsArrayType(VarX->getType());
7696 const ArrayType *VarYTy = getAsArrayType(VarY->getType());
7697 if (!VarXTy || !VarYTy)
7698 return false;
7699 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
7700 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
7701 }
7702 return false;
7703 }
7704
7705 // Namespaces with the same name and inlinedness match.
7706 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
7707 const auto *NamespaceY = cast<NamespaceDecl>(Y);
7708 return NamespaceX->isInline() == NamespaceY->isInline();
7709 }
7710
7711 // Identical template names and kinds match if their template parameter lists
7712 // and patterns match.
7713 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
7714 const auto *TemplateY = cast<TemplateDecl>(Y);
7715
7716 // ConceptDecl wouldn't be the same if their constraint expression differs.
7717 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
7718 const auto *ConceptY = cast<ConceptDecl>(Y);
7719 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(),
7720 ConceptY->getConstraintExpr()))
7721 return false;
7722 }
7723
7724 return isSameEntity(TemplateX->getTemplatedDecl(),
7725 TemplateY->getTemplatedDecl()) &&
7726 isSameTemplateParameterList(TemplateX->getTemplateParameters(),
7727 TemplateY->getTemplateParameters());
7728 }
7729
7730 // Fields with the same name and the same type match.
7731 if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
7732 const auto *FDY = cast<FieldDecl>(Y);
7733 // FIXME: Also check the bitwidth is odr-equivalent, if any.
7734 return hasSameType(FDX->getType(), FDY->getType());
7735 }
7736
7737 // Indirect fields with the same target field match.
7738 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
7739 const auto *IFDY = cast<IndirectFieldDecl>(Y);
7740 return IFDX->getAnonField()->getCanonicalDecl() ==
7741 IFDY->getAnonField()->getCanonicalDecl();
7742 }
7743
7744 // Enumerators with the same name match.
7746 // FIXME: Also check the value is odr-equivalent.
7747 return true;
7748
7749 // Using shadow declarations with the same target match.
7750 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
7751 const auto *USY = cast<UsingShadowDecl>(Y);
7752 return declaresSameEntity(USX->getTargetDecl(), USY->getTargetDecl());
7753 }
7754
7755 // Using declarations with the same qualifier match. (We already know that
7756 // the name matches.)
7757 if (const auto *UX = dyn_cast<UsingDecl>(X)) {
7758 const auto *UY = cast<UsingDecl>(Y);
7759 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
7760 UX->hasTypename() == UY->hasTypename() &&
7761 UX->isAccessDeclaration() == UY->isAccessDeclaration();
7762 }
7763 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
7764 const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
7765 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
7766 UX->isAccessDeclaration() == UY->isAccessDeclaration();
7767 }
7768 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
7769 return isSameQualifier(
7770 UX->getQualifier(),
7771 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
7772 }
7773
7774 // Using-pack declarations are only created by instantiation, and match if
7775 // they're instantiated from matching UnresolvedUsing...Decls.
7776 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
7777 return declaresSameEntity(
7778 UX->getInstantiatedFromUsingDecl(),
7779 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
7780 }
7781
7782 // Namespace alias definitions with the same target match.
7783 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
7784 const auto *NAY = cast<NamespaceAliasDecl>(Y);
7785 return NAX->getNamespace()->Equals(NAY->getNamespace());
7786 }
7787
7788 return false;
7789}
7790
7793 switch (Arg.getKind()) {
7795 return Arg;
7796
7798 return TemplateArgument(Arg.getAsExpr(), /*IsCanonical=*/true,
7799 Arg.getIsDefaulted());
7800
7802 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
7804 Arg.getIsDefaulted());
7805 }
7806
7809 /*isNullPtr*/ true, Arg.getIsDefaulted());
7810
7813 Arg.getIsDefaulted());
7814
7816 return TemplateArgument(
7819
7822
7824 return TemplateArgument(*this,
7827
7830 /*isNullPtr*/ false, Arg.getIsDefaulted());
7831
7833 bool AnyNonCanonArgs = false;
7834 auto CanonArgs = ::getCanonicalTemplateArguments(
7835 *this, Arg.pack_elements(), AnyNonCanonArgs);
7836 if (!AnyNonCanonArgs)
7837 return Arg;
7839 const_cast<ASTContext &>(*this), CanonArgs);
7840 NewArg.setIsDefaulted(Arg.getIsDefaulted());
7841 return NewArg;
7842 }
7843 }
7844
7845 // Silence GCC warning
7846 llvm_unreachable("Unhandled template argument kind");
7847}
7848
7850 const TemplateArgument &Arg2) const {
7851 if (Arg1.getKind() != Arg2.getKind())
7852 return false;
7853
7854 switch (Arg1.getKind()) {
7856 llvm_unreachable("Comparing NULL template argument");
7857
7859 return hasSameType(Arg1.getAsType(), Arg2.getAsType());
7860
7862 return Arg1.getAsDecl()->getUnderlyingDecl()->getCanonicalDecl() ==
7864
7866 return hasSameType(Arg1.getNullPtrType(), Arg2.getNullPtrType());
7867
7872
7874 return llvm::APSInt::isSameValue(Arg1.getAsIntegral(),
7875 Arg2.getAsIntegral());
7876
7878 return Arg1.structurallyEquals(Arg2);
7879
7881 llvm::FoldingSetNodeID ID1, ID2;
7882 Arg1.getAsExpr()->Profile(ID1, *this, /*Canonical=*/true);
7883 Arg2.getAsExpr()->Profile(ID2, *this, /*Canonical=*/true);
7884 return ID1 == ID2;
7885 }
7886
7888 return llvm::equal(
7889 Arg1.getPackAsArray(), Arg2.getPackAsArray(),
7890 [&](const TemplateArgument &Arg1, const TemplateArgument &Arg2) {
7891 return isSameTemplateArgument(Arg1, Arg2);
7892 });
7893 }
7894
7895 llvm_unreachable("Unhandled template argument kind");
7896}
7897
7899 // Handle the non-qualified case efficiently.
7900 if (!T.hasLocalQualifiers()) {
7901 // Handle the common positive case fast.
7902 if (const auto *AT = dyn_cast<ArrayType>(T))
7903 return AT;
7904 }
7905
7906 // Handle the common negative case fast.
7907 if (!isa<ArrayType>(T.getCanonicalType()))
7908 return nullptr;
7909
7910 // Apply any qualifiers from the array type to the element type. This
7911 // implements C99 6.7.3p8: "If the specification of an array type includes
7912 // any type qualifiers, the element type is so qualified, not the array type."
7913
7914 // If we get here, we either have type qualifiers on the type, or we have
7915 // sugar such as a typedef in the way. If we have type qualifiers on the type
7916 // we must propagate them down into the element type.
7917
7918 SplitQualType split = T.getSplitDesugaredType();
7919 Qualifiers qs = split.Quals;
7920
7921 // If we have a simple case, just return now.
7922 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
7923 if (!ATy || qs.empty())
7924 return ATy;
7925
7926 // Otherwise, we have an array and we have qualifiers on it. Push the
7927 // qualifiers into the array element type and return a new array type.
7928 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
7929
7930 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
7931 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
7932 CAT->getSizeExpr(),
7933 CAT->getSizeModifier(),
7934 CAT->getIndexTypeCVRQualifiers()));
7935 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
7937 IAT->getSizeModifier(),
7938 IAT->getIndexTypeCVRQualifiers()));
7939
7940 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
7942 NewEltTy, DSAT->getSizeExpr(), DSAT->getSizeModifier(),
7943 DSAT->getIndexTypeCVRQualifiers()));
7944
7945 const auto *VAT = cast<VariableArrayType>(ATy);
7946 return cast<ArrayType>(
7947 getVariableArrayType(NewEltTy, VAT->getSizeExpr(), VAT->getSizeModifier(),
7948 VAT->getIndexTypeCVRQualifiers()));
7949}
7950
7952 if (getLangOpts().HLSL && T->isConstantArrayType())
7953 return getArrayParameterType(T);
7954 if (T->isArrayType() || T->isFunctionType())
7955 return getDecayedType(T);
7956 return T;
7957}
7958
7962 return T.getUnqualifiedType();
7963}
7964
7966 // C++ [except.throw]p3:
7967 // A throw-expression initializes a temporary object, called the exception
7968 // object, the type of which is determined by removing any top-level
7969 // cv-qualifiers from the static type of the operand of throw and adjusting
7970 // the type from "array of T" or "function returning T" to "pointer to T"
7971 // or "pointer to function returning T", [...]
7973 if (T->isArrayType() || T->isFunctionType())
7974 T = getDecayedType(T);
7975 return T.getUnqualifiedType();
7976}
7977
7978/// getArrayDecayedType - Return the properly qualified result of decaying the
7979/// specified array type to a pointer. This operation is non-trivial when
7980/// handling typedefs etc. The canonical type of "T" must be an array type,
7981/// this returns a pointer to a properly qualified element of the array.
7982///
7983/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
7985 // Get the element type with 'getAsArrayType' so that we don't lose any
7986 // typedefs in the element type of the array. This also handles propagation
7987 // of type qualifiers from the array type into the element type if present
7988 // (C99 6.7.3p8).
7989 const ArrayType *PrettyArrayType = getAsArrayType(Ty);
7990 assert(PrettyArrayType && "Not an array type!");
7991
7992 QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
7993
7994 // int x[restrict 4] -> int *restrict
7996 PrettyArrayType->getIndexTypeQualifiers());
7997
7998 // int x[_Nullable] -> int * _Nullable
7999 if (auto Nullability = Ty->getNullability()) {
8000 Result = const_cast<ASTContext *>(this)->getAttributedType(*Nullability,
8001 Result, Result);
8002 }
8003 return Result;
8004}
8005
8007 return getBaseElementType(array->getElementType());
8008}
8009
8011 Qualifiers qs;
8012 while (true) {
8013 SplitQualType split = type.getSplitDesugaredType();
8014 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
8015 if (!array) break;
8016
8017 type = array->getElementType();
8019 }
8020
8021 return getQualifiedType(type, qs);
8022}
8023
8024/// getConstantArrayElementCount - Returns number of constant array elements.
8025uint64_t
8027 uint64_t ElementCount = 1;
8028 do {
8029 ElementCount *= CA->getZExtSize();
8030 CA = dyn_cast_or_null<ConstantArrayType>(
8032 } while (CA);
8033 return ElementCount;
8034}
8035
8037 const ArrayInitLoopExpr *AILE) const {
8038 if (!AILE)
8039 return 0;
8040
8041 uint64_t ElementCount = 1;
8042
8043 do {
8044 ElementCount *= AILE->getArraySize().getZExtValue();
8045 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr());
8046 } while (AILE);
8047
8048 return ElementCount;
8049}
8050
8051/// getFloatingRank - Return a relative rank for floating point types.
8052/// This routine will assert if passed a built-in type that isn't a float.
8054 if (const auto *CT = T->getAs<ComplexType>())
8055 return getFloatingRank(CT->getElementType());
8056
8057 switch (T->castAs<BuiltinType>()->getKind()) {
8058 default: llvm_unreachable("getFloatingRank(): not a floating type");
8059 case BuiltinType::Float16: return Float16Rank;
8060 case BuiltinType::Half: return HalfRank;
8061 case BuiltinType::Float: return FloatRank;
8062 case BuiltinType::Double: return DoubleRank;
8063 case BuiltinType::LongDouble: return LongDoubleRank;
8064 case BuiltinType::Float128: return Float128Rank;
8065 case BuiltinType::BFloat16: return BFloat16Rank;
8066 case BuiltinType::Ibm128: return Ibm128Rank;
8067 }
8068}
8069
8070/// getFloatingTypeOrder - Compare the rank of the two specified floating
8071/// point types, ignoring the domain of the type (i.e. 'double' ==
8072/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
8073/// LHS < RHS, return -1.
8075 FloatingRank LHSR = getFloatingRank(LHS);
8076 FloatingRank RHSR = getFloatingRank(RHS);
8077
8078 if (LHSR == RHSR)
8079 return 0;
8080 if (LHSR > RHSR)
8081 return 1;
8082 return -1;
8083}
8084
8087 return 0;
8088 return getFloatingTypeOrder(LHS, RHS);
8089}
8090
8091/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
8092/// routine will assert if passed a built-in type that isn't an integer or enum,
8093/// or if it is not canonicalized.
8094unsigned ASTContext::getIntegerRank(const Type *T) const {
8095 assert(T->isCanonicalUnqualified() && "T should be canonicalized");
8096
8097 // Results in this 'losing' to any type of the same size, but winning if
8098 // larger.
8099 if (const auto *EIT = dyn_cast<BitIntType>(T))
8100 return 0 + (EIT->getNumBits() << 3);
8101
8102 switch (cast<BuiltinType>(T)->getKind()) {
8103 default: llvm_unreachable("getIntegerRank(): not a built-in integer");
8104 case BuiltinType::Bool:
8105 return 1 + (getIntWidth(BoolTy) << 3);
8106 case BuiltinType::Char_S:
8107 case BuiltinType::Char_U:
8108 case BuiltinType::SChar:
8109 case BuiltinType::UChar:
8110 return 2 + (getIntWidth(CharTy) << 3);
8111 case BuiltinType::Short:
8112 case BuiltinType::UShort:
8113 return 3 + (getIntWidth(ShortTy) << 3);
8114 case BuiltinType::Int:
8115 case BuiltinType::UInt:
8116 return 4 + (getIntWidth(IntTy) << 3);
8117 case BuiltinType::Long:
8118 case BuiltinType::ULong:
8119 return 5 + (getIntWidth(LongTy) << 3);
8120 case BuiltinType::LongLong:
8121 case BuiltinType::ULongLong:
8122 return 6 + (getIntWidth(LongLongTy) << 3);
8123 case BuiltinType::Int128:
8124 case BuiltinType::UInt128:
8125 return 7 + (getIntWidth(Int128Ty) << 3);
8126
8127 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
8128 // their underlying types" [c++20 conv.rank]
8129 case BuiltinType::Char8:
8130 return getIntegerRank(UnsignedCharTy.getTypePtr());
8131 case BuiltinType::Char16:
8132 return getIntegerRank(
8133 getFromTargetType(Target->getChar16Type()).getTypePtr());
8134 case BuiltinType::Char32:
8135 return getIntegerRank(
8136 getFromTargetType(Target->getChar32Type()).getTypePtr());
8137 case BuiltinType::WChar_S:
8138 case BuiltinType::WChar_U:
8139 return getIntegerRank(
8140 getFromTargetType(Target->getWCharType()).getTypePtr());
8141 }
8142}
8143
8144/// Whether this is a promotable bitfield reference according
8145/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
8146///
8147/// \returns the type this bit-field will promote to, or NULL if no
8148/// promotion occurs.
8150 if (E->isTypeDependent() || E->isValueDependent())
8151 return {};
8152
8153 // C++ [conv.prom]p5:
8154 // If the bit-field has an enumerated type, it is treated as any other
8155 // value of that type for promotion purposes.
8157 return {};
8158
8159 // FIXME: We should not do this unless E->refersToBitField() is true. This
8160 // matters in C where getSourceBitField() will find bit-fields for various
8161 // cases where the source expression is not a bit-field designator.
8162
8163 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
8164 if (!Field)
8165 return {};
8166
8167 QualType FT = Field->getType();
8168
8169 uint64_t BitWidth = Field->getBitWidthValue();
8170 uint64_t IntSize = getTypeSize(IntTy);
8171 // C++ [conv.prom]p5:
8172 // A prvalue for an integral bit-field can be converted to a prvalue of type
8173 // int if int can represent all the values of the bit-field; otherwise, it
8174 // can be converted to unsigned int if unsigned int can represent all the
8175 // values of the bit-field. If the bit-field is larger yet, no integral
8176 // promotion applies to it.
8177 // C11 6.3.1.1/2:
8178 // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
8179 // If an int can represent all values of the original type (as restricted by
8180 // the width, for a bit-field), the value is converted to an int; otherwise,
8181 // it is converted to an unsigned int.
8182 //
8183 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
8184 // We perform that promotion here to match GCC and C++.
8185 // FIXME: C does not permit promotion of an enum bit-field whose rank is
8186 // greater than that of 'int'. We perform that promotion to match GCC.
8187 //
8188 // C23 6.3.1.1p2:
8189 // The value from a bit-field of a bit-precise integer type is converted to
8190 // the corresponding bit-precise integer type. (The rest is the same as in
8191 // C11.)
8192 if (QualType QT = Field->getType(); QT->isBitIntType())
8193 return QT;
8194
8195 if (BitWidth < IntSize)
8196 return IntTy;
8197
8198 if (BitWidth == IntSize)
8199 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
8200
8201 // Bit-fields wider than int are not subject to promotions, and therefore act
8202 // like the base type. GCC has some weird bugs in this area that we
8203 // deliberately do not follow (GCC follows a pre-standard resolution to
8204 // C's DR315 which treats bit-width as being part of the type, and this leaks
8205 // into their semantics in some cases).
8206 return {};
8207}
8208
8209/// getPromotedIntegerType - Returns the type that Promotable will
8210/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
8211/// integer type.
8213 assert(!Promotable.isNull());
8214 assert(isPromotableIntegerType(Promotable));
8215 if (const auto *ED = Promotable->getAsEnumDecl())
8216 return ED->getPromotionType();
8217
8218 if (const auto *BT = Promotable->getAs<BuiltinType>()) {
8219 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
8220 // (3.9.1) can be converted to a prvalue of the first of the following
8221 // types that can represent all the values of its underlying type:
8222 // int, unsigned int, long int, unsigned long int, long long int, or
8223 // unsigned long long int [...]
8224 // FIXME: Is there some better way to compute this?
8225 if (BT->getKind() == BuiltinType::WChar_S ||
8226 BT->getKind() == BuiltinType::WChar_U ||
8227 BT->getKind() == BuiltinType::Char8 ||
8228 BT->getKind() == BuiltinType::Char16 ||
8229 BT->getKind() == BuiltinType::Char32) {
8230 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
8231 uint64_t FromSize = getTypeSize(BT);
8232 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
8234 for (const auto &PT : PromoteTypes) {
8235 uint64_t ToSize = getTypeSize(PT);
8236 if (FromSize < ToSize ||
8237 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
8238 return PT;
8239 }
8240 llvm_unreachable("char type should fit into long long");
8241 }
8242 }
8243
8244 // At this point, we should have a signed or unsigned integer type.
8245 if (Promotable->isSignedIntegerType())
8246 return IntTy;
8247 uint64_t PromotableSize = getIntWidth(Promotable);
8248 uint64_t IntSize = getIntWidth(IntTy);
8249 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
8250 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
8251}
8252
8253/// Recurses in pointer/array types until it finds an objc retainable
8254/// type and returns its ownership.
8256 while (!T.isNull()) {
8257 if (T.getObjCLifetime() != Qualifiers::OCL_None)
8258 return T.getObjCLifetime();
8259 if (T->isArrayType())
8261 else if (const auto *PT = T->getAs<PointerType>())
8262 T = PT->getPointeeType();
8263 else if (const auto *RT = T->getAs<ReferenceType>())
8264 T = RT->getPointeeType();
8265 else
8266 break;
8267 }
8268
8269 return Qualifiers::OCL_None;
8270}
8271
8272static const Type *getIntegerTypeForEnum(const EnumType *ET) {
8273 // Incomplete enum types are not treated as integer types.
8274 // FIXME: In C++, enum types are never integer types.
8275 const EnumDecl *ED = ET->getDecl()->getDefinitionOrSelf();
8276 if (ED->isComplete() && !ED->isScoped())
8277 return ED->getIntegerType().getTypePtr();
8278 return nullptr;
8279}
8280
8281/// getIntegerTypeOrder - Returns the highest ranked integer type:
8282/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
8283/// LHS < RHS, return -1.
8285 const Type *LHSC = getCanonicalType(LHS).getTypePtr();
8286 const Type *RHSC = getCanonicalType(RHS).getTypePtr();
8287
8288 // Unwrap enums to their underlying type.
8289 if (const auto *ET = dyn_cast<EnumType>(LHSC))
8290 LHSC = getIntegerTypeForEnum(ET);
8291 if (const auto *ET = dyn_cast<EnumType>(RHSC))
8292 RHSC = getIntegerTypeForEnum(ET);
8293
8294 if (LHSC == RHSC) return 0;
8295
8296 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
8297 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
8298
8299 unsigned LHSRank = getIntegerRank(LHSC);
8300 unsigned RHSRank = getIntegerRank(RHSC);
8301
8302 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
8303 if (LHSRank == RHSRank) return 0;
8304 return LHSRank > RHSRank ? 1 : -1;
8305 }
8306
8307 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
8308 if (LHSUnsigned) {
8309 // If the unsigned [LHS] type is larger, return it.
8310 if (LHSRank >= RHSRank)
8311 return 1;
8312
8313 // If the signed type can represent all values of the unsigned type, it
8314 // wins. Because we are dealing with 2's complement and types that are
8315 // powers of two larger than each other, this is always safe.
8316 return -1;
8317 }
8318
8319 // If the unsigned [RHS] type is larger, return it.
8320 if (RHSRank >= LHSRank)
8321 return -1;
8322
8323 // If the signed type can represent all values of the unsigned type, it
8324 // wins. Because we are dealing with 2's complement and types that are
8325 // powers of two larger than each other, this is always safe.
8326 return 1;
8327}
8328
8330 if (CFConstantStringTypeDecl)
8331 return CFConstantStringTypeDecl;
8332
8333 assert(!CFConstantStringTagDecl &&
8334 "tag and typedef should be initialized together");
8335 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
8336 CFConstantStringTagDecl->startDefinition();
8337
8338 struct {
8339 QualType Type;
8340 const char *Name;
8341 } Fields[5];
8342 unsigned Count = 0;
8343
8344 /// Objective-C ABI
8345 ///
8346 /// typedef struct __NSConstantString_tag {
8347 /// const int *isa;
8348 /// int flags;
8349 /// const char *str;
8350 /// long length;
8351 /// } __NSConstantString;
8352 ///
8353 /// Swift ABI (4.1, 4.2)
8354 ///
8355 /// typedef struct __NSConstantString_tag {
8356 /// uintptr_t _cfisa;
8357 /// uintptr_t _swift_rc;
8358 /// _Atomic(uint64_t) _cfinfoa;
8359 /// const char *_ptr;
8360 /// uint32_t _length;
8361 /// } __NSConstantString;
8362 ///
8363 /// Swift ABI (5.0)
8364 ///
8365 /// typedef struct __NSConstantString_tag {
8366 /// uintptr_t _cfisa;
8367 /// uintptr_t _swift_rc;
8368 /// _Atomic(uint64_t) _cfinfoa;
8369 /// const char *_ptr;
8370 /// uintptr_t _length;
8371 /// } __NSConstantString;
8372
8373 const auto CFRuntime = getLangOpts().CFRuntime;
8374 if (static_cast<unsigned>(CFRuntime) <
8375 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
8376 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
8377 Fields[Count++] = { IntTy, "flags" };
8378 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
8379 Fields[Count++] = { LongTy, "length" };
8380 } else {
8381 Fields[Count++] = { getUIntPtrType(), "_cfisa" };
8382 Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
8383 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" };
8384 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
8387 Fields[Count++] = { IntTy, "_ptr" };
8388 else
8389 Fields[Count++] = { getUIntPtrType(), "_ptr" };
8390 }
8391
8392 // Create fields
8393 for (unsigned i = 0; i < Count; ++i) {
8394 FieldDecl *Field =
8395 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
8396 SourceLocation(), &Idents.get(Fields[i].Name),
8397 Fields[i].Type, /*TInfo=*/nullptr,
8398 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
8399 Field->setAccess(AS_public);
8400 CFConstantStringTagDecl->addDecl(Field);
8401 }
8402
8403 CFConstantStringTagDecl->completeDefinition();
8404 // This type is designed to be compatible with NSConstantString, but cannot
8405 // use the same name, since NSConstantString is an interface.
8406 CanQualType tagType = getCanonicalTagType(CFConstantStringTagDecl);
8407 CFConstantStringTypeDecl =
8408 buildImplicitTypedef(tagType, "__NSConstantString");
8409
8410 return CFConstantStringTypeDecl;
8411}
8412
8414 if (!CFConstantStringTagDecl)
8415 getCFConstantStringDecl(); // Build the tag and the typedef.
8416 return CFConstantStringTagDecl;
8417}
8418
8419// getCFConstantStringType - Return the type used for constant CFStrings.
8424
8426 if (ObjCSuperType.isNull()) {
8427 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
8428 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
8429 ObjCSuperType = getCanonicalTagType(ObjCSuperTypeDecl);
8430 }
8431 return ObjCSuperType;
8432}
8433
8435 const auto *TT = T->castAs<TypedefType>();
8436 CFConstantStringTypeDecl = cast<TypedefDecl>(TT->getDecl());
8437 CFConstantStringTagDecl = TT->castAsRecordDecl();
8438}
8439
8441 if (BlockDescriptorType)
8442 return getCanonicalTagType(BlockDescriptorType);
8443
8444 RecordDecl *RD;
8445 // FIXME: Needs the FlagAppleBlock bit.
8446 RD = buildImplicitRecord("__block_descriptor");
8447 RD->startDefinition();
8448
8449 QualType FieldTypes[] = {
8452 };
8453
8454 static const char *const FieldNames[] = {
8455 "reserved",
8456 "Size"
8457 };
8458
8459 for (size_t i = 0; i < 2; ++i) {
8461 *this, RD, SourceLocation(), SourceLocation(),
8462 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
8463 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
8464 Field->setAccess(AS_public);
8465 RD->addDecl(Field);
8466 }
8467
8468 RD->completeDefinition();
8469
8470 BlockDescriptorType = RD;
8471
8472 return getCanonicalTagType(BlockDescriptorType);
8473}
8474
8476 if (BlockDescriptorExtendedType)
8477 return getCanonicalTagType(BlockDescriptorExtendedType);
8478
8479 RecordDecl *RD;
8480 // FIXME: Needs the FlagAppleBlock bit.
8481 RD = buildImplicitRecord("__block_descriptor_withcopydispose");
8482 RD->startDefinition();
8483
8484 QualType FieldTypes[] = {
8489 };
8490
8491 static const char *const FieldNames[] = {
8492 "reserved",
8493 "Size",
8494 "CopyFuncPtr",
8495 "DestroyFuncPtr"
8496 };
8497
8498 for (size_t i = 0; i < 4; ++i) {
8500 *this, RD, SourceLocation(), SourceLocation(),
8501 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
8502 /*BitWidth=*/nullptr,
8503 /*Mutable=*/false, ICIS_NoInit);
8504 Field->setAccess(AS_public);
8505 RD->addDecl(Field);
8506 }
8507
8508 RD->completeDefinition();
8509
8510 BlockDescriptorExtendedType = RD;
8511 return getCanonicalTagType(BlockDescriptorExtendedType);
8512}
8513
8515 const auto *BT = dyn_cast<BuiltinType>(T);
8516
8517 if (!BT) {
8518 if (isa<PipeType>(T))
8519 return OCLTK_Pipe;
8520
8521 return OCLTK_Default;
8522 }
8523
8524 switch (BT->getKind()) {
8525#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
8526 case BuiltinType::Id: \
8527 return OCLTK_Image;
8528#include "clang/Basic/OpenCLImageTypes.def"
8529
8530 case BuiltinType::OCLClkEvent:
8531 return OCLTK_ClkEvent;
8532
8533 case BuiltinType::OCLEvent:
8534 return OCLTK_Event;
8535
8536 case BuiltinType::OCLQueue:
8537 return OCLTK_Queue;
8538
8539 case BuiltinType::OCLReserveID:
8540 return OCLTK_ReserveID;
8541
8542 case BuiltinType::OCLSampler:
8543 return OCLTK_Sampler;
8544
8545 default:
8546 return OCLTK_Default;
8547 }
8548}
8549
8551 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
8552}
8553
8554/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
8555/// requires copy/dispose. Note that this must match the logic
8556/// in buildByrefHelpers.
8558 const VarDecl *D) {
8559 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
8560 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr();
8561 if (!copyExpr && record->hasTrivialDestructor()) return false;
8562
8563 return true;
8564 }
8565
8567 return true;
8568
8569 // The block needs copy/destroy helpers if Ty is non-trivial to destructively
8570 // move or destroy.
8572 return true;
8573
8574 if (!Ty->isObjCRetainableType()) return false;
8575
8576 Qualifiers qs = Ty.getQualifiers();
8577
8578 // If we have lifetime, that dominates.
8579 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
8580 switch (lifetime) {
8581 case Qualifiers::OCL_None: llvm_unreachable("impossible");
8582
8583 // These are just bits as far as the runtime is concerned.
8586 return false;
8587
8588 // These cases should have been taken care of when checking the type's
8589 // non-triviality.
8592 llvm_unreachable("impossible");
8593 }
8594 llvm_unreachable("fell out of lifetime switch!");
8595 }
8596 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
8598}
8599
8601 Qualifiers::ObjCLifetime &LifeTime,
8602 bool &HasByrefExtendedLayout) const {
8603 if (!getLangOpts().ObjC ||
8604 getLangOpts().getGC() != LangOptions::NonGC)
8605 return false;
8606
8607 HasByrefExtendedLayout = false;
8608 if (Ty->isRecordType()) {
8609 HasByrefExtendedLayout = true;
8610 LifeTime = Qualifiers::OCL_None;
8611 } else if ((LifeTime = Ty.getObjCLifetime())) {
8612 // Honor the ARC qualifiers.
8613 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
8614 // The MRR rule.
8616 } else {
8617 LifeTime = Qualifiers::OCL_None;
8618 }
8619 return true;
8620}
8621
8623 assert(Target && "Expected target to be initialized");
8624 const llvm::Triple &T = Target->getTriple();
8625 // Windows is LLP64 rather than LP64
8626 if (T.isOSWindows() && T.isArch64Bit())
8627 return UnsignedLongLongTy;
8628 return UnsignedLongTy;
8629}
8630
8632 assert(Target && "Expected target to be initialized");
8633 const llvm::Triple &T = Target->getTriple();
8634 // Windows is LLP64 rather than LP64
8635 if (T.isOSWindows() && T.isArch64Bit())
8636 return LongLongTy;
8637 return LongTy;
8638}
8639
8641 if (!ObjCInstanceTypeDecl)
8642 ObjCInstanceTypeDecl =
8643 buildImplicitTypedef(getObjCIdType(), "instancetype");
8644 return ObjCInstanceTypeDecl;
8645}
8646
8647// This returns true if a type has been typedefed to BOOL:
8648// typedef <type> BOOL;
// NOTE(review): the signature line (taking the QualType 'T' inspected
// below) is elided from this view.
// Only a direct TypedefType sugar node named exactly "BOOL" qualifies.
8650 if (const auto *TT = dyn_cast<TypedefType>(T))
8651 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
8652 return II->isStr("BOOL");
8653
8654 return false;
8655}
8656
8657/// getObjCEncodingTypeSize returns size of type for objective-c encoding
8658/// purpose.
// NOTE(review): the signature line is elided from this view.
// Incomplete types (other than incomplete arrays) contribute zero size.
8660 if (!type->isIncompleteArrayType() && type->isIncompleteType())
8661 return CharUnits::Zero();
8662
// (the line initializing 'sz' from the type's size is elided from this view)
8664
8665 // Make all integer and enum types at least as large as an int
8666 if (sz.isPositive() && type->isIntegralOrEnumerationType())
8667 sz = std::max(sz, getTypeSizeInChars(IntTy));
8668 // Treat arrays as pointers, since that's how they're passed in.
8669 else if (type->isArrayType())
// (the pointer-size assignment forming the body of the branch above is
// elided from this view)
8671 return sz;
8672}
8673
// NOTE(review): this definition is heavily elided in this view — its
// signature and every return-value line are missing. From the checks below
// it classifies the definition of an inline variable 'VD' (weak vs.
// non-discardable) — TODO confirm against the full file.
8680
8683 if (!VD->isInline())
8685
8686 // In almost all cases, it's a weak definition.
8687 auto *First = VD->getFirstDecl();
8688 if (First->isInlineSpecified() || !First->isStaticDataMember())
8690
8691 // If there's a file-context declaration in this translation unit, it's a
8692 // non-discardable definition.
8693 for (auto *D : VD->redecls())
// (the first half of this redeclaration filter condition is elided)
8695 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
8697
8698 // If we've not seen one yet, we don't know.
8700}
8701
8702static std::string charUnitsToString(const CharUnits &CU) {
8703 return llvm::itostr(CU.getQuantity());
8704}
8705
8706/// getObjCEncodingForBlock - Return the encoded type for this block
8707/// declaration.
// NOTE(review): the signature line (taking the BlockExpr 'Expr' used below)
// is elided from this view.
8709 std::string S;
8710
8711 const BlockDecl *Decl = Expr->getBlockDecl();
8712 QualType BlockTy =
// (the initializer of BlockTy is elided from this view)
8714 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
8715 // Encode result type.
8716 if (getLangOpts().EncodeExtendedBlockSig)
// (the extended-encoding call line is elided from this view)
8718 true /*Extended*/);
8719 else
8720 getObjCEncodingForType(BlockReturnTy, S);
8721 // Compute size of all parameters.
8722 // Start with computing size of a pointer in number of bytes.
8723 // FIXME: There might(should) be a better way of doing this computation!
// (the line initializing 'PtrSize' is elided from this view)
8725 CharUnits ParmOffset = PtrSize;
// First pass: accumulate the total byte size of the argument frame.
8726 for (auto *PI : Decl->parameters()) {
8727 QualType PType = PI->getType();
// (the line computing 'sz' for this parameter is elided from this view)
8729 if (sz.isZero())
8730 continue;
8731 assert(sz.isPositive() && "BlockExpr - Incomplete param type");
8732 ParmOffset += sz;
8733 }
8734 // Size of the argument frame
8735 S += charUnitsToString(ParmOffset);
8736 // Block pointer and offset.
8737 S += "@?0";
8738
8739 // Argument types.
// Second pass: emit each parameter's encoding followed by its byte offset.
8740 ParmOffset = PtrSize;
8741 for (auto *PVDecl : Decl->parameters()) {
8742 QualType PType = PVDecl->getOriginalType();
8743 if (const auto *AT =
8744 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
8745 // Use array's original type only if it has known number of
8746 // elements.
8747 if (!isa<ConstantArrayType>(AT))
8748 PType = PVDecl->getType();
8749 } else if (PType->isFunctionType())
8750 PType = PVDecl->getType();
8751 if (getLangOpts().EncodeExtendedBlockSig)
// (the extended-encoding call line is elided from this view)
8753 S, true /*Extended*/);
8754 else
8755 getObjCEncodingForType(PType, S);
8756 S += charUnitsToString(ParmOffset);
8757 ParmOffset += getObjCEncodingTypeSize(PType);
8758 }
8759
8760 return S;
8761}
8762
8763std::string
// NOTE(review): the line naming this member (taking the FunctionDecl
// 'Decl' used below) is elided from this view. Builds the ObjC type
// encoding for a function: return type, frame size, then each parameter's
// encoding and offset.
8765 std::string S;
8766 // Encode result type.
8767 getObjCEncodingForType(Decl->getReturnType(), S);
8768 CharUnits ParmOffset;
8769 // Compute size of all parameters.
8770 for (auto *PI : Decl->parameters()) {
8771 QualType PType = PI->getType();
// (the line computing 'sz' for this parameter is elided from this view)
8773 if (sz.isZero())
8774 continue;
8775
8776 assert(sz.isPositive() &&
8777 "getObjCEncodingForFunctionDecl - Incomplete param type");
8778 ParmOffset += sz;
8779 }
// Total frame size, then second pass emitting encodings with offsets.
8780 S += charUnitsToString(ParmOffset);
8781 ParmOffset = CharUnits::Zero();
8782
8783 // Argument types.
8784 for (auto *PVDecl : Decl->parameters()) {
8785 QualType PType = PVDecl->getOriginalType();
8786 if (const auto *AT =
8787 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
8788 // Use array's original type only if it has known number of
8789 // elements.
8790 if (!isa<ConstantArrayType>(AT))
8791 PType = PVDecl->getType();
8792 } else if (PType->isFunctionType())
8793 PType = PVDecl->getType();
8794 getObjCEncodingForType(PType, S);
8795 S += charUnitsToString(ParmOffset);
8796 ParmOffset += getObjCEncodingTypeSize(PType);
8797 }
8798
8799 return S;
8800}
8801
8802/// getObjCEncodingForMethodParameter - Return the encoded type for a single
8803/// method parameter or return type. If Extended, include class names and
8804/// block object types.
// NOTE(review): the first signature line (naming the member and its first
// parameter, the ObjC decl qualifier) is elided from this view.
8806 QualType T, std::string& S,
8807 bool Extended) const {
8808 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
// (the call emitting the ObjC type-qualifier string is elided from this view)
8810 // Encode parameter type.
8811 ObjCEncOptions Options = ObjCEncOptions()
8812 .setExpandPointedToStructures()
8813 .setExpandStructures()
8814 .setIsOutermostType();
// Extended encodings additionally carry class names and block signatures.
8815 if (Extended)
8816 Options.setEncodeBlockParameters().setEncodeClassNames();
8817 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr);
8818}
8819
8820/// getObjCEncodingForMethodDecl - Return the encoded type for this method
8821/// declaration.
// NOTE(review): the signature line (taking the ObjCMethodDecl 'Decl' and
// producing the returned std::string) is elided from this view.
8823 bool Extended) const {
8824 // FIXME: This is not very efficient.
8825 // Encode return type.
8826 std::string S;
8827 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
8828 Decl->getReturnType(), S, Extended);
8829 // Compute size of all parameters.
8830 // Start with computing size of a pointer in number of bytes.
8831 // FIXME: There might(should) be a better way of doing this computation!
// (the line initializing 'PtrSize' is elided from this view)
8833 // The first two arguments (self and _cmd) are pointers; account for
8834 // their size.
8835 CharUnits ParmOffset = 2 * PtrSize;
8836 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
8837 E = Decl->sel_param_end(); PI != E; ++PI) {
8838 QualType PType = (*PI)->getType();
// (the line computing 'sz' for this parameter is elided from this view)
8840 if (sz.isZero())
8841 continue;
8842
8843 assert(sz.isPositive() &&
8844 "getObjCEncodingForMethodDecl - Incomplete param type");
8845 ParmOffset += sz;
8846 }
// Frame size, then the fixed self ('@' at offset 0) and _cmd (':' at
// pointer-size offset) slots.
8847 S += charUnitsToString(ParmOffset);
8848 S += "@0:";
8849 S += charUnitsToString(PtrSize);
8850
8851 // Argument types.
8852 ParmOffset = 2 * PtrSize;
8853 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
8854 E = Decl->sel_param_end(); PI != E; ++PI) {
8855 const ParmVarDecl *PVDecl = *PI;
8856 QualType PType = PVDecl->getOriginalType();
8857 if (const auto *AT =
8858 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
8859 // Use array's original type only if it has known number of
8860 // elements.
8861 if (!isa<ConstantArrayType>(AT))
8862 PType = PVDecl->getType();
8863 } else if (PType->isFunctionType())
8864 PType = PVDecl->getType();
// (the first line of the per-parameter encoding call is elided)
8866 PType, S, Extended);
8867 S += charUnitsToString(ParmOffset);
8868 ParmOffset += getObjCEncodingTypeSize(PType);
8869 }
8870
8871 return S;
8872}
8873
// NOTE(review): the line naming this member is elided from this view. It
// finds the property-implementation decl for 'PD' inside 'Container',
// which must be a category-impl or an implementation decl (or null).
8876 const ObjCPropertyDecl *PD,
8877 const Decl *Container) const {
8878 if (!Container)
8879 return nullptr;
// Search category implementations first; otherwise Container must be a
// class @implementation (enforced by the cast below).
8880 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) {
8881 for (auto *PID : CID->property_impls())
8882 if (PID->getPropertyDecl() == PD)
8883 return PID;
8884 } else {
8885 const auto *OID = cast<ObjCImplementationDecl>(Container);
8886 for (auto *PID : OID->property_impls())
8887 if (PID->getPropertyDecl() == PD)
8888 return PID;
8889 }
8890 return nullptr;
8891}
8892
8893/// getObjCEncodingForPropertyDecl - Return the encoded type for this
8894/// property declaration. If non-NULL, Container must be either an
8895/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
8896/// NULL when getting encodings for protocol properties.
8897/// Property attributes are stored as a comma-delimited C string. The simple
8898/// attributes readonly and bycopy are encoded as single characters. The
8899/// parametrized attributes, getter=name, setter=name, and ivar=name, are
8900/// encoded as single characters, followed by an identifier. Property types
8901/// are also encoded as a parametrized attribute. The characters used to encode
8902/// these attributes are defined by the following enumeration:
8903/// @code
8904/// enum PropertyAttributes {
8905/// kPropertyReadOnly = 'R', // property is read-only.
8906/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
8907/// kPropertyByref = '&', // property is a reference to the value last assigned
8908/// kPropertyDynamic = 'D', // property is dynamic
8909/// kPropertyGetter = 'G', // followed by getter selector name
8910/// kPropertySetter = 'S', // followed by setter selector name
8911/// kPropertyInstanceVariable = 'V' // followed by instance variable name
8912/// kPropertyType = 'T' // followed by old-style type encoding.
8913/// kPropertyWeak = 'W' // 'weak' property
8914/// kPropertyStrong = 'P' // property GC'able
8915/// kPropertyNonAtomic = 'N' // property non-atomic
8916/// kPropertyOptional = '?' // property optional
8917/// };
8918/// @endcode
8919std::string
// NOTE(review): the line naming this member (taking 'PD' and 'Container')
// is elided from this view.
8921 const Decl *Container) const {
8922 // Collect information from the property implementation decl(s).
8923 bool Dynamic = false;
8924 ObjCPropertyImplDecl *SynthesizePID = nullptr;
8925
8926 if (ObjCPropertyImplDecl *PropertyImpDecl =
// (the lookup-call line initializing PropertyImpDecl is elided)
8928 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
8929 Dynamic = true;
8930 else
8931 SynthesizePID = PropertyImpDecl;
8932 }
8933
8934 // FIXME: This is not very efficient.
8935 std::string S = "T";
8936
8937 // Encode result type.
8938 // GCC has some special rules regarding encoding of properties which
8939 // closely resembles encoding of ivars.
// (the call emitting the property type encoding into S is elided)
8941
8942 if (PD->isOptional())
8943 S += ",?";
8944
8945 if (PD->isReadOnly()) {
8946 S += ",R";
// (the copy/retain/weak attribute checks guarding the next three
// appends are elided from this view)
8948 S += ",C";
8950 S += ",&";
8952 S += ",W";
8953 } else {
8954 switch (PD->getSetterKind()) {
8955 case ObjCPropertyDecl::Assign: break;
8956 case ObjCPropertyDecl::Copy: S += ",C"; break;
8957 case ObjCPropertyDecl::Retain: S += ",&"; break;
8958 case ObjCPropertyDecl::Weak: S += ",W"; break;
8959 }
8960 }
8961
8962 // It really isn't clear at all what this means, since properties
8963 // are "dynamic by default".
8964 if (Dynamic)
8965 S += ",D";
8966
// (the non-atomic check guarding the ",N" append is elided)
8968 S += ",N";
8969
// (the getter-name check opening this brace is elided)
8971 S += ",G";
8972 S += PD->getGetterName().getAsString();
8973 }
8974
// (the setter-name check opening this brace is elided)
8976 S += ",S";
8977 S += PD->getSetterName().getAsString();
8978 }
8979
8980 if (SynthesizePID) {
8981 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
8982 S += ",V";
8983 S += OID->getNameAsString();
8984 }
8985
8986 // FIXME: OBJCGC: weak & strong
8987 return S;
8988}
8989
8990/// getLegacyIntegralTypeEncoding -
8991/// Another legacy compatibility encoding: 32-bit longs are encoded as
8992/// 'l' or 'L' , but not always. For typedefs, we need to use
8993/// 'i' or 'I' instead if encoding a struct field, or a pointer!
// NOTE(review): the signature line (taking 'PointeeTy' by reference, since
// it is rewritten in place below) is elided from this view.
8995 if (PointeeTy->getAs<TypedefType>()) {
8996 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
// Rewrite typedef'd 32-bit (unsigned) long to (unsigned) int so it
// encodes as 'I'/'i' instead of 'L'/'l'.
8997 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
8998 PointeeTy = UnsignedIntTy;
8999 else
9000 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
9001 PointeeTy = IntTy;
9002 }
9003 }
9004}
9005
// NOTE(review): the line naming this member is elided from this view; it
// forwards 'T' to getObjCEncodingForTypeImpl with the default option set.
9007 const FieldDecl *Field,
9008 QualType *NotEncodedT) const {
9009 // We follow the behavior of gcc, expanding structures which are
9010 // directly pointed to, and expanding embedded structures. Note that
9011 // these rules are sufficient to prevent recursive encoding of the
9012 // same type.
9013 getObjCEncodingForTypeImpl(T, S,
9014 ObjCEncOptions()
9015 .setExpandPointedToStructures()
9016 .setExpandStructures()
9017 .setIsOutermostType(),
9018 Field, NotEncodedT);
9019}
9020
// NOTE(review): the line naming this member (taking the QualType 'T') is
// elided from this view; it encodes a property type, i.e. the default
// option set plus the property flag.
9022 std::string& S) const {
9023 // Encode result type.
9024 // GCC has some special rules regarding encoding of properties which
9025 // closely resembles encoding of ivars.
9026 getObjCEncodingForTypeImpl(T, S,
9027 ObjCEncOptions()
9028 .setExpandPointedToStructures()
9029 .setExpandStructures()
9030 .setIsOutermostType()
9031 .setEncodingProperty(),
9032 /*Field=*/nullptr);
9033}
9034
// NOTE(review): the first signature line (a static helper taking the
// ASTContext* 'C') is elided from this view. Maps a builtin type to its
// single-character ObjC @encode letter.
9036 const BuiltinType *BT) {
9037 BuiltinType::Kind kind = BT->getKind();
9038 switch (kind) {
9039 case BuiltinType::Void: return 'v';
9040 case BuiltinType::Bool: return 'B';
9041 case BuiltinType::Char8:
9042 case BuiltinType::Char_U:
9043 case BuiltinType::UChar: return 'C';
9044 case BuiltinType::Char16:
9045 case BuiltinType::UShort: return 'S';
9046 case BuiltinType::Char32:
9047 case BuiltinType::UInt: return 'I';
// 'long' is target-dependent: 'L'/'l' when 32-bit, else the 64-bit codes.
9048 case BuiltinType::ULong:
9049 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
9050 case BuiltinType::UInt128: return 'T';
9051 case BuiltinType::ULongLong: return 'Q';
9052 case BuiltinType::Char_S:
9053 case BuiltinType::SChar: return 'c';
9054 case BuiltinType::Short: return 's';
9055 case BuiltinType::WChar_S:
9056 case BuiltinType::WChar_U:
9057 case BuiltinType::Int: return 'i';
9058 case BuiltinType::Long:
9059 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
9060 case BuiltinType::LongLong: return 'q';
9061 case BuiltinType::Int128: return 't';
9062 case BuiltinType::Float: return 'f';
9063 case BuiltinType::Double: return 'd';
9064 case BuiltinType::LongDouble: return 'D';
9065 case BuiltinType::NullPtr: return '*'; // like char*
9066
9067 case BuiltinType::BFloat16:
9068 case BuiltinType::Float16:
9069 case BuiltinType::Float128:
9070 case BuiltinType::Ibm128:
9071 case BuiltinType::Half:
9072 case BuiltinType::ShortAccum:
9073 case BuiltinType::Accum:
9074 case BuiltinType::LongAccum:
9075 case BuiltinType::UShortAccum:
9076 case BuiltinType::UAccum:
9077 case BuiltinType::ULongAccum:
9078 case BuiltinType::ShortFract:
9079 case BuiltinType::Fract:
9080 case BuiltinType::LongFract:
9081 case BuiltinType::UShortFract:
9082 case BuiltinType::UFract:
9083 case BuiltinType::ULongFract:
9084 case BuiltinType::SatShortAccum:
9085 case BuiltinType::SatAccum:
9086 case BuiltinType::SatLongAccum:
9087 case BuiltinType::SatUShortAccum:
9088 case BuiltinType::SatUAccum:
9089 case BuiltinType::SatULongAccum:
9090 case BuiltinType::SatShortFract:
9091 case BuiltinType::SatFract:
9092 case BuiltinType::SatLongFract:
9093 case BuiltinType::SatUShortFract:
9094 case BuiltinType::SatUFract:
9095 case BuiltinType::SatULongFract:
9096 // FIXME: potentially need @encodes for these!
9097 return ' ';
9098
// Target-specific vector/reference builtins cannot be encoded yet; emit a
// custom error diagnostic and a placeholder space.
9099#define SVE_TYPE(Name, Id, SingletonId) \
9100 case BuiltinType::Id:
9101#include "clang/Basic/AArch64ACLETypes.def"
9102#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
9103#include "clang/Basic/RISCVVTypes.def"
9104#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
9105#include "clang/Basic/WebAssemblyReferenceTypes.def"
9106#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
9107#include "clang/Basic/AMDGPUTypes.def"
9108 {
9109 DiagnosticsEngine &Diags = C->getDiagnostics();
9110 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
9111 "cannot yet @encode type %0");
9112 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy());
9113 return ' ';
9114 }
9115
9116 case BuiltinType::ObjCId:
9117 case BuiltinType::ObjCClass:
9118 case BuiltinType::ObjCSel:
9119 llvm_unreachable("@encoding ObjC primitive type");
9120
9121 // OpenCL and placeholder types don't need @encodings.
9122#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
9123 case BuiltinType::Id:
9124#include "clang/Basic/OpenCLImageTypes.def"
9125#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
9126 case BuiltinType::Id:
9127#include "clang/Basic/OpenCLExtensionTypes.def"
9128 case BuiltinType::OCLEvent:
9129 case BuiltinType::OCLClkEvent:
9130 case BuiltinType::OCLQueue:
9131 case BuiltinType::OCLReserveID:
9132 case BuiltinType::OCLSampler:
9133 case BuiltinType::Dependent:
9134#define PPC_VECTOR_TYPE(Name, Id, Size) \
9135 case BuiltinType::Id:
9136#include "clang/Basic/PPCTypes.def"
9137#define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
9138#include "clang/Basic/HLSLIntangibleTypes.def"
9139#define BUILTIN_TYPE(KIND, ID)
9140#define PLACEHOLDER_TYPE(KIND, ID) \
9141 case BuiltinType::KIND:
9142#include "clang/AST/BuiltinTypes.def"
9143 llvm_unreachable("invalid builtin type for @encode");
9144 }
9145 llvm_unreachable("invalid BuiltinType::Kind value");
9146}
9147
// Returns the @encode character for an enum: plain 'i' for non-fixed enums,
// otherwise the encoding of the fixed underlying builtin type.
9148static char ObjCEncodingForEnumDecl(const ASTContext *C, const EnumDecl *ED) {
// (the line initializing 'Enum' — presumably ED's definition — is elided
// from this view; TODO confirm)
9150
9151 // The encoding of an non-fixed enum type is always 'i', regardless of size.
9152 if (!Enum->isFixed())
9153 return 'i';
9154
9155 // The encoding of a fixed enum type matches its fixed underlying type.
9156 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
// (the return statement forwarding BT to the primitive-type encoder is
// elided from this view)
9158}
9159
9160static void EncodeBitField(const ASTContext *Ctx, std::string& S,
9161 QualType T, const FieldDecl *FD) {
9162 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
9163 S += 'b';
9164 // The NeXT runtime encodes bit fields as b followed by the number of bits.
9165 // The GNU runtime requires more information; bitfields are encoded as b,
9166 // then the offset (in bits) of the first element, then the type of the
9167 // bitfield, then the size in bits. For example, in this structure:
9168 //
9169 // struct
9170 // {
9171 // int integer;
9172 // int flags:2;
9173 // };
9174 // On a 32-bit system, the encoding for flags would be b2 for the NeXT
9175 // runtime, but b32i2 for the GNU runtime. The reason for this extra
9176 // information is not especially sensible, but we're stuck with it for
9177 // compatibility with GCC, although providing it breaks anything that
9178 // actually uses runtime introspection and wants to work on both runtimes...
9179 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
9180 uint64_t Offset;
9181
9182 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) {
9183 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), IVD);
9184 } else {
9185 const RecordDecl *RD = FD->getParent();
9186 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
9187 Offset = RL.getFieldOffset(FD->getFieldIndex());
9188 }
9189
9190 S += llvm::utostr(Offset);
9191
9192 if (const auto *ET = T->getAsCanonical<EnumType>())
9193 S += ObjCEncodingForEnumDecl(Ctx, ET->getDecl());
9194 else {
9195 const auto *BT = T->castAs<BuiltinType>();
9196 S += getObjCEncodingForPrimitiveType(Ctx, BT);
9197 }
9198 }
9199 S += llvm::utostr(FD->getBitWidthValue());
9200}
9201
9202// Helper function for determining whether the encoded type string would include
9203// a template specialization type.
// NOTE(review): the first signature line (a static bool helper taking the
// Type* 'T') is elided from this view.
9205 bool VisitBasesAndFields) {
9206 T = T->getBaseElementTypeUnsafe();
9207
// For pointers, only the pointee type itself is inspected — note the
// 'false' passed below, which stops base/field traversal.
9208 if (auto *PT = T->getAs<PointerType>())
// (the recursive-call line forming the body of the branch above is elided)
9210 PT->getPointeeType().getTypePtr(), false);
9211
9212 auto *CXXRD = T->getAsCXXRecordDecl();
9213
9214 if (!CXXRD)
9215 return false;
9216
// (the check preceding this 'return true' — presumably that CXXRD is
// itself a template specialization — is elided; TODO confirm)
9218 return true;
9219
9220 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
9221 return false;
9222
// Recurse into bases and fields, this time visiting their members too.
9223 for (const auto &B : CXXRD->bases())
9224 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
9225 true))
9226 return true;
9227
9228 for (auto *FD : CXXRD->fields())
9229 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(),
9230 true))
9231 return true;
9232
9233 return false;
9234}
9235
9236// FIXME: Use SmallString for accumulating string.
// Core worker for all @encode-string generation: appends the encoding of T
// to S, driven by the Options flag set. FD is the field being encoded (for
// bit-fields and name annotations); NotEncodedT, when non-null, receives a
// type that could not be encoded.
9237void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
9238 const ObjCEncOptions Options,
9239 const FieldDecl *FD,
9240 QualType *NotEncodedT) const {
// (the line initializing 'CT' — presumably the canonical type of T — is
// elided from this view; TODO confirm)
9242 switch (CT->getTypeClass()) {
9243 case Type::Builtin:
9244 case Type::Enum:
9245 if (FD && FD->isBitField())
9246 return EncodeBitField(this, S, T, FD);
9247 if (const auto *BT = dyn_cast<BuiltinType>(CT))
9248 S += getObjCEncodingForPrimitiveType(this, BT);
9249 else
9250 S += ObjCEncodingForEnumDecl(this, cast<EnumType>(CT)->getDecl());
9251 return;
9252
9253 case Type::Complex:
9254 S += 'j';
9255 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S,
9256 ObjCEncOptions(),
9257 /*Field=*/nullptr);
9258 return;
9259
9260 case Type::Atomic:
9261 S += 'A';
9262 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S,
9263 ObjCEncOptions(),
9264 /*Field=*/nullptr);
9265 return;
9266
9267 // encoding for pointer or reference types.
9268 case Type::Pointer:
9269 case Type::LValueReference:
9270 case Type::RValueReference: {
9271 QualType PointeeTy;
9272 if (isa<PointerType>(CT)) {
9273 const auto *PT = T->castAs<PointerType>();
9274 if (PT->isObjCSelType()) {
9275 S += ':';
9276 return;
9277 }
9278 PointeeTy = PT->getPointeeType();
9279 } else {
9280 PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
9281 }
9282
9283 bool isReadOnly = false;
9284 // For historical/compatibility reasons, the read-only qualifier of the
9285 // pointee gets emitted _before_ the '^'. The read-only qualifier of
9286 // the pointer itself gets ignored, _unless_ we are looking at a typedef!
9287 // Also, do not emit the 'r' for anything but the outermost type!
9288 if (T->getAs<TypedefType>()) {
9289 if (Options.IsOutermostType() && T.isConstQualified()) {
9290 isReadOnly = true;
9291 S += 'r';
9292 }
9293 } else if (Options.IsOutermostType()) {
9294 QualType P = PointeeTy;
9295 while (auto PT = P->getAs<PointerType>())
9296 P = PT->getPointeeType();
9297 if (P.isConstQualified()) {
9298 isReadOnly = true;
9299 S += 'r';
9300 }
9301 }
9302 if (isReadOnly) {
9303 // Another legacy compatibility encoding. Some ObjC qualifier and type
9304 // combinations need to be rearranged.
9305 // Rewrite "in const" from "nr" to "rn"
9306 if (StringRef(S).ends_with("nr"))
9307 S.replace(S.end()-2, S.end(), "rn");
9308 }
9309
9310 if (PointeeTy->isCharType()) {
9311 // char pointer types should be encoded as '*' unless it is a
9312 // type that has been typedef'd to 'BOOL'.
9313 if (!isTypeTypedefedAsBOOL(PointeeTy)) {
9314 S += '*';
9315 return;
9316 }
9317 } else if (const auto *RTy = PointeeTy->getAsCanonical<RecordType>()) {
9318 const IdentifierInfo *II = RTy->getDecl()->getIdentifier();
9319 // GCC binary compat: Need to convert "struct objc_class *" to "#".
9320 if (II == &Idents.get("objc_class")) {
9321 S += '#';
9322 return;
9323 }
9324 // GCC binary compat: Need to convert "struct objc_object *" to "@".
9325 if (II == &Idents.get("objc_object")) {
9326 S += '@';
9327 return;
9328 }
9329 // If the encoded string for the class includes template names, just emit
9330 // "^v" for pointers to the class.
9331 if (getLangOpts().CPlusPlus &&
9332 (!getLangOpts().EncodeCXXClassTemplateSpec &&
// (the call line checking for template specializations in the encoded
// string is elided from this view)
9334 RTy, Options.ExpandPointedToStructures()))) {
9335 S += "^v";
9336 return;
9337 }
9338 // fall through...
9339 }
9340 S += '^';
// (a statement between the '^' append and the options setup — presumably
// the legacy integral-type rewrite of PointeeTy — is elided; TODO confirm)
9342
9343 ObjCEncOptions NewOptions;
9344 if (Options.ExpandPointedToStructures())
9345 NewOptions.setExpandStructures();
9346 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions,
9347 /*Field=*/nullptr, NotEncodedT);
9348 return;
9349 }
9350
9351 case Type::ConstantArray:
9352 case Type::IncompleteArray:
9353 case Type::VariableArray: {
9354 const auto *AT = cast<ArrayType>(CT);
9355
9356 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) {
9357 // Incomplete arrays are encoded as a pointer to the array element.
9358 S += '^';
9359
9360 getObjCEncodingForTypeImpl(
9361 AT->getElementType(), S,
9362 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD);
9363 } else {
9364 S += '[';
9365
9366 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
9367 S += llvm::utostr(CAT->getZExtSize());
9368 else {
9369 //Variable length arrays are encoded as a regular array with 0 elements.
// (the first line of this assert — checking for a VLA — is elided)
9371 "Unknown array type!");
9372 S += '0';
9373 }
9374
9375 getObjCEncodingForTypeImpl(
9376 AT->getElementType(), S,
9377 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD,
9378 NotEncodedT);
9379 S += ']';
9380 }
9381 return;
9382 }
9383
9384 case Type::FunctionNoProto:
9385 case Type::FunctionProto:
9386 S += '?';
9387 return;
9388
9389 case Type::Record: {
9390 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
9391 S += RDecl->isUnion() ? '(' : '{';
9392 // Anonymous structures print as '?'
9393 if (const IdentifierInfo *II = RDecl->getIdentifier()) {
9394 S += II->getName();
9395 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
9396 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
9397 llvm::raw_string_ostream OS(S);
9398 printTemplateArgumentList(OS, TemplateArgs.asArray(),
// (the trailing argument of this call — presumably the printing policy —
// is elided from this view)
9400 }
9401 } else {
9402 S += '?';
9403 }
9404 if (Options.ExpandStructures()) {
9405 S += '=';
9406 if (!RDecl->isUnion()) {
9407 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
9408 } else {
// Unions are expanded member-by-member here (structures go through
// getObjCEncodingForStructureImpl above, which handles layout).
9409 for (const auto *Field : RDecl->fields()) {
9410 if (FD) {
9411 S += '"';
9412 S += Field->getNameAsString();
9413 S += '"';
9414 }
9415
9416 // Special case bit-fields.
9417 if (Field->isBitField()) {
9418 getObjCEncodingForTypeImpl(Field->getType(), S,
9419 ObjCEncOptions().setExpandStructures(),
9420 Field);
9421 } else {
9422 QualType qt = Field->getType();
// (a statement rewriting 'qt' before the recursive call — presumably
// the legacy integral-type rewrite — is elided; TODO confirm)
9424 getObjCEncodingForTypeImpl(
9425 qt, S,
9426 ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
9427 NotEncodedT);
9428 }
9429 }
9430 }
9431 }
9432 S += RDecl->isUnion() ? ')' : '}';
9433 return;
9434 }
9435
9436 case Type::BlockPointer: {
9437 const auto *BT = T->castAs<BlockPointerType>();
9438 S += "@?"; // Unlike a pointer-to-function, which is "^?".
9439 if (Options.EncodeBlockParameters()) {
9440 const auto *FT = BT->getPointeeType()->castAs<FunctionType>();
9441
9442 S += '<';
9443 // Block return type
9444 getObjCEncodingForTypeImpl(FT->getReturnType(), S,
9445 Options.forComponentType(), FD, NotEncodedT);
9446 // Block self
9447 S += "@?";
9448 // Block parameters
9449 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) {
9450 for (const auto &I : FPT->param_types())
9451 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD,
9452 NotEncodedT);
9453 }
9454 S += '>';
9455 }
9456 return;
9457 }
9458
9459 case Type::ObjCObject: {
9460 // hack to match legacy encoding of *id and *Class
9461 QualType Ty = getObjCObjectPointerType(CT);
9462 if (Ty->isObjCIdType()) {
9463 S += "{objc_object=}";
9464 return;
9465 }
9466 else if (Ty->isObjCClassType()) {
9467 S += "{objc_class=}";
9468 return;
9469 }
9470 // TODO: Double check to make sure this intentionally falls through.
9471 [[fallthrough]];
9472 }
9473
9474 case Type::ObjCInterface: {
9475 // Ignore protocol qualifiers when mangling at this level.
9476 // @encode(class_name)
9477 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
9478 S += '{';
9479 S += OI->getObjCRuntimeNameAsString();
9480 if (Options.ExpandStructures()) {
9481 S += '=';
9482 SmallVector<const ObjCIvarDecl*, 32> Ivars;
9483 DeepCollectObjCIvars(OI, true, Ivars);
9484 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
9485 const FieldDecl *Field = Ivars[i];
9486 if (Field->isBitField())
9487 getObjCEncodingForTypeImpl(Field->getType(), S,
9488 ObjCEncOptions().setExpandStructures(),
9489 Field);
9490 else
9491 getObjCEncodingForTypeImpl(Field->getType(), S,
9492 ObjCEncOptions().setExpandStructures(), FD,
9493 NotEncodedT);
9494 }
9495 }
9496 S += '}';
9497 return;
9498 }
9499
9500 case Type::ObjCObjectPointer: {
9501 const auto *OPT = T->castAs<ObjCObjectPointerType>();
9502 if (OPT->isObjCIdType()) {
9503 S += '@';
9504 return;
9505 }
9506
9507 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
9508 // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
9509 // Since this is a binary compatibility issue, need to consult with
9510 // runtime folks. Fortunately, this is a *very* obscure construct.
9511 S += '#';
9512 return;
9513 }
9514
9515 if (OPT->isObjCQualifiedIdType()) {
9516 getObjCEncodingForTypeImpl(
9517 getObjCIdType(), S,
9518 Options.keepingOnly(ObjCEncOptions()
9519 .setExpandPointedToStructures()
9520 .setExpandStructures()),
9521 FD);
9522 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
9523 // Note that we do extended encoding of protocol qualifier list
9524 // Only when doing ivar or property encoding.
9525 S += '"';
9526 for (const auto *I : OPT->quals()) {
9527 S += '<';
9528 S += I->getObjCRuntimeNameAsString();
9529 S += '>';
9530 }
9531 S += '"';
9532 }
9533 return;
9534 }
9535
9536 S += '@';
9537 if (OPT->getInterfaceDecl() &&
9538 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
9539 S += '"';
9540 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
9541 for (const auto *I : OPT->quals()) {
9542 S += '<';
9543 S += I->getObjCRuntimeNameAsString();
9544 S += '>';
9545 }
9546 S += '"';
9547 }
9548 return;
9549 }
9550
9551 // gcc just blithely ignores member pointers.
9552 // FIXME: we should do better than that. 'M' is available.
9553 case Type::MemberPointer:
9554 // This matches gcc's encoding, even though technically it is insufficient.
9555 //FIXME. We should do a better job than gcc.
9556 case Type::Vector:
9557 case Type::ExtVector:
9558 // Until we have a coherent encoding of these three types, issue warning.
9559 if (NotEncodedT)
9560 *NotEncodedT = T;
9561 return;
9562
9563 case Type::ConstantMatrix:
9564 if (NotEncodedT)
9565 *NotEncodedT = T;
9566 return;
9567
9568 case Type::BitInt:
9569 if (NotEncodedT)
9570 *NotEncodedT = T;
9571 return;
9572
9573 // We could see an undeduced auto type here during error recovery.
9574 // Just ignore it.
9575 case Type::Auto:
9576 case Type::DeducedTemplateSpecialization:
9577 return;
9578
9579 case Type::HLSLAttributedResource:
9580 case Type::HLSLInlineSpirv:
9581 llvm_unreachable("unexpected type");
9582
9583 case Type::ArrayParameter:
9584 case Type::Pipe:
9585#define ABSTRACT_TYPE(KIND, BASE)
9586#define TYPE(KIND, BASE)
9587#define DEPENDENT_TYPE(KIND, BASE) \
9588 case Type::KIND:
9589#define NON_CANONICAL_TYPE(KIND, BASE) \
9590 case Type::KIND:
9591#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
9592 case Type::KIND:
9593#include "clang/AST/TypeNodes.inc"
9594 llvm_unreachable("@encode for dependent type!");
9595 }
9596 llvm_unreachable("bad type kind!");
9597}
9598
9599void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
9600 std::string &S,
9601 const FieldDecl *FD,
9602 bool includeVBases,
9603 QualType *NotEncodedT) const {
9604 assert(RDecl && "Expected non-null RecordDecl");
9605 assert(!RDecl->isUnion() && "Should not be called for unions");
9606 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
9607 return;
9608
9609 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
9610 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
9611 const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
9612
9613 if (CXXRec) {
9614 for (const auto &BI : CXXRec->bases()) {
9615 if (!BI.isVirtual()) {
9616 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
9617 if (base->isEmpty())
9618 continue;
9619 uint64_t offs = toBits(layout.getBaseClassOffset(base));
9620 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
9621 std::make_pair(offs, base));
9622 }
9623 }
9624 }
9625
9626 for (FieldDecl *Field : RDecl->fields()) {
9627 if (!Field->isZeroLengthBitField() && Field->isZeroSize(*this))
9628 continue;
9629 uint64_t offs = layout.getFieldOffset(Field->getFieldIndex());
9630 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
9631 std::make_pair(offs, Field));
9632 }
9633
9634 if (CXXRec && includeVBases) {
9635 for (const auto &BI : CXXRec->vbases()) {
9636 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
9637 if (base->isEmpty())
9638 continue;
9639 uint64_t offs = toBits(layout.getVBaseClassOffset(base));
9640 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) &&
9641 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
9642 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
9643 std::make_pair(offs, base));
9644 }
9645 }
9646
9647 CharUnits size;
9648 if (CXXRec) {
9649 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
9650 } else {
9651 size = layout.getSize();
9652 }
9653
9654#ifndef NDEBUG
9655 uint64_t CurOffs = 0;
9656#endif
9657 std::multimap<uint64_t, NamedDecl *>::iterator
9658 CurLayObj = FieldOrBaseOffsets.begin();
9659
9660 if (CXXRec && CXXRec->isDynamicClass() &&
9661 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
9662 if (FD) {
9663 S += "\"_vptr$";
9664 std::string recname = CXXRec->getNameAsString();
9665 if (recname.empty()) recname = "?";
9666 S += recname;
9667 S += '"';
9668 }
9669 S += "^^?";
9670#ifndef NDEBUG
9671 CurOffs += getTypeSize(VoidPtrTy);
9672#endif
9673 }
9674
9675 if (!RDecl->hasFlexibleArrayMember()) {
9676 // Mark the end of the structure.
9677 uint64_t offs = toBits(size);
9678 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
9679 std::make_pair(offs, nullptr));
9680 }
9681
9682 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
9683#ifndef NDEBUG
9684 assert(CurOffs <= CurLayObj->first);
9685 if (CurOffs < CurLayObj->first) {
9686 uint64_t padding = CurLayObj->first - CurOffs;
9687 // FIXME: There doesn't seem to be a way to indicate in the encoding that
9688 // packing/alignment of members is different that normal, in which case
9689 // the encoding will be out-of-sync with the real layout.
9690 // If the runtime switches to just consider the size of types without
9691 // taking into account alignment, we could make padding explicit in the
9692 // encoding (e.g. using arrays of chars). The encoding strings would be
9693 // longer then though.
9694 CurOffs += padding;
9695 }
9696#endif
9697
9698 NamedDecl *dcl = CurLayObj->second;
9699 if (!dcl)
9700 break; // reached end of structure.
9701
9702 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
9703 // We expand the bases without their virtual bases since those are going
9704 // in the initial structure. Note that this differs from gcc which
9705 // expands virtual bases each time one is encountered in the hierarchy,
9706 // making the encoding type bigger than it really is.
9707 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
9708 NotEncodedT);
9709 assert(!base->isEmpty());
9710#ifndef NDEBUG
9711 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
9712#endif
9713 } else {
9714 const auto *field = cast<FieldDecl>(dcl);
9715 if (FD) {
9716 S += '"';
9717 S += field->getNameAsString();
9718 S += '"';
9719 }
9720
9721 if (field->isBitField()) {
9722 EncodeBitField(this, S, field->getType(), field);
9723#ifndef NDEBUG
9724 CurOffs += field->getBitWidthValue();
9725#endif
9726 } else {
9727 QualType qt = field->getType();
9729 getObjCEncodingForTypeImpl(
9730 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(),
9731 FD, NotEncodedT);
9732#ifndef NDEBUG
9733 CurOffs += getTypeSize(field->getType());
9734#endif
9735 }
9736 }
9737 }
9738}
9739
// Append the Objective-C runtime @encode character for each declaration
// qualifier bit set in QT:
//   in -> 'n', inout -> 'N', out -> 'o', bycopy -> 'O',
//   byref -> 'R', oneway -> 'V'.
// NOTE(review): the first line of this signature (upstream line 9740) is
// missing from this extract; QT is presumably a Decl::ObjCDeclQualifier
// bitmask — confirm against upstream.
 9741 std::string& S) const {
 9742 if (QT & Decl::OBJC_TQ_In)
 9743 S += 'n';
 9744 if (QT & Decl::OBJC_TQ_Inout)
 9745 S += 'N';
 9746 if (QT & Decl::OBJC_TQ_Out)
 9747 S += 'o';
 9748 if (QT & Decl::OBJC_TQ_Bycopy)
 9749 S += 'O';
 9750 if (QT & Decl::OBJC_TQ_Byref)
 9751 S += 'R';
 9752 if (QT & Decl::OBJC_TQ_Oneway)
 9753 S += 'V';
 9754}
9755
// Lazily create and cache the implicit "typedef ... id;" declaration
// (presumably ASTContext::getObjCIdDecl — the signature line 9756 is
// missing from this extract, as are lines 9758-9759 that compute T).
 9757 if (!ObjCIdDecl) {
 9760 ObjCIdDecl = buildImplicitTypedef(T, "id");
 9761 }
 9762 return ObjCIdDecl;
 9763}
9764
// Lazily create and cache the implicit "typedef ... SEL;" declaration
// (presumably ASTContext::getObjCSelDecl — signature line 9765 and the
// line computing T, 9767, are missing from this extract).
 9766 if (!ObjCSelDecl) {
 9768 ObjCSelDecl = buildImplicitTypedef(T, "SEL");
 9769 }
 9770 return ObjCSelDecl;
 9771}
9772
// Lazily create and cache the implicit "typedef ... Class;" declaration
// (presumably ASTContext::getObjCClassDecl — signature line 9773 and the
// lines computing T, 9775-9776, are missing from this extract).
 9774 if (!ObjCClassDecl) {
 9777 ObjCClassDecl = buildImplicitTypedef(T, "Class");
 9778 }
 9779 return ObjCClassDecl;
 9780}
9781
// Lazily create and cache the implicit interface declaration named
// "Protocol", used to type protocol expressions.
// NOTE(review): the signature line (9782) and the head of the Create
// call (lines 9785-9786, presumably ObjCInterfaceDecl::Create with
// context/location arguments) are missing from this extract.
 9783 if (!ObjCProtocolClassDecl) {
 9784 ObjCProtocolClassDecl
 9787 &Idents.get("Protocol"),
 9788 /*typeParamList=*/nullptr,
 9789 /*PrevDecl=*/nullptr,
 9790 SourceLocation(), true);
 9791 }
 9792
 9793 return ObjCProtocolClassDecl;
 9794}
9795
// Return the pointer-authentication qualifier applied to Objective-C
// interface SEL members, or a null qualifier when the
// PointerAuthObjcInterfaceSel language option is disabled.
// NOTE(review): the signature line (9796) and the head of the qualifier
// construction (lines 9799 and 9802 — presumably a
// PointerAuthQualifier::Create call and one of its arguments) are
// missing from this extract; confirm against upstream.
 9797 if (!getLangOpts().PointerAuthObjcInterfaceSel)
 9798 return PointerAuthQualifier();
 9800 getLangOpts().PointerAuthObjcInterfaceSelKey,
 9801 /*isAddressDiscriminated=*/true, SelPointerConstantDiscriminator,
 9803 /*isIsaPointer=*/false,
 9804 /*authenticatesNullValues=*/false);
 9805}
9806
9807//===----------------------------------------------------------------------===//
9808// __builtin_va_list Construction Functions
9809//===----------------------------------------------------------------------===//
9810
// Build an implicit "typedef char *<Name>;" — the shared helper for
// targets whose va_list is just a character pointer.
// NOTE(review): the first signature line (9811, presumably
// "static TypedefDecl *CreateCharPtrNamedVaListDecl(ASTContext *Context,")
// is missing from this extract.
 9812 StringRef Name) {
 9813 // typedef char* __builtin[_ms]_va_list;
 9814 QualType T = Context->getPointerType(Context->CharTy);
 9815 return Context->buildImplicitTypedef(T, Name);
 9816}
9817
// __builtin_ms_va_list (Microsoft ABI) is a plain char*.
// NOTE(review): the signature line (9818) is missing from this extract.
 9819 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list");
 9820}
9821
// __builtin_va_list as a plain char*, for targets using that layout.
// NOTE(review): the signature line (9822) is missing from this extract.
 9823 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list");
 9824}
9825
// __builtin_va_list as a plain void*, for targets using that layout.
// NOTE(review): the signature line (9826) is missing from this extract.
 9827 // typedef void* __builtin_va_list;
 9828 QualType T = Context->getPointerType(Context->VoidTy);
 9829 return Context->buildImplicitTypedef(T, "__builtin_va_list");
 9830}
9831
// Build the AArch64 __builtin_va_list: an implicit "struct __va_list"
// with five fields (__stack/__gr_top/__vr_top pointers plus the
// __gr_offs/__vr_offs register-save offsets), then typedef it as
// __builtin_va_list.  Under C++ the tag is associated with an implicit
// "std" namespace, matching the AArch64 C++ ABI mangling of va_list.
// NOTE(review): upstream line 9833 (the function name/parameter line,
// presumably "CreateAArch64ABIBuiltinVaListDecl(ASTContext *Context) {")
// is missing from this extract.
9832static TypedefDecl *
 9834 // struct __va_list
 9835 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
 9836 if (Context->getLangOpts().CPlusPlus) {
 9837 // namespace std { struct __va_list {
 9838 auto *NS = NamespaceDecl::Create(
 9839 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
 9840 /*Inline=*/false, SourceLocation(), SourceLocation(),
 9841 &Context->Idents.get("std"),
 9842 /*PrevDecl=*/nullptr, /*Nested=*/false);
 9843 NS->setImplicit();
 // NOTE(review): upstream line 9844 is missing here; it presumably
 // re-parents VaListTagDecl into the std namespace — confirm upstream.
 9845 }
 9846
 9847 VaListTagDecl->startDefinition();
 9848
 9849 const size_t NumFields = 5;
 9850 QualType FieldTypes[NumFields];
 9851 const char *FieldNames[NumFields];
 9852
 9853 // void *__stack;
 9854 FieldTypes[0] = Context->getPointerType(Context->VoidTy);
 9855 FieldNames[0] = "__stack";
 9856
 9857 // void *__gr_top;
 9858 FieldTypes[1] = Context->getPointerType(Context->VoidTy);
 9859 FieldNames[1] = "__gr_top";
 9860
 9861 // void *__vr_top;
 9862 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
 9863 FieldNames[2] = "__vr_top";
 9864
 9865 // int __gr_offs;
 9866 FieldTypes[3] = Context->IntTy;
 9867 FieldNames[3] = "__gr_offs";
 9868
 9869 // int __vr_offs;
 9870 FieldTypes[4] = Context->IntTy;
 9871 FieldNames[4] = "__vr_offs";
 9872
 9873 // Create fields
 9874 for (unsigned i = 0; i < NumFields; ++i) {
 // NOTE(review): upstream lines 9876-9878 (the DeclContext and
 // source-location arguments of FieldDecl::Create) are missing from
 // this extract.
 9875 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
 9879 &Context->Idents.get(FieldNames[i]),
 9880 FieldTypes[i], /*TInfo=*/nullptr,
 9881 /*BitWidth=*/nullptr,
 9882 /*Mutable=*/false,
 9883 ICIS_NoInit);
 9884 Field->setAccess(AS_public);
 9885 VaListTagDecl->addDecl(Field);
 9886 }
 9887 VaListTagDecl->completeDefinition();
 // Cache the tag on the context so later lookups reuse it.
 9888 Context->VaListTagDecl = VaListTagDecl;
 9889 CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
 9890
 9891 // } __builtin_va_list;
 9892 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
 9893}
9894
// Build the 32-bit PowerPC SVR4 ABI __builtin_va_list: an implicit
// "struct __va_list_tag" tracking the next general/float register
// (gpr/fpr), a reserved pad, and the overflow/register save areas;
// __builtin_va_list is then a one-element array of that struct so it
// decays to a pointer when passed, as C's va_list idiom requires.
// NOTE(review): upstream line 9895 (the signature, presumably
// "static TypedefDecl *CreatePowerABIBuiltinVaListDecl(ASTContext
// *Context) {") and line 9897 (presumably the declaration of the local
// VaListTagDecl) are missing from this extract.
 9896 // typedef struct __va_list_tag {
 9898
 9899 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
 9900 VaListTagDecl->startDefinition();
 9901
 9902 const size_t NumFields = 5;
 9903 QualType FieldTypes[NumFields];
 9904 const char *FieldNames[NumFields];
 9905
 9906 // unsigned char gpr;
 9907 FieldTypes[0] = Context->UnsignedCharTy;
 9908 FieldNames[0] = "gpr";
 9909
 9910 // unsigned char fpr;
 9911 FieldTypes[1] = Context->UnsignedCharTy;
 9912 FieldNames[1] = "fpr";
 9913
 9914 // unsigned short reserved;
 9915 FieldTypes[2] = Context->UnsignedShortTy;
 9916 FieldNames[2] = "reserved";
 9917
 9918 // void* overflow_arg_area;
 9919 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
 9920 FieldNames[3] = "overflow_arg_area";
 9921
 9922 // void* reg_save_area;
 9923 FieldTypes[4] = Context->getPointerType(Context->VoidTy);
 9924 FieldNames[4] = "reg_save_area";
 9925
 9926 // Create fields
 9927 for (unsigned i = 0; i < NumFields; ++i) {
 // NOTE(review): upstream lines 9929-9930 (the source-location
 // arguments of FieldDecl::Create) are missing from this extract.
 9928 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl,
 9931 &Context->Idents.get(FieldNames[i]),
 9932 FieldTypes[i], /*TInfo=*/nullptr,
 9933 /*BitWidth=*/nullptr,
 9934 /*Mutable=*/false,
 9935 ICIS_NoInit);
 9936 Field->setAccess(AS_public);
 9937 VaListTagDecl->addDecl(Field);
 9938 }
 9939 VaListTagDecl->completeDefinition();
 // Cache the tag on the context so later lookups reuse it.
 9940 Context->VaListTagDecl = VaListTagDecl;
 9941 CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
 9942
 9943 // } __va_list_tag;
 9944 TypedefDecl *VaListTagTypedefDecl =
 9945 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
 9946
 9947 QualType VaListTagTypedefType =
 9948 Context->getTypedefType(ElaboratedTypeKeyword::None,
 9949 /*Qualifier=*/std::nullopt, VaListTagTypedefDecl);
 9950
 9951 // typedef __va_list_tag __builtin_va_list[1];
 // Size is an APInt of the target's size_t width holding the value 1.
 9952 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
 9953 QualType VaListTagArrayType = Context->getConstantArrayType(
 9954 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0);
 9955 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
 9956}
9957
// Build the x86-64 System V ABI __builtin_va_list: an implicit
// "struct __va_list_tag" with gp_offset/fp_offset into the register
// save area plus the overflow and register save area pointers;
// __builtin_va_list is a one-element array of that struct so it decays
// to a pointer when passed, as C's va_list idiom requires.
// NOTE(review): upstream line 9959 (the function name/parameter line)
// and line 9961 (presumably the declaration of the local VaListTagDecl)
// are missing from this extract.
9958static TypedefDecl *
 9960 // struct __va_list_tag {
 9962 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
 9963 VaListTagDecl->startDefinition();
 9964
 9965 const size_t NumFields = 4;
 9966 QualType FieldTypes[NumFields];
 9967 const char *FieldNames[NumFields];
 9968
 9969 // unsigned gp_offset;
 9970 FieldTypes[0] = Context->UnsignedIntTy;
 9971 FieldNames[0] = "gp_offset";
 9972
 9973 // unsigned fp_offset;
 9974 FieldTypes[1] = Context->UnsignedIntTy;
 9975 FieldNames[1] = "fp_offset";
 9976
 9977 // void* overflow_arg_area;
 9978 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
 9979 FieldNames[2] = "overflow_arg_area";
 9980
 9981 // void* reg_save_area;
 9982 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
 9983 FieldNames[3] = "reg_save_area";
 9984
 9985 // Create fields
 9986 for (unsigned i = 0; i < NumFields; ++i) {
 // NOTE(review): upstream lines 9988-9990 (the DeclContext and
 // source-location arguments of FieldDecl::Create) are missing from
 // this extract.
 9987 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
 9991 &Context->Idents.get(FieldNames[i]),
 9992 FieldTypes[i], /*TInfo=*/nullptr,
 9993 /*BitWidth=*/nullptr,
 9994 /*Mutable=*/false,
 9995 ICIS_NoInit);
 9996 Field->setAccess(AS_public);
 9997 VaListTagDecl->addDecl(Field);
 9998 }
 9999 VaListTagDecl->completeDefinition();
 // Cache the tag on the context so later lookups reuse it.
 10000 Context->VaListTagDecl = VaListTagDecl;
 10001 CanQualType VaListTagType = Context->getCanonicalTagType(VaListTagDecl);
 10002
 10003 // };
 10004
 10005 // typedef struct __va_list_tag __builtin_va_list[1];
 // Size is an APInt of the target's size_t width holding the value 1.
 10006 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
 10007 QualType VaListTagArrayType = Context->getConstantArrayType(
 10008 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0);
 10009 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
 10010}
10011
10012static TypedefDecl *