1//===- ABIInfo.cpp --------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ABIInfo.h"
10#include "ABIInfoImpl.h"
11
12using namespace clang;
13using namespace clang::CodeGen;
14
// Pin the vtable to this file by giving the (defaulted) virtual destructor an
// out-of-line definition here.
ABIInfo::~ABIInfo() = default;
17
18CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); }
19
20ASTContext &ABIInfo::getContext() const { return CGT.getContext(); }
21
22llvm::LLVMContext &ABIInfo::getVMContext() const {
23 return CGT.getLLVMContext();
24}
25
26const llvm::DataLayout &ABIInfo::getDataLayout() const {
27 return CGT.getDataLayout();
28}
29
30const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); }
31
32const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
33 return CGT.getCodeGenOpts();
34}
35
36bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
37
38bool ABIInfo::isOHOSFamily() const {
39 return getTarget().getTriple().isOHOSFamily();
40}
41
42Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
43 QualType Ty) const {
44 return Address::invalid();
45}
46
47bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
48 return false;
49}
50
51bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
52 uint64_t Members) const {
53 return false;
54}
55
56bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
57 // For compatibility with GCC, ignore empty bitfields in C++ mode.
58 return getContext().getLangOpts().CPlusPlus;
59}
60
/// Determine whether \p Ty is a homogeneous aggregate: a type whose leaves
/// (recursively through arrays, records, and complex types) all share one
/// "base" machine type, as several ABIs require for register passing.
///
/// On success, \p Base is the common base type and \p Members the number of
/// base-type elements. \p Base may already be non-null on entry (set by an
/// enclosing recursive call); it then constrains what this type may contain.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // A constant array is homogeneous iff its element type is; scale the
    // member count by the array length. Zero-length arrays are rejected.
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    // A flexible array member makes the size open-ended, so it can never be
    // a homogeneous aggregate.
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the properties of the record such as
    // bases and ABI specific restrictions
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
        return false;

      // Each base class must itself be homogeneous with the same Base type;
      // its members count toward the total.
      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
                 getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      // Zero-width bitfields carry no data; skip them where the ABI says to.
      if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      // Union members overlap, so the union's size is its largest member;
      // struct members are laid out consecutively, so their counts add.
      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    // A record with no (non-empty) members never established a Base type.
    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    // Scalar leaf: a _Complex T counts as two members of type T.
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members. Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // If it's a non-power-of-2 vector, its size is already a power-of-2,
      // so make sure to widen it explicitly.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
        Base = getContext()
                   .getVectorType(EltTy, NumElements, VT->getVectorKind())
                   .getTypePtr();
      }
    }

    // All leaves must agree with Base in mode (vector vs. scalar) and size.
    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  // Finally, defer to the target's size/member-count limit.
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}
162
163bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
164 if (getContext().isPromotableIntegerType(Ty))
165 return true;
166
167 if (const auto *EIT = Ty->getAs<BitIntType>())
168 if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
169 return true;
170
171 return false;
172}
173
174ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
175 bool Realign,
176 llvm::Type *Padding) const {
177 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
178 Realign, Padding);
179}
180
181ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
182 bool Realign) const {
183 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
184 /*ByVal*/ false, Realign);
185}
186
// Pin the vtable to this file by giving the (defaulted) virtual destructor an
// out-of-line definition here.
SwiftABIInfo::~SwiftABIInfo() = default;
189
190/// Does the given lowering require more than the given number of
191/// registers when expanded?
192///
193/// This is intended to be the basis of a reasonable basic implementation
194/// of should{Pass,Return}Indirectly.
195///
196/// For most targets, a limit of four total registers is reasonable; this
197/// limits the amount of code required in order to move around the value
198/// in case it wasn't produced immediately prior to the call by the caller
199/// (or wasn't produced in exactly the right registers) or isn't used
200/// immediately within the callee. But some targets may need to further
201/// limit the register count due to an inability to support that many
202/// return registers.
203bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
204 unsigned maxAllRegisters) const {
205 unsigned intCount = 0, fpCount = 0;
206 for (llvm::Type *type : scalarTypes) {
207 if (type->isPointerTy()) {
208 intCount++;
209 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
210 auto ptrWidth = CGT.getTarget().getPointerWidth(LangAS::Default);
211 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
212 } else {
213 assert(type->isVectorTy() || type->isFloatingPointTy());
214 fpCount++;
215 }
216 }
217
218 return (intCount + fpCount > maxAllRegisters);
219}
220
221bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
222 bool AsReturnValue) const {
223 return occupiesMoreThan(ComponentTys, /*total=*/4);
224}
225
226bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
227 unsigned NumElts) const {
228 // The default implementation of this assumes that the target guarantees
229 // 128-bit SIMD support but nothing more.
230 return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
231}
232