//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
      TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
         I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
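/// Further examples (illustrative, target-dependent): a 2x2 matrix of float
/// is stored as [4 x float], and an ext_vector_type(4) of bool is stored as a
/// single i8 even though its scalar form is <4 x i1>.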
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If this is a bool type, or a bit-precise integer type in a bitfield
  // representation, map this integer to the target-specified size.
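  // For example (illustrative): on common targets both a plain 'bool' and a
  // '_BitInt(7)' bit-field widen here to i8.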
  if ((ForBitField && T->isBitIntType()) ||
      (!T->isBitIntType() && R->isIntegerTy(1)))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
    RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete. Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
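/// For example (illustrative): with only a forward declaration 'struct S;', a
/// function type such as 'struct S f(struct S)' cannot be converted yet, so a
/// placeholder type is used until S is completed.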
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
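/// For example (illustrative): an 'enum E' first seen as a forward declaration
/// is speculatively lowered as i32; if its completed underlying type turns out
/// not to be 32 bits wide, the non-record type cache is flushed below.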
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types from
  // the cache. This allows function types and other things that may be derived
  // from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For the device-side compilation, CUDA device builtin surface/texture types
  // may be represented in different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks, check that the type we compute matches the
  // cached type.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveBoolx2:
    case BuiltinType::SveBoolx4:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        // Tuple types are expressed as aggregate types of the same scalable
        // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is
        // {<vscale x 2 x i32>, <vscale x 2 x i32>}).
        if (Info.NumVectors != 1) {
          llvm::Type *EltTy = llvm::ScalableVectorType::get(
              ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
          llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
          return llvm::StructType::get(getLLVMContext(), EltTys);
        }
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue() *
                                                 Info.NumVectors);
      }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
  case BuiltinType::Id: { \
    if (BuiltinType::Id == BuiltinType::WasmExternRef) \
      ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
    else \
      llvm_unreachable("Unexpected wasm reference builtin type!"); \
  } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // Block pointers lower to function type. For function type,
    // getTargetAddressSpace() returns default address space for
    // function pointer i.e. program address space. Therefore, for block
    // pointers, it is important to pass the pointee AST address space when
    // calling getTargetAddressSpace(), to ensure that we get the LLVM IR
    // address space for data pointers and not function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
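    // For example (illustrative): a value type occupying 3 bytes inside a
    // 4-byte atomic becomes { <value's memory type>, [1 x i8] }.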
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
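/// For example (illustrative): converting 'struct Node { struct Node *next; };'
/// first creates a named opaque %struct.Node, so recursive uses can resolve,
/// and only then computes the body from the record layout.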
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a
  // function type without an address space qualifier, the
  // program address space is used. Otherwise, the target picks
  // the best address space based on the type information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}