#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace CodeGen;
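// Mapping from Clang calling-convention kinds (CC_*) to the corresponding
// llvm::CallingConv IDs used on emitted functions and call sites.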
  default:
    return llvm::CallingConv::C;
  case CC_Win64:
    return llvm::CallingConv::Win64;
  case CC_AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_Swift:
    return llvm::CallingConv::Swift;
  case CC_M68kRTD:
    return llvm::CallingConv::M68k_RTD;
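// ExtParameterInfo bookkeeping: paramInfos is padded so that every argument
// slot, including synthesized pass_object_size arguments, has a matching
// entry.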
                                        unsigned totalArgs) {
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  paramInfos.resize(prefixArgs);

    paramInfos.push_back(ParamInfo);

    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");

  paramInfos.resize(totalArgs);

  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());

  unsigned PrefixSize = prefix.size();

  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
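// Fragments of the CGFunctionInfo arrangement routines: calling-convention
// selection from declaration attributes and the arrange*() entry points for
// methods, constructors, thunks, blocks, and calls.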
                                     FTP->getExtInfo(), paramInfos, Required);

  return ::arrangeLLVMFunctionInfo(*this, false, argTypes,

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())

  if (D->hasAttr<AArch64VectorPcsAttr>())

  if (D->hasAttr<AArch64SVEPcsAttr>())

  if (D->hasAttr<AMDGPUKernelCallAttr>())

  if (D->hasAttr<RISCVVectorCCAttr>())

  return ::arrangeLLVMFunctionInfo(*this, true, argTypes,

  if (FD->hasAttr<CUDAGlobalAttr>()) {

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

         !Target.getCXXABI().hasConstructorVariants();

  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    if (auto Inherited = CD->getInheritedConstructor())

  if (!paramInfos.empty()) {
    paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
    paramInfos.append(AddedArgs.Suffix,

      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())

                                 argTypes, extInfo, paramInfos, required);

  for (auto &arg : args)

  for (auto &arg : args)

                                       unsigned prefixArgs, unsigned totalArgs) {

                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {

  for (const auto &Arg : args)

  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

                          FPT, TotalPrefixArgs + ExtraSuffixArgs)

  if (PassProtoArgs && FPT->hasExtParameterInfos()) {

                                 ArgTypes, Info, ParamInfos, Required);

  if (MD->isImplicitObjectMemberFunction())

  assert(isa<FunctionType>(FTy));

                                 {}, noProto->getExtInfo(), {},

                                              I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))

  assert(MD->isVirtual() && "only methods have thunks");

  ArgTys.push_back(*FTP->param_type_begin());

  ArgTys.push_back(Context.IntTy);

                                          unsigned numExtraRequiredArgs,

  assert(args.size() >= numExtraRequiredArgs);

  if (proto->isVariadic())

  if (proto->hasExtParameterInfos())

                 cast<FunctionNoProtoType>(fnType))) {

  for (const auto &arg : args)

                                 paramInfos, required);

                              chainCall ? 1 : 0, chainCall);

  for (const auto &Arg : args)

                                          unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");

                                 paramInfos, required);

  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())

  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
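// arrangeLLVMFunctionInfo: look up or create the canonical CGFunctionInfo in
// the FunctionInfos folding set, then fill in its calling convention,
// argument types, and ExtParameterInfo buffers.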
  assert(llvm::all_of(argTypes,

  llvm::FoldingSetNodeID ID;

  bool isDelegateCall =

                       info, paramInfos, required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);

                              info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  assert(inserted && "Recursively being processed?");

  if (CC == llvm::CallingConv::SPIR_KERNEL) {

    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

                                       bool chainCall, bool delegateCall,

  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;

  FI->Required = required;

  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
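// TypeExpansion describes how an argument is flattened for the "expand" ABI:
// constant arrays, records (bases plus fields), _Complex values, or no
// expansion at all.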
struct TypeExpansion {
  enum TypeExpansionKind {

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}

struct ConstantArrayExpansion : TypeExpansion {

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;

struct RecordExpansion : TypeExpansion {

      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;

struct ComplexExpansion : TypeExpansion {

  static bool classof(const TypeExpansion *TE) {

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
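// Computing a TypeExpansion for a type, plus helpers that count the expanded
// slots and collect the expanded element types; bit-fields, flexible array
// members, and dynamic classes cannot be expanded.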
static std::unique_ptr<TypeExpansion>

    return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),

           "Cannot expand structure with flexible array.");

    for (const auto *FD : RD->fields()) {
      if (FD->isZeroLengthBitField())

      assert(!FD->isBitField() &&
             "Cannot expand structure with bit-field members.");

      if (UnionSize < FieldSize) {
        UnionSize = FieldSize;

      Fields.push_back(LargestFD);

    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      assert(!CXXRD->isDynamicClass() &&
             "cannot expand vtable pointers in dynamic classes");
      llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));

    for (const auto *FD : RD->fields()) {
      if (FD->isZeroLengthBitField())

      assert(!FD->isBitField() &&
             "Cannot expand structure with bit-field members.");
      Fields.push_back(FD);

    return std::make_unique<RecordExpansion>(std::move(Bases),

    return std::make_unique<ComplexExpansion>(CT->getElementType());

  return std::make_unique<NoExpansion>();

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  if (isa<ComplexExpansion>(Exp.get()))

  assert(isa<NoExpansion>(Exp.get()));

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)

    for (auto FD : RExp->Fields)

  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {

    assert(isa<NoExpansion>(Exp.get()));

                                      ConstantArrayExpansion *CAE,

                                      llvm::function_ref<void(Address)> Fn) {
  for (int i = 0, n = CAE->NumElts; i < n; i++) {
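// ExpandTypeFromArgs / ExpandTypeToArgs: move expanded values between the
// IR-level argument list and an lvalue of the original aggregate type.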
                                       llvm::Function::arg_iterator &AI) {

         "Unexpected non-simple lvalue during struct expansion.");

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeFromArgs(BS->getType(), SubLV, AI);

    for (auto FD : RExp->Fields) {

      ExpandTypeFromArgs(FD->getType(), SubLV, AI);

  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;

    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;

    if (Arg->getType()->isPointerTy()) {

void CodeGenFunction::ExpandTypeToArgs(

  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {

        *this, CAExp, Addr, [&](Address EltAddr) {

          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,

  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {

      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,

    for (auto FD : RExp->Fields) {

      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,

  } else if (isa<ComplexExpansion>(Exp.get())) {

    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;

    assert(isa<NoExpansion>(Exp.get()));

    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
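// Coercion helpers: drill into leading struct elements, convert between
// pointer and integer representations, and load/store values whose IR type
// differs from the natural type of the memory slot.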
                                const Twine &Name = "tmp") {

                                      llvm::StructType *SrcSTy,

  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  uint64_t FirstEltSize =

  if (FirstEltSize < DstSize &&

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))

  if (Val->getType() == Ty)

  if (isa<llvm::PointerType>(Val->getType())) {

    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))

  if (Val->getType() != DestIntTy) {

    if (DL.isBigEndian()) {

      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");

        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");

      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {

                                             DstSize.getFixedValue(), CGF);

  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {

  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {

  if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {

      if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
          ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
          FixedSrcTy->getElementType()->isIntegerTy(8)) {
        ScalableDstTy = llvm::ScalableVectorType::get(
            FixedSrcTy->getElementType(),
            ScalableDstTy->getElementCount().getKnownMinValue() / 8);

      if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {

        auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);

            ScalableDstTy, PoisonVec, Load, Zero, "cast.scalable");
        if (ScalableDstTy != Ty)

      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));

                                       llvm::TypeSize DstSize,
                                       bool DstIsVolatile) {

  llvm::Type *SrcTy = Src->getType();

  if (llvm::StructType *DstSTy =

    assert(!SrcSize.isScalable());

                                            SrcSize.getFixedValue(), *this);

  if (SrcSize.isScalable() || SrcSize <= DstSize) {
    if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&

  } else if (llvm::StructType *STy =
                 dyn_cast<llvm::StructType>(Src->getType())) {

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {

      llvm::Value *Elt = Builder.CreateExtractValue(Src, i);

  } else if (SrcTy->isIntegerTy()) {

    llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
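// Helper that extracts a fixed-length vector from a scalable vector value,
// first repacking <vscale x N x i1> predicates as i8 elements when needed.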
static std::pair<llvm::Value *, bool>

                       llvm::ScalableVectorType *FromTy, llvm::Value *V,
                       StringRef Name = "") {

  if (FromTy->getElementType()->isIntegerTy(1) &&
      FromTy->getElementCount().isKnownMultipleOf(8) &&
      ToTy->getElementType() == CGF.Builder.getInt8Ty()) {
    FromTy = llvm::ScalableVectorType::get(
        ToTy->getElementType(),
        FromTy->getElementCount().getKnownMinValue() / 8);
    V = CGF.Builder.CreateBitCast(V, FromTy);

  if (FromTy->getElementType() == ToTy->getElementType()) {
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);

    V->setName(Name + ".coerce");
    V = CGF.Builder.CreateExtractVector(ToTy, V, Zero, "cast.fixed");
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;

  unsigned TotalIRArgs;

    unsigned PaddingArgIndex;

    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),

                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }

  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }

  unsigned getSRetArgNo() const {
    assert(hasSRetArg());

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;

  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;

  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);

                 bool OnlyRequiredArgs);

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;

    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;

    auto &IRArgs = ArgInfo[ArgNo];

      IRArgs.PaddingArgIndex = IRArgNo++;

      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());

        IRArgs.NumberOfArgs = STy->getNumElements();

        IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 1;

      IRArgs.NumberOfArgs = 0;

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;

    if (IRArgNo == 1 && SwapThisWithSRet)

  assert(ArgNo == ArgInfo.size());

    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;

  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
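// Building the llvm::FunctionType for a CGFunctionInfo: each argument is
// expanded, coerced, or passed indirectly according to its ABIArgInfo.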
  switch (BT->getKind()) {

  case BuiltinType::Float:

  case BuiltinType::Double:

  case BuiltinType::LongDouble:

    if (BT->getKind() == BuiltinType::LongDouble)

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;

    llvm_unreachable("Invalid ABI kind for return argument");

    resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);

  if (IRFunctionArgs.hasSRetArg()) {

    ArgTypes[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg())
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] =

  for (; it != ie; ++it, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      assert(NumIRArgs == 1);
      ArgTypes[FirstIRArg] = llvm::PointerType::get(

      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);

        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);

        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

        *ArgTypesIter++ = EltTy;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;

      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
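// AArch64 SME/ZA state attributes, OpenMP 'assume' propagation, strict-return
// and denormal-mode helpers, followed by the target-independent default
// function attributes (FP math flags, stack protector, zero-call-used-regs,
// and related settings).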
                                          llvm::AttrBuilder &FuncAttrs,

    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");

    FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");

    FuncAttrs.addAttribute("aarch64_za_state_agnostic");

    FuncAttrs.addAttribute("aarch64_preserves_za");

    FuncAttrs.addAttribute("aarch64_in_za");

    FuncAttrs.addAttribute("aarch64_out_za");

    FuncAttrs.addAttribute("aarch64_inout_za");

    FuncAttrs.addAttribute("aarch64_preserves_zt0");

    FuncAttrs.addAttribute("aarch64_in_zt0");

    FuncAttrs.addAttribute("aarch64_out_zt0");

    FuncAttrs.addAttribute("aarch64_inout_zt0");

                                     const Decl *Callee) {

  for (const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
    AA->getAssumption().split(Attrs, ",");

  FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
                         llvm::join(Attrs.begin(), Attrs.end(), ","));

    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return ClassDecl->hasTrivialDestructor();

                            const Decl *TargetDecl) {

  if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))

  if (!Module.getLangOpts().CPlusPlus)

  if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
    if (FDecl->isExternC())

  } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {

    if (VDecl->isExternC())

  return Module.getCodeGenOpts().StrictReturn ||
         !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
         Module.getLangOpts().Sanitize.has(SanitizerKind::Return);

                                 llvm::DenormalMode FP32DenormalMode,
                                 llvm::AttrBuilder &FuncAttrs) {
  if (FPDenormalMode != llvm::DenormalMode::getDefault())
    FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());

  if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
    FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
                                                llvm::AttrBuilder &FuncAttrs) {

    StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
    llvm::AttrBuilder &FuncAttrs) {

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {

      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);

      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);

    switch (CodeGenOpts.getFramePointer()) {

      FuncAttrs.addAttribute("frame-pointer",
                                 CodeGenOpts.getFramePointer()));

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

      FuncAttrs.addAttribute("no-trapping-math", "true");

    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.ApproxFunc)
      FuncAttrs.addAttribute("approx-func-fp-math", "true");
    if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
        LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
        (LangOpts.getDefaultFPContractMode() ==
         LangOpts.getDefaultFPContractMode() ==
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

      FuncAttrs.addAttribute("prefer-vector-width",

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);

    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");

    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");

    FuncAttrs.addAttribute(llvm::Attribute::Convergent);

  if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
      LangOpts.SYCLIsDevice) {
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

  if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
    FuncAttrs.addAttribute("save-reg-params");

    StringRef Var, Value;

    FuncAttrs.addAttribute(Var, Value);
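// Merging "target-features" and denormal-mode attributes of an existing
// llvm::Function with the current target options, plus the CodeGenModule
// wrappers around the default-attribute helpers.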
                                          const llvm::Function &F,

  auto FFeatures = F.getFnAttribute("target-features");

  llvm::StringSet<> MergedNames;

  MergedFeatures.reserve(TargetOpts.Features.size());

  auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
    for (StringRef Feature : FeatureRange) {
      if (Feature.empty())

      assert(Feature[0] == '+' || Feature[0] == '-');
      StringRef Name = Feature.drop_front(1);
      bool Merged = !MergedNames.insert(Name).second;

        MergedFeatures.push_back(Feature);

  if (FFeatures.isValid())
    AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
  AddUnmergedFeatures(TargetOpts.Features);

  if (!MergedFeatures.empty()) {
    llvm::sort(MergedFeatures);
    FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));

                                bool WillInternalize) {

  llvm::AttrBuilder FuncAttrs(F.getContext());

  if (!TargetOpts.CPU.empty())
    FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
  if (!TargetOpts.TuneCPU.empty())
    FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);

                                        CodeGenOpts, LangOpts,

  if (!WillInternalize && F.isInterposable()) {

    F.addFnAttrs(FuncAttrs);

  llvm::AttributeMask AttrsToRemove;

  llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
  llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
  llvm::DenormalMode Merged =

  if (DenormModeToMergeF32.isValid()) {

  if (Merged == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math");
  } else if (Merged != DenormModeToMerge) {

    FuncAttrs.addAttribute("denormal-fp-math",

  if (MergedF32 == llvm::DenormalMode::getDefault()) {
    AttrsToRemove.addAttribute("denormal-fp-math-f32");
  } else if (MergedF32 != DenormModeToMergeF32) {

    FuncAttrs.addAttribute("denormal-fp-math-f32",

  F.removeFnAttrs(AttrsToRemove);

  F.addFnAttrs(FuncAttrs);

void CodeGenModule::getTrivialDefaultFunctionAttributes(
    StringRef Name, bool HasOptnone, bool AttrOnCallSite,
    llvm::AttrBuilder &FuncAttrs) {
  ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,

  if (!AttrOnCallSite)

                                                   llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes("", false,

  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {

    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);

  if (LangOpts.NoBuiltin) {

    FuncAttrs.addAttribute("no-builtins");

  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");

  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);

                              const llvm::DataLayout &DL, const ABIArgInfo &AI,
                              bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);

  if (!DL.typeSizeEqualsStoreSize(Ty))

    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))

  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))

  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))

                                 unsigned NumRequiredArgs, unsigned ArgNo) {
  const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);

  if (ArgNo >= NumRequiredArgs)

  if (ArgNo < FD->getNumParams()) {
    const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
    if (Param && Param->hasAttr<MaybeUndefAttr>())

  if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))

  if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {

           llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
             return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);

  llvm::FPClassTest Mask = llvm::fcNone;
  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;
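// Building the llvm::AttributeList for a function or call site: function
// attributes, return-value attributes, and per-argument attributes derived
// from the CGFunctionInfo and the target declaration's attributes.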
                                           llvm::AttributeList &AttrList,

                                           bool AttrOnCallSite, bool IsThunk) {

    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    FuncAttrs.addAttribute("cmse_nonsecure_call");

  bool HasOptnone = false;

  const NoBuiltinAttr *NBA = nullptr;

  auto AddPotentialArgAccess = [&]() {
    llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);

      FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
                              llvm::MemoryEffects::argMemOnly());

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {

      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {

        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();

            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);

      const bool IsVirtualCall = MD && MD->isVirtual();

      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();

    if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {

      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);

    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);

    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
    if (TargetDecl->hasAttr<BPFFastCallAttr>())
      FuncAttrs.addAttribute("bpf_fastcall");

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      std::optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {

        FuncAttrs.addAttribute("uniform-work-group-size", "true");

        FuncAttrs.addAttribute(
            "uniform-work-group-size",
            llvm::toStringRef(getLangOpts().OffloadUniformBlock));

    if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&

      FuncAttrs.addAttribute("uniform-work-group-size", "true");

    if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
      FuncAttrs.addAttribute("aarch64_pstate_sm_body");

  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");
    if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {

          TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
      FuncAttrs.removeAttribute("zero-call-used-regs");
      FuncAttrs.addAttribute(
          "zero-call-used-regs",
          ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));

    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);

    if (TargetDecl->hasAttr<NoConvergentAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::Convergent);

  if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      if (!FD->isExternallyVisible())
        FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",

  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    auto shouldDisableTailCalls = [&] {

      if (CodeGenOpts.DisableTailCalls)

        if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
            TargetDecl->hasAttr<AnyX86InterruptAttr>())

        if (CodeGenOpts.NoEscapingBlockTailCalls) {
          if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
            if (!BD->doesNotEscape())

    if (shouldDisableTailCalls())
      FuncAttrs.addAttribute("disable-tail-calls", "true");

    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  if (CodeGenOpts.EnableNoundefAttrs &&

    RetAttrs.addAttribute(llvm::Attribute::NoUndef);

    RetAttrs.addAttribute(llvm::Attribute::SExt);

    RetAttrs.addAttribute(llvm::Attribute::ZExt);

    RetAttrs.addAttribute(llvm::Attribute::NoExt);

      RetAttrs.addAttribute(llvm::Attribute::InReg);

      AddPotentialArgAccess();

    llvm_unreachable("Invalid ABI kind for return argument");

      RetAttrs.addDereferenceableAttr(

      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        RetAttrs.addAttribute(llvm::Attribute::NonNull);

      llvm::Align Alignment =

      RetAttrs.addAlignmentAttr(Alignment);

  bool hasUsedSRet = false;

  if (IRFunctionArgs.hasSRetArg()) {

    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    SRETAttrs.addAttribute(llvm::Attribute::Writable);
    SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);

      SRETAttrs.addAttribute(llvm::Attribute::InReg);

    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =

  if (IRFunctionArgs.hasInallocaArg()) {

    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =

    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    if (!CodeGenOpts.NullPointerIsValid &&

      Attrs.addAttribute(llvm::Attribute::NonNull);

      Attrs.addDereferenceableOrNullAttr(

    llvm::Align Alignment =

    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);

       I != E; ++I, ++ArgNo) {

    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {

      ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::AttributeSet::get(

              llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));

    if (CodeGenOpts.EnableNoundefAttrs &&

      Attrs.addAttribute(llvm::Attribute::NoUndef);

      Attrs.addAttribute(llvm::Attribute::SExt);

      Attrs.addAttribute(llvm::Attribute::ZExt);

      Attrs.addAttribute(llvm::Attribute::NoExt);

        Attrs.addAttribute(llvm::Attribute::Nest);

        Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));

        Attrs.addAttribute(llvm::Attribute::InReg);

      Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

      if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
          Decl->getArgPassingRestrictions() ==

        Attrs.addAttribute(llvm::Attribute::NoAlias);

        AddPotentialArgAccess();

      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));

        AddPotentialArgAccess();

        Attrs.addDereferenceableAttr(

        if (getTypes().getTargetAddressSpace(PTy) == 0 &&
            !CodeGenOpts.NullPointerIsValid)
          Attrs.addAttribute(llvm::Attribute::NonNull);

        llvm::Align Alignment =

        Attrs.addAlignmentAttr(Alignment);

      if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&

        llvm::Align Alignment =

        Attrs.addAlignmentAttr(Alignment);

        Attrs.addAttribute(llvm::Attribute::NoAlias);

      Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));

      Attrs.addAttribute(llvm::Attribute::NoAlias);

      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {

        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());

      Attrs.addAttribute(llvm::Attribute::SwiftError);

      Attrs.addAttribute(llvm::Attribute::SwiftSelf);

      Attrs.addAttribute(llvm::Attribute::SwiftAsync);

      Attrs.addCapturesAttr(llvm::CaptureInfo::none());

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(

  AttrList = llvm::AttributeList::get(
                                          llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");

                                     QualType ArgType, unsigned ArgNo) {

    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())

    if (NNAttr->isNonNull(ArgNo))

    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();

      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
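// Function prologue argument handling: map IR arguments back to Clang
// parameters, attach alignment/nonnull/noalias information from parameter
// attributes, and reconstruct coerced or expanded aggregates.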
  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),

  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);

  ArgVals.reserve(Args.size());

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");

  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {

        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

      assert(NumIRArgs == 0);

      assert(NumIRArgs == 1);

        ParamAddr = AlignedTemp;

      auto AI = Fn->getArg(FirstIRArg);

      assert(NumIRArgs == 1);

      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {

                             PVD->getFunctionScopeIndex()) &&

          AI->addAttr(llvm::Attribute::NonNull);

        QualType OTy = PVD->getOriginalType();
        if (const auto *ArrTy =

            QualType ETy = ArrTy->getElementType();
            llvm::Align Alignment =

            AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
            uint64_t ArrSize = ArrTy->getZExtSize();

              Attrs.addDereferenceableAttr(
                  getContext().getTypeSizeInChars(ETy).getQuantity() *

              AI->addAttrs(Attrs);
            } else if (getContext().getTargetInfo().getNullPointerValue(

              AI->addAttr(llvm::Attribute::NonNull);

        } else if (const auto *ArrTy =

          QualType ETy = ArrTy->getElementType();
          llvm::Align Alignment =

          AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
          if (!getTypes().getTargetAddressSpace(ETy) &&

            AI->addAttr(llvm::Attribute::NonNull);

        const auto *AVAttr = PVD->getAttr<AlignValueAttr>();

          AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
        if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {

          llvm::ConstantInt *AlignmentCI =

              AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
          if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
            AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
            AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
                llvm::Align(AlignmentInt)));

          AI->addAttr(llvm::Attribute::NoAlias);

      assert(NumIRArgs == 1);

        llvm::Value *V = AI;

                                     V, pointeeTy,
                                     getContext().getTypeAlignInChars(pointeeTy));

        if (V->getType() != LTy)

      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(ArgVal->getType())) {

              *this, VecTyTo, VecTyFrom, ArgVal, Arg->getName());

            assert(NumIRArgs == 1);
      llvm::StructType *STy =

          STy->getNumElements() > 1) {

        llvm::TypeSize PtrElementSize =

        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "

          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with"
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);

          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));

                Builder.CreateInsertValue(LoadedStructValue, AI, i);

          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();

          if (SrcSize <= DstSize) {

            assert(STy->getNumElements() == NumIRArgs);
            for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
              auto AI = Fn->getArg(FirstIRArg + i);
              AI->setName(Arg->getName() + ".coerce" + Twine(i));

            if (SrcSize > DstSize) {

        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");

                llvm::TypeSize::getFixed(
                    getContext().getTypeSizeInChars(Ty).getQuantity() -

      auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);

      unsigned argIndex = FirstIRArg;
      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);

        llvm::Value *elt = Fn->getArg(argIndex++);

        auto paramType = unpaddedStruct
                             ? unpaddedStruct->getElementType(unpaddedIndex++)
                             : unpaddedCoercionType;

        if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
          if (auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {

                *this, VecTyTo, VecTyFrom, elt, elt->getName());
            assert(Extracted && "Unexpected scalable to fixed vector coercion");

      assert(argIndex == FirstIRArg + NumIRArgs);

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));

      assert(NumIRArgs == 0);

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)

    for (unsigned I = 0, E = Args.size(); I != E; ++I)
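// ObjC ARC return-value peepholes: eliminating redundant retain/autorelease
// pairs on the returned value and a redundant retain of 'self'.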
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();

                                                  llvm::Value *result) {

  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {

    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    if (generator->getNextNode() != bitcast)

    InstsToKill.push_back(bitcast);

  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==

    doRetainAutorelease = false;

    llvm::Instruction *prev = call->getPrevNode();

    if (isa<llvm::BitCastInst>(prev)) {
      prev = prev->getPrevNode();

    assert(isa<llvm::CallInst>(prev));
    assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==

    InstsToKill.push_back(prev);

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);

  for (auto *I : InstsToKill)
    I->eraseFromParent();

  if (doRetainAutorelease)

  return CGF.Builder.CreateBitCast(result, resultType);

                                              llvm::Value *result) {

      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;

  llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
  if (!retainCall || retainCall->getCalledOperand() !=

  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||

  llvm::Type *resultType = result->getType();

  assert(retainCall->use_empty());
  retainCall->eraseFromParent();

  return CGF.Builder.CreateBitCast(load, resultType);
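// Finding a single dominating store to the return-value slot so its value and
// debug location can be reused when emitting the return.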
                                            llvm::Value *result) {

  auto GetStoreIfValid = [&CGF,
                          ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != ReturnValuePtr ||

    assert(!SI->isAtomic() &&

  if (!ReturnValuePtr->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;

    const llvm::Instruction *LoadIntoFakeUse = nullptr;
    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {

      if (LoadIntoFakeUse == &I)

      if (isa<llvm::BitCastInst>(&I))

      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)

        if (II->getIntrinsicID() == llvm::Intrinsic::fake_use) {
          LoadIntoFakeUse = dyn_cast<llvm::Instruction>(II->getArgOperand(0));

      return GetStoreIfValid(&I);

  llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
  if (!store) return nullptr;

  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();

  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
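// CMSE bit-mask helpers: compute which bytes and bits of a record are
// actually used and AND the returned value with that mask ("cmse.clear").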
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;

  while (BitWidth >= CharWidth) {

    BitWidth -= CharWidth;

    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;

                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {

  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;

                BFI.Size, CharWidth,

    auto Src = TmpBits.begin();
    auto Dst = Bits.begin() + Offset + I * Size;
    for (int J = 0; J < Size; ++J)

    std::fill_n(Bits.begin() + Offset, Size,

                                   int Pos, int Size, int CharWidth,

    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;

      Mask = (Mask << CharWidth) | *P;

    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;

      Mask = (Mask << CharWidth) | *--P;

                                                   llvm::IntegerType *ITy,

  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  int Size = DataLayout.getTypeStoreSize(ITy);

  return Builder.CreateAnd(Src, Mask, "cmse.clear");

                                                   llvm::ArrayType *ATy,

  int Size = DataLayout.getTypeStoreSize(ATy);

      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;

  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {

                                  DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
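// Function epilogue: recover the return value (possibly from a dominating
// store), rebuild coerced multi-element returns, emit the return instruction,
// and perform the returns-nonnull / nullability sanitizer check.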
  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;

    llvm::Function::arg_iterator EI = CurFn->arg_end();

    llvm::Value *ArgStruct = &*EI;

        cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();

    auto AI = CurFn->arg_begin();

      if (llvm::StoreInst *SI =

        RetDbgLoc = SI->getDebugLoc();

        RV = SI->getValueOperand();
        SI->eraseFromParent();

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();

        llvm_unreachable("Unexpected function/method type");

      auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        auto coercedEltType = coercionType->getElementType(i);

            unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
                           : unpaddedCoercionType,

        results.push_back(elt);

      if (results.size() == 1) {

        RV = llvm::PoisonValue::get(returnType);
        for (unsigned i = 0, e = results.size(); i != e; ++i) {
          RV = Builder.CreateInsertValue(RV, results[i], i);

    llvm_unreachable("Invalid ABI kind for return argument");

  llvm::Instruction *Ret;

      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());

  Ret->setDebugLoc(std::move(RetDbgLoc));

  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())

    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;

    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())

          AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::SO_NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;

  SanitizerScope SanScope(this);

  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())

        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);

  llvm::Value *Cond = Builder.CreateIsNotNull(RV);

  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);

  if (type->isReferenceType()) {

      param->hasAttr<NSConsumedAttr>() &&
      type->isObjCRetainableType()) {

        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));

      CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));

         "cleanup for callee-destructed param not recorded");

  llvm::Instruction *isActive = Builder.CreateUnreachable();

  return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);

         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  if (!provablyNonNull) {

    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);

                                      "icr.writeback-cast");

  if (writeback.ToUse) {

  if (!provablyNonNull)

  for (const auto &I : llvm::reverse(Cleanups)) {

    I.IsActiveIP->eraseFromParent();

    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();

  llvm::PointerType *destType =

  llvm::Type *destElemType =

  CodeGenFunction::ConditionalEvaluation condEval(CGF);

      llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  llvm::Value *finalArgument;

  if (provablyNonNull) {

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType),

    originBB = CGF.Builder.GetInsertBlock();

    CGF.Builder.CreateCondBr(isNull, contBB, copyBB);

    condEval.begin(CGF);

  llvm::Value *valueToUse = nullptr;

      src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");

  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();

    llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,

    phiToUse->addIncoming(valueToUse, copyBB);
    phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),

    valueToUse = phiToUse;

  StackBase = CGF.Builder.CreateStackSave("inalloca.save");

  CGF.Builder.CreateStackRestore(StackBase);
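// Nonnull argument checks: with -fsanitize=nonnull-attribute or
// nullability-arg, emit a runtime check that the passed pointer is non-null,
// pointing the diagnostic at the attribute or nullability annotation.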
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||

  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
      !PVD->getType()->isRecordType()) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&

                          PVD->getTypeSourceInfo();

  if (!NNAttr && !CanCheckNullability)

    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::SO_NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;

    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::SO_NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;

  SanitizerScope SanScope(this);

  llvm::Constant *StaticData[] = {

      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),

  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, {});

                                          AbstractCallee AC, unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||

  return llvm::any_of(ArgTypes, [&](QualType Ty) {

    return classDecl->getTypeParamListAsWritten();

    return catDecl->getTypeParamList();
4504 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4505 AbstractCallee AC,
unsigned ParamsToSkip, EvaluationOrder Order) {
4508 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4509 "Can't skip parameters if type info is not provided");
4519 bool IsVariadic =
false;
4521 const auto *MD = dyn_cast<const ObjCMethodDecl *>(
Prototype.P);
4523 IsVariadic = MD->isVariadic();
4526 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4527 MD->param_type_end());
4529 const auto *FPT = cast<const FunctionProtoType *>(
Prototype.P);
4530 IsVariadic = FPT->isVariadic();
4531 ExplicitCC = FPT->getExtInfo().getCC();
4532 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4533 FPT->param_type_end());
4541 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4543 (isGenericMethod || Ty->isVariablyModifiedType() ||
4544 Ty.getNonReferenceType()->isObjCRetainableType() ||
4546 .getCanonicalType(Ty.getNonReferenceType())
4548 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4549 "type mismatch in call argument!");
4555 assert((Arg == ArgRange.end() || IsVariadic) &&
4556 "Extra arguments in non-variadic function!");
4561 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4562 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4563 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
                                                 EmittedArg.getScalarVal(),
std::swap(Args.back(), *(&Args.back() - 1));
assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
       "inalloca only supported on x86");
size_t CallArgsStart = Args.size();
for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
  unsigned Idx = LeftToRight ? I : E - I - 1;
  unsigned InitialArgSize = Args.size();
  assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
          getContext().hasSameUnqualifiedType((*Arg)->getType(),
         (isa<ObjCMethodDecl>(AC.getDecl()) &&
         "Argument and parameter types don't match");
  assert(InitialArgSize + 1 == Args.size() &&
         "The code below depends on only adding one arg per EmitCallArg");
  (void)InitialArgSize;
  if (!Args.back().hasLValue()) {
    RValue RVArg = Args.back().getKnownRValue();
                        ParamsToSkip + Idx);
    MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
std::reverse(Args.begin() + CallArgsStart, Args.end());
    : Addr(Addr), Ty(Ty) {}
struct DisableDebugLocationUpdates {
  bool disabledDebugInfo;
  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
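// DisableDebugLocationUpdates above suppresses debug locations while a default
// argument expression is emitted and restores them in the destructor. A
// minimal generic sketch of the same RAII pattern; the suspend/resume
// callbacks are hypothetical stand-ins for CodeGenFunction's debug-info hooks,
// which are not shown in this listing. (Uses std::function from <functional>.)
class ScopedSuspendSketch {
  std::function<void()> Resume;
public:
  ScopedSuspendSketch(std::function<void()> Suspend, std::function<void()> Resume)
      : Resume(std::move(Resume)) {
    Suspend(); // turn the state off for the lifetime of this object
  }
  ~ScopedSuspendSketch() { Resume(); } // turn it back on when leaving the scope
};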
DisableDebugLocationUpdates Dis(*this, E);
    = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
       "reference binding to unmaterialized r-value!");
if (type->isRecordType() &&
bool DestroyedInCallee = true, NeedsCleanup = true;
if (const auto *RD = type->getAsCXXRecordDecl())
  DestroyedInCallee = RD->hasNonTrivialDestructor();
NeedsCleanup = type.isDestructedType();
if (DestroyedInCallee)
if (DestroyedInCallee && NeedsCleanup) {
llvm::Instruction *IsActive =
if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
    cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
    !type->isArrayParameterType()) {
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
if (!getTarget().getTriple().isOSWindows())
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                                        const llvm::Twine &name) {
                                        const llvm::Twine &name) {
for (auto arg : args)
  values.push_back(arg.emitRawPointer(*this));
                                        const llvm::Twine &name) {
call->setDoesNotThrow();
                                        const llvm::Twine &name) {
if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
  if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
    auto IID = CalleeFn->getIntrinsicID();
    if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
                                        const llvm::Twine &name) {
llvm::CallInst *call = Builder.CreateCall(
return cast<llvm::CallInst>(addConvergenceControlToken(call));
llvm::InvokeInst *invoke =
invoke->setDoesNotReturn();
llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
call->setDoesNotReturn();
                                 const Twine &name) {
                                 const Twine &name) {
                                 const Twine &Name) {
llvm::CallBase *Inst;
Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
AddObjCARCExceptionMetadata(Inst);
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
DeferredReplacements.push_back(
    std::make_pair(llvm::WeakTrackingVH(Old), New));
[[nodiscard]] llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
  llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
  if (CurAlign >= NewAlign)
  llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
  return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
      .addRetAttribute(Ctx, AlignAttr);
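// Illustrative (hypothetical) use of the helper above: raise the call-site
// return-alignment attribute to 16 bytes unless an equal or larger alignment
// is already present. The wrapper name and the literal 16 are assumptions.
static llvm::AttributeList raiseRetAlignTo16(llvm::LLVMContext &Ctx,
                                             const llvm::AttributeList &Attrs) {
  return maybeRaiseRetAlignmentAttribute(Ctx, Attrs, llvm::Align(16));
}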
template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
  const AlignedAttrTy *AA = nullptr;
  llvm::Value *Alignment = nullptr;
  llvm::ConstantInt *OffsetCI = nullptr;
    AA = FuncDecl->getAttr<AlignedAttrTy>();
  [[nodiscard]] llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI->getValue().isPowerOf2())
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA->getLocation(), Alignment, OffsetCI);
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      if (OffsetCI->isNullValue())
class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
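// Generic sketch of the decision these emitter classes implement: prefer a
// static call-site 'align' return attribute when the alignment is a known
// power of two, and otherwise report that the caller must emit a dynamic
// llvm.assume instead. Purely illustrative and not the class interface above;
// uses llvm::isPowerOf2_64 from llvm/Support/MathExtras.h.
struct AlignmentPlanSketch {
  llvm::AttributeList Attrs; // possibly updated attribute list
  bool NeedsRuntimeAssume;   // true when no static attribute could be added
};

static AlignmentPlanSketch planAlignment(llvm::LLVMContext &Ctx,
                                         llvm::AttributeList Attrs,
                                         uint64_t KnownAlign) {
  if (KnownAlign != 0 && llvm::isPowerOf2_64(KnownAlign))
    return {Attrs.addRetAttribute(
                Ctx, llvm::Attribute::getWithAlignment(Ctx,
                                                       llvm::Align(KnownAlign))),
            false};
  return {Attrs, true};
}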
if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
  return VT->getPrimitiveSizeInBits().getKnownMinValue();
if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
unsigned MaxVectorWidth = 0;
if (auto *ST = dyn_cast<llvm::StructType>(Ty))
  for (auto *I : ST->elements())
return MaxVectorWidth;
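// Sketch of the recursive walk the fragment above performs: the widest vector
// reachable through an array or struct type, in bits. The recursive calls for
// the array and struct cases are assumptions filled in from the visible
// structure; they are not part of the listing itself. (Uses std::max.)
static unsigned maxVectorWidthSketch(const llvm::Type *Ty) {
  if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
    return VT->getPrimitiveSizeInBits().getKnownMinValue();
  if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
    return maxVectorWidthSketch(AT->getElementType());
  unsigned MaxVectorWidth = 0;
  if (auto *ST = dyn_cast<llvm::StructType>(Ty))
    for (auto *I : ST->elements())
      MaxVectorWidth = std::max(MaxVectorWidth, maxVectorWidthSketch(I));
  return MaxVectorWidth;
}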
                                  llvm::CallBase **callOrInvoke, bool IsMustTail,
                                  bool IsVirtualFunctionPointerThunk) {
const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
    (TargetDecl->hasAttr<TargetAttr>() ||
const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
                      CalleeDecl, CallArgs, RetTy);
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
  llvm::AllocaInst *AI;
  IP = IP->getNextNode();
  AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), "argmem",
  AI->setAlignment(Align.getAsAlign());
  AI->setUsedWithInAlloca(true);
  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
  ArgMemory = RawAddress(AI, ArgStruct, Align);
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
llvm::Value *UnusedReturnSizePtr = nullptr;
if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.isIndirect()) {
  IRFunctionArgs.getSRetArgNo(),
llvm::TypeSize size =
if (IRFunctionArgs.hasSRetArg()) {
  IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
assert(CallInfo.arg_size() == CallArgs.size() &&
       "Mismatch between function signature & arguments.");
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
     I != E; ++I, ++info_it, ++ArgNo) {
  if (IRFunctionArgs.hasPaddingArg(ArgNo))
    IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
  unsigned FirstIRArg, NumIRArgs;
  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
  bool ArgHasMaybeUndefAttr =
    assert(NumIRArgs == 0);
    assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
    if (I->isAggregate()) {
          ? I->getKnownLValue().getAddress()
          : I->getKnownRValue().getAggregateAddress();
      llvm::Instruction *Placeholder =
      CGBuilderTy::InsertPoint IP = Builder.saveIP();
      Builder.SetInsertPoint(Placeholder);
      deferPlaceholderReplacement(Placeholder, Addr.getPointer());
                     I->Ty, getContext().getTypeAlignInChars(I->Ty),
                     "indirect-arg-temp");
      I->copyInto(*this, Addr);
      I->copyInto(*this, Addr);
    assert(NumIRArgs == 1);
    if (I->isAggregate()) {
          ? I->getKnownLValue().getAddress()
          : I->getKnownRValue().getAggregateAddress();
      assert((FirstIRArg >= IRFuncTy->getNumParams() ||
              IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                  TD->getAllocaAddrSpace()) &&
             "indirect argument must be in alloca address space");
      bool NeedCopy = false;
    } else if (I->hasLValue()) {
      auto LV = I->getKnownLValue();
      if (!isByValOrRef ||
      if ((isByValOrRef &&
      else if ((isByValOrRef &&
               Addr.getType()->getAddressSpace() != IRFuncTy->
        auto *T = llvm::PointerType::get(
        if (ArgHasMaybeUndefAttr)
          Val = Builder.CreateFreeze(Val);
        IRCallArgs[FirstIRArg] = Val;
    } else if (I->getType()->isArrayParameterType()) {
      IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
      if (ArgHasMaybeUndefAttr)
        Val = Builder.CreateFreeze(Val);
      IRCallArgs[FirstIRArg] = Val;
      llvm::TypeSize ByvalTempElementSize =
      llvm::Value *LifetimeSize =
      CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
      I->copyInto(*this, AI);
    assert(NumIRArgs == 0);
    assert(NumIRArgs == 1);
    if (!I->isAggregate())
      V = I->getKnownRValue().getScalarVal();
          I->hasLValue() ? I->getKnownLValue().getAddress()
                         : I->getKnownRValue().getAggregateAddress());
      assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
          V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
      cast<llvm::AllocaInst>(V)->setSwiftError(true);
        V->getType()->isIntegerTy())
    if (FirstIRArg < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(FirstIRArg))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
    if (ArgHasMaybeUndefAttr)
    IRCallArgs[FirstIRArg] = V;
    llvm::StructType *STy =
    if (!I->isAggregate()) {
      I->copyInto(*this, Src);
      Src = I->hasLValue() ? I->getKnownLValue().getAddress()
                           : I->getKnownRValue().getAggregateAddress();
      llvm::TypeSize SrcTypeSize =
      if (SrcTypeSize.isScalable()) {
        assert(STy->containsHomogeneousScalableVectorTypes() &&
               "ABI only supports structure with homogeneous scalable vector "
        assert(SrcTypeSize == DstTypeSize &&
               "Only allow non-fractional movement of structure with "
               "homogeneous scalable vector type");
        assert(NumIRArgs == STy->getNumElements());
        llvm::Value *StoredStructValue =
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *Extract = Builder.CreateExtractValue(
              StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
          IRCallArgs[FirstIRArg + i] = Extract;
        uint64_t SrcSize = SrcTypeSize.getFixedValue();
        uint64_t DstSize = DstTypeSize.getFixedValue();
        if (SrcSize < DstSize) {
        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          if (ArgHasMaybeUndefAttr)
            LI = Builder.CreateFreeze(LI);
          IRCallArgs[FirstIRArg + i] = LI;
      assert(NumIRArgs == 1);
      auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
      if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
      if (ArgHasMaybeUndefAttr)
      IRCallArgs[FirstIRArg] = Load;
    auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
    llvm::Value *tempSize = nullptr;
    if (I->isAggregate()) {
      addr = I->hasLValue() ? I->getKnownLValue().getAddress()
                            : I->getKnownRValue().getAggregateAddress();
      RValue RV = I->getKnownRValue();
                       nullptr, &AllocaAddr);
    unsigned IRArgPos = FirstIRArg;
    unsigned unpaddedIndex = 0;
    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
      llvm::Type *eltType = coercionType->getElementType(i);
          unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
                         : unpaddedCoercionType,
      if (ArgHasMaybeUndefAttr)
        elt = Builder.CreateFreeze(elt);
      IRCallArgs[IRArgPos++] = elt;
    assert(IRArgPos == FirstIRArg + NumIRArgs);
    unsigned IRArgPos = FirstIRArg;
    ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
    assert(IRArgPos == FirstIRArg + NumIRArgs);
const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
assert(IRFunctionArgs.hasInallocaArg());
IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                 llvm::Value *Ptr) -> llvm::Function * {
  if (!CalleeFT->isVarArg())
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == llvm::Instruction::BitCast)
      Ptr = CE->getOperand(0);
  llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
  llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
  if (OrigFT->isVarArg() ||
      OrigFT->getNumParams() != CalleeFT->getNumParams() ||
      OrigFT->getReturnType() != CalleeFT->getReturnType())
  for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
    if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
  IRFuncTy = OrigFn->getFunctionType();
assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
  if (IRFunctionArgs.hasInallocaArg() &&
      i == IRFunctionArgs.getInallocaArgNo())
  if (i < IRFuncTy->getNumParams())
    assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
for (unsigned i = 0; i < IRCallArgs.size(); ++i)
  LargestVectorWidth = std::max(LargestVectorWidth,
llvm::AttributeList Attrs;
if (CallingConv == llvm::CallingConv::X86_VectorCall &&
    getTarget().getTriple().isWindowsArm64EC()) {
  CGM.Error(Loc, "__vectorcall calling convention is not currently "
if (FD->hasAttr<StrictFPAttr>())
  Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
                             CallerDecl, CalleeDecl))
    Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
    Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Convergent);
!(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>()) &&
    CallerDecl, CalleeDecl)) {
  Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
CannotThrow = false;
CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
  if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
if (UnusedReturnSizePtr)
                   UnusedReturnSizePtr);
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
    !isa_and_nonnull<FunctionDecl>(TargetDecl))
if (FD->hasAttr<StrictFPAttr>())
  Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
    CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
  if (const auto *A = FD->getAttr<CFGuardAttr>()) {
    if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
CI->setAttributes(Attrs);
CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
if (!CI->getType()->isVoidTy())
  CI->setName("call");
CI = addConvergenceControlToken(CI);
LargestVectorWidth =
if (!CI->getCalledFunction())
AddObjCARCExceptionMetadata(CI);
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
    Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  else if (IsMustTail) {
    else if (Call->isIndirectCall())
    else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
      if (!cast<FunctionDecl>(TargetDecl)->isDefined())
          {cast<FunctionDecl>(TargetDecl), Loc});
      if (llvm::GlobalValue::isWeakForLinker(Linkage) ||
          llvm::GlobalValue::isDiscardableIfUnused(Linkage))
    Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
    TargetDecl->hasAttr<MSAllocatorAttr>())
if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
  llvm::ConstantInt *Line =
  llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
  CI->setMetadata("srcloc", MDT);
if (CI->doesNotReturn()) {
  if (UnusedReturnSizePtr)
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    if (auto *F = CI->getCalledFunction())
      F->removeFnAttr(llvm::Attribute::NoReturn);
    CI->removeFnAttr(llvm::Attribute::NoReturn);
        SanitizerKind::KernelAddress)) {
      SanitizerScope SanScope(this);
      llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
      auto *FnType = llvm::FunctionType::get(CGM.VoidTy, false);
      llvm::FunctionCallee Fn =
  Builder.ClearInsertionPoint();
if (CI->getType()->isVoidTy())
Builder.ClearInsertionPoint();
if (swiftErrorTemp.isValid()) {
if (IsVirtualFunctionPointerThunk) {
bool requiresExtract = isa<llvm::StructType>(CI->getType());
unsigned unpaddedIndex = 0;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
  llvm::Type *eltType = coercionType->getElementType(i);
  llvm::Value *elt = CI;
  if (requiresExtract)
    elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
    assert(unpaddedIndex == 0);
if (UnusedReturnSizePtr)
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
llvm::Value *V = CI;
if (V->getType() != RetIRTy)
if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
  llvm::Value *V = CI;
  if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
    if (FixedDstTy->getElementType() == ScalableSrcTy->getElementType()) {
      V = Builder.CreateExtractVector(FixedDstTy, V, Zero,
DestIsVolatile = false;
llvm_unreachable("Invalid ABI kind for return argument");
llvm_unreachable("Unhandled ABIArgInfo::Kind");
if (Ret.isScalar() && TargetDecl) {
  AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
  LifetimeEnd.Emit(*this, {});
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from TargetOpts and F, and sets the result in FuncAttr.
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
enum clang::sema::@1727::IndirectLocalPathEntry::EntryKind Kind
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const LLVM_READONLY
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of.
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StringRef getName() const
Return the IR name of the pointer value.
llvm::PointerType * getType() const
Return the type of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr, llvm::Value *lifetimeSz=nullptr)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen without...
const TargetInfo & getTarget() const
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
CGDebugInfo * getDebugInfo()
Address EmitVAListRef(const Expr *E)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertType(QualType T)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
CodeGenTypes & getTypes() const
void EmitWritebacks(const CallArgList &Args)
EmitWriteback - Emit callbacks for function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CallExpr * MustTailCall
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
const ABIInfo & getABIInfo()
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
void addUndefinedGlobalForTailCall(std::pair< const FunctionDecl *, SourceLocation > Global)
ObjCEntrypoints & getObjCEntrypoints() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD)
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust the Memory attribute to ensure that the backend gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
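A sketch of the typical pattern for a function definition, assuming CGM (CodeGenModule), FI (CGFunctionInfo), CalleeInfo (CGCalleeInfo) and an llvm::Function *Fn are available:
  llvm::AttributeList Attrs;
  unsigned CallingConv = 0;
  CGM.ConstructAttributeList(Fn->getName(), FI, CalleeInfo, Attrs, CallingConv,
                             /*AttrOnCallSite=*/false, /*IsThunk=*/false);
  Fn->setAttributes(Attrs);
  Fn->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));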
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
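Combined with the arrange* entries below, a minimal sketch (Types is an assumed CodeGenTypes reference and FD a non-member FunctionDecl *):
  const CGFunctionInfo &FI = Types.arrangeFunctionDeclaration(FD);
  llvm::FunctionType *FnTy = Types.GetFunctionType(FI); // ABI-lowered signature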
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type into the LLVM argument types it would be passed as.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert a Clang calling convention to the corresponding LLVM calling convention.
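For illustration, with Types again an assumed CodeGenTypes reference:
  unsigned LLVMCC = Types.ClangCallConvToLLVMCallConv(CC_C); // llvm::CallingConv::C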
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
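A sketch of the save-and-find idiom; CGF.EHStack is assumed to be the enclosing function's EHScopeStack:
  EHScopeStack::stable_iterator Saved = CGF.EHStack.stable_begin();
  // ... code that may push further cleanup scopes ...
  EHScopeStack::iterator I = CGF.EHStack.find(Saved); // positioned where the top
                                                      // of the stack was at the save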
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
LangAS getAddressSpace() const
CharUnits getAlignment() const
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
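A small sketch of wrapping an Address as an LValue and reading properties back; CGF, Addr and Ty are assumptions:
  LValue LV = CGF.MakeAddrLValue(Addr, Ty);
  CharUnits Align = LV.getAlignment();        // alignment carried over from Addr
  bool IsVolatile = LV.isVolatileQualified(); // reflects Ty's qualifiers
  (void)Align; (void)IsVolatile;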
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddress() - Return the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
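A sketch distinguishing the scalar and aggregate cases; V (llvm::Value *) and Addr (Address) are assumed:
  RValue Scalar = RValue::get(V);
  llvm::Value *Raw = Scalar.getScalarVal();   // the wrapped SSA value
  RValue Agg = RValue::getAggregate(Addr);
  Address Mem = Agg.getAggregateAddress();    // where the aggregate lives
  (void)Raw; (void)Mem;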
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
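For example (hedged sketch; FPT is an assumed const FunctionProtoType * and one extra prefix argument such as 'this' is being prepended):
  RequiredArgs Req = RequiredArgs::forPrototypePlus(FPT, /*additional=*/1);
  if (Req.allowsOptionalArgs()) {               // true for variadic prototypes
    unsigned Fixed = Req.getNumRequiredArgs();  // declared params + the prefix
    (void)Fixed;
  }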
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual bool wouldInliningViolateFunctionCallABI(const FunctionDecl *Caller, const FunctionDecl *Callee) const
Returns true if inlining the function call would produce incorrect code for the current target and sh...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in the parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
bool isParamDestroyedInCallee() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
bool isBlockPointerType() const
bool isIncompleteArrayType() const
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
QualType getCanonicalTypeInternal() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>.
bool isNullPtrType() const
bool isObjCRetainableType() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool This(InterpState &S, CodePtr OpPC)
bool Zero(InterpState &S, CodePtr OpPC)
bool Load(InterpState &S, CodePtr OpPC)
bool Ret(InterpState &S, CodePtr &PC)
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
LangAS getASTAllocaAddressSpace() const
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm used to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.