1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8#include "../ExprConstShared.h"
9#include "Boolean.h"
10#include "Compiler.h"
11#include "EvalEmitter.h"
12#include "Interp.h"
   13#include "InterpBuiltinBitCast.h"
   14#include "PrimType.h"
   15#include "clang/AST/OSLog.h"
   16#include "clang/AST/RecordLayout.h"
   17#include "clang/Basic/Builtins.h"
   18#include "clang/Basic/TargetBuiltins.h"
   19#include "clang/Basic/TargetInfo.h"
   20#include "llvm/ADT/StringExtras.h"
21#include "llvm/Support/SipHash.h"
22
23namespace clang {
24namespace interp {
25
26static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
27 unsigned O = 0;
28
29 for (const Expr *E : C->arguments()) {
30 O += align(primSize(*S.getContext().classify(E)));
31 }
32
33 return O;
34}
35
36template <typename T>
37static T getParam(const InterpFrame *Frame, unsigned Index) {
38 assert(Frame->getFunction()->getNumParams() > Index);
39 unsigned Offset = Frame->getFunction()->getParamOffset(Index);
40 return Frame->getParam<T>(Offset);
41}
42
43static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index) {
44 APSInt R;
45 unsigned Offset = Frame->getFunction()->getParamOffset(Index);
46 INT_TYPE_SWITCH(Frame->getFunction()->getParamType(Index),
47 R = Frame->getParam<T>(Offset).toAPSInt());
48 return R;
49}
50
   51static PrimType getIntPrimType(const InterpState &S) {
   52 const TargetInfo &TI = S.getASTContext().getTargetInfo();
53 unsigned IntWidth = TI.getIntWidth();
54
55 if (IntWidth == 32)
56 return PT_Sint32;
57 else if (IntWidth == 16)
58 return PT_Sint16;
59 llvm_unreachable("Int isn't 16 or 32 bit?");
60}
61
63 const TargetInfo &TI = S.getASTContext().getTargetInfo();
64 unsigned LongWidth = TI.getLongWidth();
65
66 if (LongWidth == 64)
67 return PT_Sint64;
68 else if (LongWidth == 32)
69 return PT_Sint32;
70 else if (LongWidth == 16)
71 return PT_Sint16;
72 llvm_unreachable("long isn't 16, 32 or 64 bit?");
73}
74
75/// Peek an integer value from the stack into an APSInt.
76static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
77 if (Offset == 0)
78 Offset = align(primSize(T));
79
80 APSInt R;
81 INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());
82
83 return R;
84}
85
86/// Pushes \p Val on the stack as the type given by \p QT.
87static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
   88 assert(QT->isSignedIntegerOrEnumerationType() ||
   89 QT->isUnsignedIntegerOrEnumerationType());
   90 std::optional<PrimType> T = S.getContext().classify(QT);
91 assert(T);
92
93 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
   94 if (QT->isSignedIntegerOrEnumerationType()) {
   95 int64_t V = Val.getSExtValue();
96 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
97 } else {
   98 assert(QT->isUnsignedIntegerOrEnumerationType());
   99 uint64_t V = Val.getZExtValue();
100 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
101 }
102}
103
104template <typename T>
105static void pushInteger(InterpState &S, T Val, QualType QT) {
106 if constexpr (std::is_same_v<T, APInt>)
107 pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
108 else if constexpr (std::is_same_v<T, APSInt>)
109 pushInteger(S, Val, QT);
110 else
111 pushInteger(S,
112 APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
113 std::is_signed_v<T>),
114 !std::is_signed_v<T>),
115 QT);
116}
117
118static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
  119 INT_TYPE_SWITCH_NO_BOOL(
  120 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
121}
122
123static bool retPrimValue(InterpState &S, CodePtr OpPC,
124 std::optional<PrimType> &T) {
125 if (!T)
126 return RetVoid(S, OpPC);
127
128#define RET_CASE(X) \
129 case X: \
130 return Ret<X>(S, OpPC);
  131 switch (*T) {
  132 RET_CASE(PT_Ptr);
  133 RET_CASE(PT_FnPtr);
  134 RET_CASE(PT_Float);
  135 RET_CASE(PT_Bool);
  136 RET_CASE(PT_Sint8);
  137 RET_CASE(PT_Uint8);
  138 RET_CASE(PT_Sint16);
  139 RET_CASE(PT_Uint16);
  140 RET_CASE(PT_Sint32);
  141 RET_CASE(PT_Uint32);
  142 RET_CASE(PT_Sint64);
  143 RET_CASE(PT_Uint64);
  144 RET_CASE(PT_IntAP);
  145 RET_CASE(PT_IntAPS);
146 default:
147 llvm_unreachable("Unsupported return type for builtin function");
148 }
149#undef RET_CASE
150}
151
  152static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
  153 unsigned ID) {
154 auto Loc = S.Current->getSource(OpPC);
155 if (S.getLangOpts().CPlusPlus11)
156 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
  157 << /*isConstexpr=*/0 << /*isConstructor=*/0
  158 << S.getASTContext().BuiltinInfo.getQuotedName(ID);
159 else
160 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
161}
162
  163static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
  164 const InterpFrame *Frame,
165 const CallExpr *Call) {
166 unsigned Depth = S.Current->getDepth();
167 auto isStdCall = [](const FunctionDecl *F) -> bool {
168 return F && F->isInStdNamespace() && F->getIdentifier() &&
169 F->getIdentifier()->isStr("is_constant_evaluated");
170 };
171 const InterpFrame *Caller = Frame->Caller;
172 // The current frame is the one for __builtin_is_constant_evaluated.
173 // The one above that, potentially the one for std::is_constant_evaluated().
174 if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
175 S.getEvalStatus().Diag &&
176 (Depth == 1 || (Depth == 2 && isStdCall(Caller->getCallee())))) {
177 if (Caller->Caller && isStdCall(Caller->getCallee())) {
178 const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
179 S.report(E->getExprLoc(),
180 diag::warn_is_constant_evaluated_always_true_constexpr)
181 << "std::is_constant_evaluated" << E->getSourceRange();
182 } else {
183 const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
184 S.report(E->getExprLoc(),
185 diag::warn_is_constant_evaluated_always_true_constexpr)
186 << "__builtin_is_constant_evaluated" << E->getSourceRange();
187 }
188 }
189
190 S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
191 return true;
192}
193
  194static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
  195 const InterpFrame *Frame,
196 const Function *Func, const CallExpr *Call) {
197 unsigned ID = Func->getBuiltinID();
198 const Pointer &A = getParam<Pointer>(Frame, 0);
199 const Pointer &B = getParam<Pointer>(Frame, 1);
200
201 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp)
202 diagnoseNonConstexprBuiltin(S, OpPC, ID);
203
204 uint64_t Limit = ~static_cast<uint64_t>(0);
205 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp)
206 Limit = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
207 .getZExtValue();
208
209 if (Limit == 0) {
210 pushInteger(S, 0, Call->getType());
211 return true;
212 }
213
214 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
215 return false;
216
217 if (A.isDummy() || B.isDummy())
218 return false;
219
220 assert(A.getFieldDesc()->isPrimitiveArray());
221 assert(B.getFieldDesc()->isPrimitiveArray());
222
223 unsigned IndexA = A.getIndex();
224 unsigned IndexB = B.getIndex();
225 int32_t Result = 0;
226 uint64_t Steps = 0;
227 for (;; ++IndexA, ++IndexB, ++Steps) {
228
229 if (Steps >= Limit)
230 break;
231 const Pointer &PA = A.atIndex(IndexA);
232 const Pointer &PB = B.atIndex(IndexB);
233 if (!CheckRange(S, OpPC, PA, AK_Read) ||
234 !CheckRange(S, OpPC, PB, AK_Read)) {
235 return false;
236 }
237 uint8_t CA = PA.deref<uint8_t>();
238 uint8_t CB = PB.deref<uint8_t>();
239
240 if (CA > CB) {
241 Result = 1;
242 break;
243 } else if (CA < CB) {
244 Result = -1;
245 break;
246 }
247 if (CA == 0 || CB == 0)
248 break;
249 }
250
251 pushInteger(S, Result, Call->getType());
252 return true;
253}
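// Usage sketch (illustrative, not part of the original source): in a constant
// expression the handler above is expected to fold e.g.
//   static_assert(__builtin_strcmp("ab", "ac") < 0);
//   static_assert(__builtin_strncmp("abc", "abd", 2) == 0);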
254
  255static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
  256 const InterpFrame *Frame,
257 const Function *Func, const CallExpr *Call) {
258 unsigned ID = Func->getBuiltinID();
259 const Pointer &StrPtr = getParam<Pointer>(Frame, 0);
260
261 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
262 diagnoseNonConstexprBuiltin(S, OpPC, ID);
263
264 if (!CheckArray(S, OpPC, StrPtr))
265 return false;
266
267 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
268 return false;
269
270 if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
271 return false;
272
273 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
274 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
275
276 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
277 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
278 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
279 }
280
281 size_t Len = 0;
282 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
283 const Pointer &ElemPtr = StrPtr.atIndex(I);
284
285 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
286 return false;
287
288 uint32_t Val;
289 switch (ElemSize) {
290 case 1:
291 Val = ElemPtr.deref<uint8_t>();
292 break;
293 case 2:
294 Val = ElemPtr.deref<uint16_t>();
295 break;
296 case 4:
297 Val = ElemPtr.deref<uint32_t>();
298 break;
299 default:
300 llvm_unreachable("Unsupported char size");
301 }
302 if (Val == 0)
303 break;
304 }
305
306 pushInteger(S, Len, Call->getType());
307
308 return true;
309}
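// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_strlen("hello") == 5);
//   static_assert(__builtin_wcslen(L"hi") == 2);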
310
  311static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
  312 const InterpFrame *Frame, const Function *F,
313 bool Signaling) {
314 const Pointer &Arg = getParam<Pointer>(Frame, 0);
315
316 if (!CheckLoad(S, OpPC, Arg))
317 return false;
318
319 assert(Arg.getFieldDesc()->isPrimitiveArray());
320
321 // Convert the given string to an integer using StringRef's API.
322 llvm::APInt Fill;
323 std::string Str;
324 assert(Arg.getNumElems() >= 1);
325 for (unsigned I = 0;; ++I) {
326 const Pointer &Elem = Arg.atIndex(I);
327
328 if (!CheckLoad(S, OpPC, Elem))
329 return false;
330
331 if (Elem.deref<int8_t>() == 0)
332 break;
333
334 Str += Elem.deref<char>();
335 }
336
337 // Treat empty strings as if they were zero.
338 if (Str.empty())
339 Fill = llvm::APInt(32, 0);
340 else if (StringRef(Str).getAsInteger(0, Fill))
341 return false;
342
  343 const llvm::fltSemantics &TargetSemantics =
  344 S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());
  345
  346 Floating Result;
  347 if (S.getASTContext().getTargetInfo().isNan2008()) {
  348 if (Signaling)
  349 Result = Floating(
  350 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  351 else
  352 Result = Floating(
  353 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  354 } else {
355 // Prior to IEEE 754-2008, architectures were allowed to choose whether
356 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
357 // a different encoding to what became a standard in 2008, and for pre-
358 // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
359 // sNaN. This is now known as "legacy NaN" encoding.
  360 if (Signaling)
  361 Result = Floating(
  362 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  363 else
  364 Result = Floating(
  365 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
366 }
367
368 S.Stk.push<Floating>(Result);
369 return true;
370}
371
  372static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
  373 const InterpFrame *Frame, const Function *F) {
  374 const llvm::fltSemantics &TargetSemantics =
  375 S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());
  376
377 S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
378 return true;
379}
380
  381static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
  382 const InterpFrame *Frame,
383 const Function *F) {
384 const Floating &Arg1 = getParam<Floating>(Frame, 0);
385 const Floating &Arg2 = getParam<Floating>(Frame, 1);
386
387 APFloat Copy = Arg1.getAPFloat();
388 Copy.copySign(Arg2.getAPFloat());
389 S.Stk.push<Floating>(Floating(Copy));
390
391 return true;
392}
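// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_copysign(1.0, -2.0) == -1.0);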
393
  394static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
  395 const InterpFrame *Frame, const Function *F,
396 bool IsNumBuiltin) {
397 const Floating &LHS = getParam<Floating>(Frame, 0);
398 const Floating &RHS = getParam<Floating>(Frame, 1);
399
  400 Floating Result;
  401
402 if (IsNumBuiltin) {
403 Result = llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat());
404 } else {
405 // When comparing zeroes, return -0.0 if one of the zeroes is negative.
406 if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
407 Result = RHS;
408 else if (LHS.isNan() || RHS < LHS)
409 Result = RHS;
410 else
411 Result = LHS;
412 }
413
414 S.Stk.push<Floating>(Result);
415 return true;
416}
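// Usage sketch (illustrative, not part of the original source): for the
// non-"num" variant above,
//   static_assert(__builtin_fmin(2.0, 1.0) == 1.0);
// and __builtin_fmin(+0.0, -0.0) is expected to yield -0.0 per the
// zero-handling branch.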
417
  418static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
  419 const InterpFrame *Frame, const Function *Func,
420 bool IsNumBuiltin) {
421 const Floating &LHS = getParam<Floating>(Frame, 0);
422 const Floating &RHS = getParam<Floating>(Frame, 1);
423
  424 Floating Result;
  425
426 if (IsNumBuiltin) {
427 Result = llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat());
428 } else {
429 // When comparing zeroes, return +0.0 if one of the zeroes is positive.
430 if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
431 Result = RHS;
432 else if (LHS.isNan() || RHS > LHS)
433 Result = RHS;
434 else
435 Result = LHS;
436 }
437
438 S.Stk.push<Floating>(Result);
439 return true;
440}
441
442/// Defined as __builtin_isnan(...), to accommodate the fact that it can
443/// take a float, double, long double, etc.
444/// But for us, that's all a Floating anyway.
  445static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
  446 const InterpFrame *Frame, const Function *F,
447 const CallExpr *Call) {
448 const Floating &Arg = S.Stk.peek<Floating>();
449
450 pushInteger(S, Arg.isNan(), Call->getType());
451 return true;
452}
453
  454static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
  455 const InterpFrame *Frame,
456 const Function *F,
457 const CallExpr *Call) {
458 const Floating &Arg = S.Stk.peek<Floating>();
459
460 pushInteger(S, Arg.isSignaling(), Call->getType());
461 return true;
462}
463
  464static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
  465 const InterpFrame *Frame, const Function *F,
466 bool CheckSign, const CallExpr *Call) {
467 const Floating &Arg = S.Stk.peek<Floating>();
468 bool IsInf = Arg.isInf();
469
470 if (CheckSign)
471 pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
472 else
473 pushInteger(S, Arg.isInf(), Call->getType());
474 return true;
475}
476
  477static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
  478 const InterpFrame *Frame,
479 const Function *F, const CallExpr *Call) {
480 const Floating &Arg = S.Stk.peek<Floating>();
481
482 pushInteger(S, Arg.isFinite(), Call->getType());
483 return true;
484}
485
  486static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
  487 const InterpFrame *Frame,
488 const Function *F, const CallExpr *Call) {
489 const Floating &Arg = S.Stk.peek<Floating>();
490
491 pushInteger(S, Arg.isNormal(), Call->getType());
492 return true;
493}
494
  495static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
  496 const InterpFrame *Frame,
497 const Function *F,
498 const CallExpr *Call) {
499 const Floating &Arg = S.Stk.peek<Floating>();
500
501 pushInteger(S, Arg.isDenormal(), Call->getType());
502 return true;
503}
504
  505static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
  506 const InterpFrame *Frame, const Function *F,
507 const CallExpr *Call) {
508 const Floating &Arg = S.Stk.peek<Floating>();
509
510 pushInteger(S, Arg.isZero(), Call->getType());
511 return true;
512}
513
  514static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
  515 const InterpFrame *Frame, const Function *F,
516 const CallExpr *Call) {
517 const Floating &Arg = S.Stk.peek<Floating>();
518
519 pushInteger(S, Arg.isNegative(), Call->getType());
520 return true;
521}
522
  523static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
  524 const InterpFrame *Frame,
525 const Function *F,
526 const CallExpr *Call) {
527 const Floating &RHS = S.Stk.peek<Floating>();
528 const Floating &LHS = S.Stk.peek<Floating>(align(2u * primSize(PT_Float)));
529 unsigned ID = F->getBuiltinID();
530
  531 pushInteger(
  532 S,
533 [&] {
534 switch (ID) {
535 case Builtin::BI__builtin_isgreater:
536 return LHS > RHS;
537 case Builtin::BI__builtin_isgreaterequal:
538 return LHS >= RHS;
539 case Builtin::BI__builtin_isless:
540 return LHS < RHS;
541 case Builtin::BI__builtin_islessequal:
542 return LHS <= RHS;
543 case Builtin::BI__builtin_islessgreater: {
544 ComparisonCategoryResult cmp = LHS.compare(RHS);
  545 return cmp == ComparisonCategoryResult::Less ||
  546 cmp == ComparisonCategoryResult::Greater;
547 }
548 case Builtin::BI__builtin_isunordered:
549 return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
550 default:
551 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
552 "comparison function");
553 }
554 }(),
555 Call->getType());
556 return true;
557}
558
559/// First parameter to __builtin_isfpclass is the floating value, the
560/// second one is an integral value.
  561static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
  562 const InterpFrame *Frame,
563 const Function *Func,
564 const CallExpr *Call) {
565 PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
566 APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
567 const Floating &F =
568 S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));
569
570 int32_t Result =
571 static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
572 pushInteger(S, Result, Call->getType());
573
574 return true;
575}
576
577/// Five int values followed by one floating value.
  578static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
  579 const InterpFrame *Frame,
580 const Function *Func,
581 const CallExpr *Call) {
582 const Floating &Val = S.Stk.peek<Floating>();
583
584 unsigned Index;
585 switch (Val.getCategory()) {
586 case APFloat::fcNaN:
587 Index = 0;
588 break;
589 case APFloat::fcInfinity:
590 Index = 1;
591 break;
592 case APFloat::fcNormal:
593 Index = Val.isDenormal() ? 3 : 2;
594 break;
595 case APFloat::fcZero:
596 Index = 4;
597 break;
598 }
599
600 // The last argument is first on the stack.
601 assert(Index <= 4);
602 unsigned IntSize = primSize(getIntPrimType(S));
603 unsigned Offset =
604 align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));
605
606 APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
607 pushInteger(S, I, Call->getType());
608 return true;
609}
610
611// The C standard says "fabs raises no floating-point exceptions,
612// even if x is a signaling NaN. The returned value is independent of
613// the current rounding direction mode." Therefore constant folding can
614// proceed without regard to the floating point settings.
615// Reference, WG14 N2478 F.10.4.3
  616static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
  617 const InterpFrame *Frame,
618 const Function *Func) {
619 const Floating &Val = getParam<Floating>(Frame, 0);
620
621 S.Stk.push<Floating>(Floating::abs(Val));
622 return true;
623}
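// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_fabs(-2.5) == 2.5);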
624
  625static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
  626 const InterpFrame *Frame, const Function *Func,
627 const CallExpr *Call) {
628 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
629 APSInt Val = peekToAPSInt(S.Stk, ArgT);
630 if (Val ==
631 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
632 return false;
633 if (Val.isNegative())
634 Val.negate();
635 pushInteger(S, Val, Call->getType());
636 return true;
637}
638
  639static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
  640 const InterpFrame *Frame,
641 const Function *Func,
642 const CallExpr *Call) {
643 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
644 APSInt Val = peekToAPSInt(S.Stk, ArgT);
645 pushInteger(S, Val.popcount(), Call->getType());
646 return true;
647}
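// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_popcount(0xF0u) == 4);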
648
  649static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
  650 const InterpFrame *Frame,
651 const Function *Func, const CallExpr *Call) {
652 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
653 APSInt Val = peekToAPSInt(S.Stk, ArgT);
654 pushInteger(S, Val.popcount() % 2, Call->getType());
655 return true;
656}
657
  658static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
  659 const InterpFrame *Frame,
660 const Function *Func, const CallExpr *Call) {
661 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
662 APSInt Val = peekToAPSInt(S.Stk, ArgT);
663 pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
664 return true;
665}
666
  667static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
  668 const InterpFrame *Frame,
669 const Function *Func,
670 const CallExpr *Call) {
671 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
672 APSInt Val = peekToAPSInt(S.Stk, ArgT);
673 pushInteger(S, Val.reverseBits(), Call->getType());
674 return true;
675}
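// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_bitreverse8(0x01) == 0x80);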
676
  677static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
  678 const InterpFrame *Frame,
679 const Function *Func,
680 const CallExpr *Call) {
681 // This is an unevaluated call, so there are no arguments on the stack.
682 assert(Call->getNumArgs() == 1);
683 const Expr *Arg = Call->getArg(0);
684
  685 GCCTypeClass ResultClass =
  686 EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
687 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
688 pushInteger(S, ReturnVal, Call->getType());
689 return true;
690}
691
692// __builtin_expect(long, long)
693// __builtin_expect_with_probability(long, long, double)
  694static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
  695 const InterpFrame *Frame,
696 const Function *Func, const CallExpr *Call) {
697 // The return value is simply the value of the first parameter.
698 // We ignore the probability.
699 unsigned NumArgs = Call->getNumArgs();
700 assert(NumArgs == 2 || NumArgs == 3);
701
702 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
703 unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
704 if (NumArgs == 3)
705 Offset += align(primSize(PT_Float));
706
707 APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
708 pushInteger(S, Val, Call->getType());
709 return true;
710}
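// Usage sketch (illustrative, not part of the original source): the return
// value is simply the first argument, so e.g.
//   static_assert(__builtin_expect(42L, 1L) == 42L);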
711
712/// rotateleft(value, amount)
  713static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
  714 const InterpFrame *Frame,
715 const Function *Func, const CallExpr *Call,
716 bool Right) {
717 PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
718 PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());
719
720 APSInt Amount = peekToAPSInt(S.Stk, AmountT);
  721 APSInt Value = peekToAPSInt(
  722 S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));
723
  724 APSInt Result;
  725 if (Right)
726 Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
727 /*IsUnsigned=*/true);
728 else // Left.
729 Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
730 /*IsUnsigned=*/true);
731
732 pushInteger(S, Result, Call->getType());
733 return true;
734}
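// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_rotateleft8(0x01, 1) == 0x02);
//   static_assert(__builtin_rotateright8(0x01, 1) == 0x80);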
735
  736static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
  737 const InterpFrame *Frame, const Function *Func,
738 const CallExpr *Call) {
739 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
740 APSInt Value = peekToAPSInt(S.Stk, ArgT);
741
742 uint64_t N = Value.countr_zero();
743 pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
744 return true;
745}
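// Usage sketch (illustrative, not part of the original source): ffs returns
// the 1-based index of the least significant set bit, or 0 for zero, e.g.
//   static_assert(__builtin_ffs(8) == 4);
//   static_assert(__builtin_ffs(0) == 0);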
746
  747static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
  748 const InterpFrame *Frame,
749 const Function *Func,
750 const CallExpr *Call) {
751 assert(Call->getArg(0)->isLValue());
752 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
753
754 if (PtrT == PT_FnPtr) {
755 const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
756 S.Stk.push<FunctionPointer>(Arg);
757 } else if (PtrT == PT_Ptr) {
758 const Pointer &Arg = S.Stk.peek<Pointer>();
759 S.Stk.push<Pointer>(Arg);
760 } else {
761 assert(false && "Unsupported pointer type passed to __builtin_addressof()");
762 }
763 return true;
764}
765
  766static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
  767 const InterpFrame *Frame, const Function *Func,
768 const CallExpr *Call) {
769
770 PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
771
772 TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););
773
774 return Func->getDecl()->isConstexpr();
775}
776
  777static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
  778 const InterpFrame *Frame,
779 const Function *Func,
780 const CallExpr *Call) {
781 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
782 APSInt Arg = peekToAPSInt(S.Stk, ArgT);
783
  784 int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
  785 Arg.getZExtValue());
786 pushInteger(S, Result, Call->getType());
787 return true;
788}
789
790/// Just takes the first Argument to the call and puts it on the stack.
791static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
792 const Function *Func, const CallExpr *Call) {
793 const Pointer &Arg = S.Stk.peek<Pointer>();
794 S.Stk.push<Pointer>(Arg);
795 return true;
796}
797
798// Two integral values followed by a pointer (lhs, rhs, resultOut)
  799static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
  800 const InterpFrame *Frame,
801 const Function *Func,
802 const CallExpr *Call) {
803 Pointer &ResultPtr = S.Stk.peek<Pointer>();
804 if (ResultPtr.isDummy())
805 return false;
806
807 unsigned BuiltinOp = Func->getBuiltinID();
808 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
809 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
810 APSInt RHS = peekToAPSInt(S.Stk, RHST,
811 align(primSize(PT_Ptr)) + align(primSize(RHST)));
812 APSInt LHS = peekToAPSInt(S.Stk, LHST,
813 align(primSize(PT_Ptr)) + align(primSize(RHST)) +
814 align(primSize(LHST)));
815 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
816 PrimType ResultT = *S.getContext().classify(ResultType);
817 bool Overflow;
818
  819 APSInt Result;
  820 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
821 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
822 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
  823 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
  824 ResultType->isSignedIntegerOrEnumerationType();
  825 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
  826 ResultType->isSignedIntegerOrEnumerationType();
827 uint64_t LHSSize = LHS.getBitWidth();
828 uint64_t RHSSize = RHS.getBitWidth();
829 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
830 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
831
832 // Add an additional bit if the signedness isn't uniformly agreed to. We
833 // could do this ONLY if there is a signed and an unsigned that both have
834 // MaxBits, but the code to check that is pretty nasty. The issue will be
835 // caught in the shrink-to-result later anyway.
836 if (IsSigned && !AllSigned)
837 ++MaxBits;
838
839 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
840 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
841 Result = APSInt(MaxBits, !IsSigned);
842 }
843
844 // Find largest int.
845 switch (BuiltinOp) {
846 default:
847 llvm_unreachable("Invalid value for BuiltinOp");
848 case Builtin::BI__builtin_add_overflow:
849 case Builtin::BI__builtin_sadd_overflow:
850 case Builtin::BI__builtin_saddl_overflow:
851 case Builtin::BI__builtin_saddll_overflow:
852 case Builtin::BI__builtin_uadd_overflow:
853 case Builtin::BI__builtin_uaddl_overflow:
854 case Builtin::BI__builtin_uaddll_overflow:
855 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
856 : LHS.uadd_ov(RHS, Overflow);
857 break;
858 case Builtin::BI__builtin_sub_overflow:
859 case Builtin::BI__builtin_ssub_overflow:
860 case Builtin::BI__builtin_ssubl_overflow:
861 case Builtin::BI__builtin_ssubll_overflow:
862 case Builtin::BI__builtin_usub_overflow:
863 case Builtin::BI__builtin_usubl_overflow:
864 case Builtin::BI__builtin_usubll_overflow:
865 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
866 : LHS.usub_ov(RHS, Overflow);
867 break;
868 case Builtin::BI__builtin_mul_overflow:
869 case Builtin::BI__builtin_smul_overflow:
870 case Builtin::BI__builtin_smull_overflow:
871 case Builtin::BI__builtin_smulll_overflow:
872 case Builtin::BI__builtin_umul_overflow:
873 case Builtin::BI__builtin_umull_overflow:
874 case Builtin::BI__builtin_umulll_overflow:
875 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
876 : LHS.umul_ov(RHS, Overflow);
877 break;
878 }
879
880 // In the case where multiple sizes are allowed, truncate and see if
881 // the values are the same.
882 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
883 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
884 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
885 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
886 // since it will give us the behavior of a TruncOrSelf in the case where
887 // its parameter <= its size. We previously set Result to be at least the
  888 // type-size of the result, so getTypeSize(ResultType) <= Result.getBitWidth().
889 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
890 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
891
892 if (!APSInt::isSameValue(Temp, Result))
893 Overflow = true;
894 Result = Temp;
895 }
896
  897 // Write Result to ResultPtr and put Overflow on the stack.
898 assignInteger(ResultPtr, ResultT, Result);
899 ResultPtr.initialize();
900 assert(Func->getDecl()->getReturnType()->isBooleanType());
901 S.Stk.push<Boolean>(Overflow);
902 return true;
903}
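// Usage sketch (illustrative, not part of the original source; the helper f
// below is hypothetical):
//   constexpr bool f(int a, int b) { int r = 0; return __builtin_add_overflow(a, b, &r); }
//   static_assert(!f(1, 2));          // no overflow, result would be 3
//   static_assert(f(0x7fffffff, 1));  // signed overflow is reported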
904
905/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
  906static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
  907 const InterpFrame *Frame,
908 const Function *Func,
909 const CallExpr *Call) {
910 unsigned BuiltinOp = Func->getBuiltinID();
911 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
912 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
913 PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
914 APSInt RHS = peekToAPSInt(S.Stk, RHST,
915 align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
916 align(primSize(RHST)));
917 APSInt LHS =
918 peekToAPSInt(S.Stk, LHST,
919 align(primSize(PT_Ptr)) + align(primSize(RHST)) +
920 align(primSize(CarryT)) + align(primSize(LHST)));
921 APSInt CarryIn = peekToAPSInt(
922 S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
923 APSInt CarryOut;
924
926 // Copy the number of bits and sign.
927 Result = LHS;
928 CarryOut = LHS;
929
930 bool FirstOverflowed = false;
931 bool SecondOverflowed = false;
932 switch (BuiltinOp) {
933 default:
934 llvm_unreachable("Invalid value for BuiltinOp");
935 case Builtin::BI__builtin_addcb:
936 case Builtin::BI__builtin_addcs:
937 case Builtin::BI__builtin_addc:
938 case Builtin::BI__builtin_addcl:
939 case Builtin::BI__builtin_addcll:
940 Result =
941 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
942 break;
943 case Builtin::BI__builtin_subcb:
944 case Builtin::BI__builtin_subcs:
945 case Builtin::BI__builtin_subc:
946 case Builtin::BI__builtin_subcl:
947 case Builtin::BI__builtin_subcll:
948 Result =
949 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
950 break;
951 }
952 // It is possible for both overflows to happen but CGBuiltin uses an OR so
953 // this is consistent.
954 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
955
956 Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
957 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
958 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
959 assignInteger(CarryOutPtr, CarryOutT, CarryOut);
960 CarryOutPtr.initialize();
961
962 assert(Call->getType() == Call->getArg(0)->getType());
963 pushInteger(S, Result, Call->getType());
964 return true;
965}
966
  967static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
  968 const InterpFrame *Frame, const Function *Func,
969 const CallExpr *Call) {
970 unsigned CallSize = callArgSize(S, Call);
971 unsigned BuiltinOp = Func->getBuiltinID();
972 PrimType ValT = *S.getContext().classify(Call->getArg(0));
973 const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
974
975 // When the argument is 0, the result of GCC builtins is undefined, whereas
976 // for Microsoft intrinsics, the result is the bit-width of the argument.
977 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
978 BuiltinOp != Builtin::BI__lzcnt &&
979 BuiltinOp != Builtin::BI__lzcnt64;
980
981 if (Val == 0) {
982 if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
983 Call->getNumArgs() == 2) {
984 // We have a fallback parameter.
985 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
986 const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
987 pushInteger(S, Fallback, Call->getType());
988 return true;
989 }
990
991 if (ZeroIsUndefined)
992 return false;
993 }
994
995 pushInteger(S, Val.countl_zero(), Call->getType());
996 return true;
997}
998
  999static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
 1000 const InterpFrame *Frame, const Function *Func,
1001 const CallExpr *Call) {
1002 unsigned CallSize = callArgSize(S, Call);
1003 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1004 const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
1005
1006 if (Val == 0) {
1007 if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
1008 Call->getNumArgs() == 2) {
1009 // We have a fallback parameter.
1010 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
1011 const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
1012 pushInteger(S, Fallback, Call->getType());
1013 return true;
1014 }
1015 return false;
1016 }
1017
1018 pushInteger(S, Val.countr_zero(), Call->getType());
1019 return true;
1020}
1021
 1022static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
 1023 const InterpFrame *Frame,
1024 const Function *Func, const CallExpr *Call) {
1025 PrimType ReturnT = *S.getContext().classify(Call->getType());
1026 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1027 const APSInt &Val = peekToAPSInt(S.Stk, ValT);
1028 assert(Val.getActiveBits() <= 64);
1029
1030 INT_TYPE_SWITCH(ReturnT,
1031 { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
1032 return true;
1033}
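// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_bswap32(0x12345678u) == 0x78563412u);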
1034
1035/// bool __atomic_always_lock_free(size_t, void const volatile*)
1036/// bool __atomic_is_lock_free(size_t, void const volatile*)
1037/// bool __c11_atomic_is_lock_free(size_t)
 1038static bool interp__builtin_atomic_is_lock_free(InterpState &S, CodePtr OpPC,
 1039 const InterpFrame *Frame,
1040 const Function *Func,
1041 const CallExpr *Call) {
1042 unsigned BuiltinOp = Func->getBuiltinID();
1043
1044 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1045 unsigned SizeValOffset = 0;
1046 if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
1047 SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
1048 const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);
1049
1050 auto returnBool = [&S](bool Value) -> bool {
1051 S.Stk.push<Boolean>(Value);
1052 return true;
1053 };
1054
1055 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
1056 // of two less than or equal to the maximum inline atomic width, we know it
1057 // is lock-free. If the size isn't a power of two, or greater than the
1058 // maximum alignment where we promote atomics, we know it is not lock-free
1059 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1060 // the answer can only be determined at runtime; for example, 16-byte
1061 // atomics have lock-free implementations on some, but not all,
1062 // x86-64 processors.
1063
1064 // Check power-of-two.
1065 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1066 if (Size.isPowerOfTwo()) {
1067 // Check against inlining width.
 1068 unsigned InlineWidthBits =
 1069 S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
1070 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1071
1072 // OK, we will inline appropriately-aligned operations of this size,
1073 // and _Atomic(T) is appropriately-aligned.
1074 if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
1075 Size == CharUnits::One())
1076 return returnBool(true);
1077
1078 // Same for null pointers.
1079 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1080 const Pointer &Ptr = S.Stk.peek<Pointer>();
1081 if (Ptr.isZero())
1082 return returnBool(true);
1083
1084 if (Ptr.isIntegralPointer()) {
1085 uint64_t IntVal = Ptr.getIntegerRepresentation();
1086 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1087 return returnBool(true);
1088 }
1089
1090 const Expr *PtrArg = Call->getArg(1);
 1091 // Otherwise, check the type's alignment against Size.
1092 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1093 // Drop the potential implicit-cast to 'const volatile void*', getting
1094 // the underlying type.
1095 if (ICE->getCastKind() == CK_BitCast)
1096 PtrArg = ICE->getSubExpr();
1097 }
1098
1099 if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1100 QualType PointeeType = PtrTy->getPointeeType();
1101 if (!PointeeType->isIncompleteType() &&
1102 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1103 // OK, we will inline operations on this object.
1104 return returnBool(true);
1105 }
1106 }
1107 }
1108 }
1109
1110 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1111 return returnBool(false);
1112
1113 return false;
1114}
1115
 1116/// __builtin_complex(Float A, Float B);
 1117static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
 1118 const InterpFrame *Frame,
1119 const Function *Func,
1120 const CallExpr *Call) {
1121 const Floating &Arg2 = S.Stk.peek<Floating>();
1122 const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
1123 Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
 1124 align(primSize(PT_Ptr)));
 1125
1126 Result.atIndex(0).deref<Floating>() = Arg1;
1127 Result.atIndex(0).initialize();
1128 Result.atIndex(1).deref<Floating>() = Arg2;
1129 Result.atIndex(1).initialize();
1130 Result.initialize();
1131
1132 return true;
1133}
1134
1135/// __builtin_is_aligned()
1136/// __builtin_align_up()
1137/// __builtin_align_down()
1138/// The first parameter is either an integer or a pointer.
1139/// The second parameter is the requested alignment as an integer.
 1140static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
 1141 const InterpFrame *Frame,
1142 const Function *Func,
1143 const CallExpr *Call) {
1144 unsigned BuiltinOp = Func->getBuiltinID();
1145 unsigned CallSize = callArgSize(S, Call);
1146
1147 PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
1148 const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);
1149
1150 if (Alignment < 0 || !Alignment.isPowerOf2()) {
1151 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
1152 return false;
1153 }
1154 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
1155 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
1156 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
1157 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
1158 << MaxValue << Call->getArg(0)->getType() << Alignment;
1159 return false;
1160 }
1161
1162 // The first parameter is either an integer or a pointer (but not a function
1163 // pointer).
1164 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
1165
1166 if (isIntegralType(FirstArgT)) {
1167 const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
1168 APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
1169 if (BuiltinOp == Builtin::BI__builtin_align_up) {
1170 APSInt AlignedVal =
1171 APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
1172 pushInteger(S, AlignedVal, Call->getType());
1173 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
1174 APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
1175 pushInteger(S, AlignedVal, Call->getType());
1176 } else {
1177 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
1178 S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
1179 }
1180 return true;
1181 }
1182
1183 assert(FirstArgT == PT_Ptr);
1184 const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);
1185
1186 unsigned PtrOffset = Ptr.getByteOffset();
1187 PtrOffset = Ptr.getIndex();
 1188 CharUnits BaseAlignment =
 1189 S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
1190 CharUnits PtrAlign =
1191 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
1192
1193 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
1194 if (PtrAlign.getQuantity() >= Alignment) {
1195 S.Stk.push<Boolean>(true);
1196 return true;
1197 }
1198 // If the alignment is not known to be sufficient, some cases could still
1199 // be aligned at run time. However, if the requested alignment is less or
1200 // equal to the base alignment and the offset is not aligned, we know that
1201 // the run-time value can never be aligned.
1202 if (BaseAlignment.getQuantity() >= Alignment &&
1203 PtrAlign.getQuantity() < Alignment) {
1204 S.Stk.push<Boolean>(false);
1205 return true;
1206 }
1207
1208 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
1209 << Alignment;
1210 return false;
1211 }
1212
1213 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
1214 BuiltinOp == Builtin::BI__builtin_align_up);
1215
1216 // For align_up/align_down, we can return the same value if the alignment
1217 // is known to be greater or equal to the requested value.
1218 if (PtrAlign.getQuantity() >= Alignment) {
1219 S.Stk.push<Pointer>(Ptr);
1220 return true;
1221 }
1222
1223 // The alignment could be greater than the minimum at run-time, so we cannot
1224 // infer much about the resulting pointer value. One case is possible:
1225 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
1226 // can infer the correct index if the requested alignment is smaller than
1227 // the base alignment so we can perform the computation on the offset.
1228 if (BaseAlignment.getQuantity() >= Alignment) {
1229 assert(Alignment.getBitWidth() <= 64 &&
1230 "Cannot handle > 64-bit address-space");
1231 uint64_t Alignment64 = Alignment.getZExtValue();
1232 CharUnits NewOffset =
1233 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
1234 ? llvm::alignDown(PtrOffset, Alignment64)
1235 : llvm::alignTo(PtrOffset, Alignment64));
1236
1237 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
1238 return true;
1239 }
1240
1241 // Otherwise, we cannot constant-evaluate the result.
1242 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
1243 return false;
1244}
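// Usage sketch (illustrative, not part of the original source), integer forms:
//   static_assert(__builtin_align_up(7, 4) == 8);
//   static_assert(__builtin_align_down(7, 4) == 4);
//   static_assert(__builtin_is_aligned(8, 4));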
1245
1246/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
 1247static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
 1248 const InterpFrame *Frame,
1249 const Function *Func,
1250 const CallExpr *Call) {
1251 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
1252
1253 // Might be called with function pointers in C.
1254 std::optional<PrimType> PtrT = S.Ctx.classify(Call->getArg(0));
1255 if (PtrT != PT_Ptr)
1256 return false;
1257
1258 unsigned ArgSize = callArgSize(S, Call);
1259 const Pointer &Ptr = S.Stk.peek<Pointer>(ArgSize);
1260 std::optional<APSInt> ExtraOffset;
1261 APSInt Alignment;
1262 if (Call->getNumArgs() == 2) {
1263 Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
1264 } else {
1265 PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
1266 PrimType ExtraOffsetT = *S.Ctx.classify(Call->getArg(2));
1267 Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)),
1268 align(primSize(AlignmentT)) +
1269 align(primSize(ExtraOffsetT)));
1270 ExtraOffset = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
1271 }
1272
1273 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
1274
1275 // If there is a base object, then it must have the correct alignment.
1276 if (Ptr.isBlockPointer()) {
1277 CharUnits BaseAlignment;
1278 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
1279 BaseAlignment = S.getASTContext().getDeclAlign(VD);
1280 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
1281 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
1282
1283 if (BaseAlignment < Align) {
1284 S.CCEDiag(Call->getArg(0),
1285 diag::note_constexpr_baa_insufficient_alignment)
1286 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
1287 return false;
1288 }
1289 }
1290
1291 APValue AV = Ptr.toAPValue(S.getASTContext());
1292 CharUnits AVOffset = AV.getLValueOffset();
1293 if (ExtraOffset)
1294 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
1295 if (AVOffset.alignTo(Align) != AVOffset) {
1296 if (Ptr.isBlockPointer())
1297 S.CCEDiag(Call->getArg(0),
1298 diag::note_constexpr_baa_insufficient_alignment)
1299 << 1 << AVOffset.getQuantity() << Align.getQuantity();
1300 else
1301 S.CCEDiag(Call->getArg(0),
1302 diag::note_constexpr_baa_value_insufficient_alignment)
1303 << AVOffset.getQuantity() << Align.getQuantity();
1304 return false;
1305 }
1306
1307 S.Stk.push<Pointer>(Ptr);
1308 return true;
1309}
1310
 1311static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
 1312 const InterpFrame *Frame,
1313 const Function *Func,
1314 const CallExpr *Call) {
1315 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1316 !Call->getArg(1)->getType()->isIntegerType())
1317 return false;
1318
1319 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1320 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1321 APSInt Val = peekToAPSInt(S.Stk, ValT,
1322 align(primSize(ValT)) + align(primSize(IndexT)));
1323 APSInt Index = peekToAPSInt(S.Stk, IndexT);
1324
1325 unsigned BitWidth = Val.getBitWidth();
1326 uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
1327 uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
1328 Length = Length > BitWidth ? BitWidth : Length;
1329
1330 // Handle out of bounds cases.
1331 if (Length == 0 || Shift >= BitWidth) {
1332 pushInteger(S, 0, Call->getType());
1333 return true;
1334 }
1335
1336 uint64_t Result = Val.getZExtValue() >> Shift;
1337 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
1338 pushInteger(S, Result, Call->getType());
1339 return true;
1340}
1341
 1342static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
 1343 const InterpFrame *Frame,
1344 const Function *Func,
1345 const CallExpr *Call) {
1346 QualType CallType = Call->getType();
1347 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1348 !Call->getArg(1)->getType()->isIntegerType() ||
1349 !CallType->isIntegerType())
1350 return false;
1351
1352 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1353 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1354
1355 APSInt Val = peekToAPSInt(S.Stk, ValT,
1356 align(primSize(ValT)) + align(primSize(IndexT)));
1357 APSInt Idx = peekToAPSInt(S.Stk, IndexT);
1358
1359 unsigned BitWidth = Val.getBitWidth();
1360 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
1361
1362 if (Index < BitWidth)
1363 Val.clearHighBits(BitWidth - Index);
1364
1365 pushInteger(S, Val, CallType);
1366 return true;
1367}
1368
 1369static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
 1370 const InterpFrame *Frame,
1371 const Function *Func,
1372 const CallExpr *Call) {
1373 QualType CallType = Call->getType();
1374 if (!CallType->isIntegerType() ||
1375 !Call->getArg(0)->getType()->isIntegerType())
1376 return false;
1377
1378 APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1379 pushInteger(S, Val.countLeadingZeros(), CallType);
1380 return true;
1381}
1382
 1383static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
 1384 const InterpFrame *Frame,
1385 const Function *Func,
1386 const CallExpr *Call) {
1387 QualType CallType = Call->getType();
1388 if (!CallType->isIntegerType() ||
1389 !Call->getArg(0)->getType()->isIntegerType())
1390 return false;
1391
1392 APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1393 pushInteger(S, Val.countTrailingZeros(), CallType);
1394 return true;
1395}
1396
 1397static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
 1398 const InterpFrame *Frame,
1399 const Function *Func,
1400 const CallExpr *Call) {
1401 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1402 !Call->getArg(1)->getType()->isIntegerType())
1403 return false;
1404
1405 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1406 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1407
1408 APSInt Val =
1409 peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1410 APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1411
1412 unsigned BitWidth = Val.getBitWidth();
1413 APInt Result = APInt::getZero(BitWidth);
1414 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1415 if (Mask[I])
1416 Result.setBitVal(I, Val[P++]);
1417 }
1418 pushInteger(S, Result, Call->getType());
1419 return true;
1420}
1421
 1422static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
 1423 const InterpFrame *Frame,
1424 const Function *Func,
1425 const CallExpr *Call) {
1426 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1427 !Call->getArg(1)->getType()->isIntegerType())
1428 return false;
1429
1430 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1431 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1432
1433 APSInt Val =
1434 peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1435 APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1436
1437 unsigned BitWidth = Val.getBitWidth();
1438 APInt Result = APInt::getZero(BitWidth);
1439 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1440 if (Mask[I])
1441 Result.setBitVal(P++, Val[I]);
1442 }
1443 pushInteger(S, Result, Call->getType());
1444 return true;
1445}
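// Usage sketch (illustrative, not part of the original source; assumes the
// usual BMI2 builtin names __builtin_ia32_pdep_si / __builtin_ia32_pext_si):
//   static_assert(__builtin_ia32_pdep_si(0x0B, 0xF0) == 0xB0);
//   static_assert(__builtin_ia32_pext_si(0xB2, 0xF0) == 0x0B);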
1446
 1447static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
 1448 CodePtr OpPC,
1449 const InterpFrame *Frame,
1450 const Function *Func,
1451 const CallExpr *Call) {
1452 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1453 !Call->getArg(1)->getType()->isIntegerType() ||
1454 !Call->getArg(2)->getType()->isIntegerType())
1455 return false;
1456
1457 unsigned BuiltinOp = Func->getBuiltinID();
1458 APSInt CarryIn = getAPSIntParam(Frame, 0);
1459 APSInt LHS = getAPSIntParam(Frame, 1);
1460 APSInt RHS = getAPSIntParam(Frame, 2);
1461
1462 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1463 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1464
1465 unsigned BitWidth = LHS.getBitWidth();
1466 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1467 APInt ExResult =
1468 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1469 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1470
1471 APInt Result = ExResult.extractBits(BitWidth, 0);
1472 APSInt CarryOut =
1473 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1474
1475 Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
1476 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1477 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1478 assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, true));
1479
1480 pushInteger(S, CarryOut, Call->getType());
1481
1482 return true;
1483}
1484
 1485static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
 1486 CodePtr OpPC,
1487 const InterpFrame *Frame,
1488 const Function *Func,
1489 const CallExpr *Call) {
 1490 analyze_os_log::OSLogBufferLayout Layout;
 1491 analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
 1492 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1493 return true;
1494}
1495
 1496static bool interp__builtin_ptrauth_string_discriminator(
 1497 InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
1498 const Function *Func, const CallExpr *Call) {
1499 const auto &Ptr = S.Stk.peek<Pointer>();
1500 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1501
1502 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1503 uint64_t Result = getPointerAuthStableSipHash(R);
1504 pushInteger(S, Result, Call->getType());
1505 return true;
1506}
1507
1508// FIXME: This implementation is not complete.
1509// The Compiler instance we create cannot access the current stack frame, local
1510// variables, function parameters, etc. We also need protection from
1511// side-effects, fatal errors, etc.
 1512static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC,
 1513 const InterpFrame *Frame,
1514 const Function *Func,
1515 const CallExpr *Call) {
1516 const Expr *Arg = Call->getArg(0);
1517 QualType ArgType = Arg->getType();
1518
1519 auto returnInt = [&S, Call](bool Value) -> bool {
1520 pushInteger(S, Value, Call->getType());
1521 return true;
1522 };
1523
1524 // __builtin_constant_p always has one operand. The rules which gcc follows
1525 // are not precisely documented, but are as follows:
1526 //
1527 // - If the operand is of integral, floating, complex or enumeration type,
1528 // and can be folded to a known value of that type, it returns 1.
1529 // - If the operand can be folded to a pointer to the first character
1530 // of a string literal (or such a pointer cast to an integral type)
1531 // or to a null pointer or an integer cast to a pointer, it returns 1.
1532 //
1533 // Otherwise, it returns 0.
1534 //
1535 // FIXME: GCC also intends to return 1 for literals of aggregate types, but
1536 // its support for this did not work prior to GCC 9 and is not yet well
1537 // understood.
1538 if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
1539 ArgType->isAnyComplexType() || ArgType->isPointerType() ||
1540 ArgType->isNullPtrType()) {
1541 auto PrevDiags = S.getEvalStatus().Diag;
1542 S.getEvalStatus().Diag = nullptr;
1543 InterpStack Stk;
1544 Compiler<EvalEmitter> C(S.Ctx, S.P, S, Stk);
1545 auto Res = C.interpretExpr(Arg, /*ConvertResultToRValue=*/Arg->isGLValue());
1546 S.getEvalStatus().Diag = PrevDiags;
1547 if (Res.isInvalid()) {
1548 C.cleanup();
1549 Stk.clear();
1550 return returnInt(false);
1551 }
1552
1553 if (!Res.empty()) {
1554 const APValue &LV = Res.toAPValue();
1555 if (LV.isLValue()) {
 1556 APValue::LValueBase Base = LV.getLValueBase();
 1557 if (Base.isNull()) {
1558 // A null base is acceptable.
1559 return returnInt(true);
1560 } else if (const auto *E = Base.dyn_cast<const Expr *>()) {
1561 if (!isa<StringLiteral>(E))
1562 return returnInt(false);
1563 return returnInt(LV.getLValueOffset().isZero());
1564 } else if (Base.is<TypeInfoLValue>()) {
1565 // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
1566 // evaluate to true.
1567 return returnInt(true);
1568 } else {
1569 // Any other base is not constant enough for GCC.
1570 return returnInt(false);
1571 }
1572 }
1573 }
1574
1575 // Otherwise, any constant value is good enough.
1576 return returnInt(true);
1577 }
1578
1579 return returnInt(false);
1580}
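// Usage sketch (illustrative, not part of the original source):
//   static_assert(__builtin_constant_p(42));
//   static_assert(__builtin_constant_p("abc"));
// while a non-constant lvalue argument is expected to yield 0.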
1581
 1582static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
 1583 const InterpFrame *Frame,
1584 const Function *Func,
1585 const CallExpr *Call) {
1586 // A call to __operator_new is only valid within std::allocate<>::allocate.
1587 // Walk up the call stack to find the appropriate caller and get the
1588 // element type from it.
1589 QualType ElemType;
1590 const CallExpr *NewCall = nullptr;
1591
1592 for (const InterpFrame *F = Frame; F; F = F->Caller) {
1593 const Function *Func = F->getFunction();
1594 if (!Func)
1595 continue;
1596 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Func->getDecl());
1597 if (!MD)
1598 continue;
1599 const IdentifierInfo *FnII = MD->getIdentifier();
1600 if (!FnII || !FnII->isStr("allocate"))
1601 continue;
1602
1603 const auto *CTSD =
1604 dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
1605 if (!CTSD)
1606 continue;
1607
1608 const IdentifierInfo *ClassII = CTSD->getIdentifier();
1609 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
1610 if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
1611 TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type) {
1612 ElemType = TAL[0].getAsType();
1613 NewCall = cast<CallExpr>(F->Caller->getExpr(F->getRetPC()));
1614 break;
1615 }
1616 }
1617
1618 if (ElemType.isNull()) {
1619 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1620 ? diag::note_constexpr_new_untyped
1621 : diag::note_constexpr_new);
1622 return false;
1623 }
1624 assert(NewCall);
1625
1626 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1627 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1628 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1629 return false;
1630 }
1631
1632 APSInt Bytes = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
1633 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1634 assert(!ElemSize.isZero());
1635 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1636 // elements we should allocate.
1637 APInt NumElems, Remainder;
1638 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1639 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1640 if (Remainder != 0) {
1641 // This likely indicates a bug in the implementation of 'std::allocator'.
1642 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1643 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1644 return false;
1645 }
1646
1647 // NB: The same check we're using in CheckArraySize()
 1648 if (NumElems.getActiveBits() >
 1649 ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
1650 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1651 // FIXME: NoThrow check?
1652 const SourceInfo &Loc = S.Current->getSource(OpPC);
1653 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1654 << NumElems.getZExtValue();
1655 return false;
1656 }
1657
1658 std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
1659 DynamicAllocator &Allocator = S.getAllocator();
1660 if (ElemT) {
1661 if (NumElems.ule(1)) {
1662 const Descriptor *Desc =
1663 S.P.createDescriptor(NewCall, *ElemT, Descriptor::InlineDescMD,
1664 /*IsConst=*/false, /*IsTemporary=*/false,
1665 /*IsMutable=*/false);
 1666 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
 1667 DynamicAllocator::Form::Operator);
1668 assert(B);
1669
1670 S.Stk.push<Pointer>(B);
1671 return true;
1672 }
1673 assert(NumElems.ugt(1));
1674
1675 Block *B =
1676 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
1677 S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
1678 assert(B);
1679 S.Stk.push<Pointer>(B);
1680 return true;
1681 }
1682
1683 assert(!ElemT);
1684 // Structs etc.
 1685 const Descriptor *Desc = S.P.createDescriptor(
 1686 NewCall, ElemType.getTypePtr(), Descriptor::InlineDescMD,
1687 /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
1688 /*Init=*/nullptr);
1689
1690 if (NumElems.ule(1)) {
 1691 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
 1692 DynamicAllocator::Form::Operator);
1693 assert(B);
1694 S.Stk.push<Pointer>(B);
1695 return true;
1696 }
1697
1698 Block *B =
 1699 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
 1700 DynamicAllocator::Form::Operator);
1701 assert(B);
1702 S.Stk.push<Pointer>(B);
1703 return true;
1704}
1705
 1706static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
 1707 const InterpFrame *Frame,
1708 const Function *Func,
1709 const CallExpr *Call) {
1710 const Expr *Source = nullptr;
1711 const Block *BlockToDelete = nullptr;
1712
1713 {
1714 const Pointer &Ptr = S.Stk.peek<Pointer>();
1715
1716 if (Ptr.isZero()) {
1717 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1718 return true;
1719 }
1720
1721 Source = Ptr.getDeclDesc()->asExpr();
1722 BlockToDelete = Ptr.block();
1723 }
1724 assert(BlockToDelete);
1725
1726 DynamicAllocator &Allocator = S.getAllocator();
1727 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1728 std::optional<DynamicAllocator::Form> AllocForm =
1729 Allocator.getAllocationForm(Source);
1730
1731 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1732 // Nothing has been deallocated, this must be a double-delete.
1733 const SourceInfo &Loc = S.Current->getSource(OpPC);
1734 S.FFDiag(Loc, diag::note_constexpr_double_delete);
1735 return false;
1736 }
1737 assert(AllocForm);
1738
1739 return CheckNewDeleteForms(
1740 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1741}
1742
 1743static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
 1744 const InterpFrame *Frame,
1745 const Function *Func,
1746 const CallExpr *Call) {
1747 const Floating &Arg0 = S.Stk.peek<Floating>();
1748 S.Stk.push<Floating>(Arg0);
1749 return true;
1750}
1751
 1752static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
 1753 const InterpFrame *Frame,
1754 const Function *Func,
1755 const CallExpr *Call) {
1756 const Pointer &Arg = S.Stk.peek<Pointer>();
1757 assert(Arg.getFieldDesc()->isPrimitiveArray());
1758
1759 unsigned ID = Func->getBuiltinID();
1760 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1761 assert(Call->getType() == ElemType);
1762 PrimType ElemT = *S.getContext().classify(ElemType);
1763 unsigned NumElems = Arg.getNumElems();
1764
 1765 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
 1766 T Result = Arg.atIndex(0).deref<T>();
1767 unsigned BitWidth = Result.bitWidth();
1768 for (unsigned I = 1; I != NumElems; ++I) {
1769 T Elem = Arg.atIndex(I).deref<T>();
1770 T PrevResult = Result;
1771
1772 if (ID == Builtin::BI__builtin_reduce_add) {
1773 if (T::add(Result, Elem, BitWidth, &Result)) {
1774 unsigned OverflowBits = BitWidth + 1;
1775 (void)handleOverflow(S, OpPC,
1776 (PrevResult.toAPSInt(OverflowBits) +
1777 Elem.toAPSInt(OverflowBits)));
1778 return false;
1779 }
1780 } else if (ID == Builtin::BI__builtin_reduce_mul) {
1781 if (T::mul(Result, Elem, BitWidth, &Result)) {
1782 unsigned OverflowBits = BitWidth * 2;
1783 (void)handleOverflow(S, OpPC,
1784 (PrevResult.toAPSInt(OverflowBits) *
1785 Elem.toAPSInt(OverflowBits)));
1786 return false;
1787 }
1788
1789 } else if (ID == Builtin::BI__builtin_reduce_and) {
1790 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1791 } else if (ID == Builtin::BI__builtin_reduce_or) {
1792 (void)T::bitOr(Result, Elem, BitWidth, &Result);
1793 } else if (ID == Builtin::BI__builtin_reduce_xor) {
1794 (void)T::bitXor(Result, Elem, BitWidth, &Result);
1795 } else {
1796 llvm_unreachable("Unhandled vector reduce builtin");
1797 }
1798 }
1799 pushInteger(S, Result.toAPSInt(), Call->getType());
1800 });
1801
1802 return true;
1803}
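Illustrative only (not part of the listing): a constexpr use of the reduce builtins this handler folds, assuming GCC-style vector types and a clang new enough to constant-evaluate them.

typedef int v4si __attribute__((vector_size(4 * sizeof(int))));
constexpr v4si V = {1, 2, 3, 4};
static_assert(__builtin_reduce_add(V) == 10);
static_assert(__builtin_reduce_mul(V) == 24);
static_assert(__builtin_reduce_or(V) == 7);   // 0b001 | 0b010 | 0b011 | 0b100
static_assert(__builtin_reduce_and(V) == 0);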
1804
1805/// Can be called with an integer or vector as the first and only parameter.
1807 const InterpFrame *Frame,
1808 const Function *Func,
1809 const CallExpr *Call) {
1810 assert(Call->getNumArgs() == 1);
1811 if (Call->getArg(0)->getType()->isIntegerType()) {
1812 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1813 APSInt Val = peekToAPSInt(S.Stk, ArgT);
1814 pushInteger(S, Val.popcount(), Call->getType());
1815 return true;
1816 }
1817 // Otherwise, the argument must be a vector.
1818 assert(Call->getArg(0)->getType()->isVectorType());
1819 const Pointer &Arg = S.Stk.peek<Pointer>();
1820 assert(Arg.getFieldDesc()->isPrimitiveArray());
1821 const Pointer &Dst = S.Stk.peek<Pointer>(primSize(PT_Ptr) * 2);
1822 assert(Dst.getFieldDesc()->isPrimitiveArray());
1823 assert(Arg.getFieldDesc()->getNumElems() ==
1824 Dst.getFieldDesc()->getNumElems());
1825
1826 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1827 PrimType ElemT = *S.getContext().classify(ElemType);
1828 unsigned NumElems = Arg.getNumElems();
1829
1830 // FIXME: Reading from uninitialized vector elements?
1831 for (unsigned I = 0; I != NumElems; ++I) {
1832    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1833      Dst.atIndex(I).deref<T>() =
1834 T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
1835 Dst.atIndex(I).initialize();
1836 });
1837 }
1838
1839 return true;
1840}
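Illustrative only: the scalar and vector forms this handler accepts, assuming a clang new enough to constant-evaluate __builtin_elementwise_popcount.

typedef unsigned v4su __attribute__((vector_size(4 * sizeof(unsigned))));
constexpr v4su In = {0u, 1u, 3u, 0xFFu};
constexpr v4su Out = __builtin_elementwise_popcount(In);
static_assert(Out[0] == 0 && Out[1] == 1 && Out[2] == 2 && Out[3] == 8);
static_assert(__builtin_elementwise_popcount(7u) == 3);   // scalar form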
1841
1842static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
1843                                   const InterpFrame *Frame,
1844 const Function *Func, const CallExpr *Call) {
1845 assert(Call->getNumArgs() == 3);
1846 unsigned ID = Func->getBuiltinID();
1847 Pointer DestPtr = getParam<Pointer>(Frame, 0);
1848 const ASTContext &ASTCtx = S.getASTContext();
1849 const Pointer &SrcPtr = getParam<Pointer>(Frame, 1);
1850 const APSInt &Size =
1851 peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1852 assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1853
1854 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1855 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1856
1857 bool Move = (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove);
1858
1859 // If the size is zero, we treat this as always being a valid no-op.
1860 if (Size.isZero()) {
1861 S.Stk.push<Pointer>(DestPtr);
1862 return true;
1863 }
1864
1865 if (SrcPtr.isZero() || DestPtr.isZero()) {
1866 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1867 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1868 << /*IsMove=*/Move << /*IsWchar=*/false << !SrcPtr.isZero()
1869 << DiagPtr.toDiagnosticString(ASTCtx);
1870 return false;
1871 }
1872
1873 // Can't read from dummy pointers.
1874 if (DestPtr.isDummy() || SrcPtr.isDummy())
1875 return false;
1876
1877 QualType DestElemType;
1878 size_t RemainingDestElems;
1879 if (DestPtr.getFieldDesc()->isArray()) {
1880 DestElemType = DestPtr.getFieldDesc()->getElemQualType();
1881 RemainingDestElems = DestPtr.isUnknownSizeArray()
1882 ? 0
1883 : (DestPtr.getNumElems() - DestPtr.getIndex());
1884 } else {
1885 DestElemType = DestPtr.getType();
1886 RemainingDestElems = 1;
1887 }
1888 unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
1889
1890 if (Size.urem(DestElemSize) != 0) {
1891 S.FFDiag(S.Current->getSource(OpPC),
1892 diag::note_constexpr_memcpy_unsupported)
1893 << Move << /*IsWchar=*/false << 0 << DestElemType << Size
1894 << DestElemSize;
1895 return false;
1896 }
1897
1898 QualType SrcElemType;
1899 size_t RemainingSrcElems;
1900 if (SrcPtr.getFieldDesc()->isArray()) {
1901 SrcElemType = SrcPtr.getFieldDesc()->getElemQualType();
1902 RemainingSrcElems = SrcPtr.isUnknownSizeArray()
1903 ? 0
1904 : (SrcPtr.getNumElems() - SrcPtr.getIndex());
1905 } else {
1906 SrcElemType = SrcPtr.getType();
1907 RemainingSrcElems = 1;
1908 }
1909 unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
1910
1911 if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
1912 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
1913 << Move << SrcElemType << DestElemType;
1914 return false;
1915 }
1916
1917  // Check if we have enough elements to read from and write to.
1918 size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
1919 size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
1920 if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
1921 APInt N = Size.udiv(DestElemSize);
1922 S.FFDiag(S.Current->getSource(OpPC),
1923 diag::note_constexpr_memcpy_unsupported)
1924 << Move << /*IsWChar*/ false << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
1925 << DestElemType << toString(N, 10, /*Signed=*/false);
1926 return false;
1927 }
1928
1929 // Check for overlapping memory regions.
1930 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
1931 unsigned SrcIndex = SrcPtr.getIndex() * SrcPtr.elemSize();
1932 unsigned DstIndex = DestPtr.getIndex() * DestPtr.elemSize();
1933 unsigned N = Size.getZExtValue();
1934
1935 if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
1936 (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
1937 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
1938 << /*IsWChar=*/false;
1939 return false;
1940 }
1941 }
1942
1943 assert(Size.getZExtValue() % DestElemSize == 0);
1944 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
1945 return false;
1946
1947 S.Stk.push<Pointer>(DestPtr);
1948 return true;
1949}
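A sketch (not part of the listing) of a constexpr call this handler accepts, plus one that the element-size check above rejects:

constexpr int copy_third() {
  int Src[3] = {1, 2, 3};
  int Dst[3] = {};
  __builtin_memcpy(Dst, Src, sizeof(Src));   // whole elements, no overlap: OK
  return Dst[2];
}
static_assert(copy_third() == 3);
// __builtin_memcpy(Dst, Src, 2) inside copy_third() would be rejected:
// 2 bytes is not a multiple of sizeof(int), so the "unsupported" note fires.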
1950
1951/// Determine if T is a character type for which we guarantee that
1952/// sizeof(T) == 1.
1953static bool isOneByteCharacterType(QualType T) {
1954  return T->isCharType() || T->isChar8Type();
1955}
1956
1958 const InterpFrame *Frame,
1959 const Function *Func, const CallExpr *Call) {
1960 assert(Call->getNumArgs() == 3);
1961 unsigned ID = Func->getBuiltinID();
1962 const Pointer &PtrA = getParam<Pointer>(Frame, 0);
1963 const Pointer &PtrB = getParam<Pointer>(Frame, 1);
1964 const APSInt &Size =
1965 peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1966
1967 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
1968 ID == Builtin::BIwmemcmp)
1969 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1970
1971 if (Size.isZero()) {
1972 pushInteger(S, 0, Call->getType());
1973 return true;
1974 }
1975
1976 bool IsWide =
1977 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
1978
1979 const ASTContext &ASTCtx = S.getASTContext();
1980 // FIXME: This is an arbitrary limitation the current constant interpreter
1981 // had. We could remove this.
1982 if (!IsWide && (!isOneByteCharacterType(PtrA.getType()) ||
1983 !isOneByteCharacterType(PtrB.getType()))) {
1984 S.FFDiag(S.Current->getSource(OpPC),
1985 diag::note_constexpr_memcmp_unsupported)
1986 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
1987 << PtrB.getType();
1988 return false;
1989 }
1990
1991 if (PtrA.isDummy() || PtrB.isDummy())
1992 return false;
1993
1994 // Now, read both pointers to a buffer and compare those.
1995 BitcastBuffer BufferA(
1996 Bits(ASTCtx.getTypeSize(PtrA.getFieldDesc()->getType())));
1997 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
1998 // FIXME: The swapping here is UNDOING something we do when reading the
1999 // data into the buffer.
2000 if (ASTCtx.getTargetInfo().isBigEndian())
2001 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
2002
2003 BitcastBuffer BufferB(
2004 Bits(ASTCtx.getTypeSize(PtrB.getFieldDesc()->getType())));
2005 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
2006 // FIXME: The swapping here is UNDOING something we do when reading the
2007 // data into the buffer.
2008 if (ASTCtx.getTargetInfo().isBigEndian())
2009 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
2010
2011 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
2012 BufferB.byteSize().getQuantity());
2013
2014 unsigned ElemSize = 1;
2015 if (IsWide)
2016 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
2017 // The Size given for the wide variants is in wide-char units. Convert it
2018 // to bytes.
2019 size_t ByteSize = Size.getZExtValue() * ElemSize;
2020 size_t CmpSize = std::min(MinBufferSize, ByteSize);
2021
2022 for (size_t I = 0; I != CmpSize; I += ElemSize) {
2023 if (IsWide) {
2024 INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
2025 T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
2026 T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
2027 if (A < B) {
2028 pushInteger(S, -1, Call->getType());
2029 return true;
2030 } else if (A > B) {
2031 pushInteger(S, 1, Call->getType());
2032 return true;
2033 }
2034 });
2035 } else {
2036 std::byte A = BufferA.Data[I];
2037 std::byte B = BufferB.Data[I];
2038
2039 if (A < B) {
2040 pushInteger(S, -1, Call->getType());
2041 return true;
2042 } else if (A > B) {
2043 pushInteger(S, 1, Call->getType());
2044 return true;
2045 }
2046 }
2047 }
2048
2049 // We compared CmpSize bytes above. If the limiting factor was the Size
2050 // passed, we're done and the result is equality (0).
2051 if (ByteSize <= CmpSize) {
2052 pushInteger(S, 0, Call->getType());
2053 return true;
2054 }
2055
2056 // However, if we read all the available bytes but were instructed to read
2057 // even more, diagnose this as a "read of dereferenced one-past-the-end
2058 // pointer". This is what would happen if we called CheckRead() on every array
2059 // element.
2060 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2061 << AK_Read << S.Current->getRange(OpPC);
2062 return false;
2063}
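Illustrative constexpr uses of the comparison builtins handled here; the last one triggers the past-the-end diagnostic emitted above.

constexpr char A[] = {'a', 'b', 'c'};
constexpr char B[] = {'a', 'b', 'd'};
static_assert(__builtin_memcmp(A, B, 2) == 0);
static_assert(__builtin_memcmp(A, B, 3) < 0);
// __builtin_memcmp(A, B, 4) reads past the end of both arrays and is
// diagnosed with note_constexpr_access_past_end.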
2064
2065bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
2066                      const CallExpr *Call, uint32_t BuiltinID) {
2067 const InterpFrame *Frame = S.Current;
2068
2069 std::optional<PrimType> ReturnT = S.getContext().classify(Call);
2070
2071 switch (BuiltinID) {
2072 case Builtin::BI__builtin_is_constant_evaluated:
2073    if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
2074      return false;
2075 break;
2076 case Builtin::BI__builtin_assume:
2077 case Builtin::BI__assume:
2078 break;
2079 case Builtin::BI__builtin_strcmp:
2080 case Builtin::BIstrcmp:
2081 case Builtin::BI__builtin_strncmp:
2082 case Builtin::BIstrncmp:
2083 if (!interp__builtin_strcmp(S, OpPC, Frame, F, Call))
2084 return false;
2085 break;
2086 case Builtin::BI__builtin_strlen:
2087 case Builtin::BIstrlen:
2088 case Builtin::BI__builtin_wcslen:
2089 case Builtin::BIwcslen:
2090 if (!interp__builtin_strlen(S, OpPC, Frame, F, Call))
2091 return false;
2092 break;
2093 case Builtin::BI__builtin_nan:
2094 case Builtin::BI__builtin_nanf:
2095 case Builtin::BI__builtin_nanl:
2096 case Builtin::BI__builtin_nanf16:
2097 case Builtin::BI__builtin_nanf128:
2098 if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
2099 return false;
2100 break;
2101 case Builtin::BI__builtin_nans:
2102 case Builtin::BI__builtin_nansf:
2103 case Builtin::BI__builtin_nansl:
2104 case Builtin::BI__builtin_nansf16:
2105 case Builtin::BI__builtin_nansf128:
2106 if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
2107 return false;
2108 break;
2109
2110 case Builtin::BI__builtin_huge_val:
2111 case Builtin::BI__builtin_huge_valf:
2112 case Builtin::BI__builtin_huge_vall:
2113 case Builtin::BI__builtin_huge_valf16:
2114 case Builtin::BI__builtin_huge_valf128:
2115 case Builtin::BI__builtin_inf:
2116 case Builtin::BI__builtin_inff:
2117 case Builtin::BI__builtin_infl:
2118 case Builtin::BI__builtin_inff16:
2119 case Builtin::BI__builtin_inff128:
2120 if (!interp__builtin_inf(S, OpPC, Frame, F))
2121 return false;
2122 break;
2123 case Builtin::BI__builtin_copysign:
2124 case Builtin::BI__builtin_copysignf:
2125 case Builtin::BI__builtin_copysignl:
2126 case Builtin::BI__builtin_copysignf128:
2127 if (!interp__builtin_copysign(S, OpPC, Frame, F))
2128 return false;
2129 break;
2130
2131 case Builtin::BI__builtin_fmin:
2132 case Builtin::BI__builtin_fminf:
2133 case Builtin::BI__builtin_fminl:
2134 case Builtin::BI__builtin_fminf16:
2135 case Builtin::BI__builtin_fminf128:
2136 if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2137 return false;
2138 break;
2139
2140 case Builtin::BI__builtin_fminimum_num:
2141 case Builtin::BI__builtin_fminimum_numf:
2142 case Builtin::BI__builtin_fminimum_numl:
2143 case Builtin::BI__builtin_fminimum_numf16:
2144 case Builtin::BI__builtin_fminimum_numf128:
2145 if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2146 return false;
2147 break;
2148
2149 case Builtin::BI__builtin_fmax:
2150 case Builtin::BI__builtin_fmaxf:
2151 case Builtin::BI__builtin_fmaxl:
2152 case Builtin::BI__builtin_fmaxf16:
2153 case Builtin::BI__builtin_fmaxf128:
2154 if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2155 return false;
2156 break;
2157
2158 case Builtin::BI__builtin_fmaximum_num:
2159 case Builtin::BI__builtin_fmaximum_numf:
2160 case Builtin::BI__builtin_fmaximum_numl:
2161 case Builtin::BI__builtin_fmaximum_numf16:
2162 case Builtin::BI__builtin_fmaximum_numf128:
2163 if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2164 return false;
2165 break;
2166
2167 case Builtin::BI__builtin_isnan:
2168 if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
2169 return false;
2170 break;
2171 case Builtin::BI__builtin_issignaling:
2172 if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
2173 return false;
2174 break;
2175
2176 case Builtin::BI__builtin_isinf:
2177 if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
2178 return false;
2179 break;
2180
2181 case Builtin::BI__builtin_isinf_sign:
2182 if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
2183 return false;
2184 break;
2185
2186 case Builtin::BI__builtin_isfinite:
2187 if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
2188 return false;
2189 break;
2190 case Builtin::BI__builtin_isnormal:
2191 if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
2192 return false;
2193 break;
2194 case Builtin::BI__builtin_issubnormal:
2195 if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
2196 return false;
2197 break;
2198 case Builtin::BI__builtin_iszero:
2199 if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
2200 return false;
2201 break;
2202 case Builtin::BI__builtin_signbit:
2203 case Builtin::BI__builtin_signbitf:
2204 case Builtin::BI__builtin_signbitl:
2205 if (!interp__builtin_signbit(S, OpPC, Frame, F, Call))
2206 return false;
2207 break;
2208 case Builtin::BI__builtin_isgreater:
2209 case Builtin::BI__builtin_isgreaterequal:
2210 case Builtin::BI__builtin_isless:
2211 case Builtin::BI__builtin_islessequal:
2212 case Builtin::BI__builtin_islessgreater:
2213 case Builtin::BI__builtin_isunordered:
2214 if (!interp_floating_comparison(S, OpPC, Frame, F, Call))
2215 return false;
2216 break;
2217 case Builtin::BI__builtin_isfpclass:
2218 if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
2219 return false;
2220 break;
2221 case Builtin::BI__builtin_fpclassify:
2222 if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
2223 return false;
2224 break;
2225
2226 case Builtin::BI__builtin_fabs:
2227 case Builtin::BI__builtin_fabsf:
2228 case Builtin::BI__builtin_fabsl:
2229 case Builtin::BI__builtin_fabsf128:
2230 if (!interp__builtin_fabs(S, OpPC, Frame, F))
2231 return false;
2232 break;
2233
2234 case Builtin::BI__builtin_abs:
2235 case Builtin::BI__builtin_labs:
2236 case Builtin::BI__builtin_llabs:
2237 if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
2238 return false;
2239 break;
2240
2241 case Builtin::BI__builtin_popcount:
2242 case Builtin::BI__builtin_popcountl:
2243 case Builtin::BI__builtin_popcountll:
2244 case Builtin::BI__builtin_popcountg:
2245 case Builtin::BI__popcnt16: // Microsoft variants of popcount
2246 case Builtin::BI__popcnt:
2247 case Builtin::BI__popcnt64:
2248 if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
2249 return false;
2250 break;
2251
2252 case Builtin::BI__builtin_parity:
2253 case Builtin::BI__builtin_parityl:
2254 case Builtin::BI__builtin_parityll:
2255 if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
2256 return false;
2257 break;
2258
2259 case Builtin::BI__builtin_clrsb:
2260 case Builtin::BI__builtin_clrsbl:
2261 case Builtin::BI__builtin_clrsbll:
2262 if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
2263 return false;
2264 break;
2265
2266 case Builtin::BI__builtin_bitreverse8:
2267 case Builtin::BI__builtin_bitreverse16:
2268 case Builtin::BI__builtin_bitreverse32:
2269 case Builtin::BI__builtin_bitreverse64:
2270 if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
2271 return false;
2272 break;
2273
2274 case Builtin::BI__builtin_classify_type:
2275 if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
2276 return false;
2277 break;
2278
2279 case Builtin::BI__builtin_expect:
2280 case Builtin::BI__builtin_expect_with_probability:
2281 if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
2282 return false;
2283 break;
2284
2285 case Builtin::BI__builtin_rotateleft8:
2286 case Builtin::BI__builtin_rotateleft16:
2287 case Builtin::BI__builtin_rotateleft32:
2288 case Builtin::BI__builtin_rotateleft64:
2289 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2290 case Builtin::BI_rotl16:
2291 case Builtin::BI_rotl:
2292 case Builtin::BI_lrotl:
2293 case Builtin::BI_rotl64:
2294 if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
2295 return false;
2296 break;
2297
2298 case Builtin::BI__builtin_rotateright8:
2299 case Builtin::BI__builtin_rotateright16:
2300 case Builtin::BI__builtin_rotateright32:
2301 case Builtin::BI__builtin_rotateright64:
2302 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2303 case Builtin::BI_rotr16:
2304 case Builtin::BI_rotr:
2305 case Builtin::BI_lrotr:
2306 case Builtin::BI_rotr64:
2307 if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
2308 return false;
2309 break;
2310
2311 case Builtin::BI__builtin_ffs:
2312 case Builtin::BI__builtin_ffsl:
2313 case Builtin::BI__builtin_ffsll:
2314 if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
2315 return false;
2316 break;
2317 case Builtin::BIaddressof:
2318 case Builtin::BI__addressof:
2319 case Builtin::BI__builtin_addressof:
2320 if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
2321 return false;
2322 break;
2323
2324 case Builtin::BIas_const:
2325 case Builtin::BIforward:
2326 case Builtin::BIforward_like:
2327 case Builtin::BImove:
2328 case Builtin::BImove_if_noexcept:
2329 if (!interp__builtin_move(S, OpPC, Frame, F, Call))
2330 return false;
2331 break;
2332
2333 case Builtin::BI__builtin_eh_return_data_regno:
2334    if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
2335      return false;
2336 break;
2337
2338 case Builtin::BI__builtin_launder:
2339 if (!noopPointer(S, OpPC, Frame, F, Call))
2340 return false;
2341 break;
2342
2343 case Builtin::BI__builtin_add_overflow:
2344 case Builtin::BI__builtin_sub_overflow:
2345 case Builtin::BI__builtin_mul_overflow:
2346 case Builtin::BI__builtin_sadd_overflow:
2347 case Builtin::BI__builtin_uadd_overflow:
2348 case Builtin::BI__builtin_uaddl_overflow:
2349 case Builtin::BI__builtin_uaddll_overflow:
2350 case Builtin::BI__builtin_usub_overflow:
2351 case Builtin::BI__builtin_usubl_overflow:
2352 case Builtin::BI__builtin_usubll_overflow:
2353 case Builtin::BI__builtin_umul_overflow:
2354 case Builtin::BI__builtin_umull_overflow:
2355 case Builtin::BI__builtin_umulll_overflow:
2356 case Builtin::BI__builtin_saddl_overflow:
2357 case Builtin::BI__builtin_saddll_overflow:
2358 case Builtin::BI__builtin_ssub_overflow:
2359 case Builtin::BI__builtin_ssubl_overflow:
2360 case Builtin::BI__builtin_ssubll_overflow:
2361 case Builtin::BI__builtin_smul_overflow:
2362 case Builtin::BI__builtin_smull_overflow:
2363 case Builtin::BI__builtin_smulll_overflow:
2364 if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
2365 return false;
2366 break;
2367
2368 case Builtin::BI__builtin_addcb:
2369 case Builtin::BI__builtin_addcs:
2370 case Builtin::BI__builtin_addc:
2371 case Builtin::BI__builtin_addcl:
2372 case Builtin::BI__builtin_addcll:
2373 case Builtin::BI__builtin_subcb:
2374 case Builtin::BI__builtin_subcs:
2375 case Builtin::BI__builtin_subc:
2376 case Builtin::BI__builtin_subcl:
2377 case Builtin::BI__builtin_subcll:
2378 if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
2379 return false;
2380 break;
2381
2382 case Builtin::BI__builtin_clz:
2383 case Builtin::BI__builtin_clzl:
2384 case Builtin::BI__builtin_clzll:
2385 case Builtin::BI__builtin_clzs:
2386 case Builtin::BI__builtin_clzg:
2387 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
2388 case Builtin::BI__lzcnt:
2389 case Builtin::BI__lzcnt64:
2390 if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
2391 return false;
2392 break;
2393
2394 case Builtin::BI__builtin_ctz:
2395 case Builtin::BI__builtin_ctzl:
2396 case Builtin::BI__builtin_ctzll:
2397 case Builtin::BI__builtin_ctzs:
2398 case Builtin::BI__builtin_ctzg:
2399 if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
2400 return false;
2401 break;
2402
2403 case Builtin::BI__builtin_bswap16:
2404 case Builtin::BI__builtin_bswap32:
2405 case Builtin::BI__builtin_bswap64:
2406 if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
2407 return false;
2408 break;
2409
2410 case Builtin::BI__atomic_always_lock_free:
2411 case Builtin::BI__atomic_is_lock_free:
2412 case Builtin::BI__c11_atomic_is_lock_free:
2413 if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
2414 return false;
2415 break;
2416
2417 case Builtin::BI__builtin_complex:
2418 if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
2419 return false;
2420 break;
2421
2422 case Builtin::BI__builtin_is_aligned:
2423 case Builtin::BI__builtin_align_up:
2424 case Builtin::BI__builtin_align_down:
2425    if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
2426      return false;
2427 break;
2428
2429 case Builtin::BI__builtin_assume_aligned:
2430 if (!interp__builtin_assume_aligned(S, OpPC, Frame, F, Call))
2431 return false;
2432 break;
2433
2434 case clang::X86::BI__builtin_ia32_bextr_u32:
2435 case clang::X86::BI__builtin_ia32_bextr_u64:
2436 case clang::X86::BI__builtin_ia32_bextri_u32:
2437 case clang::X86::BI__builtin_ia32_bextri_u64:
2438 if (!interp__builtin_ia32_bextr(S, OpPC, Frame, F, Call))
2439 return false;
2440 break;
2441
2442 case clang::X86::BI__builtin_ia32_bzhi_si:
2443 case clang::X86::BI__builtin_ia32_bzhi_di:
2444 if (!interp__builtin_ia32_bzhi(S, OpPC, Frame, F, Call))
2445 return false;
2446 break;
2447
2448 case clang::X86::BI__builtin_ia32_lzcnt_u16:
2449 case clang::X86::BI__builtin_ia32_lzcnt_u32:
2450 case clang::X86::BI__builtin_ia32_lzcnt_u64:
2451 if (!interp__builtin_ia32_lzcnt(S, OpPC, Frame, F, Call))
2452 return false;
2453 break;
2454
2455 case clang::X86::BI__builtin_ia32_tzcnt_u16:
2456 case clang::X86::BI__builtin_ia32_tzcnt_u32:
2457 case clang::X86::BI__builtin_ia32_tzcnt_u64:
2458 if (!interp__builtin_ia32_tzcnt(S, OpPC, Frame, F, Call))
2459 return false;
2460 break;
2461
2462 case clang::X86::BI__builtin_ia32_pdep_si:
2463 case clang::X86::BI__builtin_ia32_pdep_di:
2464 if (!interp__builtin_ia32_pdep(S, OpPC, Frame, F, Call))
2465 return false;
2466 break;
2467
2468 case clang::X86::BI__builtin_ia32_pext_si:
2469 case clang::X86::BI__builtin_ia32_pext_di:
2470 if (!interp__builtin_ia32_pext(S, OpPC, Frame, F, Call))
2471 return false;
2472 break;
2473
2474 case clang::X86::BI__builtin_ia32_addcarryx_u32:
2475 case clang::X86::BI__builtin_ia32_addcarryx_u64:
2476 case clang::X86::BI__builtin_ia32_subborrow_u32:
2477 case clang::X86::BI__builtin_ia32_subborrow_u64:
2478    if (!interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, F, Call))
2479      return false;
2480 break;
2481
2482 case Builtin::BI__builtin_os_log_format_buffer_size:
2483    if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
2484      return false;
2485 break;
2486
2487 case Builtin::BI__builtin_ptrauth_string_discriminator:
2488    if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
2489      return false;
2490 break;
2491
2492 case Builtin::BI__builtin_constant_p:
2493 if (!interp__builtin_constant_p(S, OpPC, Frame, F, Call))
2494 return false;
2495 break;
2496
2497 case Builtin::BI__noop:
2498 pushInteger(S, 0, Call->getType());
2499 break;
2500
2501 case Builtin::BI__builtin_operator_new:
2502 if (!interp__builtin_operator_new(S, OpPC, Frame, F, Call))
2503 return false;
2504 break;
2505
2506 case Builtin::BI__builtin_operator_delete:
2507 if (!interp__builtin_operator_delete(S, OpPC, Frame, F, Call))
2508 return false;
2509 break;
2510
2511 case Builtin::BI__arithmetic_fence:
2512 if (!interp__builtin_arithmetic_fence(S, OpPC, Frame, F, Call))
2513 return false;
2514 break;
2515
2516 case Builtin::BI__builtin_reduce_add:
2517 case Builtin::BI__builtin_reduce_mul:
2518 case Builtin::BI__builtin_reduce_and:
2519 case Builtin::BI__builtin_reduce_or:
2520 case Builtin::BI__builtin_reduce_xor:
2521 if (!interp__builtin_vector_reduce(S, OpPC, Frame, F, Call))
2522 return false;
2523 break;
2524
2525 case Builtin::BI__builtin_elementwise_popcount:
2526    if (!interp__builtin_elementwise_popcount(S, OpPC, Frame, F, Call))
2527      return false;
2528 break;
2529
2530 case Builtin::BI__builtin_memcpy:
2531 case Builtin::BImemcpy:
2532 case Builtin::BI__builtin_memmove:
2533 case Builtin::BImemmove:
2534 if (!interp__builtin_memcpy(S, OpPC, Frame, F, Call))
2535 return false;
2536 break;
2537
2538 case Builtin::BI__builtin_memcmp:
2539 case Builtin::BImemcmp:
2540 case Builtin::BI__builtin_bcmp:
2541 case Builtin::BIbcmp:
2542 case Builtin::BI__builtin_wmemcmp:
2543 case Builtin::BIwmemcmp:
2544 if (!interp__builtin_memcmp(S, OpPC, Frame, F, Call))
2545 return false;
2546 break;
2547
2548 default:
2549 S.FFDiag(S.Current->getLocation(OpPC),
2550 diag::note_invalid_subexpr_in_const_expr)
2551 << S.Current->getRange(OpPC);
2552
2553 return false;
2554 }
2555
2556 return retPrimValue(S, OpPC, ReturnT);
2557}
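For orientation (illustrative, not part of the listing), a few constant expressions that route through this switch:

static_assert(__builtin_popcount(0xF0u) == 4);          // interp__builtin_popcount
static_assert(__builtin_rotateleft8(0x81, 1) == 0x03);  // interp__builtin_rotate
static_assert(__builtin_bswap16(0x1234) == 0x3412);     // interp__builtin_bswap
static_assert(__builtin_clz(1u << 31) == 0);            // interp__builtin_clz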
2558
2559bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
2560                       llvm::ArrayRef<int64_t> ArrayIndices,
2561 int64_t &IntResult) {
2562  CharUnits Result;
2563  unsigned N = E->getNumComponents();
2564 assert(N > 0);
2565
2566 unsigned ArrayIndex = 0;
2567 QualType CurrentType = E->getTypeSourceInfo()->getType();
2568 for (unsigned I = 0; I != N; ++I) {
2569 const OffsetOfNode &Node = E->getComponent(I);
2570 switch (Node.getKind()) {
2571 case OffsetOfNode::Field: {
2572 const FieldDecl *MemberDecl = Node.getField();
2573 const RecordType *RT = CurrentType->getAs<RecordType>();
2574 if (!RT)
2575 return false;
2576 const RecordDecl *RD = RT->getDecl();
2577 if (RD->isInvalidDecl())
2578 return false;
2579      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2580      unsigned FieldIndex = MemberDecl->getFieldIndex();
2581 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
2582 Result +=
2583          S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
2584      CurrentType = MemberDecl->getType().getNonReferenceType();
2585 break;
2586 }
2587 case OffsetOfNode::Array: {
2588 // When generating bytecode, we put all the index expressions as Sint64 on
2589 // the stack.
2590 int64_t Index = ArrayIndices[ArrayIndex];
2591 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
2592 if (!AT)
2593 return false;
2594 CurrentType = AT->getElementType();
2595 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
2596 Result += Index * ElementSize;
2597 ++ArrayIndex;
2598 break;
2599 }
2600 case OffsetOfNode::Base: {
2601 const CXXBaseSpecifier *BaseSpec = Node.getBase();
2602 if (BaseSpec->isVirtual())
2603 return false;
2604
2605 // Find the layout of the class whose base we are looking into.
2606 const RecordType *RT = CurrentType->getAs<RecordType>();
2607 if (!RT)
2608 return false;
2609 const RecordDecl *RD = RT->getDecl();
2610 if (RD->isInvalidDecl())
2611 return false;
2612      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2613
2614 // Find the base class itself.
2615 CurrentType = BaseSpec->getType();
2616 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2617 if (!BaseRT)
2618 return false;
2619
2620 // Add the offset to the base.
2621 Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
2622 break;
2623 }
2624  case OffsetOfNode::Identifier:
2625    llvm_unreachable("Dependent OffsetOfExpr?");
2626 }
2627 }
2628
2629 IntResult = Result.getQuantity();
2630
2631 return true;
2632}
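For reference (illustrative only), the Field and Array component kinds handled above correspond to offsetof designators like these:

struct Inner { int A; int B; };
struct Outer { Inner In[2]; };
// Field -> Array -> Field components, walked by the loop above:
static_assert(__builtin_offsetof(Outer, In[1].B) ==
              sizeof(Inner) + __builtin_offsetof(Inner, B));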
2633
2634bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
2635                                const Pointer &Ptr, const APSInt &IntValue) {
2636
2637 const Record *R = Ptr.getRecord();
2638 assert(R);
2639 assert(R->getNumFields() == 1);
2640
2641 unsigned FieldOffset = R->getField(0u)->Offset;
2642 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
2643 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
2644
2645 INT_TYPE_SWITCH(FieldT,
2646 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
2647 FieldPtr.initialize();
2648 return true;
2649}
2650
2651static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2652 Pointer &Dest, bool Activate);
2653static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
2654 Pointer &Dest, bool Activate = false) {
2655 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2656 const Descriptor *DestDesc = Dest.getFieldDesc();
2657
2658 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
2659 Pointer DestField = Dest.atField(F.Offset);
2660 if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
2661 TYPE_SWITCH(*FT, {
2662 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
2663 if (Src.atField(F.Offset).isInitialized())
2664 DestField.initialize();
2665 if (Activate)
2666 DestField.activate();
2667 });
2668 return true;
2669 }
2670 // Composite field.
2671 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
2672 };
2673
2674 assert(SrcDesc->isRecord());
2675 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
2676 const Record *R = DestDesc->ElemRecord;
2677 for (const Record::Field &F : R->fields()) {
2678 if (R->isUnion()) {
2679 // For unions, only copy the active field.
2680 const Pointer &SrcField = Src.atField(F.Offset);
2681 if (SrcField.isActive()) {
2682 if (!copyField(F, /*Activate=*/true))
2683 return false;
2684 }
2685 } else {
2686 if (!copyField(F, Activate))
2687 return false;
2688 }
2689 }
2690
2691 for (const Record::Base &B : R->bases()) {
2692 Pointer DestBase = Dest.atField(B.Offset);
2693 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
2694 return false;
2695 }
2696
2697 Dest.initialize();
2698 return true;
2699}
2700
2701static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2702 Pointer &Dest, bool Activate = false) {
2703 assert(Src.isLive() && Dest.isLive());
2704
2705 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2706 const Descriptor *DestDesc = Dest.getFieldDesc();
2707
2708 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
2709
2710 if (DestDesc->isPrimitiveArray()) {
2711 assert(SrcDesc->isPrimitiveArray());
2712 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
2713 PrimType ET = DestDesc->getPrimType();
2714 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
2715 Pointer DestElem = Dest.atIndex(I);
2716 TYPE_SWITCH(ET, {
2717 DestElem.deref<T>() = Src.atIndex(I).deref<T>();
2718 DestElem.initialize();
2719 });
2720 }
2721 return true;
2722 }
2723
2724 if (DestDesc->isRecord())
2725 return copyRecord(S, OpPC, Src, Dest, Activate);
2726 return Invalid(S, OpPC);
2727}
2728
2729bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
2730 return copyComposite(S, OpPC, Src, Dest);
2731}
2732
2733} // namespace interp
2734} // namespace clang
Definition: ASTContext.h:3460
DynTypedNode Node
StringRef P
Defines enum values for all the target-independent builtin functions.
Expr * E
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
#define RET_CASE(X)
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition: PrimType.h:194
#define INT_TYPE_SWITCH(Expr, B)
Definition: PrimType.h:175
#define TYPE_SWITCH(Expr, B)
Definition: PrimType.h:153
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
SourceLocation Loc
Definition: SemaObjC.cpp:759
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
const LValueBase getLValueBase() const
Definition: APValue.cpp:984
CharUnits & getLValueOffset()
Definition: APValue.cpp:994
bool isLValue() const
Definition: APValue.h:472
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:682
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
Definition: ASTContext.h:1922
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2770
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2489
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:799
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
Definition: RecordLayout.h:196
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3578
QualType getElementType() const
Definition: Type.h:3590
std::string getQuotedName(unsigned ID) const
Return a quoted name for the specified builtin for use in diagnostics.
Definition: Builtins.cpp:166
Represents a base class of a C++ class.
Definition: DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition: DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition: CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition: Type.cpp:245
bool isInvalidDecl() const
Definition: DeclBase.h:591
This represents one expression.
Definition: Expr.h:110
bool isGLValue() const
Definition: Expr.h:280
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:276
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3040
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.h:3125
Represents a function declaration or definition.
Definition: Decl.h:1935
QualType getReturnType() const
Definition: Decl.h:2727
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition: Expr.h:2519
Helper class for OffsetOfExpr.
Definition: Expr.h:2413
@ Array
An index into an array.
Definition: Expr.h:2418
@ Identifier
A field in a dependent type, known only by its name.
Definition: Expr.h:2422
@ Field
A field.
Definition: Expr.h:2420
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition: Expr.h:2425
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3199
A (possibly-)qualified type.
Definition: Type.h:929
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:7937
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:8140
Represents a struct/union/class.
Definition: Decl.h:4169
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6078
RecordDecl * getDecl() const
Definition: Type.h:6088
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint=false)
Emit a diagnostic.
Definition: SemaBase.cpp:60
ASTContext & getASTContext() const
Definition: Sema.h:534
const LangOptions & getLangOpts() const
Definition: Sema.h:527
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:334
Exposes information about the current target.
Definition: TargetInfo.h:220
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition: TargetInfo.h:844
unsigned getIntWidth() const
getIntWidth/Align - Return the size of 'signed int' and 'unsigned int' for this target,...
Definition: TargetInfo.h:519
bool isBigEndian() const
Definition: TargetInfo.h:1672
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
Definition: TargetInfo.h:1617
unsigned getLongWidth() const
getLongWidth/Align - Return the size of 'signed long' and 'unsigned long' for this target,...
Definition: TargetInfo.h:524
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
Definition: TargetInfo.h:1257
A template argument list.
Definition: DeclTemplate.h:250
unsigned size() const
Retrieve the number of template arguments in this template argument list.
Definition: DeclTemplate.h:286
@ Type
The template argument is a type.
Definition: TemplateBase.h:70
Symbolic representation of typeid(T) for some type T.
Definition: APValue.h:44
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2201
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:2251
bool isCharType() const
Definition: Type.cpp:2123
bool isPointerType() const
Definition: Type.h:8192
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8560
bool isChar8Type() const
Definition: Type.cpp:2139
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition: Type.h:8635
bool isAnyComplexType() const
Definition: Type.h:8300
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2396
bool isFunctionType() const
Definition: Type.h:8188
bool isFloatingType() const
Definition: Type.cpp:2283
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8741
bool isNullPtrType() const
Definition: Type.h:8553
QualType getType() const
Definition: Decl.h:682
A memory block, either on the stack or in the heap.
Definition: InterpBlock.h:49
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition: InterpBlock.h:68
Wrapper around boolean types.
Definition: Boolean.h:25
static Boolean from(T Value)
Definition: Boolean.h:103
Pointer into the code segment.
Definition: Source.h:30
Compilation context for expressions.
Definition: Compiler.h:108
Manages dynamic memory allocations done during bytecode interpretation.
bool isInf() const
Definition: Floating.h:97
const APFloat & getAPFloat() const
Definition: Floating.h:40
llvm::FPClassTest classify() const
Definition: Floating.h:101
bool isSignaling() const
Definition: Floating.h:96
bool isNormal() const
Definition: Floating.h:99
ComparisonCategoryResult compare(const Floating &RHS) const
Definition: Floating.h:104
bool isNan() const
Definition: Floating.h:95
bool isZero() const
Definition: Floating.h:91
bool isNegative() const
Definition: Floating.h:89
static Floating getInf(const llvm::fltSemantics &Sem)
Definition: Floating.h:37
bool isFinite() const
Definition: Floating.h:98
bool isDenormal() const
Definition: Floating.h:100
static Floating abs(const Floating &F)
Definition: Floating.h:164
APFloat::fltCategory getCategory() const
Definition: Floating.h:102
Base class for stack frames, shared between VM and walker.
Definition: Frame.h:25
Bytecode function.
Definition: Function.h:81
const FunctionDecl * getDecl() const
Returns the original FunctionDecl.
Definition: Function.h:96
unsigned getBuiltinID() const
Definition: Function.h:196
Frame storing local variables.
Definition: InterpFrame.h:26
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition: InterpFrame.h:29
CodePtr getRetPC() const
Returns the return address of the frame.
Definition: InterpFrame.h:110
const FunctionDecl * getCallee() const override
Returns the caller.
Stack frame storing temporaries and parameters.
Definition: InterpStack.h:28
void clear()
Clears the stack without calling any destructors.
Definition: InterpStack.cpp:24
T & peek() const
Returns a reference to the value on the top of the stack.
Definition: InterpStack.h:69
Interpreter context.
Definition: InterpState.h:36
A pointer to a memory block, live or dead.
Definition: Pointer.h:88
bool isInitialized() const
Checks if an object was initialized.
Definition: Pointer.cpp:346
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition: Pointer.h:161
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition: Pointer.h:560
int64_t getIndex() const
Returns the index into an array.
Definition: Pointer.h:605
bool isActive() const
Checks if the object is active.
Definition: Pointer.h:549
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition: Pointer.h:180
T & deref() const
Dereferences the pointer, if it's live.
Definition: Pointer.h:657
unsigned getNumElems() const
Returns the number of elements.
Definition: Pointer.h:596
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition: Pointer.h:432
void activate() const
Activats a field.
Definition: Pointer.cpp:430
bool isIntegralPointer() const
Definition: Pointer.h:483
QualType getType() const
Returns the type of the innermost field.
Definition: Pointer.h:351
bool isLive() const
Checks if the pointer is live.
Definition: Pointer.h:282
uint64_t getByteOffset() const
Returns the byte offset from the start.
Definition: Pointer.h:587
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition: Pointer.cpp:336
bool isZero() const
Checks if the pointer is null.
Definition: Pointer.h:271
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition: Pointer.h:296
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition: Pointer.cpp:491
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition: Pointer.cpp:144
uint64_t getIntegerRepresentation() const
Definition: Pointer.h:148
bool isBlockPointer() const
Definition: Pointer.h:482
const Block * block() const
Definition: Pointer.h:602
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition: Pointer.h:341
size_t elemSize() const
Returns the element size of the innermost field.
Definition: Pointer.h:373
void initialize() const
Initializes a field.
Definition: Pointer.cpp:382
const Record * getRecord() const
Returns the record descriptor of a class.
Definition: Pointer.h:488
Structure/Class descriptor.
Definition: Record.h:25
bool isUnion() const
Checks if the record is a union.
Definition: Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition: Record.cpp:40
llvm::iterator_range< const_base_iter > bases() const
Definition: Record.h:88
unsigned getNumFields() const
Definition: Record.h:84
llvm::iterator_range< const_field_iter > fields() const
Definition: Record.h:80
Describes the statement/declaration an opcode was generated from.
Definition: Source.h:77
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition: OSLog.cpp:180
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
llvm::APFloat APFloat
Definition: Floating.h:23
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset=0)
Peek an integer value from the stack into an APSInt.
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
llvm::APInt APInt
Definition: FixedPoint.h:19
static PrimType getLongPrimType(const InterpState &S)
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition: Interp.cpp:849
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, llvm::ArrayRef< int64_t > ArrayIndices, int64_t &Result)
Interpret an offsetof operation.
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool Signaling)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}_ordering type.
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool retPrimValue(InterpState &S, CodePtr OpPC, std::optional< PrimType > &T)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F)
static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static unsigned callArgSize(const InterpState &S, const CallExpr *C)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool RetVoid(InterpState &S, CodePtr &PC)
Definition: Interp.h:340
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition: Interp.cpp:594
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static T getParam(const InterpFrame *Frame, unsigned Index)
static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
constexpr size_t align(size_t Size)
Aligns a size to the pointer alignment.
Definition: PrimType.h:131
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition: Interp.cpp:418
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition: Interp.cpp:309
static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Can be called with an integer or vector as the first and only parameter.
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
Definition: Interp.h:164
PrimType
Enumeration of the primitive types of the VM.
Definition: PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, bool IsNumBuiltin)
static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index)
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Just takes the first Argument to the call and puts it on the stack.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition: Interp.cpp:301
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition: Interp.cpp:910
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static PrimType getIntPrimType(const InterpState &S)
size_t primSize(PrimType Type)
Returns the size of a primitive type in bytes.
Definition: PrimType.cpp:23
static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
llvm::APSInt APSInt
Definition: FixedPoint.h:20
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Five int values followed by one floating-point value.
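The five leading integers are the values returned for NaN, infinite, normal, subnormal, and zero operands, in that order. A sketch assuming standard clang/GCC semantics for __builtin_fpclassify:
    static_assert(__builtin_fpclassify(0, 1, 2, 3, 4, 1.5) == 2);              // normal
    static_assert(__builtin_fpclassify(0, 1, 2, 3, 4, 0.0) == 4);              // zero
    static_assert(__builtin_fpclassify(0, 1, 2, 3, 4, __builtin_inf()) == 1);  // infinite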
static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool IsNumBuiltin)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
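A minimal sketch; the operand may be any floating-point type and the result is usable in constant expressions:
    static_assert(!__builtin_isnan(1.0f));
    static_assert(!__builtin_isnan(1.0));
    static_assert(__builtin_isnan(__builtin_nan("")));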
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call, bool Right)
rotateleft(value, amount)
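The width-suffixed rotate builtins are constant-evaluable; a small sketch of the left and right variants, assuming standard clang semantics:
    static_assert(__builtin_rotateleft8(0x01, 1) == 0x02);
    static_assert(__builtin_rotateright8(0x01, 1) == 0x80);
    static_assert(__builtin_rotateleft32(0x80000000u, 1) == 0x00000001u);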
constexpr bool isIntegralType(PrimType T)
Definition: PrimType.h:74
static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
The first parameter to __builtin_isfpclass is the floating-point value; the second is an integral value.
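A hedged sketch, assuming the predefined __FPCLASS_* mask macros that clang provides alongside this builtin; the integral operand is a bitmask of the floating-point classes being tested:
    static_assert(__builtin_isfpclass(1.0, __FPCLASS_POSNORMAL));
    static_assert(__builtin_isfpclass(-0.0, __FPCLASS_NEGZERO));
    static_assert(!__builtin_isfpclass(-0.0, __FPCLASS_POSZERO));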
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
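A minimal sketch of the byte-swap builtins this handler evaluates, assuming standard clang/GCC __builtin_bswapN semantics:
    static_assert(__builtin_bswap16(0x1234) == 0x3412);
    static_assert(__builtin_bswap32(0x12345678u) == 0x78563412u);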
The JSON file list parser is used to communicate input to InstallAPI.
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
@ AK_Read
Definition: State.h:27
const FunctionProtoType * T
Tracks which bits have been initialized to known values and which ones have an indeterminate value.
Definition: BitcastBuffer.h:81
std::unique_ptr< std::byte[]> Data
Definition: BitcastBuffer.h:83
A quantity in bits.
Definition: BitcastBuffer.h:24
A quantity in bytes.
Definition: BitcastBuffer.h:55
size_t getQuantity() const
Definition: BitcastBuffer.h:58
Bits toBits() const
Definition: BitcastBuffer.h:59
Describes a memory block created by an allocation site.
Definition: Descriptor.h:116
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition: Descriptor.h:243
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition: Descriptor.h:257
QualType getElemQualType() const
Definition: Descriptor.cpp:408
const ValueDecl * asValueDecl() const
Definition: Descriptor.h:208
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition: Descriptor.h:141
QualType getType() const
Definition: Descriptor.cpp:393
static constexpr MetadataSize InlineDescMD
Definition: Descriptor.h:137
unsigned getElemSize() const
Returns the size of an element when the structure is viewed as an array.
Definition: Descriptor.h:238
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition: Descriptor.h:248
PrimType getPrimType() const
Definition: Descriptor.h:230
bool isRecord() const
Checks if the descriptor is of a record.
Definition: Descriptor.h:262
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition: Descriptor.h:146
const Expr * asExpr() const
Definition: Descriptor.h:205
bool isArray() const
Checks if the descriptor is of an array.
Definition: Descriptor.h:260