#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;
namespace {
  class AtomicInfo {
    // ...
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    // ...

  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          // ...
    {
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        // ...
        ValueTy = ATy->getValueType();
        // ...
        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        // ...
      } else if (lvalue.isBitField()) {
        // ...
        ValueSizeInBits = C.getTypeSize(ValueTy);
        // ...
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        // ...
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        // ...
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        // ...
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          // ...
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          // ...
        }
        // ...
      } else if (lvalue.isVectorElt()) {
        // ...
        ValueSizeInBits = C.getTypeSize(ValueTy);
        // ...
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        // ...
      } else {
        assert(lvalue.isExtVectorElt());
        // ...
        ValueSizeInBits = C.getTypeSize(ValueTy);
        // ...
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        // ...
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }
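    // Illustration (assumed target, not from this file): with a maximum
    // inline atomic width of 64 bits, "_Atomic int" stays lock-free and
    // UseLibcall remains false, while a 16-byte _Atomic struct, or an
    // atomic object underaligned for its size, fails hasBuiltinAtomic()
    // and routes every operation through the __atomic_* runtime library.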
    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    // ...
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    // ...

    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    // ...
    /// Is the atomic size larger than the underlying value type?
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    // ...
    bool emitMemSetZeroIfNecessary() const;

    // ...
    /// Copy an r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      // ...
      Address addr = getAtomicAddress();
      // ...
    }

    // ...
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    // ...
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    // ...
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    // ...
    Address CreateTempAlloca() const;

  private:
    // ...
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    // ...
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                 const llvm::function_ref<RValue(RValue)> &UpdateOp,
                                 bool IsVolatile);
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
} // namespace

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}
/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
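// Worked example (illustrative, not from this file): on x86-64, x86_fp80
// occupies 16 allocated bytes but stores only 10, so
// getTypeStoreSize() * 8 == 80 != 128 and the type is not "full size";
// its trailing padding must be zero-initialized before cmpxchg-based
// updates can safely compare whole bit-patterns.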
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, check by evaluation kind.
  switch (getEvaluationKind()) {
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    // A complex value is "full size" if each half is.
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  // ...
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      // ...
  return true;
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // ...
  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
      Scope);
  // ...
  Pair->setWeak(IsWeak);

  // This basic block holds the store to "expected" if the operation failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
  // This basic block is the exit point of the operation.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
  // Branch straight to the exit on success; otherwise write the observed
  // value back into the "expected" temporary.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // ...
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // ...
}
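// Shape of the emitted IR, for reference (illustrative; typed-pointer
// syntax of this Clang vintage):
//
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst acquire
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//
// On failure the observed %old is stored back into the "expected"
// temporary, which is exactly the contract of the bool-returning
// __atomic_compare_exchange builtins.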
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
      // Don't assert on undefined behavior; clamp instead.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
                      SuccessOrder, FailureOrder, Scope);
    return;
  }

  // Create all the relevant basic blocks.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   // ...
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
      SuccessOrder != llvm::AtomicOrdering::Release)
    // ...
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
    // ...

  // Switch on the dynamic failure-order value; MonotonicBB is the default.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // ...
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  // ...
  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
  // ...
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  // ...
  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  // ...
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);
  // ...
  CGF.Builder.SetInsertPoint(ContBB);
}
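// The mapping above mirrors the C11 rules for the failure ordering: it may
// not be memory_order_release or memory_order_acq_rel, and may not be
// stronger than the success ordering. Illustrative case: with
// success == release and a seq_cst failure argument, seq_cst is first
// selected, found stronger than release, and then clamped via
// getStrongestFailureOrdering(release) down to monotonic.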
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
  // ...
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant basic blocks.
      llvm::BasicBlock *StrongBB =
          // ...
      llvm::BasicBlock *ContBB =
          // ...
      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      // ...
      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      // ...
      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    // ...
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    // ...
    Store->setAtomic(Order, Scope);
    // ...
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                  : llvm::AtomicRMWInst::UMin;
    break;

  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                  : llvm::AtomicRMWInst::UMax;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  // ...

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  // ...
}
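// Example of the PostOp path (illustrative): __atomic_add_fetch(p, n, o)
// emits "atomicrmw add", which yields the *old* value, and then re-applies
// the addition so the expression result is the *new* value:
//
//   %old = atomicrmw add i32* %p, i32 %n seq_cst
//   %new = add i32 %old, %n        ; PostOp
//
// __atomic_nand_fetch additionally needs the CreateNot above, since its
// "fetch" result is ~(old & val).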
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a sync scope. If the clang atomic
  // expression has no scope operand, use the default LLVM sync scope.
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle the constant-scope case.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()), Order,
        CGF.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle a non-constant scope: emit one block per possible scope value
  // and switch on the runtime operand.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported sync scope is encountered at run time, assume the
  // fallback scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(
                     CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
                     CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
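// Illustrative (assumed OpenCL usage, not from this file): for
//   atomic_fetch_add_explicit(p, 1, memory_order_seq_cst, scope)
// with a scope argument that is not a compile-time constant, the code
// above emits one basic block per representable scope (work_group,
// device, ...), a switch on the runtime value defaulting to the model's
// fallback scope, and the same atomic operation in each block with a
// different LLVM syncscope attached.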
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                              bool UseOptimizedLibcall, llvm::Value *Val,
                              QualType ValTy, SourceLocation Loc,
                              CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load the value and pass it to the function directly.
    // ...
        SizeInBits)->getPointerTo();
    // ...
  } else {
    // Non-optimized functions always take the value by reference.
    // ...
  }
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
  bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
  bool UseLibcall = Misaligned | Oversized;

  if (UseLibcall) {
    CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        // ...
  }
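  // Illustrative example: a 4-byte atomic load through a pointer that is
  // only known to be 2-byte aligned gives (2 % 4) != 0, so the operation is
  // Misaligned, takes the libcall path, and triggers the warning above;
  // likewise a 16-byte atomic on a target with an 8-byte maximum inline
  // width is Oversized.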
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, the increment must be scaled by the size of
      // the pointee: adding 1 to an int* advances by sizeof(int) bytes.
      // ...
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
  if (Dest.isValid())
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType())
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled above with EmitAtomicInit!");

    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__atomic_fetch_max:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;

    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_compare_exchange:
      // Use the generic version if we don't know that the operand will be
      // suitably aligned for the optimized version.
      if (Misaligned)
        break;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
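    // The two libcall families differ in shape (illustrative):
    //   optimized: T    __atomic_fetch_add_N(T *mem, T val, int order)
    //              with N in {1,2,4,8} encoded in the symbol name, and
    //   generic:   void __atomic_load(size_t size, void *mem, void *ret,
    //                                 int order)
    //              which passes the size as an explicit first argument.
    // The "_N" suffix is appended after the base name is chosen below.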
    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the next parameter. The OpenCL atomic library
    // functions only accept pointers to the generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto T = V->getType();
      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);

      return getTargetHooks().performAddrSpaceCast(
          *this, V, AS, LangAS::opencl_generic, DestType, false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(
                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
             getContext().VoidPtrTy);
    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(
                   EmitCastToVoidPtr(Val1.getPointer()),
                   E->getVal1()->getType())),
               getContext().VoidPtrTy);
      // ...
      break;

    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      // ...
      break;

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      // ...
      break;

    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      // ...
      break;

    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;

    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      // ...
      break;

    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      // ...
      break;

    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      // ...
      break;

    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      // ...
      break;

    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_min"
                        : "__atomic_fetch_umin";
      // ...
      break;

    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_max"
                        : "__atomic_fetch_umax";
      // ...
      break;

    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      // ...
      break;
    }
    if (E->isOpenCL())
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // The value is returned directly as an appropriately sized integer.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // The value is returned through a parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);
    // ...

    // PostOp is only needed for the atomic_*_fetch operations, and is only
    // implemented in the UseOptimizedLibcall codepath.
    assert(UseOptimizedLibcall || !PostOp);

    // ...
    llvm::Value *ResVal = Res.getScalarVal();
    if (PostOp) {
      llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
      ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
    }
    if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
      ResVal = Builder.CreateNot(ResVal);

    Builder.CreateStore(
        ResVal,
        Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
    // ...
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
        RValTy, E->getExprLoc());
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid
    // C ABI value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                        Dest.getAddressSpace())),
        RValTy, E->getExprLoc());
  }
  // Long case: the ordering isn't obviously constant.

  // Create all the relevant basic blocks.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch; MonotonicBB is the (arbitrary) default case.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
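  // Illustrative result: for a load with a runtime memory_order argument,
  // the IR is a switch over the C ABI ordering constants (relaxed=0,
  // consume=1, acquire=2, release=3, acq_rel=4, seq_cst=5) with one atomic
  // operation per reachable block; the release/acq_rel cases are simply
  // omitted for loads, since they would be undefined behavior.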
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                      Dest.getAddressSpace())),
      RValTy, E->getExprLoc());
}

Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  // ...
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return emitCastToAtomicIntPointer(Addr);
}

RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  // ...
}

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  // ...
  auto *ValTy = AsValue
                    ? CGF.ConvertTypeForMem(ValueTy)
                    : getAtomicAddress().getType()->getPointerElementType();
  if (ValTy->isIntegerTy()) {
    assert(IntVal->getType() == ValTy && "Different integer types.");
    return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
  } else if (ValTy->isPointerTy())
    return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
  else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
    return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));

  // Create a temporary big enough to hold the atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  // ...
  Temp = CreateTempAlloca();
  // ...

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order)
  // ...
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  // ...
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  // ...
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}
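// Example (assumed target, not from this file): under /volatile:ms on
// x86-64, "volatile int v" qualifies (4 bytes, lock-free, no wider than a
// pointer), so plain accesses to v become Acquire loads and Release stores
// below; a volatile 16-byte struct does not qualify and keeps ordinary
// volatile accesses.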
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // ...
    TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
  // ...
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  // ...
  // Zero-initialize; strictly speaking only the trailing padding needs it,
  // but this is simpler.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();
  // ...
}

Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // ...
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  // ...
  if (isa<llvm::IntegerType>(Value->getType()))
    return CGF.EmitToMemory(Value, ValueTy);
  else {
    llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
        CGF.getLLVMContext(),
        LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
    if (isa<llvm::PointerType>(Value->getType()))
      return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
    else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
      return CGF.Builder.CreateBitCast(Value, InputIntTy);
  }
  // ...
  // Otherwise, we need to go through memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure)
  // ...
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  // ...
  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (isStrongerThan(Failure, Success))
    // Don't assert on undefined behavior "failure argument shall be no
    // stronger than the success argument".
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce source addresses.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}
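// Illustrative: getStrongestFailureOrdering(Release) is Monotonic and
// getStrongestFailureOrdering(AcquireRelease) is Acquire, so a caller
// passing (Success=Release, Failure=SeqCst) ends up with a
// release/monotonic cmpxchg rather than tripping an assertion.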
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue OldRVal,
                                  const llvm::function_ref<RValue(RValue)> &UpdateOp,
                                  Address DesiredAddr) {
  // ...
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ...
  // Build a new lvalue for the temp address.
  Address Ptr = Atomics.materializeRValue(OldRVal);
  // ...
  RValue NewRVal = UpdateOp(UpRVal);
  // ...
}

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  // ...
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  // ...
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform a compare-and-swap loop.
  // ...
  auto *CurBB = CGF.Builder.GetInsertBlock();
  // ...
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  }
  // ...
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  // ...
}
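// The emitted pattern is the canonical CAS-update loop (illustrative IR):
//
//   entry:
//     %old0 = load atomic i32, i32* %p <order>
//     br label %atomic_cont
//   atomic_cont:
//     %old  = phi i32 [ %old0, %entry ], [ %prev, %atomic_cont ]
//     ; ... %new = UpdateOp(%old) ...
//     %pair = cmpxchg i32* %p, i32 %old, i32 %new <order> <failure>
//     %prev = extractvalue { i32, i1 } %pair, 0
//     %ok   = extractvalue { i32, i1 } %pair, 1
//     br i1 %ok, label %atomic_exit, label %atomic_cont
//   atomic_exit: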
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ...
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  // ...
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  }
  // ...
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  // ...
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *CurBB = CGF.Builder.GetInsertBlock();
  // ...
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  }
  // ...
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  // ...
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
1898 llvm::AtomicOrdering AO;
1900 AO = llvm::AtomicOrdering::SequentiallyConsistent;
1902 AO = llvm::AtomicOrdering::Release;
1905 return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1914 llvm::AtomicOrdering AO,
bool IsVolatile,
1922 AtomicInfo atomics(*
this, dest);
1923 LValue LVal = atomics.getAtomicLValue();
1928 atomics.emitCopyIntoMemory(rvalue);
1933 if (atomics.shouldUseLibcall()) {
1935 Address srcAddr = atomics.materializeRValue(rvalue);
1940 getContext().getSizeType());
1941 args.
add(
RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1942 getContext().VoidPtrTy);
1944 getContext().VoidPtrTy);
1946 RValue::get(llvm::ConstantInt::get(IntTy, (
int)llvm::toCABI(AO))),
1947 getContext().IntTy);
1953 llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1957 atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1958 intValue = Builder.CreateIntCast(
1960 llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1964 store->setAtomic(AO);
1968 store->setVolatile(
true);
1969 CGM.DecorateInstructionWithTBAA(store, dest.
getTBAAInfo());
1974 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
/// Emit a compare-and-exchange op for atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // ...
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    // ...
  }

  case TEK_Complex: {
    // ...
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression of
    // atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }
    // ...
    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}