#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;
  uint64_t AtomicSizeInBits;
  uint64_t ValueSizeInBits;
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        EvaluationKind(TEK_Scalar), UseLibcall(true) {
    assert(!lvalue.isGlobalReg());
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
    // Bit-field l-value: widen to a properly aligned integer "atomic base".
    ValueSizeInBits = C.getTypeSize(ValueTy);
    AtomicSizeInBits = C.toBits(
        C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
            .alignTo(lvalue.getAlignment()));
    auto OffsetInChars =
        (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
        lvalue.getAlignment();
    VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
        VoidPtrAddr, OffsetInChars.getQuantity());
    auto Addr = CGF.Builder.CreateBitCast(
        VoidPtrAddr,
        CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
        "atomic_bitfield_base");
    AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
    if (AtomicTy.isNull()) {
      llvm::APInt Size(
          /*numBits=*/32,
          C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
    }

    // Vector-element l-value:
    ValueSizeInBits = C.getTypeSize(ValueTy);
    AtomicSizeInBits = C.getTypeSize(AtomicTy);

    // Extended-vector-element l-value:
    ValueSizeInBits = C.getTypeSize(ValueTy);
    AtomicSizeInBits = C.getTypeSize(AtomicTy);

    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
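  // Illustrative note (not part of the original source): hasBuiltinAtomic()
  // asks the target whether a lock-free inline sequence exists for this size
  // and alignment. For example, assuming a target without a 128-bit cmpxchg:
  //
  //   struct Pair { void *a, *b; };   // 16 bytes
  //   _Atomic struct Pair p;
  //   // Loads of p cannot be inlined, so they are lowered to the generic
  //   // libcall, roughly: call void @__atomic_load(i64 16, i8* %p, i8* %ret, i32 5)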
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  CharUnits getValueAlignment() const { return ValueAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  Address getAtomicAddress() const {
    return Address(getAtomicPointer(), getAtomicAlignment());
  }

  Address getAtomicAddressAsAtomicIntPointer() const {
    return emitCastToAtomicIntPointer(getAtomicAddress());
  }

  /// Is the atomic size larger than the underlying value type?
  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }
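  // Illustrative note (not from the original file): padding appears whenever
  // the value's size is rounded up to the atomic width. E.g. on x86-64,
  // _Atomic long double occupies 16 bytes of storage for a 10-byte value, so
  // hasPadding() is true and the tail bytes must be zero-filled for
  // cmpxchg-based operations to compare whole atomic words meaningfully.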
  bool emitMemSetZeroIfNecessary() const;

  /// Copy an r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field of the atomic.
  LValue projectValue() const {
    Address addr = getAtomicAddress();
  /// Materialize an atomic r-value at the current insertion point.
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO,
                        bool IsVolatile);

  /// Emit a compare-and-exchange op for atomic type.
  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);

  /// Emits an atomic update computed by UpdateOp.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  /// Emits an atomic update of UpdateRVal.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  /// Creates a temp alloca for intermediate operations on an atomic value.
  Address CreateTempAlloca() const;

private:
  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);
  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);

  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                               const llvm::function_ref<RValue(RValue)> &UpdateOp,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // Cast to a pointer to the value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}
/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
/// Does the atomic type require memsetting to zero before initialization?
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getPointer();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
      Scope);
  Pair->setVolatile(E->isVolatileQualified());
  Pair->setWeak(IsWeak);

  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
      // Don't assert on undefined behavior "failure argument shall be no
      // stronger than the success argument".
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }
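  // Illustrative note (not part of the original source): C11 forbids a
  // cmpxchg failure ordering of release or acq_rel, and the failure order
  // may not be stronger than the success order. Invalid combinations are
  // demoted rather than rejected, e.g.:
  //
  //   __c11_atomic_compare_exchange_strong(&x, &e, d,
  //       __ATOMIC_ACQUIRE,    // success order
  //       __ATOMIC_RELEASE);   // failure order: invalid, demoted to relaxed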
  // The failure ordering is only known at run time: emit one cmpxchg per
  // possible failure ordering and dispatch with a switch.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
      SuccessOrder != llvm::AtomicOrdering::Release)
    AcquireBB = CGF.createBasicBlock("acquire", CGF.CurFn);
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst", CGF.CurFn);

  MonotonicBB = CGF.createBasicBlock("monotonic", CGF.CurFn);
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant basic blocks.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("atomic.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                  : llvm::AtomicRMWInst::UMin;
    break;

  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                  : llvm::AtomicRMWInst::UMax;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
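// Illustrative note (not part of the original source): the fetch-op builtins
// map onto a single atomicrmw instruction; the *_fetch variants recompute
// the new value from the returned old value (PostOp), and nand additionally
// inverts it. Roughly:
//
//   __atomic_add_fetch(&x, 2, ...)   ->  %old = atomicrmw add i32* %x, i32 2
//                                        %new = add i32 %old, 2      ; PostOp
//   __atomic_nand_fetch(&x, 2, ...)  ->  %old = atomicrmw nand i32* %x, i32 2
//                                        %t   = and i32 %old, 2      ; PostOp
//                                        %new = xor i32 %t, -1       ; the NOT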
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle a compile-time constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        static_cast<SyncScope>(SC->getZExtValue()), CGF.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle a run-time scope value with a switch over the known scopes.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.getTargetHooks().getLLVMSyncScopeID(
                            ScopeModel->map(S), CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
  if (UseOptimizedLibcall) {
    // Load the value and pass it to the function directly.
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }
  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = ((Ptr.getAlignment() % sizeChars) != 0 ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  if (UseLibcall)
    CGM.getDiags().Report(E->getLocStart(), diag::warn_atomic_op_misaligned);
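  // Illustrative note (not part of the original source): underalignment alone
  // is enough to force the libcall path. For example:
  //
  //   struct __attribute__((packed)) S { char c; _Atomic int i; };
  //   // &s.i is not 4-byte aligned, so (Ptr.getAlignment() % sizeChars) != 0;
  //   // operations on it go through the __atomic_* libcalls and the
  //   // warn_atomic_op_misaligned diagnostic fires.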
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // Pointer arithmetic: scale the operand by the pointee size.
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
  // The inlined atomics only operate on iN types, so cast everything to an
  // atomic-int pointer of the right width and address space.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
  if (Dest.isValid())
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType())
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  bool UseOptimizedLibcall = false;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    // For these, only library calls for certain sizes exist.
    UseOptimizedLibcall = true;
    break;
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    // Only use optimized library calls for sizes for which they exist.
    if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
      UseOptimizedLibcall = true;
    break;
  }
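  // Illustrative note (not part of the original source): the two libcall
  // flavors differ in signature. The size-suffixed "optimized" calls pass
  // values directly, roughly:
  //   uint32_t __atomic_fetch_add_4(void *mem, uint32_t val, int order);
  // while the generic forms take an explicit size and work through memory:
  //   void __atomic_load(size_t size, void *mem, void *ret, int order);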
  CallArgList Args;
  if (!UseOptimizedLibcall) {
    // For non-optimized library calls, the size is the first parameter.
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
  }

  // The OpenCL atomic library functions only accept generic address space
  // pointers, so cast where necessary.
  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
    auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
    auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
    auto T = V->getType();
    auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);

    return getTargetHooks().performAddrSpaceCast(
        *this, V, AS, LangAS::opencl_generic, DestType, false);
  };

  Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
           getContext().VoidPtrTy);
  std::string LibCallName;
  QualType LoweredMemTy =
      MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
  QualType RetTy;
  bool HaveRetTy = false;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");
  // There is only one libcall for compare-and-exchange, because there is no
  // optimisation benefit possible from a libcall version requiring a weak
  // compare-and-swap.
  // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
  //                                void *desired, int success, int failure)
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    LibCallName = "__atomic_compare_exchange";
    RetTy = getContext().BoolTy;
    HaveRetTy = true;
    Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
             getContext().VoidPtrTy);
    break;
  // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
  //                        int order)
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    LibCallName = "__atomic_exchange";
    break;

  // void __atomic_store(size_t size, void *mem, void *val, int order)
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    LibCallName = "__atomic_store";
    RetTy = getContext().VoidTy;
    HaveRetTy = true;
    break;

  // void __atomic_load(size_t size, void *mem, void *return, int order)
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    LibCallName = "__atomic_load";
    break;
  // T __atomic_add_fetch_N(T *mem, T val, int order)
  // T __atomic_fetch_add_N(T *mem, T val, int order)
  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    LibCallName = "__atomic_fetch_add";
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    LibCallName = "__atomic_fetch_and";
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    LibCallName = "__atomic_fetch_or";
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    LibCallName = "__atomic_fetch_sub";
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    LibCallName = "__atomic_fetch_xor";
    break;
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
    LibCallName = E->getValueType()->isSignedIntegerType()
                      ? "__atomic_fetch_min"
                      : "__atomic_fetch_umin";
    break;
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
    LibCallName = E->getValueType()->isSignedIntegerType()
                      ? "__atomic_fetch_max"
                      : "__atomic_fetch_umax";
    break;

  // T __atomic_nand_fetch_N(T *mem, T val, int order)
  // T __atomic_fetch_nand_N(T *mem, T val, int order)
  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_nand:
    LibCallName = "__atomic_fetch_nand";
    break;
  }
  if (E->isOpenCL())
    LibCallName =
        std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();

  // Optimized functions have the size in their name.
  if (UseOptimizedLibcall)
    LibCallName += "_" + llvm::utostr(Size);
  if (!HaveRetTy) {
    if (UseOptimizedLibcall) {
      // The value is returned directly as an appropriately sized integer.
      RetTy = getContext().getIntTypeForBitwidth(
          getContext().toBits(sizeChars), /*Signed=*/false);
    } else {
      // The value is returned through a parameter placed before the order.
      RetTy = getContext().VoidTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
               getContext().VoidPtrTy);
    }
  }
  // The order is always the last parameter.
  Args.add(RValue::get(Order), getContext().IntTy);

  // The PostOp trick only works for optimized libcalls.
  assert(UseOptimizedLibcall || !PostOp);
    if (PostOp) {
      llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
      ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
    }
    if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
      ResVal = Builder.CreateNot(ResVal);

    Builder.CreateStore(
        ResVal,
        Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));

  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
      RValTy, E->getExprLoc());
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
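    // Illustrative note (not part of the original source): a constant
    // memory_order maps one-to-one onto an LLVM ordering (relaxed ->
    // monotonic; consume is strengthened to acquire), while orderings that
    // are invalid for the operation (e.g. memory_order_acquire on a store)
    // are silently dropped rather than asserted on, since such code has
    // undefined behavior.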
    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                        Dest.getAddressSpace())),
        RValTy, E->getExprLoc());
  }
  // Long case, when Order isn't obviously constant.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split. MonotonicBB is arbitrarily chosen as
  // the default case; in practice, this doesn't matter unless the ordering
  // doesn't fold to a constant.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
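  // Illustrative note (not part of the original source): with a
  // non-constant ordering, e.g.
  //
  //   void load_with(int *p, int *out, int order) {
  //     __atomic_load(p, out, order);   // 'order' only known at run time
  //   }
  //
  // the code above emits one basic block per ordering that is legal for the
  // operation and dispatches with a switch, defaulting to monotonic for
  // out-of-range values.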
  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                      Dest.getAddressSpace())),
      RValTy, E->getExprLoc());
}
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }
  return emitCastToAtomicIntPointer(Addr);
}
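// Illustrative note (not part of the original source): operands whose type
// is not already an integer are accessed through a pointer to an iN of the
// atomic width, so that a single integer load/store/cmpxchg covers any
// payload. E.g. an _Atomic float is loaded as an atomic i32 and bitcast
// back to float; convertToAtomicIntPointer() additionally copies undersized
// operands into a properly sized temporary first.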
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  auto *ValTy = AsValue
                    ? CGF.ConvertTypeForMem(ValueTy)
                    : getAtomicAddress().getType()->getPointerElementType();
  if (ValTy->isIntegerTy()) {
    assert(IntVal->getType() == ValTy && "Different integer types.");
    return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
  } else if (ValTy->isPointerTy())
    return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
  else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
    return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));

  // Otherwise, we need to go through memory.
  bool TempIsVolatile = false;
  Address Temp = CreateTempAlloca();

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile)
    return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}
  llvm::AtomicOrdering AO;
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Turn that back into the original value or whole atomic (for
    // non-simple l-values) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to zero out padding if necessary.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Put the r-value in memory.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}
  llvm::Value *Value = RVal.getScalarVal();
  if (isa<llvm::IntegerType>(Value->getType()))
    return CGF.EmitToMemory(Value, ValueTy);
  else {
    llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
        CGF.getLLVMContext(),
        LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
    if (isa<llvm::PointerType>(Value->getType()))
      return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
    else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
      return CGF.Builder.CreateBitCast(Value, InputIntTy);
  }

  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (isStrongerThan(Failure, Success))
    // Don't assert on undefined behavior "failure argument shall be no
    // stronger than the success argument".
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce source addresses.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue OldRVal,
                                  const llvm::function_ref<RValue(RValue)> &UpdateOp,
                                  Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();

  // For a non-simple l-value, build a new temporary l-value over OldRVal.
  Address Ptr = Atomics.materializeRValue(OldRVal);

  RValue NewRVal = UpdateOp(UpRVal);
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple l-values perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
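// Illustrative note (not part of the original source): EmitAtomicUpdateOp
// is the classic CAS loop. In pseudo-IR:
//
//   old = atomic load
// cont:
//   phi = [old, entry], [cmpxchg.old, cont]
//   desired = UpdateOp(phi)                  // computed via a temporary
//   (cmpxchg.old, ok) = cmpxchg(ptr, phi, desired, AO, Failure)
//   br ok ? exit : cont
//
// This is how compound updates on non-simple l-values (e.g. atomic
// bit-fields) are made atomic without a dedicated instruction.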
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple l-values perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
/// Emit a store to an l-value of atomic type.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(llvm::ConstantInt::get(
                   IntTy, (int)llvm::toCABI(AO))),
               getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
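// Illustrative note (not part of the original source): a simple atomic
// store thus takes one of three routes: a plain store for initialization,
// an "__atomic_store" libcall when the operation is not inline-capable, or
// a native store marked atomic(AO). Non-simple l-values (bit-field, vector
// element) fall through to the CAS-loop-based EmitAtomicUpdate above.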
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}