[AMDGPU] Enable atomic optimizer for 64 bit divergent values #96473

Closed (wants to merge 15 commits)
122 changes: 45 additions & 77 deletions llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -178,6 +178,20 @@ bool AMDGPUAtomicOptimizerImpl::run(Function &F) {
return Changed;
}

static bool shouldOptimize(Type *Ty) {
Review comment (Contributor):
Use a better name that expresses why this type is handleable.

Also, in a follow-up, this really should cover the i16/half/bfloat and 2 x half / 2 x bfloat cases.

switch (Ty->getTypeID()) {
case Type::FloatTyID:
case Type::DoubleTyID:
return true;
case Type::IntegerTyID: {
if (Ty->getIntegerBitWidth() == 32 || Ty->getIntegerBitWidth() == 64)
return true;
}
Review comment (Contributor):
Don't forget pointers

default:
return false;
}
}

void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
// Early exit for unhandled address space atomic instructions.
switch (I.getPointerAddressSpace()) {
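The two review comments above ask for a more descriptive predicate name, coverage of the 16-bit and 2 x 16-bit floating-point cases, and pointer operands. A rough sketch of what that follow-up could look like, meant to slot into AMDGPUAtomicOptimizer.cpp; the name isLegalCrossLaneType, the DataLayout-based pointer check, and the exact set of accepted widths are assumptions, not part of this patch:

```cpp
// Follow-up sketch only (not in this patch): a renamed predicate that also
// accepts pointers and the 16-bit / 2 x 16-bit cases raised in review.
// Whether every one of these widths survives the DPP/permlane lowering is an
// assumption that would have to be checked per subtarget.
static bool isLegalCrossLaneType(Type *Ty, const DataLayout &DL) {
  switch (Ty->getTypeID()) {
  case Type::HalfTyID:
  case Type::BFloatTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::PointerTyID:
    // Pointers would participate via their integer representation.
    return DL.getPointerSizeInBits(Ty->getPointerAddressSpace()) <= 64;
  case Type::IntegerTyID: {
    unsigned BW = Ty->getIntegerBitWidth();
    return BW == 16 || BW == 32 || BW == 64;
  }
  case Type::FixedVectorTyID: {
    // <2 x half> and <2 x bfloat>.
    auto *VT = cast<FixedVectorType>(Ty);
    return VT->getNumElements() == 2 && VT->getElementType()->is16bitFPTy();
  }
  default:
    return false;
  }
}
```

This only widens the type check; the lowering itself would still need matching identity values and bin-op handling for each new width.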
@@ -227,11 +241,10 @@ void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));

// If the value operand is divergent, each lane is contributing a different
// value to the atomic calculation. We can only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is 32
// bits.
if (ValDivergent &&
(!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
// value to the atomic calculation. We only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is of
// 32/64 bit integer, float or double type.
if (ValDivergent && (!ST->hasDPP() || !shouldOptimize(I.getType()))) {
return;
}

@@ -310,11 +323,10 @@ void AMDGPUAtomicOptimizerImpl::visitIntrinsicInst(IntrinsicInst &I) {
const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));

// If the value operand is divergent, each lane is contributing a different
// value to the atomic calculation. We can only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is 32
// bits.
if (ValDivergent &&
(!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
// value to the atomic calculation. We only optimize divergent values if
// we have DPP available on our subtarget, and the atomic operation is of
// 32/64 bit integer, float or double type.
if (ValDivergent && (!ST->hasDPP() || !shouldOptimize(I.getType()))) {
return;
}

@@ -386,7 +398,6 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
Value *V,
Value *const Identity) const {
Type *AtomicTy = V->getType();
Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());
Module *M = B.GetInsertBlock()->getModule();
Function *UpdateDPP =
Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
@@ -402,34 +413,30 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,

// Reduce within each pair of rows (i.e. 32 lanes).
assert(ST->hasPermLaneX16());
V = B.CreateBitCast(V, IntNTy);
Review comment (Contributor):

Please submit an NFC cleanup patch that just removes unnecessary bitcasting, before adding support for new atomic operations.

Value *Permlanex16Call = B.CreateIntrinsic(
V->getType(), Intrinsic::amdgcn_permlanex16,
{V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});
V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
B.CreateBitCast(Permlanex16Call, AtomicTy));
V = buildNonAtomicBinOp(B, Op, V,
Permlanex16Call);
if (ST->isWave32()) {
return V;
}

if (ST->hasPermLane64()) {
// Reduce across the upper and lower 32 lanes.
V = B.CreateBitCast(V, IntNTy);
Value *Permlane64Call =
B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_permlane64, V);
return buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
B.CreateBitCast(Permlane64Call, AtomicTy));
return buildNonAtomicBinOp(B, Op, V,
Permlane64Call);
}

// Pick an arbitrary lane from 0..31 and an arbitrary lane from 32..63 and
// combine them with a scalar operation.
Function *ReadLane =
Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
V = B.CreateBitCast(V, IntNTy);
Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, AtomicTy);
Value *Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
Value *Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
return buildNonAtomicBinOp(B, Op, B.CreateBitCast(Lane0, AtomicTy),
B.CreateBitCast(Lane32, AtomicTy));
return buildNonAtomicBinOp(B, Op, Lane0, Lane32);
}

// Use the builder to create an inclusive scan of V across the wavefront, with
@@ -438,7 +445,6 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
AtomicRMWInst::BinOp Op, Value *V,
Value *Identity) const {
Type *AtomicTy = V->getType();
Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());

Module *M = B.GetInsertBlock()->getModule();
Function *UpdateDPP =
@@ -470,28 +476,26 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
// Combine lane 15 into lanes 16..31 (and, for wave 64, lane 47 into lanes
// 48..63).
assert(ST->hasPermLaneX16());
V = B.CreateBitCast(V, IntNTy);
Value *PermX = B.CreateIntrinsic(
V->getType(), Intrinsic::amdgcn_permlanex16,
{V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});

Value *UpdateDPPCall =
B.CreateCall(UpdateDPP, {Identity, B.CreateBitCast(PermX, AtomicTy),
B.CreateCall(UpdateDPP, {Identity, PermX,
B.getInt32(DPP::QUAD_PERM_ID), B.getInt32(0xa),
B.getInt32(0xf), B.getFalse()});
V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy), UpdateDPPCall);
V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);

if (!ST->isWave32()) {
// Combine lane 31 into lanes 32..63.
V = B.CreateBitCast(V, IntNTy);
Value *const Lane31 = B.CreateIntrinsic(
V->getType(), Intrinsic::amdgcn_readlane, {V, B.getInt32(31)});

Value *UpdateDPPCall = B.CreateCall(
UpdateDPP, {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
B.getInt32(0xc), B.getInt32(0xf), B.getFalse()});

V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
V = buildNonAtomicBinOp(B, Op, V,
UpdateDPPCall);
}
}
@@ -503,8 +507,6 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
Value *Identity) const {
Type *AtomicTy = V->getType();
Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());

Module *M = B.GetInsertBlock()->getModule();
Function *UpdateDPP =
Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
@@ -515,9 +517,9 @@ Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
B.getInt32(0xf), B.getFalse()});
} else {
Function *ReadLane = Intrinsic::getDeclaration(
M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
M, Intrinsic::amdgcn_readlane, AtomicTy);
Function *WriteLane = Intrinsic::getDeclaration(
M, Intrinsic::amdgcn_writelane, B.getInt32Ty());
M, Intrinsic::amdgcn_writelane, AtomicTy);

// On GFX10 all DPP operations are confined to a single row. To get cross-
// row operations we have to use permlane or readlane.
@@ -529,22 +531,18 @@ Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
// Copy the old lane 15 to the new lane 16.
V = B.CreateCall(
WriteLane,
{B.CreateCall(ReadLane, {B.CreateBitCast(Old, IntNTy), B.getInt32(15)}),
B.getInt32(16), B.CreateBitCast(V, IntNTy)});
V = B.CreateBitCast(V, AtomicTy);
{B.CreateCall(ReadLane, {Old, B.getInt32(15)}),
B.getInt32(16), V});
if (!ST->isWave32()) {
// Copy the old lane 31 to the new lane 32.
V = B.CreateBitCast(V, IntNTy);
V = B.CreateCall(WriteLane,
{B.CreateCall(ReadLane, {B.CreateBitCast(Old, IntNTy),
B.getInt32(31)}),
{B.CreateCall(ReadLane, {Old, B.getInt32(31)}),
B.getInt32(32), V});

// Copy the old lane 47 to the new lane 48.
V = B.CreateCall(
WriteLane,
{B.CreateCall(ReadLane, {Old, B.getInt32(47)}), B.getInt32(48), V});
V = B.CreateBitCast(V, AtomicTy);
}
}

@@ -584,24 +582,20 @@ std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
auto *FF1 =
B.CreateIntrinsic(Intrinsic::cttz, WaveTy, {ActiveBits, B.getTrue()});

Type *IntNTy = B.getIntNTy(Ty->getPrimitiveSizeInBits());
auto *LaneIdxInt = B.CreateTrunc(FF1, IntNTy);
auto *LaneIdxInt = B.CreateTrunc(FF1, B.getInt32Ty());

// Get the value required for atomic operation
V = B.CreateBitCast(V, IntNTy);
Value *LaneValue = B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_readlane,
{V, LaneIdxInt});
LaneValue = B.CreateBitCast(LaneValue, Ty);

// Perform writelane if intermediate scan results are required later in the
// kernel computations
Value *OldValue = nullptr;
if (NeedResult) {
OldValue =
B.CreateIntrinsic(IntNTy, Intrinsic::amdgcn_writelane,
{B.CreateBitCast(Accumulator, IntNTy), LaneIdxInt,
B.CreateBitCast(OldValuePhi, IntNTy)});
OldValue = B.CreateBitCast(OldValue, Ty);
B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_writelane,
{Accumulator, LaneIdxInt,
OldValuePhi});
OldValuePhi->addIncoming(OldValue, ComputeLoop);
}
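As a side note for readers unfamiliar with this non-DPP path: the loop being built walks the set bits of the execution mask with cttz, reads each active lane's value with readlane, and, when the result is needed, records the running total for that lane with writelane before accumulating. A host-side scalar model of that computation, assuming a 64-lane wave, one entry per lane in the input vector, and an integer add reduction (purely illustrative, not the emitted IR):

```cpp
#include <bit>
#include <cstdint>
#include <vector>

// Scalar model of the iterative scan built above: walk active lanes in
// ascending order via cttz over the exec mask, record the running total each
// lane sees before its own contribution (the writelane into OldValuePhi),
// then accumulate. Assumes a 64-lane wave and an integer add reduction.
struct ScanModelResult {
  std::vector<uint64_t> OldValue; // per-lane pre-op value (exclusive scan)
  uint64_t Reduced;               // wavefront-wide reduction
};

static ScanModelResult scanModel(const std::vector<uint64_t> &LaneValues,
                                 uint64_t ExecMask) {
  ScanModelResult R{std::vector<uint64_t>(LaneValues.size(), 0), 0};
  uint64_t Accumulator = 0; // identity for add
  while (ExecMask != 0) {
    unsigned Lane = std::countr_zero(ExecMask); // FF1 / cttz
    R.OldValue[Lane] = Accumulator;             // models the writelane
    Accumulator += LaneValues[Lane];            // models readlane + bin op
    ExecMask &= ExecMask - 1;                   // retire the processed lane
  }
  R.Reduced = Accumulator;
  return R;
}
```

For values 3, 5 and 7 in lanes 0, 2 and 5 (all other lanes inactive), the model yields OldValue[0] = 0, OldValue[2] = 3, OldValue[5] = 8 and Reduced = 15, i.e. an exclusive scan plus the wave total.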

@@ -700,10 +694,8 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,

Type *const Ty = I.getType();
Type *Int32Ty = B.getInt32Ty();
Type *IntNTy = B.getIntNTy(Ty->getPrimitiveSizeInBits());
bool isAtomicFloatingPointTy = Ty->isFloatingPointTy();
const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
auto *const VecTy = FixedVectorType::get(Int32Ty, 2);

// This is the value in the atomic operation we need to combine in order to
// reduce the number of atomic operations.
@@ -758,13 +750,8 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
if (ScanImpl == ScanOptions::DPP) {
// First we need to set all inactive invocations to the identity value, so
// that they can correctly contribute to the final result.
V = B.CreateBitCast(V, IntNTy);
Identity = B.CreateBitCast(Identity, IntNTy);
NewV = B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, IntNTy,
NewV = B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Ty,
{V, Identity});
NewV = B.CreateBitCast(NewV, Ty);
V = B.CreateBitCast(V, Ty);
Identity = B.CreateBitCast(Identity, Ty);
if (!NeedResult && ST->hasPermLaneX16()) {
// On GFX10 the permlanex16 instruction helps us build a reduction
// without too many readlanes and writelanes, which are generally bad
@@ -778,11 +765,9 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
// of each active lane in the wavefront. This will be our new value
// which we will provide to the atomic operation.
Value *const LastLaneIdx = B.getInt32(ST->getWavefrontSize() - 1);
assert(TyBitWidth == 32);
NewV = B.CreateBitCast(NewV, IntNTy);
NewV = B.CreateIntrinsic(IntNTy, Intrinsic::amdgcn_readlane,
assert(TyBitWidth == 32 || TyBitWidth == 64);
NewV = B.CreateIntrinsic(Ty, Intrinsic::amdgcn_readlane,
{NewV, LastLaneIdx});
NewV = B.CreateBitCast(NewV, Ty);
}
// Finally mark the readlanes in the WWM section.
NewV = B.CreateIntrinsic(Intrinsic::amdgcn_strict_wwm, Ty, NewV);
@@ -922,26 +907,9 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
// but have to handle 64-bit broadcasts with two calls to this intrinsic.
Value *BroadcastI = nullptr;

if (TyBitWidth == 64) {
Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
Value *const ExtractLo = B.CreateTrunc(CastedPhi, Int32Ty);
Value *const ExtractHi =
B.CreateTrunc(B.CreateLShr(CastedPhi, 32), Int32Ty);
CallInst *const ReadFirstLaneLo = B.CreateIntrinsic(
Int32Ty, Intrinsic::amdgcn_readfirstlane, ExtractLo);
CallInst *const ReadFirstLaneHi = B.CreateIntrinsic(
Int32Ty, Intrinsic::amdgcn_readfirstlane, ExtractHi);
Value *const PartialInsert = B.CreateInsertElement(
PoisonValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
Value *const Insert =
B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
BroadcastI = B.CreateBitCast(Insert, Ty);
} else if (TyBitWidth == 32) {
Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
if (TyBitWidth == 32 || TyBitWidth == 64) {
BroadcastI =
B.CreateIntrinsic(IntNTy, Intrinsic::amdgcn_readfirstlane, CastedPhi);
BroadcastI = B.CreateBitCast(BroadcastI, Ty);

B.CreateIntrinsic(Ty, Intrinsic::amdgcn_readfirstlane, PHI);
} else {
llvm_unreachable("Unhandled atomic bit width");
}
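The last hunk above drops the special-cased 64-bit broadcast. For contrast, here is a condensed sketch of the removed two-call split next to the single call that replaces it; the helper names broadcast64ViaSplit and broadcastDirect are invented for illustration, and the real pass emits these IRBuilder calls inline rather than through helpers:

```cpp
// Illustrative helpers only; the pass emits these IRBuilder calls inline.
// Old path (removed above): split the 64-bit value into two i32 halves,
// readfirstlane each half, then reassemble through <2 x i32> and a bitcast.
static Value *broadcast64ViaSplit(IRBuilder<> &B, Value *PHI, Type *Ty) {
  Type *Int32Ty = B.getInt32Ty();
  Type *Int64Ty = B.getInt64Ty();
  auto *VecTy = FixedVectorType::get(Int32Ty, 2);
  Value *CastedPhi = B.CreateBitCast(PHI, Int64Ty);
  Value *Lo = B.CreateTrunc(CastedPhi, Int32Ty);
  Value *Hi = B.CreateTrunc(B.CreateLShr(CastedPhi, 32), Int32Ty);
  Value *LoRFL =
      B.CreateIntrinsic(Int32Ty, Intrinsic::amdgcn_readfirstlane, {Lo});
  Value *HiRFL =
      B.CreateIntrinsic(Int32Ty, Intrinsic::amdgcn_readfirstlane, {Hi});
  Value *Vec =
      B.CreateInsertElement(PoisonValue::get(VecTy), LoRFL, B.getInt32(0));
  Vec = B.CreateInsertElement(Vec, HiRFL, B.getInt32(1));
  return B.CreateBitCast(Vec, Ty);
}

// New path (kept above): one call, usable for 32- and 64-bit values now that
// the intrinsic takes the value type directly, as the surrounding changes do.
static Value *broadcastDirect(IRBuilder<> &B, Value *PHI, Type *Ty) {
  return B.CreateIntrinsic(Ty, Intrinsic::amdgcn_readfirstlane, {PHI});
}
```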
@@ -169,38 +169,37 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX90A_GFX940-NEXT: [[V_MBCNT_LO_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_LO_U32_B32_e64 [[COPY8]], [[COPY9]], implicit $exec
; GFX90A_GFX940-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
; GFX90A_GFX940-NEXT: [[V_MBCNT_HI_U32_B32_e64_:%[0-9]+]]:vgpr_32 = V_MBCNT_HI_U32_B32_e64 [[COPY10]], [[V_MBCNT_LO_U32_B32_e64_]], implicit $exec
; GFX90A_GFX940-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
; GFX90A_GFX940-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
; GFX90A_GFX940-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
; GFX90A_GFX940-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 [[COPY2]], [[COPY11]], implicit-def dead $scc, implicit $exec
; GFX90A_GFX940-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
; GFX90A_GFX940-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
; GFX90A_GFX940-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
; GFX90A_GFX940-NEXT: [[V_MOV_B32_dpp:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY12]], [[V_SET_INACTIVE_B32_]], 273, 15, 15, 0, implicit $exec
; GFX90A_GFX940-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_SET_INACTIVE_B32_]], 0, [[V_MOV_B32_dpp]], 0, 0, implicit $mode, implicit $exec
; GFX90A_GFX940-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
; GFX90A_GFX940-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
; GFX90A_GFX940-NEXT: [[V_MOV_B32_dpp1:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY13]], [[V_ADD_F32_e64_]], 274, 15, 15, 0, implicit $exec
; GFX90A_GFX940-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_MOV_B32_dpp1]], 0, 0, implicit $mode, implicit $exec
; GFX90A_GFX940-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
; GFX90A_GFX940-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
; GFX90A_GFX940-NEXT: [[V_MOV_B32_dpp2:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY14]], [[V_ADD_F32_e64_1]], 276, 15, 15, 0, implicit $exec
; GFX90A_GFX940-NEXT: [[V_ADD_F32_e64_2:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_1]], 0, [[V_MOV_B32_dpp2]], 0, 0, implicit $mode, implicit $exec
; GFX90A_GFX940-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
; GFX90A_GFX940-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
; GFX90A_GFX940-NEXT: [[V_MOV_B32_dpp3:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY15]], [[V_ADD_F32_e64_2]], 280, 15, 15, 0, implicit $exec
; GFX90A_GFX940-NEXT: [[V_ADD_F32_e64_3:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_2]], 0, [[V_MOV_B32_dpp3]], 0, 0, implicit $mode, implicit $exec
; GFX90A_GFX940-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
; GFX90A_GFX940-NEXT: [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
; GFX90A_GFX940-NEXT: [[V_MOV_B32_dpp4:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY16]], [[V_ADD_F32_e64_3]], 322, 10, 15, 0, implicit $exec
; GFX90A_GFX940-NEXT: [[V_ADD_F32_e64_4:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_3]], 0, [[V_MOV_B32_dpp4]], 0, 0, implicit $mode, implicit $exec
; GFX90A_GFX940-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]]
; GFX90A_GFX940-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
; GFX90A_GFX940-NEXT: [[V_MOV_B32_dpp5:%[0-9]+]]:vgpr_32 = V_MOV_B32_dpp [[COPY17]], [[V_ADD_F32_e64_4]], 323, 12, 15, 0, implicit $exec
; GFX90A_GFX940-NEXT: [[V_ADD_F32_e64_5:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[V_ADD_F32_e64_4]], 0, [[V_MOV_B32_dpp5]], 0, 0, implicit $mode, implicit $exec
; GFX90A_GFX940-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 63
; GFX90A_GFX940-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], [[S_MOV_B32_4]]
; GFX90A_GFX940-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 63
; GFX90A_GFX940-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sreg_32 = V_READLANE_B32 [[V_ADD_F32_e64_5]], [[S_MOV_B32_3]]
; GFX90A_GFX940-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[V_READLANE_B32_]]
; GFX90A_GFX940-NEXT: [[STRICT_WWM:%[0-9]+]]:vgpr_32 = STRICT_WWM [[COPY18]], implicit $exec
; GFX90A_GFX940-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_1]]
; GFX90A_GFX940-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_MBCNT_HI_U32_B32_e64_]], [[COPY19]], implicit $exec
; GFX90A_GFX940-NEXT: [[SI_IF1:%[0-9]+]]:sreg_64_xexec = SI_IF [[V_CMP_EQ_U32_e64_]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX90A_GFX940-NEXT: S_BRANCH %bb.3
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: bb.3 (%ir-block.35):
; GFX90A_GFX940-NEXT: bb.3 (%ir-block.31):
; GFX90A_GFX940-NEXT: successors: %bb.4(0x80000000)
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
@@ -211,7 +210,7 @@ define amdgpu_ps void @global_atomic_fadd_f32_saddr_no_rtn_atomicrmw(ptr addrspa
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: SI_END_CF [[SI_IF1]], implicit-def $exec, implicit-def $scc, implicit $exec
; GFX90A_GFX940-NEXT: {{ $}}
; GFX90A_GFX940-NEXT: bb.5 (%ir-block.37):
; GFX90A_GFX940-NEXT: bb.5 (%ir-block.33):
; GFX90A_GFX940-NEXT: SI_END_CF [[SI_IF]], implicit-def $exec, implicit-def $scc, implicit $exec
; GFX90A_GFX940-NEXT: S_ENDPGM 0
%ret = atomicrmw fadd ptr addrspace(1) %ptr, float %data syncscope("wavefront") monotonic