[AMDGPU] Enable atomic optimizer for 64 bit divergent values #96473
Closed
Changes from all 15 commits:
b002711 [AMDGPU] Extend readlane, writelane and readfirstlane intrinsic lower… (vikramRH)
881e116 [AMDGPU] Extend permlane16, permlanex16 and permlane64 intrinsic lowe… (vikramRH)
827d209 fix builtin handling (vikramRH)
6047848 Review comments (vikramRH)
8a36f07 updated test cases, added new pointer/vector tests (vikramRH)
5a4c4c4 Take over recent changes from original patch (vikramRH)
12155f5 add hepler to emit N-ary builtins (vikramRH)
6714741 Merge branch 'main' into permlane_generic (vikramRH)
40381ca update with latest changes from #89217 (vikramRH)
4e4cdd9 clang format (vikramRH)
fe9acb8 Merge branch 'main' into permlane_generic (vikramRH)
0a0f93e Merge branch 'main' into permlane_generic (vikramRH)
2091436 [AMDGPU] Enable atomic optimizer for 64 bit values (vikramRH)
0bf7890 Merge branch 'main' into atomicOpt_64 (vikramRH)
b4f0198 review comments (vikramRH)
@@ -178,6 +178,20 @@ bool AMDGPUAtomicOptimizerImpl::run(Function &F) {
   return Changed;
 }
 
+static bool shouldOptimize(Type *Ty) {
+  switch (Ty->getTypeID()) {
+  case Type::FloatTyID:
+  case Type::DoubleTyID:
+    return true;
+  case Type::IntegerTyID: {
+    if (Ty->getIntegerBitWidth() == 32 || Ty->getIntegerBitWidth() == 64)
+      return true;
+  }
+  default:
+    return false;
+  }
+}
+
 void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
   // Early exit for unhandled address space atomic instructions.
   switch (I.getPointerAddressSpace()) {

Review comment on shouldOptimize: Don't forget pointers.
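Two notes on the hunk above: the IntegerTyID case intentionally falls through to default when the width is neither 32 nor 64, and the reviewer asked that pointers not be forgotten. A hypothetical sketch of that pointer extension follows; it is not part of this PR, and the DataLayout parameter is an assumption (a Type alone does not carry a pointer's storage size; this presumes the file's usual includes plus llvm/IR/DataLayout.h):

```cpp
// Hypothetical follow-up sketch (not in this PR): extend the predicate to
// pointer-typed atomics. Pointers report no getIntegerBitWidth(), so the
// assumed DataLayout parameter supplies their storage size.
static bool shouldOptimizeWithPointers(const DataLayout &DL, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
  case Type::DoubleTyID:
    return true;
  case Type::IntegerTyID:
  case Type::PointerTyID: {
    unsigned Bits = DL.getTypeSizeInBits(Ty).getFixedValue();
    return Bits == 32 || Bits == 64;
  }
  default:
    return false;
  }
}
```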
@@ -227,11 +241,10 @@ void AMDGPUAtomicOptimizerImpl::visitAtomicRMWInst(AtomicRMWInst &I) {
   const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));
 
   // If the value operand is divergent, each lane is contributing a different
-  // value to the atomic calculation. We can only optimize divergent values if
-  // we have DPP available on our subtarget, and the atomic operation is 32
-  // bits.
-  if (ValDivergent &&
-      (!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
+  // value to the atomic calculation. We only optimize divergent values if
+  // we have DPP available on our subtarget, and the atomic operation is of
+  // 32/64 bit integer, float or double type.
+  if (ValDivergent && (!ST->hasDPP() || !shouldOptimize(I.getType()))) {
     return;
   }
 
@@ -310,11 +323,10 @@ void AMDGPUAtomicOptimizerImpl::visitIntrinsicInst(IntrinsicInst &I) {
   const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));
 
   // If the value operand is divergent, each lane is contributing a different
-  // value to the atomic calculation. We can only optimize divergent values if
-  // we have DPP available on our subtarget, and the atomic operation is 32
-  // bits.
-  if (ValDivergent &&
-      (!ST->hasDPP() || DL->getTypeSizeInBits(I.getType()) != 32)) {
+  // value to the atomic calculation. We only optimize divergent values if
+  // we have DPP available on our subtarget, and the atomic operation is of
+  // 32/64 bit integer, float or double type.
+  if (ValDivergent && (!ST->hasDPP() || !shouldOptimize(I.getType()))) {
     return;
   }
 
@@ -386,7 +398,6 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
                                                  Value *V,
                                                  Value *const Identity) const {
   Type *AtomicTy = V->getType();
-  Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());
   Module *M = B.GetInsertBlock()->getModule();
   Function *UpdateDPP =
       Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
@@ -402,34 +413,30 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
 
   // Reduce within each pair of rows (i.e. 32 lanes).
   assert(ST->hasPermLaneX16());
-  V = B.CreateBitCast(V, IntNTy);
   Value *Permlanex16Call = B.CreateIntrinsic(
       V->getType(), Intrinsic::amdgcn_permlanex16,
       {V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});
-  V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
-                          B.CreateBitCast(Permlanex16Call, AtomicTy));
+  V = buildNonAtomicBinOp(B, Op, V, Permlanex16Call);
   if (ST->isWave32()) {
     return V;
   }
 
   if (ST->hasPermLane64()) {
     // Reduce across the upper and lower 32 lanes.
-    V = B.CreateBitCast(V, IntNTy);
     Value *Permlane64Call =
         B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_permlane64, V);
-    return buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
-                               B.CreateBitCast(Permlane64Call, AtomicTy));
+    return buildNonAtomicBinOp(B, Op, V, Permlane64Call);
   }
 
   // Pick an arbitrary lane from 0..31 and an arbitrary lane from 32..63 and
   // combine them with a scalar operation.
   Function *ReadLane =
-      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
-  V = B.CreateBitCast(V, IntNTy);
+      Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, AtomicTy);
   Value *Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
   Value *Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
-  return buildNonAtomicBinOp(B, Op, B.CreateBitCast(Lane0, AtomicTy),
-                             B.CreateBitCast(Lane32, AtomicTy));
+  return buildNonAtomicBinOp(B, Op, Lane0, Lane32);
 }

Review comment on the bitcast removals: Please submit an NFC cleanup patch that just removes unnecessary bitcasting, before adding support for new atomic operations.
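For intuition about what these reduction stages compute, here is a small scalar model in plain C++ (an illustration only, not LLVM code; integer add stands in for the binop, and the per-16-lane DPP shift steps that precede this hunk are collapsed into a loop):

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Illustration only: a scalar stand-in for the cross-lane stages that
// buildReduction emits on a 64-lane wave. permlanex16(..., -1, -1, ...)
// swaps the 16-lane halves of each 32-lane row; readlane(N) broadcasts
// lane N. The DPP row-shift steps of the real code are the loop at the top.
static int64_t waveReduceAddModel(std::array<int64_t, 64> Lanes) {
  // Stage 1 (DPP shifts in the real code): reduce within each 16-lane group.
  for (int G = 0; G < 4; ++G) {
    int64_t Sum = 0;
    for (int L = 0; L < 16; ++L)
      Sum += Lanes[G * 16 + L];
    for (int L = 0; L < 16; ++L)
      Lanes[G * 16 + L] = Sum;
  }
  // Stage 2 (permlanex16): combine the two 16-lane halves of each row.
  std::array<int64_t, 64> Swapped{};
  for (int L = 0; L < 64; ++L)
    Swapped[L] = Lanes[(L & 32) | ((L + 16) & 31)];
  for (int L = 0; L < 64; ++L)
    Lanes[L] += Swapped[L];
  // Stage 3 (readlane 0 / readlane 32): combine the two 32-lane rows.
  return Lanes[0] + Lanes[32];
}

int main() {
  std::array<int64_t, 64> L{};
  for (int I = 0; I < 64; ++I)
    L[I] = I;
  std::printf("%lld\n", (long long)waveReduceAddModel(L)); // 2016 = 63*64/2
}
```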
 // Use the builder to create an inclusive scan of V across the wavefront, with
@@ -438,7 +445,6 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
                                            AtomicRMWInst::BinOp Op, Value *V,
                                            Value *Identity) const {
   Type *AtomicTy = V->getType();
-  Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());
 
   Module *M = B.GetInsertBlock()->getModule();
   Function *UpdateDPP =
@@ -470,28 +476,26 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
     // Combine lane 15 into lanes 16..31 (and, for wave 64, lane 47 into lanes
     // 48..63).
     assert(ST->hasPermLaneX16());
-    V = B.CreateBitCast(V, IntNTy);
     Value *PermX = B.CreateIntrinsic(
         V->getType(), Intrinsic::amdgcn_permlanex16,
         {V, V, B.getInt32(-1), B.getInt32(-1), B.getFalse(), B.getFalse()});
 
     Value *UpdateDPPCall =
-        B.CreateCall(UpdateDPP, {Identity, B.CreateBitCast(PermX, AtomicTy),
+        B.CreateCall(UpdateDPP, {Identity, PermX,
                                  B.getInt32(DPP::QUAD_PERM_ID), B.getInt32(0xa),
                                  B.getInt32(0xf), B.getFalse()});
-    V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy), UpdateDPPCall);
+    V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
 
     if (!ST->isWave32()) {
       // Combine lane 31 into lanes 32..63.
-      V = B.CreateBitCast(V, IntNTy);
       Value *const Lane31 = B.CreateIntrinsic(
           V->getType(), Intrinsic::amdgcn_readlane, {V, B.getInt32(31)});
 
       Value *UpdateDPPCall = B.CreateCall(
           UpdateDPP, {Identity, Lane31, B.getInt32(DPP::QUAD_PERM_ID),
                       B.getInt32(0xc), B.getInt32(0xf), B.getFalse()});
 
-      V = buildNonAtomicBinOp(B, Op, B.CreateBitCast(V, AtomicTy),
-                              UpdateDPPCall);
+      V = buildNonAtomicBinOp(B, Op, V, UpdateDPPCall);
     }
   }
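The wave-64 stitching in this hunk can likewise be modeled in scalar C++ (illustration only; it assumes each 16-lane group already holds an inclusive scan of itself, with add as the binop):

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Illustration only: scalar stand-in for the scan stitching buildScan does
// after the per-row DPP scan. "Combine lane 15 into lanes 16..31" is the
// permlanex16 + update_dpp (bank_mask 0xa) step; "combine lane 31 into
// lanes 32..63" is the readlane(31) + update_dpp (bank_mask 0xc) step.
static void spliceGroupScansAddModel(std::array<int64_t, 64> &Lanes) {
  for (int Row = 0; Row < 64; Row += 32) {
    int64_t Lane15 = Lanes[Row + 15]; // total of the row's low 16 lanes
    for (int L = 16; L < 32; ++L)
      Lanes[Row + L] += Lane15;
  }
  int64_t Lane31 = Lanes[31]; // total of lanes 0..31
  for (int L = 32; L < 64; ++L)
    Lanes[L] += Lane31;
}

int main() {
  std::array<int64_t, 64> Lanes;
  for (int L = 0; L < 64; ++L)
    Lanes[L] = L % 16 + 1; // per-16-lane inclusive scan of an all-ones input
  spliceGroupScansAddModel(Lanes);
  std::printf("%lld\n", (long long)Lanes[63]); // 64: the full inclusive scan
}
```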
@@ -503,8 +507,6 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
 Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
                                                   Value *Identity) const {
   Type *AtomicTy = V->getType();
-  Type *IntNTy = B.getIntNTy(AtomicTy->getPrimitiveSizeInBits());
 
   Module *M = B.GetInsertBlock()->getModule();
   Function *UpdateDPP =
       Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
@@ -515,9 +517,9 @@ Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
                          B.getInt32(0xf), B.getFalse()});
   } else {
     Function *ReadLane = Intrinsic::getDeclaration(
-        M, Intrinsic::amdgcn_readlane, B.getInt32Ty());
+        M, Intrinsic::amdgcn_readlane, AtomicTy);
     Function *WriteLane = Intrinsic::getDeclaration(
-        M, Intrinsic::amdgcn_writelane, B.getInt32Ty());
+        M, Intrinsic::amdgcn_writelane, AtomicTy);
 
     // On GFX10 all DPP operations are confined to a single row. To get cross-
     // row operations we have to use permlane or readlane.
@@ -529,22 +531,18 @@ Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
     // Copy the old lane 15 to the new lane 16.
     V = B.CreateCall(
         WriteLane,
-        {B.CreateCall(ReadLane, {B.CreateBitCast(Old, IntNTy), B.getInt32(15)}),
-         B.getInt32(16), B.CreateBitCast(V, IntNTy)});
-    V = B.CreateBitCast(V, AtomicTy);
+        {B.CreateCall(ReadLane, {Old, B.getInt32(15)}),
+         B.getInt32(16), V});
     if (!ST->isWave32()) {
       // Copy the old lane 31 to the new lane 32.
-      V = B.CreateBitCast(V, IntNTy);
       V = B.CreateCall(WriteLane,
-                       {B.CreateCall(ReadLane, {B.CreateBitCast(Old, IntNTy),
-                                                B.getInt32(31)}),
+                       {B.CreateCall(ReadLane, {Old, B.getInt32(31)}),
                         B.getInt32(32), V});
 
       // Copy the old lane 47 to the new lane 48.
       V = B.CreateCall(
           WriteLane,
           {B.CreateCall(ReadLane, {Old, B.getInt32(47)}), B.getInt32(48), V});
-      V = B.CreateBitCast(V, AtomicTy);
     }
   }
 
@@ -584,24 +582,20 @@ std::pair<Value *, Value *> AMDGPUAtomicOptimizerImpl::buildScanIteratively(
   auto *FF1 =
       B.CreateIntrinsic(Intrinsic::cttz, WaveTy, {ActiveBits, B.getTrue()});
 
-  Type *IntNTy = B.getIntNTy(Ty->getPrimitiveSizeInBits());
-  auto *LaneIdxInt = B.CreateTrunc(FF1, IntNTy);
+  auto *LaneIdxInt = B.CreateTrunc(FF1, B.getInt32Ty());
 
   // Get the value required for atomic operation
-  V = B.CreateBitCast(V, IntNTy);
   Value *LaneValue = B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_readlane,
                                        {V, LaneIdxInt});
-  LaneValue = B.CreateBitCast(LaneValue, Ty);
 
   // Perform writelane if intermediate scan results are required later in the
   // kernel computations
   Value *OldValue = nullptr;
   if (NeedResult) {
     OldValue =
-        B.CreateIntrinsic(IntNTy, Intrinsic::amdgcn_writelane,
-                          {B.CreateBitCast(Accumulator, IntNTy), LaneIdxInt,
-                           B.CreateBitCast(OldValuePhi, IntNTy)});
-    OldValue = B.CreateBitCast(OldValue, Ty);
+        B.CreateIntrinsic(V->getType(), Intrinsic::amdgcn_writelane,
+                          {Accumulator, LaneIdxInt, OldValuePhi});
     OldValuePhi->addIncoming(OldValue, ComputeLoop);
   }
 
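The surrounding loop is easier to follow in scalar form. Below is an illustrative C++ stand-in for the whole iterative scan (not LLVM code): each trip processes the lowest set bit of the EXEC mask, mirroring the cttz/readlane/writelane sequence in this hunk.

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

// Illustration only: scalar stand-in for buildScanIteratively. ActiveBits
// plays the EXEC mask; "Vals" holds each lane's contribution and "Old"
// receives the exclusive-scan value per active lane (the writelane result).
static int64_t iterativeScanAddModel(uint64_t ActiveBits,
                                     const int64_t Vals[64], int64_t Old[64]) {
  int64_t Accumulator = 0; // the op's identity (0 for add)
  while (ActiveBits) {
    unsigned LaneIdx = std::countr_zero(ActiveBits); // cttz
    Old[LaneIdx] = Accumulator;                      // writelane of old value
    Accumulator += Vals[LaneIdx];                    // readlane + binop
    ActiveBits &= ActiveBits - 1;                    // retire that lane
  }
  return Accumulator; // wave-wide total, used for the single atomic
}

int main() {
  int64_t Vals[64], Old[64] = {};
  for (int I = 0; I < 64; ++I)
    Vals[I] = 1;
  uint64_t Exec = 0xF0F0F0F0F0F0F0F0ULL; // half the lanes active
  std::printf("%lld\n", (long long)iterativeScanAddModel(Exec, Vals, Old)); // 32
}
```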
@@ -700,10 +694,8 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
 
   Type *const Ty = I.getType();
-  Type *Int32Ty = B.getInt32Ty();
-  Type *IntNTy = B.getIntNTy(Ty->getPrimitiveSizeInBits());
   bool isAtomicFloatingPointTy = Ty->isFloatingPointTy();
   const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
-  auto *const VecTy = FixedVectorType::get(Int32Ty, 2);
 
   // This is the value in the atomic operation we need to combine in order to
   // reduce the number of atomic operations.
@@ -758,13 +750,8 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
   if (ScanImpl == ScanOptions::DPP) {
     // First we need to set all inactive invocations to the identity value, so
     // that they can correctly contribute to the final result.
-    V = B.CreateBitCast(V, IntNTy);
-    Identity = B.CreateBitCast(Identity, IntNTy);
-    NewV = B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, IntNTy,
+    NewV = B.CreateIntrinsic(Intrinsic::amdgcn_set_inactive, Ty,
                              {V, Identity});
-    NewV = B.CreateBitCast(NewV, Ty);
-    V = B.CreateBitCast(V, Ty);
-    Identity = B.CreateBitCast(Identity, Ty);
     if (!NeedResult && ST->hasPermLaneX16()) {
       // On GFX10 the permlanex16 instruction helps us build a reduction
       // without too many readlanes and writelanes, which are generally bad
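What set_inactive buys the DPP path, modeled in scalar C++ (illustration only): lanes outside EXEC are forced to the operation's identity, so the whole-wave (WWM) scan can read all 64 lanes without corrupting the result.

```cpp
#include <cstdint>

// Illustration only: scalar stand-in for what amdgcn.set_inactive arranges
// before the WWM scan. Active lanes keep their value; lanes not covered by
// EXEC are overwritten with the op's identity so reading them is harmless.
static void setInactiveModel(uint64_t Exec, int64_t Lanes[64],
                             int64_t Identity) {
  for (int L = 0; L < 64; ++L)
    if (!(Exec >> L & 1))
      Lanes[L] = Identity;
}
```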
@@ -778,11 +765,9 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
       // of each active lane in the wavefront. This will be our new value
       // which we will provide to the atomic operation.
       Value *const LastLaneIdx = B.getInt32(ST->getWavefrontSize() - 1);
-      assert(TyBitWidth == 32);
-      NewV = B.CreateBitCast(NewV, IntNTy);
-      NewV = B.CreateIntrinsic(IntNTy, Intrinsic::amdgcn_readlane,
+      assert(TyBitWidth == 32 || TyBitWidth == 64);
+      NewV = B.CreateIntrinsic(Ty, Intrinsic::amdgcn_readlane,
                                {NewV, LastLaneIdx});
-      NewV = B.CreateBitCast(NewV, Ty);
     }
     // Finally mark the readlanes in the WWM section.
     NewV = B.CreateIntrinsic(Intrinsic::amdgcn_strict_wwm, Ty, NewV);
@@ -922,26 +907,9 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
   // but have to handle 64-bit broadcasts with two calls to this intrinsic.
   Value *BroadcastI = nullptr;
 
-  if (TyBitWidth == 64) {
-    Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
-    Value *const ExtractLo = B.CreateTrunc(CastedPhi, Int32Ty);
-    Value *const ExtractHi =
-        B.CreateTrunc(B.CreateLShr(CastedPhi, 32), Int32Ty);
-    CallInst *const ReadFirstLaneLo = B.CreateIntrinsic(
-        Int32Ty, Intrinsic::amdgcn_readfirstlane, ExtractLo);
-    CallInst *const ReadFirstLaneHi = B.CreateIntrinsic(
-        Int32Ty, Intrinsic::amdgcn_readfirstlane, ExtractHi);
-    Value *const PartialInsert = B.CreateInsertElement(
-        PoisonValue::get(VecTy), ReadFirstLaneLo, B.getInt32(0));
-    Value *const Insert =
-        B.CreateInsertElement(PartialInsert, ReadFirstLaneHi, B.getInt32(1));
-    BroadcastI = B.CreateBitCast(Insert, Ty);
-  } else if (TyBitWidth == 32) {
-    Value *CastedPhi = B.CreateBitCast(PHI, IntNTy);
-    BroadcastI =
-        B.CreateIntrinsic(IntNTy, Intrinsic::amdgcn_readfirstlane, CastedPhi);
-    BroadcastI = B.CreateBitCast(BroadcastI, Ty);
+  if (TyBitWidth == 32 || TyBitWidth == 64) {
+    BroadcastI = B.CreateIntrinsic(Ty, Intrinsic::amdgcn_readfirstlane, PHI);
   } else {
     llvm_unreachable("Unhandled atomic bit width");
   }
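For contrast, the deleted 64-bit path can be summarized by this scalar C++ model (illustration only; readfirstlane32 is a stand-in for the 32-bit amdgcn.readfirstlane the old code invoked twice). The PR replaces all of it with a single readfirstlane on the 64-bit type.

```cpp
#include <cstdint>

// Illustration only: what the removed 64-bit broadcast computed. The old code
// split the 64-bit PHI into two 32-bit halves, read the first active lane's
// copy of each half, and reassembled them into a 64-bit value.
static uint64_t broadcast64Model(uint64_t Phi,
                                 uint32_t (*readfirstlane32)(uint32_t)) {
  uint32_t Lo = readfirstlane32(static_cast<uint32_t>(Phi));       // trunc
  uint32_t Hi = readfirstlane32(static_cast<uint32_t>(Phi >> 32)); // lshr+trunc
  return (static_cast<uint64_t>(Hi) << 32) | Lo; // insertelement + bitcast
}
```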
Review comments on shouldOptimize:
- Better name that expresses why this type is handleable.
- Also, in a follow-up, this really should cover the i16/half/bfloat and 2 x half / 2 x bfloat cases.