JIT: Emit mulx for GT_MULHI and GT_MUL_LONG if BMI2 is available #116198

Merged: 24 commits (Jun 20, 2025)

Commits
b6fbd02
WIP: Emit mulx for GT_MULHI
Daniel-Svensson May 30, 2025
b8d054f
* Handle containment for GT_MUL_LONG on x86
Daniel-Svensson May 31, 2025
4ae76cb
update comments
Daniel-Svensson May 31, 2025
9f84b3f
merge upstream/main
Daniel-Svensson May 31, 2025
1266195
update after merge
Daniel-Svensson May 31, 2025
8929847
* remove move instruction since it is handled by lsra
Daniel-Svensson May 31, 2025
f5d77fc
minor formatting fixes
Daniel-Svensson May 31, 2025
feeb24b
cleanup
Daniel-Svensson Jun 1, 2025
59ecc67
Ensure magic number for GT_MULHI for division with constant, is put i…
Daniel-Svensson Jun 2, 2025
0ff6c20
only swap operands for GT_MULHI and GT_MUL_LONG
Daniel-Svensson Jun 2, 2025
e56ee0a
fix formatting
Daniel-Svensson Jun 2, 2025
ce9e87b
Merge remote-tracking branch 'upstream/main' into x86_bmi_mulhi
Daniel-Svensson Jun 2, 2025
691e442
Fix operand order
Daniel-Svensson Jun 2, 2025
280022a
merge upstream/main
Daniel-Svensson Jun 12, 2025
46ee7af
Fixes after merge:
Daniel-Svensson Jun 12, 2025
7854142
fix review comment
Daniel-Svensson Jun 13, 2025
5f848c6
kill rdx register for mulx instead of specifying as fixed register fo…
Daniel-Svensson Jun 13, 2025
82ece23
fix format
Daniel-Svensson Jun 13, 2025
04450b6
remove register preference for mul, it does only make sense for exten…
Daniel-Svensson Jun 13, 2025
45625b7
fix formatting
Daniel-Svensson Jun 13, 2025
7c8dcfb
remove swap in lowering
Daniel-Svensson Jun 14, 2025
8ab4c9d
update fixed reg in lowering for division by constant
Daniel-Svensson Jun 14, 2025
cbd5824
change from isUsedFromMemory to isContained()
Daniel-Svensson Jun 16, 2025
ab4ec11
Fix review comments
Daniel-Svensson Jun 18, 2025
71 changes: 54 additions & 17 deletions src/coreclr/jit/codegenxarch.cpp
@@ -822,38 +822,75 @@ void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
// to get the high bits of the multiply, we are constrained to using the
// 1-op form: RDX:RAX = RAX * rm
// The 3-op form (Rx=Ry*Rz) does not support it.

// When BMI2 is available, we can use the MULX instruction to get the high bits
genConsumeOperands(treeNode->AsOp());

GenTree* regOp = op1;
GenTree* rmOp = op2;

// Set rmOp to the memory operand (if any)
if (op1->isUsedFromMemory() || (op2->isUsedFromReg() && (op2->GetRegNum() == REG_RAX)))
if (op1->isUsedFromMemory())
{
regOp = op2;
rmOp = op1;
}
assert(regOp->isUsedFromReg());

// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, REG_RAX, regOp->GetRegNum(), /* canSkip */ true);

instruction ins;
if ((treeNode->gtFlags & GTF_UNSIGNED) == 0)
if (treeNode->IsUnsigned() && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
ins = INS_imulEAX;
if (rmOp->isUsedFromReg() && (rmOp->GetRegNum() == REG_RDX))
{
std::swap(regOp, rmOp);
}

// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, REG_RDX, regOp->GetRegNum(), /* canSkip */ true);

if (treeNode->OperIs(GT_MULHI))
{
// emit MULX instruction, use targetReg twice to only store high result
inst_RV_RV_TT(INS_mulx, size, targetReg, targetReg, rmOp, /* isRMW */ false, INS_OPTS_NONE);
}
else
{
#if TARGET_64BIT
assert(false);
#else
assert(treeNode->OperIs(GT_MUL_LONG));

// emit MULX instruction
regNumber hiReg = treeNode->AsMultiRegOp()->GetRegByIndex(1);
inst_RV_RV_TT(INS_mulx, size, hiReg, targetReg, rmOp, /* isRMW */ false, INS_OPTS_NONE);
#endif
}
}
else
else // Generate MUL or IMUL instruction
{
ins = INS_mulEAX;
}
emit->emitInsBinary(ins, size, treeNode, rmOp);
// If op2 is already present in RAX use that as implicit operand
if (rmOp->isUsedFromReg() && (rmOp->GetRegNum() == REG_RAX))
{
std::swap(regOp, rmOp);
}

// Move the result to the desired register, if necessary
if (treeNode->OperIs(GT_MULHI))
{
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
// Setup targetReg when neither of the source operands was a matching register
inst_Mov(targetType, REG_RAX, regOp->GetRegNum(), /* canSkip */ true);

instruction ins;
if (!treeNode->IsUnsigned())
{
ins = INS_imulEAX;
}
else
{
ins = INS_mulEAX;
}
emit->emitInsBinary(ins, size, treeNode, rmOp);

// Move the result to the desired register, if necessary
if (treeNode->OperIs(GT_MULHI))
{
assert(targetReg == REG_RDX);
inst_Mov(targetType, targetReg, REG_RDX, /* canSkip */ true);
}
}

genProduceReg(treeNode);
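For readers less familiar with the instruction: the BMI2 MULX form emitted by the new path computes the same widening product as the one-operand MUL, but takes one source implicitly from RDX, writes the high and low halves to two explicitly named destination registers, leaves RAX untouched, and does not modify the flags. For GT_MULHI the code above passes targetReg twice so only the high half is kept. A minimal standalone sketch of the semantics using the compiler intrinsic (illustrative only, not JIT code; assumes an x64 compiler exposing `_mulx_u64` and a build with BMI2 enabled, e.g. `-mbmi2`):

```cpp
#include <immintrin.h> // _mulx_u64 (BMI2)
#include <cstdio>

int main()
{
    unsigned long long a = 0x123456789ABCDEF0ull;
    unsigned long long b = 0x0FEDCBA987654321ull;

    // One-op MUL form:  RDX:RAX = RAX * r/m   (clobbers RAX and RDX, sets flags)
    // MULX form:        hi:lo   = RDX * r/m   (hi/lo can be any registers,
    //                                           RAX untouched, flags untouched)
    unsigned long long hi = 0;
    unsigned long long lo = _mulx_u64(a, b, &hi);

    printf("lo=%016llx hi=%016llx\n", lo, hi);
    return 0;
}
```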
7 changes: 4 additions & 3 deletions src/coreclr/jit/lower.cpp
@@ -7934,11 +7934,12 @@ bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod)
}

#ifdef TARGET_XARCH
// force input transformation to RAX because the following MULHI will kill RDX:RAX anyway and LSRA often causes
// redundant copies otherwise
// force input transformation to RAX/RDX because the following MULHI will kill RDX:RAX (RDX if mulx is
// available) anyway and LSRA often causes redundant copies otherwise
if (firstNode && !simpleMul)
{
adjustedDividend->SetRegNum(REG_RAX);
regNumber implicitReg = comp->compOpportunisticallyDependsOn(InstructionSet_AVX2) ? REG_RDX : REG_RAX;
adjustedDividend->SetRegNum(implicitReg);
}
#endif

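The MULHI being tuned here comes from the unsigned divide-by-constant transform: in the general case, `LowerUnsignedDivOrMod` replaces `n / c` with a multiply by a precomputed "magic" reciprocal and keeps only the high bits of the widening product. A small self-contained illustration of that idea for division by 10 (a model of the transform, not the JIT's implementation; 0xCCCCCCCD is the standard 32-bit magic constant):

```cpp
#include <cstdint>
#include <cassert>

// n / 10 for 32-bit unsigned n, rewritten as multiply-high plus shift.
// 0xCCCCCCCD == ceil(2^35 / 10), so (n * 0xCCCCCCCD) >> 35 == n / 10.
static uint32_t DivideBy10(uint32_t n)
{
    uint64_t product = (uint64_t)n * 0xCCCCCCCDull; // widening multiply
    return (uint32_t)(product >> 35);               // keep the high bits
}

int main()
{
    for (uint32_t n = 0; n < 1000000; n++)
    {
        assert(DivideBy10(n) == n / 10);
    }
    return 0;
}
```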
7 changes: 7 additions & 0 deletions src/coreclr/jit/lower.h
@@ -496,6 +496,13 @@ class Lowering final : public Phase

#endif // TARGET_XARCH

#if TARGET_X86
if (parentNode->OperIs(GT_MUL_LONG))
{
return genTypeSize(childNode->TypeGet()) == operatorSize / 2;
}
#endif // TARGET_X86

return genTypeSize(childNode->TypeGet()) == operatorSize;
}

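The new lower.h check exists because on 32-bit x86 GT_MUL_LONG produces a 64-bit (TYP_LONG) result from two 32-bit (TYP_INT) operands, so a memory operand that is half the operator size is still a legal containment candidate. In source terms the node corresponds to a widening multiply roughly like the sketch below (illustrative only):

```cpp
#include <cstdint>
#include <cstdio>

// Widening 32x32 -> 64 unsigned multiply: roughly what GT_MUL_LONG represents
// on 32-bit x86. The operands are INT even though the node's type is LONG,
// which is why the containment check compares against half the operator size.
static uint64_t MulLong(uint32_t a, uint32_t b)
{
    return (uint64_t)a * b; // low half in EAX, high half in EDX with MUL/MULX
}

int main()
{
    printf("%llu\n", (unsigned long long)MulLong(0xFFFFFFFFu, 10u));
    return 0;
}
```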
11 changes: 7 additions & 4 deletions src/coreclr/jit/lowerxarch.cpp
@@ -7851,14 +7851,15 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
bool isSafeToContainOp1 = true;
bool isSafeToContainOp2 = true;

bool isUnsignedMultiply = ((node->gtFlags & GTF_UNSIGNED) != 0);
bool isUnsignedMultiply = node->IsUnsigned();
bool requiresOverflowCheck = node->gtOverflowEx();
bool useLeaEncoding = false;
GenTree* memOp = nullptr;

bool hasImpliedFirstOperand = false;
GenTreeIntConCommon* imm = nullptr;
GenTree* other = nullptr;
var_types nodeType = node->TypeGet();

// Multiply should never be using small types
assert(!varTypeIsSmall(node->TypeGet()));
@@ -7878,6 +7879,8 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
else if (node->OperIs(GT_MUL_LONG))
{
hasImpliedFirstOperand = true;
// GT_MUL_LONG has node type LONG but works on INT operands
nodeType = TYP_INT;
}
#endif
else if (IsContainableImmed(node, op2) || IsContainableImmed(node, op1))
@@ -7914,7 +7917,7 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
//
if (memOp == nullptr)
{
if ((op2->TypeGet() == node->TypeGet()) && IsContainableMemoryOp(op2))
if ((op2->TypeGet() == nodeType) && IsContainableMemoryOp(op2))
{
isSafeToContainOp2 = IsSafeToContainMem(node, op2);
if (isSafeToContainOp2)
@@ -7923,7 +7926,7 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
}
}

if ((memOp == nullptr) && (op1->TypeGet() == node->TypeGet()) && IsContainableMemoryOp(op1))
if ((memOp == nullptr) && (op1->TypeGet() == nodeType) && IsContainableMemoryOp(op1))
{
isSafeToContainOp1 = IsSafeToContainMem(node, op1);
if (isSafeToContainOp1)
@@ -7934,7 +7937,7 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
}
else
{
if ((memOp->TypeGet() != node->TypeGet()))
if ((memOp->TypeGet() != nodeType))
{
memOp = nullptr;
}
22 changes: 21 additions & 1 deletion src/coreclr/jit/lsrabuild.cpp
@@ -782,7 +782,27 @@ regMaskTP LinearScan::getKillSetForMul(GenTreeOp* mulNode)
regMaskTP killMask = RBM_NONE;
#ifdef TARGET_XARCH
assert(mulNode->OperIsMul());
if (!mulNode->OperIs(GT_MUL) || (((mulNode->gtFlags & GTF_UNSIGNED) != 0) && mulNode->gtOverflowEx()))
if (!mulNode->OperIs(GT_MUL))
{
// If we can use the mulx instruction, we don't need to kill RAX
if (mulNode->IsUnsigned() && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
// If an operand is contained, we specify RDX as a fixed-use register for the other operand, so we don't need to kill it.
if (mulNode->gtGetOp1()->isContained() || mulNode->gtGetOp2()->isContained())
{
killMask = RBM_NONE;
}
else
{
killMask = RBM_RDX;
}
}
else
{
killMask = RBM_RAX | RBM_RDX;
}
}
else if (mulNode->IsUnsigned() && mulNode->gtOverflowEx())
{
killMask = RBM_RAX | RBM_RDX;
}
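A compact way to read the updated kill-set logic: the one-operand MUL/IMUL forms clobber both RAX and RDX, MULX only ever involves RDX, and when an operand is contained the build phase reserves RDX as a fixed use instead, so nothing needs to be killed at all. A standalone model of that decision (simplified names, not the JIT's actual register-mask types):

```cpp
#include <cstdint>
#include <cstdio>

enum : uint32_t { KILL_NONE = 0, KILL_RAX = 1u << 0, KILL_RDX = 1u << 1 };

// Simplified model of getKillSetForMul after this change.
static uint32_t MulKillSet(bool isMulHiOrMulLong, bool isUnsigned, bool hasBmi2,
                           bool overflowCheck, bool oneOperandContained)
{
    if (isMulHiOrMulLong)
    {
        if (isUnsigned && hasBmi2)
        {
            // mulx: RAX is never touched; RDX is either reserved as a fixed use
            // (when an operand is contained) or killed outright.
            return oneOperandContained ? KILL_NONE : KILL_RDX;
        }
        return KILL_RAX | KILL_RDX; // one-op MUL/IMUL form
    }
    // Plain GT_MUL only needs the widening one-op form for unsigned overflow checks.
    return (isUnsigned && overflowCheck) ? (KILL_RAX | KILL_RDX) : KILL_NONE;
}

int main()
{
    // GT_MULHI, unsigned, BMI2 available, both operands in registers: RDX only.
    printf("kill mask: 0x%x\n", MulKillSet(true, true, true, false, false));
    return 0;
}
```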
106 changes: 70 additions & 36 deletions src/coreclr/jit/lsraxarch.cpp
@@ -795,6 +795,14 @@ bool LinearScan::isRMWRegOper(GenTree* tree)
}
return (!tree->gtGetOp2()->isContainedIntOrIImmed() && !tree->gtGetOp1()->isContainedIntOrIImmed());
}
#ifdef TARGET_X86
case GT_MUL_LONG:
#endif
case GT_MULHI:
{
// MUL, IMUL are RMW but mulx is not (which is used for unsigned operands when BMI2 is available)
return !(tree->IsUnsigned() && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX2));
}

#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
@@ -3223,18 +3231,22 @@ int LinearScan::BuildMul(GenTree* tree)
return BuildSimple(tree);
}

// ToDo-APX : imul currently doesn't have rex2 support. So, cannot use R16-R31.
int srcCount = BuildBinaryUses(tree->AsOp());
bool isUnsignedMultiply = tree->IsUnsigned();
bool requiresOverflowCheck = tree->gtOverflowEx();
bool useMulx =
!tree->OperIs(GT_MUL) && isUnsignedMultiply && compiler->compOpportunisticallyDependsOn(InstructionSet_AVX2);

// ToDo-APX : imul currently doesn't have rex2 support. So, cannot use R16-R31.
int srcCount = 0;
int dstCount = 1;
SingleTypeRegSet dstCandidates = RBM_NONE;

bool isUnsignedMultiply = ((tree->gtFlags & GTF_UNSIGNED) != 0);
bool requiresOverflowCheck = tree->gtOverflowEx();

// There are three forms of x86 multiply:
// There are three forms of x86 multiply in the base instruction set:
// one-op form: RDX:RAX = RAX * r/m
// two-op form: reg *= r/m
// three-op form: reg = r/m * imm
// If the BMI2 instruction set is supported there is an additional unsigned multiply
// mulx reg1:reg2 = RDX * reg3/m

// This special widening 32x32->64 MUL is not used on x64
#if defined(TARGET_X86)
@@ -3244,42 +3256,64 @@
assert((tree->gtFlags & GTF_MUL_64RSLT) == 0);
}

// We do use the widening multiply to implement
// the overflow checking for unsigned multiply
//
if (isUnsignedMultiply && requiresOverflowCheck)
if (useMulx)
{
// The only encoding provided is RDX:RAX = RAX * rm
//
// Here we set RAX as the only destination candidate
// In LSRA we set the kill set for this operation to RBM_RAX|RBM_RDX
//
dstCandidates = SRBM_RAX;
}
else if (tree->OperIs(GT_MULHI))
{
// Have to use the encoding:RDX:RAX = RAX * rm. Since we only care about the
// upper 32 bits of the result set the destination candidate to REG_RDX.
dstCandidates = SRBM_RDX;
}
// If one of the operands is contained, specify RDX for the other operand
SingleTypeRegSet srcCandidates1 = RBM_NONE;
SingleTypeRegSet srcCandidates2 = RBM_NONE;
if (op1->isContained())
{
assert(!op2->isContained());
srcCandidates2 = SRBM_RDX;
}
else if (op2->isContained())
{
srcCandidates1 = SRBM_RDX;
}

srcCount = BuildOperandUses(op1, srcCandidates1);
srcCount += BuildOperandUses(op2, srcCandidates2);

#if defined(TARGET_X86)
else if (tree->OperIs(GT_MUL_LONG))
{
// have to use the encoding:RDX:RAX = RAX * rm
dstCandidates = SRBM_RAX | SRBM_RDX;
dstCount = 2;
}
if (tree->OperIs(GT_MUL_LONG))
{
dstCount = 2;
}
#endif
GenTree* containedMemOp = nullptr;
if (op1->isContained() && !op1->IsCnsIntOrI())
{
assert(!op2->isContained() || op2->IsCnsIntOrI());
containedMemOp = op1;
}
else if (op2->isContained() && !op2->IsCnsIntOrI())
else
{
containedMemOp = op2;
assert(!(op1->isContained() && !op1->IsCnsIntOrI()) || !(op2->isContained() && !op2->IsCnsIntOrI()));
srcCount = BuildBinaryUses(tree->AsOp());

// We do use the widening multiply to implement
// the overflow checking for unsigned multiply
//
if (isUnsignedMultiply && requiresOverflowCheck)
{
// The only encoding provided is RDX:RAX = RAX * rm
//
// Here we set RAX as the only destination candidate
// In LSRA we set the kill set for this operation to RBM_RAX|RBM_RDX
//
dstCandidates = SRBM_RAX;
}
else if (tree->OperIs(GT_MULHI))
{
// Have to use the encoding:RDX:RAX = RAX * rm. Since we only care about the
// upper 32 bits of the result set the destination candidate to REG_RDX.
dstCandidates = SRBM_RDX;
}
#if defined(TARGET_X86)
else if (tree->OperIs(GT_MUL_LONG))
{
// We have to use the encoding:RDX:RAX = RAX * rm
dstCandidates = SRBM_RAX | SRBM_RDX;
dstCount = 2;
}
#endif
}

regMaskTP killMask = getKillSetForMul(tree->AsOp());
BuildDefWithKills(tree, dstCount, dstCandidates, killMask);
return srcCount;