Skip to content

[Codegen] Make Width in getMemOperandsWithOffsetWidth a LocationSize. #83875

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion llvm/include/llvm/CodeGen/TargetInstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -1446,7 +1446,7 @@ class TargetInstrInfo : public MCInstrInfo {
/// abstraction that supports negative offsets.
virtual bool getMemOperandsWithOffsetWidth(
const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
return false;
}
Expand Down
13 changes: 7 additions & 6 deletions llvm/lib/CodeGen/MachineScheduler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1729,11 +1729,11 @@ class BaseMemOpClusterMutation : public ScheduleDAGMutation {
SUnit *SU;
SmallVector<const MachineOperand *, 4> BaseOps;
int64_t Offset;
unsigned Width;
LocationSize Width;
bool OffsetIsScalable;

MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
int64_t Offset, bool OffsetIsScalable, unsigned Width)
int64_t Offset, bool OffsetIsScalable, LocationSize Width)
: SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
Width(Width), OffsetIsScalable(OffsetIsScalable) {}

Expand Down Expand Up @@ -1866,11 +1866,12 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(

auto MemOpb = MemOpRecords[NextIdx];
unsigned ClusterLength = 2;
unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
unsigned CurrentClusterBytes = MemOpa.Width.getValue().getKnownMinValue() +
MemOpb.Width.getValue().getKnownMinValue();
if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
CurrentClusterBytes =
SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
CurrentClusterBytes = SUnit2ClusterInfo[MemOpa.SU->NodeNum].second +
MemOpb.Width.getValue().getKnownMinValue();
}

if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpa.Offset,
Expand Down Expand Up @@ -1940,7 +1941,7 @@ void BaseMemOpClusterMutation::collectMemOpRecords(
SmallVector<const MachineOperand *, 4> BaseOps;
int64_t Offset;
bool OffsetIsScalable;
unsigned Width;
LocationSize Width = 0;
if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
OffsetIsScalable, Width, TRI)) {
MemOpRecords.push_back(
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/TargetInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1365,7 +1365,7 @@ bool TargetInstrInfo::getMemOperandWithOffset(
const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
SmallVector<const MachineOperand *, 4> BaseOps;
unsigned Width;
LocationSize Width = 0;
if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
Width, TRI) ||
BaseOps.size() != 1)
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2675,7 +2675,7 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {

bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
if (!LdSt.mayLoadOrStore())
return false;
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64InstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ class AArch64InstrInfo final : public AArch64GenInstrInfo {

bool getMemOperandsWithOffsetWidth(
const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const override;

/// If \p OffsetIsScalable is set to 'true', the offset is scaled by `vscale`.
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ class SIInsertHardClauses : public MachineFunctionPass {

int64_t Dummy1;
bool Dummy2;
unsigned Dummy3;
LocationSize Dummy3 = 0;
SmallVector<const MachineOperand *, 4> BaseOps;
if (Type <= LAST_REAL_HARDCLAUSE_TYPE) {
if (!SII->getMemOperandsWithOffsetWidth(MI, BaseOps, Dummy1, Dummy2,
Expand Down
6 changes: 3 additions & 3 deletions llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -360,7 +360,7 @@ static bool isStride64(unsigned Opc) {

bool SIInstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
if (!LdSt.mayLoadOrStore())
return false;
Expand Down Expand Up @@ -424,7 +424,7 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
Width = getOpSize(LdSt, DataOpIdx);
DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
Width += getOpSize(LdSt, DataOpIdx);
Width = Width.getValue() + getOpSize(LdSt, DataOpIdx);
} else {
Width = getOpSize(LdSt, DataOpIdx);
}
Expand Down Expand Up @@ -3647,7 +3647,7 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
const MachineInstr &MIb) const {
SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
int64_t Offset0, Offset1;
unsigned Dummy0, Dummy1;
LocationSize Dummy0 = 0, Dummy1 = 0;
bool Offset0IsScalable, Offset1IsScalable;
if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
Dummy0, &RI) ||
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/SIInstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
bool getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt,
SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
bool &OffsetIsScalable, unsigned &Width,
bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const final;

bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
Expand Down
8 changes: 4 additions & 4 deletions llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3070,7 +3070,7 @@ bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
/// Get the base register and byte offset of a load/store instr.
bool HexagonInstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
OffsetIsScalable = false;
const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, Width);
Expand Down Expand Up @@ -3286,9 +3286,9 @@ unsigned HexagonInstrInfo::getAddrMode(const MachineInstr &MI) const {
// returned in Offset and the access size is returned in AccessSize.
// If the base operand has a subregister or the offset field does not contain
// an immediate value, return nullptr.
MachineOperand *HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI,
int64_t &Offset,
unsigned &AccessSize) const {
MachineOperand *
HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI, int64_t &Offset,
LocationSize &AccessSize) const {
// Return if it is not a base+offset type instruction or a MemOp.
if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
getAddrMode(MI) != HexagonII::BaseLongOffset &&
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/Target/Hexagon/HexagonInstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ class HexagonInstrInfo : public HexagonGenInstrInfo {
bool getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt,
SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
bool &OffsetIsScalable, unsigned &Width,
bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const override;

/// Reverses the branch condition of the specified condition list,
Expand Down Expand Up @@ -437,7 +437,7 @@ class HexagonInstrInfo : public HexagonGenInstrInfo {

unsigned getAddrMode(const MachineInstr &MI) const;
MachineOperand *getBaseAndOffset(const MachineInstr &MI, int64_t &Offset,
unsigned &AccessSize) const;
LocationSize &AccessSize) const;
SmallVector<MachineInstr*,2> getBranchingInstrs(MachineBasicBlock& MBB) const;
unsigned getCExtOpNum(const MachineInstr &MI) const;
HexagonII::CompoundGroup
Expand Down
11 changes: 6 additions & 5 deletions llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -395,10 +395,11 @@ void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
HII.getAddrMode(L0) != HexagonII::BaseImmOffset)
continue;
int64_t Offset0;
unsigned Size0;
LocationSize Size0 = 0;
MachineOperand *BaseOp0 = HII.getBaseAndOffset(L0, Offset0, Size0);
// If the access size is longer than the L1 cache line, skip the check.
if (BaseOp0 == nullptr || !BaseOp0->isReg() || Size0 >= 32)
if (BaseOp0 == nullptr || !BaseOp0->isReg() || !Size0.hasValue() ||
Size0.getValue() >= 32)
continue;
// Scan only up to 32 instructions ahead (to avoid n^2 complexity).
for (unsigned j = i+1, m = std::min(i+32, e); j != m; ++j) {
Expand All @@ -408,10 +409,10 @@ void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
HII.getAddrMode(L1) != HexagonII::BaseImmOffset)
continue;
int64_t Offset1;
unsigned Size1;
LocationSize Size1 = 0;
MachineOperand *BaseOp1 = HII.getBaseAndOffset(L1, Offset1, Size1);
if (BaseOp1 == nullptr || !BaseOp1->isReg() || Size1 >= 32 ||
BaseOp0->getReg() != BaseOp1->getReg())
if (BaseOp1 == nullptr || !BaseOp1->isReg() || !Size1.hasValue() ||
Size1.getValue() >= 32 || BaseOp0->getReg() != BaseOp1->getReg())
continue;
// Check bits 3 and 4 of the offset: if they differ, a bank conflict
// is unlikely.
Expand Down
11 changes: 6 additions & 5 deletions llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -102,14 +102,15 @@ bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
const TargetRegisterInfo *TRI = &getRegisterInfo();
const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
int64_t OffsetA = 0, OffsetB = 0;
unsigned int WidthA = 0, WidthB = 0;
LocationSize WidthA = 0, WidthB = 0;
if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
if (BaseOpA->isIdenticalTo(*BaseOpB)) {
int LowOffset = std::min(OffsetA, OffsetB);
int HighOffset = std::max(OffsetA, OffsetB);
int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowOffset + LowWidth <= HighOffset)
LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowWidth.hasValue() &&
LowOffset + (int)LowWidth.getValue() <= HighOffset)
return true;
}
}
Expand Down Expand Up @@ -752,7 +753,7 @@ Register LanaiInstrInfo::isStoreToStackSlot(const MachineInstr &MI,

bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo * /*TRI*/) const {
LocationSize &Width, const TargetRegisterInfo * /*TRI*/) const {
// Handle only loads/stores with base register followed by immediate offset
// and with add as ALU op.
if (LdSt.getNumOperands() != 4)
Expand Down Expand Up @@ -793,7 +794,7 @@ bool LanaiInstrInfo::getMemOperandWithOffsetWidth(

bool LanaiInstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
switch (LdSt.getOpcode()) {
default:
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/Target/Lanai/LanaiInstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -70,12 +70,12 @@ class LanaiInstrInfo : public LanaiGenInstrInfo {
bool getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt,
SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
bool &OffsetIsScalable, unsigned &Width,
bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const override;

bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
int64_t &Offset, LocationSize &Width,
const TargetRegisterInfo *TRI) const;

std::pair<unsigned, unsigned>
Expand Down
15 changes: 8 additions & 7 deletions llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2821,7 +2821,7 @@ bool PPCInstrInfo::optimizeCmpPostRA(MachineInstr &CmpMI) const {

bool PPCInstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
const MachineOperand *BaseOp;
OffsetIsScalable = false;
Expand Down Expand Up @@ -2913,7 +2913,7 @@ bool PPCInstrInfo::shouldClusterMemOps(
return false;

int64_t Offset1 = 0, Offset2 = 0;
unsigned Width1 = 0, Width2 = 0;
LocationSize Width1 = 0, Width2 = 0;
const MachineOperand *Base1 = nullptr, *Base2 = nullptr;
if (!getMemOperandWithOffsetWidth(FirstLdSt, Base1, Offset1, Width1, TRI) ||
!getMemOperandWithOffsetWidth(SecondLdSt, Base2, Offset2, Width2, TRI) ||
Expand All @@ -2924,7 +2924,7 @@ bool PPCInstrInfo::shouldClusterMemOps(
"getMemOperandWithOffsetWidth return incorrect base op");
// The caller should already have ordered FirstMemOp/SecondMemOp by offset.
assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
return Offset1 + Width1 == Offset2;
return Offset1 + (int64_t)Width1.getValue() == Offset2;
}

/// GetInstSize - Return the number of bytes of code the specified
Expand Down Expand Up @@ -5504,7 +5504,7 @@ MachineInstr *PPCInstrInfo::findLoopInstr(
// memory width. Width is the size of memory that is being loaded/stored.
bool PPCInstrInfo::getMemOperandWithOffsetWidth(
const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo *TRI) const {
LocationSize &Width, const TargetRegisterInfo *TRI) const {
if (!LdSt.mayLoadOrStore() || LdSt.getNumExplicitOperands() != 3)
return false;

Expand Down Expand Up @@ -5542,14 +5542,15 @@ bool PPCInstrInfo::areMemAccessesTriviallyDisjoint(
const TargetRegisterInfo *TRI = &getRegisterInfo();
const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
int64_t OffsetA = 0, OffsetB = 0;
unsigned int WidthA = 0, WidthB = 0;
LocationSize WidthA = 0, WidthB = 0;
if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
if (BaseOpA->isIdenticalTo(*BaseOpB)) {
int LowOffset = std::min(OffsetA, OffsetB);
int HighOffset = std::max(OffsetA, OffsetB);
int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowOffset + LowWidth <= HighOffset)
LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowWidth.hasValue() &&
LowOffset + (int)LowWidth.getValue() <= HighOffset)
return true;
}
}
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/Target/PowerPC/PPCInstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -543,7 +543,7 @@ class PPCInstrInfo : public PPCGenInstrInfo {
/// loaded/stored (e.g. 1, 2, 4, 8).
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
int64_t &Offset, LocationSize &Width,
const TargetRegisterInfo *TRI) const;

bool optimizeCmpPostRA(MachineInstr &MI) const;
Expand All @@ -553,7 +553,7 @@ class PPCInstrInfo : public PPCGenInstrInfo {
bool getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt,
SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
bool &OffsetIsScalable, unsigned &Width,
bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const override;

/// Returns true if the two given memory operations should be scheduled
Expand Down
11 changes: 6 additions & 5 deletions llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2195,7 +2195,7 @@ MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,

bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
if (!LdSt.mayLoadOrStore())
return false;
Expand Down Expand Up @@ -2300,7 +2300,7 @@ bool RISCVInstrInfo::shouldClusterMemOps(
// function) and set it as appropriate.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo *TRI) const {
LocationSize &Width, const TargetRegisterInfo *TRI) const {
if (!LdSt.mayLoadOrStore())
return false;

Expand Down Expand Up @@ -2339,14 +2339,15 @@ bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
const TargetRegisterInfo *TRI = STI.getRegisterInfo();
const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
int64_t OffsetA = 0, OffsetB = 0;
unsigned int WidthA = 0, WidthB = 0;
LocationSize WidthA = 0, WidthB = 0;
if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
if (BaseOpA->isIdenticalTo(*BaseOpB)) {
int LowOffset = std::min(OffsetA, OffsetB);
int HighOffset = std::max(OffsetA, OffsetB);
int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowOffset + LowWidth <= HighOffset)
LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
if (LowWidth.hasValue() &&
LowOffset + (int)LowWidth.getValue() <= HighOffset)
return true;
}
}
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/Target/RISCV/RISCVInstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {

bool getMemOperandsWithOffsetWidth(
const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const override;

bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
Expand All @@ -168,7 +168,7 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {

bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
int64_t &Offset, LocationSize &Width,
const TargetRegisterInfo *TRI) const;

bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/X86/X86InstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4514,7 +4514,7 @@ bool X86InstrInfo::preservesZeroValueInReg(

bool X86InstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const {
const MCInstrDesc &Desc = MemOp.getDesc();
int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/X86/X86InstrInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -375,7 +375,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
bool getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt,
SmallVectorImpl<const MachineOperand *> &BaseOps, int64_t &Offset,
bool &OffsetIsScalable, unsigned &Width,
bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const override;
bool analyzeBranchPredicate(MachineBasicBlock &MBB,
TargetInstrInfo::MachineBranchPredicate &MBP,
Expand Down
Loading