@@ -62,8 +62,6 @@ STATISTIC(NumUnscaledPairCreated,
           "Number of load/store from unscaled generated");
 STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
 STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");
-STATISTIC(NumFailedAlignmentCheck, "Number of load/store pair transformation "
-                                   "not passed the alignment check");
 
 DEBUG_COUNTER(RegRenamingCounter, DEBUG_TYPE "-reg-renaming",
               "Controls which pairs are considered for renaming");
@@ -2339,6 +2337,9 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   MachineBasicBlock::iterator Paired =
       findMatchingInsn(MBBI, Flags, LdStLimit, /*FindNarrowMerge=*/false);
   if (Paired != E) {
+    ++NumPairCreated;
+    if (TII->hasUnscaledLdStOffset(MI))
+      ++NumUnscaledPairCreated;
     // Keeping the iterator straight is a pain, so we let the merge routine tell
     // us what the next instruction is after it's done mucking about.
     auto Prev = std::prev(MBBI);
@@ -2348,38 +2349,31 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
     MachineMemOperand *MemOp =
         MI.memoperands_empty() ? nullptr : MI.memoperands().front();
 
-    // If a load/store arrives and ldp/stp-aligned-only feature is opted, check
-    // that the alignment of the source pointer is at least double the alignment
-    // of the type.
-    if ((MI.mayLoad() && Subtarget->hasLdpAlignedOnly()) ||
-        (MI.mayStore() && Subtarget->hasStpAlignedOnly())) {
-      // If there is no size/align information, cancel the transformation.
-      if (!MemOp || !MemOp->getMemoryType().isValid()) {
-        NumFailedAlignmentCheck++;
-        return false;
-      }
+    // Get the needed alignments to check them if
+    // ldp-aligned-only/stp-aligned-only features are opted.
+    uint64_t MemAlignment = MemOp ? MemOp->getAlign().value() : -1;
+    uint64_t TypeAlignment = MemOp ? Align(MemOp->getSize()).value() : -1;
 
-      // Get the needed alignments to check them if
-      // ldp-aligned-only/stp-aligned-only features are opted.
-      uint64_t MemAlignment = MemOp->getAlign().value();
-      uint64_t TypeAlignment = Align(MemOp->getSize()).value();
+    // If a load arrives and ldp-aligned-only feature is opted, check that the
+    // alignment of the source pointer is at least double the alignment of the
+    // type.
+    if (MI.mayLoad() && Subtarget->hasLdpAlignedOnly() && MemOp &&
+        MemAlignment < 2 * TypeAlignment)
+      return false;
 
-      if (MemAlignment < 2 * TypeAlignment) {
-        NumFailedAlignmentCheck++;
-        return false;
-      }
-    }
+    // If a store arrives and stp-aligned-only feature is opted, check that the
+    // alignment of the source pointer is at least double the alignment of the
+    // type.
+    if (MI.mayStore() && Subtarget->hasStpAlignedOnly() && MemOp &&
+        MemAlignment < 2 * TypeAlignment)
+      return false;
 
     MBBI = mergePairedInsns(MBBI, Paired, Flags);
     // Collect liveness info for instructions between Prev and the new position
    // MBBI.
     for (auto I = std::next(Prev); I != MBBI; I++)
       updateDefinedRegisters(*I, DefinedInBB, TRI);
 
-    ++NumPairCreated;
-    if (TII->hasUnscaledLdStOffset(MI))
-      ++NumUnscaledPairCreated;
-
     return true;
   }
   return false;
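For readers skimming the diff, here is a minimal standalone sketch (plain C++, not the LLVM API; the concrete values are assumed for illustration) of the predicate the rewritten comments describe: under the ldp/stp-aligned-only features, pairing is rejected when the pointer alignment is less than twice the type alignment.

// Minimal sketch of the aligned-only predicate; not LLVM code, values assumed.
#include <cstdint>
#include <cstdio>

// Mirrors the check "MemAlignment < 2 * TypeAlignment": the pair
// transformation is skipped when the pointer alignment is below double the
// element type's alignment.
static bool rejectPairing(uint64_t MemAlignment, uint64_t TypeAlignment) {
  return MemAlignment < 2 * TypeAlignment;
}

int main() {
  // 16-byte-aligned pointer, 8-byte element: eligible for ldp/stp pairing.
  std::printf("align 16, type 8 -> reject=%d\n", rejectPairing(16, 8));
  // 8-byte-aligned pointer, 8-byte element: pairing is rejected.
  std::printf("align  8, type 8 -> reject=%d\n", rejectPairing(8, 8));
  return 0;
}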