From 66867c02d1221add17296bc468949c0e1fc4f9d9 Mon Sep 17 00:00:00 2001
From: Nate Chandler
Date: Fri, 16 Jun 2023 18:11:03 -0700
Subject: [PATCH] [MoveOnlyAddressChecker] Fix used fields repr.

The address checker records uses in its livenessUses map.  Previously,
that map mapped each instruction to a single contiguous range of fields
of the type.  But an instruction can use multiple discontiguous fields
of a single value.  Here, such instructions are recorded properly by
changing the map to store a bit vector of used fields for each
instruction.

rdar://110676577
---
 .../swift/SIL/FieldSensitivePrunedLiveness.h  |  84 ++++++-
 .../Utils/FieldSensitivePrunedLiveness.cpp    |  46 ++++
 .../Mandatory/MoveOnlyAddressCheckerUtils.cpp | 233 ++++++++++--------
 test/SILOptimizer/moveonly_addresschecker.sil |  60 -----
 .../moveonly_addresschecker_unmaximized.sil   |  67 +++++
 5 files changed, 319 insertions(+), 171 deletions(-)
 create mode 100644 test/SILOptimizer/moveonly_addresschecker_unmaximized.sil

diff --git a/include/swift/SIL/FieldSensitivePrunedLiveness.h b/include/swift/SIL/FieldSensitivePrunedLiveness.h
index 456ba6c387bee..b38ae79a1d30c 100644
--- a/include/swift/SIL/FieldSensitivePrunedLiveness.h
+++ b/include/swift/SIL/FieldSensitivePrunedLiveness.h
@@ -345,6 +345,16 @@ struct TypeTreeLeafTypeRange {
     return TypeTreeLeafTypeRange(start, end);
   }
 
+  /// Whether \p bits contains any of the in-range bits.
+  bool intersects(SmallBitVector const &bits) const {
+    for (auto element : getRange()) {
+      if (bits.test(element)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /// Is the given leaf type specified by \p singleLeafElementNumber a part of
   /// our \p range of leaf type values in our larger type.
   bool contains(SubElementOffset singleLeafElementNumber) const {
@@ -359,7 +369,7 @@ struct TypeTreeLeafTypeRange {
   }
 
   /// Sets each bit in \p bits corresponding to an element of this range.
-  void setBits(SmallBitVector &bits) {
+  void setBits(SmallBitVector &bits) const {
     for (auto element : getRange()) {
       bits.set(element);
     }
@@ -696,6 +706,14 @@ class FieldSensitivePrunedLiveness {
       }
     }
 
+    /// Record that the instruction uses the bits in \p bits.
+    void addUses(SmallBitVector const &bits, bool lifetimeEnding) {
+      liveBits |= bits;
+      if (lifetimeEnding) {
+        consumingBits |= bits;
+      }
+    }
+
     /// Populates the provided vector with contiguous ranges of bits which are
     /// users of the same sort.
     void getContiguousRanges(
@@ -838,6 +856,9 @@ class FieldSensitivePrunedLiveness {
   void updateForUse(SILInstruction *user, TypeTreeLeafTypeRange span,
                     bool lifetimeEnding);
 
+  void updateForUse(SILInstruction *user, SmallBitVector const &bits,
+                    bool lifetimeEnding);
+
   void getBlockLiveness(SILBasicBlock *bb, TypeTreeLeafTypeRange span,
                         SmallVectorImpl<bool> &resultingFoundLiveness) const {
@@ -862,6 +883,14 @@ class FieldSensitivePrunedLiveness {
                               SmallBitVector &liveOutBits,
                               SmallBitVector &deadBits) const;
 
+  InterestingUser &getOrCreateInterestingUser(SILInstruction *user) {
+    auto iter = users.find(user);
+    if (iter == users.end()) {
+      iter = users.insert({user, InterestingUser(getNumSubElements())}).first;
+    }
+    return *&iter->second;
+  }
+
   /// If \p user has had uses recorded, return a pointer to the
   /// InterestingUser where they've been recorded.
   InterestingUser const *getInterestingUser(SILInstruction *user) const {
@@ -885,11 +914,12 @@ class FieldSensitivePrunedLiveness {
   bool isInterestingUserOfKind(SILInstruction *user, IsInterestingUser kind,
                                TypeTreeLeafTypeRange range) const {
     auto *record = getInterestingUser(user);
-    if (!record)
+    if (!record) {
       return kind == IsInterestingUser::NonUser;
+    }
 
     for (auto element : range.getRange()) {
-      if (isInterestingUser(user, element) != kind)
+      if (record->isInterestingUser(element) != kind)
         return false;
     }
     return true;
@@ -918,11 +948,12 @@ class FieldSensitivePrunedLiveness {
   /// argument must be copied.
   void addInterestingUser(SILInstruction *user, TypeTreeLeafTypeRange range,
                           bool lifetimeEnding) {
-    auto iter = users.find(user);
-    if (iter == users.end()) {
-      iter = users.insert({user, InterestingUser(getNumSubElements())}).first;
-    }
-    iter->second.addUses(range, lifetimeEnding);
+    getOrCreateInterestingUser(user).addUses(range, lifetimeEnding);
+  }
+
+  void addInterestingUser(SILInstruction *user, SmallBitVector const &bits,
+                          bool lifetimeEnding) {
+    getOrCreateInterestingUser(user).addUses(bits, lifetimeEnding);
   }
 };
 
@@ -1036,6 +1067,11 @@ class FieldSensitivePrunedLiveRange : public FieldSensitivePrunedLiveness {
   void updateForUse(SILInstruction *user, TypeTreeLeafTypeRange span,
                     bool lifetimeEnding);
 
+  /// Customize updateForUse for FieldSensitivePrunedLiveness such that defs
+  /// are considered to stop liveness from being propagated up.
+  void updateForUse(SILInstruction *user, SmallBitVector const &bits,
+                    bool lifetimeEnding);
+
   /// Compute the boundary from the blocks discovered during liveness analysis.
   ///
   /// Precondition: \p liveness.getDiscoveredBlocks() is a valid list of all
@@ -1107,6 +1143,14 @@ class FieldSensitiveSSAPrunedLiveRange
     return inst == defInst.first && defInst.second->contains(bit);
   }
 
+  bool isDef(SILInstruction *inst, SmallBitVector const &bits) const {
+    if (inst != defInst.first)
+      return false;
+    SmallBitVector defBits(bits.size());
+    defInst.second->setBits(defBits);
+    return (defBits & bits) == bits;
+  }
+
   bool isDef(SILInstruction *inst, TypeTreeLeafTypeRange span) const {
     return inst == defInst.first &&
            defInst.second->setIntersection(span).has_value();
@@ -1217,6 +1261,30 @@ class FieldSensitiveMultiDefPrunedLiveRange
         *iter, [&](TypeTreeLeafTypeRange span) { return span.contains(bit); });
   }
 
+  bool isDef(SILValue value, SmallBitVector const &bits) const {
+    assert(isInitialized());
+    auto iter = defs.find(cast<SILNode>(value));
+    if (!iter)
+      return false;
+    SmallBitVector allBits(bits.size());
+    for (auto range : *iter) {
+      range.setBits(allBits);
+    }
+    return (bits & allBits) == bits;
+  }
+
+  bool isDef(SILInstruction *inst, SmallBitVector const &bits) const {
+    assert(isInitialized());
+    auto iter = defs.find(cast<SILNode>(inst));
+    if (!iter)
+      return false;
+    SmallBitVector allBits(bits.size());
+    for (auto range : *iter) {
+      range.setBits(allBits);
+    }
+    return (bits & allBits) == bits;
+  }
+
   bool isDef(SILInstruction *inst, TypeTreeLeafTypeRange span) const {
     assert(isInitialized());
     auto iter = defs.find(cast<SILNode>(inst));
diff --git a/lib/SIL/Utils/FieldSensitivePrunedLiveness.cpp b/lib/SIL/Utils/FieldSensitivePrunedLiveness.cpp
index e80dbac5946d0..e75dcb22867fa 100644
--- a/lib/SIL/Utils/FieldSensitivePrunedLiveness.cpp
+++ b/lib/SIL/Utils/FieldSensitivePrunedLiveness.cpp
@@ -609,6 +609,16 @@ void FieldSensitivePrunedLiveness::updateForUse(SILInstruction *user,
   addInterestingUser(user, range, lifetimeEnding);
 }
 
+void
+FieldSensitivePrunedLiveness::updateForUse(SILInstruction *user,
+                                           SmallBitVector const &bits,
+                                           bool lifetimeEnding) {
+  for (auto bit : bits.set_bits()) {
+    liveBlocks.updateForUse(user, bit);
+  }
+
+  addInterestingUser(user, bits, lifetimeEnding);
+}
+
 //===----------------------------------------------------------------------===//
 //                    MARK: FieldSensitivePrunedLiveRange
 //===----------------------------------------------------------------------===//
@@ -822,6 +832,42 @@ void FieldSensitivePrunedLiveRange<LivenessWithDefs>::updateForUse(
   FieldSensitivePrunedLiveness::updateForUse(user, range, lifetimeEnding);
 }
 
+template <typename LivenessWithDefs>
+void FieldSensitivePrunedLiveRange<LivenessWithDefs>::updateForUse(
+    SILInstruction *user, SmallBitVector const &bits, bool lifetimeEnding) {
+  PRUNED_LIVENESS_LOG(
+      llvm::dbgs()
+      << "Begin FieldSensitivePrunedLiveRange::updateForUse "
+         "for: "
+      << *user);
+  PRUNED_LIVENESS_LOG(
+      llvm::dbgs() << "Looking for def instruction earlier in the block!\n");
+
+  auto *parentBlock = user->getParent();
+  for (auto ii = std::next(user->getReverseIterator()),
+            ie = parentBlock->rend();
+       ii != ie; ++ii) {
+    // If we find the def, just mark this instruction as being an interesting
+    // instruction.
+    if (asImpl().isDef(&*ii, bits)) {
+      PRUNED_LIVENESS_LOG(llvm::dbgs() << "    Found def: " << *ii);
+      PRUNED_LIVENESS_LOG(
+          llvm::dbgs()
+          << "    Marking inst as interesting user and returning!\n");
+      addInterestingUser(user, bits, lifetimeEnding);
+      return;
+    }
+  }
+
+  // Otherwise, just delegate to our parent class's update for use. This will
+  // update liveness for our predecessor blocks and add this instruction as an
+  // interesting user.
+  PRUNED_LIVENESS_LOG(llvm::dbgs()
+                      << "No defs found! Delegating to "
+                         "FieldSensitivePrunedLiveness::updateForUse.\n");
+  FieldSensitivePrunedLiveness::updateForUse(user, bits, lifetimeEnding);
+}
+
 //===----------------------------------------------------------------------===//
 //                    MARK: Boundary Computation Utilities
 //===----------------------------------------------------------------------===//
diff --git a/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp b/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp
index 1e52e99d8ae69..a7d7ed6bf798e 100644
--- a/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp
+++ b/lib/SILOptimizer/Mandatory/MoveOnlyAddressCheckerUtils.cpp
@@ -532,92 +532,6 @@ static bool isCopyableValue(SILValue value) {
   return true;
 }
 
-//===----------------------------------------------------------------------===//
-//                        MARK: Partial Apply Utilities
-//===----------------------------------------------------------------------===//
-
-static bool findNonEscapingPartialApplyUses(
-    PartialApplyInst *pai, TypeTreeLeafTypeRange leafRange,
-    llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4>
-        &livenessUses) {
-  StackList<Operand *> worklist(pai->getFunction());
-  for (auto *use : pai->getUses())
-    worklist.push_back(use);
-
-  LLVM_DEBUG(llvm::dbgs() << "Searching for partial apply uses!\n");
-  while (!worklist.empty()) {
-    auto *use = worklist.pop_back_val();
-
-    if (use->isTypeDependent())
-      continue;
-
-    auto *user = use->getUser();
-
-    // These instructions do not cause us to escape.
-    if (isIncidentalUse(user) || isa<DestroyValueInst>(user))
-      continue;
-
-    // Look through these instructions.
-    if (isa<BeginBorrowInst>(user) || isa<CopyValueInst>(user) ||
-        isa<MoveValueInst>(user) ||
-        // If we capture this partial_apply in another partial_apply, then we
-        // know that said partial_apply must not have escaped the value since
-        // otherwise we could not have an inout_aliasable argument or be
-        // on_stack. Process it recursively so that we treat uses of that
-        // partial_apply and applies of that partial_apply as uses of our
-        // partial_apply.
-        //
-        // We have this separately from the other look through sections so that
-        // we can make it clearer what we are doing here.
-        isa<PartialApplyInst>(user)) {
-      for (auto *use : cast<SingleValueInstruction>(user)->getUses())
-        worklist.push_back(use);
-      continue;
-    }
-
-    // If we have a mark_dependence and are the value, look through the
-    // mark_dependence.
-    if (auto *mdi = dyn_cast<MarkDependenceInst>(user)) {
-      if (mdi->getValue() == use->get()) {
-        for (auto *use : mdi->getUses())
-          worklist.push_back(use);
-        continue;
-      }
-    }
-
-    if (auto apply = FullApplySite::isa(user)) {
-      // If we apply the function or pass the function off to an apply, then we
-      // need to treat the function application as a liveness use of the
-      // variable since if the partial_apply is invoked within the function
-      // application, we may access the captured variable.
-      livenessUses.insert({user, leafRange});
-      if (apply.beginsCoroutineEvaluation()) {
-        // If we have a coroutine, we need to treat the abort_apply and
-        // end_apply as liveness uses since once we execute one of those
-        // instructions, we have returned control to the coroutine which means
-        // that we could then access the captured variable again.
-        auto *bai = cast<BeginApplyInst>(user);
-        SmallVector<EndApplyInst *, 4> endApplies;
-        SmallVector<AbortApplyInst *, 4> abortApplies;
-        bai->getCoroutineEndPoints(endApplies, abortApplies);
-        for (auto *eai : endApplies)
-          livenessUses.insert({eai, leafRange});
-        for (auto *aai : abortApplies)
-          livenessUses.insert({aai, leafRange});
-      }
-      continue;
-    }
-
-    LLVM_DEBUG(
-        llvm::dbgs()
-        << "Found instruction we did not understand... returning false!\n");
-    LLVM_DEBUG(llvm::dbgs() << "Instruction: " << *user);
-    return false;
-  }
-
-  return true;
-}
-
 //===----------------------------------------------------------------------===//
 //                   MARK: Find Candidate Mark Must Checks
 //===----------------------------------------------------------------------===//
@@ -648,8 +562,7 @@ namespace {
 struct UseState {
   MarkMustCheckInst *address;
 
-  /// The number of fields in the exploded type. Set in initializeLiveness.
-  unsigned fieldCount = UINT_MAX;
+  Optional<unsigned> cachedNumSubelements;
 
   /// The blocks that consume fields of the value.
   ///
@@ -662,7 +575,7 @@ struct UseState {
 
   /// A map from a liveness requiring use to the part of the type that it
   /// requires liveness for.
-  llvm::SmallMapVector<SILInstruction *, TypeTreeLeafTypeRange, 4> livenessUses;
+  llvm::SmallMapVector<SILInstruction *, SmallBitVector, 4> livenessUses;
 
   /// A map from a load [copy] or load [take] that we determined must be
   /// converted to a load_borrow to the part of the type tree that it needs to
@@ -731,6 +644,34 @@ struct UseState {
 
   SILFunction *getFunction() const { return address->getFunction(); }
 
+  /// The number of fields in the exploded type.
+  unsigned getNumSubelements() {
+    if (!cachedNumSubelements) {
+      cachedNumSubelements = TypeSubElementCount(address);
+    }
+    return *cachedNumSubelements;
+  }
+
+  SmallBitVector &getOrCreateLivenessUse(SILInstruction *inst) {
+    auto iter = livenessUses.find(inst);
+    if (iter == livenessUses.end()) {
+      iter = livenessUses.insert({inst, SmallBitVector(getNumSubelements())})
+                 .first;
+    }
+    return iter->second;
+  }
+
+  void recordLivenessUse(SILInstruction *inst, SmallBitVector const &bits) {
+    getOrCreateLivenessUse(inst) |= bits;
+  }
+
+  void recordLivenessUse(SILInstruction *inst, TypeTreeLeafTypeRange range) {
+    auto &bits = getOrCreateLivenessUse(inst);
+    for (auto element : range.getRange()) {
+      bits.set(element);
+    }
+  }
+
   /// Returns true if this is a terminator instruction that although it doesn't
   /// use our inout argument directly is used by the pass to ensure that we
   /// reinit said argument if we consumed it in the body of the function.
@@ -765,6 +706,7 @@ struct UseState {
 
   void clear() {
     address = nullptr;
+    cachedNumSubelements = llvm::None;
    consumingBlocks.clear();
     destroys.clear();
     livenessUses.clear();
@@ -825,7 +767,9 @@ struct UseState {
   void recordConsumingBlock(SILBasicBlock *block, TypeTreeLeafTypeRange range) {
     auto iter = consumingBlocks.find(block);
     if (iter == consumingBlocks.end()) {
-      iter = consumingBlocks.insert({block, SmallBitVector(fieldCount)}).first;
+      iter =
+          consumingBlocks.insert({block, SmallBitVector(getNumSubelements())})
+              .first;
     }
     range.setBits(iter->second);
   }
@@ -879,7 +823,7 @@ struct UseState {
     {
       auto iter = livenessUses.find(inst);
       if (iter != livenessUses.end()) {
-        if (span.setIntersection(iter->second))
+        if (span.intersects(iter->second))
           return true;
       }
     }
@@ -929,10 +873,94 @@ struct UseState {
 
 } // namespace
 
+//===----------------------------------------------------------------------===//
+//                        MARK: Partial Apply Utilities
+//===----------------------------------------------------------------------===//
+
+static bool findNonEscapingPartialApplyUses(PartialApplyInst *pai,
+                                            TypeTreeLeafTypeRange leafRange,
+                                            UseState &useState) {
+  StackList<Operand *> worklist(pai->getFunction());
+  for (auto *use : pai->getUses())
+    worklist.push_back(use);
+
+  LLVM_DEBUG(llvm::dbgs() << "Searching for partial apply uses!\n");
+  while (!worklist.empty()) {
+    auto *use = worklist.pop_back_val();
+
+    if (use->isTypeDependent())
+      continue;
+
+    auto *user = use->getUser();
+
+    // These instructions do not cause us to escape.
+    if (isIncidentalUse(user) || isa<DestroyValueInst>(user))
+      continue;
+
+    // Look through these instructions.
+    if (isa<BeginBorrowInst>(user) || isa<CopyValueInst>(user) ||
+        isa<MoveValueInst>(user) ||
+        // If we capture this partial_apply in another partial_apply, then we
+        // know that said partial_apply must not have escaped the value since
+        // otherwise we could not have an inout_aliasable argument or be
+        // on_stack. Process it recursively so that we treat uses of that
+        // partial_apply and applies of that partial_apply as uses of our
+        // partial_apply.
+        //
+        // We have this separately from the other look through sections so that
+        // we can make it clearer what we are doing here.
+        isa<PartialApplyInst>(user)) {
+      for (auto *use : cast<SingleValueInstruction>(user)->getUses())
+        worklist.push_back(use);
+      continue;
+    }
+
+    // If we have a mark_dependence and are the value, look through the
+    // mark_dependence.
+    if (auto *mdi = dyn_cast<MarkDependenceInst>(user)) {
+      if (mdi->getValue() == use->get()) {
+        for (auto *use : mdi->getUses())
+          worklist.push_back(use);
+        continue;
+      }
+    }
+
+    if (auto apply = FullApplySite::isa(user)) {
+      // If we apply the function or pass the function off to an apply, then we
+      // need to treat the function application as a liveness use of the
+      // variable since if the partial_apply is invoked within the function
+      // application, we may access the captured variable.
+      useState.recordLivenessUse(user, leafRange);
+      if (apply.beginsCoroutineEvaluation()) {
+        // If we have a coroutine, we need to treat the abort_apply and
+        // end_apply as liveness uses since once we execute one of those
+        // instructions, we have returned control to the coroutine which means
+        // that we could then access the captured variable again.
+        auto *bai = cast<BeginApplyInst>(user);
+        SmallVector<EndApplyInst *, 4> endApplies;
+        SmallVector<AbortApplyInst *, 4> abortApplies;
+        bai->getCoroutineEndPoints(endApplies, abortApplies);
+        for (auto *eai : endApplies)
+          useState.recordLivenessUse(eai, leafRange);
+        for (auto *aai : abortApplies)
+          useState.recordLivenessUse(aai, leafRange);
+      }
+      continue;
+    }
+
+    LLVM_DEBUG(
+        llvm::dbgs()
+        << "Found instruction we did not understand... returning false!\n");
+    LLVM_DEBUG(llvm::dbgs() << "Instruction: " << *user);
+    return false;
+  }
+
+  return true;
+}
+
 void UseState::initializeLiveness(
     FieldSensitiveMultiDefPrunedLiveRange &liveness) {
-  fieldCount = liveness.getNumSubElements();
-
+  assert(liveness.getNumSubElements() == getNumSubelements());
   // We begin by initializing all of our init uses.
   for (auto initInstAndValue : initInsts) {
     LLVM_DEBUG(llvm::dbgs() << "Found def: " << *initInstAndValue.first);
@@ -1764,7 +1792,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
       LLVM_DEBUG(llvm::dbgs()
                  << "Found copy of copyable type. Treating as liveness use! "
                  << *user);
-      useState.livenessUses.insert({user, *leafRange});
+      useState.recordLivenessUse(user, *leafRange);
       return true;
     }
@@ -1816,7 +1844,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
     auto leafRange = TypeTreeLeafTypeRange::get(op->get(), getRootAddress());
     if (!leafRange)
       return false;
-    useState.livenessUses.insert({user, *leafRange});
+    useState.recordLivenessUse(user, *leafRange);
     return true;
   }
@@ -1924,7 +1952,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
                  "since they will become end_borrows.\n");
       for (auto *consumeUse : li->getConsumingUses()) {
         auto *dvi = cast<DestroyValueInst>(consumeUse->getUser());
-        useState.livenessUses.insert({dvi, *leafRange});
+        useState.recordLivenessUse(dvi, *leafRange);
       }
 
       return true;
@@ -1963,7 +1991,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
                    "since they will become end_borrows.\n");
         for (auto *consumeUse : li->getConsumingUses()) {
           auto *dvi = cast<DestroyValueInst>(consumeUse->getUser());
-          useState.livenessUses.insert({dvi, *leafRange});
+          useState.recordLivenessUse(dvi, *leafRange);
         }
       } else {
         // If we had a load [copy], store this into the copy list. These are the
@@ -2030,7 +2058,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
     if (!leafRange)
       return false;
 
-    useState.livenessUses.insert({user, *leafRange});
+    useState.recordLivenessUse(user, *leafRange);
     return true;
   }
@@ -2055,7 +2083,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
       if (!leafRange)
        return false;
 
-      useState.livenessUses.insert({user, *leafRange});
+      useState.recordLivenessUse(user, *leafRange);
       return true;
     }
   }
@@ -2089,8 +2117,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
     // where the partial apply is passed to a function. We treat those as
     // liveness uses. If we find a use we don't understand, we return false
     // here.
-    if (!findNonEscapingPartialApplyUses(pas, *leafRange,
-                                         useState.livenessUses)) {
+    if (!findNonEscapingPartialApplyUses(pas, *leafRange, useState)) {
       LLVM_DEBUG(
           llvm::dbgs()
           << "Failed to understand use of a non-escaping partial apply?!\n");
@@ -2113,7 +2140,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
       return false;
     }
 
-    useState.livenessUses.insert({user, *leafRange});
+    useState.recordLivenessUse(user, *leafRange);
     return true;
   }
@@ -2132,7 +2159,7 @@ bool GatherUsesVisitor::visitUse(Operand *op) {
       llvm_unreachable("standard failure");
     }
 #endif
-    useState.livenessUses.insert({user, *leafRange});
+    useState.recordLivenessUse(user, *leafRange);
     return true;
   }
diff --git a/test/SILOptimizer/moveonly_addresschecker.sil b/test/SILOptimizer/moveonly_addresschecker.sil
index e03a8a420790f..10f730140af23 100644
--- a/test/SILOptimizer/moveonly_addresschecker.sil
+++ b/test/SILOptimizer/moveonly_addresschecker.sil
@@ -632,63 +632,3 @@ bb0:
   %22 = tuple ()
   return %22 : $()
 }
-
-@_moveOnly
-struct M4 {
-  let s1: M
-  let s2: M
-  let s3: M
-  let s4: M
-}
-
-sil @get_M4 : $@convention(thin) () -> @owned M4
-sil @end_2 : $@convention(thin) (@owned M, @owned M) -> ()
-sil @see_addr_2 : $@convention(thin) (@in_guaranteed M, @in_guaranteed M) -> ()
-
-
-/// Two non-contiguous fields (#M4.s2, #M4.s4) are borrowed by @see_addr_2.
-/// Two non-contiguous fields (#M4.s1, #M4.s3) are consumed by @end_2.
-///
-/// Verify that #M4.s2 and #M4.s4 both survive past the apply of @see_addr_2.
-// CHECK-LABEL: sil [ossa] @rdar110676577 : {{.*}} {
-// CHECK: [[STACK:%[^,]+]] = alloc_stack $M4
-// CHECK: [[GET_M4:%[^,]+]] = function_ref @get_M4
-// CHECK: [[M4:%[^,]+]] = apply [[GET_M4]]() : $@convention(thin) () -> @owned M4
-// CHECK: store [[M4]] to [init] [[STACK]] : $*M4
-// CHECK: [[M4_S2_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s2
-// CHECK: [[M4_S4_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s4
-// CHECK: [[SEE_ADDR_2:%[^,]+]] = function_ref @see_addr_2
-// CHECK: apply [[SEE_ADDR_2]]([[M4_S2_ADDR]], [[M4_S4_ADDR]])
-// CHECK: [[M4_S1_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s1
-// CHECK: [[M4_S1:%[^,]+]] = load [take] [[M4_S1_ADDR]] : $*M
-// CHECK: [[M4_S3_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s3
-// CHECK: [[M4_S3:%[^,]+]] = load [take] [[M4_S3_ADDR]] : $*M
-// CHECK: [[END_2:%[^,]+]] = function_ref @end_2
-// CHECK: apply [[END_2]]([[M4_S1]], [[M4_S3]])
-// CHECK: [[M4_S4_ADDR_2:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s4
-// CHECK: destroy_addr [[M4_S4_ADDR_2]]
-// CHECK: [[M4_S2_ADDR_2:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s2
-// CHECK: destroy_addr [[M4_S2_ADDR_2]]
-// CHECK-LABEL: } // end sil function 'rdar110676577'
-sil [ossa] @rdar110676577 : $@convention(thin) () -> () {
-bb0:
-  %0 = alloc_stack $M4
-  %1 = mark_must_check [consumable_and_assignable] %0 : $*M4
-  %3 = function_ref @get_M4 : $@convention(thin) () -> @owned M4
-  %4 = apply %3() : $@convention(thin) () -> @owned M4
-  store %4 to [init] %1 : $*M4
-  %6 = struct_element_addr %1 : $*M4, #M4.s2
-  %6a = struct_element_addr %1 : $*M4, #M4.s4
-  %see_addr_2 = function_ref @see_addr_2 : $@convention(thin) (@in_guaranteed M, @in_guaranteed M) -> ()
-  apply %see_addr_2(%6, %6a) : $@convention(thin) (@in_guaranteed M, @in_guaranteed M) -> ()
-  %12 = struct_element_addr %1 : $*M4, #M4.s1
-  %13 = load [copy] %12 : $*M
-  %14 = struct_element_addr %1 : $*M4, #M4.s3
-  %15 = load [copy] %14 : $*M
-  %16 = function_ref @end_2 : $@convention(thin) (@owned M, @owned M) -> ()
-  %17 = apply %16(%13, %15) : $@convention(thin) (@owned M, @owned M) -> ()
-  destroy_addr %1 : $*M4
-  dealloc_stack %0 : $*M4
-  %22 = tuple ()
-  return %22 : $()
-}
diff --git a/test/SILOptimizer/moveonly_addresschecker_unmaximized.sil b/test/SILOptimizer/moveonly_addresschecker_unmaximized.sil
new file mode 100644
index 0000000000000..530828ac39ce0
--- /dev/null
+++ b/test/SILOptimizer/moveonly_addresschecker_unmaximized.sil
@@ -0,0 +1,67 @@
+// RUN: %target-sil-opt -module-name moveonly_addresschecker -sil-move-only-address-checker -enable-experimental-feature MoveOnlyClasses -enable-sil-verify-all %s -move-only-address-checker-disable-lifetime-extension=true | %FileCheck %s
+
+@_moveOnly
+struct M {
+  deinit {}
+}
+
+@_moveOnly
+struct M4 {
+  let s1: M
+  let s2: M
+  let s3: M
+  let s4: M
+}
+
+sil @get_M4 : $@convention(thin) () -> @owned M4
+sil @end_2 : $@convention(thin) (@owned M, @owned M) -> ()
+sil @see_addr_2 : $@convention(thin) (@in_guaranteed M, @in_guaranteed M) -> ()
+
+
+/// Two non-contiguous fields (#M4.s2, #M4.s4) are borrowed by @see_addr_2.
+/// Two non-contiguous fields (#M4.s1, #M4.s3) are consumed by @end_2.
+///
+/// Verify that #M4.s2 and #M4.s4 both survive past the apply of @see_addr_2.
+// CHECK-LABEL: sil [ossa] @rdar110676577 : {{.*}} {
+// CHECK: [[STACK:%[^,]+]] = alloc_stack $M4
+// CHECK: [[GET_M4:%[^,]+]] = function_ref @get_M4
+// CHECK: [[M4:%[^,]+]] = apply [[GET_M4]]() : $@convention(thin) () -> @owned M4
+// CHECK: store [[M4]] to [init] [[STACK]] : $*M4
+// CHECK: [[M4_S2_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s2
+// CHECK: [[M4_S4_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s4
+// CHECK: [[SEE_ADDR_2:%[^,]+]] = function_ref @see_addr_2
+// CHECK: apply [[SEE_ADDR_2]]([[M4_S2_ADDR]], [[M4_S4_ADDR]])
+// CHECK: [[M4_S4_ADDR_2:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s4
+// CHECK: destroy_addr [[M4_S4_ADDR_2]]
+// CHECK: [[M4_S2_ADDR_2:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s2
+// CHECK: destroy_addr [[M4_S2_ADDR_2]]
+// CHECK: [[M4_S1_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s1
+// CHECK: [[M4_S1:%[^,]+]] = load [take] [[M4_S1_ADDR]] : $*M
+// CHECK: [[M4_S3_ADDR:%[^,]+]] = struct_element_addr [[STACK]] : $*M4, #M4.s3
+// CHECK: [[M4_S3:%[^,]+]] = load [take] [[M4_S3_ADDR]] : $*M
+// CHECK: [[END_2:%[^,]+]] = function_ref @end_2
+// CHECK: apply [[END_2]]([[M4_S1]], [[M4_S3]])
+// CHECK-LABEL: } // end sil function 'rdar110676577'
+sil [ossa] @rdar110676577 : $@convention(thin) () -> () {
+bb0:
+  %0 = alloc_stack $M4
+  %1 = mark_must_check [consumable_and_assignable] %0 : $*M4
+  %3 = function_ref @get_M4 : $@convention(thin) () -> @owned M4
+  %4 = apply %3() : $@convention(thin) () -> @owned M4
+  store %4 to [init] %1 : $*M4
+  %6 = struct_element_addr %1 : $*M4, #M4.s2
+  %6a = struct_element_addr %1 : $*M4, #M4.s4
+  %see_addr_2 = function_ref @see_addr_2 : $@convention(thin) (@in_guaranteed M, @in_guaranteed M) -> ()
+  apply %see_addr_2(%6, %6a) : $@convention(thin) (@in_guaranteed M, @in_guaranteed M) -> ()
+  %12 = struct_element_addr %1 : $*M4, #M4.s1
+  %13 = load [copy] %12 : $*M
+  %14 = struct_element_addr %1 : $*M4, #M4.s3
+  %15 = load [copy] %14 : $*M
+  %16 = function_ref @end_2 : $@convention(thin) (@owned M, @owned M) -> ()
+  %17 = apply %16(%13, %15) : $@convention(thin) (@owned M, @owned M) -> ()
+  destroy_addr %1 : $*M4
+  dealloc_stack %0 : $*M4
+  %22 = tuple ()
+  return %22 : $()
+}
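
Note (not part of the patch): a minimal, self-contained C++ sketch of the representation change the commit message describes. It assumes only llvm::SmallBitVector; a plain std::map and a dummy struct stand in for llvm::SmallMapVector and SILInstruction, and the field indices for M4 (s1=0, s2=1, s3=2, s4=3) are illustrative. A bit vector can record the discontiguous field set {s2, s4} that see_addr_2 uses; no single contiguous [start, end) range can, which is the bug the patch fixes.

#include "llvm/ADT/SmallBitVector.h"
#include <cassert>
#include <map>

struct DummyInst {}; // stand-in for SILInstruction

int main() {
  unsigned numSubelements = 4; // M4 explodes into four leaf fields
  std::map<DummyInst *, llvm::SmallBitVector> livenessUses;
  DummyInst seeAddr2;

  // Mirrors UseState::recordLivenessUse: get-or-create the per-instruction
  // bit vector, then set the bits for the fields this use requires.
  auto &bits =
      livenessUses.try_emplace(&seeAddr2, llvm::SmallBitVector(numSubelements))
          .first->second;
  bits.set(1); // s2
  bits.set(3); // s4

  // A later use by the same instruction merges in via |=, whereas the old
  // map's insert kept only the first recorded range and dropped the rest.
  llvm::SmallBitVector more(numSubelements);
  more.set(3);
  bits |= more;

  assert(bits.test(1) && !bits.test(2) && bits.test(3));
  return 0;
}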