From e7ec8c35af3c9bdce80cbc0bac43ce02275cf122 Mon Sep 17 00:00:00 2001 From: Joe Groff Date: Fri, 30 Oct 2020 16:53:44 -0700 Subject: [PATCH 01/75] SILGen: Caller-side codegen for invoking foreign async functions Immediately before invoking the ObjC API, get the current continuation, capture it into a block to pass as the completion handler, and then await the continuation, whose resume/error successors serve as the semantic return/throw result of the call. This should complete the caller-side part of SILGen; the completion handler block implementation is however still only a stub. --- docs/ABI/Mangling.rst | 2 +- include/swift/AST/ASTMangler.h | 3 + lib/AST/ASTMangler.cpp | 10 +++ lib/Demangling/Demangler.cpp | 6 +- lib/Demangling/NodePrinter.cpp | 2 + lib/SILGen/ResultPlan.cpp | 107 ++++++++++++++++++++++++++++++--- lib/SILGen/ResultPlan.h | 5 ++ lib/SILGen/SILGen.h | 7 +++ lib/SILGen/SILGenApply.cpp | 39 ++++++------ lib/SILGen/SILGenThunk.cpp | 54 +++++++++++++++++ test/SILGen/objc_async.swift | 34 ++++++++++- 11 files changed, 238 insertions(+), 31 deletions(-) diff --git a/docs/ABI/Mangling.rst b/docs/ABI/Mangling.rst index ee42a97b3a2a6..f55da15251570 100644 --- a/docs/ABI/Mangling.rst +++ b/docs/ABI/Mangling.rst @@ -219,7 +219,7 @@ types where the metadata itself has unknown layout.) global ::= entity // some identifiable thing global ::= from-type to-type generic-signature? 'TR' // reabstraction thunk global ::= from-type to-type generic-signature? 'TR' // reabstraction thunk - global ::= impl-function-type 'Tz' // objc-to-swift-async completion handler block implementation + global ::= impl-function-type type 'Tz' // objc-to-swift-async completion handler block implementation global ::= from-type to-type self-type generic-signature? 'Ty' // reabstraction thunk with dynamic 'Self' capture global ::= from-type to-type generic-signature? 'Tr' // obsolete mangling for reabstraction thunk global ::= entity generic-signature? 
type type* 'TK' // key path getter diff --git a/include/swift/AST/ASTMangler.h b/include/swift/AST/ASTMangler.h index af7294875332e..d4a628374c3e0 100644 --- a/include/swift/AST/ASTMangler.h +++ b/include/swift/AST/ASTMangler.h @@ -155,6 +155,9 @@ class ASTMangler : public Mangler { Type SelfType, ModuleDecl *Module); + std::string mangleObjCAsyncCompletionHandlerImpl(CanSILFunctionType BlockType, + CanType ResultType); + /// Mangle the derivative function (JVP/VJP) for the given: /// - Mangled original function name. /// - Derivative function kind. diff --git a/lib/AST/ASTMangler.cpp b/lib/AST/ASTMangler.cpp index 490385afbe1a1..9092c5681d6be 100644 --- a/lib/AST/ASTMangler.cpp +++ b/lib/AST/ASTMangler.cpp @@ -394,6 +394,16 @@ std::string ASTMangler::mangleReabstractionThunkHelper( return finalize(); } +std::string ASTMangler::mangleObjCAsyncCompletionHandlerImpl( + CanSILFunctionType BlockType, + CanType ResultType) { + beginMangling(); + appendType(BlockType); + appendType(ResultType); + appendOperator("Tz"); + return finalize(); +} + std::string ASTMangler::mangleAutoDiffDerivativeFunctionHelper( StringRef name, AutoDiffDerivativeFunctionKind kind, AutoDiffConfig config) { diff --git a/lib/Demangling/Demangler.cpp b/lib/Demangling/Demangler.cpp index 7165580f5909a..9ff7bfc6c51cb 100644 --- a/lib/Demangling/Demangler.cpp +++ b/lib/Demangling/Demangler.cpp @@ -2264,8 +2264,10 @@ NodePointer Demangler::popProtocolConformance() { return createWithChild(Node::Kind::CoroutineContinuationPrototype, type); } case 'z': { - NodePointer implType = popNode(Node::Kind::ImplFunctionType); - return createWithChild(Node::Kind::ObjCAsyncCompletionHandlerImpl, implType); + NodePointer resultType = popNode(Node::Kind::Type); + NodePointer implType = popNode(Node::Kind::Type); + return createWithChildren(Node::Kind::ObjCAsyncCompletionHandlerImpl, + implType, resultType); } case 'V': { NodePointer Base = popNode(isEntity); diff --git a/lib/Demangling/NodePrinter.cpp 
b/lib/Demangling/NodePrinter.cpp index f465677d82c89..1412de539b40a 100644 --- a/lib/Demangling/NodePrinter.cpp +++ b/lib/Demangling/NodePrinter.cpp @@ -2536,6 +2536,8 @@ NodePointer NodePrinter::print(NodePointer Node, bool asPrefixContext) { case Node::Kind::ObjCAsyncCompletionHandlerImpl: Printer << "@objc completion handler block implementation for "; print(Node->getChild(0)); + Printer << " with result type "; + print(Node->getChild(1)); return nullptr; } printer_unreachable("bad node kind!"); diff --git a/lib/SILGen/ResultPlan.cpp b/lib/SILGen/ResultPlan.cpp index 0f3309dbea041..308000367e368 100644 --- a/lib/SILGen/ResultPlan.cpp +++ b/lib/SILGen/ResultPlan.cpp @@ -453,15 +453,70 @@ class TupleInitializationResultPlan final : public ResultPlan { class ForeignAsyncInitializationPlan final : public ResultPlan { SILLocation loc; + CalleeTypeInfo calleeTypeInfo; + SILType opaqueResumeType; + SILValue resumeBuf; + SILValue continuation; + public: - ForeignAsyncInitializationPlan(SILLocation loc) : loc(loc) {} + ForeignAsyncInitializationPlan(SILGenFunction &SGF, SILLocation loc, + const CalleeTypeInfo &calleeTypeInfo) + : loc(loc), calleeTypeInfo(calleeTypeInfo) + { + // Allocate space to receive the resume value when the continuation is + // resumed. + opaqueResumeType = SGF.getLoweredType(AbstractionPattern::getOpaque(), + calleeTypeInfo.substResultType); + resumeBuf = SGF.emitTemporaryAllocation(loc, opaqueResumeType); + } void gatherIndirectResultAddrs(SILGenFunction &SGF, SILLocation loc, SmallVectorImpl &outList) const override { - // TODO: Move values from the continuation result buffer to the individual - // out argument buffers, unless we were able to emit the resume buffer - // in-place. + // A foreign async function shouldn't have any indirect results. + } + + ManagedValue + emitForeignAsyncCompletionHandler(SILGenFunction &SGF, SILLocation loc) + override { + // Get the current continuation for the task. 
+ auto continuationDecl = calleeTypeInfo.foreign.async->completionHandlerErrorParamIndex() + ? SGF.getASTContext().getUnsafeThrowingContinuationDecl() + : SGF.getASTContext().getUnsafeContinuationDecl(); + + auto continuationTy = BoundGenericType::get(continuationDecl, Type(), + calleeTypeInfo.substResultType) + ->getCanonicalType(); + + + continuation = SGF.B.createGetAsyncContinuationAddr(loc, resumeBuf, + SILType::getPrimitiveObjectType(continuationTy)); + + // Stash it in a buffer for a block object. + auto blockStorageTy = SILType::getPrimitiveAddressType(SILBlockStorageType::get(continuationTy)); + auto blockStorage = SGF.emitTemporaryAllocation(loc, blockStorageTy); + auto continuationAddr = SGF.B.createProjectBlockStorage(loc, blockStorage); + SGF.B.createStore(loc, continuation, continuationAddr, + StoreOwnershipQualifier::Trivial); + + // Get the block invocation function for the given completion block type. + auto completionHandlerIndex = calleeTypeInfo.foreign.async + ->completionHandlerParamIndex(); + auto implTy = cast(calleeTypeInfo.substFnType + ->getParameters()[completionHandlerIndex] + .getInterfaceType()); + SILFunction *impl = SGF.SGM + .getOrCreateForeignAsyncCompletionHandlerImplFunction(implTy, + continuationTy); + auto implRef = SGF.B.createFunctionRef(loc, impl); + + // Initialize the block object for the completion handler. + auto block = SGF.B.createInitBlockStorageHeader(loc, blockStorage, implRef, + SILType::getPrimitiveObjectType(implTy), {}); + // We don't need to manage the block because it's still on the stack. We + // know we won't escape it locally so the callee can be responsible for + // _Block_copy-ing it. + return ManagedValue::forUnmanaged(block); } RValue finish(SILGenFunction &SGF, SILLocation loc, CanType substType, @@ -469,9 +524,44 @@ class ForeignAsyncInitializationPlan final : public ResultPlan { // There should be no direct results from the call. 
assert(directResults.empty()); - // TODO: Get the actual result values from the awaited continuation. - // For now, produce an undef RValue. - return SGF.emitUndefRValue(loc, substType); + // Await the continuation we handed off to the completion handler. + SILBasicBlock *resumeBlock = SGF.createBasicBlock(); + SILBasicBlock *errorBlock = nullptr; + auto errorParamIndex = calleeTypeInfo.foreign.async->completionHandlerErrorParamIndex(); + if (errorParamIndex) { + errorBlock = SGF.createBasicBlock(FunctionSection::Postmatter); + } + + SGF.B.createAwaitAsyncContinuation(loc, continuation, resumeBlock, errorBlock); + + // Propagate an error if we have one. + if (errorBlock) { + SGF.B.emitBlock(errorBlock); + + Scope errorScope(SGF, loc); + + auto errorTy = SGF.getASTContext().getErrorDecl()->getDeclaredType() + ->getCanonicalType(); + auto errorVal + = SGF.B.createOwnedPhiArgument(SILType::getPrimitiveObjectType(errorTy)); + + SGF.emitThrow(loc, errorVal, true); + } + + SGF.B.emitBlock(resumeBlock); + + // The incoming value is the maximally-abstracted result type of the + // continuation. Move it out of the resume buffer and reabstract it if + // necessary. + auto resumeResult = SGF.emitLoad(loc, resumeBuf, + calleeTypeInfo.origResultType + ? *calleeTypeInfo.origResultType + : AbstractionPattern(calleeTypeInfo.substResultType), + calleeTypeInfo.substResultType, + SGF.getTypeLowering(calleeTypeInfo.substResultType), + SGFContext(), IsTake); + + return RValue(SGF, loc, calleeTypeInfo.substResultType, resumeResult); } }; @@ -572,8 +662,7 @@ ResultPlanPtr ResultPlanBuilder::buildTopLevelResult(Initialization *init, // Create a result plan that gets the result schema from the completion // handler callback's arguments. // completion handler. 
- return ResultPlanPtr(new ForeignAsyncInitializationPlan(loc)); - + return ResultPlanPtr(new ForeignAsyncInitializationPlan(SGF, loc, calleeTypeInfo)); } else if (auto foreignError = calleeTypeInfo.foreign.error) { // Handle the foreign error first. // diff --git a/lib/SILGen/ResultPlan.h b/lib/SILGen/ResultPlan.h index 93d0db808be4c..39a62477dbfe7 100644 --- a/lib/SILGen/ResultPlan.h +++ b/lib/SILGen/ResultPlan.h @@ -49,6 +49,11 @@ class ResultPlan { emitForeignErrorArgument(SILGenFunction &SGF, SILLocation loc) { return None; } + + virtual ManagedValue + emitForeignAsyncCompletionHandler(SILGenFunction &SGF, SILLocation loc) { + return {}; + } }; using ResultPlanPtr = std::unique_ptr; diff --git a/lib/SILGen/SILGen.h b/lib/SILGen/SILGen.h index 37a0e7fdb11b8..b29cdfb174db2 100644 --- a/lib/SILGen/SILGen.h +++ b/lib/SILGen/SILGen.h @@ -163,6 +163,13 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { CanSILFunctionType fromType, CanSILFunctionType toType, CanType dynamicSelfType); + + /// Get or create the declaration of a completion handler block + /// implementation function for an ObjC API that was imported + /// as `async` in Swift. + SILFunction *getOrCreateForeignAsyncCompletionHandlerImplFunction( + CanSILFunctionType blockType, + CanType continuationTy); /// Determine whether the given class has any instance variables that /// need to be destroyed. diff --git a/lib/SILGen/SILGenApply.cpp b/lib/SILGen/SILGenApply.cpp index 7682802ed7e67..5e19008366c13 100644 --- a/lib/SILGen/SILGenApply.cpp +++ b/lib/SILGen/SILGenApply.cpp @@ -3276,14 +3276,15 @@ class ArgEmitter { if (Foreign.async && Foreign.async->completionHandlerParamIndex() == Args.size()) { SILParameterInfo param = claimNextParameters(1).front(); - - // TODO: Get or create the completion handler block implementation - // function for the given argument type, then create a block containing - // the current continuation. 
(This probably needs to be deferred to right - // before the actual call, since evaluating other arguments to the call - // may suspend the task) - auto argTy = SILType::getPrimitiveObjectType(param.getInterfaceType()); - Args.push_back(ManagedValue::forUnmanaged(SILUndef::get(argTy, SGF.F))); + (void)param; + + // Leave a placeholder in the position. We'll fill this in with a block + // capturing the current continuation right before we invoke the + // function. + // (We can't do this immediately, because evaluating other arguments + // may require suspending the async task, which is not allowed while its + // continuation is active.) + Args.push_back(ManagedValue::forInContext()); } else if (Foreign.error && Foreign.error->getErrorParameterIndex() == Args.size()) { SILParameterInfo param = claimNextParameters(1).front(); @@ -4327,14 +4328,21 @@ RValue SILGenFunction::emitApply(ResultPlanPtr &&resultPlan, // If there's a foreign error or async parameter, fill it in. ManagedValue errorTemp; - if (calleeTypeInfo.foreign.async) { - // TODO: prepare the callback continuation and block here. - - } else if (calleeTypeInfo.foreign.error) { + if (auto foreignAsync = calleeTypeInfo.foreign.async) { + unsigned completionIndex = foreignAsync->completionHandlerParamIndex(); + + // Ram the emitted completion handler block into the argument list, over the placeholder + // we left during the first pass. + auto &completionArgSlot = const_cast(args[completionIndex]); + + completionArgSlot = resultPlan->emitForeignAsyncCompletionHandler(*this, loc); + + } else if (auto foreignError = calleeTypeInfo.foreign.error) { unsigned errorParamIndex = - calleeTypeInfo.foreign.error->getErrorParameterIndex(); + foreignError->getErrorParameterIndex(); - // This is pretty evil. + // Ram the emitted error into the argument list, over the placeholder + // we left during the first pass.
auto &errorArgSlot = const_cast(args[errorParamIndex]); std::tie(errorTemp, errorArgSlot) = @@ -4442,9 +4450,6 @@ RValue SILGenFunction::emitApply(ResultPlanPtr &&resultPlan, *foreignError); } - // TODO(async): If there's a foreign async convention, await the continuation - // to get the result from the completion callback. - auto directResultsArray = makeArrayRef(directResults); RValue result = resultPlan->finish(*this, loc, substResultType, directResultsArray); diff --git a/lib/SILGen/SILGenThunk.cpp b/lib/SILGen/SILGenThunk.cpp index 74d1efeceec69..d56edea9abe02 100644 --- a/lib/SILGen/SILGenThunk.cpp +++ b/lib/SILGen/SILGenThunk.cpp @@ -138,6 +138,60 @@ SILGenFunction::emitGlobalFunctionRef(SILLocation loc, SILDeclRef constant, return B.createFunctionRefFor(loc, f); } +SILFunction * +SILGenModule::getOrCreateForeignAsyncCompletionHandlerImplFunction( + CanSILFunctionType blockType, + CanType continuationTy) { + // Extract the result type from the continuation type. + auto resumeType = cast(continuationTy).getGenericArgs()[0]; + + // Build up the implementation function type, which matches the + // block signature with an added block storage argument that points at the + // block buffer. The block storage holds the continuation we feed the + // result values into. 
+ SmallVector implArgs; + auto blockStorageTy = SILBlockStorageType::get(continuationTy); + implArgs.push_back(SILParameterInfo(blockStorageTy, + ParameterConvention::Indirect_InoutAliasable)); + + std::copy(blockType->getParameters().begin(), + blockType->getParameters().end(), + std::back_inserter(implArgs)); + + auto implTy = SILFunctionType::get(GenericSignature(), + blockType->getExtInfo() + .withRepresentation(SILFunctionTypeRepresentation::CFunctionPointer), + SILCoroutineKind::None, + ParameterConvention::Direct_Unowned, + implArgs, {}, blockType->getResults(), + None, + SubstitutionMap(), SubstitutionMap(), getASTContext()); + + auto loc = RegularLocation::getAutoGeneratedLocation(); + + Mangle::ASTMangler Mangler; + auto name = Mangler.mangleObjCAsyncCompletionHandlerImpl(blockType, + resumeType); + + SILGenFunctionBuilder builder(*this); + auto F = builder.getOrCreateSharedFunction(loc, name, implTy, + IsBare, IsTransparent, IsSerializable, + ProfileCounter(), + IsThunk, + IsNotDynamic); + + if (F->empty()) { + // TODO: Emit the implementation. 
+ SILGenFunction SGF(*this, *F, SwiftModule); + SmallVector params; + SGF.collectThunkParams(loc, params); + + SGF.B.createUnreachable(loc); + } + + return F; +} + SILFunction *SILGenModule:: getOrCreateReabstractionThunk(CanSILFunctionType thunkType, CanSILFunctionType fromType, diff --git a/test/SILGen/objc_async.swift b/test/SILGen/objc_async.swift index 94f89ad9f6e3e..0d1e0c13c1797 100644 --- a/test/SILGen/objc_async.swift +++ b/test/SILGen/objc_async.swift @@ -6,9 +6,39 @@ import ObjCConcurrency // CHECK-LABEL: sil {{.*}}@${{.*}}14testSlowServer func testSlowServer(slowServer: SlowServer) async throws { - // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) (Int) -> (), SlowServer) -> () + // CHECK: [[RESUME_BUF:%.*]] = alloc_stack $Int + // CHECK: [[METHOD:%.*]] = objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) (Int) -> (), SlowServer) -> () + // CHECK: [[CONT:%.*]] = get_async_continuation_addr $Int, [[RESUME_BUF]] + // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeContinuation + // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] + // CHECK: store [[CONT]] to [[CONT_SLOT]] + // CHECK: [[BLOCK_IMPL:%.*]] = %20 = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation, Int) -> () + // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] + // CHECK: apply [[METHOD]]({{.*}}, [[BLOCK]], %0) + // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]] + // CHECK: [[RESUME]]: + // CHECK: [[RESULT:%.*]] = load [trivial] [[RESUME_BUF]] + // CHECK: dealloc_stack [[RESUME_BUF]] let _: Int = await slowServer.doSomethingSlow("mail") - // CHECK: objc_method {{.*}} $@convention(objc_method) (@convention(block) (Optional, Optional) -> (), SlowServer) -> () + + // CHECK: [[RESUME_BUF:%.*]] = alloc_stack $Optional + // CHECK: [[METHOD:%.*]] = objc_method {{.*}} 
$@convention(objc_method) (@convention(block) (Optional, Optional) -> (), SlowServer) -> () + // CHECK: [[CONT:%.*]] = get_async_continuation_addr $Optional, [[RESUME_BUF]] + // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeContinuation> + // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] + // CHECK: store [[CONT]] to [[CONT_SLOT]] + // CHECK: [[BLOCK_IMPL:%.*]] = %20 = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation>, Optional, Optional) -> () + // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] + // CHECK: apply [[METHOD]]({{.*}}, [[BLOCK]], %0) + // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] + // CHECK: [[RESUME]]: + // CHECK: [[RESULT:%.*]] = load [take] [[RESUME_BUF]] + // CHECK: destroy_value [[RESULT]] + // CHECK: dealloc_stack [[RESUME_BUF]] + // CHECK: [[ERROR]]([[ERROR_VALUE:%.*]] : @owned $Error): + // CHECK: dealloc_stack [[RESUME_BUF]] + // CHECK: throw [[ERROR_VALUE]] + let _: String? = try await slowServer.findAnswer() // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) () -> (), SlowServer) -> () From 4b33f26add277895e6f054a6d33c3d829539960d Mon Sep 17 00:00:00 2001 From: Joe Groff Date: Mon, 2 Nov 2020 14:25:51 -0800 Subject: [PATCH 02/75] Reserve a mangling for predefined completion handler impls in the runtime. To manage code size in user binaries, we want to be able to implement common completion handler signatures in the Swift runtime once. Using a different mangling for these lets us add new ones without clobbering symbols in existing binaries. 
--- docs/ABI/Mangling.rst | 1 + include/swift/AST/ASTMangler.h | 8 ++++++- include/swift/Demangling/DemangleNodes.def | 1 + lib/AST/ASTMangler.cpp | 5 +++-- lib/Demangling/Demangler.cpp | 7 ++++-- lib/Demangling/NodePrinter.cpp | 4 ++++ lib/Demangling/OldRemangler.cpp | 3 +++ lib/Demangling/Remangler.cpp | 5 +++++ lib/SILGen/SILGenThunk.cpp | 3 ++- test/SILGen/objc_async.swift | 25 +++++++++++----------- 10 files changed, 44 insertions(+), 18 deletions(-) diff --git a/docs/ABI/Mangling.rst b/docs/ABI/Mangling.rst index f55da15251570..fb64bbea807f1 100644 --- a/docs/ABI/Mangling.rst +++ b/docs/ABI/Mangling.rst @@ -220,6 +220,7 @@ types where the metadata itself has unknown layout.) global ::= from-type to-type generic-signature? 'TR' // reabstraction thunk global ::= from-type to-type generic-signature? 'TR' // reabstraction thunk global ::= impl-function-type type 'Tz' // objc-to-swift-async completion handler block implementation + global ::= impl-function-type type 'TZ' // objc-to-swift-async completion handler block implementation (predefined by runtime) global ::= from-type to-type self-type generic-signature? 'Ty' // reabstraction thunk with dynamic 'Self' capture global ::= from-type to-type generic-signature? 'Tr' // obsolete mangling for reabstraction thunk global ::= entity generic-signature? type type* 'TK' // key path getter diff --git a/include/swift/AST/ASTMangler.h b/include/swift/AST/ASTMangler.h index d4a628374c3e0..6b69083c97de4 100644 --- a/include/swift/AST/ASTMangler.h +++ b/include/swift/AST/ASTMangler.h @@ -155,8 +155,14 @@ class ASTMangler : public Mangler { Type SelfType, ModuleDecl *Module); + /// Mangle a completion handler block implementation function, used for importing ObjC + /// APIs as async. + /// + /// - If `predefined` is true, this mangles the symbol name of the completion handler + /// predefined in the Swift runtime for the given type signature. 
std::string mangleObjCAsyncCompletionHandlerImpl(CanSILFunctionType BlockType, - CanType ResultType); + CanType ResultType, + bool predefined); /// Mangle the derivative function (JVP/VJP) for the given: /// - Mangled original function name. diff --git a/include/swift/Demangling/DemangleNodes.def b/include/swift/Demangling/DemangleNodes.def index 22c704446f77d..a5b8527b634bf 100644 --- a/include/swift/Demangling/DemangleNodes.def +++ b/include/swift/Demangling/DemangleNodes.def @@ -160,6 +160,7 @@ NODE(NominalTypeDescriptor) NODE(NonObjCAttribute) NODE(Number) NODE(ObjCAsyncCompletionHandlerImpl) +NODE(PredefinedObjCAsyncCompletionHandlerImpl) NODE(ObjCAttribute) NODE(ObjCBlock) NODE(EscapingObjCBlock) diff --git a/lib/AST/ASTMangler.cpp b/lib/AST/ASTMangler.cpp index 9092c5681d6be..5fab71b7d4a6a 100644 --- a/lib/AST/ASTMangler.cpp +++ b/lib/AST/ASTMangler.cpp @@ -396,11 +396,12 @@ std::string ASTMangler::mangleReabstractionThunkHelper( std::string ASTMangler::mangleObjCAsyncCompletionHandlerImpl( CanSILFunctionType BlockType, - CanType ResultType) { + CanType ResultType, + bool predefined) { beginMangling(); appendType(BlockType); appendType(ResultType); - appendOperator("Tz"); + appendOperator(predefined ? "TZ" : "Tz"); return finalize(); } diff --git a/lib/Demangling/Demangler.cpp b/lib/Demangling/Demangler.cpp index 9ff7bfc6c51cb..82284bb70edad 100644 --- a/lib/Demangling/Demangler.cpp +++ b/lib/Demangling/Demangler.cpp @@ -2263,10 +2263,13 @@ NodePointer Demangler::popProtocolConformance() { NodePointer type = popNode(Node::Kind::Type); return createWithChild(Node::Kind::CoroutineContinuationPrototype, type); } - case 'z': { + case 'z': + case 'Z': { NodePointer resultType = popNode(Node::Kind::Type); NodePointer implType = popNode(Node::Kind::Type); - return createWithChildren(Node::Kind::ObjCAsyncCompletionHandlerImpl, + return createWithChildren(c == 'z' + ? 
Node::Kind::ObjCAsyncCompletionHandlerImpl + : Node::Kind::PredefinedObjCAsyncCompletionHandlerImpl, implType, resultType); } case 'V': { diff --git a/lib/Demangling/NodePrinter.cpp b/lib/Demangling/NodePrinter.cpp index 1412de539b40a..734f203f285a7 100644 --- a/lib/Demangling/NodePrinter.cpp +++ b/lib/Demangling/NodePrinter.cpp @@ -455,6 +455,7 @@ class NodePrinter { case Node::Kind::PartialApplyForwarder: case Node::Kind::PartialApplyObjCForwarder: case Node::Kind::PostfixOperator: + case Node::Kind::PredefinedObjCAsyncCompletionHandlerImpl: case Node::Kind::PrefixOperator: case Node::Kind::PrivateDeclName: case Node::Kind::PropertyDescriptor: @@ -2533,6 +2534,9 @@ NodePointer NodePrinter::print(NodePointer Node, bool asPrefixContext) { Printer << ')'; } return nullptr; + case Node::Kind::PredefinedObjCAsyncCompletionHandlerImpl: + Printer << "predefined "; + LLVM_FALLTHROUGH; case Node::Kind::ObjCAsyncCompletionHandlerImpl: Printer << "@objc completion handler block implementation for "; print(Node->getChild(0)); diff --git a/lib/Demangling/OldRemangler.cpp b/lib/Demangling/OldRemangler.cpp index d311cf298b05c..88e6668d1a760 100644 --- a/lib/Demangling/OldRemangler.cpp +++ b/lib/Demangling/OldRemangler.cpp @@ -2166,6 +2166,9 @@ void Remangler::mangleGlobalVariableOnceFunction(Node *node) { void Remangler::mangleGlobalVariableOnceDeclList(Node *node) { unreachable("unsupported"); } +void Remangler::manglePredefinedObjCAsyncCompletionHandlerImpl(Node *node) { + unreachable("unsupported"); +} void Remangler::mangleObjCAsyncCompletionHandlerImpl(Node *node) { unreachable("unsupported"); } diff --git a/lib/Demangling/Remangler.cpp b/lib/Demangling/Remangler.cpp index c0bab96963a31..a377f8b5fc45b 100644 --- a/lib/Demangling/Remangler.cpp +++ b/lib/Demangling/Remangler.cpp @@ -805,6 +805,11 @@ void Remangler::mangleCoroutineContinuationPrototype(Node *node) { Buffer << "TC"; } +void Remangler::manglePredefinedObjCAsyncCompletionHandlerImpl(Node *node) { + 
mangleChildNodes(node); + Buffer << "TZ"; +} + void Remangler::mangleObjCAsyncCompletionHandlerImpl(Node *node) { mangleChildNodes(node); Buffer << "Tz"; diff --git a/lib/SILGen/SILGenThunk.cpp b/lib/SILGen/SILGenThunk.cpp index d56edea9abe02..e9d3f02437973 100644 --- a/lib/SILGen/SILGenThunk.cpp +++ b/lib/SILGen/SILGenThunk.cpp @@ -171,7 +171,8 @@ SILGenModule::getOrCreateForeignAsyncCompletionHandlerImplFunction( Mangle::ASTMangler Mangler; auto name = Mangler.mangleObjCAsyncCompletionHandlerImpl(blockType, - resumeType); + resumeType, + /*predefined*/ false); SILGenFunctionBuilder builder(*this); auto F = builder.getOrCreateSharedFunction(loc, name, implTy, diff --git a/test/SILGen/objc_async.swift b/test/SILGen/objc_async.swift index 0d1e0c13c1797..f962609298b64 100644 --- a/test/SILGen/objc_async.swift +++ b/test/SILGen/objc_async.swift @@ -11,8 +11,8 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[CONT:%.*]] = get_async_continuation_addr $Int, [[RESUME_BUF]] // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeContinuation // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] - // CHECK: store [[CONT]] to [[CONT_SLOT]] - // CHECK: [[BLOCK_IMPL:%.*]] = %20 = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation, Int) -> () + // CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation, Int) -> () // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] // CHECK: apply [[METHOD]]({{.*}}, [[BLOCK]], %0) // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]] @@ -21,26 +21,27 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: dealloc_stack [[RESUME_BUF]] let _: Int = await slowServer.doSomethingSlow("mail") - // CHECK: [[RESUME_BUF:%.*]] = alloc_stack $Optional + // CHECK: 
[[RESUME_BUF:%.*]] = alloc_stack $String // CHECK: [[METHOD:%.*]] = objc_method {{.*}} $@convention(objc_method) (@convention(block) (Optional, Optional) -> (), SlowServer) -> () - // CHECK: [[CONT:%.*]] = get_async_continuation_addr $Optional, [[RESUME_BUF]] - // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeContinuation> + // CHECK: [[CONT:%.*]] = get_async_continuation_addr [throws] $String, [[RESUME_BUF]] + // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeThrowingContinuation // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] - // CHECK: store [[CONT]] to [[CONT_SLOT]] - // CHECK: [[BLOCK_IMPL:%.*]] = %20 = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation>, Optional, Optional) -> () + // CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation, Optional, Optional) -> () // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] - // CHECK: apply [[METHOD]]({{.*}}, [[BLOCK]], %0) + // CHECK: apply [[METHOD]]([[BLOCK]], %0) // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] // CHECK: [[RESUME]]: // CHECK: [[RESULT:%.*]] = load [take] [[RESUME_BUF]] // CHECK: destroy_value [[RESULT]] // CHECK: dealloc_stack [[RESUME_BUF]] + let _: String = try await slowServer.findAnswer() + + // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) () -> (), SlowServer) -> () + await slowServer.serverRestart("somewhere") + // CHECK: [[ERROR]]([[ERROR_VALUE:%.*]] : @owned $Error): // CHECK: dealloc_stack [[RESUME_BUF]] // CHECK: throw [[ERROR_VALUE]] - let _: String? 
= try await slowServer.findAnswer() - - // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) () -> (), SlowServer) -> () - await slowServer.serverRestart("somewhere") } From d9a14836e2c81a368b99df4cca47f4b9ed0c185c Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Thu, 9 Jul 2020 10:31:30 -0700 Subject: [PATCH 03/75] Add a simple AccessUseDefChainCloner. --- include/swift/SIL/MemAccessUtils.h | 78 +++++++++++++++++++++++++++++- lib/SIL/Utils/MemAccessUtils.cpp | 15 +++--- 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/include/swift/SIL/MemAccessUtils.h b/include/swift/SIL/MemAccessUtils.h index 2b01282935a2e..231c1d90a58c7 100644 --- a/include/swift/SIL/MemAccessUtils.h +++ b/include/swift/SIL/MemAccessUtils.h @@ -1368,7 +1368,7 @@ class AccessUseDefChainVisitor { // Result visitNonAccess(SILValue base); // Result visitPhi(SILPhiArgument *phi); // Result visitStorageCast(SingleValueInstruction *cast, Operand *sourceOper); - // Result visitAccessProjection(SingleValueInstruction *cast, + // Result visitAccessProjection(SingleValueInstruction *projectedAddr, // Operand *sourceOper); Result visit(SILValue sourceAddr); @@ -1479,6 +1479,82 @@ Result AccessUseDefChainVisitor::visit(SILValue sourceAddr) { } // end namespace swift +//===----------------------------------------------------------------------===// +// AccessUseDefChainCloner +//===----------------------------------------------------------------------===// + +namespace swift { + +/// Clone all projections and casts on the access use-def chain until either the +/// specified predicate is true or the access base is reached. +/// +/// This will not clone ref_element_addr or ref_tail_addr because those aren't +/// part of the access chain. 
+template +class AccessUseDefChainCloner + : public AccessUseDefChainVisitor, + SILValue> { + UnaryPredicate predicate; + SILInstruction *insertionPoint; + +public: + AccessUseDefChainCloner(UnaryPredicate predicate, + SILInstruction *insertionPoint) + : predicate(predicate), insertionPoint(insertionPoint) {} + + // Recursive main entry point + SILValue cloneUseDefChain(SILValue addr) { + if (!predicate(addr)) + return addr; + + return this->visit(addr); + } + + // Recursively clone an address on the use-def chain. + SingleValueInstruction *cloneProjection(SingleValueInstruction *projectedAddr, + Operand *sourceOper) { + SILValue projectedSource = cloneUseDefChain(sourceOper->get()); + SILInstruction *clone = projectedAddr->clone(insertionPoint); + clone->setOperand(sourceOper->getOperandNumber(), projectedSource); + return cast(clone); + } + + // MARK: Visitor implementation + + SILValue visitBase(SILValue base, AccessedStorage::Kind kind) { + assert(false && "access base cannot be cloned"); + } + + SILValue visitNonAccess(SILValue base) { + assert(false && "unknown address root cannot be cloned"); + return SILValue(); + } + + SILValue visitPhi(SILPhiArgument *phi) { + assert(false && "unexpected phi on access path"); + return SILValue(); + } + + SILValue visitStorageCast(SingleValueInstruction *cast, Operand *sourceOper) { + return cloneProjection(cast, sourceOper); + } + + SILValue visitAccessProjection(SingleValueInstruction *projectedAddr, + Operand *sourceOper) { + return cloneProjection(projectedAddr, sourceOper); + } +}; + +template +SILValue cloneUseDefChain(SILValue addr, SILInstruction *insertionPoint, + UnaryPredicate shouldFollowUse) { + return AccessUseDefChainCloner(shouldFollowUse, + insertionPoint) + .cloneUseDefChain(addr); +} + +} // end namespace swift + //===----------------------------------------------------------------------===// // MARK: Verification //===----------------------------------------------------------------------===// diff --git 
a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index 1abab93e3d816..afb5c2c443bc1 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -122,14 +122,13 @@ class AccessPhiVisitor phiArg->getIncomingPhiValues(pointerWorklist); } - void visitStorageCast(SingleValueInstruction *projectedAddr, - Operand *sourceOper) { + void visitStorageCast(SingleValueInstruction *cast, Operand *sourceOper) { // Allow conversions to/from pointers and addresses on disjoint phi paths // only if the underlying useDefVisitor allows it. if (storageCastTy == IgnoreStorageCast) pointerWorklist.push_back(sourceOper->get()); else - visitNonAccess(projectedAddr); + visitNonAccess(cast); } void visitAccessProjection(SingleValueInstruction *projectedAddr, @@ -207,8 +206,7 @@ class FindAccessVisitorImpl : public AccessUseDefChainVisitor { return this->asImpl().visitNonAccess(phiArg); } - SILValue visitStorageCast(SingleValueInstruction *projectedAddr, - Operand *sourceAddr) { + SILValue visitStorageCast(SingleValueInstruction *, Operand *sourceAddr) { assert(storageCastTy == IgnoreStorageCast); return sourceAddr->get(); } @@ -303,12 +301,11 @@ class FindAccessBaseVisitor } // Override visitStorageCast to avoid seeing through arbitrary address casts. - SILValue visitStorageCast(SingleValueInstruction *projectedAddr, - Operand *sourceAddr) { + SILValue visitStorageCast(SingleValueInstruction *cast, Operand *sourceAddr) { if (storageCastTy == StopAtStorageCast) - return visitNonAccess(projectedAddr); + return visitNonAccess(cast); - return SuperTy::visitStorageCast(projectedAddr, sourceAddr); + return SuperTy::visitStorageCast(cast, sourceAddr); } }; From d86099f05fcd8a05093f6bbe44d5312ebbf7f37c Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 7 Jul 2020 11:01:43 -0700 Subject: [PATCH 04/75] Use AccessPath in LICM. 
The LICM algorithm was not robust with respect to address projection because it identifies a projected address by its SILValue. This should never be done! Use AccessPath instead. Fixes regressions caused by rdar://66791257 (Print statement provokes "Can't unsafeBitCast between types of different sizes" when optimizations enabled) --- include/swift/SIL/LoopInfo.h | 2 + lib/SILOptimizer/LoopTransforms/LICM.cpp | 259 +++++++++++++++-------- test/SILOptimizer/licm.sil | 37 ++-- 3 files changed, 187 insertions(+), 111 deletions(-) diff --git a/include/swift/SIL/LoopInfo.h b/include/swift/SIL/LoopInfo.h index 200797c1a76d4..1ab4d6fe0e4ce 100644 --- a/include/swift/SIL/LoopInfo.h +++ b/include/swift/SIL/LoopInfo.h @@ -60,6 +60,8 @@ class SILLoop : public llvm::LoopBase { } } + SILFunction *getFunction() const { return getHeader()->getParent(); } + private: friend class llvm::LoopInfoBase; diff --git a/lib/SILOptimizer/LoopTransforms/LICM.cpp b/lib/SILOptimizer/LoopTransforms/LICM.cpp index bf489fe892e13..7b82f42898683 100644 --- a/lib/SILOptimizer/LoopTransforms/LICM.cpp +++ b/lib/SILOptimizer/LoopTransforms/LICM.cpp @@ -15,6 +15,7 @@ #include "swift/SIL/Dominance.h" #include "swift/SIL/InstructionUtils.h" #include "swift/SIL/MemAccessUtils.h" +#include "swift/SIL/Projection.h" #include "swift/SIL/SILArgument.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILInstruction.h" @@ -61,8 +62,8 @@ static bool mayWriteTo(AliasAnalysis *AA, InstSet &SideEffectInsts, return false; } -/// Returns true if \p I is a store to \p addr. -static StoreInst *isStoreToAddr(SILInstruction *I, SILValue addr) { +/// Returns a non-null StoreInst if \p I is a store to \p accessPath. 
+static StoreInst *isStoreToAccess(SILInstruction *I, AccessPath accessPath) { auto *SI = dyn_cast(I); if (!SI) return nullptr; @@ -71,53 +72,91 @@ static StoreInst *isStoreToAddr(SILInstruction *I, SILValue addr) { if (SI->getOwnershipQualifier() == StoreOwnershipQualifier::Init) return nullptr; - if (SI->getDest() != addr) + auto storeAccessPath = AccessPath::compute(SI->getDest()); + if (accessPath != storeAccessPath) return nullptr; return SI; } -/// Returns true if \p I is a load from \p addr or a projected address from -/// \p addr. -static LoadInst *isLoadFromAddr(SILInstruction *I, SILValue addr) { +struct LoadWithAccess { + LoadInst *li = nullptr; + AccessPath accessPath; + + operator bool() { return li != nullptr; } +}; + +static LoadWithAccess doesLoadOverlapAccess(SILInstruction *I, + AccessPath accessPath) { auto *LI = dyn_cast_or_null(I); if (!LI) - return nullptr; + return LoadWithAccess(); - // TODO: handle StoreOwnershipQualifier::Take + // TODO: handle LoadOwnershipQualifier::Take if (LI->getOwnershipQualifier() == LoadOwnershipQualifier::Take) - return nullptr; + return LoadWithAccess(); - SILValue v = LI->getOperand(); - for (;;) { - if (v == addr) { - return LI; - } else if (isa(v) || isa(v)) { - v = cast(v)->getOperand(0); - } else { - return nullptr; + AccessPath loadAccessPath = AccessPath::compute(LI->getOperand()); + if (!loadAccessPath.isValid()) + return LoadWithAccess(); + + // Don't use AccessPath::mayOverlap. We only want definite overlap. + if (loadAccessPath.contains(accessPath) + || accessPath.contains(loadAccessPath)) { + return {LI, loadAccessPath}; + } + return LoadWithAccess(); +} + +/// Returns a valid LoadWithAccess if \p I is a load from \p accessPath or a +/// projected address from \p accessPath. 
+static LoadWithAccess isLoadWithinAccess(SILInstruction *I, + AccessPath accessPath) { + auto loadWithAccess = doesLoadOverlapAccess(I, accessPath); + if (!loadWithAccess) + return loadWithAccess; + + // Make sure that any additional path components beyond the store's access + // path can be converted to value projections during projectLoadValue (it + // currently only supports StructElementAddr and TupleElementAddr). + auto storePathNode = accessPath.getPathNode(); + auto loadPathNode = loadWithAccess.accessPath.getPathNode(); + SILValue loadAddr = loadWithAccess.li->getOperand(); + while (loadPathNode != storePathNode) { + if (!isa(loadAddr) + && !isa(loadAddr)) { + return LoadWithAccess(); } + loadAddr = cast(loadAddr)->getOperand(0); + loadPathNode = loadPathNode.getParent(); } + return loadWithAccess; } /// Returns true if all instructions in \p SideEffectInsts which may alias with -/// \p addr are either loads or stores from \p addr. +/// \p access are either loads or stores from \p access. +/// +/// \p storeAddr is only needed for AliasAnalysis until we have an interface +/// that supports AccessPath. 
static bool isOnlyLoadedAndStored(AliasAnalysis *AA, InstSet &SideEffectInsts, ArrayRef Loads, ArrayRef Stores, - SILValue addr) { + SILValue storeAddr, AccessPath accessPath) { for (auto *I : SideEffectInsts) { - if (AA->mayReadOrWriteMemory(I, addr) && - !isStoreToAddr(I, addr) && !isLoadFromAddr(I, addr)) { + // Pass the original address value until we can fix AA + if (AA->mayReadOrWriteMemory(I, storeAddr) + && !isStoreToAccess(I, accessPath) + && !isLoadWithinAccess(I, accessPath)) { return false; } } for (auto *LI : Loads) { - if (AA->mayReadFromMemory(LI, addr) && !isLoadFromAddr(LI, addr)) + if (AA->mayReadFromMemory(LI, storeAddr) + && !doesLoadOverlapAccess(LI, accessPath)) return false; } for (auto *SI : Stores) { - if (AA->mayWriteToMemory(SI, addr) && !isStoreToAddr(SI, addr)) + if (AA->mayWriteToMemory(SI, storeAddr) && !isStoreToAccess(SI, accessPath)) return false; } return true; @@ -488,8 +527,8 @@ class LoopTreeOptimization { /// Load and store instructions that we may be able to move out of the loop. InstVector LoadsAndStores; - /// All addresses of the \p LoadsAndStores instructions. - llvm::SetVector LoadAndStoreAddrs; + /// All access paths of the \p LoadsAndStores instructions. + llvm::SetVector LoadAndStoreAddrs; /// Hoistable Instructions that need special treatment /// e.g. begin_access @@ -525,8 +564,9 @@ class LoopTreeOptimization { /// Optimize the current loop nest. bool optimizeLoop(std::unique_ptr &CurrSummary); - /// Move all loads and stores from/to \p addr out of the \p loop. - void hoistLoadsAndStores(SILValue addr, SILLoop *loop, InstVector &toDelete); + /// Move all loads and stores from/to \p access out of the \p loop. + void hoistLoadsAndStores(AccessPath accessPath, SILLoop *loop, + InstVector &toDelete); /// Move all loads and stores from all addresses in LoadAndStoreAddrs out of /// the \p loop. 
@@ -876,10 +916,15 @@ void LoopTreeOptimization::analyzeCurrentLoop( // Collect memory locations for which we can move all loads and stores out // of the loop. for (StoreInst *SI : Stores) { - SILValue addr = SI->getDest(); - if (isLoopInvariant(addr, Loop) && - isOnlyLoadedAndStored(AA, sideEffects, Loads, Stores, addr)) { - LoadAndStoreAddrs.insert(addr); + // Use AccessPathWithBase to recover a base address that can be used for + // newly inserted memory operations. If we instead teach hoistLoadsAndStores + // how to rematerialize global_addr, then we don't need this base. + auto access = AccessPathWithBase::compute(SI->getDest()); + if (access.accessPath.isValid() && isLoopInvariant(access.base, Loop)) { + if (isOnlyLoadedAndStored(AA, sideEffects, Loads, Stores, SI->getDest(), + access.accessPath)) { + LoadAndStoreAddrs.insert(accessPath); + } } } if (!FixLifetimes.empty()) { @@ -923,22 +968,31 @@ bool LoopTreeOptimization::optimizeLoop( } /// Creates a value projection from \p rootVal based on the address projection -/// from \a rootAddr to \a addr. -static SILValue projectLoadValue(SILValue addr, SILValue rootAddr, - SILValue rootVal, SILInstruction *beforeInst) { - if (addr == rootAddr) +/// from \a rootVal to \a addr. 
+static SILValue projectLoadValue(SILValue addr, AccessPath accessPath, + SILValue rootVal, AccessPath rootAccessPath, + SILInstruction *beforeInst) { + if (accessPath == rootAccessPath) return rootVal; + auto pathNode = accessPath.getPathNode(); + int elementIdx = pathNode.getIndex().getSubObjectIndex(); if (auto *SEI = dyn_cast(addr)) { - SILValue val = projectLoadValue(SEI->getOperand(), rootAddr, rootVal, - beforeInst); + assert(ProjectionIndex(SEI).Index == elementIdx); + SILValue val = projectLoadValue( + SEI->getOperand(), + AccessPath(accessPath.getStorage(), pathNode.getParent(), 0), + rootVal, rootAccessPath, beforeInst); SILBuilder B(beforeInst); return B.createStructExtract(beforeInst->getLoc(), val, SEI->getField(), SEI->getType().getObjectType()); } if (auto *TEI = dyn_cast(addr)) { - SILValue val = projectLoadValue(TEI->getOperand(), rootAddr, rootVal, - beforeInst); + assert(ProjectionIndex(TEI).Index == elementIdx); + SILValue val = projectLoadValue( + TEI->getOperand(), + AccessPath(accessPath.getStorage(), pathNode.getParent(), 0), + rootVal, rootAccessPath, beforeInst); SILBuilder B(beforeInst); return B.createTupleExtract(beforeInst->getLoc(), val, TEI->getFieldIndex(), TEI->getType().getObjectType()); @@ -946,12 +1000,17 @@ static SILValue projectLoadValue(SILValue addr, SILValue rootAddr, llvm_unreachable("unknown projection"); } -/// Returns true if all stores to \p addr commonly dominate the loop exitst of -/// \p loop. -static bool storesCommonlyDominateLoopExits(SILValue addr, SILLoop *loop, - ArrayRef exitingBlocks) { +/// Returns true if all stores to \p addr commonly dominate the loop exits. +static bool +storesCommonlyDominateLoopExits(AccessPath accessPath, + SILLoop *loop, + ArrayRef exitingBlocks) { SmallPtrSet stores; - for (Operand *use : addr->getUses()) { + SmallVector uses; + // Collect as many recognizable stores as possible. It's ok if not all stores + // are collected. 
+ accessPath.collectUses(uses, AccessUseType::Exact, loop->getFunction()); + for (Operand *use : uses) { SILInstruction *user = use->getUser(); if (isa(user)) stores.insert(user->getParent()); @@ -1030,24 +1089,26 @@ static bool storesCommonlyDominateLoopExits(SILValue addr, SILLoop *loop, return true; } -void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, InstVector &toDelete) { - - SmallVector exitingBlocks; - loop->getExitingBlocks(exitingBlocks); +void LoopTreeOptimization::hoistLoadsAndStores( + AccessPath accessPath, SILLoop *loop, InstVector &toDelete) { + SmallVector exitingAndLatchBlocks; + loop->getExitingAndLatchBlocks(exitingAndLatchBlocks); // This is not a requirement for functional correctness, but we don't want to // _speculatively_ load and store the value (outside of the loop). - if (!storesCommonlyDominateLoopExits(addr, loop, exitingBlocks)) + if (!storesCommonlyDominateLoopExits(accessPath, loop, + exitingAndLatchBlocks)) return; // Inserting the stores requires the exit edges to be not critical. - for (SILBasicBlock *exitingBlock : exitingBlocks) { - for (unsigned idx = 0, e = exitingBlock->getSuccessors().size(); + for (SILBasicBlock *exitingOrLatchBlock : exitingAndLatchBlocks) { + for (unsigned idx = 0, e = exitingOrLatchBlock->getSuccessors().size(); idx != e; ++idx) { // exitingBlock->getSuccessors() must not be moved out of this loop, // because the successor list is invalidated by splitCriticalEdge. - if (!loop->contains(exitingBlock->getSuccessors()[idx])) { - splitCriticalEdge(exitingBlock->getTerminator(), idx, DomTree, LoopInfo); + if (!loop->contains(exitingOrLatchBlock->getSuccessors()[idx])) { + splitCriticalEdge(exitingOrLatchBlock->getTerminator(), idx, DomTree, + LoopInfo); } } } @@ -1057,30 +1118,46 @@ void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, Ins // Initially load the value in the loop pre header. 
SILBuilder B(preheader->getTerminator()); - auto *initialLoad = B.createLoad(preheader->getTerminator()->getLoc(), addr, - LoadOwnershipQualifier::Unqualified); - LLVM_DEBUG(llvm::dbgs() << "Creating preload " << *initialLoad); - + SILValue storeAddr; SILSSAUpdater ssaUpdater; - ssaUpdater.initialize(initialLoad->getType()); - ssaUpdater.addAvailableValue(preheader, initialLoad); // Set all stored values as available values in the ssaUpdater. // If there are multiple stores in a block, only the last one counts. Optional loc; for (SILInstruction *I : LoadsAndStores) { - if (auto *SI = isStoreToAddr(I, addr)) { + if (auto *SI = isStoreToAccess(I, accessPath)) { loc = SI->getLoc(); // If a store just stores the loaded value, bail. The operand (= the load) // will be removed later, so it cannot be used as available value. // This corner case is suprisingly hard to handle, so we just give up. - if (isLoadFromAddr(dyn_cast(SI->getSrc()), addr)) + if (isLoadWithinAccess(dyn_cast(SI->getSrc()), accessPath)) return; + if (!storeAddr) { + storeAddr = SI->getDest(); + ssaUpdater.initialize(storeAddr->getType().getObjectType()); + } else if (SI->getDest()->getType() != storeAddr->getType()) { + // This transformation assumes that the values of all stores in the loop + // must be interchangeable. It won't work if stores different types + // because of casting or payload extraction even though they have the + // same access path. + return; + } ssaUpdater.addAvailableValue(SI->getParent(), SI->getSrc()); } } + assert(storeAddr && "hoistLoadsAndStores requires a store in the loop"); + SILValue initialAddr = cloneUseDefChain( + storeAddr, preheader->getTerminator(), [&](SILValue srcAddr) { + // Clone projections until the address dominates preheader. 
+ return !DomTree->dominates(srcAddr->getParentBlock(), preheader); + }); + LoadInst *initialLoad = + B.createLoad(preheader->getTerminator()->getLoc(), initialAddr, + LoadOwnershipQualifier::Unqualified); + LLVM_DEBUG(llvm::dbgs() << "Creating preload " << *initialLoad); + ssaUpdater.addAvailableValue(preheader, initialLoad); // Remove all stores and replace the loads with the current value. SILBasicBlock *currentBlock = nullptr; @@ -1091,37 +1168,45 @@ void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, Ins currentBlock = block; currentVal = SILValue(); } - if (auto *SI = isStoreToAddr(I, addr)) { + if (auto *SI = isStoreToAccess(I, accessPath)) { LLVM_DEBUG(llvm::dbgs() << "Deleting reloaded store " << *SI); currentVal = SI->getSrc(); toDelete.push_back(SI); - } else if (auto *LI = isLoadFromAddr(I, addr)) { - // If we didn't see a store in this block yet, get the current value from - // the ssaUpdater. - if (!currentVal) - currentVal = ssaUpdater.getValueInMiddleOfBlock(block); - SILValue projectedValue = projectLoadValue(LI->getOperand(), addr, - currentVal, LI); - LLVM_DEBUG(llvm::dbgs() << "Replacing stored load " << *LI << " with " - << projectedValue); - LI->replaceAllUsesWith(projectedValue); - toDelete.push_back(LI); + continue; + } + auto loadWithAccess = isLoadWithinAccess(I, accessPath); + if (!loadWithAccess) { + continue; } + // If we didn't see a store in this block yet, get the current value from + // the ssaUpdater. + if (!currentVal) + currentVal = ssaUpdater.getValueInMiddleOfBlock(block); + + LoadInst *load = loadWithAccess.li; + auto loadAddress = load->getOperand(); + SILValue projectedValue = projectLoadValue( + loadAddress, loadWithAccess.accessPath, currentVal, accessPath, load); + LLVM_DEBUG(llvm::dbgs() << "Replacing stored load " << *load << " with " + << projectedValue); + load->replaceAllUsesWith(projectedValue); + toDelete.push_back(load); } // Store back the value at all loop exits. 
- for (SILBasicBlock *exitingBlock : exitingBlocks) { - for (SILBasicBlock *succ : exitingBlock->getSuccessors()) { - if (!loop->contains(succ)) { - assert(succ->getSinglePredecessorBlock() && - "should have split critical edges"); - SILBuilder B(succ->begin()); - auto *SI = B.createStore(loc.getValue(), - ssaUpdater.getValueInMiddleOfBlock(succ), addr, - StoreOwnershipQualifier::Unqualified); - (void)SI; - LLVM_DEBUG(llvm::dbgs() << "Creating loop-exit store " << *SI); - } + for (SILBasicBlock *exitingOrLatchBlock : exitingAndLatchBlocks) { + for (SILBasicBlock *succ : exitingOrLatchBlock->getSuccessors()) { + if (loop->contains(succ)) + continue; + + assert(succ->getSinglePredecessorBlock() + && "should have split critical edges"); + SILBuilder B(succ->begin()); + auto *SI = B.createStore( + loc.getValue(), ssaUpdater.getValueInMiddleOfBlock(succ), initialAddr, + StoreOwnershipQualifier::Unqualified); + (void)SI; + LLVM_DEBUG(llvm::dbgs() << "Creating loop-exit store " << *SI); } } @@ -1131,8 +1216,8 @@ void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, Ins bool LoopTreeOptimization::hoistAllLoadsAndStores(SILLoop *loop) { InstVector toDelete; - for (SILValue addr : LoadAndStoreAddrs) { - hoistLoadsAndStores(addr, loop, toDelete); + for (AccessPath accessPath : LoadAndStoreAddrs) { + hoistLoadsAndStores(accessPath, loop, toDelete); } LoadsAndStores.clear(); LoadAndStoreAddrs.clear(); diff --git a/test/SILOptimizer/licm.sil b/test/SILOptimizer/licm.sil index dcc8871611f91..fe52698e8f28a 100644 --- a/test/SILOptimizer/licm.sil +++ b/test/SILOptimizer/licm.sil @@ -634,15 +634,19 @@ struct Index { // ----------------------------------------------------------------------------- // Test combined load/store hoisting/sinking with obvious aliasing loads +// The loop contains loads and stores to the same accesspath: %3 alloc_stack -> #0 -> #0 +// However, they don't share the same projection instructions. 
+// LICM should still hoist the loads and sink the stores in a combined transformation. +// // CHECK-LABEL: sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 { // CHECK: bb0(%0 : $Int64): -// CHECK: store +// CHECK: store {{.*}} to %{{.*}} : $*Int64 +// CHECK: load %{{.*}} : $*Int64 +// CHECK: br bb1 // CHECK-NOT: {{(load|store)}} -// CHECK: bb1: -// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64 -// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64 -// CHECK-NEXT: cond_br +// CHECK: bb3: +// CHECK-NOT: {{(load|store)}} +// CHECK: store %{{.*}} to %{{.*}} : $*Int64 // CHECK-NOT: {{(load|store)}} // CHECK-LABEL: } // end sil function 'testCombinedLdStAliasingLoad' sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 { @@ -768,25 +772,10 @@ sil @getRange : $@convention(thin) () -> Range // CHECK-LABEL: sil shared @testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 { // CHECK: bb0(%0 : $Int64): // CHECK: store %0 to %{{.*}} : $*Int64 +// CHECK: load %{{.*}} : $*Int64 // CHECK-NOT: {{(load|store)}} -// CHECK: bb1(%{{.*}} : $Builtin.Int64): -// CHECK: builtin "sadd_with_overflow_Int64" -// CHECK: load %{{.*}} : $*Builtin.Int64 -// CHECK: builtin "sadd_with_overflow_Int64" -// CHECK: builtin "cmp_eq_Int64" -// CHECK-NEXT: cond_br -// CHECK: bb3: -// CHECK: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK: bb4: -// CHECK: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK: bb5: -// CHECK: function_ref @getRange : $@convention(thin) () -> Range -// CHECK: apply %{{.*}}() : $@convention(thin) () -> Range -// CHECK: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK: bb6: -// CHECK: load %{{.*}} : $*Builtin.Int64 -// CHECK: builtin "cmp_eq_Int64" -// CHECK: cond_br +// CHECK: bb7: +// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64 // CHECK-NOT: {{(load|store)}} // CHECK-LABEL: } // end sil function 'testLICMReducedCombinedLdStExtraProjection' sil shared 
@testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 { From 0ff82ca051fd4a417257c886a44869fea7b9265a Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 10 Nov 2020 16:50:07 -0500 Subject: [PATCH 05/75] Sema: Check conformance availability when ranking solutions This completes the work on , and also adds a test case for . --- include/swift/Sema/ConstraintSystem.h | 8 ++- lib/Sema/CSSimplify.cpp | 15 +++--- lib/Sema/ConstraintSystem.cpp | 32 ++++++++++++ test/Sema/conformance_availability.swift | 64 +++++++++++++++++++++++- 4 files changed, 111 insertions(+), 8 deletions(-) diff --git a/include/swift/Sema/ConstraintSystem.h b/include/swift/Sema/ConstraintSystem.h index 64fa45f2ceed8..40d7dc6ffc94f 100644 --- a/include/swift/Sema/ConstraintSystem.h +++ b/include/swift/Sema/ConstraintSystem.h @@ -3134,10 +3134,16 @@ class ConstraintSystem { }); } - /// Determine whether given declaration is unavailable in the current context. + /// Determine whether the given declaration is unavailable from the + /// current context. bool isDeclUnavailable(const Decl *D, ConstraintLocator *locator = nullptr) const; + /// Determine whether the given conformance is unavailable from the + /// current context. + bool isConformanceUnavailable(ProtocolConformanceRef conformance, + ConstraintLocator *locator = nullptr) const; + public: /// Whether we should attempt to fix problems. diff --git a/lib/Sema/CSSimplify.cpp b/lib/Sema/CSSimplify.cpp index 4400a7dfbe415..0dc807f97176b 100644 --- a/lib/Sema/CSSimplify.cpp +++ b/lib/Sema/CSSimplify.cpp @@ -5612,11 +5612,16 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( return SolutionKind::Unsolved; } + auto *loc = getConstraintLocator(locator); + /// Record the given conformance as the result, adding any conditional /// requirements if necessary. auto recordConformance = [&](ProtocolConformanceRef conformance) { // Record the conformance. 
- CheckedConformances.push_back({getConstraintLocator(locator), conformance}); + CheckedConformances.push_back({loc, conformance}); + + if (isConformanceUnavailable(conformance, loc)) + increaseScore(SK_Unavailable); // This conformance may be conditional, in which case we need to consider // those requirements as constraints too. @@ -5664,7 +5669,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( auto protocolTy = protocol->getDeclaredInterfaceType(); // If this conformance has been fixed already, let's just consider this done. - if (isFixedRequirement(getConstraintLocator(locator), protocolTy)) + if (isFixedRequirement(loc, protocolTy)) return SolutionKind::Solved; // If this is a generic requirement let's try to record that @@ -5711,7 +5716,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( auto dstType = getType(assignment->getDest()); auto *fix = IgnoreAssignmentDestinationType::create( - *this, srcType, dstType, getConstraintLocator(locator)); + *this, srcType, dstType, loc); return recordFix(fix) ? SolutionKind::Error : SolutionKind::Solved; } @@ -5722,8 +5727,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( // let's record it as a "contextual mismatch" because diagnostic // is going to be dependent on other contextual information. if (path.back().is()) { - auto *fix = ContextualMismatch::create(*this, type, protocolTy, - getConstraintLocator(locator)); + auto *fix = ContextualMismatch::create(*this, type, protocolTy, loc); return recordFix(fix) ? SolutionKind::Error : SolutionKind::Solved; } @@ -5761,7 +5765,6 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( // If this is an implicit Hashable conformance check generated for each // index argument of the keypath subscript component, we could just treat // it as though it conforms. 
- auto *loc = getConstraintLocator(locator); if (loc->isResultOfKeyPathDynamicMemberLookup() || loc->isKeyPathSubscriptComponent()) { if (protocol == diff --git a/lib/Sema/ConstraintSystem.cpp b/lib/Sema/ConstraintSystem.cpp index a47c01ed70521..08cd82da1fd64 100644 --- a/lib/Sema/ConstraintSystem.cpp +++ b/lib/Sema/ConstraintSystem.cpp @@ -22,6 +22,7 @@ #include "swift/AST/Initializer.h" #include "swift/AST/GenericEnvironment.h" #include "swift/AST/ParameterList.h" +#include "swift/AST/ProtocolConformance.h" #include "swift/AST/TypeCheckRequests.h" #include "swift/Basic/Statistic.h" #include "swift/Sema/CSFix.h" @@ -5088,6 +5089,37 @@ bool ConstraintSystem::isDeclUnavailable(const Decl *D, return result.hasValue(); } +bool ConstraintSystem::isConformanceUnavailable(ProtocolConformanceRef conformance, + ConstraintLocator *locator) const { + if (!conformance.isConcrete()) + return false; + + auto *concrete = conformance.getConcrete(); + auto *rootConf = concrete->getRootConformance(); + auto *ext = dyn_cast(rootConf->getDeclContext()); + if (ext == nullptr) + return false; + + auto &ctx = getASTContext(); + + // First check whether this declaration is universally unavailable. + if (ext->getAttrs().isUnavailable(ctx)) + return true; + + SourceLoc loc; + + if (locator) { + if (auto anchor = locator->getAnchor()) + loc = getLoc(anchor); + } + + // If not, let's check contextual unavailability. + ExportContext where = ExportContext::forFunctionBody(DC, loc); + auto result = TypeChecker::checkConformanceAvailability( + rootConf, ext, where); + return result.hasValue(); +} + /// If we aren't certain that we've emitted a diagnostic, emit a fallback /// diagnostic. 
void ConstraintSystem::maybeProduceFallbackDiagnostic( diff --git a/test/Sema/conformance_availability.swift b/test/Sema/conformance_availability.swift index 61dd90e25d207..e700055658467 100644 --- a/test/Sema/conformance_availability.swift +++ b/test/Sema/conformance_availability.swift @@ -259,4 +259,66 @@ struct AssocConformanceAvailable4 {} @available(macOS 100, *) extension AssocConformanceAvailable4 : Rider { typealias H = HasAvailableConformance1 -} \ No newline at end of file +} + +// Solution ranking should down-rank solutions involving unavailable conformances +protocol First {} +extension First { + func doStuff(_: T) -> Bool {} +} + +protocol Second {} +extension Second { + func doStuff(_: Int) -> Int {} +} + +struct ConformingType1 {} + +extension ConformingType1 : First {} + +@available(macOS 100, *) +extension ConformingType1 : Second {} + +func usesConformingType1(_ c: ConformingType1) { + // We should pick First.doStuff() here, since Second.doStuff() is unavailable + let result = c.doStuff(123) + let _: Bool = result +} + +@available(macOS 100, *) +func usesConformingType1a(_ c: ConformingType1) { + // We should pick Second.doStuff() here, since it is more specialized than + // First.doStuff() + let result = c.doStuff(123) + let _: Int = result +} + +// Same as above but unconditionally unavailable +struct ConformingType2 {} + +extension ConformingType2 : First {} + +@available(*, unavailable) +extension ConformingType2 : Second {} + +func usesConformingType2(_ c: ConformingType2) { + // We should pick First.doStuff() here, since Second.doStuff() is unavailable + let result = c.doStuff(123) + let _: Bool = result +} + +// Make sure this also works for synthesized conformances +struct UnavailableHashable { + let x: Int + let y: Int +} + +@available(macOS 100, *) +extension UnavailableHashable : Hashable {} + +func usesUnavailableHashable(_ c: UnavailableHashable) { + // expected-note@-1 2 {{add @available attribute to enclosing global function}} + _ = 
Set([c]) + // expected-error@-1 2 {{conformance of 'UnavailableHashable' to 'Hashable' is only available in macOS 100 or newer}} + // expected-note@-2 2 {{add 'if #available' version check}} +} From 368dc0f4016c178d2d47904abd6ad2bb72ae8757 Mon Sep 17 00:00:00 2001 From: Joe Groff Date: Tue, 10 Nov 2020 16:36:50 -0800 Subject: [PATCH 06/75] SILGen: Generate bodies for completion handler block impls --- lib/SILGen/ResultPlan.cpp | 3 +- lib/SILGen/SILGen.cpp | 47 +++++++ lib/SILGen/SILGen.h | 15 ++- lib/SILGen/SILGenThunk.cpp | 125 +++++++++++++++++- .../public/Concurrency/PartialAsyncTask.swift | 37 +++++- test/SILGen/objc_async.swift | 44 +++++- 6 files changed, 260 insertions(+), 11 deletions(-) diff --git a/lib/SILGen/ResultPlan.cpp b/lib/SILGen/ResultPlan.cpp index 308000367e368..f249c9261d3f6 100644 --- a/lib/SILGen/ResultPlan.cpp +++ b/lib/SILGen/ResultPlan.cpp @@ -507,7 +507,8 @@ class ForeignAsyncInitializationPlan final : public ResultPlan { .getInterfaceType()); SILFunction *impl = SGF.SGM .getOrCreateForeignAsyncCompletionHandlerImplFunction(implTy, - continuationTy); + continuationTy, + *calleeTypeInfo.foreign.async); auto implRef = SGF.B.createFunctionRef(loc, impl); // Initialize the block object for the completion handler. 
diff --git a/lib/SILGen/SILGen.cpp b/lib/SILGen/SILGen.cpp index 17b1d981ece8e..f857510014026 100644 --- a/lib/SILGen/SILGen.cpp +++ b/lib/SILGen/SILGen.cpp @@ -327,6 +327,53 @@ SILGenModule::getConformanceToBridgedStoredNSError(SILLocation loc, Type type) { return SwiftModule->lookupConformance(type, proto); } +static FuncDecl * +lookUpResumeContinuationIntrinsic(ASTContext &C, + Optional &cache, + StringRef name) { + if (cache) + return *cache; + + auto *module = C.getLoadedModule(C.Id_Concurrency); + if (!module) { + cache = nullptr; + return nullptr; + } + + SmallVector decls; + module->lookupQualified(module, + DeclNameRef(C.getIdentifier(name)), + NL_QualifiedDefault | NL_IncludeUsableFromInline, + decls); + + if (decls.size() != 1) { + cache = nullptr; + return nullptr; + } + auto func = dyn_cast(decls[0]); + cache = func; + return func; +} + +FuncDecl * +SILGenModule::getResumeUnsafeContinuation() { + return lookUpResumeContinuationIntrinsic(getASTContext(), + ResumeUnsafeContinuation, + "_resumeUnsafeContinuation"); +} +FuncDecl * +SILGenModule::getResumeUnsafeThrowingContinuation() { + return lookUpResumeContinuationIntrinsic(getASTContext(), + ResumeUnsafeThrowingContinuation, + "_resumeUnsafeThrowingContinuation"); +} +FuncDecl * +SILGenModule::getResumeUnsafeThrowingContinuationWithError() { + return lookUpResumeContinuationIntrinsic(getASTContext(), + ResumeUnsafeThrowingContinuationWithError, + "_resumeUnsafeThrowingContinuationWithError"); +} + ProtocolConformance *SILGenModule::getNSErrorConformanceToError() { if (NSErrorConformanceToError) return *NSErrorConformanceToError; diff --git a/lib/SILGen/SILGen.h b/lib/SILGen/SILGen.h index b29cdfb174db2..20f02bf40babb 100644 --- a/lib/SILGen/SILGen.h +++ b/lib/SILGen/SILGen.h @@ -27,6 +27,7 @@ namespace swift { class SILBasicBlock; + class ForeignAsyncConvention; namespace Lowering { class TypeConverter; @@ -118,6 +119,10 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { Optional 
NSErrorConformanceToError; + Optional ResumeUnsafeContinuation; + Optional ResumeUnsafeThrowingContinuation; + Optional ResumeUnsafeThrowingContinuationWithError; + public: SILGenModule(SILModule &M, ModuleDecl *SM); @@ -169,7 +174,8 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { /// as `async` in Swift. SILFunction *getOrCreateForeignAsyncCompletionHandlerImplFunction( CanSILFunctionType blockType, - CanType continuationTy); + CanType continuationTy, + ForeignAsyncConvention convention); /// Determine whether the given class has any instance variables that /// need to be destroyed. @@ -467,6 +473,13 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor { /// Retrieve the conformance of NSError to the Error protocol. ProtocolConformance *getNSErrorConformanceToError(); + /// Retrieve the _Concurrency._resumeUnsafeContinuation intrinsic. + FuncDecl *getResumeUnsafeContinuation(); + /// Retrieve the _Concurrency._resumeUnsafeThrowingContinuation intrinsic. + FuncDecl *getResumeUnsafeThrowingContinuation(); + /// Retrieve the _Concurrency._resumeUnsafeThrowingContinuationWithError intrinsic. 
+ FuncDecl *getResumeUnsafeThrowingContinuationWithError(); + SILFunction *getKeyPathProjectionCoroutine(bool isReadAccess, KeyPathTypeKind typeKind); diff --git a/lib/SILGen/SILGenThunk.cpp b/lib/SILGen/SILGenThunk.cpp index e9d3f02437973..dbd2ba3674cdc 100644 --- a/lib/SILGen/SILGenThunk.cpp +++ b/lib/SILGen/SILGenThunk.cpp @@ -27,6 +27,7 @@ #include "swift/AST/ASTMangler.h" #include "swift/AST/DiagnosticsSIL.h" #include "swift/AST/FileUnit.h" +#include "swift/AST/ForeignAsyncConvention.h" #include "swift/AST/GenericEnvironment.h" #include "swift/SIL/PrettyStackTrace.h" #include "swift/SIL/SILArgument.h" @@ -141,7 +142,8 @@ SILGenFunction::emitGlobalFunctionRef(SILLocation loc, SILDeclRef constant, SILFunction * SILGenModule::getOrCreateForeignAsyncCompletionHandlerImplFunction( CanSILFunctionType blockType, - CanType continuationTy) { + CanType continuationTy, + ForeignAsyncConvention convention) { // Extract the result type from the continuation type. auto resumeType = cast(continuationTy).getGenericArgs()[0]; @@ -184,10 +186,125 @@ SILGenModule::getOrCreateForeignAsyncCompletionHandlerImplFunction( if (F->empty()) { // TODO: Emit the implementation. SILGenFunction SGF(*this, *F, SwiftModule); - SmallVector params; - SGF.collectThunkParams(loc, params); + { + Scope scope(SGF, loc); + SmallVector params; + SGF.collectThunkParams(loc, params); - SGF.B.createUnreachable(loc); + // Get the continuation out of the block object. + auto blockStorage = params[0].getValue(); + auto continuationAddr = SGF.B.createProjectBlockStorage(loc, blockStorage); + auto continuationVal = SGF.B.createLoad(loc, continuationAddr, + LoadOwnershipQualifier::Trivial); + auto continuation = ManagedValue::forUnmanaged(continuationVal); + + // Check for an error if the convention includes one. 
+ auto errorIndex = convention.completionHandlerErrorParamIndex(); + + FuncDecl *resumeIntrinsic, *errorIntrinsic; + + SILBasicBlock *returnBB = nullptr; + if (errorIndex) { + resumeIntrinsic = getResumeUnsafeThrowingContinuation(); + errorIntrinsic = getResumeUnsafeThrowingContinuationWithError(); + + auto errorArgument = params[*errorIndex + 1]; + auto someErrorBB = SGF.createBasicBlock(FunctionSection::Postmatter); + auto noneErrorBB = SGF.createBasicBlock(); + returnBB = SGF.createBasicBlockAfter(noneErrorBB); + + auto &C = SGF.getASTContext(); + std::pair switchErrorBBs[] = { + {C.getOptionalSomeDecl(), someErrorBB}, + {C.getOptionalNoneDecl(), noneErrorBB} + }; + + SGF.B.createSwitchEnum(loc, errorArgument.borrow(SGF, loc).getValue(), + /*default*/ nullptr, + switchErrorBBs); + + SGF.B.emitBlock(someErrorBB); + + auto matchedErrorTy = errorArgument.getType().getOptionalObjectType(); + auto matchedError = SGF.B + .createGuaranteedTransformingTerminatorArgument(matchedErrorTy); + + // Resume the continuation as throwing the given error, bridged to a + // native Swift error. + auto nativeError = SGF.emitBridgedToNativeError(loc, matchedError); + Type replacementTypes[] = {resumeType}; + auto subs = SubstitutionMap::get(errorIntrinsic->getGenericSignature(), + replacementTypes, + ArrayRef{}); + SGF.emitApplyOfLibraryIntrinsic(loc, errorIntrinsic, subs, + {continuation, nativeError}, + SGFContext()); + + SGF.B.createBranch(loc, returnBB); + SGF.B.emitBlock(noneErrorBB); + } else { + resumeIntrinsic = getResumeUnsafeContinuation(); + } + + auto loweredResumeTy = SGF.getLoweredType(AbstractionPattern::getOpaque(), + resumeType); + + // Prepare the argument for the resume intrinsic, using the non-error + // arguments to the callback. + { + Scope resumeScope(SGF, loc); + unsigned errorIndexBoundary = errorIndex ? 
*errorIndex : ~0u; + auto resumeArgBuf = SGF.emitTemporaryAllocation(loc, + loweredResumeTy.getAddressType()); + + auto prepareArgument = [&](SILValue destBuf, ManagedValue arg) { + // Convert the ObjC argument to the bridged Swift representation we + // want. + ManagedValue bridgedArg = SGF.emitBridgedToNativeValue(loc, + arg, + arg.getType().getASTType(), + // FIXME: pass down formal type + destBuf->getType().getASTType(), + destBuf->getType().getObjectType()); + bridgedArg.forwardInto(SGF, loc, destBuf); + }; + + if (auto resumeTuple = dyn_cast(resumeType)) { + assert(params.size() == resumeTuple->getNumElements() + + 1 + (bool)errorIndex); + for (auto i : indices(resumeTuple.getElementTypes())) { + auto resumeEltBuf = SGF.B.createTupleElementAddr(loc, + resumeArgBuf, i); + auto arg = params[1 + i + (i >= errorIndexBoundary)]; + prepareArgument(resumeEltBuf, arg); + } + } else { + assert(params.size() == 2 + (bool)errorIndex); + prepareArgument(resumeArgBuf, params[1 + (errorIndexBoundary == 0)]); + } + + + // Resume the continuation with the composed bridged result. + ManagedValue resumeArg = SGF.emitManagedBufferWithCleanup(resumeArgBuf); + Type replacementTypes[] = {resumeType}; + auto subs = SubstitutionMap::get(resumeIntrinsic->getGenericSignature(), + replacementTypes, + ArrayRef{}); + SGF.emitApplyOfLibraryIntrinsic(loc, resumeIntrinsic, subs, + {continuation, resumeArg}, + SGFContext()); + } + + // Now we've resumed the continuation one way or another. Return from the + // completion callback. 
+ if (returnBB) { + SGF.B.createBranch(loc, returnBB); + SGF.B.emitBlock(returnBB); + } + } + + SGF.B.createReturn(loc, + SILUndef::get(SGF.SGM.Types.getEmptyTupleType(), SGF.F)); } return F; diff --git a/stdlib/public/Concurrency/PartialAsyncTask.swift b/stdlib/public/Concurrency/PartialAsyncTask.swift index dec49a85eda83..618ec38ef9463 100644 --- a/stdlib/public/Concurrency/PartialAsyncTask.swift +++ b/stdlib/public/Concurrency/PartialAsyncTask.swift @@ -24,15 +24,46 @@ public struct PartialAsyncTask { public struct UnsafeContinuation { private var context: UnsafeRawPointer - public func resume(_: T) { } + public func resume(_: __owned T) { } } @frozen public struct UnsafeThrowingContinuation { private var context: UnsafeRawPointer - public func resume(_: T) { } - public func fail(_: Error) { } + public func resume(_: __owned T) { } + public func fail(_: __owned Error) { } } +#if _runtime(_ObjC) +// Intrinsics used by SILGen to resume or fail continuations +// for +@_alwaysEmitIntoClient +@usableFromInline +internal func _resumeUnsafeContinuation( + _ continuation: UnsafeContinuation, + _ value: __owned T +) { + continuation.resume(value) +} + +@_alwaysEmitIntoClient +@usableFromInline +internal func _resumeUnsafeThrowingContinuation( + _ continuation: UnsafeThrowingContinuation, + _ value: __owned T +) { + continuation.resume(value) +} + +@_alwaysEmitIntoClient +@usableFromInline +internal func _resumeUnsafeThrowingContinuationWithError( + _ continuation: UnsafeThrowingContinuation, + _ error: __owned Error +) { + continuation.fail(error) +} + +#endif diff --git a/test/SILGen/objc_async.swift b/test/SILGen/objc_async.swift index f962609298b64..30f6c91c7f8a3 100644 --- a/test/SILGen/objc_async.swift +++ b/test/SILGen/objc_async.swift @@ -12,7 +12,7 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeContinuation // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] // 
CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] - // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation, Int) -> () + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[INT_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation, Int) -> () // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] // CHECK: apply [[METHOD]]({{.*}}, [[BLOCK]], %0) // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]] @@ -27,7 +27,7 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeThrowingContinuation // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] // CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] - // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @{{.*}} : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation, Optional, Optional) -> () + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[STRING_COMPLETION_THROW_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation, Optional, Optional) -> () // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] // CHECK: apply [[METHOD]]([[BLOCK]], %0) // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] @@ -38,6 +38,7 @@ func testSlowServer(slowServer: SlowServer) async throws { let _: String = try await slowServer.findAnswer() // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) () -> (), SlowServer) -> () + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[VOID_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation<()>) -> () await slowServer.serverRestart("somewhere") // CHECK: [[ERROR]]([[ERROR_VALUE:%.*]] : @owned $Error): @@ -45,3 +46,42 @@ func 
testSlowServer(slowServer: SlowServer) async throws { // CHECK: throw [[ERROR_VALUE]] } + +// CHECK: sil{{.*}}@[[INT_COMPLETION_BLOCK]] +// CHECK: [[CONT_ADDR:%.*]] = project_block_storage %0 +// CHECK: [[CONT:%.*]] = load [trivial] [[CONT_ADDR]] +// CHECK: [[RESULT_BUF:%.*]] = alloc_stack $Int +// CHECK: store %1 to [trivial] [[RESULT_BUF]] +// CHECK: [[RESUME:%.*]] = function_ref @{{.*}}resumeUnsafeContinuation +// CHECK: apply [[RESUME]]([[CONT]], [[RESULT_BUF]]) + +// CHECK: sil{{.*}}@[[STRING_COMPLETION_THROW_BLOCK]] +// CHECK: [[RESUME_IN:%.*]] = copy_value %1 +// CHECK: [[ERROR_IN:%.*]] = copy_value %2 +// CHECK: [[CONT_ADDR:%.*]] = project_block_storage %0 +// CHECK: [[CONT:%.*]] = load [trivial] [[CONT_ADDR]] +// CHECK: [[ERROR_IN_B:%.*]] = begin_borrow [[ERROR_IN]] +// CHECK: switch_enum [[ERROR_IN_B]] : {{.*}}, case #Optional.some!enumelt: [[ERROR_BB:bb[0-9]+]], case #Optional.none!enumelt: [[RESUME_BB:bb[0-9]+]] +// CHECK: [[RESUME_BB]]: +// CHECK: [[RESULT_BUF:%.*]] = alloc_stack $String +// CHECK: [[BRIDGE:%.*]] = function_ref @{{.*}}unconditionallyBridgeFromObjectiveC +// CHECK: [[BRIDGED_RESULT:%.*]] = apply [[BRIDGE]]([[RESUME_IN]] +// CHECK: store [[BRIDGED_RESULT]] to [init] [[RESULT_BUF]] +// CHECK: [[RESUME:%.*]] = function_ref @{{.*}}resumeUnsafeThrowingContinuation +// CHECK: apply [[RESUME]]([[CONT]], [[RESULT_BUF]]) +// CHECK: br [[END_BB:bb[0-9]+]] +// CHECK: [[END_BB]]: +// CHECK: return +// CHECK: [[ERROR_BB]]([[ERROR_IN_UNWRAPPED:%.*]] : @guaranteed $NSError): +// CHECK: [[ERROR:%.*]] = init_existential_ref [[ERROR_IN_UNWRAPPED]] +// CHECK: [[RESUME_WITH_ERROR:%.*]] = function_ref @{{.*}}resumeUnsafeThrowingContinuationWithError +// CHECK: [[ERROR_COPY:%.*]] = copy_value [[ERROR]] +// CHECK: apply [[RESUME_WITH_ERROR]]([[CONT]], [[ERROR_COPY]]) +// CHECK: br [[END_BB]] + +// CHECK: sil {{.*}} @[[VOID_COMPLETION_BLOCK]] +// CHECK: [[CONT_ADDR:%.*]] = project_block_storage %0 +// CHECK: [[CONT:%.*]] = load [trivial] [[CONT_ADDR]] +// 
CHECK: [[RESULT_BUF:%.*]] = alloc_stack $() +// CHECK: [[RESUME:%.*]] = function_ref @{{.*}}resumeUnsafeContinuation +// CHECK: apply [[RESUME]]<()>([[CONT]], [[RESULT_BUF]]) From b7d26337e3252f013d271cd001635e260a50eb77 Mon Sep 17 00:00:00 2001 From: Butta Date: Wed, 11 Nov 2020 01:24:23 +0530 Subject: [PATCH 07/75] [android] Add support for x86_64 arch --- utils/build-script | 3 +++ utils/build-script-impl | 7 ++++++- utils/build_swift/build_swift/driver_arguments.py | 6 +++--- utils/swift_build_support/swift_build_support/targets.py | 2 +- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/utils/build-script b/utils/build-script index 639f790fe3543..8b1c1cd4fde60 100755 --- a/utils/build-script +++ b/utils/build-script @@ -349,6 +349,9 @@ def apply_default_arguments(toolchain, args): elif args.android_arch == "aarch64": args.stdlib_deployment_targets.append( StdlibDeploymentTarget.Android.aarch64.name) + elif args.android_arch == "x86_64": + args.stdlib_deployment_targets.append( + StdlibDeploymentTarget.Android.x86_64.name) # Infer platform flags from manually-specified configure targets. 
# This doesn't apply to Darwin platforms, as they are diff --git a/utils/build-script-impl b/utils/build-script-impl index f7fd838905c88..b99da016f5c53 100755 --- a/utils/build-script-impl +++ b/utils/build-script-impl @@ -414,7 +414,8 @@ function verify_host_is_supported() { | watchsimulator-arm64 \ | watchos-armv7k \ | android-armv7 \ - | android-aarch64) + | android-aarch64 \ + | android-x86_64) ;; *) echo "Unknown host tools target: ${host}" @@ -458,6 +459,10 @@ function set_build_options_for_host() { SWIFT_HOST_TRIPLE="armv7-unknown-linux-androideabi" llvm_target_arch="ARM" ;; + android-x86_64) + SWIFT_HOST_TRIPLE="x86_64-unknown-linux-android${ANDROID_API_LEVEL}" + llvm_target_arch="X86" + ;; linux-armv6) SWIFT_HOST_TRIPLE="armv6-unknown-linux-gnueabihf" llvm_target_arch="ARM" diff --git a/utils/build_swift/build_swift/driver_arguments.py b/utils/build_swift/build_swift/driver_arguments.py index cb1c5c0b0e380..45d76b5195178 100644 --- a/utils/build_swift/build_swift/driver_arguments.py +++ b/utils/build_swift/build_swift/driver_arguments.py @@ -1087,10 +1087,10 @@ def create_argument_parser(): android.adb.commands.DEVICE_TEMP_DIR)) option('--android-arch', store, - choices=['armv7', 'aarch64'], + choices=['armv7', 'aarch64', 'x86_64'], default='armv7', - help='The Android target architecture when building for Android. ' - 'Currently only armv7 and aarch64 are supported. ' + help='The target architecture when building for Android. ' + 'Currently, only armv7, aarch64, and x86_64 are supported. 
' '%(default)s is the default.') # ------------------------------------------------------------------------- diff --git a/utils/swift_build_support/swift_build_support/targets.py b/utils/swift_build_support/swift_build_support/targets.py index 9370a1a82e0ee..02613404af9b6 100644 --- a/utils/swift_build_support/swift_build_support/targets.py +++ b/utils/swift_build_support/swift_build_support/targets.py @@ -201,7 +201,7 @@ class StdlibDeploymentTarget(object): Cygwin = Platform("cygwin", archs=["x86_64"]) - Android = AndroidPlatform("android", archs=["armv7", "aarch64"]) + Android = AndroidPlatform("android", archs=["armv7", "aarch64", "x86_64"]) Windows = Platform("windows", archs=["x86_64"]) From adacecb47a4809eef50e5de6185c5d30586b5742 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Wed, 11 Nov 2020 14:17:00 -0800 Subject: [PATCH 08/75] [NFC] Split Make-Style Dependency Emission from FrontendTool --- lib/FrontendTool/CMakeLists.txt | 1 + lib/FrontendTool/Dependencies.h | 31 +++++ lib/FrontendTool/FrontendTool.cpp | 124 +----------------- lib/FrontendTool/MakeStyleDependencies.cpp | 144 +++++++++++++++++++++ 4 files changed, 178 insertions(+), 122 deletions(-) create mode 100644 lib/FrontendTool/Dependencies.h create mode 100644 lib/FrontendTool/MakeStyleDependencies.cpp diff --git a/lib/FrontendTool/CMakeLists.txt b/lib/FrontendTool/CMakeLists.txt index 26b37f661b82b..210bb45b47674 100644 --- a/lib/FrontendTool/CMakeLists.txt +++ b/lib/FrontendTool/CMakeLists.txt @@ -2,6 +2,7 @@ set_swift_llvm_is_available() add_swift_host_library(swiftFrontendTool STATIC FrontendTool.cpp ImportedModules.cpp + MakeStyleDependencies.cpp ScanDependencies.cpp TBD.cpp) add_dependencies(swiftFrontendTool diff --git a/lib/FrontendTool/Dependencies.h b/lib/FrontendTool/Dependencies.h new file mode 100644 index 0000000000000..24e55ef1cba71 --- /dev/null +++ b/lib/FrontendTool/Dependencies.h @@ -0,0 +1,31 @@ +//===--- Dependencies.h -- Unified header for dependnecy tracing utilies 
--===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_FRONTENDTOOL_DEPENDENCIES_H +#define SWIFT_FRONTENDTOOL_DEPENDENCIES_H + +namespace swift { + +class ASTContext; +class DependencyTracker; +class DiagnosticEngine; +class FrontendOptions; +class InputFile; +class ModuleDecl; + +bool emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, + DependencyTracker *depTracker, + const FrontendOptions &opts, + const InputFile &input); +} // end namespace swift + +#endif diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp index a57b6d69e1be8..501bfcdf7cd7c 100644 --- a/lib/FrontendTool/FrontendTool.cpp +++ b/lib/FrontendTool/FrontendTool.cpp @@ -21,7 +21,7 @@ //===----------------------------------------------------------------------===// #include "swift/FrontendTool/FrontendTool.h" -#include "ImportedModules.h" +#include "Dependencies.h" #include "ScanDependencies.h" #include "TBD.h" @@ -107,132 +107,12 @@ static std::string displayName(StringRef MainExecutablePath) { return Name; } -StringRef -swift::frontend::utils::escapeForMake(StringRef raw, - llvm::SmallVectorImpl &buffer) { - buffer.clear(); - - // The escaping rules for GNU make are complicated due to the various - // subsitutions and use of the tab in the leading position for recipes. - // Various symbols have significance in different contexts. It is not - // possible to correctly quote all characters in Make (as of 3.7). Match - // gcc and clang's behaviour for the escaping which covers only a subset of - // characters. 
- for (unsigned I = 0, E = raw.size(); I != E; ++I) { - switch (raw[I]) { - case '#': // Handle '#' the broken GCC way - buffer.push_back('\\'); - break; - - case ' ': - for (unsigned J = I; J && raw[J - 1] == '\\'; --J) - buffer.push_back('\\'); - buffer.push_back('\\'); - break; - - case '$': // $ is escaped by $ - buffer.push_back('$'); - break; - } - buffer.push_back(raw[I]); - } - buffer.push_back('\0'); - - return buffer.data(); -} - -/// This sorting function is used to stabilize the order in which dependencies -/// are emitted into \c .d files that are consumed by external build systems. -/// This serves to eliminate order as a source of non-determinism in these -/// outputs. -/// -/// The exact sorting predicate is not important. Currently, it is a -/// lexicographic comparison that reverses the provided strings before applying -/// the sorting predicate. This has the benefit of being somewhat -/// invariant with respect to the installation location of various system -/// components. e.g. on two systems, the same file identified by two different -/// paths differing only in their relative install location such as -/// -/// /Applications/MyXcode.app/Path/To/A/Framework/In/The/SDK/Header.h -/// /Applications/Xcodes/AnotherXcode.app/Path/To/A/Framework/In/The/SDK/Header.h -/// -/// should appear in roughly the same order relative to other paths. Ultimately, -/// this makes it easier to test the contents of the emitted files with tools -/// like FileCheck. -static std::vector -reversePathSortedFilenames(const ArrayRef elts) { - std::vector tmp(elts.begin(), elts.end()); - std::sort(tmp.begin(), tmp.end(), [](const std::string &a, - const std::string &b) -> bool { - return std::lexicographical_compare(a.rbegin(), a.rend(), - b.rbegin(), b.rend()); - }); - return tmp; -} - -/// Emits a Make-style dependencies file. 
-static bool emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, - DependencyTracker *depTracker, - const FrontendOptions &opts, - const InputFile &input) { - const std::string &dependenciesFilePath = input.dependenciesFilePath(); - if (dependenciesFilePath.empty()) - return false; - - std::error_code EC; - llvm::raw_fd_ostream out(dependenciesFilePath, EC, llvm::sys::fs::F_None); - - if (out.has_error() || EC) { - diags.diagnose(SourceLoc(), diag::error_opening_output, - dependenciesFilePath, EC.message()); - out.clear_error(); - return true; - } - - llvm::SmallString<256> buffer; - - // collect everything in memory to avoid redundant work - // when there are multiple targets - std::string dependencyString; - - // First include all other files in the module. Make-style dependencies - // need to be conservative! - auto inputPaths = - reversePathSortedFilenames(opts.InputsAndOutputs.getInputFilenames()); - for (auto const &path : inputPaths) { - dependencyString.push_back(' '); - dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); - } - // Then print dependencies we've picked up during compilation. - auto dependencyPaths = - reversePathSortedFilenames(depTracker->getDependencies()); - for (auto const &path : dependencyPaths) { - dependencyString.push_back(' '); - dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); - } - auto incrementalDependencyPaths = - reversePathSortedFilenames(depTracker->getIncrementalDependencies()); - for (auto const &path : incrementalDependencyPaths) { - dependencyString.push_back(' '); - dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); - } - - // FIXME: Xcode can't currently handle multiple targets in a single - // dependency line. 
- opts.forAllOutputPaths(input, [&](const StringRef targetName) { - auto targetNameEscaped = frontend::utils::escapeForMake(targetName, buffer); - out << targetNameEscaped << " :" << dependencyString << '\n'; - }); - - return false; -} - static void emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, DependencyTracker *depTracker, const FrontendOptions &opts) { opts.InputsAndOutputs.forEachInputProducingSupplementaryOutput( [&](const InputFile &f) -> bool { - return emitMakeDependenciesIfNeeded(diags, depTracker, opts, f); + return swift::emitMakeDependenciesIfNeeded(diags, depTracker, opts, f); }); } diff --git a/lib/FrontendTool/MakeStyleDependencies.cpp b/lib/FrontendTool/MakeStyleDependencies.cpp new file mode 100644 index 0000000000000..f3e4d77a213ef --- /dev/null +++ b/lib/FrontendTool/MakeStyleDependencies.cpp @@ -0,0 +1,144 @@ +//===--- MakeStyleDependencies.cpp -- Emit make-style dependencies --------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "Dependencies.h" +#include "swift/AST/DiagnosticEngine.h" +#include "swift/AST/DiagnosticsFrontend.h" +#include "swift/AST/ModuleLoader.h" +#include "swift/Frontend/FrontendOptions.h" +#include "swift/Frontend/InputFile.h" +#include "swift/FrontendTool/FrontendTool.h" + +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringRef.h" + +using namespace swift; + +StringRef +swift::frontend::utils::escapeForMake(StringRef raw, + llvm::SmallVectorImpl &buffer) { + buffer.clear(); + + // The escaping rules for GNU make are complicated due to the various + // subsitutions and use of the tab in the leading position for recipes. + // Various symbols have significance in different contexts. It is not + // possible to correctly quote all characters in Make (as of 3.7). Match + // gcc and clang's behaviour for the escaping which covers only a subset of + // characters. + for (unsigned I = 0, E = raw.size(); I != E; ++I) { + switch (raw[I]) { + case '#': // Handle '#' the broken GCC way + buffer.push_back('\\'); + break; + + case ' ': + for (unsigned J = I; J && raw[J - 1] == '\\'; --J) + buffer.push_back('\\'); + buffer.push_back('\\'); + break; + + case '$': // $ is escaped by $ + buffer.push_back('$'); + break; + } + buffer.push_back(raw[I]); + } + buffer.push_back('\0'); + + return buffer.data(); +} + +/// This sorting function is used to stabilize the order in which dependencies +/// are emitted into \c .d files that are consumed by external build systems. +/// This serves to eliminate order as a source of non-determinism in these +/// outputs. +/// +/// The exact sorting predicate is not important. 
Currently, it is a +/// lexicographic comparison that reverses the provided strings before applying +/// the sorting predicate. This has the benefit of being somewhat +/// invariant with respect to the installation location of various system +/// components. e.g. on two systems, the same file identified by two different +/// paths differing only in their relative install location such as +/// +/// /Applications/MyXcode.app/Path/To/A/Framework/In/The/SDK/Header.h +/// /Applications/Xcodes/AnotherXcode.app/Path/To/A/Framework/In/The/SDK/Header.h +/// +/// should appear in roughly the same order relative to other paths. Ultimately, +/// this makes it easier to test the contents of the emitted files with tools +/// like FileCheck. +static std::vector +reversePathSortedFilenames(const ArrayRef elts) { + std::vector tmp(elts.begin(), elts.end()); + std::sort(tmp.begin(), tmp.end(), + [](const std::string &a, const std::string &b) -> bool { + return std::lexicographical_compare(a.rbegin(), a.rend(), + b.rbegin(), b.rend()); + }); + return tmp; +} + +/// Emits a Make-style dependencies file. +bool swift::emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, + DependencyTracker *depTracker, + const FrontendOptions &opts, + const InputFile &input) { + const std::string &dependenciesFilePath = input.dependenciesFilePath(); + if (dependenciesFilePath.empty()) + return false; + + std::error_code EC; + llvm::raw_fd_ostream out(dependenciesFilePath, EC, llvm::sys::fs::F_None); + + if (out.has_error() || EC) { + diags.diagnose(SourceLoc(), diag::error_opening_output, + dependenciesFilePath, EC.message()); + out.clear_error(); + return true; + } + + llvm::SmallString<256> buffer; + + // collect everything in memory to avoid redundant work + // when there are multiple targets + std::string dependencyString; + + // First include all other files in the module. Make-style dependencies + // need to be conservative! 
+ auto inputPaths = + reversePathSortedFilenames(opts.InputsAndOutputs.getInputFilenames()); + for (auto const &path : inputPaths) { + dependencyString.push_back(' '); + dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); + } + // Then print dependencies we've picked up during compilation. + auto dependencyPaths = + reversePathSortedFilenames(depTracker->getDependencies()); + for (auto const &path : dependencyPaths) { + dependencyString.push_back(' '); + dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); + } + auto incrementalDependencyPaths = + reversePathSortedFilenames(depTracker->getIncrementalDependencies()); + for (auto const &path : incrementalDependencyPaths) { + dependencyString.push_back(' '); + dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); + } + + // FIXME: Xcode can't currently handle multiple targets in a single + // dependency line. + opts.forAllOutputPaths(input, [&](const StringRef targetName) { + auto targetNameEscaped = frontend::utils::escapeForMake(targetName, buffer); + out << targetNameEscaped << " :" << dependencyString << '\n'; + }); + + return false; +} From 17143cb9c3d936a3e9567074c8fd0c3da34c1af6 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Wed, 11 Nov 2020 14:17:36 -0800 Subject: [PATCH 09/75] [NFC] Split ModuleTrace Infrastructure out of FrontendTool --- lib/FrontendTool/CMakeLists.txt | 1 + lib/FrontendTool/Dependencies.h | 7 + lib/FrontendTool/FrontendTool.cpp | 744 +----------------------- lib/FrontendTool/ImportedModules.cpp | 2 +- lib/FrontendTool/ImportedModules.h | 26 - lib/FrontendTool/LoadedModuleTrace.cpp | 764 +++++++++++++++++++++++++ lib/FrontendTool/ScanDependencies.cpp | 3 +- 7 files changed, 777 insertions(+), 770 deletions(-) delete mode 100644 lib/FrontendTool/ImportedModules.h create mode 100644 lib/FrontendTool/LoadedModuleTrace.cpp diff --git a/lib/FrontendTool/CMakeLists.txt b/lib/FrontendTool/CMakeLists.txt index 
210bb45b47674..67e725145dedb 100644 --- a/lib/FrontendTool/CMakeLists.txt +++ b/lib/FrontendTool/CMakeLists.txt @@ -2,6 +2,7 @@ set_swift_llvm_is_available() add_swift_host_library(swiftFrontendTool STATIC FrontendTool.cpp ImportedModules.cpp + LoadedModuleTrace.cpp MakeStyleDependencies.cpp ScanDependencies.cpp TBD.cpp) diff --git a/lib/FrontendTool/Dependencies.h b/lib/FrontendTool/Dependencies.h index 24e55ef1cba71..b6ad92fb44467 100644 --- a/lib/FrontendTool/Dependencies.h +++ b/lib/FrontendTool/Dependencies.h @@ -22,10 +22,17 @@ class FrontendOptions; class InputFile; class ModuleDecl; +/// Emit the names of the modules imported by \c mainModule. +bool emitImportedModules(ModuleDecl *mainModule, const FrontendOptions &opts); bool emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, DependencyTracker *depTracker, const FrontendOptions &opts, const InputFile &input); +bool emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule, + DependencyTracker *depTracker, + const FrontendOptions &opts, + const InputFile &input); + } // end namespace swift #endif diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp index 501bfcdf7cd7c..e67962af25023 100644 --- a/lib/FrontendTool/FrontendTool.cpp +++ b/lib/FrontendTool/FrontendTool.cpp @@ -93,12 +93,6 @@ #include #include -#if !defined(_MSC_VER) && !defined(__MINGW32__) -#include -#else -#include -#endif - using namespace swift; static std::string displayName(StringRef MainExecutablePath) { @@ -116,748 +110,14 @@ static void emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, }); } -// MARK: - Module Trace - -namespace { -struct SwiftModuleTraceInfo { - Identifier Name; - std::string Path; - bool IsImportedDirectly; - bool SupportsLibraryEvolution; -}; - -struct LoadedModuleTraceFormat { - static const unsigned CurrentVersion = 2; - unsigned Version; - Identifier Name; - std::string Arch; - std::vector SwiftModules; -}; -} - -namespace swift { -namespace json { -template <> struct ObjectTraits { 
- static void mapping(Output &out, SwiftModuleTraceInfo &contents) { - StringRef name = contents.Name.str(); - out.mapRequired("name", name); - out.mapRequired("path", contents.Path); - out.mapRequired("isImportedDirectly", contents.IsImportedDirectly); - out.mapRequired("supportsLibraryEvolution", - contents.SupportsLibraryEvolution); - } -}; - -// Version notes: -// 1. Keys: name, arch, swiftmodules -// 2. New keys: version, swiftmodulesDetailedInfo -template <> struct ObjectTraits { - static void mapping(Output &out, LoadedModuleTraceFormat &contents) { - out.mapRequired("version", contents.Version); - - StringRef name = contents.Name.str(); - out.mapRequired("name", name); - - out.mapRequired("arch", contents.Arch); - - // The 'swiftmodules' key is kept for backwards compatibility. - std::vector moduleNames; - for (auto &m : contents.SwiftModules) - moduleNames.push_back(m.Path); - out.mapRequired("swiftmodules", moduleNames); - - out.mapRequired("swiftmodulesDetailedInfo", contents.SwiftModules); - } -}; -} -} - -static bool isClangOverlayOf(ModuleDecl *potentialOverlay, - ModuleDecl *potentialUnderlying) { - return !potentialOverlay->isNonSwiftModule() - && potentialUnderlying->isNonSwiftModule() - && potentialOverlay->getName() == potentialUnderlying->getName(); -} - -// TODO: Delete this once changes from https://reviews.llvm.org/D83449 land on -// apple/llvm-project's swift/main branch. -template -static bool contains(const SetLike &setLike, Item item) { - return setLike.find(item) != setLike.end(); -} - -/// Get a set of modules imported by \p module. -/// -/// By default, all imports are included. 
-static void getImmediateImports( - ModuleDecl *module, - SmallPtrSetImpl &imports, - ModuleDecl::ImportFilter importFilter = { - ModuleDecl::ImportFilterKind::Exported, - ModuleDecl::ImportFilterKind::Default, - ModuleDecl::ImportFilterKind::ImplementationOnly, - ModuleDecl::ImportFilterKind::SPIAccessControl, - ModuleDecl::ImportFilterKind::ShadowedByCrossImportOverlay - }) { - SmallVector importList; - module->getImportedModules(importList, importFilter); - - for (ImportedModule &import : importList) - imports.insert(import.importedModule); -} - -namespace { -/// Helper type for computing (approximate) information about ABI-dependencies. -/// -/// This misses out on details such as typealiases and more. -/// See the "isImportedDirectly" field above for more details. -class ABIDependencyEvaluator { - /// Map of ABIs exported by a particular module, excluding itself. - /// - /// For example, consider (primed letters represent Clang modules): - /// \code - /// - A is @_exported-imported by B - /// - B is #imported by C' (via a compiler-generated umbrella header) - /// - C' is @_exported-imported by C (Swift overlay) - /// - D' is #imported by E' - /// - D' is @_exported-imported by D (Swift overlay) - /// - E' is @_exported-imported by E (Swift overlay) - /// \endcode - /// - /// Then the \c abiExportMap will be - /// \code - /// { A: {}, B: {A}, C: {B}, C': {B}, D: {}, D': {}, E: {D}, E': {D'} } - /// \endcode - /// - /// \b WARNING: Use \c reexposeImportedABI instead of inserting directly. - llvm::DenseMap> abiExportMap; - - /// Stack for depth-first traversal. - SmallVector searchStack; - - llvm::DenseSet visited; - - /// Helper function to handle invariant violations as crashes in debug mode. - void crashOnInvariantViolation( - llvm::function_ref f) const; - - /// Computes the ABI exports for \p importedModule and adds them to - /// \p module's ABI exports. 
- /// - /// If \p includeImportedModule is true, also adds \p importedModule to - /// \p module's ABI exports. - /// - /// Correct way to add entries to \c abiExportMap. - void reexposeImportedABI(ModuleDecl *module, ModuleDecl *importedModule, - bool includeImportedModule = true); - - /// Check if a Swift module is an overlay for some Clang module. - /// - /// FIXME: Delete this hack once SR-13363 is fixed and ModuleDecl has the - /// right API which we can use directly. - bool isOverlayOfClangModule(ModuleDecl *swiftModule); - - /// Check for cases where we have a fake cycle through an overlay. - /// - /// Sometimes, we have fake cycles in the import graph due to the Clang - /// importer injecting overlays between Clang modules. These don't represent - /// an actual cycle in the build, so we should ignore them. - /// - /// We check this lazily after detecting a cycle because it is difficult to - /// determine at the point where we see the overlay whether it was incorrectly - /// injected by the Clang importer or whether any of its imports will - /// eventually lead to a cycle. - /// - /// For more details, see [NOTE: ABIDependencyEvaluator-fake-cycle-detection] - /// - /// \param startOfCycle A pointer to the element of \c searchStack where - /// the module \em first appeared. - /// - /// \pre The module on top of \c searchStack is the same module as - /// *startOfCycle. - /// - /// \pre searchStack.begin() <= startOfCycle < searchStack.end() - bool isFakeCycleThroughOverlay(ModuleDecl **startOfCycle); - - /// Recursive step in computing ABI dependencies. - /// - /// Use this method instead of using the \c forClangModule/\c forSwiftModule - /// methods. 
- void computeABIDependenciesForModule(ModuleDecl *module); - void computeABIDependenciesForSwiftModule(ModuleDecl *module); - void computeABIDependenciesForClangModule(ModuleDecl *module); - - static void printModule(const ModuleDecl *module, llvm::raw_ostream &os); - - template - static void printModuleSet(const SetLike &set, llvm::raw_ostream &os); - -public: - ABIDependencyEvaluator() = default; - ABIDependencyEvaluator(const ABIDependencyEvaluator &) = delete; - ABIDependencyEvaluator(ABIDependencyEvaluator &&) = default; - - void getABIDependenciesForSwiftModule( - ModuleDecl *module, SmallPtrSetImpl &abiDependencies); - - void printABIExportMap(llvm::raw_ostream &os) const; -}; -} // end anonymous namespace - -// See [NOTE: Bailing-vs-crashing-in-trace-emission]. -// TODO: Use PrettyStackTrace instead? -void ABIDependencyEvaluator::crashOnInvariantViolation( - llvm::function_ref f) const { -#ifndef NDEBUG - std::string msg; - llvm::raw_string_ostream os(msg); - os << "error: invariant violation: "; - f(os); - llvm::report_fatal_error(os.str()); -#endif -} - -// [NOTE: Trace-Clang-submodule-complexity] -// -// A Clang module may have zero or more submodules. In practice, when traversing -// the imports of a module, we observe that different submodules of the same -// top-level module (almost) freely import each other. Despite this, we still -// need to conceptually traverse the tree formed by the submodule relationship -// (with the top-level module being the root). -// -// This needs to be taken care of in two ways: -// 1. We need to make sure we only go towards the leaves. It's okay if we "jump" -// branches, so long as we don't try to visit an ancestor when one of its -// descendants is still on the traversal stack, so that we don't end up with -// arbitrarily complex intra-module cycles. -// See also: [NOTE: Intra-module-leafwards-traversal]. -// 2. 
When adding entries to the ABI export map, we need to avoid marking -// dependencies within the same top-level module. This step is needed in -// addition to step 1 to avoid creating cycles like -// Overlay -> Underlying -> Submodule -> Overlay. - -void ABIDependencyEvaluator::reexposeImportedABI( - ModuleDecl *module, ModuleDecl *importedModule, - bool includeImportedModule) { - if (module == importedModule) { - crashOnInvariantViolation([&](llvm::raw_string_ostream &os) { - os << "module "; printModule(module, os); os << " imports itself!\n"; - }); - return; - } - - auto addToABIExportMap = [this](ModuleDecl *module, ModuleDecl *reexport) { - if (module == reexport) { - crashOnInvariantViolation([&](llvm::raw_string_ostream &os){ - os << "expected module "; printModule(reexport, os); - os << " to not re-export itself\n"; - }); - return; - } - if (reexport->isNonSwiftModule() - && module->isNonSwiftModule() - && module->getTopLevelModule() == reexport->getTopLevelModule()) { - // Dependencies within the same top-level Clang module are not useful. - // See also: [NOTE: Trace-Clang-submodule-complexity]. - return; - } - - // We only care about dependencies across top-level modules and we want to - // avoid exploding abiExportMap with submodules. So we only insert entries - // after calling getTopLevelModule(). - - if (::isClangOverlayOf(module, reexport)) { - // For overlays, we need to have a dependency on the underlying module. - // Otherwise, we might accidentally create a Swift -> Swift cycle. - abiExportMap[module].insert( - reexport->getTopLevelModule(/*preferOverlay*/false)); - return; - } - abiExportMap[module].insert( - reexport->getTopLevelModule(/*preferOverlay*/true)); - }; - - computeABIDependenciesForModule(importedModule); - if (includeImportedModule) { - addToABIExportMap(module, importedModule); - } - // Force creation of default value if missing. This prevents abiExportMap from - // growing (and moving) when calling addToABIExportMap. 
If abiExportMap gets - // moved, then abiExportMap[importedModule] will be moved, forcing us to - // create a defensive copy to avoid iterator invalidation on move. - (void)abiExportMap[module]; - for (auto reexportedModule: abiExportMap[importedModule]) - addToABIExportMap(module, reexportedModule); -} - -bool ABIDependencyEvaluator::isOverlayOfClangModule(ModuleDecl *swiftModule) { - assert(!swiftModule->isNonSwiftModule()); - - llvm::SmallPtrSet importList; - ::getImmediateImports(swiftModule, importList, - {ModuleDecl::ImportFilterKind::Exported}); - bool isOverlay = - llvm::any_of(importList, [&](ModuleDecl *importedModule) -> bool { - return isClangOverlayOf(swiftModule, importedModule); - }); - return isOverlay; -} - -// [NOTE: ABIDependencyEvaluator-fake-cycle-detection] -// -// First, let's consider a concrete example. -// - In Clang-land, ToyKit #imports CoreDoll. -// - The Swift overlay for CoreDoll imports both CoreDoll and ToyKit. -// Importing ToyKit from CoreDoll's overlay informally violates the layering -// of frameworks, but it doesn't actually create any cycles in the build -// dependencies. -// ┌───────────────────────────┐ -// ┌───│ CoreDoll.swiftmodule │ -// │ └───────────────────────────┘ -// │ │ -// import ToyKit @_exported import CoreDoll -// │ │ -// │ │ -// ▼ │ -// ┌──────────────────────────┐ │ -// │ ToyKit (ToyKit/ToyKit.h) │ │ -// └──────────────────────────┘ │ -// │ │ -// #import │ -// │ │ -// ▼ │ -// ┌──────────────────────────────┐ │ -// │CoreDoll (CoreDoll/CoreDoll.h)│◀──┘ -// └──────────────────────────────┘ -// -// Say we are trying to build a Swift module that imports ToyKit. Due to how -// module loading works, the Clang importer inserts the CoreDoll overlay -// between the ToyKit and CoreDoll Clang modules, creating a cycle in the -// import graph. 
-// -// ┌──────────────────────────┐ -// │ ToyKit (ToyKit/ToyKit.h) │◀──────────┐ -// └──────────────────────────┘ │ -// │ │ -// #import import ToyKit -// │ │ -// ▼ │ -// ┌────────────────────────────┐ │ -// │ CoreDoll.swiftmodule │─────────┘ -// └────────────────────────────┘ -// │ -// @_exported import CoreDoll -// │ -// ▼ -// ┌──────────────────────────────┐ -// │CoreDoll (CoreDoll/CoreDoll.h)│ -// └──────────────────────────────┘ -// -// This means that, at some point, searchStack will look like: -// -// [others] → ToyKit → CoreDoll (overlay) → ToyKit -// -// In the general case, there may be arbitrarily many modules in the cycle, -// including submodules. -// -// [others] → ToyKit → [others] → CoreDoll (overlay) → [others] → ToyKit -// -// where "[others]" indicates 0 or more modules of any kind. -// -// To detect this, we check that the start of the cycle is a Clang module and -// that there is at least one overlay between it and its recurrence at the end -// of the searchStack. If so, we assume we have detected a benign cycle which -// can be safely ignored. - -bool ABIDependencyEvaluator::isFakeCycleThroughOverlay( - ModuleDecl **startOfCycle) { - assert(startOfCycle >= searchStack.begin() && - startOfCycle < searchStack.end() && - "startOfCycleIter points to an element in searchStack"); - // The startOfCycle module must be a Clang module. - if (!(*startOfCycle)->isNonSwiftModule()) - return false; - // Next, we must have zero or more modules followed by a Swift overlay for a - // Clang module. 
- return std::any_of(startOfCycle + 1, searchStack.end(), - [this](ModuleDecl *module) { - return !module->isNonSwiftModule() && - isOverlayOfClangModule(module); - }); -} - -void ABIDependencyEvaluator::computeABIDependenciesForModule( - ModuleDecl *module) { - auto moduleIter = llvm::find(searchStack, module); - if (moduleIter != searchStack.end()) { - if (isFakeCycleThroughOverlay(moduleIter)) - return; - crashOnInvariantViolation([&](llvm::raw_string_ostream &os) { - os << "unexpected cycle in import graph!\n"; - for (auto m: searchStack) { - printModule(m, os); - if (!m->isNonSwiftModule()) { - os << " (isOverlay = " << isOverlayOfClangModule(m) << ")"; - } - os << "\ndepends on "; - } - printModule(module, os); os << '\n'; - }); - return; - } - if (::contains(visited, module)) - return; - searchStack.push_back(module); - if (module->isNonSwiftModule()) - computeABIDependenciesForClangModule(module); - else - computeABIDependenciesForSwiftModule(module); - searchStack.pop_back(); - visited.insert(module); -} - -void ABIDependencyEvaluator::computeABIDependenciesForSwiftModule( - ModuleDecl *module) { - SmallPtrSet allImports; - ::getImmediateImports(module, allImports); - for (auto import: allImports) { - computeABIDependenciesForModule(import); - if (::isClangOverlayOf(module, import)) { - reexposeImportedABI(module, import, - /*includeImportedModule=*/false); - } - } - - SmallPtrSet reexportedImports; - ::getImmediateImports(module, reexportedImports, - {ModuleDecl::ImportFilterKind::Exported}); - for (auto reexportedImport: reexportedImports) { - reexposeImportedABI(module, reexportedImport); - } -} - -void ABIDependencyEvaluator::computeABIDependenciesForClangModule( - ModuleDecl *module) { - SmallPtrSet imports; - ::getImmediateImports(module, imports); - for (auto import: imports) { - // There are three cases here which can potentially create cycles: - // - // 1. Clang modules importing the stdlib. 
- // See [NOTE: Pure-Clang-modules-privately-import-stdlib]. - // 2. Overlay S @_exported-imports underlying module S' and another Clang - // module C'. C' (transitively) #imports S' but it gets treated as if - // C' imports S. This creates a cycle: S -> C' -> ... -> S. - // In practice, this case is hit for - // Darwin (Swift) -> SwiftOverlayShims (Clang) -> Darwin (Swift). - // We may also hit this in a slightly different direction, in case - // the module directly imports SwiftOverlayShims: - // SwiftOverlayShims -> Darwin (Swift) -> SwiftOverlayShims - // The latter is handled later by isFakeCycleThroughOverlay. - // 3. [NOTE: Intra-module-leafwards-traversal] - // Cycles within the same top-level module. - // These don't matter for us, since we only care about the dependency - // graph at the granularity of top-level modules. So we ignore these - // by only considering parent -> submodule dependencies. - // See also [NOTE: Trace-Clang-submodule-complexity]. - if (import->isStdlibModule()) { - continue; - } - if (!import->isNonSwiftModule() && isOverlayOfClangModule(import) && - llvm::find(searchStack, import) != searchStack.end()) { - continue; - } - if (import->isNonSwiftModule() - && module->getTopLevelModule() == import->getTopLevelModule() - && (module == import - || !import->findUnderlyingClangModule() - ->isSubModuleOf(module->findUnderlyingClangModule()))) { - continue; - } - computeABIDependenciesForModule(import); - reexposeImportedABI(module, import); - } -} - -void ABIDependencyEvaluator::getABIDependenciesForSwiftModule( - ModuleDecl *module, SmallPtrSetImpl &abiDependencies) { - computeABIDependenciesForModule(module); - SmallPtrSet allImports; - ::getImmediateImports(module, allImports); - for (auto directDependency: allImports) { - abiDependencies.insert(directDependency); - for (auto exposedDependency: abiExportMap[directDependency]) { - abiDependencies.insert(exposedDependency); - } - } -} - -void ABIDependencyEvaluator::printModule( - const 
ModuleDecl *module, llvm::raw_ostream &os) { - module->getReverseFullModuleName().printForward(os); - os << (module->isNonSwiftModule() ? " (Clang)" : " (Swift)"); - os << " @ " << llvm::format("0x%llx", reinterpret_cast(module)); -} - -template -void ABIDependencyEvaluator::printModuleSet( - const SetLike &set, llvm::raw_ostream &os) { - os << "{ "; - for (auto module: set) { - printModule(module, os); os << ", "; - } - os << "}"; -} - -void ABIDependencyEvaluator::printABIExportMap(llvm::raw_ostream &os) const { - os << "ABI Export Map {{\n"; - for (auto &entry: abiExportMap) { - printModule(entry.first, os); os << " : "; - printModuleSet(entry.second, os); - os << "\n"; - } - os << "}}\n"; -} - -/// Compute the per-module information to be recorded in the trace file. -// -// The most interesting/tricky thing here is _which_ paths get recorded in -// the trace file as dependencies. It depends on how the module was synthesized. -// The key points are: -// -// 1. Paths to swiftmodules in the module cache or in the prebuilt cache are not -// recorded - Precondition: the corresponding path to the swiftinterface must -// already be present as a key in pathToModuleDecl. -// 2. swiftmodules next to a swiftinterface are saved if they are up-to-date. -// -// FIXME: Use the VFS instead of handling paths directly. We are particularly -// sloppy about handling relative paths in the dependency tracker. -static void computeSwiftModuleTraceInfo( - const SmallPtrSetImpl &abiDependencies, - const llvm::DenseMap &pathToModuleDecl, - const DependencyTracker &depTracker, - StringRef prebuiltCachePath, - std::vector &traceInfo) { - - SmallString<256> buffer; - - std::string errMsg; - llvm::raw_string_ostream err(errMsg); - - // FIXME: Use PrettyStackTrace instead. 
- auto errorUnexpectedPath = - [&pathToModuleDecl](llvm::raw_string_ostream &errStream) { - errStream << "The module <-> path mapping we have is:\n"; - for (auto &m: pathToModuleDecl) - errStream << m.second->getName() << " <-> " << m.first << '\n'; - llvm::report_fatal_error(errStream.str()); - }; - - using namespace llvm::sys; - - auto computeAdjacentInterfacePath = [](SmallVectorImpl &modPath) { - auto swiftInterfaceExt = - file_types::getExtension(file_types::TY_SwiftModuleInterfaceFile); - path::replace_extension(modPath, swiftInterfaceExt); - }; - - for (auto &depPath : depTracker.getDependencies()) { - - // Decide if this is a swiftmodule based on the extension of the raw - // dependency path, as the true file may have a different one. - // For example, this might happen when the canonicalized path points to - // a Content Addressed Storage (CAS) location. - auto moduleFileType = - file_types::lookupTypeForExtension(path::extension(depPath)); - auto isSwiftmodule = - moduleFileType == file_types::TY_SwiftModuleFile; - auto isSwiftinterface = - moduleFileType == file_types::TY_SwiftModuleInterfaceFile; - - if (!(isSwiftmodule || isSwiftinterface)) - continue; - - auto dep = pathToModuleDecl.find(depPath); - if (dep != pathToModuleDecl.end()) { - // Great, we recognize the path! Check if the file is still around. - - ModuleDecl *depMod = dep->second; - if(depMod->isResilient() && !isSwiftinterface) { - // FIXME: Ideally, we would check that the swiftmodule has a - // swiftinterface next to it. Tracked by rdar://problem/56351399. - } - - // FIXME: Better error handling - StringRef realDepPath - = fs::real_path(depPath, buffer, /*expand_tile*/true) - ? StringRef(depPath) // Couldn't find the canonical path, assume - // this is good enough. 
- : buffer.str(); - - bool isImportedDirectly = ::contains(abiDependencies, depMod); - - traceInfo.push_back( - {/*Name=*/ - depMod->getName(), - /*Path=*/ - realDepPath.str(), - // TODO: There is an edge case which is not handled here. - // When we build a framework using -import-underlying-module, or an - // app/test using -import-objc-header, we should look at the direct - // imports of the bridging modules, and mark those as our direct - // imports. - // TODO: Add negative test cases for the comment above. - // TODO: Describe precise semantics of "isImportedDirectly". - /*IsImportedDirectly=*/ - isImportedDirectly, - /*SupportsLibraryEvolution=*/ - depMod->isResilient()}); - buffer.clear(); - - continue; - } - - // If the depTracker had an interface, that means that we must've - // built a swiftmodule from that interface, so we should have that - // filename available. - if (isSwiftinterface) { - err << "Unexpected path for swiftinterface file:\n" << depPath << "\n"; - errorUnexpectedPath(err); - } - - // Skip cached modules in the prebuilt cache. We will add the corresponding - // swiftinterface from the SDK directly, but this isn't checked. :-/ - // - // FIXME: This is incorrect if both paths are not relative w.r.t. to the - // same root. - if (StringRef(depPath).startswith(prebuiltCachePath)) - continue; - - // If we have a swiftmodule next to an interface, that interface path will - // be saved (not checked), so don't save the path to this swiftmodule. - SmallString<256> moduleAdjacentInterfacePath(depPath); - computeAdjacentInterfacePath(moduleAdjacentInterfacePath); - if (::contains(pathToModuleDecl, moduleAdjacentInterfacePath)) - continue; - - // FIXME: The behavior of fs::exists for relative paths is undocumented. - // Use something else instead? 
- if (fs::exists(moduleAdjacentInterfacePath)) { - // This should be an error but it is not because of funkiness around - // compatible modules such as us having both armv7s.swiftinterface - // and armv7.swiftinterface in the dependency tracker. - continue; - } - buffer.clear(); - - // We might land here when we have a arm.swiftmodule in the cache path - // which added a dependency on a arm.swiftinterface (which was not loaded). - } - - // Almost a re-implementation of reversePathSortedFilenames :(. - std::sort( - traceInfo.begin(), traceInfo.end(), - [](const SwiftModuleTraceInfo &m1, const SwiftModuleTraceInfo &m2) -> bool { - return std::lexicographical_compare( - m1.Path.rbegin(), m1.Path.rend(), - m2.Path.rbegin(), m2.Path.rend()); - }); -} - -// [NOTE: Bailing-vs-crashing-in-trace-emission] There are certain edge cases -// in trace emission where an invariant that you think should hold does not hold -// in practice. For example, sometimes we have seen modules without any -// corresponding filename. -// -// Since the trace is a supplementary output for build system consumption, it -// it better to emit it on a best-effort basis instead of crashing and failing -// the build. -// -// Moreover, going forward, it would be nice if trace emission were more robust -// so we could emit the trace on a best-effort basis even if the dependency -// graph is ill-formed, so that the trace can be used as a debugging aid. 
-static bool emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule, - DependencyTracker *depTracker, - StringRef prebuiltCachePath, - StringRef loadedModuleTracePath) { - ASTContext &ctxt = mainModule->getASTContext(); - assert(!ctxt.hadError() - && "We should've already exited earlier if there was an error."); - - if (loadedModuleTracePath.empty()) - return false; - std::error_code EC; - llvm::raw_fd_ostream out(loadedModuleTracePath, EC, llvm::sys::fs::F_Append); - - if (out.has_error() || EC) { - ctxt.Diags.diagnose(SourceLoc(), diag::error_opening_output, - loadedModuleTracePath, EC.message()); - out.clear_error(); - return true; - } - - SmallPtrSet abiDependencies; - { - ABIDependencyEvaluator evaluator{}; - evaluator.getABIDependenciesForSwiftModule(mainModule, - abiDependencies); - } - - llvm::DenseMap pathToModuleDecl; - for (const auto &module : ctxt.getLoadedModules()) { - ModuleDecl *loadedDecl = module.second; - if (!loadedDecl) - llvm::report_fatal_error("Expected loaded modules to be non-null."); - if (loadedDecl == mainModule) - continue; - if (loadedDecl->getModuleFilename().empty()) { - // FIXME: rdar://problem/59853077 - // Ideally, this shouldn't happen. As a temporary workaround, avoid - // crashing with a message while we investigate the problem. - llvm::errs() << "WARNING: Module '" << loadedDecl->getName().str() - << "' has an empty filename. 
This is probably an " - << "invariant violation.\n" - << "Please report it as a compiler bug.\n"; - continue; - } - pathToModuleDecl.insert( - std::make_pair(loadedDecl->getModuleFilename(), loadedDecl)); - } - - std::vector swiftModules; - computeSwiftModuleTraceInfo(abiDependencies, - pathToModuleDecl, *depTracker, - prebuiltCachePath, swiftModules); - - LoadedModuleTraceFormat trace = { - /*version=*/LoadedModuleTraceFormat::CurrentVersion, - /*name=*/mainModule->getName(), - /*arch=*/ctxt.LangOpts.Target.getArchName().str(), swiftModules}; - - // raw_fd_ostream is unbuffered, and we may have multiple processes writing, - // so first write to memory and then dump the buffer to the trace file. - std::string stringBuffer; - { - llvm::raw_string_ostream memoryBuffer(stringBuffer); - json::Output jsonOutput(memoryBuffer, /*UserInfo=*/{}, - /*PrettyPrint=*/false); - json::jsonize(jsonOutput, trace, /*Required=*/true); - } - stringBuffer += "\n"; - out << stringBuffer; - - return true; -} - static void emitLoadedModuleTraceForAllPrimariesIfNeeded(ModuleDecl *mainModule, DependencyTracker *depTracker, const FrontendOptions &opts) { opts.InputsAndOutputs.forEachInputProducingSupplementaryOutput( [&](const InputFile &input) -> bool { - return emitLoadedModuleTraceIfNeeded( - mainModule, depTracker, opts.PrebuiltModuleCachePath, - input.loadedModuleTracePath()); + return emitLoadedModuleTraceIfNeeded(mainModule, depTracker, opts, + input); }); } diff --git a/lib/FrontendTool/ImportedModules.cpp b/lib/FrontendTool/ImportedModules.cpp index 857efcda8f98b..9316af5a09dc1 100644 --- a/lib/FrontendTool/ImportedModules.cpp +++ b/lib/FrontendTool/ImportedModules.cpp @@ -10,7 +10,7 @@ // //===----------------------------------------------------------------------===// -#include "ImportedModules.h" +#include "Dependencies.h" #include "swift/AST/ASTContext.h" #include "swift/AST/Decl.h" #include "swift/AST/DiagnosticEngine.h" diff --git a/lib/FrontendTool/ImportedModules.h 
b/lib/FrontendTool/ImportedModules.h deleted file mode 100644 index 510fa4ccdedde..0000000000000 --- a/lib/FrontendTool/ImportedModules.h +++ /dev/null @@ -1,26 +0,0 @@ -//===--- ImportedModules.h -- generates the list of imported modules ------===// -// -// This source file is part of the Swift.org open source project -// -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// - -#ifndef SWIFT_FRONTENDTOOL_IMPORTEDMODULES_H -#define SWIFT_FRONTENDTOOL_IMPORTEDMODULES_H - -namespace swift { - -class ASTContext; -class FrontendOptions; -class ModuleDecl; - -/// Emit the names of the modules imported by \c mainModule. -bool emitImportedModules(ModuleDecl *mainModule, const FrontendOptions &opts); -} // end namespace swift - -#endif diff --git a/lib/FrontendTool/LoadedModuleTrace.cpp b/lib/FrontendTool/LoadedModuleTrace.cpp new file mode 100644 index 0000000000000..7f81c982a5efe --- /dev/null +++ b/lib/FrontendTool/LoadedModuleTrace.cpp @@ -0,0 +1,764 @@ +//===--- ModuleTrace.cpp -- Emit a trace of all loaded Swift modules ------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "Dependencies.h" +#include "swift/AST/ASTContext.h" +#include "swift/AST/DiagnosticEngine.h" +#include "swift/AST/DiagnosticsFrontend.h" +#include "swift/AST/Module.h" +#include "swift/Basic/FileTypes.h" +#include "swift/Basic/JSONSerialization.h" +#include "swift/Frontend/FrontendOptions.h" + +#include "clang/Basic/Module.h" + +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/YAMLTraits.h" + +#if !defined(_MSC_VER) && !defined(__MINGW32__) +#include +#else +#include +#endif + +using namespace swift; + +namespace { +struct SwiftModuleTraceInfo { + Identifier Name; + std::string Path; + bool IsImportedDirectly; + bool SupportsLibraryEvolution; +}; + +struct LoadedModuleTraceFormat { + static const unsigned CurrentVersion = 2; + unsigned Version; + Identifier Name; + std::string Arch; + std::vector SwiftModules; +}; +} // namespace + +namespace swift { +namespace json { +template <> struct ObjectTraits { + static void mapping(Output &out, SwiftModuleTraceInfo &contents) { + StringRef name = contents.Name.str(); + out.mapRequired("name", name); + out.mapRequired("path", contents.Path); + out.mapRequired("isImportedDirectly", contents.IsImportedDirectly); + out.mapRequired("supportsLibraryEvolution", + contents.SupportsLibraryEvolution); + } +}; + +// Version notes: +// 1. Keys: name, arch, swiftmodules +// 2. 
New keys: version, swiftmodulesDetailedInfo +template <> struct ObjectTraits { + static void mapping(Output &out, LoadedModuleTraceFormat &contents) { + out.mapRequired("version", contents.Version); + + StringRef name = contents.Name.str(); + out.mapRequired("name", name); + + out.mapRequired("arch", contents.Arch); + + // The 'swiftmodules' key is kept for backwards compatibility. + std::vector moduleNames; + for (auto &m : contents.SwiftModules) + moduleNames.push_back(m.Path); + out.mapRequired("swiftmodules", moduleNames); + + out.mapRequired("swiftmodulesDetailedInfo", contents.SwiftModules); + } +}; +} // namespace json +} // namespace swift + +static bool isClangOverlayOf(ModuleDecl *potentialOverlay, + ModuleDecl *potentialUnderlying) { + return !potentialOverlay->isNonSwiftModule() && + potentialUnderlying->isNonSwiftModule() && + potentialOverlay->getName() == potentialUnderlying->getName(); +} + +// TODO: Delete this once changes from https://reviews.llvm.org/D83449 land on +// apple/llvm-project's swift/main branch. +template +static bool contains(const SetLike &setLike, Item item) { + return setLike.find(item) != setLike.end(); +} + +/// Get a set of modules imported by \p module. +/// +/// By default, all imports are included. +static void getImmediateImports( + ModuleDecl *module, SmallPtrSetImpl &imports, + ModuleDecl::ImportFilter importFilter = { + ModuleDecl::ImportFilterKind::Exported, + ModuleDecl::ImportFilterKind::Default, + ModuleDecl::ImportFilterKind::ImplementationOnly, + ModuleDecl::ImportFilterKind::SPIAccessControl, + ModuleDecl::ImportFilterKind::ShadowedByCrossImportOverlay}) { + SmallVector importList; + module->getImportedModules(importList, importFilter); + + for (ImportedModule &import : importList) + imports.insert(import.importedModule); +} + +namespace { +/// Helper type for computing (approximate) information about ABI-dependencies. +/// +/// This misses out on details such as typealiases and more. 
+/// See the "isImportedDirectly" field above for more details. +class ABIDependencyEvaluator { + /// Map of ABIs exported by a particular module, excluding itself. + /// + /// For example, consider (primed letters represent Clang modules): + /// \code + /// - A is @_exported-imported by B + /// - B is #imported by C' (via a compiler-generated umbrella header) + /// - C' is @_exported-imported by C (Swift overlay) + /// - D' is #imported by E' + /// - D' is @_exported-imported by D (Swift overlay) + /// - E' is @_exported-imported by E (Swift overlay) + /// \endcode + /// + /// Then the \c abiExportMap will be + /// \code + /// { A: {}, B: {A}, C: {B}, C': {B}, D: {}, D': {}, E: {D}, E': {D'} } + /// \endcode + /// + /// \b WARNING: Use \c reexposeImportedABI instead of inserting directly. + llvm::DenseMap> abiExportMap; + + /// Stack for depth-first traversal. + SmallVector searchStack; + + llvm::DenseSet visited; + + /// Helper function to handle invariant violations as crashes in debug mode. + void crashOnInvariantViolation( + llvm::function_ref f) const; + + /// Computes the ABI exports for \p importedModule and adds them to + /// \p module's ABI exports. + /// + /// If \p includeImportedModule is true, also adds \p importedModule to + /// \p module's ABI exports. + /// + /// Correct way to add entries to \c abiExportMap. + void reexposeImportedABI(ModuleDecl *module, ModuleDecl *importedModule, + bool includeImportedModule = true); + + /// Check if a Swift module is an overlay for some Clang module. + /// + /// FIXME: Delete this hack once SR-13363 is fixed and ModuleDecl has the + /// right API which we can use directly. + bool isOverlayOfClangModule(ModuleDecl *swiftModule); + + /// Check for cases where we have a fake cycle through an overlay. + /// + /// Sometimes, we have fake cycles in the import graph due to the Clang + /// importer injecting overlays between Clang modules. 
These don't represent + /// an actual cycle in the build, so we should ignore them. + /// + /// We check this lazily after detecting a cycle because it is difficult to + /// determine at the point where we see the overlay whether it was incorrectly + /// injected by the Clang importer or whether any of its imports will + /// eventually lead to a cycle. + /// + /// For more details, see [NOTE: ABIDependencyEvaluator-fake-cycle-detection] + /// + /// \param startOfCycle A pointer to the element of \c searchStack where + /// the module \em first appeared. + /// + /// \pre The module on top of \c searchStack is the same module as + /// *startOfCycle. + /// + /// \pre searchStack.begin() <= startOfCycle < searchStack.end() + bool isFakeCycleThroughOverlay(ModuleDecl **startOfCycle); + + /// Recursive step in computing ABI dependencies. + /// + /// Use this method instead of using the \c forClangModule/\c forSwiftModule + /// methods. + void computeABIDependenciesForModule(ModuleDecl *module); + void computeABIDependenciesForSwiftModule(ModuleDecl *module); + void computeABIDependenciesForClangModule(ModuleDecl *module); + + static void printModule(const ModuleDecl *module, llvm::raw_ostream &os); + + template + static void printModuleSet(const SetLike &set, llvm::raw_ostream &os); + +public: + ABIDependencyEvaluator() = default; + ABIDependencyEvaluator(const ABIDependencyEvaluator &) = delete; + ABIDependencyEvaluator(ABIDependencyEvaluator &&) = default; + + void getABIDependenciesForSwiftModule( + ModuleDecl *module, SmallPtrSetImpl &abiDependencies); + + void printABIExportMap(llvm::raw_ostream &os) const; +}; +} // end anonymous namespace + +// See [NOTE: Bailing-vs-crashing-in-trace-emission]. +// TODO: Use PrettyStackTrace instead? 
+void ABIDependencyEvaluator::crashOnInvariantViolation( + llvm::function_ref f) const { +#ifndef NDEBUG + std::string msg; + llvm::raw_string_ostream os(msg); + os << "error: invariant violation: "; + f(os); + llvm::report_fatal_error(os.str()); +#endif +} + +// [NOTE: Trace-Clang-submodule-complexity] +// +// A Clang module may have zero or more submodules. In practice, when traversing +// the imports of a module, we observe that different submodules of the same +// top-level module (almost) freely import each other. Despite this, we still +// need to conceptually traverse the tree formed by the submodule relationship +// (with the top-level module being the root). +// +// This needs to be taken care of in two ways: +// 1. We need to make sure we only go towards the leaves. It's okay if we "jump" +// branches, so long as we don't try to visit an ancestor when one of its +// descendants is still on the traversal stack, so that we don't end up with +// arbitrarily complex intra-module cycles. +// See also: [NOTE: Intra-module-leafwards-traversal]. +// 2. When adding entries to the ABI export map, we need to avoid marking +// dependencies within the same top-level module. This step is needed in +// addition to step 1 to avoid creating cycles like +// Overlay -> Underlying -> Submodule -> Overlay. 
+ +void ABIDependencyEvaluator::reexposeImportedABI(ModuleDecl *module, + ModuleDecl *importedModule, + bool includeImportedModule) { + if (module == importedModule) { + crashOnInvariantViolation([&](llvm::raw_string_ostream &os) { + os << "module "; + printModule(module, os); + os << " imports itself!\n"; + }); + return; + } + + auto addToABIExportMap = [this](ModuleDecl *module, ModuleDecl *reexport) { + if (module == reexport) { + crashOnInvariantViolation([&](llvm::raw_string_ostream &os) { + os << "expected module "; + printModule(reexport, os); + os << " to not re-export itself\n"; + }); + return; + } + if (reexport->isNonSwiftModule() && module->isNonSwiftModule() && + module->getTopLevelModule() == reexport->getTopLevelModule()) { + // Dependencies within the same top-level Clang module are not useful. + // See also: [NOTE: Trace-Clang-submodule-complexity]. + return; + } + + // We only care about dependencies across top-level modules and we want to + // avoid exploding abiExportMap with submodules. So we only insert entries + // after calling getTopLevelModule(). + + if (::isClangOverlayOf(module, reexport)) { + // For overlays, we need to have a dependency on the underlying module. + // Otherwise, we might accidentally create a Swift -> Swift cycle. + abiExportMap[module].insert( + reexport->getTopLevelModule(/*preferOverlay*/ false)); + return; + } + abiExportMap[module].insert( + reexport->getTopLevelModule(/*preferOverlay*/ true)); + }; + + computeABIDependenciesForModule(importedModule); + if (includeImportedModule) { + addToABIExportMap(module, importedModule); + } + // Force creation of default value if missing. This prevents abiExportMap from + // growing (and moving) when calling addToABIExportMap. If abiExportMap gets + // moved, then abiExportMap[importedModule] will be moved, forcing us to + // create a defensive copy to avoid iterator invalidation on move. 
+ (void)abiExportMap[module]; + for (auto reexportedModule : abiExportMap[importedModule]) + addToABIExportMap(module, reexportedModule); +} + +bool ABIDependencyEvaluator::isOverlayOfClangModule(ModuleDecl *swiftModule) { + assert(!swiftModule->isNonSwiftModule()); + + llvm::SmallPtrSet importList; + ::getImmediateImports(swiftModule, importList, + {ModuleDecl::ImportFilterKind::Exported}); + bool isOverlay = + llvm::any_of(importList, [&](ModuleDecl *importedModule) -> bool { + return isClangOverlayOf(swiftModule, importedModule); + }); + return isOverlay; +} + +// [NOTE: ABIDependencyEvaluator-fake-cycle-detection] +// +// First, let's consider a concrete example. +// - In Clang-land, ToyKit #imports CoreDoll. +// - The Swift overlay for CoreDoll imports both CoreDoll and ToyKit. +// Importing ToyKit from CoreDoll's overlay informally violates the layering +// of frameworks, but it doesn't actually create any cycles in the build +// dependencies. +// ┌───────────────────────────┐ +// ┌───│ CoreDoll.swiftmodule │ +// │ └───────────────────────────┘ +// │ │ +// import ToyKit @_exported import CoreDoll +// │ │ +// │ │ +// ▼ │ +// ┌──────────────────────────┐ │ +// │ ToyKit (ToyKit/ToyKit.h) │ │ +// └──────────────────────────┘ │ +// │ │ +// #import │ +// │ │ +// ▼ │ +// ┌──────────────────────────────┐ │ +// │CoreDoll (CoreDoll/CoreDoll.h)│◀──┘ +// └──────────────────────────────┘ +// +// Say we are trying to build a Swift module that imports ToyKit. Due to how +// module loading works, the Clang importer inserts the CoreDoll overlay +// between the ToyKit and CoreDoll Clang modules, creating a cycle in the +// import graph. 
+// +// ┌──────────────────────────┐ +// │ ToyKit (ToyKit/ToyKit.h) │◀──────────┐ +// └──────────────────────────┘ │ +// │ │ +// #import import ToyKit +// │ │ +// ▼ │ +// ┌────────────────────────────┐ │ +// │ CoreDoll.swiftmodule │─────────┘ +// └────────────────────────────┘ +// │ +// @_exported import CoreDoll +// │ +// ▼ +// ┌──────────────────────────────┐ +// │CoreDoll (CoreDoll/CoreDoll.h)│ +// └──────────────────────────────┘ +// +// This means that, at some point, searchStack will look like: +// +// [others] → ToyKit → CoreDoll (overlay) → ToyKit +// +// In the general case, there may be arbitrarily many modules in the cycle, +// including submodules. +// +// [others] → ToyKit → [others] → CoreDoll (overlay) → [others] → ToyKit +// +// where "[others]" indicates 0 or more modules of any kind. +// +// To detect this, we check that the start of the cycle is a Clang module and +// that there is at least one overlay between it and its recurrence at the end +// of the searchStack. If so, we assume we have detected a benign cycle which +// can be safely ignored. + +bool ABIDependencyEvaluator::isFakeCycleThroughOverlay( + ModuleDecl **startOfCycle) { + assert(startOfCycle >= searchStack.begin() && + startOfCycle < searchStack.end() && + "startOfCycleIter points to an element in searchStack"); + // The startOfCycle module must be a Clang module. + if (!(*startOfCycle)->isNonSwiftModule()) + return false; + // Next, we must have zero or more modules followed by a Swift overlay for a + // Clang module. 
+ return std::any_of( + startOfCycle + 1, searchStack.end(), [this](ModuleDecl *module) { + return !module->isNonSwiftModule() && isOverlayOfClangModule(module); + }); +} + +void ABIDependencyEvaluator::computeABIDependenciesForModule( + ModuleDecl *module) { + auto moduleIter = llvm::find(searchStack, module); + if (moduleIter != searchStack.end()) { + if (isFakeCycleThroughOverlay(moduleIter)) + return; + crashOnInvariantViolation([&](llvm::raw_string_ostream &os) { + os << "unexpected cycle in import graph!\n"; + for (auto m : searchStack) { + printModule(m, os); + if (!m->isNonSwiftModule()) { + os << " (isOverlay = " << isOverlayOfClangModule(m) << ")"; + } + os << "\ndepends on "; + } + printModule(module, os); + os << '\n'; + }); + return; + } + if (::contains(visited, module)) + return; + searchStack.push_back(module); + if (module->isNonSwiftModule()) + computeABIDependenciesForClangModule(module); + else + computeABIDependenciesForSwiftModule(module); + searchStack.pop_back(); + visited.insert(module); +} + +void ABIDependencyEvaluator::computeABIDependenciesForSwiftModule( + ModuleDecl *module) { + SmallPtrSet allImports; + ::getImmediateImports(module, allImports); + for (auto import : allImports) { + computeABIDependenciesForModule(import); + if (::isClangOverlayOf(module, import)) { + reexposeImportedABI(module, import, + /*includeImportedModule=*/false); + } + } + + SmallPtrSet reexportedImports; + ::getImmediateImports(module, reexportedImports, + {ModuleDecl::ImportFilterKind::Exported}); + for (auto reexportedImport : reexportedImports) { + reexposeImportedABI(module, reexportedImport); + } +} + +void ABIDependencyEvaluator::computeABIDependenciesForClangModule( + ModuleDecl *module) { + SmallPtrSet imports; + ::getImmediateImports(module, imports); + for (auto import : imports) { + // There are three cases here which can potentially create cycles: + // + // 1. Clang modules importing the stdlib. 
+ // See [NOTE: Pure-Clang-modules-privately-import-stdlib]. + // 2. Overlay S @_exported-imports underlying module S' and another Clang + // module C'. C' (transitively) #imports S' but it gets treated as if + // C' imports S. This creates a cycle: S -> C' -> ... -> S. + // In practice, this case is hit for + // Darwin (Swift) -> SwiftOverlayShims (Clang) -> Darwin (Swift). + // We may also hit this in a slightly different direction, in case + // the module directly imports SwiftOverlayShims: + // SwiftOverlayShims -> Darwin (Swift) -> SwiftOverlayShims + // The latter is handled later by isFakeCycleThroughOverlay. + // 3. [NOTE: Intra-module-leafwards-traversal] + // Cycles within the same top-level module. + // These don't matter for us, since we only care about the dependency + // graph at the granularity of top-level modules. So we ignore these + // by only considering parent -> submodule dependencies. + // See also [NOTE: Trace-Clang-submodule-complexity]. + if (import->isStdlibModule()) { + continue; + } + if (!import->isNonSwiftModule() && isOverlayOfClangModule(import) && + llvm::find(searchStack, import) != searchStack.end()) { + continue; + } + if (import->isNonSwiftModule() && + module->getTopLevelModule() == import->getTopLevelModule() && + (module == import || + !import->findUnderlyingClangModule()->isSubModuleOf( + module->findUnderlyingClangModule()))) { + continue; + } + computeABIDependenciesForModule(import); + reexposeImportedABI(module, import); + } +} + +void ABIDependencyEvaluator::getABIDependenciesForSwiftModule( + ModuleDecl *module, SmallPtrSetImpl &abiDependencies) { + computeABIDependenciesForModule(module); + SmallPtrSet allImports; + ::getImmediateImports(module, allImports); + for (auto directDependency : allImports) { + abiDependencies.insert(directDependency); + for (auto exposedDependency : abiExportMap[directDependency]) { + abiDependencies.insert(exposedDependency); + } + } +} + +void ABIDependencyEvaluator::printModule(const 
ModuleDecl *module, + llvm::raw_ostream &os) { + module->getReverseFullModuleName().printForward(os); + os << (module->isNonSwiftModule() ? " (Clang)" : " (Swift)"); + os << " @ " << llvm::format("0x%llx", reinterpret_cast(module)); +} + +template +void ABIDependencyEvaluator::printModuleSet(const SetLike &set, + llvm::raw_ostream &os) { + os << "{ "; + for (auto module : set) { + printModule(module, os); + os << ", "; + } + os << "}"; +} + +void ABIDependencyEvaluator::printABIExportMap(llvm::raw_ostream &os) const { + os << "ABI Export Map {{\n"; + for (auto &entry : abiExportMap) { + printModule(entry.first, os); + os << " : "; + printModuleSet(entry.second, os); + os << "\n"; + } + os << "}}\n"; +} + +/// Compute the per-module information to be recorded in the trace file. +// +// The most interesting/tricky thing here is _which_ paths get recorded in +// the trace file as dependencies. It depends on how the module was synthesized. +// The key points are: +// +// 1. Paths to swiftmodules in the module cache or in the prebuilt cache are not +// recorded - Precondition: the corresponding path to the swiftinterface must +// already be present as a key in pathToModuleDecl. +// 2. swiftmodules next to a swiftinterface are saved if they are up-to-date. +// +// FIXME: Use the VFS instead of handling paths directly. We are particularly +// sloppy about handling relative paths in the dependency tracker. +static void computeSwiftModuleTraceInfo( + const SmallPtrSetImpl &abiDependencies, + const llvm::DenseMap &pathToModuleDecl, + const DependencyTracker &depTracker, StringRef prebuiltCachePath, + std::vector &traceInfo) { + + SmallString<256> buffer; + + std::string errMsg; + llvm::raw_string_ostream err(errMsg); + + // FIXME: Use PrettyStackTrace instead. 
+ auto errorUnexpectedPath = + [&pathToModuleDecl](llvm::raw_string_ostream &errStream) { + errStream << "The module <-> path mapping we have is:\n"; + for (auto &m : pathToModuleDecl) + errStream << m.second->getName() << " <-> " << m.first << '\n'; + llvm::report_fatal_error(errStream.str()); + }; + + using namespace llvm::sys; + + auto computeAdjacentInterfacePath = [](SmallVectorImpl &modPath) { + auto swiftInterfaceExt = + file_types::getExtension(file_types::TY_SwiftModuleInterfaceFile); + path::replace_extension(modPath, swiftInterfaceExt); + }; + + for (auto &depPath : depTracker.getDependencies()) { + + // Decide if this is a swiftmodule based on the extension of the raw + // dependency path, as the true file may have a different one. + // For example, this might happen when the canonicalized path points to + // a Content Addressed Storage (CAS) location. + auto moduleFileType = + file_types::lookupTypeForExtension(path::extension(depPath)); + auto isSwiftmodule = moduleFileType == file_types::TY_SwiftModuleFile; + auto isSwiftinterface = + moduleFileType == file_types::TY_SwiftModuleInterfaceFile; + + if (!(isSwiftmodule || isSwiftinterface)) + continue; + + auto dep = pathToModuleDecl.find(depPath); + if (dep != pathToModuleDecl.end()) { + // Great, we recognize the path! Check if the file is still around. + + ModuleDecl *depMod = dep->second; + if (depMod->isResilient() && !isSwiftinterface) { + // FIXME: Ideally, we would check that the swiftmodule has a + // swiftinterface next to it. Tracked by rdar://problem/56351399. + } + + // FIXME: Better error handling + StringRef realDepPath = + fs::real_path(depPath, buffer, /*expand_tile*/ true) + ? StringRef(depPath) // Couldn't find the canonical path, assume + // this is good enough. 
+ : buffer.str(); + + bool isImportedDirectly = ::contains(abiDependencies, depMod); + + traceInfo.push_back( + {/*Name=*/ + depMod->getName(), + /*Path=*/ + realDepPath.str(), + // TODO: There is an edge case which is not handled here. + // When we build a framework using -import-underlying-module, or an + // app/test using -import-objc-header, we should look at the direct + // imports of the bridging modules, and mark those as our direct + // imports. + // TODO: Add negative test cases for the comment above. + // TODO: Describe precise semantics of "isImportedDirectly". + /*IsImportedDirectly=*/ + isImportedDirectly, + /*SupportsLibraryEvolution=*/ + depMod->isResilient()}); + buffer.clear(); + + continue; + } + + // If the depTracker had an interface, that means that we must've + // built a swiftmodule from that interface, so we should have that + // filename available. + if (isSwiftinterface) { + err << "Unexpected path for swiftinterface file:\n" << depPath << "\n"; + errorUnexpectedPath(err); + } + + // Skip cached modules in the prebuilt cache. We will add the corresponding + // swiftinterface from the SDK directly, but this isn't checked. :-/ + // + // FIXME: This is incorrect if both paths are not relative w.r.t. to the + // same root. + if (StringRef(depPath).startswith(prebuiltCachePath)) + continue; + + // If we have a swiftmodule next to an interface, that interface path will + // be saved (not checked), so don't save the path to this swiftmodule. + SmallString<256> moduleAdjacentInterfacePath(depPath); + computeAdjacentInterfacePath(moduleAdjacentInterfacePath); + if (::contains(pathToModuleDecl, moduleAdjacentInterfacePath)) + continue; + + // FIXME: The behavior of fs::exists for relative paths is undocumented. + // Use something else instead? 
+ if (fs::exists(moduleAdjacentInterfacePath)) { + // This should be an error but it is not because of funkiness around + // compatible modules such as us having both armv7s.swiftinterface + // and armv7.swiftinterface in the dependency tracker. + continue; + } + buffer.clear(); + + // We might land here when we have an arm.swiftmodule in the cache path + // which added a dependency on an arm.swiftinterface (which was not loaded). + } + + // Almost a re-implementation of reversePathSortedFilenames :(. + std::sort(traceInfo.begin(), traceInfo.end(), + [](const SwiftModuleTraceInfo &m1, + const SwiftModuleTraceInfo &m2) -> bool { + return std::lexicographical_compare( + m1.Path.rbegin(), m1.Path.rend(), m2.Path.rbegin(), + m2.Path.rend()); + }); +} + +// [NOTE: Bailing-vs-crashing-in-trace-emission] There are certain edge cases +// in trace emission where an invariant that you think should hold does not hold +// in practice. For example, sometimes we have seen modules without any +// corresponding filename. +// +// Since the trace is a supplementary output for build system consumption, it +// is better to emit it on a best-effort basis instead of crashing and failing +// the build. +// +// Moreover, going forward, it would be nice if trace emission were more robust +// so we could emit the trace on a best-effort basis even if the dependency +// graph is ill-formed, so that the trace can be used as a debugging aid. 
+bool swift::emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule, + DependencyTracker *depTracker, + const FrontendOptions &opts, + const InputFile &input) { + ASTContext &ctxt = mainModule->getASTContext(); + assert(!ctxt.hadError() && + "We should've already exited earlier if there was an error."); + + StringRef loadedModuleTracePath = input.loadedModuleTracePath(); + if (loadedModuleTracePath.empty()) + return false; + std::error_code EC; + llvm::raw_fd_ostream out(loadedModuleTracePath, EC, llvm::sys::fs::F_Append); + + if (out.has_error() || EC) { + ctxt.Diags.diagnose(SourceLoc(), diag::error_opening_output, + loadedModuleTracePath, EC.message()); + out.clear_error(); + return true; + } + + SmallPtrSet abiDependencies; + { + ABIDependencyEvaluator evaluator{}; + evaluator.getABIDependenciesForSwiftModule(mainModule, abiDependencies); + } + + llvm::DenseMap pathToModuleDecl; + for (const auto &module : ctxt.getLoadedModules()) { + ModuleDecl *loadedDecl = module.second; + if (!loadedDecl) + llvm::report_fatal_error("Expected loaded modules to be non-null."); + if (loadedDecl == mainModule) + continue; + if (loadedDecl->getModuleFilename().empty()) { + // FIXME: rdar://problem/59853077 + // Ideally, this shouldn't happen. As a temporary workaround, avoid + // crashing with a message while we investigate the problem. + llvm::errs() << "WARNING: Module '" << loadedDecl->getName().str() + << "' has an empty filename. 
This is probably an " + << "invariant violation.\n" + << "Please report it as a compiler bug.\n"; + continue; + } + pathToModuleDecl.insert( + std::make_pair(loadedDecl->getModuleFilename(), loadedDecl)); + } + + std::vector swiftModules; + computeSwiftModuleTraceInfo(abiDependencies, pathToModuleDecl, *depTracker, + opts.PrebuiltModuleCachePath, swiftModules); + + LoadedModuleTraceFormat trace = { + /*version=*/LoadedModuleTraceFormat::CurrentVersion, + /*name=*/mainModule->getName(), + /*arch=*/ctxt.LangOpts.Target.getArchName().str(), swiftModules}; + + // raw_fd_ostream is unbuffered, and we may have multiple processes writing, + // so first write to memory and then dump the buffer to the trace file. + std::string stringBuffer; + { + llvm::raw_string_ostream memoryBuffer(stringBuffer); + json::Output jsonOutput(memoryBuffer, /*UserInfo=*/{}, + /*PrettyPrint=*/false); + json::jsonize(jsonOutput, trace, /*Required=*/true); + } + stringBuffer += "\n"; + out << stringBuffer; + + return true; +} diff --git a/lib/FrontendTool/ScanDependencies.cpp b/lib/FrontendTool/ScanDependencies.cpp index 4968a135ff3c8..4c5443762478b 100644 --- a/lib/FrontendTool/ScanDependencies.cpp +++ b/lib/FrontendTool/ScanDependencies.cpp @@ -2,13 +2,14 @@ // // This source file is part of the Swift.org open source project // -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Copyright (c) 2014 - 2020 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// + #include "ScanDependencies.h" #include "swift/AST/ASTContext.h" #include "swift/AST/Decl.h" From 13c97a89054125ab8e2b51fe27e3c1ae2dd252f1 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Wed, 11 Nov 2020 14:26:25 -0800 Subject: [PATCH 10/75] [NFC] Downgrade CompilerInvocation to FrontendOptions --- lib/FrontendTool/FrontendTool.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp index e67962af25023..2756fd899ad5c 100644 --- a/lib/FrontendTool/FrontendTool.cpp +++ b/lib/FrontendTool/FrontendTool.cpp @@ -461,10 +461,9 @@ static bool compileLLVMIR(CompilerInstance &Instance) { Module.get(), inputsAndOutputs.getSingleOutputFilename()); } -static void verifyGenericSignaturesIfNeeded(const CompilerInvocation &Invocation, +static void verifyGenericSignaturesIfNeeded(const FrontendOptions &opts, ASTContext &Context) { - auto verifyGenericSignaturesInModule = - Invocation.getFrontendOptions().VerifyGenericSignaturesInModule; + auto verifyGenericSignaturesInModule = opts.VerifyGenericSignaturesInModule; if (verifyGenericSignaturesInModule.empty()) return; if (auto module = Context.getModuleByName(verifyGenericSignaturesInModule)) @@ -932,7 +931,7 @@ static void performEndOfPipelineActions(CompilerInstance &Instance) { ctx.verifyAllLoadedModules(); // Verify generic signatures if we've been asked to. - verifyGenericSignaturesIfNeeded(Invocation, ctx); + verifyGenericSignaturesIfNeeded(Invocation.getFrontendOptions(), ctx); } // Emit any additional outputs that we only need for a successful compilation. 
From e9a80debd4ace1c58b1279f3aafd089e90f20679 Mon Sep 17 00:00:00 2001 From: Ben Barham Date: Thu, 12 Nov 2020 10:35:44 +1000 Subject: [PATCH 11/75] Prevent superfluous diagnostic-database execution Add dependencies and output to the diagnostic-database target so that it's not re-run every time swift-frontend is compiled. --- localization/CMakeLists.txt | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/localization/CMakeLists.txt b/localization/CMakeLists.txt index 221fb264ee62d..07a3585a66c55 100644 --- a/localization/CMakeLists.txt +++ b/localization/CMakeLists.txt @@ -1,6 +1,8 @@ -add_custom_target(diagnostic-database) +set(diagnostic_witness "${CMAKE_BINARY_DIR}/share/swift/diagnostics/generated") -add_custom_command(TARGET diagnostic-database +add_custom_command( + OUTPUT + ${diagnostic_witness} COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/diagnostics/ ${CMAKE_BINARY_DIR}/share/swift/diagnostics/ COMMAND @@ -10,11 +12,17 @@ add_custom_command(TARGET diagnostic-database "${SWIFT_NATIVE_SWIFT_TOOLS_PATH}/swift-serialize-diagnostics" --input-file-path ${CMAKE_BINARY_DIR}/share/swift/diagnostics/en.yaml --output-directory ${CMAKE_BINARY_DIR}/share/swift/diagnostics/ + COMMAND + ${CMAKE_COMMAND} -E touch ${diagnostic_witness} + DEPENDS + swift-def-to-yaml-converter + swift-serialize-diagnostics + # Add files in diagnostics subdirectory when they're created ) +add_custom_target(diagnostic-database DEPENDS ${diagnostic_witness}) + add_dependencies(swift-frontend diagnostic-database) -add_dependencies(diagnostic-database swift-serialize-diagnostics) -add_dependencies(diagnostic-database swift-def-to-yaml-converter) swift_install_in_component( DIRECTORY ${CMAKE_BINARY_DIR}/share/swift/diagnostics/ From d57d9fd7268fa4a3493e0bccd07b75754b196782 Mon Sep 17 00:00:00 2001 From: Ben Barham Date: Thu, 12 Nov 2020 10:03:45 +1000 Subject: [PATCH 12/75] Add features file describing new available flags The only available 
method to check for features at the moment is through version checks. This is errorprone and doesn't work well for OSS toolchains or locally built compilers. features.json is intended to communicate to build systems that a new flag is available, in order to assist with a transitional period where not all supported toolchains may have a particular flag. It is *not* intended to be a comprehensive report of all flags available. Note that the names are intended to be features, so while they may match up to the corresponding flag name, this isn't necessarily the case. --- lib/Option/CMakeLists.txt | 25 +++++++++++++++++++++++++ lib/Option/features.json | 10 ++++++++++ 2 files changed, 35 insertions(+) create mode 100644 lib/Option/features.json diff --git a/lib/Option/CMakeLists.txt b/lib/Option/CMakeLists.txt index 70280746f2b28..0e8999981fa46 100644 --- a/lib/Option/CMakeLists.txt +++ b/lib/Option/CMakeLists.txt @@ -6,3 +6,28 @@ add_dependencies(swiftOption target_link_libraries(swiftOption PRIVATE swiftBasic) +set(features_file_src "${CMAKE_CURRENT_SOURCE_DIR}/features.json") +set(features_file_dest "${CMAKE_BINARY_DIR}/share/swift/features.json") + +add_custom_command( + OUTPUT + ${features_file_dest} + COMMAND + ${CMAKE_COMMAND} -E copy ${features_file_src} ${features_file_dest} + DEPENDS + ${features_file_src} +) + +add_custom_target(swift-features-file DEPENDS ${features_file_dest}) + +add_dependencies(swiftOption swift-features-file) + +swift_install_in_component( + FILES + ${features_file_dest} + DESTINATION + "share/swift" + COMPONENT + compiler +) + diff --git a/lib/Option/features.json b/lib/Option/features.json new file mode 100644 index 0000000000000..fabdc50e67277 --- /dev/null +++ b/lib/Option/features.json @@ -0,0 +1,10 @@ +{ + "features": [ + { + "name": "experimental-skip-all-function-bodies" + }, + { + "name": "experimental-allow-module-with-compiler-errors" + } + ] +} From fcd70cb06d862d1cce39032776bc45fd0f91db54 Mon Sep 17 00:00:00 2001 From: Nate 
Chandler Date: Thu, 12 Nov 2020 07:54:30 -0800 Subject: [PATCH 13/75] [Async CC] Add execution test for thin_to_thick. --- .../async/run-thintothick-int64-to-void.sil | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 test/IRGen/async/run-thintothick-int64-to-void.sil diff --git a/test/IRGen/async/run-thintothick-int64-to-void.sil b/test/IRGen/async/run-thintothick-int64-to-void.sil new file mode 100644 index 0000000000000..fe34fee02b038 --- /dev/null +++ b/test/IRGen/async/run-thintothick-int64-to-void.sil @@ -0,0 +1,49 @@ +// RUN: %empty-directory(%t) +// RUN: %target-build-swift-dylib(%t/%target-library-name(PrintShims)) %S/../../Inputs/print-shims.swift -module-name PrintShims -emit-module -emit-module-path %t/PrintShims.swiftmodule +// RUN: %target-codesign %t/%target-library-name(PrintShims) +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -emit-ir -I %t -L %t -lPrintShim | %FileCheck %s --check-prefix=CHECK-LL +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -module-name main -o %t/main -I %t -L %t -lPrintShims %target-rpath(%t) +// RUN: %target-codesign %t/main +// RUN: %target-run %t/main %t/%target-library-name(PrintShims) | %FileCheck %s + +// REQUIRES: executable_test +// REQUIRES: swift_test_mode_optimize_none +// REQUIRES: concurrency +// UNSUPPORTED: use_os_stdlib + +import Builtin +import Swift +import PrintShims +import _Concurrency + +sil public_external @printInt64 : $@convention(thin) (Int64) -> () + +// CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @afun2(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @afun2 : $@async @convention(thin) (Int64) -> () { +entry(%int : $Int64): + %print = function_ref @printInt64 : $@convention(thin) (Int64) -> () + %result = apply %print(%int) : $@convention(thin) (Int64) -> () // CHECK: 9999 + return %result : $() +} + +// CHECK-LL: 
define{{( dllexport)?}}{{( protected)?}} swiftcc void @test_apply_of_thin_to_thick(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @test_apply_of_thin_to_thick : $@async @convention(thin) () -> () { +entry: + %f = function_ref @afun2 : $@async @convention(thin) (Int64) -> () + %c = thin_to_thick_function %f : $@async @convention(thin) (Int64) -> () to $@async @callee_guaranteed (Int64) -> () + %int_literal = integer_literal $Builtin.Int64, 9999 + %int = struct $Int64 (%int_literal : $Builtin.Int64) + %app = apply %c(%int) : $@async @callee_guaranteed (Int64) -> () + %result = tuple() + return %result : $() +} + +sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): + %test_apply_of_thin_to_thick = function_ref @test_apply_of_thin_to_thick : $@async @convention(thin) () -> () + %result = apply %test_apply_of_thin_to_thick() : $@async @convention(thin) () -> () + + %out_literal = integer_literal $Builtin.Int32, 0 + %out = struct $Int32 (%out_literal : $Builtin.Int32) + return %out : $Int32 +} From 0484f3a3e7bc820cd1f8df80b7f100e5f0a1ab2f Mon Sep 17 00:00:00 2001 From: Varun Gandhi Date: Thu, 29 Oct 2020 22:30:19 -0700 Subject: [PATCH 14/75] [NFC] Reuse AST/ClangTypeConverter in IRGen. Also remove the original implementation from GenClangType.cpp since it isn't used anymore. 
--- include/swift/AST/ASTContext.h | 8 + lib/AST/ASTContext.cpp | 5 + lib/AST/ClangTypeConverter.cpp | 11 +- lib/AST/ClangTypeConverter.h | 2 + lib/IRGen/GenClangType.cpp | 786 +-------------------------------- lib/IRGen/GenObjC.cpp | 2 + lib/IRGen/IRGenModule.cpp | 6 +- lib/IRGen/IRGenModule.h | 3 - 8 files changed, 34 insertions(+), 789 deletions(-) diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h index c9f3fbe06e187..5ff10d980b4c0 100644 --- a/include/swift/AST/ASTContext.h +++ b/include/swift/AST/ASTContext.h @@ -640,6 +640,14 @@ class ASTContext final { /// if applicable. const Decl *getSwiftDeclForExportedClangDecl(const clang::Decl *decl); + /// General conversion method from Swift types -> Clang types. + /// + /// HACK: This method is only intended to be called from a specific place in + /// IRGen. For converting function types, strongly prefer using one of the + /// other methods instead, instead of manually iterating over parameters + /// and results. + const clang::Type *getClangTypeForIRGen(Type ty); + /// Determine whether the given Swift type is representable in a /// given foreign language. 
ForeignRepresentationInfo diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp index 7cf42afa2d904..22edb7daec01c 100644 --- a/lib/AST/ASTContext.cpp +++ b/lib/AST/ASTContext.cpp @@ -54,6 +54,7 @@ #include "swift/Subsystems.h" #include "swift/Syntax/References.h" #include "swift/Syntax/SyntaxArena.h" +#include "clang/AST/Type.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringMap.h" @@ -4589,6 +4590,10 @@ ASTContext::getSwiftDeclForExportedClangDecl(const clang::Decl *decl) { return impl.Converter->getSwiftDeclForExportedClangDecl(decl); } +const clang::Type * +ASTContext::getClangTypeForIRGen(Type ty) { + return getClangTypeConverter().convert(ty).getTypePtrOrNull(); +} CanGenericSignature ASTContext::getSingleGenericParameterSignature() const { if (auto theSig = getImpl().SingleGenericParameterSignature) diff --git a/lib/AST/ClangTypeConverter.cpp b/lib/AST/ClangTypeConverter.cpp index 8c3b4773d43ef..52e0df90de641 100644 --- a/lib/AST/ClangTypeConverter.cpp +++ b/lib/AST/ClangTypeConverter.cpp @@ -12,13 +12,14 @@ // // This file implements generation of Clang AST types from Swift AST types for // types that are representable in Objective-C interfaces. -// Large chunks of the code are lightly modified versions of the code in -// IRGen/GenClangType.cpp (which should eventually go away), so make sure -// to keep the two in sync. -// The three major differences are that, in this file: +// +// The usage of ClangTypeConverter at the AST level means that we may +// encounter ill-formed types and/or sugared types. To avoid crashing and +// keeping sugar as much as possible (in case the generated Clang type needs +// to be surfaced to the user): +// // 1. We fail gracefully instead of asserting/UB. // 2. We try to keep clang sugar instead of discarding it. -// 3. We use getAs instead of cast as we handle Swift types with sugar. 
// //===----------------------------------------------------------------------===// diff --git a/lib/AST/ClangTypeConverter.h b/lib/AST/ClangTypeConverter.h index b590b3ef0c0ec..55223b086e9dc 100644 --- a/lib/AST/ClangTypeConverter.h +++ b/lib/AST/ClangTypeConverter.h @@ -94,6 +94,8 @@ class ClangTypeConverter : SmallVectorImpl &templateArgs); private: + friend ASTContext; // HACK: expose `convert` method to ASTContext + clang::QualType convert(Type type); clang::QualType convertMemberType(NominalTypeDecl *DC, diff --git a/lib/IRGen/GenClangType.cpp b/lib/IRGen/GenClangType.cpp index 8bc35dd99c134..5fbda4ebd0ed7 100644 --- a/lib/IRGen/GenClangType.cpp +++ b/lib/IRGen/GenClangType.cpp @@ -10,782 +10,25 @@ // //===----------------------------------------------------------------------===// // -// This file implements generation of Clang AST types from Swift AST types -// for types that are representable in Objective-C interfaces. -// AST/ClangTypeConverter.cpp duplicates a bunch of code from here, so make -// sure to keep the two in sync. +// Wrapper functions for creating Clang types from Swift types. // //===----------------------------------------------------------------------===// -#include "llvm/ADT/StringSwitch.h" +#include "IRGenModule.h" + #include "swift/AST/ASTContext.h" -#include "swift/AST/CanTypeVisitor.h" -#include "swift/AST/Decl.h" -#include "swift/AST/ExistentialLayout.h" -#include "swift/AST/NameLookup.h" -#include "swift/SIL/SILType.h" -#include "swift/ClangImporter/ClangImporter.h" +#include "swift/AST/Types.h" + #include "clang/AST/ASTContext.h" -#include "clang/AST/Attr.h" #include "clang/AST/CanonicalType.h" -#include "clang/AST/Decl.h" -#include "clang/AST/DeclObjC.h" #include "clang/AST/Type.h" -#include "clang/Sema/Sema.h" -#include "clang/Basic/TargetInfo.h" -#include "IRGenModule.h" using namespace swift; using namespace irgen; -/// Global information about importing clang types. 
-class swift::irgen::ClangTypeConverter { - llvm::DenseMap Cache; - - ClangTypeConverter(const ClangTypeConverter &) = delete; - ClangTypeConverter &operator=(const ClangTypeConverter &) = delete; - -public: - ClangTypeConverter() = default; - clang::CanQualType convert(IRGenModule &IGM, CanType type); - clang::CanQualType reverseBuiltinTypeMapping(IRGenModule &IGM, - CanStructType type); -}; - -static CanType getNamedSwiftType(ModuleDecl *stdlib, StringRef name) { - auto &ctx = stdlib->getASTContext(); - SmallVector results; - stdlib->lookupValue(ctx.getIdentifier(name), NLKind::QualifiedLookup, - results); - - // If we have one single type decl, and that decl has been - // type-checked, return its declared type. - // - // ...non-type-checked types should only ever show up here because - // of test cases using -enable-source-import, but unfortunately - // that's a real thing. - if (results.size() == 1) { - if (auto typeDecl = dyn_cast(results[0])) - return typeDecl->getDeclaredInterfaceType()->getCanonicalType(); - } - return CanType(); -} - -static clang::CanQualType -getClangBuiltinTypeFromKind(const clang::ASTContext &context, - clang::BuiltinType::Kind kind) { - switch (kind) { -#define BUILTIN_TYPE(Id, SingletonId) \ - case clang::BuiltinType::Id: \ - return context.SingletonId; -#include "clang/AST/BuiltinTypes.def" -#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ - case clang::BuiltinType::Id: \ - return context.SingletonId; -#include "clang/Basic/OpenCLImageTypes.def" -#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ - case clang::BuiltinType::Id: \ - return context.Id##Ty; -#include "clang/Basic/OpenCLExtensionTypes.def" -#define SVE_TYPE(Name, Id, SingletonId) \ - case clang::BuiltinType::Id: \ - return context.SingletonId; -#include "clang/Basic/AArch64SVEACLETypes.def" - } - - llvm_unreachable("Not a valid BuiltinType."); -} - -static clang::CanQualType getClangSelectorType( - const clang::ASTContext &clangCtx) { - return 
clangCtx.getPointerType(clangCtx.ObjCBuiltinSelTy); -} - -static clang::CanQualType getClangMetatypeType( - const clang::ASTContext &clangCtx) { - clang::QualType clangType = - clangCtx.getObjCObjectType(clangCtx.ObjCBuiltinClassTy, nullptr, 0); - clangType = clangCtx.getObjCObjectPointerType(clangType); - return clangCtx.getCanonicalType(clangType); -} - -static clang::CanQualType getClangIdType( - const clang::ASTContext &clangCtx) { - clang::QualType clangType = - clangCtx.getObjCObjectType(clangCtx.ObjCBuiltinIdTy, nullptr, 0); - clangType = clangCtx.getObjCObjectPointerType(clangType); - return clangCtx.getCanonicalType(clangType); -} - -static clang::CanQualType getClangDecayedVaListType( - const clang::ASTContext &clangCtx) { - clang::QualType clangType = - clangCtx.getCanonicalType(clangCtx.getBuiltinVaListType()); - if (clangType->isConstantArrayType()) - clangType = clangCtx.getDecayedType(clangType); - return clangCtx.getCanonicalType(clangType); -} - -namespace { -/// Given a Swift type, attempt to return an appropriate Clang -/// CanQualType for the purpose of generating correct code for the -/// ABI. -class GenClangType : public CanTypeVisitor { - IRGenModule &IGM; - irgen::ClangTypeConverter &Converter; - -public: - GenClangType(IRGenModule &IGM, irgen::ClangTypeConverter &converter) - : IGM(IGM), Converter(converter) {} - - const clang::ASTContext &getClangASTContext() const { - return IGM.getClangASTContext(); - } - - /// Return the Clang struct type which was imported and resulted in - /// this Swift struct type. We do not currently handle generating a - /// new Clang struct type for Swift struct types that are created - /// independently of importing a Clang module. 
- clang::CanQualType visitStructType(CanStructType type); - clang::CanQualType visitTupleType(CanTupleType type); - clang::CanQualType visitMetatypeType(CanMetatypeType type); - clang::CanQualType visitExistentialMetatypeType(CanExistentialMetatypeType type); - clang::CanQualType visitProtocolType(CanProtocolType type); - clang::CanQualType visitClassType(CanClassType type); - clang::CanQualType visitBoundGenericClassType(CanBoundGenericClassType type); - clang::CanQualType visitBoundGenericType(CanBoundGenericType type); - clang::CanQualType visitEnumType(CanEnumType type); - clang::CanQualType visitFunctionType(CanFunctionType type); - clang::CanQualType visitProtocolCompositionType( - CanProtocolCompositionType type); - clang::CanQualType visitBuiltinRawPointerType(CanBuiltinRawPointerType type); - clang::CanQualType visitBuiltinIntegerType(CanBuiltinIntegerType type); - clang::CanQualType visitBuiltinFloatType(CanBuiltinFloatType type); - clang::CanQualType visitArchetypeType(CanArchetypeType type); - clang::CanQualType visitSILFunctionType(CanSILFunctionType type); - clang::CanQualType visitGenericTypeParamType(CanGenericTypeParamType type); - clang::CanQualType visitDynamicSelfType(CanDynamicSelfType type); - - clang::CanQualType visitSILBlockStorageType(CanSILBlockStorageType type); - - clang::CanQualType visitType(CanType type); - - clang::CanQualType getCanonicalType(clang::QualType type) { - return getClangASTContext().getCanonicalType(type); - } - - clang::CanQualType convertMemberType(NominalTypeDecl *DC, - StringRef memberName); -}; -} // end anonymous namespace - -clang::CanQualType -GenClangType::convertMemberType(NominalTypeDecl *DC, StringRef memberName) { - auto memberTypeDecl = cast( - DC->lookupDirect(IGM.Context.getIdentifier(memberName))[0]); - auto memberType = memberTypeDecl->getDeclaredInterfaceType() - ->getCanonicalType(); - return Converter.convert(IGM, memberType); -} - -static clang::CanQualType getClangVectorType(const 
clang::ASTContext &ctx, - clang::BuiltinType::Kind eltKind, - clang::VectorType::VectorKind vecKind, - StringRef numEltsString) { - unsigned numElts; - bool failedParse = numEltsString.getAsInteger(10, numElts); - assert(!failedParse && "vector type name didn't end in count?"); - (void) failedParse; - - auto eltTy = getClangBuiltinTypeFromKind(ctx, eltKind); - auto vecTy = ctx.getVectorType(eltTy, numElts, vecKind); - return ctx.getCanonicalType(vecTy); -} - -clang::CanQualType GenClangType::visitStructType(CanStructType type) { - auto &ctx = IGM.getClangASTContext(); - - auto swiftDecl = type->getDecl(); - StringRef name = swiftDecl->getName().str(); - - // We assume that the importer translates all of the following types - // directly to structs in the standard library. - - // We want to recognize most of these types by name. -#define CHECK_NAMED_TYPE(NAME, CLANG_TYPE) do { \ - if (name == (NAME)) return CLANG_TYPE; \ - } while (false) - - CHECK_NAMED_TYPE("CGFloat", convertMemberType(swiftDecl, "NativeType")); - CHECK_NAMED_TYPE("OpaquePointer", ctx.VoidPtrTy); - CHECK_NAMED_TYPE("CVaListPointer", getClangDecayedVaListType(ctx)); - CHECK_NAMED_TYPE("DarwinBoolean", ctx.UnsignedCharTy); - CHECK_NAMED_TYPE(swiftDecl->getASTContext().getSwiftName( - KnownFoundationEntity::NSZone), - ctx.VoidPtrTy); - CHECK_NAMED_TYPE("WindowsBool", ctx.IntTy); - CHECK_NAMED_TYPE("ObjCBool", ctx.ObjCBuiltinBoolTy); - CHECK_NAMED_TYPE("Selector", getClangSelectorType(ctx)); - CHECK_NAMED_TYPE("UnsafeRawPointer", ctx.VoidPtrTy); - CHECK_NAMED_TYPE("UnsafeMutableRawPointer", ctx.VoidPtrTy); -#undef CHECK_NAMED_TYPE - - // Map vector types to the corresponding C vectors. 
-#define MAP_SIMD_TYPE(TYPE_NAME, _, BUILTIN_KIND) \ - if (name.startswith(#TYPE_NAME)) { \ - return getClangVectorType(ctx, clang::BuiltinType::BUILTIN_KIND, \ - clang::VectorType::GenericVector, \ - name.drop_front(sizeof(#TYPE_NAME)-1)); \ - } -#include "swift/ClangImporter/SIMDMappedTypes.def" - - // Everything else we see here ought to be a translation of a builtin. - return Converter.reverseBuiltinTypeMapping(IGM, type); -} - -static clang::CanQualType getClangBuiltinTypeFromTypedef( - clang::Sema &sema, StringRef typedefName) { - auto &context = sema.getASTContext(); - - auto identifier = &context.Idents.get(typedefName); - auto found = sema.LookupSingleName(sema.TUScope, identifier, - clang::SourceLocation(), - clang::Sema::LookupOrdinaryName); - auto typedefDecl = dyn_cast_or_null(found); - if (!typedefDecl) - return {}; - - auto underlyingTy = - context.getCanonicalType(typedefDecl->getUnderlyingType()); - - if (underlyingTy->getAs()) - return underlyingTy; - return {}; -} - -clang::CanQualType -irgen::ClangTypeConverter::reverseBuiltinTypeMapping(IRGenModule &IGM, - CanStructType type) { - // Handle builtin types by adding entries to the cache that reverse - // the mapping done by the importer. We could try to look at the - // members of the struct instead, but even if that's ABI-equivalent - // (which it had better be!), it might erase interesting semantic - // differences like integers vs. characters. This is important - // because CC lowering isn't the only purpose of this conversion. - // - // The importer maps builtin types like 'int' to named types like - // 'CInt', which are generally typealiases. So what we do here is - // map the underlying types of those typealiases back to the builtin - // type. These typealiases frequently create a many-to-one mapping, - // so just use the first type that mapped to a particular underlying - // type. 
- // - // This is the last thing that happens before asserting that the - // struct type doesn't have a mapping. Furthermore, all of the - // builtin types are pre-built in the clang ASTContext. So it's not - // really a significant performance problem to just cache all them - // right here; it makes making a few more entries in the cache than - // we really need, but it also means we won't end up repeating these - // stdlib lookups multiple times, and we have to perform multiple - // lookups anyway because the MAP_BUILTIN_TYPE database uses - // typealias names (like 'CInt') that aren't obviously associated - // with the underlying C library type. - - auto stdlib = IGM.Context.getStdlibModule(); - assert(stdlib && "translating stdlib type to C without stdlib module?"); - auto &ctx = IGM.getClangASTContext(); - auto cacheStdlibType = [&](StringRef swiftName, - clang::BuiltinType::Kind builtinKind) { - CanType swiftType = getNamedSwiftType(stdlib, swiftName); - if (!swiftType) return; - - auto &sema = IGM.Context.getClangModuleLoader()->getClangSema(); - // Handle Int and UInt specially. On Apple platforms, these correspond to - // the NSInteger and NSUInteger typedefs, so map them back to those typedefs - // if they're available, to ensure we get consistent ObjC @encode strings. 
- if (swiftType->getAnyNominal() == IGM.Context.getIntDecl()) { - if (auto NSIntegerTy = getClangBuiltinTypeFromTypedef(sema, "NSInteger")){ - Cache.insert({swiftType, NSIntegerTy}); - return; - } - } else if (swiftType->getAnyNominal() == IGM.Context.getUIntDecl()) { - if (auto NSUIntegerTy = - getClangBuiltinTypeFromTypedef(sema, "NSUInteger")) { - Cache.insert({swiftType, NSUIntegerTy}); - return; - } - } - - Cache.insert({swiftType, getClangBuiltinTypeFromKind(ctx, builtinKind)}); - }; - -#define MAP_BUILTIN_TYPE(CLANG_BUILTIN_KIND, SWIFT_TYPE_NAME) \ - cacheStdlibType(#SWIFT_TYPE_NAME, clang::BuiltinType::CLANG_BUILTIN_KIND); -#include "swift/ClangImporter/BuiltinMappedTypes.def" - - // On 64-bit Windows, no C type is imported as an Int or UInt; CLong is - // imported as an Int32 and CLongLong as an Int64. Therefore, manually - // add mappings to C for Int and UInt. - // On 64-bit Cygwin, no manual mapping is required. - if (IGM.Triple.isOSWindows() && !IGM.Triple.isWindowsCygwinEnvironment() && - IGM.Triple.isArch64Bit()) { - // Map UInt to uintptr_t - auto swiftUIntType = getNamedSwiftType(stdlib, "UInt"); - auto clangUIntPtrType = ctx.getCanonicalType(ctx.getUIntPtrType()); - Cache.insert({swiftUIntType, clangUIntPtrType}); - - // Map Int to intptr_t - auto swiftIntType = getNamedSwiftType(stdlib, "Int"); - auto clangIntPtrType = ctx.getCanonicalType(ctx.getIntPtrType()); - Cache.insert({swiftIntType, clangIntPtrType}); - } - - // The above code sets up a bunch of mappings in the cache; just - // assume that we hit one of them. - auto it = Cache.find(type); - assert(it != Cache.end() && - "cannot translate Swift type to C! 
type is not specially known"); - return it->second; -} - -clang::CanQualType GenClangType::visitTupleType(CanTupleType type) { - unsigned e = type->getNumElements(); - if (e == 0) - return getClangASTContext().VoidTy; - - CanType eltTy = type.getElementType(0); - for (unsigned i = 1; i < e; ++i) { - assert(eltTy == type.getElementType(i) && - "Only tuples where all element types are equal " - "map to fixed-size arrays"); - } - - auto clangEltTy = Converter.convert(IGM, eltTy); - if (!clangEltTy) return clang::CanQualType(); - - APInt size(32, e); - auto &ctx = getClangASTContext(); - return ctx.getCanonicalType( - ctx.getConstantArrayType(clangEltTy, size, nullptr, - clang::ArrayType::Normal, 0)); -} - -clang::CanQualType GenClangType::visitProtocolType(CanProtocolType type) { - auto proto = type->getDecl(); - auto &clangCtx = getClangASTContext(); - - if (!proto->isObjC()) { - std::string s; - llvm::raw_string_ostream err(s); - err << "Trying to compute the clang type for a non-ObjC protocol type\n"; - proto->dump(err); - llvm::report_fatal_error(err.str()); - } - - // Single protocol -> id - clang::IdentifierInfo *name = &clangCtx.Idents.get(proto->getName().get()); - auto *PDecl = clang::ObjCProtocolDecl::Create( - const_cast(clangCtx), - clangCtx.getTranslationUnitDecl(), name, - clang::SourceLocation(), clang::SourceLocation(), nullptr); - - // Attach an objc_runtime_name attribute with the Objective-C name to use - // for this protocol. 
- SmallString<64> runtimeNameBuffer; - PDecl->addAttr(clang::ObjCRuntimeNameAttr::CreateImplicit( - PDecl->getASTContext(), - proto->getObjCRuntimeName(runtimeNameBuffer))); - - auto clangType = clangCtx.getObjCObjectType(clangCtx.ObjCBuiltinIdTy, - &PDecl, 1); - auto ptrTy = clangCtx.getObjCObjectPointerType(clangType); - return clangCtx.getCanonicalType(ptrTy); -} - -clang::CanQualType GenClangType::visitMetatypeType(CanMetatypeType type) { - return getClangMetatypeType(getClangASTContext()); -} - -clang::CanQualType -GenClangType::visitExistentialMetatypeType(CanExistentialMetatypeType type) { - return getClangMetatypeType(getClangASTContext()); -} - -clang::CanQualType GenClangType::visitClassType(CanClassType type) { - auto &clangCtx = getClangASTContext(); - auto swiftDecl = type->getDecl(); - - // TODO: [non-objc-class-clang-type-conversion] - // Crashing here instead of returning a bogus 'id' leads to test failures, - // which is surprising. - if (!swiftDecl->isObjC()) - return getClangIdType(clangCtx); - - // produce the clang type INTF * if it is imported ObjC object. - clang::IdentifierInfo *ForwardClassId = - &clangCtx.Idents.get(swiftDecl->getName().get()); - auto *CDecl = clang::ObjCInterfaceDecl::Create( - clangCtx, clangCtx.getTranslationUnitDecl(), - clang::SourceLocation(), ForwardClassId, - /*typeParamList*/nullptr, /*PrevDecl=*/nullptr, - clang::SourceLocation()); - - // Attach an objc_runtime_name attribute with the Objective-C name to use - // for this class. 
- SmallString<64> runtimeNameBuffer; - CDecl->addAttr(clang::ObjCRuntimeNameAttr::CreateImplicit( - CDecl->getASTContext(), - swiftDecl->getObjCRuntimeName(runtimeNameBuffer))); - - auto clangType = clangCtx.getObjCInterfaceType(CDecl); - auto ptrTy = clangCtx.getObjCObjectPointerType(clangType); - return clangCtx.getCanonicalType(ptrTy); -} - -clang::CanQualType GenClangType::visitBoundGenericClassType( - CanBoundGenericClassType type) { - // Any @objc class type in Swift that shows up in an @objc method maps 1-1 to - // "id "; with clang's encoding ignoring the protocol list. - return getClangIdType(getClangASTContext()); -} - -clang::CanQualType -GenClangType::visitBoundGenericType(CanBoundGenericType type) { - // We only expect *Pointer, SIMD* and Optional. - if (auto underlyingTy = - SILType::getPrimitiveObjectType(type).getOptionalObjectType()) { - // The underlying type could be a bridged type, which makes any - // sort of casual assertion here difficult. - return Converter.convert(IGM, underlyingTy.getASTType()); - } - - auto swiftStructDecl = type->getDecl(); - - enum class StructKind { - Invalid, - UnsafeMutablePointer, - UnsafePointer, - AutoreleasingUnsafeMutablePointer, - Unmanaged, - CFunctionPointer, - SIMD, - } kind = llvm::StringSwitch(swiftStructDecl->getName().str()) - .Case("UnsafeMutablePointer", StructKind::UnsafeMutablePointer) - .Case("UnsafePointer", StructKind::UnsafePointer) - .Case( - "AutoreleasingUnsafeMutablePointer", - StructKind::AutoreleasingUnsafeMutablePointer) - .Case("Unmanaged", StructKind::Unmanaged) - .Case("CFunctionPointer", StructKind::CFunctionPointer) - .StartsWith("SIMD", StructKind::SIMD) - .Default(StructKind::Invalid); - - auto args = type.getGenericArgs(); - assert(args.size() == 1 && - "should have a single generic argument!"); - auto loweredArgTy = IGM.getLoweredType(args[0]).getASTType(); - - switch (kind) { - case StructKind::Invalid: - llvm_unreachable("Unexpected non-pointer generic struct type in imported" - 
" Clang module!"); - - case StructKind::UnsafeMutablePointer: - case StructKind::Unmanaged: - case StructKind::AutoreleasingUnsafeMutablePointer: { - auto clangCanTy = Converter.convert(IGM, loweredArgTy); - if (!clangCanTy) return clang::CanQualType(); - return getClangASTContext().getPointerType(clangCanTy); - } - case StructKind::UnsafePointer: { - clang::QualType clangTy - = Converter.convert(IGM, loweredArgTy).withConst(); - return getCanonicalType(getClangASTContext().getPointerType(clangTy)); - } - - case StructKind::CFunctionPointer: { - auto &clangCtx = getClangASTContext(); - - clang::QualType functionTy; - if (isa(loweredArgTy)) { - functionTy = Converter.convert(IGM, loweredArgTy); - } else { - // Fall back to void(). - functionTy = clangCtx.getFunctionNoProtoType(clangCtx.VoidTy); - } - auto fnPtrTy = clangCtx.getPointerType(functionTy); - return getCanonicalType(fnPtrTy); - } - - case StructKind::SIMD: { - clang::QualType scalarTy = Converter.convert(IGM, loweredArgTy); - auto numEltsString = swiftStructDecl->getName().str(); - numEltsString.consume_front("SIMD"); - unsigned numElts; - bool failedParse = numEltsString.getAsInteger(10, numElts); - assert(!failedParse && "SIMD type name didn't end in count?"); - (void) failedParse; - auto vectorTy = getClangASTContext().getVectorType(scalarTy, numElts, - clang::VectorType::VectorKind::GenericVector); - return getCanonicalType(vectorTy); - } - } - - llvm_unreachable("Not a valid StructKind."); -} - -clang::CanQualType GenClangType::visitEnumType(CanEnumType type) { - // Special case: Uninhabited enums are not @objc, so we don't - // know what to do below, but we can just convert to 'void'. - if (type->isUninhabited()) - return Converter.convert(IGM, IGM.Context.TheEmptyTupleType); - - assert(type->getDecl()->isObjC() && "not an @objc enum?!"); - - // @objc enums lower to their raw types. 
- return Converter.convert(IGM, - type->getDecl()->getRawType()->getCanonicalType()); -} - -clang::CanQualType GenClangType::visitFunctionType(CanFunctionType type) { - llvm_unreachable("FunctionType should have been lowered away"); -} - -clang::CanQualType GenClangType::visitSILFunctionType(CanSILFunctionType type) { - auto &clangCtx = getClangASTContext(); - - enum FunctionPointerKind { - Block, CFunctionPointer, - }; - - FunctionPointerKind kind; - - switch (type->getRepresentation()) { - case SILFunctionType::Representation::Block: - kind = Block; - break; - - case SILFunctionType::Representation::CFunctionPointer: - kind = CFunctionPointer; - break; - - case SILFunctionType::Representation::Thick: - case SILFunctionType::Representation::Thin: - case SILFunctionType::Representation::Method: - case SILFunctionType::Representation::ObjCMethod: - case SILFunctionType::Representation::WitnessMethod: - case SILFunctionType::Representation::Closure: - llvm_unreachable("not an ObjC-compatible function"); - } - - // Convert the return and parameter types. - auto allResults = type->getResults(); - assert(allResults.size() <= 1 && "multiple results with C convention"); - clang::QualType resultType; - if (allResults.empty()) { - resultType = clangCtx.VoidTy; - } else { - resultType = Converter.convert( - IGM, - allResults[0].getReturnValueType(IGM.getSILModule(), type, - IGM.getMaximalTypeExpansionContext())); - if (resultType.isNull()) - return clang::CanQualType(); - } - - SmallVector paramTypes; - SmallVector extParamInfos; - for (auto paramTy : type->getParameters()) { - clang::FunctionProtoType::ExtParameterInfo extParamInfo; - - // Blocks should only take direct +0 parameters. - switch (paramTy.getConvention()) { - case ParameterConvention::Direct_Guaranteed: - case ParameterConvention::Direct_Unowned: - // OK. 
- break; - - case ParameterConvention::Direct_Owned: - extParamInfo = extParamInfo.withIsConsumed(true); - break; - - case ParameterConvention::Indirect_In: - case ParameterConvention::Indirect_In_Constant: - case ParameterConvention::Indirect_Inout: - case ParameterConvention::Indirect_InoutAliasable: - case ParameterConvention::Indirect_In_Guaranteed: - llvm_unreachable("block takes indirect parameter"); - } - auto param = Converter.convert( - IGM, paramTy.getArgumentType(IGM.getSILModule(), type, - IGM.getMaximalTypeExpansionContext())); - if (param.isNull()) - return clang::CanQualType(); - - paramTypes.push_back(param); - extParamInfos.push_back(extParamInfo); - } - - // Build the Clang function type. - clang::FunctionProtoType::ExtProtoInfo extProtoInfo; - extProtoInfo.ExtParameterInfos = extParamInfos.begin(); - - auto fnTy = clangCtx.getFunctionType(resultType, paramTypes, extProtoInfo); - clang::QualType ptrTy; - - switch (kind) { - case Block: - ptrTy = clangCtx.getBlockPointerType(fnTy); - break; - case CFunctionPointer: - ptrTy = clangCtx.getPointerType(fnTy); - } - return clangCtx.getCanonicalType(ptrTy); -} - -clang::CanQualType GenClangType::visitSILBlockStorageType(CanSILBlockStorageType type) { - // We'll select (void)(^)(). This isn't correct for all blocks, but block - // storage type should only be converted for function signature lowering, - // where the parameter types do not matter. - auto &clangCtx = getClangASTContext(); - auto fnTy = clangCtx.getFunctionNoProtoType(clangCtx.VoidTy); - auto blockTy = clangCtx.getBlockPointerType(fnTy); - return clangCtx.getCanonicalType(blockTy); -} - -clang::CanQualType GenClangType::visitProtocolCompositionType( - CanProtocolCompositionType type) { - auto &clangCtx = getClangASTContext(); - - // FIXME. Eventually, this will have its own helper routine. 
- SmallVector Protocols; - auto layout = type.getExistentialLayout(); - assert(layout.isObjC() && "Cannot represent opaque existential in Clang"); - - // AnyObject -> id. - if (layout.isAnyObject()) - return getClangIdType(getClangASTContext()); - - auto superclassTy = clangCtx.ObjCBuiltinIdTy; - if (auto layoutSuperclassTy = layout.getSuperclass()) { - superclassTy = clangCtx.getCanonicalType( - cast( - Converter.convert(IGM, CanType(layoutSuperclassTy))) - ->getPointeeType()); - } - - for (Type t : layout.getProtocols()) { - auto opt = cast( - Converter.convert(IGM, CanType(t))); - for (auto p : opt->quals()) - Protocols.push_back(p); - } - - if (Protocols.empty()) - return superclassTy; - - // id - clang::ObjCProtocolDecl **ProtoQuals = - new(clangCtx) clang::ObjCProtocolDecl*[Protocols.size()]; - memcpy(ProtoQuals, Protocols.data(), - sizeof(clang::ObjCProtocolDecl*)*Protocols.size()); - auto clangType = clangCtx.getObjCObjectType(superclassTy, - ProtoQuals, - Protocols.size()); - auto ptrTy = clangCtx.getObjCObjectPointerType(clangType); - return clangCtx.getCanonicalType(ptrTy); -} - -clang::CanQualType GenClangType::visitBuiltinRawPointerType( - CanBuiltinRawPointerType type) { - return getClangASTContext().VoidPtrTy; -} - -clang::CanQualType GenClangType::visitBuiltinIntegerType( - CanBuiltinIntegerType type) { - auto &ctx = getClangASTContext(); - if (type->getWidth().isPointerWidth()) - return ctx.getCanonicalType(ctx.getUIntPtrType()); - assert(type->getWidth().isFixedWidth()); - auto width = type->getWidth().getFixedWidth(); - if (width == 1) - return ctx.BoolTy; - return ctx.getCanonicalType(ctx.getIntTypeForBitwidth(width, /*signed*/ 0)); -} - -clang::CanQualType GenClangType::visitBuiltinFloatType( - CanBuiltinFloatType type) { - auto &ctx = getClangASTContext(); - auto &clangTargetInfo = ctx.getTargetInfo(); - const llvm::fltSemantics *format = &type->getAPFloatSemantics(); - if (format == &clangTargetInfo.getHalfFormat()) return ctx.HalfTy; - if 
(format == &clangTargetInfo.getFloatFormat()) return ctx.FloatTy; - if (format == &clangTargetInfo.getDoubleFormat()) return ctx.DoubleTy; - if (format == &clangTargetInfo.getLongDoubleFormat()) return ctx.LongDoubleTy; - llvm_unreachable("cannot translate floating-point format to C"); -} - -clang::CanQualType GenClangType::visitArchetypeType(CanArchetypeType type) { - // We see these in the case where we invoke an @objc function - // through a protocol. - return getClangIdType(getClangASTContext()); -} - -clang::CanQualType GenClangType::visitDynamicSelfType(CanDynamicSelfType type) { - // Dynamic Self is equivalent to 'instancetype', which is treated as - // 'id' within the Objective-C type system. - return getClangIdType(getClangASTContext()); -} - -clang::CanQualType GenClangType::visitGenericTypeParamType( - CanGenericTypeParamType type) { - // We see these in the case where we invoke an @objc function - // through a protocol argument that is a generic type. - return getClangIdType(getClangASTContext()); -} - -clang::CanQualType GenClangType::visitType(CanType type) { - llvm_unreachable("Unexpected type in Clang type generation."); -} - -clang::CanQualType irgen::ClangTypeConverter::convert(IRGenModule &IGM, CanType type) { - // Look in the cache. - auto it = Cache.find(type); - if (it != Cache.end()) { - return it->second; - } - - // Try to do this without making cache entries for obvious cases. 
- if (auto nominal = dyn_cast(type)) { - auto decl = nominal->getDecl(); - if (auto clangDecl = decl->getClangDecl()) { - auto &ctx = IGM.getClangASTContext(); - if (auto clangTypeDecl = dyn_cast(clangDecl)) { - return ctx.getCanonicalType(ctx.getTypeDeclType(clangTypeDecl)) - .getUnqualifiedType(); - } else if (auto ifaceDecl = dyn_cast(clangDecl)) { - auto clangType = ctx.getObjCInterfaceType(ifaceDecl); - auto ptrTy = ctx.getObjCObjectPointerType(clangType); - return ctx.getCanonicalType(ptrTy); - } else if (auto protoDecl = dyn_cast(clangDecl)){ - auto clangType = ctx.getObjCObjectType( - ctx.ObjCBuiltinIdTy, - const_cast(&protoDecl), - 1); - auto ptrTy = ctx.getObjCObjectPointerType(clangType); - return ctx.getCanonicalType(ptrTy); - } - } - } - - // If that failed, convert the type, cache, and return. - clang::CanQualType result = GenClangType(IGM, *this).visit(type); - Cache.insert({type, result}); - return result; -} - clang::CanQualType IRGenModule::getClangType(CanType type) { - return ClangTypes->convert(*this, type); + auto *ty = type->getASTContext().getClangTypeForIRGen(type); + return ty ? 
ty->getCanonicalTypeUnqualified() : clang::CanQualType(); } clang::CanQualType IRGenModule::getClangType(SILType type) { @@ -811,18 +54,3 @@ clang::CanQualType IRGenModule::getClangType(SILParameterInfo params, } return clangType; } - -void IRGenModule::initClangTypeConverter() { - if (auto loader = Context.getClangModuleLoader()) { - auto importer = static_cast(loader); - ClangASTContext = &importer->getClangASTContext(); - ClangTypes = new ClangTypeConverter(); - } else { - ClangASTContext = nullptr; - ClangTypes = nullptr; - } -} - -void IRGenModule::destroyClangTypeConverter() { - delete ClangTypes; -} diff --git a/lib/IRGen/GenObjC.cpp b/lib/IRGen/GenObjC.cpp index 25feeda16a592..0eabfe12660a2 100644 --- a/lib/IRGen/GenObjC.cpp +++ b/lib/IRGen/GenObjC.cpp @@ -1084,6 +1084,8 @@ static llvm::Constant *getObjCEncodingForTypes(IRGenModule &IGM, std::string encodingString; + auto fnClangTy = fnType->getClangTypeInfo().getType(); + // Return type. { auto clangType = IGM.getClangType(resultType.getASTType()); diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp index c295d349860bc..73fea486e095c 100644 --- a/lib/IRGen/IRGenModule.cpp +++ b/lib/IRGen/IRGenModule.cpp @@ -554,7 +554,10 @@ IRGenModule::IRGenModule(IRGenerator &irgen, MainInputFilenameForDebugInfo, PrivateDiscriminator); - initClangTypeConverter(); + if (auto loader = Context.getClangModuleLoader()) { + ClangASTContext = + &static_cast(loader)->getClangASTContext(); + } if (ClangASTContext) { auto atomicBoolTy = ClangASTContext->getAtomicType(ClangASTContext->BoolTy); @@ -611,7 +614,6 @@ IRGenModule::IRGenModule(IRGenerator &irgen, } IRGenModule::~IRGenModule() { - destroyClangTypeConverter(); destroyMetadataLayoutMap(); destroyPointerAuthCaches(); delete &Types; diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h index 69a107d61b672..49cf07b882f5d 100644 --- a/lib/IRGen/IRGenModule.h +++ b/lib/IRGen/IRGenModule.h @@ -964,9 +964,6 @@ class IRGenModule { friend TypeConverter; const 
clang::ASTContext *ClangASTContext; - ClangTypeConverter *ClangTypes; - void initClangTypeConverter(); - void destroyClangTypeConverter(); llvm::DenseMap MetadataLayouts; void destroyMetadataLayoutMap(); From 19e6bee3747260beeb7bf7604bf27db737c97732 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Wed, 11 Nov 2020 14:31:18 -0800 Subject: [PATCH 15/75] [NFC] Use the Correct const-Qualification in Dependency Code T *const does not prevent logically non-const accesses to the underlying data, it merely indicates that the pointer value itself is const. This modifier can be cast off by a copy, so it's not generally what you want here. Switch to const T * instead. --- include/swift/AST/FineGrainedDependencies.h | 2 +- lib/AST/FrontendSourceFileDepGraphFactory.cpp | 37 ++++++++++--------- lib/AST/FrontendSourceFileDepGraphFactory.h | 12 +++--- lib/FrontendTool/FrontendTool.cpp | 4 +- lib/FrontendTool/LoadedModuleTrace.cpp | 2 +- 5 files changed, 29 insertions(+), 28 deletions(-) diff --git a/include/swift/AST/FineGrainedDependencies.h b/include/swift/AST/FineGrainedDependencies.h index e701ad31cce8a..3d224cd842389 100644 --- a/include/swift/AST/FineGrainedDependencies.h +++ b/include/swift/AST/FineGrainedDependencies.h @@ -351,7 +351,7 @@ class BiIndexedTwoStageMap { /// /// \Note The returned graph should not be escaped from the callback. 
bool withReferenceDependencies( - llvm::PointerUnion MSF, + llvm::PointerUnion MSF, const DependencyTracker &depTracker, StringRef outputPath, bool alsoEmitDotFile, llvm::function_ref); diff --git a/lib/AST/FrontendSourceFileDepGraphFactory.cpp b/lib/AST/FrontendSourceFileDepGraphFactory.cpp index fe358fbffd7fd..d85b06f92588a 100644 --- a/lib/AST/FrontendSourceFileDepGraphFactory.cpp +++ b/lib/AST/FrontendSourceFileDepGraphFactory.cpp @@ -193,16 +193,16 @@ std::string DependencyKey::computeNameForProvidedEntity< //============================================================================== bool fine_grained_dependencies::withReferenceDependencies( - llvm::PointerUnion MSF, + llvm::PointerUnion MSF, const DependencyTracker &depTracker, StringRef outputPath, bool alsoEmitDotFile, llvm::function_ref cont) { - if (auto *MD = MSF.dyn_cast()) { + if (auto *MD = MSF.dyn_cast()) { SourceFileDepGraph g = ModuleDepGraphFactory(MD, alsoEmitDotFile).construct(); return cont(std::move(g)); } else { - auto *SF = MSF.get(); + auto *SF = MSF.get(); SourceFileDepGraph g = FrontendSourceFileDepGraphFactory( SF, outputPath, depTracker, alsoEmitDotFile) .construct(); @@ -215,22 +215,22 @@ bool fine_grained_dependencies::withReferenceDependencies( //============================================================================== FrontendSourceFileDepGraphFactory::FrontendSourceFileDepGraphFactory( - SourceFile *SF, StringRef outputPath, const DependencyTracker &depTracker, - const bool alsoEmitDotFile) + const SourceFile *SF, StringRef outputPath, + const DependencyTracker &depTracker, const bool alsoEmitDotFile) : AbstractSourceFileDepGraphFactory( - SF->getASTContext().hadError(), - outputPath, getInterfaceHash(SF), alsoEmitDotFile, - SF->getASTContext().Diags), + SF->getASTContext().hadError(), outputPath, getInterfaceHash(SF), + alsoEmitDotFile, SF->getASTContext().Diags), SF(SF), depTracker(depTracker) {} /// Centralize the invariant that the fingerprint of the whole file is the 
/// interface hash -std::string FrontendSourceFileDepGraphFactory::getFingerprint(SourceFile *SF) { +std::string +FrontendSourceFileDepGraphFactory::getFingerprint(const SourceFile *SF) { return getInterfaceHash(SF); } std::string -FrontendSourceFileDepGraphFactory::getInterfaceHash(SourceFile *SF) { +FrontendSourceFileDepGraphFactory::getInterfaceHash(const SourceFile *SF) { llvm::SmallString<32> interfaceHash; SF->getInterfaceHash(interfaceHash); return interfaceHash.str().str(); @@ -415,7 +415,7 @@ void FrontendSourceFileDepGraphFactory::addAllDefinedDecls() { namespace { /// Extracts uses out of a SourceFile class UsedDeclEnumerator { - SourceFile *SF; + const SourceFile *SF; const DependencyTracker &depTracker; StringRef swiftDeps; @@ -427,7 +427,8 @@ class UsedDeclEnumerator { public: UsedDeclEnumerator( - SourceFile *SF, const DependencyTracker &depTracker, StringRef swiftDeps, + const SourceFile *SF, const DependencyTracker &depTracker, + StringRef swiftDeps, function_ref createDefUse) : SF(SF), depTracker(depTracker), swiftDeps(swiftDeps), @@ -435,8 +436,7 @@ class UsedDeclEnumerator { DeclAspect::interface, swiftDeps)), sourceFileImplementation(DependencyKey::createKeyForWholeSourceFile( DeclAspect::implementation, swiftDeps)), - createDefUse(createDefUse) { - } + createDefUse(createDefUse) {} public: void enumerateAllUses() { @@ -517,10 +517,11 @@ void FrontendSourceFileDepGraphFactory::addAllUsedDecls() { // MARK: ModuleDepGraphFactory //============================================================================== -ModuleDepGraphFactory::ModuleDepGraphFactory(ModuleDecl *Mod, bool emitDot) - : AbstractSourceFileDepGraphFactory( - Mod->getASTContext().hadError(), - Mod->getNameStr(), "0xBADBEEF", emitDot, Mod->getASTContext().Diags), +ModuleDepGraphFactory::ModuleDepGraphFactory(const ModuleDecl *Mod, + bool emitDot) + : AbstractSourceFileDepGraphFactory(Mod->getASTContext().hadError(), + Mod->getNameStr(), "0xBADBEEF", emitDot, + 
Mod->getASTContext().Diags), Mod(Mod) {} void ModuleDepGraphFactory::addAllDefinedDecls() { diff --git a/lib/AST/FrontendSourceFileDepGraphFactory.h b/lib/AST/FrontendSourceFileDepGraphFactory.h index e0a85a029140b..e97587dac6b9a 100644 --- a/lib/AST/FrontendSourceFileDepGraphFactory.h +++ b/lib/AST/FrontendSourceFileDepGraphFactory.h @@ -23,29 +23,29 @@ namespace fine_grained_dependencies { class FrontendSourceFileDepGraphFactory : public AbstractSourceFileDepGraphFactory { - SourceFile *const SF; + const SourceFile *SF; const DependencyTracker &depTracker; public: - FrontendSourceFileDepGraphFactory(SourceFile *SF, StringRef outputPath, + FrontendSourceFileDepGraphFactory(const SourceFile *SF, StringRef outputPath, const DependencyTracker &depTracker, bool alsoEmitDotFile); ~FrontendSourceFileDepGraphFactory() override = default; private: - static std::string getFingerprint(SourceFile *SF); - static std::string getInterfaceHash(SourceFile *SF); + static std::string getFingerprint(const SourceFile *SF); + static std::string getInterfaceHash(const SourceFile *SF); void addAllDefinedDecls() override; void addAllUsedDecls() override; }; class ModuleDepGraphFactory : public AbstractSourceFileDepGraphFactory { - ModuleDecl *const Mod; + const ModuleDecl *Mod; public: - ModuleDepGraphFactory(ModuleDecl *Mod, bool emitDot); + ModuleDepGraphFactory(const ModuleDecl *Mod, bool emitDot); ~ModuleDepGraphFactory() override = default; diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp index 2756fd899ad5c..ee78f97ace5d9 100644 --- a/lib/FrontendTool/FrontendTool.cpp +++ b/lib/FrontendTool/FrontendTool.cpp @@ -116,8 +116,8 @@ emitLoadedModuleTraceForAllPrimariesIfNeeded(ModuleDecl *mainModule, const FrontendOptions &opts) { opts.InputsAndOutputs.forEachInputProducingSupplementaryOutput( [&](const InputFile &input) -> bool { - return emitLoadedModuleTraceIfNeeded(mainModule, depTracker, opts, - input); + return 
swift::emitLoadedModuleTraceIfNeeded(mainModule, depTracker, + opts, input); }); } diff --git a/lib/FrontendTool/LoadedModuleTrace.cpp b/lib/FrontendTool/LoadedModuleTrace.cpp index 7f81c982a5efe..01aafe1b4f1ee 100644 --- a/lib/FrontendTool/LoadedModuleTrace.cpp +++ b/lib/FrontendTool/LoadedModuleTrace.cpp @@ -699,7 +699,7 @@ bool swift::emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule, assert(!ctxt.hadError() && "We should've already exited earlier if there was an error."); - StringRef loadedModuleTracePath = input.loadedModuleTracePath(); + auto loadedModuleTracePath = input.loadedModuleTracePath(); if (loadedModuleTracePath.empty()) return false; std::error_code EC; From 98f5e3f1cb335fba870cf577b2754140690ea4e5 Mon Sep 17 00:00:00 2001 From: Nate Chandler Date: Thu, 12 Nov 2020 11:37:33 -0800 Subject: [PATCH 16/75] [Async CC] Add execution test for convert_function. --- .../run-convertfunction-int64-to-void.sil | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 test/IRGen/async/run-convertfunction-int64-to-void.sil diff --git a/test/IRGen/async/run-convertfunction-int64-to-void.sil b/test/IRGen/async/run-convertfunction-int64-to-void.sil new file mode 100644 index 0000000000000..d267c77e8f41e --- /dev/null +++ b/test/IRGen/async/run-convertfunction-int64-to-void.sil @@ -0,0 +1,46 @@ +// RUN: %empty-directory(%t) +// RUN: %target-build-swift-dylib(%t/%target-library-name(PrintShims)) %S/../../Inputs/print-shims.swift -module-name PrintShims -emit-module -emit-module-path %t/PrintShims.swiftmodule +// RUN: %target-codesign %t/%target-library-name(PrintShims) +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -emit-ir -I %t -L %t -lPrintShim | %FileCheck %s --check-prefix=CHECK-LL +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -module-name main -o %t/main -I %t -L %t -lPrintShims %target-rpath(%t) +// RUN: %target-codesign %t/main +// RUN: %target-run %t/main 
%t/%target-library-name(PrintShims) | %FileCheck %s + +// REQUIRES: executable_test +// REQUIRES: swift_test_mode_optimize_none +// REQUIRES: concurrency +// UNSUPPORTED: use_os_stdlib + +import Builtin +import Swift +import PrintShims +import _Concurrency + +sil public_external @printInt64 : $@convention(thin) (Int64) -> () + +// CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @int64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @int64ToVoid : $@async @convention(thin) (Int64) -> () { +entry(%int : $Int64): + %printInt64 = function_ref @printInt64 : $@convention(thin) (Int64) -> () + %result = apply %printInt64(%int) : $@convention(thin) (Int64) -> () // CHECK: 9999 + return %result : $() +} + +sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): + %int64ToVoid = function_ref @int64ToVoid : $@async @convention(thin) (Int64) -> () + %int64ToVoidThick = thin_to_thick_function %int64ToVoid : $@convention(thin) @async (Int64) -> () to $@async @callee_guaranteed (Int64) -> () + %int64ThrowsToVoid = convert_function %int64ToVoidThick : $@async @callee_guaranteed (Int64) -> () to $@async @callee_guaranteed (Int64) -> @error Error + %int_literal = integer_literal $Builtin.Int64, 9999 + %int = struct $Int64 (%int_literal : $Builtin.Int64) + try_apply %int64ThrowsToVoid(%int) : $@async @callee_guaranteed (Int64) -> @error Error, normal success, error failure + +success(%value : $()): + %out_literal = integer_literal $Builtin.Int32, 0 + %out = struct $Int32 (%out_literal : $Builtin.Int32) + return %out : $Int32 + +failure(%error : $Error): + %end = builtin "errorInMain"(%error : $Error) : $() + unreachable +} From 3b051a2b7b0fff16d44631bbe4fe06b0a7b3dbd7 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Thu, 12 Nov 2020 15:46:06 -0500 Subject: [PATCH 17/75] Convert tabs to spaces in 
test/Sema/conformance_availability.swift --- test/Sema/conformance_availability.swift | 40 ++++++++++++------------ 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/test/Sema/conformance_availability.swift b/test/Sema/conformance_availability.swift index e700055658467..4a255cb60d90e 100644 --- a/test/Sema/conformance_availability.swift +++ b/test/Sema/conformance_availability.swift @@ -216,20 +216,20 @@ func passAvailableConformance1a(x: HasAvailableConformance1) { // Associated conformance with unavailability protocol Rider { - associatedtype H : Horse + associatedtype H : Horse } struct AssocConformanceUnavailable : Rider { // expected-error@-1 {{conformance of 'HasUnavailableConformance1' to 'Horse' is unavailable}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasUnavailableConformance1')}} - typealias H = HasUnavailableConformance1 + typealias H = HasUnavailableConformance1 } // Associated conformance with deprecation struct AssocConformanceDeprecated : Rider { // expected-warning@-1 {{conformance of 'HasDeprecatedConformance1' to 'Horse' is deprecated}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasDeprecatedConformance1')}} - typealias H = HasDeprecatedConformance1 + typealias H = HasDeprecatedConformance1 } // Associated conformance with availability @@ -237,12 +237,12 @@ struct AssocConformanceAvailable1 : Rider { // expected-error@-1 {{conformance of 'HasAvailableConformance1' to 'Horse' is only available in macOS 100 or newer}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasAvailableConformance1')}} // expected-note@-3 {{add @available attribute to enclosing struct}} - typealias H = HasAvailableConformance1 + typealias H = HasAvailableConformance1 } @available(macOS 100, *) struct AssocConformanceAvailable2 : Rider { - typealias H = HasAvailableConformance1 + typealias H = HasAvailableConformance1 } struct AssocConformanceAvailable3 {} @@ -251,25 +251,25 @@ extension 
AssocConformanceAvailable3 : Rider { // expected-error@-1 {{conformance of 'HasAvailableConformance1' to 'Horse' is only available in macOS 100 or newer}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasAvailableConformance1')}} // expected-note@-3 {{add @available attribute to enclosing extension}} - typealias H = HasAvailableConformance1 + typealias H = HasAvailableConformance1 } struct AssocConformanceAvailable4 {} @available(macOS 100, *) extension AssocConformanceAvailable4 : Rider { - typealias H = HasAvailableConformance1 + typealias H = HasAvailableConformance1 } // Solution ranking should down-rank solutions involving unavailable conformances protocol First {} extension First { - func doStuff(_: T) -> Bool {} + func doStuff(_: T) -> Bool {} } protocol Second {} extension Second { - func doStuff(_: Int) -> Int {} + func doStuff(_: Int) -> Int {} } struct ConformingType1 {} @@ -281,16 +281,16 @@ extension ConformingType1 : Second {} func usesConformingType1(_ c: ConformingType1) { // We should pick First.doStuff() here, since Second.doStuff() is unavailable - let result = c.doStuff(123) - let _: Bool = result + let result = c.doStuff(123) + let _: Bool = result } @available(macOS 100, *) func usesConformingType1a(_ c: ConformingType1) { // We should pick Second.doStuff() here, since it is more specialized than // First.doStuff() - let result = c.doStuff(123) - let _: Int = result + let result = c.doStuff(123) + let _: Int = result } // Same as above but unconditionally unavailable @@ -303,14 +303,14 @@ extension ConformingType2 : Second {} func usesConformingType2(_ c: ConformingType2) { // We should pick First.doStuff() here, since Second.doStuff() is unavailable - let result = c.doStuff(123) - let _: Bool = result + let result = c.doStuff(123) + let _: Bool = result } // Make sure this also works for synthesized conformances struct UnavailableHashable { - let x: Int - let y: Int + let x: Int + let y: Int } @available(macOS 100, *) @@ 
-318,7 +318,7 @@ extension UnavailableHashable : Hashable {} func usesUnavailableHashable(_ c: UnavailableHashable) { // expected-note@-1 2 {{add @available attribute to enclosing global function}} - _ = Set([c]) - // expected-error@-1 2 {{conformance of 'UnavailableHashable' to 'Hashable' is only available in macOS 100 or newer}} - // expected-note@-2 2 {{add 'if #available' version check}} + _ = Set([c]) + // expected-error@-1 2 {{conformance of 'UnavailableHashable' to 'Hashable' is only available in macOS 100 or newer}} + // expected-note@-2 2 {{add 'if #available' version check}} } From 07fafa3d8422963825005819318aac7674ad5346 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Thu, 12 Nov 2020 13:07:39 -0800 Subject: [PATCH 18/75] [NFC] Add a TypeBase Conversion Constructor to NeverNullType --- lib/Sema/TypeCheckType.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/Sema/TypeCheckType.cpp b/lib/Sema/TypeCheckType.cpp index d45f3e12693f2..53bf11521ecb5 100644 --- a/lib/Sema/TypeCheckType.cpp +++ b/lib/Sema/TypeCheckType.cpp @@ -1706,11 +1706,14 @@ namespace { public: /// Construct a never-null Type. If \p Ty is null, a fatal error is thrown. NeverNullType(Type Ty) : WrappedTy(Ty) { - if (Ty.isNull()) { + if (WrappedTy.isNull()) { llvm::report_fatal_error("Resolved to null type!"); } } + /// Construct a never-null Type. If \p TyB is null, a fatal error is thrown. + NeverNullType(TypeBase *TyB) : NeverNullType(Type(TyB)) {} + operator Type() const { return WrappedTy; } Type get() const { return WrappedTy; } From 4a8fdb8b13e8474747b0270c3d113cfa2e9e1996 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Thu, 12 Nov 2020 13:08:51 -0800 Subject: [PATCH 19/75] Push NeverNullType Into The Leaves of Type Resolution This ought to afford more specific traces when a null type is actually propagated out of the branches. 
As an added benefit, we no longer have to check for the null type when making recursive calls into type resolution and will now receive a compile-time error when such a comparison is attempted. --- lib/Sema/TypeCheckType.cpp | 187 +++++++++++++++++++------------------ 1 file changed, 94 insertions(+), 93 deletions(-) diff --git a/lib/Sema/TypeCheckType.cpp b/lib/Sema/TypeCheckType.cpp index 53bf11521ecb5..8c34a1b08bc2c 100644 --- a/lib/Sema/TypeCheckType.cpp +++ b/lib/Sema/TypeCheckType.cpp @@ -1758,24 +1758,24 @@ namespace { return diags.diagnose(std::forward(Args)...); } - Type resolveAttributedType(AttributedTypeRepr *repr, - TypeResolutionOptions options); - Type resolveAttributedType(TypeAttributes &attrs, TypeRepr *repr, - TypeResolutionOptions options); - Type resolveASTFunctionType(FunctionTypeRepr *repr, - TypeResolutionOptions options, - AnyFunctionType::Representation representation - = AnyFunctionType::Representation::Swift, - bool noescape = false, - const clang::Type *parsedClangFunctionType - = nullptr, - DifferentiabilityKind diffKind - = DifferentiabilityKind::NonDifferentiable); + NeverNullType resolveAttributedType(AttributedTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveAttributedType(TypeAttributes &attrs, TypeRepr *repr, + TypeResolutionOptions options); + NeverNullType + resolveASTFunctionType(FunctionTypeRepr *repr, + TypeResolutionOptions options, + AnyFunctionType::Representation representation = + AnyFunctionType::Representation::Swift, + bool noescape = false, + const clang::Type *parsedClangFunctionType = nullptr, + DifferentiabilityKind diffKind = + DifferentiabilityKind::NonDifferentiable); SmallVector resolveASTFunctionTypeParams( TupleTypeRepr *inputRepr, TypeResolutionOptions options, bool requiresMappingOut, DifferentiabilityKind diffKind); - Type resolveSILFunctionType( + NeverNullType resolveSILFunctionType( FunctionTypeRepr *repr, TypeResolutionOptions options, SILCoroutineKind coroutineKind = 
SILCoroutineKind::None, SILFunctionType::ExtInfoBuilder extInfoBuilder = @@ -1794,40 +1794,40 @@ namespace { SmallVectorImpl &yields, SmallVectorImpl &results, Optional &errorResult); - Type resolveIdentifierType(IdentTypeRepr *IdType, - TypeResolutionOptions options); - Type resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, - TypeResolutionOptions options); - Type resolveArrayType(ArrayTypeRepr *repr, - TypeResolutionOptions options); - Type resolveDictionaryType(DictionaryTypeRepr *repr, - TypeResolutionOptions options); - Type resolveOptionalType(OptionalTypeRepr *repr, - TypeResolutionOptions options); - Type resolveImplicitlyUnwrappedOptionalType(ImplicitlyUnwrappedOptionalTypeRepr *repr, - TypeResolutionOptions options, - bool isDirect); - Type resolveTupleType(TupleTypeRepr *repr, - TypeResolutionOptions options); - Type resolveCompositionType(CompositionTypeRepr *repr, - TypeResolutionOptions options); - Type resolveMetatypeType(MetatypeTypeRepr *repr, - TypeResolutionOptions options); - Type resolveProtocolType(ProtocolTypeRepr *repr, - TypeResolutionOptions options); - Type resolveSILBoxType(SILBoxTypeRepr *repr, - TypeResolutionOptions options); - - Type buildMetatypeType(MetatypeTypeRepr *repr, - Type instanceType, - Optional storedRepr); - Type buildProtocolType(ProtocolTypeRepr *repr, - Type instanceType, - Optional storedRepr); - - Type resolveOpaqueReturnType(TypeRepr *repr, StringRef mangledName, - unsigned ordinal, - TypeResolutionOptions options); + NeverNullType resolveIdentifierType(IdentTypeRepr *IdType, + TypeResolutionOptions options); + NeverNullType resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveArrayType(ArrayTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveDictionaryType(DictionaryTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveOptionalType(OptionalTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType 
resolveImplicitlyUnwrappedOptionalType( + ImplicitlyUnwrappedOptionalTypeRepr *repr, + TypeResolutionOptions options, bool isDirect); + NeverNullType resolveTupleType(TupleTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveCompositionType(CompositionTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveMetatypeType(MetatypeTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveProtocolType(ProtocolTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveSILBoxType(SILBoxTypeRepr *repr, + TypeResolutionOptions options); + + NeverNullType + buildMetatypeType(MetatypeTypeRepr *repr, Type instanceType, + Optional storedRepr); + NeverNullType + buildProtocolType(ProtocolTypeRepr *repr, Type instanceType, + Optional storedRepr); + + NeverNullType resolveOpaqueReturnType(TypeRepr *repr, StringRef mangledName, + unsigned ordinal, + TypeResolutionOptions options); /// Returns true if the given type conforms to `Differentiable` in the /// module of `DC`. If `tangentVectorEqualsSelf` is true, returns true iff @@ -1923,7 +1923,7 @@ NeverNullType TypeResolver::resolveType(TypeRepr *repr, // Default non-escaping for closure parameters auto result = resolveASTFunctionType(cast(repr), options); - if (result && result->is()) + if (result->is()) return applyNonEscapingIfNecessary(result, options); return result; } @@ -1995,8 +1995,9 @@ static Type rebuildWithDynamicSelf(ASTContext &Context, Type ty) { } } -Type TypeResolver::resolveAttributedType(AttributedTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveAttributedType(AttributedTypeRepr *repr, + TypeResolutionOptions options) { // Copy the attributes, since we're about to start hacking on them. 
TypeAttributes attrs = repr->getAttrs(); assert(!attrs.empty()); @@ -2004,9 +2005,9 @@ Type TypeResolver::resolveAttributedType(AttributedTypeRepr *repr, return resolveAttributedType(attrs, repr->getTypeRepr(), options); } -Type TypeResolver::resolveAttributedType(TypeAttributes &attrs, - TypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveAttributedType(TypeAttributes &attrs, TypeRepr *repr, + TypeResolutionOptions options) { // Convenience to grab the source range of a type attribute. auto getTypeAttrRangeWithAt = [](ASTContext &ctx, SourceLoc attrLoc) { return SourceRange(attrLoc, attrLoc.getAdvancedLoc(1)); @@ -2630,10 +2631,10 @@ TypeResolver::resolveASTFunctionTypeParams(TupleTypeRepr *inputRepr, return elements; } -Type TypeResolver::resolveOpaqueReturnType(TypeRepr *repr, - StringRef mangledName, - unsigned ordinal, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveOpaqueReturnType(TypeRepr *repr, StringRef mangledName, + unsigned ordinal, + TypeResolutionOptions options) { // The type repr should be a generic identifier type. We don't really use // the identifier for anything, but we do resolve the generic arguments // to instantiate the possibly-generic opaque type. 
@@ -2672,7 +2673,7 @@ Type TypeResolver::resolveOpaqueReturnType(TypeRepr *repr, return ty; } -Type TypeResolver::resolveASTFunctionType( +NeverNullType TypeResolver::resolveASTFunctionType( FunctionTypeRepr *repr, TypeResolutionOptions parentOptions, AnyFunctionType::Representation representation, bool noescape, const clang::Type *parsedClangFunctionType, @@ -2807,8 +2808,8 @@ bool TypeResolver::isDifferentiable(Type type, bool tangentVectorEqualsSelf) { return type->getCanonicalType() == tanSpace->getCanonicalType(); } -Type TypeResolver::resolveSILBoxType(SILBoxTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveSILBoxType(SILBoxTypeRepr *repr, + TypeResolutionOptions options) { // Resolve the field types. SmallVector fields; { @@ -2872,7 +2873,7 @@ Type TypeResolver::resolveSILBoxType(SILBoxTypeRepr *repr, return SILBoxType::get(getASTContext(), layout, subMap); } -Type TypeResolver::resolveSILFunctionType( +NeverNullType TypeResolver::resolveSILFunctionType( FunctionTypeRepr *repr, TypeResolutionOptions options, SILCoroutineKind coroutineKind, SILFunctionType::ExtInfoBuilder extInfoBuilder, ParameterConvention callee, @@ -3287,8 +3288,9 @@ bool TypeResolver::resolveSILResults(TypeRepr *repr, yields, ordinaryResults, errorResult); } -Type TypeResolver::resolveIdentifierType(IdentTypeRepr *IdType, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveIdentifierType(IdentTypeRepr *IdType, + TypeResolutionOptions options) { auto ComponentRange = IdType->getComponentRange(); auto Components = llvm::makeArrayRef(ComponentRange.begin(), ComponentRange.end()); @@ -3322,8 +3324,9 @@ Type TypeResolver::resolveIdentifierType(IdentTypeRepr *IdType, return result; } -Type TypeResolver::resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, + TypeResolutionOptions options) { // inout is only valid for 
(non-Subscript and non-EnumCaseDecl) // function parameters. if (!options.is(TypeResolverContext::FunctionInput) || @@ -3364,9 +3367,8 @@ Type TypeResolver::resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, return resolveType(repr->getBase(), options); } - -Type TypeResolver::resolveArrayType(ArrayTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveArrayType(ArrayTypeRepr *repr, + TypeResolutionOptions options) { auto baseTy = resolveType(repr->getBase(), options.withoutContext()); if (baseTy->hasError()) { return ErrorType::get(getASTContext()); @@ -3380,8 +3382,9 @@ Type TypeResolver::resolveArrayType(ArrayTypeRepr *repr, return sliceTy; } -Type TypeResolver::resolveDictionaryType(DictionaryTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveDictionaryType(DictionaryTypeRepr *repr, + TypeResolutionOptions options) { options = adjustOptionsForGenericArgs(options); auto keyTy = resolveType(repr->getKey(), options.withoutContext()); @@ -3410,8 +3413,8 @@ Type TypeResolver::resolveDictionaryType(DictionaryTypeRepr *repr, return DictionaryType::get(keyTy, valueTy); } -Type TypeResolver::resolveOptionalType(OptionalTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveOptionalType(OptionalTypeRepr *repr, + TypeResolutionOptions options) { TypeResolutionOptions elementOptions = options.withoutContext(true); elementOptions.setContext(TypeResolverContext::ImmediateOptionalTypeArgument); @@ -3429,10 +3432,9 @@ Type TypeResolver::resolveOptionalType(OptionalTypeRepr *repr, return optionalTy; } -Type TypeResolver::resolveImplicitlyUnwrappedOptionalType( - ImplicitlyUnwrappedOptionalTypeRepr *repr, - TypeResolutionOptions options, - bool isDirect) { +NeverNullType TypeResolver::resolveImplicitlyUnwrappedOptionalType( + ImplicitlyUnwrappedOptionalTypeRepr *repr, TypeResolutionOptions options, + bool isDirect) { TypeResolutionFlags allowIUO = TypeResolutionFlags::SILType; 
bool doDiag = false; @@ -3500,8 +3502,8 @@ Type TypeResolver::resolveImplicitlyUnwrappedOptionalType( return uncheckedOptionalTy; } -Type TypeResolver::resolveTupleType(TupleTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveTupleType(TupleTypeRepr *repr, + TypeResolutionOptions options) { SmallVector elements; elements.reserve(repr->getNumElements()); @@ -3568,8 +3570,9 @@ Type TypeResolver::resolveTupleType(TupleTypeRepr *repr, return TupleType::get(elements, getASTContext()); } -Type TypeResolver::resolveCompositionType(CompositionTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveCompositionType(CompositionTypeRepr *repr, + TypeResolutionOptions options) { // Note that the superclass type will appear as part of one of the // types in 'Members', so it's not used when constructing the @@ -3636,8 +3639,8 @@ Type TypeResolver::resolveCompositionType(CompositionTypeRepr *repr, /*HasExplicitAnyObject=*/false); } -Type TypeResolver::resolveMetatypeType(MetatypeTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveMetatypeType(MetatypeTypeRepr *repr, + TypeResolutionOptions options) { // The instance type of a metatype is always abstract, not SIL-lowered. auto ty = resolveType(repr->getBase(), options.withoutContext()); if (ty->hasError()) { @@ -3657,10 +3660,9 @@ Type TypeResolver::resolveMetatypeType(MetatypeTypeRepr *repr, return buildMetatypeType(repr, ty, storedRepr); } -Type TypeResolver::buildMetatypeType( - MetatypeTypeRepr *repr, - Type instanceType, - Optional storedRepr) { +NeverNullType +TypeResolver::buildMetatypeType(MetatypeTypeRepr *repr, Type instanceType, + Optional storedRepr) { if (instanceType->isAnyExistentialType()) { // TODO: diagnose invalid representations? 
return ExistentialMetatypeType::get(instanceType, storedRepr); @@ -3669,8 +3671,8 @@ Type TypeResolver::buildMetatypeType( } } -Type TypeResolver::resolveProtocolType(ProtocolTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveProtocolType(ProtocolTypeRepr *repr, + TypeResolutionOptions options) { // The instance type of a metatype is always abstract, not SIL-lowered. auto ty = resolveType(repr->getBase(), options.withoutContext()); if (ty->hasError()) { @@ -3690,10 +3692,9 @@ Type TypeResolver::resolveProtocolType(ProtocolTypeRepr *repr, return buildProtocolType(repr, ty, storedRepr); } -Type TypeResolver::buildProtocolType( - ProtocolTypeRepr *repr, - Type instanceType, - Optional storedRepr) { +NeverNullType +TypeResolver::buildProtocolType(ProtocolTypeRepr *repr, Type instanceType, + Optional storedRepr) { if (!instanceType->isAnyExistentialType()) { diagnose(repr->getProtocolLoc(), diag::dot_protocol_on_non_existential, instanceType); From c7cce10d7ad29d7c1df9faaabbfa70b6a3d6ea7c Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Wed, 11 Nov 2020 20:23:28 -0800 Subject: [PATCH 20/75] Add an Assert To Make Sure the SIL Parser Emits Diagnostics In #34693, we discovered the SIL parser can silently fail. Try to detect this in +asserts builds. --- lib/SIL/Parser/ParseSIL.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/SIL/Parser/ParseSIL.cpp b/lib/SIL/Parser/ParseSIL.cpp index 6cef09b209a40..72bf2d63c975c 100644 --- a/lib/SIL/Parser/ParseSIL.cpp +++ b/lib/SIL/Parser/ParseSIL.cpp @@ -84,6 +84,14 @@ ParseSILModuleRequest::evaluate(Evaluator &evaluator, if (hadError) { // The rest of the SIL pipeline expects well-formed SIL, so if we encounter // a parsing error, just return an empty SIL module. 
+ // + // Because the SIL parser's notion of failing with an error is distinct from + // the ASTContext's notion of having emitted a diagnostic, it's possible for + // the parser to fail silently without emitting a diagnostic. This assertion + // ensures that +asserts builds will fail fast. If you crash here, please go + // back and add a diagnostic after identifying where the SIL parser failed. + assert(SF->getASTContext().hadError() && + "Failed to parse SIL but did not emit any errors!"); return SILModule::createEmptyModule(desc.context, desc.conv, desc.opts); } return silMod; From 517bcc449358fbd35b97ed633adb5ba0647f479b Mon Sep 17 00:00:00 2001 From: Richard Wei Date: Thu, 12 Nov 2020 13:30:56 -0800 Subject: [PATCH 21/75] [AutoDiff] Fix differentiation transform crashers in library evolution mode. (#34704) AD-generated data structures (linear map structs and branching trace enums) do not need to be resilient data structures. These decls are missing a `@frozen` attribute. Resolves rdar://71319547. --- .../Differentiation/LinearMapInfo.cpp | 4 ++ ...iation-of-extension-method-optimized.swift | 30 +++++++------- ...nerated-decls-shall-not-be-resilient.swift | 39 +++++++++++++++++++ 3 files changed, 58 insertions(+), 15 deletions(-) create mode 100644 test/AutoDiff/compiler_crashers_fixed/rdar71319547-generated-decls-shall-not-be-resilient.swift diff --git a/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp b/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp index fc785c112ffef..6de6cee60781e 100644 --- a/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp +++ b/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp @@ -138,6 +138,8 @@ LinearMapInfo::createBranchingTraceDecl(SILBasicBlock *originalBB, // Note: must mark enum as implicit to satisfy assertion in // `Parser::parseDeclListDelayed`. branchingTraceDecl->setImplicit(); + // Branching trace enums shall not be resilient.
+ branchingTraceDecl->getAttrs().add(new (astCtx) FrozenAttr(/*implicit*/ true)); if (genericSig) branchingTraceDecl->setGenericSignature(genericSig); computeAccessLevel(branchingTraceDecl, original->getEffectiveSymbolLinkage()); @@ -201,6 +203,8 @@ LinearMapInfo::createLinearMapStruct(SILBasicBlock *originalBB, // Note: must mark struct as implicit to satisfy assertion in // `Parser::parseDeclListDelayed`. linearMapStruct->setImplicit(); + // Linear map structs shall not be resilient. + linearMapStruct->getAttrs().add(new (astCtx) FrozenAttr(/*implicit*/ true)); if (genericSig) linearMapStruct->setGenericSignature(genericSig); computeAccessLevel(linearMapStruct, original->getEffectiveSymbolLinkage()); diff --git a/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift b/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift index 892943d791bcc..826a3f8f9a944 100644 --- a/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift +++ b/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift @@ -5,29 +5,29 @@ import _Differentiation protocol P { - @differentiable - func req(_ input: Float) -> Float + @differentiable + func req(_ input: Float) -> Float } extension P { - @differentiable - func foo(_ input: Float) -> Float { - return req(input) - } + @differentiable + func foo(_ input: Float) -> Float { + return req(input) + } } struct Dummy: P { - @differentiable - func req(_ input: Float) -> Float { - input - } + @differentiable + func req(_ input: Float) -> Float { + input + } } struct DummyComposition: P { - var layer = Dummy() + var layer = Dummy() - @differentiable - func req(_ input: Float) -> Float { - layer.foo(input) - } + @differentiable + func req(_ input: Float) -> Float { + layer.foo(input) + } } diff --git 
a/test/AutoDiff/compiler_crashers_fixed/rdar71319547-generated-decls-shall-not-be-resilient.swift b/test/AutoDiff/compiler_crashers_fixed/rdar71319547-generated-decls-shall-not-be-resilient.swift new file mode 100644 index 0000000000000..2b4eb4be88a2f --- /dev/null +++ b/test/AutoDiff/compiler_crashers_fixed/rdar71319547-generated-decls-shall-not-be-resilient.swift @@ -0,0 +1,39 @@ +// RUN: %target-build-swift -enable-library-evolution %s +// RUN: %target-build-swift -O -enable-library-evolution %s +// RUN: %target-build-swift -O -g -enable-library-evolution %s + +// rdar://71319547 + +import _Differentiation + + +// Assertion failed: (mainPullbackStruct->getType() == pbStructLoweredType), function run, file swift/lib/SILOptimizer/Differentiation/PullbackCloner.cpp, line 1899. +// Stack dump: +// 1. Swift version 5.3-dev (LLVM 618cb952e0f199a, Swift d74c261f098665c) +// 2. While evaluating request ExecuteSILPipelineRequest(Run pipelines { Mandatory Diagnostic Passes + Enabling Optimization Passes } on SIL for main.main) +// 3. While running pass #17 SILModuleTransform "Differentiation". +// 4. While processing // differentiability witness for foo(_:) +// sil_differentiability_witness [serialized] [parameters 0] [results 0] @$s4main3fooyS2fF : $@convention(thin) (Float) -> Float { +// } +@differentiable(wrt: x) +public func i_have_a_pullback_struct(_ x: Float) -> Float { + return x +} + + +// Assertion failed: (v->getType().isObject()), function operator(), file swift/lib/SIL/Utils/ValueUtils.cpp, line 22. +// Stack dump: +// 1. Swift version 5.3-dev (LLVM 618cb952e0f199a, Swift d74c261f098665c) +// 2. While evaluating request ExecuteSILPipelineRequest(Run pipelines { Mandatory Diagnostic Passes + Enabling Optimization Passes } on SIL for main.main) +// 3. While running pass #24 SILModuleTransform "Differentiation". +// 4. 
While processing // differentiability witness for i_have_a_branching_trace_enum(_:) +// sil_differentiability_witness [serialized] [parameters 0] [results 0] @$s4main29i_have_a_branching_trace_enumyS2fF : $@convention(thin) (Float) -> Float { +// } +@differentiable(wrt: x) +public func i_have_a_branching_trace_enum(_ x: Float) -> Float { + if true { + return x + } else { + return x.squareRoot() + } +} From 437765e7e1dbbc40bcac10ad3ff5748b5a70fe06 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 22 Sep 2020 17:44:29 -0700 Subject: [PATCH 22/75] LICM: split loads that are wider than the loop-stored value. For combined load-store hoisting, split loads that contain the loop-stored value into a single load from the same address as the loop-stores, and a set of loads disjoint from the loop-stores. The single load will be hoisted while sinking the stores to the same address. The disjoint loads will be hoisted normally in a subsequent iteration on the same loop. loop: load %outer store %inner1 exit: Will be split into loop: load %inner1 load %inner2 store %inner1 exit: Then, combined load/store hoisting will produce: load %inner1 loop: load %inner2 exit: store %inner1 --- lib/SILOptimizer/LoopTransforms/LICM.cpp | 221 ++++++++++++- test/SILOptimizer/licm.sil | 396 +++++++++++++++++++++++ 2 files changed, 603 insertions(+), 14 deletions(-) diff --git a/lib/SILOptimizer/LoopTransforms/LICM.cpp b/lib/SILOptimizer/LoopTransforms/LICM.cpp index 7b82f42898683..06091676978b3 100644 --- a/lib/SILOptimizer/LoopTransforms/LICM.cpp +++ b/lib/SILOptimizer/LoopTransforms/LICM.cpp @@ -505,6 +505,7 @@ class LoopTreeOptimization { llvm::DenseMap> LoopNestSummaryMap; SmallVector BotUpWorkList; + InstSet toDelete; SILLoopInfo *LoopInfo; AliasAnalysis *AA; SideEffectAnalysis *SEA; @@ -525,6 +526,8 @@ class LoopTreeOptimization { InstVector SinkDown; /// Load and store instructions that we may be able to move out of the loop. 
+ /// All loads and stores within a block must be in instruction order to + /// simplify replacement of values after SSA update. InstVector LoadsAndStores; /// All access paths of the \p LoadsAndStores instructions. @@ -561,12 +564,22 @@ class LoopTreeOptimization { /// Collect a set of instructions that can be hoisted void analyzeCurrentLoop(std::unique_ptr &CurrSummary); + SingleValueInstruction *splitLoad(SILValue splitAddress, + ArrayRef remainingPath, + SILBuilder &builder, + SmallVectorImpl &Loads, + unsigned ldStIdx); + + /// Given an \p accessPath that is only loaded and stored, split loads that + /// are wider than \p accessPath. + bool splitLoads(SmallVectorImpl &Loads, AccessPath accessPath, + SILValue storeAddr); + /// Optimize the current loop nest. bool optimizeLoop(std::unique_ptr &CurrSummary); - /// Move all loads and stores from/to \p access out of the \p loop. - void hoistLoadsAndStores(AccessPath accessPath, SILLoop *loop, - InstVector &toDelete); + /// Move all loads and stores from/to \p accessPath out of the \p loop. + void hoistLoadsAndStores(AccessPath accessPath, SILLoop *loop); /// Move all loads and stores from all addresses in LoadAndStoreAddrs out of /// the \p loop. @@ -799,6 +812,8 @@ static bool analyzeBeginAccess(BeginAccessInst *BI, // We *need* to discover all SideEffectInsts - // even if the loop is otherwise skipped! // This is because outer loops will depend on the inner loop's writes. +// +// This may split some loads into smaller loads. void LoopTreeOptimization::analyzeCurrentLoop( std::unique_ptr &CurrSummary) { InstSet &sideEffects = CurrSummary->SideEffectInsts; @@ -915,15 +930,22 @@ void LoopTreeOptimization::analyzeCurrentLoop( // Collect memory locations for which we can move all loads and stores out // of the loop. + // + // Note: The Loads set and LoadsAndStores set may mutate during this loop. 
for (StoreInst *SI : Stores) { // Use AccessPathWithBase to recover a base address that can be used for // newly inserted memory operations. If we instead teach hoistLoadsAndStores // how to rematerialize global_addr, then we don't need this base. auto access = AccessPathWithBase::compute(SI->getDest()); - if (access.accessPath.isValid() && isLoopInvariant(access.base, Loop)) { + auto accessPath = access.accessPath; + if (accessPath.isValid() && isLoopInvariant(access.base, Loop)) { if (isOnlyLoadedAndStored(AA, sideEffects, Loads, Stores, SI->getDest(), - access.accessPath)) { - LoadAndStoreAddrs.insert(accessPath); + accessPath)) { + if (!LoadAndStoreAddrs.count(accessPath)) { + if (splitLoads(Loads, accessPath, SI->getDest())) { + LoadAndStoreAddrs.insert(accessPath); + } + } } } } @@ -950,6 +972,172 @@ void LoopTreeOptimization::analyzeCurrentLoop( } } +// Recursively determine whether the innerAddress is a direct tuple or struct +// projection chain from outerPath. Populate \p reversePathIndices with the path +// difference. +static bool +computeInnerAccessPath(AccessPath::PathNode outerPath, + AccessPath::PathNode innerPath, SILValue innerAddress, + SmallVectorImpl &reversePathIndices) { + if (outerPath == innerPath) + return true; + + if (!isa(innerAddress) + && !isa(innerAddress)) { + return false; + } + assert(ProjectionIndex(innerAddress).Index + == innerPath.getIndex().getSubObjectIndex()); + + reversePathIndices.push_back(innerPath.getIndex()); + SILValue srcAddr = cast(innerAddress)->getOperand(0); + if (!computeInnerAccessPath(outerPath, innerPath.getParent(), srcAddr, + reversePathIndices)) { + return false; + } + return true; +} + +/// Split a load from \p outerAddress recursively following remainingPath. +/// +/// Creates a load with identical \p accessPath and a set of +/// non-overlapping loads. Add the new non-overlapping loads to HoistUp. +/// +/// \p ldstIdx is the index into LoadsAndStores of the original outer load. 
+/// +/// Return the aggregate produced by merging the loads. +SingleValueInstruction *LoopTreeOptimization::splitLoad( + SILValue splitAddress, ArrayRef remainingPath, + SILBuilder &builder, SmallVectorImpl &Loads, unsigned ldstIdx) { + auto loc = LoadsAndStores[ldstIdx]->getLoc(); + // Recurse until we have a load that matches accessPath. + if (remainingPath.empty()) { + // Create a load that matches the stored access path. + LoadInst *load = builder.createLoad(loc, splitAddress, + LoadOwnershipQualifier::Unqualified); + Loads.push_back(load); + // Replace the outer load in the list of loads and stores to hoist and + // sink. LoadsAndStores must remain in instruction order. + LoadsAndStores[ldstIdx] = load; + LLVM_DEBUG(llvm::dbgs() << "Created load from stored path: " << *load); + return load; + } + auto recordDisjointLoad = [&](LoadInst *newLoad) { + Loads.push_back(newLoad); + LoadsAndStores.insert(LoadsAndStores.begin() + ldstIdx + 1, newLoad); + }; + auto subIndex = remainingPath.back().getSubObjectIndex(); + SILType loadTy = splitAddress->getType(); + if (CanTupleType tupleTy = loadTy.getAs()) { + SmallVector elements; + for (int tupleIdx : range(tupleTy->getNumElements())) { + auto *projection = builder.createTupleElementAddr( + loc, splitAddress, tupleIdx, loadTy.getTupleElementType(tupleIdx)); + SILValue elementVal; + if (tupleIdx == subIndex) { + elementVal = splitLoad(projection, remainingPath.drop_back(), builder, + Loads, ldstIdx); + } else { + elementVal = builder.createLoad(loc, projection, + LoadOwnershipQualifier::Unqualified); + recordDisjointLoad(cast(elementVal)); + } + elements.push_back(elementVal); + } + return builder.createTuple(loc, elements); + } + auto structTy = loadTy.getStructOrBoundGenericStruct(); + assert(structTy && "tuple and struct elements are checked earlier"); + auto &module = builder.getModule(); + auto expansionContext = builder.getFunction().getTypeExpansionContext(); + + SmallVector elements; + int fieldIdx = 0; + for 
(auto *field : structTy->getStoredProperties()) { + SILType fieldTy = loadTy.getFieldType(field, module, expansionContext); + auto *projection = + builder.createStructElementAddr(loc, splitAddress, field, fieldTy); + SILValue fieldVal; + if (fieldIdx++ == subIndex) + fieldVal = splitLoad(projection, remainingPath.drop_back(), builder, + Loads, ldstIdx); + else { + fieldVal = builder.createLoad(loc, projection, + LoadOwnershipQualifier::Unqualified); + recordDisjointLoad(cast(fieldVal)); + } + elements.push_back(fieldVal); + } + return builder.createStruct(loc, loadTy.getObjectType(), elements); +} + +/// Find all loads that contain \p accessPath. Split them into a load with +/// identical accessPath and a set of non-overlapping loads. Add the new +/// non-overlapping loads to LoadsAndStores and HoistUp. +/// +/// TODO: The \p storeAddr parameter is only needed until we have an +/// AliasAnalysis interface that handles AccessPath. +bool LoopTreeOptimization::splitLoads(SmallVectorImpl &Loads, + AccessPath accessPath, + SILValue storeAddr) { + // The Loads set may mutate during this loop, but we only want to visit the + // original set. + for (unsigned loadsIdx = 0, endIdx = Loads.size(); loadsIdx != endIdx; + ++loadsIdx) { + auto *load = Loads[loadsIdx]; + if (toDelete.count(load)) + continue; + + if (!AA->mayReadFromMemory(load, storeAddr)) + continue; + + AccessPath loadAccessPath = AccessPath::compute(load->getOperand()); + if (accessPath.contains(loadAccessPath)) + continue; + + assert(loadAccessPath.contains(accessPath)); + LLVM_DEBUG(llvm::dbgs() << "Overlaps with loop stores: " << *load); + SmallVector reversePathIndices; + if (!computeInnerAccessPath(loadAccessPath.getPathNode(), + accessPath.getPathNode(), storeAddr, + reversePathIndices)) { + return false; + } + // Found a load wider than the store to accessPath. 
+ // + // SplitLoads is called for each unique access path in the loop that is + // only loaded from and stored to and this loop takes time proportional to: + // num-wide-loads x num-fields x num-loop-memops + // + // For each load wider than the store, it creates a new load for each field + // in that type. Each new load is inserted in the LoadsAndStores vector. To + // avoid super-linear behavior for large types (e.g. giant tuples), limit + // growth of new loads to an arbitrary constant factor per access path. + if (Loads.size() >= endIdx + 6) { + LLVM_DEBUG(llvm::dbgs() << "...Refusing to split more loads\n"); + return false; + } + LLVM_DEBUG(llvm::dbgs() << "...Splitting load\n"); + + unsigned ldstIdx = [this, load]() { + auto ldstIter = llvm::find(LoadsAndStores, load); + assert(ldstIter != LoadsAndStores.end() && "outerLoad missing"); + return std::distance(LoadsAndStores.begin(), ldstIter); + }(); + + SILBuilderWithScope builder(load); + + SILValue aggregateVal = splitLoad(load->getOperand(), reversePathIndices, + builder, Loads, ldstIdx); + + load->replaceAllUsesWith(aggregateVal); + auto iterAndInserted = toDelete.insert(load); + (void)iterAndInserted; + assert(iterAndInserted.second && "the same load should only be split once"); + } + return true; +} + bool LoopTreeOptimization::optimizeLoop( std::unique_ptr &CurrSummary) { auto *CurrentLoop = CurrSummary->Loop; @@ -964,6 +1152,8 @@ bool LoopTreeOptimization::optimizeLoop( currChanged |= sinkInstructions(CurrSummary, DomTree, LoopInfo, SinkDown); currChanged |= hoistSpecialInstruction(CurrSummary, DomTree, LoopInfo, SpecialHoist); + + assert(toDelete.empty() && "only hostAllLoadsAndStores deletes"); return currChanged; } @@ -1089,8 +1279,8 @@ storesCommonlyDominateLoopExits(AccessPath accessPath, return true; } -void LoopTreeOptimization::hoistLoadsAndStores( - AccessPath accessPath, SILLoop *loop, InstVector &toDelete) { +void LoopTreeOptimization:: +hoistLoadsAndStores(AccessPath accessPath, SILLoop 
*loop) { SmallVector exitingAndLatchBlocks; loop->getExitingAndLatchBlocks(exitingAndLatchBlocks); @@ -1171,7 +1361,7 @@ void LoopTreeOptimization::hoistLoadsAndStores( if (auto *SI = isStoreToAccess(I, accessPath)) { LLVM_DEBUG(llvm::dbgs() << "Deleting reloaded store " << *SI); currentVal = SI->getSrc(); - toDelete.push_back(SI); + toDelete.insert(SI); continue; } auto loadWithAccess = isLoadWithinAccess(I, accessPath); @@ -1190,7 +1380,7 @@ void LoopTreeOptimization::hoistLoadsAndStores( LLVM_DEBUG(llvm::dbgs() << "Replacing stored load " << *load << " with " << projectedValue); load->replaceAllUsesWith(projectedValue); - toDelete.push_back(load); + toDelete.insert(load); } // Store back the value at all loop exits. @@ -1215,17 +1405,20 @@ void LoopTreeOptimization::hoistLoadsAndStores( } bool LoopTreeOptimization::hoistAllLoadsAndStores(SILLoop *loop) { - InstVector toDelete; for (AccessPath accessPath : LoadAndStoreAddrs) { - hoistLoadsAndStores(accessPath, loop, toDelete); + hoistLoadsAndStores(accessPath, loop); } LoadsAndStores.clear(); LoadAndStoreAddrs.clear(); + if (toDelete.empty()) + return false; + for (SILInstruction *I : toDelete) { - I->eraseFromParent(); + recursivelyDeleteTriviallyDeadInstructions(I, /*force*/ true); } - return !toDelete.empty(); + toDelete.clear(); + return true; } namespace { diff --git a/test/SILOptimizer/licm.sil b/test/SILOptimizer/licm.sil index fe52698e8f28a..378139f604990 100644 --- a/test/SILOptimizer/licm.sil +++ b/test/SILOptimizer/licm.sil @@ -922,3 +922,399 @@ bb5: %99 = tuple () return %99 : $() } + +// Test load splitting with a loop-invariant stored value. The loop +// will be empty after combined load/store hoisting/sinking. +// +// TODO: sink a struct_extract (or other non-side-effect instructions) +// with no uses in the loop. 
+// +// CHECK-LABEL: sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64 +// CHECK: [[STOREDVAL:%.*]] = struct_extract %0 : $Int64, #Int64._value +// CHECK: br bb1([[PRELOAD]] : $Int64) +// CHECK: bb1([[PHI:%.*]] : $Int64): +// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64) +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1(%0 : $Int64) +// CHECK: bb3: +// CHECK-NEXT: store %0 to %{{.*}} : $*Int64 +// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[PHI]] : $Int64, [[STOREDVAL]] : $Builtin.Int64) +// CHECK-LABEL: } // end sil function 'testLoadSplit' +sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*Index + %val2 = load %middleAddr1 : $*Int64 + %outerAddr2 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr2 = struct_element_addr %outerAddr2 : $*Index, #Index.value + store %0 to %middleAddr2 : $*Int64 + %innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value + %val3 = load %innerAddr1 : $*Builtin.Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %result = tuple (%val1 : $Index, %val2 : $Int64, %val3 : $Builtin.Int64) + return %result : $(Index, Int64, Builtin.Int64) +} + +// Test load splitting with a loop-varying stored value. 
+// CHECK-LABEL: sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64 +// CHECK: br bb1(%4 : $Int64) +// CHECK: bb1([[PHI:%.*]] : $Int64): +// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64) +// CHECK-NEXT: [[EXTRACT:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value +// CHECK-NEXT: builtin "uadd_with_overflow_Int32"([[EXTRACT]] : $Builtin.Int64 +// CHECK-NEXT: tuple_extract +// CHECK-NEXT: [[ADD:%.*]] = struct $Int64 +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1([[ADD]] : $Int64) +// CHECK: bb3: +// CHECK-NEXT: store [[ADD]] to %{{.*}} : $*Int64 +// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[ADD]] : $Int64, [[EXTRACT]] : $Builtin.Int64) +// CHECK-LABEL: } // end sil function 'testLoadSplitPhi' +sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value + %innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value + br bb1 + +bb1: + %outerVal = load %outerAddr1 : $*Index + %innerVal = load %innerAddr1 : $*Builtin.Int64 + %one = integer_literal $Builtin.Int64, 1 + %zero = integer_literal $Builtin.Int1, 0 + %add = builtin "uadd_with_overflow_Int32"(%innerVal : $Builtin.Int64, %one : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %inc = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0 + %outerAddr2 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr2 = struct_element_addr %outerAddr2 : $*Index, #Index.value + %newVal = struct $Int64 (%inc : $Builtin.Int64) + store %newVal to %middleAddr2 : $*Int64 + %middleVal = load %middleAddr1 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %result 
= tuple (%outerVal : $Index, %middleVal : $Int64, %innerVal : $Builtin.Int64) + return %result : $(Index, Int64, Builtin.Int64) +} + +struct State { + @_hasStorage var valueSet: (Int64, Int64, Int64) { get set } + @_hasStorage var singleValue: Int64 { get set } +} + +// Test the we can remove a store to an individual tuple element when +// the struct containing the tuple is used within the loop. +// The optimized loop should only contain the add operation and a phi, with no memory access. +// +// CHECK-LABEL: sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State { +// CHECK: bb0(%0 : $Builtin.RawPointer): +// CHECK: [[HOISTADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0 +// ...Preload stored element #1 +// CHECK: [[PRELOADADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1 +// CHECK: [[PRELOAD:%.*]] = load [[PRELOADADR]] : $*Int64 +// ...Split element 0 +// CHECK: [[SPLIT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0 +// CHECK: [[ELT0:%.*]] = load [[SPLIT0]] : $*Int64 +// ...Split element 2 +// CHECK: [[SPLIT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2 +// CHECK: [[ELT2:%.*]] = load [[SPLIT2]] : $*Int64 +// ...Split State.singlevalue +// CHECK: [[SINGLEADR:%.*]] = struct_element_addr %{{.*}} : $*State, #State.singleValue +// CHECK: [[SINGLEVAL:%.*]] = load [[SINGLEADR]] : $*Int64 +// ...Hoisted element 0 +// CHECK: [[HOISTLOAD:%.*]] = load [[HOISTADR]] : $*Int64 +// CHECK: [[HOISTVAL:%.*]] = struct_extract [[HOISTLOAD]] : $Int64, #Int64._value +// CHECK: br bb1([[PRELOAD]] : $Int64) +// ...Loop +// CHECK: bb1([[PHI:%.*]] : $Int64): +// CHECK-NEXT: [[TUPLE:%.*]] = tuple ([[ELT0]] : $Int64, [[PHI]] : $Int64, [[ELT2]] : $Int64) +// CHECK-NEXT: [[STRUCT:%.*]] = struct $State ([[TUPLE]] : $(Int64, Int64, Int64), [[SINGLEVAL]] : $Int64) +// CHECK-NEXT: [[ADDEND:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value +// CHECK-NEXT: [[UADD:%.*]] = builtin 
"uadd_with_overflow_Int32"([[HOISTVAL]] : $Builtin.Int64, [[ADDEND]] : $Builtin.Int64, %{{.*}} : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) +// CHECK-NEXT: [[ADDVAL:%.*]] = tuple_extract [[UADD]] : $(Builtin.Int64, Builtin.Int1), 0 +// CHECK-NEXT: [[ADDINT:%.*]] = struct $Int64 ([[ADDVAL]] : $Builtin.Int64) +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1([[ADDINT]] : $Int64) +// CHECK: bb3: +// CHECK-NEXT: store [[ADDINT]] to [[PRELOADADR]] : $*Int64 +// CHECK-NEXT: return [[STRUCT]] : $State +// CHECK-LABEL: } // end sil function 'testTupleSplit' +sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State { +bb0(%0 : $Builtin.RawPointer): + %stateAddr = pointer_to_address %0 : $Builtin.RawPointer to $*State + %tupleAddr0 = struct_element_addr %stateAddr : $*State, #State.valueSet + %elementAddr0 = tuple_element_addr %tupleAddr0 : $*(Int64, Int64, Int64), 0 + %tupleAddr1 = struct_element_addr %stateAddr : $*State, #State.valueSet + %elementAddr1 = tuple_element_addr %tupleAddr1 : $*(Int64, Int64, Int64), 1 + %tupleAddr11 = struct_element_addr %stateAddr : $*State, #State.valueSet + %elementAddr11 = tuple_element_addr %tupleAddr11 : $*(Int64, Int64, Int64), 1 + br bb1 + +bb1: + %state = load %stateAddr : $*State + %element0 = load %elementAddr0 : $*Int64 + %val0 = struct_extract %element0 : $Int64, #Int64._value + %element1 = load %elementAddr1 : $*Int64 + %val1 = struct_extract %element1 : $Int64, #Int64._value + %zero = integer_literal $Builtin.Int1, 0 + %add = builtin "uadd_with_overflow_Int32"(%val0 : $Builtin.Int64, %val1 : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %addVal = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0 + %addInt = struct $Int64 (%addVal : $Builtin.Int64) + store %addInt to %elementAddr11 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %state : $State +} + +// Test multiple stores to disjoint access paths with a single load 
+// that spans both of them. The load should be split and hoisted and +and the stores be sunk. +// testCommonSplitLoad +// CHECK-LABEL: sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) { +// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer): +// CHECK: [[ELT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0 +// CHECK: [[V0:%.*]] = load [[ELT0]] : $*Int64 +// CHECK: [[ELT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2 +// CHECK: [[V2:%.*]] = load [[ELT2]] : $*Int64 +// CHECK: [[ELT1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1 +// CHECK: [[V1:%.*]] = load [[ELT1]] : $*Int64 +// CHECK: br bb1([[V0]] : $Int64, [[V2]] : $Int64) +// +// Nothing in this loop except phis... +// CHECK: bb1([[PHI0:%.*]] : $Int64, [[PHI2:%.*]] : $Int64): +// CHECK-NEXT: [[RESULT:%.*]] = tuple ([[PHI0]] : $Int64, [[V1]] : $Int64, [[PHI2]] : $Int64) +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1(%0 : $Int64, %0 : $Int64) +// +// Stores are all sunk...
+// CHECK: bb3: +// CHECK: store %0 to [[ELT2]] : $*Int64 +// CHECK: store %0 to [[ELT0]] : $*Int64 +// CHECK: return [[RESULT]] : $(Int64, Int64, Int64) +// CHECK-LABEL: } // end sil function 'testCommonSplitLoad' +sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, Int64, Int64) + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*(Int64, Int64, Int64) + %elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 0 + store %0 to %elementAddr0 : $*Int64 + %elementAddr2 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 2 + store %0 to %elementAddr2 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %val1 : $(Int64, Int64, Int64) +} + +// Two stores, one to the outer tuple and one to the inner tuple. This +// results in two access paths that are only loaded/stored to. First +// split the outer tuple when processing the outer access path, then +// the inner tuple when processing the inner access path. All loads +// should be hoisted and all stores should be sunk. 
+// +// CHECK-LABEL: sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer): +// CHECK: [[ELT_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 0 +// CHECK: [[V0:%.*]] = load [[ELT_0]] : $*Int64 +// CHECK: [[ELT_1a:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1 +// CHECK: [[ELT_1_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 0 +// CHECK: [[V_1_0:%.*]] = load [[ELT_1_0]] : $*Int64 +// CHECK: [[ELT_1b:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1 +// CHECK: [[ELT_1_1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 1 +// CHECK: [[V_1_1:%.*]] = load [[ELT_1_1]] : $*Int64 +// CHECK: br bb1([[V_0:%.*]] : $Int64, [[V_1_0]] : $Int64) +// +// Nothing in this loop except phis and tuple reconstruction... +// CHECK: bb1([[PHI_0:%.*]] : $Int64, [[PHI_1_0:%.*]] : $Int64): +// CHECK: [[INNER:%.*]] = tuple ([[PHI_1_0]] : $Int64, [[V_1_1]] : $Int64) +// CHECK: [[OUTER:%.*]] = tuple ([[PHI_0]] : $Int64, [[INNER]] : $(Int64, Int64)) +// CHECK: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK: br bb1(%0 : $Int64, %0 : $Int64) +// +// The two stores are sunk... 
+// CHECK: bb3: +// CHECK: store %0 to [[ELT_1_0]] : $*Int64 +// CHECK: store %0 to [[ELT_0]] : $*Int64 +// CHECK: return [[OUTER]] : $(Int64, (Int64, Int64)) +// CHECK-LABEL: } // end sil function 'testResplit' +sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*(Int64, (Int64, Int64)) + %elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 0 + store %0 to %elementAddr0 : $*Int64 + %elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0 + store %0 to %elementAddr10 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %val1 : $(Int64, (Int64, Int64)) +} + +// Two stores to overlapping accesspaths. Combined load/store hoisting +// cannot currently handle stores to overlapping accesspaths, so +// nothing is optimized. 
+// CHECK-LABEL: sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +// CHECK-LABEL: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer): +// CHECK-NOT: load +// CHECK: br bb1 +// CHECK: bb1: +// CHECK: load %{{.*}} : $*(Int64, (Int64, Int64)) +// CHECK: store {{.*}} : $*(Int64, Int64) +// CHECK: store {{.*}} : $*Int64 +// CHECK: cond_br undef, bb2, bb3 +// CHECK-NOT: store +// CHECK-LABEL: } // end sil function 'testTwoStores' +sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +bb0(%0 : $Int64, %1: $Int64, %2 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*(Int64, (Int64, Int64)) + %elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %tuple = tuple (%0 : $Int64, %1: $Int64) + store %tuple to %elementAddr1 : $*(Int64, Int64) + %elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0 + store %1 to %elementAddr10 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %val1 : $(Int64, (Int64, Int64)) +} + +// Two wide loads. The first can be successfully split and the second +// half hoisted. The second cannot be split because of a pointer +// cast. Make sure two remaining loads and the store are still in the loop. +// +// CHECK-LABEL: sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) { +// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer): +// +// The first load was split, so one half is hoisted. 
+// CHECK: [[V1:%.*]] = load %{{.*}} : $*Int64 +// CHECK: br bb1 +// CHECK: bb1: +// CHECK: [[V0:%.*]] = load %{{.*}} : $*Int64 +// CHECK: [[INNER:%.*]] = tuple ([[V0]] : $Int64, [[V1]] : $Int64) +// CHECK: store %0 to %{{.*}} : $*Int64 +// CHECK: [[OUTER:%.*]] = load %{{.*}} : $*(Int64, (Int64, Int64)) +// CHECK: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK: br bb1 +// CHECK: bb3: +// CHECK: [[RESULT:%.*]] = tuple ([[OUTER]] : $(Int64, (Int64, Int64)), [[INNER]] : $(Int64, Int64)) +// CHECK: return [[RESULT]] : $((Int64, (Int64, Int64)), (Int64, Int64)) +// CHECK-LABEL: } // end sil function 'testSplitNonStandardProjection' +sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %elt1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %ptr = address_to_pointer %elt1 : $*(Int64, Int64) to $Builtin.RawPointer + %ptrAdr = pointer_to_address %ptr : $Builtin.RawPointer to [strict] $*(Int64, Int64) + %val2 = load %ptrAdr : $*(Int64, Int64) + %eltptr0 = tuple_element_addr %ptrAdr : $*(Int64, Int64), 0 + store %0 to %eltptr0 : $*Int64 + // Process the outermost load after splitting the inner load + %val1 = load %outerAddr1 : $*(Int64, (Int64, Int64)) + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %result = tuple (%val1 : $(Int64, (Int64, Int64)), %val2 : $(Int64, Int64)) + return %result : $((Int64, (Int64, Int64)), (Int64, Int64)) +} + +// CHECK-LABEL: sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +// CHECK: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer): +// CHECK: [[ELT_1:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 1 +// CHECK: [[V1:%.*]] = load %4 : $*(Int64, Int64) +// CHECK: [[ELT_0:%.*]] = tuple_element_addr %3 
: $*(Int64, (Int64, Int64)), 0 +// CHECK: [[V0:%.*]] = load %6 : $*Int64 +// CHECK: [[ARG0:%.*]] = tuple (%0 : $Int64, %0 : $Int64) +// CHECK: [[ARG0_0:%.*]] = tuple_extract %8 : $(Int64, Int64), 0 +// CHECK: [[ARG1:%.*]] = tuple (%1 : $Int64, %1 : $Int64) +// CHECK: br bb1([[V1]] : $(Int64, Int64)) +// CHECK: bb1([[PHI:%.*]] : $(Int64, Int64)): +// CHECK: [[LOOPVAL:%.*]] = tuple ([[V0]] : $Int64, [[PHI]] : $(Int64, Int64)) +// CHECK: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK: br bb1([[ARG1]] : $(Int64, Int64)) +// CHECK: bb3: +// CHECK: store [[ARG1]] to [[ELT_1]] : $*(Int64, Int64) +// CHECK: [[EXTRACT0:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 0 +// CHECK: [[EXTRACT1:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 1 +// CHECK: [[EXTRACT1_1:%.*]] = tuple_extract [[EXTRACT1]] : $(Int64, Int64), 1 +// CHECK: [[TUPLE1:%.*]] = tuple ([[ARG0_0]] : $Int64, [[EXTRACT1_1]] : $Int64) +// CHECK: [[RESULT:%.*]] = tuple ([[EXTRACT0]] : $Int64, [[TUPLE1]] : $(Int64, Int64)) +// CHECK: return [[RESULT]] : $(Int64, (Int64, Int64)) +// CHECK-LABEL: } // end sil function 'testSameTwoStores' +sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +bb0(%0 : $Int64, %1: $Int64, %2 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %val = load %outerAddr1 : $*(Int64, (Int64, Int64)) + %elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %tupleA = tuple (%0 : $Int64, %0: $Int64) + store %tupleA to %elementAddr1 : $*(Int64, Int64) + %elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0 + %val10 = load %elementAddr10 : $*Int64 + %tupleB = tuple (%1 : $Int64, %1: $Int64) + store %tupleB to %elementAddr1 : $*(Int64, Int64) + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %extract0 = tuple_extract %val : $(Int64, (Int64, Int64)), 0 + %extract1 = tuple_extract 
%val : $(Int64, (Int64, Int64)), 1 + %extract11 = tuple_extract %extract1 : $(Int64, Int64), 1 + %inner = tuple (%val10 : $Int64, %extract11: $Int64) + %outer = tuple (%extract0 : $Int64, %inner: $(Int64, Int64)) + return %outer : $(Int64, (Int64, Int64)) +} From 92d2c236b8b68c9ea81beace2a0832bc8a740ae9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexis=20Laferrie=CC=80re?= Date: Thu, 12 Nov 2020 14:20:12 -0800 Subject: [PATCH 23/75] [Tests] Fix and refactor skip-function-bodies.swift --- test/Frontend/skip-function-bodies.swift | 107 +++++++++-------------- 1 file changed, 41 insertions(+), 66 deletions(-) diff --git a/test/Frontend/skip-function-bodies.swift b/test/Frontend/skip-function-bodies.swift index c8b1ef0ae7e78..6b37418ae5adc 100644 --- a/test/Frontend/skip-function-bodies.swift +++ b/test/Frontend/skip-function-bodies.swift @@ -13,21 +13,21 @@ // WARNING: module 'SwiftOnoneSupport' cannot be built with -experimental-skip-non-inlinable-function-bodies; this option has been automatically disabled // Check skipped bodies are neither typechecked nor SILgen'd -// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-non-inlinable-function-bodies -debug-forbid-typecheck-prefix INLINENOTYPECHECK %s -o %t/Skip.noninlinable.sil -// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -O -experimental-skip-all-function-bodies -debug-forbid-typecheck-prefix ALLNOTYPECHECK %s -o %t/Skip.all.sil -// %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY < %t/Skip.noninlinable.sil -// %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY < %t/Skip.all.sil +// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-non-inlinable-function-bodies -debug-forbid-typecheck-prefix NEVERTYPECHECK -debug-forbid-typecheck-prefix INLINENOTYPECHECK %s -o %t/Skip.noninlinable.sil +// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-all-function-bodies -debug-forbid-typecheck-prefix NEVERTYPECHECK 
-debug-forbid-typecheck-prefix ALLNOTYPECHECK %s -o %t/Skip.all.sil +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY,CHECK-NONINLINE-SIL < %t/Skip.noninlinable.sil +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY < %t/Skip.all.sil // Emit the module interface and check it against the same set of strings. // RUN: %target-swift-frontend -typecheck %s -enable-library-evolution -emit-module-interface-path %t/Skip.noninlinable.swiftinterface -experimental-skip-non-inlinable-function-bodies -// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY < %t/Skip.noninlinable.swiftinterface +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY,CHECK-NONINLINE-TEXTUAL < %t/Skip.noninlinable.swiftinterface // RUN: %target-swift-frontend -typecheck %s -enable-library-evolution -emit-module-interface-path %t/Skip.all.swiftinterface -experimental-skip-all-function-bodies -// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY < %t/Skip.all.swiftinterface +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY,CHECK-NONINLINE-TEXTUAL < %t/Skip.all.swiftinterface // Emit the module interface normally, it should be the same as when skipping // non-inlinable. 
// RUN: %target-swift-frontend -typecheck %s -enable-library-evolution -emit-module-interface-path %t/Skip.swiftinterface -// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY < %t/Skip.swiftinterface +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY,CHECK-NONINLINE-TEXTUAL < %t/Skip.swiftinterface // RUN: diff -u %t/Skip.noninlinable.swiftinterface %t/Skip.swiftinterface @usableFromInline @@ -58,16 +58,14 @@ public class InlinableDeinit { @_fixed_layout public class InlineAlwaysDeinit { @inline(__always) deinit { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) deinit body") // CHECK-NOT: "@inline(__always) deinit body" } } public class NormalDeinit { deinit { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK = 1 + let NEVERTYPECHECK_local = 1 _blackHole("regular deinit body") // CHECK-NOT: "regular deinit body" } } @@ -80,52 +78,44 @@ public class NormalDeinit { } @inline(__always) public func inlineAlwaysFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) func body") // CHECK-NOT: "@inline(__always) func body" } func internalFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("internal func body") // CHECK-NOT: "internal func body" } public func publicFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public func body") // CHECK-NOT: "public func body" } private func privateFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private func body") // CHECK-NOT: "private func body" } @inline(__always) public func inlineAlwaysLocalTypeFunc() { - let ALLNOTYPECHECK_outerLocal = 1 - let INLINENOTYPECHECK_outerLocal = 1 + let NEVERTYPECHECK_outerLocal = 1 typealias InlineAlwaysLocalType 
= Int _blackHole("@inline(__always) func body with local type") // CHECK-NOT: "@inline(__always) func body with local type" func takesInlineAlwaysLocalType(_ x: InlineAlwaysLocalType) { - let ALLNOTYPECHECK_innerLocal = 1 - let INLINENOTYPECHECK_innerLocal = 1 + let NEVERTYPECHECK_innerLocal = 1 _blackHole("nested func body inside @inline(__always) func body taking local type") // CHECK-NOT: "nested func body inside @inline(__always) func body taking local type" } takesInlineAlwaysLocalType(0) } public func publicLocalTypeFunc() { - let ALLNOTYPECHECK_outerLocal = 1 - let INLINENOTYPECHECK_outerLocal = 1 + let NEVERTYPECHECK_outerLocal = 1 typealias LocalType = Int _blackHole("public func body with local type") // CHECK-NOT: "public func body with local type" func takesLocalType(_ x: LocalType) { - let ALLNOTYPECHECK_innerLocal = 1 - let INLINENOTYPECHECK_innerLocal = 1 + let NEVERTYPECHECK_innerLocal = 1 _blackHole("nested func body inside public func body taking local type") // CHECK-NOT: "nested func body inside public func body taking local type" } takesLocalType(0) @@ -206,8 +196,7 @@ public struct Struct { @inline(__always) public func inlineAlwaysFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) method body") // CHECK-NOT: "@inline(__always) method body" } @@ -236,13 +225,6 @@ public struct Struct { } } - public var didSetVar: Int = 1 { - didSet { - // Body typechecked regardless - _blackHole("didSet body") // CHECK-NOT: "didSet body" - } - } - @_transparent public func transparentFunc() { let ALLNOTYPECHECK_local = 1 @@ -252,20 +234,17 @@ public struct Struct { } func internalFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("internal method body") // CHECK-NOT: "internal method body" } public func publicFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 
_blackHole("public method body") // CHECK-NOT: "public method body" } private func privateFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private method body") // CHECK-NOT: "private method body" } @@ -276,6 +255,14 @@ public struct Struct { // CHECK-ALL-ONLY-NOT: "@_transparent init body" } + public var didSetVar: Int = 1 { + didSet { + // Body typechecked regardless + _blackHole("didSet body") // CHECK-NONINLINE-SIL: "didSet body" + // CHECK-NONINLINE-TEXTUAL-NOT: "didSet body" + } + } + @inlinable public init() { let ALLNOTYPECHECK_local = 1 _blackHole("@inlinable init body") @@ -284,26 +271,22 @@ public struct Struct { } @inline(__always) public init(a: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) init body") // CHECK-NOT: "@inline(__always) init body" } init(c: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("internal init body") // CHECK-NOT: "internal init body" } public init(d: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public init body") // CHECK-NOT: "public init body" } private init(e: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private init body") // CHECK-NOT: "private init body" } @@ -316,8 +299,7 @@ public struct Struct { } @inline(__always) public subscript(a: Int, b: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) subscript getter") // CHECK-NOT: "@inline(__always) subscript getter" return 0 } @@ -333,36 +315,31 @@ public struct Struct { } subscript(a: Int, b: Int, c: Int, d: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 
_blackHole("internal subscript getter") // CHECK-NOT: "internal subscript getter" return 0 } public subscript(a: Int, b: Int, c: Int, d: Int, e: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public subscript getter") // CHECK-NOT: "public subscript getter" return 0 } private subscript(e: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private subscript getter") // CHECK-NOT: "private subscript getter" return 0 } @inline(__always) public var inlineAlwaysVar: Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) getter body") // CHECK-NOT: "@inline(__always) getter body" return 0 } public var publicVar: Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public getter body") // CHECK-NOT: "public getter body" return 0 } @@ -370,8 +347,7 @@ public struct Struct { public var inlineAlwaysSetter: Int { get { 0 } @inline(__always) set { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) setter body") // CHECK-NOT: "@inline(__always) setter body" } } @@ -379,8 +355,7 @@ public struct Struct { public var regularSetter: Int { get { 0 } set { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) setter body") // CHECK-NOT: "regular setter body" } } From 02c134372f4f380cd66bed997eb540a4fdf3b019 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexis=20Laferrie=CC=80re?= Date: Fri, 6 Nov 2020 12:40:29 -0800 Subject: [PATCH 24/75] [Sema] Add option to skip non-inlinable functions without types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This frontend flag can be used as an alternative to 
-experimental-skip-non-inlinable-function-bodies that doesn’t skip functions defining nested types. We want to keep these types as they are used by LLDB. Other functions ares safe to skip parsing and type-checking. rdar://71130519 --- include/swift/AST/Decl.h | 19 ++++++- include/swift/Basic/FunctionBodySkipping.h | 3 + include/swift/Option/Options.td | 4 ++ include/swift/Parse/Parser.h | 5 +- .../ArgsToFrontendOptionsConverter.cpp | 4 +- lib/Frontend/CompilerInvocation.cpp | 6 ++ lib/Parse/ParseDecl.cpp | 21 +++++-- lib/Sema/TypeCheckDeclPrimary.cpp | 8 +++ test/Frontend/skip-function-bodies.swift | 55 +++++++++++++++++++ 9 files changed, 116 insertions(+), 9 deletions(-) diff --git a/include/swift/AST/Decl.h b/include/swift/AST/Decl.h index f65216567d9cc..f01ef63d51cf0 100644 --- a/include/swift/AST/Decl.h +++ b/include/swift/AST/Decl.h @@ -391,7 +391,7 @@ class alignas(1 << DeclAlignInBits) Decl { SWIFT_INLINE_BITFIELD(SubscriptDecl, VarDecl, 2, StaticSpelling : 2 ); - SWIFT_INLINE_BITFIELD(AbstractFunctionDecl, ValueDecl, 3+8+1+1+1+1+1+1, + SWIFT_INLINE_BITFIELD(AbstractFunctionDecl, ValueDecl, 3+8+1+1+1+1+1+1+1, /// \see AbstractFunctionDecl::BodyKind BodyKind : 3, @@ -415,7 +415,11 @@ class alignas(1 << DeclAlignInBits) Decl { Synthesized : 1, /// Whether this member's body consists of a single expression. - HasSingleExpressionBody : 1 + HasSingleExpressionBody : 1, + + /// Whether peeking into this function detected nested type declarations. + /// This is set when skipping over the decl at parsing. 
+ HasNestedTypeDeclarations : 1 ); SWIFT_INLINE_BITFIELD(FuncDecl, AbstractFunctionDecl, 1+1+2+1+1+2+1, @@ -5544,6 +5548,7 @@ class AbstractFunctionDecl : public GenericContext, public ValueDecl { Bits.AbstractFunctionDecl.Throws = Throws; Bits.AbstractFunctionDecl.Synthesized = false; Bits.AbstractFunctionDecl.HasSingleExpressionBody = false; + Bits.AbstractFunctionDecl.HasNestedTypeDeclarations = false; } void setBodyKind(BodyKind K) { @@ -5690,6 +5695,16 @@ class AbstractFunctionDecl : public GenericContext, public ValueDecl { setBody(S, BodyKind::Parsed); } + /// Was there a nested type declaration detected when parsing this + /// function was skipped? + bool hasNestedTypeDeclarations() const { + return Bits.AbstractFunctionDecl.HasNestedTypeDeclarations; + } + + void setHasNestedTypeDeclarations(bool value) { + Bits.AbstractFunctionDecl.HasNestedTypeDeclarations = value; + } + /// Note that parsing for the body was delayed. /// /// The function should return the body statement and a flag indicating diff --git a/include/swift/Basic/FunctionBodySkipping.h b/include/swift/Basic/FunctionBodySkipping.h index 1d1f8b2deb43a..cd7042a8e595b 100644 --- a/include/swift/Basic/FunctionBodySkipping.h +++ b/include/swift/Basic/FunctionBodySkipping.h @@ -23,6 +23,9 @@ enum class FunctionBodySkipping : uint8_t { None, /// Only non-inlinable function bodies should be skipped. NonInlinable, + /// Only non-inlinable functions bodies without type definitions should + /// be skipped. + NonInlinableWithoutTypes, /// All function bodies should be skipped, where not otherwise required /// for type inference. 
All diff --git a/include/swift/Option/Options.td b/include/swift/Option/Options.td index d0bb1e0cb990f..e78486daa8014 100644 --- a/include/swift/Option/Options.td +++ b/include/swift/Option/Options.td @@ -297,6 +297,10 @@ def experimental_skip_non_inlinable_function_bodies: Flag<["-"], "experimental-skip-non-inlinable-function-bodies">, Flags<[FrontendOption, HelpHidden]>, HelpText<"Skip type-checking and SIL generation for non-inlinable function bodies">; +def experimental_skip_non_inlinable_function_bodies_without_types: + Flag<["-"], "experimental-skip-non-inlinable-function-bodies-without-types">, + Flags<[FrontendOption, HelpHidden]>, + HelpText<"Skip work on non-inlinable function bodies that do not declare nested types">; def profile_stats_events: Flag<["-"], "profile-stats-events">, Flags<[FrontendOption, HelpHidden]>, HelpText<"Profile changes to stats in -stats-output-dir">; diff --git a/include/swift/Parse/Parser.h b/include/swift/Parse/Parser.h index 6fdb6f54c0f24..1ea1d70925078 100644 --- a/include/swift/Parse/Parser.h +++ b/include/swift/Parse/Parser.h @@ -687,7 +687,10 @@ class Parser { /// Skip a braced block (e.g. function body). The current token must be '{'. /// Returns \c true if the parser hit the eof before finding matched '}'. - bool skipBracedBlock(); + /// + /// Set \c HasNestedTypeDeclarations to true if a token for a type + /// declaration is detected in the skipped block. + bool skipBracedBlock(bool &HasNestedTypeDeclarations); /// Skip over SIL decls until we encounter the start of a Swift decl or eof. 
void skipSILUntilSwiftDecl(); diff --git a/lib/Frontend/ArgsToFrontendOptionsConverter.cpp b/lib/Frontend/ArgsToFrontendOptionsConverter.cpp index e96093e8fdad8..2f62de53646e4 100644 --- a/lib/Frontend/ArgsToFrontendOptionsConverter.cpp +++ b/lib/Frontend/ArgsToFrontendOptionsConverter.cpp @@ -195,7 +195,9 @@ bool ArgsToFrontendOptionsConverter::convert( if (FrontendOptions::doesActionGenerateIR(Opts.RequestedAction) && (Args.hasArg(OPT_experimental_skip_non_inlinable_function_bodies) || - Args.hasArg(OPT_experimental_skip_all_function_bodies))) { + Args.hasArg(OPT_experimental_skip_all_function_bodies) || + Args.hasArg( + OPT_experimental_skip_non_inlinable_function_bodies_without_types))) { Diags.diagnose(SourceLoc(), diag::cannot_emit_ir_skipping_function_bodies); return true; } diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp index a9fd9ea827192..fe9e25d00a0b7 100644 --- a/lib/Frontend/CompilerInvocation.cpp +++ b/lib/Frontend/CompilerInvocation.cpp @@ -716,6 +716,12 @@ static bool ParseTypeCheckerArgs(TypeCheckerOptions &Opts, ArgList &Args, Opts.DebugTimeExpressions |= Args.hasArg(OPT_debug_time_expression_type_checking); + // Check for SkipFunctionBodies arguments in order from skipping less to + // skipping more. + if (Args.hasArg( + OPT_experimental_skip_non_inlinable_function_bodies_without_types)) + Opts.SkipFunctionBodies = FunctionBodySkipping::NonInlinableWithoutTypes; + // If asked to perform InstallAPI, go ahead and enable non-inlinable function // body skipping. 
if (Args.hasArg(OPT_experimental_skip_non_inlinable_function_bodies) || diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp index 8daeb13a237a7..cd0de335cbea2 100644 --- a/lib/Parse/ParseDecl.cpp +++ b/lib/Parse/ParseDecl.cpp @@ -3519,10 +3519,12 @@ static void diagnoseOperatorFixityAttributes(Parser &P, static unsigned skipUntilMatchingRBrace(Parser &P, bool &HasPoundDirective, bool &HasOperatorDeclarations, - bool &HasNestedClassDeclarations) { + bool &HasNestedClassDeclarations, + bool &HasNestedTypeDeclarations) { HasPoundDirective = false; HasOperatorDeclarations = false; HasNestedClassDeclarations = false; + HasNestedTypeDeclarations = false; unsigned OpenBraces = 1; @@ -3541,6 +3543,10 @@ static unsigned skipUntilMatchingRBrace(Parser &P, HasPoundDirective |= P.Tok.isAny(tok::pound_sourceLocation, tok::pound_line, tok::pound_if, tok::pound_else, tok::pound_endif, tok::pound_elseif); + + HasNestedTypeDeclarations |= P.Tok.isAny(tok::kw_class, tok::kw_struct, + tok::kw_enum); + if (P.consumeIf(tok::l_brace)) { ++OpenBraces; continue; @@ -4823,10 +4829,12 @@ bool Parser::canDelayMemberDeclParsing(bool &HasOperatorDeclarations, // we can't lazily parse. 
BacktrackingScope BackTrack(*this); bool HasPoundDirective; + bool HasNestedTypeDeclarations; skipUntilMatchingRBrace(*this, HasPoundDirective, HasOperatorDeclarations, - HasNestedClassDeclarations); + HasNestedClassDeclarations, + HasNestedTypeDeclarations); if (!HasPoundDirective) BackTrack.cancelBacktrack(); return !BackTrack.willBacktrack(); @@ -5514,7 +5522,7 @@ static ParameterList *parseOptionalAccessorArgument(SourceLoc SpecifierLoc, return ParameterList::create(P.Context, StartLoc, param, EndLoc); } -bool Parser::skipBracedBlock() { +bool Parser::skipBracedBlock(bool &HasNestedTypeDeclarations) { SyntaxParsingContext disabled(SyntaxContext); SyntaxContext->disable(); consumeToken(tok::l_brace); @@ -5528,7 +5536,8 @@ bool Parser::skipBracedBlock() { unsigned OpenBraces = skipUntilMatchingRBrace(*this, HasPoundDirectives, HasOperatorDeclarations, - HasNestedClassDeclarations); + HasNestedClassDeclarations, + HasNestedTypeDeclarations); if (consumeIf(tok::r_brace)) --OpenBraces; return OpenBraces != 0; @@ -6424,11 +6433,13 @@ void Parser::consumeAbstractFunctionBody(AbstractFunctionDecl *AFD, BodyRange.Start = Tok.getLoc(); // Advance the parser to the end of the block; '{' ... '}'. - skipBracedBlock(); + bool HasNestedTypeDeclarations; + skipBracedBlock(HasNestedTypeDeclarations); BodyRange.End = PreviousLoc; AFD->setBodyDelayed(BodyRange); + AFD->setHasNestedTypeDeclarations(HasNestedTypeDeclarations); if (isCodeCompletionFirstPass() && SourceMgr.rangeContainsCodeCompletionLoc(BodyRange)) { diff --git a/lib/Sema/TypeCheckDeclPrimary.cpp b/lib/Sema/TypeCheckDeclPrimary.cpp index f44e6be8a43d8..4dedb12ba6e29 100644 --- a/lib/Sema/TypeCheckDeclPrimary.cpp +++ b/lib/Sema/TypeCheckDeclPrimary.cpp @@ -2345,6 +2345,14 @@ class DeclChecker : public DeclVisitor { FunctionBodySkipping::All) return true; + // If we want all types (for LLDB) we can't skip functions with nested + // types. 
We could probably improve upon this and type-check only the + // nested types instead for better performances. + if (AFD->hasNestedTypeDeclarations() && + getASTContext().TypeCheckerOpts.SkipFunctionBodies == + FunctionBodySkipping::NonInlinableWithoutTypes) + return false; + // Only skip functions where their body won't be serialized return AFD->getResilienceExpansion() != ResilienceExpansion::Minimal; } diff --git a/test/Frontend/skip-function-bodies.swift b/test/Frontend/skip-function-bodies.swift index 6b37418ae5adc..b2711efea575c 100644 --- a/test/Frontend/skip-function-bodies.swift +++ b/test/Frontend/skip-function-bodies.swift @@ -3,6 +3,8 @@ // Check -emit-ir and -c are invalid when skipping function bodies // RUN: not %target-swift-frontend -emit-ir %s -experimental-skip-non-inlinable-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR // RUN: not %target-swift-frontend -c %s -experimental-skip-non-inlinable-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR +// RUN: not %target-swift-frontend -emit-ir %s -experimental-skip-non-inlinable-function-bodies-without-types %s 2>&1 | %FileCheck %s --check-prefix ERROR +// RUN: not %target-swift-frontend -c %s -experimental-skip-non-inlinable-function-bodies-without-types %s 2>&1 | %FileCheck %s --check-prefix ERROR // RUN: not %target-swift-frontend -emit-ir %s -experimental-skip-all-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR // RUN: not %target-swift-frontend -c %s -experimental-skip-all-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR // ERROR: -experimental-skip-*-function-bodies do not support emitting IR @@ -14,8 +16,10 @@ // Check skipped bodies are neither typechecked nor SILgen'd // RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-non-inlinable-function-bodies -debug-forbid-typecheck-prefix NEVERTYPECHECK -debug-forbid-typecheck-prefix INLINENOTYPECHECK %s -o %t/Skip.noninlinable.sil +// RUN: %target-swift-frontend -emit-sil 
-emit-sorted-sil -experimental-skip-non-inlinable-function-bodies-without-types -debug-forbid-typecheck-prefix NEVERTYPECHECK -debug-forbid-typecheck-prefix TYPESNOTYPECHECK %s -o %t/Skip.withouttypes.sil // RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-all-function-bodies -debug-forbid-typecheck-prefix NEVERTYPECHECK -debug-forbid-typecheck-prefix ALLNOTYPECHECK %s -o %t/Skip.all.sil // RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY,CHECK-NONINLINE-SIL < %t/Skip.noninlinable.sil +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-WITHOUTTYPES-ONLY,CHECK-NONINLINE-SIL < %t/Skip.withouttypes.sil // RUN: %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY < %t/Skip.all.sil // Emit the module interface and check it against the same set of strings. @@ -178,6 +182,57 @@ public func inlinableNestedLocalTypeFunc() { nestedFunc() } +public func funcWithEnum() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with enum body") + // CHECK-WITHOUTTYPES-ONLY: "func with enum body" + // CHECK-NONINLINE-ONLY-NOT: "func with enum body" + // CHECK-ALL-ONLY-NOT: "func with enum body" + enum E {} +} + +public func funcWithClass() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with class body") + // CHECK-WITHOUTTYPES-ONLY: "func with class body" + // CHECK-NONINLINE-ONLY-NOT: "func with class body" + // CHECK-ALL-ONLY-NOT: "func with class body" + class C {} +} + +public func funcWithStruct() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with struct body") + // CHECK-WITHOUTTYPES-ONLY: "func with struct body" + // CHECK-NONINLINE-ONLY-NOT: "func with struct body" + // CHECK-ALL-ONLY-NOT: "func with struct body" + struct S {} +} + +public func funcWithNestedFuncs() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with nested funcs body") + // CHECK-WITHOUTTYPES-ONLY: "func with nested funcs 
body" + // CHECK-NONINLINE-ONLY-NOT: "func with nested funcs body" + // CHECK-ALL-ONLY-NOT: "func with nested funcs body" + + func bar() { + _blackHole("nested func body") + // CHECK-WITHOUTTYPES-ONLY: "nested func body" + // FIXME: We could skip this nested function. + } + + func foo() { + _blackHole("nested func with type body") + // CHECK-WITHOUTTYPES-ONLY: "nested func with type body" + struct S {} + } +} + public struct Struct { @inlinable public var inlinableVar: Int { let ALLNOTYPECHECK_local = 1 From a89f8e04d65ce804c6899008a8d4d900b6cbec85 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Thu, 12 Nov 2020 12:42:55 -0800 Subject: [PATCH 25/75] [NFC] Drop Unused Includes from FrontendTool --- lib/FrontendTool/FrontendTool.cpp | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp index ee78f97ace5d9..d35aa8cea4f58 100644 --- a/lib/FrontendTool/FrontendTool.cpp +++ b/lib/FrontendTool/FrontendTool.cpp @@ -41,7 +41,6 @@ #include "swift/Basic/Dwarf.h" #include "swift/Basic/Edit.h" #include "swift/Basic/FileSystem.h" -#include "swift/Basic/JSONSerialization.h" #include "swift/Basic/LLVMInitialize.h" #include "swift/Basic/Platform.h" #include "swift/Basic/PrettyStackTrace.h" @@ -49,7 +48,6 @@ #include "swift/Basic/Statistic.h" #include "swift/Basic/UUID.h" #include "swift/Option/Options.h" -#include "swift/Frontend/DiagnosticVerifier.h" #include "swift/Frontend/Frontend.h" #include "swift/Frontend/PrintingDiagnosticConsumer.h" #include "swift/Frontend/SerializedDiagnosticConsumer.h" @@ -64,29 +62,20 @@ #include "swift/Serialization/SerializationOptions.h" #include "swift/Serialization/SerializedModuleLoader.h" #include "swift/SILOptimizer/PassManager/Passes.h" -#include "swift/SIL/SILRemarkStreamer.h" #include "swift/Syntax/Serialization/SyntaxSerialization.h" #include "swift/Syntax/SyntaxNodes.h" #include "swift/TBDGen/TBDGen.h" -#include "clang/AST/ASTContext.h" -#include 
"clang/Basic/Module.h" - #include "llvm/ADT/Statistic.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IRReader/IRReader.h" #include "llvm/Option/Option.h" #include "llvm/Option/OptTable.h" -#include "llvm/Remarks/RemarkSerializer.h" #include "llvm/Support/Error.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Support/TargetSelect.h" -#include "llvm/Support/Timer.h" -#include "llvm/Support/YAMLTraits.h" -#include "llvm/Target/TargetMachine.h" #include #include From 76d25e7097f5834a61303c8e8894c2aced66cd21 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Thu, 12 Nov 2020 12:43:43 -0800 Subject: [PATCH 26/75] [NFC] Fixup InputFile Convenience Getters Follow programming guidelines for these getters more closely and have them return a non-owning view of the underlying data instead of relying on callers to take const references to the copy that is returned here. --- include/swift/Frontend/InputFile.h | 8 ++--- lib/FrontendTool/FrontendTool.cpp | 38 ++++++++++++---------- lib/FrontendTool/LoadedModuleTrace.cpp | 2 +- lib/FrontendTool/MakeStyleDependencies.cpp | 2 +- 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/include/swift/Frontend/InputFile.h b/include/swift/Frontend/InputFile.h index 417d0e5e5a283..f70f9f9437765 100644 --- a/include/swift/Frontend/InputFile.h +++ b/include/swift/Frontend/InputFile.h @@ -121,17 +121,17 @@ class InputFile final { // FrontendInputsAndOutputs. They merely make the call sites // a bit shorter. Add more forwarding methods as needed. 
- std::string dependenciesFilePath() const { + StringRef getDependenciesFilePath() const { return getPrimarySpecificPaths().SupplementaryOutputs.DependenciesFilePath; } - std::string loadedModuleTracePath() const { + StringRef getLoadedModuleTracePath() const { return getPrimarySpecificPaths().SupplementaryOutputs.LoadedModuleTracePath; } - std::string serializedDiagnosticsPath() const { + StringRef getSerializedDiagnosticsPath() const { return getPrimarySpecificPaths().SupplementaryOutputs .SerializedDiagnosticsPath; } - std::string fixItsOutputPath() const { + StringRef getFixItsOutputPath() const { return getPrimarySpecificPaths().SupplementaryOutputs.FixItsOutputPath; } }; diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp index d35aa8cea4f58..f7ed7443018f3 100644 --- a/lib/FrontendTool/FrontendTool.cpp +++ b/lib/FrontendTool/FrontendTool.cpp @@ -127,7 +127,7 @@ getFileOutputStream(StringRef OutputFilename, ASTContext &Ctx) { } /// Writes the Syntax tree to the given file -static bool emitSyntax(SourceFile &SF, StringRef OutputFilename) { +static bool emitSyntax(const SourceFile &SF, StringRef OutputFilename) { auto os = getFileOutputStream(OutputFilename, SF.getASTContext()); if (!os) return true; @@ -221,8 +221,8 @@ class JSONFixitWriter public: JSONFixitWriter(std::string fixitsOutputPath, const DiagnosticOptions &DiagOpts) - : FixitsOutputPath(fixitsOutputPath), - FixitAll(DiagOpts.FixitCodeForAllDiagnostics) {} + : FixitsOutputPath(std::move(fixitsOutputPath)), + FixitAll(DiagOpts.FixitCodeForAllDiagnostics) {} private: void handleDiagnostic(SourceManager &SM, @@ -1612,10 +1612,13 @@ static void emitIndexDataForSourceFile(SourceFile *PrimarySourceFile, if (moduleToken.empty()) moduleToken = opts.InputsAndOutputs.getSingleOutputFilename(); - (void) index::indexAndRecord(Instance.getMainModule(), opts.InputsAndOutputs.copyOutputFilenames(), + (void) index::indexAndRecord(Instance.getMainModule(), + 
opts.InputsAndOutputs.copyOutputFilenames(), moduleToken, opts.IndexStorePath, - opts.IndexSystemModules, opts.IndexIgnoreStdlib, - isDebugCompilation, Invocation.getTargetTriple(), + opts.IndexSystemModules, + opts.IndexIgnoreStdlib, + isDebugCompilation, + Invocation.getTargetTriple(), *Instance.getDependencyTracker()); } } @@ -1683,11 +1686,12 @@ createSerializedDiagnosticConsumerIfNeeded( return createDispatchingDiagnosticConsumerIfNeeded( inputsAndOutputs, [](const InputFile &input) -> std::unique_ptr { - std::string serializedDiagnosticsPath = input.serializedDiagnosticsPath(); - if (serializedDiagnosticsPath.empty()) - return nullptr; - return serialized_diagnostics::createConsumer(serializedDiagnosticsPath); - }); + auto serializedDiagnosticsPath = input.getSerializedDiagnosticsPath(); + if (serializedDiagnosticsPath.empty()) + return nullptr; + return serialized_diagnostics::createConsumer( + serializedDiagnosticsPath); + }); } /// Creates a diagnostic consumer that handles serializing diagnostics, based on @@ -1704,12 +1708,12 @@ createJSONFixItDiagnosticConsumerIfNeeded( return createDispatchingDiagnosticConsumerIfNeeded( invocation.getFrontendOptions().InputsAndOutputs, [&](const InputFile &input) -> std::unique_ptr { - std::string fixItsOutputPath = input.fixItsOutputPath(); - if (fixItsOutputPath.empty()) - return nullptr; - return std::make_unique( - fixItsOutputPath, invocation.getDiagnosticOptions()); - }); + auto fixItsOutputPath = input.getFixItsOutputPath(); + if (fixItsOutputPath.empty()) + return nullptr; + return std::make_unique( + fixItsOutputPath.str(), invocation.getDiagnosticOptions()); + }); } /// Print information about a diff --git a/lib/FrontendTool/LoadedModuleTrace.cpp b/lib/FrontendTool/LoadedModuleTrace.cpp index 01aafe1b4f1ee..8f51a0ebe1ad7 100644 --- a/lib/FrontendTool/LoadedModuleTrace.cpp +++ b/lib/FrontendTool/LoadedModuleTrace.cpp @@ -699,7 +699,7 @@ bool swift::emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule, 
assert(!ctxt.hadError() && "We should've already exited earlier if there was an error."); - auto loadedModuleTracePath = input.loadedModuleTracePath(); + auto loadedModuleTracePath = input.getLoadedModuleTracePath(); if (loadedModuleTracePath.empty()) return false; std::error_code EC; diff --git a/lib/FrontendTool/MakeStyleDependencies.cpp b/lib/FrontendTool/MakeStyleDependencies.cpp index f3e4d77a213ef..b305233f35274 100644 --- a/lib/FrontendTool/MakeStyleDependencies.cpp +++ b/lib/FrontendTool/MakeStyleDependencies.cpp @@ -91,7 +91,7 @@ bool swift::emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, DependencyTracker *depTracker, const FrontendOptions &opts, const InputFile &input) { - const std::string &dependenciesFilePath = input.dependenciesFilePath(); + auto dependenciesFilePath = input.getDependenciesFilePath(); if (dependenciesFilePath.empty()) return false; From b4b8778e48d77761b6319e08dd92dd2af5e758e8 Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Thu, 12 Nov 2020 16:18:24 -0800 Subject: [PATCH 27/75] Fixup getOverlayModule ClangModuleUnit::getOverlayModule implies that it requests the Swift module with the same name as the top level module, and loads it if it is available. Unfortunately, it used ASTContext::getModule to do so, which consults every module loader, including the clang importer. So, even though you couldn't actually get a clang module out at the end of the day, you could still force a clang module to load implicitly. When combined with namelookup's import graph computation forcing overlays, this meant the entire transitive import graph would be loaded because of the complicated mix of recursion and re-entrancy this created. The first step in teasing this apart is to define an API that doesn't re-enter the clang importer when it loads modules. Then, the callers that were relying on this need to be updated to explicitly call ASTContext::getModule themselves. This will also fix rdar://70745521 by happenstance. 
--- include/swift/AST/ASTContext.h | 7 +++++++ lib/AST/ASTContext.cpp | 21 +++++++++++++++++++++ lib/ClangImporter/ClangImporter.cpp | 25 +++++++++++++++---------- 3 files changed, 43 insertions(+), 10 deletions(-) diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h index 5ff10d980b4c0..5e536c5aaaea1 100644 --- a/include/swift/AST/ASTContext.h +++ b/include/swift/AST/ASTContext.h @@ -909,6 +909,13 @@ class ASTContext final { /// \returns The requested module, or NULL if the module cannot be found. ModuleDecl *getModule(ImportPath::Module ModulePath); + /// Attempts to load the matching overlay module for the given clang + /// module into this ASTContext. + /// + /// \returns The Swift overlay module corresponding to the given Clang module, + /// or NULL if the overlay module cannot be found. + ModuleDecl *getOverlayModule(const FileUnit *ClangModule); + ModuleDecl *getModuleByName(StringRef ModuleName); ModuleDecl *getModuleByIdentifier(Identifier ModuleID); diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp index 22edb7daec01c..7eb3d255e3a6b 100644 --- a/lib/AST/ASTContext.cpp +++ b/lib/AST/ASTContext.cpp @@ -1928,6 +1928,27 @@ ASTContext::getModule(ImportPath::Module ModulePath) { return nullptr; } +ModuleDecl *ASTContext::getOverlayModule(const FileUnit *FU) { + assert(FU && FU->getKind() == FileUnitKind::ClangModule && + "Overlays can only be retrieved for clang modules!"); + ImportPath::Module::Builder builder(FU->getParentModule()->getName()); + auto ModPath = builder.get(); + if (auto *Existing = getLoadedModule(ModPath)) { + if (!Existing->isNonSwiftModule()) + return Existing; + } + + for (auto &importer : getImpl().ModuleLoaders) { + if (importer.get() == getClangModuleLoader()) + continue; + if (ModuleDecl *M = importer->loadModule(SourceLoc(), ModPath)) { + return M; + } + } + + return nullptr; +} + ModuleDecl *ASTContext::getModuleByName(StringRef ModuleName) { ImportPath::Module::Builder builder(*this, ModuleName, 
/*separator=*/'.'); return getModule(builder.get()); diff --git a/lib/ClangImporter/ClangImporter.cpp b/lib/ClangImporter/ClangImporter.cpp index 2a279886b64db..9f260890d9cff 100644 --- a/lib/ClangImporter/ClangImporter.cpp +++ b/lib/ClangImporter/ClangImporter.cpp @@ -3425,18 +3425,23 @@ ModuleDecl *ClangModuleUnit::getOverlayModule() const { // FIXME: Include proper source location. ModuleDecl *M = getParentModule(); ASTContext &Ctx = M->getASTContext(); - auto overlay = Ctx.getModuleByIdentifier(M->getName()); - if (overlay == M) { - overlay = nullptr; - } else { - // FIXME: This bizarre and twisty invariant is due to nested - // re-entrancy in both clang module loading and overlay module loading. - auto *sharedModuleRef = Ctx.getLoadedModule(M->getName()); - assert(!sharedModuleRef || sharedModuleRef == overlay || - sharedModuleRef == M); + auto overlay = Ctx.getOverlayModule(this); + if (overlay) { Ctx.addLoadedModule(overlay); + } else { + // FIXME: This is the awful legacy of the old implementation of overlay + // loading laid bare. Because the previous implementation used + // ASTContext::getModuleByIdentifier, it consulted the clang importer + // recursively which forced the current module, its dependencies, and + // the overlays of those dependencies to load and + // become visible in the current context. All of the callers of + // ClangModuleUnit::getOverlayModule are relying on this behavior, and + // untangling them is going to take a heroic amount of effort. + // Clang module loading should *never* *ever* be allowed to load unrelated + // Swift modules. 
+    ImportPath::Module::Builder builder(M->getName());
+    (void) owner.loadModule(SourceLoc(), std::move(builder).get());
   }
-
   auto mutableThis = const_cast(this);
   mutableThis->overlayModule.setPointerAndInt(overlay, true);
 }

From 6db85203db1eecee0bea9705e42575d313a33111 Mon Sep 17 00:00:00 2001
From: Joe Groff 
Date: Thu, 12 Nov 2020 13:43:11 -0800
Subject: [PATCH 28/75] SIL: Abstraction pattern support for multiple foreign
 async returns.

An ObjC API may be imported as async that had multiple non-error
arguments to its completion handler, which we treat in Swift as
returning a tuple. Use a new form of abstraction pattern to represent
this return type, to maintain the correct relation between individual
tuple elements and the Clang block parameter types they map to.
---
 include/swift/SIL/AbstractionPattern.h | 43 ++++++++-
 lib/SIL/IR/AbstractionPattern.cpp | 87 +++++++++++++++----
 .../usr/include/ObjCConcurrency.h | 4 +
 test/SILGen/objc_async.swift | 22 ++++-
 4 files changed, 134 insertions(+), 22 deletions(-)

diff --git a/include/swift/SIL/AbstractionPattern.h b/include/swift/SIL/AbstractionPattern.h
index 5f22a8310bf54..93df4f66affa4 100644
--- a/include/swift/SIL/AbstractionPattern.h
+++ b/include/swift/SIL/AbstractionPattern.h
@@ -179,6 +179,11 @@ class AbstractionPattern {
     /// type. ObjCMethod is valid. OtherData is an encoded foreign
     /// error index.
     ObjCMethodType,
+    /// The type of an ObjC block used as a completion handler for
+    /// an API that has been imported into Swift as async,
+    /// representing the tuple of results of the async projection of the
+    /// API.
+    ObjCCompletionHandlerArgumentsType,
     /// The uncurried imported type of a C++ non-operator non-static member
     /// function. OrigType is valid and is a function type. CXXMethod is valid.
CXXMethodType, @@ -410,6 +415,7 @@ class AbstractionPattern { case Kind::CFunctionAsMethodType: case Kind::CurriedCFunctionAsMethodType: case Kind::PartialCurriedCFunctionAsMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: return true; default: @@ -445,7 +451,16 @@ class AbstractionPattern { } bool hasStoredForeignInfo() const { - return hasStoredObjCMethod(); + switch (getKind()) { + case Kind::CurriedObjCMethodType: + case Kind::PartialCurriedObjCMethodType: + case Kind::ObjCMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: + return true; + + default: + return false; + } } bool hasImportAsMemberStatus() const { @@ -552,6 +567,7 @@ class AbstractionPattern { case Kind::CXXOperatorMethodType: case Kind::CurriedCXXOperatorMethodType: case Kind::PartialCurriedCXXOperatorMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: return true; case Kind::Invalid: case Kind::Opaque: @@ -584,6 +600,22 @@ class AbstractionPattern { return pattern; } + /// Return an abstraction pattern for a result tuple + /// corresponding to the parameters of a completion handler + /// block of an API that was imported as async. + static AbstractionPattern + getObjCCompletionHandlerArgumentsType(CanGenericSignature sig, + CanType origTupleType, + const clang::Type *clangBlockType, + EncodedForeignInfo foreignInfo) { + AbstractionPattern pattern(Kind::ObjCCompletionHandlerArgumentsType); + pattern.initClangType(sig, origTupleType, clangBlockType, + Kind::ObjCCompletionHandlerArgumentsType); + pattern.OtherData = foreignInfo.getOpaqueValue(); + + return pattern; + } + public: /// Return an abstraction pattern for the curried type of an /// Objective-C method. @@ -592,6 +624,7 @@ class AbstractionPattern { const Optional &foreignError, const Optional &foreignAsync); + /// Return an abstraction pattern for the uncurried type of a C function /// imported as a method. 
/// @@ -927,6 +960,7 @@ class AbstractionPattern { case Kind::OpaqueDerivativeFunction: llvm_unreachable("opaque derivative function pattern has no type"); case Kind::ClangType: + case Kind::ObjCCompletionHandlerArgumentsType: case Kind::CurriedObjCMethodType: case Kind::PartialCurriedObjCMethodType: case Kind::ObjCMethodType: @@ -980,6 +1014,7 @@ class AbstractionPattern { case Kind::PartialCurriedCXXOperatorMethodType: case Kind::Type: case Kind::Discard: + case Kind::ObjCCompletionHandlerArgumentsType: assert(signature || !type->hasTypeParameter()); assert(hasSameBasicTypeStructure(OrigType, type)); GenericSig = (type->hasTypeParameter() ? signature : nullptr); @@ -1018,6 +1053,7 @@ class AbstractionPattern { case Kind::CXXOperatorMethodType: case Kind::CurriedCXXOperatorMethodType: case Kind::PartialCurriedCXXOperatorMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: return true; } llvm_unreachable("bad kind"); @@ -1097,6 +1133,7 @@ class AbstractionPattern { case Kind::PartialCurriedCXXOperatorMethodType: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: return false; case Kind::PartialCurriedObjCMethodType: case Kind::CurriedObjCMethodType: @@ -1136,6 +1173,7 @@ class AbstractionPattern { case Kind::PartialCurriedCXXOperatorMethodType: case Kind::Type: case Kind::Discard: + case Kind::ObjCCompletionHandlerArgumentsType: return dyn_cast(getType()); } llvm_unreachable("bad kind"); @@ -1167,6 +1205,7 @@ class AbstractionPattern { case Kind::PartialCurriedCXXOperatorMethodType: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: // We assume that the Clang type might provide additional structure. 
return false; case Kind::Type: @@ -1200,6 +1239,7 @@ class AbstractionPattern { case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: return false; + case Kind::ObjCCompletionHandlerArgumentsType: case Kind::Tuple: return true; case Kind::Type: @@ -1232,6 +1272,7 @@ class AbstractionPattern { llvm_unreachable("pattern is not a tuple"); case Kind::Tuple: return getNumTupleElements_Stored(); + case Kind::ObjCCompletionHandlerArgumentsType: case Kind::Type: case Kind::Discard: case Kind::ClangType: diff --git a/lib/SIL/IR/AbstractionPattern.cpp b/lib/SIL/IR/AbstractionPattern.cpp index 6813fa34c08c4..f0bb043ba6a32 100644 --- a/lib/SIL/IR/AbstractionPattern.cpp +++ b/lib/SIL/IR/AbstractionPattern.cpp @@ -203,6 +203,7 @@ AbstractionPattern::getOptional(AbstractionPattern object) { case Kind::PartialCurriedCXXOperatorMethodType: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: llvm_unreachable("cannot add optionality to non-type abstraction"); case Kind::Opaque: return AbstractionPattern::getOpaque(); @@ -310,6 +311,7 @@ bool AbstractionPattern::matchesTuple(CanTupleType substType) { return true; case Kind::Tuple: return getNumTupleElements_Stored() == substType->getNumElements(); + case Kind::ObjCCompletionHandlerArgumentsType: case Kind::ClangType: case Kind::Type: case Kind::Discard: { @@ -399,6 +401,19 @@ AbstractionPattern::getTupleElementType(unsigned index) const { return AbstractionPattern::getOpaque(); return AbstractionPattern(getGenericSignature(), getCanTupleElementType(getType(), index)); + + case Kind::ObjCCompletionHandlerArgumentsType: { + // Match up the tuple element with the parameter from the Clang block type, + // skipping the error parameter index if any. 
+ auto callback = cast(getClangType()); + auto errorIndex = getEncodedForeignInfo() + .getAsyncCompletionHandlerErrorParamIndex(); + unsigned paramIndex = index + (errorIndex && index >= *errorIndex); + return AbstractionPattern(getGenericSignature(), + getCanTupleElementType(getType(), index), + callback->getParamType(paramIndex).getTypePtr()); + } + } llvm_unreachable("bad kind"); } @@ -465,6 +480,7 @@ AbstractionPattern AbstractionPattern::getFunctionResultType() const { switch (getKind()) { case Kind::Invalid: llvm_unreachable("querying invalid abstraction pattern!"); + case Kind::ObjCCompletionHandlerArgumentsType: case Kind::Tuple: llvm_unreachable("abstraction pattern for tuple cannot be function"); case Kind::Opaque: @@ -524,25 +540,45 @@ AbstractionPattern AbstractionPattern::getFunctionResultType() const { ->getPointeeType() ->getAs(); - // The result is the first non-error argument to the callback. - unsigned callbackResultIndex = 0; - if (auto callbackErrorIndex = getEncodedForeignInfo() - .getAsyncCompletionHandlerErrorParamIndex()) { - if (*callbackErrorIndex == 0) { - callbackResultIndex = 1; - } + // The result comprises the non-error argument(s) to the callback, if + // any. + + auto callbackErrorIndex = getEncodedForeignInfo() + .getAsyncCompletionHandlerErrorParamIndex(); + assert((!callbackErrorIndex.hasValue() + || callbackParamTy->getNumParams() > *callbackErrorIndex) + && "completion handler has invalid error param index?!"); + unsigned numNonErrorParams + = callbackParamTy->getNumParams() - callbackErrorIndex.hasValue(); + + switch (numNonErrorParams) { + case 0: + // If there are no result arguments, then the imported result type is + // Void, with no interesting abstraction properties. + return AbstractionPattern(TupleType::getEmpty(getType()->getASTContext())); + + case 1: { + // If there's a single argument, abstract it according to its formal type + // in the ObjC signature. 
+ unsigned callbackResultIndex + = callbackErrorIndex && *callbackErrorIndex == 0; + auto clangResultType = callbackParamTy + ->getParamType(callbackResultIndex) + .getTypePtr(); + + return AbstractionPattern(getGenericSignatureForFunctionComponent(), + getResultType(getType()), clangResultType); } - - const clang::Type *clangResultType = nullptr; - if (callbackResultIndex < callbackParamTy->getNumParams()) { - clangResultType = callbackParamTy->getParamType(callbackResultIndex) - .getTypePtr(); - } else { - clangResultType = getObjCMethod()->getASTContext().VoidTy.getTypePtr(); + + default: + // If there are multiple results, we have a special abstraction pattern + // form to represent the mapping from block parameters to tuple elements + // in the return type. + return AbstractionPattern::getObjCCompletionHandlerArgumentsType( + getGenericSignatureForFunctionComponent(), + getResultType(getType()), callbackParamTy, + getEncodedForeignInfo()); } - - return AbstractionPattern(getGenericSignatureForFunctionComponent(), - getResultType(getType()), clangResultType); } return AbstractionPattern(getGenericSignatureForFunctionComponent(), @@ -594,6 +630,7 @@ AbstractionPattern::getObjCMethodAsyncCompletionHandlerType( case Kind::CurriedCFunctionAsMethodType: case Kind::CurriedCXXMethodType: case Kind::CurriedCXXOperatorMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: swift_unreachable("not appropriate for this kind"); } } @@ -791,6 +828,7 @@ AbstractionPattern AbstractionPattern::getOptionalObjectType() const { case Kind::Tuple: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: llvm_unreachable("pattern for function or tuple cannot be for optional"); case Kind::Opaque: @@ -837,6 +875,7 @@ AbstractionPattern AbstractionPattern::getReferenceStorageReferentType() const { case Kind::Tuple: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: 
return *this; case Kind::Type: return AbstractionPattern(getGenericSignature(), @@ -897,12 +936,15 @@ void AbstractionPattern::print(raw_ostream &out) const { case Kind::CurriedCFunctionAsMethodType: case Kind::PartialCurriedCFunctionAsMethodType: case Kind::CFunctionAsMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: out << (getKind() == Kind::ClangType ? "AP::ClangType(" : getKind() == Kind::CurriedCFunctionAsMethodType ? "AP::CurriedCFunctionAsMethodType(" : getKind() == Kind::PartialCurriedCFunctionAsMethodType - ? "AP::PartialCurriedCFunctionAsMethodType(" + ? "AP::PartialCurriedCFunctionAsMethodType(" : + getKind() == Kind::ObjCCompletionHandlerArgumentsType + ? "AP::ObjCCompletionHandlerArgumentsType(" : "AP::CFunctionAsMethodType("); if (auto sig = getGenericSignature()) { sig->print(out); @@ -922,6 +964,12 @@ void AbstractionPattern::print(raw_ostream &out) const { out << "static"; } } + if (hasStoredForeignInfo()) { + if (auto errorIndex + = getEncodedForeignInfo().getAsyncCompletionHandlerErrorParamIndex()){ + out << ", errorParamIndex=" << *errorIndex; + } + } out << ")"; return; case Kind::CXXMethodType: @@ -1069,6 +1117,9 @@ const { case Kind::OpaqueDerivativeFunction: llvm_unreachable("should not have an opaque derivative function pattern " "matching a struct/enum type"); + case Kind::ObjCCompletionHandlerArgumentsType: + llvm_unreachable("should not have a completion handler argument pattern " + "matching a struct/enum type"); case Kind::PartialCurriedObjCMethodType: case Kind::CurriedObjCMethodType: case Kind::PartialCurriedCFunctionAsMethodType: diff --git a/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h b/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h index 01a62d25ea599..73903fa03a0d3 100644 --- a/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h +++ b/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h @@ -14,6 +14,10 @@ -(void)getMagicNumberAsynchronouslyWithSeed:(NSInteger)seed 
completionHandler:(void (^)(NSInteger, NSError * _Nullable))handler; @property(readwrite) void (^completionHandler)(NSInteger); +-(void)findMultipleAnswersWithCompletionHandler:(void (^)(NSString *_Nullable, NSInteger, NSError * _Nullable))handler __attribute__((swift_name("findMultipleAnswers(completionHandler:)"))); + +-(void)findDifferentlyFlavoredBooleansWithCompletionHandler:(void (^)(BOOL wholeMilk, _Bool onePercent, NSError *_Nullable))handler __attribute__((swift_name("findDifferentlyFlavoredBooleans(completionHandler:)"))); + -(void)doSomethingConflicted:(NSString *)operation completionHandler:(void (^)(NSInteger))handler; -(NSInteger)doSomethingConflicted:(NSString *)operation; -(void)server:(NSString *)name restartWithCompletionHandler:(void (^)(void))block; diff --git a/test/SILGen/objc_async.swift b/test/SILGen/objc_async.swift index 30f6c91c7f8a3..c783ff6fd3fa3 100644 --- a/test/SILGen/objc_async.swift +++ b/test/SILGen/objc_async.swift @@ -1,4 +1,4 @@ -// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) -emit-silgen -I %S/Inputs/custom-modules -enable-experimental-concurrency %s -verify | %FileCheck %s +// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) -emit-silgen -I %S/Inputs/custom-modules -enable-experimental-concurrency %s -verify | %FileCheck --check-prefix=CHECK --check-prefix=CHECK-%target-cpu %s // REQUIRES: objc_interop import Foundation @@ -41,9 +41,16 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[VOID_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation<()>) -> () await slowServer.serverRestart("somewhere") + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[NSSTRING_INT_THROW_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation<(String, Int)>, Optional, Int, Optional) -> () + let (_, _): (String, Int) = try await slowServer.findMultipleAnswers() + + let (_, _): (Bool, 
Bool) = try await slowServer.findDifferentlyFlavoredBooleans() + // CHECK: [[ERROR]]([[ERROR_VALUE:%.*]] : @owned $Error): - // CHECK: dealloc_stack [[RESUME_BUF]] - // CHECK: throw [[ERROR_VALUE]] + // CHECK: dealloc_stack [[RESUME_BUF]] + // CHECK: br [[THROWBB:bb[0-9]+]]([[ERROR_VALUE]] + // CHECK: [[THROWBB]]([[ERROR_VALUE:%.*]] : @owned $Error): + // CHECK: throw [[ERROR_VALUE]] } @@ -85,3 +92,12 @@ func testSlowServer(slowServer: SlowServer) async throws { // CHECK: [[RESULT_BUF:%.*]] = alloc_stack $() // CHECK: [[RESUME:%.*]] = function_ref @{{.*}}resumeUnsafeContinuation // CHECK: apply [[RESUME]]<()>([[CONT]], [[RESULT_BUF]]) + +// CHECK: sil{{.*}}@[[NSSTRING_INT_THROW_COMPLETION_BLOCK]] +// CHECK: [[RESULT_BUF:%.*]] = alloc_stack $(String, Int) +// CHECK: [[RESULT_0_BUF:%.*]] = tuple_element_addr [[RESULT_BUF]] {{.*}}, 0 +// CHECK: [[BRIDGE:%.*]] = function_ref @{{.*}}unconditionallyBridgeFromObjectiveC +// CHECK: [[BRIDGED:%.*]] = apply [[BRIDGE]] +// CHECK: store [[BRIDGED]] to [init] [[RESULT_0_BUF]] +// CHECK: [[RESULT_1_BUF:%.*]] = tuple_element_addr [[RESULT_BUF]] {{.*}}, 1 +// CHECK: store %2 to [trivial] [[RESULT_1_BUF]] From 4156382f072d8742c58e8ac3db9c1f20f4595fb8 Mon Sep 17 00:00:00 2001 From: Nate Chandler Date: Wed, 28 Oct 2020 16:18:13 -0700 Subject: [PATCH 29/75] [NFC] Corrected typo. 
ClassMetadataLayout::getOffsett -> ClassMetadataLayout::getOffset --- lib/IRGen/GenClass.cpp | 2 +- lib/IRGen/MetadataLayout.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp index 74f0376b9683a..d03b1ab5a0204 100644 --- a/lib/IRGen/GenClass.cpp +++ b/lib/IRGen/GenClass.cpp @@ -2522,7 +2522,7 @@ FunctionPointer irgen::emitVirtualMethodValue(IRGenFunction &IGF, IGF.IGM.getClassMetadataLayout(classDecl).getMethodInfo(IGF, method); switch (methodInfo.getKind()) { case ClassMetadataLayout::MethodInfo::Kind::Offset: { - auto offset = methodInfo.getOffsett(); + auto offset = methodInfo.getOffset(); auto slot = IGF.emitAddressAtOffset(metadata, offset, signature.getType()->getPointerTo(), diff --git a/lib/IRGen/MetadataLayout.h b/lib/IRGen/MetadataLayout.h index 312b8c55876f5..c31a56f5c3e6a 100644 --- a/lib/IRGen/MetadataLayout.h +++ b/lib/IRGen/MetadataLayout.h @@ -188,7 +188,7 @@ class ClassMetadataLayout : public NominalMetadataLayout { Kind getKind() const { return TheKind; } - Offset getOffsett() const { + Offset getOffset() const { assert(getKind() == Kind::Offset); return TheOffset; } From db8183d7a19baf4cd09ce77b5edb589789b90609 Mon Sep 17 00:00:00 2001 From: Nate Chandler Date: Wed, 11 Nov 2020 14:48:06 -0800 Subject: [PATCH 30/75] [IRGen] Added LinkEntities for AsyncFunctionPointer. Two LinkEntities are needed to enable the construction during both IRGen and TBDGen. --- include/swift/IRGen/Linking.h | 36 +++++++++++++++++++++++++++++++---- lib/IRGen/Linking.cpp | 24 +++++++++++++++++++++++ 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/include/swift/IRGen/Linking.h b/include/swift/IRGen/Linking.h index 19bed5bd70a3e..32f75ab44f8dd 100644 --- a/include/swift/IRGen/Linking.h +++ b/include/swift/IRGen/Linking.h @@ -293,6 +293,11 @@ class LinkEntity { /// the metadata cache once. 
CanonicalPrespecializedGenericTypeCachingOnceToken, + /// The same as AsyncFunctionPointer but with a different stored value, for + /// use by TBDGen. + /// The pointer is a AbstractStorageDecl*. + AsyncFunctionPointerAST, + /// The pointer is a SILFunction*. DynamicallyReplaceableFunctionKey, @@ -410,6 +415,13 @@ class LinkEntity { /// passed to swift_getCanonicalSpecializedMetadata. /// The pointer is a canonical TypeBase*. NoncanonicalSpecializedGenericTypeMetadataCacheVariable, + + /// Provides the data required to invoke an async function using the async + /// calling convention in the form of the size of the context to allocate + /// and the relative address of the function to call with that allocated + /// context. + /// The pointer is a SILFunction*. + AsyncFunctionPointer, }; friend struct llvm::DenseMapInfo; @@ -418,7 +430,7 @@ class LinkEntity { } static bool isDeclKind(Kind k) { - return k <= Kind::CanonicalPrespecializedGenericTypeCachingOnceToken; + return k <= Kind::AsyncFunctionPointerAST; } static bool isTypeKind(Kind k) { return k >= Kind::ProtocolWitnessTableLazyAccessFunction; @@ -1088,6 +1100,21 @@ class LinkEntity { return entity; } + static LinkEntity forAsyncFunctionPointer(SILFunction *silFunction) { + LinkEntity entity; + entity.Pointer = silFunction; + entity.SecondaryPointer = nullptr; + entity.Data = LINKENTITY_SET_FIELD( + Kind, unsigned(LinkEntity::Kind::AsyncFunctionPointer)); + return entity; + } + + static LinkEntity forAsyncFunctionPointer(AbstractFunctionDecl *decl) { + LinkEntity entity; + entity.setForDecl(Kind::AsyncFunctionPointerAST, decl); + return entity; + } + void mangle(llvm::raw_ostream &out) const; void mangle(SmallVectorImpl &buffer) const; std::string mangleAsString() const; @@ -1110,14 +1137,15 @@ class LinkEntity { } bool hasSILFunction() const { - return getKind() == Kind::SILFunction || + return getKind() == Kind::AsyncFunctionPointer || getKind() == Kind::DynamicallyReplaceableFunctionVariable || - getKind() == 
Kind::DynamicallyReplaceableFunctionKey; + getKind() == Kind::DynamicallyReplaceableFunctionKey || + getKind() == Kind::SILFunction; } SILFunction *getSILFunction() const { assert(hasSILFunction()); - return reinterpret_cast(Pointer); + return reinterpret_cast(Pointer); } SILGlobalVariable *getSILGlobalVariable() const { diff --git a/lib/IRGen/Linking.cpp b/lib/IRGen/Linking.cpp index 817bb82d81c50..8ca70b747fe53 100644 --- a/lib/IRGen/Linking.cpp +++ b/lib/IRGen/Linking.cpp @@ -430,6 +430,17 @@ std::string LinkEntity::mangleAsString() const { return mangler.mangleSILDifferentiabilityWitnessKey( {getSILDifferentiabilityWitness()->getOriginalFunction()->getName(), getSILDifferentiabilityWitness()->getConfig()}); + case Kind::AsyncFunctionPointer: { + std::string Result(getSILFunction()->getName()); + Result.append("AD"); + return Result; + } + case Kind::AsyncFunctionPointerAST: { + std::string Result; + Result = mangler.mangleEntity(getDecl()); + Result.append("AD"); + return Result; + } } llvm_unreachable("bad entity kind!"); } @@ -663,9 +674,13 @@ SILLinkage LinkEntity::getLinkage(ForDefinition_t forDefinition) const { case Kind::DynamicallyReplaceableFunctionKey: return getSILFunction()->getLinkage(); + case Kind::AsyncFunctionPointer: case Kind::SILFunction: return getSILFunction()->getEffectiveSymbolLinkage(); + case Kind::AsyncFunctionPointerAST: + return getSILLinkage(getDeclLinkage(getDecl()), forDefinition); + case Kind::DynamicallyReplaceableFunctionImpl: case Kind::DynamicallyReplaceableFunctionKeyAST: return getSILLinkage(getDeclLinkage(getDecl()), forDefinition); @@ -712,6 +727,8 @@ bool LinkEntity::isContextDescriptor() const { case Kind::ProtocolDescriptor: case Kind::OpaqueTypeDescriptor: return true; + case Kind::AsyncFunctionPointer: + case Kind::AsyncFunctionPointerAST: case Kind::PropertyDescriptor: case Kind::DispatchThunk: case Kind::DispatchThunkInitializer: @@ -780,6 +797,8 @@ bool LinkEntity::isContextDescriptor() const { llvm::Type 
*LinkEntity::getDefaultDeclarationType(IRGenModule &IGM) const { switch (getKind()) { + case Kind::AsyncFunctionPointer: + return IGM.AsyncFunctionPointerTy; case Kind::ModuleDescriptor: case Kind::ExtensionDescriptor: case Kind::AnonymousDescriptor: @@ -909,6 +928,7 @@ Alignment LinkEntity::getAlignment(IRGenModule &IGM) const { case Kind::MethodDescriptorAllocator: case Kind::OpaqueTypeDescriptor: return Alignment(4); + case Kind::AsyncFunctionPointer: case Kind::ObjCClassRef: case Kind::ObjCClass: case Kind::TypeMetadataLazyCacheVariable: @@ -951,6 +971,7 @@ bool LinkEntity::isWeakImported(ModuleDecl *module) const { return getSILGlobalVariable()->getDecl()->isWeakImported(module); } return false; + case Kind::AsyncFunctionPointer: case Kind::DynamicallyReplaceableFunctionKey: case Kind::DynamicallyReplaceableFunctionVariable: case Kind::SILFunction: { @@ -977,6 +998,7 @@ bool LinkEntity::isWeakImported(ModuleDecl *module) const { return false; } + case Kind::AsyncFunctionPointerAST: case Kind::DispatchThunk: case Kind::DispatchThunkInitializer: case Kind::DispatchThunkAllocator: @@ -1053,6 +1075,7 @@ bool LinkEntity::isWeakImported(ModuleDecl *module) const { DeclContext *LinkEntity::getDeclContextForEmission() const { switch (getKind()) { + case Kind::AsyncFunctionPointerAST: case Kind::DispatchThunk: case Kind::DispatchThunkInitializer: case Kind::DispatchThunkAllocator: @@ -1095,6 +1118,7 @@ DeclContext *LinkEntity::getDeclContextForEmission() const { case Kind::CanonicalSpecializedGenericSwiftMetaclassStub: return getType()->getClassOrBoundGenericClass()->getDeclContext(); + case Kind::AsyncFunctionPointer: case Kind::SILFunction: case Kind::DynamicallyReplaceableFunctionVariable: case Kind::DynamicallyReplaceableFunctionKey: From ed9efa2a8965b919d46bc47d6e041ec0f843d4c8 Mon Sep 17 00:00:00 2001 From: Nate Chandler Date: Mon, 9 Nov 2020 17:35:04 -0800 Subject: [PATCH 31/75] [TBDGen] Add async function pointers. 
In order to call async functions, instances of the AsyncFunctionPointer struct must be used. If those functions are exported from a module, the AsyncFunctionPointer by means of which the function is to be called must be exported as well. For now, the symbol is exported by manually appending the relevant suffix to the mangled name of the function. --- lib/TBDGen/TBDGen.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/TBDGen/TBDGen.cpp b/lib/TBDGen/TBDGen.cpp index fcbc6e270e036..816af58f6e579 100644 --- a/lib/TBDGen/TBDGen.cpp +++ b/lib/TBDGen/TBDGen.cpp @@ -720,6 +720,10 @@ void TBDGenVisitor::visitAbstractFunctionDecl(AbstractFunctionDecl *AFD) { AFD->getGenericSignature())); visitDefaultArguments(AFD, AFD->getParameters()); + + if (AFD->isAsyncContext()) { + addSymbol(LinkEntity::forAsyncFunctionPointer(AFD)); + } } void TBDGenVisitor::visitFuncDecl(FuncDecl *FD) { From 2d21932672bfe5654bc17be31756bc0ec6453f9d Mon Sep 17 00:00:00 2001 From: Nate Chandler Date: Wed, 11 Nov 2020 14:49:28 -0800 Subject: [PATCH 32/75] [Async CC] Add constant "pointer" for async func. An AsyncFunctionPointer, defined in Task.h, is a struct consisting of two i32s: (1) the relative address of the async function and (2) the size of the async context to be allocated when calling that function. Here, such structs are emitted for every async SILFunction that is emitted. 
--- lib/IRGen/CallEmission.h | 1 + lib/IRGen/Callee.h | 54 +++- lib/IRGen/GenCall.cpp | 302 ++++++++++++------ lib/IRGen/GenCall.h | 20 +- lib/IRGen/GenClass.cpp | 4 +- lib/IRGen/GenDecl.cpp | 20 +- lib/IRGen/GenFunc.cpp | 61 ++-- lib/IRGen/GenFunc.h | 1 - lib/IRGen/GenKeyPath.cpp | 2 +- lib/IRGen/GenMeta.cpp | 13 + lib/IRGen/GenMeta.h | 3 + lib/IRGen/GenObjC.cpp | 3 +- lib/IRGen/GenOpaque.cpp | 8 +- lib/IRGen/GenPointerAuth.cpp | 6 +- lib/IRGen/GenProto.cpp | 2 +- lib/IRGen/GenThunk.cpp | 31 +- lib/IRGen/IRGenFunction.cpp | 18 ++ lib/IRGen/IRGenFunction.h | 4 +- lib/IRGen/IRGenModule.cpp | 3 + lib/IRGen/IRGenModule.h | 7 + lib/IRGen/IRGenSIL.cpp | 52 +-- .../run-call-classinstance-int64-to-void.sil | 1 + .../run-call-classinstance-void-to-void.sil | 1 + .../async/run-call-existential-to-void.sil | 1 + .../async/run-call-generic-to-generic.sil | 1 + test/IRGen/async/run-call-generic-to-void.sil | 1 + .../run-call-genericEquatable-x2-to-bool.sil | 1 + .../run-call-int64-and-int64-to-void.sil | 1 + test/IRGen/async/run-call-int64-to-void.sil | 1 + ...otocolextension_instance-void-to-int64.sil | 1 + ...protocolwitness_instance-void-to-int64.sil | 1 + .../run-call-structinstance-int64-to-void.sil | 1 + .../run-call-void-throws-to-int-throwing.sil | 1 + ...ing_call-async-nothrow_call-sync-throw.sil | 1 + ...hrows-to-int-throwing_call-async-throw.sil | 1 + ...ing_call-sync-nothrow_call-async-throw.sil | 1 + ...throws-to-int-throwing_call-sync-throw.sil | 1 + .../async/run-call-void-to-existential.sil | 1 + .../run-call-void-to-int64-and-int64.sil | 1 + test/IRGen/async/run-call-void-to-int64.sil | 1 + .../async/run-call-void-to-struct_large.sil | 1 + ..._instance-generic-to-int64-and-generic.sil | 1 + ...protocolwitness_instance-void-to-int64.sil | 3 + ...run-partialapply-capture-class-to-void.sil | 1 + ...nout-generic-and-in-generic-to-generic.sil | 1 + ...tialapply-capture-int64-int64-to-int64.sil | 1 + ...tance_classinstance-and-int64-to-int64.sil | 1 + 
...eric_classinstance_to_struct_and_error.sil | 3 + ...eneric_polymorphic_constrained-to-void.sil | 1 + 49 files changed, 479 insertions(+), 168 deletions(-) diff --git a/lib/IRGen/CallEmission.h b/lib/IRGen/CallEmission.h index dcab4708bb933..328a57d116e13 100644 --- a/lib/IRGen/CallEmission.h +++ b/lib/IRGen/CallEmission.h @@ -67,6 +67,7 @@ class CallEmission { void emitToUnmappedExplosion(Explosion &out); virtual void emitCallToUnmappedExplosion(llvm::CallInst *call, Explosion &out) = 0; void emitYieldsToExplosion(Explosion &out); + virtual FunctionPointer getCalleeFunctionPointer() = 0; llvm::CallInst *emitCallSite(); CallEmission(IRGenFunction &IGF, llvm::Value *selfValue, Callee &&callee) diff --git a/lib/IRGen/Callee.h b/lib/IRGen/Callee.h index 5d134d4ac205d..0f3af50a7e58b 100644 --- a/lib/IRGen/Callee.h +++ b/lib/IRGen/Callee.h @@ -124,7 +124,31 @@ namespace irgen { /// A function pointer value. class FunctionPointer { - /// The actual function pointer. + public: + struct KindTy { + enum class Value { + Function, + AsyncFunctionPointer, + }; + static const Value Function = Value::Function; + static const Value AsyncFunctionPointer = Value::AsyncFunctionPointer; + Value value; + KindTy(Value value) : value(value) {} + KindTy(CanSILFunctionType fnType) + : value(fnType->isAsync() ? Value::AsyncFunctionPointer + : Value::Function) {} + friend bool operator==(const KindTy &lhs, const KindTy &rhs) { + return lhs.value == rhs.value; + } + friend bool operator!=(const KindTy &lhs, const KindTy &rhs) { + return !(lhs == rhs); + } + }; + + private: + KindTy Kind; + + /// The actual pointer, either to the function or to its descriptor. llvm::Value *Value; PointerAuthInfo AuthInfo; @@ -135,25 +159,27 @@ namespace irgen { /// Construct a FunctionPointer for an arbitrary pointer value. /// We may add more arguments to this; try to use the other /// constructors/factories if possible. 
- explicit FunctionPointer(llvm::Value *value, PointerAuthInfo authInfo, + explicit FunctionPointer(KindTy kind, llvm::Value *value, + PointerAuthInfo authInfo, const Signature &signature) - : Value(value), AuthInfo(authInfo), Sig(signature) { + : Kind(kind), Value(value), AuthInfo(authInfo), Sig(signature) { // The function pointer should have function type. assert(value->getType()->getPointerElementType()->isFunctionTy()); // TODO: maybe assert similarity to signature.getType()? } // Temporary only! - explicit FunctionPointer(llvm::Value *value, const Signature &signature) - : FunctionPointer(value, PointerAuthInfo(), signature) {} + explicit FunctionPointer(KindTy kind, llvm::Value *value, + const Signature &signature) + : FunctionPointer(kind, value, PointerAuthInfo(), signature) {} static FunctionPointer forDirect(IRGenModule &IGM, llvm::Constant *value, CanSILFunctionType fnType); - static FunctionPointer forDirect(llvm::Constant *value, + static FunctionPointer forDirect(KindTy kind, llvm::Constant *value, const Signature &signature) { - return FunctionPointer(value, PointerAuthInfo(), signature); + return FunctionPointer(kind, value, PointerAuthInfo(), signature); } static FunctionPointer forExplosionValue(IRGenFunction &IGF, @@ -166,8 +192,17 @@ namespace irgen { return (isa(Value) && AuthInfo.isConstant()); } + KindTy getKind() const { return Kind; } + + /// Given that this value is known to have been constructed from a direct + /// function, Return the name of that function. + StringRef getName(IRGenModule &IGM) const; + /// Return the actual function pointer. - llvm::Value *getPointer() const { return Value; } + llvm::Value *getPointer(IRGenFunction &IGF) const; + + /// Return the actual function pointer. + llvm::Value *getRawPointer() const { return Value; } /// Given that this value is known to have been constructed from /// a direct function, return the function pointer. 
@@ -205,6 +240,9 @@ namespace irgen { llvm::Value *getExplosionValue(IRGenFunction &IGF, CanSILFunctionType fnType) const; + + /// Form a FunctionPointer whose KindTy is ::Function. + FunctionPointer getAsFunction(IRGenFunction &IGF) const; }; class Callee { diff --git a/lib/IRGen/GenCall.cpp b/lib/IRGen/GenCall.cpp index 00ef089f937db..99430d504cd69 100644 --- a/lib/IRGen/GenCall.cpp +++ b/lib/IRGen/GenCall.cpp @@ -1838,106 +1838,161 @@ void irgen::extractScalarResults(IRGenFunction &IGF, llvm::Type *bodyType, out.add(returned); } -static void externalizeArguments(IRGenFunction &IGF, const Callee &callee, - Explosion &in, Explosion &out, - TemporarySet &temporaries, bool isOutlined); - -llvm::Value *irgen::getDynamicAsyncContextSize(IRGenFunction &IGF, - AsyncContextLayout layout, - CanSILFunctionType functionType, - llvm::Value *thickContext) { - // TODO: This calculation should be extracted out into a standalone function +std::pair irgen::getAsyncFunctionAndSize( + IRGenFunction &IGF, SILFunctionTypeRepresentation representation, + FunctionPointer functionPointer, llvm::Value *thickContext, + std::pair values) { + assert(values.first || values.second); + bool emitFunction = values.first; + bool emitSize = values.second; + // TODO: This calculation should be extracted out into standalone functions // emitted on-demand per-module to improve codesize. - switch (functionType->getRepresentation()) { + switch (representation) { case SILFunctionTypeRepresentation::Thick: { // If the called function is thick, the size of the called function's - // async context may not be statically knowable. + // async context is not statically knowable. // // Specifically, if the thick function was produced by a partial_apply, // the function which was originally partially applied determines the // size of the needed async context. That original function isn't known // statically. 
The dynamic size is available within the context as an // i32 at the first index: <{ %swift.refcounted*, /*size*/ i32, ... }>. + // In this case, the function pointer is actually a pointer to an llvm + // function. // // On the other hand, if the thick function was produced by a // thin_to_thick_function, then the context will be nullptr. In that - // case, the size of the needed async context is known statically to - // be the size dictated by the function signature. + // case, the dynamic size of the needed async context is available within + // the struct, an AsyncFunctionPointer pointed to by the "function" pointer + // as an i32 at the second index: <{ /*fn rel addr*/ i32, /*size*/ i32 }>. // // We are currently emitting into some basic block. To handle these two // cases, we need to branch based on whether the context is nullptr; each - // branch must then determine the size in the manner appropriate to it. - // Finally, both blocks must join back together to make the call: + // branch must then determine the size and function pointer in the manner + // appropriate to it. Finally, both blocks must join back together to make + // the call: // - // SIL: IR: - // +-----+ +-------------------------+ - // |.....| |%cond = %ctx == nullptr | - // |apply| |br %cond, static, dynamic| - // |.....| +--------/--------------\-+ - // +-----+ / \ - // +-static-------+ +-dynamic----------------------------------------------+ - // |%size = K | |%layout = bitcast %context to <{%swift.context*, i32}>| - // |br join(%size)| |%size_addr = getelementptr %layout, i32 1, i32 0 | - // +-----\--------+ |%size = load %size_addr | - // \ |br join(%size) | - // \ +------------------------------------------------------+ - // \ / - // +-join(%size)-----------------------------------------------------------+ - // |%dataAddr = swift_taskAlloc(%task, %size) | - // |%async_context = bitcast %dataAddr to ASYNC_CONTEXT(static_callee_type)| - // |... 
// populate the fields %context with arguments | - // |call %callee(%async_context, %context) | - // +-----------------------------------------------------------------------+ - auto *staticSizeBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext()); - auto *dynamicSizeBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext()); + // +-------------------------+ + // |%cond = %ctx == nullptr | + // +----------------|br %cond, thin, thick |----------------------+ + // | +-------------------------+ | + // | | + // V | + // +-thin-------------------------------------------+ | + // |%afp = bitcast %fp to %swift.async_func_pointer*| | + // |%size_ptr = getelementptr %afp, i32 0, i32 1 | | + // |%size = load %size_ptr | | + // |%offset_ptr = getelementptr %afp, i32 0, i32 1 | | + // |%offset = load i32 %offset_ptr | | + // |%offset64 = sext %offset to i64 | | + // |%raw_fp = add %offset64, %offset_ptr | | + // |br join(%raw_fp, %size) | | + // +------------------------------------------------+ | + // | | + // | V + // | +-thick--------------------------------------------+ + // | |%layout = bitcast %ctx to <{%swift.context*, i32}>| + // | |%size_addr = getelementptr %layout, i32 0, i32 1 | + // | |%size = load %size_addr | + // | |br join(%fp, %size) | + // | +---/----------------------------------------------+ + // | / + // | / + // V V + // +-join(%fn, %size)------------------------------------------------------+ + // |%dataAddr = swift_taskAlloc(%task, %size) | + // |%async_context = bitcast %dataAddr to ASYNC_CONTEXT(static_callee_type)| + // |... 
// populate the fields %ctx with arguments | + // |call %fn(%async_context, %ctx) | + // +-----------------------------------------------------------------------+ + auto *thinBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext()); + auto *thickBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext()); auto *joinBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext()); auto hasThickContext = IGF.Builder.CreateICmpNE(thickContext, IGF.IGM.RefCountedNull); - IGF.Builder.CreateCondBr(hasThickContext, dynamicSizeBlock, - staticSizeBlock); - - SmallVector, 2> phiValues; - { - IGF.Builder.emitBlock(staticSizeBlock); - auto size = getAsyncContextSize(layout); - auto *sizeValue = - llvm::ConstantInt::get(IGF.IGM.Int32Ty, size.getValue()); - phiValues.push_back({staticSizeBlock, sizeValue}); + IGF.Builder.CreateCondBr(hasThickContext, thickBlock, thinBlock); + + SmallVector, 2> fnPhiValues; + SmallVector, 2> sizePhiValues; + { // thin + IGF.Builder.emitBlock(thinBlock); + if (emitFunction) { + auto *uncastFnPtr = functionPointer.getPointer(IGF); + auto *fnPtr = IGF.Builder.CreateBitCast(uncastFnPtr, IGF.IGM.Int8PtrTy); + fnPhiValues.push_back({thinBlock, fnPtr}); + } + if (emitSize) { + auto *ptr = functionPointer.getRawPointer(); + auto *descriptorPtr = + IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy); + auto *sizePtr = IGF.Builder.CreateStructGEP(descriptorPtr, 1); + auto *size = + IGF.Builder.CreateLoad(sizePtr, IGF.IGM.getPointerAlignment()); + sizePhiValues.push_back({thinBlock, size}); + } IGF.Builder.CreateBr(joinBlock); } - { - IGF.Builder.emitBlock(dynamicSizeBlock); - SmallVector argTypeInfos; - SmallVector argValTypes; - auto int32ASTType = - BuiltinIntegerType::get(32, IGF.IGM.IRGen.SIL.getASTContext()) - ->getCanonicalType(); - auto int32SILType = SILType::getPrimitiveObjectType(int32ASTType); - const TypeInfo &int32TI = IGF.IGM.getTypeInfo(int32SILType); - argValTypes.push_back(int32SILType); - argTypeInfos.push_back(&int32TI); - 
HeapLayout layout(IGF.IGM, LayoutStrategy::Optimal, argValTypes, - argTypeInfos, - /*typeToFill*/ nullptr, NecessaryBindings()); - auto castThickContext = - layout.emitCastTo(IGF, thickContext, "context.prefix"); - auto sizeLayout = layout.getElement(0); - auto sizeAddr = sizeLayout.project(IGF, castThickContext, - /*NonFixedOffsets*/ llvm::None); - auto *sizeValue = IGF.Builder.CreateLoad(sizeAddr); - phiValues.push_back({dynamicSizeBlock, sizeValue}); + { // thick + IGF.Builder.emitBlock(thickBlock); + if (emitFunction) { + auto *uncastFnPtr = functionPointer.getRawPointer(); + auto *fnPtr = IGF.Builder.CreateBitCast(uncastFnPtr, IGF.IGM.Int8PtrTy); + fnPhiValues.push_back({thickBlock, fnPtr}); + } + if (emitSize) { + SmallVector argTypeInfos; + SmallVector argValTypes; + auto int32ASTType = + BuiltinIntegerType::get(32, IGF.IGM.IRGen.SIL.getASTContext()) + ->getCanonicalType(); + auto int32SILType = SILType::getPrimitiveObjectType(int32ASTType); + const TypeInfo &int32TI = IGF.IGM.getTypeInfo(int32SILType); + argValTypes.push_back(int32SILType); + argTypeInfos.push_back(&int32TI); + HeapLayout layout(IGF.IGM, LayoutStrategy::Optimal, argValTypes, + argTypeInfos, + /*typeToFill*/ nullptr, NecessaryBindings()); + auto castThickContext = + layout.emitCastTo(IGF, thickContext, "context.prefix"); + auto sizeLayout = layout.getElement(0); + auto sizeAddr = sizeLayout.project(IGF, castThickContext, + /*NonFixedOffsets*/ llvm::None); + auto *sizeValue = IGF.Builder.CreateLoad(sizeAddr); + sizePhiValues.push_back({thickBlock, sizeValue}); + } IGF.Builder.CreateBr(joinBlock); } - { + { // join IGF.Builder.emitBlock(joinBlock); - auto *phi = IGF.Builder.CreatePHI(IGF.IGM.Int32Ty, phiValues.size()); - for (auto &entry : phiValues) { - phi->addIncoming(entry.second, entry.first); + llvm::Value *fn = nullptr; + llvm::PHINode *fnPhi = nullptr; + llvm::PHINode *sizePhi = nullptr; + if (emitFunction) { + fnPhi = IGF.Builder.CreatePHI(IGF.IGM.Int8PtrTy, fnPhiValues.size()); + } + 
if (emitSize) { + sizePhi = IGF.Builder.CreatePHI(IGF.IGM.Int32Ty, sizePhiValues.size()); + } + if (emitFunction) { + assert(fnPhi); + for (auto &entry : fnPhiValues) { + fnPhi->addIncoming(entry.second, entry.first); + } + fn = IGF.Builder.CreateBitCast( + fnPhi, functionPointer.getFunctionType()->getPointerTo()); + } + llvm::Value *size = nullptr; + if (emitSize) { + assert(sizePhi); + for (auto &entry : sizePhiValues) { + sizePhi->addIncoming(entry.second, entry.first); + } + size = sizePhi; } - return phi; + return {fn, size}; } } case SILFunctionTypeRepresentation::Thin: @@ -1947,13 +2002,27 @@ llvm::Value *irgen::getDynamicAsyncContextSize(IRGenFunction &IGF, case SILFunctionTypeRepresentation::WitnessMethod: case SILFunctionTypeRepresentation::Closure: case SILFunctionTypeRepresentation::Block: { - auto size = getAsyncContextSize(layout); - auto *sizeValue = llvm::ConstantInt::get(IGF.IGM.Int32Ty, size.getValue()); - return sizeValue; + llvm::Value *fn = nullptr; + if (emitFunction) { + fn = functionPointer.getPointer(IGF); + } + llvm::Value *size = nullptr; + if (emitSize) { + auto *ptr = functionPointer.getRawPointer(); + auto *descriptorPtr = + IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy); + auto *sizePtr = IGF.Builder.CreateStructGEP(descriptorPtr, 1); + size = IGF.Builder.CreateLoad(sizePtr, IGF.IGM.getPointerAlignment()); + } + return {fn, size}; } } } +static void externalizeArguments(IRGenFunction &IGF, const Callee &callee, + Explosion &in, Explosion &out, + TemporarySet &temporaries, bool isOutlined); + namespace { class SyncCallEmission final : public CallEmission { @@ -1965,6 +2034,9 @@ class SyncCallEmission final : public CallEmission { setFromCallee(); } + FunctionPointer getCalleeFunctionPointer() override { + return getCallee().getFunctionPointer().getAsFunction(IGF); + } SILType getParameterType(unsigned index) override { SILFunctionConventions origConv(getCallee().getOrigFunctionType(), IGF.getSILModule()); @@ -2164,6 
+2236,7 @@ class AsyncCallEmission final : public CallEmission { Address contextBuffer; Size contextSize; Address context; + llvm::Value *calleeFunction = nullptr; llvm::Value *thickContext = nullptr; Optional asyncContextLayout; @@ -2199,8 +2272,10 @@ class AsyncCallEmission final : public CallEmission { assert(!context.isValid()); auto layout = getAsyncContextLayout(); // Allocate space for the async arguments. - auto *dynamicContextSize32 = getDynamicAsyncContextSize( - IGF, layout, CurCallee.getOrigFunctionType(), thickContext); + llvm::Value *dynamicContextSize32; + std::tie(calleeFunction, dynamicContextSize32) = getAsyncFunctionAndSize( + IGF, CurCallee.getOrigFunctionType()->getRepresentation(), + CurCallee.getFunctionPointer(), thickContext); auto *dynamicContextSize = IGF.Builder.CreateZExt(dynamicContextSize32, IGF.IGM.SizeTy); std::tie(contextBuffer, contextSize) = emitAllocAsyncContext( @@ -2224,6 +2299,11 @@ class AsyncCallEmission final : public CallEmission { super::setFromCallee(); thickContext = CurCallee.getSwiftContext(); } + FunctionPointer getCalleeFunctionPointer() override { + return FunctionPointer( + FunctionPointer::KindTy::Function, calleeFunction, PointerAuthInfo(), + IGF.IGM.getSignature(getCallee().getSubstFunctionType())); + } SILType getParameterType(unsigned index) override { return getAsyncContextLayout().getParameterType(index); } @@ -2369,7 +2449,8 @@ llvm::CallInst *CallEmission::emitCallSite() { EmittedCall = true; // Make the call and clear the arguments array. 
- const auto &fn = getCallee().getFunctionPointer(); + FunctionPointer fn = getCalleeFunctionPointer(); + assert(fn.getKind() == FunctionPointer::KindTy::Function); auto fnTy = fn.getFunctionType(); // Coerce argument types for those cases where the IR type required @@ -2427,6 +2508,7 @@ llvm::CallInst *CallEmission::emitCallSite() { llvm::CallInst *IRBuilder::CreateCall(const FunctionPointer &fn, ArrayRef args) { + assert(fn.getKind() == FunctionPointer::KindTy::Function); SmallVector bundles; // Add a pointer-auth bundle if necessary. @@ -2437,11 +2519,11 @@ llvm::CallInst *IRBuilder::CreateCall(const FunctionPointer &fn, bundles.emplace_back("ptrauth", bundleArgs); } - assert(!isTrapIntrinsic(fn.getPointer()) && "Use CreateNonMergeableTrap"); + assert(!isTrapIntrinsic(fn.getRawPointer()) && "Use CreateNonMergeableTrap"); llvm::CallInst *call = IRBuilderBase::CreateCall( cast( - fn.getPointer()->getType()->getPointerElementType()), - fn.getPointer(), args, bundles); + fn.getRawPointer()->getType()->getPointerElementType()), + fn.getRawPointer(), args, bundles); call->setAttributes(fn.getAttributes()); call->setCallingConv(fn.getCallingConv()); return call; @@ -4275,7 +4357,7 @@ void IRGenFunction::emitScalarReturn(SILType returnResultType, /// Modify the given variable to hold a pointer whose type is the /// LLVM lowering of the given function type, and return the signature /// for the type. -static Signature emitCastOfFunctionPointer(IRGenFunction &IGF, +Signature irgen::emitCastOfFunctionPointer(IRGenFunction &IGF, llvm::Value *&fnPtr, CanSILFunctionType fnType) { // Figure out the function type. 
@@ -4309,7 +4391,8 @@ Callee irgen::getBlockPointerCallee(IRGenFunction &IGF, invokeFnPtrAddr.getAddress(), info.OrigFnType); - FunctionPointer fn(invokeFnPtr, authInfo, sig); + FunctionPointer fn(FunctionPointer::KindTy::Function, invokeFnPtr, authInfo, + sig); return Callee(std::move(info), fn, blockPtr); } @@ -4321,7 +4404,7 @@ Callee irgen::getSwiftFunctionPointerCallee( auto authInfo = PointerAuthInfo::forFunctionPointer(IGF.IGM, calleeInfo.OrigFnType); - FunctionPointer fn(fnPtr, authInfo, sig); + FunctionPointer fn(calleeInfo.OrigFnType, fnPtr, authInfo, sig); if (castOpaqueToRefcountedContext) { assert(dataPtr && dataPtr->getType() == IGF.IGM.OpaquePtrTy && "Expecting trivial closure context"); @@ -4337,32 +4420,59 @@ Callee irgen::getCFunctionPointerCallee(IRGenFunction &IGF, auto authInfo = PointerAuthInfo::forFunctionPointer(IGF.IGM, calleeInfo.OrigFnType); - FunctionPointer fn(fnPtr, authInfo, sig); + FunctionPointer fn(FunctionPointer::KindTy::Function, fnPtr, authInfo, sig); return Callee(std::move(calleeInfo), fn); } -FunctionPointer -FunctionPointer::forDirect(IRGenModule &IGM, llvm::Constant *fnPtr, - CanSILFunctionType fnType) { - return forDirect(fnPtr, IGM.getSignature(fnType)); +FunctionPointer FunctionPointer::forDirect(IRGenModule &IGM, + llvm::Constant *fnPtr, + CanSILFunctionType fnType) { + return forDirect(fnType, fnPtr, IGM.getSignature(fnType)); } -FunctionPointer -FunctionPointer::forExplosionValue(IRGenFunction &IGF, llvm::Value *fnPtr, - CanSILFunctionType fnType) { +StringRef FunctionPointer::getName(IRGenModule &IGM) const { + assert(isConstant()); + switch (Kind.value) { + case KindTy::Value::Function: + return getRawPointer()->getName(); + case KindTy::Value::AsyncFunctionPointer: + return IGM + .getSILFunctionForAsyncFunctionPointer( + cast(getDirectPointer()->getOperand(0))) + ->getName(); + } +} + +llvm::Value *FunctionPointer::getPointer(IRGenFunction &IGF) const { + switch (Kind.value) { + case KindTy::Value::Function: + 
return Value; + case KindTy::Value::AsyncFunctionPointer: + auto *descriptorPtr = + IGF.Builder.CreateBitCast(Value, IGF.IGM.AsyncFunctionPointerPtrTy); + auto *addrPtr = IGF.Builder.CreateStructGEP(descriptorPtr, 0); + return IGF.emitLoadOfRelativePointer( + Address(addrPtr, IGF.IGM.getPointerAlignment()), /*isFar*/ false, + /*expectedType*/ getFunctionType()->getPointerTo()); + } +} + +FunctionPointer FunctionPointer::forExplosionValue(IRGenFunction &IGF, + llvm::Value *fnPtr, + CanSILFunctionType fnType) { // Bitcast out of an opaque pointer type. assert(fnPtr->getType() == IGF.IGM.Int8PtrTy); auto sig = emitCastOfFunctionPointer(IGF, fnPtr, fnType); auto authInfo = PointerAuthInfo::forFunctionPointer(IGF.IGM, fnType); - return FunctionPointer(fnPtr, authInfo, sig); + return FunctionPointer(fnType, fnPtr, authInfo, sig); } llvm::Value * FunctionPointer::getExplosionValue(IRGenFunction &IGF, CanSILFunctionType fnType) const { - llvm::Value *fnPtr = getPointer(); + llvm::Value *fnPtr = getRawPointer(); // Re-sign to the appropriate schema for this function pointer type. auto resultAuthInfo = PointerAuthInfo::forFunctionPointer(IGF.IGM, fnType); @@ -4375,3 +4485,7 @@ FunctionPointer::getExplosionValue(IRGenFunction &IGF, return fnPtr; } + +FunctionPointer FunctionPointer::getAsFunction(IRGenFunction &IGF) const { + return FunctionPointer(KindTy::Function, getPointer(IGF), AuthInfo, Sig); +} diff --git a/lib/IRGen/GenCall.h b/lib/IRGen/GenCall.h index f81bde36067fa..0893c1cb35676 100644 --- a/lib/IRGen/GenCall.h +++ b/lib/IRGen/GenCall.h @@ -302,13 +302,25 @@ namespace irgen { CanSILFunctionType substitutedType, SubstitutionMap substitutionMap); - llvm::Value *getDynamicAsyncContextSize(IRGenFunction &IGF, - AsyncContextLayout layout, - CanSILFunctionType functionType, - llvm::Value *thickContext); + /// Given an async function, get the pointer to the function to be called and + /// the size of the context to be allocated. 
+ /// + /// \param values Whether any code should be emitted to retrieve the function + /// pointer and the size, respectively. If false is passed, no + /// code will be emitted to generate that value and null will + /// be returned for it. + /// + /// \return {function, size} + std::pair getAsyncFunctionAndSize( + IRGenFunction &IGF, SILFunctionTypeRepresentation representation, + FunctionPointer functionPointer, llvm::Value *thickContext, + std::pair values = {true, true}); llvm::CallingConv::ID expandCallingConv(IRGenModule &IGM, SILFunctionTypeRepresentation convention); + Signature emitCastOfFunctionPointer(IRGenFunction &IGF, llvm::Value *&fnPtr, + CanSILFunctionType fnType); + /// Does the given function have a self parameter that should be given /// the special treatment for self parameters? bool hasSelfContextParameter(CanSILFunctionType fnType); diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp index d03b1ab5a0204..87aaf5fb3f40f 100644 --- a/lib/IRGen/GenClass.cpp +++ b/lib/IRGen/GenClass.cpp @@ -2531,12 +2531,12 @@ FunctionPointer irgen::emitVirtualMethodValue(IRGenFunction &IGF, auto &schema = IGF.getOptions().PointerAuth.SwiftClassMethods; auto authInfo = PointerAuthInfo::emit(IGF, schema, slot.getAddress(), method); - return FunctionPointer(fnPtr, authInfo, signature); + return FunctionPointer(methodType, fnPtr, authInfo, signature); } case ClassMetadataLayout::MethodInfo::Kind::DirectImpl: { auto fnPtr = llvm::ConstantExpr::getBitCast(methodInfo.getDirectImpl(), signature.getType()->getPointerTo()); - return FunctionPointer::forDirect(fnPtr, signature); + return FunctionPointer::forDirect(methodType, fnPtr, signature); } } diff --git a/lib/IRGen/GenDecl.cpp b/lib/IRGen/GenDecl.cpp index 96335b0287d4a..3f1c781443cd2 100644 --- a/lib/IRGen/GenDecl.cpp +++ b/lib/IRGen/GenDecl.cpp @@ -2543,7 +2543,8 @@ void IRGenModule::createReplaceableProlog(IRGenFunction &IGF, SILFunction *f) { LinkEntity::forDynamicallyReplaceableFunctionVariable(f); 
LinkEntity keyEntity = LinkEntity::forDynamicallyReplaceableFunctionKey(f); - Signature signature = getSignature(f->getLoweredFunctionType()); + auto silFunctionType = f->getLoweredFunctionType(); + Signature signature = getSignature(silFunctionType); // Create and initialize the first link entry for the chain of replacements. // The first implementation is initialized with 'implFn'. @@ -2602,9 +2603,9 @@ void IRGenModule::createReplaceableProlog(IRGenFunction &IGF, SILFunction *f) { auto authEntity = PointerAuthEntity(f); auto authInfo = PointerAuthInfo::emit(IGF, schema, fnPtrAddr, authEntity); - auto *Res = IGF.Builder.CreateCall(FunctionPointer(realReplFn, authInfo, - signature), - forwardedArgs); + auto *Res = IGF.Builder.CreateCall( + FunctionPointer(silFunctionType, realReplFn, authInfo, signature), + forwardedArgs); Res->setTailCall(); if (IGF.CurFn->getReturnType()->isVoidTy()) IGF.Builder.CreateRetVoid(); @@ -2657,8 +2658,10 @@ static void emitDynamicallyReplaceableThunk(IRGenModule &IGM, ? 
PointerAuthEntity(keyEntity.getSILFunction()) : PointerAuthEntity::Special::TypeDescriptor; auto authInfo = PointerAuthInfo::emit(IGF, schema, fnPtrAddr, authEntity); - auto *Res = IGF.Builder.CreateCall( - FunctionPointer(typeFnPtr, authInfo, signature), forwardedArgs); + auto *Res = + IGF.Builder.CreateCall(FunctionPointer(FunctionPointer::KindTy::Function, + typeFnPtr, authInfo, signature), + forwardedArgs); Res->setTailCall(); if (implFn->getReturnType()->isVoidTy()) @@ -2730,7 +2733,8 @@ void IRGenModule::emitDynamicReplacementOriginalFunctionThunk(SILFunction *f) { auto entity = LinkEntity::forSILFunction(f, true); - Signature signature = getSignature(f->getLoweredFunctionType()); + auto fnType = f->getLoweredFunctionType(); + Signature signature = getSignature(fnType); addLLVMFunctionAttributes(f, signature); LinkInfo implLink = LinkInfo::get(*this, entity, ForDefinition); @@ -2774,7 +2778,7 @@ void IRGenModule::emitDynamicReplacementOriginalFunctionThunk(SILFunction *f) { IGF, schema, fnPtrAddr, PointerAuthEntity(f->getDynamicallyReplacedFunction())); auto *Res = IGF.Builder.CreateCall( - FunctionPointer(typeFnPtr, authInfo, signature), forwardedArgs); + FunctionPointer(fnType, typeFnPtr, authInfo, signature), forwardedArgs); if (implFn->getReturnType()->isVoidTy()) IGF.Builder.CreateRetVoid(); diff --git a/lib/IRGen/GenFunc.cpp b/lib/IRGen/GenFunc.cpp index 7e1c69a758c24..4facfe6b1eeea 100644 --- a/lib/IRGen/GenFunc.cpp +++ b/lib/IRGen/GenFunc.cpp @@ -1128,7 +1128,23 @@ class AsyncPartialApplicationForwarderEmission llvm::Value *getContext() override { return heapContextBuffer; } llvm::Value *getDynamicFunctionPointer() override { assert(dynamicFunction && dynamicFunction->pointer); - return dynamicFunction->pointer; + auto *context = dynamicFunction->context; + if (!context) { + return dynamicFunction->pointer; + } + auto *rawFunction = subIGF.Builder.CreateBitCast( + dynamicFunction->pointer, origSig.getType()->getPointerTo()); + auto authInfo = 
PointerAuthInfo::forFunctionPointer(IGM, origType); + auto functionPointer = + FunctionPointer(FunctionPointer::KindTy::AsyncFunctionPointer, + rawFunction, authInfo, origSig); + llvm::Value *size = nullptr; + llvm::Value *function = nullptr; + std::tie(function, size) = getAsyncFunctionAndSize( + subIGF, origType->getRepresentation(), functionPointer, context, + {/*function*/ true, /*size*/ false}); + assert(size == nullptr); + return function; } llvm::Value *getDynamicFunctionContext() override { assert((dynamicFunction && dynamicFunction->context) || @@ -1236,7 +1252,8 @@ class AsyncPartialApplicationForwarderEmission asyncExplosion.add(dynamicFunction->context); } - return subIGF.Builder.CreateCall(fnPtr, asyncExplosion.claimAll()); + return subIGF.Builder.CreateCall(fnPtr.getAsFunction(subIGF), + asyncExplosion.claimAll()); } void createReturn(llvm::CallInst *call) override { subIGF.Builder.CreateRetVoid(); @@ -1289,7 +1306,7 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, StringRef FnName; if (staticFnPtr) - FnName = staticFnPtr->getPointer()->getName(); + FnName = staticFnPtr->getName(IGM); IRGenMangler Mangler; std::string thunkName = Mangler.manglePartialApplyForwarder(FnName); @@ -1683,10 +1700,10 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, FunctionPointer fnPtr = [&]() -> FunctionPointer { // If we found a function pointer statically, great. 
if (staticFnPtr) { - if (staticFnPtr->getPointer()->getType() != fnTy) { - auto fnPtr = staticFnPtr->getPointer(); + if (staticFnPtr->getPointer(subIGF)->getType() != fnTy) { + auto fnPtr = staticFnPtr->getPointer(subIGF); fnPtr = subIGF.Builder.CreateBitCast(fnPtr, fnTy); - return FunctionPointer(fnPtr, origSig); + return FunctionPointer(origType, fnPtr, origSig); } return *staticFnPtr; } @@ -1706,7 +1723,8 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, lastCapturedFieldPtr, PointerAuthEntity::Special::PartialApplyCapture); - return FunctionPointer(fnPtr, authInfo, origSig); + return FunctionPointer(FunctionPointer::KindTy::Function, fnPtr, authInfo, + origSig); }(); // Derive the context argument if needed. This is either: @@ -1993,7 +2011,7 @@ Optional irgen::emitFunctionPartialApplication( hasSingleSwiftRefcountedContext == Yes && outType->getCalleeConvention() == *singleRefcountedConvention) { assert(args.size() == 1); - auto fnPtr = emitPointerAuthResign(IGF, fn, outAuthInfo).getPointer(); + auto fnPtr = emitPointerAuthResign(IGF, fn, outAuthInfo).getPointer(IGF); fnPtr = IGF.Builder.CreateBitCast(fnPtr, IGF.IGM.Int8PtrTy); out.add(fnPtr); llvm::Value *ctx = args.claimNext(); @@ -2032,8 +2050,13 @@ Optional irgen::emitFunctionPartialApplication( emitPartialApplicationForwarder(IGF.IGM, staticFn, fnContext != nullptr, origSig, origType, substType, outType, subs, nullptr, argConventions); - forwarder = emitPointerAuthSign(IGF, forwarder, outAuthInfo); - forwarder = IGF.Builder.CreateBitCast(forwarder, IGF.IGM.Int8PtrTy); + if (origType->isAsync()) { + llvm_unreachable( + "async functions never have a single refcounted context"); + } else { + forwarder = emitPointerAuthSign(IGF, forwarder, outAuthInfo); + forwarder = IGF.Builder.CreateBitCast(forwarder, IGF.IGM.Int8PtrTy); + } out.add(forwarder); llvm::Value *ctx = args.claimNext(); @@ -2121,9 +2144,10 @@ Optional irgen::emitFunctionPartialApplication( auto schemaAuthInfo = 
PointerAuthInfo::emit(IGF, schema, fieldAddr.getAddress(), PointerAuthEntity::Special::PartialApplyCapture); - fnPtr = emitPointerAuthResign(IGF, fn, schemaAuthInfo).getPointer(); + fnPtr = + emitPointerAuthResign(IGF, fn, schemaAuthInfo).getRawPointer(); } else { - fnPtr = fn.getPointer(); + fnPtr = fn.getRawPointer(); } fnPtr = IGF.Builder.CreateBitCast(fnPtr, IGF.IGM.Int8PtrTy); IGF.Builder.CreateStore(fnPtr, fieldAddr); @@ -2163,16 +2187,9 @@ Optional irgen::emitFunctionPartialApplication( // Create the forwarding stub. auto origSig = IGF.IGM.getSignature(origType); - llvm::Value *forwarder = emitPartialApplicationForwarder(IGF.IGM, - staticFn, - fnContext != nullptr, - origSig, - origType, - substType, - outType, - subs, - &layout, - argConventions); + llvm::Value *forwarder = emitPartialApplicationForwarder( + IGF.IGM, staticFn, fnContext != nullptr, origSig, origType, substType, + outType, subs, &layout, argConventions); forwarder = emitPointerAuthSign(IGF, forwarder, outAuthInfo); forwarder = IGF.Builder.CreateBitCast(forwarder, IGF.IGM.Int8PtrTy); out.add(forwarder); diff --git a/lib/IRGen/GenFunc.h b/lib/IRGen/GenFunc.h index 4c839cb6b509f..7aa69cbfffc69 100644 --- a/lib/IRGen/GenFunc.h +++ b/lib/IRGen/GenFunc.h @@ -55,7 +55,6 @@ namespace irgen { CanSILFunctionType outType, Explosion &out, bool isOutlined); CanType getArgumentLoweringType(CanType type, SILParameterInfo paramInfo, bool isNoEscape); - } // end namespace irgen } // end namespace swift diff --git a/lib/IRGen/GenKeyPath.cpp b/lib/IRGen/GenKeyPath.cpp index a1c0d88161585..9f1a684e3d865 100644 --- a/lib/IRGen/GenKeyPath.cpp +++ b/lib/IRGen/GenKeyPath.cpp @@ -251,7 +251,7 @@ getAccessorForComputedComponent(IRGenModule &IGM, forwardedArgs); } auto fnPtr = FunctionPointer::forDirect(IGM, accessorFn, - accessor->getLoweredFunctionType()); + accessor->getLoweredFunctionType()); auto call = IGF.Builder.CreateCall(fnPtr, forwardedArgs.claimAll()); if (call->getType()->isVoidTy()) diff --git 
a/lib/IRGen/GenMeta.cpp b/lib/IRGen/GenMeta.cpp index b5fb211dfa29f..5f03b3bc9f25b 100644 --- a/lib/IRGen/GenMeta.cpp +++ b/lib/IRGen/GenMeta.cpp @@ -5281,3 +5281,16 @@ bool irgen::methodRequiresReifiedVTableEntry(IRGenModule &IGM, llvm::dbgs() << " can be elided\n"); return false; } + +llvm::GlobalValue *irgen::emitAsyncFunctionPointer(IRGenModule &IGM, + SILFunction *function, + Size size) { + ConstantInitBuilder initBuilder(IGM); + ConstantStructBuilder builder( + initBuilder.beginStruct(IGM.AsyncFunctionPointerTy)); + builder.addRelativeAddress( + IGM.getAddrOfSILFunction(function, NotForDefinition)); + builder.addInt32(size.getValue()); + return cast(IGM.defineAsyncFunctionPointer( + function, builder.finishAndCreateFuture())); +} diff --git a/lib/IRGen/GenMeta.h b/lib/IRGen/GenMeta.h index 837a35db8f7f0..fd51224af8031 100644 --- a/lib/IRGen/GenMeta.h +++ b/lib/IRGen/GenMeta.h @@ -32,6 +32,7 @@ namespace swift { class FileUnit; class FuncDecl; enum class ResilienceExpansion : unsigned; + struct SILDeclRef; class SILType; class VarDecl; enum class SpecialProtocol : uint8_t; @@ -181,6 +182,8 @@ namespace irgen { GenericSignature sig, ArrayRef requirements); + llvm::GlobalValue *emitAsyncFunctionPointer(IRGenModule &IGM, + SILFunction *function, Size size); } // end namespace irgen } // end namespace swift diff --git a/lib/IRGen/GenObjC.cpp b/lib/IRGen/GenObjC.cpp index 0eabfe12660a2..af2ca7f5208e7 100644 --- a/lib/IRGen/GenObjC.cpp +++ b/lib/IRGen/GenObjC.cpp @@ -657,7 +657,8 @@ Callee irgen::getObjCMethodCallee(IRGenFunction &IGF, Selector selector(method); llvm::Value *selectorValue = IGF.emitObjCSelectorRefLoad(selector.str()); - auto fn = FunctionPointer::forDirect(messenger, sig); + auto fn = FunctionPointer::forDirect(FunctionPointer::KindTy::Function, + messenger, sig); return Callee(std::move(info), fn, receiverValue, selectorValue); } diff --git a/lib/IRGen/GenOpaque.cpp b/lib/IRGen/GenOpaque.cpp index 96f09d622a47b..8f1984b65a8c6 100644 --- 
a/lib/IRGen/GenOpaque.cpp +++ b/lib/IRGen/GenOpaque.cpp @@ -431,7 +431,8 @@ static FunctionPointer emitLoadOfValueWitnessFunction(IRGenFunction &IGF, IGF.getOptions().PointerAuth.ValueWitnesses, slot, index); - return FunctionPointer(witness, authInfo, signature); + return FunctionPointer(FunctionPointer::KindTy::Function, witness, authInfo, + signature); } /// Given a type metadata pointer, load one of the function @@ -477,12 +478,13 @@ IRGenFunction::emitValueWitnessFunctionRef(SILType type, assert(discriminator && "no saved discriminator for value witness fn!"); authInfo = PointerAuthInfo(schema.getKey(), discriminator); } - return FunctionPointer(witness, authInfo, signature); + return FunctionPointer(FunctionPointer::KindTy::Function, witness, authInfo, + signature); } auto vwtable = emitValueWitnessTableRef(type, &metadataSlot); auto witness = emitLoadOfValueWitnessFunction(*this, vwtable, index); - setScopedLocalTypeDataForLayout(type, key, witness.getPointer()); + setScopedLocalTypeDataForLayout(type, key, witness.getPointer(*this)); if (auto &authInfo = witness.getAuthInfo()) { setScopedLocalTypeDataForLayout(type, LocalTypeDataKind::forValueWitnessDiscriminator(index), diff --git a/lib/IRGen/GenPointerAuth.cpp b/lib/IRGen/GenPointerAuth.cpp index aeae73999d0d8..6729655f268a0 100644 --- a/lib/IRGen/GenPointerAuth.cpp +++ b/lib/IRGen/GenPointerAuth.cpp @@ -71,9 +71,11 @@ llvm::Value *irgen::emitPointerAuthStrip(IRGenFunction &IGF, FunctionPointer irgen::emitPointerAuthResign(IRGenFunction &IGF, const FunctionPointer &fn, const PointerAuthInfo &newAuthInfo) { - llvm::Value *fnPtr = emitPointerAuthResign(IGF, fn.getPointer(), + // TODO: Handle resigning AsyncFunctionPointers. 
+ assert(fn.getKind().value == FunctionPointer::KindTy::Value::Function); + llvm::Value *fnPtr = emitPointerAuthResign(IGF, fn.getPointer(IGF), fn.getAuthInfo(), newAuthInfo); - return FunctionPointer(fnPtr, newAuthInfo, fn.getSignature()); + return FunctionPointer(fn.getKind(), fnPtr, newAuthInfo, fn.getSignature()); } llvm::Value *irgen::emitPointerAuthResign(IRGenFunction &IGF, diff --git a/lib/IRGen/GenProto.cpp b/lib/IRGen/GenProto.cpp index 28af6c259960b..db3148cfb975e 100644 --- a/lib/IRGen/GenProto.cpp +++ b/lib/IRGen/GenProto.cpp @@ -3341,7 +3341,7 @@ FunctionPointer irgen::emitWitnessMethodValue(IRGenFunction &IGF, auto &schema = IGF.getOptions().PointerAuth.ProtocolWitnesses; auto authInfo = PointerAuthInfo::emit(IGF, schema, slot, member); - return FunctionPointer(witnessFnPtr, authInfo, signature); + return FunctionPointer(fnType, witnessFnPtr, authInfo, signature); } FunctionPointer irgen::emitWitnessMethodValue( diff --git a/lib/IRGen/GenThunk.cpp b/lib/IRGen/GenThunk.cpp index c3bc0844433ce..12a08bbd899b3 100644 --- a/lib/IRGen/GenThunk.cpp +++ b/lib/IRGen/GenThunk.cpp @@ -18,9 +18,10 @@ #include "Callee.h" #include "ClassMetadataVisitor.h" +#include "ConstantBuilder.h" #include "Explosion.h" -#include "GenDecl.h" #include "GenClass.h" +#include "GenDecl.h" #include "GenHeap.h" #include "GenOpaque.h" #include "GenPointerAuth.h" @@ -31,6 +32,7 @@ #include "ProtocolInfo.h" #include "Signature.h" #include "swift/IRGen/Linking.h" +#include "swift/SIL/SILDeclRef.h" #include "llvm/IR/Function.h" using namespace swift; @@ -122,6 +124,33 @@ void IRGenModule::emitDispatchThunk(SILDeclRef declRef) { IGF.Builder.CreateRet(result); } +llvm::Constant * +IRGenModule::getAddrOfAsyncFunctionPointer(SILFunction *function) { + (void)getAddrOfSILFunction(function, NotForDefinition); + auto entity = LinkEntity::forAsyncFunctionPointer(function); + return getAddrOfLLVMVariable(entity, NotForDefinition, DebugTypeInfo()); +} + +llvm::Constant 
*IRGenModule::defineAsyncFunctionPointer(SILFunction *function, + ConstantInit init) { + auto entity = LinkEntity::forAsyncFunctionPointer(function); + auto *var = cast( + getAddrOfLLVMVariable(entity, init, DebugTypeInfo())); + setTrueConstGlobal(var); + return var; +} + +SILFunction * +IRGenModule::getSILFunctionForAsyncFunctionPointer(llvm::Constant *afp) { + for (auto &entry : GlobalVars) { + if (entry.getSecond() == afp) { + auto entity = entry.getFirst(); + return entity.getSILFunction(); + } + } + return nullptr; +} + llvm::GlobalValue *IRGenModule::defineMethodDescriptor(SILDeclRef declRef, NominalTypeDecl *nominalDecl, llvm::Constant *definition) { diff --git a/lib/IRGen/IRGenFunction.cpp b/lib/IRGen/IRGenFunction.cpp index e1bcf0338473a..950d3191d08c7 100644 --- a/lib/IRGen/IRGenFunction.cpp +++ b/lib/IRGen/IRGenFunction.cpp @@ -307,6 +307,24 @@ void IRGenFunction::emitStoreOfRelativeIndirectablePointer(llvm::Value *value, Builder.CreateStore(difference, addr); } +llvm::Value * +IRGenFunction::emitLoadOfRelativePointer(Address addr, bool isFar, + llvm::PointerType *expectedType, + const llvm::Twine &name) { + llvm::Value *value = Builder.CreateLoad(addr); + assert(value->getType() == + (isFar ? 
IGM.FarRelativeAddressTy : IGM.RelativeAddressTy)); + if (!isFar) { + value = Builder.CreateSExt(value, IGM.IntPtrTy); + } + auto *addrInt = Builder.CreatePtrToInt(addr.getAddress(), IGM.IntPtrTy); + auto *uncastPointerInt = Builder.CreateAdd(addrInt, value); + auto *uncastPointer = Builder.CreateIntToPtr(uncastPointerInt, IGM.Int8PtrTy); + auto uncastPointerAddress = Address(uncastPointer, IGM.getPointerAlignment()); + auto pointer = Builder.CreateBitCast(uncastPointerAddress, expectedType); + return pointer.getAddress(); +} + llvm::Value * IRGenFunction::emitLoadOfRelativeIndirectablePointer(Address addr, bool isFar, diff --git a/lib/IRGen/IRGenFunction.h b/lib/IRGen/IRGenFunction.h index 61f43c33993c4..374996eee082e 100644 --- a/lib/IRGen/IRGenFunction.h +++ b/lib/IRGen/IRGenFunction.h @@ -206,7 +206,9 @@ class IRGenFunction { emitLoadOfRelativeIndirectablePointer(Address addr, bool isFar, llvm::PointerType *expectedType, const llvm::Twine &name = ""); - + llvm::Value *emitLoadOfRelativePointer(Address addr, bool isFar, + llvm::PointerType *expectedType, + const llvm::Twine &name = ""); llvm::Value *emitAllocObjectCall(llvm::Value *metadata, llvm::Value *size, llvm::Value *alignMask, diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp index 73fea486e095c..f8aaa060aef21 100644 --- a/lib/IRGen/IRGenModule.cpp +++ b/lib/IRGen/IRGenModule.cpp @@ -590,9 +590,12 @@ IRGenModule::IRGenModule(IRGenerator &irgen, DynamicReplacementKeyTy = createStructType(*this, "swift.dyn_repl_key", {RelativeAddressTy, Int32Ty}); + AsyncFunctionPointerTy = createStructType(*this, "swift.async_func_pointer", + {RelativeAddressTy, Int32Ty}); SwiftContextTy = createStructType(*this, "swift.context", {}); SwiftTaskTy = createStructType(*this, "swift.task", {}); SwiftExecutorTy = createStructType(*this, "swift.executor", {}); + AsyncFunctionPointerPtrTy = AsyncFunctionPointerTy->getPointerTo(DefaultAS); SwiftContextPtrTy = SwiftContextTy->getPointerTo(DefaultAS); SwiftTaskPtrTy 
= SwiftTaskTy->getPointerTo(DefaultAS); SwiftExecutorPtrTy = SwiftExecutorTy->getPointerTo(DefaultAS); diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h index 49cf07b882f5d..b3a41840e387d 100644 --- a/lib/IRGen/IRGenModule.h +++ b/lib/IRGen/IRGenModule.h @@ -723,9 +723,11 @@ class IRGenModule { *DynamicReplacementLinkEntryPtrTy; // %link_entry* llvm::StructType *DynamicReplacementKeyTy; // { i32, i32} + llvm::StructType *AsyncFunctionPointerTy; // { i32, i32 } llvm::StructType *SwiftContextTy; llvm::StructType *SwiftTaskTy; llvm::StructType *SwiftExecutorTy; + llvm::PointerType *AsyncFunctionPointerPtrTy; llvm::PointerType *SwiftContextPtrTy; llvm::PointerType *SwiftTaskPtrTy; llvm::PointerType *SwiftExecutorPtrTy; @@ -1388,6 +1390,11 @@ private: \ /// Cast the given constant to i8*. llvm::Constant *getOpaquePtr(llvm::Constant *pointer); + llvm::Constant *getAddrOfAsyncFunctionPointer(SILFunction *function); + llvm::Constant *defineAsyncFunctionPointer(SILFunction *function, + ConstantInit init); + SILFunction *getSILFunctionForAsyncFunctionPointer(llvm::Constant *afp); + llvm::Function *getAddrOfDispatchThunk(SILDeclRef declRef, ForDefinition_t forDefinition); void emitDispatchThunk(SILDeclRef declRef); diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index 49f108e93d6a2..4fe396e70d562 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -15,6 +15,7 @@ // //===----------------------------------------------------------------------===// +#include "llvm/IR/Constant.h" #define DEBUG_TYPE "irgensil" #include "swift/AST/ASTContext.h" #include "swift/AST/IRGenOptions.h" @@ -70,6 +71,7 @@ #include "GenFunc.h" #include "GenHeap.h" #include "GenIntegerLiteral.h" +#include "GenMeta.h" #include "GenObjC.h" #include "GenOpaque.h" #include "GenPointerAuth.h" @@ -1964,6 +1966,11 @@ void IRGenSILFunction::emitSILFunction() { if (CurSILFn->getDynamicallyReplacedFunction()) IGM.IRGen.addDynamicReplacement(CurSILFn); + auto funcTy = 
CurSILFn->getLoweredFunctionType(); + if (funcTy->isAsync() && funcTy->getLanguage() == SILFunctionLanguage::Swift) + emitAsyncFunctionPointer(IGM, CurSILFn, + getAsyncContextLayout(*this).getSize()); + // Configure the dominance resolver. // TODO: consider re-using a dom analysis from the PassManager // TODO: consider using a cheaper analysis at -O0 @@ -1998,7 +2005,6 @@ void IRGenSILFunction::emitSILFunction() { // Map the LLVM arguments to arguments on the entry point BB. Explosion params = collectParameters(); - auto funcTy = CurSILFn->getLoweredFunctionType(); switch (funcTy->getLanguage()) { case SILFunctionLanguage::Swift: @@ -2282,23 +2288,29 @@ void IRGenSILFunction::visitDifferentiabilityWitnessFunctionInst( diffWitness = Builder.CreateBitCast(diffWitness, signature.getType()->getPointerTo()); - setLoweredFunctionPointer(i, FunctionPointer(diffWitness, signature)); + setLoweredFunctionPointer(i, FunctionPointer(fnType, diffWitness, signature)); } void IRGenSILFunction::visitFunctionRefBaseInst(FunctionRefBaseInst *i) { auto fn = i->getInitiallyReferencedFunction(); + auto fnType = fn->getLoweredFunctionType(); - llvm::Constant *fnPtr = IGM.getAddrOfSILFunction( - fn, NotForDefinition, false /*isDynamicallyReplaceableImplementation*/, - isa(i)); - - auto sig = IGM.getSignature(fn->getLoweredFunctionType()); + auto sig = IGM.getSignature(fnType); // Note that the pointer value returned by getAddrOfSILFunction doesn't // necessarily have element type sig.getType(), e.g. if it's imported. 
+ auto *fnPtr = IGM.getAddrOfSILFunction( + fn, NotForDefinition, false /*isDynamicallyReplaceableImplementation*/, + isa(i)); + llvm::Value *value; + if (fn->isAsync()) { + value = IGM.getAddrOfAsyncFunctionPointer(fn); + value = Builder.CreateBitCast(value, fnPtr->getType()); + } else { + value = fnPtr; + } + FunctionPointer fp = FunctionPointer(fnType, value, sig); - FunctionPointer fp = FunctionPointer::forDirect(fnPtr, sig); - // Store the function as a FunctionPointer so we can avoid bitcasting // or thunking if we don't need to. setLoweredFunctionPointer(i, fp); @@ -3059,16 +3071,18 @@ void IRGenSILFunction::visitPartialApplyInst(swift::PartialApplyInst *i) { Explosion llArgs; + auto &lv = getLoweredValue(i->getCallee()); if (i->getOrigCalleeType()->isAsync()) { auto result = getPartialApplicationFunction(*this, i->getCallee(), i->getSubstitutionMap(), i->getSubstCalleeType()); llvm::Value *innerContext = std::get<1>(result); - auto layout = - getAsyncContextLayout(IGM, i->getOrigCalleeType(), - i->getSubstCalleeType(), i->getSubstitutionMap()); - auto size = getDynamicAsyncContextSize( - *this, layout, i->getOrigCalleeType(), innerContext); + llvm::Value *size; + llvm::Value *fnPtr; + std::tie(fnPtr, size) = getAsyncFunctionAndSize( + *this, i->getOrigCalleeType()->getRepresentation(), std::get<0>(result), + innerContext, {/*function*/ false, /*size*/ true}); + assert(fnPtr == nullptr); llArgs.add(size); } @@ -3084,7 +3098,6 @@ void IRGenSILFunction::visitPartialApplyInst(swift::PartialApplyInst *i) { } } - auto &lv = getLoweredValue(i->getCallee()); if (lv.kind == LoweredValue::Kind::ObjCMethod) { // Objective-C partial applications require a different path. 
There's no // actual function pointer to capture, and we semantically can't cache @@ -3347,7 +3360,8 @@ void IRGenSILFunction::visitEndApply(BeginApplyInst *i, bool isAbort) { auto pointerAuth = PointerAuthInfo::emit(*this, schemaAndEntity.first, coroutine.Buffer.getAddress(), schemaAndEntity.second); - FunctionPointer callee(continuation, pointerAuth, sig); + FunctionPointer callee(i->getOrigCalleeType(), continuation, pointerAuth, + sig); Builder.CreateCall(callee, { coroutine.Buffer.getAddress(), @@ -6086,7 +6100,7 @@ void IRGenSILFunction::visitWitnessMethodInst(swift::WitnessMethodInst *i) { auto fnType = IGM.getSILTypes().getConstantFunctionType( IGM.getMaximalTypeExpansionContext(), member); auto sig = IGM.getSignature(fnType); - auto fn = FunctionPointer::forDirect(fnPtr, sig); + auto fn = FunctionPointer::forDirect(fnType, fnPtr, sig); setLoweredFunctionPointer(i, fn); return; @@ -6250,7 +6264,7 @@ void IRGenSILFunction::visitSuperMethodInst(swift::SuperMethodInst *i) { auto authInfo = PointerAuthInfo::emit(*this, schema, /*storageAddress=*/nullptr, method); - FunctionPointer fn(fnPtr, authInfo, sig); + FunctionPointer fn(methodType, fnPtr, authInfo, sig); setLoweredFunctionPointer(i, fn); return; @@ -6286,7 +6300,7 @@ void IRGenSILFunction::visitClassMethodInst(swift::ClassMethodInst *i) { ResilienceExpansion::Maximal)) { auto *fnPtr = IGM.getAddrOfDispatchThunk(method, NotForDefinition); auto sig = IGM.getSignature(methodType); - FunctionPointer fn(fnPtr, sig); + FunctionPointer fn(methodType, fnPtr, sig); setLoweredFunctionPointer(i, fn); return; diff --git a/test/IRGen/async/run-call-classinstance-int64-to-void.sil b/test/IRGen/async/run-call-classinstance-int64-to-void.sil index bf7be5edc6803..7fb6122b32fe4 100644 --- a/test/IRGen/async/run-call-classinstance-int64-to-void.sil +++ b/test/IRGen/async/run-call-classinstance-int64-to-void.sil @@ -29,6 +29,7 @@ class S { init() } +// CHECK-LL: @classinstanceSInt64ToVoidAD = // CHECK-LL: define hidden 
swiftcc void @classinstanceSInt64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]+}} { sil hidden @classinstanceSInt64ToVoid : $@async @convention(method) (Int64, @guaranteed S) -> () { bb0(%int : $Int64, %instance : $S): diff --git a/test/IRGen/async/run-call-classinstance-void-to-void.sil b/test/IRGen/async/run-call-classinstance-void-to-void.sil index 8b01da80daabc..bf29959314d1c 100644 --- a/test/IRGen/async/run-call-classinstance-void-to-void.sil +++ b/test/IRGen/async/run-call-classinstance-void-to-void.sil @@ -29,6 +29,7 @@ class S { init() } +// CHECK-LL: @classinstanceSVoidToVoidAD = // CHECK-LL: define hidden swiftcc void @classinstanceSVoidToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @classinstanceSVoidToVoid : $@async @convention(method) (@guaranteed S) -> () { bb0(%instance : $S): diff --git a/test/IRGen/async/run-call-existential-to-void.sil b/test/IRGen/async/run-call-existential-to-void.sil index c6e0dea4f7080..50a811d964602 100644 --- a/test/IRGen/async/run-call-existential-to-void.sil +++ b/test/IRGen/async/run-call-existential-to-void.sil @@ -37,6 +37,7 @@ bb0(%int : $Int64, %S_type : $@thin S.Type): return %instance : $S } +// CHECK-LL: @existentialToVoidAD = // CHECK-LL: define hidden swiftcc void @existentialToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @existentialToVoid : $@async @convention(thin) (@in_guaranteed P) -> () { bb0(%existential : $*P): diff --git a/test/IRGen/async/run-call-generic-to-generic.sil b/test/IRGen/async/run-call-generic-to-generic.sil index 610e1187beda4..88ff644dbb489 100644 --- a/test/IRGen/async/run-call-generic-to-generic.sil +++ b/test/IRGen/async/run-call-generic-to-generic.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @genericToGenericAD = // 
CHECK-LL: define hidden swiftcc void @genericToGeneric(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @genericToGeneric : $@async @convention(thin) (@in_guaranteed T) -> @out T { bb0(%out : $*T, %in : $*T): diff --git a/test/IRGen/async/run-call-generic-to-void.sil b/test/IRGen/async/run-call-generic-to-void.sil index fd922c6d49ec6..671a42214d372 100644 --- a/test/IRGen/async/run-call-generic-to-void.sil +++ b/test/IRGen/async/run-call-generic-to-void.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printGeneric : $@convention(thin) (@in_guaranteed T) -> () +// CHECK-LL: @genericToVoidAD = // CHECK-LL: define hidden swiftcc void @genericToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @genericToVoid : $@async @convention(thin) (@in_guaranteed T) -> () { bb0(%instance : $*T): diff --git a/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil b/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil index 27f2a88b4fe53..266f6d2e16665 100644 --- a/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil +++ b/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printBool : $@convention(thin) (Bool) -> () +// CHECK-LL: @genericEquatableAndGenericEquatableToBoolAD = // CHECK-LL: define hidden swiftcc void @genericEquatableAndGenericEquatableToBool(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @genericEquatableAndGenericEquatableToBool : $@async @convention(thin) (@in_guaranteed T, @in_guaranteed T) -> Bool { bb0(%0 : $*T, %1 : $*T): diff --git a/test/IRGen/async/run-call-int64-and-int64-to-void.sil b/test/IRGen/async/run-call-int64-and-int64-to-void.sil index a041807ca2d82..2d895225e29ab 100644 --- a/test/IRGen/async/run-call-int64-and-int64-to-void.sil +++ 
b/test/IRGen/async/run-call-int64-and-int64-to-void.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @int64AndInt64ToVoidAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @int64AndInt64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @int64AndInt64ToVoid : $@async @convention(thin) (Int64, Int64) -> () { entry(%int1: $Int64, %int2: $Int64): diff --git a/test/IRGen/async/run-call-int64-to-void.sil b/test/IRGen/async/run-call-int64-to-void.sil index 91d2f0781ce74..ef4e02751fdbf 100644 --- a/test/IRGen/async/run-call-int64-to-void.sil +++ b/test/IRGen/async/run-call-int64-to-void.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @int64ToVoidAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @int64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @int64ToVoid : $@async @convention(thin) (Int64) -> () { entry(%int: $Int64): diff --git a/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil b/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil index 60a11e3722d5f..dadbc2d0abc22 100644 --- a/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil @@ -33,6 +33,7 @@ struct I : P { init(int: Int64) } +// CHECK-LL: @callPrintMeAD = // CHECK-LL: define hidden swiftcc void @callPrintMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @callPrintMe : $@async @convention(method) (@in_guaranteed Self) -> Int64 { bb0(%self : $*Self): diff --git a/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil b/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil index 
de13f3998036c..ca5ff2bfff557 100644 --- a/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil @@ -29,6 +29,7 @@ struct I : P { init(int: Int64) } +// CHECK-LL: @I_printMeAD = // CHECK-LL-LABEL: define hidden swiftcc void @I_printMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @I_printMe : $@async @convention(method) (I) -> Int64 { bb0(%self : $I): diff --git a/test/IRGen/async/run-call-structinstance-int64-to-void.sil b/test/IRGen/async/run-call-structinstance-int64-to-void.sil index f252e159d2320..7d5572d427dfa 100644 --- a/test/IRGen/async/run-call-structinstance-int64-to-void.sil +++ b/test/IRGen/async/run-call-structinstance-int64-to-void.sil @@ -25,6 +25,7 @@ struct S { init(storage: Int64) } +// CHECK-LL: @structinstanceSInt64ToVoidAD = // CHECK-LL: define hidden swiftcc void @structinstanceSInt64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @structinstanceSInt64ToVoid : $@async @convention(method) (Int64, S) -> () { bb0(%int : $Int64, %self : $S): diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing.sil index f0019bcd371bf..ebdc978c0c257 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing.sil @@ -99,6 +99,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int, @error Error) { bb0: diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil 
b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil index 7b27aba126715..eb54ee4172e9a 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil @@ -98,6 +98,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int64, @error Error) { entry: diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil index f79e6156802d9..767440ed8c143 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil @@ -99,6 +99,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int, @error Error) { bb0: diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil index 75d84bcd93311..0f464ac917f22 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil @@ -99,6 +99,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil 
hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int64, @error Error) { entry: diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil index 56d76d74fd348..be7ead19fd642 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil @@ -100,6 +100,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int, @error Error) { bb0: diff --git a/test/IRGen/async/run-call-void-to-existential.sil b/test/IRGen/async/run-call-void-to-existential.sil index eab63c807d73f..2cb202b69fbdd 100644 --- a/test/IRGen/async/run-call-void-to-existential.sil +++ b/test/IRGen/async/run-call-void-to-existential.sil @@ -36,6 +36,7 @@ bb0(%int : $Int64, %S_type : $@thin S.Type): return %instance : $S } +// CHECK-LL: @voidToExistentialAD = // CHECK-LL: define hidden swiftcc void @voidToExistential(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidToExistential : $@async @convention(thin) () -> @out P { bb0(%out : $*P): diff --git a/test/IRGen/async/run-call-void-to-int64-and-int64.sil b/test/IRGen/async/run-call-void-to-int64-and-int64.sil index 789e9c7a981c3..73f22158b43da 100644 --- a/test/IRGen/async/run-call-void-to-int64-and-int64.sil +++ b/test/IRGen/async/run-call-void-to-int64-and-int64.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @voidToInt64AndInt64AD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @voidToInt64AndInt64(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* 
{{%[0-9]+}}) {{#[0-9]*}} { sil @voidToInt64AndInt64 : $@async @convention(thin) () -> (Int64, Int64) { %int_literal1 = integer_literal $Builtin.Int64, 42 diff --git a/test/IRGen/async/run-call-void-to-int64.sil b/test/IRGen/async/run-call-void-to-int64.sil index a4bc75715b05d..6742f3503e230 100644 --- a/test/IRGen/async/run-call-void-to-int64.sil +++ b/test/IRGen/async/run-call-void-to-int64.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @voidToInt64AD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @voidToInt64(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @voidToInt64 : $@async @convention(thin) () -> (Int64) { %int_literal = integer_literal $Builtin.Int64, 42 diff --git a/test/IRGen/async/run-call-void-to-struct_large.sil b/test/IRGen/async/run-call-void-to-struct_large.sil index cd353eecbca6e..f09501d5351ec 100644 --- a/test/IRGen/async/run-call-void-to-struct_large.sil +++ b/test/IRGen/async/run-call-void-to-struct_large.sil @@ -101,6 +101,7 @@ bb0(%0 : $@thin Big.Type): return %62 : $Big } +// CHECK-LL: @getBigAD = // CHECK-LL: define hidden swiftcc void @getBig(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @getBig : $@async @convention(thin) () -> Big { bb0: diff --git a/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil b/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil index 8e022e4970136..9de0bef122806 100644 --- a/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil +++ b/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil @@ -45,6 +45,7 @@ bb0(%out_addr : $*T, %in_addr : $*T, %self : $I): return %value : $Int64 } +// CHECK-LL: @I_P_printMeAD = // CHECK-LL: define internal swiftcc 
void @I_P_printMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil private [transparent] [thunk] @I_P_printMe : $@convention(witness_method: P) @async <τ_0_0> (@in_guaranteed τ_0_0, @in_guaranteed I) -> (Int64, @out τ_0_0) { bb0(%out_addr : $*τ_0_0, %in_addr : $*τ_0_0, %self_addr : $*I): diff --git a/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil b/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil index eac3f6a7b8b8f..31daa0f5389be 100644 --- a/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil @@ -29,6 +29,9 @@ struct I : P { init(int: Int64) } +// CHECK-LL: @I_printMeAD = +// CHECK-LL: @I_P_printMeAD = + // CHECK-LL-LABEL: define hidden swiftcc void @I_printMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @I_printMe : $@async @convention(method) (I) -> Int64 { bb0(%self : $I): diff --git a/test/IRGen/async/run-partialapply-capture-class-to-void.sil b/test/IRGen/async/run-partialapply-capture-class-to-void.sil index 11e0111f497b8..99ae8da853147 100644 --- a/test/IRGen/async/run-partialapply-capture-class-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-class-to-void.sil @@ -57,6 +57,7 @@ sil_vtable S { #S.deinit!deallocator: @S_deallocating_deinit } +// CHECK-LL: @classinstanceSToVoidAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @classinstanceSToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @classinstanceSToVoid : $@async @convention(thin) (@owned S) -> () { entry(%c : $S): diff --git a/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil b/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil index 
9ff9c9815838e..bc0260d1199ba 100644 --- a/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil +++ b/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil @@ -19,6 +19,7 @@ import _Concurrency sil public_external @printGeneric : $@convention(thin) (@in_guaranteed T) -> () sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @inGenericAndInoutGenericToGenericAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @inGenericAndInoutGenericToGeneric(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { // CHECK-LL: define internal swiftcc void @"$s017inGenericAndInoutb2ToB0TA"(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}, %swift.refcounted* swiftself {{%[0-9]*}}) {{#[0-9]*}} { sil @inGenericAndInoutGenericToGeneric : $@async @convention(thin) (@in T, @inout T) -> @out T { diff --git a/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil b/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil index edf3743511a64..e32330411e218 100644 --- a/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil +++ b/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil @@ -45,6 +45,7 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer