diff --git a/CHANGELOG.md b/CHANGELOG.md
index a97a6a59746ed..2e280fcb3136f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ CHANGELOG
 | Version | Released | Toolchain |
 | :--------------------- | :--------- | :---------- |
+| [Swift 5.4](#swift-54) | | |
 | [Swift 5.3](#swift-53) | 2020-09-16 | Xcode 12.0 |
 | [Swift 5.2](#swift-52) | 2020-03-24 | Xcode 11.4 |
 | [Swift 5.1](#swift-51) | 2019-09-20 | Xcode 11.0 |
@@ -24,8 +25,8 @@ CHANGELOG
 
-Swift Next
-----------
+Swift 5.4
+---------
 
 * [SR-10069][]:
diff --git a/docs/ABI/Mangling.rst b/docs/ABI/Mangling.rst
index 0a78314aa9a8f..9084a31aff7b8 100644
--- a/docs/ABI/Mangling.rst
+++ b/docs/ABI/Mangling.rst
@@ -220,7 +220,8 @@ types where the metadata itself has unknown layout.)
   global ::= entity // some identifiable thing
   global ::= from-type to-type generic-signature? 'TR' // reabstraction thunk
-  global ::= impl-function-type 'Tz' // objc-to-swift-async completion handler block implementation
+  global ::= impl-function-type type 'Tz' // objc-to-swift-async completion handler block implementation
+  global ::= impl-function-type type 'TZ' // objc-to-swift-async completion handler block implementation (predefined by runtime)
   global ::= from-type to-type self-type generic-signature? 'Ty' // reabstraction thunk with dynamic 'Self' capture
   global ::= from-type to-type generic-signature? 'Tr' // obsolete mangling for reabstraction thunk
   global ::= entity generic-signature? type type* 'TK' // key path getter
diff --git a/include/swift/ABI/MetadataKind.def b/include/swift/ABI/MetadataKind.def
index 27d7e93e37c5d..2fe58526ec7a3 100644
--- a/include/swift/ABI/MetadataKind.def
+++ b/include/swift/ABI/MetadataKind.def
@@ -85,8 +85,8 @@ METADATAKIND(HeapGenericLocalVariable,
 METADATAKIND(ErrorObject,
              1 | MetadataKindIsNonType | MetadataKindIsRuntimePrivate)
 
-/// A heap-allocated simple task.
-METADATAKIND(SimpleTask,
+/// A heap-allocated task.
+METADATAKIND(Task,
              2 | MetadataKindIsNonType | MetadataKindIsRuntimePrivate)
 
 // getEnumeratedMetadataKind assumes that all the enumerated values here
diff --git a/include/swift/ABI/Task.h b/include/swift/ABI/Task.h
index 0c5875230f6fd..43cc6df8169f4 100644
--- a/include/swift/ABI/Task.h
+++ b/include/swift/ABI/Task.h
@@ -19,6 +19,7 @@
 #include "swift/Basic/RelativePointer.h"
 #include "swift/ABI/HeapObject.h"
+#include "swift/ABI/Metadata.h"
 #include "swift/ABI/MetadataValues.h"
 #include "swift/Runtime/Config.h"
 #include "swift/Basic/STLExtras.h"
@@ -29,6 +30,8 @@ class AsyncTask;
 class AsyncContext;
 class Executor;
 class Job;
+struct OpaqueValue;
+struct SwiftError;
 class TaskStatusRecord;
 
 /// An ExecutorRef isn't necessarily just a pointer to an executor
@@ -86,6 +89,13 @@ class AsyncFunctionPointer {
 
 /// A schedulable job.
 class alignas(2 * alignof(void*)) Job {
+protected:
+  // Indices into SchedulerPrivate, for use by the runtime.
+  enum {
+    /// The next waiting task link, an AsyncTask that is waiting on a future.
+    NextWaitingTaskIndex = 0,
+  };
+
 public:
   // Reserved for the use of the scheduler.
   void *SchedulerPrivate[2];
@@ -230,19 +240,142 @@ class AsyncTask : public HeapObject, public Job {
     }
   };
 
-  bool isFuture() const { return Flags.task_isFuture(); }
-
   bool hasChildFragment() const { return Flags.task_isChildTask(); }
 
   ChildFragment *childFragment() {
     assert(hasChildFragment());
     return reinterpret_cast<ChildFragment *>(this + 1);
   }
 
-  // TODO: Future fragment
+  class FutureFragment {
+  public:
+    /// Describes the status of the future.
+    ///
+    /// Futures always begin in the "Executing" state, and will always
+    /// make a single state change to either Success or Error.
+    enum class Status : uintptr_t {
+      /// The future is executing or ready to execute. The storage
+      /// is not accessible.
+      Executing = 0,
+
+      /// The future has completed with result (of type \c resultType).
+      Success,
+
+      /// The future has completed by throwing an error (an \c Error
+      /// existential).
+      Error,
+    };
+
+    /// An item within the wait queue, which includes the status and the
+    /// head of the list of tasks.
+    struct WaitQueueItem {
+      /// Mask used for the low status bits in a wait queue item.
+      static const uintptr_t statusMask = 0x03;
+
+      uintptr_t storage;
+
+      Status getStatus() const {
+        return static_cast<Status>(storage & statusMask);
+      }
+
+      AsyncTask *getTask() const {
+        return reinterpret_cast<AsyncTask *>(storage & ~statusMask);
+      }
+
+      static WaitQueueItem get(Status status, AsyncTask *task) {
+        return WaitQueueItem{
+          reinterpret_cast<uintptr_t>(task) | static_cast<uintptr_t>(status)};
+      }
+    };
+
+  private:
+    /// Queue containing all of the tasks that are waiting in `get()`.
+    ///
+    /// The low bits contain the status, the rest of the pointer is the
+    /// AsyncTask.
+    std::atomic<WaitQueueItem> waitQueue;
+
+    /// The type of the result that will be produced by the future.
+    const Metadata *resultType;
+
+    // Trailing storage for the result itself. The storage will be
+    // uninitialized, contain an instance of \c resultType, or contain an
+    // \c Error.
+
+    friend class AsyncTask;
+
+  public:
+    explicit FutureFragment(const Metadata *resultType)
+      : waitQueue(WaitQueueItem::get(Status::Executing, nullptr)),
+        resultType(resultType) { }
+
+    /// Destroy the storage associated with the future.
+    void destroy();
+
+    /// Retrieve a pointer to the storage of the result.
+    OpaqueValue *getStoragePtr() {
+      return reinterpret_cast<OpaqueValue *>(
+          reinterpret_cast<char *>(this) + storageOffset(resultType));
+    }
+
+    /// Retrieve the error.
+    SwiftError *&getError() {
+      return *reinterpret_cast<SwiftError **>(
+          reinterpret_cast<char *>(this) + storageOffset(resultType));
+    }
+
+    /// Compute the offset of the storage from the base of the future
+    /// fragment.
+    static size_t storageOffset(const Metadata *resultType) {
+      size_t offset = sizeof(FutureFragment);
+      size_t alignment =
+          std::max(resultType->vw_alignment(), alignof(SwiftError *));
+      return (offset + alignment - 1) & ~(alignment - 1);
+    }
+
+    /// Determine the size of the future fragment given a particular future
+    /// result type.
+    static size_t fragmentSize(const Metadata *resultType) {
+      return storageOffset(resultType) +
+          std::max(resultType->vw_size(), sizeof(SwiftError *));
+    }
+  };
+
+  bool isFuture() const { return Flags.task_isFuture(); }
+
+  FutureFragment *futureFragment() {
+    assert(isFuture());
+    if (hasChildFragment()) {
+      return reinterpret_cast<FutureFragment *>(
+          reinterpret_cast<ChildFragment *>(this + 1) + 1);
+    }
+
+    return reinterpret_cast<FutureFragment *>(this + 1);
+  }
+
+  /// Wait for this future to complete.
+  ///
+  /// \returns the status of the future. If this result is
+  /// \c Executing, then \c waitingTask has been added to the
+  /// wait queue and will be scheduled when the future completes. Otherwise,
+  /// the future has completed and can be queried.
+  FutureFragment::Status waitFuture(AsyncTask *waitingTask);
+
+  /// Complete this future.
+  ///
+  /// Upon completion, any waiting tasks will be scheduled on the given
+  /// executor.
+  void completeFuture(AsyncContext *context, ExecutorRef executor);
 
   static bool classof(const Job *job) {
     return job->isAsyncTask();
   }
+
+private:
+  /// Access the next waiting task, which establishes a singly linked list of
+  /// tasks that are waiting on a future.
+  AsyncTask *&getNextWaitingTask() {
+    return reinterpret_cast<AsyncTask *&>(
+        SchedulerPrivate[NextWaitingTaskIndex]);
+  }
 };
 
 // The compiler will eventually assume these.
@@ -327,6 +460,20 @@ class YieldingAsyncContext : public AsyncContext {
   }
 };
 
+/// An asynchronous context within a task that describes a general "Future"
+/// task.
+///
+/// This type matches the ABI of a function `() async throws -> T`, which
+/// is the type used by `Task.runDetached` and `Task.group.add` to create
+/// futures.
+class FutureAsyncContext : public AsyncContext {
+public:
+  SwiftError *errorResult = nullptr;
+  OpaqueValue *indirectResult;
+
+  using AsyncContext::AsyncContext;
+};
+
 } // end namespace swift
 
 #endif
diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h
index c9f3fbe06e187..5e536c5aaaea1 100644
--- a/include/swift/AST/ASTContext.h
+++ b/include/swift/AST/ASTContext.h
@@ -640,6 +640,14 @@ class ASTContext final {
   /// if applicable.
   const Decl *getSwiftDeclForExportedClangDecl(const clang::Decl *decl);
 
+  /// General conversion method from Swift types -> Clang types.
+  ///
+  /// HACK: This method is only intended to be called from a specific place in
+  /// IRGen. For converting function types, strongly prefer using one of the
+  /// other methods instead of manually iterating over parameters and results.
+  const clang::Type *getClangTypeForIRGen(Type ty);
+
   /// Determine whether the given Swift type is representable in a
   /// given foreign language.
   ForeignRepresentationInfo
@@ -901,6 +909,13 @@ class ASTContext final {
   /// \returns The requested module, or NULL if the module cannot be found.
   ModuleDecl *getModule(ImportPath::Module ModulePath);
 
+  /// Attempts to load the matching overlay module for the given Clang
+  /// module into this ASTContext.
+  ///
+  /// \returns The Swift overlay module corresponding to the given Clang module,
+  /// or NULL if the overlay module cannot be found.
+  ModuleDecl *getOverlayModule(const FileUnit *ClangModule);
+
   ModuleDecl *getModuleByName(StringRef ModuleName);
 
   ModuleDecl *getModuleByIdentifier(Identifier ModuleID);
diff --git a/include/swift/AST/ASTMangler.h b/include/swift/AST/ASTMangler.h
index af7294875332e..6b69083c97de4 100644
--- a/include/swift/AST/ASTMangler.h
+++ b/include/swift/AST/ASTMangler.h
@@ -155,6 +155,15 @@ class ASTMangler : public Mangler {
                                            Type SelfType,
                                            ModuleDecl *Module);
 
+  /// Mangle a completion handler block implementation function, used for
+  /// importing ObjC APIs as async.
+  ///
+  /// - If `predefined` is true, this mangles the symbol name of the completion
+  ///   handler predefined in the Swift runtime for the given type signature.
+  std::string mangleObjCAsyncCompletionHandlerImpl(CanSILFunctionType BlockType,
+                                                   CanType ResultType,
+                                                   bool predefined);
+
   /// Mangle the derivative function (JVP/VJP) for the given:
   /// - Mangled original function name.
   /// - Derivative function kind.
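A note on the FutureFragment machinery in Task.h above: WaitQueueItem packs a two-bit Status into the low bits of the head AsyncTask pointer, which is sound because Job (and therefore AsyncTask) is declared alignas(2 * alignof(void*)), so those bits of a valid task pointer are always zero. The following is a minimal standalone C++ sketch of the same packing, using stand-in types rather than the runtime's real declarations:

#include <cassert>
#include <cstdint>

// Stand-in for the runtime's task type; only its address matters here.
struct AsyncTask;

enum class Status : uintptr_t { Executing = 0, Success = 1, Error = 2 };

struct WaitQueueItem {
  // The two low bits hold the status; pointer alignment guarantees they
  // are free in a valid AsyncTask pointer.
  static constexpr uintptr_t statusMask = 0x03;

  uintptr_t storage;

  Status getStatus() const {
    return static_cast<Status>(storage & statusMask);
  }
  AsyncTask *getTask() const {
    return reinterpret_cast<AsyncTask *>(storage & ~statusMask);
  }
  static WaitQueueItem get(Status status, AsyncTask *task) {
    return WaitQueueItem{reinterpret_cast<uintptr_t>(task) |
                         static_cast<uintptr_t>(status)};
  }
};

int main() {
  alignas(4) static char slot[8]; // stand-in for a task allocation
  auto *task = reinterpret_cast<AsyncTask *>(slot);
  WaitQueueItem item = WaitQueueItem::get(Status::Success, task);
  assert(item.getStatus() == Status::Success);
  assert(item.getTask() == task); // round-trips with no extra storage
  return 0;
}

Because status and list head live in one word, completeFuture can publish the final state and claim the whole waiter list with a single atomic exchange on waitQueue.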
diff --git a/include/swift/AST/Decl.h b/include/swift/AST/Decl.h
index 96bb06eedac5c..4ca710c2c7b54 100644
--- a/include/swift/AST/Decl.h
+++ b/include/swift/AST/Decl.h
@@ -391,7 +391,7 @@ class alignas(1 << DeclAlignInBits) Decl {
   SWIFT_INLINE_BITFIELD(SubscriptDecl, VarDecl, 2,
     StaticSpelling : 2
   );
-  SWIFT_INLINE_BITFIELD(AbstractFunctionDecl, ValueDecl, 3+8+1+1+1+1+1+1,
+  SWIFT_INLINE_BITFIELD(AbstractFunctionDecl, ValueDecl, 3+8+1+1+1+1+1+1+1,
     /// \see AbstractFunctionDecl::BodyKind
     BodyKind : 3,
@@ -415,7 +415,11 @@ class alignas(1 << DeclAlignInBits) Decl {
     Synthesized : 1,
 
     /// Whether this member's body consists of a single expression.
-    HasSingleExpressionBody : 1
+    HasSingleExpressionBody : 1,
+
+    /// Whether peeking into this function detected nested type declarations.
+    /// This is set when the decl's body is skipped during parsing.
+    HasNestedTypeDeclarations : 1
   );
 
   SWIFT_INLINE_BITFIELD(FuncDecl, AbstractFunctionDecl, 1+1+2+1+1+2+1,
@@ -5544,6 +5548,7 @@ class AbstractFunctionDecl : public GenericContext, public ValueDecl {
     Bits.AbstractFunctionDecl.Throws = Throws;
     Bits.AbstractFunctionDecl.Synthesized = false;
     Bits.AbstractFunctionDecl.HasSingleExpressionBody = false;
+    Bits.AbstractFunctionDecl.HasNestedTypeDeclarations = false;
   }
 
   void setBodyKind(BodyKind K) {
@@ -5690,6 +5695,16 @@ class AbstractFunctionDecl : public GenericContext, public ValueDecl {
     setBody(S, BodyKind::Parsed);
   }
 
+  /// Was a nested type declaration detected when parsing of this
+  /// function's body was skipped?
+  bool hasNestedTypeDeclarations() const {
+    return Bits.AbstractFunctionDecl.HasNestedTypeDeclarations;
+  }
+
+  void setHasNestedTypeDeclarations(bool value) {
+    Bits.AbstractFunctionDecl.HasNestedTypeDeclarations = value;
+  }
+
   /// Note that parsing for the body was delayed.
   ///
   /// The function should return the body statement and a flag indicating
diff --git a/include/swift/AST/FineGrainedDependencies.h b/include/swift/AST/FineGrainedDependencies.h
index e701ad31cce8a..3d224cd842389 100644
--- a/include/swift/AST/FineGrainedDependencies.h
+++ b/include/swift/AST/FineGrainedDependencies.h
@@ -351,7 +351,7 @@ class BiIndexedTwoStageMap {
 ///
 /// \Note The returned graph should not be escaped from the callback.
 bool withReferenceDependencies(
-    llvm::PointerUnion<ModuleDecl *, SourceFile *> MSF,
+    llvm::PointerUnion<const ModuleDecl *, const SourceFile *> MSF,
     const DependencyTracker &depTracker, StringRef outputPath,
     bool alsoEmitDotFile, llvm::function_ref<bool(SourceFileDepGraph &&)>);
diff --git a/include/swift/Basic/FunctionBodySkipping.h b/include/swift/Basic/FunctionBodySkipping.h
index 1d1f8b2deb43a..cd7042a8e595b 100644
--- a/include/swift/Basic/FunctionBodySkipping.h
+++ b/include/swift/Basic/FunctionBodySkipping.h
@@ -23,6 +23,9 @@ enum class FunctionBodySkipping : uint8_t {
   None,
   /// Only non-inlinable function bodies should be skipped.
   NonInlinable,
+  /// Only non-inlinable function bodies without type definitions should
+  /// be skipped.
+  NonInlinableWithoutTypes,
   /// All function bodies should be skipped, where not otherwise required
   /// for type inference.
  All
diff --git a/include/swift/Demangling/DemangleNodes.def b/include/swift/Demangling/DemangleNodes.def
index 57c8d6095420d..1d464be0d86cf 100644
--- a/include/swift/Demangling/DemangleNodes.def
+++ b/include/swift/Demangling/DemangleNodes.def
@@ -160,6 +160,7 @@ NODE(NominalTypeDescriptor)
 NODE(NonObjCAttribute)
 NODE(Number)
 NODE(ObjCAsyncCompletionHandlerImpl)
+NODE(PredefinedObjCAsyncCompletionHandlerImpl)
 NODE(ObjCAttribute)
 NODE(ObjCBlock)
 NODE(EscapingObjCBlock)
diff --git a/include/swift/Frontend/InputFile.h b/include/swift/Frontend/InputFile.h
index 417d0e5e5a283..f70f9f9437765 100644
--- a/include/swift/Frontend/InputFile.h
+++ b/include/swift/Frontend/InputFile.h
@@ -121,17 +121,17 @@ class InputFile final {
   // FrontendInputsAndOutputs. They merely make the call sites
   // a bit shorter. Add more forwarding methods as needed.
 
-  std::string dependenciesFilePath() const {
+  StringRef getDependenciesFilePath() const {
     return getPrimarySpecificPaths().SupplementaryOutputs.DependenciesFilePath;
   }
-  std::string loadedModuleTracePath() const {
+  StringRef getLoadedModuleTracePath() const {
     return getPrimarySpecificPaths().SupplementaryOutputs.LoadedModuleTracePath;
   }
-  std::string serializedDiagnosticsPath() const {
+  StringRef getSerializedDiagnosticsPath() const {
     return getPrimarySpecificPaths().SupplementaryOutputs
         .SerializedDiagnosticsPath;
   }
-  std::string fixItsOutputPath() const {
+  StringRef getFixItsOutputPath() const {
     return getPrimarySpecificPaths().SupplementaryOutputs.FixItsOutputPath;
   }
 };
diff --git a/include/swift/IRGen/Linking.h b/include/swift/IRGen/Linking.h
index 963a59aa6651b..f29b31fab167b 100644
--- a/include/swift/IRGen/Linking.h
+++ b/include/swift/IRGen/Linking.h
@@ -293,6 +293,11 @@ class LinkEntity {
   /// the metadata cache once.
   CanonicalPrespecializedGenericTypeCachingOnceToken,
 
+  /// The same as AsyncFunctionPointer but with a different stored value, for
+  /// use by TBDGen.
+  /// The pointer is an AbstractFunctionDecl*.
+  AsyncFunctionPointerAST,
+
   /// The pointer is a SILFunction*.
   DynamicallyReplaceableFunctionKey,
 
@@ -410,6 +415,13 @@ class LinkEntity {
   /// passed to swift_getCanonicalSpecializedMetadata.
   /// The pointer is a canonical TypeBase*.
   NoncanonicalSpecializedGenericTypeMetadataCacheVariable,
+
+  /// Provides the data required to invoke an async function using the async
+  /// calling convention in the form of the size of the context to allocate
+  /// and the relative address of the function to call with that allocated
+  /// context.
+  /// The pointer is a SILFunction*.
+  AsyncFunctionPointer,
 };
 friend struct llvm::DenseMapInfo<LinkEntity>;
 
@@ -418,7 +430,7 @@ class LinkEntity {
   }
 
   static bool isDeclKind(Kind k) {
-    return k <= Kind::CanonicalPrespecializedGenericTypeCachingOnceToken;
+    return k <= Kind::AsyncFunctionPointerAST;
   }
   static bool isTypeKind(Kind k) {
     return k >= Kind::ProtocolWitnessTableLazyAccessFunction;
@@ -1088,6 +1100,21 @@ class LinkEntity {
     return entity;
   }
 
+  static LinkEntity forAsyncFunctionPointer(SILFunction *silFunction) {
+    LinkEntity entity;
+    entity.Pointer = silFunction;
+    entity.SecondaryPointer = nullptr;
+    entity.Data = LINKENTITY_SET_FIELD(
+        Kind, unsigned(LinkEntity::Kind::AsyncFunctionPointer));
+    return entity;
+  }
+
+  static LinkEntity forAsyncFunctionPointer(AbstractFunctionDecl *decl) {
+    LinkEntity entity;
+    entity.setForDecl(Kind::AsyncFunctionPointerAST, decl);
+    return entity;
+  }
+
   void mangle(llvm::raw_ostream &out) const;
   void mangle(SmallVectorImpl<char> &buffer) const;
   std::string mangleAsString() const;
@@ -1110,14 +1137,15 @@ class LinkEntity {
   }
 
   bool hasSILFunction() const {
-    return getKind() == Kind::SILFunction ||
+    return getKind() == Kind::AsyncFunctionPointer ||
            getKind() == Kind::DynamicallyReplaceableFunctionVariable ||
-           getKind() == Kind::DynamicallyReplaceableFunctionKey;
+           getKind() == Kind::DynamicallyReplaceableFunctionKey ||
+           getKind() == Kind::SILFunction;
   }
 
   SILFunction *getSILFunction() const {
     assert(hasSILFunction());
-    return reinterpret_cast<SILFunction*>(Pointer);
+    return reinterpret_cast<SILFunction *>(Pointer);
   }
 
   SILGlobalVariable *getSILGlobalVariable() const {
diff --git a/include/swift/Option/Options.td b/include/swift/Option/Options.td
index c430938cceca9..a0be668c93627 100644
--- a/include/swift/Option/Options.td
+++ b/include/swift/Option/Options.td
@@ -301,6 +301,10 @@ def experimental_skip_non_inlinable_function_bodies:
   Flag<["-"], "experimental-skip-non-inlinable-function-bodies">,
   Flags<[FrontendOption, HelpHidden]>,
   HelpText<"Skip type-checking and SIL generation for non-inlinable function bodies">;
+def experimental_skip_non_inlinable_function_bodies_without_types:
+  Flag<["-"], "experimental-skip-non-inlinable-function-bodies-without-types">,
+  Flags<[FrontendOption, HelpHidden]>,
+  HelpText<"Skip work on non-inlinable function bodies that do not declare nested types">;
 def profile_stats_events: Flag<["-"], "profile-stats-events">,
   Flags<[FrontendOption, HelpHidden]>,
   HelpText<"Profile changes to stats in -stats-output-dir">;
diff --git a/include/swift/Parse/Parser.h b/include/swift/Parse/Parser.h
index 7c522d8ce503c..a3ca904ad12f0 100644
--- a/include/swift/Parse/Parser.h
+++ b/include/swift/Parse/Parser.h
@@ -687,7 +687,10 @@ class Parser {
 
   /// Skip a braced block (e.g. function body). The current token must be '{'.
   /// Returns \c true if the parser hit the eof before finding matched '}'.
-  bool skipBracedBlock();
+  ///
+  /// Sets \c HasNestedTypeDeclarations to true if a token for a type
+  /// declaration is detected in the skipped block.
+  bool skipBracedBlock(bool &HasNestedTypeDeclarations);
 
   /// Skip over SIL decls until we encounter the start of a Swift decl or eof.
 void skipSILUntilSwiftDecl();
diff --git a/include/swift/Runtime/Concurrency.h b/include/swift/Runtime/Concurrency.h
index b72c22a0c0050..df8162d0e3530 100644
--- a/include/swift/Runtime/Concurrency.h
+++ b/include/swift/Runtime/Concurrency.h
@@ -49,6 +49,31 @@ AsyncTaskAndContext swift_task_create_f(JobFlags flags,
                                         AsyncFunctionType *function,
                                         size_t initialContextSize);
 
+/// Create a task object with a future which will run the given
+/// function.
+///
+/// The task is not yet scheduled.
+///
+/// If a parent task is provided, flags.task_hasChildFragment() must
+/// be true, and this must be called synchronously with the parent.
+/// The parent is responsible for creating a ChildTaskStatusRecord.
+/// TODO: should we have a single runtime function for creating a task
+/// and doing this child task status record management?
+///
+/// flags.task_isFuture must be set. \c futureResultType is the type of the
+/// result that the future will produce.
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+AsyncTaskAndContext swift_task_create_future(
+    JobFlags flags, AsyncTask *parent, const Metadata *futureResultType,
+    const AsyncFunctionPointer *function);
+
+/// Create a task object with a future which will run the given
+/// function.
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+AsyncTaskAndContext swift_task_create_future_f(
+    JobFlags flags, AsyncTask *parent, const Metadata *futureResultType,
+    AsyncFunctionType *function, size_t initialContextSize);
+
 /// Allocate memory in a task.
 ///
 /// This must be called synchronously with the task.
@@ -83,6 +108,34 @@ SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
 JobPriority
 swift_task_escalate(AsyncTask *task, JobPriority newPriority);
 
+/// The result of waiting for a task future.
+struct TaskFutureWaitResult {
+  enum Kind : uintptr_t {
+    /// The waiting task has been added to the future's wait queue, and will
+    /// be scheduled once the future has completed.
+    Waiting,
+
+    /// The future succeeded and produced a result value. \c storage points
+    /// at that value.
+    Success,
+
+    /// The future finished by throwing an error. \c storage is that error
+    /// existential.
+    Error,
+  };
+
+  Kind kind;
+  OpaqueValue *storage;
+};
+
+/// Wait for a future task to complete.
+///
+/// This can be called from any thread.
+///
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+TaskFutureWaitResult
+swift_task_future_wait(AsyncTask *task, AsyncTask *waitingTask);
+
 /// Add a status record to a task. The record should not be
 /// modified while it is registered with a task.
 ///
@@ -145,6 +198,10 @@ SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
 NearestTaskDeadline
 swift_task_getNearestDeadline(AsyncTask *task);
 
+// TODO: Remove this hack.
+SWIFT_EXPORT_FROM(swift_Concurrency) SWIFT_CC(swift)
+void swift_task_run(AsyncTask *taskToRun);
+
 }
 
 #endif
diff --git a/include/swift/Runtime/RuntimeFunctions.def b/include/swift/Runtime/RuntimeFunctions.def
index 59a682ecec688..3e37335d72bce 100644
--- a/include/swift/Runtime/RuntimeFunctions.def
+++ b/include/swift/Runtime/RuntimeFunctions.def
@@ -1497,14 +1497,13 @@ FUNCTION(TaskCancel,
          ARGS(SwiftTaskPtrTy),
          ATTRS(NoUnwind, ArgMemOnly))
 
-// AsyncTaskAndContext swift_task_create_f(
-//   size_t flags, AsyncTask *task, AsyncFunctionType *function,
-//   size_t initialContextSize);
+// AsyncTaskAndContext swift_task_create(
+//   size_t flags, AsyncTask *task, AsyncFunctionPointer *function);
 FUNCTION(TaskCreateFunc,
-         swift_task_create_f, SwiftCC,
+         swift_task_create, SwiftCC,
          ConcurrencyAvailability,
          RETURNS(AsyncTaskAndContextTy),
-         ARGS(SizeTy, SwiftTaskPtrTy, TaskContinuationFunctionPtrTy, SizeTy),
+         ARGS(SizeTy, SwiftTaskPtrTy, AsyncFunctionPointerPtrTy),
          ATTRS(NoUnwind, ArgMemOnly))
 
 #undef RETURNS
diff --git a/include/swift/SIL/AbstractionPattern.h b/include/swift/SIL/AbstractionPattern.h
index 5f22a8310bf54..93df4f66affa4 100644
--- a/include/swift/SIL/AbstractionPattern.h
+++ b/include/swift/SIL/AbstractionPattern.h
@@ -179,6 +179,11 @@ class AbstractionPattern {
     /// type. ObjCMethod is valid. OtherData is an encoded foreign
     /// error index.
     ObjCMethodType,
+    /// The type of an ObjC block used as a completion handler for
+    /// an API that has been imported into Swift as async,
+    /// representing the tuple of results of the async projection of the
+    /// API.
+    ObjCCompletionHandlerArgumentsType,
     /// The uncurried imported type of a C++ non-operator non-static member
     /// function. OrigType is valid and is a function type. CXXMethod is valid.
     CXXMethodType,
@@ -410,6 +415,7 @@ class AbstractionPattern {
     case Kind::CFunctionAsMethodType:
     case Kind::CurriedCFunctionAsMethodType:
     case Kind::PartialCurriedCFunctionAsMethodType:
+    case Kind::ObjCCompletionHandlerArgumentsType:
       return true;
 
     default:
@@ -445,7 +451,16 @@ class AbstractionPattern {
   }
 
   bool hasStoredForeignInfo() const {
-    return hasStoredObjCMethod();
+    switch (getKind()) {
+    case Kind::CurriedObjCMethodType:
+    case Kind::PartialCurriedObjCMethodType:
+    case Kind::ObjCMethodType:
+    case Kind::ObjCCompletionHandlerArgumentsType:
+      return true;
+
+    default:
+      return false;
+    }
   }
 
   bool hasImportAsMemberStatus() const {
@@ -552,6 +567,7 @@ class AbstractionPattern {
     case Kind::CXXOperatorMethodType:
     case Kind::CurriedCXXOperatorMethodType:
     case Kind::PartialCurriedCXXOperatorMethodType:
+    case Kind::ObjCCompletionHandlerArgumentsType:
      return true;
     case Kind::Invalid:
     case Kind::Opaque:
@@ -584,6 +600,22 @@ class AbstractionPattern {
     return pattern;
   }
 
+  /// Return an abstraction pattern for a result tuple
+  /// corresponding to the parameters of a completion handler
+  /// block of an API that was imported as async.
+  static AbstractionPattern
+  getObjCCompletionHandlerArgumentsType(CanGenericSignature sig,
+                                        CanType origTupleType,
+                                        const clang::Type *clangBlockType,
+                                        EncodedForeignInfo foreignInfo) {
+    AbstractionPattern pattern(Kind::ObjCCompletionHandlerArgumentsType);
+    pattern.initClangType(sig, origTupleType, clangBlockType,
+                          Kind::ObjCCompletionHandlerArgumentsType);
+    pattern.OtherData = foreignInfo.getOpaqueValue();
+
+    return pattern;
+  }
+
 public:
   /// Return an abstraction pattern for the curried type of an
   /// Objective-C method.
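For reference, FutureFragment::storageOffset in Task.h above rounds the fragment header size up to the result type's alignment with the classic (offset + alignment - 1) & ~(alignment - 1) trick, and fragmentSize reserves at least pointer-sized storage so the same slot can also hold a thrown error pointer. A standalone C++ sketch of that layout arithmetic, with hypothetical sizes standing in for the value-witness queries:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Stand-in for the fragment's fixed header (wait queue word + result type).
struct FragmentHeader { void *waitQueue; const void *resultType; };

// Round `offset` up to `alignment`, which must be a power of two.
static size_t alignUp(size_t offset, size_t alignment) {
  return (offset + alignment - 1) & ~(alignment - 1);
}

static size_t storageOffset(size_t resultAlign) {
  // The storage must satisfy both the result's and a pointer's alignment.
  size_t alignment = std::max(resultAlign, alignof(void *));
  return alignUp(sizeof(FragmentHeader), alignment);
}

static size_t fragmentSize(size_t resultSize, size_t resultAlign) {
  // The slot doubles as space for an error pointer, so it is at least
  // pointer-sized.
  return storageOffset(resultAlign) + std::max(resultSize, sizeof(void *));
}

int main() {
  // A 1-byte, 1-aligned result still gets a pointer-aligned, pointer-sized slot.
  printf("offset=%zu size=%zu\n", storageOffset(1), fragmentSize(1, 1));
  // A 32-byte, 16-aligned result pushes the storage to a 16-byte boundary.
  printf("offset=%zu size=%zu\n", storageOffset(16), fragmentSize(32, 16));
  return 0;
}

This is why getStoragePtr and getError in the fragment can share one address: whichever of result or error is written, the offset and size computed here are sufficient.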
@@ -592,6 +624,7 @@ class AbstractionPattern {
                 const Optional<ForeignErrorConvention> &foreignError,
                 const Optional<ForeignAsyncConvention> &foreignAsync);
+
   /// Return an abstraction pattern for the uncurried type of a C function
   /// imported as a method.
   ///
@@ -927,6 +960,7 @@ class AbstractionPattern {
     case Kind::OpaqueDerivativeFunction:
       llvm_unreachable("opaque derivative function pattern has no type");
     case Kind::ClangType:
+    case Kind::ObjCCompletionHandlerArgumentsType:
     case Kind::CurriedObjCMethodType:
     case Kind::PartialCurriedObjCMethodType:
     case Kind::ObjCMethodType:
@@ -980,6 +1014,7 @@ class AbstractionPattern {
     case Kind::PartialCurriedCXXOperatorMethodType:
     case Kind::Type:
     case Kind::Discard:
+    case Kind::ObjCCompletionHandlerArgumentsType:
       assert(signature || !type->hasTypeParameter());
       assert(hasSameBasicTypeStructure(OrigType, type));
       GenericSig = (type->hasTypeParameter() ? signature : nullptr);
@@ -1018,6 +1053,7 @@ class AbstractionPattern {
     case Kind::CXXOperatorMethodType:
     case Kind::CurriedCXXOperatorMethodType:
     case Kind::PartialCurriedCXXOperatorMethodType:
+    case Kind::ObjCCompletionHandlerArgumentsType:
       return true;
     }
     llvm_unreachable("bad kind");
@@ -1097,6 +1133,7 @@ class AbstractionPattern {
     case Kind::PartialCurriedCXXOperatorMethodType:
     case Kind::OpaqueFunction:
     case Kind::OpaqueDerivativeFunction:
+    case Kind::ObjCCompletionHandlerArgumentsType:
       return false;
     case Kind::PartialCurriedObjCMethodType:
     case Kind::CurriedObjCMethodType:
@@ -1136,6 +1173,7 @@ class AbstractionPattern {
     case Kind::PartialCurriedCXXOperatorMethodType:
     case Kind::Type:
     case Kind::Discard:
+    case Kind::ObjCCompletionHandlerArgumentsType:
       return dyn_cast(getType());
     }
     llvm_unreachable("bad kind");
   }
@@ -1167,6 +1205,7 @@ class AbstractionPattern {
     case Kind::PartialCurriedCXXOperatorMethodType:
     case Kind::OpaqueFunction:
     case Kind::OpaqueDerivativeFunction:
+    case Kind::ObjCCompletionHandlerArgumentsType:
       // We assume that the Clang type might provide additional structure.
       return false;
     case Kind::Type:
@@ -1200,6 +1239,7 @@ class AbstractionPattern {
     case Kind::OpaqueFunction:
     case Kind::OpaqueDerivativeFunction:
       return false;
+    case Kind::ObjCCompletionHandlerArgumentsType:
     case Kind::Tuple:
       return true;
     case Kind::Type:
@@ -1232,6 +1272,7 @@ class AbstractionPattern {
       llvm_unreachable("pattern is not a tuple");
     case Kind::Tuple:
       return getNumTupleElements_Stored();
+    case Kind::ObjCCompletionHandlerArgumentsType:
     case Kind::Type:
     case Kind::Discard:
     case Kind::ClangType:
diff --git a/include/swift/SIL/LoopInfo.h b/include/swift/SIL/LoopInfo.h
index 200797c1a76d4..1ab4d6fe0e4ce 100644
--- a/include/swift/SIL/LoopInfo.h
+++ b/include/swift/SIL/LoopInfo.h
@@ -60,6 +60,8 @@ class SILLoop : public llvm::LoopBase<SILBasicBlock, SILLoop> {
     }
   }
 
+  SILFunction *getFunction() const { return getHeader()->getParent(); }
+
 private:
   friend class llvm::LoopInfoBase<SILBasicBlock, SILLoop>;
 
diff --git a/include/swift/SIL/MemAccessUtils.h b/include/swift/SIL/MemAccessUtils.h
index 9439d50db3044..d5f504e57b310 100644
--- a/include/swift/SIL/MemAccessUtils.h
+++ b/include/swift/SIL/MemAccessUtils.h
@@ -1369,7 +1369,7 @@ class AccessUseDefChainVisitor {
   // Result visitNonAccess(SILValue base);
   // Result visitPhi(SILPhiArgument *phi);
   // Result visitStorageCast(SingleValueInstruction *cast, Operand *sourceOper);
-  // Result visitAccessProjection(SingleValueInstruction *cast,
+  // Result visitAccessProjection(SingleValueInstruction *projectedAddr,
   //                              Operand *sourceOper);
 
   Result visit(SILValue sourceAddr);
@@ -1480,6 +1480,83 @@ Result AccessUseDefChainVisitor<Impl, Result>::visit(SILValue sourceAddr) {
 
 } // end namespace swift
 
+//===----------------------------------------------------------------------===//
+//                          AccessUseDefChainCloner
+//===----------------------------------------------------------------------===//
+
+namespace swift {
+
+/// Clone all projections and casts on the access use-def chain for as long as
+/// the specified predicate is true, stopping when the predicate fails or the
+/// access base is reached.
+///
+/// This will not clone ref_element_addr or ref_tail_addr because those aren't
+/// part of the access chain.
+template <typename UnaryPredicate>
+class AccessUseDefChainCloner
+    : public AccessUseDefChainVisitor<AccessUseDefChainCloner<UnaryPredicate>,
+                                      SILValue> {
+  UnaryPredicate predicate;
+  SILInstruction *insertionPoint;
+
+public:
+  AccessUseDefChainCloner(UnaryPredicate predicate,
+                          SILInstruction *insertionPoint)
+      : predicate(predicate), insertionPoint(insertionPoint) {}
+
+  // Recursive main entry point
+  SILValue cloneUseDefChain(SILValue addr) {
+    if (!predicate(addr))
+      return addr;
+
+    return this->visit(addr);
+  }
+
+  // Recursively clone an address on the use-def chain.
+  SingleValueInstruction *cloneProjection(SingleValueInstruction *projectedAddr,
+                                          Operand *sourceOper) {
+    SILValue projectedSource = cloneUseDefChain(sourceOper->get());
+    SILInstruction *clone = projectedAddr->clone(insertionPoint);
+    clone->setOperand(sourceOper->getOperandNumber(), projectedSource);
+    return cast<SingleValueInstruction>(clone);
+  }
+
+  // MARK: Visitor implementation
+
+  SILValue visitBase(SILValue base, AccessedStorage::Kind kind) {
+    assert(false && "access base cannot be cloned");
+    return SILValue();
+  }
+
+  SILValue visitNonAccess(SILValue base) {
+    assert(false && "unknown address root cannot be cloned");
+    return SILValue();
+  }
+
+  SILValue visitPhi(SILPhiArgument *phi) {
+    assert(false && "unexpected phi on access path");
+    return SILValue();
+  }
+
+  SILValue visitStorageCast(SingleValueInstruction *cast, Operand *sourceOper) {
+    return cloneProjection(cast, sourceOper);
+  }
+
+  SILValue visitAccessProjection(SingleValueInstruction *projectedAddr,
+                                 Operand *sourceOper) {
+    return cloneProjection(projectedAddr, sourceOper);
+  }
+};
+
+template <typename UnaryPredicate>
+SILValue cloneUseDefChain(SILValue addr, SILInstruction *insertionPoint,
+                          UnaryPredicate shouldFollowUse) {
+  return AccessUseDefChainCloner<UnaryPredicate>(shouldFollowUse,
+                                                 insertionPoint)
+      .cloneUseDefChain(addr);
+}
+
+} // end namespace swift
+
 //===----------------------------------------------------------------------===//
 //                             MARK: Verification
 //===----------------------------------------------------------------------===//
diff --git a/include/swift/SIL/OwnershipUtils.h b/include/swift/SIL/OwnershipUtils.h
index bcb6521cd13ec..95c9862327dc1 100644
--- a/include/swift/SIL/OwnershipUtils.h
+++ b/include/swift/SIL/OwnershipUtils.h
@@ -178,7 +178,9 @@ struct BorrowingOperand {
 
   /// Returns true if this borrow scope operand consumes guaranteed
   /// values and produces a new scope afterwards.
-  bool consumesGuaranteedValues() const {
+  ///
+  /// TODO: tuple, struct, destructure_tuple, destructure_struct.
+  bool isReborrow() const {
     switch (kind) {
     case BorrowingOperandKind::BeginBorrow:
     case BorrowingOperandKind::BeginApply:
diff --git a/include/swift/SIL/SILModule.h b/include/swift/SIL/SILModule.h
index 162606ab5a747..c170dcfd67a62 100644
--- a/include/swift/SIL/SILModule.h
+++ b/include/swift/SIL/SILModule.h
@@ -238,7 +238,7 @@ class SILModule {
   llvm::DenseMap<Identifier, BuiltinValueKind> BuiltinIDCache;
 
   /// This is the set of undef values we've created, for uniquing purposes.
-  llvm::DenseMap<std::pair<SILType, unsigned>, SILUndef *> UndefValues;
+  llvm::DenseMap<SILType, SILUndef *> UndefValues;
 
   /// The stage of processing this module is at.
   SILStage Stage;
diff --git a/include/swift/SIL/SILNode.h b/include/swift/SIL/SILNode.h
index 9f47dc9bf9a58..e6cce751c6ce3 100644
--- a/include/swift/SIL/SILNode.h
+++ b/include/swift/SIL/SILNode.h
@@ -372,7 +372,7 @@ class alignas(8) SILNode {
     // Number of cases
     NumCases : 31 - NumTermInstBits;
 
     template <typename BaseTy>
-    friend class SwitchEnumInstBase;
+    friend class SwitchEnumInstBase
   );
 
 #define SEIB_BITFIELD_EMPTY(T, U) \
diff --git a/include/swift/SIL/SILUndef.h b/include/swift/SIL/SILUndef.h
index 822271de1ca17..8d7540bc70a35 100644
--- a/include/swift/SIL/SILUndef.h
+++ b/include/swift/SIL/SILUndef.h
@@ -23,25 +23,23 @@ class SILInstruction;
 class SILModule;
 
 class SILUndef : public ValueBase {
-  ValueOwnershipKind ownershipKind;
-
-  SILUndef(SILType type, ValueOwnershipKind ownershipKind);
+  SILUndef(SILType type);
 
 public:
   void operator=(const SILArgument &) = delete;
   void operator delete(void *, size_t) = delete;
 
-  static SILUndef *get(SILType ty, SILModule &m, ValueOwnershipKind ownershipKind);
+  static SILUndef *get(SILType ty, SILModule &m);
   static SILUndef *get(SILType ty, const SILFunction &f);
 
   template <class OwnerTy>
   static SILUndef *getSentinelValue(SILType type, OwnerTy owner) {
     // Ownership kind isn't used here, the value just needs to have a unique
     // address.
-    return new (*owner) SILUndef(type, OwnershipKind::None);
+    return new (*owner) SILUndef(type);
   }
 
-  ValueOwnershipKind getOwnershipKind() const { return ownershipKind; }
+  ValueOwnershipKind getOwnershipKind() const { return OwnershipKind::None; }
 
   static bool classof(const SILArgument *) = delete;
   static bool classof(const SILInstruction *) = delete;
diff --git a/include/swift/SILOptimizer/Utils/ValueLifetime.h b/include/swift/SILOptimizer/Utils/ValueLifetime.h
index 9fe60c1bbfb43..ceac08b5938f0 100644
--- a/include/swift/SILOptimizer/Utils/ValueLifetime.h
+++ b/include/swift/SILOptimizer/Utils/ValueLifetime.h
@@ -17,6 +17,7 @@
 #ifndef SWIFT_SILOPTIMIZER_UTILS_CFG_H
 #define SWIFT_SILOPTIMIZER_UTILS_CFG_H
 
+#include "swift/Basic/STLExtras.h"
 #include "swift/SIL/SILBuilder.h"
 #include "swift/SIL/SILInstruction.h"
 #include "swift/SILOptimizer/Utils/InstOptUtils.h"
@@ -54,24 +55,47 @@ class ValueLifetimeAnalysis {
   /// end the value's lifetime.
   using Frontier = SmallVector<SILInstruction *, 4>;
 
+  /// A type erased version of frontier so callers can customize the inline
+  /// size.
+  using FrontierImpl = SmallVectorImpl<SILInstruction *>;
+
   /// Constructor for the value \p def with a specific range of users.
   ///
   /// We templatize over the RangeTy so that we can initialize
   /// ValueLifetimeAnalysis with misc iterators including transform
   /// iterators.
   template <typename RangeTy>
-  ValueLifetimeAnalysis(decltype(defValue) def, const RangeTy &userRange)
-      : defValue(def), userSet(userRange.begin(), userRange.end()) {
+  ValueLifetimeAnalysis(SILArgument *def, const RangeTy &useRange)
+      : defValue(def), userSet() {
+    for (SILInstruction *use : useRange)
+      userSet.insert(use);
     propagateLiveness();
   }
 
-  /// Constructor for the value \p def considering all the value's uses.
-  ValueLifetimeAnalysis(SILInstruction *def) : defValue(def) {
-    for (auto result : def->getResults()) {
-      for (Operand *op : result->getUses()) {
-        userSet.insert(op->getUser());
-      }
-    }
+  ValueLifetimeAnalysis(
+      SILArgument *def,
+      llvm::iterator_range<ValueBaseUseIterator> useRange)
+      : defValue(def), userSet() {
+    for (Operand *use : useRange)
+      userSet.insert(use->getUser());
+    propagateLiveness();
+  }
+
+  template <typename RangeTy>
+  ValueLifetimeAnalysis(
+      SILInstruction *def, const RangeTy &useRange)
+      : defValue(def), userSet() {
+    for (SILInstruction *use : useRange)
+      userSet.insert(use);
+    propagateLiveness();
+  }
+
+  ValueLifetimeAnalysis(
+      SILInstruction *def,
+      llvm::iterator_range<ValueBaseUseIterator> useRange)
+      : defValue(def), userSet() {
+    for (Operand *use : useRange)
+      userSet.insert(use->getUser());
     propagateLiveness();
   }
 
@@ -106,7 +130,7 @@ class ValueLifetimeAnalysis {
   ///
   /// If \p deBlocks is provided, all dead-end blocks are ignored. This
   /// prevents unreachable-blocks to be included in the frontier.
-  bool computeFrontier(Frontier &frontier, Mode mode,
+  bool computeFrontier(FrontierImpl &frontier, Mode mode,
                        DeadEndBlocks *deBlocks = nullptr);
 
   ArrayRef<std::pair<TermInst *, unsigned>> getCriticalEdges() {
@@ -125,7 +149,7 @@ class ValueLifetimeAnalysis {
   }
 
   /// Checks if there is a dealloc_ref inside the value's live range.
-  bool containsDeallocRef(const Frontier &frontier);
+  bool containsDeallocRef(const FrontierImpl &frontier);
 
   /// For debug dumping.
   void dump() const;
@@ -159,7 +183,7 @@ class ValueLifetimeAnalysis {
 /// Otherwise \p valueOrStackLoc must be a value type and in this case, inserts
 /// destroy_value at each instruction of the \p frontier.
 void endLifetimeAtFrontier(SILValue valueOrStackLoc,
-                           const ValueLifetimeAnalysis::Frontier &frontier,
+                           const ValueLifetimeAnalysis::FrontierImpl &frontier,
                            SILBuilderContext &builderCtxt,
                            InstModCallbacks callbacks);
diff --git a/include/swift/Sema/ConstraintSystem.h b/include/swift/Sema/ConstraintSystem.h
index 64fa45f2ceed8..40d7dc6ffc94f 100644
--- a/include/swift/Sema/ConstraintSystem.h
+++ b/include/swift/Sema/ConstraintSystem.h
@@ -3134,10 +3134,16 @@ class ConstraintSystem {
     });
   }
 
-  /// Determine whether given declaration is unavailable in the current context.
+  /// Determine whether the given declaration is unavailable from the
+  /// current context.
   bool isDeclUnavailable(const Decl *D,
                          ConstraintLocator *locator = nullptr) const;
 
+  /// Determine whether the given conformance is unavailable from the
+  /// current context.
+  bool isConformanceUnavailable(ProtocolConformanceRef conformance,
+                                ConstraintLocator *locator = nullptr) const;
+
 public:
   /// Whether we should attempt to fix problems.
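The AccessUseDefChainCloner added to MemAccessUtils.h above recurses from an address up its use-def chain, cloning each projection while the predicate holds and re-pointing every clone at the (possibly cloned) source. A simplified standalone analogue of that recursion, with a toy node type standing in for SIL instructions:

#include <cassert>
#include <functional>
#include <memory>
#include <vector>

// A toy "projection chain": each node refines the address produced by its
// source, like struct_element_addr / begin_access refine a SIL address.
struct Node {
  const char *name;
  Node *source; // nullptr for the access base
};

// Clone the chain rooted at `addr` for as long as `shouldFollow` returns
// true, re-pointing each clone at the (possibly cloned) source -- the same
// shape as AccessUseDefChainCloner::cloneUseDefChain above.
Node *cloneUseDefChain(Node *addr, std::function<bool(Node *)> shouldFollow,
                       std::vector<std::unique_ptr<Node>> &arena) {
  if (!shouldFollow(addr))
    return addr; // stop: reuse the original node from here down
  assert(addr->source && "the access base itself cannot be cloned");
  Node *clonedSource = cloneUseDefChain(addr->source, shouldFollow, arena);
  arena.push_back(std::make_unique<Node>(Node{addr->name, clonedSource}));
  return arena.back().get();
}

int main() {
  Node base{"base", nullptr}, access{"begin_access", &base},
      elt{"struct_element_addr", &access};
  std::vector<std::unique_ptr<Node>> arena;
  // Clone everything above the base.
  Node *copy = cloneUseDefChain(
      &elt, [](Node *n) { return n->source != nullptr; }, arena);
  assert(copy != &elt && copy->source != &access);
  assert(copy->source->source == &base); // both chains converge at the base
  return 0;
}

As in the SIL version, the base and anything the predicate rejects are shared rather than cloned, so the new chain rejoins the old one at the first unfollowed node.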
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 7cf42afa2d904..7eb3d255e3a6b 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -54,6 +54,7 @@
 #include "swift/Subsystems.h"
 #include "swift/Syntax/References.h"
 #include "swift/Syntax/SyntaxArena.h"
+#include "clang/AST/Type.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/StringMap.h"
@@ -1927,6 +1928,27 @@ ASTContext::getModule(ImportPath::Module ModulePath) {
   return nullptr;
 }
 
+ModuleDecl *ASTContext::getOverlayModule(const FileUnit *FU) {
+  assert(FU && FU->getKind() == FileUnitKind::ClangModule &&
+         "Overlays can only be retrieved for clang modules!");
+  ImportPath::Module::Builder builder(FU->getParentModule()->getName());
+  auto ModPath = builder.get();
+  if (auto *Existing = getLoadedModule(ModPath)) {
+    if (!Existing->isNonSwiftModule())
+      return Existing;
+  }
+
+  for (auto &importer : getImpl().ModuleLoaders) {
+    if (importer.get() == getClangModuleLoader())
+      continue;
+    if (ModuleDecl *M = importer->loadModule(SourceLoc(), ModPath)) {
+      return M;
+    }
+  }
+
+  return nullptr;
+}
+
 ModuleDecl *ASTContext::getModuleByName(StringRef ModuleName) {
   ImportPath::Module::Builder builder(*this, ModuleName, /*separator=*/'.');
   return getModule(builder.get());
@@ -4589,6 +4611,10 @@ ASTContext::getSwiftDeclForExportedClangDecl(const clang::Decl *decl) {
   return impl.Converter->getSwiftDeclForExportedClangDecl(decl);
 }
 
+const clang::Type *
+ASTContext::getClangTypeForIRGen(Type ty) {
+  return getClangTypeConverter().convert(ty).getTypePtrOrNull();
+}
 
 CanGenericSignature ASTContext::getSingleGenericParameterSignature() const {
   if (auto theSig = getImpl().SingleGenericParameterSignature)
diff --git a/lib/AST/ASTMangler.cpp b/lib/AST/ASTMangler.cpp
index 490385afbe1a1..b6433344dc0e4 100644
--- a/lib/AST/ASTMangler.cpp
+++ b/lib/AST/ASTMangler.cpp
@@ -394,6 +394,17 @@ std::string ASTMangler::mangleReabstractionThunkHelper(
   return finalize();
 }
 
+std::string ASTMangler::mangleObjCAsyncCompletionHandlerImpl(
+    CanSILFunctionType BlockType,
+    CanType ResultType,
+    bool predefined) {
+  beginMangling();
+  appendType(BlockType);
+  appendType(ResultType);
+  appendOperator(predefined ? "TZ" : "Tz");
+  return finalize();
+}
+
 std::string ASTMangler::mangleAutoDiffDerivativeFunctionHelper(
     StringRef name, AutoDiffDerivativeFunctionKind kind,
     AutoDiffConfig config) {
@@ -1813,10 +1824,15 @@ ASTMangler::getSpecialManglingContext(const ValueDecl *decl,
       hasNameForLinkage = !clangDecl->getDeclName().isEmpty();
     if (hasNameForLinkage) {
       auto *clangDC = clangDecl->getDeclContext();
-      if (isa<clang::NamespaceDecl>(clangDC)) return None;
+      // In C, "nested" structs, unions, enums, etc. will become siblings:
+      //   struct Foo { struct Bar { }; }; -> struct Foo { }; struct Bar { };
+      // Whereas in C++, nested records will actually be nested. So if this is
+      // a C++ record, simply treat it like a namespace and exit early.
+      if (isa<clang::NamespaceDecl>(clangDC) ||
+          isa<clang::CXXRecordDecl>(clangDC))
+        return None;
       assert(clangDC->getRedeclContext()->isTranslationUnit() &&
              "non-top-level Clang types not supported yet");
-      (void)clangDC;
       return ASTMangler::ObjCContext;
     }
   }
diff --git a/lib/AST/ClangTypeConverter.cpp b/lib/AST/ClangTypeConverter.cpp
index 8c3b4773d43ef..52e0df90de641 100644
--- a/lib/AST/ClangTypeConverter.cpp
+++ b/lib/AST/ClangTypeConverter.cpp
@@ -12,13 +12,14 @@
 //
 // This file implements generation of Clang AST types from Swift AST types for
 // types that are representable in Objective-C interfaces.
-// Large chunks of the code are lightly modified versions of the code in
-// IRGen/GenClangType.cpp (which should eventually go away), so make sure
-// to keep the two in sync.
-// The three major differences are that, in this file:
+//
+// The usage of ClangTypeConverter at the AST level means that we may
+// encounter ill-formed types and/or sugared types. To avoid crashing and
+// to keep sugar as much as possible (in case the generated Clang type needs
+// to be surfaced to the user):
+//
 // 1. We fail gracefully instead of asserting/UB.
 // 2. We try to keep clang sugar instead of discarding it.
-// 3. We use getAs instead of cast as we handle Swift types with sugar.
 //
 //===----------------------------------------------------------------------===//
diff --git a/lib/AST/ClangTypeConverter.h b/lib/AST/ClangTypeConverter.h
index b590b3ef0c0ec..55223b086e9dc 100644
--- a/lib/AST/ClangTypeConverter.h
+++ b/lib/AST/ClangTypeConverter.h
@@ -94,6 +94,8 @@ class ClangTypeConverter :
       SmallVectorImpl<clang::TemplateArgument> &templateArgs);
 
 private:
+  friend ASTContext; // HACK: expose `convert` method to ASTContext
+
   clang::QualType convert(Type type);
 
   clang::QualType convertMemberType(NominalTypeDecl *DC,
diff --git a/lib/AST/FrontendSourceFileDepGraphFactory.cpp b/lib/AST/FrontendSourceFileDepGraphFactory.cpp
index fe358fbffd7fd..d85b06f92588a 100644
--- a/lib/AST/FrontendSourceFileDepGraphFactory.cpp
+++ b/lib/AST/FrontendSourceFileDepGraphFactory.cpp
@@ -193,16 +193,16 @@ std::string DependencyKey::computeNameForProvidedEntity<
 //==============================================================================
 
 bool fine_grained_dependencies::withReferenceDependencies(
-    llvm::PointerUnion<ModuleDecl *, SourceFile *> MSF,
+    llvm::PointerUnion<const ModuleDecl *, const SourceFile *> MSF,
     const DependencyTracker &depTracker, StringRef outputPath,
     bool alsoEmitDotFile, llvm::function_ref<bool(SourceFileDepGraph &&)> cont) {
-  if (auto *MD = MSF.dyn_cast<ModuleDecl *>()) {
+  if (auto *MD = MSF.dyn_cast<const ModuleDecl *>()) {
     SourceFileDepGraph g =
         ModuleDepGraphFactory(MD, alsoEmitDotFile).construct();
     return cont(std::move(g));
   } else {
-    auto *SF = MSF.get<SourceFile *>();
+    auto *SF = MSF.get<const SourceFile *>();
     SourceFileDepGraph g = FrontendSourceFileDepGraphFactory(
                                SF, outputPath, depTracker, alsoEmitDotFile)
                                .construct();
@@ -215,22 +215,22 @@ bool fine_grained_dependencies::withReferenceDependencies(
 //==============================================================================
 
 FrontendSourceFileDepGraphFactory::FrontendSourceFileDepGraphFactory(
-    SourceFile *SF, StringRef outputPath, const DependencyTracker &depTracker,
-    const bool alsoEmitDotFile)
+    const SourceFile *SF, StringRef outputPath,
+    const DependencyTracker &depTracker, const bool alsoEmitDotFile)
     : AbstractSourceFileDepGraphFactory(
-          SF->getASTContext().hadError(),
-          outputPath, getInterfaceHash(SF), alsoEmitDotFile,
-          SF->getASTContext().Diags),
+          SF->getASTContext().hadError(), outputPath, getInterfaceHash(SF),
+          alsoEmitDotFile, SF->getASTContext().Diags),
       SF(SF), depTracker(depTracker) {}
 
 /// Centralize the invariant that the fingerprint of the whole file is the
 /// interface hash
-std::string FrontendSourceFileDepGraphFactory::getFingerprint(SourceFile *SF) {
+std::string
+FrontendSourceFileDepGraphFactory::getFingerprint(const SourceFile *SF) {
   return getInterfaceHash(SF);
 }
 
 std::string
-FrontendSourceFileDepGraphFactory::getInterfaceHash(SourceFile *SF) {
+FrontendSourceFileDepGraphFactory::getInterfaceHash(const SourceFile *SF) {
   llvm::SmallString<32> interfaceHash;
   SF->getInterfaceHash(interfaceHash);
   return interfaceHash.str().str();
@@ -415,7 +415,7 @@ void
 FrontendSourceFileDepGraphFactory::addAllDefinedDecls() {
 
 namespace {
 /// Extracts uses out of a SourceFile
 class UsedDeclEnumerator {
-  SourceFile *SF;
+  const SourceFile *SF;
   const DependencyTracker &depTracker;
   StringRef swiftDeps;
 
@@ -427,7 +427,8 @@ class UsedDeclEnumerator {
 public:
   UsedDeclEnumerator(
-      SourceFile *SF, const DependencyTracker &depTracker, StringRef swiftDeps,
+      const SourceFile *SF, const DependencyTracker &depTracker,
+      StringRef swiftDeps,
       function_ref<void(const DependencyKey &, const DependencyKey &)>
           createDefUse)
       : SF(SF), depTracker(depTracker), swiftDeps(swiftDeps),
@@ -435,8 +436,7 @@ class UsedDeclEnumerator {
                                    DeclAspect::interface, swiftDeps)),
         sourceFileImplementation(DependencyKey::createKeyForWholeSourceFile(
             DeclAspect::implementation, swiftDeps)),
-        createDefUse(createDefUse) {
-  }
+        createDefUse(createDefUse) {}
 
 public:
   void enumerateAllUses() {
@@ -517,10 +517,11 @@ void FrontendSourceFileDepGraphFactory::addAllUsedDecls() {
 // MARK: ModuleDepGraphFactory
 //==============================================================================
 
-ModuleDepGraphFactory::ModuleDepGraphFactory(ModuleDecl *Mod, bool emitDot)
-    : AbstractSourceFileDepGraphFactory(
-          Mod->getASTContext().hadError(),
-          Mod->getNameStr(), "0xBADBEEF", emitDot, Mod->getASTContext().Diags),
+ModuleDepGraphFactory::ModuleDepGraphFactory(const ModuleDecl *Mod,
+                                             bool emitDot)
+    : AbstractSourceFileDepGraphFactory(Mod->getASTContext().hadError(),
+                                        Mod->getNameStr(), "0xBADBEEF", emitDot,
+                                        Mod->getASTContext().Diags),
       Mod(Mod) {}
 
 void ModuleDepGraphFactory::addAllDefinedDecls() {
diff --git a/lib/AST/FrontendSourceFileDepGraphFactory.h b/lib/AST/FrontendSourceFileDepGraphFactory.h
index e0a85a029140b..e97587dac6b9a 100644
--- a/lib/AST/FrontendSourceFileDepGraphFactory.h
+++ b/lib/AST/FrontendSourceFileDepGraphFactory.h
@@ -23,29 +23,29 @@ namespace fine_grained_dependencies {
 
 class FrontendSourceFileDepGraphFactory
     : public AbstractSourceFileDepGraphFactory {
-  SourceFile *const SF;
+  const SourceFile *SF;
   const DependencyTracker &depTracker;
 
 public:
-  FrontendSourceFileDepGraphFactory(SourceFile *SF, StringRef outputPath,
+  FrontendSourceFileDepGraphFactory(const SourceFile *SF, StringRef outputPath,
                                     const DependencyTracker &depTracker,
                                     bool alsoEmitDotFile);
 
   ~FrontendSourceFileDepGraphFactory() override = default;
 
 private:
-  static std::string getFingerprint(SourceFile *SF);
-  static std::string getInterfaceHash(SourceFile *SF);
+  static std::string getFingerprint(const SourceFile *SF);
+  static std::string getInterfaceHash(const SourceFile *SF);
 
   void addAllDefinedDecls() override;
   void addAllUsedDecls() override;
 };
 
 class ModuleDepGraphFactory : public AbstractSourceFileDepGraphFactory {
-  ModuleDecl *const Mod;
+  const ModuleDecl *Mod;
 
 public:
-  ModuleDepGraphFactory(ModuleDecl *Mod, bool emitDot);
+  ModuleDepGraphFactory(const ModuleDecl *Mod, bool emitDot);
 
   ~ModuleDepGraphFactory() override = default;
 
diff --git a/lib/ClangImporter/ClangImporter.cpp b/lib/ClangImporter/ClangImporter.cpp
index 6fae528d3e961..30757a1a9ed66 100644
--- a/lib/ClangImporter/ClangImporter.cpp
+++ b/lib/ClangImporter/ClangImporter.cpp
@@ -3468,18 +3468,23 @@ ModuleDecl *ClangModuleUnit::getOverlayModule() const {
     // FIXME: Include proper source location.
     ModuleDecl *M = getParentModule();
     ASTContext &Ctx = M->getASTContext();
-    auto overlay = Ctx.getModuleByIdentifier(M->getName());
-    if (overlay == M) {
-      overlay = nullptr;
-    } else {
-      // FIXME: This bizarre and twisty invariant is due to nested
-      // re-entrancy in both clang module loading and overlay module loading.
-      auto *sharedModuleRef = Ctx.getLoadedModule(M->getName());
-      assert(!sharedModuleRef || sharedModuleRef == overlay ||
-             sharedModuleRef == M);
+    auto overlay = Ctx.getOverlayModule(this);
+    if (overlay) {
       Ctx.addLoadedModule(overlay);
+    } else {
+      // FIXME: This is the awful legacy of the old implementation of overlay
+      // loading laid bare. Because the previous implementation used
+      // ASTContext::getModuleByIdentifier, it consulted the clang importer
+      // recursively which forced the current module, its dependencies, and
+      // the overlays of those dependencies to load and
+      // become visible in the current context. All of the callers of
+      // ClangModuleUnit::getOverlayModule are relying on this behavior, and
+      // untangling them is going to take a heroic amount of effort.
+      // Clang module loading should *never* *ever* be allowed to load unrelated
+      // Swift modules.
+      ImportPath::Module::Builder builder(M->getName());
+      (void) owner.loadModule(SourceLoc(), std::move(builder).get());
     }
-
     auto mutableThis = const_cast<ClangModuleUnit *>(this);
     mutableThis->overlayModule.setPointerAndInt(overlay, true);
   }
diff --git a/lib/ClangImporter/ImportDecl.cpp b/lib/ClangImporter/ImportDecl.cpp
index 7c6fb37883606..62f5f26ca638e 100644
--- a/lib/ClangImporter/ImportDecl.cpp
+++ b/lib/ClangImporter/ImportDecl.cpp
@@ -3499,7 +3499,7 @@ namespace {
 
       if (auto dtor = cxxRecordDecl->getDestructor()) {
         if (dtor->isDeleted() || dtor->getAccess() != clang::AS_public) {
-          result->setIsCxxNonTrivial(true);
+          return nullptr;
         }
       }
     }
diff --git a/lib/Demangling/Demangler.cpp b/lib/Demangling/Demangler.cpp
index 7ce2fad68c7f8..f80f9c677bba0 100644
--- a/lib/Demangling/Demangler.cpp
+++ b/lib/Demangling/Demangler.cpp
@@ -2266,9 +2266,14 @@ NodePointer Demangler::popProtocolConformance() {
       NodePointer type = popNode(Node::Kind::Type);
       return createWithChild(Node::Kind::CoroutineContinuationPrototype, type);
     }
-    case 'z': {
-      NodePointer implType = popNode(Node::Kind::ImplFunctionType);
-      return createWithChild(Node::Kind::ObjCAsyncCompletionHandlerImpl, implType);
+    case 'z':
+    case 'Z': {
+      NodePointer resultType = popNode(Node::Kind::Type);
+      NodePointer implType = popNode(Node::Kind::Type);
+      return createWithChildren(c == 'z'
+                                   ? Node::Kind::ObjCAsyncCompletionHandlerImpl
+                                   : Node::Kind::PredefinedObjCAsyncCompletionHandlerImpl,
+                                 implType, resultType);
     }
     case 'V': {
       NodePointer Base = popNode(isEntity);
diff --git a/lib/Demangling/NodePrinter.cpp b/lib/Demangling/NodePrinter.cpp
index 6cc54cad72f08..1c1e0a06c2d72 100644
--- a/lib/Demangling/NodePrinter.cpp
+++ b/lib/Demangling/NodePrinter.cpp
@@ -455,6 +455,7 @@ class NodePrinter {
     case Node::Kind::PartialApplyForwarder:
     case Node::Kind::PartialApplyObjCForwarder:
     case Node::Kind::PostfixOperator:
+    case Node::Kind::PredefinedObjCAsyncCompletionHandlerImpl:
     case Node::Kind::PrefixOperator:
     case Node::Kind::PrivateDeclName:
     case Node::Kind::PropertyDescriptor:
@@ -2534,9 +2535,14 @@ NodePointer NodePrinter::print(NodePointer Node, bool asPrefixContext) {
       Printer << ')';
     }
     return nullptr;
+  case Node::Kind::PredefinedObjCAsyncCompletionHandlerImpl:
+    Printer << "predefined ";
+    LLVM_FALLTHROUGH;
   case Node::Kind::ObjCAsyncCompletionHandlerImpl:
     Printer << "@objc completion handler block implementation for ";
     print(Node->getChild(0));
+    Printer << " with result type ";
+    print(Node->getChild(1));
     return nullptr;
   case Node::Kind::CanonicalPrespecializedGenericTypeCachingOnceToken:
     Printer << "flag for loading of canonical specialized generic type "
diff --git a/lib/Demangling/OldRemangler.cpp b/lib/Demangling/OldRemangler.cpp
index 114e01d8f984e..747954cf2e169 100644
--- a/lib/Demangling/OldRemangler.cpp
+++ b/lib/Demangling/OldRemangler.cpp
@@ -2166,6 +2166,9 @@ void Remangler::mangleGlobalVariableOnceFunction(Node *node) {
 void Remangler::mangleGlobalVariableOnceDeclList(Node *node) {
   unreachable("unsupported");
 }
+void Remangler::manglePredefinedObjCAsyncCompletionHandlerImpl(Node *node) {
+  unreachable("unsupported");
+}
 void Remangler::mangleObjCAsyncCompletionHandlerImpl(Node *node) {
   unreachable("unsupported");
 }
diff --git a/lib/Demangling/Remangler.cpp b/lib/Demangling/Remangler.cpp
index 3af442346fb57..d55d8e77583e3 100644
--- a/lib/Demangling/Remangler.cpp
+++ b/lib/Demangling/Remangler.cpp
@@ -805,6 +805,11 @@ void Remangler::mangleCoroutineContinuationPrototype(Node *node) {
   Buffer << "TC";
 }
 
+void Remangler::manglePredefinedObjCAsyncCompletionHandlerImpl(Node *node) {
+  mangleChildNodes(node);
+  Buffer << "TZ";
+}
+
 void Remangler::mangleObjCAsyncCompletionHandlerImpl(Node *node) {
   mangleChildNodes(node);
   Buffer << "Tz";
diff --git a/lib/Frontend/ArgsToFrontendOptionsConverter.cpp b/lib/Frontend/ArgsToFrontendOptionsConverter.cpp
index 1c4e5febdb17e..99fcd27f62bd1 100644
--- a/lib/Frontend/ArgsToFrontendOptionsConverter.cpp
+++ b/lib/Frontend/ArgsToFrontendOptionsConverter.cpp
@@ -198,7 +198,9 @@ bool ArgsToFrontendOptionsConverter::convert(
 
   if (FrontendOptions::doesActionGenerateIR(Opts.RequestedAction) &&
       (Args.hasArg(OPT_experimental_skip_non_inlinable_function_bodies) ||
-       Args.hasArg(OPT_experimental_skip_all_function_bodies))) {
+       Args.hasArg(OPT_experimental_skip_all_function_bodies) ||
+       Args.hasArg(
+           OPT_experimental_skip_non_inlinable_function_bodies_without_types))) {
     Diags.diagnose(SourceLoc(), diag::cannot_emit_ir_skipping_function_bodies);
     return true;
   }
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index d1f671d02cb9e..1ce76d05fdde3 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -729,6 +729,12 @@ static bool ParseTypeCheckerArgs(TypeCheckerOptions &Opts, ArgList &Args,
   Opts.DebugTimeExpressions |=
       Args.hasArg(OPT_debug_time_expression_type_checking);
 
+  // Check for SkipFunctionBodies arguments in order from skipping less to
+  // skipping more.
+  if (Args.hasArg(
+          OPT_experimental_skip_non_inlinable_function_bodies_without_types))
+    Opts.SkipFunctionBodies = FunctionBodySkipping::NonInlinableWithoutTypes;
+
   // If asked to perform InstallAPI, go ahead and enable non-inlinable function
   // body skipping.
   if (Args.hasArg(OPT_experimental_skip_non_inlinable_function_bodies) ||
diff --git a/lib/FrontendTool/CMakeLists.txt b/lib/FrontendTool/CMakeLists.txt
index 26b37f661b82b..67e725145dedb 100644
--- a/lib/FrontendTool/CMakeLists.txt
+++ b/lib/FrontendTool/CMakeLists.txt
@@ -2,6 +2,8 @@ set_swift_llvm_is_available()
 add_swift_host_library(swiftFrontendTool STATIC
   FrontendTool.cpp
   ImportedModules.cpp
+  LoadedModuleTrace.cpp
+  MakeStyleDependencies.cpp
   ScanDependencies.cpp
   TBD.cpp)
 add_dependencies(swiftFrontendTool
diff --git a/lib/FrontendTool/Dependencies.h b/lib/FrontendTool/Dependencies.h
new file mode 100644
index 0000000000000..b6ad92fb44467
--- /dev/null
+++ b/lib/FrontendTool/Dependencies.h
@@ -0,0 +1,38 @@
+//===--- Dependencies.h -- Unified header for dependency tracing utilities ===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SWIFT_FRONTENDTOOL_DEPENDENCIES_H
+#define SWIFT_FRONTENDTOOL_DEPENDENCIES_H
+
+namespace swift {
+
+class ASTContext;
+class DependencyTracker;
+class DiagnosticEngine;
+class FrontendOptions;
+class InputFile;
+class ModuleDecl;
+
+/// Emit the names of the modules imported by \c mainModule.
+bool emitImportedModules(ModuleDecl *mainModule, const FrontendOptions &opts);
+bool emitMakeDependenciesIfNeeded(DiagnosticEngine &diags,
+                                  DependencyTracker *depTracker,
+                                  const FrontendOptions &opts,
+                                  const InputFile &input);
+bool emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule,
+                                   DependencyTracker *depTracker,
+                                   const FrontendOptions &opts,
+                                   const InputFile &input);
+
+} // end namespace swift
+
+#endif
diff --git a/lib/FrontendTool/FrontendTool.cpp b/lib/FrontendTool/FrontendTool.cpp
index 944895b28230f..750b80d2e3e3f 100644
--- a/lib/FrontendTool/FrontendTool.cpp
+++ b/lib/FrontendTool/FrontendTool.cpp
@@ -21,7 +21,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "swift/FrontendTool/FrontendTool.h"
-#include "ImportedModules.h"
+#include "Dependencies.h"
 #include "ScanDependencies.h"
 #include "TBD.h"
 
@@ -41,7 +41,6 @@
 #include "swift/Basic/Dwarf.h"
 #include "swift/Basic/Edit.h"
 #include "swift/Basic/FileSystem.h"
-#include "swift/Basic/JSONSerialization.h"
 #include "swift/Basic/LLVMInitialize.h"
 #include "swift/Basic/Platform.h"
 #include "swift/Basic/PrettyStackTrace.h"
@@ -49,7 +48,6 @@
 #include "swift/Basic/Statistic.h"
 #include "swift/Basic/UUID.h"
 #include "swift/Option/Options.h"
-#include "swift/Frontend/DiagnosticVerifier.h"
 #include "swift/Frontend/Frontend.h"
 #include "swift/Frontend/PrintingDiagnosticConsumer.h"
 #include "swift/Frontend/SerializedDiagnosticConsumer.h"
@@ -64,43 +62,28 @@
 #include "swift/Serialization/SerializationOptions.h"
 #include "swift/Serialization/SerializedModuleLoader.h"
 #include "swift/SILOptimizer/PassManager/Passes.h"
-#include "swift/SIL/SILRemarkStreamer.h"
 #include "swift/Syntax/Serialization/SyntaxSerialization.h"
 #include "swift/Syntax/SyntaxNodes.h"
 #include "swift/TBDGen/TBDGen.h"
 #include "swift/SIL/ModuleSummary.h"
 #include "swift/Serialization/ModuleSummary.h"
-#include "clang/AST/ASTContext.h"
-#include "clang/Basic/Module.h"
-
 #include "llvm/ADT/Statistic.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IRReader/IRReader.h"
 #include "llvm/Option/Option.h"
 #include "llvm/Option/OptTable.h"
-#include "llvm/Remarks/RemarkSerializer.h"
 #include "llvm/Support/Error.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/TargetSelect.h"
-#include "llvm/Support/Timer.h"
-#include "llvm/Support/YAMLTraits.h"
-#include "llvm/Target/TargetMachine.h"
 
 #include
 #include
 #include
 #include
 
-#if !defined(_MSC_VER) && !defined(__MINGW32__)
-#include <unistd.h>
-#else
-#include <io.h>
-#endif
-
 using namespace swift;
 
 static std::string displayName(StringRef MainExecutablePath) {
@@ -109,877 +92,23 @@ static std::string displayName(StringRef MainExecutablePath) {
   return Name;
 }
 
-StringRef
-swift::frontend::utils::escapeForMake(StringRef raw,
-                                      llvm::SmallVectorImpl<char> &buffer) {
-  buffer.clear();
-
-  // The escaping rules for GNU make are complicated due to the various
-  // subsitutions and use of the tab in the leading position for recipes.
-  // Various symbols have significance in different contexts. It is not
-  // possible to correctly quote all characters in Make (as of 3.7). Match
-  // gcc and clang's behaviour for the escaping which covers only a subset of
-  // characters.
- for (unsigned I = 0, E = raw.size(); I != E; ++I) { - switch (raw[I]) { - case '#': // Handle '#' the broken GCC way - buffer.push_back('\\'); - break; - - case ' ': - for (unsigned J = I; J && raw[J - 1] == '\\'; --J) - buffer.push_back('\\'); - buffer.push_back('\\'); - break; - - case '$': // $ is escaped by $ - buffer.push_back('$'); - break; - } - buffer.push_back(raw[I]); - } - buffer.push_back('\0'); - - return buffer.data(); -} - -/// This sorting function is used to stabilize the order in which dependencies -/// are emitted into \c .d files that are consumed by external build systems. -/// This serves to eliminate order as a source of non-determinism in these -/// outputs. -/// -/// The exact sorting predicate is not important. Currently, it is a -/// lexicographic comparison that reverses the provided strings before applying -/// the sorting predicate. This has the benefit of being somewhat -/// invariant with respect to the installation location of various system -/// components. e.g. on two systems, the same file identified by two different -/// paths differing only in their relative install location such as -/// -/// /Applications/MyXcode.app/Path/To/A/Framework/In/The/SDK/Header.h -/// /Applications/Xcodes/AnotherXcode.app/Path/To/A/Framework/In/The/SDK/Header.h -/// -/// should appear in roughly the same order relative to other paths. Ultimately, -/// this makes it easier to test the contents of the emitted files with tools -/// like FileCheck. -static std::vector -reversePathSortedFilenames(const ArrayRef elts) { - std::vector tmp(elts.begin(), elts.end()); - std::sort(tmp.begin(), tmp.end(), [](const std::string &a, - const std::string &b) -> bool { - return std::lexicographical_compare(a.rbegin(), a.rend(), - b.rbegin(), b.rend()); - }); - return tmp; -} - -/// Emits a Make-style dependencies file. -static bool emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, - DependencyTracker *depTracker, - const FrontendOptions &opts, - const InputFile &input) { - const std::string &dependenciesFilePath = input.dependenciesFilePath(); - if (dependenciesFilePath.empty()) - return false; - - std::error_code EC; - llvm::raw_fd_ostream out(dependenciesFilePath, EC, llvm::sys::fs::F_None); - - if (out.has_error() || EC) { - diags.diagnose(SourceLoc(), diag::error_opening_output, - dependenciesFilePath, EC.message()); - out.clear_error(); - return true; - } - - llvm::SmallString<256> buffer; - - // collect everything in memory to avoid redundant work - // when there are multiple targets - std::string dependencyString; - - // First include all other files in the module. Make-style dependencies - // need to be conservative! - auto inputPaths = - reversePathSortedFilenames(opts.InputsAndOutputs.getInputFilenames()); - for (auto const &path : inputPaths) { - dependencyString.push_back(' '); - dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); - } - // Then print dependencies we've picked up during compilation. 
- auto dependencyPaths = - reversePathSortedFilenames(depTracker->getDependencies()); - for (auto const &path : dependencyPaths) { - dependencyString.push_back(' '); - dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); - } - auto incrementalDependencyPaths = - reversePathSortedFilenames(depTracker->getIncrementalDependencies()); - for (auto const &path : incrementalDependencyPaths) { - dependencyString.push_back(' '); - dependencyString.append(frontend::utils::escapeForMake(path, buffer).str()); - } - - // FIXME: Xcode can't currently handle multiple targets in a single - // dependency line. - opts.forAllOutputPaths(input, [&](const StringRef targetName) { - auto targetNameEscaped = frontend::utils::escapeForMake(targetName, buffer); - out << targetNameEscaped << " :" << dependencyString << '\n'; - }); - - return false; -} - static void emitMakeDependenciesIfNeeded(DiagnosticEngine &diags, DependencyTracker *depTracker, const FrontendOptions &opts) { opts.InputsAndOutputs.forEachInputProducingSupplementaryOutput( [&](const InputFile &f) -> bool { - return emitMakeDependenciesIfNeeded(diags, depTracker, opts, f); + return swift::emitMakeDependenciesIfNeeded(diags, depTracker, opts, f); }); } -// MARK: - Module Trace - -namespace { -struct SwiftModuleTraceInfo { - Identifier Name; - std::string Path; - bool IsImportedDirectly; - bool SupportsLibraryEvolution; -}; - -struct LoadedModuleTraceFormat { - static const unsigned CurrentVersion = 2; - unsigned Version; - Identifier Name; - std::string Arch; - std::vector SwiftModules; -}; -} - -namespace swift { -namespace json { -template <> struct ObjectTraits { - static void mapping(Output &out, SwiftModuleTraceInfo &contents) { - StringRef name = contents.Name.str(); - out.mapRequired("name", name); - out.mapRequired("path", contents.Path); - out.mapRequired("isImportedDirectly", contents.IsImportedDirectly); - out.mapRequired("supportsLibraryEvolution", - contents.SupportsLibraryEvolution); - } -}; - -// Version notes: -// 1. Keys: name, arch, swiftmodules -// 2. New keys: version, swiftmodulesDetailedInfo -template <> struct ObjectTraits { - static void mapping(Output &out, LoadedModuleTraceFormat &contents) { - out.mapRequired("version", contents.Version); - - StringRef name = contents.Name.str(); - out.mapRequired("name", name); - - out.mapRequired("arch", contents.Arch); - - // The 'swiftmodules' key is kept for backwards compatibility. - std::vector moduleNames; - for (auto &m : contents.SwiftModules) - moduleNames.push_back(m.Path); - out.mapRequired("swiftmodules", moduleNames); - - out.mapRequired("swiftmodulesDetailedInfo", contents.SwiftModules); - } -}; -} -} - -static bool isClangOverlayOf(ModuleDecl *potentialOverlay, - ModuleDecl *potentialUnderlying) { - return !potentialOverlay->isNonSwiftModule() - && potentialUnderlying->isNonSwiftModule() - && potentialOverlay->getName() == potentialUnderlying->getName(); -} - -// TODO: Delete this once changes from https://reviews.llvm.org/D83449 land on -// apple/llvm-project's swift/main branch. -template -static bool contains(const SetLike &setLike, Item item) { - return setLike.find(item) != setLike.end(); -} - -/// Get a set of modules imported by \p module. -/// -/// By default, all imports are included. 
-static void getImmediateImports( - ModuleDecl *module, - SmallPtrSetImpl &imports, - ModuleDecl::ImportFilter importFilter = { - ModuleDecl::ImportFilterKind::Exported, - ModuleDecl::ImportFilterKind::Default, - ModuleDecl::ImportFilterKind::ImplementationOnly, - ModuleDecl::ImportFilterKind::SPIAccessControl, - ModuleDecl::ImportFilterKind::ShadowedByCrossImportOverlay - }) { - SmallVector importList; - module->getImportedModules(importList, importFilter); - - for (ImportedModule &import : importList) - imports.insert(import.importedModule); -} - -namespace { -/// Helper type for computing (approximate) information about ABI-dependencies. -/// -/// This misses out on details such as typealiases and more. -/// See the "isImportedDirectly" field above for more details. -class ABIDependencyEvaluator { - /// Map of ABIs exported by a particular module, excluding itself. - /// - /// For example, consider (primed letters represent Clang modules): - /// \code - /// - A is @_exported-imported by B - /// - B is #imported by C' (via a compiler-generated umbrella header) - /// - C' is @_exported-imported by C (Swift overlay) - /// - D' is #imported by E' - /// - D' is @_exported-imported by D (Swift overlay) - /// - E' is @_exported-imported by E (Swift overlay) - /// \endcode - /// - /// Then the \c abiExportMap will be - /// \code - /// { A: {}, B: {A}, C: {B}, C': {B}, D: {}, D': {}, E: {D}, E': {D'} } - /// \endcode - /// - /// \b WARNING: Use \c reexposeImportedABI instead of inserting directly. - llvm::DenseMap> abiExportMap; - - /// Stack for depth-first traversal. - SmallVector searchStack; - - llvm::DenseSet visited; - - /// Helper function to handle invariant violations as crashes in debug mode. - void crashOnInvariantViolation( - llvm::function_ref f) const; - - /// Computes the ABI exports for \p importedModule and adds them to - /// \p module's ABI exports. - /// - /// If \p includeImportedModule is true, also adds \p importedModule to - /// \p module's ABI exports. - /// - /// Correct way to add entries to \c abiExportMap. - void reexposeImportedABI(ModuleDecl *module, ModuleDecl *importedModule, - bool includeImportedModule = true); - - /// Check if a Swift module is an overlay for some Clang module. - /// - /// FIXME: Delete this hack once SR-13363 is fixed and ModuleDecl has the - /// right API which we can use directly. - bool isOverlayOfClangModule(ModuleDecl *swiftModule); - - /// Check for cases where we have a fake cycle through an overlay. - /// - /// Sometimes, we have fake cycles in the import graph due to the Clang - /// importer injecting overlays between Clang modules. These don't represent - /// an actual cycle in the build, so we should ignore them. - /// - /// We check this lazily after detecting a cycle because it is difficult to - /// determine at the point where we see the overlay whether it was incorrectly - /// injected by the Clang importer or whether any of its imports will - /// eventually lead to a cycle. - /// - /// For more details, see [NOTE: ABIDependencyEvaluator-fake-cycle-detection] - /// - /// \param startOfCycle A pointer to the element of \c searchStack where - /// the module \em first appeared. - /// - /// \pre The module on top of \c searchStack is the same module as - /// *startOfCycle. - /// - /// \pre searchStack.begin() <= startOfCycle < searchStack.end() - bool isFakeCycleThroughOverlay(ModuleDecl **startOfCycle); - - /// Recursive step in computing ABI dependencies. 
- /// - /// Use this method instead of using the \c forClangModule/\c forSwiftModule - /// methods. - void computeABIDependenciesForModule(ModuleDecl *module); - void computeABIDependenciesForSwiftModule(ModuleDecl *module); - void computeABIDependenciesForClangModule(ModuleDecl *module); - - static void printModule(const ModuleDecl *module, llvm::raw_ostream &os); - - template - static void printModuleSet(const SetLike &set, llvm::raw_ostream &os); - -public: - ABIDependencyEvaluator() = default; - ABIDependencyEvaluator(const ABIDependencyEvaluator &) = delete; - ABIDependencyEvaluator(ABIDependencyEvaluator &&) = default; - - void getABIDependenciesForSwiftModule( - ModuleDecl *module, SmallPtrSetImpl &abiDependencies); - - void printABIExportMap(llvm::raw_ostream &os) const; -}; -} // end anonymous namespace - -// See [NOTE: Bailing-vs-crashing-in-trace-emission]. -// TODO: Use PrettyStackTrace instead? -void ABIDependencyEvaluator::crashOnInvariantViolation( - llvm::function_ref f) const { -#ifndef NDEBUG - std::string msg; - llvm::raw_string_ostream os(msg); - os << "error: invariant violation: "; - f(os); - llvm::report_fatal_error(os.str()); -#endif -} - -// [NOTE: Trace-Clang-submodule-complexity] -// -// A Clang module may have zero or more submodules. In practice, when traversing -// the imports of a module, we observe that different submodules of the same -// top-level module (almost) freely import each other. Despite this, we still -// need to conceptually traverse the tree formed by the submodule relationship -// (with the top-level module being the root). -// -// This needs to be taken care of in two ways: -// 1. We need to make sure we only go towards the leaves. It's okay if we "jump" -// branches, so long as we don't try to visit an ancestor when one of its -// descendants is still on the traversal stack, so that we don't end up with -// arbitrarily complex intra-module cycles. -// See also: [NOTE: Intra-module-leafwards-traversal]. -// 2. When adding entries to the ABI export map, we need to avoid marking -// dependencies within the same top-level module. This step is needed in -// addition to step 1 to avoid creating cycles like -// Overlay -> Underlying -> Submodule -> Overlay. - -void ABIDependencyEvaluator::reexposeImportedABI( - ModuleDecl *module, ModuleDecl *importedModule, - bool includeImportedModule) { - if (module == importedModule) { - crashOnInvariantViolation([&](llvm::raw_string_ostream &os) { - os << "module "; printModule(module, os); os << " imports itself!\n"; - }); - return; - } - - auto addToABIExportMap = [this](ModuleDecl *module, ModuleDecl *reexport) { - if (module == reexport) { - crashOnInvariantViolation([&](llvm::raw_string_ostream &os){ - os << "expected module "; printModule(reexport, os); - os << " to not re-export itself\n"; - }); - return; - } - if (reexport->isNonSwiftModule() - && module->isNonSwiftModule() - && module->getTopLevelModule() == reexport->getTopLevelModule()) { - // Dependencies within the same top-level Clang module are not useful. - // See also: [NOTE: Trace-Clang-submodule-complexity]. - return; - } - - // We only care about dependencies across top-level modules and we want to - // avoid exploding abiExportMap with submodules. So we only insert entries - // after calling getTopLevelModule(). - - if (::isClangOverlayOf(module, reexport)) { - // For overlays, we need to have a dependency on the underlying module. - // Otherwise, we might accidentally create a Swift -> Swift cycle. 
- abiExportMap[module].insert( - reexport->getTopLevelModule(/*preferOverlay*/false)); - return; - } - abiExportMap[module].insert( - reexport->getTopLevelModule(/*preferOverlay*/true)); - }; - - computeABIDependenciesForModule(importedModule); - if (includeImportedModule) { - addToABIExportMap(module, importedModule); - } - // Force creation of default value if missing. This prevents abiExportMap from - // growing (and moving) when calling addToABIExportMap. If abiExportMap gets - // moved, then abiExportMap[importedModule] will be moved, forcing us to - // create a defensive copy to avoid iterator invalidation on move. - (void)abiExportMap[module]; - for (auto reexportedModule: abiExportMap[importedModule]) - addToABIExportMap(module, reexportedModule); -} - -bool ABIDependencyEvaluator::isOverlayOfClangModule(ModuleDecl *swiftModule) { - assert(!swiftModule->isNonSwiftModule()); - - llvm::SmallPtrSet importList; - ::getImmediateImports(swiftModule, importList, - {ModuleDecl::ImportFilterKind::Exported}); - bool isOverlay = - llvm::any_of(importList, [&](ModuleDecl *importedModule) -> bool { - return isClangOverlayOf(swiftModule, importedModule); - }); - return isOverlay; -} - -// [NOTE: ABIDependencyEvaluator-fake-cycle-detection] -// -// First, let's consider a concrete example. -// - In Clang-land, ToyKit #imports CoreDoll. -// - The Swift overlay for CoreDoll imports both CoreDoll and ToyKit. -// Importing ToyKit from CoreDoll's overlay informally violates the layering -// of frameworks, but it doesn't actually create any cycles in the build -// dependencies. -// ┌───────────────────────────┐ -// ┌───│ CoreDoll.swiftmodule │ -// │ └───────────────────────────┘ -// │ │ -// import ToyKit @_exported import CoreDoll -// │ │ -// │ │ -// ▼ │ -// ┌──────────────────────────┐ │ -// │ ToyKit (ToyKit/ToyKit.h) │ │ -// └──────────────────────────┘ │ -// │ │ -// #import │ -// │ │ -// ▼ │ -// ┌──────────────────────────────┐ │ -// │CoreDoll (CoreDoll/CoreDoll.h)│◀──┘ -// └──────────────────────────────┘ -// -// Say we are trying to build a Swift module that imports ToyKit. Due to how -// module loading works, the Clang importer inserts the CoreDoll overlay -// between the ToyKit and CoreDoll Clang modules, creating a cycle in the -// import graph. -// -// ┌──────────────────────────┐ -// │ ToyKit (ToyKit/ToyKit.h) │◀──────────┐ -// └──────────────────────────┘ │ -// │ │ -// #import import ToyKit -// │ │ -// ▼ │ -// ┌────────────────────────────┐ │ -// │ CoreDoll.swiftmodule │─────────┘ -// └────────────────────────────┘ -// │ -// @_exported import CoreDoll -// │ -// ▼ -// ┌──────────────────────────────┐ -// │CoreDoll (CoreDoll/CoreDoll.h)│ -// └──────────────────────────────┘ -// -// This means that, at some point, searchStack will look like: -// -// [others] → ToyKit → CoreDoll (overlay) → ToyKit -// -// In the general case, there may be arbitrarily many modules in the cycle, -// including submodules. -// -// [others] → ToyKit → [others] → CoreDoll (overlay) → [others] → ToyKit -// -// where "[others]" indicates 0 or more modules of any kind. -// -// To detect this, we check that the start of the cycle is a Clang module and -// that there is at least one overlay between it and its recurrence at the end -// of the searchStack. If so, we assume we have detected a benign cycle which -// can be safely ignored. 
- -bool ABIDependencyEvaluator::isFakeCycleThroughOverlay( - ModuleDecl **startOfCycle) { - assert(startOfCycle >= searchStack.begin() && - startOfCycle < searchStack.end() && - "startOfCycleIter points to an element in searchStack"); - // The startOfCycle module must be a Clang module. - if (!(*startOfCycle)->isNonSwiftModule()) - return false; - // Next, we must have zero or more modules followed by a Swift overlay for a - // Clang module. - return std::any_of(startOfCycle + 1, searchStack.end(), - [this](ModuleDecl *module) { - return !module->isNonSwiftModule() && - isOverlayOfClangModule(module); - }); -} - -void ABIDependencyEvaluator::computeABIDependenciesForModule( - ModuleDecl *module) { - auto moduleIter = llvm::find(searchStack, module); - if (moduleIter != searchStack.end()) { - if (isFakeCycleThroughOverlay(moduleIter)) - return; - crashOnInvariantViolation([&](llvm::raw_string_ostream &os) { - os << "unexpected cycle in import graph!\n"; - for (auto m: searchStack) { - printModule(m, os); - if (!m->isNonSwiftModule()) { - os << " (isOverlay = " << isOverlayOfClangModule(m) << ")"; - } - os << "\ndepends on "; - } - printModule(module, os); os << '\n'; - }); - return; - } - if (::contains(visited, module)) - return; - searchStack.push_back(module); - if (module->isNonSwiftModule()) - computeABIDependenciesForClangModule(module); - else - computeABIDependenciesForSwiftModule(module); - searchStack.pop_back(); - visited.insert(module); -} - -void ABIDependencyEvaluator::computeABIDependenciesForSwiftModule( - ModuleDecl *module) { - SmallPtrSet allImports; - ::getImmediateImports(module, allImports); - for (auto import: allImports) { - computeABIDependenciesForModule(import); - if (::isClangOverlayOf(module, import)) { - reexposeImportedABI(module, import, - /*includeImportedModule=*/false); - } - } - - SmallPtrSet reexportedImports; - ::getImmediateImports(module, reexportedImports, - {ModuleDecl::ImportFilterKind::Exported}); - for (auto reexportedImport: reexportedImports) { - reexposeImportedABI(module, reexportedImport); - } -} - -void ABIDependencyEvaluator::computeABIDependenciesForClangModule( - ModuleDecl *module) { - SmallPtrSet imports; - ::getImmediateImports(module, imports); - for (auto import: imports) { - // There are three cases here which can potentially create cycles: - // - // 1. Clang modules importing the stdlib. - // See [NOTE: Pure-Clang-modules-privately-import-stdlib]. - // 2. Overlay S @_exported-imports underlying module S' and another Clang - // module C'. C' (transitively) #imports S' but it gets treated as if - // C' imports S. This creates a cycle: S -> C' -> ... -> S. - // In practice, this case is hit for - // Darwin (Swift) -> SwiftOverlayShims (Clang) -> Darwin (Swift). - // We may also hit this in a slightly different direction, in case - // the module directly imports SwiftOverlayShims: - // SwiftOverlayShims -> Darwin (Swift) -> SwiftOverlayShims - // The latter is handled later by isFakeCycleThroughOverlay. - // 3. [NOTE: Intra-module-leafwards-traversal] - // Cycles within the same top-level module. - // These don't matter for us, since we only care about the dependency - // graph at the granularity of top-level modules. So we ignore these - // by only considering parent -> submodule dependencies. - // See also [NOTE: Trace-Clang-submodule-complexity]. 
- if (import->isStdlibModule()) { - continue; - } - if (!import->isNonSwiftModule() && isOverlayOfClangModule(import) && - llvm::find(searchStack, import) != searchStack.end()) { - continue; - } - if (import->isNonSwiftModule() - && module->getTopLevelModule() == import->getTopLevelModule() - && (module == import - || !import->findUnderlyingClangModule() - ->isSubModuleOf(module->findUnderlyingClangModule()))) { - continue; - } - computeABIDependenciesForModule(import); - reexposeImportedABI(module, import); - } -} - -void ABIDependencyEvaluator::getABIDependenciesForSwiftModule( - ModuleDecl *module, SmallPtrSetImpl &abiDependencies) { - computeABIDependenciesForModule(module); - SmallPtrSet allImports; - ::getImmediateImports(module, allImports); - for (auto directDependency: allImports) { - abiDependencies.insert(directDependency); - for (auto exposedDependency: abiExportMap[directDependency]) { - abiDependencies.insert(exposedDependency); - } - } -} - -void ABIDependencyEvaluator::printModule( - const ModuleDecl *module, llvm::raw_ostream &os) { - module->getReverseFullModuleName().printForward(os); - os << (module->isNonSwiftModule() ? " (Clang)" : " (Swift)"); - os << " @ " << llvm::format("0x%llx", reinterpret_cast(module)); -} - -template -void ABIDependencyEvaluator::printModuleSet( - const SetLike &set, llvm::raw_ostream &os) { - os << "{ "; - for (auto module: set) { - printModule(module, os); os << ", "; - } - os << "}"; -} - -void ABIDependencyEvaluator::printABIExportMap(llvm::raw_ostream &os) const { - os << "ABI Export Map {{\n"; - for (auto &entry: abiExportMap) { - printModule(entry.first, os); os << " : "; - printModuleSet(entry.second, os); - os << "\n"; - } - os << "}}\n"; -} - -/// Compute the per-module information to be recorded in the trace file. -// -// The most interesting/tricky thing here is _which_ paths get recorded in -// the trace file as dependencies. It depends on how the module was synthesized. -// The key points are: -// -// 1. Paths to swiftmodules in the module cache or in the prebuilt cache are not -// recorded - Precondition: the corresponding path to the swiftinterface must -// already be present as a key in pathToModuleDecl. -// 2. swiftmodules next to a swiftinterface are saved if they are up-to-date. -// -// FIXME: Use the VFS instead of handling paths directly. We are particularly -// sloppy about handling relative paths in the dependency tracker. -static void computeSwiftModuleTraceInfo( - const SmallPtrSetImpl &abiDependencies, - const llvm::DenseMap &pathToModuleDecl, - const DependencyTracker &depTracker, - StringRef prebuiltCachePath, - std::vector &traceInfo) { - - SmallString<256> buffer; - - std::string errMsg; - llvm::raw_string_ostream err(errMsg); - - // FIXME: Use PrettyStackTrace instead. - auto errorUnexpectedPath = - [&pathToModuleDecl](llvm::raw_string_ostream &errStream) { - errStream << "The module <-> path mapping we have is:\n"; - for (auto &m: pathToModuleDecl) - errStream << m.second->getName() << " <-> " << m.first << '\n'; - llvm::report_fatal_error(errStream.str()); - }; - - using namespace llvm::sys; - - auto computeAdjacentInterfacePath = [](SmallVectorImpl &modPath) { - auto swiftInterfaceExt = - file_types::getExtension(file_types::TY_SwiftModuleInterfaceFile); - path::replace_extension(modPath, swiftInterfaceExt); - }; - - for (auto &depPath : depTracker.getDependencies()) { - - // Decide if this is a swiftmodule based on the extension of the raw - // dependency path, as the true file may have a different one. 
- // For example, this might happen when the canonicalized path points to - // a Content Addressed Storage (CAS) location. - auto moduleFileType = - file_types::lookupTypeForExtension(path::extension(depPath)); - auto isSwiftmodule = - moduleFileType == file_types::TY_SwiftModuleFile; - auto isSwiftinterface = - moduleFileType == file_types::TY_SwiftModuleInterfaceFile; - - if (!(isSwiftmodule || isSwiftinterface)) - continue; - - auto dep = pathToModuleDecl.find(depPath); - if (dep != pathToModuleDecl.end()) { - // Great, we recognize the path! Check if the file is still around. - - ModuleDecl *depMod = dep->second; - if(depMod->isResilient() && !isSwiftinterface) { - // FIXME: Ideally, we would check that the swiftmodule has a - // swiftinterface next to it. Tracked by rdar://problem/56351399. - } - - // FIXME: Better error handling - StringRef realDepPath - = fs::real_path(depPath, buffer, /*expand_tile*/true) - ? StringRef(depPath) // Couldn't find the canonical path, assume - // this is good enough. - : buffer.str(); - - bool isImportedDirectly = ::contains(abiDependencies, depMod); - - traceInfo.push_back( - {/*Name=*/ - depMod->getName(), - /*Path=*/ - realDepPath.str(), - // TODO: There is an edge case which is not handled here. - // When we build a framework using -import-underlying-module, or an - // app/test using -import-objc-header, we should look at the direct - // imports of the bridging modules, and mark those as our direct - // imports. - // TODO: Add negative test cases for the comment above. - // TODO: Describe precise semantics of "isImportedDirectly". - /*IsImportedDirectly=*/ - isImportedDirectly, - /*SupportsLibraryEvolution=*/ - depMod->isResilient()}); - buffer.clear(); - - continue; - } - - // If the depTracker had an interface, that means that we must've - // built a swiftmodule from that interface, so we should have that - // filename available. - if (isSwiftinterface) { - err << "Unexpected path for swiftinterface file:\n" << depPath << "\n"; - errorUnexpectedPath(err); - } - - // Skip cached modules in the prebuilt cache. We will add the corresponding - // swiftinterface from the SDK directly, but this isn't checked. :-/ - // - // FIXME: This is incorrect if both paths are not relative w.r.t. to the - // same root. - if (StringRef(depPath).startswith(prebuiltCachePath)) - continue; - - // If we have a swiftmodule next to an interface, that interface path will - // be saved (not checked), so don't save the path to this swiftmodule. - SmallString<256> moduleAdjacentInterfacePath(depPath); - computeAdjacentInterfacePath(moduleAdjacentInterfacePath); - if (::contains(pathToModuleDecl, moduleAdjacentInterfacePath)) - continue; - - // FIXME: The behavior of fs::exists for relative paths is undocumented. - // Use something else instead? - if (fs::exists(moduleAdjacentInterfacePath)) { - // This should be an error but it is not because of funkiness around - // compatible modules such as us having both armv7s.swiftinterface - // and armv7.swiftinterface in the dependency tracker. - continue; - } - buffer.clear(); - - // We might land here when we have a arm.swiftmodule in the cache path - // which added a dependency on a arm.swiftinterface (which was not loaded). - } - - // Almost a re-implementation of reversePathSortedFilenames :(. 
- std::sort( - traceInfo.begin(), traceInfo.end(), - [](const SwiftModuleTraceInfo &m1, const SwiftModuleTraceInfo &m2) -> bool { - return std::lexicographical_compare( - m1.Path.rbegin(), m1.Path.rend(), - m2.Path.rbegin(), m2.Path.rend()); - }); -} - -// [NOTE: Bailing-vs-crashing-in-trace-emission] There are certain edge cases -// in trace emission where an invariant that you think should hold does not hold -// in practice. For example, sometimes we have seen modules without any -// corresponding filename. -// -// Since the trace is a supplementary output for build system consumption, it -// it better to emit it on a best-effort basis instead of crashing and failing -// the build. -// -// Moreover, going forward, it would be nice if trace emission were more robust -// so we could emit the trace on a best-effort basis even if the dependency -// graph is ill-formed, so that the trace can be used as a debugging aid. -static bool emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule, - DependencyTracker *depTracker, - StringRef prebuiltCachePath, - StringRef loadedModuleTracePath) { - ASTContext &ctxt = mainModule->getASTContext(); - assert(!ctxt.hadError() - && "We should've already exited earlier if there was an error."); - - if (loadedModuleTracePath.empty()) - return false; - std::error_code EC; - llvm::raw_fd_ostream out(loadedModuleTracePath, EC, llvm::sys::fs::F_Append); - - if (out.has_error() || EC) { - ctxt.Diags.diagnose(SourceLoc(), diag::error_opening_output, - loadedModuleTracePath, EC.message()); - out.clear_error(); - return true; - } - - SmallPtrSet abiDependencies; - { - ABIDependencyEvaluator evaluator{}; - evaluator.getABIDependenciesForSwiftModule(mainModule, - abiDependencies); - } - - llvm::DenseMap pathToModuleDecl; - for (const auto &module : ctxt.getLoadedModules()) { - ModuleDecl *loadedDecl = module.second; - if (!loadedDecl) - llvm::report_fatal_error("Expected loaded modules to be non-null."); - if (loadedDecl == mainModule) - continue; - if (loadedDecl->getModuleFilename().empty()) { - // FIXME: rdar://problem/59853077 - // Ideally, this shouldn't happen. As a temporary workaround, avoid - // crashing with a message while we investigate the problem. - llvm::errs() << "WARNING: Module '" << loadedDecl->getName().str() - << "' has an empty filename. This is probably an " - << "invariant violation.\n" - << "Please report it as a compiler bug.\n"; - continue; - } - pathToModuleDecl.insert( - std::make_pair(loadedDecl->getModuleFilename(), loadedDecl)); - } - - std::vector swiftModules; - computeSwiftModuleTraceInfo(abiDependencies, - pathToModuleDecl, *depTracker, - prebuiltCachePath, swiftModules); - - LoadedModuleTraceFormat trace = { - /*version=*/LoadedModuleTraceFormat::CurrentVersion, - /*name=*/mainModule->getName(), - /*arch=*/ctxt.LangOpts.Target.getArchName().str(), swiftModules}; - - // raw_fd_ostream is unbuffered, and we may have multiple processes writing, - // so first write to memory and then dump the buffer to the trace file. 
- std::string stringBuffer; - { - llvm::raw_string_ostream memoryBuffer(stringBuffer); - json::Output jsonOutput(memoryBuffer, /*UserInfo=*/{}, - /*PrettyPrint=*/false); - json::jsonize(jsonOutput, trace, /*Required=*/true); - } - stringBuffer += "\n"; - out << stringBuffer; - - return true; -} - static void emitLoadedModuleTraceForAllPrimariesIfNeeded(ModuleDecl *mainModule, DependencyTracker *depTracker, const FrontendOptions &opts) { opts.InputsAndOutputs.forEachInputProducingSupplementaryOutput( [&](const InputFile &input) -> bool { - return emitLoadedModuleTraceIfNeeded( - mainModule, depTracker, opts.PrebuiltModuleCachePath, - input.loadedModuleTracePath()); + return swift::emitLoadedModuleTraceIfNeeded(mainModule, depTracker, + opts, input); }); } @@ -1000,7 +129,7 @@ getFileOutputStream(StringRef OutputFilename, ASTContext &Ctx) { } /// Writes the Syntax tree to the given file -static bool emitSyntax(SourceFile &SF, StringRef OutputFilename) { +static bool emitSyntax(const SourceFile &SF, StringRef OutputFilename) { auto os = getFileOutputStream(OutputFilename, SF.getASTContext()); if (!os) return true; @@ -1094,8 +223,8 @@ class JSONFixitWriter public: JSONFixitWriter(std::string fixitsOutputPath, const DiagnosticOptions &DiagOpts) - : FixitsOutputPath(fixitsOutputPath), - FixitAll(DiagOpts.FixitCodeForAllDiagnostics) {} + : FixitsOutputPath(std::move(fixitsOutputPath)), + FixitAll(DiagOpts.FixitCodeForAllDiagnostics) {} private: void handleDiagnostic(SourceManager &SM, @@ -1323,10 +452,9 @@ static bool compileLLVMIR(CompilerInstance &Instance) { Module.get(), inputsAndOutputs.getSingleOutputFilename()); } -static void verifyGenericSignaturesIfNeeded(const CompilerInvocation &Invocation, +static void verifyGenericSignaturesIfNeeded(const FrontendOptions &opts, ASTContext &Context) { - auto verifyGenericSignaturesInModule = - Invocation.getFrontendOptions().VerifyGenericSignaturesInModule; + auto verifyGenericSignaturesInModule = opts.VerifyGenericSignaturesInModule; if (verifyGenericSignaturesInModule.empty()) return; if (auto module = Context.getModuleByName(verifyGenericSignaturesInModule)) @@ -1794,7 +922,7 @@ static void performEndOfPipelineActions(CompilerInstance &Instance) { ctx.verifyAllLoadedModules(); // Verify generic signatures if we've been asked to. - verifyGenericSignaturesIfNeeded(Invocation, ctx); + verifyGenericSignaturesIfNeeded(Invocation.getFrontendOptions(), ctx); } // Emit any additional outputs that we only need for a successful compilation. 
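// The loaded-module trace routed through swift::emitLoadedModuleTraceIfNeeded
// above is one JSON record per main module. A minimal, runnable sketch of its
// shape, matching the ObjectTraits mappings in the new LoadedModuleTrace.cpp
// below; the module name, arch, and paths here are invented for illustration,
// not taken from any real build:

#include <iostream>

int main() {
  std::cout
      << R"({"version":2,"name":"MyApp","arch":"arm64",)"
      << R"("swiftmodules":["/usr/lib/swift/Swift.swiftmodule"],)"
      << R"("swiftmodulesDetailedInfo":[{"name":"Swift",)"
      << R"("path":"/usr/lib/swift/Swift.swiftmodule",)"
      << R"("isImportedDirectly":true,"supportsLibraryEvolution":true}]})"
      << "\n";
}

// The 'swiftmodules' array repeats the paths from 'swiftmodulesDetailedInfo';
// it is retained for consumers of the version-1 format.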
@@ -2484,10 +1612,13 @@ static void emitIndexDataForSourceFile(SourceFile *PrimarySourceFile,
     if (moduleToken.empty())
       moduleToken = opts.InputsAndOutputs.getSingleOutputFilename();
 
-    (void) index::indexAndRecord(Instance.getMainModule(), opts.InputsAndOutputs.copyOutputFilenames(),
+    (void) index::indexAndRecord(Instance.getMainModule(),
+                                 opts.InputsAndOutputs.copyOutputFilenames(),
                                  moduleToken, opts.IndexStorePath,
-                                 opts.IndexSystemModules, opts.IndexIgnoreStdlib,
-                                 isDebugCompilation, Invocation.getTargetTriple(),
+                                 opts.IndexSystemModules,
+                                 opts.IndexIgnoreStdlib,
+                                 isDebugCompilation,
+                                 Invocation.getTargetTriple(),
                                  *Instance.getDependencyTracker());
   }
 }
@@ -2555,11 +1686,12 @@ createSerializedDiagnosticConsumerIfNeeded(
   return createDispatchingDiagnosticConsumerIfNeeded(
       inputsAndOutputs,
       [](const InputFile &input) -> std::unique_ptr<DiagnosticConsumer> {
-        std::string serializedDiagnosticsPath = input.serializedDiagnosticsPath();
-        if (serializedDiagnosticsPath.empty())
-          return nullptr;
-        return serialized_diagnostics::createConsumer(serializedDiagnosticsPath);
-      });
+        auto serializedDiagnosticsPath = input.getSerializedDiagnosticsPath();
+        if (serializedDiagnosticsPath.empty())
+          return nullptr;
+        return serialized_diagnostics::createConsumer(
+            serializedDiagnosticsPath);
+      });
 }
 
 /// Creates a diagnostic consumer that handles serializing diagnostics, based on
@@ -2576,12 +1708,12 @@ createJSONFixItDiagnosticConsumerIfNeeded(
   return createDispatchingDiagnosticConsumerIfNeeded(
       invocation.getFrontendOptions().InputsAndOutputs,
       [&](const InputFile &input) -> std::unique_ptr<DiagnosticConsumer> {
-        std::string fixItsOutputPath = input.fixItsOutputPath();
-        if (fixItsOutputPath.empty())
-          return nullptr;
-        return std::make_unique<JSONFixitWriter>(
-            fixItsOutputPath, invocation.getDiagnosticOptions());
-      });
+        auto fixItsOutputPath = input.getFixItsOutputPath();
+        if (fixItsOutputPath.empty())
+          return nullptr;
+        return std::make_unique<JSONFixitWriter>(
+            fixItsOutputPath.str(), invocation.getDiagnosticOptions());
+      });
 }
 
 /// Print information about a
diff --git a/lib/FrontendTool/ImportedModules.cpp b/lib/FrontendTool/ImportedModules.cpp
index 857efcda8f98b..9316af5a09dc1 100644
--- a/lib/FrontendTool/ImportedModules.cpp
+++ b/lib/FrontendTool/ImportedModules.cpp
@@ -10,7 +10,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "ImportedModules.h"
+#include "Dependencies.h"
 #include "swift/AST/ASTContext.h"
 #include "swift/AST/Decl.h"
 #include "swift/AST/DiagnosticEngine.h"
diff --git a/lib/FrontendTool/ImportedModules.h b/lib/FrontendTool/ImportedModules.h
deleted file mode 100644
index 510fa4ccdedde..0000000000000
--- a/lib/FrontendTool/ImportedModules.h
+++ /dev/null
@@ -1,26 +0,0 @@
-//===--- ImportedModules.h -- generates the list of imported modules ------===//
-//
-// This source file is part of the Swift.org open source project
-//
-// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
-// Licensed under Apache License v2.0 with Runtime Library Exception
-//
-// See https://swift.org/LICENSE.txt for license information
-// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SWIFT_FRONTENDTOOL_IMPORTEDMODULES_H
-#define SWIFT_FRONTENDTOOL_IMPORTEDMODULES_H
-
-namespace swift {
-
-class ASTContext;
-class FrontendOptions;
-class ModuleDecl;
-
-/// Emit the names of the modules imported by \c mainModule.
-bool emitImportedModules(ModuleDecl *mainModule, const FrontendOptions &opts);
-} // end namespace swift
-
-#endif
diff --git a/lib/FrontendTool/LoadedModuleTrace.cpp b/lib/FrontendTool/LoadedModuleTrace.cpp
new file mode 100644
index 0000000000000..8f51a0ebe1ad7
--- /dev/null
+++ b/lib/FrontendTool/LoadedModuleTrace.cpp
@@ -0,0 +1,764 @@
+//===--- LoadedModuleTrace.cpp -- Emit a trace of all loaded Swift modules ==//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#include "Dependencies.h"
+#include "swift/AST/ASTContext.h"
+#include "swift/AST/DiagnosticEngine.h"
+#include "swift/AST/DiagnosticsFrontend.h"
+#include "swift/AST/Module.h"
+#include "swift/Basic/FileTypes.h"
+#include "swift/Basic/JSONSerialization.h"
+#include "swift/Frontend/FrontendOptions.h"
+
+#include "clang/Basic/Module.h"
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/YAMLTraits.h"
+
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#else
+#include <io.h>
+#endif
+
+using namespace swift;
+
+namespace {
+struct SwiftModuleTraceInfo {
+  Identifier Name;
+  std::string Path;
+  bool IsImportedDirectly;
+  bool SupportsLibraryEvolution;
+};
+
+struct LoadedModuleTraceFormat {
+  static const unsigned CurrentVersion = 2;
+  unsigned Version;
+  Identifier Name;
+  std::string Arch;
+  std::vector<SwiftModuleTraceInfo> SwiftModules;
+};
+} // namespace
+
+namespace swift {
+namespace json {
+template <> struct ObjectTraits<SwiftModuleTraceInfo> {
+  static void mapping(Output &out, SwiftModuleTraceInfo &contents) {
+    StringRef name = contents.Name.str();
+    out.mapRequired("name", name);
+    out.mapRequired("path", contents.Path);
+    out.mapRequired("isImportedDirectly", contents.IsImportedDirectly);
+    out.mapRequired("supportsLibraryEvolution",
+                    contents.SupportsLibraryEvolution);
+  }
+};
+
+// Version notes:
+// 1. Keys: name, arch, swiftmodules
+// 2. New keys: version, swiftmodulesDetailedInfo
+template <> struct ObjectTraits<LoadedModuleTraceFormat> {
+  static void mapping(Output &out, LoadedModuleTraceFormat &contents) {
+    out.mapRequired("version", contents.Version);
+
+    StringRef name = contents.Name.str();
+    out.mapRequired("name", name);
+
+    out.mapRequired("arch", contents.Arch);
+
+    // The 'swiftmodules' key is kept for backwards compatibility.
+    std::vector<std::string> moduleNames;
+    for (auto &m : contents.SwiftModules)
+      moduleNames.push_back(m.Path);
+    out.mapRequired("swiftmodules", moduleNames);
+
+    out.mapRequired("swiftmodulesDetailedInfo", contents.SwiftModules);
+  }
+};
+} // namespace json
+} // namespace swift
+
+static bool isClangOverlayOf(ModuleDecl *potentialOverlay,
+                             ModuleDecl *potentialUnderlying) {
+  return !potentialOverlay->isNonSwiftModule() &&
+         potentialUnderlying->isNonSwiftModule() &&
+         potentialOverlay->getName() == potentialUnderlying->getName();
+}
+
+// TODO: Delete this once changes from https://reviews.llvm.org/D83449 land on
+// apple/llvm-project's swift/main branch.
+template <typename SetLike, typename Item>
+static bool contains(const SetLike &setLike, Item item) {
+  return setLike.find(item) != setLike.end();
+}
+
+/// Get a set of modules imported by \p module.
+///
+/// By default, all imports are included.
+static void getImmediateImports(
+    ModuleDecl *module, SmallPtrSetImpl<ModuleDecl *> &imports,
+    ModuleDecl::ImportFilter importFilter = {
+        ModuleDecl::ImportFilterKind::Exported,
+        ModuleDecl::ImportFilterKind::Default,
+        ModuleDecl::ImportFilterKind::ImplementationOnly,
+        ModuleDecl::ImportFilterKind::SPIAccessControl,
+        ModuleDecl::ImportFilterKind::ShadowedByCrossImportOverlay}) {
+  SmallVector<ImportedModule, 8> importList;
+  module->getImportedModules(importList, importFilter);
+
+  for (ImportedModule &import : importList)
+    imports.insert(import.importedModule);
+}
+
+namespace {
+/// Helper type for computing (approximate) information about ABI-dependencies.
+///
+/// This misses out on details such as typealiases and more.
+/// See the "isImportedDirectly" field above for more details.
+class ABIDependencyEvaluator {
+  /// Map of ABIs exported by a particular module, excluding itself.
+  ///
+  /// For example, consider (primed letters represent Clang modules):
+  /// \code
+  /// - A is @_exported-imported by B
+  /// - B is #imported by C' (via a compiler-generated umbrella header)
+  /// - C' is @_exported-imported by C (Swift overlay)
+  /// - D' is #imported by E'
+  /// - D' is @_exported-imported by D (Swift overlay)
+  /// - E' is @_exported-imported by E (Swift overlay)
+  /// \endcode
+  ///
+  /// Then the \c abiExportMap will be
+  /// \code
+  /// { A: {}, B: {A}, C: {B}, C': {B}, D: {}, D': {}, E: {D}, E': {D'} }
+  /// \endcode
+  ///
+  /// \b WARNING: Use \c reexposeImportedABI instead of inserting directly.
+  llvm::DenseMap<ModuleDecl *, llvm::SmallPtrSet<ModuleDecl *, 32>>
+      abiExportMap;
+
+  /// Stack for depth-first traversal.
+  SmallVector<ModuleDecl *, 32> searchStack;
+
+  llvm::DenseSet<ModuleDecl *> visited;
+
+  /// Helper function to handle invariant violations as crashes in debug mode.
+  void crashOnInvariantViolation(
+      llvm::function_ref<void(llvm::raw_string_ostream &)> f) const;
+
+  /// Computes the ABI exports for \p importedModule and adds them to
+  /// \p module's ABI exports.
+  ///
+  /// If \p includeImportedModule is true, also adds \p importedModule to
+  /// \p module's ABI exports.
+  ///
+  /// Correct way to add entries to \c abiExportMap.
+  void reexposeImportedABI(ModuleDecl *module, ModuleDecl *importedModule,
+                           bool includeImportedModule = true);
+
+  /// Check if a Swift module is an overlay for some Clang module.
+  ///
+  /// FIXME: Delete this hack once SR-13363 is fixed and ModuleDecl has the
+  /// right API which we can use directly.
+  bool isOverlayOfClangModule(ModuleDecl *swiftModule);
+
+  /// Check for cases where we have a fake cycle through an overlay.
+  ///
+  /// Sometimes, we have fake cycles in the import graph due to the Clang
+  /// importer injecting overlays between Clang modules. These don't represent
+  /// an actual cycle in the build, so we should ignore them.
+  ///
+  /// We check this lazily after detecting a cycle because it is difficult to
+  /// determine at the point where we see the overlay whether it was incorrectly
+  /// injected by the Clang importer or whether any of its imports will
+  /// eventually lead to a cycle.
+  ///
+  /// For more details, see [NOTE: ABIDependencyEvaluator-fake-cycle-detection]
+  ///
+  /// \param startOfCycle A pointer to the element of \c searchStack where
+  ///        the module \em first appeared.
+  ///
+  /// \pre The module on top of \c searchStack is the same module as
+  ///      *startOfCycle.
+  ///
+  /// \pre searchStack.begin() <= startOfCycle < searchStack.end()
+  bool isFakeCycleThroughOverlay(ModuleDecl **startOfCycle);
+
+  /// Recursive step in computing ABI dependencies.
+  ///
+  /// Use this method instead of using the \c forClangModule/\c forSwiftModule
+  /// methods.
+  void computeABIDependenciesForModule(ModuleDecl *module);
+  void computeABIDependenciesForSwiftModule(ModuleDecl *module);
+  void computeABIDependenciesForClangModule(ModuleDecl *module);
+
+  static void printModule(const ModuleDecl *module, llvm::raw_ostream &os);
+
+  template <typename SetLike>
+  static void printModuleSet(const SetLike &set, llvm::raw_ostream &os);
+
+public:
+  ABIDependencyEvaluator() = default;
+  ABIDependencyEvaluator(const ABIDependencyEvaluator &) = delete;
+  ABIDependencyEvaluator(ABIDependencyEvaluator &&) = default;
+
+  void getABIDependenciesForSwiftModule(
+      ModuleDecl *module, SmallPtrSetImpl<ModuleDecl *> &abiDependencies);
+
+  void printABIExportMap(llvm::raw_ostream &os) const;
+};
+} // end anonymous namespace
+
+// See [NOTE: Bailing-vs-crashing-in-trace-emission].
+// TODO: Use PrettyStackTrace instead?
+void ABIDependencyEvaluator::crashOnInvariantViolation(
+    llvm::function_ref<void(llvm::raw_string_ostream &)> f) const {
+#ifndef NDEBUG
+  std::string msg;
+  llvm::raw_string_ostream os(msg);
+  os << "error: invariant violation: ";
+  f(os);
+  llvm::report_fatal_error(os.str());
+#endif
+}
+
+// [NOTE: Trace-Clang-submodule-complexity]
+//
+// A Clang module may have zero or more submodules. In practice, when traversing
+// the imports of a module, we observe that different submodules of the same
+// top-level module (almost) freely import each other. Despite this, we still
+// need to conceptually traverse the tree formed by the submodule relationship
+// (with the top-level module being the root).
+//
+// This needs to be taken care of in two ways:
+// 1. We need to make sure we only go towards the leaves. It's okay if we "jump"
+//    branches, so long as we don't try to visit an ancestor when one of its
+//    descendants is still on the traversal stack, so that we don't end up with
+//    arbitrarily complex intra-module cycles.
+//    See also: [NOTE: Intra-module-leafwards-traversal].
+// 2. When adding entries to the ABI export map, we need to avoid marking
+//    dependencies within the same top-level module. This step is needed in
+//    addition to step 1 to avoid creating cycles like
+//    Overlay -> Underlying -> Submodule -> Overlay.
+
+void ABIDependencyEvaluator::reexposeImportedABI(ModuleDecl *module,
+                                                 ModuleDecl *importedModule,
+                                                 bool includeImportedModule) {
+  if (module == importedModule) {
+    crashOnInvariantViolation([&](llvm::raw_string_ostream &os) {
+      os << "module ";
+      printModule(module, os);
+      os << " imports itself!\n";
+    });
+    return;
+  }
+
+  auto addToABIExportMap = [this](ModuleDecl *module, ModuleDecl *reexport) {
+    if (module == reexport) {
+      crashOnInvariantViolation([&](llvm::raw_string_ostream &os) {
+        os << "expected module ";
+        printModule(reexport, os);
+        os << " to not re-export itself\n";
+      });
+      return;
+    }
+    if (reexport->isNonSwiftModule() && module->isNonSwiftModule() &&
+        module->getTopLevelModule() == reexport->getTopLevelModule()) {
+      // Dependencies within the same top-level Clang module are not useful.
+      // See also: [NOTE: Trace-Clang-submodule-complexity].
+      return;
+    }
+
+    // We only care about dependencies across top-level modules and we want to
+    // avoid exploding abiExportMap with submodules. So we only insert entries
+    // after calling getTopLevelModule().
+
+    if (::isClangOverlayOf(module, reexport)) {
+      // For overlays, we need to have a dependency on the underlying module.
+      // Otherwise, we might accidentally create a Swift -> Swift cycle.
+      abiExportMap[module].insert(
+          reexport->getTopLevelModule(/*preferOverlay*/ false));
+      return;
+    }
+    abiExportMap[module].insert(
+        reexport->getTopLevelModule(/*preferOverlay*/ true));
+  };
+
+  computeABIDependenciesForModule(importedModule);
+  if (includeImportedModule) {
+    addToABIExportMap(module, importedModule);
+  }
+  // Force creation of default value if missing. This prevents abiExportMap from
+  // growing (and moving) when calling addToABIExportMap. If abiExportMap gets
+  // moved, then abiExportMap[importedModule] will be moved, forcing us to
+  // create a defensive copy to avoid iterator invalidation on move.
+  (void)abiExportMap[module];
+  for (auto reexportedModule : abiExportMap[importedModule])
+    addToABIExportMap(module, reexportedModule);
+}
+
+bool ABIDependencyEvaluator::isOverlayOfClangModule(ModuleDecl *swiftModule) {
+  assert(!swiftModule->isNonSwiftModule());
+
+  llvm::SmallPtrSet<ModuleDecl *, 8> importList;
+  ::getImmediateImports(swiftModule, importList,
+                        {ModuleDecl::ImportFilterKind::Exported});
+  bool isOverlay =
+      llvm::any_of(importList, [&](ModuleDecl *importedModule) -> bool {
+        return isClangOverlayOf(swiftModule, importedModule);
+      });
+  return isOverlay;
+}
+
+// [NOTE: ABIDependencyEvaluator-fake-cycle-detection]
+//
+// First, let's consider a concrete example.
+// - In Clang-land, ToyKit #imports CoreDoll.
+// - The Swift overlay for CoreDoll imports both CoreDoll and ToyKit.
+// Importing ToyKit from CoreDoll's overlay informally violates the layering
+// of frameworks, but it doesn't actually create any cycles in the build
+// dependencies.
+//
+//                 ┌───────────────────────────┐
+//             ┌───│    CoreDoll.swiftmodule   │
+//             │   └───────────────────────────┘
+//             │                 │
+//      import ToyKit   @_exported import CoreDoll
+//             │                 │
+//             │                 │
+//             ▼                 │
+//  ┌──────────────────────────┐ │
+//  │ ToyKit (ToyKit/ToyKit.h) │ │
+//  └──────────────────────────┘ │
+//             │                 │
+//          #import              │
+//             │                 │
+//             ▼                 │
+//  ┌──────────────────────────────┐
+//  │CoreDoll (CoreDoll/CoreDoll.h)│◀──┘
+//  └──────────────────────────────┘
+//
+// Say we are trying to build a Swift module that imports ToyKit. Due to how
+// module loading works, the Clang importer inserts the CoreDoll overlay
+// between the ToyKit and CoreDoll Clang modules, creating a cycle in the
+// import graph.
+//
+//  ┌──────────────────────────┐
+//  │ ToyKit (ToyKit/ToyKit.h) │◀──────────┐
+//  └──────────────────────────┘           │
+//             │                           │
+//          #import                 import ToyKit
+//             │                           │
+//             ▼                           │
+//  ┌────────────────────────────┐         │
+//  │    CoreDoll.swiftmodule    │─────────┘
+//  └────────────────────────────┘
+//             │
+//   @_exported import CoreDoll
+//             │
+//             ▼
+//  ┌──────────────────────────────┐
+//  │CoreDoll (CoreDoll/CoreDoll.h)│
+//  └──────────────────────────────┘
+//
+// This means that, at some point, searchStack will look like:
+//
+//    [others] → ToyKit → CoreDoll (overlay) → ToyKit
+//
+// In the general case, there may be arbitrarily many modules in the cycle,
+// including submodules.
+//
+//    [others] → ToyKit → [others] → CoreDoll (overlay) → [others] → ToyKit
+//
+// where "[others]" indicates 0 or more modules of any kind.
+//
+// To detect this, we check that the start of the cycle is a Clang module and
+// that there is at least one overlay between it and its recurrence at the end
+// of the searchStack. If so, we assume we have detected a benign cycle which
+// can be safely ignored.
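// A minimal, self-contained sketch of the check described in the note above,
// using a simplified stand-in for ModuleDecl; the real implementation below
// operates on ModuleDecl and calls isOverlayOfClangModule:

#include <algorithm>
#include <cstddef>
#include <vector>

namespace sketch {

struct Module {
  bool isClangModule;          // stand-in for isNonSwiftModule()
  bool isOverlayOfClangModule; // stand-in for isOverlayOfClangModule()
};

// A cycle starting at searchStack[start] is "fake" (injected by the Clang
// importer) when it begins at a Clang module and at least one Swift overlay
// appears between that point and the top of the stack.
inline bool isFakeCycleThroughOverlay(const std::vector<Module *> &searchStack,
                                      std::size_t start) {
  if (!searchStack[start]->isClangModule)
    return false;
  return std::any_of(searchStack.begin() + start + 1, searchStack.end(),
                     [](const Module *m) {
                       return !m->isClangModule && m->isOverlayOfClangModule;
                     });
}

} // namespace sketch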
+
+bool ABIDependencyEvaluator::isFakeCycleThroughOverlay(
+    ModuleDecl **startOfCycle) {
+  assert(startOfCycle >= searchStack.begin() &&
+         startOfCycle < searchStack.end() &&
+         "startOfCycleIter points to an element in searchStack");
+  // The startOfCycle module must be a Clang module.
+  if (!(*startOfCycle)->isNonSwiftModule())
+    return false;
+  // Next, we must have zero or more modules followed by a Swift overlay for a
+  // Clang module.
+  return std::any_of(
+      startOfCycle + 1, searchStack.end(), [this](ModuleDecl *module) {
+        return !module->isNonSwiftModule() && isOverlayOfClangModule(module);
+      });
+}
+
+void ABIDependencyEvaluator::computeABIDependenciesForModule(
+    ModuleDecl *module) {
+  auto moduleIter = llvm::find(searchStack, module);
+  if (moduleIter != searchStack.end()) {
+    if (isFakeCycleThroughOverlay(moduleIter))
+      return;
+    crashOnInvariantViolation([&](llvm::raw_string_ostream &os) {
+      os << "unexpected cycle in import graph!\n";
+      for (auto m : searchStack) {
+        printModule(m, os);
+        if (!m->isNonSwiftModule()) {
+          os << " (isOverlay = " << isOverlayOfClangModule(m) << ")";
+        }
+        os << "\ndepends on ";
+      }
+      printModule(module, os);
+      os << '\n';
+    });
+    return;
+  }
+  if (::contains(visited, module))
+    return;
+  searchStack.push_back(module);
+  if (module->isNonSwiftModule())
+    computeABIDependenciesForClangModule(module);
+  else
+    computeABIDependenciesForSwiftModule(module);
+  searchStack.pop_back();
+  visited.insert(module);
+}
+
+void ABIDependencyEvaluator::computeABIDependenciesForSwiftModule(
+    ModuleDecl *module) {
+  SmallPtrSet<ModuleDecl *, 32> allImports;
+  ::getImmediateImports(module, allImports);
+  for (auto import : allImports) {
+    computeABIDependenciesForModule(import);
+    if (::isClangOverlayOf(module, import)) {
+      reexposeImportedABI(module, import,
+                          /*includeImportedModule=*/false);
+    }
+  }
+
+  SmallPtrSet<ModuleDecl *, 32> reexportedImports;
+  ::getImmediateImports(module, reexportedImports,
+                        {ModuleDecl::ImportFilterKind::Exported});
+  for (auto reexportedImport : reexportedImports) {
+    reexposeImportedABI(module, reexportedImport);
+  }
+}
+
+void ABIDependencyEvaluator::computeABIDependenciesForClangModule(
+    ModuleDecl *module) {
+  SmallPtrSet<ModuleDecl *, 32> imports;
+  ::getImmediateImports(module, imports);
+  for (auto import : imports) {
+    // There are three cases here which can potentially create cycles:
+    //
+    // 1. Clang modules importing the stdlib.
+    //    See [NOTE: Pure-Clang-modules-privately-import-stdlib].
+    // 2. Overlay S @_exported-imports underlying module S' and another Clang
+    //    module C'. C' (transitively) #imports S' but it gets treated as if
+    //    C' imports S. This creates a cycle: S -> C' -> ... -> S.
+    //    In practice, this case is hit for
+    //    Darwin (Swift) -> SwiftOverlayShims (Clang) -> Darwin (Swift).
+    //    We may also hit this in a slightly different direction, in case
+    //    the module directly imports SwiftOverlayShims:
+    //    SwiftOverlayShims -> Darwin (Swift) -> SwiftOverlayShims
+    //    The latter is handled later by isFakeCycleThroughOverlay.
+    // 3. [NOTE: Intra-module-leafwards-traversal]
+    //    Cycles within the same top-level module.
+    //    These don't matter for us, since we only care about the dependency
+    //    graph at the granularity of top-level modules. So we ignore these
+    //    by only considering parent -> submodule dependencies.
+    //    See also [NOTE: Trace-Clang-submodule-complexity].
+    if (import->isStdlibModule()) {
+      continue;
+    }
+    if (!import->isNonSwiftModule() && isOverlayOfClangModule(import) &&
+        llvm::find(searchStack, import) != searchStack.end()) {
+      continue;
+    }
+    if (import->isNonSwiftModule() &&
+        module->getTopLevelModule() == import->getTopLevelModule() &&
+        (module == import ||
+         !import->findUnderlyingClangModule()->isSubModuleOf(
+             module->findUnderlyingClangModule()))) {
+      continue;
+    }
+    computeABIDependenciesForModule(import);
+    reexposeImportedABI(module, import);
+  }
+}
+
+void ABIDependencyEvaluator::getABIDependenciesForSwiftModule(
+    ModuleDecl *module, SmallPtrSetImpl<ModuleDecl *> &abiDependencies) {
+  computeABIDependenciesForModule(module);
+  SmallPtrSet<ModuleDecl *, 32> allImports;
+  ::getImmediateImports(module, allImports);
+  for (auto directDependency : allImports) {
+    abiDependencies.insert(directDependency);
+    for (auto exposedDependency : abiExportMap[directDependency]) {
+      abiDependencies.insert(exposedDependency);
+    }
+  }
+}
+
+void ABIDependencyEvaluator::printModule(const ModuleDecl *module,
+                                         llvm::raw_ostream &os) {
+  module->getReverseFullModuleName().printForward(os);
+  os << (module->isNonSwiftModule() ? " (Clang)" : " (Swift)");
+  os << " @ " << llvm::format("0x%llx", reinterpret_cast<uintptr_t>(module));
+}
+
+template <typename SetLike>
+void ABIDependencyEvaluator::printModuleSet(const SetLike &set,
+                                            llvm::raw_ostream &os) {
+  os << "{ ";
+  for (auto module : set) {
+    printModule(module, os);
+    os << ", ";
+  }
+  os << "}";
+}
+
+void ABIDependencyEvaluator::printABIExportMap(llvm::raw_ostream &os) const {
+  os << "ABI Export Map {{\n";
+  for (auto &entry : abiExportMap) {
+    printModule(entry.first, os);
+    os << " : ";
+    printModuleSet(entry.second, os);
+    os << "\n";
+  }
+  os << "}}\n";
+}
+
+/// Compute the per-module information to be recorded in the trace file.
+//
+// The most interesting/tricky thing here is _which_ paths get recorded in
+// the trace file as dependencies. It depends on how the module was synthesized.
+// The key points are:
+//
+// 1. Paths to swiftmodules in the module cache or in the prebuilt cache are not
+//    recorded - Precondition: the corresponding path to the swiftinterface must
+//    already be present as a key in pathToModuleDecl.
+// 2. swiftmodules next to a swiftinterface are saved if they are up-to-date.
+//
+// FIXME: Use the VFS instead of handling paths directly. We are particularly
+// sloppy about handling relative paths in the dependency tracker.
+static void computeSwiftModuleTraceInfo(
+    const SmallPtrSetImpl<ModuleDecl *> &abiDependencies,
+    const llvm::DenseMap<StringRef, ModuleDecl *> &pathToModuleDecl,
+    const DependencyTracker &depTracker, StringRef prebuiltCachePath,
+    std::vector<SwiftModuleTraceInfo> &traceInfo) {
+
+  SmallString<256> buffer;
+
+  std::string errMsg;
+  llvm::raw_string_ostream err(errMsg);
+
+  // FIXME: Use PrettyStackTrace instead.
+  auto errorUnexpectedPath =
+      [&pathToModuleDecl](llvm::raw_string_ostream &errStream) {
+        errStream << "The module <-> path mapping we have is:\n";
+        for (auto &m : pathToModuleDecl)
+          errStream << m.second->getName() << " <-> " << m.first << '\n';
+        llvm::report_fatal_error(errStream.str());
+      };
+
+  using namespace llvm::sys;
+
+  auto computeAdjacentInterfacePath = [](SmallVectorImpl<char> &modPath) {
+    auto swiftInterfaceExt =
+        file_types::getExtension(file_types::TY_SwiftModuleInterfaceFile);
+    path::replace_extension(modPath, swiftInterfaceExt);
+  };
+
+  for (auto &depPath : depTracker.getDependencies()) {
+
+    // Decide if this is a swiftmodule based on the extension of the raw
+    // dependency path, as the true file may have a different one.
+    // For example, this might happen when the canonicalized path points to
+    // a Content Addressed Storage (CAS) location.
+    auto moduleFileType =
+        file_types::lookupTypeForExtension(path::extension(depPath));
+    auto isSwiftmodule = moduleFileType == file_types::TY_SwiftModuleFile;
+    auto isSwiftinterface =
+        moduleFileType == file_types::TY_SwiftModuleInterfaceFile;
+
+    if (!(isSwiftmodule || isSwiftinterface))
+      continue;
+
+    auto dep = pathToModuleDecl.find(depPath);
+    if (dep != pathToModuleDecl.end()) {
+      // Great, we recognize the path! Check if the file is still around.
+
+      ModuleDecl *depMod = dep->second;
+      if (depMod->isResilient() && !isSwiftinterface) {
+        // FIXME: Ideally, we would check that the swiftmodule has a
+        // swiftinterface next to it. Tracked by rdar://problem/56351399.
+      }
+
+      // FIXME: Better error handling
+      StringRef realDepPath =
+          fs::real_path(depPath, buffer, /*expand_tilde*/ true)
+              ? StringRef(depPath) // Couldn't find the canonical path, assume
+                                   // this is good enough.
+              : buffer.str();
+
+      bool isImportedDirectly = ::contains(abiDependencies, depMod);
+
+      traceInfo.push_back(
+          {/*Name=*/
+           depMod->getName(),
+           /*Path=*/
+           realDepPath.str(),
+           // TODO: There is an edge case which is not handled here.
+           // When we build a framework using -import-underlying-module, or an
+           // app/test using -import-objc-header, we should look at the direct
+           // imports of the bridging modules, and mark those as our direct
+           // imports.
+           // TODO: Add negative test cases for the comment above.
+           // TODO: Describe precise semantics of "isImportedDirectly".
+           /*IsImportedDirectly=*/
+           isImportedDirectly,
+           /*SupportsLibraryEvolution=*/
+           depMod->isResilient()});
+      buffer.clear();
+
+      continue;
+    }
+
+    // If the depTracker had an interface, that means that we must've
+    // built a swiftmodule from that interface, so we should have that
+    // filename available.
+    if (isSwiftinterface) {
+      err << "Unexpected path for swiftinterface file:\n" << depPath << "\n";
+      errorUnexpectedPath(err);
+    }
+
+    // Skip cached modules in the prebuilt cache. We will add the corresponding
+    // swiftinterface from the SDK directly, but this isn't checked. :-/
+    //
+    // FIXME: This is incorrect if both paths are not relative w.r.t. the
+    // same root.
+    if (StringRef(depPath).startswith(prebuiltCachePath))
+      continue;
+
+    // If we have a swiftmodule next to an interface, that interface path will
+    // be saved (not checked), so don't save the path to this swiftmodule.
+    SmallString<256> moduleAdjacentInterfacePath(depPath);
+    computeAdjacentInterfacePath(moduleAdjacentInterfacePath);
+    if (::contains(pathToModuleDecl, moduleAdjacentInterfacePath))
+      continue;
+
+    // FIXME: The behavior of fs::exists for relative paths is undocumented.
+    // Use something else instead?
+    if (fs::exists(moduleAdjacentInterfacePath)) {
+      // This should be an error but it is not because of funkiness around
+      // compatible modules such as us having both armv7s.swiftinterface
+      // and armv7.swiftinterface in the dependency tracker.
+      continue;
+    }
+    buffer.clear();
+
+    // We might land here when we have an arm.swiftmodule in the cache path
+    // which added a dependency on an arm.swiftinterface (which was not loaded).
+  }
+
+  // Almost a re-implementation of reversePathSortedFilenames :(.
+  std::sort(traceInfo.begin(), traceInfo.end(),
+            [](const SwiftModuleTraceInfo &m1,
+               const SwiftModuleTraceInfo &m2) -> bool {
+              return std::lexicographical_compare(
+                  m1.Path.rbegin(), m1.Path.rend(), m2.Path.rbegin(),
+                  m2.Path.rend());
+            });
+}
+
+// [NOTE: Bailing-vs-crashing-in-trace-emission] There are certain edge cases
+// in trace emission where an invariant that you think should hold does not hold
+// in practice. For example, sometimes we have seen modules without any
+// corresponding filename.
+//
+// Since the trace is a supplementary output for build system consumption, it
+// is better to emit it on a best-effort basis instead of crashing and failing
+// the build.
+//
+// Moreover, going forward, it would be nice if trace emission were more robust
+// so we could emit the trace on a best-effort basis even if the dependency
+// graph is ill-formed, so that the trace can be used as a debugging aid.
+bool swift::emitLoadedModuleTraceIfNeeded(ModuleDecl *mainModule,
+                                          DependencyTracker *depTracker,
+                                          const FrontendOptions &opts,
+                                          const InputFile &input) {
+  ASTContext &ctxt = mainModule->getASTContext();
+  assert(!ctxt.hadError() &&
+         "We should've already exited earlier if there was an error.");
+
+  auto loadedModuleTracePath = input.getLoadedModuleTracePath();
+  if (loadedModuleTracePath.empty())
+    return false;
+  std::error_code EC;
+  llvm::raw_fd_ostream out(loadedModuleTracePath, EC, llvm::sys::fs::F_Append);
+
+  if (out.has_error() || EC) {
+    ctxt.Diags.diagnose(SourceLoc(), diag::error_opening_output,
+                        loadedModuleTracePath, EC.message());
+    out.clear_error();
+    return true;
+  }
+
+  SmallPtrSet<ModuleDecl *, 32> abiDependencies;
+  {
+    ABIDependencyEvaluator evaluator{};
+    evaluator.getABIDependenciesForSwiftModule(mainModule, abiDependencies);
+  }
+
+  llvm::DenseMap<StringRef, ModuleDecl *> pathToModuleDecl;
+  for (const auto &module : ctxt.getLoadedModules()) {
+    ModuleDecl *loadedDecl = module.second;
+    if (!loadedDecl)
+      llvm::report_fatal_error("Expected loaded modules to be non-null.");
+    if (loadedDecl == mainModule)
+      continue;
+    if (loadedDecl->getModuleFilename().empty()) {
+      // FIXME: rdar://problem/59853077
+      // Ideally, this shouldn't happen. As a temporary workaround, avoid
+      // crashing with a message while we investigate the problem.
+      llvm::errs() << "WARNING: Module '" << loadedDecl->getName().str()
+                   << "' has an empty filename. This is probably an "
+                   << "invariant violation.\n"
+                   << "Please report it as a compiler bug.\n";
+      continue;
+    }
+    pathToModuleDecl.insert(
+        std::make_pair(loadedDecl->getModuleFilename(), loadedDecl));
+  }
+
+  std::vector<SwiftModuleTraceInfo> swiftModules;
+  computeSwiftModuleTraceInfo(abiDependencies, pathToModuleDecl, *depTracker,
+                              opts.PrebuiltModuleCachePath, swiftModules);
+
+  LoadedModuleTraceFormat trace = {
+      /*version=*/LoadedModuleTraceFormat::CurrentVersion,
+      /*name=*/mainModule->getName(),
+      /*arch=*/ctxt.LangOpts.Target.getArchName().str(), swiftModules};
+
+  // raw_fd_ostream is unbuffered, and we may have multiple processes writing,
+  // so first write to memory and then dump the buffer to the trace file.
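+  // For illustration, the buffered payload is a single JSON line of roughly
+  // this shape (a sketch -- the exact keys are determined by the jsonize
+  // implementation for LoadedModuleTraceFormat, not by this comment):
+  //
+  //   {"version":...,"name":"MyApp","arch":"arm64","swiftmodules":[...]}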
+  std::string stringBuffer;
+  {
+    llvm::raw_string_ostream memoryBuffer(stringBuffer);
+    json::Output jsonOutput(memoryBuffer, /*UserInfo=*/{},
+                            /*PrettyPrint=*/false);
+    json::jsonize(jsonOutput, trace, /*Required=*/true);
+  }
+  stringBuffer += "\n";
+  out << stringBuffer;
+
+  return true;
+}
diff --git a/lib/FrontendTool/MakeStyleDependencies.cpp b/lib/FrontendTool/MakeStyleDependencies.cpp
new file mode 100644
index 0000000000000..b305233f35274
--- /dev/null
+++ b/lib/FrontendTool/MakeStyleDependencies.cpp
@@ -0,0 +1,144 @@
+//===--- MakeStyleDependencies.cpp -- Emit make-style dependencies --------===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#include "Dependencies.h"
+#include "swift/AST/DiagnosticEngine.h"
+#include "swift/AST/DiagnosticsFrontend.h"
+#include "swift/AST/ModuleLoader.h"
+#include "swift/Frontend/FrontendOptions.h"
+#include "swift/Frontend/InputFile.h"
+#include "swift/FrontendTool/FrontendTool.h"
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace swift;
+
+StringRef
+swift::frontend::utils::escapeForMake(StringRef raw,
+                                      llvm::SmallVectorImpl<char> &buffer) {
+  buffer.clear();
+
+  // The escaping rules for GNU make are complicated due to the various
+  // substitutions and use of the tab in the leading position for recipes.
+  // Various symbols have significance in different contexts. It is not
+  // possible to correctly quote all characters in Make (as of 3.7). Match
+  // gcc and clang's behaviour for the escaping which covers only a subset of
+  // characters.
+  for (unsigned I = 0, E = raw.size(); I != E; ++I) {
+    switch (raw[I]) {
+    case '#': // Handle '#' the broken GCC way
+      buffer.push_back('\\');
+      break;
+
+    case ' ':
+      for (unsigned J = I; J && raw[J - 1] == '\\'; --J)
+        buffer.push_back('\\');
+      buffer.push_back('\\');
+      break;
+
+    case '$': // $ is escaped by $
+      buffer.push_back('$');
+      break;
+    }
+    buffer.push_back(raw[I]);
+  }
+  buffer.push_back('\0');
+
+  return buffer.data();
+}
+
+/// This sorting function is used to stabilize the order in which dependencies
+/// are emitted into \c .d files that are consumed by external build systems.
+/// This serves to eliminate order as a source of non-determinism in these
+/// outputs.
+///
+/// The exact sorting predicate is not important. Currently, it is a
+/// lexicographic comparison that reverses the provided strings before applying
+/// the sorting predicate. This has the benefit of being somewhat
+/// invariant with respect to the installation location of various system
+/// components. e.g. on two systems, the same file identified by two different
+/// paths differing only in their relative install location such as
+///
+///   /Applications/MyXcode.app/Path/To/A/Framework/In/The/SDK/Header.h
+///   /Applications/Xcodes/AnotherXcode.app/Path/To/A/Framework/In/The/SDK/Header.h
+///
+/// should appear in roughly the same order relative to other paths. Ultimately,
+/// this makes it easier to test the contents of the emitted files with tools
+/// like FileCheck.
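+///
+/// For example, any path ending in "Foo.h" sorts before any path ending in
+/// "Bar.h": the reversed strings "h.ooF..." and "h.raB..." already differ at
+/// their third characters ('o' < 'r'), before any install-location prefix is
+/// consulted.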
+static std::vector<std::string>
+reversePathSortedFilenames(const ArrayRef<std::string> elts) {
+  std::vector<std::string> tmp(elts.begin(), elts.end());
+  std::sort(tmp.begin(), tmp.end(),
+            [](const std::string &a, const std::string &b) -> bool {
+              return std::lexicographical_compare(a.rbegin(), a.rend(),
+                                                  b.rbegin(), b.rend());
+            });
+  return tmp;
+}
+
+/// Emits a Make-style dependencies file.
+bool swift::emitMakeDependenciesIfNeeded(DiagnosticEngine &diags,
+                                         DependencyTracker *depTracker,
+                                         const FrontendOptions &opts,
+                                         const InputFile &input) {
+  auto dependenciesFilePath = input.getDependenciesFilePath();
+  if (dependenciesFilePath.empty())
+    return false;
+
+  std::error_code EC;
+  llvm::raw_fd_ostream out(dependenciesFilePath, EC, llvm::sys::fs::F_None);
+
+  if (out.has_error() || EC) {
+    diags.diagnose(SourceLoc(), diag::error_opening_output,
+                   dependenciesFilePath, EC.message());
+    out.clear_error();
+    return true;
+  }
+
+  llvm::SmallString<256> buffer;
+
+  // Collect everything in memory to avoid redundant work
+  // when there are multiple targets.
+  std::string dependencyString;
+
+  // First include all other files in the module. Make-style dependencies
+  // need to be conservative!
+  auto inputPaths =
+      reversePathSortedFilenames(opts.InputsAndOutputs.getInputFilenames());
+  for (auto const &path : inputPaths) {
+    dependencyString.push_back(' ');
+    dependencyString.append(frontend::utils::escapeForMake(path, buffer).str());
+  }
+  // Then print dependencies we've picked up during compilation.
+  auto dependencyPaths =
+      reversePathSortedFilenames(depTracker->getDependencies());
+  for (auto const &path : dependencyPaths) {
+    dependencyString.push_back(' ');
+    dependencyString.append(frontend::utils::escapeForMake(path, buffer).str());
+  }
+  auto incrementalDependencyPaths =
+      reversePathSortedFilenames(depTracker->getIncrementalDependencies());
+  for (auto const &path : incrementalDependencyPaths) {
+    dependencyString.push_back(' ');
+    dependencyString.append(frontend::utils::escapeForMake(path, buffer).str());
+  }
+
+  // FIXME: Xcode can't currently handle multiple targets in a single
+  // dependency line.
+  opts.forAllOutputPaths(input, [&](const StringRef targetName) {
+    auto targetNameEscaped = frontend::utils::escapeForMake(targetName, buffer);
+    out << targetNameEscaped << " :" << dependencyString << '\n';
+  });
+
+  return false;
+}
diff --git a/lib/FrontendTool/ScanDependencies.cpp b/lib/FrontendTool/ScanDependencies.cpp
index 4968a135ff3c8..4c5443762478b 100644
--- a/lib/FrontendTool/ScanDependencies.cpp
+++ b/lib/FrontendTool/ScanDependencies.cpp
@@ -2,13 +2,14 @@
 //
 // This source file is part of the Swift.org open source project
 //
-// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
+// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
 // Licensed under Apache License v2.0 with Runtime Library Exception
 //
 // See https://swift.org/LICENSE.txt for license information
 // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
 //
 //===----------------------------------------------------------------------===//
+
 #include "ScanDependencies.h"
 #include "swift/AST/ASTContext.h"
 #include "swift/AST/Decl.h"
diff --git a/lib/IRGen/Address.h b/lib/IRGen/Address.h
index a07a7771d8dc5..c048cbcca1787 100644
--- a/lib/IRGen/Address.h
+++ b/lib/IRGen/Address.h
@@ -112,6 +112,7 @@ class StackAddress {
   /// In a normal function, the result of llvm.stacksave or null.
   /// In a coroutine, the result of llvm.coro.alloca.alloc.
+  /// In an async function, the result of the taskAlloc call.
   llvm::Value *ExtraInfo;

 public:
diff --git a/lib/IRGen/CallEmission.h b/lib/IRGen/CallEmission.h
index dcab4708bb933..20826d67bfd0f 100644
--- a/lib/IRGen/CallEmission.h
+++ b/lib/IRGen/CallEmission.h
@@ -30,6 +30,7 @@ namespace irgen {
 class Explosion;
 class LoadableTypeInfo;
 struct WitnessMetadata;
+class FunctionPointer;

 /// A plan for emitting a series of calls.
 class CallEmission {
@@ -67,8 +68,12 @@ class CallEmission {
   void emitToUnmappedExplosion(Explosion &out);
   virtual void emitCallToUnmappedExplosion(llvm::CallInst *call, Explosion &out) = 0;
   void emitYieldsToExplosion(Explosion &out);
+  virtual FunctionPointer getCalleeFunctionPointer() = 0;
   llvm::CallInst *emitCallSite();

+  virtual llvm::CallInst *createCall(const FunctionPointer &fn,
+                                     ArrayRef<llvm::Value *> args) = 0;
+
   CallEmission(IRGenFunction &IGF, llvm::Value *selfValue, Callee &&callee)
       : IGF(IGF), selfValue(selfValue), CurCallee(std::move(callee)) {}

diff --git a/lib/IRGen/Callee.h b/lib/IRGen/Callee.h
index 5d134d4ac205d..0f3af50a7e58b 100644
--- a/lib/IRGen/Callee.h
+++ b/lib/IRGen/Callee.h
@@ -124,7 +124,31 @@ namespace irgen {

   /// A function pointer value.
   class FunctionPointer {
-    /// The actual function pointer.
+  public:
+    struct KindTy {
+      enum class Value {
+        Function,
+        AsyncFunctionPointer,
+      };
+      static const Value Function = Value::Function;
+      static const Value AsyncFunctionPointer = Value::AsyncFunctionPointer;
+      Value value;
+      KindTy(Value value) : value(value) {}
+      KindTy(CanSILFunctionType fnType)
+          : value(fnType->isAsync() ? Value::AsyncFunctionPointer
+                                    : Value::Function) {}
+      friend bool operator==(const KindTy &lhs, const KindTy &rhs) {
+        return lhs.value == rhs.value;
+      }
+      friend bool operator!=(const KindTy &lhs, const KindTy &rhs) {
+        return !(lhs == rhs);
+      }
+    };
+
+  private:
+    KindTy Kind;
+
+    /// The actual pointer, either to the function or to its descriptor.
     llvm::Value *Value;

     PointerAuthInfo AuthInfo;
@@ -135,25 +159,27 @@ namespace irgen {
     /// Construct a FunctionPointer for an arbitrary pointer value.
     /// We may add more arguments to this; try to use the other
     /// constructors/factories if possible.
-    explicit FunctionPointer(llvm::Value *value, PointerAuthInfo authInfo,
+    explicit FunctionPointer(KindTy kind, llvm::Value *value,
+                             PointerAuthInfo authInfo,
                              const Signature &signature)
-        : Value(value), AuthInfo(authInfo), Sig(signature) {
+        : Kind(kind), Value(value), AuthInfo(authInfo), Sig(signature) {
       // The function pointer should have function type.
      assert(value->getType()->getPointerElementType()->isFunctionTy());
       // TODO: maybe assert similarity to signature.getType()?
     }

     // Temporary only!
-    explicit FunctionPointer(llvm::Value *value, const Signature &signature)
-        : FunctionPointer(value, PointerAuthInfo(), signature) {}
+    explicit FunctionPointer(KindTy kind, llvm::Value *value,
+                             const Signature &signature)
+        : FunctionPointer(kind, value, PointerAuthInfo(), signature) {}

     static FunctionPointer forDirect(IRGenModule &IGM, llvm::Constant *value,
                                      CanSILFunctionType fnType);

-    static FunctionPointer forDirect(llvm::Constant *value,
+    static FunctionPointer forDirect(KindTy kind, llvm::Constant *value,
                                      const Signature &signature) {
-      return FunctionPointer(value, PointerAuthInfo(), signature);
+      return FunctionPointer(kind, value, PointerAuthInfo(), signature);
     }

     static FunctionPointer forExplosionValue(IRGenFunction &IGF,
@@ -166,8 +192,17 @@ namespace irgen {
       return (isa<llvm::Constant>(Value) && AuthInfo.isConstant());
     }

+    KindTy getKind() const { return Kind; }
+
+    /// Given that this value is known to have been constructed from a direct
+    /// function, return the name of that function.
+    StringRef getName(IRGenModule &IGM) const;
+
     /// Return the actual function pointer.
-    llvm::Value *getPointer() const { return Value; }
+    llvm::Value *getPointer(IRGenFunction &IGF) const;
+
+    /// Return the raw pointer value, which for an async function points at
+    /// its AsyncFunctionPointer descriptor rather than at code.
+    llvm::Value *getRawPointer() const { return Value; }

     /// Given that this value is known to have been constructed from
     /// a direct function, return the function pointer.
@@ -205,6 +240,9 @@ namespace irgen {

     llvm::Value *getExplosionValue(IRGenFunction &IGF,
                                    CanSILFunctionType fnType) const;
+
+    /// Form a FunctionPointer whose KindTy is ::Function.
+    FunctionPointer getAsFunction(IRGenFunction &IGF) const;
   };

   class Callee {
diff --git a/lib/IRGen/EntryPointArgumentEmission.h b/lib/IRGen/EntryPointArgumentEmission.h
index f41961ebfb4fb..5bec25a7bdd5f 100644
--- a/lib/IRGen/EntryPointArgumentEmission.h
+++ b/lib/IRGen/EntryPointArgumentEmission.h
@@ -17,10 +17,15 @@ class Value;
 }

 namespace swift {
+
+class SILArgument;
+
 namespace irgen {

 class Explosion;
 struct GenericRequirement;
+class LoadableTypeInfo;
+class TypeInfo;

 class EntryPointArgumentEmission {
@@ -44,6 +49,12 @@ class NativeCCEntryPointArgumentEmission
   virtual llvm::Value *getSelfWitnessTable() = 0;
   virtual llvm::Value *getSelfMetadata() = 0;
   virtual llvm::Value *getCoroutineBuffer() = 0;
+  virtual Explosion
+  explosionForObject(IRGenFunction &IGF, unsigned index, SILArgument *param,
+                     SILType paramTy, const LoadableTypeInfo &loadableParamTI,
+                     const LoadableTypeInfo &loadableArgTI,
+                     std::function<Explosion(unsigned, llvm::Value *)>
+                         explosionForArgument) = 0;
 };

 } // end namespace irgen
diff --git a/lib/IRGen/GenCall.cpp b/lib/IRGen/GenCall.cpp
index 00ef089f937db..0744ea7d5f50b 100644
--- a/lib/IRGen/GenCall.cpp
+++ b/lib/IRGen/GenCall.cpp
@@ -330,10 +330,6 @@ AsyncContextLayout::AsyncContextLayout(
 #endif
 }

-static Size getAsyncContextSize(AsyncContextLayout layout) {
-  return layout.getSize();
-}
-
 static Alignment getAsyncContextAlignment(IRGenModule &IGM) {
   return IGM.getPointerAlignment();
 }
@@ -1838,106 +1834,161 @@ void irgen::extractScalarResults(IRGenFunction &IGF, llvm::Type *bodyType,
     out.add(returned);
 }

-static void externalizeArguments(IRGenFunction &IGF, const Callee &callee,
-                                 Explosion &in, Explosion &out,
-                                 TemporarySet &temporaries, bool isOutlined);
-
-llvm::Value *irgen::getDynamicAsyncContextSize(IRGenFunction &IGF,
-                                               AsyncContextLayout layout,
-                                               CanSILFunctionType functionType,
-                                               llvm::Value *thickContext) {
-  // TODO: This calculation should be extracted out into a standalone function
+std::pair<llvm::Value *, llvm::Value *>
irgen::getAsyncFunctionAndSize(
+    IRGenFunction &IGF, SILFunctionTypeRepresentation representation,
+    FunctionPointer functionPointer, llvm::Value *thickContext,
+    std::pair<bool, bool> values) {
+  assert(values.first || values.second);
+  bool emitFunction = values.first;
+  bool emitSize = values.second;
+  // TODO: This calculation should be extracted out into standalone functions
   // emitted on-demand per-module to improve codesize.
-  switch (functionType->getRepresentation()) {
+  switch (representation) {
   case SILFunctionTypeRepresentation::Thick: {
     // If the called function is thick, the size of the called function's
-    // async context may not be statically knowable.
+    // async context is not statically knowable.
     //
     // Specifically, if the thick function was produced by a partial_apply,
     // the function which was originally partially applied determines the
     // size of the needed async context. That original function isn't known
     // statically. The dynamic size is available within the context as an
     // i32 at the first index: <{ %swift.refcounted*, /*size*/ i32, ... }>.
+    // In this case, the function pointer is actually a pointer to an LLVM
+    // function.
    //
     // On the other hand, if the thick function was produced by a
     // thin_to_thick_function, then the context will be nullptr. In that
-    // case, the size of the needed async context is known statically to
-    // be the size dictated by the function signature.
+    // case, the dynamic size of the needed async context is available within
+    // the AsyncFunctionPointer struct pointed to by the "function" pointer,
+    // as an i32 at the second index: <{ /*fn rel addr*/ i32, /*size*/ i32 }>.
     //
     // We are currently emitting into some basic block. To handle these two
     // cases, we need to branch based on whether the context is nullptr; each
-    // branch must then determine the size in the manner appropriate to it.
-    // Finally, both blocks must join back together to make the call:
+    // branch must then determine the size and function pointer in the manner
+    // appropriate to it. Finally, both blocks must join back together to make
+    // the call:
     //
-    // SIL:       IR:
-    // +-----+    +-------------------------+
-    // |.....|    |%cond = %ctx == nullptr  |
-    // |apply|    |br %cond, static, dynamic|
-    // |.....|    +--------/--------------\-+
-    // +-----+            /                \
-    // +-static-------+  +-dynamic----------------------------------------------+
-    // |%size = K     |  |%layout = bitcast %context to <{%swift.context*, i32}>|
-    // |br join(%size)|  |%size_addr = getelementptr %layout, i32 1, i32 0      |
-    // +-----\--------+  |%size = load %size_addr                               |
-    //        \          |br join(%size)                                        |
-    //         \         +------------------------------------------------------+
-    //          \           /
-    // +-join(%size)-----------------------------------------------------------+
-    // |%dataAddr = swift_taskAlloc(%task, %size)                               |
-    // |%async_context = bitcast %dataAddr to ASYNC_CONTEXT(static_callee_type)|
-    // |...  // populate the fields %context with arguments                     |
-    // |call %callee(%async_context, %context)                                 |
-    // +-----------------------------------------------------------------------+
-    auto *staticSizeBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext());
-    auto *dynamicSizeBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext());
+    //                  +-------------------------+
+    //                  |%cond = %ctx == nullptr  |
+    //  +---------------|br %cond, thin, thick    |----------------------+
+    //  |               +-------------------------+                      |
+    //  |                                                                |
+    //  V                                                                |
+    //  +-thin-------------------------------------------+               |
+    //  |%afp = bitcast %fp to %swift.async_func_pointer*|               |
+    //  |%size_ptr = getelementptr %afp, i32 0, i32 1    |               |
+    //  |%size = load %size_ptr                          |               |
+    //  |%offset_ptr = getelementptr %afp, i32 0, i32 0  |               |
+    //  |%offset = load i32 %offset_ptr                  |               |
+    //  |%offset64 = sext %offset to i64                 |               |
+    //  |%raw_fp = add %offset64, %offset_ptr            |               |
+    //  |br join(%raw_fp, %size)                         |               |
+    //  +------------------------------------------------+               |
+    //  |                                                                |
+    //  |                                                                V
+    //  |               +-thick--------------------------------------------+
+    //  |               |%layout = bitcast %ctx to <{%swift.context*, i32}>|
+    //  |               |%size_addr = getelementptr %layout, i32 0, i32 1  |
+    //  |               |%size = load %size_addr                           |
+    //  |               |br join(%fp, %size)                               |
+    //  |               +---/----------------------------------------------+
+    //  |                  /
+    //  |                 /
+    //  V                V
+    //  +-join(%fn, %size)------------------------------------------------------+
+    //  |%dataAddr = swift_taskAlloc(%task, %size)                               |
+    //  |%async_context = bitcast %dataAddr to ASYNC_CONTEXT(static_callee_type)|
+    //  |...  // populate the fields %ctx with arguments                         |
+    //  |call %fn(%async_context, %ctx)                                          |
+    //  +-----------------------------------------------------------------------+
+    auto *thinBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext());
+    auto *thickBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext());
     auto *joinBlock = llvm::BasicBlock::Create(IGF.IGM.getLLVMContext());

     auto hasThickContext =
         IGF.Builder.CreateICmpNE(thickContext, IGF.IGM.RefCountedNull);
-    IGF.Builder.CreateCondBr(hasThickContext, dynamicSizeBlock,
-                             staticSizeBlock);
-
-    SmallVector<std::pair<llvm::BasicBlock *, llvm::Value *>, 2> phiValues;
-    {
-      IGF.Builder.emitBlock(staticSizeBlock);
-      auto size = getAsyncContextSize(layout);
-      auto *sizeValue =
-          llvm::ConstantInt::get(IGF.IGM.Int32Ty, size.getValue());
-      phiValues.push_back({staticSizeBlock, sizeValue});
+    IGF.Builder.CreateCondBr(hasThickContext, thickBlock, thinBlock);
+
+    SmallVector<std::pair<llvm::BasicBlock *, llvm::Value *>, 2> fnPhiValues;
+    SmallVector<std::pair<llvm::BasicBlock *, llvm::Value *>, 2> sizePhiValues;
+    { // thin
+      IGF.Builder.emitBlock(thinBlock);
+      if (emitFunction) {
+        auto *uncastFnPtr = functionPointer.getPointer(IGF);
+        auto *fnPtr = IGF.Builder.CreateBitCast(uncastFnPtr, IGF.IGM.Int8PtrTy);
+        fnPhiValues.push_back({thinBlock, fnPtr});
+      }
+      if (emitSize) {
+        auto *ptr = functionPointer.getRawPointer();
+        auto *descriptorPtr =
+            IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy);
+        auto *sizePtr = IGF.Builder.CreateStructGEP(descriptorPtr, 1);
+        auto *size =
+            IGF.Builder.CreateLoad(sizePtr, IGF.IGM.getPointerAlignment());
+        sizePhiValues.push_back({thinBlock, size});
+      }
       IGF.Builder.CreateBr(joinBlock);
     }

-    {
-      IGF.Builder.emitBlock(dynamicSizeBlock);
-      SmallVector<const TypeInfo *, 4> argTypeInfos;
-      SmallVector<SILType, 4> argValTypes;
-      auto int32ASTType =
-          BuiltinIntegerType::get(32, IGF.IGM.IRGen.SIL.getASTContext())
-              ->getCanonicalType();
-      auto int32SILType = SILType::getPrimitiveObjectType(int32ASTType);
-      const TypeInfo &int32TI = IGF.IGM.getTypeInfo(int32SILType);
-      argValTypes.push_back(int32SILType);
-      argTypeInfos.push_back(&int32TI);
-      HeapLayout layout(IGF.IGM, LayoutStrategy::Optimal, argValTypes,
-                        argTypeInfos,
-                        /*typeToFill*/ nullptr, NecessaryBindings());
-      auto castThickContext =
-          layout.emitCastTo(IGF, thickContext, "context.prefix");
-      auto sizeLayout = layout.getElement(0);
-      auto sizeAddr = sizeLayout.project(IGF, castThickContext,
-                                         /*NonFixedOffsets*/ llvm::None);
-      auto *sizeValue = IGF.Builder.CreateLoad(sizeAddr);
-      phiValues.push_back({dynamicSizeBlock, sizeValue});
+    { // thick
+      IGF.Builder.emitBlock(thickBlock);
+      if (emitFunction) {
+        auto *uncastFnPtr = functionPointer.getRawPointer();
+        auto *fnPtr = IGF.Builder.CreateBitCast(uncastFnPtr, IGF.IGM.Int8PtrTy);
+        fnPhiValues.push_back({thickBlock, fnPtr});
+      }
+      if (emitSize) {
+        SmallVector<const TypeInfo *, 4> argTypeInfos;
+        SmallVector<SILType, 4> argValTypes;
+        auto int32ASTType =
+            BuiltinIntegerType::get(32, IGF.IGM.IRGen.SIL.getASTContext())
+                ->getCanonicalType();
+        auto int32SILType = SILType::getPrimitiveObjectType(int32ASTType);
+        const TypeInfo &int32TI = IGF.IGM.getTypeInfo(int32SILType);
+        argValTypes.push_back(int32SILType);
+        argTypeInfos.push_back(&int32TI);
+        HeapLayout layout(IGF.IGM, LayoutStrategy::Optimal, argValTypes,
+                          argTypeInfos,
+                          /*typeToFill*/ nullptr, NecessaryBindings());
+        auto castThickContext =
+            layout.emitCastTo(IGF, thickContext, "context.prefix");
+        auto sizeLayout = layout.getElement(0);
+        auto sizeAddr = sizeLayout.project(IGF, castThickContext,
+                                           /*NonFixedOffsets*/ llvm::None);
+        auto *sizeValue = IGF.Builder.CreateLoad(sizeAddr);
+        sizePhiValues.push_back({thickBlock, sizeValue});
+      }
       IGF.Builder.CreateBr(joinBlock);
     }

-    {
+    { // join
       IGF.Builder.emitBlock(joinBlock);
-      auto *phi = IGF.Builder.CreatePHI(IGF.IGM.Int32Ty, phiValues.size());
-      for (auto &entry : phiValues) {
-        phi->addIncoming(entry.second, entry.first);
+      llvm::Value *fn = nullptr;
+      llvm::PHINode *fnPhi = nullptr;
+      llvm::PHINode *sizePhi = nullptr;
+      if (emitFunction) {
+        fnPhi = IGF.Builder.CreatePHI(IGF.IGM.Int8PtrTy, fnPhiValues.size());
+      }
+      if (emitSize) {
+        sizePhi = IGF.Builder.CreatePHI(IGF.IGM.Int32Ty, sizePhiValues.size());
+      }
+      if (emitFunction) {
+        assert(fnPhi);
+        for (auto &entry : fnPhiValues) {
+          fnPhi->addIncoming(entry.second, entry.first);
+        }
+        fn = IGF.Builder.CreateBitCast(
+            fnPhi, functionPointer.getFunctionType()->getPointerTo());
+      }
+      llvm::Value *size = nullptr;
+      if (emitSize) {
+        assert(sizePhi);
+        for (auto &entry : sizePhiValues) {
+          sizePhi->addIncoming(entry.second, entry.first);
+        }
+        size = sizePhi;
       }
-      return phi;
+      return {fn, size};
     }
   }
   case SILFunctionTypeRepresentation::Thin:
@@ -1947,13 +1998,27 @@ llvm::Value *irgen::getDynamicAsyncContextSize(IRGenFunction &IGF,
   case SILFunctionTypeRepresentation::WitnessMethod:
   case SILFunctionTypeRepresentation::Closure:
   case SILFunctionTypeRepresentation::Block: {
-    auto size = getAsyncContextSize(layout);
-    auto *sizeValue = llvm::ConstantInt::get(IGF.IGM.Int32Ty, size.getValue());
-    return sizeValue;
+    llvm::Value *fn = nullptr;
+    if (emitFunction) {
+      fn = functionPointer.getPointer(IGF);
+    }
+    llvm::Value *size = nullptr;
+    if (emitSize) {
+      auto *ptr = functionPointer.getRawPointer();
+      auto *descriptorPtr =
+          IGF.Builder.CreateBitCast(ptr, IGF.IGM.AsyncFunctionPointerPtrTy);
+      auto *sizePtr = IGF.Builder.CreateStructGEP(descriptorPtr, 1);
+      size = IGF.Builder.CreateLoad(sizePtr, IGF.IGM.getPointerAlignment());
+    }
+    return {fn, size};
   }
   }
 }

+static void externalizeArguments(IRGenFunction &IGF, const Callee &callee,
+                                 Explosion &in, Explosion &out,
+                                 TemporarySet &temporaries, bool isOutlined);
+
 namespace {

 class SyncCallEmission final : public CallEmission {
@@ -1965,12 +2030,21 @@ class SyncCallEmission final : public CallEmission {
     setFromCallee();
   }

+  FunctionPointer getCalleeFunctionPointer() override {
+    return getCallee().getFunctionPointer().getAsFunction(IGF);
+  }
   SILType getParameterType(unsigned index) override {
     SILFunctionConventions origConv(getCallee().getOrigFunctionType(),
                                     IGF.getSILModule());
     return origConv.getSILArgumentType(
         index, IGF.IGM.getMaximalTypeExpansionContext());
   }
+
+  llvm::CallInst *createCall(const FunctionPointer &fn,
+                             ArrayRef<llvm::Value *> args) override {
+    return IGF.Builder.CreateCall(fn, args);
+  }
+
   void begin() override { super::begin(); }
   void end() override { super::end(); }
   void setFromCallee() override {
@@ -2162,8 +2236,9 @@ class AsyncCallEmission final : public CallEmission {
   using super = CallEmission;

   Address contextBuffer;
-  Size contextSize;
   Address context;
+  llvm::Value *calleeFunction = nullptr;
+  llvm::Value *currentResumeFn = nullptr;
   llvm::Value *thickContext = nullptr;
   Optional<AsyncContextLayout> asyncContextLayout;

@@ -2199,12 +2274,13 @@ class AsyncCallEmission final : public CallEmission {
     assert(!context.isValid());
     auto layout = getAsyncContextLayout();
     // Allocate space for the async arguments.
-    auto *dynamicContextSize32 = getDynamicAsyncContextSize(
-        IGF, layout, CurCallee.getOrigFunctionType(), thickContext);
+    llvm::Value *dynamicContextSize32;
+    std::tie(calleeFunction, dynamicContextSize32) = getAsyncFunctionAndSize(
+        IGF, CurCallee.getOrigFunctionType()->getRepresentation(),
+        CurCallee.getFunctionPointer(), thickContext);
     auto *dynamicContextSize =
         IGF.Builder.CreateZExt(dynamicContextSize32, IGF.IGM.SizeTy);
-    std::tie(contextBuffer, contextSize) = emitAllocAsyncContext(
-        IGF, layout, dynamicContextSize, getAsyncContextSize(layout));
+    contextBuffer = emitAllocAsyncContext(IGF, dynamicContextSize);
     context = layout.emitCastTo(IGF, contextBuffer.getAddress());
     if (layout.canHaveError()) {
       auto fieldLayout = layout.getErrorLayout();
@@ -2217,13 +2293,18 @@ class AsyncCallEmission final : public CallEmission {
   void end() override {
     assert(contextBuffer.isValid());
     assert(context.isValid());
-    emitDeallocAsyncContext(IGF, contextBuffer, contextSize);
+    emitDeallocAsyncContext(IGF, contextBuffer);
     super::end();
   }
   void setFromCallee() override {
     super::setFromCallee();
     thickContext = CurCallee.getSwiftContext();
   }
+  FunctionPointer getCalleeFunctionPointer() override {
+    return FunctionPointer(
+        FunctionPointer::KindTy::Function, calleeFunction, PointerAuthInfo(),
+        IGF.IGM.getSignature(getCallee().getSubstFunctionType()));
+  }
   SILType getParameterType(unsigned index) override {
     return getAsyncContextLayout().getParameterType(index);
   }
@@ -2257,6 +2338,26 @@ class AsyncCallEmission final : public CallEmission {
       explosion.add(context);
       saveValue(fieldLayout, explosion, isOutlined);
     }
+    { // Return to caller function.
+      auto fieldLayout = layout.getResumeParentLayout();
+      currentResumeFn = IGF.Builder.CreateIntrinsicCall(
+          llvm::Intrinsic::coro_async_resume, {});
+      auto fnVal = currentResumeFn;
+      // Sign the pointer.
+      // TODO: use a distinct schema.
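+      // The matching authentication happens in emitAsyncReturn, which
+      // re-derives the same schema and projects the same context field for
+      // use as the discriminator before tail-calling the resume function.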
+      if (auto schema = IGF.IGM.getOptions().PointerAuth.AsyncContextParent) {
+        Address fieldAddr =
+            fieldLayout.project(IGF, this->context, /*offsets*/ llvm::None);
+        auto authInfo = PointerAuthInfo::emit(
+            IGF, schema, fieldAddr.getAddress(), PointerAuthEntity());
+        fnVal = emitPointerAuthSign(IGF, fnVal, authInfo);
+      }
+      fnVal = IGF.Builder.CreateBitCast(fnVal,
+                                        IGF.IGM.TaskContinuationFunctionPtrTy);
+      Explosion explosion;
+      explosion.add(fnVal);
+      saveValue(fieldLayout, explosion, isOutlined);
+    }
     { // caller executor
       Explosion explosion;
       explosion.add(IGF.getAsyncExecutor());
@@ -2316,6 +2417,28 @@ class AsyncCallEmission final : public CallEmission {
     auto address = errorLayout.project(IGF, context, /*offsets*/ llvm::None);
     return address;
   };
+
+  llvm::CallInst *createCall(const FunctionPointer &fn,
+                             ArrayRef<llvm::Value *> args) override {
+    auto &IGM = IGF.IGM;
+    auto &Builder = IGF.Builder;
+    // Set up the suspend point.
+    SmallVector<llvm::Value *, 8> arguments;
+    arguments.push_back(currentResumeFn);
+    auto resumeProjFn = IGF.getOrCreateResumePrjFn();
+    arguments.push_back(
+        Builder.CreateBitOrPointerCast(resumeProjFn, IGM.Int8PtrTy));
+    auto dispatchFn = IGF.createAsyncDispatchFn(fn, args);
+    arguments.push_back(
+        Builder.CreateBitOrPointerCast(dispatchFn, IGM.Int8PtrTy));
+    arguments.push_back(
+        Builder.CreateBitOrPointerCast(fn.getRawPointer(), IGM.Int8PtrTy));
+    for (auto arg : args)
+      arguments.push_back(arg);
+    auto *id = Builder.CreateIntrinsicCall(llvm::Intrinsic::coro_suspend_async,
+                                           arguments);
+    return id;
+  }
 };

 } // end anonymous namespace
@@ -2369,7 +2492,8 @@ llvm::CallInst *CallEmission::emitCallSite() {
   EmittedCall = true;

   // Make the call and clear the arguments array.
-  const auto &fn = getCallee().getFunctionPointer();
+  FunctionPointer fn = getCalleeFunctionPointer();
+  assert(fn.getKind() == FunctionPointer::KindTy::Function);
   auto fnTy = fn.getFunctionType();

   // Coerce argument types for those cases where the IR type required
@@ -2383,7 +2507,7 @@ llvm::CallInst *CallEmission::emitCallSite() {
   }

   // TODO: exceptions!
-  auto call = IGF.Builder.CreateCall(fn, Args);
+  auto call = createCall(fn, Args);

   // Make coroutine calls opaque to LLVM analysis.
   if (IsCoroutine) {
@@ -2427,6 +2551,7 @@ llvm::CallInst *CallEmission::emitCallSite() {

 llvm::CallInst *IRBuilder::CreateCall(const FunctionPointer &fn,
                                       ArrayRef<llvm::Value *> args) {
+  assert(fn.getKind() == FunctionPointer::KindTy::Function);
   SmallVector<llvm::OperandBundleDef, 1> bundles;

   // Add a pointer-auth bundle if necessary.
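+  // A sketch of why the kind assert above matters, assuming the
+  // <{ /*fn rel addr*/ i32, /*size*/ i32 }> layout described in
+  // getAsyncFunctionAndSize: getRawPointer() on an async FunctionPointer
+  // yields the descriptor, not a callable entry point, so callers must first
+  // resolve it (see FunctionPointer::getPointer). In host-side terms:
+  //
+  //   struct AsyncFunctionPointer { int32_t Function; uint32_t Size; };
+  //   void *resolve(AsyncFunctionPointer *afp) {
+  //     // A relative pointer is an offset from the field's own address.
+  //     return reinterpret_cast<char *>(&afp->Function) + afp->Function;
+  //   }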
@@ -2437,11 +2562,11 @@ llvm::CallInst *IRBuilder::CreateCall(const FunctionPointer &fn,
     bundles.emplace_back("ptrauth", bundleArgs);
   }

-  assert(!isTrapIntrinsic(fn.getPointer()) && "Use CreateNonMergeableTrap");
+  assert(!isTrapIntrinsic(fn.getRawPointer()) && "Use CreateNonMergeableTrap");
   llvm::CallInst *call = IRBuilderBase::CreateCall(
       cast<llvm::FunctionType>(
-          fn.getPointer()->getType()->getPointerElementType()),
-      fn.getPointer(), args, bundles);
+          fn.getRawPointer()->getType()->getPointerElementType()),
+      fn.getRawPointer(), args, bundles);
   call->setAttributes(fn.getAttributes());
   call->setCallingConv(fn.getCallingConv());
   return call;
@@ -3387,6 +3512,28 @@ emitRetconCoroutineEntry(IRGenFunction &IGF, CanSILFunctionType fnType,
   IGF.setCoroutineHandle(hdl);
 }

+void irgen::emitAsyncFunctionEntry(IRGenFunction &IGF,
+                                   SILFunction *asyncFunction) {
+  auto &IGM = IGF.IGM;
+  auto size = getAsyncContextLayout(IGM, asyncFunction).getSize();
+  auto asyncFuncPointer = IGF.Builder.CreateBitOrPointerCast(
+      IGM.getAddrOfAsyncFunctionPointer(asyncFunction), IGM.Int8PtrTy);
+  auto *id = IGF.Builder.CreateIntrinsicCall(
+      llvm::Intrinsic::coro_id_async,
+      {llvm::ConstantInt::get(IGM.Int32Ty, size.getValue()),
+       llvm::ConstantInt::get(IGM.Int32Ty, 16),
+       llvm::ConstantInt::get(IGM.Int32Ty, 2), asyncFuncPointer});
+  // Call 'llvm.coro.begin', just for consistency with the normal pattern.
+  // This serves as a handle that we can pass around to other intrinsics.
+  auto hdl = IGF.Builder.CreateIntrinsicCall(
+      llvm::Intrinsic::coro_begin,
+      {id, llvm::ConstantPointerNull::get(IGM.Int8PtrTy)});
+
+  // Set the coroutine handle; this also flags that this is a coroutine so
+  // that e.g. dynamic allocas use the right code generation.
+  IGF.setCoroutineHandle(hdl);
+}
+
 void irgen::emitYieldOnceCoroutineEntry(
     IRGenFunction &IGF, CanSILFunctionType fnType,
     NativeCCEntryPointArgumentEmission &emission) {
@@ -3436,28 +3583,6 @@ void irgen::emitDeallocYieldManyCoroutineBuffer(IRGenFunction &IGF,
   IGF.Builder.CreateLifetimeEnd(buffer, bufferSize);
 }

-Address irgen::emitTaskAlloc(IRGenFunction &IGF, llvm::Value *size,
-                             Alignment alignment) {
-  auto *call = IGF.Builder.CreateCall(IGF.IGM.getTaskAllocFn(),
-                                      {IGF.getAsyncTask(), size});
-  call->setDoesNotThrow();
-  call->setCallingConv(IGF.IGM.SwiftCC);
-  call->addAttribute(llvm::AttributeList::FunctionIndex,
-                     llvm::Attribute::ReadNone);
-  auto address = Address(call, alignment);
-  return address;
-}
-
-void irgen::emitTaskDealloc(IRGenFunction &IGF, Address address,
-                            llvm::Value *size) {
-  auto *call = IGF.Builder.CreateCall(
-      IGF.IGM.getTaskDeallocFn(), {IGF.getAsyncTask(), address.getAddress()});
-  call->setDoesNotThrow();
-  call->setCallingConv(IGF.IGM.SwiftCC);
-  call->addAttribute(llvm::AttributeList::FunctionIndex,
-                     llvm::Attribute::ReadNone);
-}
-
 void irgen::emitTaskCancel(IRGenFunction &IGF, llvm::Value *task) {
   if (task->getType() != IGF.IGM.SwiftTaskPtrTy) {
     task = IGF.Builder.CreateBitCast(task, IGF.IGM.SwiftTaskPtrTy);
@@ -3474,11 +3599,9 @@ llvm::Value *irgen::emitTaskCreate(
   parentTask = IGF.Builder.CreateBitOrPointerCast(
       parentTask, IGF.IGM.SwiftTaskPtrTy);
   taskFunction = IGF.Builder.CreateBitOrPointerCast(
-      taskFunction, IGF.IGM.TaskContinuationFunctionPtrTy);
+      taskFunction, IGF.IGM.AsyncFunctionPointerPtrTy);

   // Determine the size of the async context for the closure.
-  // FIXME: If the task function comes in as an AsyncFunctionPointer, we might
-  // want to use swift_task_create instead of swift_task_create_f.
   ASTContext &ctx = IGF.IGM.IRGen.SIL.getASTContext();
   auto extInfo = ASTExtInfoBuilder().withAsync().withThrows().build();
   auto taskFunctionType = FunctionType::get(
@@ -3488,14 +3611,11 @@ llvm::Value *irgen::emitTaskCreate(
   auto layout = getAsyncContextLayout(
       IGF.IGM, taskFunctionCanSILType, taskFunctionCanSILType,
       SubstitutionMap());
-  auto layoutSize = getAsyncContextSize(layout);
-  auto layoutSizeVal = llvm::ConstantInt::get(
-      IGF.IGM.SizeTy, layoutSize.getValue());

   // Call the function.
   auto *result = IGF.Builder.CreateCall(
       IGF.IGM.getTaskCreateFuncFn(),
-      { flags, parentTask, taskFunction, layoutSizeVal });
+      { flags, parentTask, taskFunction });
   result->setDoesNotThrow();
   result->setCallingConv(IGF.IGM.SwiftCC);

@@ -3514,28 +3634,17 @@ llvm::Value *irgen::emitTaskCreate(
   return result;
 }

-std::pair<Address, Size> irgen::emitAllocAsyncContext(IRGenFunction &IGF,
-                                                      AsyncContextLayout layout,
-                                                      llvm::Value *sizeValue,
-                                                      Size sizeLowerBound) {
+Address irgen::emitAllocAsyncContext(IRGenFunction &IGF,
+                                     llvm::Value *sizeValue) {
   auto alignment = getAsyncContextAlignment(IGF.IGM);
-  auto address = emitTaskAlloc(IGF, sizeValue, alignment);
-  IGF.Builder.CreateLifetimeStart(address, sizeLowerBound);
-  return {address, sizeLowerBound};
-}
-
-std::pair<Address, Size>
-irgen::emitAllocAsyncContext(IRGenFunction &IGF, AsyncContextLayout layout) {
-  auto size = getAsyncContextSize(layout);
-  auto *sizeValue = llvm::ConstantInt::get(IGF.IGM.SizeTy, size.getValue());
-  return emitAllocAsyncContext(IGF, layout, sizeValue, size);
+  auto address = IGF.emitTaskAlloc(sizeValue, alignment);
+  IGF.Builder.CreateLifetimeStart(address, Size(-1) /*dynamic size*/);
+  return address;
 }

-void irgen::emitDeallocAsyncContext(IRGenFunction &IGF, Address context,
-                                    Size size) {
-  auto *sizeValue = llvm::ConstantInt::get(IGF.IGM.SizeTy, size.getValue());
-  emitTaskDealloc(IGF, context, sizeValue);
-  IGF.Builder.CreateLifetimeEnd(context, size);
+void irgen::emitDeallocAsyncContext(IRGenFunction &IGF, Address context) {
+  IGF.emitTaskDealloc(context);
+  IGF.Builder.CreateLifetimeEnd(context, Size(-1) /*dynamic size*/);
 }

 llvm::Value *irgen::emitYield(IRGenFunction &IGF,
@@ -4275,7 +4384,7 @@ void IRGenFunction::emitScalarReturn(SILType returnResultType,
 /// Modify the given variable to hold a pointer whose type is the
 /// LLVM lowering of the given function type, and return the signature
 /// for the type.
-static Signature emitCastOfFunctionPointer(IRGenFunction &IGF,
+Signature irgen::emitCastOfFunctionPointer(IRGenFunction &IGF,
                                            llvm::Value *&fnPtr,
                                            CanSILFunctionType fnType) {
   // Figure out the function type.
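+// Usage sketch (hypothetical caller, mirroring AsyncCallEmission::begin/end
+// above): the alloc/dealloc pair brackets the call, with the dynamic size
+// coming from getAsyncFunctionAndSize:
+//
+//   llvm::Value *fn, *size32;
+//   std::tie(fn, size32) = getAsyncFunctionAndSize(IGF, rep, fnPtr, ctx);
+//   auto *size = IGF.Builder.CreateZExt(size32, IGF.IGM.SizeTy);
+//   Address buffer = emitAllocAsyncContext(IGF, size);  // swift_task_alloc
+//   /* ... emit the suspend point and the call using fn and buffer ... */
+//   emitDeallocAsyncContext(IGF, buffer);               // swift_task_dealloc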
@@ -4309,7 +4418,8 @@ Callee irgen::getBlockPointerCallee(IRGenFunction &IGF,
                                            invokeFnPtrAddr.getAddress(),
                                            info.OrigFnType);

-  FunctionPointer fn(invokeFnPtr, authInfo, sig);
+  FunctionPointer fn(FunctionPointer::KindTy::Function, invokeFnPtr, authInfo,
+                     sig);
   return Callee(std::move(info), fn, blockPtr);
 }

@@ -4321,7 +4431,7 @@ Callee irgen::getSwiftFunctionPointerCallee(
   auto authInfo =
     PointerAuthInfo::forFunctionPointer(IGF.IGM, calleeInfo.OrigFnType);

-  FunctionPointer fn(fnPtr, authInfo, sig);
+  FunctionPointer fn(calleeInfo.OrigFnType, fnPtr, authInfo, sig);
   if (castOpaqueToRefcountedContext) {
     assert(dataPtr && dataPtr->getType() == IGF.IGM.OpaquePtrTy &&
            "Expecting trivial closure context");
@@ -4337,32 +4447,59 @@ Callee irgen::getCFunctionPointerCallee(IRGenFunction &IGF,
   auto authInfo =
     PointerAuthInfo::forFunctionPointer(IGF.IGM, calleeInfo.OrigFnType);

-  FunctionPointer fn(fnPtr, authInfo, sig);
+  FunctionPointer fn(FunctionPointer::KindTy::Function, fnPtr, authInfo, sig);

   return Callee(std::move(calleeInfo), fn);
 }

-FunctionPointer
-FunctionPointer::forDirect(IRGenModule &IGM, llvm::Constant *fnPtr,
-                           CanSILFunctionType fnType) {
-  return forDirect(fnPtr, IGM.getSignature(fnType));
+FunctionPointer FunctionPointer::forDirect(IRGenModule &IGM,
+                                           llvm::Constant *fnPtr,
+                                           CanSILFunctionType fnType) {
+  return forDirect(fnType, fnPtr, IGM.getSignature(fnType));
+}
+
+StringRef FunctionPointer::getName(IRGenModule &IGM) const {
+  assert(isConstant());
+  switch (Kind.value) {
+  case KindTy::Value::Function:
+    return getRawPointer()->getName();
+  case KindTy::Value::AsyncFunctionPointer:
+    return IGM
+        .getSILFunctionForAsyncFunctionPointer(
+            cast<llvm::Constant>(getDirectPointer()->getOperand(0)))
+        ->getName();
+  }
+}
+
+llvm::Value *FunctionPointer::getPointer(IRGenFunction &IGF) const {
+  switch (Kind.value) {
+  case KindTy::Value::Function:
+    return Value;
+  case KindTy::Value::AsyncFunctionPointer: {
+    auto *descriptorPtr =
+        IGF.Builder.CreateBitCast(Value, IGF.IGM.AsyncFunctionPointerPtrTy);
+    auto *addrPtr = IGF.Builder.CreateStructGEP(descriptorPtr, 0);
+    return IGF.emitLoadOfRelativePointer(
+        Address(addrPtr, IGF.IGM.getPointerAlignment()), /*isFar*/ false,
+        /*expectedType*/ getFunctionType()->getPointerTo());
+  }
+  }
 }

-FunctionPointer
-FunctionPointer::forExplosionValue(IRGenFunction &IGF, llvm::Value *fnPtr,
-                                   CanSILFunctionType fnType) {
+FunctionPointer FunctionPointer::forExplosionValue(IRGenFunction &IGF,
+                                                   llvm::Value *fnPtr,
+                                                   CanSILFunctionType fnType) {
   // Bitcast out of an opaque pointer type.
   assert(fnPtr->getType() == IGF.IGM.Int8PtrTy);
   auto sig = emitCastOfFunctionPointer(IGF, fnPtr, fnType);
   auto authInfo = PointerAuthInfo::forFunctionPointer(IGF.IGM, fnType);
-  return FunctionPointer(fnPtr, authInfo, sig);
+  return FunctionPointer(fnType, fnPtr, authInfo, sig);
 }

 llvm::Value *
 FunctionPointer::getExplosionValue(IRGenFunction &IGF,
                                    CanSILFunctionType fnType) const {
-  llvm::Value *fnPtr = getPointer();
+  llvm::Value *fnPtr = getRawPointer();

   // Re-sign to the appropriate schema for this function pointer type.
   auto resultAuthInfo = PointerAuthInfo::forFunctionPointer(IGF.IGM, fnType);
@@ -4375,3 +4512,40 @@ FunctionPointer::getExplosionValue(IRGenFunction &IGF,

   return fnPtr;
 }
+
+FunctionPointer FunctionPointer::getAsFunction(IRGenFunction &IGF) const {
+  return FunctionPointer(KindTy::Function, getPointer(IGF), AuthInfo, Sig);
+}
+
+void irgen::emitAsyncReturn(IRGenFunction &IGF, AsyncContextLayout &asyncLayout,
+                            CanSILFunctionType fnType) {
+  auto contextAddr = asyncLayout.emitCastTo(IGF, IGF.getAsyncContext());
+  auto returnToCallerLayout = asyncLayout.getResumeParentLayout();
+  auto returnToCallerAddr =
+      returnToCallerLayout.project(IGF, contextAddr, llvm::None);
+  Explosion fn;
+  cast<LoadableTypeInfo>(returnToCallerLayout.getType())
+      .loadAsCopy(IGF, returnToCallerAddr, fn);
+  llvm::Value *fnVal = fn.claimNext();
+
+  // TODO: use distinct schema
+  if (auto schema = IGF.IGM.getOptions().PointerAuth.AsyncContextParent) {
+    Address fieldAddr =
+        returnToCallerLayout.project(IGF, contextAddr, /*offsets*/ llvm::None);
+    auto authInfo = PointerAuthInfo::emit(IGF, schema, fieldAddr.getAddress(),
+                                          PointerAuthEntity());
+    fnVal = emitPointerAuthAuth(IGF, fnVal, authInfo);
+  }
+
+  auto sig = emitCastOfFunctionPointer(IGF, fnVal, fnType);
+  FunctionPointer fnPtr(FunctionPointer::KindTy::Function, fnVal,
+                        PointerAuthInfo(), sig);
+
+  SmallVector<llvm::Value *, 3> Args;
+  // Get the current task, executor, and async context.
+  Args.push_back(IGF.getAsyncTask());
+  Args.push_back(IGF.getAsyncExecutor());
+  Args.push_back(IGF.getAsyncContext());
+  auto call = IGF.Builder.CreateCall(fnPtr, Args);
+  call->setTailCall();
+}
diff --git a/lib/IRGen/GenCall.h b/lib/IRGen/GenCall.h
index f81bde36067fa..bf3b63b0bc545 100644
--- a/lib/IRGen/GenCall.h
+++ b/lib/IRGen/GenCall.h
@@ -302,13 +302,25 @@ namespace irgen {
                                        CanSILFunctionType substitutedType,
                                        SubstitutionMap substitutionMap);

-  llvm::Value *getDynamicAsyncContextSize(IRGenFunction &IGF,
-                                          AsyncContextLayout layout,
-                                          CanSILFunctionType functionType,
-                                          llvm::Value *thickContext);
+  /// Given an async function, get the pointer to the function to be called and
+  /// the size of the context to be allocated.
+  ///
+  /// \param values Whether any code should be emitted to retrieve the function
+  ///               pointer and the size, respectively. If false is passed, no
+  ///               code will be emitted to generate that value and null will
+  ///               be returned for it.
+  ///
+  /// \return {function, size}
+  std::pair<llvm::Value *, llvm::Value *> getAsyncFunctionAndSize(
+      IRGenFunction &IGF, SILFunctionTypeRepresentation representation,
+      FunctionPointer functionPointer, llvm::Value *thickContext,
+      std::pair<bool, bool> values = {true, true});
   llvm::CallingConv::ID expandCallingConv(IRGenModule &IGM,
                                     SILFunctionTypeRepresentation convention);

+  Signature emitCastOfFunctionPointer(IRGenFunction &IGF, llvm::Value *&fnPtr,
+                                      CanSILFunctionType fnType);
+
   /// Does the given function have a self parameter that should be given
   /// the special treatment for self parameters?
   bool hasSelfContextParameter(CanSILFunctionType fnType);
@@ -385,9 +397,6 @@ namespace irgen {
                                         CanSILFunctionType coroutineType,
                                         NativeCCEntryPointArgumentEmission &emission);

-  Address emitTaskAlloc(IRGenFunction &IGF, llvm::Value *size,
-                        Alignment alignment);
-  void emitTaskDealloc(IRGenFunction &IGF, Address address, llvm::Value *size);
   void emitTaskCancel(IRGenFunction &IGF, llvm::Value *task);

   /// Emit a call to swift_task_create[_f] with the given flags, parent task,
@@ -396,19 +405,11 @@ namespace irgen {
      IRGenFunction &IGF, llvm::Value *flags, llvm::Value *parentTask,
      llvm::Value *taskFunction, llvm::Value *localContextInfo);

-  /// Allocate task local storage for the specified layout but using the
-  /// provided dynamic size. Allowing the size to be specified dynamically is
-  /// necessary for applies of thick functions the sizes of whose async contexts
-  /// are dependent on the underlying, already partially applied, called
-  /// function. The provided sizeLowerBound will be used to track the lifetime
-  /// of the allocation that is known statically.
-  std::pair<Address, Size> emitAllocAsyncContext(IRGenFunction &IGF,
-                                                 AsyncContextLayout layout,
-                                                 llvm::Value *sizeValue,
-                                                 Size sizeLowerBound);
-  std::pair<Address, Size> emitAllocAsyncContext(IRGenFunction &IGF,
-                                                 AsyncContextLayout layout);
-  void emitDeallocAsyncContext(IRGenFunction &IGF, Address context, Size size);
+  /// Allocate task local storage for the provided dynamic size.
+  Address emitAllocAsyncContext(IRGenFunction &IGF, llvm::Value *sizeValue);
+  void emitDeallocAsyncContext(IRGenFunction &IGF, Address context);
+
+  void emitAsyncFunctionEntry(IRGenFunction &IGF, SILFunction *asyncFunc);

   /// Yield the given values from the current continuation.
   ///
@@ -423,6 +424,9 @@ namespace irgen {
     Executor = 1,
     Context = 2,
   };
+
+  void emitAsyncReturn(IRGenFunction &IGF, AsyncContextLayout &layout,
+                       CanSILFunctionType fnType);
 } // end namespace irgen
 } // end namespace swift

diff --git a/lib/IRGen/GenClangType.cpp b/lib/IRGen/GenClangType.cpp
index 8bc35dd99c134..5fbda4ebd0ed7 100644
--- a/lib/IRGen/GenClangType.cpp
+++ b/lib/IRGen/GenClangType.cpp
@@ -10,782 +10,25 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements generation of Clang AST types from Swift AST types
-// for types that are representable in Objective-C interfaces.
-// AST/ClangTypeConverter.cpp duplicates a bunch of code from here, so make
-// sure to keep the two in sync.
+// Wrapper functions for creating Clang types from Swift types.
 //
 //===----------------------------------------------------------------------===//

-#include "llvm/ADT/StringSwitch.h"
+#include "IRGenModule.h"
+
 #include "swift/AST/ASTContext.h"
-#include "swift/AST/CanTypeVisitor.h"
-#include "swift/AST/Decl.h"
-#include "swift/AST/ExistentialLayout.h"
-#include "swift/AST/NameLookup.h"
-#include "swift/SIL/SILType.h"
-#include "swift/ClangImporter/ClangImporter.h"
+#include "swift/AST/Types.h"
+
 #include "clang/AST/ASTContext.h"
-#include "clang/AST/Attr.h"
 #include "clang/AST/CanonicalType.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/DeclObjC.h"
 #include "clang/AST/Type.h"
-#include "clang/Sema/Sema.h"
-#include "clang/Basic/TargetInfo.h"
-#include "IRGenModule.h"

 using namespace swift;
 using namespace irgen;

-/// Global information about importing clang types.
-class swift::irgen::ClangTypeConverter {
-  llvm::DenseMap<CanType, clang::CanQualType> Cache;
-
-  ClangTypeConverter(const ClangTypeConverter &) = delete;
-  ClangTypeConverter &operator=(const ClangTypeConverter &) = delete;
-
-public:
-  ClangTypeConverter() = default;
-  clang::CanQualType convert(IRGenModule &IGM, CanType type);
-  clang::CanQualType reverseBuiltinTypeMapping(IRGenModule &IGM,
-                                               CanStructType type);
-};
-
-static CanType getNamedSwiftType(ModuleDecl *stdlib, StringRef name) {
-  auto &ctx = stdlib->getASTContext();
-  SmallVector<ValueDecl *, 1> results;
-  stdlib->lookupValue(ctx.getIdentifier(name), NLKind::QualifiedLookup,
-                      results);
-
-  // If we have one single type decl, and that decl has been
-  // type-checked, return its declared type.
-  //
-  // ...non-type-checked types should only ever show up here because
-  // of test cases using -enable-source-import, but unfortunately
-  // that's a real thing.
-  if (results.size() == 1) {
-    if (auto typeDecl = dyn_cast<TypeDecl>(results[0]))
-      return typeDecl->getDeclaredInterfaceType()->getCanonicalType();
-  }
-  return CanType();
-}
-
-static clang::CanQualType
-getClangBuiltinTypeFromKind(const clang::ASTContext &context,
-                            clang::BuiltinType::Kind kind) {
-  switch (kind) {
-#define BUILTIN_TYPE(Id, SingletonId)                                          \
-  case clang::BuiltinType::Id:                                                 \
-    return context.SingletonId;
-#include "clang/AST/BuiltinTypes.def"
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                   \
-  case clang::BuiltinType::Id:                                                 \
-    return context.SingletonId;
-#include "clang/Basic/OpenCLImageTypes.def"
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
-  case clang::BuiltinType::Id:                                                 \
-    return context.Id##Ty;
-#include "clang/Basic/OpenCLExtensionTypes.def"
-#define SVE_TYPE(Name, Id, SingletonId)                                        \
-  case clang::BuiltinType::Id:                                                 \
-    return context.SingletonId;
-#include "clang/Basic/AArch64SVEACLETypes.def"
-  }
-
-  llvm_unreachable("Not a valid BuiltinType.");
-}
-
-static clang::CanQualType getClangSelectorType(
-  const clang::ASTContext &clangCtx) {
-  return clangCtx.getPointerType(clangCtx.ObjCBuiltinSelTy);
-}
-
-static clang::CanQualType getClangMetatypeType(
-  const clang::ASTContext &clangCtx) {
-  clang::QualType clangType =
-      clangCtx.getObjCObjectType(clangCtx.ObjCBuiltinClassTy, nullptr, 0);
-  clangType = clangCtx.getObjCObjectPointerType(clangType);
-  return clangCtx.getCanonicalType(clangType);
-}
-
-static clang::CanQualType getClangIdType(
-  const clang::ASTContext &clangCtx) {
-  clang::QualType clangType =
-      clangCtx.getObjCObjectType(clangCtx.ObjCBuiltinIdTy, nullptr, 0);
-  clangType = clangCtx.getObjCObjectPointerType(clangType);
-  return clangCtx.getCanonicalType(clangType);
-}
-
-static clang::CanQualType getClangDecayedVaListType(
-  const clang::ASTContext &clangCtx) {
-  clang::QualType clangType =
-      clangCtx.getCanonicalType(clangCtx.getBuiltinVaListType());
-  if (clangType->isConstantArrayType())
-    clangType = clangCtx.getDecayedType(clangType);
-  return clangCtx.getCanonicalType(clangType);
-}
-
-namespace {
-/// Given a Swift type, attempt to return an appropriate Clang
-/// CanQualType for the purpose of generating correct code for the
-/// ABI.
-class GenClangType : public CanTypeVisitor<GenClangType, clang::CanQualType> {
-  IRGenModule &IGM;
-  irgen::ClangTypeConverter &Converter;
-
-public:
-  GenClangType(IRGenModule &IGM, irgen::ClangTypeConverter &converter)
-    : IGM(IGM), Converter(converter) {}
-
-  const clang::ASTContext &getClangASTContext() const {
-    return IGM.getClangASTContext();
-  }
-
-  /// Return the Clang struct type which was imported and resulted in
-  /// this Swift struct type. We do not currently handle generating a
-  /// new Clang struct type for Swift struct types that are created
-  /// independently of importing a Clang module.
-  clang::CanQualType visitStructType(CanStructType type);
-  clang::CanQualType visitTupleType(CanTupleType type);
-  clang::CanQualType visitMetatypeType(CanMetatypeType type);
-  clang::CanQualType visitExistentialMetatypeType(CanExistentialMetatypeType type);
-  clang::CanQualType visitProtocolType(CanProtocolType type);
-  clang::CanQualType visitClassType(CanClassType type);
-  clang::CanQualType visitBoundGenericClassType(CanBoundGenericClassType type);
-  clang::CanQualType visitBoundGenericType(CanBoundGenericType type);
-  clang::CanQualType visitEnumType(CanEnumType type);
-  clang::CanQualType visitFunctionType(CanFunctionType type);
-  clang::CanQualType visitProtocolCompositionType(
-      CanProtocolCompositionType type);
-  clang::CanQualType visitBuiltinRawPointerType(CanBuiltinRawPointerType type);
-  clang::CanQualType visitBuiltinIntegerType(CanBuiltinIntegerType type);
-  clang::CanQualType visitBuiltinFloatType(CanBuiltinFloatType type);
-  clang::CanQualType visitArchetypeType(CanArchetypeType type);
-  clang::CanQualType visitSILFunctionType(CanSILFunctionType type);
-  clang::CanQualType visitGenericTypeParamType(CanGenericTypeParamType type);
-  clang::CanQualType visitDynamicSelfType(CanDynamicSelfType type);
-
-  clang::CanQualType visitSILBlockStorageType(CanSILBlockStorageType type);
-
-  clang::CanQualType visitType(CanType type);
-
-  clang::CanQualType getCanonicalType(clang::QualType type) {
-    return getClangASTContext().getCanonicalType(type);
-  }
-
-  clang::CanQualType convertMemberType(NominalTypeDecl *DC,
-                                       StringRef memberName);
-};
-} // end anonymous namespace
-
-clang::CanQualType
-GenClangType::convertMemberType(NominalTypeDecl *DC, StringRef memberName) {
-  auto memberTypeDecl = cast<TypeDecl>(
-    DC->lookupDirect(IGM.Context.getIdentifier(memberName))[0]);
-  auto memberType = memberTypeDecl->getDeclaredInterfaceType()
-    ->getCanonicalType();
-  return Converter.convert(IGM, memberType);
-}
-
-static clang::CanQualType getClangVectorType(const clang::ASTContext &ctx,
-                                             clang::BuiltinType::Kind eltKind,
-                                             clang::VectorType::VectorKind vecKind,
-                                             StringRef numEltsString) {
-  unsigned numElts;
-  bool failedParse = numEltsString.getAsInteger(10, numElts);
-  assert(!failedParse && "vector type name didn't end in count?");
-  (void) failedParse;
-
-  auto eltTy = getClangBuiltinTypeFromKind(ctx, eltKind);
-  auto vecTy = ctx.getVectorType(eltTy, numElts, vecKind);
-  return ctx.getCanonicalType(vecTy);
-}
-
-clang::CanQualType GenClangType::visitStructType(CanStructType type) {
-  auto &ctx = IGM.getClangASTContext();
-
-  auto swiftDecl = type->getDecl();
-  StringRef name = swiftDecl->getName().str();
-
-  // We assume that the importer translates all of the following types
-  // directly to structs in the standard library.
-
-  // We want to recognize most of these types by name.
-#define CHECK_NAMED_TYPE(NAME, CLANG_TYPE) do {                         \
-    if (name == (NAME)) return CLANG_TYPE;                              \
-  } while (false)
-
-  CHECK_NAMED_TYPE("CGFloat", convertMemberType(swiftDecl, "NativeType"));
-  CHECK_NAMED_TYPE("OpaquePointer", ctx.VoidPtrTy);
-  CHECK_NAMED_TYPE("CVaListPointer", getClangDecayedVaListType(ctx));
-  CHECK_NAMED_TYPE("DarwinBoolean", ctx.UnsignedCharTy);
-  CHECK_NAMED_TYPE(swiftDecl->getASTContext().getSwiftName(
-                     KnownFoundationEntity::NSZone),
-                   ctx.VoidPtrTy);
-  CHECK_NAMED_TYPE("WindowsBool", ctx.IntTy);
-  CHECK_NAMED_TYPE("ObjCBool", ctx.ObjCBuiltinBoolTy);
-  CHECK_NAMED_TYPE("Selector", getClangSelectorType(ctx));
-  CHECK_NAMED_TYPE("UnsafeRawPointer", ctx.VoidPtrTy);
-  CHECK_NAMED_TYPE("UnsafeMutableRawPointer", ctx.VoidPtrTy);
-#undef CHECK_NAMED_TYPE
-
-  // Map vector types to the corresponding C vectors.
-#define MAP_SIMD_TYPE(TYPE_NAME, _, BUILTIN_KIND)                      \
-  if (name.startswith(#TYPE_NAME)) {                                   \
-    return getClangVectorType(ctx, clang::BuiltinType::BUILTIN_KIND,   \
-                              clang::VectorType::GenericVector,        \
-                              name.drop_front(sizeof(#TYPE_NAME)-1));  \
-  }
-#include "swift/ClangImporter/SIMDMappedTypes.def"
-
-  // Everything else we see here ought to be a translation of a builtin.
-  return Converter.reverseBuiltinTypeMapping(IGM, type);
-}
-
-static clang::CanQualType getClangBuiltinTypeFromTypedef(
-    clang::Sema &sema, StringRef typedefName) {
-  auto &context = sema.getASTContext();
-
-  auto identifier = &context.Idents.get(typedefName);
-  auto found = sema.LookupSingleName(sema.TUScope, identifier,
-                                     clang::SourceLocation(),
-                                     clang::Sema::LookupOrdinaryName);
-  auto typedefDecl = dyn_cast_or_null<clang::TypedefNameDecl>(found);
-  if (!typedefDecl)
-    return {};
-
-  auto underlyingTy =
-      context.getCanonicalType(typedefDecl->getUnderlyingType());
-
-  if (underlyingTy->getAs<clang::BuiltinType>())
-    return underlyingTy;
-  return {};
-}
-
-clang::CanQualType
-irgen::ClangTypeConverter::reverseBuiltinTypeMapping(IRGenModule &IGM,
-                                                     CanStructType type) {
-  // Handle builtin types by adding entries to the cache that reverse
-  // the mapping done by the importer. We could try to look at the
-  // members of the struct instead, but even if that's ABI-equivalent
-  // (which it had better be!), it might erase interesting semantic
-  // differences like integers vs. characters. This is important
-  // because CC lowering isn't the only purpose of this conversion.
-  //
-  // The importer maps builtin types like 'int' to named types like
-  // 'CInt', which are generally typealiases. So what we do here is
-  // map the underlying types of those typealiases back to the builtin
-  // type. These typealiases frequently create a many-to-one mapping,
-  // so just use the first type that mapped to a particular underlying
-  // type.
-  //
-  // This is the last thing that happens before asserting that the
-  // struct type doesn't have a mapping. Furthermore, all of the
-  // builtin types are pre-built in the clang ASTContext. So it's not
-  // really a significant performance problem to just cache all of them
-  // right here; it means making a few more entries in the cache than
-  // we really need, but it also means we won't end up repeating these
-  // stdlib lookups multiple times, and we have to perform multiple
-  // lookups anyway because the MAP_BUILTIN_TYPE database uses
-  // typealias names (like 'CInt') that aren't obviously associated
-  // with the underlying C library type.
- - auto stdlib = IGM.Context.getStdlibModule(); - assert(stdlib && "translating stdlib type to C without stdlib module?"); - auto &ctx = IGM.getClangASTContext(); - auto cacheStdlibType = [&](StringRef swiftName, - clang::BuiltinType::Kind builtinKind) { - CanType swiftType = getNamedSwiftType(stdlib, swiftName); - if (!swiftType) return; - - auto &sema = IGM.Context.getClangModuleLoader()->getClangSema(); - // Handle Int and UInt specially. On Apple platforms, these correspond to - // the NSInteger and NSUInteger typedefs, so map them back to those typedefs - // if they're available, to ensure we get consistent ObjC @encode strings. - if (swiftType->getAnyNominal() == IGM.Context.getIntDecl()) { - if (auto NSIntegerTy = getClangBuiltinTypeFromTypedef(sema, "NSInteger")){ - Cache.insert({swiftType, NSIntegerTy}); - return; - } - } else if (swiftType->getAnyNominal() == IGM.Context.getUIntDecl()) { - if (auto NSUIntegerTy = - getClangBuiltinTypeFromTypedef(sema, "NSUInteger")) { - Cache.insert({swiftType, NSUIntegerTy}); - return; - } - } - - Cache.insert({swiftType, getClangBuiltinTypeFromKind(ctx, builtinKind)}); - }; - -#define MAP_BUILTIN_TYPE(CLANG_BUILTIN_KIND, SWIFT_TYPE_NAME) \ - cacheStdlibType(#SWIFT_TYPE_NAME, clang::BuiltinType::CLANG_BUILTIN_KIND); -#include "swift/ClangImporter/BuiltinMappedTypes.def" - - // On 64-bit Windows, no C type is imported as an Int or UInt; CLong is - // imported as an Int32 and CLongLong as an Int64. Therefore, manually - // add mappings to C for Int and UInt. - // On 64-bit Cygwin, no manual mapping is required. - if (IGM.Triple.isOSWindows() && !IGM.Triple.isWindowsCygwinEnvironment() && - IGM.Triple.isArch64Bit()) { - // Map UInt to uintptr_t - auto swiftUIntType = getNamedSwiftType(stdlib, "UInt"); - auto clangUIntPtrType = ctx.getCanonicalType(ctx.getUIntPtrType()); - Cache.insert({swiftUIntType, clangUIntPtrType}); - - // Map Int to intptr_t - auto swiftIntType = getNamedSwiftType(stdlib, "Int"); - auto clangIntPtrType = ctx.getCanonicalType(ctx.getIntPtrType()); - Cache.insert({swiftIntType, clangIntPtrType}); - } - - // The above code sets up a bunch of mappings in the cache; just - // assume that we hit one of them. - auto it = Cache.find(type); - assert(it != Cache.end() && - "cannot translate Swift type to C! 
type is not specially known");
- return it->second;
-}
-
-clang::CanQualType GenClangType::visitTupleType(CanTupleType type) {
- unsigned e = type->getNumElements();
- if (e == 0)
- return getClangASTContext().VoidTy;
-
- CanType eltTy = type.getElementType(0);
- for (unsigned i = 1; i < e; ++i) {
- assert(eltTy == type.getElementType(i) &&
- "Only tuples where all element types are equal "
- "map to fixed-size arrays");
- }
-
- auto clangEltTy = Converter.convert(IGM, eltTy);
- if (!clangEltTy) return clang::CanQualType();
-
- APInt size(32, e);
- auto &ctx = getClangASTContext();
- return ctx.getCanonicalType(
- ctx.getConstantArrayType(clangEltTy, size, nullptr,
- clang::ArrayType::Normal, 0));
-}
-
-clang::CanQualType GenClangType::visitProtocolType(CanProtocolType type) {
- auto proto = type->getDecl();
- auto &clangCtx = getClangASTContext();
-
- if (!proto->isObjC()) {
- std::string s;
- llvm::raw_string_ostream err(s);
- err << "Trying to compute the clang type for a non-ObjC protocol type\n";
- proto->dump(err);
- llvm::report_fatal_error(err.str());
- }
-
- // Single protocol -> id<Proto>
- clang::IdentifierInfo *name = &clangCtx.Idents.get(proto->getName().get());
- auto *PDecl = clang::ObjCProtocolDecl::Create(
- const_cast<clang::ASTContext &>(clangCtx),
- clangCtx.getTranslationUnitDecl(), name,
- clang::SourceLocation(), clang::SourceLocation(), nullptr);
-
- // Attach an objc_runtime_name attribute with the Objective-C name to use
- // for this protocol.
- SmallString<64> runtimeNameBuffer;
- PDecl->addAttr(clang::ObjCRuntimeNameAttr::CreateImplicit(
- PDecl->getASTContext(),
- proto->getObjCRuntimeName(runtimeNameBuffer)));
-
- auto clangType = clangCtx.getObjCObjectType(clangCtx.ObjCBuiltinIdTy,
- &PDecl, 1);
- auto ptrTy = clangCtx.getObjCObjectPointerType(clangType);
- return clangCtx.getCanonicalType(ptrTy);
-}
-
-clang::CanQualType GenClangType::visitMetatypeType(CanMetatypeType type) {
- return getClangMetatypeType(getClangASTContext());
-}
-
-clang::CanQualType
-GenClangType::visitExistentialMetatypeType(CanExistentialMetatypeType type) {
- return getClangMetatypeType(getClangASTContext());
-}
-
-clang::CanQualType GenClangType::visitClassType(CanClassType type) {
- auto &clangCtx = getClangASTContext();
- auto swiftDecl = type->getDecl();
-
- // TODO: [non-objc-class-clang-type-conversion]
- // Crashing here instead of returning a bogus 'id' leads to test failures,
- // which is surprising.
- if (!swiftDecl->isObjC())
- return getClangIdType(clangCtx);
-
- // Produce the clang type INTF * if it is an imported ObjC object.
- clang::IdentifierInfo *ForwardClassId =
- &clangCtx.Idents.get(swiftDecl->getName().get());
- auto *CDecl = clang::ObjCInterfaceDecl::Create(
- clangCtx, clangCtx.getTranslationUnitDecl(),
- clang::SourceLocation(), ForwardClassId,
- /*typeParamList*/nullptr, /*PrevDecl=*/nullptr,
- clang::SourceLocation());
-
- // Attach an objc_runtime_name attribute with the Objective-C name to use
- // for this class.
- SmallString<64> runtimeNameBuffer;
- CDecl->addAttr(clang::ObjCRuntimeNameAttr::CreateImplicit(
- CDecl->getASTContext(),
- swiftDecl->getObjCRuntimeName(runtimeNameBuffer)));
-
- auto clangType = clangCtx.getObjCInterfaceType(CDecl);
- auto ptrTy = clangCtx.getObjCObjectPointerType(clangType);
- return clangCtx.getCanonicalType(ptrTy);
-}
-
-clang::CanQualType GenClangType::visitBoundGenericClassType(
- CanBoundGenericClassType type) {
- // Any @objc class type in Swift that shows up in an @objc method maps 1-1 to
- // "id <SomeProto>"; with clang's encoding ignoring the protocol list.
- return getClangIdType(getClangASTContext());
-}
-
-clang::CanQualType
-GenClangType::visitBoundGenericType(CanBoundGenericType type) {
- // We only expect *Pointer, SIMD* and Optional.
- if (auto underlyingTy =
- SILType::getPrimitiveObjectType(type).getOptionalObjectType()) {
- // The underlying type could be a bridged type, which makes any
- // sort of casual assertion here difficult.
- return Converter.convert(IGM, underlyingTy.getASTType());
- }
-
- auto swiftStructDecl = type->getDecl();
-
- enum class StructKind {
- Invalid,
- UnsafeMutablePointer,
- UnsafePointer,
- AutoreleasingUnsafeMutablePointer,
- Unmanaged,
- CFunctionPointer,
- SIMD,
- } kind = llvm::StringSwitch<StructKind>(swiftStructDecl->getName().str())
- .Case("UnsafeMutablePointer", StructKind::UnsafeMutablePointer)
- .Case("UnsafePointer", StructKind::UnsafePointer)
- .Case(
- "AutoreleasingUnsafeMutablePointer",
- StructKind::AutoreleasingUnsafeMutablePointer)
- .Case("Unmanaged", StructKind::Unmanaged)
- .Case("CFunctionPointer", StructKind::CFunctionPointer)
- .StartsWith("SIMD", StructKind::SIMD)
- .Default(StructKind::Invalid);
-
- auto args = type.getGenericArgs();
- assert(args.size() == 1 &&
- "should have a single generic argument!");
- auto loweredArgTy = IGM.getLoweredType(args[0]).getASTType();
-
- switch (kind) {
- case StructKind::Invalid:
- llvm_unreachable("Unexpected non-pointer generic struct type in imported"
- " Clang module!");
-
- case StructKind::UnsafeMutablePointer:
- case StructKind::Unmanaged:
- case StructKind::AutoreleasingUnsafeMutablePointer: {
- auto clangCanTy = Converter.convert(IGM, loweredArgTy);
- if (!clangCanTy) return clang::CanQualType();
- return getClangASTContext().getPointerType(clangCanTy);
- }
- case StructKind::UnsafePointer: {
- clang::QualType clangTy
- = Converter.convert(IGM, loweredArgTy).withConst();
- return getCanonicalType(getClangASTContext().getPointerType(clangTy));
- }
-
- case StructKind::CFunctionPointer: {
- auto &clangCtx = getClangASTContext();
-
- clang::QualType functionTy;
- if (isa<SILFunctionType>(loweredArgTy)) {
- functionTy = Converter.convert(IGM, loweredArgTy);
- } else {
- // Fall back to void().
- functionTy = clangCtx.getFunctionNoProtoType(clangCtx.VoidTy);
- }
- auto fnPtrTy = clangCtx.getPointerType(functionTy);
- return getCanonicalType(fnPtrTy);
- }
-
- case StructKind::SIMD: {
- clang::QualType scalarTy = Converter.convert(IGM, loweredArgTy);
- auto numEltsString = swiftStructDecl->getName().str();
- numEltsString.consume_front("SIMD");
- unsigned numElts;
- bool failedParse = numEltsString.getAsInteger(10, numElts);
- assert(!failedParse && "SIMD type name didn't end in count?");
- (void) failedParse;
- auto vectorTy = getClangASTContext().getVectorType(scalarTy, numElts,
- clang::VectorType::VectorKind::GenericVector);
- return getCanonicalType(vectorTy);
- }
- }
-
- llvm_unreachable("Not a valid StructKind.");
-}
-
-clang::CanQualType GenClangType::visitEnumType(CanEnumType type) {
- // Special case: Uninhabited enums are not @objc, so we don't
- // know what to do below, but we can just convert to 'void'.
- if (type->isUninhabited())
- return Converter.convert(IGM, IGM.Context.TheEmptyTupleType);
-
- assert(type->getDecl()->isObjC() && "not an @objc enum?!");
-
- // @objc enums lower to their raw types.
- return Converter.convert(IGM,
- type->getDecl()->getRawType()->getCanonicalType());
-}
-
-clang::CanQualType GenClangType::visitFunctionType(CanFunctionType type) {
- llvm_unreachable("FunctionType should have been lowered away");
-}
-
-clang::CanQualType GenClangType::visitSILFunctionType(CanSILFunctionType type) {
- auto &clangCtx = getClangASTContext();
-
- enum FunctionPointerKind {
- Block, CFunctionPointer,
- };
-
- FunctionPointerKind kind;
-
- switch (type->getRepresentation()) {
- case SILFunctionType::Representation::Block:
- kind = Block;
- break;
-
- case SILFunctionType::Representation::CFunctionPointer:
- kind = CFunctionPointer;
- break;
-
- case SILFunctionType::Representation::Thick:
- case SILFunctionType::Representation::Thin:
- case SILFunctionType::Representation::Method:
- case SILFunctionType::Representation::ObjCMethod:
- case SILFunctionType::Representation::WitnessMethod:
- case SILFunctionType::Representation::Closure:
- llvm_unreachable("not an ObjC-compatible function");
- }
-
- // Convert the return and parameter types.
- auto allResults = type->getResults();
- assert(allResults.size() <= 1 && "multiple results with C convention");
- clang::QualType resultType;
- if (allResults.empty()) {
- resultType = clangCtx.VoidTy;
- } else {
- resultType = Converter.convert(
- IGM,
- allResults[0].getReturnValueType(IGM.getSILModule(), type,
- IGM.getMaximalTypeExpansionContext()));
- if (resultType.isNull())
- return clang::CanQualType();
- }
-
- SmallVector<clang::QualType, 4> paramTypes;
- SmallVector<clang::FunctionProtoType::ExtParameterInfo, 4> extParamInfos;
- for (auto paramTy : type->getParameters()) {
- clang::FunctionProtoType::ExtParameterInfo extParamInfo;
-
- // Blocks should only take direct +0 parameters.
- switch (paramTy.getConvention()) {
- case ParameterConvention::Direct_Guaranteed:
- case ParameterConvention::Direct_Unowned:
- // OK.
- break;
-
- case ParameterConvention::Direct_Owned:
- extParamInfo = extParamInfo.withIsConsumed(true);
- break;
-
- case ParameterConvention::Indirect_In:
- case ParameterConvention::Indirect_In_Constant:
- case ParameterConvention::Indirect_Inout:
- case ParameterConvention::Indirect_InoutAliasable:
- case ParameterConvention::Indirect_In_Guaranteed:
- llvm_unreachable("block takes indirect parameter");
- }
- auto param = Converter.convert(
- IGM, paramTy.getArgumentType(IGM.getSILModule(), type,
- IGM.getMaximalTypeExpansionContext()));
- if (param.isNull())
- return clang::CanQualType();
-
- paramTypes.push_back(param);
- extParamInfos.push_back(extParamInfo);
- }
-
- // Build the Clang function type.
- clang::FunctionProtoType::ExtProtoInfo extProtoInfo;
- extProtoInfo.ExtParameterInfos = extParamInfos.begin();
-
- auto fnTy = clangCtx.getFunctionType(resultType, paramTypes, extProtoInfo);
- clang::QualType ptrTy;
-
- switch (kind) {
- case Block:
- ptrTy = clangCtx.getBlockPointerType(fnTy);
- break;
- case CFunctionPointer:
- ptrTy = clangCtx.getPointerType(fnTy);
- }
- return clangCtx.getCanonicalType(ptrTy);
-}
-
-clang::CanQualType GenClangType::visitSILBlockStorageType(CanSILBlockStorageType type) {
- // We'll select (void)(^)(). This isn't correct for all blocks, but block
- // storage type should only be converted for function signature lowering,
- // where the parameter types do not matter.
- auto &clangCtx = getClangASTContext();
- auto fnTy = clangCtx.getFunctionNoProtoType(clangCtx.VoidTy);
- auto blockTy = clangCtx.getBlockPointerType(fnTy);
- return clangCtx.getCanonicalType(blockTy);
-}
-
-clang::CanQualType GenClangType::visitProtocolCompositionType(
- CanProtocolCompositionType type) {
- auto &clangCtx = getClangASTContext();
-
- // FIXME. Eventually, this will have its own helper routine.
- SmallVector<clang::ObjCProtocolDecl *, 4> Protocols;
- auto layout = type.getExistentialLayout();
- assert(layout.isObjC() && "Cannot represent opaque existential in Clang");
-
- // AnyObject -> id.
- if (layout.isAnyObject())
- return getClangIdType(getClangASTContext());
-
- auto superclassTy = clangCtx.ObjCBuiltinIdTy;
- if (auto layoutSuperclassTy = layout.getSuperclass()) {
- superclassTy = clangCtx.getCanonicalType(
- cast<clang::ObjCObjectPointerType>(
- Converter.convert(IGM, CanType(layoutSuperclassTy)))
- ->getPointeeType());
- }
-
- for (Type t : layout.getProtocols()) {
- auto opt = cast<clang::ObjCObjectPointerType>(
- Converter.convert(IGM, CanType(t)));
- for (auto p : opt->quals())
- Protocols.push_back(p);
- }
-
- if (Protocols.empty())
- return superclassTy;
-
- // id<protocol-list>
- clang::ObjCProtocolDecl **ProtoQuals =
- new(clangCtx) clang::ObjCProtocolDecl*[Protocols.size()];
- memcpy(ProtoQuals, Protocols.data(),
- sizeof(clang::ObjCProtocolDecl*)*Protocols.size());
- auto clangType = clangCtx.getObjCObjectType(superclassTy,
- ProtoQuals,
- Protocols.size());
- auto ptrTy = clangCtx.getObjCObjectPointerType(clangType);
- return clangCtx.getCanonicalType(ptrTy);
-}
-
-clang::CanQualType GenClangType::visitBuiltinRawPointerType(
- CanBuiltinRawPointerType type) {
- return getClangASTContext().VoidPtrTy;
-}
-
-clang::CanQualType GenClangType::visitBuiltinIntegerType(
- CanBuiltinIntegerType type) {
- auto &ctx = getClangASTContext();
- if (type->getWidth().isPointerWidth())
- return ctx.getCanonicalType(ctx.getUIntPtrType());
- assert(type->getWidth().isFixedWidth());
- auto width = type->getWidth().getFixedWidth();
- if (width == 1)
- return ctx.BoolTy;
- return ctx.getCanonicalType(ctx.getIntTypeForBitwidth(width, /*signed*/ 0));
-}
-
-clang::CanQualType GenClangType::visitBuiltinFloatType(
- CanBuiltinFloatType type) {
- auto &ctx = getClangASTContext();
- auto &clangTargetInfo = ctx.getTargetInfo();
- const llvm::fltSemantics *format = &type->getAPFloatSemantics();
- if (format == &clangTargetInfo.getHalfFormat()) return ctx.HalfTy;
- if (format == &clangTargetInfo.getFloatFormat()) return ctx.FloatTy;
- if (format == &clangTargetInfo.getDoubleFormat()) return ctx.DoubleTy;
- if (format == &clangTargetInfo.getLongDoubleFormat()) return ctx.LongDoubleTy;
- llvm_unreachable("cannot translate floating-point format to C");
-}
-
-clang::CanQualType GenClangType::visitArchetypeType(CanArchetypeType type) {
- // We see these in the case where we invoke an @objc function
- // through a protocol.
- return getClangIdType(getClangASTContext());
-}
-
-clang::CanQualType GenClangType::visitDynamicSelfType(CanDynamicSelfType type) {
- // Dynamic Self is equivalent to 'instancetype', which is treated as
- // 'id' within the Objective-C type system.
- return getClangIdType(getClangASTContext());
-}
-
-clang::CanQualType GenClangType::visitGenericTypeParamType(
- CanGenericTypeParamType type) {
- // We see these in the case where we invoke an @objc function
- // through a protocol argument that is a generic type.
- return getClangIdType(getClangASTContext());
-}
-
-clang::CanQualType GenClangType::visitType(CanType type) {
- llvm_unreachable("Unexpected type in Clang type generation.");
-}
-
-clang::CanQualType irgen::ClangTypeConverter::convert(IRGenModule &IGM, CanType type) {
- // Look in the cache.
- auto it = Cache.find(type);
- if (it != Cache.end()) {
- return it->second;
- }
-
- // Try to do this without making cache entries for obvious cases.
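// ---- Editorial aside (not part of the patch) ----
// The convert() entry point deleted below follows a classic memoization
// shape: probe the cache, fast-path a few obvious cases without caching,
// otherwise run the visitor and record the result. A self-contained sketch
// of that shape over simplified types (all names here are illustrative):

#include <cassert>
#include <unordered_map>

template <typename K, typename V, typename Fn>
V memoized(std::unordered_map<K, V> &cache, const K &key, Fn compute) {
  auto it = cache.find(key);
  if (it != cache.end())
    return it->second;         // cache hit
  V result = compute(key);     // slow path: run the conversion
  cache.insert({key, result}); // remember it for next time
  return result;
}

int main() {
  std::unordered_map<int, int> cache;
  int calls = 0;
  auto compute = [&](int k) { ++calls; return k * 2; };
  assert(memoized(cache, 21, compute) == 42);
  assert(memoized(cache, 21, compute) == 42);
  assert(calls == 1); // the second lookup was served from the cache
  return 0;
}
// ---- End editorial aside ----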
- if (auto nominal = dyn_cast<NominalType>(type)) {
- auto decl = nominal->getDecl();
- if (auto clangDecl = decl->getClangDecl()) {
- auto &ctx = IGM.getClangASTContext();
- if (auto clangTypeDecl = dyn_cast<clang::TypeDecl>(clangDecl)) {
- return ctx.getCanonicalType(ctx.getTypeDeclType(clangTypeDecl))
- .getUnqualifiedType();
- } else if (auto ifaceDecl = dyn_cast<clang::ObjCInterfaceDecl>(clangDecl)) {
- auto clangType = ctx.getObjCInterfaceType(ifaceDecl);
- auto ptrTy = ctx.getObjCObjectPointerType(clangType);
- return ctx.getCanonicalType(ptrTy);
- } else if (auto protoDecl = dyn_cast<clang::ObjCProtocolDecl>(clangDecl)){
- auto clangType = ctx.getObjCObjectType(
- ctx.ObjCBuiltinIdTy,
- const_cast<clang::ObjCProtocolDecl **>(&protoDecl),
- 1);
- auto ptrTy = ctx.getObjCObjectPointerType(clangType);
- return ctx.getCanonicalType(ptrTy);
- }
- }
- }
-
- // If that failed, convert the type, cache, and return.
- clang::CanQualType result = GenClangType(IGM, *this).visit(type);
- Cache.insert({type, result});
- return result;
-}
-
 clang::CanQualType IRGenModule::getClangType(CanType type) {
- return ClangTypes->convert(*this, type);
+ auto *ty = type->getASTContext().getClangTypeForIRGen(type);
+ return ty ? ty->getCanonicalTypeUnqualified() : clang::CanQualType();
 }
 
 clang::CanQualType IRGenModule::getClangType(SILType type) {
@@ -811,18 +54,3 @@ clang::CanQualType IRGenModule::getClangType(SILParameterInfo params,
 }
 return clangType;
 }
-
-void IRGenModule::initClangTypeConverter() {
- if (auto loader = Context.getClangModuleLoader()) {
- auto importer = static_cast<ClangImporter *>(loader);
- ClangASTContext = &importer->getClangASTContext();
- ClangTypes = new ClangTypeConverter();
- } else {
- ClangASTContext = nullptr;
- ClangTypes = nullptr;
- }
-}
-
-void IRGenModule::destroyClangTypeConverter() {
- delete ClangTypes;
-}
diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp
index 74f0376b9683a..87aaf5fb3f40f 100644
--- a/lib/IRGen/GenClass.cpp
+++ b/lib/IRGen/GenClass.cpp
@@ -2522,7 +2522,7 @@ FunctionPointer irgen::emitVirtualMethodValue(IRGenFunction &IGF,
 IGF.IGM.getClassMetadataLayout(classDecl).getMethodInfo(IGF, method);
 switch (methodInfo.getKind()) {
 case ClassMetadataLayout::MethodInfo::Kind::Offset: {
- auto offset = methodInfo.getOffsett();
+ auto offset = methodInfo.getOffset();
 auto slot = IGF.emitAddressAtOffset(metadata, offset,
 signature.getType()->getPointerTo(),
@@ -2531,12 +2531,12 @@ FunctionPointer irgen::emitVirtualMethodValue(IRGenFunction &IGF,
 auto &schema = IGF.getOptions().PointerAuth.SwiftClassMethods;
 auto authInfo =
 PointerAuthInfo::emit(IGF, schema, slot.getAddress(), method);
- return FunctionPointer(fnPtr, authInfo, signature);
+ return FunctionPointer(methodType, fnPtr, authInfo, signature);
 }
 case ClassMetadataLayout::MethodInfo::Kind::DirectImpl: {
 auto fnPtr = llvm::ConstantExpr::getBitCast(methodInfo.getDirectImpl(),
 signature.getType()->getPointerTo());
- return FunctionPointer::forDirect(fnPtr, signature);
+ return FunctionPointer::forDirect(methodType, fnPtr, signature);
 }
 }
diff --git a/lib/IRGen/GenDecl.cpp b/lib/IRGen/GenDecl.cpp
index ff2570eb9b80d..1219e701de2c5 100644
--- a/lib/IRGen/GenDecl.cpp
+++ b/lib/IRGen/GenDecl.cpp
@@ -2614,7 +2614,8 @@ void IRGenModule::createReplaceableProlog(IRGenFunction &IGF, SILFunction *f) {
 LinkEntity::forDynamicallyReplaceableFunctionVariable(f);
 LinkEntity keyEntity =
 LinkEntity::forDynamicallyReplaceableFunctionKey(f);
- Signature signature = getSignature(f->getLoweredFunctionType());
+ auto silFunctionType = f->getLoweredFunctionType();
+ Signature signature = getSignature(silFunctionType);
 
 // Create and initialize
the first link entry for the chain of replacements. // The first implementation is initialized with 'implFn'. @@ -2673,9 +2674,9 @@ void IRGenModule::createReplaceableProlog(IRGenFunction &IGF, SILFunction *f) { auto authEntity = PointerAuthEntity(f); auto authInfo = PointerAuthInfo::emit(IGF, schema, fnPtrAddr, authEntity); - auto *Res = IGF.Builder.CreateCall(FunctionPointer(realReplFn, authInfo, - signature), - forwardedArgs); + auto *Res = IGF.Builder.CreateCall( + FunctionPointer(silFunctionType, realReplFn, authInfo, signature), + forwardedArgs); Res->setTailCall(); if (IGF.CurFn->getReturnType()->isVoidTy()) IGF.Builder.CreateRetVoid(); @@ -2728,8 +2729,10 @@ static void emitDynamicallyReplaceableThunk(IRGenModule &IGM, ? PointerAuthEntity(keyEntity.getSILFunction()) : PointerAuthEntity::Special::TypeDescriptor; auto authInfo = PointerAuthInfo::emit(IGF, schema, fnPtrAddr, authEntity); - auto *Res = IGF.Builder.CreateCall( - FunctionPointer(typeFnPtr, authInfo, signature), forwardedArgs); + auto *Res = + IGF.Builder.CreateCall(FunctionPointer(FunctionPointer::KindTy::Function, + typeFnPtr, authInfo, signature), + forwardedArgs); Res->setTailCall(); if (implFn->getReturnType()->isVoidTy()) @@ -2801,7 +2804,8 @@ void IRGenModule::emitDynamicReplacementOriginalFunctionThunk(SILFunction *f) { auto entity = LinkEntity::forSILFunction(f, true); - Signature signature = getSignature(f->getLoweredFunctionType()); + auto fnType = f->getLoweredFunctionType(); + Signature signature = getSignature(fnType); addLLVMFunctionAttributes(f, signature); LinkInfo implLink = LinkInfo::get(*this, entity, ForDefinition); @@ -2845,7 +2849,7 @@ void IRGenModule::emitDynamicReplacementOriginalFunctionThunk(SILFunction *f) { IGF, schema, fnPtrAddr, PointerAuthEntity(f->getDynamicallyReplacedFunction())); auto *Res = IGF.Builder.CreateCall( - FunctionPointer(typeFnPtr, authInfo, signature), forwardedArgs); + FunctionPointer(fnType, typeFnPtr, authInfo, signature), forwardedArgs); if (implFn->getReturnType()->isVoidTy()) IGF.Builder.CreateRetVoid(); diff --git a/lib/IRGen/GenFunc.cpp b/lib/IRGen/GenFunc.cpp index 7e1c69a758c24..40a1fd9dfc573 100644 --- a/lib/IRGen/GenFunc.cpp +++ b/lib/IRGen/GenFunc.cpp @@ -1128,7 +1128,23 @@ class AsyncPartialApplicationForwarderEmission llvm::Value *getContext() override { return heapContextBuffer; } llvm::Value *getDynamicFunctionPointer() override { assert(dynamicFunction && dynamicFunction->pointer); - return dynamicFunction->pointer; + auto *context = dynamicFunction->context; + if (!context) { + return dynamicFunction->pointer; + } + auto *rawFunction = subIGF.Builder.CreateBitCast( + dynamicFunction->pointer, origSig.getType()->getPointerTo()); + auto authInfo = PointerAuthInfo::forFunctionPointer(IGM, origType); + auto functionPointer = + FunctionPointer(FunctionPointer::KindTy::AsyncFunctionPointer, + rawFunction, authInfo, origSig); + llvm::Value *size = nullptr; + llvm::Value *function = nullptr; + std::tie(function, size) = getAsyncFunctionAndSize( + subIGF, origType->getRepresentation(), functionPointer, context, + {/*function*/ true, /*size*/ false}); + assert(size == nullptr); + return function; } llvm::Value *getDynamicFunctionContext() override { assert((dynamicFunction && dynamicFunction->context) || @@ -1236,7 +1252,8 @@ class AsyncPartialApplicationForwarderEmission asyncExplosion.add(dynamicFunction->context); } - return subIGF.Builder.CreateCall(fnPtr, asyncExplosion.claimAll()); + return subIGF.Builder.CreateCall(fnPtr.getAsFunction(subIGF), + 
asyncExplosion.claimAll());
 }
 void createReturn(llvm::CallInst *call) override {
 subIGF.Builder.CreateRetVoid();
@@ -1289,7 +1306,7 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,
 
 StringRef FnName;
 if (staticFnPtr)
- FnName = staticFnPtr->getPointer()->getName();
+ FnName = staticFnPtr->getName(IGM);
 
 IRGenMangler Mangler;
 std::string thunkName = Mangler.manglePartialApplyForwarder(FnName);
@@ -1683,10 +1700,10 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,
 FunctionPointer fnPtr = [&]() -> FunctionPointer {
 // If we found a function pointer statically, great.
 if (staticFnPtr) {
- if (staticFnPtr->getPointer()->getType() != fnTy) {
- auto fnPtr = staticFnPtr->getPointer();
+ if (staticFnPtr->getPointer(subIGF)->getType() != fnTy) {
+ auto fnPtr = staticFnPtr->getPointer(subIGF);
 fnPtr = subIGF.Builder.CreateBitCast(fnPtr, fnTy);
- return FunctionPointer(fnPtr, origSig);
+ return FunctionPointer(origType, fnPtr, origSig);
 }
 return *staticFnPtr;
 }
@@ -1706,7 +1723,8 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM,
 lastCapturedFieldPtr,
 PointerAuthEntity::Special::PartialApplyCapture);
 
- return FunctionPointer(fnPtr, authInfo, origSig);
+ return FunctionPointer(FunctionPointer::KindTy::Function, fnPtr, authInfo,
+ origSig);
 }();
 
 // Derive the context argument if needed. This is either:
@@ -1993,7 +2011,7 @@ Optional<StackAddress> irgen::emitFunctionPartialApplication(
 hasSingleSwiftRefcountedContext == Yes &&
 outType->getCalleeConvention() == *singleRefcountedConvention) {
 assert(args.size() == 1);
- auto fnPtr = emitPointerAuthResign(IGF, fn, outAuthInfo).getPointer();
+ auto fnPtr = emitPointerAuthResign(IGF, fn, outAuthInfo).getPointer(IGF);
 fnPtr = IGF.Builder.CreateBitCast(fnPtr, IGF.IGM.Int8PtrTy);
 out.add(fnPtr);
 llvm::Value *ctx = args.claimNext();
@@ -2032,8 +2050,13 @@ Optional<StackAddress> irgen::emitFunctionPartialApplication(
 emitPartialApplicationForwarder(IGF.IGM, staticFn, fnContext != nullptr,
 origSig, origType, substType, outType,
 subs, nullptr, argConventions);
- forwarder = emitPointerAuthSign(IGF, forwarder, outAuthInfo);
- forwarder = IGF.Builder.CreateBitCast(forwarder, IGF.IGM.Int8PtrTy);
+ if (origType->isAsync()) {
+ llvm_unreachable(
+ "async functions never have a single refcounted context");
+ } else {
+ forwarder = emitPointerAuthSign(IGF, forwarder, outAuthInfo);
+ forwarder = IGF.Builder.CreateBitCast(forwarder, IGF.IGM.Int8PtrTy);
+ }
 out.add(forwarder);
 
 llvm::Value *ctx = args.claimNext();
@@ -2121,9 +2144,10 @@ Optional<StackAddress> irgen::emitFunctionPartialApplication(
 auto schemaAuthInfo =
 PointerAuthInfo::emit(IGF, schema, fieldAddr.getAddress(),
 PointerAuthEntity::Special::PartialApplyCapture);
- fnPtr = emitPointerAuthResign(IGF, fn, schemaAuthInfo).getPointer();
+ fnPtr =
+ emitPointerAuthResign(IGF, fn, schemaAuthInfo).getRawPointer();
 } else {
- fnPtr = fn.getPointer();
+ fnPtr = fn.getRawPointer();
 }
 fnPtr = IGF.Builder.CreateBitCast(fnPtr, IGF.IGM.Int8PtrTy);
 IGF.Builder.CreateStore(fnPtr, fieldAddr);
@@ -2163,16 +2187,9 @@ Optional<StackAddress> irgen::emitFunctionPartialApplication(
 // Create the forwarding stub.
auto origSig = IGF.IGM.getSignature(origType);
 
- llvm::Value *forwarder = emitPartialApplicationForwarder(IGF.IGM,
- staticFn,
- fnContext != nullptr,
- origSig,
- origType,
- substType,
- outType,
- subs,
- &layout,
- argConventions);
+ llvm::Value *forwarder = emitPartialApplicationForwarder(
+ IGF.IGM, staticFn, fnContext != nullptr, origSig, origType, substType,
+ outType, subs, &layout, argConventions);
 forwarder = emitPointerAuthSign(IGF, forwarder, outAuthInfo);
 forwarder = IGF.Builder.CreateBitCast(forwarder, IGF.IGM.Int8PtrTy);
 out.add(forwarder);
@@ -2355,3 +2372,56 @@ void irgen::emitBlockHeader(IRGenFunction &IGF,
 IGF.Builder.CreateStore(descriptorVal,
 IGF.Builder.CreateStructGEP(headerAddr, 4, layout));
 }
+
+llvm::Function *IRGenFunction::getOrCreateResumePrjFn() {
+ auto name = "__swift_async_resume_project_context";
+ return cast<llvm::Function>(IGM.getOrCreateHelperFunction(
+ name, IGM.Int8PtrTy, {IGM.Int8PtrTy},
+ [&](IRGenFunction &IGF) {
+ auto it = IGF.CurFn->arg_begin();
+ auto &Builder = IGF.Builder;
+ auto addr = Builder.CreateBitOrPointerCast(&(*it), IGF.IGM.Int8PtrPtrTy);
+ Address callerContextAddr(addr, IGF.IGM.getPointerAlignment());
+ auto callerContext = Builder.CreateLoad(callerContextAddr);
+ Builder.CreateRet(callerContext);
+ },
+ false /*isNoInline*/));
+}
+
+llvm::Function *
+IRGenFunction::createAsyncDispatchFn(const FunctionPointer &fnPtr,
+ ArrayRef<llvm::Value *> args) {
+ SmallVector<llvm::Type *, 8> argTys;
+ argTys.push_back(IGM.Int8PtrTy); // Function pointer to be called.
+ for (auto arg : args) {
+ auto *ty = arg->getType();
+ argTys.push_back(ty);
+ }
+ auto calleeFnPtrType = fnPtr.getRawPointer()->getType();
+ auto *dispatchFnTy =
+ llvm::FunctionType::get(IGM.VoidTy, argTys, false /*vaargs*/);
+ llvm::SmallString<40> name;
+ llvm::raw_svector_ostream(name) << "__swift_suspend_dispatch_" << args.size();
+ llvm::Function *dispatch =
+ llvm::Function::Create(dispatchFnTy, llvm::Function::InternalLinkage,
+ llvm::StringRef(name), &IGM.Module);
+ dispatch->setCallingConv(IGM.DefaultCC);
+ dispatch->setDoesNotThrow();
+ IRGenFunction dispatchIGF(IGM, dispatch);
+ if (IGM.DebugInfo)
+ IGM.DebugInfo->emitArtificialFunction(dispatchIGF, dispatch);
+ auto &Builder = dispatchIGF.Builder;
+ auto it = dispatchIGF.CurFn->arg_begin(), end = dispatchIGF.CurFn->arg_end();
+ llvm::Value *ptrArg = &*(it++);
+ SmallVector<llvm::Value *, 8> callArgs;
+ for (; it != end; ++it) {
+ callArgs.push_back(&*it);
+ }
+ ptrArg = Builder.CreateBitOrPointerCast(ptrArg, calleeFnPtrType);
+ auto callee = FunctionPointer(fnPtr.getKind(), ptrArg, fnPtr.getAuthInfo(),
+ fnPtr.getSignature());
+ auto call = Builder.CreateCall(callee, callArgs);
+ call->setTailCall();
+ Builder.CreateRetVoid();
+ return dispatch;
+}
diff --git a/lib/IRGen/GenFunc.h b/lib/IRGen/GenFunc.h
index 4c839cb6b509f..7aa69cbfffc69 100644
--- a/lib/IRGen/GenFunc.h
+++ b/lib/IRGen/GenFunc.h
@@ -55,7 +55,6 @@ namespace irgen {
 CanSILFunctionType outType, Explosion &out,
 bool isOutlined);
 CanType getArgumentLoweringType(CanType type, SILParameterInfo paramInfo,
 bool isNoEscape);
-
 } // end namespace irgen
 } // end namespace swift
 
diff --git a/lib/IRGen/GenKeyPath.cpp b/lib/IRGen/GenKeyPath.cpp
index e1aca0304ab67..b984112ac20cf 100644
--- a/lib/IRGen/GenKeyPath.cpp
+++ b/lib/IRGen/GenKeyPath.cpp
@@ -256,7 +256,7 @@ getAccessorForComputedComponent(IRGenModule &IGM,
 forwardedArgs);
 }
 auto fnPtr = FunctionPointer::forDirect(IGM, accessorFn,
- accessor->getLoweredFunctionType());
+ accessor->getLoweredFunctionType());
 auto call = IGF.Builder.CreateCall(fnPtr,
forwardedArgs.claimAll());
 if (call->getType()->isVoidTy())
diff --git a/lib/IRGen/GenMeta.cpp b/lib/IRGen/GenMeta.cpp
index 57633c26c8984..9b132af7cef5c 100644
--- a/lib/IRGen/GenMeta.cpp
+++ b/lib/IRGen/GenMeta.cpp
@@ -3121,8 +3121,12 @@ namespace {
 // The class is fragile. Emit a direct reference to the vtable entry.
 llvm::Constant *ptr;
 if (entry) {
- ptr = IGM.getAddrOfSILFunction(entry->getImplementation(),
- NotForDefinition);
+ if (entry->getImplementation()->isAsync()) {
+ ptr = IGM.getAddrOfAsyncFunctionPointer(entry->getImplementation());
+ } else {
+ ptr = IGM.getAddrOfSILFunction(entry->getImplementation(),
+ NotForDefinition);
+ }
 } else {
 // The method is removed by dead method elimination.
 // It should be never called. We add a pointer to an error function.
@@ -5294,3 +5298,16 @@ bool irgen::methodRequiresReifiedVTableEntry(IRGenModule &IGM,
 llvm::dbgs() << " can be elided\n");
 return false;
 }
+
+llvm::GlobalValue *irgen::emitAsyncFunctionPointer(IRGenModule &IGM,
+ SILFunction *function,
+ Size size) {
+ ConstantInitBuilder initBuilder(IGM);
+ ConstantStructBuilder builder(
+ initBuilder.beginStruct(IGM.AsyncFunctionPointerTy));
+ builder.addRelativeAddress(
+ IGM.getAddrOfSILFunction(function, NotForDefinition));
+ builder.addInt32(size.getValue());
+ return cast<llvm::GlobalValue>(IGM.defineAsyncFunctionPointer(
+ function, builder.finishAndCreateFuture()));
+}
diff --git a/lib/IRGen/GenMeta.h b/lib/IRGen/GenMeta.h
index 837a35db8f7f0..fd51224af8031 100644
--- a/lib/IRGen/GenMeta.h
+++ b/lib/IRGen/GenMeta.h
@@ -32,6 +32,7 @@ namespace swift {
 class FileUnit;
 class FuncDecl;
 enum class ResilienceExpansion : unsigned;
+ struct SILDeclRef;
 class SILType;
 class VarDecl;
 enum class SpecialProtocol : uint8_t;
@@ -181,6 +182,8 @@ namespace irgen {
 GenericSignature sig,
 ArrayRef<GenericRequirement> requirements);
 
+ llvm::GlobalValue *emitAsyncFunctionPointer(IRGenModule &IGM,
+ SILFunction *function, Size size);
 } // end namespace irgen
 } // end namespace swift
 
diff --git a/lib/IRGen/GenObjC.cpp b/lib/IRGen/GenObjC.cpp
index 25feeda16a592..af2ca7f5208e7 100644
--- a/lib/IRGen/GenObjC.cpp
+++ b/lib/IRGen/GenObjC.cpp
@@ -657,7 +657,8 @@ Callee irgen::getObjCMethodCallee(IRGenFunction &IGF,
 Selector selector(method);
 llvm::Value *selectorValue = IGF.emitObjCSelectorRefLoad(selector.str());
 
- auto fn = FunctionPointer::forDirect(messenger, sig);
+ auto fn = FunctionPointer::forDirect(FunctionPointer::KindTy::Function,
+ messenger, sig);
 return Callee(std::move(info), fn, receiverValue, selectorValue);
 }
 
@@ -1084,6 +1085,8 @@ static llvm::Constant *getObjCEncodingForTypes(IRGenModule &IGM,
 
 std::string encodingString;
 
+ auto fnClangTy = fnType->getClangTypeInfo().getType();
+
 // Return type.
{
 auto clangType = IGM.getClangType(resultType.getASTType());
diff --git a/lib/IRGen/GenOpaque.cpp b/lib/IRGen/GenOpaque.cpp
index 96f09d622a47b..48b75e473a0f8 100644
--- a/lib/IRGen/GenOpaque.cpp
+++ b/lib/IRGen/GenOpaque.cpp
@@ -431,7 +431,8 @@ static FunctionPointer emitLoadOfValueWitnessFunction(IRGenFunction &IGF,
 IGF.getOptions().PointerAuth.ValueWitnesses,
 slot, index);
 
- return FunctionPointer(witness, authInfo, signature);
+ return FunctionPointer(FunctionPointer::KindTy::Function, witness, authInfo,
+ signature);
 }
 
 /// Given a type metadata pointer, load one of the function
@@ -477,12 +478,13 @@ IRGenFunction::emitValueWitnessFunctionRef(SILType type,
 assert(discriminator && "no saved discriminator for value witness fn!");
 authInfo = PointerAuthInfo(schema.getKey(), discriminator);
 }
- return FunctionPointer(witness, authInfo, signature);
+ return FunctionPointer(FunctionPointer::KindTy::Function, witness, authInfo,
+ signature);
 }
 
 auto vwtable = emitValueWitnessTableRef(type, &metadataSlot);
 auto witness = emitLoadOfValueWitnessFunction(*this, vwtable, index);
- setScopedLocalTypeDataForLayout(type, key, witness.getPointer());
+ setScopedLocalTypeDataForLayout(type, key, witness.getPointer(*this));
 if (auto &authInfo = witness.getAuthInfo()) {
 setScopedLocalTypeDataForLayout(type,
 LocalTypeDataKind::forValueWitnessDiscriminator(index),
@@ -534,8 +536,19 @@ StackAddress IRGenFunction::emitDynamicAlloca(llvm::Type *eltTy,
 llvm::Value *arraySize,
 Alignment align,
 const llvm::Twine &name) {
+ // Async functions call task alloc.
+ if (isAsync()) {
+ llvm::Value *byteCount;
+ auto eltSize = IGM.DataLayout.getTypeAllocSize(eltTy);
+ if (eltSize == 1) {
+ byteCount = arraySize;
+ } else {
+ byteCount = Builder.CreateMul(arraySize, IGM.getSize(Size(eltSize)));
+ }
+ auto address = emitTaskAlloc(byteCount, align);
+ return {address, address.getAddress()};
 // In coroutines, call llvm.coro.alloca.alloc.
- if (isCoroutine()) {
+ } else if (isCoroutine()) {
 // Compute the number of bytes to allocate.
 llvm::Value *byteCount;
 auto eltSize = IGM.DataLayout.getTypeAllocSize(eltTy);
@@ -587,11 +600,16 @@ StackAddress IRGenFunction::emitDynamicAlloca(llvm::Type *eltTy,
 /// Deallocate dynamic alloca's memory if requested by restoring the stack
 /// location before the dynamic alloca's call.
 void IRGenFunction::emitDeallocateDynamicAlloca(StackAddress address) {
+ // Async functions use taskDealloc.
+ if (isAsync() && address.getAddress().isValid()) {
+ emitTaskDealloc(Address(address.getExtraInfo(), address.getAlignment()));
+ return;
+ }
 // In coroutines, unconditionally call llvm.coro.alloca.free.
 // Except if the address is invalid, this happens when this is a StackAddress
 // for a partial_apply [stack] that did not need a context object on the
 // stack.
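// ---- Editorial aside (not part of the patch) ----
// In the async path added above, emitDynamicAlloca computes
// byteCount = arraySize * eltSize (skipping the multiply when eltSize is 1)
// and allocates from the task, and emitDeallocateDynamicAlloca frees in
// reverse order. A toy model of that pairing -- not the Swift runtime's
// allocator, just the byte-count math and the LIFO discipline:

#include <cassert>
#include <cstddef>
#include <vector>

struct ToyTaskAllocator {
  std::vector<size_t> live; // byte counts of live allocations, LIFO
  size_t used = 0;

  void alloc(size_t eltSize, size_t arraySize) {
    // Mirrors the eltSize == 1 fast path in emitDynamicAlloca.
    size_t byteCount = (eltSize == 1) ? arraySize : arraySize * eltSize;
    live.push_back(byteCount);
    used += byteCount;
  }
  void dealloc() {
    assert(!live.empty() && "unbalanced task dealloc");
    used -= live.back();
    live.pop_back();
  }
};

int main() {
  ToyTaskAllocator a;
  a.alloc(8, 4);  // four 8-byte elements -> 32 bytes
  a.alloc(1, 16); // eltSize == 1: byteCount is just arraySize
  a.dealloc();
  a.dealloc();
  assert(a.used == 0);
  return 0;
}
// ---- End editorial aside ----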
- if (isCoroutine() && address.getAddress().isValid()) { + else if (isCoroutine() && address.getAddress().isValid()) { auto allocToken = address.getExtraInfo(); assert(allocToken && "dynamic alloca in coroutine without alloc token?"); auto freeFn = llvm::Intrinsic::getDeclaration( diff --git a/lib/IRGen/GenPointerAuth.cpp b/lib/IRGen/GenPointerAuth.cpp index aeae73999d0d8..6729655f268a0 100644 --- a/lib/IRGen/GenPointerAuth.cpp +++ b/lib/IRGen/GenPointerAuth.cpp @@ -71,9 +71,11 @@ llvm::Value *irgen::emitPointerAuthStrip(IRGenFunction &IGF, FunctionPointer irgen::emitPointerAuthResign(IRGenFunction &IGF, const FunctionPointer &fn, const PointerAuthInfo &newAuthInfo) { - llvm::Value *fnPtr = emitPointerAuthResign(IGF, fn.getPointer(), + // TODO: Handle resigning AsyncFunctionPointers. + assert(fn.getKind().value == FunctionPointer::KindTy::Value::Function); + llvm::Value *fnPtr = emitPointerAuthResign(IGF, fn.getPointer(IGF), fn.getAuthInfo(), newAuthInfo); - return FunctionPointer(fnPtr, newAuthInfo, fn.getSignature()); + return FunctionPointer(fn.getKind(), fnPtr, newAuthInfo, fn.getSignature()); } llvm::Value *irgen::emitPointerAuthResign(IRGenFunction &IGF, diff --git a/lib/IRGen/GenProto.cpp b/lib/IRGen/GenProto.cpp index b112e5b184805..36c0be215a24d 100644 --- a/lib/IRGen/GenProto.cpp +++ b/lib/IRGen/GenProto.cpp @@ -1336,7 +1336,11 @@ class AccessorConformanceInfo : public ConformanceInfo { SILFunction *Func = entry.getMethodWitness().Witness; llvm::Constant *witness = nullptr; if (Func) { - witness = IGM.getAddrOfSILFunction(Func, NotForDefinition); + if (Func->isAsync()) { + witness = IGM.getAddrOfAsyncFunctionPointer(Func); + } else { + witness = IGM.getAddrOfSILFunction(Func, NotForDefinition); + } } else { // The method is removed by dead method elimination. // It should be never called. We add a pointer to an error function. @@ -3341,7 +3345,7 @@ FunctionPointer irgen::emitWitnessMethodValue(IRGenFunction &IGF, auto &schema = IGF.getOptions().PointerAuth.ProtocolWitnesses; auto authInfo = PointerAuthInfo::emit(IGF, schema, slot, member); - return FunctionPointer(witnessFnPtr, authInfo, signature); + return FunctionPointer(fnType, witnessFnPtr, authInfo, signature); } FunctionPointer irgen::emitWitnessMethodValue( diff --git a/lib/IRGen/GenThunk.cpp b/lib/IRGen/GenThunk.cpp index c3bc0844433ce..40b4aab440854 100644 --- a/lib/IRGen/GenThunk.cpp +++ b/lib/IRGen/GenThunk.cpp @@ -18,9 +18,11 @@ #include "Callee.h" #include "ClassMetadataVisitor.h" +#include "ConstantBuilder.h" #include "Explosion.h" -#include "GenDecl.h" +#include "GenCall.h" #include "GenClass.h" +#include "GenDecl.h" #include "GenHeap.h" #include "GenOpaque.h" #include "GenPointerAuth.h" @@ -30,7 +32,9 @@ #include "MetadataLayout.h" #include "ProtocolInfo.h" #include "Signature.h" +#include "swift/AST/GenericEnvironment.h" #include "swift/IRGen/Linking.h" +#include "swift/SIL/SILDeclRef.h" #include "llvm/IR/Function.h" using namespace swift; @@ -77,14 +81,32 @@ static FunctionPointer lookupMethod(IRGenFunction &IGF, SILDeclRef declRef) { // Load the metadata, or use the 'self' value if we have a static method. llvm::Value *self; - // Non-throwing class methods always have the 'self' parameter at the end. - // Throwing class methods have 'self' right before the error parameter. - // - // FIXME: Should find a better way of expressing this. 
- if (funcTy->hasErrorResult())
- self = (IGF.CurFn->arg_end() - 2);
- else
- self = (IGF.CurFn->arg_end() - 1);
+ if (funcTy->isAsync()) {
+ auto originalType = funcTy;
+ auto forwardingSubstitutionMap =
+ decl->getGenericEnvironment()
+ ? decl->getGenericEnvironment()->getForwardingSubstitutionMap()
+ : SubstitutionMap();
+ auto substitutedType = originalType->substGenericArgs(
+ IGF.IGM.getSILModule(), forwardingSubstitutionMap,
+ IGF.IGM.getMaximalTypeExpansionContext());
+ auto layout = getAsyncContextLayout(IGF.IGM, originalType, substitutedType,
+ forwardingSubstitutionMap);
+ assert(layout.hasLocalContext());
+ auto context = layout.emitCastTo(IGF, IGF.getAsyncContext());
+ auto localContextAddr =
+ layout.getLocalContextLayout().project(IGF, context, llvm::None);
+ self = IGF.Builder.CreateLoad(localContextAddr);
+ } else {
+ // Non-throwing class methods always have the 'self' parameter at the end.
+ // Throwing class methods have 'self' right before the error parameter.
+ //
+ // FIXME: Should find a better way of expressing this.
+ if (funcTy->hasErrorResult())
+ self = (IGF.CurFn->arg_end() - 2);
+ else
+ self = (IGF.CurFn->arg_end() - 1);
+ }
 auto selfTy = funcTy->getSelfParameter().getSILStorageType(
 IGF.IGM.getSILModule(), funcTy, IGF.IGM.getMaximalTypeExpansionContext());
@@ -107,13 +129,15 @@ void IRGenModule::emitDispatchThunk(SILDeclRef declRef) {
 }
 
 IRGenFunction IGF(*this, f);
+ IGF.setAsync(declRef.getAbstractFunctionDecl()->hasAsync());
 
 // Look up the method.
 auto fn = lookupMethod(IGF, declRef);
 
 // Call the witness, forwarding all of the parameters.
 auto params = IGF.collectParameters();
- auto result = IGF.Builder.CreateCall(fn, params.claimAll());
+ auto result =
+ IGF.Builder.CreateCall(fn.getAsFunction(IGF), params.claimAll());
 
 // Return the result, if we have one.
 if (result->getType()->isVoidTy())
@@ -122,6 +146,33 @@ void IRGenModule::emitDispatchThunk(SILDeclRef declRef) {
 IGF.Builder.CreateRet(result);
 }
 
+llvm::Constant *
+IRGenModule::getAddrOfAsyncFunctionPointer(SILFunction *function) {
+ (void)getAddrOfSILFunction(function, NotForDefinition);
+ auto entity = LinkEntity::forAsyncFunctionPointer(function);
+ return getAddrOfLLVMVariable(entity, NotForDefinition, DebugTypeInfo());
+}
+
+llvm::Constant *IRGenModule::defineAsyncFunctionPointer(SILFunction *function,
+ ConstantInit init) {
+ auto entity = LinkEntity::forAsyncFunctionPointer(function);
+ auto *var = cast<llvm::GlobalVariable>(
+ getAddrOfLLVMVariable(entity, init, DebugTypeInfo()));
+ setTrueConstGlobal(var);
+ return var;
+}
+
+SILFunction *
+IRGenModule::getSILFunctionForAsyncFunctionPointer(llvm::Constant *afp) {
+ for (auto &entry : GlobalVars) {
+ if (entry.getSecond() == afp) {
+ auto entity = entry.getFirst();
+ return entity.getSILFunction();
+ }
+ }
+ return nullptr;
+}
+
 llvm::GlobalValue *IRGenModule::defineMethodDescriptor(SILDeclRef declRef,
 NominalTypeDecl *nominalDecl,
 llvm::Constant *definition) {
diff --git a/lib/IRGen/IRGenFunction.cpp b/lib/IRGen/IRGenFunction.cpp
index e1bcf0338473a..e6cbbeac8a263 100644
--- a/lib/IRGen/IRGenFunction.cpp
+++ b/lib/IRGen/IRGenFunction.cpp
@@ -307,6 +307,24 @@ void IRGenFunction::emitStoreOfRelativeIndirectablePointer(llvm::Value *value,
 Builder.CreateStore(difference, addr);
 }
 
+llvm::Value *
+IRGenFunction::emitLoadOfRelativePointer(Address addr, bool isFar,
+ llvm::PointerType *expectedType,
+ const llvm::Twine &name) {
+ llvm::Value *value = Builder.CreateLoad(addr);
+ assert(value->getType() ==
+ (isFar ?
IGM.FarRelativeAddressTy : IGM.RelativeAddressTy));
+ if (!isFar) {
+ value = Builder.CreateSExt(value, IGM.IntPtrTy);
+ }
+ auto *addrInt = Builder.CreatePtrToInt(addr.getAddress(), IGM.IntPtrTy);
+ auto *uncastPointerInt = Builder.CreateAdd(addrInt, value);
+ auto *uncastPointer = Builder.CreateIntToPtr(uncastPointerInt, IGM.Int8PtrTy);
+ auto uncastPointerAddress = Address(uncastPointer, IGM.getPointerAlignment());
+ auto pointer = Builder.CreateBitCast(uncastPointerAddress, expectedType);
+ return pointer.getAddress();
+}
+
 llvm::Value *
 IRGenFunction::emitLoadOfRelativeIndirectablePointer(Address addr,
 bool isFar,
@@ -472,3 +490,20 @@ void IRGenFunction::emitTrap(StringRef failureMessage, bool EmitUnreachable) {
 if (EmitUnreachable)
 Builder.CreateUnreachable();
 }
+
+Address IRGenFunction::emitTaskAlloc(llvm::Value *size, Alignment alignment) {
+ auto *call = Builder.CreateCall(IGM.getTaskAllocFn(), {getAsyncTask(), size});
+ call->setDoesNotThrow();
+ call->setCallingConv(IGM.SwiftCC);
+ call->addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::ReadNone);
+ auto address = Address(call, alignment);
+ return address;
+}
+
+void IRGenFunction::emitTaskDealloc(Address address) {
+ auto *call = Builder.CreateCall(IGM.getTaskDeallocFn(),
+ {getAsyncTask(), address.getAddress()});
+ call->setDoesNotThrow();
+ call->setCallingConv(IGM.SwiftCC);
+}
diff --git a/lib/IRGen/IRGenFunction.h b/lib/IRGen/IRGenFunction.h
index 61f43c33993c4..ea6de515dbf93 100644
--- a/lib/IRGen/IRGenFunction.h
+++ b/lib/IRGen/IRGenFunction.h
@@ -83,7 +83,7 @@ class IRGenFunction {
 OptimizationMode Mode = OptimizationMode::NotSet,
 const SILDebugScope *DbgScope = nullptr,
 Optional<SILLocation> DbgLoc = None);
- virtual ~IRGenFunction();
+ ~IRGenFunction();
 
 void unimplemented(SourceLoc Loc, StringRef Message);
 
@@ -128,9 +128,13 @@ class IRGenFunction {
 CoroutineHandle = handle;
 }
 
- virtual llvm::Value *getAsyncTask();
- virtual llvm::Value *getAsyncExecutor();
- virtual llvm::Value *getAsyncContext();
+ llvm::Value *getAsyncTask();
+ llvm::Value *getAsyncExecutor();
+ llvm::Value *getAsyncContext();
+
+ llvm::Function *getOrCreateResumePrjFn();
+ llvm::Function *createAsyncDispatchFn(const FunctionPointer &fnPtr,
+ ArrayRef<llvm::Value *> args);
 
 private:
 void emitPrologue();
@@ -206,7 +210,9 @@ class IRGenFunction {
 emitLoadOfRelativeIndirectablePointer(Address addr, bool isFar,
 llvm::PointerType *expectedType,
 const llvm::Twine &name = "");
-
+ llvm::Value *emitLoadOfRelativePointer(Address addr, bool isFar,
+ llvm::PointerType *expectedType,
+ const llvm::Twine &name = "");
 llvm::Value *emitAllocObjectCall(llvm::Value *metadata,
 llvm::Value *size,
 llvm::Value *alignMask,
@@ -450,6 +456,10 @@ class IRGenFunction {
 llvm::Value *emitIsEscapingClosureCall(llvm::Value *value, SourceLoc loc,
 unsigned verificationType);
 
+ Address emitTaskAlloc(llvm::Value *size,
+ Alignment alignment);
+ void emitTaskDealloc(Address address);
+
 //--- Expression emission
 //------------------------------------------------------
 public:
diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp
index 70ca469352c0d..989ada945c2f6 100644
--- a/lib/IRGen/IRGenModule.cpp
+++ b/lib/IRGen/IRGenModule.cpp
@@ -554,7 +554,10 @@ IRGenModule::IRGenModule(IRGenerator &irgen,
 MainInputFilenameForDebugInfo,
 PrivateDiscriminator);
 
- initClangTypeConverter();
+ if (auto loader = Context.getClangModuleLoader()) {
+ ClangASTContext =
+ &static_cast<ClangImporter *>(loader)->getClangASTContext();
+ }
 
 if (ClangASTContext) {
 auto atomicBoolTy =
ClangASTContext->getAtomicType(ClangASTContext->BoolTy);
@@ -595,9 +598,12 @@ IRGenModule::IRGenModule(IRGenerator &irgen,
 DynamicReplacementKeyTy =
 createStructType(*this, "swift.dyn_repl_key",
 {RelativeAddressTy, Int32Ty});
+ AsyncFunctionPointerTy = createStructType(*this, "swift.async_func_pointer",
+ {RelativeAddressTy, Int32Ty}, true);
 SwiftContextTy = createStructType(*this, "swift.context", {});
 SwiftTaskTy = createStructType(*this, "swift.task", {});
 SwiftExecutorTy = createStructType(*this, "swift.executor", {});
+ AsyncFunctionPointerPtrTy = AsyncFunctionPointerTy->getPointerTo(DefaultAS);
 SwiftContextPtrTy = SwiftContextTy->getPointerTo(DefaultAS);
 SwiftTaskPtrTy = SwiftTaskTy->getPointerTo(DefaultAS);
 SwiftExecutorPtrTy = SwiftExecutorTy->getPointerTo(DefaultAS);
@@ -619,7 +625,6 @@ IRGenModule::IRGenModule(IRGenerator &irgen,
 }
 
 IRGenModule::~IRGenModule() {
- destroyClangTypeConverter();
 destroyMetadataLayoutMap();
 destroyPointerAuthCaches();
 delete &Types;
diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h
index a11eb32d18476..28db424a881da 100644
--- a/lib/IRGen/IRGenModule.h
+++ b/lib/IRGen/IRGenModule.h
@@ -724,9 +724,11 @@ class IRGenModule {
 *DynamicReplacementLinkEntryPtrTy; // %link_entry*
 
 llvm::StructType *DynamicReplacementKeyTy; // { i32, i32}
+ llvm::StructType *AsyncFunctionPointerTy; // { i32, i32 }
 llvm::StructType *SwiftContextTy;
 llvm::StructType *SwiftTaskTy;
 llvm::StructType *SwiftExecutorTy;
+ llvm::PointerType *AsyncFunctionPointerPtrTy;
 llvm::PointerType *SwiftContextPtrTy;
 llvm::PointerType *SwiftTaskPtrTy;
 llvm::PointerType *SwiftExecutorPtrTy;
@@ -965,9 +967,6 @@ class IRGenModule {
 friend TypeConverter;
 
 const clang::ASTContext *ClangASTContext;
- ClangTypeConverter *ClangTypes;
- void initClangTypeConverter();
- void destroyClangTypeConverter();
 
 llvm::DenseMap<NominalTypeDecl *, std::unique_ptr<MetadataLayout>> MetadataLayouts;
 void destroyMetadataLayoutMap();
 
@@ -1393,6 +1392,11 @@ private: \
 /// Cast the given constant to i8*.
 llvm::Constant *getOpaquePtr(llvm::Constant *pointer);
 
+ llvm::Constant *getAddrOfAsyncFunctionPointer(SILFunction *function);
+ llvm::Constant *defineAsyncFunctionPointer(SILFunction *function,
+ ConstantInit init);
+ SILFunction *getSILFunctionForAsyncFunctionPointer(llvm::Constant *afp);
+
 llvm::Function *getAddrOfDispatchThunk(SILDeclRef declRef,
 ForDefinition_t forDefinition);
 void emitDispatchThunk(SILDeclRef declRef);
diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp
index 2545bbda16b58..6327e80bd080d 100644
--- a/lib/IRGen/IRGenSIL.cpp
+++ b/lib/IRGen/IRGenSIL.cpp
@@ -15,6 +15,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/IR/Constant.h"
 #define DEBUG_TYPE "irgensil"
 #include "swift/AST/ASTContext.h"
 #include "swift/AST/IRGenOptions.h"
@@ -70,6 +71,7 @@
 #include "GenFunc.h"
 #include "GenHeap.h"
 #include "GenIntegerLiteral.h"
+#include "GenMeta.h"
 #include "GenObjC.h"
 #include "GenOpaque.h"
 #include "GenPointerAuth.h"
@@ -854,42 +856,6 @@ class IRGenSILFunction :
 }
 }
 
- llvm::Value *getAsyncTask() override {
- // FIXME: (1) Remove this override, (2) mark the IRGenFunction::getAsyncTask
- // declaration as non-virtual, and (3) mark IRGenFunction's
- // destructor non-virtual once Task.runDetached is available.
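// ---- Editorial aside (not part of the patch) ----
// swift.async_func_pointer, created above, is a pair of i32s: a relative
// address of the function and the size of its async context (see
// emitAsyncFunctionPointer in GenMeta.cpp). The sketch below models how such
// an i32 relative pointer is resolved -- sign-extend the stored offset and
// add it to the address of the field itself, mirroring the arithmetic in
// emitLoadOfRelativePointer. The struct and helper are illustrative, not the
// runtime's declarations; statics keep the offset within i32 range here.

#include <cassert>
#include <cstdint>

struct ToyAsyncFunctionPointer {
  int32_t relativeFunctionOffset; // target minus the address of this field
  uint32_t contextSize;           // bytes needed for the async context
};

static uintptr_t resolve(const ToyAsyncFunctionPointer &afp) {
  auto base = reinterpret_cast<uintptr_t>(&afp.relativeFunctionOffset);
  return base + static_cast<intptr_t>(afp.relativeFunctionOffset);
}

static char target; // stand-in for a function entry point
static ToyAsyncFunctionPointer afp;

int main() {
  afp.relativeFunctionOffset = static_cast<int32_t>(
      reinterpret_cast<intptr_t>(&target) -
      reinterpret_cast<intptr_t>(&afp.relativeFunctionOffset));
  afp.contextSize = 64;
  assert(resolve(afp) == reinterpret_cast<uintptr_t>(&target));
  return 0;
}
// ---- End editorial aside ----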
- // rdar://problem/70597390*/
- if (CurSILFn->getLoweredFunctionType()->getRepresentation() ==
- SILFunctionTypeRepresentation::CFunctionPointer) {
- return llvm::Constant::getNullValue(IGM.SwiftTaskPtrTy);
- }
- return IRGenFunction::getAsyncTask();
- }
-
- llvm::Value *getAsyncExecutor() override {
- // FIXME: (1) Remove this override, (2) mark the
- // IRGenFunction::getAsyncExecutor declaration as non-virtual, and
- // (3) mark IRGenFunction's destructor non-virtual once
- // Task.runDetached is available. rdar://problem/70597390*/
- if (CurSILFn->getLoweredFunctionType()->getRepresentation() ==
- SILFunctionTypeRepresentation::CFunctionPointer) {
- return llvm::Constant::getNullValue(IGM.SwiftExecutorPtrTy);
- }
- return IRGenFunction::getAsyncExecutor();
- }
-
- llvm::Value *getAsyncContext() override {
- // FIXME: (1) Remove this override, (2) mark the
- // IRGenFunction::getAsyncContext declaration as non-virtual, and
- // (3) mark IRGenFunction's destructor non-virtual once
- // Task.runDetached is available. rdar://problem/70597390*/
- if (CurSILFn->getLoweredFunctionType()->getRepresentation() ==
- SILFunctionTypeRepresentation::CFunctionPointer) {
- return llvm::Constant::getNullValue(IGM.SwiftContextPtrTy);
- }
- return IRGenFunction::getAsyncContext();
- }
-
 //===--------------------------------------------------------------------===//
 // SIL instruction lowering
 //===--------------------------------------------------------------------===//
@@ -1260,6 +1226,42 @@ class IRGenSILFunction :
 llvm::Value *getCoroutineBuffer() override {
 return allParamValues.claimNext();
 }
+ Explosion
+ explosionForObject(IRGenFunction &IGF, unsigned index, SILArgument *param,
+ SILType paramTy, const LoadableTypeInfo &loadableParamTI,
+ const LoadableTypeInfo &loadableArgTI,
+ std::function<Explosion(unsigned, unsigned)>
+ explosionForArgument) override {
+ Explosion paramValues;
+ // If the explosion must be passed indirectly, load the value from the
+ // indirect address.
+ auto &nativeSchema = loadableArgTI.nativeParameterValueSchema(IGF.IGM);
+ if (nativeSchema.requiresIndirect()) {
+ Explosion paramExplosion = explosionForArgument(index, 1);
+ Address paramAddr =
+ loadableParamTI.getAddressForPointer(paramExplosion.claimNext());
+ if (loadableParamTI.getStorageType() != loadableArgTI.getStorageType())
+ paramAddr =
+ loadableArgTI.getAddressForPointer(IGF.Builder.CreateBitCast(
+ paramAddr.getAddress(),
+ loadableArgTI.getStorageType()->getPointerTo()));
+ loadableArgTI.loadAsTake(IGF, paramAddr, paramValues);
+ } else {
+ if (!nativeSchema.empty()) {
+ // Otherwise, we map from the native convention to the type's explosion
+ // schema.
+ Explosion nativeParam;
+ unsigned size = nativeSchema.size();
+ Explosion paramExplosion = explosionForArgument(index, size);
+ paramExplosion.transferInto(nativeParam, size);
+ paramValues = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeParam,
+ param->getType());
+ } else {
+ assert(loadableParamTI.getSchema().empty());
+ }
+ }
+ return paramValues;
+ };
 
 public:
 using SyncEntryPointArgumentEmission::requiresIndirectResult;
@@ -1310,10 +1312,8 @@ class AsyncNativeCCEntryPointArgumentEmission final
 return loadValue(contextLayout);
 }
 Explosion getArgumentExplosion(unsigned index, unsigned size) override {
- assert(size > 0);
 auto argumentLayout = layout.getArgumentLayout(index);
 auto result = loadExplosion(argumentLayout);
- assert(result.size() == size);
 return result;
 }
 bool requiresIndirectResult(SILType retType) override { return false; }
@@ -1378,6 +1378,14 @@ class AsyncNativeCCEntryPointArgumentEmission final
 llvm_unreachable(
 "async functions do not use a fixed size coroutine buffer");
 }
+ Explosion
+ explosionForObject(IRGenFunction &IGF, unsigned index, SILArgument *param,
+ SILType paramTy, const LoadableTypeInfo &loadableParamTI,
+ const LoadableTypeInfo &loadableArgTI,
+ std::function<Explosion(unsigned, unsigned)>
+ explosionForArgument) override {
+ return explosionForArgument(index, 1);
+ };
 };
 
 std::unique_ptr<COrObjCEntryPointArgumentEmission>
@@ -1396,11 +1404,7 @@ std::unique_ptr<COrObjCEntryPointArgumentEmission>
 getCOrObjCEntryPointArgumentEmission(IRGenSILFunction &IGF,
 SILBasicBlock &entry,
 Explosion &allParamValues) {
- if (IGF.CurSILFn->isAsync() &&
- !(/*FIXME: Remove this condition once Task.runDetached is
- available. rdar://problem/70597390*/
- IGF.CurSILFn->getLoweredFunctionType()->getRepresentation() ==
- SILFunctionTypeRepresentation::CFunctionPointer)) {
+ if (IGF.CurSILFn->isAsync()) {
 llvm_unreachable("unsupported");
 } else {
 return std::make_unique<COrObjCEntryPointArgumentEmission>(
@@ -1652,8 +1656,9 @@ static ArrayRef<SILArgument *> emitEntryPointIndirectReturn(
 }
 
 template <typename ExplosionForArgument>
-static void bindParameter(IRGenSILFunction &IGF, unsigned index,
- SILArgument *param, SILType paramTy,
+static void bindParameter(IRGenSILFunction &IGF,
+ NativeCCEntryPointArgumentEmission &emission,
+ unsigned index, SILArgument *param, SILType paramTy,
 ExplosionForArgument explosionForArgument) {
 // Pull out the parameter value and its formal type.
 auto &paramTI = IGF.getTypeInfo(IGF.CurSILFn->mapTypeIntoContext(paramTy));
@@ -1662,36 +1667,11 @@ static void bindParameter(IRGenSILFunction &IGF, unsigned index,
 // If the SIL parameter isn't passed indirectly, we need to map it
 // to an explosion.
 if (param->getType().isObject()) {
- Explosion paramValues;
 auto &loadableParamTI = cast<LoadableTypeInfo>(paramTI);
- auto &loadableArgTI = cast<LoadableTypeInfo>(paramTI);
- // If the explosion must be passed indirectly, load the value from the
- // indirect address.
- auto &nativeSchema = argTI.nativeParameterValueSchema(IGF.IGM);
- if (nativeSchema.requiresIndirect()) {
- Explosion paramExplosion = explosionForArgument(index, 1);
- Address paramAddr =
- loadableParamTI.getAddressForPointer(paramExplosion.claimNext());
- if (paramTI.getStorageType() != argTI.getStorageType())
- paramAddr =
- loadableArgTI.getAddressForPointer(IGF.Builder.CreateBitCast(
- paramAddr.getAddress(),
- loadableArgTI.getStorageType()->getPointerTo()));
- loadableArgTI.loadAsTake(IGF, paramAddr, paramValues);
- } else {
- if (!nativeSchema.empty()) {
- // Otherwise, we map from the native convention to the type's explosion
- // schema.
- Explosion nativeParam;
- unsigned size = nativeSchema.size();
- Explosion paramExplosion = explosionForArgument(index, size);
- paramExplosion.transferInto(nativeParam, size);
- paramValues = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeParam,
- param->getType());
- } else {
- assert(paramTI.getSchema().empty());
- }
- }
+ auto &loadableArgTI = cast<LoadableTypeInfo>(argTI);
+ auto paramValues =
+ emission.explosionForObject(IGF, index, param, paramTy, loadableParamTI,
+ loadableArgTI, explosionForArgument);
 IGF.setLoweredExplosion(param, paramValues);
 return;
 }
@@ -1749,6 +1729,10 @@ static void emitEntryPointArgumentsNativeCC(IRGenSILFunction &IGF,
 break;
 }
 
+ if (funcTy->isAsync()) {
+ emitAsyncFunctionEntry(IGF, IGF.CurSILFn);
+ }
+
 SILFunctionConventions conv(funcTy, IGF.getSILModule());
 
 // The 'self' argument might be in the context position, which is
@@ -1758,7 +1742,7 @@ static void emitEntryPointArgumentsNativeCC(IRGenSILFunction &IGF,
 params = params.drop_back();
 
 bindParameter(
- IGF, 0, selfParam,
+ IGF, *emission, 0, selfParam,
 conv.getSILArgumentType(conv.getNumSILArguments() - 1,
 IGF.IGM.getMaximalTypeExpansionContext()),
 [&](unsigned startIndex, unsigned size) {
@@ -1782,7 +1766,7 @@ static void emitEntryPointArgumentsNativeCC(IRGenSILFunction &IGF,
 unsigned i = 0;
 for (SILArgument *param : params) {
 auto argIdx = conv.getSILArgIndexOfFirstParam() + i;
- bindParameter(IGF, i, param,
+ bindParameter(IGF, *emission, i, param,
 conv.getSILArgumentType(
 argIdx, IGF.IGM.getMaximalTypeExpansionContext()),
 [&](unsigned index, unsigned size) {
@@ -1964,6 +1948,11 @@ void IRGenSILFunction::emitSILFunction() {
 if (CurSILFn->getDynamicallyReplacedFunction())
 IGM.IRGen.addDynamicReplacement(CurSILFn);
 
+ auto funcTy = CurSILFn->getLoweredFunctionType();
+ if (funcTy->isAsync() && funcTy->getLanguage() == SILFunctionLanguage::Swift)
+ emitAsyncFunctionPointer(IGM, CurSILFn,
+ getAsyncContextLayout(*this).getSize());
+
 // Configure the dominance resolver.
 // TODO: consider re-using a dom analysis from the PassManager
 // TODO: consider using a cheaper analysis at -O0
@@ -1998,7 +1987,6 @@ void IRGenSILFunction::emitSILFunction() {
 
 // Map the LLVM arguments to arguments on the entry point BB.
 Explosion params = collectParameters();
- auto funcTy = CurSILFn->getLoweredFunctionType();
 
 switch (funcTy->getLanguage()) {
 case SILFunctionLanguage::Swift:
@@ -2282,23 +2270,29 @@ void IRGenSILFunction::visitDifferentiabilityWitnessFunctionInst(
 diffWitness =
 Builder.CreateBitCast(diffWitness, signature.getType()->getPointerTo());
 
- setLoweredFunctionPointer(i, FunctionPointer(diffWitness, signature));
+ setLoweredFunctionPointer(i, FunctionPointer(fnType, diffWitness, signature));
 }
 
 void IRGenSILFunction::visitFunctionRefBaseInst(FunctionRefBaseInst *i) {
 auto fn = i->getInitiallyReferencedFunction();
+ auto fnType = fn->getLoweredFunctionType();
 
- llvm::Constant *fnPtr = IGM.getAddrOfSILFunction(
- fn, NotForDefinition, false /*isDynamicallyReplaceableImplementation*/,
- isa<PreviousDynamicFunctionRefInst>(i));
-
- auto sig = IGM.getSignature(fn->getLoweredFunctionType());
+ auto sig = IGM.getSignature(fnType);
 
 // Note that the pointer value returned by getAddrOfSILFunction doesn't
 // necessarily have element type sig.getType(), e.g. if it's imported.
+ auto *fnPtr = IGM.getAddrOfSILFunction( + fn, NotForDefinition, false /*isDynamicallyReplaceableImplementation*/, + isa(i)); + llvm::Value *value; + if (fn->isAsync()) { + value = IGM.getAddrOfAsyncFunctionPointer(fn); + value = Builder.CreateBitCast(value, fnPtr->getType()); + } else { + value = fnPtr; + } + FunctionPointer fp = FunctionPointer(fnType, value, sig); - FunctionPointer fp = FunctionPointer::forDirect(fnPtr, sig); - // Store the function as a FunctionPointer so we can avoid bitcasting // or thunking if we don't need to. setLoweredFunctionPointer(i, fp); @@ -3059,16 +3053,18 @@ void IRGenSILFunction::visitPartialApplyInst(swift::PartialApplyInst *i) { Explosion llArgs; + auto &lv = getLoweredValue(i->getCallee()); if (i->getOrigCalleeType()->isAsync()) { auto result = getPartialApplicationFunction(*this, i->getCallee(), i->getSubstitutionMap(), i->getSubstCalleeType()); llvm::Value *innerContext = std::get<1>(result); - auto layout = - getAsyncContextLayout(IGM, i->getOrigCalleeType(), - i->getSubstCalleeType(), i->getSubstitutionMap()); - auto size = getDynamicAsyncContextSize( - *this, layout, i->getOrigCalleeType(), innerContext); + llvm::Value *size; + llvm::Value *fnPtr; + std::tie(fnPtr, size) = getAsyncFunctionAndSize( + *this, i->getOrigCalleeType()->getRepresentation(), std::get<0>(result), + innerContext, {/*function*/ false, /*size*/ true}); + assert(fnPtr == nullptr); llArgs.add(size); } @@ -3084,7 +3080,6 @@ void IRGenSILFunction::visitPartialApplyInst(swift::PartialApplyInst *i) { } } - auto &lv = getLoweredValue(i->getCallee()); if (lv.kind == LoweredValue::Kind::ObjCMethod) { // Objective-C partial applications require a different path. There's no // actual function pointer to capture, and we semantically can't cache @@ -3169,7 +3164,7 @@ void IRGenSILFunction::visitUnreachableInst(swift::UnreachableInst *i) { Builder.CreateUnreachable(); } -static void emitCoroutineExit(IRGenSILFunction &IGF) { +static void emitCoroutineOrAsyncExit(IRGenSILFunction &IGF) { // The LLVM coroutine representation demands that there be a // unique call to llvm.coro.end. @@ -3195,12 +3190,13 @@ static void emitCoroutineExit(IRGenSILFunction &IGF) { static void emitReturnInst(IRGenSILFunction &IGF, SILType resultTy, - Explosion &result) { + Explosion &result, + CanSILFunctionType fnType) { // If we're generating a coroutine, just call coro.end. - if (IGF.isCoroutine()) { + if (IGF.isCoroutine() && !IGF.isAsync()) { assert(result.empty() && "coroutines do not currently support non-void returns"); - emitCoroutineExit(IGF); + emitCoroutineOrAsyncExit(IGF); return; } @@ -3214,12 +3210,7 @@ static void emitReturnInst(IRGenSILFunction &IGF, auto &retTI = cast(IGF.getTypeInfo(resultTy)); retTI.initialize(IGF, result, IGF.IndirectReturn, false); IGF.Builder.CreateRetVoid(); - } else if (IGF.isAsync() && - !(/*FIXME: Remove this condition once Task.runDetached is - available. rdar://problem/70597390*/ - IGF.CurSILFn->getLoweredFunctionType() - ->getRepresentation() == - SILFunctionTypeRepresentation::CFunctionPointer)) { + } else if (IGF.isAsync()) { // If we're generating an async function, store the result into the buffer. 
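Taken together, the changes above mean a Swift-convention async function is always announced through an AsyncFunctionPointer record: emitSILFunction emits the record, visitFunctionRefBaseInst resolves function_ref to its address, and partial applications read the callee's async context size out of it via getAsyncFunctionAndSize. The Swift below is a minimal illustration of the kind of source that exercises these paths; the names are invented and nothing in it comes from this patch.

func add(_ x: Int, _ y: Int) async -> Int { x + y }

// A function_ref to an async function; IRGen now lowers this reference to
// the address of add's AsyncFunctionPointer rather than its raw entry point.
let op: (Int, Int) async -> Int = add

// A partial application of an async callee; the size of the async context
// to allocate is obtained dynamically from the callee's record.
func adder(_ x: Int) -> (Int) async -> Int {
    { y in await add(x, y) }
}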
assert(!IGF.IndirectReturn.isValid() && "Formally direct results should stay direct results for async " @@ -3236,7 +3227,8 @@ static void emitReturnInst(IRGenSILFunction &IGF, cast(fieldLayout.getType()) .initialize(IGF, result, fieldAddr, /*isOutlined*/ false); } - IGF.Builder.CreateRetVoid(); + emitAsyncReturn(IGF, layout, fnType); + emitCoroutineOrAsyncExit(IGF); } else { auto funcLang = IGF.CurSILFn->getLoweredFunctionType()->getLanguage(); auto swiftCCReturn = funcLang == SILFunctionLanguage::Swift; @@ -3265,7 +3257,8 @@ void IRGenSILFunction::visitReturnInst(swift::ReturnInst *i) { result = std::move(temp); } - emitReturnInst(*this, i->getOperand()->getType(), result); + emitReturnInst(*this, i->getOperand()->getType(), result, + i->getFunction()->getLoweredFunctionType()); } void IRGenSILFunction::visitThrowInst(swift::ThrowInst *i) { @@ -3274,6 +3267,14 @@ void IRGenSILFunction::visitThrowInst(swift::ThrowInst *i) { Builder.CreateStore(exn, getCallerErrorResultSlot()); + // Async functions just return to the continuation. + if (isAsync()) { + auto layout = getAsyncContextLayout(*this); + emitAsyncReturn(*this, layout, i->getFunction()->getLoweredFunctionType()); + emitCoroutineOrAsyncExit(*this); + return; + } + // Create a normal return, but leaving the return value undefined. auto fnTy = CurFn->getType()->getPointerElementType(); auto retTy = cast(fnTy)->getReturnType(); @@ -3287,7 +3288,7 @@ void IRGenSILFunction::visitThrowInst(swift::ThrowInst *i) { void IRGenSILFunction::visitUnwindInst(swift::UnwindInst *i) { // Just call coro.end; there's no need to distinguish 'unwind' // and 'return' at the LLVM level. - emitCoroutineExit(*this); + emitCoroutineOrAsyncExit(*this); } void IRGenSILFunction::visitYieldInst(swift::YieldInst *i) { @@ -3347,7 +3348,8 @@ void IRGenSILFunction::visitEndApply(BeginApplyInst *i, bool isAbort) { auto pointerAuth = PointerAuthInfo::emit(*this, schemaAndEntity.first, coroutine.Buffer.getAddress(), schemaAndEntity.second); - FunctionPointer callee(continuation, pointerAuth, sig); + FunctionPointer callee(i->getOrigCalleeType(), continuation, pointerAuth, + sig); Builder.CreateCall(callee, { coroutine.Buffer.getAddress(), @@ -6087,7 +6089,7 @@ void IRGenSILFunction::visitWitnessMethodInst(swift::WitnessMethodInst *i) { auto fnType = IGM.getSILTypes().getConstantFunctionType( IGM.getMaximalTypeExpansionContext(), member); auto sig = IGM.getSignature(fnType); - auto fn = FunctionPointer::forDirect(fnPtr, sig); + auto fn = FunctionPointer::forDirect(fnType, fnPtr, sig); setLoweredFunctionPointer(i, fn); return; @@ -6251,7 +6253,7 @@ void IRGenSILFunction::visitSuperMethodInst(swift::SuperMethodInst *i) { auto authInfo = PointerAuthInfo::emit(*this, schema, /*storageAddress=*/nullptr, method); - FunctionPointer fn(fnPtr, authInfo, sig); + FunctionPointer fn(methodType, fnPtr, authInfo, sig); setLoweredFunctionPointer(i, fn); return; @@ -6287,7 +6289,7 @@ void IRGenSILFunction::visitClassMethodInst(swift::ClassMethodInst *i) { ResilienceExpansion::Maximal)) { auto *fnPtr = IGM.getAddrOfDispatchThunk(method, NotForDefinition); auto sig = IGM.getSignature(methodType); - FunctionPointer fn(fnPtr, sig); + FunctionPointer fn(methodType, fnPtr, sig); setLoweredFunctionPointer(i, fn); return; diff --git a/lib/IRGen/Linking.cpp b/lib/IRGen/Linking.cpp index 817bb82d81c50..8ca70b747fe53 100644 --- a/lib/IRGen/Linking.cpp +++ b/lib/IRGen/Linking.cpp @@ -430,6 +430,17 @@ std::string LinkEntity::mangleAsString() const { return 
mangler.mangleSILDifferentiabilityWitnessKey( {getSILDifferentiabilityWitness()->getOriginalFunction()->getName(), getSILDifferentiabilityWitness()->getConfig()}); + case Kind::AsyncFunctionPointer: { + std::string Result(getSILFunction()->getName()); + Result.append("AD"); + return Result; + } + case Kind::AsyncFunctionPointerAST: { + std::string Result; + Result = mangler.mangleEntity(getDecl()); + Result.append("AD"); + return Result; + } } llvm_unreachable("bad entity kind!"); } @@ -663,9 +674,13 @@ SILLinkage LinkEntity::getLinkage(ForDefinition_t forDefinition) const { case Kind::DynamicallyReplaceableFunctionKey: return getSILFunction()->getLinkage(); + case Kind::AsyncFunctionPointer: case Kind::SILFunction: return getSILFunction()->getEffectiveSymbolLinkage(); + case Kind::AsyncFunctionPointerAST: + return getSILLinkage(getDeclLinkage(getDecl()), forDefinition); + case Kind::DynamicallyReplaceableFunctionImpl: case Kind::DynamicallyReplaceableFunctionKeyAST: return getSILLinkage(getDeclLinkage(getDecl()), forDefinition); @@ -712,6 +727,8 @@ bool LinkEntity::isContextDescriptor() const { case Kind::ProtocolDescriptor: case Kind::OpaqueTypeDescriptor: return true; + case Kind::AsyncFunctionPointer: + case Kind::AsyncFunctionPointerAST: case Kind::PropertyDescriptor: case Kind::DispatchThunk: case Kind::DispatchThunkInitializer: @@ -780,6 +797,8 @@ bool LinkEntity::isContextDescriptor() const { llvm::Type *LinkEntity::getDefaultDeclarationType(IRGenModule &IGM) const { switch (getKind()) { + case Kind::AsyncFunctionPointer: + return IGM.AsyncFunctionPointerTy; case Kind::ModuleDescriptor: case Kind::ExtensionDescriptor: case Kind::AnonymousDescriptor: @@ -909,6 +928,7 @@ Alignment LinkEntity::getAlignment(IRGenModule &IGM) const { case Kind::MethodDescriptorAllocator: case Kind::OpaqueTypeDescriptor: return Alignment(4); + case Kind::AsyncFunctionPointer: case Kind::ObjCClassRef: case Kind::ObjCClass: case Kind::TypeMetadataLazyCacheVariable: @@ -951,6 +971,7 @@ bool LinkEntity::isWeakImported(ModuleDecl *module) const { return getSILGlobalVariable()->getDecl()->isWeakImported(module); } return false; + case Kind::AsyncFunctionPointer: case Kind::DynamicallyReplaceableFunctionKey: case Kind::DynamicallyReplaceableFunctionVariable: case Kind::SILFunction: { @@ -977,6 +998,7 @@ bool LinkEntity::isWeakImported(ModuleDecl *module) const { return false; } + case Kind::AsyncFunctionPointerAST: case Kind::DispatchThunk: case Kind::DispatchThunkInitializer: case Kind::DispatchThunkAllocator: @@ -1053,6 +1075,7 @@ bool LinkEntity::isWeakImported(ModuleDecl *module) const { DeclContext *LinkEntity::getDeclContextForEmission() const { switch (getKind()) { + case Kind::AsyncFunctionPointerAST: case Kind::DispatchThunk: case Kind::DispatchThunkInitializer: case Kind::DispatchThunkAllocator: @@ -1095,6 +1118,7 @@ DeclContext *LinkEntity::getDeclContextForEmission() const { case Kind::CanonicalSpecializedGenericSwiftMetaclassStub: return getType()->getClassOrBoundGenericClass()->getDeclContext(); + case Kind::AsyncFunctionPointer: case Kind::SILFunction: case Kind::DynamicallyReplaceableFunctionVariable: case Kind::DynamicallyReplaceableFunctionKey: diff --git a/lib/IRGen/MetadataLayout.h b/lib/IRGen/MetadataLayout.h index 312b8c55876f5..c31a56f5c3e6a 100644 --- a/lib/IRGen/MetadataLayout.h +++ b/lib/IRGen/MetadataLayout.h @@ -188,7 +188,7 @@ class ClassMetadataLayout : public NominalMetadataLayout { Kind getKind() const { return TheKind; } - Offset getOffsett() const { + Offset getOffset() const { 
assert(getKind() == Kind::Offset); return TheOffset; } diff --git a/lib/Option/CMakeLists.txt b/lib/Option/CMakeLists.txt index 70280746f2b28..0e8999981fa46 100644 --- a/lib/Option/CMakeLists.txt +++ b/lib/Option/CMakeLists.txt @@ -6,3 +6,28 @@ add_dependencies(swiftOption target_link_libraries(swiftOption PRIVATE swiftBasic) +set(features_file_src "${CMAKE_CURRENT_SOURCE_DIR}/features.json") +set(features_file_dest "${CMAKE_BINARY_DIR}/share/swift/features.json") + +add_custom_command( + OUTPUT + ${features_file_dest} + COMMAND + ${CMAKE_COMMAND} -E copy ${features_file_src} ${features_file_dest} + DEPENDS + ${features_file_src} +) + +add_custom_target(swift-features-file DEPENDS ${features_file_dest}) + +add_dependencies(swiftOption swift-features-file) + +swift_install_in_component( + FILES + ${features_file_dest} + DESTINATION + "share/swift" + COMPONENT + compiler +) + diff --git a/lib/Option/features.json b/lib/Option/features.json new file mode 100644 index 0000000000000..fabdc50e67277 --- /dev/null +++ b/lib/Option/features.json @@ -0,0 +1,10 @@ +{ + "features": [ + { + "name": "experimental-skip-all-function-bodies" + }, + { + "name": "experimental-allow-module-with-compiler-errors" + } + ] +} diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp index b6554d54ec921..424769d1f15c0 100644 --- a/lib/Parse/ParseDecl.cpp +++ b/lib/Parse/ParseDecl.cpp @@ -3519,10 +3519,12 @@ static void diagnoseOperatorFixityAttributes(Parser &P, static unsigned skipUntilMatchingRBrace(Parser &P, bool &HasPoundDirective, bool &HasOperatorDeclarations, - bool &HasNestedClassDeclarations) { + bool &HasNestedClassDeclarations, + bool &HasNestedTypeDeclarations) { HasPoundDirective = false; HasOperatorDeclarations = false; HasNestedClassDeclarations = false; + HasNestedTypeDeclarations = false; unsigned OpenBraces = 1; @@ -3541,6 +3543,10 @@ static unsigned skipUntilMatchingRBrace(Parser &P, HasPoundDirective |= P.Tok.isAny(tok::pound_sourceLocation, tok::pound_line, tok::pound_if, tok::pound_else, tok::pound_endif, tok::pound_elseif); + + HasNestedTypeDeclarations |= P.Tok.isAny(tok::kw_class, tok::kw_struct, + tok::kw_enum); + if (P.consumeIf(tok::l_brace)) { ++OpenBraces; continue; @@ -4819,10 +4825,12 @@ bool Parser::canDelayMemberDeclParsing(bool &HasOperatorDeclarations, // we can't lazily parse. BacktrackingScope BackTrack(*this); bool HasPoundDirective; + bool HasNestedTypeDeclarations; skipUntilMatchingRBrace(*this, HasPoundDirective, HasOperatorDeclarations, - HasNestedClassDeclarations); + HasNestedClassDeclarations, + HasNestedTypeDeclarations); if (!HasPoundDirective) BackTrack.cancelBacktrack(); return !BackTrack.willBacktrack(); @@ -5510,7 +5518,7 @@ static ParameterList *parseOptionalAccessorArgument(SourceLoc SpecifierLoc, return ParameterList::create(P.Context, StartLoc, param, EndLoc); } -bool Parser::skipBracedBlock() { +bool Parser::skipBracedBlock(bool &HasNestedTypeDeclarations) { SyntaxParsingContext disabled(SyntaxContext); SyntaxContext->disable(); consumeToken(tok::l_brace); @@ -5524,7 +5532,8 @@ bool Parser::skipBracedBlock() { unsigned OpenBraces = skipUntilMatchingRBrace(*this, HasPoundDirectives, HasOperatorDeclarations, - HasNestedClassDeclarations); + HasNestedClassDeclarations, + HasNestedTypeDeclarations); if (consumeIf(tok::r_brace)) --OpenBraces; return OpenBraces != 0; @@ -6420,11 +6429,13 @@ void Parser::consumeAbstractFunctionBody(AbstractFunctionDecl *AFD, BodyRange.Start = Tok.getLoc(); // Advance the parser to the end of the block; '{' ... '}'. 
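The features.json file installed above gives tools a way to discover optional compiler capabilities without parsing help output. Below is a hedged sketch of a consumer, assuming Foundation; the helper name is invented, and the on-disk location is the CMake install destination (share/swift) resolved against a hypothetical toolchain root.

import Foundation

struct FeatureList: Codable {
    struct Feature: Codable {
        let name: String
    }
    let features: [Feature]
}

// Returns true if the toolchain advertises the named feature.
func toolchainSupports(_ feature: String, toolchainRoot: String) -> Bool {
    let path = toolchainRoot + "/share/swift/features.json"
    guard let data = FileManager.default.contents(atPath: path),
          let list = try? JSONDecoder().decode(FeatureList.self, from: data)
    else { return false }
    return list.features.contains { $0.name == feature }
}

// e.g. toolchainSupports("experimental-allow-module-with-compiler-errors",
//                        toolchainRoot: "/usr")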
- skipBracedBlock(); + bool HasNestedTypeDeclarations; + skipBracedBlock(HasNestedTypeDeclarations); BodyRange.End = PreviousLoc; AFD->setBodyDelayed(BodyRange); + AFD->setHasNestedTypeDeclarations(HasNestedTypeDeclarations); if (isCodeCompletionFirstPass() && SourceMgr.rangeContainsCodeCompletionLoc(BodyRange)) { diff --git a/lib/SIL/IR/AbstractionPattern.cpp b/lib/SIL/IR/AbstractionPattern.cpp index 6813fa34c08c4..f0bb043ba6a32 100644 --- a/lib/SIL/IR/AbstractionPattern.cpp +++ b/lib/SIL/IR/AbstractionPattern.cpp @@ -203,6 +203,7 @@ AbstractionPattern::getOptional(AbstractionPattern object) { case Kind::PartialCurriedCXXOperatorMethodType: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: llvm_unreachable("cannot add optionality to non-type abstraction"); case Kind::Opaque: return AbstractionPattern::getOpaque(); @@ -310,6 +311,7 @@ bool AbstractionPattern::matchesTuple(CanTupleType substType) { return true; case Kind::Tuple: return getNumTupleElements_Stored() == substType->getNumElements(); + case Kind::ObjCCompletionHandlerArgumentsType: case Kind::ClangType: case Kind::Type: case Kind::Discard: { @@ -399,6 +401,19 @@ AbstractionPattern::getTupleElementType(unsigned index) const { return AbstractionPattern::getOpaque(); return AbstractionPattern(getGenericSignature(), getCanTupleElementType(getType(), index)); + + case Kind::ObjCCompletionHandlerArgumentsType: { + // Match up the tuple element with the parameter from the Clang block type, + // skipping the error parameter index if any. + auto callback = cast(getClangType()); + auto errorIndex = getEncodedForeignInfo() + .getAsyncCompletionHandlerErrorParamIndex(); + unsigned paramIndex = index + (errorIndex && index >= *errorIndex); + return AbstractionPattern(getGenericSignature(), + getCanTupleElementType(getType(), index), + callback->getParamType(paramIndex).getTypePtr()); + } + } llvm_unreachable("bad kind"); } @@ -465,6 +480,7 @@ AbstractionPattern AbstractionPattern::getFunctionResultType() const { switch (getKind()) { case Kind::Invalid: llvm_unreachable("querying invalid abstraction pattern!"); + case Kind::ObjCCompletionHandlerArgumentsType: case Kind::Tuple: llvm_unreachable("abstraction pattern for tuple cannot be function"); case Kind::Opaque: @@ -524,25 +540,45 @@ AbstractionPattern AbstractionPattern::getFunctionResultType() const { ->getPointeeType() ->getAs(); - // The result is the first non-error argument to the callback. - unsigned callbackResultIndex = 0; - if (auto callbackErrorIndex = getEncodedForeignInfo() - .getAsyncCompletionHandlerErrorParamIndex()) { - if (*callbackErrorIndex == 0) { - callbackResultIndex = 1; - } + // The result comprises the non-error argument(s) to the callback, if + // any. + + auto callbackErrorIndex = getEncodedForeignInfo() + .getAsyncCompletionHandlerErrorParamIndex(); + assert((!callbackErrorIndex.hasValue() + || callbackParamTy->getNumParams() > *callbackErrorIndex) + && "completion handler has invalid error param index?!"); + unsigned numNonErrorParams + = callbackParamTy->getNumParams() - callbackErrorIndex.hasValue(); + + switch (numNonErrorParams) { + case 0: + // If there are no result arguments, then the imported result type is + // Void, with no interesting abstraction properties. + return AbstractionPattern(TupleType::getEmpty(getType()->getASTContext())); + + case 1: { + // If there's a single argument, abstract it according to its formal type + // in the ObjC signature. 
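The HasNestedTypeDeclarations bit threaded through skipUntilMatchingRBrace and skipBracedBlock above records whether a function body that the parser skips declares any class, struct, or enum, so delayed-body parsing knows the body cannot be treated as opaque. An invented example of the kind of body that sets the flag:

func makeCounter() -> () -> Int {
    // A nominal type declared inside the body: when this body is skipped,
    // the parser notes the nested `struct` token so later phases revisit it.
    struct Counter {
        var value = 0
    }
    var counter = Counter()
    return {
        counter.value += 1
        return counter.value
    }
}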
+ unsigned callbackResultIndex + = callbackErrorIndex && *callbackErrorIndex == 0; + auto clangResultType = callbackParamTy + ->getParamType(callbackResultIndex) + .getTypePtr(); + + return AbstractionPattern(getGenericSignatureForFunctionComponent(), + getResultType(getType()), clangResultType); } - - const clang::Type *clangResultType = nullptr; - if (callbackResultIndex < callbackParamTy->getNumParams()) { - clangResultType = callbackParamTy->getParamType(callbackResultIndex) - .getTypePtr(); - } else { - clangResultType = getObjCMethod()->getASTContext().VoidTy.getTypePtr(); + + default: + // If there are multiple results, we have a special abstraction pattern + // form to represent the mapping from block parameters to tuple elements + // in the return type. + return AbstractionPattern::getObjCCompletionHandlerArgumentsType( + getGenericSignatureForFunctionComponent(), + getResultType(getType()), callbackParamTy, + getEncodedForeignInfo()); } - - return AbstractionPattern(getGenericSignatureForFunctionComponent(), - getResultType(getType()), clangResultType); } return AbstractionPattern(getGenericSignatureForFunctionComponent(), @@ -594,6 +630,7 @@ AbstractionPattern::getObjCMethodAsyncCompletionHandlerType( case Kind::CurriedCFunctionAsMethodType: case Kind::CurriedCXXMethodType: case Kind::CurriedCXXOperatorMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: swift_unreachable("not appropriate for this kind"); } } @@ -791,6 +828,7 @@ AbstractionPattern AbstractionPattern::getOptionalObjectType() const { case Kind::Tuple: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: llvm_unreachable("pattern for function or tuple cannot be for optional"); case Kind::Opaque: @@ -837,6 +875,7 @@ AbstractionPattern AbstractionPattern::getReferenceStorageReferentType() const { case Kind::Tuple: case Kind::OpaqueFunction: case Kind::OpaqueDerivativeFunction: + case Kind::ObjCCompletionHandlerArgumentsType: return *this; case Kind::Type: return AbstractionPattern(getGenericSignature(), @@ -897,12 +936,15 @@ void AbstractionPattern::print(raw_ostream &out) const { case Kind::CurriedCFunctionAsMethodType: case Kind::PartialCurriedCFunctionAsMethodType: case Kind::CFunctionAsMethodType: + case Kind::ObjCCompletionHandlerArgumentsType: out << (getKind() == Kind::ClangType ? "AP::ClangType(" : getKind() == Kind::CurriedCFunctionAsMethodType ? "AP::CurriedCFunctionAsMethodType(" : getKind() == Kind::PartialCurriedCFunctionAsMethodType - ? "AP::PartialCurriedCFunctionAsMethodType(" + ? "AP::PartialCurriedCFunctionAsMethodType(" : + getKind() == Kind::ObjCCompletionHandlerArgumentsType + ? 
"AP::ObjCCompletionHandlerArgumentsType(" : "AP::CFunctionAsMethodType("); if (auto sig = getGenericSignature()) { sig->print(out); @@ -922,6 +964,12 @@ void AbstractionPattern::print(raw_ostream &out) const { out << "static"; } } + if (hasStoredForeignInfo()) { + if (auto errorIndex + = getEncodedForeignInfo().getAsyncCompletionHandlerErrorParamIndex()){ + out << ", errorParamIndex=" << *errorIndex; + } + } out << ")"; return; case Kind::CXXMethodType: @@ -1069,6 +1117,9 @@ const { case Kind::OpaqueDerivativeFunction: llvm_unreachable("should not have an opaque derivative function pattern " "matching a struct/enum type"); + case Kind::ObjCCompletionHandlerArgumentsType: + llvm_unreachable("should not have a completion handler argument pattern " + "matching a struct/enum type"); case Kind::PartialCurriedObjCMethodType: case Kind::CurriedObjCMethodType: case Kind::PartialCurriedCFunctionAsMethodType: diff --git a/lib/SIL/IR/SILUndef.cpp b/lib/SIL/IR/SILUndef.cpp index 700e69ec5520c..21831a8cf6683 100644 --- a/lib/SIL/IR/SILUndef.cpp +++ b/lib/SIL/IR/SILUndef.cpp @@ -15,26 +15,16 @@ using namespace swift; -static ValueOwnershipKind getOwnershipKindForUndef(SILType type, const SILFunction &f) { - if (!f.hasOwnership()) - return OwnershipKind::None; - if (type.isAddress() || type.isTrivial(f)) - return OwnershipKind::None; - return OwnershipKind::Owned; -} - -SILUndef::SILUndef(SILType type, ValueOwnershipKind ownershipKind) - : ValueBase(ValueKind::SILUndef, type, IsRepresentative::Yes), - ownershipKind(ownershipKind) {} +SILUndef::SILUndef(SILType type) + : ValueBase(ValueKind::SILUndef, type, IsRepresentative::Yes) {} -SILUndef *SILUndef::get(SILType ty, SILModule &m, ValueOwnershipKind ownershipKind) { - SILUndef *&entry = m.UndefValues[std::make_pair(ty, unsigned(ownershipKind))]; +SILUndef *SILUndef::get(SILType ty, SILModule &m) { + SILUndef *&entry = m.UndefValues[ty]; if (entry == nullptr) - entry = new (m) SILUndef(ty, ownershipKind); + entry = new (m) SILUndef(ty); return entry; } SILUndef *SILUndef::get(SILType ty, const SILFunction &f) { - auto ownershipKind = getOwnershipKindForUndef(ty, f); - return SILUndef::get(ty, f.getModule(), ownershipKind); + return SILUndef::get(ty, f.getModule()); } diff --git a/lib/SIL/IR/ValueOwnership.cpp b/lib/SIL/IR/ValueOwnership.cpp index 824d397c858b3..8798bf1813ed3 100644 --- a/lib/SIL/IR/ValueOwnership.cpp +++ b/lib/SIL/IR/ValueOwnership.cpp @@ -585,9 +585,8 @@ ValueOwnershipKindClassifier::visitBuiltinInst(BuiltinInst *BI) { ValueOwnershipKind SILValue::getOwnershipKind() const { // If we do not have an undef, we should always be able to get to our function // here. If we do not have ownership enabled, just return none for everything - // to short circuit ownership optimizations. If we have an undef we may still - // get some results that are slightly wonky but hopefully when we lower - // ownership we remove that. + // to short circuit ownership optimizations. Since SILUndef in either case + // will be ValueOwnershipKind::None, we will not get any wonky behavior here. // // We assume that any time we are in SILBuilder and call this without having a // value in a block yet, ossa is enabled. 
diff --git a/lib/SIL/Parser/ParseSIL.cpp b/lib/SIL/Parser/ParseSIL.cpp index 6cef09b209a40..a03a3199408eb 100644 --- a/lib/SIL/Parser/ParseSIL.cpp +++ b/lib/SIL/Parser/ParseSIL.cpp @@ -84,6 +84,14 @@ ParseSILModuleRequest::evaluate(Evaluator &evaluator, if (hadError) { // The rest of the SIL pipeline expects well-formed SIL, so if we encounter // a parsing error, just return an empty SIL module. + // + // Because the SIL parser's notion of failing with an error is distinct from + // the ASTContext's notion of having emitted a diagnostic, it's possible for + // the parser to fail silently without emitting a diagnostic. This assertion + // ensures that +asserts builds will fail fast. If you crash here, please go + // back and add a diagnostic after identifying where the SIL parser failed. + assert(SF->getASTContext().hadError() && + "Failed to parse SIL but did not emit any errors!"); return SILModule::createEmptyModule(desc.context, desc.conv, desc.opts); } return silMod; @@ -491,8 +499,7 @@ bool SILParser::parseVerbatim(StringRef name) { SILParser::~SILParser() { for (auto &Entry : ForwardRefLocalValues) { if (ValueBase *dummyVal = LocalValues[Entry.first()]) { - dummyVal->replaceAllUsesWith( - SILUndef::get(dummyVal->getType(), SILMod, OwnershipKind::None)); + dummyVal->replaceAllUsesWith(SILUndef::get(dummyVal->getType(), SILMod)); SILInstruction::destroy(cast(dummyVal)); SILMod.deallocateInst(cast(dummyVal)); } diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index f335a1f03bb46..f2f4e38e596fb 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -122,14 +122,13 @@ class AccessPhiVisitor phiArg->getIncomingPhiValues(pointerWorklist); } - void visitStorageCast(SingleValueInstruction *projectedAddr, - Operand *sourceOper) { + void visitStorageCast(SingleValueInstruction *cast, Operand *sourceOper) { // Allow conversions to/from pointers and addresses on disjoint phi paths // only if the underlying useDefVisitor allows it. if (storageCastTy == IgnoreStorageCast) pointerWorklist.push_back(sourceOper->get()); else - visitNonAccess(projectedAddr); + visitNonAccess(cast); } void visitAccessProjection(SingleValueInstruction *projectedAddr, @@ -207,8 +206,7 @@ class FindAccessVisitorImpl : public AccessUseDefChainVisitor { return this->asImpl().visitNonAccess(phiArg); } - SILValue visitStorageCast(SingleValueInstruction *projectedAddr, - Operand *sourceAddr) { + SILValue visitStorageCast(SingleValueInstruction *, Operand *sourceAddr) { assert(storageCastTy == IgnoreStorageCast); return sourceAddr->get(); } @@ -303,12 +301,11 @@ class FindAccessBaseVisitor } // Override visitStorageCast to avoid seeing through arbitrary address casts. 
- SILValue visitStorageCast(SingleValueInstruction *projectedAddr, - Operand *sourceAddr) { + SILValue visitStorageCast(SingleValueInstruction *cast, Operand *sourceAddr) { if (storageCastTy == StopAtStorageCast) - return visitNonAccess(projectedAddr); + return visitNonAccess(cast); - return SuperTy::visitStorageCast(projectedAddr, sourceAddr); + return SuperTy::visitStorageCast(cast, sourceAddr); } }; diff --git a/lib/SIL/Utils/OwnershipUtils.cpp b/lib/SIL/Utils/OwnershipUtils.cpp index 833bb2542a8b7..ced3198cb63a6 100644 --- a/lib/SIL/Utils/OwnershipUtils.cpp +++ b/lib/SIL/Utils/OwnershipUtils.cpp @@ -125,15 +125,6 @@ bool swift::isOwnershipForwardingInst(SILInstruction *i) { return isOwnershipForwardingValueKind(SILNodeKind(i->getKind())); } -bool swift::isReborrowInstruction(const SILInstruction *i) { - switch (i->getKind()) { - case SILInstructionKind::BranchInst: - return true; - default: - return false; - } -} - //===----------------------------------------------------------------------===// // Borrowing Operand //===----------------------------------------------------------------------===// @@ -247,7 +238,7 @@ void BorrowingOperand::visitConsumingUsesOfBorrowIntroducingUserResults( // single guaranteed scope. value.visitLocalScopeEndingUses([&](Operand *valueUser) { if (auto subBorrowScopeOp = BorrowingOperand::get(valueUser)) { - if (subBorrowScopeOp->consumesGuaranteedValues()) { + if (subBorrowScopeOp->isReborrow()) { subBorrowScopeOp->visitUserResultConsumingUses(func); return; } diff --git a/lib/SIL/Verifier/LinearLifetimeChecker.cpp b/lib/SIL/Verifier/LinearLifetimeChecker.cpp index e580bf81c4c1c..44b45cb7c74da 100644 --- a/lib/SIL/Verifier/LinearLifetimeChecker.cpp +++ b/lib/SIL/Verifier/LinearLifetimeChecker.cpp @@ -313,7 +313,8 @@ void State::checkForSameBlockUseAfterFree(Operand *consumingUse, }) == userBlock->end()) { continue; } - } else if (isReborrowInstruction(consumingUse->getUser())) { + } else if (auto borrowingOperand = BorrowingOperand::get(consumingUse)) { + assert(borrowingOperand->isReborrow()); continue; } diff --git a/lib/SIL/Verifier/ReborrowVerifier.cpp b/lib/SIL/Verifier/ReborrowVerifier.cpp index a387e28d4a14b..179831e56d25b 100644 --- a/lib/SIL/Verifier/ReborrowVerifier.cpp +++ b/lib/SIL/Verifier/ReborrowVerifier.cpp @@ -52,11 +52,9 @@ void ReborrowVerifier::verifyReborrows(BorrowingOperand initialScopedOperand, std::tie(borrowLifetimeEndOp, baseVal) = worklist.pop_back_val(); auto *borrowLifetimeEndUser = borrowLifetimeEndOp->getUser(); - // TODO: Add a ReborrowOperand ADT if we need to treat more instructions as - // a reborrow - if (!isReborrowInstruction(borrowLifetimeEndUser)) { + auto borrowingOperand = BorrowingOperand::get(borrowLifetimeEndOp); + if (!borrowingOperand || !borrowingOperand->isReborrow()) continue; - } if (isVisitedOp(borrowLifetimeEndOp, baseVal)) continue; diff --git a/lib/SIL/Verifier/SILOwnershipVerifier.cpp b/lib/SIL/Verifier/SILOwnershipVerifier.cpp index 098cc801849b6..97335a6cd1b36 100644 --- a/lib/SIL/Verifier/SILOwnershipVerifier.cpp +++ b/lib/SIL/Verifier/SILOwnershipVerifier.cpp @@ -354,7 +354,7 @@ bool SILValueOwnershipChecker::gatherUsers( // BorrowScopeOperand and if so, add its end scope instructions as // implicit regular users of our value. 
if (auto scopedOperand = BorrowingOperand::get(op)) { - assert(!scopedOperand->consumesGuaranteedValues()); + assert(!scopedOperand->isReborrow()); std::function onError = [&](Operand *op) { errorBuilder.handleMalformedSIL([&] { diff --git a/lib/SILGen/ArgumentScope.h b/lib/SILGen/ArgumentScope.h index 83c677e3701d1..affedf995d300 100644 --- a/lib/SILGen/ArgumentScope.h +++ b/lib/SILGen/ArgumentScope.h @@ -61,6 +61,8 @@ class ArgumentScope { formalEvalScope.verify(); } + bool isValid() const { return normalScope.isValid(); } + private: void popImpl() { // We must always pop the formal eval scope before the normal scope since diff --git a/lib/SILGen/ResultPlan.cpp b/lib/SILGen/ResultPlan.cpp index 0f3309dbea041..f249c9261d3f6 100644 --- a/lib/SILGen/ResultPlan.cpp +++ b/lib/SILGen/ResultPlan.cpp @@ -453,15 +453,71 @@ class TupleInitializationResultPlan final : public ResultPlan { class ForeignAsyncInitializationPlan final : public ResultPlan { SILLocation loc; + CalleeTypeInfo calleeTypeInfo; + SILType opaqueResumeType; + SILValue resumeBuf; + SILValue continuation; + public: - ForeignAsyncInitializationPlan(SILLocation loc) : loc(loc) {} + ForeignAsyncInitializationPlan(SILGenFunction &SGF, SILLocation loc, + const CalleeTypeInfo &calleeTypeInfo) + : loc(loc), calleeTypeInfo(calleeTypeInfo) + { + // Allocate space to receive the resume value when the continuation is + // resumed. + opaqueResumeType = SGF.getLoweredType(AbstractionPattern::getOpaque(), + calleeTypeInfo.substResultType); + resumeBuf = SGF.emitTemporaryAllocation(loc, opaqueResumeType); + } void gatherIndirectResultAddrs(SILGenFunction &SGF, SILLocation loc, SmallVectorImpl &outList) const override { - // TODO: Move values from the continuation result buffer to the individual - // out argument buffers, unless we were able to emit the resume buffer - // in-place. + // A foreign async function shouldn't have any indirect results. + } + + ManagedValue + emitForeignAsyncCompletionHandler(SILGenFunction &SGF, SILLocation loc) + override { + // Get the current continuation for the task. + auto continuationDecl = calleeTypeInfo.foreign.async->completionHandlerErrorParamIndex() + ? SGF.getASTContext().getUnsafeThrowingContinuationDecl() + : SGF.getASTContext().getUnsafeContinuationDecl(); + + auto continuationTy = BoundGenericType::get(continuationDecl, Type(), + calleeTypeInfo.substResultType) + ->getCanonicalType(); + + + continuation = SGF.B.createGetAsyncContinuationAddr(loc, resumeBuf, + SILType::getPrimitiveObjectType(continuationTy)); + + // Stash it in a buffer for a block object. + auto blockStorageTy = SILType::getPrimitiveAddressType(SILBlockStorageType::get(continuationTy)); + auto blockStorage = SGF.emitTemporaryAllocation(loc, blockStorageTy); + auto continuationAddr = SGF.B.createProjectBlockStorage(loc, blockStorage); + SGF.B.createStore(loc, continuation, continuationAddr, + StoreOwnershipQualifier::Trivial); + + // Get the block invocation function for the given completion block type. + auto completionHandlerIndex = calleeTypeInfo.foreign.async + ->completionHandlerParamIndex(); + auto implTy = cast(calleeTypeInfo.substFnType + ->getParameters()[completionHandlerIndex] + .getInterfaceType()); + SILFunction *impl = SGF.SGM + .getOrCreateForeignAsyncCompletionHandlerImplFunction(implTy, + continuationTy, + *calleeTypeInfo.foreign.async); + auto implRef = SGF.B.createFunctionRef(loc, impl); + + // Initialize the block object for the completion handler. 
+ auto block = SGF.B.createInitBlockStorageHeader(loc, blockStorage, implRef, + SILType::getPrimitiveObjectType(implTy), {}); + // We don't need to manage the block because it's still on the stack. We + // know we won't escape it locally so the callee can be responsible for + // _Block_copy-ing it. + return ManagedValue::forUnmanaged(block); } RValue finish(SILGenFunction &SGF, SILLocation loc, CanType substType, @@ -469,9 +525,44 @@ class ForeignAsyncInitializationPlan final : public ResultPlan { // There should be no direct results from the call. assert(directResults.empty()); - // TODO: Get the actual result values from the awaited continuation. - // For now, produce an undef RValue. - return SGF.emitUndefRValue(loc, substType); + // Await the continuation we handed off to the completion handler. + SILBasicBlock *resumeBlock = SGF.createBasicBlock(); + SILBasicBlock *errorBlock = nullptr; + auto errorParamIndex = calleeTypeInfo.foreign.async->completionHandlerErrorParamIndex(); + if (errorParamIndex) { + errorBlock = SGF.createBasicBlock(FunctionSection::Postmatter); + } + + SGF.B.createAwaitAsyncContinuation(loc, continuation, resumeBlock, errorBlock); + + // Propagate an error if we have one. + if (errorBlock) { + SGF.B.emitBlock(errorBlock); + + Scope errorScope(SGF, loc); + + auto errorTy = SGF.getASTContext().getErrorDecl()->getDeclaredType() + ->getCanonicalType(); + auto errorVal + = SGF.B.createOwnedPhiArgument(SILType::getPrimitiveObjectType(errorTy)); + + SGF.emitThrow(loc, errorVal, true); + } + + SGF.B.emitBlock(resumeBlock); + + // The incoming value is the maximally-abstracted result type of the + // continuation. Move it out of the resume buffer and reabstract it if + // necessary. + auto resumeResult = SGF.emitLoad(loc, resumeBuf, + calleeTypeInfo.origResultType + ? *calleeTypeInfo.origResultType + : AbstractionPattern(calleeTypeInfo.substResultType), + calleeTypeInfo.substResultType, + SGF.getTypeLowering(calleeTypeInfo.substResultType), + SGFContext(), IsTake); + + return RValue(SGF, loc, calleeTypeInfo.substResultType, resumeResult); } }; @@ -572,8 +663,7 @@ ResultPlanPtr ResultPlanBuilder::buildTopLevelResult(Initialization *init, // Create a result plan that gets the result schema from the completion // handler callback's arguments. // completion handler. - return ResultPlanPtr(new ForeignAsyncInitializationPlan(loc)); - + return ResultPlanPtr(new ForeignAsyncInitializationPlan(SGF, loc, calleeTypeInfo)); } else if (auto foreignError = calleeTypeInfo.foreign.error) { // Handle the foreign error first. 
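What ForeignAsyncInitializationPlan assembles out of SIL instructions here (get the continuation, smuggle it through block storage, await it, then branch to a resume or error block) is morally the desugaring below, written against the continuation API as it was later stabilized. Treat it as an analogy rather than the code SILGen emits; the function names are invented.

func loadValue(completion: @escaping (String?, Error?) -> Void) {
    completion("value", nil)
}

func loadValue() async throws -> String {
    try await withUnsafeThrowingContinuation { continuation in
        loadValue { value, error in
            if let error = error {
                continuation.resume(throwing: error)   // the error block
            } else {
                continuation.resume(returning: value!) // the resume block
            }
        }
    }
}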
     //
diff --git a/lib/SILGen/ResultPlan.h b/lib/SILGen/ResultPlan.h
index 93d0db808be4c..39a62477dbfe7 100644
--- a/lib/SILGen/ResultPlan.h
+++ b/lib/SILGen/ResultPlan.h
@@ -49,6 +49,11 @@ class ResultPlan {
   emitForeignErrorArgument(SILGenFunction &SGF, SILLocation loc) {
     return None;
   }
+
+  virtual ManagedValue
+  emitForeignAsyncCompletionHandler(SILGenFunction &SGF, SILLocation loc) {
+    return {};
+  }
 };
 
 using ResultPlanPtr = std::unique_ptr<ResultPlan>;
diff --git a/lib/SILGen/SILGen.cpp b/lib/SILGen/SILGen.cpp
index b0c73c8350bbd..f0943373f754e 100644
--- a/lib/SILGen/SILGen.cpp
+++ b/lib/SILGen/SILGen.cpp
@@ -327,6 +327,53 @@ SILGenModule::getConformanceToBridgedStoredNSError(SILLocation loc, Type type) {
   return SwiftModule->lookupConformance(type, proto);
 }
 
+static FuncDecl *
+lookUpResumeContinuationIntrinsic(ASTContext &C,
+                                  Optional<FuncDecl *> &cache,
+                                  StringRef name) {
+  if (cache)
+    return *cache;
+
+  auto *module = C.getLoadedModule(C.Id_Concurrency);
+  if (!module) {
+    cache = nullptr;
+    return nullptr;
+  }
+
+  SmallVector<ValueDecl *, 1> decls;
+  module->lookupQualified(module,
+                          DeclNameRef(C.getIdentifier(name)),
+                          NL_QualifiedDefault | NL_IncludeUsableFromInline,
+                          decls);
+
+  if (decls.size() != 1) {
+    cache = nullptr;
+    return nullptr;
+  }
+  auto func = dyn_cast<FuncDecl>(decls[0]);
+  cache = func;
+  return func;
+}
+
+FuncDecl *
+SILGenModule::getResumeUnsafeContinuation() {
+  return lookUpResumeContinuationIntrinsic(getASTContext(),
+                                           ResumeUnsafeContinuation,
+                                           "_resumeUnsafeContinuation");
+}
+FuncDecl *
+SILGenModule::getResumeUnsafeThrowingContinuation() {
+  return lookUpResumeContinuationIntrinsic(getASTContext(),
+                                           ResumeUnsafeThrowingContinuation,
+                                           "_resumeUnsafeThrowingContinuation");
+}
+FuncDecl *
+SILGenModule::getResumeUnsafeThrowingContinuationWithError() {
+  return lookUpResumeContinuationIntrinsic(getASTContext(),
+                                           ResumeUnsafeThrowingContinuationWithError,
+                                           "_resumeUnsafeThrowingContinuationWithError");
+}
+
 ProtocolConformance *SILGenModule::getNSErrorConformanceToError() {
   if (NSErrorConformanceToError)
     return *NSErrorConformanceToError;
diff --git a/lib/SILGen/SILGen.h b/lib/SILGen/SILGen.h
index 37a0e7fdb11b8..20f02bf40babb 100644
--- a/lib/SILGen/SILGen.h
+++ b/lib/SILGen/SILGen.h
@@ -27,6 +27,7 @@
 namespace swift {
   class SILBasicBlock;
+  class ForeignAsyncConvention;
 
 namespace Lowering {
   class TypeConverter;
@@ -118,6 +119,10 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor<SILGenModule> {
 
   Optional<ProtocolConformance *> NSErrorConformanceToError;
 
+  Optional<FuncDecl *> ResumeUnsafeContinuation;
+  Optional<FuncDecl *> ResumeUnsafeThrowingContinuation;
+  Optional<FuncDecl *> ResumeUnsafeThrowingContinuationWithError;
+
 public:
   SILGenModule(SILModule &M, ModuleDecl *SM);
 
@@ -163,6 +168,14 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor<SILGenModule> {
                                   CanSILFunctionType fromType,
                                   CanSILFunctionType toType,
                                   CanType dynamicSelfType);
+
+  /// Get or create the declaration of a completion handler block
+  /// implementation function for an ObjC API that was imported
+  /// as `async` in Swift.
+  SILFunction *getOrCreateForeignAsyncCompletionHandlerImplFunction(
+      CanSILFunctionType blockType,
+      CanType continuationTy,
+      ForeignAsyncConvention convention);
 
   /// Determine whether the given class has any instance variables that
   /// need to be destroyed.
@@ -460,6 +473,13 @@ class LLVM_LIBRARY_VISIBILITY SILGenModule : public ASTVisitor<SILGenModule> {
   /// Retrieve the conformance of NSError to the Error protocol.
   ProtocolConformance *getNSErrorConformanceToError();
 
+  /// Retrieve the _Concurrency._resumeUnsafeContinuation intrinsic.
+ FuncDecl *getResumeUnsafeContinuation(); + /// Retrieve the _Concurrency._resumeUnsafeThrowingContinuation intrinsic. + FuncDecl *getResumeUnsafeThrowingContinuation(); + /// Retrieve the _Concurrency._resumeUnsafeThrowingContinuationWithError intrinsic. + FuncDecl *getResumeUnsafeThrowingContinuationWithError(); + SILFunction *getKeyPathProjectionCoroutine(bool isReadAccess, KeyPathTypeKind typeKind); diff --git a/lib/SILGen/SILGenApply.cpp b/lib/SILGen/SILGenApply.cpp index e140eb8d75daa..147fd3ae7d7f3 100644 --- a/lib/SILGen/SILGenApply.cpp +++ b/lib/SILGen/SILGenApply.cpp @@ -3275,14 +3275,15 @@ class ArgEmitter { if (Foreign.async && Foreign.async->completionHandlerParamIndex() == Args.size()) { SILParameterInfo param = claimNextParameters(1).front(); - - // TODO: Get or create the completion handler block implementation - // function for the given argument type, then create a block containing - // the current continuation. (This probably needs to be deferred to right - // before the actual call, since evaluating other arguments to the call - // may suspend the task) - auto argTy = SILType::getPrimitiveObjectType(param.getInterfaceType()); - Args.push_back(ManagedValue::forUnmanaged(SILUndef::get(argTy, SGF.F))); + (void)param; + + // Leave a placeholder in the position. We'll fill this in with a block + // capturing the current continuation right before we invoke the + // function. + // (We can't do this immediately, because evaluating other arguments + // may require suspending the async task, which is not allowed while its + // continuation is active.) + Args.push_back(ManagedValue::forInContext()); } else if (Foreign.error && Foreign.error->getErrorParameterIndex() == Args.size()) { SILParameterInfo param = claimNextParameters(1).front(); @@ -4326,14 +4327,21 @@ RValue SILGenFunction::emitApply(ResultPlanPtr &&resultPlan, // If there's a foreign error or async parameter, fill it in. ManagedValue errorTemp; - if (calleeTypeInfo.foreign.async) { - // TODO: prepare the callback continuation and block here. - - } else if (calleeTypeInfo.foreign.error) { + if (auto foreignAsync = calleeTypeInfo.foreign.async) { + unsigned completionIndex = foreignAsync->completionHandlerParamIndex(); + + // Ram the emitted error into the argument list, over the placeholder + // we left during the first pass. + auto &completionArgSlot = const_cast(args[completionIndex]); + + completionArgSlot = resultPlan->emitForeignAsyncCompletionHandler(*this, loc); + + } else if (auto foreignError = calleeTypeInfo.foreign.error) { unsigned errorParamIndex = - calleeTypeInfo.foreign.error->getErrorParameterIndex(); + foreignError->getErrorParameterIndex(); - // This is pretty evil. + // Ram the emitted error into the argument list, over the placeholder + // we left during the first pass. auto &errorArgSlot = const_cast(args[errorParamIndex]); std::tie(errorTemp, errorArgSlot) = @@ -4444,9 +4452,6 @@ RValue SILGenFunction::emitApply(ResultPlanPtr &&resultPlan, *foreignError); } - // TODO(async): If there's a foreign async convention, await the continuation - // to get the result from the completion callback. 
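The placeholder trick above (a ManagedValue::forInContext() left in the argument list, overwritten by emitApply just before the call) exists because evaluating a later argument can itself suspend the task, and the continuation captured by the completion block must be the one live at the moment of the call. An invented Swift example of an argument whose evaluation suspends:

func currentPrice(for id: Int) async -> Double {
    Double(id)
}

func submit(_ amount: Double, completion: @escaping (Bool) -> Void) {
    completion(amount > 0)
}

func run() async {
    // The first argument suspends before `submit` is entered, so anything
    // tied to the task's current continuation has to be captured after the
    // await, i.e. immediately before the call.
    submit(await currentPrice(for: 7)) { accepted in
        print(accepted)
    }
}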
-
   auto directResultsArray = makeArrayRef(directResults);
   RValue result =
       resultPlan->finish(*this, loc, substResultType, directResultsArray);
diff --git a/lib/SILGen/SILGenThunk.cpp b/lib/SILGen/SILGenThunk.cpp
index 74d1efeceec69..dbd2ba3674cdc 100644
--- a/lib/SILGen/SILGenThunk.cpp
+++ b/lib/SILGen/SILGenThunk.cpp
@@ -27,6 +27,7 @@
 #include "swift/AST/ASTMangler.h"
 #include "swift/AST/DiagnosticsSIL.h"
 #include "swift/AST/FileUnit.h"
+#include "swift/AST/ForeignAsyncConvention.h"
 #include "swift/AST/GenericEnvironment.h"
 #include "swift/SIL/PrettyStackTrace.h"
 #include "swift/SIL/SILArgument.h"
@@ -138,6 +139,177 @@ SILGenFunction::emitGlobalFunctionRef(SILLocation loc, SILDeclRef constant,
   return B.createFunctionRefFor(loc, f);
 }
 
+SILFunction *
+SILGenModule::getOrCreateForeignAsyncCompletionHandlerImplFunction(
+    CanSILFunctionType blockType,
+    CanType continuationTy,
+    ForeignAsyncConvention convention) {
+  // Extract the result type from the continuation type.
+  auto resumeType = cast<BoundGenericType>(continuationTy).getGenericArgs()[0];
+
+  // Build up the implementation function type, which matches the
+  // block signature with an added block storage argument that points at the
+  // block buffer. The block storage holds the continuation we feed the
+  // result values into.
+  SmallVector<SILParameterInfo, 4> implArgs;
+  auto blockStorageTy = SILBlockStorageType::get(continuationTy);
+  implArgs.push_back(SILParameterInfo(blockStorageTy,
+                                ParameterConvention::Indirect_InoutAliasable));
+
+  std::copy(blockType->getParameters().begin(),
+            blockType->getParameters().end(),
+            std::back_inserter(implArgs));
+
+  auto implTy = SILFunctionType::get(GenericSignature(),
+         blockType->getExtInfo()
+           .withRepresentation(SILFunctionTypeRepresentation::CFunctionPointer),
+         SILCoroutineKind::None,
+         ParameterConvention::Direct_Unowned,
+         implArgs, {}, blockType->getResults(),
+         None,
+         SubstitutionMap(), SubstitutionMap(), getASTContext());
+
+  auto loc = RegularLocation::getAutoGeneratedLocation();
+
+  Mangle::ASTMangler Mangler;
+  auto name = Mangler.mangleObjCAsyncCompletionHandlerImpl(blockType,
+                                                           resumeType,
+                                                           /*predefined*/ false);
+
+  SILGenFunctionBuilder builder(*this);
+  auto F = builder.getOrCreateSharedFunction(loc, name, implTy,
+                                             IsBare, IsTransparent, IsSerializable,
+                                             ProfileCounter(),
+                                             IsThunk,
+                                             IsNotDynamic);
+
+  if (F->empty()) {
+    // TODO: Emit the implementation.
+    SILGenFunction SGF(*this, *F, SwiftModule);
+    {
+      Scope scope(SGF, loc);
+      SmallVector<ManagedValue, 4> params;
+      SGF.collectThunkParams(loc, params);
+
+      // Get the continuation out of the block object.
+      auto blockStorage = params[0].getValue();
+      auto continuationAddr = SGF.B.createProjectBlockStorage(loc, blockStorage);
+      auto continuationVal = SGF.B.createLoad(loc, continuationAddr,
+                                              LoadOwnershipQualifier::Trivial);
+      auto continuation = ManagedValue::forUnmanaged(continuationVal);
+
+      // Check for an error if the convention includes one.
+      auto errorIndex = convention.completionHandlerErrorParamIndex();
+
+      FuncDecl *resumeIntrinsic, *errorIntrinsic;
+
+      SILBasicBlock *returnBB = nullptr;
+      if (errorIndex) {
+        resumeIntrinsic = getResumeUnsafeThrowingContinuation();
+        errorIntrinsic = getResumeUnsafeThrowingContinuationWithError();
+
+        auto errorArgument = params[*errorIndex + 1];
+        auto someErrorBB = SGF.createBasicBlock(FunctionSection::Postmatter);
+        auto noneErrorBB = SGF.createBasicBlock();
+        returnBB = SGF.createBasicBlockAfter(noneErrorBB);
+
+        auto &C = SGF.getASTContext();
+        std::pair<EnumElementDecl *, SILBasicBlock *> switchErrorBBs[] = {
+          {C.getOptionalSomeDecl(), someErrorBB},
+          {C.getOptionalNoneDecl(), noneErrorBB}
+        };
+
+        SGF.B.createSwitchEnum(loc, errorArgument.borrow(SGF, loc).getValue(),
+                               /*default*/ nullptr,
+                               switchErrorBBs);
+
+        SGF.B.emitBlock(someErrorBB);
+
+        auto matchedErrorTy = errorArgument.getType().getOptionalObjectType();
+        auto matchedError = SGF.B
+          .createGuaranteedTransformingTerminatorArgument(matchedErrorTy);
+
+        // Resume the continuation as throwing the given error, bridged to a
+        // native Swift error.
+        auto nativeError = SGF.emitBridgedToNativeError(loc, matchedError);
+        Type replacementTypes[] = {resumeType};
+        auto subs = SubstitutionMap::get(errorIntrinsic->getGenericSignature(),
+                                         replacementTypes,
+                                         ArrayRef<ProtocolConformanceRef>{});
+        SGF.emitApplyOfLibraryIntrinsic(loc, errorIntrinsic, subs,
+                                        {continuation, nativeError},
+                                        SGFContext());
+
+        SGF.B.createBranch(loc, returnBB);
+        SGF.B.emitBlock(noneErrorBB);
+      } else {
+        resumeIntrinsic = getResumeUnsafeContinuation();
+      }
+
+      auto loweredResumeTy = SGF.getLoweredType(AbstractionPattern::getOpaque(),
+                                                resumeType);
+
+      // Prepare the argument for the resume intrinsic, using the non-error
+      // arguments to the callback.
+      {
+        Scope resumeScope(SGF, loc);
+        unsigned errorIndexBoundary = errorIndex ? *errorIndex : ~0u;
+        auto resumeArgBuf = SGF.emitTemporaryAllocation(loc,
+                                              loweredResumeTy.getAddressType());
+
+        auto prepareArgument = [&](SILValue destBuf, ManagedValue arg) {
+          // Convert the ObjC argument to the bridged Swift representation we
+          // want.
+          ManagedValue bridgedArg = SGF.emitBridgedToNativeValue(loc,
+                                        arg,
+                                        arg.getType().getASTType(),
+                                        // FIXME: pass down formal type
+                                        destBuf->getType().getASTType(),
+                                        destBuf->getType().getObjectType());
+          bridgedArg.forwardInto(SGF, loc, destBuf);
+        };
+
+        if (auto resumeTuple = dyn_cast<TupleType>(resumeType)) {
+          assert(params.size() == resumeTuple->getNumElements()
+                                    + 1 + (bool)errorIndex);
+          for (auto i : indices(resumeTuple.getElementTypes())) {
+            auto resumeEltBuf = SGF.B.createTupleElementAddr(loc,
+                                                             resumeArgBuf, i);
+            auto arg = params[1 + i + (i >= errorIndexBoundary)];
+            prepareArgument(resumeEltBuf, arg);
+          }
+        } else {
+          assert(params.size() == 2 + (bool)errorIndex);
+          prepareArgument(resumeArgBuf, params[1 + (errorIndexBoundary == 0)]);
+        }
+
+
+        // Resume the continuation with the composed bridged result.
+        ManagedValue resumeArg = SGF.emitManagedBufferWithCleanup(resumeArgBuf);
+        Type replacementTypes[] = {resumeType};
+        auto subs = SubstitutionMap::get(resumeIntrinsic->getGenericSignature(),
+                                         replacementTypes,
+                                         ArrayRef<ProtocolConformanceRef>{});
+        SGF.emitApplyOfLibraryIntrinsic(loc, resumeIntrinsic, subs,
+                                        {continuation, resumeArg},
+                                        SGFContext());
+      }
+
+      // Now we've resumed the continuation one way or another. Return from the
+      // completion callback.
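Before the return paths below rejoin: morally, the thunk emitted above behaves like this hand-written handler. The names and the (String, Int) payload are invented, and the real thunk additionally bridges each ObjC argument to its Swift representation before resuming.

func handleCompletion(_ continuation: UnsafeContinuation<(String, Int), Error>,
                      _ name: String?, _ count: Int, _ error: Error?) {
    if let error = error {
        // The switch_enum some-case: resume by throwing the bridged error.
        continuation.resume(throwing: error)
    } else {
        // The none-case: compose the non-error parameters into the resume
        // value, here a (String, Int) tuple.
        continuation.resume(returning: (name!, count))
    }
}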
+ if (returnBB) { + SGF.B.createBranch(loc, returnBB); + SGF.B.emitBlock(returnBB); + } + } + + SGF.B.createReturn(loc, + SILUndef::get(SGF.SGM.Types.getEmptyTupleType(), SGF.F)); + } + + return F; +} + SILFunction *SILGenModule:: getOrCreateReabstractionThunk(CanSILFunctionType thunkType, CanSILFunctionType fromType, diff --git a/lib/SILGen/SwitchEnumBuilder.cpp b/lib/SILGen/SwitchEnumBuilder.cpp index 4704d95fae200..3323d2211975a 100644 --- a/lib/SILGen/SwitchEnumBuilder.cpp +++ b/lib/SILGen/SwitchEnumBuilder.cpp @@ -22,11 +22,11 @@ using namespace Lowering; //===----------------------------------------------------------------------===// SwitchCaseFullExpr::SwitchCaseFullExpr(SILGenFunction &SGF, CleanupLocation loc) - : SGF(SGF), scope(SGF.Cleanups, loc), loc(loc), branchDest() {} + : SGF(SGF), scope(SGF, loc), loc(loc), branchDest() {} SwitchCaseFullExpr::SwitchCaseFullExpr(SILGenFunction &SGF, CleanupLocation loc, SwitchCaseBranchDest branchDest) - : SGF(SGF), scope(SGF.Cleanups, loc), loc(loc), branchDest(branchDest) {} + : SGF(SGF), scope(SGF, loc), loc(loc), branchDest(branchDest) {} void SwitchCaseFullExpr::exitAndBranch(SILLocation loc, ArrayRef branchArgs) { diff --git a/lib/SILGen/SwitchEnumBuilder.h b/lib/SILGen/SwitchEnumBuilder.h index 472ad31ab55aa..53ad4c51e1996 100644 --- a/lib/SILGen/SwitchEnumBuilder.h +++ b/lib/SILGen/SwitchEnumBuilder.h @@ -13,6 +13,7 @@ #ifndef SWIFT_SILGEN_SWITCHENUMBUILDER_H #define SWIFT_SILGEN_SWITCHENUMBUILDER_H +#include "ArgumentScope.h" #include "Scope.h" namespace swift { @@ -52,7 +53,7 @@ struct SwitchCaseBranchDest { /// This scope is also exposed to the debug info. class SwitchCaseFullExpr { SILGenFunction &SGF; - Scope scope; + ArgumentScope scope; CleanupLocation loc; SwitchCaseBranchDest branchDest; diff --git a/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp b/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp index fc785c112ffef..6de6cee60781e 100644 --- a/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp +++ b/lib/SILOptimizer/Differentiation/LinearMapInfo.cpp @@ -138,6 +138,8 @@ LinearMapInfo::createBranchingTraceDecl(SILBasicBlock *originalBB, // Note: must mark enum as implicit to satisfy assertion in // `Parser::parseDeclListDelayed`. branchingTraceDecl->setImplicit(); + // Branching trace enums shall not be resilient. + branchingTraceDecl->getAttrs().add(new (astCtx) FrozenAttr(/*implicit*/ true)); if (genericSig) branchingTraceDecl->setGenericSignature(genericSig); computeAccessLevel(branchingTraceDecl, original->getEffectiveSymbolLinkage()); @@ -201,6 +203,8 @@ LinearMapInfo::createLinearMapStruct(SILBasicBlock *originalBB, // Note: must mark struct as implicit to satisfy assertion in // `Parser::parseDeclListDelayed`. linearMapStruct->setImplicit(); + // Linear map structs shall not be resilient. 
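The implicit FrozenAttr added to branching trace enums above, and attached to linear map structs on the line that follows, is the source-level @frozen in disguise: it pins the synthesized type's stored layout so differentiation never pays resilience overhead. A source-level sketch of what the attribute means under library evolution (the type is invented):

@frozen
public struct PullbackValues {
    public var dx: Double
    public var dy: Double
}
// With -enable-library-evolution, clients may access the stored members of
// a @frozen struct directly instead of going through opaque accessors.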
+ linearMapStruct->getAttrs().add(new (astCtx) FrozenAttr(/*implicit*/ true)); if (genericSig) linearMapStruct->setGenericSignature(genericSig); computeAccessLevel(linearMapStruct, original->getEffectiveSymbolLinkage()); diff --git a/lib/SILOptimizer/LoopTransforms/LICM.cpp b/lib/SILOptimizer/LoopTransforms/LICM.cpp index bf489fe892e13..06091676978b3 100644 --- a/lib/SILOptimizer/LoopTransforms/LICM.cpp +++ b/lib/SILOptimizer/LoopTransforms/LICM.cpp @@ -15,6 +15,7 @@ #include "swift/SIL/Dominance.h" #include "swift/SIL/InstructionUtils.h" #include "swift/SIL/MemAccessUtils.h" +#include "swift/SIL/Projection.h" #include "swift/SIL/SILArgument.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILInstruction.h" @@ -61,8 +62,8 @@ static bool mayWriteTo(AliasAnalysis *AA, InstSet &SideEffectInsts, return false; } -/// Returns true if \p I is a store to \p addr. -static StoreInst *isStoreToAddr(SILInstruction *I, SILValue addr) { +/// Returns a non-null StoreInst if \p I is a store to \p accessPath. +static StoreInst *isStoreToAccess(SILInstruction *I, AccessPath accessPath) { auto *SI = dyn_cast(I); if (!SI) return nullptr; @@ -71,53 +72,91 @@ static StoreInst *isStoreToAddr(SILInstruction *I, SILValue addr) { if (SI->getOwnershipQualifier() == StoreOwnershipQualifier::Init) return nullptr; - if (SI->getDest() != addr) + auto storeAccessPath = AccessPath::compute(SI->getDest()); + if (accessPath != storeAccessPath) return nullptr; return SI; } -/// Returns true if \p I is a load from \p addr or a projected address from -/// \p addr. -static LoadInst *isLoadFromAddr(SILInstruction *I, SILValue addr) { +struct LoadWithAccess { + LoadInst *li = nullptr; + AccessPath accessPath; + + operator bool() { return li != nullptr; } +}; + +static LoadWithAccess doesLoadOverlapAccess(SILInstruction *I, + AccessPath accessPath) { auto *LI = dyn_cast_or_null(I); if (!LI) - return nullptr; + return LoadWithAccess(); - // TODO: handle StoreOwnershipQualifier::Take + // TODO: handle LoadOwnershipQualifier::Take if (LI->getOwnershipQualifier() == LoadOwnershipQualifier::Take) - return nullptr; + return LoadWithAccess(); - SILValue v = LI->getOperand(); - for (;;) { - if (v == addr) { - return LI; - } else if (isa(v) || isa(v)) { - v = cast(v)->getOperand(0); - } else { - return nullptr; + AccessPath loadAccessPath = AccessPath::compute(LI->getOperand()); + if (!loadAccessPath.isValid()) + return LoadWithAccess(); + + // Don't use AccessPath::mayOverlap. We only want definite overlap. + if (loadAccessPath.contains(accessPath) + || accessPath.contains(loadAccessPath)) { + return {LI, loadAccessPath}; + } + return LoadWithAccess(); +} + +/// Returns a valid LoadWithAccess if \p I is a load from \p accessPath or a +/// projected address from \p accessPath. +static LoadWithAccess isLoadWithinAccess(SILInstruction *I, + AccessPath accessPath) { + auto loadWithAccess = doesLoadOverlapAccess(I, accessPath); + if (!loadWithAccess) + return loadWithAccess; + + // Make sure that any additional path components beyond the store's access + // path can be converted to value projections during projectLoadValue (it + // currently only supports StructElementAddr and TupleElementAddr). 
+ auto storePathNode = accessPath.getPathNode(); + auto loadPathNode = loadWithAccess.accessPath.getPathNode(); + SILValue loadAddr = loadWithAccess.li->getOperand(); + while (loadPathNode != storePathNode) { + if (!isa(loadAddr) + && !isa(loadAddr)) { + return LoadWithAccess(); } + loadAddr = cast(loadAddr)->getOperand(0); + loadPathNode = loadPathNode.getParent(); } + return loadWithAccess; } /// Returns true if all instructions in \p SideEffectInsts which may alias with -/// \p addr are either loads or stores from \p addr. +/// \p access are either loads or stores from \p access. +/// +/// \p storeAddr is only needed for AliasAnalysis until we have an interface +/// that supports AccessPath. static bool isOnlyLoadedAndStored(AliasAnalysis *AA, InstSet &SideEffectInsts, ArrayRef Loads, ArrayRef Stores, - SILValue addr) { + SILValue storeAddr, AccessPath accessPath) { for (auto *I : SideEffectInsts) { - if (AA->mayReadOrWriteMemory(I, addr) && - !isStoreToAddr(I, addr) && !isLoadFromAddr(I, addr)) { + // Pass the original address value until we can fix AA + if (AA->mayReadOrWriteMemory(I, storeAddr) + && !isStoreToAccess(I, accessPath) + && !isLoadWithinAccess(I, accessPath)) { return false; } } for (auto *LI : Loads) { - if (AA->mayReadFromMemory(LI, addr) && !isLoadFromAddr(LI, addr)) + if (AA->mayReadFromMemory(LI, storeAddr) + && !doesLoadOverlapAccess(LI, accessPath)) return false; } for (auto *SI : Stores) { - if (AA->mayWriteToMemory(SI, addr) && !isStoreToAddr(SI, addr)) + if (AA->mayWriteToMemory(SI, storeAddr) && !isStoreToAccess(SI, accessPath)) return false; } return true; @@ -466,6 +505,7 @@ class LoopTreeOptimization { llvm::DenseMap> LoopNestSummaryMap; SmallVector BotUpWorkList; + InstSet toDelete; SILLoopInfo *LoopInfo; AliasAnalysis *AA; SideEffectAnalysis *SEA; @@ -486,10 +526,12 @@ class LoopTreeOptimization { InstVector SinkDown; /// Load and store instructions that we may be able to move out of the loop. + /// All loads and stores within a block must be in instruction order to + /// simplify replacement of values after SSA update. InstVector LoadsAndStores; - /// All addresses of the \p LoadsAndStores instructions. - llvm::SetVector LoadAndStoreAddrs; + /// All access paths of the \p LoadsAndStores instructions. + llvm::SetVector LoadAndStoreAddrs; /// Hoistable Instructions that need special treatment /// e.g. begin_access @@ -522,11 +564,22 @@ class LoopTreeOptimization { /// Collect a set of instructions that can be hoisted void analyzeCurrentLoop(std::unique_ptr &CurrSummary); + SingleValueInstruction *splitLoad(SILValue splitAddress, + ArrayRef remainingPath, + SILBuilder &builder, + SmallVectorImpl &Loads, + unsigned ldStIdx); + + /// Given an \p accessPath that is only loaded and stored, split loads that + /// are wider than \p accessPath. + bool splitLoads(SmallVectorImpl &Loads, AccessPath accessPath, + SILValue storeAddr); + /// Optimize the current loop nest. bool optimizeLoop(std::unique_ptr &CurrSummary); - /// Move all loads and stores from/to \p addr out of the \p loop. - void hoistLoadsAndStores(SILValue addr, SILLoop *loop, InstVector &toDelete); + /// Move all loads and stores from/to \p accessPath out of the \p loop. + void hoistLoadsAndStores(AccessPath accessPath, SILLoop *loop); /// Move all loads and stores from all addresses in LoadAndStoreAddrs out of /// the \p loop. @@ -759,6 +812,8 @@ static bool analyzeBeginAccess(BeginAccessInst *BI, // We *need* to discover all SideEffectInsts - // even if the loop is otherwise skipped! 
// This is because outer loops will depend on the inner loop's writes. +// +// This may split some loads into smaller loads. void LoopTreeOptimization::analyzeCurrentLoop( std::unique_ptr &CurrSummary) { InstSet &sideEffects = CurrSummary->SideEffectInsts; @@ -875,11 +930,23 @@ void LoopTreeOptimization::analyzeCurrentLoop( // Collect memory locations for which we can move all loads and stores out // of the loop. + // + // Note: The Loads set and LoadsAndStores set may mutate during this loop. for (StoreInst *SI : Stores) { - SILValue addr = SI->getDest(); - if (isLoopInvariant(addr, Loop) && - isOnlyLoadedAndStored(AA, sideEffects, Loads, Stores, addr)) { - LoadAndStoreAddrs.insert(addr); + // Use AccessPathWithBase to recover a base address that can be used for + // newly inserted memory operations. If we instead teach hoistLoadsAndStores + // how to rematerialize global_addr, then we don't need this base. + auto access = AccessPathWithBase::compute(SI->getDest()); + auto accessPath = access.accessPath; + if (accessPath.isValid() && isLoopInvariant(access.base, Loop)) { + if (isOnlyLoadedAndStored(AA, sideEffects, Loads, Stores, SI->getDest(), + accessPath)) { + if (!LoadAndStoreAddrs.count(accessPath)) { + if (splitLoads(Loads, accessPath, SI->getDest())) { + LoadAndStoreAddrs.insert(accessPath); + } + } + } } } if (!FixLifetimes.empty()) { @@ -905,6 +972,172 @@ void LoopTreeOptimization::analyzeCurrentLoop( } } +// Recursively determine whether the innerAddress is a direct tuple or struct +// projection chain from outerPath. Populate \p reversePathIndices with the path +// difference. +static bool +computeInnerAccessPath(AccessPath::PathNode outerPath, + AccessPath::PathNode innerPath, SILValue innerAddress, + SmallVectorImpl &reversePathIndices) { + if (outerPath == innerPath) + return true; + + if (!isa(innerAddress) + && !isa(innerAddress)) { + return false; + } + assert(ProjectionIndex(innerAddress).Index + == innerPath.getIndex().getSubObjectIndex()); + + reversePathIndices.push_back(innerPath.getIndex()); + SILValue srcAddr = cast(innerAddress)->getOperand(0); + if (!computeInnerAccessPath(outerPath, innerPath.getParent(), srcAddr, + reversePathIndices)) { + return false; + } + return true; +} + +/// Split a load from \p outerAddress recursively following remainingPath. +/// +/// Creates a load with identical \p accessPath and a set of +/// non-overlapping loads. Add the new non-overlapping loads to HoistUp. +/// +/// \p ldstIdx is the index into LoadsAndStores of the original outer load. +/// +/// Return the aggregate produced by merging the loads. +SingleValueInstruction *LoopTreeOptimization::splitLoad( + SILValue splitAddress, ArrayRef remainingPath, + SILBuilder &builder, SmallVectorImpl &Loads, unsigned ldstIdx) { + auto loc = LoadsAndStores[ldstIdx]->getLoc(); + // Recurse until we have a load that matches accessPath. + if (remainingPath.empty()) { + // Create a load that matches the stored access path. + LoadInst *load = builder.createLoad(loc, splitAddress, + LoadOwnershipQualifier::Unqualified); + Loads.push_back(load); + // Replace the outer load in the list of loads and stores to hoist and + // sink. LoadsAndStores must remain in instruction order. 
+ LoadsAndStores[ldstIdx] = load; + LLVM_DEBUG(llvm::dbgs() << "Created load from stored path: " << *load); + return load; + } + auto recordDisjointLoad = [&](LoadInst *newLoad) { + Loads.push_back(newLoad); + LoadsAndStores.insert(LoadsAndStores.begin() + ldstIdx + 1, newLoad); + }; + auto subIndex = remainingPath.back().getSubObjectIndex(); + SILType loadTy = splitAddress->getType(); + if (CanTupleType tupleTy = loadTy.getAs()) { + SmallVector elements; + for (int tupleIdx : range(tupleTy->getNumElements())) { + auto *projection = builder.createTupleElementAddr( + loc, splitAddress, tupleIdx, loadTy.getTupleElementType(tupleIdx)); + SILValue elementVal; + if (tupleIdx == subIndex) { + elementVal = splitLoad(projection, remainingPath.drop_back(), builder, + Loads, ldstIdx); + } else { + elementVal = builder.createLoad(loc, projection, + LoadOwnershipQualifier::Unqualified); + recordDisjointLoad(cast(elementVal)); + } + elements.push_back(elementVal); + } + return builder.createTuple(loc, elements); + } + auto structTy = loadTy.getStructOrBoundGenericStruct(); + assert(structTy && "tuple and struct elements are checked earlier"); + auto &module = builder.getModule(); + auto expansionContext = builder.getFunction().getTypeExpansionContext(); + + SmallVector elements; + int fieldIdx = 0; + for (auto *field : structTy->getStoredProperties()) { + SILType fieldTy = loadTy.getFieldType(field, module, expansionContext); + auto *projection = + builder.createStructElementAddr(loc, splitAddress, field, fieldTy); + SILValue fieldVal; + if (fieldIdx++ == subIndex) + fieldVal = splitLoad(projection, remainingPath.drop_back(), builder, + Loads, ldstIdx); + else { + fieldVal = builder.createLoad(loc, projection, + LoadOwnershipQualifier::Unqualified); + recordDisjointLoad(cast(fieldVal)); + } + elements.push_back(fieldVal); + } + return builder.createStruct(loc, loadTy.getObjectType(), elements); +} + +/// Find all loads that contain \p accessPath. Split them into a load with +/// identical accessPath and a set of non-overlapping loads. Add the new +/// non-overlapping loads to LoadsAndStores and HoistUp. +/// +/// TODO: The \p storeAddr parameter is only needed until we have an +/// AliasAnalysis interface that handles AccessPath. +bool LoopTreeOptimization::splitLoads(SmallVectorImpl &Loads, + AccessPath accessPath, + SILValue storeAddr) { + // The Loads set may mutate during this loop, but we only want to visit the + // original set. + for (unsigned loadsIdx = 0, endIdx = Loads.size(); loadsIdx != endIdx; + ++loadsIdx) { + auto *load = Loads[loadsIdx]; + if (toDelete.count(load)) + continue; + + if (!AA->mayReadFromMemory(load, storeAddr)) + continue; + + AccessPath loadAccessPath = AccessPath::compute(load->getOperand()); + if (accessPath.contains(loadAccessPath)) + continue; + + assert(loadAccessPath.contains(accessPath)); + LLVM_DEBUG(llvm::dbgs() << "Overlaps with loop stores: " << *load); + SmallVector reversePathIndices; + if (!computeInnerAccessPath(loadAccessPath.getPathNode(), + accessPath.getPathNode(), storeAddr, + reversePathIndices)) { + return false; + } + // Found a load wider than the store to accessPath. + // + // SplitLoads is called for each unique access path in the loop that is + // only loaded from and stored to and this loop takes time proportional to: + // num-wide-loads x num-fields x num-loop-memops + // + // For each load wider than the store, it creates a new load for each field + // in that type. Each new load is inserted in the LoadsAndStores vector. 
To
+  // avoid super-linear behavior for large types (e.g. giant tuples), limit
+  // growth of new loads to an arbitrary constant factor per access path.
+  if (Loads.size() >= endIdx + 6) {
+    LLVM_DEBUG(llvm::dbgs() << "...Refusing to split more loads\n");
+    return false;
+  }
+  LLVM_DEBUG(llvm::dbgs() << "...Splitting load\n");
+
+  unsigned ldstIdx = [this, load]() {
+    auto ldstIter = llvm::find(LoadsAndStores, load);
+    assert(ldstIter != LoadsAndStores.end() && "outerLoad missing");
+    return std::distance(LoadsAndStores.begin(), ldstIter);
+  }();
+
+  SILBuilderWithScope builder(load);
+
+  SILValue aggregateVal = splitLoad(load->getOperand(), reversePathIndices,
+                                    builder, Loads, ldstIdx);
+
+  load->replaceAllUsesWith(aggregateVal);
+  auto iterAndInserted = toDelete.insert(load);
+  (void)iterAndInserted;
+  assert(iterAndInserted.second && "the same load should only be split once");
+  }
+  return true;
+}
+
 bool LoopTreeOptimization::optimizeLoop(
     std::unique_ptr<LoopNestSummary> &CurrSummary) {
   auto *CurrentLoop = CurrSummary->Loop;
@@ -919,26 +1152,37 @@ bool LoopTreeOptimization::optimizeLoop(
   currChanged |= sinkInstructions(CurrSummary, DomTree, LoopInfo, SinkDown);
   currChanged |= hoistSpecialInstruction(CurrSummary, DomTree, LoopInfo,
                                          SpecialHoist);
+
+  assert(toDelete.empty() && "only hoistAllLoadsAndStores deletes");
   return currChanged;
 }
 
 /// Creates a value projection from \p rootVal based on the address projection
-/// from \a rootAddr to \a addr.
-static SILValue projectLoadValue(SILValue addr, SILValue rootAddr,
-                                 SILValue rootVal, SILInstruction *beforeInst) {
-  if (addr == rootAddr)
+/// from \a rootVal to \a addr.
+static SILValue projectLoadValue(SILValue addr, AccessPath accessPath,
+                                 SILValue rootVal, AccessPath rootAccessPath,
+                                 SILInstruction *beforeInst) {
+  if (accessPath == rootAccessPath)
     return rootVal;
 
+  auto pathNode = accessPath.getPathNode();
+  int elementIdx = pathNode.getIndex().getSubObjectIndex();
   if (auto *SEI = dyn_cast<StructElementAddrInst>(addr)) {
-    SILValue val = projectLoadValue(SEI->getOperand(), rootAddr, rootVal,
-                                    beforeInst);
+    assert(ProjectionIndex(SEI).Index == elementIdx);
+    SILValue val = projectLoadValue(
+        SEI->getOperand(),
+        AccessPath(accessPath.getStorage(), pathNode.getParent(), 0),
+        rootVal, rootAccessPath, beforeInst);
     SILBuilder B(beforeInst);
     return B.createStructExtract(beforeInst->getLoc(), val, SEI->getField(),
                                  SEI->getType().getObjectType());
   }
   if (auto *TEI = dyn_cast<TupleElementAddrInst>(addr)) {
-    SILValue val = projectLoadValue(TEI->getOperand(), rootAddr, rootVal,
-                                    beforeInst);
+    assert(ProjectionIndex(TEI).Index == elementIdx);
+    SILValue val = projectLoadValue(
+        TEI->getOperand(),
+        AccessPath(accessPath.getStorage(), pathNode.getParent(), 0),
+        rootVal, rootAccessPath, beforeInst);
     SILBuilder B(beforeInst);
     return B.createTupleExtract(beforeInst->getLoc(), val, TEI->getFieldIndex(),
                                 TEI->getType().getObjectType());
@@ -946,12 +1190,17 @@ static SILValue projectLoadValue(SILValue addr, SILValue rootAddr,
   llvm_unreachable("unknown projection");
 }
 
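To make the effect of this AccessPath-based promotion concrete, here is a hypothetical Swift-level example (the `Stats`/`accumulate` names are illustrative, not from this patch): the loop body below loads and stores the single access path `cache.sum` on every iteration, which is the pattern that projectLoadValue and hoistLoadsAndStores can now keep in a register.

```swift
struct Stats {
  var sum: Float
  var count: Int
}

var cache = Stats(sum: 0, count: 0)

func accumulate(_ values: [Float]) {
  for v in values {
    // Each iteration reads and writes the access path `cache.sum`.
    // After promotion, the loop updates an SSA value instead, with one
    // load in the preheader and one store on each loop-exit edge.
    cache.sum += v
  }
}
```

Because `count` is a disjoint field of the same storage, any wider load of `cache` inside such a loop is first broken up by splitLoads so that the promoted path no longer overlaps it.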
-/// Returns true if all stores to \p addr commonly dominate the loop exitst of
-/// \p loop.
-static bool storesCommonlyDominateLoopExits(SILValue addr, SILLoop *loop,
-                                            ArrayRef<SILBasicBlock *> exitingBlocks) {
+/// Returns true if all stores to \p addr commonly dominate the loop exits.
+static bool
+storesCommonlyDominateLoopExits(AccessPath accessPath,
+                                SILLoop *loop,
+                                ArrayRef<SILBasicBlock *> exitingBlocks) {
   SmallPtrSet<SILBasicBlock *, 16> stores;
-  for (Operand *use : addr->getUses()) {
+  SmallVector<Operand *, 8> uses;
+  // Collect as many recognizable stores as possible. It's ok if not all
+  // stores are collected.
+  accessPath.collectUses(uses, AccessUseType::Exact, loop->getFunction());
+  for (Operand *use : uses) {
     SILInstruction *user = use->getUser();
     if (isa<StoreInst>(user))
       stores.insert(user->getParent());
@@ -1030,24 +1279,26 @@ static bool storesCommonlyDominateLoopExits(SILValue addr, SILLoop *loop,
   return true;
 }
 
-void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, InstVector &toDelete) {
-
-  SmallVector<SILBasicBlock *, 8> exitingBlocks;
-  loop->getExitingBlocks(exitingBlocks);
+void LoopTreeOptimization::
+hoistLoadsAndStores(AccessPath accessPath, SILLoop *loop) {
+  SmallVector<SILBasicBlock *, 8> exitingAndLatchBlocks;
+  loop->getExitingAndLatchBlocks(exitingAndLatchBlocks);
 
   // This is not a requirement for functional correctness, but we don't want to
   // _speculatively_ load and store the value (outside of the loop).
-  if (!storesCommonlyDominateLoopExits(addr, loop, exitingBlocks))
+  if (!storesCommonlyDominateLoopExits(accessPath, loop,
+                                       exitingAndLatchBlocks))
     return;
 
   // Inserting the stores requires the exit edges to be not critical.
-  for (SILBasicBlock *exitingBlock : exitingBlocks) {
-    for (unsigned idx = 0, e = exitingBlock->getSuccessors().size();
+  for (SILBasicBlock *exitingOrLatchBlock : exitingAndLatchBlocks) {
+    for (unsigned idx = 0, e = exitingOrLatchBlock->getSuccessors().size();
          idx != e; ++idx) {
       // exitingBlock->getSuccessors() must not be moved out of this loop,
       // because the successor list is invalidated by splitCriticalEdge.
-      if (!loop->contains(exitingBlock->getSuccessors()[idx])) {
-        splitCriticalEdge(exitingBlock->getTerminator(), idx, DomTree, LoopInfo);
+      if (!loop->contains(exitingOrLatchBlock->getSuccessors()[idx])) {
+        splitCriticalEdge(exitingOrLatchBlock->getTerminator(), idx, DomTree,
+                          LoopInfo);
       }
     }
   }
@@ -1057,30 +1308,46 @@ void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, Ins
   // Initially load the value in the loop pre header.
   SILBuilder B(preheader->getTerminator());
-  auto *initialLoad = B.createLoad(preheader->getTerminator()->getLoc(), addr,
-                                   LoadOwnershipQualifier::Unqualified);
-  LLVM_DEBUG(llvm::dbgs() << "Creating preload " << *initialLoad);
-
+  SILValue storeAddr;
   SILSSAUpdater ssaUpdater;
-  ssaUpdater.initialize(initialLoad->getType());
-  ssaUpdater.addAvailableValue(preheader, initialLoad);
 
   // Set all stored values as available values in the ssaUpdater.
   // If there are multiple stores in a block, only the last one counts.
   Optional<SILLocation> loc;
   for (SILInstruction *I : LoadsAndStores) {
-    if (auto *SI = isStoreToAddr(I, addr)) {
+    if (auto *SI = isStoreToAccess(I, accessPath)) {
       loc = SI->getLoc();
 
       // If a store just stores the loaded value, bail. The operand (= the load)
       // will be removed later, so it cannot be used as available value.
       // This corner case is surprisingly hard to handle, so we just give up.
-      if (isLoadFromAddr(dyn_cast<LoadInst>(SI->getSrc()), addr))
+      if (isLoadWithinAccess(dyn_cast<LoadInst>(SI->getSrc()), accessPath))
        return;
 
+      if (!storeAddr) {
+        storeAddr = SI->getDest();
+        ssaUpdater.initialize(storeAddr->getType().getObjectType());
+      } else if (SI->getDest()->getType() != storeAddr->getType()) {
+        // This transformation assumes that the values of all stores in the
+        // loop must be interchangeable. It won't work if the stores have
+        // different types because of casting or payload extraction even
+        // though they have the same access path.
+ return; + } ssaUpdater.addAvailableValue(SI->getParent(), SI->getSrc()); } } + assert(storeAddr && "hoistLoadsAndStores requires a store in the loop"); + SILValue initialAddr = cloneUseDefChain( + storeAddr, preheader->getTerminator(), [&](SILValue srcAddr) { + // Clone projections until the address dominates preheader. + return !DomTree->dominates(srcAddr->getParentBlock(), preheader); + }); + LoadInst *initialLoad = + B.createLoad(preheader->getTerminator()->getLoc(), initialAddr, + LoadOwnershipQualifier::Unqualified); + LLVM_DEBUG(llvm::dbgs() << "Creating preload " << *initialLoad); + ssaUpdater.addAvailableValue(preheader, initialLoad); // Remove all stores and replace the loads with the current value. SILBasicBlock *currentBlock = nullptr; @@ -1091,37 +1358,45 @@ void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, Ins currentBlock = block; currentVal = SILValue(); } - if (auto *SI = isStoreToAddr(I, addr)) { + if (auto *SI = isStoreToAccess(I, accessPath)) { LLVM_DEBUG(llvm::dbgs() << "Deleting reloaded store " << *SI); currentVal = SI->getSrc(); - toDelete.push_back(SI); - } else if (auto *LI = isLoadFromAddr(I, addr)) { - // If we didn't see a store in this block yet, get the current value from - // the ssaUpdater. - if (!currentVal) - currentVal = ssaUpdater.getValueInMiddleOfBlock(block); - SILValue projectedValue = projectLoadValue(LI->getOperand(), addr, - currentVal, LI); - LLVM_DEBUG(llvm::dbgs() << "Replacing stored load " << *LI << " with " - << projectedValue); - LI->replaceAllUsesWith(projectedValue); - toDelete.push_back(LI); + toDelete.insert(SI); + continue; + } + auto loadWithAccess = isLoadWithinAccess(I, accessPath); + if (!loadWithAccess) { + continue; } + // If we didn't see a store in this block yet, get the current value from + // the ssaUpdater. + if (!currentVal) + currentVal = ssaUpdater.getValueInMiddleOfBlock(block); + + LoadInst *load = loadWithAccess.li; + auto loadAddress = load->getOperand(); + SILValue projectedValue = projectLoadValue( + loadAddress, loadWithAccess.accessPath, currentVal, accessPath, load); + LLVM_DEBUG(llvm::dbgs() << "Replacing stored load " << *load << " with " + << projectedValue); + load->replaceAllUsesWith(projectedValue); + toDelete.insert(load); } // Store back the value at all loop exits. 
- for (SILBasicBlock *exitingBlock : exitingBlocks) { - for (SILBasicBlock *succ : exitingBlock->getSuccessors()) { - if (!loop->contains(succ)) { - assert(succ->getSinglePredecessorBlock() && - "should have split critical edges"); - SILBuilder B(succ->begin()); - auto *SI = B.createStore(loc.getValue(), - ssaUpdater.getValueInMiddleOfBlock(succ), addr, - StoreOwnershipQualifier::Unqualified); - (void)SI; - LLVM_DEBUG(llvm::dbgs() << "Creating loop-exit store " << *SI); - } + for (SILBasicBlock *exitingOrLatchBlock : exitingAndLatchBlocks) { + for (SILBasicBlock *succ : exitingOrLatchBlock->getSuccessors()) { + if (loop->contains(succ)) + continue; + + assert(succ->getSinglePredecessorBlock() + && "should have split critical edges"); + SILBuilder B(succ->begin()); + auto *SI = B.createStore( + loc.getValue(), ssaUpdater.getValueInMiddleOfBlock(succ), initialAddr, + StoreOwnershipQualifier::Unqualified); + (void)SI; + LLVM_DEBUG(llvm::dbgs() << "Creating loop-exit store " << *SI); } } @@ -1130,17 +1405,20 @@ void LoopTreeOptimization::hoistLoadsAndStores(SILValue addr, SILLoop *loop, Ins } bool LoopTreeOptimization::hoistAllLoadsAndStores(SILLoop *loop) { - InstVector toDelete; - for (SILValue addr : LoadAndStoreAddrs) { - hoistLoadsAndStores(addr, loop, toDelete); + for (AccessPath accessPath : LoadAndStoreAddrs) { + hoistLoadsAndStores(accessPath, loop); } LoadsAndStores.clear(); LoadAndStoreAddrs.clear(); + if (toDelete.empty()) + return false; + for (SILInstruction *I : toDelete) { - I->eraseFromParent(); + recursivelyDeleteTriviallyDeadInstructions(I, /*force*/ true); } - return !toDelete.empty(); + toDelete.clear(); + return true; } namespace { diff --git a/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp b/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp index fd7695b164640..3c262d4ece1a8 100644 --- a/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp +++ b/lib/SILOptimizer/Transforms/AllocBoxToStack.cpp @@ -937,7 +937,8 @@ specializeApplySite(SILOptFunctionBuilder &FuncBuilder, ApplySite Apply, // release it explicitly when the partial_apply is released. if (Apply.getKind() == ApplySiteKind::PartialApplyInst) { if (PAFrontier.empty()) { - ValueLifetimeAnalysis VLA(cast(Apply)); + auto *PAI = cast(Apply); + ValueLifetimeAnalysis VLA(PAI, PAI->getUses()); pass.CFGChanged |= !VLA.computeFrontier( PAFrontier, ValueLifetimeAnalysis::AllowToModifyCFG); assert(!PAFrontier.empty() && diff --git a/lib/SILOptimizer/Utils/ValueLifetime.cpp b/lib/SILOptimizer/Utils/ValueLifetime.cpp index 55c0e05625a87..46834b54cf3c8 100644 --- a/lib/SILOptimizer/Utils/ValueLifetime.cpp +++ b/lib/SILOptimizer/Utils/ValueLifetime.cpp @@ -90,7 +90,7 @@ SILInstruction *ValueLifetimeAnalysis::findLastUserInBlock(SILBasicBlock *bb) { llvm_unreachable("Expected to find use of value in block!"); } -bool ValueLifetimeAnalysis::computeFrontier(Frontier &frontier, Mode mode, +bool ValueLifetimeAnalysis::computeFrontier(FrontierImpl &frontier, Mode mode, DeadEndBlocks *deBlocks) { assert(!isAliveAtBeginOfBlock(getFunction()->getEntryBlock()) && "Can't compute frontier for def which does not dominate all uses"); @@ -287,7 +287,7 @@ blockContainsDeallocRef(SILBasicBlock *bb, return false; } -bool ValueLifetimeAnalysis::containsDeallocRef(const Frontier &frontier) { +bool ValueLifetimeAnalysis::containsDeallocRef(const FrontierImpl &frontier) { SmallPtrSet frontierBlocks; // Search in live blocks where the value is not alive until the end of the // block, i.e. the live range is terminated by a frontier instruction. 
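Stepping back to the LICM change above: storesCommonlyDominateLoopExits exists because promotion inserts an unconditional preheader load and unconditional stores on the exit edges. A hypothetical Swift example of the pattern it must reject (names are illustrative, not from the patch):

```swift
var flag = false

func scan(_ xs: [Int]) {
  for x in xs {
    // This store does not dominate the loop exits: for an input with no
    // positive element it never executes. Hoisting it to the exit edges
    // would introduce a speculative write the original program never
    // performed.
    if x > 0 {
      flag = true
    }
  }
}
```

When the store is conditional like this, promoting it would turn a path that never wrote to `flag` into one that does, so the transformation bails out instead.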
@@ -326,7 +326,8 @@ void ValueLifetimeAnalysis::dump() const { } void swift::endLifetimeAtFrontier( - SILValue valueOrStackLoc, const ValueLifetimeAnalysis::Frontier &frontier, + SILValue valueOrStackLoc, + const ValueLifetimeAnalysis::FrontierImpl &frontier, SILBuilderContext &builderCtxt, InstModCallbacks callbacks) { for (SILInstruction *endPoint : frontier) { SILBuilderWithScope builder(endPoint, builderCtxt); diff --git a/lib/Sema/CSSimplify.cpp b/lib/Sema/CSSimplify.cpp index b7da884260de8..aa712c7dea624 100644 --- a/lib/Sema/CSSimplify.cpp +++ b/lib/Sema/CSSimplify.cpp @@ -5669,11 +5669,16 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( return SolutionKind::Unsolved; } + auto *loc = getConstraintLocator(locator); + /// Record the given conformance as the result, adding any conditional /// requirements if necessary. auto recordConformance = [&](ProtocolConformanceRef conformance) { // Record the conformance. - CheckedConformances.push_back({getConstraintLocator(locator), conformance}); + CheckedConformances.push_back({loc, conformance}); + + if (isConformanceUnavailable(conformance, loc)) + increaseScore(SK_Unavailable); // This conformance may be conditional, in which case we need to consider // those requirements as constraints too. @@ -5721,7 +5726,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( auto protocolTy = protocol->getDeclaredInterfaceType(); // If this conformance has been fixed already, let's just consider this done. - if (isFixedRequirement(getConstraintLocator(locator), protocolTy)) + if (isFixedRequirement(loc, protocolTy)) return SolutionKind::Solved; // If this is a generic requirement let's try to record that @@ -5768,7 +5773,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( auto dstType = getType(assignment->getDest()); auto *fix = IgnoreAssignmentDestinationType::create( - *this, srcType, dstType, getConstraintLocator(locator)); + *this, srcType, dstType, loc); return recordFix(fix) ? SolutionKind::Error : SolutionKind::Solved; } @@ -5779,8 +5784,7 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( // let's record it as a "contextual mismatch" because diagnostic // is going to be dependent on other contextual information. if (path.back().is()) { - auto *fix = ContextualMismatch::create(*this, type, protocolTy, - getConstraintLocator(locator)); + auto *fix = ContextualMismatch::create(*this, type, protocolTy, loc); return recordFix(fix) ? SolutionKind::Error : SolutionKind::Solved; } @@ -5818,7 +5822,6 @@ ConstraintSystem::SolutionKind ConstraintSystem::simplifyConformsToConstraint( // If this is an implicit Hashable conformance check generated for each // index argument of the keypath subscript component, we could just treat // it as though it conforms. 
-    auto *loc = getConstraintLocator(locator);
     if (loc->isResultOfKeyPathDynamicMemberLookup() ||
         loc->isKeyPathSubscriptComponent()) {
       if (protocol ==
diff --git a/lib/Sema/ConstraintSystem.cpp b/lib/Sema/ConstraintSystem.cpp
index a47c01ed70521..08cd82da1fd64 100644
--- a/lib/Sema/ConstraintSystem.cpp
+++ b/lib/Sema/ConstraintSystem.cpp
@@ -22,6 +22,7 @@
 #include "swift/AST/Initializer.h"
 #include "swift/AST/GenericEnvironment.h"
 #include "swift/AST/ParameterList.h"
+#include "swift/AST/ProtocolConformance.h"
 #include "swift/AST/TypeCheckRequests.h"
 #include "swift/Basic/Statistic.h"
 #include "swift/Sema/CSFix.h"
@@ -5088,6 +5089,37 @@ bool ConstraintSystem::isDeclUnavailable(const Decl *D,
   return result.hasValue();
 }
 
+bool ConstraintSystem::isConformanceUnavailable(ProtocolConformanceRef conformance,
+                                                ConstraintLocator *locator) const {
+  if (!conformance.isConcrete())
+    return false;
+
+  auto *concrete = conformance.getConcrete();
+  auto *rootConf = concrete->getRootConformance();
+  auto *ext = dyn_cast<ExtensionDecl>(rootConf->getDeclContext());
+  if (ext == nullptr)
+    return false;
+
+  auto &ctx = getASTContext();
+
+  // First check whether this declaration is universally unavailable.
+  if (ext->getAttrs().isUnavailable(ctx))
+    return true;
+
+  SourceLoc loc;
+
+  if (locator) {
+    if (auto anchor = locator->getAnchor())
+      loc = getLoc(anchor);
+  }
+
+  // If not, let's check contextual unavailability.
+  ExportContext where = ExportContext::forFunctionBody(DC, loc);
+  auto result = TypeChecker::checkConformanceAvailability(
+      rootConf, ext, where);
+  return result.hasValue();
+}
+
 /// If we aren't certain that we've emitted a diagnostic, emit a fallback
 /// diagnostic.
 void ConstraintSystem::maybeProduceFallbackDiagnostic(
diff --git a/lib/Sema/TypeCheckDeclPrimary.cpp b/lib/Sema/TypeCheckDeclPrimary.cpp
index 563fd68ebe964..bf074b989d5f4 100644
--- a/lib/Sema/TypeCheckDeclPrimary.cpp
+++ b/lib/Sema/TypeCheckDeclPrimary.cpp
@@ -2351,6 +2351,14 @@ class DeclChecker : public DeclVisitor<DeclChecker> {
         FunctionBodySkipping::All)
       return true;
 
+    // If we want all types (for LLDB), we can't skip functions with nested
+    // types. We could probably improve upon this and type-check only the
+    // nested types instead, for better performance.
+    if (AFD->hasNestedTypeDeclarations() &&
+        getASTContext().TypeCheckerOpts.SkipFunctionBodies ==
+            FunctionBodySkipping::NonInlinableWithoutTypes)
+      return false;
+
     // Only skip functions where their body won't be serialized
     return AFD->getResilienceExpansion() != ResilienceExpansion::Minimal;
   }
diff --git a/lib/Sema/TypeCheckType.cpp b/lib/Sema/TypeCheckType.cpp
index d45f3e12693f2..8c34a1b08bc2c 100644
--- a/lib/Sema/TypeCheckType.cpp
+++ b/lib/Sema/TypeCheckType.cpp
@@ -1706,11 +1706,14 @@ namespace {
   public:
     /// Construct a never-null Type. If \p Ty is null, a fatal error is thrown.
     NeverNullType(Type Ty) : WrappedTy(Ty) {
-      if (Ty.isNull()) {
+      if (WrappedTy.isNull()) {
        llvm::report_fatal_error("Resolved to null type!");
       }
     }
 
+    /// Construct a never-null Type. If \p TyB is null, a fatal error is thrown.
+ NeverNullType(TypeBase *TyB) : NeverNullType(Type(TyB)) {} + operator Type() const { return WrappedTy; } Type get() const { return WrappedTy; } @@ -1755,24 +1758,24 @@ namespace { return diags.diagnose(std::forward(Args)...); } - Type resolveAttributedType(AttributedTypeRepr *repr, - TypeResolutionOptions options); - Type resolveAttributedType(TypeAttributes &attrs, TypeRepr *repr, - TypeResolutionOptions options); - Type resolveASTFunctionType(FunctionTypeRepr *repr, - TypeResolutionOptions options, - AnyFunctionType::Representation representation - = AnyFunctionType::Representation::Swift, - bool noescape = false, - const clang::Type *parsedClangFunctionType - = nullptr, - DifferentiabilityKind diffKind - = DifferentiabilityKind::NonDifferentiable); + NeverNullType resolveAttributedType(AttributedTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveAttributedType(TypeAttributes &attrs, TypeRepr *repr, + TypeResolutionOptions options); + NeverNullType + resolveASTFunctionType(FunctionTypeRepr *repr, + TypeResolutionOptions options, + AnyFunctionType::Representation representation = + AnyFunctionType::Representation::Swift, + bool noescape = false, + const clang::Type *parsedClangFunctionType = nullptr, + DifferentiabilityKind diffKind = + DifferentiabilityKind::NonDifferentiable); SmallVector resolveASTFunctionTypeParams( TupleTypeRepr *inputRepr, TypeResolutionOptions options, bool requiresMappingOut, DifferentiabilityKind diffKind); - Type resolveSILFunctionType( + NeverNullType resolveSILFunctionType( FunctionTypeRepr *repr, TypeResolutionOptions options, SILCoroutineKind coroutineKind = SILCoroutineKind::None, SILFunctionType::ExtInfoBuilder extInfoBuilder = @@ -1791,40 +1794,40 @@ namespace { SmallVectorImpl &yields, SmallVectorImpl &results, Optional &errorResult); - Type resolveIdentifierType(IdentTypeRepr *IdType, - TypeResolutionOptions options); - Type resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, - TypeResolutionOptions options); - Type resolveArrayType(ArrayTypeRepr *repr, - TypeResolutionOptions options); - Type resolveDictionaryType(DictionaryTypeRepr *repr, - TypeResolutionOptions options); - Type resolveOptionalType(OptionalTypeRepr *repr, - TypeResolutionOptions options); - Type resolveImplicitlyUnwrappedOptionalType(ImplicitlyUnwrappedOptionalTypeRepr *repr, - TypeResolutionOptions options, - bool isDirect); - Type resolveTupleType(TupleTypeRepr *repr, - TypeResolutionOptions options); - Type resolveCompositionType(CompositionTypeRepr *repr, - TypeResolutionOptions options); - Type resolveMetatypeType(MetatypeTypeRepr *repr, - TypeResolutionOptions options); - Type resolveProtocolType(ProtocolTypeRepr *repr, - TypeResolutionOptions options); - Type resolveSILBoxType(SILBoxTypeRepr *repr, - TypeResolutionOptions options); - - Type buildMetatypeType(MetatypeTypeRepr *repr, - Type instanceType, - Optional storedRepr); - Type buildProtocolType(ProtocolTypeRepr *repr, - Type instanceType, - Optional storedRepr); - - Type resolveOpaqueReturnType(TypeRepr *repr, StringRef mangledName, - unsigned ordinal, - TypeResolutionOptions options); + NeverNullType resolveIdentifierType(IdentTypeRepr *IdType, + TypeResolutionOptions options); + NeverNullType resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveArrayType(ArrayTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveDictionaryType(DictionaryTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType 
resolveOptionalType(OptionalTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveImplicitlyUnwrappedOptionalType( + ImplicitlyUnwrappedOptionalTypeRepr *repr, + TypeResolutionOptions options, bool isDirect); + NeverNullType resolveTupleType(TupleTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveCompositionType(CompositionTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveMetatypeType(MetatypeTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveProtocolType(ProtocolTypeRepr *repr, + TypeResolutionOptions options); + NeverNullType resolveSILBoxType(SILBoxTypeRepr *repr, + TypeResolutionOptions options); + + NeverNullType + buildMetatypeType(MetatypeTypeRepr *repr, Type instanceType, + Optional storedRepr); + NeverNullType + buildProtocolType(ProtocolTypeRepr *repr, Type instanceType, + Optional storedRepr); + + NeverNullType resolveOpaqueReturnType(TypeRepr *repr, StringRef mangledName, + unsigned ordinal, + TypeResolutionOptions options); /// Returns true if the given type conforms to `Differentiable` in the /// module of `DC`. If `tangentVectorEqualsSelf` is true, returns true iff @@ -1920,7 +1923,7 @@ NeverNullType TypeResolver::resolveType(TypeRepr *repr, // Default non-escaping for closure parameters auto result = resolveASTFunctionType(cast(repr), options); - if (result && result->is()) + if (result->is()) return applyNonEscapingIfNecessary(result, options); return result; } @@ -1992,8 +1995,9 @@ static Type rebuildWithDynamicSelf(ASTContext &Context, Type ty) { } } -Type TypeResolver::resolveAttributedType(AttributedTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveAttributedType(AttributedTypeRepr *repr, + TypeResolutionOptions options) { // Copy the attributes, since we're about to start hacking on them. TypeAttributes attrs = repr->getAttrs(); assert(!attrs.empty()); @@ -2001,9 +2005,9 @@ Type TypeResolver::resolveAttributedType(AttributedTypeRepr *repr, return resolveAttributedType(attrs, repr->getTypeRepr(), options); } -Type TypeResolver::resolveAttributedType(TypeAttributes &attrs, - TypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveAttributedType(TypeAttributes &attrs, TypeRepr *repr, + TypeResolutionOptions options) { // Convenience to grab the source range of a type attribute. auto getTypeAttrRangeWithAt = [](ASTContext &ctx, SourceLoc attrLoc) { return SourceRange(attrLoc, attrLoc.getAdvancedLoc(1)); @@ -2627,10 +2631,10 @@ TypeResolver::resolveASTFunctionTypeParams(TupleTypeRepr *inputRepr, return elements; } -Type TypeResolver::resolveOpaqueReturnType(TypeRepr *repr, - StringRef mangledName, - unsigned ordinal, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveOpaqueReturnType(TypeRepr *repr, StringRef mangledName, + unsigned ordinal, + TypeResolutionOptions options) { // The type repr should be a generic identifier type. We don't really use // the identifier for anything, but we do resolve the generic arguments // to instantiate the possibly-generic opaque type. 
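For orientation, the opaque result types that resolveOpaqueReturnType reconstitutes from a mangled name are the ones introduced by `some` at the source level; a minimal illustrative example (not from this patch):

```swift
// The underlying type of `some Sequence` is hidden behind an opaque
// archetype; the resolver identifies it by the mangled name of the defining
// declaration and resolves any generic arguments from the enclosing context.
func evens(upTo n: Int) -> some Sequence {
  return stride(from: 0, to: n, by: 2)
}
```

The ordinal parameter distinguishes between a declaration's opaque types; this example has just one.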
@@ -2669,7 +2673,7 @@ Type TypeResolver::resolveOpaqueReturnType(TypeRepr *repr, return ty; } -Type TypeResolver::resolveASTFunctionType( +NeverNullType TypeResolver::resolveASTFunctionType( FunctionTypeRepr *repr, TypeResolutionOptions parentOptions, AnyFunctionType::Representation representation, bool noescape, const clang::Type *parsedClangFunctionType, @@ -2804,8 +2808,8 @@ bool TypeResolver::isDifferentiable(Type type, bool tangentVectorEqualsSelf) { return type->getCanonicalType() == tanSpace->getCanonicalType(); } -Type TypeResolver::resolveSILBoxType(SILBoxTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveSILBoxType(SILBoxTypeRepr *repr, + TypeResolutionOptions options) { // Resolve the field types. SmallVector fields; { @@ -2869,7 +2873,7 @@ Type TypeResolver::resolveSILBoxType(SILBoxTypeRepr *repr, return SILBoxType::get(getASTContext(), layout, subMap); } -Type TypeResolver::resolveSILFunctionType( +NeverNullType TypeResolver::resolveSILFunctionType( FunctionTypeRepr *repr, TypeResolutionOptions options, SILCoroutineKind coroutineKind, SILFunctionType::ExtInfoBuilder extInfoBuilder, ParameterConvention callee, @@ -3284,8 +3288,9 @@ bool TypeResolver::resolveSILResults(TypeRepr *repr, yields, ordinaryResults, errorResult); } -Type TypeResolver::resolveIdentifierType(IdentTypeRepr *IdType, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveIdentifierType(IdentTypeRepr *IdType, + TypeResolutionOptions options) { auto ComponentRange = IdType->getComponentRange(); auto Components = llvm::makeArrayRef(ComponentRange.begin(), ComponentRange.end()); @@ -3319,8 +3324,9 @@ Type TypeResolver::resolveIdentifierType(IdentTypeRepr *IdType, return result; } -Type TypeResolver::resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, + TypeResolutionOptions options) { // inout is only valid for (non-Subscript and non-EnumCaseDecl) // function parameters. 
if (!options.is(TypeResolverContext::FunctionInput) || @@ -3361,9 +3367,8 @@ Type TypeResolver::resolveSpecifierTypeRepr(SpecifierTypeRepr *repr, return resolveType(repr->getBase(), options); } - -Type TypeResolver::resolveArrayType(ArrayTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveArrayType(ArrayTypeRepr *repr, + TypeResolutionOptions options) { auto baseTy = resolveType(repr->getBase(), options.withoutContext()); if (baseTy->hasError()) { return ErrorType::get(getASTContext()); @@ -3377,8 +3382,9 @@ Type TypeResolver::resolveArrayType(ArrayTypeRepr *repr, return sliceTy; } -Type TypeResolver::resolveDictionaryType(DictionaryTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveDictionaryType(DictionaryTypeRepr *repr, + TypeResolutionOptions options) { options = adjustOptionsForGenericArgs(options); auto keyTy = resolveType(repr->getKey(), options.withoutContext()); @@ -3407,8 +3413,8 @@ Type TypeResolver::resolveDictionaryType(DictionaryTypeRepr *repr, return DictionaryType::get(keyTy, valueTy); } -Type TypeResolver::resolveOptionalType(OptionalTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveOptionalType(OptionalTypeRepr *repr, + TypeResolutionOptions options) { TypeResolutionOptions elementOptions = options.withoutContext(true); elementOptions.setContext(TypeResolverContext::ImmediateOptionalTypeArgument); @@ -3426,10 +3432,9 @@ Type TypeResolver::resolveOptionalType(OptionalTypeRepr *repr, return optionalTy; } -Type TypeResolver::resolveImplicitlyUnwrappedOptionalType( - ImplicitlyUnwrappedOptionalTypeRepr *repr, - TypeResolutionOptions options, - bool isDirect) { +NeverNullType TypeResolver::resolveImplicitlyUnwrappedOptionalType( + ImplicitlyUnwrappedOptionalTypeRepr *repr, TypeResolutionOptions options, + bool isDirect) { TypeResolutionFlags allowIUO = TypeResolutionFlags::SILType; bool doDiag = false; @@ -3497,8 +3502,8 @@ Type TypeResolver::resolveImplicitlyUnwrappedOptionalType( return uncheckedOptionalTy; } -Type TypeResolver::resolveTupleType(TupleTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveTupleType(TupleTypeRepr *repr, + TypeResolutionOptions options) { SmallVector elements; elements.reserve(repr->getNumElements()); @@ -3565,8 +3570,9 @@ Type TypeResolver::resolveTupleType(TupleTypeRepr *repr, return TupleType::get(elements, getASTContext()); } -Type TypeResolver::resolveCompositionType(CompositionTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType +TypeResolver::resolveCompositionType(CompositionTypeRepr *repr, + TypeResolutionOptions options) { // Note that the superclass type will appear as part of one of the // types in 'Members', so it's not used when constructing the @@ -3633,8 +3639,8 @@ Type TypeResolver::resolveCompositionType(CompositionTypeRepr *repr, /*HasExplicitAnyObject=*/false); } -Type TypeResolver::resolveMetatypeType(MetatypeTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveMetatypeType(MetatypeTypeRepr *repr, + TypeResolutionOptions options) { // The instance type of a metatype is always abstract, not SIL-lowered. 
auto ty = resolveType(repr->getBase(), options.withoutContext()); if (ty->hasError()) { @@ -3654,10 +3660,9 @@ Type TypeResolver::resolveMetatypeType(MetatypeTypeRepr *repr, return buildMetatypeType(repr, ty, storedRepr); } -Type TypeResolver::buildMetatypeType( - MetatypeTypeRepr *repr, - Type instanceType, - Optional storedRepr) { +NeverNullType +TypeResolver::buildMetatypeType(MetatypeTypeRepr *repr, Type instanceType, + Optional storedRepr) { if (instanceType->isAnyExistentialType()) { // TODO: diagnose invalid representations? return ExistentialMetatypeType::get(instanceType, storedRepr); @@ -3666,8 +3671,8 @@ Type TypeResolver::buildMetatypeType( } } -Type TypeResolver::resolveProtocolType(ProtocolTypeRepr *repr, - TypeResolutionOptions options) { +NeverNullType TypeResolver::resolveProtocolType(ProtocolTypeRepr *repr, + TypeResolutionOptions options) { // The instance type of a metatype is always abstract, not SIL-lowered. auto ty = resolveType(repr->getBase(), options.withoutContext()); if (ty->hasError()) { @@ -3687,10 +3692,9 @@ Type TypeResolver::resolveProtocolType(ProtocolTypeRepr *repr, return buildProtocolType(repr, ty, storedRepr); } -Type TypeResolver::buildProtocolType( - ProtocolTypeRepr *repr, - Type instanceType, - Optional storedRepr) { +NeverNullType +TypeResolver::buildProtocolType(ProtocolTypeRepr *repr, Type instanceType, + Optional storedRepr) { if (!instanceType->isAnyExistentialType()) { diagnose(repr->getProtocolLoc(), diag::dot_protocol_on_non_existential, instanceType); diff --git a/lib/Serialization/DeserializeSIL.cpp b/lib/Serialization/DeserializeSIL.cpp index 991d2ab3da5f2..c29900110eae2 100644 --- a/lib/Serialization/DeserializeSIL.cpp +++ b/lib/Serialization/DeserializeSIL.cpp @@ -277,9 +277,10 @@ SILValue SILDeserializer::getLocalValue(ValueID Id, SILType Type) { // The first two IDs are special undefined values. if (Id == 0) - return SILUndef::get(Type, SILMod, OwnershipKind::None); - else if (Id == 1) - return SILUndef::get(Type, SILMod, OwnershipKind::Owned); + return SILUndef::get(Type, SILMod); + assert(Id != 1 && "This used to be for SILUndef with OwnershipKind::Owned... " + "but we don't support that anymore. Make sure no one " + "changes that without updating this code if needed"); // Check to see if this is already defined. 
ValueBase *Entry = LocalValues.lookup(Id); diff --git a/lib/TBDGen/TBDGen.cpp b/lib/TBDGen/TBDGen.cpp index fcbc6e270e036..816af58f6e579 100644 --- a/lib/TBDGen/TBDGen.cpp +++ b/lib/TBDGen/TBDGen.cpp @@ -720,6 +720,10 @@ void TBDGenVisitor::visitAbstractFunctionDecl(AbstractFunctionDecl *AFD) { AFD->getGenericSignature())); visitDefaultArguments(AFD, AFD->getParameters()); + + if (AFD->isAsyncContext()) { + addSymbol(LinkEntity::forAsyncFunctionPointer(AFD)); + } } void TBDGenVisitor::visitFuncDecl(FuncDecl *FD) { diff --git a/localization/CMakeLists.txt b/localization/CMakeLists.txt index 221fb264ee62d..07a3585a66c55 100644 --- a/localization/CMakeLists.txt +++ b/localization/CMakeLists.txt @@ -1,6 +1,8 @@ -add_custom_target(diagnostic-database) +set(diagnostic_witness "${CMAKE_BINARY_DIR}/share/swift/diagnostics/generated") -add_custom_command(TARGET diagnostic-database +add_custom_command( + OUTPUT + ${diagnostic_witness} COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/diagnostics/ ${CMAKE_BINARY_DIR}/share/swift/diagnostics/ COMMAND @@ -10,11 +12,17 @@ add_custom_command(TARGET diagnostic-database "${SWIFT_NATIVE_SWIFT_TOOLS_PATH}/swift-serialize-diagnostics" --input-file-path ${CMAKE_BINARY_DIR}/share/swift/diagnostics/en.yaml --output-directory ${CMAKE_BINARY_DIR}/share/swift/diagnostics/ + COMMAND + ${CMAKE_COMMAND} -E touch ${diagnostic_witness} + DEPENDS + swift-def-to-yaml-converter + swift-serialize-diagnostics + # Add files in diagnostics subdirectory when they're created ) +add_custom_target(diagnostic-database DEPENDS ${diagnostic_witness}) + add_dependencies(swift-frontend diagnostic-database) -add_dependencies(diagnostic-database swift-serialize-diagnostics) -add_dependencies(diagnostic-database swift-def-to-yaml-converter) swift_install_in_component( DIRECTORY ${CMAKE_BINARY_DIR}/share/swift/diagnostics/ diff --git a/stdlib/public/Concurrency/PartialAsyncTask.swift b/stdlib/public/Concurrency/PartialAsyncTask.swift index dec49a85eda83..618ec38ef9463 100644 --- a/stdlib/public/Concurrency/PartialAsyncTask.swift +++ b/stdlib/public/Concurrency/PartialAsyncTask.swift @@ -24,15 +24,46 @@ public struct PartialAsyncTask { public struct UnsafeContinuation { private var context: UnsafeRawPointer - public func resume(_: T) { } + public func resume(_: __owned T) { } } @frozen public struct UnsafeThrowingContinuation { private var context: UnsafeRawPointer - public func resume(_: T) { } - public func fail(_: Error) { } + public func resume(_: __owned T) { } + public func fail(_: __owned Error) { } } +#if _runtime(_ObjC) +// Intrinsics used by SILGen to resume or fail continuations +// for +@_alwaysEmitIntoClient +@usableFromInline +internal func _resumeUnsafeContinuation( + _ continuation: UnsafeContinuation, + _ value: __owned T +) { + continuation.resume(value) +} + +@_alwaysEmitIntoClient +@usableFromInline +internal func _resumeUnsafeThrowingContinuation( + _ continuation: UnsafeThrowingContinuation, + _ value: __owned T +) { + continuation.resume(value) +} + +@_alwaysEmitIntoClient +@usableFromInline +internal func _resumeUnsafeThrowingContinuationWithError( + _ continuation: UnsafeThrowingContinuation, + _ error: __owned Error +) { + continuation.fail(error) +} + +#endif diff --git a/stdlib/public/Concurrency/Task.cpp b/stdlib/public/Concurrency/Task.cpp index c163f9791fb47..c2e5464204df3 100644 --- a/stdlib/public/Concurrency/Task.cpp +++ b/stdlib/public/Concurrency/Task.cpp @@ -21,12 +21,105 @@ #include "TaskPrivate.h" using namespace swift; +using 
FutureFragment = AsyncTask::FutureFragment;
+
+void FutureFragment::destroy() {
+  auto queueHead = waitQueue.load(std::memory_order_acquire);
+  switch (queueHead.getStatus()) {
+  case Status::Executing:
+    assert(false && "destroying a task that never completed");
+
+  case Status::Success:
+    resultType->vw_destroy(getStoragePtr());
+    break;
+
+  case Status::Error:
+    swift_unknownObjectRelease(reinterpret_cast<OpaqueValue *>(getError()));
+    break;
+  }
+}
+
+FutureFragment::Status AsyncTask::waitFuture(AsyncTask *waitingTask) {
+  using Status = FutureFragment::Status;
+  using WaitQueueItem = FutureFragment::WaitQueueItem;
+
+  assert(isFuture());
+  auto fragment = futureFragment();
+
+  auto queueHead = fragment->waitQueue.load(std::memory_order_acquire);
+  while (true) {
+    switch (queueHead.getStatus()) {
+    case Status::Error:
+    case Status::Success:
+      // The task is done; we don't need to wait.
+      return queueHead.getStatus();
+
+    case Status::Executing:
+      // Task is not complete. We'll need to add ourselves to the queue.
+      break;
+    }
+
+    // Put the waiting task at the beginning of the wait queue.
+    waitingTask->getNextWaitingTask() = queueHead.getTask();
+    auto newQueueHead = WaitQueueItem::get(Status::Executing, waitingTask);
+    if (fragment->waitQueue.compare_exchange_weak(
+            queueHead, newQueueHead, std::memory_order_release,
+            std::memory_order_acquire)) {
+      // Escalate the priority of this task based on the priority
+      // of the waiting task.
+      swift_task_escalate(this, waitingTask->Flags.getPriority());
+      return FutureFragment::Status::Executing;
+    }
+  }
+}
+
+void AsyncTask::completeFuture(AsyncContext *context, ExecutorRef executor) {
+  using Status = FutureFragment::Status;
+  using WaitQueueItem = FutureFragment::WaitQueueItem;
+
+  assert(isFuture());
+  auto fragment = futureFragment();
+
+  // If an error was thrown, save it in the future fragment.
+  auto futureContext = static_cast<FutureAsyncContext *>(context);
+  bool hadErrorResult = false;
+  if (auto errorObject = futureContext->errorResult) {
+    fragment->getError() = errorObject;
+    hadErrorResult = true;
+  }
+
+  // Update the status to signal completion.
+  auto newQueueHead = WaitQueueItem::get(
+      hadErrorResult ? Status::Error : Status::Success,
+      nullptr
+  );
+  auto queueHead = fragment->waitQueue.exchange(
+      newQueueHead, std::memory_order_acquire);
+  assert(queueHead.getStatus() == Status::Executing);
+
+  // Schedule every waiting task on the executor.
+  auto waitingTask = queueHead.getTask();
+  while (waitingTask) {
+    // Find the next waiting task.
+    auto nextWaitingTask = waitingTask->getNextWaitingTask();
+
+    // TODO: schedule this task on the executor rather than running it
+    // directly.
+    waitingTask->run(executor);
+
+    // Move to the next task.
+    waitingTask = nextWaitingTask;
+  }
+}
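A rough Swift model of the state machine that waitFuture and completeFuture implement may help; this is an illustration only (the real runtime packs the status and the head of the waiter list into a single atomic word and publishes it with compare-exchange, as above):

```swift
enum FutureState {
  case executing(waiters: [UnsafeRawPointer]) // tasks suspended on the result
  case success                                // result stored in the fragment
  case error                                  // error retained by the fragment
}

/// Model of waitFuture: returns true if the waiter must suspend.
func wait(_ state: inout FutureState, adding waiter: UnsafeRawPointer) -> Bool {
  switch state {
  case .success, .error:
    return false // already complete; read the result immediately
  case .executing(var waiters):
    waiters.append(waiter) // done with a CAS retry loop in the real runtime
    state = .executing(waiters: waiters)
    return true
  }
}

/// Model of completeFuture: transitions exactly once and drains the waiters.
func complete(_ state: inout FutureState, failed: Bool) -> [UnsafeRawPointer] {
  guard case .executing(let waiters) = state else {
    preconditionFailure("future completed twice")
  }
  state = failed ? .error : .success
  return waiters // each waiter is then scheduled on the executor
}
```

The model keeps both operations racing on one value, which is why the real code re-reads the queue head and retries whenever the compare-exchange fails.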
 
 SWIFT_CC(swift)
-static void destroySimpleTask(SWIFT_CONTEXT HeapObject *_obj) {
-  auto obj = static_cast<AsyncTask*>(_obj);
+static void destroyTask(SWIFT_CONTEXT HeapObject *obj) {
+  auto task = static_cast<AsyncTask*>(obj);
 
-  assert(!obj->isFuture());
+  // For a future, destroy the result.
+  if (task->isFuture()) {
+    task->futureFragment()->destroy();
+  }
 
   // The task execution itself should always hold a reference to it, so
   // if we get here, we know the task has finished running, which means
@@ -34,27 +127,26 @@ static void destroySimpleTask(SWIFT_CONTEXT HeapObject *_obj) {
   // the task-local allocator. There's actually nothing else to clean up
   // here.
 
-  free(obj);
+  free(task);
 }
 
-/// Heap metadata for a simple asynchronous task that does not
-/// include a future.
-static FullMetadata<HeapMetadata> simpleTaskHeapMetadata = {
+/// Heap metadata for an asynchronous task.
+static FullMetadata<HeapMetadata> taskHeapMetadata = {
   {
     {
-      &destroySimpleTask
+      &destroyTask
     },
     {
       /*value witness table*/ nullptr
     }
   },
   {
-    MetadataKind::SimpleTask
+    MetadataKind::Task
   }
 };
 
 /// The function that we put in the context of a simple task
-/// (one with no future) to handle the final return.
+/// to handle the final return.
 SWIFT_CC(swift)
 static void completeTask(AsyncTask *task, ExecutorRef executor,
                          AsyncContext *context) {
@@ -62,10 +154,14 @@ static void completeTask(AsyncTask *task, ExecutorRef executor,
   // to wait for the object to be destroyed.
   _swift_task_alloc_destroy(task);
 
+  // Complete the future.
+  if (task->isFuture()) {
+    task->completeFuture(context, executor);
+  }
+
   // TODO: set something in the status?
   // TODO: notify the parent somehow?
   // TODO: remove this task from the child-task chain?
-  // TODO: notify tasks waiting on the future?
 
   // Release the task, balancing the retain that a running task
   // has on itself.
@@ -83,18 +179,44 @@ AsyncTaskAndContext
 swift::swift_task_create_f(JobFlags flags, AsyncTask *parent,
                            AsyncFunctionType *function,
                            size_t initialContextSize) {
-  assert(!flags.task_isFuture() && "function doesn't support creating futures");
+  return swift_task_create_future_f(
+      flags, parent, nullptr, function, initialContextSize);
+}
+
+AsyncTaskAndContext swift::swift_task_create_future(
+    JobFlags flags, AsyncTask *parent, const Metadata *futureResultType,
+    const AsyncFunctionPointer *function) {
+  return swift_task_create_future_f(
+      flags, parent, futureResultType, function->Function.get(),
+      function->ExpectedContextSize);
+}
+
+AsyncTaskAndContext swift::swift_task_create_future_f(
+    JobFlags flags, AsyncTask *parent, const Metadata *futureResultType,
+    AsyncFunctionType *function, size_t initialContextSize) {
+  assert((futureResultType != nullptr) == flags.task_isFuture());
+  assert(!flags.task_isFuture() ||
+         initialContextSize >= sizeof(FutureAsyncContext));
   assert((parent != nullptr) == flags.task_isChildTask());
 
   // Figure out the size of the header.
   size_t headerSize = sizeof(AsyncTask);
   if (parent) headerSize += sizeof(AsyncTask::ChildFragment);
+  if (futureResultType) {
+    headerSize += FutureFragment::fragmentSize(futureResultType);
+  }
+
+  headerSize = llvm::alignTo(headerSize, llvm::Align(alignof(AsyncContext)));
 
   // Allocate the initial context together with the job.
   // This means that we never get rid of this allocation.
   size_t amountToAllocate = headerSize + initialContextSize;
-  assert(amountToAllocate % MaximumAlignment == 0);
+  // TODO: if this is necessary we need to teach LLVM lowering to request async
+  // context sizes that are multiples of that maximum alignment.
+  // For now disable this assert.
+  //   assert(amountToAllocate % MaximumAlignment == 0);
 
   void *allocation = malloc(amountToAllocate);
 
@@ -105,7 +227,7 @@ swift::swift_task_create_f(JobFlags flags, AsyncTask *parent,
   // Initialize the task so that resuming it will run the given
   // function on the initial context.
   AsyncTask *task =
-    new(allocation) AsyncTask(&simpleTaskHeapMetadata, flags,
+    new(allocation) AsyncTask(&taskHeapMetadata, flags,
                               function, initialContext);
 
   // Initialize the child fragment if applicable.
@@ -115,6 +237,18 @@ swift::swift_task_create_f(JobFlags flags, AsyncTask *parent,
     new (childFragment) AsyncTask::ChildFragment(parent);
   }
 
+  // Initialize the future fragment if applicable.
+ if (futureResultType) { + auto futureFragment = task->futureFragment(); + new (futureFragment) FutureFragment(futureResultType); + + // Set up the context for the future so there is no error, and a successful + // result will be written into the future fragment's storage. + auto futureContext = static_cast(initialContext); + futureContext->errorResult = nullptr; + futureContext->indirectResult = futureFragment->getStoragePtr(); + } + // Configure the initial context. // // FIXME: if we store a null pointer here using the standard ABI for @@ -132,3 +266,26 @@ swift::swift_task_create_f(JobFlags flags, AsyncTask *parent, return {task, initialContext}; } + +TaskFutureWaitResult +swift::swift_task_future_wait(AsyncTask *task, AsyncTask *waitingTask) { + assert(task->isFuture()); + switch (task->waitFuture(waitingTask)) { + case FutureFragment::Status::Executing: + return TaskFutureWaitResult{TaskFutureWaitResult::Waiting, nullptr}; + + case FutureFragment::Status::Success: + return TaskFutureWaitResult{ + TaskFutureWaitResult::Success, task->futureFragment()->getStoragePtr()}; + + case FutureFragment::Status::Error: + return TaskFutureWaitResult{ + TaskFutureWaitResult::Error, + reinterpret_cast(task->futureFragment()->getError())}; + } +} + +// TODO: Remove this hack. +void swift::swift_task_run(AsyncTask *taskToRun) { + taskToRun->run(ExecutorRef::noPreference()); +} diff --git a/stdlib/public/Concurrency/Task.swift b/stdlib/public/Concurrency/Task.swift index bd8f50c4e00fb..fc478b95dd3ac 100644 --- a/stdlib/public/Concurrency/Task.swift +++ b/stdlib/public/Concurrency/Task.swift @@ -306,3 +306,11 @@ extension Task { fatalError("\(#function) not implemented yet.") } } + +@_silgen_name("swift_task_run") +public func runTask(_ task: __owned Builtin.NativeObject) + +public func runAsync(_ asyncFun: @escaping () async -> ()) { + let childTask = Builtin.createAsyncTask(0, nil, asyncFun) + runTask(childTask.0) +} diff --git a/stdlib/public/core/Builtin.swift b/stdlib/public/core/Builtin.swift index e3e472cd299ff..006d2ba8c861f 100644 --- a/stdlib/public/core/Builtin.swift +++ b/stdlib/public/core/Builtin.swift @@ -346,10 +346,12 @@ internal func _class_getInstancePositiveExtentSize(_ theClass: AnyClass) -> Int } #if INTERNAL_CHECKS_ENABLED +@available(macOS 9999, iOS 9999, watchOS 9999, tvOS 9999, *) @usableFromInline @_silgen_name("_swift_isImmutableCOWBuffer") internal func _swift_isImmutableCOWBuffer(_ object: AnyObject) -> Bool +@available(macOS 9999, iOS 9999, watchOS 9999, tvOS 9999, *) @usableFromInline @_silgen_name("_swift_setImmutableCOWBuffer") internal func _swift_setImmutableCOWBuffer(_ object: AnyObject, _ immutable: Bool) -> Bool diff --git a/stdlib/public/core/KeyPath.swift b/stdlib/public/core/KeyPath.swift index 6df000d8f496b..7c4c0645793af 100644 --- a/stdlib/public/core/KeyPath.swift +++ b/stdlib/public/core/KeyPath.swift @@ -1920,6 +1920,12 @@ func _setAtWritableKeyPath( keyPath: WritableKeyPath, value: __owned Value ) { + if type(of: keyPath).kind == .reference { + return _setAtReferenceWritableKeyPath(root: root, + keyPath: _unsafeUncheckedDowncast(keyPath, + to: ReferenceWritableKeyPath.self), + value: value) + } // TODO: we should be able to do this more efficiently than projecting. 
let (addr, owner) = keyPath._projectMutableAddress(from: &root) addr.pointee = value diff --git a/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift b/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift index 892943d791bcc..826a3f8f9a944 100644 --- a/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift +++ b/test/AutoDiff/compiler_crashers_fixed/rdar71191415-nested-differentiation-of-extension-method-optimized.swift @@ -5,29 +5,29 @@ import _Differentiation protocol P { - @differentiable - func req(_ input: Float) -> Float + @differentiable + func req(_ input: Float) -> Float } extension P { - @differentiable - func foo(_ input: Float) -> Float { - return req(input) - } + @differentiable + func foo(_ input: Float) -> Float { + return req(input) + } } struct Dummy: P { - @differentiable - func req(_ input: Float) -> Float { - input - } + @differentiable + func req(_ input: Float) -> Float { + input + } } struct DummyComposition: P { - var layer = Dummy() + var layer = Dummy() - @differentiable - func req(_ input: Float) -> Float { - layer.foo(input) - } + @differentiable + func req(_ input: Float) -> Float { + layer.foo(input) + } } diff --git a/test/AutoDiff/compiler_crashers_fixed/rdar71319547-generated-decls-shall-not-be-resilient.swift b/test/AutoDiff/compiler_crashers_fixed/rdar71319547-generated-decls-shall-not-be-resilient.swift new file mode 100644 index 0000000000000..2b4eb4be88a2f --- /dev/null +++ b/test/AutoDiff/compiler_crashers_fixed/rdar71319547-generated-decls-shall-not-be-resilient.swift @@ -0,0 +1,39 @@ +// RUN: %target-build-swift -enable-library-evolution %s +// RUN: %target-build-swift -O -enable-library-evolution %s +// RUN: %target-build-swift -O -g -enable-library-evolution %s + +// rdar://71319547 + +import _Differentiation + + +// Assertion failed: (mainPullbackStruct->getType() == pbStructLoweredType), function run, file swift/lib/SILOptimizer/Differentiation/PullbackCloner.cpp, line 1899. +// Stack dump: +// 1. Swift version 5.3-dev (LLVM 618cb952e0f199a, Swift d74c261f098665c) +// 2. While evaluating request ExecuteSILPipelineRequest(Run pipelines { Mandatory Diagnostic Passes + Enabling Optimization Passes } on SIL for main.main) +// 3. While running pass #17 SILModuleTransform "Differentiation". +// 4. While processing // differentiability witness for foo(_:) +// sil_differentiability_witness [serialized] [parameters 0] [results 0] @$s4main3fooyS2fF : $@convention(thin) (Float) -> Float { +// } +@differentiable(wrt: x) +public func i_have_a_pullback_struct(_ x: Float) -> Float { + return x +} + + +// Assertion failed: (v->getType().isObject()), function operator(), file swift/lib/SIL/Utils/ValueUtils.cpp, line 22. +// Stack dump: +// 1. Swift version 5.3-dev (LLVM 618cb952e0f199a, Swift d74c261f098665c) +// 2. While evaluating request ExecuteSILPipelineRequest(Run pipelines { Mandatory Diagnostic Passes + Enabling Optimization Passes } on SIL for main.main) +// 3. While running pass #24 SILModuleTransform "Differentiation". +// 4. 
While processing // differentiability witness for i_have_a_branching_trace_enum(_:) +// sil_differentiability_witness [serialized] [parameters 0] [results 0] @$s4main29i_have_a_branching_trace_enumyS2fF : $@convention(thin) (Float) -> Float { +// } +@differentiable(wrt: x) +public func i_have_a_branching_trace_enum(_ x: Float) -> Float { + if true { + return x + } else { + return x.squareRoot() + } +} diff --git a/test/Frontend/skip-function-bodies.swift b/test/Frontend/skip-function-bodies.swift index c8b1ef0ae7e78..b2711efea575c 100644 --- a/test/Frontend/skip-function-bodies.swift +++ b/test/Frontend/skip-function-bodies.swift @@ -3,6 +3,8 @@ // Check -emit-ir and -c are invalid when skipping function bodies // RUN: not %target-swift-frontend -emit-ir %s -experimental-skip-non-inlinable-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR // RUN: not %target-swift-frontend -c %s -experimental-skip-non-inlinable-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR +// RUN: not %target-swift-frontend -emit-ir %s -experimental-skip-non-inlinable-function-bodies-without-types %s 2>&1 | %FileCheck %s --check-prefix ERROR +// RUN: not %target-swift-frontend -c %s -experimental-skip-non-inlinable-function-bodies-without-types %s 2>&1 | %FileCheck %s --check-prefix ERROR // RUN: not %target-swift-frontend -emit-ir %s -experimental-skip-all-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR // RUN: not %target-swift-frontend -c %s -experimental-skip-all-function-bodies %s 2>&1 | %FileCheck %s --check-prefix ERROR // ERROR: -experimental-skip-*-function-bodies do not support emitting IR @@ -13,21 +15,23 @@ // WARNING: module 'SwiftOnoneSupport' cannot be built with -experimental-skip-non-inlinable-function-bodies; this option has been automatically disabled // Check skipped bodies are neither typechecked nor SILgen'd -// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-non-inlinable-function-bodies -debug-forbid-typecheck-prefix INLINENOTYPECHECK %s -o %t/Skip.noninlinable.sil -// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -O -experimental-skip-all-function-bodies -debug-forbid-typecheck-prefix ALLNOTYPECHECK %s -o %t/Skip.all.sil -// %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY < %t/Skip.noninlinable.sil -// %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY < %t/Skip.all.sil +// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-non-inlinable-function-bodies -debug-forbid-typecheck-prefix NEVERTYPECHECK -debug-forbid-typecheck-prefix INLINENOTYPECHECK %s -o %t/Skip.noninlinable.sil +// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-non-inlinable-function-bodies-without-types -debug-forbid-typecheck-prefix NEVERTYPECHECK -debug-forbid-typecheck-prefix TYPESNOTYPECHECK %s -o %t/Skip.withouttypes.sil +// RUN: %target-swift-frontend -emit-sil -emit-sorted-sil -experimental-skip-all-function-bodies -debug-forbid-typecheck-prefix NEVERTYPECHECK -debug-forbid-typecheck-prefix ALLNOTYPECHECK %s -o %t/Skip.all.sil +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY,CHECK-NONINLINE-SIL < %t/Skip.noninlinable.sil +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-WITHOUTTYPES-ONLY,CHECK-NONINLINE-SIL < %t/Skip.withouttypes.sil +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY < %t/Skip.all.sil // Emit the module interface and check it against the same set of strings. 
// RUN: %target-swift-frontend -typecheck %s -enable-library-evolution -emit-module-interface-path %t/Skip.noninlinable.swiftinterface -experimental-skip-non-inlinable-function-bodies -// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY < %t/Skip.noninlinable.swiftinterface +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY,CHECK-NONINLINE-TEXTUAL < %t/Skip.noninlinable.swiftinterface // RUN: %target-swift-frontend -typecheck %s -enable-library-evolution -emit-module-interface-path %t/Skip.all.swiftinterface -experimental-skip-all-function-bodies -// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY < %t/Skip.all.swiftinterface +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-ALL-ONLY,CHECK-NONINLINE-TEXTUAL < %t/Skip.all.swiftinterface // Emit the module interface normally, it should be the same as when skipping // non-inlinable. // RUN: %target-swift-frontend -typecheck %s -enable-library-evolution -emit-module-interface-path %t/Skip.swiftinterface -// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY < %t/Skip.swiftinterface +// RUN: %FileCheck %s --check-prefixes CHECK,CHECK-NONINLINE-ONLY,CHECK-NONINLINE-TEXTUAL < %t/Skip.swiftinterface // RUN: diff -u %t/Skip.noninlinable.swiftinterface %t/Skip.swiftinterface @usableFromInline @@ -58,16 +62,14 @@ public class InlinableDeinit { @_fixed_layout public class InlineAlwaysDeinit { @inline(__always) deinit { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) deinit body") // CHECK-NOT: "@inline(__always) deinit body" } } public class NormalDeinit { deinit { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK = 1 + let NEVERTYPECHECK_local = 1 _blackHole("regular deinit body") // CHECK-NOT: "regular deinit body" } } @@ -80,52 +82,44 @@ public class NormalDeinit { } @inline(__always) public func inlineAlwaysFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) func body") // CHECK-NOT: "@inline(__always) func body" } func internalFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("internal func body") // CHECK-NOT: "internal func body" } public func publicFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public func body") // CHECK-NOT: "public func body" } private func privateFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private func body") // CHECK-NOT: "private func body" } @inline(__always) public func inlineAlwaysLocalTypeFunc() { - let ALLNOTYPECHECK_outerLocal = 1 - let INLINENOTYPECHECK_outerLocal = 1 + let NEVERTYPECHECK_outerLocal = 1 typealias InlineAlwaysLocalType = Int _blackHole("@inline(__always) func body with local type") // CHECK-NOT: "@inline(__always) func body with local type" func takesInlineAlwaysLocalType(_ x: InlineAlwaysLocalType) { - let ALLNOTYPECHECK_innerLocal = 1 - let INLINENOTYPECHECK_innerLocal = 1 + let NEVERTYPECHECK_innerLocal = 1 _blackHole("nested func body inside @inline(__always) func body taking local type") // CHECK-NOT: "nested func body inside @inline(__always) func body taking local type" } takesInlineAlwaysLocalType(0) } public func publicLocalTypeFunc() { - let ALLNOTYPECHECK_outerLocal = 1 - let INLINENOTYPECHECK_outerLocal = 1 + let NEVERTYPECHECK_outerLocal = 1 typealias LocalType = 
Int _blackHole("public func body with local type") // CHECK-NOT: "public func body with local type" func takesLocalType(_ x: LocalType) { - let ALLNOTYPECHECK_innerLocal = 1 - let INLINENOTYPECHECK_innerLocal = 1 + let NEVERTYPECHECK_innerLocal = 1 _blackHole("nested func body inside public func body taking local type") // CHECK-NOT: "nested func body inside public func body taking local type" } takesLocalType(0) @@ -188,6 +182,57 @@ public func inlinableNestedLocalTypeFunc() { nestedFunc() } +public func funcWithEnum() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with enum body") + // CHECK-WITHOUTTYPES-ONLY: "func with enum body" + // CHECK-NONINLINE-ONLY-NOT: "func with enum body" + // CHECK-ALL-ONLY-NOT: "func with enum body" + enum E {} +} + +public func funcWithClass() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with class body") + // CHECK-WITHOUTTYPES-ONLY: "func with class body" + // CHECK-NONINLINE-ONLY-NOT: "func with class body" + // CHECK-ALL-ONLY-NOT: "func with class body" + class C {} +} + +public func funcWithStruct() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with struct body") + // CHECK-WITHOUTTYPES-ONLY: "func with struct body" + // CHECK-NONINLINE-ONLY-NOT: "func with struct body" + // CHECK-ALL-ONLY-NOT: "func with struct body" + struct S {} +} + +public func funcWithNestedFuncs() { + let INLINENOTYPECHECK_local = 1 + let ALLNOTYPECHECK_local = 1 + _blackHole("func with nested funcs body") + // CHECK-WITHOUTTYPES-ONLY: "func with nested funcs body" + // CHECK-NONINLINE-ONLY-NOT: "func with nested funcs body" + // CHECK-ALL-ONLY-NOT: "func with nested funcs body" + + func bar() { + _blackHole("nested func body") + // CHECK-WITHOUTTYPES-ONLY: "nested func body" + // FIXME: We could skip this nested function. 
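The FIXME above marks the conservative corner of the new mode: once a body is kept because anything in it declares a type, every nested function in that body is type-checked too, even type-free ones. One plausible reason bodies containing local types cannot simply be dropped (my sketch under that assumption, not something stated in the patch) is that a local type remains observable outside the body that declares it:

```swift
// Invented example: a local type escaping as a metatype.
public func leaksLocalType() -> Any.Type {
    struct Hidden {
        var value = 0
    }
    // The metatype of the local struct escapes to callers, who may feed
    // it to reflection APIs; skipping this body wholesale would lose the
    // type, so only type-free bodies are safe to skip.
    return Hidden.self
}
```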
+ } + + func foo() { + _blackHole("nested func with type body") + // CHECK-WITHOUTTYPES-ONLY: "nested func with type body" + struct S {} + } +} + public struct Struct { @inlinable public var inlinableVar: Int { let ALLNOTYPECHECK_local = 1 @@ -206,8 +251,7 @@ public struct Struct { @inline(__always) public func inlineAlwaysFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) method body") // CHECK-NOT: "@inline(__always) method body" } @@ -236,13 +280,6 @@ public struct Struct { } } - public var didSetVar: Int = 1 { - didSet { - // Body typechecked regardless - _blackHole("didSet body") // CHECK-NOT: "didSet body" - } - } - @_transparent public func transparentFunc() { let ALLNOTYPECHECK_local = 1 @@ -252,20 +289,17 @@ public struct Struct { } func internalFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("internal method body") // CHECK-NOT: "internal method body" } public func publicFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public method body") // CHECK-NOT: "public method body" } private func privateFunc() { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private method body") // CHECK-NOT: "private method body" } @@ -276,6 +310,14 @@ public struct Struct { // CHECK-ALL-ONLY-NOT: "@_transparent init body" } + public var didSetVar: Int = 1 { + didSet { + // Body typechecked regardless + _blackHole("didSet body") // CHECK-NONINLINE-SIL: "didSet body" + // CHECK-NONINLINE-TEXTUAL-NOT: "didSet body" + } + } + @inlinable public init() { let ALLNOTYPECHECK_local = 1 _blackHole("@inlinable init body") @@ -284,26 +326,22 @@ public struct Struct { } @inline(__always) public init(a: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) init body") // CHECK-NOT: "@inline(__always) init body" } init(c: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("internal init body") // CHECK-NOT: "internal init body" } public init(d: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public init body") // CHECK-NOT: "public init body" } private init(e: Int) { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private init body") // CHECK-NOT: "private init body" } @@ -316,8 +354,7 @@ public struct Struct { } @inline(__always) public subscript(a: Int, b: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) subscript getter") // CHECK-NOT: "@inline(__always) subscript getter" return 0 } @@ -333,36 +370,31 @@ public struct Struct { } subscript(a: Int, b: Int, c: Int, d: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("internal subscript getter") // CHECK-NOT: "internal subscript getter" return 0 } public subscript(a: Int, b: Int, c: Int, d: Int, e: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public subscript getter") // CHECK-NOT: "public subscript getter" return 0 } private subscript(e: Int) -> Int { - let ALLNOTYPECHECK_local = 1 - let 
INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("private subscript getter") // CHECK-NOT: "private subscript getter" return 0 } @inline(__always) public var inlineAlwaysVar: Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) getter body") // CHECK-NOT: "@inline(__always) getter body" return 0 } public var publicVar: Int { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("public getter body") // CHECK-NOT: "public getter body" return 0 } @@ -370,8 +402,7 @@ public struct Struct { public var inlineAlwaysSetter: Int { get { 0 } @inline(__always) set { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("@inline(__always) setter body") // CHECK-NOT: "@inline(__always) setter body" } } @@ -379,8 +410,7 @@ public struct Struct { public var regularSetter: Int { get { 0 } set { - let ALLNOTYPECHECK_local = 1 - let INLINENOTYPECHECK_local = 1 + let NEVERTYPECHECK_local = 1 _blackHole("regular setter body") // CHECK-NOT: "regular setter body" } } diff --git a/test/IRGen/async/Inputs/class-1instance-void_to_void.swift b/test/IRGen/async/Inputs/class-1instance-void_to_void.swift new file mode 100644 index 0000000000000..c1316fcd7955a --- /dev/null +++ b/test/IRGen/async/Inputs/class-1instance-void_to_void.swift @@ -0,0 +1,8 @@ +import _Concurrency + +public class Clazz { + public init() {} + public func classinstanceVoidToVoid() async { + print(self) + } +} diff --git a/test/IRGen/async/builtins.sil b/test/IRGen/async/builtins.sil index 851845c10825e..6a6b82784a048 100644 --- a/test/IRGen/async/builtins.sil +++ b/test/IRGen/async/builtins.sil @@ -34,7 +34,7 @@ sil hidden [ossa] @launch_task : $@convention(method) @async (Int, Optional<Bui bb0(%0 : $Int, %1: @unowned $Optional<Builtin.RawPointer>, %2: @guaranteed $@async @callee_guaranteed () -> (@error Error)): %3 = begin_borrow %1 : $Optional<Builtin.RawPointer> // CHECK: call %swift.refcounted* @swift_retain(%swift.refcounted* returned [[FN_CONTEXT:%.*]]) - // CHECK: [[NEW_TASK_AND_CONTEXT:%.*]] = call swiftcc %swift.async_task_and_context @swift_task_create_f + // CHECK: [[NEW_TASK_AND_CONTEXT:%.*]] = call swiftcc %swift.async_task_and_context @swift_task_create( // CHECK-NEXT: [[NEW_CONTEXT_RAW:%.*]] = extractvalue %swift.async_task_and_context [[NEW_TASK_AND_CONTEXT]], 1 // CHECK-NEXT: [[NEW_CONTEXT:%.*]] = bitcast %swift.context* [[NEW_CONTEXT_RAW]] to // CHECK-NEXT: [[CONTEXT_INFO_LOC:%.*]] = getelementptr inbounds <{{.*}}>* [[NEW_CONTEXT]] @@ -44,4 +44,4 @@ bb0(%0 : $Int, %1: @unowned $Optional<Builtin.RawPointer>, %2: @guaranteed $@a destroy_value %20 : $(Builtin.NativeObject, Builtin.RawPointer) %21 = tuple () return %21 : $() -} \ No newline at end of file +} diff --git a/test/IRGen/async/partial_apply.sil b/test/IRGen/async/partial_apply.sil index dead2ef916796..27ad0e1f41cb2 100644 --- a/test/IRGen/async/partial_apply.sil +++ b/test/IRGen/async/partial_apply.sil @@ -406,7 +406,7 @@ bb0(%x : $*SwiftClassPair): sil public_external @use_closure2 : $@async @convention(thin) (@noescape @async @callee_guaranteed (Int) -> Int) -> () // CHECK-LABEL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @partial_apply_stack_callee_guaranteed_indirect_guaranteed_class_pair_param(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]+}} { -// CHECK-LABEL: define internal swiftcc void @"$s45indirect_guaranteed_captured_class_pair_paramTA.67"(%swift.task* {{%[0-9]+}}, %swift.executor*
{{%[0-9]+}}, %swift.context* {{%[0-9]+}}, %swift.refcounted* swiftself {{%[0-9]+}}) {{#[0-9]+}} { +// CHECK-LABEL: define internal swiftcc void @"$s45indirect_guaranteed_captured_class_pair_paramTA.70"(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}, %swift.refcounted* swiftself {{%[0-9]+}}) {{#[0-9]+}} { sil @partial_apply_stack_callee_guaranteed_indirect_guaranteed_class_pair_param : $@async @convention(thin) (@in_guaranteed SwiftClassPair) -> () { bb0(%x : $*SwiftClassPair): diff --git a/test/IRGen/async/partial_apply_forwarder.sil b/test/IRGen/async/partial_apply_forwarder.sil index 25fb5f64ba159..ebc995edd7d63 100644 --- a/test/IRGen/async/partial_apply_forwarder.sil +++ b/test/IRGen/async/partial_apply_forwarder.sil @@ -2,6 +2,7 @@ // RUN: %target-swift-frontend -enable-experimental-concurrency -disable-objc-interop -primary-file %s -emit-ir | %FileCheck %s -DINT=i%target-ptrsize --check-prefixes=CHECK,CHECK-native // REQUIRES: concurrency +// UNSUPPORTED: CPU=arm64e sil_stage canonical diff --git a/test/IRGen/async/run-call-classinstance-int64-to-void.sil b/test/IRGen/async/run-call-classinstance-int64-to-void.sil index bf7be5edc6803..e4b086c6b2001 100644 --- a/test/IRGen/async/run-call-classinstance-int64-to-void.sil +++ b/test/IRGen/async/run-call-classinstance-int64-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -29,6 +30,7 @@ class S { init() } +// CHECK-LL: @classinstanceSInt64ToVoidAD = // CHECK-LL: define hidden swiftcc void @classinstanceSInt64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]+}} { sil hidden @classinstanceSInt64ToVoid : $@async @convention(method) (Int64, @guaranteed S) -> () { bb0(%int : $Int64, %instance : $S): @@ -85,9 +87,10 @@ sil_vtable S { #S.deinit!deallocator: @S_deallocating_deinit } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %s_type = metatype $@thick S.Type %allocating_init = function_ref @S_allocating_init : $@convention(method) (@thick S.Type) -> @owned S %instance = apply %allocating_init(%s_type) : $@convention(method) (@thick S.Type) -> @owned S @@ -98,10 +101,19 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %result = apply %classinstanceSInt64ToVoid(%int, %instance) : $@convention(method) @async (Int64, @guaranteed S) -> () // CHECK: main.S strong_release %instance : $S - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : 
$Int32 +} diff --git a/test/IRGen/async/run-call-classinstance-void-to-void.sil b/test/IRGen/async/run-call-classinstance-void-to-void.sil index 8b01da80daabc..d9c729cc6d7d1 100644 --- a/test/IRGen/async/run-call-classinstance-void-to-void.sil +++ b/test/IRGen/async/run-call-classinstance-void-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -29,6 +30,7 @@ class S { init() } +// CHECK-LL: @classinstanceSVoidToVoidAD = // CHECK-LL: define hidden swiftcc void @classinstanceSVoidToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @classinstanceSVoidToVoid : $@async @convention(method) (@guaranteed S) -> () { bb0(%instance : $S): @@ -85,9 +87,10 @@ sil_vtable S { #S.deinit!deallocator: @S_deallocating_deinit } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %s_type = metatype $@thick S.Type %allocating_init = function_ref @S_allocating_init : $@convention(method) (@thick S.Type) -> @owned S %instance = apply %allocating_init(%s_type) : $@convention(method) (@thick S.Type) -> @owned S @@ -96,9 +99,19 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %result = apply %classinstanceSVoidToVoid(%instance) : $@convention(method) @async (@guaranteed S) -> () // CHECK: main.S strong_release %instance : $S - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-existential-to-void.sil b/test/IRGen/async/run-call-existential-to-void.sil index c6e0dea4f7080..8811cbeb1ddfc 100644 --- a/test/IRGen/async/run-call-existential-to-void.sil +++ b/test/IRGen/async/run-call-existential-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -37,6 +38,7 @@ bb0(%int : $Int64, %S_type : $@thin S.Type): return %instance : $S } +// CHECK-LL: @existentialToVoidAD = // CHECK-LL: define hidden swiftcc void @existentialToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @existentialToVoid : $@async @convention(thin) (@in_guaranteed P) -> () { bb0(%existential : $*P): @@ -68,13 +70,26 @@ bb0: return %result : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external 
@$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %call = function_ref @call : $@async @convention(thin) () -> () // CHECK: 7384783 %result = apply %call() : $@async @convention(thin) () -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-generic-to-generic.sil b/test/IRGen/async/run-call-generic-to-generic.sil index 610e1187beda4..a1909edcefea0 100644 --- a/test/IRGen/async/run-call-generic-to-generic.sil +++ b/test/IRGen/async/run-call-generic-to-generic.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @genericToGenericAD = // CHECK-LL: define hidden swiftcc void @genericToGeneric(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @genericToGeneric : $@async @convention(thin) (@in_guaranteed T) -> @out T { bb0(%out : $*T, %in : $*T): @@ -27,9 +29,10 @@ bb0(%out : $*T, %in : $*T): return %result : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %int_literal = integer_literal $Builtin.Int64, 42 %int = struct $Int64 (%int_literal : $Builtin.Int64) %int_addr = alloc_stack $Int64 @@ -46,11 +49,19 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> dealloc_stack %out_addr : $*Int64 dealloc_stack %int_addr : $*Int64 - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () - + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-generic-to-void.sil b/test/IRGen/async/run-call-generic-to-void.sil index fd922c6d49ec6..973df57826ffe 100644 --- 
a/test/IRGen/async/run-call-generic-to-void.sil +++ b/test/IRGen/async/run-call-generic-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printGeneric : $@convention(thin) (@in_guaranteed T) -> () +// CHECK-LL: @genericToVoidAD = // CHECK-LL: define hidden swiftcc void @genericToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @genericToVoid : $@async @convention(thin) (@in_guaranteed T) -> () { bb0(%instance : $*T): @@ -26,10 +28,10 @@ bb0(%instance : $*T): %result = apply %print_generic(%instance) : $@convention(thin) (@in_guaranteed T) -> () // CHECK: 922337203685477580 return %result : $() } +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): - +sil @test_case : $@convention(thin) @async () -> () { %int_literal = integer_literal $Builtin.Int64, 922337203685477580 %int = struct $Int64 (%int_literal : $Builtin.Int64) %int_addr = alloc_stack $Int64 @@ -38,10 +40,19 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %result = apply %genericToVoid(%int_addr) : $@async @convention(thin) (@in_guaranteed T) -> () dealloc_stack %int_addr : $*Int64 - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil b/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil index 27f2a88b4fe53..3fd0c1427f371 100644 --- a/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil +++ b/test/IRGen/async/run-call-genericEquatable-x2-to-bool.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printBool : $@convention(thin) (Bool) -> () +// CHECK-LL: @genericEquatableAndGenericEquatableToBoolAD = // CHECK-LL: define hidden swiftcc void @genericEquatableAndGenericEquatableToBool(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @genericEquatableAndGenericEquatableToBool : $@async @convention(thin) (@in_guaranteed T, @in_guaranteed T) -> Bool { bb0(%0 : $*T, %1 : $*T): @@ -28,9 +30,10 @@ bb0(%0 : $*T, %1 : $*T): return %6 : $Bool } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external 
@$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %int1_literal = integer_literal $Builtin.Int64, 42 %int1 = struct $Int64 (%int1_literal : $Builtin.Int64) %int1_addr = alloc_stack $Int64 @@ -54,8 +57,19 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> dealloc_stack %int2_addr : $*Int64 dealloc_stack %int1_addr : $*Int64 - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-int64-and-int64-to-void.sil b/test/IRGen/async/run-call-int64-and-int64-to-void.sil index a041807ca2d82..31ee136974a8b 100644 --- a/test/IRGen/async/run-call-int64-and-int64-to-void.sil +++ b/test/IRGen/async/run-call-int64-and-int64-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @int64AndInt64ToVoidAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @int64AndInt64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @int64AndInt64ToVoid : $@async @convention(thin) (Int64, Int64) -> () { entry(%int1: $Int64, %int2: $Int64): @@ -28,9 +30,10 @@ entry(%int1: $Int64, %int2: $Int64): return %result2 : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %int_literal1 = integer_literal $Builtin.Int64, 42 %int1 = struct $Int64 (%int_literal1 : $Builtin.Int64) %int_literal2 = integer_literal $Builtin.Int64, 13 @@ -39,8 +42,19 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %int64AndInt64ToVoid = function_ref @int64AndInt64ToVoid : $@async @convention(thin) (Int64, Int64) -> () %result = apply %int64AndInt64ToVoid(%int1, %int2) : $@async @convention(thin) (Int64, Int64) -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + 
%5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-int64-to-void.sil b/test/IRGen/async/run-call-int64-to-void.sil index 91d2f0781ce74..475f1dc09853f 100644 --- a/test/IRGen/async/run-call-int64-to-void.sil +++ b/test/IRGen/async/run-call-int64-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @int64ToVoidAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @int64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @int64ToVoid : $@async @convention(thin) (Int64) -> () { entry(%int: $Int64): @@ -27,15 +29,29 @@ entry(%int: $Int64): return %result : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %int_literal = integer_literal $Builtin.Int64, 42 %int = struct $Int64 (%int_literal : $Builtin.Int64) %int64ToVoid = function_ref @int64ToVoid : $@async @convention(thin) (Int64) -> () %result = apply %int64ToVoid(%int) : $@async @convention(thin) (Int64) -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil b/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil index 60a11e3722d5f..4df2bc086742d 100644 --- a/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call-protocolextension_instance-void-to-int64.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -33,6 +34,7 @@ struct I : P { init(int: Int64) } +// CHECK-LL: @callPrintMeAD = // CHECK-LL: define hidden swiftcc void @callPrintMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @callPrintMe : $@async @convention(method) (@in_guaranteed Self) -> Int64 { bb0(%self : $*Self): @@ -60,8 +62,10 @@ bb0(%self_addr : $*I): return %result : $Int64 } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in 
_Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %i_type = metatype $@thin I.Type %i_int_literal = integer_literal $Builtin.Int64, 99 %i_int = struct $Int64 (%i_int_literal : $Builtin.Int64) @@ -74,9 +78,21 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %printInt64 = function_ref @printInt64 : $@convention(thin) (Int64) -> () %printInt64_result = apply %printInt64(%result) : $@convention(thin) (Int64) -> () // CHECK: 99 - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 (%out_literal : $Builtin.Int32) - return %out : $Int32 + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } sil_witness_table hidden I: P module main { diff --git a/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil b/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil index de13f3998036c..d7f73ce866460 100644 --- a/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call-protocolwitness_instance-void-to-int64.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -29,6 +30,7 @@ struct I : P { init(int: Int64) } +// CHECK-LL: @I_printMeAD = // CHECK-LL-LABEL: define hidden swiftcc void @I_printMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @I_printMe : $@async @convention(method) (I) -> Int64 { bb0(%self : $I): @@ -50,8 +52,10 @@ bb0(%self_addr : $*I): return %result : $Int64 } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %i_type = metatype $@thin I.Type %i_int_literal = integer_literal $Builtin.Int64, 99 %i_int = struct $Int64 (%i_int_literal : $Builtin.Int64) @@ -64,12 +68,24 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %printInt64 = function_ref @printInt64 : $@convention(thin) (Int64) -> () %printInt64_result = apply %printInt64(%result) : $@convention(thin) (Int64) -> () // CHECK: 99 - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 (%out_literal : $Builtin.Int32) - return %out : $Int32 + + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref 
@$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } sil_witness_table hidden I: P module main { method #P.printMe: (Self) -> () async -> Int64 : @I_P_printMe } - diff --git a/test/IRGen/async/run-call-resilient-classinstance-void-to-void.sil b/test/IRGen/async/run-call-resilient-classinstance-void-to-void.sil new file mode 100644 index 0000000000000..02441131c9c3b --- /dev/null +++ b/test/IRGen/async/run-call-resilient-classinstance-void-to-void.sil @@ -0,0 +1,54 @@ +// RUN: %empty-directory(%t) +// RUN: %target-build-swift-dylib(%t/%target-library-name(PrintShims)) %S/../../Inputs/print-shims.swift -module-name PrintShims -emit-module -emit-module-path %t/PrintShims.swiftmodule +// RUN: %target-codesign %t/%target-library-name(PrintShims) +// RUN: %target-build-swift-dylib(%t/%target-library-name(ResilientClass)) %S/Inputs/class-1instance-void_to_void.swift -Xfrontend -enable-experimental-concurrency -module-name ResilientClass -emit-module -emit-module-path %t/ResilientClass.swiftmodule +// RUN: %target-codesign %t/%target-library-name(ResilientClass) +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -emit-ir -I %t -L %t -lPrintShim -lResilientClass | %FileCheck %s --check-prefix=CHECK-LL +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -module-name main -o %t/main -I %t -L %t -lPrintShims -lResilientClass %target-rpath(%t) +// RUN: %target-codesign %t/main +// RUN: %target-run %t/main %t/%target-library-name(PrintShims) %t/%target-library-name(ResilientClass) | %FileCheck %s + +// REQUIRES: executable_test +// REQUIRES: swift_test_mode_optimize_none +// REQUIRES: concurrency +// UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e + + +import Builtin +import Swift +import PrintShims +import _Concurrency +import ResilientClass + +sil public_external [exact_self_class] @$s14ResilientClass5ClazzCACycfC : $@convention(method) (@thick Clazz.Type) -> @owned Clazz + +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +// CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @test_case(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @test_case : $@convention(thin) @async () -> () { + %s_type = metatype $@thick Clazz.Type + %allocating_init = function_ref @$s14ResilientClass5ClazzCACycfC : $@convention(method) (@thick Clazz.Type) -> @owned Clazz + %instance = apply %allocating_init(%s_type) : $@convention(method) (@thick Clazz.Type) -> @owned Clazz + %classinstanceVoidToVoid = class_method %instance : $Clazz, #Clazz.classinstanceVoidToVoid : (Clazz) -> () async -> (), $@convention(method) @async (@guaranteed Clazz) -> () + strong_retain %instance : $Clazz + %result = apply %classinstanceVoidToVoid(%instance) : $@convention(method) @async (@guaranteed Clazz) -> () // CHECK: ResilientClass.Clazz + strong_release %instance : $Clazz + + %out = tuple () + return %out : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function 
%2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-struct_five_bools-to-void.sil b/test/IRGen/async/run-call-struct_five_bools-to-void.sil new file mode 100644 index 0000000000000..1ad1a3bbe4582 --- /dev/null +++ b/test/IRGen/async/run-call-struct_five_bools-to-void.sil @@ -0,0 +1,79 @@ +// RUN: %empty-directory(%t) +// RUN: %target-build-swift-dylib(%t/%target-library-name(PrintShims)) %S/../../Inputs/print-shims.swift -module-name PrintShims -emit-module -emit-module-path %t/PrintShims.swiftmodule +// RUN: %target-codesign %t/%target-library-name(PrintShims) +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -emit-ir -I %t -L %t -lPrintShim | %FileCheck %s --check-prefix=CHECK-LL +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -module-name main -o %t/main -I %t -L %t -lPrintShims %target-rpath(%t) +// RUN: %target-codesign %t/main +// RUN: %target-run %t/main %t/%target-library-name(PrintShims) | %FileCheck %s + +// REQUIRES: executable_test +// REQUIRES: swift_test_mode_optimize_none +// REQUIRES: concurrency +// UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e + + +import Builtin +import Swift +import PrintShims +import _Concurrency + +sil public_external @printGeneric : $@convention(thin) <T> (@in_guaranteed T) -> () + +struct Pack { + public let a: Bool + public let b: Bool + public let c: Bool + public let d: Bool + public let e: Bool +} + +// CHECK-LL: @test_caseAD = + +sil @structPackToVoid : $@async @convention(thin) (Pack) -> () { +entry(%pack : $Pack): + %pack_addr = alloc_stack $Pack + store %pack to %pack_addr : $*Pack + %printGeneric = function_ref @printGeneric : $@convention(thin) <T> (@in_guaranteed T) -> () + %printGeneric_result1 = apply %printGeneric<Pack>(%pack_addr) : $@convention(thin) <T> (@in_guaranteed T) -> () //CHECK: Pack(a: true, b: false, c: true, d: false, e: true) + dealloc_stack %pack_addr : $*Pack + + return %printGeneric_result1 : $() +} + +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +// CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @test_case(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @test_case : $@async @convention(thin) () -> () { + + %a_literal = integer_literal $Builtin.Int1, -1 + %a = struct $Bool (%a_literal : $Builtin.Int1) + %b_literal = integer_literal $Builtin.Int1, 0 + %b = struct $Bool (%b_literal : $Builtin.Int1) + %c_literal = integer_literal $Builtin.Int1, -1 + %c = struct $Bool (%c_literal : $Builtin.Int1) + %d_literal = integer_literal $Builtin.Int1, 0 + %d = struct $Bool (%d_literal : $Builtin.Int1) + %e_literal = integer_literal $Builtin.Int1, -1 + %e = struct $Bool (%e_literal : $Builtin.Int1) + + %pack = struct $Pack (%a : $Bool, %b : $Bool, %c : $Bool, %d : $Bool, %e : $Bool) + + %structPackToVoid = function_ref @structPackToVoid : $@async @convention(thin) (Pack) -> () + %result = apply %structPackToVoid(%pack) : $@async @convention(thin) (Pack) -> () + return %result : $() +} + 
+sil @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-structinstance-int64-to-void.sil b/test/IRGen/async/run-call-structinstance-int64-to-void.sil index f252e159d2320..5449493e5ba95 100644 --- a/test/IRGen/async/run-call-structinstance-int64-to-void.sil +++ b/test/IRGen/async/run-call-structinstance-int64-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -25,6 +26,7 @@ struct S { init(storage: Int64) } +// CHECK-LL: @structinstanceSInt64ToVoidAD = // CHECK-LL: define hidden swiftcc void @structinstanceSInt64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @structinstanceSInt64ToVoid : $@async @convention(method) (Int64, S) -> () { bb0(%int : $Int64, %self : $S): @@ -47,8 +49,10 @@ bb0(%self : $S, %int : $Int64): return %out : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %s_type = metatype $@thin S.Type %storage_literal = integer_literal $Builtin.Int64, 987654321 %storage = struct $Int64 (%storage_literal : $Builtin.Int64) @@ -58,9 +62,21 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer () %result = apply %structinstanceSInt64ToVoid(%int, %s) : $@async @convention(method) (Int64, S) -> () - %exitcode_literal = integer_literal $Builtin.Int32, 0 - %exitcode = struct $Int32 (%exitcode_literal : $Builtin.Int32) - return %exitcode : $Int32 + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing.sil index f0019bcd371bf..5f2cd9a42003f 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -99,6 +100,7 @@
bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int, @error Error) { bb0: @@ -114,15 +116,26 @@ bb0: throw %error : $Error } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %call = function_ref @call : $@async @convention(thin) () -> () // CHECK: 42 %result = apply %call() : $@async @convention(thin) () -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil index 7b27aba126715..f35a2462b9a36 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-nothrow_call-sync-throw.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -98,6 +99,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int64, @error Error) { entry: @@ -137,18 +139,26 @@ bb0: return %Int64 : $Int64 } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %call = function_ref @call : $@async @convention(thin) () -> () // CHECK: 42 %result = apply %call() : $@async @convention(thin) () -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref 
@$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () - - - + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil index f79e6156802d9..6dd180887a612 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-async-throw.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -99,6 +100,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int, @error Error) { bb0: @@ -124,17 +126,26 @@ bb0: throw %error : $Error } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %call = function_ref @call : $@async @convention(thin) () -> () // CHECK: 42 %result = apply %call() : $@async @convention(thin) () -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () - - + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil index 75d84bcd93311..ac44411758f0d 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-nothrow_call-async-throw.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -99,6 +100,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int64, @error Error) { entry: @@ -138,18 +140,26 @@ bb0: throw %error : $Error } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : 
$UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %call = function_ref @call : $@async @convention(thin) () -> () // CHECK: 42 %result = apply %call() : $@async @convention(thin) () -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () - - - + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil index 56d76d74fd348..e265b1fd75d2d 100644 --- a/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil +++ b/test/IRGen/async/run-call-void-throws-to-int-throwing_call-sync-throw.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -100,6 +101,7 @@ bb5: br bb2 } +// CHECK-LL: @voidThrowsToIntAD = // CHECK-LL: define hidden swiftcc void @voidThrowsToInt(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidThrowsToInt : $@async @convention(thin) () -> (Int, @error Error) { bb0: @@ -124,17 +126,26 @@ bb0: %result = builtin "willThrow"(%error : $Error) : $() throw %error : $Error } +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +sil @test_case : $@convention(thin) @async () -> () { %call = function_ref @call : $@async @convention(thin) () -> () // CHECK: 42 %result = apply %call() : $@async @convention(thin) () -> () - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () - + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-void-to-existential.sil b/test/IRGen/async/run-call-void-to-existential.sil index eab63c807d73f..9dc245041d479 100644 --- 
a/test/IRGen/async/run-call-void-to-existential.sil +++ b/test/IRGen/async/run-call-void-to-existential.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -36,6 +37,7 @@ bb0(%int : $Int64, %S_type : $@thin S.Type): return %instance : $S } +// CHECK-LL: @voidToExistentialAD = // CHECK-LL: define hidden swiftcc void @voidToExistential(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @voidToExistential : $@async @convention(thin) () -> @out P { bb0(%out : $*P): @@ -72,14 +74,26 @@ bb0: return %result : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %call = function_ref @call : $@async @convention(thin) () -> () %result = apply %call() : $@async @convention(thin) () -> () // CHECK: S(int: 42) - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-void-to-int64-and-int64.sil b/test/IRGen/async/run-call-void-to-int64-and-int64.sil index 789e9c7a981c3..7c354c73208c2 100644 --- a/test/IRGen/async/run-call-void-to-int64-and-int64.sil +++ b/test/IRGen/async/run-call-void-to-int64-and-int64.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @voidToInt64AndInt64AD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @voidToInt64AndInt64(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @voidToInt64AndInt64 : $@async @convention(thin) () -> (Int64, Int64) { %int_literal1 = integer_literal $Builtin.Int64, 42 @@ -29,9 +31,10 @@ sil @voidToInt64AndInt64 : $@async @convention(thin) () -> (Int64, Int64) { return %tuple : $(Int64, Int64) } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %voidToInt64AndInt64 = function_ref @voidToInt64AndInt64 : $@async @convention(thin) () -> (Int64, Int64) %tuple = apply %voidToInt64AndInt64() : $@async @convention(thin) () -> (Int64, Int64) %int1 = tuple_extract %tuple : 
$(Int64, Int64), 0 @@ -41,9 +44,19 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %result1 = apply %print(%int1) : $@convention(thin) (Int64) -> () // CHECK: 42 %result2 = apply %print(%int2) : $@convention(thin) (Int64) -> () // CHECK: 13 - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-call-void-to-int64.sil b/test/IRGen/async/run-call-void-to-int64.sil index a4bc75715b05d..fd1c4a3a67858 100644 --- a/test/IRGen/async/run-call-void-to-int64.sil +++ b/test/IRGen/async/run-call-void-to-int64.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @voidToInt64AD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @voidToInt64(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @voidToInt64 : $@async @convention(thin) () -> (Int64) { %int_literal = integer_literal $Builtin.Int64, 42 @@ -26,17 +28,30 @@ sil @voidToInt64 : $@async @convention(thin) () -> (Int64) { return %int : $Int64 } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () +sil @test_case : $@convention(thin) @async () -> () { %voidToInt64 = function_ref @voidToInt64 : $@async @convention(thin) () -> Int64 %int = apply %voidToInt64() : $@async @convention(thin) () -> Int64 %print = function_ref @printInt64 : $@convention(thin) (Int64) -> () %result = apply %print(%int) : $@convention(thin) (Int64) -> () // CHECK: 42 - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-call-void-to-struct_large.sil b/test/IRGen/async/run-call-void-to-struct_large.sil index cd353eecbca6e..10f1afe9ccadf 100644 
--- a/test/IRGen/async/run-call-void-to-struct_large.sil +++ b/test/IRGen/async/run-call-void-to-struct_large.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -101,6 +102,7 @@ bb0(%0 : $@thin Big.Type): return %62 : $Big } +// CHECK-LL: @getBigAD = // CHECK-LL: define hidden swiftcc void @getBig(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @getBig : $@async @convention(thin) () -> Big { bb0: @@ -123,12 +125,26 @@ sil hidden @printBig : $@async @convention(thin) () -> () { return %out : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %printBig = function_ref @printBig : $@async @convention(thin) () -> () %result = apply %printBig() : $@async @convention(thin) () -> () // CHECK: Big(i1: 1, i2: 2, i3: 3, i4: 4, i5: 5, i6: 6, i7: 7, i8: 8, i9: 9, i0: 0) - %2 = integer_literal $Builtin.Int32, 0 - %3 = struct $Int32 (%2 : $Builtin.Int32) - return %3 : $Int32 // id: %4 + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil b/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil index 8e022e4970136..c3a9fbfe1735c 100644 --- a/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil +++ b/test/IRGen/async/run-call_generic-protocolwitness_instance-generic-to-int64-and-generic.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -45,6 +46,7 @@ bb0(%out_addr : $*T, %in_addr : $*T, %self : $I): return %value : $Int64 } +// CHECK-LL: @I_P_printMeAD = // CHECK-LL: define internal swiftcc void @I_P_printMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil private [transparent] [thunk] @I_P_printMe : $@convention(witness_method: P) @async <τ_0_0> (@in_guaranteed τ_0_0, @in_guaranteed I) -> (Int64, @out τ_0_0) { bb0(%out_addr : $*τ_0_0, %in_addr : $*τ_0_0, %self_addr : $*I): @@ -61,8 +63,10 @@ bb0(%out_addr : $*U, %self_addr : $*T, %in_addr : $*U): return %result : $Int64 } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> 
() + +sil @test_case : $@convention(thin) @async () -> () { %I_type = metatype $@thin I.Type %int_literal = integer_literal $Builtin.Int64, 99 %int = struct $Int64 (%int_literal : $Builtin.Int64) @@ -87,12 +91,22 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} sil_witness_table hidden I: P module main { method #P.printMe: (Self) -> (T) async -> (Int64, T) : @I_P_printMe diff --git a/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil b/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil index eac3f6a7b8b8f..6367891014abf 100644 --- a/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil +++ b/test/IRGen/async/run-call_generic-protocolwitness_instance-void-to-int64.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -29,6 +30,9 @@ struct I : P { init(int: Int64) } +// CHECK-LL: @I_printMeAD = +// CHECK-LL: @I_P_printMeAD = + // CHECK-LL-LABEL: define hidden swiftcc void @I_printMe(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @I_printMe : $@async @convention(method) (I) -> Int64 { bb0(%self : $I): @@ -57,8 +61,10 @@ bb0(%t : $*T): return %result : $Int64 } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %i_type = metatype $@thin I.Type %i_int_literal = integer_literal $Builtin.Int64, 99 %i_int = struct $Int64 (%i_int_literal : $Builtin.Int64) @@ -71,12 +77,25 @@ bb0(%0 : $Int32, %1 : $UnsafeMutablePointer> %printInt64 = function_ref @printInt64 : $@convention(thin) (Int64) -> () %printInt64_result = apply %printInt64(%result) : $@convention(thin) (Int64) -> () // CHECK: 99 - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 (%out_literal : $Builtin.Int32) - return %out : $Int32 + + %void = tuple() + return %void : $() } +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} + + sil_witness_table hidden I: P module main { method #P.printMe: (Self) -> () async -> Int64 : 
@I_P_printMe } - diff --git a/test/IRGen/async/run-convertfunction-int64-to-void.sil b/test/IRGen/async/run-convertfunction-int64-to-void.sil new file mode 100644 index 0000000000000..7dda53421140e --- /dev/null +++ b/test/IRGen/async/run-convertfunction-int64-to-void.sil @@ -0,0 +1,60 @@ +// RUN: %empty-directory(%t) +// RUN: %target-build-swift-dylib(%t/%target-library-name(PrintShims)) %S/../../Inputs/print-shims.swift -module-name PrintShims -emit-module -emit-module-path %t/PrintShims.swiftmodule +// RUN: %target-codesign %t/%target-library-name(PrintShims) +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -emit-ir -I %t -L %t -lPrintShims | %FileCheck %s --check-prefix=CHECK-LL +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -module-name main -o %t/main -I %t -L %t -lPrintShims %target-rpath(%t) +// RUN: %target-codesign %t/main +// RUN: %target-run %t/main %t/%target-library-name(PrintShims) | %FileCheck %s + +// REQUIRES: executable_test +// REQUIRES: swift_test_mode_optimize_none +// REQUIRES: concurrency +// UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e + +import Builtin +import Swift +import PrintShims +import _Concurrency + +sil public_external @printInt64 : $@convention(thin) (Int64) -> () + +// CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @int64ToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @int64ToVoid : $@async @convention(thin) (Int64) -> () { +entry(%int : $Int64): + %printInt64 = function_ref @printInt64 : $@convention(thin) (Int64) -> () + %result = apply %printInt64(%int) : $@convention(thin) (Int64) -> () // CHECK: 9999 + return %result : $() +} +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { + %int64ToVoid = function_ref @int64ToVoid : $@async @convention(thin) (Int64) -> () + %int64ToVoidThick = thin_to_thick_function %int64ToVoid : $@convention(thin) @async (Int64) -> () to $@async @callee_guaranteed (Int64) -> () + %int64ThrowsToVoid = convert_function %int64ToVoidThick : $@async @callee_guaranteed (Int64) -> () to $@async @callee_guaranteed (Int64) -> @error Error + %int_literal = integer_literal $Builtin.Int64, 9999 + %int = struct $Int64 (%int_literal : $Builtin.Int64) + try_apply %int64ThrowsToVoid(%int) : $@async @callee_guaranteed (Int64) -> @error Error, normal success, error failure + +success(%value : $()): + %result = tuple() + return %result : $() + +failure(%error : $Error): + %end = builtin "errorInMain"(%error : $Error) : $() + unreachable +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-partialapply-capture-class-to-void.sil b/test/IRGen/async/run-partialapply-capture-class-to-void.sil index
11e0111f497b8..2fe81ff70bff5 100644 --- a/test/IRGen/async/run-partialapply-capture-class-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-class-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -57,6 +58,7 @@ sil_vtable S { #S.deinit!deallocator: @S_deallocating_deinit } +// CHECK-LL: @classinstanceSToVoidAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @classinstanceSToVoid(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @classinstanceSToVoid : $@async @convention(thin) (@owned S) -> () { entry(%c : $S): @@ -75,8 +77,10 @@ entry(%instance : $S): return %partiallyApplied : $@async @callee_owned () -> () } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %s_type = metatype $@thick S.Type %allocating_init = function_ref @S_allocating_init : $@convention(method) (@thick S.Type) -> @owned S %instance = apply %allocating_init(%s_type) : $@convention(method) (@thick S.Type) -> @owned S @@ -88,7 +92,19 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil b/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil index 51f13c42d8711..73a23172b029e 100644 --- a/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-generic_conformer-and-generic-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -63,10 +64,10 @@ bb0(%0 : $*S): return %2 : $@async @callee_owned (@in O) -> () } +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () - -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +sil @test_case : $@convention(thin) @async () -> () { %observableImpl = alloc_ref $ObservableImpl strong_retain %observableImpl : $ObservableImpl %observableImpl_addr = alloc_stack $ObservableImpl @@ -82,7 +83,19 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref 
@$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil b/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil index 9ff9c9815838e..2e9fe56168f8c 100644 --- a/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil +++ b/test/IRGen/async/run-partialapply-capture-inout-generic-and-in-generic-to-generic.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -19,6 +20,7 @@ import _Concurrency sil public_external @printGeneric : $@convention(thin) (@in_guaranteed T) -> () sil public_external @printInt64 : $@convention(thin) (Int64) -> () +// CHECK-LL: @inGenericAndInoutGenericToGenericAD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @inGenericAndInoutGenericToGeneric(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { // CHECK-LL: define internal swiftcc void @"$s017inGenericAndInoutb2ToB0TA"(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}, %swift.refcounted* swiftself {{%[0-9]*}}) {{#[0-9]*}} { sil @inGenericAndInoutGenericToGeneric : $@async @convention(thin) (@in T, @inout T) -> @out T { @@ -38,8 +40,10 @@ entry(%a : $*T): return %p : $@async @callee_owned (@in T) -> @out T } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %in_literal = integer_literal $Builtin.Int64, 876 %in = struct $Int64 (%in_literal : $Builtin.Int64) %inout_literal = integer_literal $Builtin.Int64, 678 @@ -53,7 +57,7 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer (@inout T) -> @async @callee_owned (@in T) -> @out T %partiallyApplied = apply %partial_apply_open_generic_capture(%inout_addr) : $@async @convention(thin) (@inout T) -> @async @callee_owned (@in T) -> @out T - %void = apply %partiallyApplied(%result_addr, %in_addr) : $@async @callee_owned (@in Int64) -> @out Int64 + %ignore = apply %partiallyApplied(%result_addr, %in_addr) : $@async @callee_owned (@in Int64) -> @out Int64 %result = load %result_addr : $*Int64 %printInt64 = function_ref @printInt64 : $@convention(thin) (Int64) -> () @@ -63,7 +67,19 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git 
a/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil b/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil index edf3743511a64..d796816bfcac0 100644 --- a/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil +++ b/test/IRGen/async/run-partialapply-capture-int64-int64-to-int64.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -36,15 +37,7 @@ bb0: return %result : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): - %createAndInvokeClosure = function_ref @createAndInvokeClosure : $@async @convention(thin) () -> () - %createAndInvokeClosure_result = apply %createAndInvokeClosure() : $@async @convention(thin) () -> () - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 (%out_literal : $Builtin.Int32) - return %out : $Int32 -} - +// CHECK-LL: @closureAD = // CHECK-LL: define internal swiftcc void @closure(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} // CHECK-LL: define internal swiftcc void @"$s7closureTA"(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}, %swift.refcounted* swiftself {{%[0-9]*}}) {{#[0-9]*}} sil hidden @createPartialApply : $@async @convention(thin) (Int64) -> @owned @async @callee_guaranteed (Int64) -> Int64 { @@ -69,4 +62,26 @@ bb0(%one : $Int64, %two : $Int64): %sum = struct $Int64 (%sum_builtin : $Builtin.Int64) return %sum : $Int64 } +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { + %createAndInvokeClosure = function_ref @createAndInvokeClosure : $@async @convention(thin) () -> () + %createAndInvokeClosure_result = apply %createAndInvokeClosure() : $@async @convention(thin) () -> () + + %void = tuple() + return %void : $() +} +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil b/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil index a796166a6f4ce..5ebd94cf6a372 100644 --- a/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil +++ b/test/IRGen/async/run-partialapply-capture-int64-to-generic.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -37,8 +38,10 @@ entry(%out_t : $*T, %x : $Int32, %in_t : $*T): return %result : $() } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : 
$@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %callee = function_ref @callee : $@async @convention(thin) (Int32, @in_guaranteed T) -> @out T %first_literal = integer_literal $Builtin.Int32, 1 %first = struct $Int32 (%first_literal : $Builtin.Int32) @@ -49,7 +52,7 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer(%second, %callee1) : $@async @convention(thin) (Int32, @owned @async @callee_owned (Int32) -> @out T) -> @async @callee_owned () -> @out T - + %result_addr = alloc_stack $Int32 %result = apply %callee2(%result_addr) : $@async @callee_owned () -> @out Int32 @@ -59,7 +62,19 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil b/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil index 8062e142fcff3..1cfd096f7ce11 100644 --- a/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil +++ b/test/IRGen/async/run-partialapply-capture-struct_classinstance_classinstance-and-int64-to-int64.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -61,6 +62,7 @@ sil_vtable C { struct S { var x: C, y: C } +// CHECK-LL: @structClassInstanceClassInstanceAndInt64ToInt64AD = // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @structClassInstanceClassInstanceAndInt64ToInt64(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { // CHECK-LL: define internal swiftcc void @"$s019structClassInstancebc10AndInt64ToE0TA"(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}, %swift.refcounted* swiftself {{%[0-9]*}}) {{#[0-9]*}} { sil @structClassInstanceClassInstanceAndInt64ToInt64 : $@async @convention(thin) (Int64, @guaranteed S) -> Int64 { @@ -82,8 +84,10 @@ bb0(%x : $S): return %p : $@async @callee_owned (Int64) -> Int64 } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %s_type = metatype $@thick C.Type %allocating_init = function_ref @S_allocating_init : $@convention(method) (@thick C.Type) -> @owned C %instance1 = apply %allocating_init(%s_type) : $@convention(method) (@thick C.Type) -> @owned C @@ -100,7 +104,19 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer () %printInt64_result = apply %printInt64(%result) : $@convention(thin) (Int64) -> () // CHECK: 9999 - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 
(%out_literal : $Builtin.Int32) - return %out : $Int32 + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil b/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil index 5b430b6746471..c93ef5dbdffea 100644 --- a/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil +++ b/test/IRGen/async/run-partialapply-capture-structgeneric_classinstance_to_struct_and_error.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -39,6 +40,9 @@ entry(%value : $A2): return %result : $() } +// CHECK-LL: @amethodAD = +// CHECK-LL: @repoAD = + // CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @amethod(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil @amethod : $@async @convention(method) (@in_guaranteed A2) -> (@owned A1, @error Error) { entry(%a2_at_a3_addr : $*A2): @@ -62,8 +66,10 @@ bb0(%0 : $*A2): return %5 : $@async @callee_guaranteed () -> (@owned A1, @error Error) } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %a3 = alloc_ref $A3 %a2 = struct $A2 (%a3 : $A3) @@ -82,7 +88,7 @@ bb_success(%value : $A1): %closure = struct_extract %value : $A1, #A1.b %closure_result = apply %closure() : $@callee_guaranteed () -> () // CHECK: A2(a: main.A3) dealloc_stack %value_addr : $*A1 - + br bb_finish bb_error(%error : $Error): @@ -92,7 +98,19 @@ bb_finish: dealloc_stack %a2_addr : $*A2 - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 (%out_literal : $Builtin.Int32) - return %out : $Int32 + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil 
b/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil index 64ddb33f08f5d..20c22d8ae8130 100644 --- a/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-structgeneric_polymorphic_constrained-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -51,8 +52,10 @@ bb0(%0 : $*τ_0_1): return %9 : $@async @callee_owned () -> () } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %bind_polymorphic_param_from_context = function_ref @bind_polymorphic_param_from_context : $@async @convention(thin) <τ_0_1>(@in τ_0_1) -> @owned @async @callee_owned () -> () %int_literal = integer_literal $Builtin.Int64, 9999 %int = struct $Int64 (%int_literal : $Builtin.Int64) @@ -63,7 +66,19 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer () - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 (%out_literal : $Builtin.Int32) - return %out : $Int32 + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil b/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil index b077f3075b25d..be23bdc652f0d 100644 --- a/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-type_structgeneric_polymorphic_constrained-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -32,6 +33,7 @@ sil_witness_table BaseProducer : Q module main { public class WeakBox {} sil_vtable WeakBox {} +// CHECK-LL: @takingQAD = // CHECK-LL: define hidden swiftcc void @takingQ(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { sil hidden @takingQ : $@async @convention(thin) <τ_0_0 where τ_0_0 : Q> (@owned WeakBox<τ_0_0>) -> () { entry(%box : $WeakBox<τ_0_0>): @@ -53,8 +55,10 @@ bb0(%0 : $*τ_0_1): return %9 : $@callee_owned @async (@owned WeakBox>) -> () } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil 
@test_case : $@convention(thin) @async () -> () { %bind_polymorphic_param_from_forwarder_parameter = function_ref @bind_polymorphic_param_from_forwarder_parameter : $@async @convention(thin) <τ_0_1>(@in τ_0_1) -> @callee_owned @async (@owned WeakBox<BaseProducer<τ_0_1>>) -> () %int_literal = integer_literal $Builtin.Int64, 9999 %int = struct $Int64 (%int_literal : $Builtin.Int64) @@ -66,7 +70,20 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer> %result = apply %partiallyApplied(%box) : $@callee_owned @async (@owned WeakBox<BaseProducer<Int64>>) -> () - %out_literal = integer_literal $Builtin.Int32, 0 - %out = struct $Int32 (%out_literal : $Builtin.Int32) - return %out : $Int32 + + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 } diff --git a/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil b/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil index 93b323f2456b6..ca545afb1956f 100644 --- a/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil +++ b/test/IRGen/async/run-partialapply-capture-type_thin-and-classinstance-to-void.sil @@ -10,6 +10,7 @@ // REQUIRES: swift_test_mode_optimize_none // REQUIRES: concurrency // UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e import Builtin import Swift @@ -84,8 +85,10 @@ entry(%S_type: $@thin S.Type, %C_instance: $C): return %closure : $@async @callee_owned () -> () } -sil @main : $@async @convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { -bb0(%argc : $Int32, %argv : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { %c_type = metatype $@thick C.Type %allocating_init = function_ref @C_allocating_init : $@convention(method) (@thick C.Type) -> @owned C %instance = apply %allocating_init(%c_type) : $@convention(method) (@thick C.Type) -> @owned C @@ -97,6 +100,18 @@ bb0(%argc : $Int32, %argv : $UnsafeMutablePointer @async @callee_owned () -> () %result = apply %partiallyApplied() : $@async @callee_owned () -> () + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { +bb0(%argc : $Int32, %argv : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %out_literal = integer_literal $Builtin.Int32, 0 + %out = struct $Int32 (%out_literal : $Builtin.Int32) + return %out : $Int32 diff --git a/test/IRGen/async/run-thintothick-int64-to-void.sil 
b/test/IRGen/async/run-thintothick-int64-to-void.sil new file mode 100644 index 0000000000000..580df10310168 --- /dev/null +++ b/test/IRGen/async/run-thintothick-int64-to-void.sil @@ -0,0 +1,64 @@ +// RUN: %empty-directory(%t) +// RUN: %target-build-swift-dylib(%t/%target-library-name(PrintShims)) %S/../../Inputs/print-shims.swift -module-name PrintShims -emit-module -emit-module-path %t/PrintShims.swiftmodule +// RUN: %target-codesign %t/%target-library-name(PrintShims) +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -emit-ir -I %t -L %t -lPrintShims | %FileCheck %s --check-prefix=CHECK-LL +// RUN: %target-build-swift -Xfrontend -enable-experimental-concurrency -parse-sil %s -module-name main -o %t/main -I %t -L %t -lPrintShims %target-rpath(%t) +// RUN: %target-codesign %t/main +// RUN: %target-run %t/main %t/%target-library-name(PrintShims) | %FileCheck %s + +// REQUIRES: executable_test +// REQUIRES: swift_test_mode_optimize_none +// REQUIRES: concurrency +// UNSUPPORTED: use_os_stdlib +// UNSUPPORTED: CPU=arm64e + +import Builtin +import Swift +import PrintShims +import _Concurrency + +sil public_external @printInt64 : $@convention(thin) (Int64) -> () + +// CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @afun2(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @afun2 : $@async @convention(thin) (Int64) -> () { +entry(%int : $Int64): + %print = function_ref @printInt64 : $@convention(thin) (Int64) -> () + %result = apply %print(%int) : $@convention(thin) (Int64) -> () // CHECK: 9999 + return %result : $() +} + +// CHECK-LL: define{{( dllexport)?}}{{( protected)?}} swiftcc void @test_apply_of_thin_to_thick(%swift.task* {{%[0-9]+}}, %swift.executor* {{%[0-9]+}}, %swift.context* {{%[0-9]+}}) {{#[0-9]*}} { +sil @test_apply_of_thin_to_thick : $@async @convention(thin) () -> () { +entry: + %f = function_ref @afun2 : $@async @convention(thin) (Int64) -> () + %c = thin_to_thick_function %f : $@async @convention(thin) (Int64) -> () to $@async @callee_guaranteed (Int64) -> () + %int_literal = integer_literal $Builtin.Int64, 9999 + %int = struct $Int64 (%int_literal : $Builtin.Int64) + %app = apply %c(%int) : $@async @callee_guaranteed (Int64) -> () + %result = tuple() + return %result : $() +} + +// Defined in _Concurrency +sil public_external @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + +sil @test_case : $@convention(thin) @async () -> () { + %test_apply_of_thin_to_thick = function_ref @test_apply_of_thin_to_thick : $@async @convention(thin) () -> () + %result = apply %test_apply_of_thin_to_thick() : $@async @convention(thin) () -> () + + %void = tuple() + return %void : $() +} + +sil @main : $@convention(c) (Int32, UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>) -> Int32 { +bb0(%0 : $Int32, %1 : $UnsafeMutablePointer<Optional<UnsafeMutablePointer<Int8>>>): + + %2 = function_ref @test_case : $@convention(thin) @async () -> () + %3 = thin_to_thick_function %2 : $@convention(thin) @async () -> () to $@async @callee_guaranteed () -> () + %4 = function_ref @$s12_Concurrency8runAsyncyyyyYcF : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + %5 = apply %4(%3) : $@convention(thin) (@guaranteed @async @callee_guaranteed () -> ()) -> () + + %6 = integer_literal $Builtin.Int32, 0 + %7 = struct $Int32 (%6 : $Builtin.Int32) + return %7 : $Int32 +} diff --git a/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h 
b/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h index 01a62d25ea599..73903fa03a0d3 100644 --- a/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h +++ b/test/Inputs/clang-importer-sdk/usr/include/ObjCConcurrency.h @@ -14,6 +14,10 @@ -(void)getMagicNumberAsynchronouslyWithSeed:(NSInteger)seed completionHandler:(void (^)(NSInteger, NSError * _Nullable))handler; @property(readwrite) void (^completionHandler)(NSInteger); +-(void)findMultipleAnswersWithCompletionHandler:(void (^)(NSString *_Nullable, NSInteger, NSError * _Nullable))handler __attribute__((swift_name("findMultipleAnswers(completionHandler:)"))); + +-(void)findDifferentlyFlavoredBooleansWithCompletionHandler:(void (^)(BOOL wholeMilk, _Bool onePercent, NSError *_Nullable))handler __attribute__((swift_name("findDifferentlyFlavoredBooleans(completionHandler:)"))); + -(void)doSomethingConflicted:(NSString *)operation completionHandler:(void (^)(NSInteger))handler; -(NSInteger)doSomethingConflicted:(NSString *)operation; -(void)server:(NSString *)name restartWithCompletionHandler:(void (^)(void))block; diff --git a/test/Interop/Cxx/class/Inputs/module.modulemap b/test/Interop/Cxx/class/Inputs/module.modulemap index 53ee329a23d7a..196baef7f17f3 100644 --- a/test/Interop/Cxx/class/Inputs/module.modulemap +++ b/test/Interop/Cxx/class/Inputs/module.modulemap @@ -41,3 +41,7 @@ module SynthesizedInitializers { module DebugInfo { header "debug-info.h" } + +module NestedRecords { + header "nested-records.h" +} diff --git a/test/Interop/Cxx/class/Inputs/nested-records.h b/test/Interop/Cxx/class/Inputs/nested-records.h new file mode 100644 index 0000000000000..05bcaefb8111d --- /dev/null +++ b/test/Interop/Cxx/class/Inputs/nested-records.h @@ -0,0 +1,45 @@ +struct S1 { + struct S2 { + bool A : 1; + }; +}; + +struct S3 { + struct S4 { }; +}; + +union U1 { + union U2 {}; +}; + +union U3 { + enum E1 {}; +}; + +union U4 { + struct S5 {}; +}; + +struct S6 { + enum E3 {}; +}; + +struct S7 { + union U5 { + union U6 {}; + }; +}; + +struct S8 { + struct S9 { + union U7 {}; + }; +}; + +struct S10 { + union U8 { + enum E4 {}; + }; +}; + +// TODO: Nested class templates (SR-13853). diff --git a/test/Interop/Cxx/class/Inputs/type-classification.h b/test/Interop/Cxx/class/Inputs/type-classification.h index 176e22e1701af..3b10128029ae1 100644 --- a/test/Interop/Cxx/class/Inputs/type-classification.h +++ b/test/Interop/Cxx/class/Inputs/type-classification.h @@ -112,6 +112,17 @@ struct StructWithSubobjectPrivateDefaultedDestructor { StructWithPrivateDefaultedDestructor subobject; }; +struct StructWithDeletedDestructor { + ~StructWithDeletedDestructor() = delete; +}; + +struct StructWithInheritedDeletedDestructor + : StructWithDeletedDestructor {}; + +struct StructWithSubobjectDeletedDestructor { + StructWithDeletedDestructor subobject; +}; + // Tests for common sets of special member functions. 
struct StructTriviallyCopyableMovable { diff --git a/test/Interop/Cxx/class/nested-records-module-interface.swift b/test/Interop/Cxx/class/nested-records-module-interface.swift new file mode 100644 index 0000000000000..6ea9fb42a0ee6 --- /dev/null +++ b/test/Interop/Cxx/class/nested-records-module-interface.swift @@ -0,0 +1,57 @@ +// RUN: %target-swift-ide-test -print-module -module-to-print=NestedRecords -I %S/Inputs -source-filename=x -enable-cxx-interop | %FileCheck %s + +// CHECK: struct S1 { +// CHECK: struct S2 { +// CHECK: var A: Bool +// CHECK: } +// CHECK: } + +// CHECK: struct S3 { +// CHECK: struct S4 { +// CHECK: } +// CHECK: } + +// CHECK: struct U1 { +// CHECK: struct U2 { +// CHECK: } +// CHECK: } + +// CHECK: struct U3 { +// CHECK: struct E1 : Equatable, RawRepresentable { +// CHECK: typealias RawValue = {{UInt32|Int32}} +// CHECK: } +// CHECK: } + +// CHECK: struct U4 { +// CHECK: struct S5 { +// CHECK: } +// CHECK: } + +// CHECK: struct S6 { +// CHECK: struct E3 : Equatable, RawRepresentable { +// CHECK: typealias RawValue = {{UInt32|Int32}} +// CHECK: } +// CHECK: init() +// CHECK: } + +// CHECK: struct S7 { +// CHECK: struct U5 { +// CHECK: struct U6 { +// CHECK: } +// CHECK: } +// CHECK: } + +// CHECK: struct S8 { +// CHECK: struct S9 { +// CHECK: struct U7 { +// CHECK: } +// CHECK: } +// CHECK: } + +// CHECK: struct S10 { +// CHECK: struct U8 { +// CHECK: struct E4 : Equatable, RawRepresentable { +// CHECK: typealias RawValue = {{UInt32|Int32}} +// CHECK: } +// CHECK: } +// CHECK: } diff --git a/test/Interop/Cxx/class/type-classification-loadable-silgen.swift b/test/Interop/Cxx/class/type-classification-loadable-silgen.swift index 9681aa3d9b333..c77cfdd9e47b0 100644 --- a/test/Interop/Cxx/class/type-classification-loadable-silgen.swift +++ b/test/Interop/Cxx/class/type-classification-loadable-silgen.swift @@ -142,21 +142,6 @@ func pass(s: StructWithSubobjectDefaultedDestructor) { // CHECK: bb0(%0 : $StructWithSubobjectDefaultedDestructor): } -// CHECK-LABEL: sil hidden [ossa] @$s4main4pass{{.*[ (]}}StructWithPrivateDefaultedDestructor) -func pass(s: StructWithPrivateDefaultedDestructor) { - // CHECK: bb0(%0 : $*StructWithPrivateDefaultedDestructor): -} - -// CHECK-LABEL: sil hidden [ossa] @$s4main4pass{{.*[ (]}}StructWithInheritedPrivateDefaultedDestructor) -func pass(s: StructWithInheritedPrivateDefaultedDestructor) { - // CHECK: bb0(%0 : $*StructWithInheritedPrivateDefaultedDestructor): -} - -// CHECK-LABEL: sil hidden [ossa] @$s4main4pass{{.*[ (]}}StructWithSubobjectPrivateDefaultedDestructor) -func pass(s: StructWithSubobjectPrivateDefaultedDestructor) { - // CHECK: bb0(%0 : $*StructWithSubobjectPrivateDefaultedDestructor): -} - // Tests for common sets of special member functions. 
// CHECK-LABEL: sil hidden [ossa] @$s4main4pass{{.*[ (]}}StructTriviallyCopyableMovable) @@ -173,8 +158,3 @@ func pass(s: StructNonCopyableTriviallyMovable) { func pass(s: StructNonCopyableNonMovable) { // CHECK: bb0(%0 : $*StructNonCopyableNonMovable): } - -// CHECK-LABEL: sil hidden [ossa] @$s4main4pass{{.*[ (]}}StructDeletedDestructor) -func pass(s: StructDeletedDestructor) { - // CHECK: bb0(%0 : $*StructDeletedDestructor): -} diff --git a/test/Interop/Cxx/class/type-classification-module-interface.swift b/test/Interop/Cxx/class/type-classification-module-interface.swift new file mode 100644 index 0000000000000..5945975214eeb --- /dev/null +++ b/test/Interop/Cxx/class/type-classification-module-interface.swift @@ -0,0 +1,9 @@ +// RUN: %target-swift-ide-test -print-module -module-to-print=TypeClassification -I %S/Inputs -source-filename=x -enable-cxx-interop | %FileCheck %s + +// Make sure we don't import objects that we can't destroy. +// CHECK-NOT: StructWithPrivateDefaultedDestructor +// CHECK-NOT: StructWithInheritedPrivateDefaultedDestructor +// CHECK-NOT: StructWithSubobjectPrivateDefaultedDestructor +// CHECK-NOT: StructWithDeletedDestructor +// CHECK-NOT: StructWithInheritedDeletedDestructor +// CHECK-NOT: StructWithSubobjectDeletedDestructor diff --git a/test/SIL/ownership-verifier/undef.sil b/test/SIL/ownership-verifier/undef.sil index 3b03e955776dd..3b479a59c19c3 100644 --- a/test/SIL/ownership-verifier/undef.sil +++ b/test/SIL/ownership-verifier/undef.sil @@ -12,6 +12,10 @@ struct MyInt { var i: Builtin.Int32 } +struct MyKlassWrapper { + var i: Klass +} + // Make sure that we handle undef in an appropriate way. Do to special handling // in SIL around undef, we test this using the ownership dumper for simplicity. @@ -31,7 +35,14 @@ struct MyInt { // CHECK: Visiting: {{.*}}%4 = struct $MyInt (undef : $Builtin.Int32) // CHECK-NEXT: Ownership Constraint: // CHECK-NEXT: Op #: 0 -// CHECK_NEXT: Constraint: +// CHECK-NEXT: Constraint: +// CHECK-LABEL: Visiting: {{%.*}} = struct $MyKlassWrapper (undef : $Klass) +// CHECK-NEXT: Ownership Constraint: +// CHECK-NEXT: Op #: 0 +// CHECK-NEXT: Constraint: +// CHECK-NEXT: Results Ownership Kinds: +// CHECK-NEXT: Result: {{%.*}} = struct $MyKlassWrapper (undef : $Klass) +// CHECK-NEXT: Kind: none sil [ossa] @undef_addresses_have_any_ownership : $@convention(thin) () -> () { bb0: %0 = mark_uninitialized [var] undef : $*Klass @@ -39,6 +50,7 @@ bb0: destroy_value %1 : $Klass %2 = mark_uninitialized [var] undef : $*Builtin.Int32 %3 = struct $MyInt(undef : $Builtin.Int32) + %4 = struct $MyKlassWrapper(undef : $Klass) %9999 = tuple() return %9999 : $() } diff --git a/test/SILGen/Inputs/objc_bridging_nsurl.h b/test/SILGen/Inputs/objc_bridging_nsurl.h new file mode 100644 index 0000000000000..8b97160b961d4 --- /dev/null +++ b/test/SILGen/Inputs/objc_bridging_nsurl.h @@ -0,0 +1,8 @@ + +@import Foundation; + +@interface ObjCKlass : NSObject + +@property(nonatomic, copy, nullable) NSURL *outputURL; + +@end diff --git a/test/SILGen/objc_async.swift b/test/SILGen/objc_async.swift index 94f89ad9f6e3e..c783ff6fd3fa3 100644 --- a/test/SILGen/objc_async.swift +++ b/test/SILGen/objc_async.swift @@ -1,4 +1,4 @@ -// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) -emit-silgen -I %S/Inputs/custom-modules -enable-experimental-concurrency %s -verify | %FileCheck %s +// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) -emit-silgen -I %S/Inputs/custom-modules -enable-experimental-concurrency %s -verify | %FileCheck --check-prefix=CHECK 
--check-prefix=CHECK-%target-cpu %s // REQUIRES: objc_interop import Foundation @@ -6,11 +6,98 @@ import ObjCConcurrency // CHECK-LABEL: sil {{.*}}@${{.*}}14testSlowServer func testSlowServer(slowServer: SlowServer) async throws { - // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) (Int) -> (), SlowServer) -> () + // CHECK: [[RESUME_BUF:%.*]] = alloc_stack $Int + // CHECK: [[METHOD:%.*]] = objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) (Int) -> (), SlowServer) -> () + // CHECK: [[CONT:%.*]] = get_async_continuation_addr $Int, [[RESUME_BUF]] + // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeContinuation + // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] + // CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[INT_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation, Int) -> () + // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] + // CHECK: apply [[METHOD]]({{.*}}, [[BLOCK]], %0) + // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]] + // CHECK: [[RESUME]]: + // CHECK: [[RESULT:%.*]] = load [trivial] [[RESUME_BUF]] + // CHECK: dealloc_stack [[RESUME_BUF]] let _: Int = await slowServer.doSomethingSlow("mail") - // CHECK: objc_method {{.*}} $@convention(objc_method) (@convention(block) (Optional, Optional) -> (), SlowServer) -> () - let _: String? = try await slowServer.findAnswer() + + // CHECK: [[RESUME_BUF:%.*]] = alloc_stack $String + // CHECK: [[METHOD:%.*]] = objc_method {{.*}} $@convention(objc_method) (@convention(block) (Optional, Optional) -> (), SlowServer) -> () + // CHECK: [[CONT:%.*]] = get_async_continuation_addr [throws] $String, [[RESUME_BUF]] + // CHECK: [[BLOCK_STORAGE:%.*]] = alloc_stack $@block_storage UnsafeThrowingContinuation + // CHECK: [[CONT_SLOT:%.*]] = project_block_storage [[BLOCK_STORAGE]] + // CHECK: store [[CONT]] to [trivial] [[CONT_SLOT]] + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[STRING_COMPLETION_THROW_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation, Optional, Optional) -> () + // CHECK: [[BLOCK:%.*]] = init_block_storage_header [[BLOCK_STORAGE]] {{.*}}, invoke [[BLOCK_IMPL]] + // CHECK: apply [[METHOD]]([[BLOCK]], %0) + // CHECK: await_async_continuation [[CONT]] {{.*}}, resume [[RESUME:bb[0-9]+]], error [[ERROR:bb[0-9]+]] + // CHECK: [[RESUME]]: + // CHECK: [[RESULT:%.*]] = load [take] [[RESUME_BUF]] + // CHECK: destroy_value [[RESULT]] + // CHECK: dealloc_stack [[RESUME_BUF]] + let _: String = try await slowServer.findAnswer() // CHECK: objc_method {{.*}} $@convention(objc_method) (NSString, @convention(block) () -> (), SlowServer) -> () + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[VOID_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeContinuation<()>) -> () await slowServer.serverRestart("somewhere") + + // CHECK: [[BLOCK_IMPL:%.*]] = function_ref @[[NSSTRING_INT_THROW_COMPLETION_BLOCK:.*]] : $@convention(c) (@inout_aliasable @block_storage UnsafeThrowingContinuation<(String, Int)>, Optional, Int, Optional) -> () + let (_, _): (String, Int) = try await slowServer.findMultipleAnswers() + + let (_, _): (Bool, Bool) = try await slowServer.findDifferentlyFlavoredBooleans() + + // CHECK: [[ERROR]]([[ERROR_VALUE:%.*]] : @owned $Error): + // CHECK: dealloc_stack [[RESUME_BUF]] + // CHECK: br 
[[THROWBB:bb[0-9]+]]([[ERROR_VALUE]] + // CHECK: [[THROWBB]]([[ERROR_VALUE:%.*]] : @owned $Error): + // CHECK: throw [[ERROR_VALUE]] + } + +// CHECK: sil{{.*}}@[[INT_COMPLETION_BLOCK]] +// CHECK: [[CONT_ADDR:%.*]] = project_block_storage %0 +// CHECK: [[CONT:%.*]] = load [trivial] [[CONT_ADDR]] +// CHECK: [[RESULT_BUF:%.*]] = alloc_stack $Int +// CHECK: store %1 to [trivial] [[RESULT_BUF]] +// CHECK: [[RESUME:%.*]] = function_ref @{{.*}}resumeUnsafeContinuation +// CHECK: apply [[RESUME]]([[CONT]], [[RESULT_BUF]]) + +// CHECK: sil{{.*}}@[[STRING_COMPLETION_THROW_BLOCK]] +// CHECK: [[RESUME_IN:%.*]] = copy_value %1 +// CHECK: [[ERROR_IN:%.*]] = copy_value %2 +// CHECK: [[CONT_ADDR:%.*]] = project_block_storage %0 +// CHECK: [[CONT:%.*]] = load [trivial] [[CONT_ADDR]] +// CHECK: [[ERROR_IN_B:%.*]] = begin_borrow [[ERROR_IN]] +// CHECK: switch_enum [[ERROR_IN_B]] : {{.*}}, case #Optional.some!enumelt: [[ERROR_BB:bb[0-9]+]], case #Optional.none!enumelt: [[RESUME_BB:bb[0-9]+]] +// CHECK: [[RESUME_BB]]: +// CHECK: [[RESULT_BUF:%.*]] = alloc_stack $String +// CHECK: [[BRIDGE:%.*]] = function_ref @{{.*}}unconditionallyBridgeFromObjectiveC +// CHECK: [[BRIDGED_RESULT:%.*]] = apply [[BRIDGE]]([[RESUME_IN]] +// CHECK: store [[BRIDGED_RESULT]] to [init] [[RESULT_BUF]] +// CHECK: [[RESUME:%.*]] = function_ref @{{.*}}resumeUnsafeThrowingContinuation +// CHECK: apply [[RESUME]]([[CONT]], [[RESULT_BUF]]) +// CHECK: br [[END_BB:bb[0-9]+]] +// CHECK: [[END_BB]]: +// CHECK: return +// CHECK: [[ERROR_BB]]([[ERROR_IN_UNWRAPPED:%.*]] : @guaranteed $NSError): +// CHECK: [[ERROR:%.*]] = init_existential_ref [[ERROR_IN_UNWRAPPED]] +// CHECK: [[RESUME_WITH_ERROR:%.*]] = function_ref @{{.*}}resumeUnsafeThrowingContinuationWithError +// CHECK: [[ERROR_COPY:%.*]] = copy_value [[ERROR]] +// CHECK: apply [[RESUME_WITH_ERROR]]([[CONT]], [[ERROR_COPY]]) +// CHECK: br [[END_BB]] + +// CHECK: sil {{.*}} @[[VOID_COMPLETION_BLOCK]] +// CHECK: [[CONT_ADDR:%.*]] = project_block_storage %0 +// CHECK: [[CONT:%.*]] = load [trivial] [[CONT_ADDR]] +// CHECK: [[RESULT_BUF:%.*]] = alloc_stack $() +// CHECK: [[RESUME:%.*]] = function_ref @{{.*}}resumeUnsafeContinuation +// CHECK: apply [[RESUME]]<()>([[CONT]], [[RESULT_BUF]]) + +// CHECK: sil{{.*}}@[[NSSTRING_INT_THROW_COMPLETION_BLOCK]] +// CHECK: [[RESULT_BUF:%.*]] = alloc_stack $(String, Int) +// CHECK: [[RESULT_0_BUF:%.*]] = tuple_element_addr [[RESULT_BUF]] {{.*}}, 0 +// CHECK: [[BRIDGE:%.*]] = function_ref @{{.*}}unconditionallyBridgeFromObjectiveC +// CHECK: [[BRIDGED:%.*]] = apply [[BRIDGE]] +// CHECK: store [[BRIDGED]] to [init] [[RESULT_0_BUF]] +// CHECK: [[RESULT_1_BUF:%.*]] = tuple_element_addr [[RESULT_BUF]] {{.*}}, 1 +// CHECK: store %2 to [trivial] [[RESULT_1_BUF]] diff --git a/test/SILGen/objc_bridging_url.swift b/test/SILGen/objc_bridging_url.swift new file mode 100644 index 0000000000000..3ff5743830241 --- /dev/null +++ b/test/SILGen/objc_bridging_url.swift @@ -0,0 +1,12 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-emit-silgen -import-objc-header %S/Inputs/objc_bridging_nsurl.h %s + +// REQUIRES: objc_interop + +// Make sure we do not crash on this + +protocol P { + var outputURL : URL? 
{ get set } +} + +extension ObjCKlass : P {} \ No newline at end of file diff --git a/test/SILOptimizer/licm.sil b/test/SILOptimizer/licm.sil index dcc8871611f91..378139f604990 100644 --- a/test/SILOptimizer/licm.sil +++ b/test/SILOptimizer/licm.sil @@ -634,15 +634,19 @@ struct Index { // ----------------------------------------------------------------------------- // Test combined load/store hoisting/sinking with obvious aliasing loads +// The loop contains loads and stores to the same accesspath: %3 alloc_stack -> #0 -> #0 +// However, they don't share the same projection instructions. +// LICM should still hoist the loads and sink the stores in a combined transformation. +// // CHECK-LABEL: sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 { // CHECK: bb0(%0 : $Int64): -// CHECK: store +// CHECK: store {{.*}} to %{{.*}} : $*Int64 +// CHECK: load %{{.*}} : $*Int64 +// CHECK: br bb1 // CHECK-NOT: {{(load|store)}} -// CHECK: bb1: -// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64 -// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64 -// CHECK-NEXT: cond_br +// CHECK: bb3: +// CHECK-NOT: {{(load|store)}} +// CHECK: store %{{.*}} to %{{.*}} : $*Int64 // CHECK-NOT: {{(load|store)}} // CHECK-LABEL: } // end sil function 'testCombinedLdStAliasingLoad' sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 { @@ -768,25 +772,10 @@ sil @getRange : $@convention(thin) () -> Range // CHECK-LABEL: sil shared @testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 { // CHECK: bb0(%0 : $Int64): // CHECK: store %0 to %{{.*}} : $*Int64 +// CHECK: load %{{.*}} : $*Int64 // CHECK-NOT: {{(load|store)}} -// CHECK: bb1(%{{.*}} : $Builtin.Int64): -// CHECK: builtin "sadd_with_overflow_Int64" -// CHECK: load %{{.*}} : $*Builtin.Int64 -// CHECK: builtin "sadd_with_overflow_Int64" -// CHECK: builtin "cmp_eq_Int64" -// CHECK-NEXT: cond_br -// CHECK: bb3: -// CHECK: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK: bb4: -// CHECK: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK: bb5: -// CHECK: function_ref @getRange : $@convention(thin) () -> Range -// CHECK: apply %{{.*}}() : $@convention(thin) () -> Range -// CHECK: store %{{.*}} to %{{.*}} : $*Int64 -// CHECK: bb6: -// CHECK: load %{{.*}} : $*Builtin.Int64 -// CHECK: builtin "cmp_eq_Int64" -// CHECK: cond_br +// CHECK: bb7: +// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64 // CHECK-NOT: {{(load|store)}} // CHECK-LABEL: } // end sil function 'testLICMReducedCombinedLdStExtraProjection' sil shared @testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 { @@ -933,3 +922,399 @@ bb5: %99 = tuple () return %99 : $() } + +// Test load splitting with a loop-invariant stored value. The loop +// will be empty after combined load/store hoisting/sinking. +// +// TODO: sink a struct_extract (or other non-side-effect instructions) +// with no uses in the loop. 
+// +// CHECK-LABEL: sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64 +// CHECK: [[STOREDVAL:%.*]] = struct_extract %0 : $Int64, #Int64._value +// CHECK: br bb1([[PRELOAD]] : $Int64) +// CHECK: bb1([[PHI:%.*]] : $Int64): +// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64) +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1(%0 : $Int64) +// CHECK: bb3: +// CHECK-NEXT: store %0 to %{{.*}} : $*Int64 +// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[PHI]] : $Int64, [[STOREDVAL]] : $Builtin.Int64) +// CHECK-LABEL: } // end sil function 'testLoadSplit' +sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*Index + %val2 = load %middleAddr1 : $*Int64 + %outerAddr2 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr2 = struct_element_addr %outerAddr2 : $*Index, #Index.value + store %0 to %middleAddr2 : $*Int64 + %innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value + %val3 = load %innerAddr1 : $*Builtin.Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %result = tuple (%val1 : $Index, %val2 : $Int64, %val3 : $Builtin.Int64) + return %result : $(Index, Int64, Builtin.Int64) +} + +// Test load splitting with a loop-varying stored value. +// CHECK-LABEL: sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64 +// CHECK: br bb1(%4 : $Int64) +// CHECK: bb1([[PHI:%.*]] : $Int64): +// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64) +// CHECK-NEXT: [[EXTRACT:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value +// CHECK-NEXT: builtin "uadd_with_overflow_Int32"([[EXTRACT]] : $Builtin.Int64 +// CHECK-NEXT: tuple_extract +// CHECK-NEXT: [[ADD:%.*]] = struct $Int64 +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1([[ADD]] : $Int64) +// CHECK: bb3: +// CHECK-NEXT: store [[ADD]] to %{{.*}} : $*Int64 +// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[ADD]] : $Int64, [[EXTRACT]] : $Builtin.Int64) +// CHECK-LABEL: } // end sil function 'testLoadSplitPhi' +sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value + %innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value + br bb1 + +bb1: + %outerVal = load %outerAddr1 : $*Index + %innerVal = load %innerAddr1 : $*Builtin.Int64 + %one = integer_literal $Builtin.Int64, 1 + %zero = integer_literal $Builtin.Int1, 0 + %add = builtin "uadd_with_overflow_Int32"(%innerVal : $Builtin.Int64, %one : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %inc = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0 + %outerAddr2 = pointer_to_address %1 : $Builtin.RawPointer to $*Index + %middleAddr2 = struct_element_addr %outerAddr2 : $*Index, #Index.value + %newVal = struct $Int64 (%inc : $Builtin.Int64) + store %newVal to %middleAddr2 : $*Int64 + %middleVal = 
load %middleAddr1 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %result = tuple (%outerVal : $Index, %middleVal : $Int64, %innerVal : $Builtin.Int64) + return %result : $(Index, Int64, Builtin.Int64) +} + +struct State { + @_hasStorage var valueSet: (Int64, Int64, Int64) { get set } + @_hasStorage var singleValue: Int64 { get set } +} + +// Test that we can remove a store to an individual tuple element when +// the struct containing the tuple is used within the loop. +// The optimized loop should only contain the add operation and a phi, with no memory access. +// +// CHECK-LABEL: sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State { +// CHECK: bb0(%0 : $Builtin.RawPointer): +// CHECK: [[HOISTADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0 +// ...Preload stored element #1 +// CHECK: [[PRELOADADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1 +// CHECK: [[PRELOAD:%.*]] = load [[PRELOADADR]] : $*Int64 +// ...Split element 0 +// CHECK: [[SPLIT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0 +// CHECK: [[ELT0:%.*]] = load [[SPLIT0]] : $*Int64 +// ...Split element 2 +// CHECK: [[SPLIT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2 +// CHECK: [[ELT2:%.*]] = load [[SPLIT2]] : $*Int64 +// ...Split State.singleValue +// CHECK: [[SINGLEADR:%.*]] = struct_element_addr %{{.*}} : $*State, #State.singleValue +// CHECK: [[SINGLEVAL:%.*]] = load [[SINGLEADR]] : $*Int64 +// ...Hoisted element 0 +// CHECK: [[HOISTLOAD:%.*]] = load [[HOISTADR]] : $*Int64 +// CHECK: [[HOISTVAL:%.*]] = struct_extract [[HOISTLOAD]] : $Int64, #Int64._value +// CHECK: br bb1([[PRELOAD]] : $Int64) +// ...Loop +// CHECK: bb1([[PHI:%.*]] : $Int64): +// CHECK-NEXT: [[TUPLE:%.*]] = tuple ([[ELT0]] : $Int64, [[PHI]] : $Int64, [[ELT2]] : $Int64) +// CHECK-NEXT: [[STRUCT:%.*]] = struct $State ([[TUPLE]] : $(Int64, Int64, Int64), [[SINGLEVAL]] : $Int64) +// CHECK-NEXT: [[ADDEND:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value +// CHECK-NEXT: [[UADD:%.*]] = builtin "uadd_with_overflow_Int32"([[HOISTVAL]] : $Builtin.Int64, [[ADDEND]] : $Builtin.Int64, %{{.*}} : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) +// CHECK-NEXT: [[ADDVAL:%.*]] = tuple_extract [[UADD]] : $(Builtin.Int64, Builtin.Int1), 0 +// CHECK-NEXT: [[ADDINT:%.*]] = struct $Int64 ([[ADDVAL]] : $Builtin.Int64) +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1([[ADDINT]] : $Int64) +// CHECK: bb3: +// CHECK-NEXT: store [[ADDINT]] to [[PRELOADADR]] : $*Int64 +// CHECK-NEXT: return [[STRUCT]] : $State +// CHECK-LABEL: } // end sil function 'testTupleSplit' +sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State { +bb0(%0 : $Builtin.RawPointer): + %stateAddr = pointer_to_address %0 : $Builtin.RawPointer to $*State + %tupleAddr0 = struct_element_addr %stateAddr : $*State, #State.valueSet + %elementAddr0 = tuple_element_addr %tupleAddr0 : $*(Int64, Int64, Int64), 0 + %tupleAddr1 = struct_element_addr %stateAddr : $*State, #State.valueSet + %elementAddr1 = tuple_element_addr %tupleAddr1 : $*(Int64, Int64, Int64), 1 + %tupleAddr11 = struct_element_addr %stateAddr : $*State, #State.valueSet + %elementAddr11 = tuple_element_addr %tupleAddr11 : $*(Int64, Int64, Int64), 1 + br bb1 + +bb1: + %state = load %stateAddr : $*State + %element0 = load %elementAddr0 : $*Int64 + %val0 = struct_extract %element0 : $Int64, #Int64._value + %element1 = load %elementAddr1 : $*Int64 + %val1 = struct_extract %element1 : $Int64,
#Int64._value + %zero = integer_literal $Builtin.Int1, 0 + %add = builtin "uadd_with_overflow_Int32"(%val0 : $Builtin.Int64, %val1 : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) + %addVal = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0 + %addInt = struct $Int64 (%addVal : $Builtin.Int64) + store %addInt to %elementAddr11 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %state : $State +} + +// Test multiple stores to disjoint access paths with a single load +// that spans both of them. The load should be split and hoisted and +// the stores be sunk. +// testCommonSplitLoad +// CHECK-LABEL: sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) { +// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer): +// CHECK: [[ELT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0 +// CHECK: [[V0:%.*]] = load [[ELT0]] : $*Int64 +// CHECK: [[ELT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2 +// CHECK: [[V2:%.*]] = load [[ELT2]] : $*Int64 +// CHECK: [[ELT1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1 +// CHECK: [[V1:%.*]] = load [[ELT1]] : $*Int64 +// CHECK: br bb1([[V0]] : $Int64, [[V2]] : $Int64) +// +// Nothing in this loop except phis... +// CHECK: bb1([[PHI0:%.*]] : $Int64, [[PHI2:%.*]] : $Int64): +// CHECK-NEXT: [[RESULT:%.*]] = tuple ([[PHI0]] : $Int64, [[V1]] : $Int64, [[PHI2]] : $Int64) +// CHECK-NEXT: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK-NEXT: br bb1(%0 : $Int64, %0 : $Int64) +// +// Stores are all sunk... +// CHECK: bb3: +// CHECK: store %0 to [[ELT2]] : $*Int64 +// CHECK: store %0 to [[ELT0]] : $*Int64 +// CHECK: return [[RESULT]] : $(Int64, Int64, Int64) +// CHECK-LABEL: } // end sil function 'testCommonSplitLoad' +sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, Int64, Int64) + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*(Int64, Int64, Int64) + %elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 0 + store %0 to %elementAddr0 : $*Int64 + %elementAddr2 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 2 + store %0 to %elementAddr2 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %val1 : $(Int64, Int64, Int64) +} + +// Two stores, one to the outer tuple and one to the inner tuple. This +// results in two access paths that are only loaded/stored to. First +// split the outer tuple when processing the outer access path, then +// the inner tuple when processing the inner access path. All loads +// should be hoisted and all stores should be sunk.
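A rough Swift-level shape of the pattern `testResplit` below checks (hypothetical code, not part of the test suite; `churn` is an invented name): one wide read of the whole value plus narrow writes into the outer and inner tuple elements, which the pass splits so that only register phis remain in the loop.

```swift
// The SIL test operates on a (Int64, (Int64, Int64)) behind a raw pointer
// in essentially this way.
func churn(_ p: UnsafeMutablePointer<(Int, (Int, Int))>,
           _ x: Int,
           _ again: () -> Bool) -> (Int, (Int, Int)) {
    var wide = p.pointee       // wide load spanning both access paths
    repeat {
        wide = p.pointee       // reloaded every iteration before splitting
        p.pointee.0 = x        // store to the outer tuple element
        p.pointee.1.0 = x      // store into the nested tuple
    } while again()
    return wide
}
```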
+// +// CHECK-LABEL: sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer): +// CHECK: [[ELT_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 0 +// CHECK: [[V0:%.*]] = load [[ELT_0]] : $*Int64 +// CHECK: [[ELT_1a:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1 +// CHECK: [[ELT_1_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 0 +// CHECK: [[V_1_0:%.*]] = load [[ELT_1_0]] : $*Int64 +// CHECK: [[ELT_1b:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1 +// CHECK: [[ELT_1_1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 1 +// CHECK: [[V_1_1:%.*]] = load [[ELT_1_1]] : $*Int64 +// CHECK: br bb1([[V_0:%.*]] : $Int64, [[V_1_0]] : $Int64) +// +// Nothing in this loop except phis and tuple reconstruction... +// CHECK: bb1([[PHI_0:%.*]] : $Int64, [[PHI_1_0:%.*]] : $Int64): +// CHECK: [[INNER:%.*]] = tuple ([[PHI_1_0]] : $Int64, [[V_1_1]] : $Int64) +// CHECK: [[OUTER:%.*]] = tuple ([[PHI_0]] : $Int64, [[INNER]] : $(Int64, Int64)) +// CHECK: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK: br bb1(%0 : $Int64, %0 : $Int64) +// +// The two stores are sunk... +// CHECK: bb3: +// CHECK: store %0 to [[ELT_1_0]] : $*Int64 +// CHECK: store %0 to [[ELT_0]] : $*Int64 +// CHECK: return [[OUTER]] : $(Int64, (Int64, Int64)) +// CHECK-LABEL: } // end sil function 'testResplit' +sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*(Int64, (Int64, Int64)) + %elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 0 + store %0 to %elementAddr0 : $*Int64 + %elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0 + store %0 to %elementAddr10 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %val1 : $(Int64, (Int64, Int64)) +} + +// Two stores to overlapping access paths. Combined load/store hoisting +// cannot currently handle stores to overlapping access paths, so +// nothing is optimized.
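For contrast, a hypothetical source-level sketch of the overlap that defeats the optimization in `testTwoStores` below: the second store's access path lies inside the first's, so neither store can be sunk past the other without reasoning about partial overwrites.

```swift
func overlappingStores(_ p: UnsafeMutablePointer<(Int, (Int, Int))>,
                       _ a: Int, _ b: Int,
                       _ again: () -> Bool) -> (Int, (Int, Int)) {
    var wide = p.pointee
    repeat {
        wide = p.pointee
        p.pointee.1 = (a, b)   // store covering the whole inner tuple
        p.pointee.1.0 = b      // overlapping store into its first element
    } while again()
    return wide
}
```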
+// CHECK-LABEL: sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +// CHECK-LABEL: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer): +// CHECK-NOT: load +// CHECK: br bb1 +// CHECK: bb1: +// CHECK: load %{{.*}} : $*(Int64, (Int64, Int64)) +// CHECK: store {{.*}} : $*(Int64, Int64) +// CHECK: store {{.*}} : $*Int64 +// CHECK: cond_br undef, bb2, bb3 +// CHECK-NOT: store +// CHECK-LABEL: } // end sil function 'testTwoStores' +sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +bb0(%0 : $Int64, %1: $Int64, %2 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %val1 = load %outerAddr1 : $*(Int64, (Int64, Int64)) + %elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %tuple = tuple (%0 : $Int64, %1: $Int64) + store %tuple to %elementAddr1 : $*(Int64, Int64) + %elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0 + store %1 to %elementAddr10 : $*Int64 + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + return %val1 : $(Int64, (Int64, Int64)) +} + +// Two wide loads. The first can be successfully split and the second +// half hoisted. The second cannot be split because of a pointer +// cast. Make sure two remaining loads and the store are still in the loop. +// +// CHECK-LABEL: sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) { +// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer): +// +// The first load was split, so one half is hoisted. +// CHECK: [[V1:%.*]] = load %{{.*}} : $*Int64 +// CHECK: br bb1 +// CHECK: bb1: +// CHECK: [[V0:%.*]] = load %{{.*}} : $*Int64 +// CHECK: [[INNER:%.*]] = tuple ([[V0]] : $Int64, [[V1]] : $Int64) +// CHECK: store %0 to %{{.*}} : $*Int64 +// CHECK: [[OUTER:%.*]] = load %{{.*}} : $*(Int64, (Int64, Int64)) +// CHECK: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK: br bb1 +// CHECK: bb3: +// CHECK: [[RESULT:%.*]] = tuple ([[OUTER]] : $(Int64, (Int64, Int64)), [[INNER]] : $(Int64, Int64)) +// CHECK: return [[RESULT]] : $((Int64, (Int64, Int64)), (Int64, Int64)) +// CHECK-LABEL: } // end sil function 'testSplitNonStandardProjection' +sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) { +bb0(%0 : $Int64, %1 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %elt1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %ptr = address_to_pointer %elt1 : $*(Int64, Int64) to $Builtin.RawPointer + %ptrAdr = pointer_to_address %ptr : $Builtin.RawPointer to [strict] $*(Int64, Int64) + %val2 = load %ptrAdr : $*(Int64, Int64) + %eltptr0 = tuple_element_addr %ptrAdr : $*(Int64, Int64), 0 + store %0 to %eltptr0 : $*Int64 + // Process the outermost load after splitting the inner load + %val1 = load %outerAddr1 : $*(Int64, (Int64, Int64)) + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %result = tuple (%val1 : $(Int64, (Int64, Int64)), %val2 : $(Int64, Int64)) + return %result : $((Int64, (Int64, Int64)), (Int64, Int64)) +} + +// CHECK-LABEL: sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +// CHECK: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer): +// CHECK: [[ELT_1:%.*]] = 
tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 1 +// CHECK: [[V1:%.*]] = load %4 : $*(Int64, Int64) +// CHECK: [[ELT_0:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 0 +// CHECK: [[V0:%.*]] = load %6 : $*Int64 +// CHECK: [[ARG0:%.*]] = tuple (%0 : $Int64, %0 : $Int64) +// CHECK: [[ARG0_0:%.*]] = tuple_extract %8 : $(Int64, Int64), 0 +// CHECK: [[ARG1:%.*]] = tuple (%1 : $Int64, %1 : $Int64) +// CHECK: br bb1([[V1]] : $(Int64, Int64)) +// CHECK: bb1([[PHI:%.*]] : $(Int64, Int64)): +// CHECK: [[LOOPVAL:%.*]] = tuple ([[V0]] : $Int64, [[PHI]] : $(Int64, Int64)) +// CHECK: cond_br undef, bb2, bb3 +// CHECK: bb2: +// CHECK: br bb1([[ARG1]] : $(Int64, Int64)) +// CHECK: bb3: +// CHECK: store [[ARG1]] to [[ELT_1]] : $*(Int64, Int64) +// CHECK: [[EXTRACT0:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 0 +// CHECK: [[EXTRACT1:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 1 +// CHECK: [[EXTRACT1_1:%.*]] = tuple_extract [[EXTRACT1]] : $(Int64, Int64), 1 +// CHECK: [[TUPLE1:%.*]] = tuple ([[ARG0_0]] : $Int64, [[EXTRACT1_1]] : $Int64) +// CHECK: [[RESULT:%.*]] = tuple ([[EXTRACT0]] : $Int64, [[TUPLE1]] : $(Int64, Int64)) +// CHECK: return [[RESULT]] : $(Int64, (Int64, Int64)) +// CHECK-LABEL: } // end sil function 'testSameTwoStores' +sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) { +bb0(%0 : $Int64, %1: $Int64, %2 : $Builtin.RawPointer): + %outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64)) + br bb1 + +bb1: + %val = load %outerAddr1 : $*(Int64, (Int64, Int64)) + %elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1 + %tupleA = tuple (%0 : $Int64, %0: $Int64) + store %tupleA to %elementAddr1 : $*(Int64, Int64) + %elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0 + %val10 = load %elementAddr10 : $*Int64 + %tupleB = tuple (%1 : $Int64, %1: $Int64) + store %tupleB to %elementAddr1 : $*(Int64, Int64) + cond_br undef, bb2, bb3 + +bb2: + br bb1 + +bb3: + %extract0 = tuple_extract %val : $(Int64, (Int64, Int64)), 0 + %extract1 = tuple_extract %val : $(Int64, (Int64, Int64)), 1 + %extract11 = tuple_extract %extract1 : $(Int64, Int64), 1 + %inner = tuple (%val10 : $Int64, %extract11: $Int64) + %outer = tuple (%extract0 : $Int64, %inner: $(Int64, Int64)) + return %outer : $(Int64, (Int64, Int64)) +} diff --git a/test/Sema/conformance_availability.swift b/test/Sema/conformance_availability.swift index 61dd90e25d207..4a255cb60d90e 100644 --- a/test/Sema/conformance_availability.swift +++ b/test/Sema/conformance_availability.swift @@ -216,20 +216,20 @@ func passAvailableConformance1a(x: HasAvailableConformance1) { // Associated conformance with unavailability protocol Rider { - associatedtype H : Horse + associatedtype H : Horse } struct AssocConformanceUnavailable : Rider { // expected-error@-1 {{conformance of 'HasUnavailableConformance1' to 'Horse' is unavailable}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasUnavailableConformance1')}} - typealias H = HasUnavailableConformance1 + typealias H = HasUnavailableConformance1 } // Associated conformance with deprecation struct AssocConformanceDeprecated : Rider { // expected-warning@-1 {{conformance of 'HasDeprecatedConformance1' to 'Horse' is deprecated}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasDeprecatedConformance1')}} - typealias H = HasDeprecatedConformance1 + typealias H = HasDeprecatedConformance1 } // Associated 
conformance with availability @@ -237,12 +237,12 @@ struct AssocConformanceAvailable1 : Rider { // expected-error@-1 {{conformance of 'HasAvailableConformance1' to 'Horse' is only available in macOS 100 or newer}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasAvailableConformance1')}} // expected-note@-3 {{add @available attribute to enclosing struct}} - typealias H = HasAvailableConformance1 + typealias H = HasAvailableConformance1 } @available(macOS 100, *) struct AssocConformanceAvailable2 : Rider { - typealias H = HasAvailableConformance1 + typealias H = HasAvailableConformance1 } struct AssocConformanceAvailable3 {} @@ -251,12 +251,74 @@ extension AssocConformanceAvailable3 : Rider { // expected-error@-1 {{conformance of 'HasAvailableConformance1' to 'Horse' is only available in macOS 100 or newer}} // expected-note@-2 {{in associated type 'Self.H' (inferred as 'HasAvailableConformance1')}} // expected-note@-3 {{add @available attribute to enclosing extension}} - typealias H = HasAvailableConformance1 + typealias H = HasAvailableConformance1 } struct AssocConformanceAvailable4 {} @available(macOS 100, *) extension AssocConformanceAvailable4 : Rider { - typealias H = HasAvailableConformance1 -} \ No newline at end of file + typealias H = HasAvailableConformance1 +} + +// Solution ranking should down-rank solutions involving unavailable conformances +protocol First {} +extension First { + func doStuff<T>(_: T) -> Bool {} +} + +protocol Second {} +extension Second { + func doStuff(_: Int) -> Int {} +} + +struct ConformingType1 {} + +extension ConformingType1 : First {} + +@available(macOS 100, *) +extension ConformingType1 : Second {} + +func usesConformingType1(_ c: ConformingType1) { + // We should pick First.doStuff() here, since Second.doStuff() is unavailable + let result = c.doStuff(123) + let _: Bool = result +} + +@available(macOS 100, *) +func usesConformingType1a(_ c: ConformingType1) { + // We should pick Second.doStuff() here, since it is more specialized than + // First.doStuff() + let result = c.doStuff(123) + let _: Int = result +} + +// Same as above but unconditionally unavailable +struct ConformingType2 {} + +extension ConformingType2 : First {} + +@available(*, unavailable) +extension ConformingType2 : Second {} + +func usesConformingType2(_ c: ConformingType2) { + // We should pick First.doStuff() here, since Second.doStuff() is unavailable + let result = c.doStuff(123) + let _: Bool = result +} + +// Make sure this also works for synthesized conformances +struct UnavailableHashable { + let x: Int + let y: Int +} + +@available(macOS 100, *) +extension UnavailableHashable : Hashable {} + +func usesUnavailableHashable(_ c: UnavailableHashable) { + // expected-note@-1 2 {{add @available attribute to enclosing global function}} + _ = Set([c]) + // expected-error@-1 2 {{conformance of 'UnavailableHashable' to 'Hashable' is only available in macOS 100 or newer}} + // expected-note@-2 2 {{add 'if #available' version check}} +} diff --git a/test/api-digester/stability-stdlib-abi-with-asserts.test b/test/api-digester/stability-stdlib-abi-with-asserts.test index c75b63b7b1a4c..bab3e70ca02f5 100644 --- a/test/api-digester/stability-stdlib-abi-with-asserts.test +++ b/test/api-digester/stability-stdlib-abi-with-asserts.test @@ -44,5 +44,3 @@ Protocol _RuntimeFunctionCountersStats is a new API without @available attribute Struct _GlobalRuntimeFunctionCountersState is a new API without @available attribute Struct _ObjectRuntimeFunctionCountersState is a new API
without @available attribute Struct _RuntimeFunctionCounters is a new API without @available attribute -Func _swift_isImmutableCOWBuffer(_:) is a new API without @available attribute -Func _swift_setImmutableCOWBuffer(_:_:) is a new API without @available attribute diff --git a/test/stdlib/KeyPath.swift b/test/stdlib/KeyPath.swift index 7d3359f53a93c..672ddcc0167c3 100644 --- a/test/stdlib/KeyPath.swift +++ b/test/stdlib/KeyPath.swift @@ -1030,5 +1030,20 @@ keyPath.test("tail allocated c array") { expectEqual(4, offset) } +keyPath.test("ReferenceWritableKeyPath statically typed as WritableKeyPath") { + let inner = C<Int>(x: 42, y: nil, z: 43) + var outer = C<C<Int>>(x: 44, y: nil, z: inner) + let keyPath = \C<C<Int>>.z.x + let upcastKeyPath = keyPath as WritableKeyPath<C<C<Int>>, Int> + + expectEqual(outer[keyPath: keyPath], 42) + outer[keyPath: keyPath] = 43 + expectEqual(outer[keyPath: keyPath], 43) + + expectEqual(outer[keyPath: upcastKeyPath], 43) + outer[keyPath: upcastKeyPath] = 44 + expectEqual(outer[keyPath: upcastKeyPath], 44) +} + runAllTests() diff --git a/unittests/runtime/CMakeLists.txt b/unittests/runtime/CMakeLists.txt index 5070c96876713..4ed374290c03b 100644 --- a/unittests/runtime/CMakeLists.txt +++ b/unittests/runtime/CMakeLists.txt @@ -55,6 +55,7 @@ if(("${SWIFT_HOST_VARIANT_SDK}" STREQUAL "${SWIFT_PRIMARY_VARIANT_SDK}") AND if(SWIFT_ENABLE_EXPERIMENTAL_CONCURRENCY) list(APPEND PLATFORM_SOURCES + TaskFuture.cpp TaskStatus.cpp ) list(APPEND PLATFORM_TARGET_LINK_LIBRARIES @@ -66,6 +67,7 @@ if(("${SWIFT_HOST_VARIANT_SDK}" STREQUAL "${SWIFT_PRIMARY_VARIANT_SDK}") AND set(LLVM_OPTIONAL_SOURCES weak.mm Refcounting.mm + TaskFuture.cpp TaskStatus.cpp) add_swift_unittest(SwiftRuntimeTests diff --git a/unittests/runtime/TaskFuture.cpp b/unittests/runtime/TaskFuture.cpp new file mode 100644 index 0000000000000..a4d004caf8bed --- /dev/null +++ b/unittests/runtime/TaskFuture.cpp @@ -0,0 +1,165 @@ +//===--- TaskFuture.cpp - Unit tests for the task futures API -------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "swift/Runtime/Concurrency.h" +#include "swift/Runtime/Metadata.h" +#include "swift/Demangling/ManglingMacros.h" +#include "swift/Basic/STLExtras.h" +#include "gtest/gtest.h" + +using namespace swift; + +namespace { +template <class Storage> struct FutureContext; + +template <class Storage> +using InvokeFunctionRef = + llvm::function_ref<void(AsyncTask *task, ExecutorRef executor, FutureContext<Storage> *context)>; + +using BodyFunctionRef = + llvm::function_ref<void(AsyncTask *task)>; + +template <class Storage> struct FutureContext : FutureAsyncContext { + InvokeFunctionRef<Storage> storedInvokeFn; + + Storage& getStorage() { + return *reinterpret_cast<Storage*>(this->indirectResult); + } +}; + +// Disable template argument deduction. +template <class T> +using undeduced = + typename std::enable_if<std::is_same<T, T>::value, T>::type; + +template <class Storage> +SWIFT_CC(swift) +static void futureTaskInvokeFunction(AsyncTask *task, ExecutorRef executor, + AsyncContext *context) { + auto futureContext = static_cast<FutureContext<Storage>*>(context); + futureContext->storedInvokeFn(task, executor, futureContext); + + // Return to finish off the task. + // In a normal situation we'd need to free the context, but here + // we know we're at the top level.
+ futureContext->ResumeParent(task, executor, futureContext); +} + +template <class T> +static void withFutureTask(const Metadata *resultType, + const T& initialValue, + undeduced<InvokeFunctionRef<T>> invokeFn, + BodyFunctionRef body) { + JobFlags flags = JobKind::Task; + flags.task_setIsFuture(true); + + auto taskAndContext = + swift_task_create_future_f(flags, /*parent*/ nullptr, resultType, + &futureTaskInvokeFunction<T>, + sizeof(FutureContext<T>)); + + auto futureContext = + static_cast<FutureContext<T>*>(taskAndContext.InitialContext); + futureContext->getStorage() = initialValue; // Seed the future's storage. + futureContext->storedInvokeFn = invokeFn; + + // Forward our owning reference to the task into its execution, + // causing it to be destroyed when it completes. + body(taskAndContext.Task); +} + +static ExecutorRef createFakeExecutor(uintptr_t value) { + return {reinterpret_cast<Executor*>(value)}; +} +} + +extern const FullMetadata METADATA_SYM(Si); + +struct TestObject : HeapObject { + constexpr TestObject(HeapMetadata const *newMetadata) + : HeapObject(newMetadata, InlineRefCounts::Immortal) + , Addr(NULL), Value(0) {} + + size_t *Addr; + size_t Value; +}; + +static SWIFT_CC(swift) void destroyTestObject(SWIFT_CONTEXT HeapObject *_object) { + auto object = static_cast<TestObject*>(_object); + assert(object->Addr && "object already deallocated"); + *object->Addr = object->Value; + object->Addr = nullptr; + swift_deallocObject(object, sizeof(TestObject), alignof(TestObject) - 1); +} + +static const FullMetadata<ClassMetadata> TestClassObjectMetadata = { + { { &destroyTestObject }, { &VALUE_WITNESS_SYM(Bo) } }, + { { nullptr }, ClassFlags::UsesSwiftRefcounting, 0, 0, 0, 0, 0, 0 } +}; + +/// Create an object that, when deallocated, stores the given value to +/// the given pointer. +static TestObject *allocTestObject(size_t *addr, size_t value) { + auto result = + static_cast<TestObject*>(swift_allocObject(&TestClassObjectMetadata, + sizeof(TestObject), + alignof(TestObject) - 1)); + result->Addr = addr; + result->Value = value; + return result; +} + +TEST(TaskFutureTest, objectFuture) { + auto createdExecutor = createFakeExecutor(1234); + bool hasRun = false; + + size_t objectValueOnComplete = 7; + TestObject *object = nullptr; + withFutureTask<TestObject*>( + &TestClassObjectMetadata, nullptr, + [&](AsyncTask *task, ExecutorRef executor, + FutureContext<TestObject*> *context) { + object = allocTestObject(&objectValueOnComplete, 25); + + // The error storage should have been cleared out for us. + EXPECT_EQ(nullptr, context->errorResult); + + // Store the object in the future. + context->getStorage() = object; + + hasRun = true; + }, [&](AsyncTask *task) { + // Retain the task, so it won't be destroyed when it is executed. + swift_retain(task); + + // Run the task, which should fill in the future. + EXPECT_FALSE(hasRun); + task->run(createdExecutor); + EXPECT_TRUE(hasRun); + + // "Wait" for the future, which must have completed by now. + auto waitResult = swift_task_future_wait(task, nullptr); + EXPECT_EQ(TaskFutureWaitResult::Success, waitResult.kind); + + // Make sure we got the result value we expect. + EXPECT_EQ(object, *reinterpret_cast<TestObject**>(waitResult.storage)); + + // Make sure the object hasn't been destroyed. + EXPECT_EQ(size_t(7), objectValueOnComplete); + + // Okay, release the task. This should destroy the object.
+ swift_release(task); + assert(objectValueOnComplete == 25); + }); +} diff --git a/utils/build-script b/utils/build-script index aface1a6a5668..09dc3c79537e7 100755 --- a/utils/build-script +++ b/utils/build-script @@ -352,6 +352,9 @@ def apply_default_arguments(toolchain, args): elif args.android_arch == "aarch64": args.stdlib_deployment_targets.append( StdlibDeploymentTarget.Android.aarch64.name) + elif args.android_arch == "x86_64": + args.stdlib_deployment_targets.append( + StdlibDeploymentTarget.Android.x86_64.name) if args.wasm: args.stdlib_deployment_targets.append( StdlibDeploymentTarget.WASI.wasm32.name) diff --git a/utils/build-script-impl b/utils/build-script-impl index 2376ef7e955ad..ee12934ec9cec 100755 --- a/utils/build-script-impl +++ b/utils/build-script-impl @@ -419,6 +419,7 @@ function verify_host_is_supported() { | watchos-armv7k \ | android-armv7 \ | android-aarch64 \ + | android-x86_64 \ | wasi-wasm32) ;; *) @@ -463,6 +464,10 @@ function set_build_options_for_host() { SWIFT_HOST_TRIPLE="armv7-unknown-linux-androideabi" llvm_target_arch="ARM" ;; + android-x86_64) + SWIFT_HOST_TRIPLE="x86_64-unknown-linux-android${ANDROID_API_LEVEL}" + llvm_target_arch="X86" + ;; linux-armv6) SWIFT_HOST_TRIPLE="armv6-unknown-linux-gnueabihf" llvm_target_arch="ARM" @@ -2166,6 +2171,7 @@ for host in "${ALL_HOSTS[@]}"; do -DCMAKE_CXX_COMPILER:PATH="${CLANG_BIN}/clang++" -DCMAKE_INSTALL_PREFIX:PATH="$(get_host_install_prefix ${host})" -DCMAKE_Swift_COMPILER:PATH=${SWIFTC_BIN} + -DCMAKE_Swift_FLAGS:STRING="-module-cache-path \"${module_cache}\"" -DLLBUILD_ENABLE_ASSERTIONS:BOOL=$(true_false "${LLBUILD_ENABLE_ASSERTIONS}") -DLLBUILD_SUPPORT_BINDINGS:=Swift @@ -2238,6 +2244,7 @@ for host in "${ALL_HOSTS[@]}"; do -DCMAKE_C_COMPILER:PATH="${CLANG_BIN}/clang" -DCMAKE_CXX_COMPILER:PATH="${CLANG_BIN}/clang++" -DCMAKE_Swift_COMPILER:PATH=${SWIFTC_BIN} + -DCMAKE_Swift_FLAGS:STRING="-module-cache-path \"${module_cache}\"" -DCMAKE_INSTALL_PREFIX:PATH="$(get_host_install_prefix ${host})" -DCMAKE_INSTALL_LIBDIR:PATH="lib" @@ -2306,6 +2313,7 @@ for host in "${ALL_HOSTS[@]}"; do -DCMAKE_CXX_COMPILER:PATH=${CLANG_BIN}/clang++ -DCMAKE_SWIFT_COMPILER:PATH=${SWIFTC_BIN} -DCMAKE_Swift_COMPILER:PATH=${SWIFTC_BIN} + -DCMAKE_Swift_FLAGS:STRING="-module-cache-path \"${module_cache}\"" -DCMAKE_INSTALL_PREFIX:PATH=$(get_host_install_prefix ${host}) ${LIBICU_BUILD_ARGS[@]} diff --git a/utils/build_swift/build_swift/driver_arguments.py b/utils/build_swift/build_swift/driver_arguments.py index 1a7033c124f00..3838c9463f06f 100644 --- a/utils/build_swift/build_swift/driver_arguments.py +++ b/utils/build_swift/build_swift/driver_arguments.py @@ -1114,10 +1114,10 @@ def create_argument_parser(): android.adb.commands.DEVICE_TEMP_DIR)) option('--android-arch', store, - choices=['armv7', 'aarch64'], + choices=['armv7', 'aarch64', 'x86_64'], default='armv7', - help='The Android target architecture when building for Android. ' - 'Currently only armv7 and aarch64 are supported. ' + help='The target architecture when building for Android. ' + 'Currently, only armv7, aarch64, and x86_64 are supported. 
' '%(default)s is the default.') # ------------------------------------------------------------------------- diff --git a/utils/swift_build_support/swift_build_support/targets.py b/utils/swift_build_support/swift_build_support/targets.py index 4438b83157389..db92d053d99bf 100644 --- a/utils/swift_build_support/swift_build_support/targets.py +++ b/utils/swift_build_support/swift_build_support/targets.py @@ -201,7 +201,7 @@ class StdlibDeploymentTarget(object): Cygwin = Platform("cygwin", archs=["x86_64"]) - Android = AndroidPlatform("android", archs=["armv7", "aarch64"]) + Android = AndroidPlatform("android", archs=["armv7", "aarch64", "x86_64"]) Windows = Platform("windows", archs=["x86_64"]) diff --git a/validation-test/compiler_crashers_2_fixed/rdar71260862.swift b/validation-test/compiler_crashers_2_fixed/rdar71260862.swift new file mode 100644 index 0000000000000..ee04cece7eccc --- /dev/null +++ b/validation-test/compiler_crashers_2_fixed/rdar71260862.swift @@ -0,0 +1,5 @@ +// RUN: %target-swift-frontend %s -emit-ir -enable-library-evolution -enable-experimental-concurrency + +public class X { + public func f() async { } +} diff --git a/validation-test/compiler_crashers_2_fixed/rdar71260972.sil b/validation-test/compiler_crashers_2_fixed/rdar71260972.sil new file mode 100644 index 0000000000000..a50bf5705aee6 --- /dev/null +++ b/validation-test/compiler_crashers_2_fixed/rdar71260972.sil @@ -0,0 +1,26 @@ +// RUN: %target-swift-frontend %s -emit-ir -enable-experimental-concurrency -parse-sil + +import Swift + +struct Pack { + public let a: Bool + public let b: Bool + public let c: Bool + public let d: Bool + public let e: Bool +} + +public struct Strukt { + public let rawValue: Int +} + +class Clazz { } + +sil_vtable Clazz { +} + +sil @$foo : $@convention(method) @async (@guaranteed Pack, @guaranteed Array<Strukt>, @guaranteed Clazz) -> () { +bb0(%0 : $Pack, %1 : $Array<Strukt>, %2 : $Clazz): + %result = tuple () + return %result : $() +}