From ecd6d4ddec3db05b817ee40664e735153ac035b1 Mon Sep 17 00:00:00 2001
From: Mike Ash
Date: Thu, 30 Jul 2020 12:13:20 -0400
Subject: [PATCH] Add a new ConcurrentReadableHashMap type. Switch the
 protocol conformance cache to use it.

ConcurrentReadableHashMap is lock-free for readers, with writers using a lock
to ensure mutual exclusion amongst each other. The intent is to eventually
replace all uses of ConcurrentMap with ConcurrentReadableHashMap.

ConcurrentReadableHashMap provides relatively quick lookups by using a hash
table. Readers perform an atomic increment/decrement in order to inform
writers that there are active readers. The design attempts to minimize wasted
memory by storing the actual elements out-of-line, and having the table store
indices into a separate array of elements.

The protocol conformance cache now uses ConcurrentReadableHashMap, which
provides faster lookups and less memory use than the previous ConcurrentMap
implementation.

The previous implementation cached ProtocolConformanceDescriptors and
extracted the WitnessTable after the cache lookup. The new implementation
directly caches the WitnessTable, removing an extra (and potentially quite
slow) step from the fast path.

The previous implementation used a generational scheme to detect when negative
cache entries became obsolete due to new dynamic libraries being loaded, and
updated them in place. The new implementation just clears the entire cache
when libraries are loaded, greatly simplifying the code and saving the memory
needed to track the current generation in each negative cache entry. This
means we need to re-cache all requested conformances after loading a dynamic
library, but loading libraries at runtime is rare and slow anyway.

rdar://problem/67268325
---
 include/swift/Reflection/ReflectionContext.h  |  51 ++-
 include/swift/Reflection/RuntimeInternals.h   |  14 +
 include/swift/Runtime/Concurrent.h            | 387 +++++++++++++++++-
 .../public/runtime/CompatibilityOverride.def  |   7 -
 stdlib/public/runtime/ProtocolConformance.cpp | 306 ++++----------
 .../Compatibility50/CompatibilityOverride.def | 226 ++++++++++
 .../Compatibility50/CompatibilityOverride.h   |  61 +++
 .../toolchain/Compatibility50/Overrides.cpp   |   4 +-
 .../Compatibility51/CompatibilityOverride.def | 226 ++++++++++
 .../Compatibility51/CompatibilityOverride.h   |  61 +++
 .../toolchain/Compatibility51/Overrides.cpp   |   4 +-
 .../Sources/swift-inspect/Inspector.swift     |  24 +-
 unittests/runtime/CompatibilityOverride.cpp   |   5 -
 unittests/runtime/Concurrent.cpp              | 345 ++++++++++++++++
 14 files changed, 1483 insertions(+), 238 deletions(-)
 create mode 100644 stdlib/toolchain/Compatibility50/CompatibilityOverride.def
 create mode 100644 stdlib/toolchain/Compatibility50/CompatibilityOverride.h
 create mode 100644 stdlib/toolchain/Compatibility51/CompatibilityOverride.def
 create mode 100644 stdlib/toolchain/Compatibility51/CompatibilityOverride.h

diff --git a/include/swift/Reflection/ReflectionContext.h b/include/swift/Reflection/ReflectionContext.h
index 0ff6c8b44436f..6fc3df0882f5f 100644
--- a/include/swift/Reflection/ReflectionContext.h
+++ b/include/swift/Reflection/ReflectionContext.h
@@ -879,7 +880,8 @@ class ReflectionContext
                               std::function Call) {
     if (!NodePtr)
       return;
-    auto NodeBytes = getReader().readBytes(RemoteAddress(NodePtr), sizeof(Node));
+    auto NodeBytes = getReader().readBytes(RemoteAddress(NodePtr),
+                                           sizeof(ConformanceNode));
     auto NodeData =
       reinterpret_cast *>(NodeBytes.get());
     if (!NodeData)
       return;
@@ -889,6 +890,33 @@ 
iterateConformanceTree(NodeData->Right, Call); } + void IterateConformanceTable( + RemoteAddress ConformancesPtr, + std::function Call) { + auto MapBytes = getReader().readBytes(RemoteAddress(ConformancesPtr), + sizeof(ConcurrentHashMap)); + auto MapData = + reinterpret_cast *>(MapBytes.get()); + if (!MapData) + return; + + auto Count = MapData->ElementCount; + auto Size = Count * sizeof(ConformanceCacheEntry); + + auto ElementsBytes = + getReader().readBytes(RemoteAddress(MapData->Elements), Size); + auto ElementsData = + reinterpret_cast *>( + ElementsBytes.get()); + if (!ElementsData) + return; + + for (StoredSize i = 0; i < Count; i++) { + auto &Element = ElementsData[i]; + Call(Element.Type, Element.Proto); + } + } + /// Iterate the protocol conformance cache in the target process, calling Call /// with the type and protocol of each conformance. Returns None on success, /// and a string describing the error on failure. @@ -908,7 +936,26 @@ class ReflectionContext auto Root = getReader().readPointer(ConformancesAddr->getResolvedAddress(), sizeof(StoredPointer)); - iterateConformanceTree(Root->getResolvedAddress().getAddressData(), Call); + auto ReaderCount = Root->getResolvedAddress().getAddressData(); + + // ReaderCount will be the root pointer if the conformance cache is a + // ConcurrentMap. It's very unlikely that there would ever be more readers + // than the least valid pointer value, so compare with that to distinguish. + // TODO: once the old conformance cache is gone for good, remove that code. + uint64_t LeastValidPointerValue; + if (!getReader().queryDataLayout( + DataLayoutQueryType::DLQ_GetLeastValidPointerValue, nullptr, + &LeastValidPointerValue)) { + return std::string("unable to query least valid pointer value"); + } + + if (ReaderCount < LeastValidPointerValue) + IterateConformanceTable(ConformancesAddr->getResolvedAddress(), Call); + else { + // The old code has the root address at this location. + auto RootAddr = ReaderCount; + iterateConformanceTree(RootAddr, Call); + } return llvm::None; } diff --git a/include/swift/Reflection/RuntimeInternals.h b/include/swift/Reflection/RuntimeInternals.h index 11b6027242aaf..30ec8827c8515 100644 --- a/include/swift/Reflection/RuntimeInternals.h +++ b/include/swift/Reflection/RuntimeInternals.h @@ -46,6 +46,20 @@ template struct MetadataCacheNode { typename Runtime::StoredPointer Right; }; +template struct ConcurrentHashMap { + typename Runtime::StoredSize ReaderCount; + typename Runtime::StoredSize ElementCount; + typename Runtime::StoredPointer Elements; + typename Runtime::StoredPointer Indices; + // We'll ignore the remaining fields for now.... +}; + +template struct ConformanceCacheEntry { + typename Runtime::StoredPointer Type; + typename Runtime::StoredPointer Proto; + typename Runtime::StoredPointer Witness; +}; + } // end namespace reflection } // end namespace swift diff --git a/include/swift/Runtime/Concurrent.h b/include/swift/Runtime/Concurrent.h index 7e4ed718b131b..17339f6503533 100644 --- a/include/swift/Runtime/Concurrent.h +++ b/include/swift/Runtime/Concurrent.h @@ -17,6 +17,7 @@ #include #include #include +#include "llvm/ADT/Hashing.h" #include "llvm/Support/Allocator.h" #include "Atomic.h" #include "Debug.h" @@ -26,6 +27,10 @@ #include #endif +#if defined(__APPLE__) && defined(__MACH__) +#include +#endif + namespace swift { /// This is a node in a concurrent linked list. 
@@ -488,8 +493,8 @@ template struct ConcurrentReadableArray {
     const ElemTy *end() { return Start + Count; }
     size_t count() { return Count; }
   };
-  
-  // This type cannot be safely copied, moved, or deleted.
+
+  // This type cannot be safely copied or moved.
   ConcurrentReadableArray(const ConcurrentReadableArray &) = delete;
   ConcurrentReadableArray(ConcurrentReadableArray &&) = delete;
   ConcurrentReadableArray &operator=(const ConcurrentReadableArray &) = delete;
@@ -527,7 +532,7 @@ template struct ConcurrentReadableArray {
     if (ReaderCount.load(std::memory_order_acquire) == 0)
       deallocateFreeList();
   }
-  
+
   Snapshot snapshot() {
     incrementReaders();
     auto *storage = Elements.load(SWIFT_MEMORY_ORDER_CONSUME);
@@ -541,6 +546,382 @@ template struct ConcurrentReadableArray {
   }
 };
 
+using llvm::hash_value;
+
+/// A hash table that can be queried without taking any locks. Writes are still
+/// locked and serialized, but only with respect to other writes. Writers can add
+/// elements and clear the table, but they cannot remove individual elements.
+/// Readers work by taking a snapshot of the table and then querying that
+/// snapshot.
+///
+/// The basic structure of the table consists of two arrays. Elements are stored
+/// in a contiguous array, with new elements appended to the end. The second
+/// array is the actual hash table, and it contains indices into the elements
+/// array. This scheme cuts down on wasted space when the elements are larger
+/// than a few bytes: instead of wasting `(1 - loadFactor) * sizeof(element)`
+/// bytes on unused space in the hash table, we only waste `(1 - loadFactor) *
+/// sizeof(index)`. This scheme also avoids readers seeing partially constructed
+/// elements.
+///
+/// Reader/writer synchronization for new elements is handled by keeping an
+/// element count which is only incremented when the element has been fully
+/// constructed. A reader which sees an index beyond its view of the current
+/// count will ignore it and treat that as if there were no entry.
+///
+/// Reader/writer synchronization for resizing the arrays is handled by tracking
+/// the current number of active readers. When resizing, the new array is
+/// allocated, the data copied, and then the old array is placed in a free list.
+/// The free list is only deallocated if there are no readers; otherwise freeing
+/// is deferred.
+///
+/// Reader/writer synchronization for clearing the table is a combination of the
+/// above. By keeping the old arrays around until all readers are finished, we
+/// ensure that readers which started before the clear see valid (pre-clear)
+/// data. Readers which see any array as empty will produce no results, thus
+/// providing valid post-clear data.
+template struct ConcurrentReadableHashMap {
+  // We use memcpy and don't call destructors. Make sure the elements will put
+  // up with this.
+  static_assert(std::is_trivially_copyable::value,
+                "Elements must be trivially copyable.");
+  static_assert(std::is_trivially_destructible::value,
+                "Elements must not have destructors (they won't be called).");
+
+private:
+  /// The type of the elements of the indices array. TODO: use one or two byte
+  /// indices for smaller tables to save more memory.
+  using Index = unsigned;
+
+  /// The threshold at which we expand the table, expressed as the reciprocal
+  /// of the fraction of empty index slots. A value of 4 means that we resize
+  /// when only 1/4 of the slots remain empty, i.e. at 75% load.
+  static const size_t ResizeProportion = 4;
+
+  /// Get the "good size" for a given allocation size.
When available, this + /// rounds up to the next allocation quantum by calling `malloc_good_size`. + /// Otherwise, just return the passed-in size, which is always valid even if + /// not necessarily optimal. + size_t goodSize(size_t size) { +#if defined(__APPLE__) && defined(__MACH__) + return malloc_good_size(size); +#else + return size; +#endif + } + + /// A private class representing the storage of the indices. In order to + /// ensure that readers can get a consistent view of the indices with a single + /// atomic read, we store the size of the indices array inline, as the first + /// element in the array. + /// + /// We want the number of indices to be a power of two so that we can use a + /// bitwise AND to convert a hash code to an index. We want the entire array + /// to be a power of two in size to be friendly to the allocator, but the size + /// is stored inline. We work around this contradiction by considering the + /// first index to always be occupied with a value that never matches any key. + struct IndexStorage { + std::atomic Mask; + + static IndexStorage *allocate(size_t capacity) { + assert((capacity & (capacity - 1)) == 0 && + "Capacity must be a power of 2"); + auto *ptr = + reinterpret_cast(calloc(capacity, sizeof(Mask))); + if (!ptr) + swift::crash("Could not allocate memory."); + ptr->Mask.store(capacity - 1, std::memory_order_relaxed); + return ptr; + } + + std::atomic &at(size_t i) { return (&Mask)[i]; } + }; + + /// The number of readers currently active, equal to the number of snapshot + /// objects currently alive. + std::atomic ReaderCount; + + /// The number of elements in the elements array. + std::atomic ElementCount; + + /// The array of elements. + std::atomic Elements; + + /// The array of indices. + std::atomic Indices; + + /// The writer lock, which must be taken before any mutation of the table. + Mutex WriterLock; + + /// The maximum number of elements that the current elements array can hold. + size_t ElementCapacity; + + /// The list of element arrays to be freed once no readers are active. + std::vector ElementFreeList; + + /// The list of index arrays to be freed once no readers are active. + std::vector IndicesFreeList; + + void incrementReaders() { + ReaderCount.fetch_add(1, std::memory_order_acquire); + } + + void decrementReaders() { + ReaderCount.fetch_sub(1, std::memory_order_release); + } + + /// Free all the arrays in the free lists. + void deallocateFreeList() { + for (auto *storage : ElementFreeList) + free(storage); + ElementFreeList.clear(); + ElementFreeList.shrink_to_fit(); + + for (auto *indices : IndicesFreeList) + free(indices); + IndicesFreeList.clear(); + IndicesFreeList.shrink_to_fit(); + } + + /// Free all the arrays in the free lists if there are no active readers. If + /// there are active readers, do nothing. + void deallocateFreeListIfSafe() { + if (ReaderCount.load(std::memory_order_relaxed) == 0) + deallocateFreeList(); + } + + /// Grow the elements array, adding the old array to the free list and + /// returning the new array with all existing elements copied into it. + ElemTy *resize(ElemTy *elements, size_t elementCount) { + // Grow capacity by 25%, making sure we grow by at least 1. 
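+    // For example (illustrative arithmetic only): growing from 16 elements
+    // yields max(16 + 4, 17) = 20 slots before goodSize rounding, while
+    // growing from 1 element yields max(1 + 0, 2) = 2, so we always grow.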
+ size_t newCapacity = + std::max(elementCount + (elementCount >> 2), elementCount + 1); + size_t newSize = newCapacity * sizeof(ElemTy); + + newSize = goodSize(newSize); + newCapacity = newSize / sizeof(ElemTy); + + ElemTy *newElements = static_cast(malloc(newSize)); + if (elements) { + memcpy(newElements, elements, elementCount * sizeof(ElemTy)); + ElementFreeList.push_back(elements); + } + + ElementCapacity = newCapacity; + Elements.store(newElements, std::memory_order_release); + return newElements; + } + + /// Grow the indices array, adding the old array to the free list and + /// returning the new array with all existing indices copied into it. This + /// operation performs a rehash, so that the indices are in the correct + /// location in the new array. + IndexStorage *resize(IndexStorage *indices, Index indicesMask, + ElemTy *elements) { + // Mask is size - 1. Double the size. Start with 4 (fits into 16-byte malloc + // bucket). + size_t newCount = indices ? 2 * (indicesMask + 1) : 4; + size_t newMask = newCount - 1; + + IndexStorage *newIndices = IndexStorage::allocate(newCount); + + for (size_t i = 1; i <= indicesMask; i++) { + Index index = indices->at(i).load(std::memory_order_relaxed); + if (index == 0) + continue; + + auto *element = &elements[index - 1]; + auto hash = hash_value(*element); + + size_t newI = hash & newMask; + while (newIndices->at(newI) != 0) + newI = (newI + 1) & newMask; + newIndices->at(newI).store(index, std::memory_order_relaxed); + } + + Indices.store(newIndices, std::memory_order_release); + + IndicesFreeList.push_back(indices); + + return newIndices; + } + + /// Search for the given key within the given indices and elements arrays. If + /// an entry already exists for that key, return a pointer to the element. If + /// no entry exists, return a pointer to the location in the indices array + /// where the index of the new element would be stored. + template + static std::pair *> + find(const KeyTy &key, IndexStorage *indices, size_t elementCount, + ElemTy *elements) { + if (!indices) + return {nullptr, nullptr}; + auto hash = hash_value(key); + auto indicesMask = indices->Mask.load(std::memory_order_relaxed); + + auto i = hash & indicesMask; + while (true) { + // Index 0 is used for the mask and is not actually an index. + if (i == 0) + i++; + + auto *indexPtr = &indices->at(i); + auto index = indexPtr->load(std::memory_order_acquire); + // Element indices are 1-based, 0 means no entry. + if (index == 0) + return {nullptr, indexPtr}; + if (index - 1 < elementCount) { + auto *candidate = &elements[index - 1]; + if (candidate->matchesKey(key)) + return {candidate, nullptr}; + } + + i = (i + 1) & indicesMask; + } + } + +public: + // This type cannot be safely copied or moved. + ConcurrentReadableHashMap(const ConcurrentReadableHashMap &) = delete; + ConcurrentReadableHashMap(ConcurrentReadableHashMap &&) = delete; + ConcurrentReadableHashMap & + operator=(const ConcurrentReadableHashMap &) = delete; + + ConcurrentReadableHashMap() + : ReaderCount(0), ElementCount(0), Elements(nullptr), Indices(nullptr), + ElementCapacity(0) {} + + ~ConcurrentReadableHashMap() { + assert(ReaderCount.load(std::memory_order_acquire) == 0 && + "deallocating ConcurrentReadableHashMap with outstanding snapshots"); + deallocateFreeList(); + } + + /// Readers take a snapshot of the hash map, then work with the snapshot. 
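+  /// A typical reader might look like this (sketch only; `MyElem` is a
+  /// hypothetical element type providing `matchesKey()` and a `hash_value()`
+  /// overload, and `key` is a matching key value):
+  ///
+  ///     ConcurrentReadableHashMap<MyElem> Map;
+  ///     auto snapshot = Map.snapshot();
+  ///     if (auto *elem = snapshot.find(key))
+  ///       use(*elem); // Valid for as long as the snapshot is alive.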
+ class Snapshot { + ConcurrentReadableHashMap *Map; + IndexStorage *Indices; + ElemTy *Elements; + size_t ElementCount; + + public: + Snapshot(ConcurrentReadableHashMap *map, IndexStorage *indices, + ElemTy *elements, size_t elementCount) + : Map(map), Indices(indices), Elements(elements), + ElementCount(elementCount) {} + + Snapshot(const Snapshot &other) + : Map(other.Map), Indices(other.Indices), Elements(other.Elements), + ElementCount(other.ElementCount) { + Map->incrementReaders(); + } + + ~Snapshot() { Map->decrementReaders(); } + + /// Search for an element matching the given key. Returns a pointer to the + /// found element, or nullptr if no matching element exists. + template const ElemTy *find(const KeyTy &key) { + if (!Indices || !ElementCount || !Elements) + return nullptr; + return ConcurrentReadableHashMap::find(key, Indices, ElementCount, + Elements) + .first; + } + }; + + /// Take a snapshot of the current state of the hash map. + Snapshot snapshot() { + incrementReaders(); + auto *indices = Indices.load(SWIFT_MEMORY_ORDER_CONSUME); + auto elementCount = ElementCount.load(std::memory_order_acquire); + auto *elements = Elements.load(std::memory_order_acquire); + + return Snapshot(this, indices, elements, elementCount); + } + + /// Get an element by key, or insert a new element for that key if one is not + /// already present. Invoke `call` with the pointer to the element. BEWARE: + /// `call` is invoked with the internal writer lock held, keep work to a + /// minimum. + /// + /// `call` is passed the following parameters: + /// - `element`: the pointer to the element corresponding to `key` + /// - `created`: true if the element is newly created, false if it already + /// exists + /// `call` returns a `bool`. When `created` is `true`, the return values mean: + /// - `true` the new entry is to be kept + /// - `false` indicates that the new entry is discarded + /// If the new entry is kept, then the new element MUST be initialized, and + /// have a hash value that matches the hash value of `key`. + /// + /// The return value is ignored when `created` is `false`. + template + void getOrInsert(KeyTy key, const Call &call) { + ScopedLock guard(WriterLock); + + auto *indices = Indices.load(std::memory_order_relaxed); + if (!indices) + indices = resize(indices, 0, nullptr); + + auto indicesMask = indices->Mask.load(std::memory_order_relaxed); + auto elementCount = ElementCount.load(std::memory_order_relaxed); + auto *elements = Elements.load(std::memory_order_relaxed); + + auto found = find(key, indices, elementCount, elements); + if (found.first) { + call(found.first, false); + deallocateFreeListIfSafe(); + return; + } + + // The actual capacity is indicesMask + 1. The number of slots in use is + // elementCount + 1, since the mask also takes a slot. + auto emptyCount = (indicesMask + 1) - (elementCount + 1); + auto proportion = (indicesMask + 1) / emptyCount; + if (proportion >= ResizeProportion) { + indices = resize(indices, indicesMask, elements); + found = find(key, indices, elementCount, elements); + assert(!found.first && "Shouldn't suddenly find the key after rehashing"); + } + + if (elementCount >= ElementCapacity) { + elements = resize(elements, elementCount); + } + auto *element = &elements[elementCount]; + + // Order matters: fill out the element, then update the count, + // then update the index. 
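+    // (A concurrent reader whose snapshot still holds the old count may see
+    // the new index in the shared indices array, but it ignores any index
+    // beyond its element count, so the entry simply appears absent until the
+    // count is published.)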
+ bool keep = call(element, true); + if (keep) { + assert(hash_value(key) == hash_value(*element) && + "Element must have the same hash code as its key."); + ElementCount.store(elementCount + 1, std::memory_order_release); + found.second->store(elementCount + 1, std::memory_order_release); + } + + deallocateFreeListIfSafe(); + } + + /// Clear the hash table, freeing (when safe) all memory currently used for + /// indices and elements. + void clear() { + ScopedLock guard(WriterLock); + + auto *indices = Indices.load(std::memory_order_relaxed); + auto *elements = Elements.load(std::memory_order_relaxed); + + // Order doesn't matter here, snapshots will gracefully handle any field + // being NULL/0 while the others are not. + Indices.store(nullptr, std::memory_order_relaxed); + ElementCount.store(0, std::memory_order_relaxed); + Elements.store(nullptr, std::memory_order_relaxed); + ElementCapacity = 0; + + IndicesFreeList.push_back(indices); + ElementFreeList.push_back(elements); + + deallocateFreeListIfSafe(); + } +}; + } // end namespace swift #endif // SWIFT_RUNTIME_CONCURRENTUTILS_H diff --git a/stdlib/public/runtime/CompatibilityOverride.def b/stdlib/public/runtime/CompatibilityOverride.def index 06ee84685dc35..92e32430251cd 100644 --- a/stdlib/public/runtime/CompatibilityOverride.def +++ b/stdlib/public/runtime/CompatibilityOverride.def @@ -134,13 +134,6 @@ OVERRIDE_PROTOCOLCONFORMANCE(conformsToProtocol, const WitnessTable *, , , swift const ProtocolDescriptor *protocol), (type, protocol)) -OVERRIDE_PROTOCOLCONFORMANCE(conformsToSwiftProtocol, - const ProtocolConformanceDescriptor *, , , swift::, - (const Metadata * const type, - const ProtocolDescriptor *protocol, - StringRef moduleName), - (type, protocol, moduleName)) - OVERRIDE_KEYPATH(getKeyPath, const HeapObject *, , , swift::, (const void *pattern, const void *arguments), (pattern, arguments)) diff --git a/stdlib/public/runtime/ProtocolConformance.cpp b/stdlib/public/runtime/ProtocolConformance.cpp index 1a80b2e08e039..e373d2a50848e 100644 --- a/stdlib/public/runtime/ProtocolConformance.cpp +++ b/stdlib/public/runtime/ProtocolConformance.cpp @@ -198,31 +198,27 @@ namespace { : Type(type), Proto(proto) { assert(type); } + + friend llvm::hash_code hash_value(const ConformanceCacheKey &key) { + return llvm::hash_combine(key.Type, key.Proto); + } }; struct ConformanceCacheEntry { private: - const Metadata *Type; - const ProtocolDescriptor *Proto; - std::atomic Description; - std::atomic FailureGeneration; + ConformanceCacheKey Key; + const WitnessTable *Witness; public: - ConformanceCacheEntry(ConformanceCacheKey key, - const ProtocolConformanceDescriptor *description, - size_t failureGeneration) - : Type(key.Type), Proto(key.Proto), Description(description), - FailureGeneration(failureGeneration) { + ConformanceCacheEntry(ConformanceCacheKey key, const WitnessTable *witness) + : Key(key), Witness(witness) {} + + bool matchesKey(const ConformanceCacheKey &key) const { + return Key.Type == key.Type && Key.Proto == key.Proto; } - int compareWithKey(const ConformanceCacheKey &key) const { - if (key.Type != Type) { - return (uintptr_t(key.Type) < uintptr_t(Type) ? -1 : 1); - } else if (key.Proto != Proto) { - return (uintptr_t(key.Proto) < uintptr_t(Proto) ? 
-1 : 1); - } else { - return 0; - } + friend llvm::hash_code hash_value(const ConformanceCacheEntry &entry) { + return hash_value(entry.Key); } template @@ -230,69 +226,54 @@ namespace { return 0; } - bool isSuccessful() const { - return Description.load(std::memory_order_relaxed) != nullptr; - } - - void makeSuccessful(const ProtocolConformanceDescriptor *description) { - Description.store(description, std::memory_order_release); - } - - void updateFailureGeneration(size_t failureGeneration) { - assert(!isSuccessful()); - FailureGeneration.store(failureGeneration, std::memory_order_relaxed); - } - - /// Get the cached conformance descriptor, if successful. - const ProtocolConformanceDescriptor *getDescription() const { - assert(isSuccessful()); - return Description.load(std::memory_order_acquire); - } - - /// Get the generation in which this lookup failed. - size_t getFailureGeneration() const { - assert(!isSuccessful()); - return FailureGeneration.load(std::memory_order_relaxed); + /// Get the cached witness table, or null if we cached failure. + const WitnessTable *getWitnessTable() const { + return Witness; } }; } // end anonymous namespace // Conformance Cache. struct ConformanceState { - ConcurrentMap Cache; + ConcurrentReadableHashMap Cache; ConcurrentReadableArray SectionsToScan; ConformanceState() { initializeProtocolConformanceLookup(); } - void cacheSuccess(const Metadata *type, const ProtocolDescriptor *proto, - const ProtocolConformanceDescriptor *description) { - auto result = Cache.getOrInsert(ConformanceCacheKey(type, proto), - description, 0); - - // If the entry was already present, we may need to update it. - if (!result.second) { - result.first->makeSuccessful(description); - } - } - - void cacheFailure(const Metadata *type, const ProtocolDescriptor *proto, - size_t failureGeneration) { - auto result = - Cache.getOrInsert(ConformanceCacheKey(type, proto), - (const ProtocolConformanceDescriptor *) nullptr, - failureGeneration); - - // If the entry was already present, we may need to update it. - if (!result.second) { - result.first->updateFailureGeneration(failureGeneration); - } - } - - ConformanceCacheEntry *findCached(const Metadata *type, - const ProtocolDescriptor *proto) { - return Cache.find(ConformanceCacheKey(type, proto)); + void cacheResult(const Metadata *type, const ProtocolDescriptor *proto, + const WitnessTable *witness, size_t sectionsCount) { + Cache.getOrInsert(ConformanceCacheKey(type, proto), + [&](ConformanceCacheEntry *entry, bool created) { + // Create the entry if needed. If it already exists, + // we're done. + if (!created) + return false; + + // Check the current sections count against what was + // passed in. If a section count was passed in and they + // don't match, then this is not an authoritative entry + // and it may have been obsoleted, because the new + // sections could contain a conformance in a more + // specific type. + // + // If they DO match, then we can safely add. Another + // thread might be adding new sections at this point, + // but we will not race with them. That other thread + // will add the new sections, then clear the cache. When + // it clears the cache, it will block waiting for this + // code to complete and relinquish Cache's writer lock. + // If we cache a stale entry, it will be immediately + // cleared. 
+ if (sectionsCount > 0 && + SectionsToScan.snapshot().count() != sectionsCount) + return false; // abandon the new entry + + new (entry) ConformanceCacheEntry( + ConformanceCacheKey(type, proto), witness); + return true; // keep the new entry + }); } #ifndef NDEBUG @@ -323,6 +304,10 @@ _registerProtocolConformances(ConformanceState &C, const ProtocolConformanceRecord *begin, const ProtocolConformanceRecord *end) { C.SectionsToScan.push_back(ConformanceSection{begin, end}); + + // Blow away the conformances cache to get rid of any negative entries that + // may now be obsolete. + C.Cache.clear(); } void swift::addImageProtocolConformanceBlockCallbackUnsafe( @@ -357,98 +342,29 @@ swift::swift_registerProtocolConformances(const ProtocolConformanceRecord *begin _registerProtocolConformances(C, begin, end); } - -struct ConformanceCacheResult { - // true if description is an authoritative result as-is. - // false if more searching is required (for example, because a cached - // failure was returned in failureEntry but it is out-of-date. - bool isAuthoritative; - - // The matching conformance descriptor, or null if no cached conformance - // was found. - const ProtocolConformanceDescriptor *description; - - // If the search fails, this may be the negative cache entry for the - // queried type itself. This entry may be null or out-of-date. - ConformanceCacheEntry *failureEntry; - - static ConformanceCacheResult - cachedSuccess(const ProtocolConformanceDescriptor *description) { - return ConformanceCacheResult { true, description, nullptr }; - } - - static ConformanceCacheResult - cachedFailure(ConformanceCacheEntry *entry, bool auth) { - return ConformanceCacheResult { auth, nullptr, entry }; - } - - static ConformanceCacheResult - cacheMiss() { - return ConformanceCacheResult { false, nullptr, nullptr }; - } -}; - /// Search for a conformance descriptor in the ConformanceCache. -static -ConformanceCacheResult +/// First element of the return value is `true` if the result is authoritative +/// i.e. the result is for the type itself and not a superclass. If `false` +/// then we cached a conformance on a superclass, but that may be overridden. +/// A return value of `{ false, nullptr }` indicates nothing was cached. +static std::pair searchInConformanceCache(const Metadata *type, const ProtocolDescriptor *protocol) { auto &C = Conformances.get(); auto origType = type; - ConformanceCacheEntry *failureEntry = nullptr; - -recur: - { - // Try the specific type first. - if (auto *Value = C.findCached(type, protocol)) { - if (Value->isSuccessful()) { - // Found a conformance on the type or some superclass. Return it. - return ConformanceCacheResult::cachedSuccess(Value->getDescription()); - } - - // Found a negative cache entry. - - bool isAuthoritative; - if (type == origType) { - // This negative cache entry is for the original query type. - // Remember it so it can be returned later. - failureEntry = Value; - // An up-to-date entry for the original type is authoritative. - isAuthoritative = true; - } else { - // An up-to-date cached failure for a superclass of the type is not - // authoritative: there may be a still-undiscovered conformance - // for the original query type. - isAuthoritative = false; - } + auto snapshot = C.Cache.snapshot(); - // Check if the negative cache entry is up-to-date. - if (Value->getFailureGeneration() == C.SectionsToScan.snapshot().count()) { - // Negative cache entry is up-to-date. Return failure along with - // the original query type's own cache entry, if we found one. 
- // (That entry may be out of date but the caller still has use for it.) - return ConformanceCacheResult::cachedFailure(failureEntry, - isAuthoritative); - } - - // Negative cache entry is out-of-date. - // Continue searching for a better result. + while (type) { + if (auto *Value = snapshot.find(ConformanceCacheKey(type, protocol))) { + return {type == origType, Value->getWitnessTable()}; } - } - // If there is a superclass, look there. - if (auto superclass = _swift_class_getSuperclass(type)) { - type = superclass; - goto recur; + // If there is a superclass, look there. + type = _swift_class_getSuperclass(type); } - // We did not find an up-to-date cache entry. - // If we found an out-of-date entry for the original query type then - // return it (non-authoritatively). Otherwise return a cache miss. - if (failureEntry) - return ConformanceCacheResult::cachedFailure(failureEntry, false); - else - return ConformanceCacheResult::cacheMiss(); + // We did not find a cache entry. + return {false, nullptr}; } namespace { @@ -477,14 +393,6 @@ namespace { } } - /// Retrieve the conforming type as metadata, or NULL if the candidate's - /// conforming type is described in another way (e.g., a nominal type - /// descriptor). - const Metadata *getConformingTypeAsMetadata() const { - return candidateIsMetadata ? static_cast(candidate) - : nullptr; - } - const ContextDescriptor * getContextDescriptor(const Metadata *conformingType) const { const auto *description = conformingType->getTypeContextDescriptor(); @@ -544,40 +452,21 @@ namespace { }; } -static const ProtocolConformanceDescriptor * -swift_conformsToSwiftProtocolImpl(const Metadata * const type, - const ProtocolDescriptor *protocol, - StringRef module) { +static const WitnessTable * +swift_conformsToProtocolImpl(const Metadata *const type, + const ProtocolDescriptor *protocol) { auto &C = Conformances.get(); - // See if we have a cached conformance. The ConcurrentMap data structure - // allows us to insert and search the map concurrently without locking. - auto FoundConformance = searchInConformanceCache(type, protocol); - // If the result (positive or negative) is authoritative, return it. - if (FoundConformance.isAuthoritative) - return FoundConformance.description; + // See if we have an authoritative cached conformance. The + // ConcurrentReadableHashMap data structure allows us to search the map + // concurrently without locking. + auto found = searchInConformanceCache(type, protocol); + if (found.first) + return found.second; - auto failureEntry = FoundConformance.failureEntry; - - // Prepare to scan conformance records. + // Scan conformance records. auto snapshot = C.SectionsToScan.snapshot(); - - // Scan only sections that were not scanned yet. - // If we found an out-of-date negative cache entry, - // we need not to re-scan the sections that it covers. - auto startIndex = failureEntry ? failureEntry->getFailureGeneration() : 0; - auto endIndex = snapshot.count(); - - // If there are no unscanned sections outstanding - // then we can cache failure and give up now. - if (startIndex == endIndex) { - C.cacheFailure(type, protocol, snapshot.count()); - return nullptr; - } - - // Really scan conformance records. - for (size_t i = startIndex; i < endIndex; ++i) { - auto §ion = snapshot.Start[i]; + for (auto §ion : snapshot) { // Eagerly pull records for nondependent witnesses into our cache. 
for (const auto &record : section) { auto &descriptor = *record.get(); @@ -586,40 +475,25 @@ swift_conformsToSwiftProtocolImpl(const Metadata * const type, if (descriptor.getProtocol() != protocol) continue; - // If there's a matching type, record the positive result. + // If there's a matching type, record the positive result and return it. + // The matching type is exact, so they can't go stale, and we should + // always cache them. ConformanceCandidate candidate(descriptor); - if (candidate.getMatchingType(type)) { - const Metadata *matchingType = candidate.getConformingTypeAsMetadata(); - if (!matchingType) - matchingType = type; - - C.cacheSuccess(matchingType, protocol, &descriptor); + if (auto *matchingType = candidate.getMatchingType(type)) { + auto witness = descriptor.getWitnessTable(matchingType); + C.cacheResult(matchingType, protocol, witness, /*always cache*/ 0); } } } - - // Conformance scan is complete. - - // Search the cache once more, and this time update the cache if necessary. - FoundConformance = searchInConformanceCache(type, protocol); - if (FoundConformance.isAuthoritative) { - return FoundConformance.description; - } else { - C.cacheFailure(type, protocol, snapshot.count()); - return nullptr; - } -} -static const WitnessTable * -swift_conformsToProtocolImpl(const Metadata * const type, - const ProtocolDescriptor *protocol) { - auto description = - swift_conformsToSwiftProtocol(type, protocol, StringRef()); - if (!description) - return nullptr; + // Try the search again to look for the most specific cached conformance. + found = searchInConformanceCache(type, protocol); + + // If it's not authoritative, then add an authoritative entry for this type. + if (!found.first) + C.cacheResult(type, protocol, found.second, snapshot.count()); - return description->getWitnessTable( - findConformingSuperclass(type, description)); + return found.second; } const ContextDescriptor * diff --git a/stdlib/toolchain/Compatibility50/CompatibilityOverride.def b/stdlib/toolchain/Compatibility50/CompatibilityOverride.def new file mode 100644 index 0000000000000..06ee84685dc35 --- /dev/null +++ b/stdlib/toolchain/Compatibility50/CompatibilityOverride.def @@ -0,0 +1,226 @@ +//===--- CompatibilityOverrides.def - Compatibility Overrides Database -*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// This file defines x-macros used for metaprogramming with the set of +// compatibility override functions. +// +//===----------------------------------------------------------------------===// + +/// #define OVERRIDE(name, ret, attrs, namespace, typedArgs, namedArgs) +/// Provides information about an overridable function. +/// - name is the name of the function, without any leading swift_ or +/// namespace. +/// - ret is the return type of the function. +/// - attrs is the attributes, if any, applied to the function definition. 
+/// - namespace is the namespace, if any, the function is in, including a +/// trailing :: +/// - typedArgs is the argument list, including types, surrounded by +/// parentheses +/// - namedArgs is the list of argument names, with no types, surrounded by +/// parentheses +/// +/// The entries are organized by group. A user may define OVERRIDE to get all +/// entries, or define one or more of OVERRIDE_METADATALOOKUP, OVERRIDE_CASTING, +/// OVERRIDE_OBJC, OVERRIDE_FOREIGN, OVERRIDE_PROTOCOLCONFORMANCE, +/// and OVERRIDE_KEYPATH to get only those entries. + +// NOTE: this file is used to build the definition of OverrideSection in +// CompatibilityOverride.cpp, which is part of the ABI. Do not move or remove entries +// in this file after ABI stability. Additional entries can be added to the end. + +#ifdef OVERRIDE +# define OVERRIDE_METADATALOOKUP OVERRIDE +# define OVERRIDE_CASTING OVERRIDE +# define OVERRIDE_OBJC OVERRIDE +# define OVERRIDE_FOREIGN OVERRIDE +# define OVERRIDE_PROTOCOLCONFORMANCE OVERRIDE +# define OVERRIDE_KEYPATH OVERRIDE +# define OVERRIDE_WITNESSTABLE OVERRIDE +#else +# ifndef OVERRIDE_METADATALOOKUP +# define OVERRIDE_METADATALOOKUP(...) +# endif +# ifndef OVERRIDE_CASTING +# define OVERRIDE_CASTING(...) +# endif +# ifndef OVERRIDE_OBJC +# define OVERRIDE_OBJC(...) +# endif +# ifndef OVERRIDE_FOREIGN +# define OVERRIDE_FOREIGN(...) +# endif +# ifndef OVERRIDE_PROTOCOLCONFORMANCE +# define OVERRIDE_PROTOCOLCONFORMANCE(...) +# endif +# ifndef OVERRIDE_KEYPATH +# define OVERRIDE_KEYPATH(...) +# endif +# ifndef OVERRIDE_WITNESSTABLE +# define OVERRIDE_WITNESSTABLE(...) +# endif +#endif + +OVERRIDE_CASTING(dynamicCast, bool, , , swift::, + (OpaqueValue *dest, OpaqueValue *src, + const Metadata *srcType, + const Metadata *targetType, + DynamicCastFlags flags), + (dest, src, srcType, targetType, flags)) + + +OVERRIDE_CASTING(dynamicCastClass, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType), + (object, targetType)) + + +OVERRIDE_CASTING(dynamicCastClassUnconditional, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + + + +OVERRIDE_CASTING(dynamicCastUnknownClass, const void *, , , swift::, + (const void *object, const Metadata *targetType), + (object, targetType)) + + +OVERRIDE_CASTING(dynamicCastUnknownClassUnconditional, const void *, , , swift::, + (const void *object, const Metadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + + +OVERRIDE_CASTING(dynamicCastMetatype, const Metadata *, , , swift::, + (const Metadata *sourceType, + const Metadata *targetType), + (sourceType, targetType)) + + +OVERRIDE_CASTING(dynamicCastMetatypeUnconditional, const Metadata *, , , swift::, + (const Metadata *sourceType, + const Metadata *targetType, + const char *file, unsigned line, unsigned column), + (sourceType, targetType, file, line, column)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClassMetatype, const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, + const ClassMetadata *targetType), + (sourceType, targetType)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClassMetatypeUnconditional, + const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, + const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (sourceType, targetType, file, line, column)) + + 
+OVERRIDE_PROTOCOLCONFORMANCE(conformsToProtocol, const WitnessTable *, , , swift::, + (const Metadata * const type, + const ProtocolDescriptor *protocol), + (type, protocol)) + +OVERRIDE_PROTOCOLCONFORMANCE(conformsToSwiftProtocol, + const ProtocolConformanceDescriptor *, , , swift::, + (const Metadata * const type, + const ProtocolDescriptor *protocol, + StringRef moduleName), + (type, protocol, moduleName)) + +OVERRIDE_KEYPATH(getKeyPath, const HeapObject *, , , swift::, + (const void *pattern, const void *arguments), + (pattern, arguments)) + +OVERRIDE_METADATALOOKUP(getTypeByMangledNode, TypeInfo, , SWIFT_CC(swift), swift::, + (MetadataRequest request, + Demangler &demangler, + Demangle::NodePointer node, + const void * const *arguments, + SubstGenericParameterFn substGenericParam, + SubstDependentWitnessTableFn substWitnessTable), + (request, demangler, node, arguments, substGenericParam, substWitnessTable)) +OVERRIDE_METADATALOOKUP(getTypeByMangledName, TypeInfo, , SWIFT_CC(swift), swift::, + (MetadataRequest request, + StringRef typeName, + const void * const *arguments, + SubstGenericParameterFn substGenericParam, + SubstDependentWitnessTableFn substWitnessTable), + (request, typeName, arguments, substGenericParam, substWitnessTable)) + +OVERRIDE_WITNESSTABLE(getAssociatedTypeWitnessSlow, MetadataResponse, + SWIFT_RUNTIME_STDLIB_INTERNAL, SWIFT_CC(swift), swift::, + (MetadataRequest request, WitnessTable *wtable, + const Metadata *conformingType, + const ProtocolRequirement *reqBase, + const ProtocolRequirement *assocType), + (request, wtable, conformingType, reqBase, assocType)) + +OVERRIDE_WITNESSTABLE(getAssociatedConformanceWitnessSlow, const WitnessTable *, + SWIFT_RUNTIME_STDLIB_INTERNAL, SWIFT_CC(swift), swift::, + (WitnessTable *wtable, const Metadata *conformingType, + const Metadata *assocType, + const ProtocolRequirement *reqBase, + const ProtocolRequirement *assocConformance), + (wtable, conformingType, assocType, reqBase, + assocConformance)) +#if SWIFT_OBJC_INTEROP + +OVERRIDE_OBJC(dynamicCastObjCClass, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType), + (object, targetType)) + + +OVERRIDE_OBJC(dynamicCastObjCClassUnconditional, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + +OVERRIDE_OBJC(dynamicCastObjCClassMetatype, const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, + const ClassMetadata *targetType), + (sourceType, targetType)) + + +OVERRIDE_OBJC(dynamicCastObjCClassMetatypeUnconditional, const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (sourceType, targetType, file, line, column)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClass, const void *, , , swift::, + (const void *object, + const ForeignClassMetadata *targetType), + (object, targetType)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClassUnconditional, const void *, , , swift::, + (const void *object, const ForeignClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + +#endif + +#undef OVERRIDE +#undef OVERRIDE_METADATALOOKUP +#undef OVERRIDE_CASTING +#undef OVERRIDE_OBJC +#undef OVERRIDE_FOREIGN +#undef OVERRIDE_PROTOCOLCONFORMANCE +#undef OVERRIDE_KEYPATH +#undef OVERRIDE_WITNESSTABLE diff --git 
a/stdlib/toolchain/Compatibility50/CompatibilityOverride.h b/stdlib/toolchain/Compatibility50/CompatibilityOverride.h new file mode 100644 index 0000000000000..e726e41958f50 --- /dev/null +++ b/stdlib/toolchain/Compatibility50/CompatibilityOverride.h @@ -0,0 +1,61 @@ +//===--- CompatibiltyOverride.h - Back-deploying compatibility fixes --*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Support back-deploying compatibility fixes for newer apps running on older runtimes. +// +//===----------------------------------------------------------------------===// + +#ifndef COMPATIBILITY_OVERRIDE_H +#define COMPATIBILITY_OVERRIDE_H + +#include "../../public/runtime/Private.h" +#include "swift/Runtime/Metadata.h" +#include "swift/Runtime/Once.h" +#include + +namespace swift { + +#define COMPATIBILITY_UNPAREN(...) __VA_ARGS__ + +#define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \ + ccAttrs typedef ret (*Original_ ## name) typedArgs; +#include "CompatibilityOverride.def" + +#define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \ + ccAttrs typedef ret (*Override_ ## name)(COMPATIBILITY_UNPAREN typedArgs, \ + Original_ ## name originalImpl); +#include "CompatibilityOverride.def" + +#define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \ + Override_ ## name getOverride_ ## name(); +#include "CompatibilityOverride.def" + + +/// Used to define an override point. The override point #defines the appropriate +/// OVERRIDE macro from CompatibilityOverride.def to this macro, then includes +/// the file to generate the override points. The original implementation of the +/// functionality must be available as swift_funcNameHereImpl. 
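+///
+/// For example (sketch, not part of this macro's definition), a runtime file
+/// that implements swift_conformsToProtocolImpl could generate its override
+/// point with:
+///
+///     #define OVERRIDE_PROTOCOLCONFORMANCE COMPATIBILITY_OVERRIDE
+///     #include "CompatibilityOverride.def"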
+#define COMPATIBILITY_OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \ + attrs ccAttrs ret namespace swift_ ## name typedArgs { \ + static Override_ ## name Override; \ + static swift_once_t Predicate; \ + swift_once(&Predicate, [](void *) { \ + Override = getOverride_ ## name(); \ + }, nullptr); \ + if (Override != nullptr) \ + return Override(COMPATIBILITY_UNPAREN namedArgs, swift_ ## name ## Impl); \ + return swift_ ## name ## Impl namedArgs; \ + } + +} /* end namespace swift */ + +#endif /* COMPATIBILITY_OVERRIDE_H */ diff --git a/stdlib/toolchain/Compatibility50/Overrides.cpp b/stdlib/toolchain/Compatibility50/Overrides.cpp index bc6f88625256b..fcaccf3b0fb4d 100644 --- a/stdlib/toolchain/Compatibility50/Overrides.cpp +++ b/stdlib/toolchain/Compatibility50/Overrides.cpp @@ -16,7 +16,7 @@ #include "Overrides.h" #include "../Compatibility51/Overrides.h" -#include "../../public/runtime/CompatibilityOverride.h" +#include "CompatibilityOverride.h" #include #include @@ -28,7 +28,7 @@ struct OverrideSection { uintptr_t version; #define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \ Override_ ## name name; -#include "../../public/runtime/CompatibilityOverride.def" +#include "CompatibilityOverride.def" }; OverrideSection Swift50Overrides diff --git a/stdlib/toolchain/Compatibility51/CompatibilityOverride.def b/stdlib/toolchain/Compatibility51/CompatibilityOverride.def new file mode 100644 index 0000000000000..06ee84685dc35 --- /dev/null +++ b/stdlib/toolchain/Compatibility51/CompatibilityOverride.def @@ -0,0 +1,226 @@ +//===--- CompatibilityOverrides.def - Compatibility Overrides Database -*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// This file defines x-macros used for metaprogramming with the set of +// compatibility override functions. +// +//===----------------------------------------------------------------------===// + +/// #define OVERRIDE(name, ret, attrs, namespace, typedArgs, namedArgs) +/// Provides information about an overridable function. +/// - name is the name of the function, without any leading swift_ or +/// namespace. +/// - ret is the return type of the function. +/// - attrs is the attributes, if any, applied to the function definition. +/// - namespace is the namespace, if any, the function is in, including a +/// trailing :: +/// - typedArgs is the argument list, including types, surrounded by +/// parentheses +/// - namedArgs is the list of argument names, with no types, surrounded by +/// parentheses +/// +/// The entries are organized by group. A user may define OVERRIDE to get all +/// entries, or define one or more of OVERRIDE_METADATALOOKUP, OVERRIDE_CASTING, +/// OVERRIDE_OBJC, OVERRIDE_FOREIGN, OVERRIDE_PROTOCOLCONFORMANCE, +/// and OVERRIDE_KEYPATH to get only those entries. + +// NOTE: this file is used to build the definition of OverrideSection in +// CompatibilityOverride.cpp, which is part of the ABI. Do not move or remove entries +// in this file after ABI stability. Additional entries can be added to the end. 
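+
+// For example (sketch), a client interested only in the casting entries could
+// write:
+//
+//   #define OVERRIDE_CASTING(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \
+//     /* use the entry here */
+//   #include "CompatibilityOverride.def"
+//
+// The remaining groups expand to nothing thanks to the defaults below.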
+ +#ifdef OVERRIDE +# define OVERRIDE_METADATALOOKUP OVERRIDE +# define OVERRIDE_CASTING OVERRIDE +# define OVERRIDE_OBJC OVERRIDE +# define OVERRIDE_FOREIGN OVERRIDE +# define OVERRIDE_PROTOCOLCONFORMANCE OVERRIDE +# define OVERRIDE_KEYPATH OVERRIDE +# define OVERRIDE_WITNESSTABLE OVERRIDE +#else +# ifndef OVERRIDE_METADATALOOKUP +# define OVERRIDE_METADATALOOKUP(...) +# endif +# ifndef OVERRIDE_CASTING +# define OVERRIDE_CASTING(...) +# endif +# ifndef OVERRIDE_OBJC +# define OVERRIDE_OBJC(...) +# endif +# ifndef OVERRIDE_FOREIGN +# define OVERRIDE_FOREIGN(...) +# endif +# ifndef OVERRIDE_PROTOCOLCONFORMANCE +# define OVERRIDE_PROTOCOLCONFORMANCE(...) +# endif +# ifndef OVERRIDE_KEYPATH +# define OVERRIDE_KEYPATH(...) +# endif +# ifndef OVERRIDE_WITNESSTABLE +# define OVERRIDE_WITNESSTABLE(...) +# endif +#endif + +OVERRIDE_CASTING(dynamicCast, bool, , , swift::, + (OpaqueValue *dest, OpaqueValue *src, + const Metadata *srcType, + const Metadata *targetType, + DynamicCastFlags flags), + (dest, src, srcType, targetType, flags)) + + +OVERRIDE_CASTING(dynamicCastClass, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType), + (object, targetType)) + + +OVERRIDE_CASTING(dynamicCastClassUnconditional, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + + + +OVERRIDE_CASTING(dynamicCastUnknownClass, const void *, , , swift::, + (const void *object, const Metadata *targetType), + (object, targetType)) + + +OVERRIDE_CASTING(dynamicCastUnknownClassUnconditional, const void *, , , swift::, + (const void *object, const Metadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + + +OVERRIDE_CASTING(dynamicCastMetatype, const Metadata *, , , swift::, + (const Metadata *sourceType, + const Metadata *targetType), + (sourceType, targetType)) + + +OVERRIDE_CASTING(dynamicCastMetatypeUnconditional, const Metadata *, , , swift::, + (const Metadata *sourceType, + const Metadata *targetType, + const char *file, unsigned line, unsigned column), + (sourceType, targetType, file, line, column)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClassMetatype, const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, + const ClassMetadata *targetType), + (sourceType, targetType)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClassMetatypeUnconditional, + const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, + const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (sourceType, targetType, file, line, column)) + + +OVERRIDE_PROTOCOLCONFORMANCE(conformsToProtocol, const WitnessTable *, , , swift::, + (const Metadata * const type, + const ProtocolDescriptor *protocol), + (type, protocol)) + +OVERRIDE_PROTOCOLCONFORMANCE(conformsToSwiftProtocol, + const ProtocolConformanceDescriptor *, , , swift::, + (const Metadata * const type, + const ProtocolDescriptor *protocol, + StringRef moduleName), + (type, protocol, moduleName)) + +OVERRIDE_KEYPATH(getKeyPath, const HeapObject *, , , swift::, + (const void *pattern, const void *arguments), + (pattern, arguments)) + +OVERRIDE_METADATALOOKUP(getTypeByMangledNode, TypeInfo, , SWIFT_CC(swift), swift::, + (MetadataRequest request, + Demangler &demangler, + Demangle::NodePointer node, + const void * const *arguments, + SubstGenericParameterFn substGenericParam, + SubstDependentWitnessTableFn substWitnessTable), + 
(request, demangler, node, arguments, substGenericParam, substWitnessTable)) +OVERRIDE_METADATALOOKUP(getTypeByMangledName, TypeInfo, , SWIFT_CC(swift), swift::, + (MetadataRequest request, + StringRef typeName, + const void * const *arguments, + SubstGenericParameterFn substGenericParam, + SubstDependentWitnessTableFn substWitnessTable), + (request, typeName, arguments, substGenericParam, substWitnessTable)) + +OVERRIDE_WITNESSTABLE(getAssociatedTypeWitnessSlow, MetadataResponse, + SWIFT_RUNTIME_STDLIB_INTERNAL, SWIFT_CC(swift), swift::, + (MetadataRequest request, WitnessTable *wtable, + const Metadata *conformingType, + const ProtocolRequirement *reqBase, + const ProtocolRequirement *assocType), + (request, wtable, conformingType, reqBase, assocType)) + +OVERRIDE_WITNESSTABLE(getAssociatedConformanceWitnessSlow, const WitnessTable *, + SWIFT_RUNTIME_STDLIB_INTERNAL, SWIFT_CC(swift), swift::, + (WitnessTable *wtable, const Metadata *conformingType, + const Metadata *assocType, + const ProtocolRequirement *reqBase, + const ProtocolRequirement *assocConformance), + (wtable, conformingType, assocType, reqBase, + assocConformance)) +#if SWIFT_OBJC_INTEROP + +OVERRIDE_OBJC(dynamicCastObjCClass, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType), + (object, targetType)) + + +OVERRIDE_OBJC(dynamicCastObjCClassUnconditional, const void *, , , swift::, + (const void *object, + const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + +OVERRIDE_OBJC(dynamicCastObjCClassMetatype, const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, + const ClassMetadata *targetType), + (sourceType, targetType)) + + +OVERRIDE_OBJC(dynamicCastObjCClassMetatypeUnconditional, const ClassMetadata *, , , swift::, + (const ClassMetadata *sourceType, const ClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (sourceType, targetType, file, line, column)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClass, const void *, , , swift::, + (const void *object, + const ForeignClassMetadata *targetType), + (object, targetType)) + + +OVERRIDE_FOREIGN(dynamicCastForeignClassUnconditional, const void *, , , swift::, + (const void *object, const ForeignClassMetadata *targetType, + const char *file, unsigned line, unsigned column), + (object, targetType, file, line, column)) + +#endif + +#undef OVERRIDE +#undef OVERRIDE_METADATALOOKUP +#undef OVERRIDE_CASTING +#undef OVERRIDE_OBJC +#undef OVERRIDE_FOREIGN +#undef OVERRIDE_PROTOCOLCONFORMANCE +#undef OVERRIDE_KEYPATH +#undef OVERRIDE_WITNESSTABLE diff --git a/stdlib/toolchain/Compatibility51/CompatibilityOverride.h b/stdlib/toolchain/Compatibility51/CompatibilityOverride.h new file mode 100644 index 0000000000000..e726e41958f50 --- /dev/null +++ b/stdlib/toolchain/Compatibility51/CompatibilityOverride.h @@ -0,0 +1,61 @@ +//===--- CompatibiltyOverride.h - Back-deploying compatibility fixes --*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// Support back-deploying compatibility fixes for newer apps running on older runtimes. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef COMPATIBILITY_OVERRIDE_H
+#define COMPATIBILITY_OVERRIDE_H
+
+#include "../../public/runtime/Private.h"
+#include "swift/Runtime/Metadata.h"
+#include "swift/Runtime/Once.h"
+#include
+
+namespace swift {
+
+#define COMPATIBILITY_UNPAREN(...) __VA_ARGS__
+
+#define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \
+  ccAttrs typedef ret (*Original_ ## name) typedArgs;
+#include "CompatibilityOverride.def"
+
+#define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \
+  ccAttrs typedef ret (*Override_ ## name)(COMPATIBILITY_UNPAREN typedArgs, \
+                                           Original_ ## name originalImpl);
+#include "CompatibilityOverride.def"
+
+#define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \
+  Override_ ## name getOverride_ ## name();
+#include "CompatibilityOverride.def"
+
+
+/// Used to define an override point. The override point #defines the appropriate
+/// OVERRIDE macro from CompatibilityOverride.def to this macro, then includes
+/// the file to generate the override points. The original implementation of the
+/// functionality must be available as swift_funcNameHereImpl.
+#define COMPATIBILITY_OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \
+  attrs ccAttrs ret namespace swift_ ## name typedArgs { \
+    static Override_ ## name Override; \
+    static swift_once_t Predicate; \
+    swift_once(&Predicate, [](void *) { \
+      Override = getOverride_ ## name(); \
+    }, nullptr); \
+    if (Override != nullptr) \
+      return Override(COMPATIBILITY_UNPAREN namedArgs, swift_ ## name ## Impl); \
+    return swift_ ## name ## Impl namedArgs; \
+  }
+
+} /* end namespace swift */
+
+#endif /* COMPATIBILITY_OVERRIDE_H */
diff --git a/stdlib/toolchain/Compatibility51/Overrides.cpp b/stdlib/toolchain/Compatibility51/Overrides.cpp
index 5e1eb78f3ab37..f9a89ec39e115 100644
--- a/stdlib/toolchain/Compatibility51/Overrides.cpp
+++ b/stdlib/toolchain/Compatibility51/Overrides.cpp
@@ -14,7 +14,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "../../public/runtime/CompatibilityOverride.h"
+#include "CompatibilityOverride.h"
 #include "Overrides.h"
 #include
 
@@ -27,7 +27,7 @@ struct OverrideSection {
   uintptr_t version;
 #define OVERRIDE(name, ret, attrs, ccAttrs, namespace, typedArgs, namedArgs) \
   Override_ ## name name;
-#include "../../public/runtime/CompatibilityOverride.def"
+#include "CompatibilityOverride.def"
 };
 
 OverrideSection Swift51Overrides
diff --git a/tools/swift-inspect/Sources/swift-inspect/Inspector.swift b/tools/swift-inspect/Sources/swift-inspect/Inspector.swift
index 80f3668f111b3..05b77192adf68 100644
--- a/tools/swift-inspect/Sources/swift-inspect/Inspector.swift
+++ b/tools/swift-inspect/Sources/swift-inspect/Inspector.swift
@@ -121,8 +121,10 @@ private func QueryDataLayoutFn(context: UnsafeMutableRawPointer?,
                                type: DataLayoutQueryType,
                                inBuffer: UnsafeMutableRawPointer?,
                                outBuffer: UnsafeMutableRawPointer?)
     -> CInt {
+  let is64 = MemoryLayout<UnsafeRawPointer>.stride == 8
+
   switch type {
-  case DLQ_GetPointerSize:
+  case DLQ_GetPointerSize, DLQ_GetSizeSize:
     let size = UInt8(MemoryLayout<UnsafeRawPointer>.stride)
     outBuffer!.storeBytes(of: size, toByteOffset: 0, as: UInt8.self)
     return 1
@@ -130,6 +132,26 @@ private func QueryDataLayoutFn(context: UnsafeMutableRawPointer?,
     let mask = GetPtrauthMask()
     outBuffer!.storeBytes(of: mask, toByteOffset: 0, as: UInt.self)
     return 1
+  case DLQ_GetObjCReservedLowBits:
+    var size: UInt8 = 0
+#if os(macOS)
+    // The low bit is reserved only on 64-bit macOS.
+    if is64 {
+      size = 1
+    }
+#endif
+    outBuffer!.storeBytes(of: size, toByteOffset: 0, as: UInt8.self)
+    return 1
+  case DLQ_GetLeastValidPointerValue:
+    var value: UInt64 = 0x1000
+#if os(macOS) || os(iOS) || os(watchOS) || os(tvOS)
+    // 64-bit Apple platforms reserve the low 4GB.
+    if is64 {
+      value = 0x100000000
+    }
+#endif
+    outBuffer!.storeBytes(of: value, toByteOffset: 0, as: UInt64.self)
+    return 1
   default:
     return 0
   }
diff --git a/unittests/runtime/CompatibilityOverride.cpp b/unittests/runtime/CompatibilityOverride.cpp
index 2dece35195619..80a9b086980b9 100644
--- a/unittests/runtime/CompatibilityOverride.cpp
+++ b/unittests/runtime/CompatibilityOverride.cpp
@@ -168,11 +168,6 @@ TEST_F(CompatibilityOverrideTest, test_swift_conformsToProtocol) {
   ASSERT_EQ(Result, nullptr);
 }
 
-TEST_F(CompatibilityOverrideTest, test_swift_conformsToSwiftProtocol) {
-  auto Result = swift_conformsToSwiftProtocol(nullptr, nullptr, StringRef());
-  ASSERT_EQ(Result, nullptr);
-}
-
 TEST_F(CompatibilityOverrideTest, test_swift_getTypeByMangledNode) {
   Demangler demangler;
   auto Result = swift_getTypeByMangledNode(MetadataState::Abstract,
diff --git a/unittests/runtime/Concurrent.cpp b/unittests/runtime/Concurrent.cpp
index 00f09675d6958..141b804f099d4 100644
--- a/unittests/runtime/Concurrent.cpp
+++ b/unittests/runtime/Concurrent.cpp
@@ -154,3 +154,348 @@ TEST(ConcurrentReadableArrayTest, MultiThreaded2) {
 
   ASSERT_EQ(array.snapshot().count(), (size_t)writerCount * insertCount);
 }
+
+struct SingleThreadedValue {
+  size_t key;
+  size_t x;
+  SingleThreadedValue(size_t key, size_t x) : key(key), x(x) {}
+  bool matchesKey(size_t key) { return this->key == key; }
+  friend llvm::hash_code hash_value(const SingleThreadedValue &value) {
+    return llvm::hash_value(value.key);
+  }
+};
+
+TEST(ConcurrentReadableHashMapTest, SingleThreaded) {
+  ConcurrentReadableHashMap<SingleThreadedValue> map;
+
+  auto permute = [](size_t value) { return value ^ 0x33333333U; };
+
+  auto add = [&](size_t limit) {
+    for (size_t i = 0; i < limit; i++)
+      map.getOrInsert(permute(i),
+                      [&](SingleThreadedValue *value, bool created) {
+                        if (created)
+                          new (value) SingleThreadedValue(permute(i), i);
+                        return true;
+                      });
+  };
+  auto check = [&](size_t limit) {
+    auto snapshot = map.snapshot();
+    ASSERT_EQ(snapshot.find((size_t)~0), nullptr);
+    for (size_t i = 0; i < limit; i++) {
+      auto *value = snapshot.find(permute(i));
+      ASSERT_NE(value, nullptr);
+      ASSERT_EQ(permute(i), value->key);
+      ASSERT_EQ(i, value->x);
+    }
+  };
+
+  check(0);
+  add(1);
+  check(1);
+  add(16);
+  check(16);
+  add(100);
+  check(100);
+  add(1000);
+  check(1000);
+  add(1000000);
+  check(1000000);
+
+  map.clear();
+  check(0);
+
+  add(1);
+  check(1);
+  map.clear();
+  check(0);
+
+  add(16);
+  check(16);
+  map.clear();
+  check(0);
+
+  add(100);
+  check(100);
+  map.clear();
+  check(0);
+
+  add(1000);
+  check(1000);
+  map.clear();
+  check(0);
+
+  add(1000000);
+  check(1000000);
+  map.clear();
+  check(0);
+}
+
+struct MultiThreadedKey {
+  int threadNumber;
+  int n;
+  friend llvm::hash_code hash_value(const MultiThreadedKey &value) {
+    return llvm::hash_combine(value.threadNumber, value.n);
+  }
+};
+
+struct MultiThreadedValue {
+  int threadNumber;
+  int n;
+  int x;
+  MultiThreadedValue(MultiThreadedKey key, int x)
+      : threadNumber(key.threadNumber), n(key.n), x(x) {}
+  bool matchesKey(const MultiThreadedKey &key) {
+    return threadNumber == key.threadNumber && n == key.n;
+  }
+  friend llvm::hash_code hash_value(const MultiThreadedValue &value) {
+    return llvm::hash_combine(value.threadNumber, value.n);
+  }
+};
+
+// Test simultaneous readers and writers.
+TEST(ConcurrentReadableHashMapTest, MultiThreaded) {
+  const int writerCount = 16;
+  const int readerCount = 8;
+  const int insertCount = 10000;
+
+  ConcurrentReadableHashMap<MultiThreadedValue> map;
+
+  // NOTE: The bizarre lambdas around the ASSERT_ statements work around the
+  // fact that these macros emit return statements, which conflict with our
+  // need to return true/false from these lambdas. Wrapping them in a lambda
+  // neutralizes the return.
+
+  auto writer = [&](int threadNumber) {
+    // Insert half, then insert all, to test adding an existing key.
+    for (int i = 0; i < insertCount / 2; i++)
+      map.getOrInsert(MultiThreadedKey{threadNumber, i},
+                      [&](MultiThreadedValue *value, bool created) {
+        [&] { ASSERT_TRUE(created); }();
+        new (value) MultiThreadedValue(MultiThreadedKey{threadNumber, i}, i);
+        return true;
+      });
+    // Test discarding a new entry.
+    for (int i = 0; i < insertCount; i++)
+      map.getOrInsert(MultiThreadedKey{threadNumber, i},
+                      [&](MultiThreadedValue *value, bool created) {
+                        [&] { ASSERT_EQ(created, i >= insertCount / 2); }();
+                        return false;
+                      });
+    for (int i = 0; i < insertCount; i++)
+      map.getOrInsert(MultiThreadedKey{threadNumber, i},
+                      [&](MultiThreadedValue *value, bool created) {
+        if (created) {
+          [&] { ASSERT_GE(i, insertCount / 2); }();
+          new (value) MultiThreadedValue(MultiThreadedKey{threadNumber, i}, i);
+        } else {
+          [&] { ASSERT_LT(i, insertCount / 2); }();
+        }
+        return true;
+      });
+  };
+
+  auto reader = [&] {
+    bool done = false;
+    while (!done) {
+      done = true;
+      for (int threadNumber = 0; threadNumber < writerCount; threadNumber++) {
+        // Read from the top down. We should see zero or more missing entries,
+        // and then the rest are present. Any hole is a bug.
+        int firstSeen = -1;
+        auto snapshot = map.snapshot();
+        for (int i = insertCount - 1; i >= 0; i--) {
+          MultiThreadedKey key = {threadNumber, i};
+          const MultiThreadedValue *value = snapshot.find(key);
+          if (value) {
+            if (firstSeen == -1)
+              firstSeen = value->x;
+            ASSERT_EQ(value->x, i);
+          } else {
+            ASSERT_EQ(firstSeen, -1);
+            done = false;
+          }
+        }
+      }
+    }
+  };
+
+  threadedExecute(writerCount + readerCount, [&](int i) {
+    if (i < writerCount)
+      writer(i);
+    else
+      reader();
+  });
+}
+
+// Test readers and writers while also constantly clearing the map.
+TEST(ConcurrentReadableHashMapTest, MultiThreaded2) {
+  const int writerCount = 16;
+  const int readerCount = 8;
+  const int insertCount = 10000;
+
+  ConcurrentReadableHashMap<MultiThreadedValue> map;
+
+  std::atomic<int> writerDoneCount = {0};
+  auto writer = [&](int threadNumber) {
+    for (int i = 0; i < insertCount; i++)
+      map.getOrInsert(MultiThreadedKey{threadNumber, i},
+                      [&](MultiThreadedValue *value, bool created) {
+        [&] { ASSERT_TRUE(created); }();
+        new (value) MultiThreadedValue(MultiThreadedKey{threadNumber, i}, i);
+        return true;
+      });
+    writerDoneCount.fetch_add(1, std::memory_order_relaxed);
+  };
+
+  auto reader = [&] {
+    while (writerDoneCount.load(std::memory_order_relaxed) < writerCount) {
+      for (int threadNumber = 0; threadNumber < writerCount; threadNumber++) {
+        // Read from the top down. We should see a single contiguous region of
+        // entries. Multiple regions indicate a bug.
+        int firstSeen = -1;
+        int lastSeen = -1;
+        auto snapshot = map.snapshot();
+        for (int i = insertCount - 1; i >= 0; i--) {
+          MultiThreadedKey key = {threadNumber, i};
+          const MultiThreadedValue *value = snapshot.find(key);
+          if (value) {
+            if (firstSeen == -1)
+              firstSeen = value->x;
+            if (lastSeen != -1)
+              ASSERT_EQ(lastSeen, i + 1);
+            lastSeen = value->x;
+            ASSERT_EQ(value->x, i);
+          }
+        }
+      }
+    }
+  };
+
+  auto clear = [&] {
+    while (writerDoneCount.load(std::memory_order_relaxed) < writerCount) {
+      map.clear();
+    }
+  };
+
+  threadedExecute(writerCount + readerCount + 1, [&](int i) {
+    if (i < writerCount)
+      writer(i);
+    else if (i < writerCount + readerCount)
+      reader();
+    else
+      clear();
+  });
+}
+
+// Test readers and writers, with readers taking lots of snapshots.
+TEST(ConcurrentReadableHashMapTest, MultiThreaded3) {
+  const int writerCount = 16;
+  const int readerCount = 8;
+  const int insertCount = 10000;
+
+  ConcurrentReadableHashMap<MultiThreadedValue> map;
+
+  std::atomic<int> writerDoneCount = {0};
+  auto writer = [&](int threadNumber) {
+    for (int i = 0; i < insertCount; i++)
+      map.getOrInsert(MultiThreadedKey{threadNumber, i},
+                      [&](MultiThreadedValue *value, bool created) {
+        [&] { ASSERT_TRUE(created); }();
+        new (value) MultiThreadedValue(MultiThreadedKey{threadNumber, i}, i);
+        return true;
+      });
+    writerDoneCount.fetch_add(1, std::memory_order_relaxed);
+  };
+
+  auto reader = [&] {
+    while (writerDoneCount.load(std::memory_order_relaxed) < writerCount) {
+      for (int threadNumber = 0; threadNumber < writerCount; threadNumber++) {
+        // Read from the top down. When we're not clearing the map, we should
+        // see zero or more missing entries, and then the rest are present. Any
+        // hole is a bug.
+        int firstSeen = -1;
+        int lastSeen = -1;
+        for (int i = insertCount - 1; i >= 0; i--) {
+          auto snapshot = map.snapshot();
+          MultiThreadedKey key = {threadNumber, i};
+          const MultiThreadedValue *value = snapshot.find(key);
+          if (value) {
+            if (firstSeen == -1)
+              firstSeen = value->x;
+            if (lastSeen != -1)
+              ASSERT_EQ(lastSeen, i + 1);
+            lastSeen = value->x;
+            ASSERT_EQ(value->x, i);
+          }
+        }
+      }
+    }
+  };
+
+  threadedExecute(writerCount + readerCount, [&](int i) {
+    if (i < writerCount)
+      writer(i);
+    else
+      reader();
+  });
+}
+
+// Test readers and writers, with readers taking lots of snapshots, and
+// simultaneous clearing.
+TEST(ConcurrentReadableHashMapTest, MultiThreaded4) {
+  const int writerCount = 16;
+  const int readerCount = 8;
+  const int insertCount = 10000;
+
+  ConcurrentReadableHashMap<MultiThreadedValue> map;
+
+  std::atomic<int> writerDoneCount = {0};
+  auto writer = [&](int threadNumber) {
+    for (int i = 0; i < insertCount; i++)
+      map.getOrInsert(MultiThreadedKey{threadNumber, i},
+                      [&](MultiThreadedValue *value, bool created) {
+        [&] { ASSERT_TRUE(created); }();
+        new (value) MultiThreadedValue(MultiThreadedKey{threadNumber, i}, i);
+        return true;
+      });
+    writerDoneCount.fetch_add(1, std::memory_order_relaxed);
+  };
+
+  auto reader = [&] {
+    while (writerDoneCount.load(std::memory_order_relaxed) < writerCount) {
+      for (int threadNumber = 0; threadNumber < writerCount; threadNumber++) {
+        // With clearing, we can't expect any particular pattern. Just validate
+        // the values we do see, and make sure we don't crash.
+        for (int i = insertCount - 1; i >= 0; i--) {
+          auto snapshot = map.snapshot();
+          MultiThreadedKey key = {threadNumber, i};
+          const MultiThreadedValue *value = snapshot.find(key);
+          if (value) {
+            ASSERT_EQ(value->x, i);
+          }
+        }
+      }
+    }
+  };
+
+  auto clear = [&] {
+    while (writerDoneCount.load(std::memory_order_relaxed) < writerCount) {
+      map.clear();
+    }
+  };
+
+  threadedExecute(writerCount + readerCount + 1, [&](int i) {
+    if (i < writerCount)
+      writer(i);
+    else if (i < writerCount + readerCount)
+      reader();
+    else
+      clear();
+  });
+}
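
The tests above exercise the whole surface of the new map: writers call getOrInsert(key, callback), readers take a snapshot() and call find(key), and clear() empties the table. The following is a minimal single-threaded sketch of that usage, modeled directly on the SingleThreaded test; the CacheEntry type, the key values, and the main() harness are illustrative assumptions and are not part of this patch.

#include "swift/Runtime/Concurrent.h"
#include "llvm/ADT/Hashing.h"
#include <cassert>
#include <cstddef>
#include <new>

using namespace swift;

// Illustrative element type, modeled on SingleThreadedValue above.
struct CacheEntry {
  size_t key;
  size_t payload;
  CacheEntry(size_t key, size_t payload) : key(key), payload(payload) {}
  bool matchesKey(size_t k) { return key == k; }
  friend llvm::hash_code hash_value(const CacheEntry &entry) {
    return llvm::hash_value(entry.key);
  }
};

int main() {
  ConcurrentReadableHashMap<CacheEntry> map;

  // Writer side: getOrInsert takes the key and a callback. The callback
  // placement-constructs the element when `created` is true and returns
  // true to keep the entry (returning false discards a newly created slot).
  map.getOrInsert(size_t(42), [&](CacheEntry *entry, bool created) {
    if (created)
      new (entry) CacheEntry(42, 1);
    return true;
  });

  // Reader side: take a snapshot, then look entries up by key. find()
  // returns nullptr for keys that are not present.
  auto snapshot = map.snapshot();
  const CacheEntry *entry = snapshot.find(size_t(42));
  assert(entry != nullptr && entry->payload == 1);
  assert(snapshot.find(size_t(7)) == nullptr);

  // clear() empties the map; a fresh snapshot then finds nothing.
  map.clear();
  assert(map.snapshot().find(size_t(42)) == nullptr);
  return 0;
}

As in the tests, the element type supplies matchesKey() and an llvm::hash_code hash_value() overload whose hash agrees with the key's hash, and the getOrInsert callback is responsible for placement-constructing the element only when created is true.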