diff --git a/include/swift/Runtime/Config.h b/include/swift/Runtime/Config.h index 56f8529ffa146..82a720d03bcbb 100644 --- a/include/swift/Runtime/Config.h +++ b/include/swift/Runtime/Config.h @@ -36,7 +36,7 @@ #ifdef __s390x__ #define SWIFT_USE_SWIFTCALL 1 #else -#define SWIFT_USE_SWIFTCALL 0 +#define SWIFT_USE_SWIFTCALL 1 #endif #endif @@ -128,6 +128,11 @@ #define SWIFT_LLVM_CC_RegisterPreservingCC llvm::CallingConv::PreserveMost +#if SWIFT_USE_SWIFTCALL +#define SWIFT_LLVM_CC_SwiftCC llvm::CallingConv::Swift +#else +#define SWIFT_LLVM_CC_SwiftCC llvm::CallingConv::C +#endif // If defined, it indicates that runtime function wrappers // should be used on all platforms, even they do not support diff --git a/include/swift/Runtime/HeapObject.h b/include/swift/Runtime/HeapObject.h index 3524baf0a8e80..b40370d5fc642 100644 --- a/include/swift/Runtime/HeapObject.h +++ b/include/swift/Runtime/HeapObject.h @@ -155,12 +155,10 @@ using BoxPair = TwoWordPair; /// The heap object has an initial retain count of 1, and its metadata is set /// such that destroying the heap object destroys the contained value. SWIFT_RUNTIME_EXPORT -BoxPair::Return swift_allocBox(Metadata const *type) - SWIFT_CC(swift); +BoxPair::Return swift_allocBox(Metadata const *type); SWIFT_RUNTIME_EXPORT -BoxPair::Return (*_swift_allocBox)(Metadata const *type) - SWIFT_CC(swift); +BoxPair::Return (*_swift_allocBox)(Metadata const *type); // Allocate plain old memory. This is the generalized entry point diff --git a/include/swift/Runtime/Metadata.h b/include/swift/Runtime/Metadata.h index b9dbfe0a28560..d3c81540a9272 100644 --- a/include/swift/Runtime/Metadata.h +++ b/include/swift/Runtime/Metadata.h @@ -1331,7 +1331,7 @@ const FullOpaqueMetadata METADATA_SYM(BO); // Builtin.UnknownObject struct HeapMetadataHeaderPrefix { /// Destroy the object, returning the allocated size of the object /// or 0 if the object shouldn't be deallocated. - void (*destroy)(HeapObject *); + SWIFT_CC(swift) void (*destroy)(SWIFT_CONTEXT HeapObject *); }; /// The header present on all heap metadata. @@ -1548,7 +1548,7 @@ struct TargetNominalTypeDescriptor { }; using NominalTypeDescriptor = TargetNominalTypeDescriptor; -typedef void (*ClassIVarDestroyer)(HeapObject *); +typedef SWIFT_CC(swift) void (*ClassIVarDestroyer)(SWIFT_CONTEXT HeapObject *); /// The structure of all class metadata. This structure is embedded /// directly within the class's heap metadata structure and therefore @@ -3488,6 +3488,7 @@ std::string nameForMetadata(const Metadata *type, /// Return the superclass, if any. The result is nullptr for root /// classes and class protocol types. +SWIFT_CC(swift) SWIFT_RUNTIME_STDLIB_INTERFACE const Metadata *_swift_class_getSuperclass(const Metadata *theClass); diff --git a/include/swift/Runtime/Reflection.h b/include/swift/Runtime/Reflection.h index cb060b555dbc9..23f3e998d373f 100644 --- a/include/swift/Runtime/Reflection.h +++ b/include/swift/Runtime/Reflection.h @@ -46,6 +46,7 @@ struct MirrorReturn { /// /// Produce a mirror for any value. The runtime produces a mirror that /// structurally reflects values of any type. 
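For orientation while reading the Config.h hunk above: a minimal sketch of how the new SWIFT_LLVM_CC_SwiftCC selection is consumed on the IRGen side. The SWIFT_LLVM_CC token-pasting dispatcher and the standalone SWIFT_USE_SWIFTCALL fallback below are assumptions for illustration only; just the #if/#else block itself comes from this patch.

#include "llvm/IR/CallingConv.h"

#ifndef SWIFT_USE_SWIFTCALL
#define SWIFT_USE_SWIFTCALL 1          // stand-in for the Config.h logic above
#endif

#if SWIFT_USE_SWIFTCALL
#define SWIFT_LLVM_CC_SwiftCC llvm::CallingConv::Swift
#else
#define SWIFT_LLVM_CC_SwiftCC llvm::CallingConv::C
#endif

// Assumed dispatcher, analogous to the existing SWIFT_LLVM_CC_* macros.
#define SWIFT_LLVM_CC(CC) SWIFT_LLVM_CC_##CC

// IRGenModule (see the IRGenModule.cpp hunk later) picks its convention this way.
static const llvm::CallingConv::ID SwiftCallConv = SWIFT_LLVM_CC(SwiftCC);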
+SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT MirrorReturn swift_reflectAny(OpaqueValue *value, const Metadata *T); diff --git a/include/swift/Serialization/ModuleFormat.h b/include/swift/Serialization/ModuleFormat.h index 1ff82e05eb91d..29a4f9f093b62 100644 --- a/include/swift/Serialization/ModuleFormat.h +++ b/include/swift/Serialization/ModuleFormat.h @@ -54,7 +54,7 @@ const uint16_t VERSION_MAJOR = 0; /// in source control, you should also update the comment to briefly /// describe what change you made. The content of this comment isn't important; /// it just ensures a conflict if two people change the module format. -const uint16_t VERSION_MINOR = 315; // Last change: uniquely identify extensions +const uint16_t VERSION_MINOR = 316; // Last change: Swift calling convention using DeclID = PointerEmbeddedInt; using DeclIDField = BCFixed<31>; diff --git a/lib/IRGen/Explosion.h b/lib/IRGen/Explosion.h index b7c6edbd515aa..3b0bc24cbbf80 100644 --- a/lib/IRGen/Explosion.h +++ b/lib/IRGen/Explosion.h @@ -233,9 +233,6 @@ class ExplosionSchema { return Elements[index]; } - bool requiresIndirectParameter(IRGenModule &IGM) const; - bool requiresIndirectResult(IRGenModule &IGM) const; - typedef SmallVectorImpl::iterator iterator; typedef SmallVectorImpl::const_iterator const_iterator; @@ -255,13 +252,6 @@ class ExplosionSchema { /// - the element type, if the schema contains exactly one element; /// - an anonymous struct type concatenating those types, otherwise. llvm::Type *getScalarResultType(IRGenModule &IGM) const; - - /// Treating the types in this schema as potential arguments to a - /// function call, add them to the end of the given vector of types. - void addToArgTypes(IRGenModule &IGM, - const TypeInfo &TI, - llvm::AttributeSet &Attrs, - SmallVectorImpl &types) const; }; } // end namespace irgen diff --git a/lib/IRGen/GenCall.cpp b/lib/IRGen/GenCall.cpp index 1741107c8860c..4b43a51a26900 100644 --- a/lib/IRGen/GenCall.cpp +++ b/lib/IRGen/GenCall.cpp @@ -26,6 +26,7 @@ #include "clang/CodeGen/ModuleBuilder.h" #include "swift/AST/GenericEnvironment.h" #include "swift/SIL/SILType.h" +#include "swift/Runtime/Config.h" #include "llvm/IR/CallSite.h" #include "llvm/Support/Compiler.h" @@ -38,21 +39,11 @@ #include "IRGenFunction.h" #include "IRGenModule.h" #include "LoadableTypeInfo.h" +#include "NativeConventionSchema.h" using namespace swift; using namespace irgen; -bool ExplosionSchema::requiresIndirectResult(IRGenModule &IGM) const { - return containsAggregate() || - size() > IGM.TargetInfo.MaxScalarsForDirectResult; -} - -bool ExplosionSchema::requiresIndirectParameter(IRGenModule &IGM) const { - // For now, use the same condition as requiresIndirectSchema. We may want - // to diverge at some point. - return requiresIndirectResult(IGM); -} - llvm::Type *ExplosionSchema::getScalarResultType(IRGenModule &IGM) const { if (size() == 0) { return IGM.VoidTy; @@ -116,27 +107,9 @@ static void addInoutParameterAttributes(IRGenModule &IGM, attrs = attrs.addAttributes(IGM.LLVMContext, argIndex+1, resultAttrs); } -void ExplosionSchema::addToArgTypes(IRGenModule &IGM, - const TypeInfo &TI, - llvm::AttributeSet &Attrs, - SmallVectorImpl &types) const { - // Pass large arguments as indirect value parameters. 
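To make the swiftself/swifterror attribute plumbing in the relocated addSwiftSelfAttributes and addSwiftErrorAttributes helpers easier to picture, here is a hedged, self-contained sketch of the end result in plain LLVM C++ API terms; the function name, its parameter layout, and the (modern) API calls used are illustrative assumptions, not part of this patch.

#include "llvm/IR/Attributes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

void makeExampleSwiftCCFunction(llvm::Module &M) {
  auto &ctx = M.getContext();
  auto *i8Ptr = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(ctx));
  // void @example(i8* swiftself %context, i8** swifterror %error)
  auto *fnTy = llvm::FunctionType::get(
      llvm::Type::getVoidTy(ctx), {i8Ptr, llvm::PointerType::getUnqual(i8Ptr)},
      /*isVarArg=*/false);
  auto *fn = llvm::Function::Create(fnTy, llvm::Function::ExternalLinkage,
                                    "example", &M);
  fn->setCallingConv(llvm::CallingConv::Swift);      // what UseSwiftCC enables
  fn->addParamAttr(0, llvm::Attribute::SwiftSelf);   // context/self slot
  fn->addParamAttr(1, llvm::Attribute::SwiftError);  // error slot, only added
                                                     // when IsSwiftErrorInRegister
}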
- if (requiresIndirectParameter(IGM)) { - addIndirectValueParameterAttributes(IGM, Attrs, TI, types.size()); - types.push_back(TI.getStorageType()->getPointerTo()); - return; - } - for (auto &elt : *this) { - if (elt.isAggregate()) - types.push_back(elt.getAggregateType()->getPointerTo()); - else - types.push_back(elt.getScalarType()); - } -} - static llvm::CallingConv::ID getFreestandingConvention(IRGenModule &IGM) { // TODO: use a custom CC that returns three scalars efficiently - return llvm::CallingConv::C; + return SWIFT_LLVM_CC(SwiftCC); } /// Expand the requirements of the given abstract calling convention @@ -179,26 +152,33 @@ static void addIndirectResultAttributes(IRGenModule &IGM, attrs = attrs.addAttributes(IGM.LLVMContext, paramIndex + 1, resultAttrs); } -static void addSwiftSelfAttributes(IRGenModule &IGM, - llvm::AttributeSet &attrs, - unsigned argIndex) { +void IRGenModule::addSwiftSelfAttributes(llvm::AttributeSet &attrs, + unsigned argIndex) { + if (!UseSwiftCC) + return; static const llvm::Attribute::AttrKind attrKinds[] = { llvm::Attribute::SwiftSelf, }; auto argAttrs = - llvm::AttributeSet::get(IGM.LLVMContext, argIndex + 1, attrKinds); - attrs = attrs.addAttributes(IGM.LLVMContext, argIndex + 1, argAttrs); + llvm::AttributeSet::get(this->LLVMContext, argIndex + 1, attrKinds); + attrs = attrs.addAttributes(this->LLVMContext, argIndex + 1, argAttrs); } -static void addSwiftErrorAttributes(IRGenModule &IGM, - llvm::AttributeSet &attrs, - unsigned argIndex) { +void IRGenModule::addSwiftErrorAttributes(llvm::AttributeSet &attrs, + unsigned argIndex) { + // Don't add the swifterror attribute on ABI that don't pass it in a register. + // We create a shadow stack location of the swifterror parameter for the + // debugger on such platforms and so we can't mark the parameter with a + // swifterror attribute. + if (!UseSwiftCC || !this->IsSwiftErrorInRegister) + return; + static const llvm::Attribute::AttrKind attrKinds[] = { llvm::Attribute::SwiftError, }; auto argAttrs = - llvm::AttributeSet::get(IGM.LLVMContext, argIndex + 1, attrKinds); - attrs = attrs.addAttributes(IGM.LLVMContext, argIndex + 1, argAttrs); + llvm::AttributeSet::get(this->LLVMContext, argIndex + 1, attrKinds); + attrs = attrs.addAttributes(this->LLVMContext, argIndex + 1, argAttrs); } void irgen::addByvalArgumentAttributes(IRGenModule &IGM, @@ -236,6 +216,8 @@ namespace { llvm::AttributeSet Attrs; ForeignFunctionInfo ForeignInfo; bool CanUseSRet = true; + bool CanUseError = true; + bool CanUseSelf = true; SignatureExpansion(IRGenModule &IGM, CanSILFunctionType fnType) : IGM(IGM), FnType(fnType) {} @@ -260,6 +242,20 @@ namespace { return result; } + bool claimSelf() { + auto Ret = CanUseSelf; + assert(CanUseSelf && "Multiple self parameters?!"); + CanUseSelf = false; + return Ret; + } + + bool claimError() { + auto Ret = CanUseError; + assert(CanUseError && "Mulitple error parameters?!"); + CanUseError = false; + return Ret; + } + /// Add a pointer to the given type as the next parameter. void addPointerParameter(llvm::Type *storageType) { ParamIRTypes.push_back(storageType->getPointerTo()); @@ -300,6 +296,135 @@ llvm::Type *SignatureExpansion::expandResult() { return resultType; } +NativeConventionSchema::NativeConventionSchema(IRGenModule &IGM, + const TypeInfo *ti, + bool IsResult) + : Lowering(IGM.ClangCodeGen->CGM()) { + if (auto *loadable = dyn_cast(ti)) { + // Lower the type according to the Swift ABI. 
+ loadable->addToAggLowering(IGM, Lowering, Size(0)); + Lowering.finish(); + // Should we pass indirectly according to the ABI? + RequiresIndirect = Lowering.shouldPassIndirectly(IsResult); + } else { + Lowering.finish(); + RequiresIndirect = true; + } +} + +llvm::Type *NativeConventionSchema::getExpandedType(IRGenModule &IGM) const { + if (empty()) + return IGM.VoidTy; + SmallVector elts; + Lowering.enumerateComponents([&](clang::CharUnits offset, + clang::CharUnits end, + llvm::Type *type) { elts.push_back(type); }); + + if (elts.size() == 1) + return elts[0]; + + auto &ctx = IGM.getLLVMContext(); + return llvm::StructType::get(ctx, elts, /*packed*/ false); +} + +std::pair +NativeConventionSchema::getCoercionTypes( + IRGenModule &IGM, SmallVectorImpl &expandedTyIndicesMap) const { + auto &ctx = IGM.getLLVMContext(); + + if (empty()) { + auto type = llvm::StructType::get(ctx); + return {type, type}; + } + + clang::CharUnits lastEnd = clang::CharUnits::Zero(); + llvm::SmallSet overlappedWithSuccessor; + unsigned idx = 0; + + // Mark overlapping ranges. + Lowering.enumerateComponents( + [&](clang::CharUnits offset, clang::CharUnits end, llvm::Type *type) { + if (offset < lastEnd) { + overlappedWithSuccessor.insert(idx); + } + lastEnd = end; + ++idx; + }); + + // Create the coercion struct with only the integer portion of overlapped + // components and non-overlapped components. + idx = 0; + lastEnd = clang::CharUnits::Zero(); + SmallVector elts; + bool packed = false; + Lowering.enumerateComponents( + [&](clang::CharUnits begin, clang::CharUnits end, llvm::Type *type) { + bool overlapped = overlappedWithSuccessor.count(idx) || + (idx && overlappedWithSuccessor.count(idx - 1)); + ++idx; + if (overlapped && !isa(type)) { + // keep the old lastEnd for padding. + return; + } + // Add padding (which may include padding for overlapped non-integer + // components). + if (begin != lastEnd) { + auto paddingSize = begin - lastEnd; + assert(!paddingSize.isNegative()); + + auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx), + paddingSize.getQuantity()); + elts.push_back(padding); + } + if (!packed && + !begin.isMultipleOf(clang::CharUnits::fromQuantity( + IGM.DataLayout.getABITypeAlignment(type)))) + packed = true; + elts.push_back(type); + expandedTyIndicesMap.push_back(idx - 1); + lastEnd = end; + }); + + auto *coercionType = llvm::StructType::get(ctx, elts, packed); + if (overlappedWithSuccessor.empty()) + return {coercionType, llvm::StructType::get(ctx)}; + + // Create the coercion struct with only the non-integer overlapped + // components. + idx = 0; + lastEnd = clang::CharUnits::Zero(); + elts.clear(); + packed = false; + Lowering.enumerateComponents( + [&](clang::CharUnits begin, clang::CharUnits end, llvm::Type *type) { + bool overlapped = overlappedWithSuccessor.count(idx) || + (idx && overlappedWithSuccessor.count(idx - 1)); + ++idx; + if (!overlapped || (overlapped && isa(type))) { + // Ignore and keep the old lastEnd for padding. + return; + } + // Add padding. 
+ if (begin != lastEnd) { + auto paddingSize = begin - lastEnd; + assert(!paddingSize.isNegative()); + + auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx), + paddingSize.getQuantity()); + elts.push_back(padding); + } + if (!packed && + !begin.isMultipleOf(clang::CharUnits::fromQuantity( + IGM.DataLayout.getABITypeAlignment(type)))) + packed = true; + elts.push_back(type); + expandedTyIndicesMap.push_back(idx - 1); + lastEnd = end; + }); + auto *overlappedCoercionType = llvm::StructType::get(ctx, elts, packed); + return {coercionType, overlappedCoercionType}; +} + // TODO: Direct to Indirect result conversion could be handled in a SIL // AddressLowering pass. llvm::Type *SignatureExpansion::expandDirectResult() { @@ -312,18 +437,19 @@ llvm::Type *SignatureExpansion::expandDirectResult() { if (tuple->getNumElements() == 0) return IGM.VoidTy; - ExplosionSchema schema = IGM.getSchema(resultType); switch (FnType->getLanguage()) { case SILFunctionLanguage::C: llvm_unreachable("Expanding C/ObjC parameters in the wrong place!"); break; case SILFunctionLanguage::Swift: { - if (schema.requiresIndirectResult(IGM)) + auto &ti = IGM.getTypeInfo(resultType); + auto &native = ti.nativeReturnValueSchema(IGM); + if (native.requiresIndirect()) return addIndirectResult(); // Disable the use of sret if we have a non-trivial direct result. - if (!schema.empty()) CanUseSRet = false; - return schema.getScalarResultType(IGM); + if (!native.empty()) CanUseSRet = false; + return native.getExpandedType(IGM); } } @@ -814,10 +940,10 @@ llvm::Type *SignatureExpansion::expandExternalSignatureTypes() { case clang::ParameterABI::Ordinary: break; case clang::ParameterABI::SwiftContext: - addSwiftSelfAttributes(IGM, Attrs, getCurParamIndex()); + IGM.addSwiftSelfAttributes(Attrs, getCurParamIndex()); break; case clang::ParameterABI::SwiftErrorResult: - addSwiftErrorAttributes(IGM, Attrs, getCurParamIndex()); + IGM.addSwiftErrorAttributes(Attrs, getCurParamIndex()); break; case clang::ParameterABI::SwiftIndirectResult: addIndirectResultAttributes(IGM, Attrs, getCurParamIndex(),claimSRet()); @@ -867,8 +993,22 @@ llvm::Type *SignatureExpansion::expandExternalSignatureTypes() { return returnInfo.getCoerceToType(); } +static ArrayRef expandScalarOrStructTypeToArray(llvm::Type *&ty) { + ArrayRef expandedTys; + if (auto expansionTy = dyn_cast(ty)) { + // Is there any good reason this isn't public API of llvm::StructType? 
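To make the two-struct scheme built by getCoercionTypes above easier to follow, here is a small self-contained toy model (plain C++, no LLVM or clang types; the byte ranges are hypothetical): components that overlap a neighbour keep only their integer parts in the primary coercion struct, their non-integer parts move to the second "overlapped" struct, and any gap becomes [N x i8]-style padding.

#include <cstdio>
#include <vector>

struct Component { unsigned begin, end; bool isInt; };

int main() {
  // Hypothetical legal type sequence: an i64 at [0,8) overlapping a float at
  // [4,8), plus a non-overlapping i32 at [12,16).
  std::vector<Component> comps = {{0, 8, true}, {4, 8, false}, {12, 16, true}};

  // Mark components that overlap their successor (mirrors overlappedWithSuccessor).
  std::vector<bool> overlapsNext(comps.size(), false);
  for (size_t i = 0; i + 1 < comps.size(); ++i)
    overlapsNext[i] = comps[i + 1].begin < comps[i].end;

  auto emit = [&](bool wantOverlappedNonInt) {
    unsigned lastEnd = 0;
    for (size_t i = 0; i < comps.size(); ++i) {
      bool overlapped = overlapsNext[i] || (i && overlapsNext[i - 1]);
      bool keep = wantOverlappedNonInt ? (overlapped && !comps[i].isInt)
                                       : (!overlapped || comps[i].isInt);
      if (!keep)
        continue;                        // skipped components keep the old lastEnd
      if (comps[i].begin > lastEnd)      // gap => padding
        std::printf("  [%u x i8] padding\n", comps[i].begin - lastEnd);
      std::printf("  component [%u,%u) %s\n", comps[i].begin, comps[i].end,
                  comps[i].isInt ? "(int)" : "(non-int)");
      lastEnd = comps[i].end;
    }
  };

  std::printf("primary coercion struct:\n");
  emit(false);   // overlapped non-integer parts are dropped here
  std::printf("overlapped coercion struct:\n");
  emit(true);    // ...and show up here instead
  return 0;
}

// Expected shape for this input:
//   primary:    [0,8) int, 4 bytes padding, [12,16) int
//   overlapped: 4 bytes padding, [4,8) non-int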
+ expandedTys = makeArrayRef(expansionTy->element_begin(), + expansionTy->getNumElements()); + } else { + expandedTys = ty; + } + return expandedTys; +} + + void SignatureExpansion::expand(SILParameterInfo param) { - auto &ti = IGM.getTypeInfo(getSILFuncConventions().getSILType(param)); + auto paramSILType = getSILFuncConventions().getSILType(param); + auto &ti = IGM.getTypeInfo(paramSILType); switch (auto conv = param.getConvention()) { case ParameterConvention::Indirect_In: case ParameterConvention::Indirect_In_Guaranteed: @@ -894,8 +1034,21 @@ void SignatureExpansion::expand(SILParameterInfo param) { return; } case SILFunctionLanguage::Swift: { - auto schema = ti.getSchema(); - schema.addToArgTypes(IGM, ti, Attrs, ParamIRTypes); + auto &nativeSchema = ti.nativeParameterValueSchema(IGM); + if (nativeSchema.requiresIndirect()) { + addIndirectValueParameterAttributes(IGM, Attrs, ti, + ParamIRTypes.size()); + ParamIRTypes.push_back(ti.getStorageType()->getPointerTo()); + return; + } + if (nativeSchema.empty()) { + assert(ti.getSchema().empty()); + return; + } + auto expandedTy = nativeSchema.getExpandedType(IGM); + auto expandedTysArray = expandScalarOrStructTypeToArray(expandedTy); + for (auto *Ty : expandedTysArray) + ParamIRTypes.push_back(Ty); return; } } @@ -962,9 +1115,9 @@ void SignatureExpansion::expandParameters() { if (hasSelfContext) { auto curLength = ParamIRTypes.size(); (void) curLength; - // TODO: 'swift_context' IR attribute + if (claimSelf()) + IGM.addSwiftSelfAttributes(Attrs, curLength); expand(FnType->getSelfParameter()); - assert(ParamIRTypes.size() == curLength + 1 && "adding 'self' added unexpected number of parameters"); } else { @@ -988,7 +1141,8 @@ void SignatureExpansion::expandParameters() { llvm_unreachable("bad representation kind"); }; if (needsContext()) { - // TODO: 'swift_context' IR attribute + if (claimSelf()) + IGM.addSwiftSelfAttributes(Attrs, ParamIRTypes.size()); ParamIRTypes.push_back(IGM.RefCountedPtrTy); } } @@ -997,7 +1151,8 @@ void SignatureExpansion::expandParameters() { // formal error type; LLVM will magically turn this into a non-pointer // if we set the right attribute. if (FnType->hasErrorResult()) { - // TODO: 'swift_error' IR attribute + if (claimError()) + IGM.addSwiftErrorAttributes(Attrs, ParamIRTypes.size()); llvm::Type *errorType = IGM.getStorageType( getSILFuncConventions().getSILType(FnType->getErrorResult())); ParamIRTypes.push_back(errorType->getPointerTo()); @@ -1090,7 +1245,8 @@ void CallEmission::emitToUnmappedExplosion(Explosion &out) { // Bail out immediately on a void result. llvm::Value *result = call.getInstruction(); - if (result->getType()->isVoidTy()) return; + if (result->getType()->isVoidTy()) + return; SILFunctionConventions fnConv(getCallee().getOrigFunctionType(), IGF.getSILModule()); @@ -1107,11 +1263,29 @@ void CallEmission::emitToUnmappedExplosion(Explosion &out) { // the call. This may be different than the IR type returned by the // call itself due to ABI type coercion. auto resultType = fnConv.getSILResultType(); - auto schema = IGF.IGM.getSchema(resultType); - auto *bodyType = schema.getScalarResultType(IGF.IGM); + auto &nativeSchema = IGF.IGM.getTypeInfo(resultType).nativeReturnValueSchema(IGF.IGM); + + // For ABI reasons the result type of the call might not actually match the + // expected result type. + auto expectedNativeResultType = nativeSchema.getExpandedType(IGF.IGM); + if (result->getType() != expectedNativeResultType) { + // This should only be needed when we call C functions. 
+ assert(getCallee().getOrigFunctionType()->getLanguage() == + SILFunctionLanguage::C); + result = + IGF.coerceValue(result, expectedNativeResultType, IGF.IGM.DataLayout); + } + + // Gather the values. + Explosion nativeExplosion; + if (llvm::StructType *structType = + dyn_cast(result->getType())) + for (unsigned i = 0, e = structType->getNumElements(); i != e; ++i) + nativeExplosion.add(IGF.Builder.CreateExtractValue(result, i)); + else + nativeExplosion.add(result); - // Extract out the scalar results. - extractScalarResults(IGF, bodyType, result, out); + out = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeExplosion, resultType); } /// Emit the unsubstituted result of this call to the given address. @@ -1320,6 +1494,7 @@ void CallEmission::setFromCallee() { assert(LastArgWritten > 0); Args[--LastArgWritten] = errorResultSlot.getAddress(); addAttribute(LastArgWritten + 1, llvm::Attribute::NoCapture); + IGF.IGM.addSwiftErrorAttributes(Attrs, LastArgWritten); // Fill in the context pointer if necessary. if (!contextPtr) { @@ -1335,6 +1510,7 @@ void CallEmission::setFromCallee() { && "block function should not claimed to have data pointer"); assert(LastArgWritten > 0); Args[--LastArgWritten] = contextPtr; + IGF.IGM.addSwiftSelfAttributes(Attrs, LastArgWritten); } } @@ -1460,19 +1636,12 @@ static void emitCoerceAndExpand(IRGenFunction &IGF, paramTI.deallocateStack(IGF, StackAddress(temporary), paramTy); } -static void emitDirectExternalArgument(IRGenFunction &IGF, - SILType argType, llvm::Type *toTy, - Explosion &in, Explosion &out) { +static void emitDirectExternalArgument(IRGenFunction &IGF, SILType argType, + llvm::Type *toTy, Explosion &in, + Explosion &out) { // If we're supposed to pass directly as a struct type, that // really means expanding out as multiple arguments. - ArrayRef expandedTys; - if (auto expansionTy = dyn_cast(toTy)) { - // Is there any good reason this isn't public API of llvm::StructType? - expandedTys = makeArrayRef(expansionTy->element_begin(), - expansionTy->getNumElements()); - } else { - expandedTys = toTy; - } + ArrayRef expandedTys = expandScalarOrStructTypeToArray(toTy); auto &argTI = cast(IGF.getTypeInfo(argType)); auto inputSchema = argTI.getSchema(); @@ -1703,6 +1872,7 @@ static void externalizeArguments(IRGenFunction &IGF, const Callee &callee, } } +/// Returns whether allocas are needed. bool irgen::addNativeArgument(IRGenFunction &IGF, Explosion &in, SILParameterInfo origParamInfo, Explosion &out) { // Addresses consist of a single pointer argument. @@ -1710,12 +1880,11 @@ bool irgen::addNativeArgument(IRGenFunction &IGF, Explosion &in, out.add(in.claimNext()); return false; } - - auto &ti = cast( - IGF.getTypeInfo(IGF.IGM.silConv.getSILType(origParamInfo))); + auto paramType = IGF.IGM.silConv.getSILType(origParamInfo); + auto &ti = cast(IGF.getTypeInfo(paramType)); auto schema = ti.getSchema(); - - if (schema.requiresIndirectParameter(IGF.IGM)) { + auto &nativeSchema = ti.nativeParameterValueSchema(IGF.IGM); + if (nativeSchema.requiresIndirect()) { // Pass the argument indirectly. auto buf = IGF.createAlloca(ti.getStorageType(), ti.getFixedAlignment(), ""); @@ -1723,8 +1892,18 @@ bool irgen::addNativeArgument(IRGenFunction &IGF, Explosion &in, out.add(buf.getAddress()); return true; } else { - // Pass the argument explosion directly. 
- ti.reexplode(IGF, in, out); + if (schema.empty()) { + assert(nativeSchema.empty()); + return false; + } + assert(!nativeSchema.empty()); + + // Pass the argument explosion directly, mapping into the native swift + // calling convention. + Explosion nonNativeParam; + ti.reexplode(IGF, in, nonNativeParam); + Explosion nativeParam = nativeSchema.mapIntoNative(IGF.IGM, IGF, nonNativeParam, paramType); + nativeParam.transferInto(out, nativeParam.size()); return false; } } @@ -1954,7 +2133,13 @@ Address IRGenFunction::getErrorResultSlot(SILType errorType) { auto addr = builder.CreateAlloca(errorTI.getStorageType(), nullptr, "swifterror"); addr->setAlignment(errorTI.getFixedAlignment().getValue()); - // TODO: add swift_error attribute + + // Only add the swifterror attribute on ABIs that pass it in a register. + // We create a shadow stack location of the swifterror parameter for the + // debugger on platforms that pass swifterror by reference and so we can't + // mark the parameter with a swifterror attribute for these. + if (IGM.IsSwiftErrorInRegister) + addr->setSwiftError(true); // Initialize at the alloca point. auto nullError = llvm::ConstantPointerNull::get( @@ -2132,14 +2317,361 @@ void IRGenFunction::emitScalarReturn(llvm::Type *resultType, Builder.CreateRet(resultAgg); } -void IRGenFunction::emitScalarReturn(SILType resultType, Explosion &result) { +/// Adjust the alignment of the alloca pointed to by \p allocaAddr to the +/// required alignment of the struct \p type. +static void adjustAllocaAlignment(const llvm::DataLayout &DL, + Address allocaAddr, llvm::StructType *type) { + auto layout = DL.getStructLayout(type); + Alignment layoutAlignment = Alignment(layout->getAlignment()); + auto alloca = cast(allocaAddr.getAddress()); + if (alloca->getAlignment() < layoutAlignment.getValue()) { + alloca->setAlignment(layoutAlignment.getValue()); + allocaAddr = Address(allocaAddr.getAddress(), layoutAlignment); + } +} + +unsigned NativeConventionSchema::size() const { + if (empty()) + return 0; + unsigned size = 0; + Lowering.enumerateComponents([&](clang::CharUnits offset, + clang::CharUnits end, + llvm::Type *type) { ++size; }); + return size; +} + +static bool canMatchByTruncation(IRGenModule &IGM, + ArrayRef expandedTys, + const ExplosionSchema &schema) { + // If the schemas don't even match in number, we have to go + // through memory. + if (expandedTys.size() != schema.size() || expandedTys.empty()) + return false; + + if (expandedTys.size() == 1) return false; + + // If there are multiple elements, the pairs of types need to + // match in size upto the penultimate for the truncation to work. + size_t e = expandedTys.size(); + for (size_t i = 0; i != e - 1; ++i) { + // Check that we can truncate the last element. 
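For readers new to the swifterror machinery used in getErrorResultSlot above: the error slot remains an ordinary alloca, and flagging it swifterror is what lets LLVM keep the value in the dedicated error register on targets where IsSwiftErrorInRegister holds. A hedged sketch in plain LLVM API terms (ctx and Builder are placeholder context/IRBuilder objects, not names from this patch):

auto *errorPtrTy = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(ctx));
auto *slot = Builder.CreateAlloca(errorPtrTy, nullptr, "swifterror");
slot->setSwiftError(true);  // permits promotion into the error register
// Initialize to null at the alloca point, mirroring getErrorResultSlot above.
Builder.CreateStore(llvm::ConstantPointerNull::get(errorPtrTy), slot);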
+ llvm::Type *outputTy = schema[i].getScalarType(); + llvm::Type *inputTy = expandedTys[i]; + if (inputTy != outputTy && + IGM.DataLayout.getTypeSizeInBits(inputTy) != + IGM.DataLayout.getTypeSizeInBits(outputTy)) + return false; + } + llvm::Type *outputTy = schema[e-1].getScalarType(); + llvm::Type *inputTy = expandedTys[e-1]; + return inputTy == outputTy || (IGM.DataLayout.getTypeSizeInBits(inputTy) == + IGM.DataLayout.getTypeSizeInBits(outputTy)) || + (IGM.DataLayout.getTypeSizeInBits(inputTy) > + IGM.DataLayout.getTypeSizeInBits(outputTy) && + isa(inputTy) && isa(outputTy)); +} + +Explosion NativeConventionSchema::mapFromNative(IRGenModule &IGM, + IRGenFunction &IGF, + Explosion &native, + SILType type) const { + if (native.size() == 0) { + assert(empty() && "Empty explosion must match the native convention"); + return Explosion(); + } + + assert(!empty()); + + auto *nativeTy = getExpandedType(IGM); + auto expandedTys = expandScalarOrStructTypeToArray(nativeTy); + auto &TI = IGM.getTypeInfo(type); + auto schema = TI.getSchema(); + // The expected explosion type. + auto *explosionTy = schema.getScalarResultType(IGM); + + // Check whether we can coerce the explosion to the expected type convention. + auto &DataLayout = IGM.DataLayout; + Explosion nonNativeExplosion; + if (canCoerceToSchema(IGM, expandedTys, schema)) { + if (native.size() == 1) { + auto *elt = native.claimNext(); + if (explosionTy != elt->getType()) { + if (isa(explosionTy) && + isa(elt->getType())) { + elt = IGF.Builder.CreateTrunc(elt, explosionTy); + } else { + elt = IGF.coerceValue(elt, explosionTy, DataLayout); + } + } + nonNativeExplosion.add(elt); + return nonNativeExplosion; + } else if (nativeTy == explosionTy) { + native.transferInto(nonNativeExplosion, native.size()); + return nonNativeExplosion; + } + // Otherwise, we have to go through memory if we can match by truncation. + } else if (canMatchByTruncation(IGM, expandedTys, schema)) { + assert(expandedTys.size() == schema.size()); + for (size_t i = 0, e = expandedTys.size(); i != e; ++i) { + auto *elt = native.claimNext(); + auto *schemaTy = schema[i].getScalarType(); + auto *nativeTy = elt->getType(); + assert(nativeTy == expandedTys[i]); + if (schemaTy == nativeTy) { + // elt = elt + } else if (DataLayout.getTypeSizeInBits(schemaTy) == + DataLayout.getTypeSizeInBits(nativeTy)) + elt = IGF.coerceValue(elt, schemaTy, DataLayout); + else { + assert(DataLayout.getTypeSizeInBits(schemaTy) < + DataLayout.getTypeSizeInBits(nativeTy)); + elt = IGF.Builder.CreateTrunc(elt, schemaTy); + } + nonNativeExplosion.add(elt); + } + return nonNativeExplosion; + } + + // If not, go through memory. + auto &loadableTI = cast(TI); + + // We can get two layouts if there are overlapping ranges in the legal type + // sequence. + llvm::StructType *coercionTy, *overlappedCoercionTy; + SmallVector expandedTyIndicesMap; + std::tie(coercionTy, overlappedCoercionTy) = + getCoercionTypes(IGM, expandedTyIndicesMap); + + // Get the larger layout out of those two. + auto coercionSize = DataLayout.getTypeSizeInBits(coercionTy); + auto overlappedCoercionSize = + DataLayout.getTypeSizeInBits(overlappedCoercionTy); + llvm::StructType *largerCoercion = coercionSize >= overlappedCoercionSize + ? coercionTy + : overlappedCoercionTy; + + // Allocate a temporary for the coersion. + Address temporary; + Size tempSize; + std::tie(temporary, tempSize) = allocateForCoercion( + IGF, largerCoercion, loadableTI.getStorageType(), "temp-coercion"); + + // Make sure we have sufficiently large alignment. 
+ adjustAllocaAlignment(DataLayout, temporary, coercionTy); + adjustAllocaAlignment(DataLayout, temporary, overlappedCoercionTy); + + auto &Builder = IGF.Builder; + Builder.CreateLifetimeStart(temporary, tempSize); + + // Store the expanded type elements. + auto coercionAddr = Builder.CreateElementBitCast(temporary, coercionTy); + unsigned expandedMapIdx = 0; + SmallVector expandedElts(expandedTys.size(), nullptr); + + auto eltsArray = native.claimAll(); + SmallVector nativeElts(eltsArray.begin(), eltsArray.end()); + auto storeToFn = [&](llvm::StructType *ty, Address structAddr) { + for (auto eltIndex : indices(ty->elements())) { + auto layout = DataLayout.getStructLayout(ty); + auto eltTy = ty->getElementType(eltIndex); + // Skip padding fields. + if (eltTy->isArrayTy()) + continue; + Address eltAddr = Builder.CreateStructGEP(structAddr, eltIndex, layout); + auto index = expandedTyIndicesMap[expandedMapIdx]; + assert(index < nativeElts.size() && nativeElts[index] != nullptr); + auto nativeElt = nativeElts[index]; + Builder.CreateStore(nativeElt, eltAddr); + nativeElts[index] = nullptr; + ++expandedMapIdx; + } + }; + + storeToFn(coercionTy, coercionAddr); + if (!overlappedCoercionTy->isEmptyTy()) { + auto overlappedCoercionAddr = + Builder.CreateElementBitCast(temporary, overlappedCoercionTy); + storeToFn(overlappedCoercionTy, overlappedCoercionAddr); + } + + // Reload according to the types schema. + Address storageAddr = Builder.CreateBitCast( + temporary, loadableTI.getStorageType()->getPointerTo()); + loadableTI.loadAsTake(IGF, storageAddr, nonNativeExplosion); + + return nonNativeExplosion; +} + +Explosion NativeConventionSchema::mapIntoNative(IRGenModule &IGM, + IRGenFunction &IGF, + Explosion &fromNonNative, + SILType type) const { + if (fromNonNative.size() == 0) { + assert(empty() && "Empty explosion must match the native convention"); + return Explosion(); + } + + assert(!requiresIndirect() && "Expected direct convention"); + assert(!empty()); + + auto *nativeTy = getExpandedType(IGM); + auto expandedTys = expandScalarOrStructTypeToArray(nativeTy); + auto &TI = IGM.getTypeInfo(type); + auto schema = TI.getSchema(); + auto *explosionTy = schema.getScalarResultType(IGM); + + // Check whether we can coerce the explosion to the expected type convention. + auto &DataLayout = IGM.DataLayout; + Explosion nativeExplosion; + if (canCoerceToSchema(IGM, expandedTys, schema)) { + if (fromNonNative.size() == 1) { + auto *elt = fromNonNative.claimNext(); + if (nativeTy != elt->getType()) { + if (isa(nativeTy) && + isa(elt->getType())) + elt = IGF.Builder.CreateZExt(elt, nativeTy); + else + elt = IGF.coerceValue(elt, nativeTy, DataLayout); + } + nativeExplosion.add(elt); + return nativeExplosion; + } else if (nativeTy == explosionTy) { + fromNonNative.transferInto(nativeExplosion, fromNonNative.size()); + return nativeExplosion; + } + // Otherwise, we have to go through memory if we can't match by truncation. 
+ } else if (canMatchByTruncation(IGM, expandedTys, schema)) { + assert(expandedTys.size() == schema.size()); + for (size_t i = 0, e = expandedTys.size(); i != e; ++i) { + auto *elt = fromNonNative.claimNext(); + auto *schemaTy = elt->getType(); + auto *nativeTy = expandedTys[i]; + assert(schema[i].getScalarType() == schemaTy); + if (schemaTy == nativeTy) { + // elt = elt + } else if (DataLayout.getTypeSizeInBits(schemaTy) == + DataLayout.getTypeSizeInBits(nativeTy)) + elt = IGF.coerceValue(elt, nativeTy, DataLayout); + else { + assert(DataLayout.getTypeSizeInBits(schemaTy) < + DataLayout.getTypeSizeInBits(nativeTy)); + elt = IGF.Builder.CreateZExt(elt, nativeTy); + } + nativeExplosion.add(elt); + } + return nativeExplosion; + } + + // If not, go through memory. + auto &loadableTI = cast(TI); + + // We can get two layouts if there are overlapping ranges in the legal type + // sequence. + llvm::StructType *coercionTy, *overlappedCoercionTy; + SmallVector expandedTyIndicesMap; + std::tie(coercionTy, overlappedCoercionTy) = + getCoercionTypes(IGM, expandedTyIndicesMap); + + // Get the larger layout out of those two. + auto coercionSize = DataLayout.getTypeSizeInBits(coercionTy); + auto overlappedCoercionSize = + DataLayout.getTypeSizeInBits(overlappedCoercionTy); + llvm::StructType *largerCoercion = coercionSize >= overlappedCoercionSize + ? coercionTy + : overlappedCoercionTy; + + // Allocate a temporary for the coersion. + Address temporary; + Size tempSize; + std::tie(temporary, tempSize) = allocateForCoercion( + IGF, largerCoercion, loadableTI.getStorageType(), "temp-coercion"); + + // Make sure we have sufficiently large alignment. + adjustAllocaAlignment(DataLayout, temporary, coercionTy); + adjustAllocaAlignment(DataLayout, temporary, overlappedCoercionTy); + + auto &Builder = IGF.Builder; + Builder.CreateLifetimeStart(temporary, tempSize); + + // Initialize the memory of the temporary. + Address storageAddr = Builder.CreateBitCast( + temporary, loadableTI.getStorageType()->getPointerTo()); + loadableTI.initialize(IGF, fromNonNative, storageAddr); + + // Load the expanded type elements from memory. + auto coercionAddr = Builder.CreateElementBitCast(temporary, coercionTy); + + unsigned expandedMapIdx = 0; + SmallVector expandedElts(expandedTys.size(), nullptr); + + auto loadFromFn = [&](llvm::StructType *ty, Address structAddr) { + for (auto eltIndex : indices(ty->elements())) { + auto layout = DataLayout.getStructLayout(ty); + auto eltTy = ty->getElementType(eltIndex); + // Skip padding fields. + if (eltTy->isArrayTy()) + continue; + Address eltAddr = Builder.CreateStructGEP(structAddr, eltIndex, layout); + llvm::Value *elt = Builder.CreateLoad(eltAddr); + auto index = expandedTyIndicesMap[expandedMapIdx]; + assert(expandedElts[index] == nullptr); + expandedElts[index] = elt; + ++expandedMapIdx; + } + }; + + loadFromFn(coercionTy, coercionAddr); + if (!overlappedCoercionTy->isEmptyTy()) { + auto overlappedCoercionAddr = + Builder.CreateElementBitCast(temporary, overlappedCoercionTy); + loadFromFn(overlappedCoercionTy, overlappedCoercionAddr); + } + + Builder.CreateLifetimeEnd(temporary, tempSize); + + // Add the values to the explosion. 
+ for (auto *val : expandedElts) + nativeExplosion.add(val); + + assert(expandedTys.size() == nativeExplosion.size()); + return nativeExplosion; +} + +void IRGenFunction::emitScalarReturn(SILType resultType, Explosion &result, + bool isSwiftCCReturn) { if (result.size() == 0) { + assert(IGM.getTypeInfo(resultType).nativeReturnValueSchema(IGM).empty() && + "Empty explosion must match the native calling convention"); + Builder.CreateRetVoid(); return; } - auto *ABIType = CurFn->getReturnType(); + // In the native case no coersion is needed. + if (isSwiftCCReturn) { + auto &nativeSchema = + IGM.getTypeInfo(resultType).nativeReturnValueSchema(IGM); + assert(!nativeSchema.requiresIndirect()); + + Explosion native = + nativeSchema.mapIntoNative(IGM, *this, result, resultType); + if (native.size() == 1) { + Builder.CreateRet(native.claimNext()); + return; + } + llvm::Value *nativeAgg = + llvm::UndefValue::get(nativeSchema.getExpandedType(IGM)); + for (unsigned i = 0, e = native.size(); i != e; ++i) { + llvm::Value *elt = native.claimNext(); + nativeAgg = Builder.CreateInsertValue(nativeAgg, elt, i); + } + Builder.CreateRet(nativeAgg); + return; + } + // Otherwise we potentially need to coerce the type. We don't need to go + // through the mapping to the native calling convention. + auto *ABIType = CurFn->getReturnType(); if (result.size() == 1) { auto *returned = result.claimNext(); if (ABIType != returned->getType()) diff --git a/lib/IRGen/GenEnum.cpp b/lib/IRGen/GenEnum.cpp index 45214353fc202..14732d85e6d7e 100644 --- a/lib/IRGen/GenEnum.cpp +++ b/lib/IRGen/GenEnum.cpp @@ -856,7 +856,7 @@ namespace { void addToAggLowering(IRGenModule &IGM, SwiftAggLowering &lowering, Size offset) const override { lowering.addOpaqueData(offset.asCharUnits(), - getFixedSize().asCharUnits()); + (offset + getFixedSize()).asCharUnits()); } void emitScalarRetain(IRGenFunction &IGF, llvm::Value *value, @@ -1219,18 +1219,20 @@ namespace { void addToAggLowering(IRGenModule &IGM, SwiftAggLowering &lowering, Size offset) const override { - for (auto &elt : ElementsWithPayload) { - // Elements are always stored at offset 0. - // This will only be called on strategies for loadable types. - cast(elt.ti)->addToAggLowering(IGM, lowering, offset); - } + + Size runningOffset = offset; + PayloadSchema.forEachType(IGM, [&](llvm::Type *payloadTy) { + lowering.addTypedData(payloadTy, runningOffset.asCharUnits()); + runningOffset += Size(IGM.DataLayout.getTypeStoreSize(payloadTy)); + }); // Add the extra tag bits. 
if (ExtraTagBitCount > 0) { auto tagStoreSize = IGM.DataLayout.getTypeStoreSize(ExtraTagTy); auto tagOffset = offset + getOffsetOfExtraTagBits(); + assert(tagOffset == runningOffset); lowering.addOpaqueData(tagOffset.asCharUnits(), - Size(tagStoreSize).asCharUnits()); + (tagOffset + Size(tagStoreSize)).asCharUnits()); } } diff --git a/lib/IRGen/GenFunc.cpp b/lib/IRGen/GenFunc.cpp index d28042dda7b15..2ff0b34c7ce6c 100644 --- a/lib/IRGen/GenFunc.cpp +++ b/lib/IRGen/GenFunc.cpp @@ -86,11 +86,12 @@ #include "llvm/Support/Debug.h" #include "llvm/ADT/StringSwitch.h" -#include "IndirectTypeInfo.h" #include "EnumPayload.h" #include "Explosion.h" +#include "FixedTypeInfo.h" #include "GenCall.h" #include "GenClass.h" +#include "GenFunc.h" #include "GenHeap.h" #include "GenMeta.h" #include "GenObjC.h" @@ -101,9 +102,9 @@ #include "IRGenDebugInfo.h" #include "IRGenFunction.h" #include "IRGenModule.h" -#include "FixedTypeInfo.h" +#include "IndirectTypeInfo.h" +#include "NativeConventionSchema.h" #include "ScalarTypeInfo.h" -#include "GenFunc.h" #include "Signature.h" #include "IRGenMangler.h" @@ -726,6 +727,8 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, llvm::Function *fwd = llvm::Function::Create(fwdTy, llvm::Function::InternalLinkage, llvm::StringRef(thunkName), &IGM.Module); + fwd->setCallingConv( + expandCallingConv(IGM, SILFunctionTypeRepresentation::Thick)); auto initialAttrs = IGM.constructInitialAttributes(); // Merge initialAttrs with outAttrs. @@ -747,8 +750,9 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, GenericContextScope scope(IGM, origType->getGenericSignature()); // Forward the indirect return values. - auto &resultTI = IGM.getTypeInfo(outConv.getSILResultType()); - if (resultTI.getSchema().requiresIndirectResult(IGM)) + auto &nativeSchema = IGM.getTypeInfo(outConv.getSILResultType()) + .nativeReturnValueSchema(IGM); + if (nativeSchema.requiresIndirect()) args.add(origParams.claimNext()); SILFunctionConventions origConv(origType, IGM.getSILModule()); @@ -761,14 +765,15 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, // Reemit the parameters as unsubstituted. for (unsigned i = 0; i < outType->getParameters().size(); ++i) { - Explosion arg; auto origParamInfo = origType->getParameters()[i]; auto &ti = IGM.getTypeInfoForLowered(origParamInfo.getType()); auto schema = ti.getSchema(); + auto origParamSILType = IGM.silConv.getSILType(origParamInfo); // Forward the address of indirect value params. - if (!origConv.isSILIndirect(origParamInfo) - && schema.requiresIndirectParameter(IGM)) { + auto &nativeSchemaOrigParam = ti.nativeParameterValueSchema(IGM); + bool isIndirectParam = origConv.isSILIndirect(origParamInfo); + if (!isIndirectParam && nativeSchemaOrigParam.requiresIndirect()) { auto addr = origParams.claimNext(); if (addr->getType() != ti.getStorageType()->getPointerTo()) addr = subIGF.Builder.CreateBitCast(addr, @@ -776,10 +781,37 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, args.add(addr); continue; } - - emitApplyArgument(subIGF, origParamInfo, - outType->getParameters()[i], - origParams, args); + + auto outTypeParamInfo = outType->getParameters()[i]; + // Indirect parameters need no mapping through the native calling + // convention. + if (isIndirectParam) { + emitApplyArgument(subIGF, origParamInfo, outTypeParamInfo, origParams, + args); + continue; + } + + // Map from the native calling convention into the explosion schema. 
+ auto outTypeParamSILType = IGM.silConv.getSILType(origParamInfo); + auto &nativeSchemaOutTypeParam = + IGM.getTypeInfo(outTypeParamSILType).nativeParameterValueSchema(IGM); + Explosion nativeParam; + origParams.transferInto(nativeParam, nativeSchemaOutTypeParam.size()); + Explosion nonNativeParam = nativeSchemaOutTypeParam.mapFromNative( + subIGF.IGM, subIGF, nativeParam, outTypeParamSILType); + assert(nativeParam.empty()); + + // Emit unsubstituted argument for call. + Explosion nonNativeApplyArg; + emitApplyArgument(subIGF, origParamInfo, outTypeParamInfo, nonNativeParam, + nonNativeApplyArg); + assert(nonNativeParam.empty()); + // Map back from the explosion scheme to the native calling convention for + // the call. + Explosion nativeApplyArg = nativeSchemaOrigParam.mapIntoNative( + subIGF.IGM, subIGF, nonNativeApplyArg, origParamSILType); + assert(nonNativeApplyArg.empty()); + nativeApplyArg.transferInto(args, nativeApplyArg.size()); } } @@ -1071,7 +1103,6 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, // Okay, this is where the callee context goes. } else if (fnContext) { - // TODO: swift_context marker. args.add(fnContext); // Pass a placeholder for thin function calls. @@ -1087,7 +1118,6 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, // Pass down the error result. if (origType->hasErrorResult()) { llvm::Value *errorResultPtr = origParams.claimNext(); - // TODO: swift_error marker. args.add(errorResultPtr); } @@ -1109,9 +1139,9 @@ static llvm::Function *emitPartialApplicationForwarder(IRGenModule &IGM, call->setCallingConv(staticFnPtr->getCallingConv()); } else { // Otherwise, use the default attributes for the dynamic type. - // TODO: Currently all indirect function values use some variation of the - // "C" calling convention, but that may change. call->setAttributes(origAttrs); + // Use the calling convention of the partially applied function type. + call->setCallingConv(expandCallingConv(IGM, origType->getRepresentation())); } if (addressesToDeallocate.empty() && !needsAllocas && (!consumesContext || !dependsOnContextLifetime)) @@ -1301,7 +1331,12 @@ void irgen::emitFunctionPartialApplication(IRGenFunction &IGF, // to capture), and the dest ownership semantics match the parameter's, // skip building the box and thunk and just take the pointer as // context. - if (!origType->isPolymorphic() && + // TODO: We can only do this and use swiftself if all our swiftcc emit the + // last parameter that fits into a register as swiftself. + // We should get this optimization back using the @convention(closure) whose + // box argument should just be swift self. 
+ if (false && + !origType->isPolymorphic() && hasSingleSwiftRefcountedContext == Yes && outType->getCalleeConvention() == *singleRefcountedConvention) { assert(args.size() == 1); diff --git a/lib/IRGen/GenHeap.cpp b/lib/IRGen/GenHeap.cpp index 2bfb459f510c5..0e6f309692e92 100644 --- a/lib/IRGen/GenHeap.cpp +++ b/lib/IRGen/GenHeap.cpp @@ -196,7 +196,10 @@ static llvm::Function *createDtorFn(IRGenModule &IGM, llvm::Function::Create(IGM.DeallocatingDtorTy, llvm::Function::PrivateLinkage, "objectdestroy", &IGM.Module); - fn->setAttributes(IGM.constructInitialAttributes()); + auto attrs = IGM.constructInitialAttributes(); + IGM.addSwiftSelfAttributes(attrs, 0); + fn->setAttributes(attrs); + fn->setCallingConv(IGM.SwiftCC); IRGenFunction IGF(IGM, fn); if (IGM.DebugInfo) diff --git a/lib/IRGen/GenObjC.cpp b/lib/IRGen/GenObjC.cpp index 01a6fa006a35d..e16bbad42729b 100644 --- a/lib/IRGen/GenObjC.cpp +++ b/lib/IRGen/GenObjC.cpp @@ -35,6 +35,7 @@ #include "CallEmission.h" #include "Explosion.h" +#include "GenCall.h" #include "GenClass.h" #include "GenFunc.h" #include "GenHeap.h" @@ -46,6 +47,7 @@ #include "IRGenFunction.h" #include "IRGenModule.h" #include "Linking.h" +#include "NativeConventionSchema.h" #include "ScalarTypeInfo.h" #include "StructLayout.h" @@ -767,6 +769,8 @@ static llvm::Function *emitObjCPartialApplicationForwarder(IRGenModule &IGM, llvm::Function::Create(fwdTy, llvm::Function::InternalLinkage, MANGLE_AS_STRING(OBJC_PARTIAL_APPLY_THUNK_SYM), &IGM.Module); + fwd->setCallingConv( + expandCallingConv(IGM, SILFunctionTypeRepresentation::Thick)); auto initialAttrs = IGM.constructInitialAttributes(); // Merge initialAttrs with attrs. @@ -841,7 +845,8 @@ static llvm::Function *emitObjCPartialApplicationForwarder(IRGenModule &IGM, SILType appliedResultTy = origMethodType->getDirectFormalResultsType(); indirectedResultTI = &cast(IGM.getTypeInfo(appliedResultTy)); - if (indirectedResultTI->getSchema().requiresIndirectResult(IGM)) { + auto &nativeSchema = indirectedResultTI->nativeReturnValueSchema(IGM); + if (nativeSchema.requiresIndirect()) { indirectedDirectResult = params.claimNext(); } } @@ -863,11 +868,12 @@ static llvm::Function *emitObjCPartialApplicationForwarder(IRGenModule &IGM, // Otherwise, we have a loadable type that can either be passed directly or // indirectly. assert(info.getSILStorageType().isObject()); - auto &ti = cast(IGM.getTypeInfo(info.getSILStorageType())); - auto schema = ti.getSchema(); + auto &ti = + cast(IGM.getTypeInfo(info.getSILStorageType())); // Load the indirectly passed parameter. 
- if (schema.requiresIndirectParameter(IGM)) { + auto &nativeSchema = ti.nativeParameterValueSchema(IGM); + if (nativeSchema.requiresIndirect()) { Address paramAddr = ti.getAddressForPointer(params.claimNext()); ti.loadAsTake(subIGF, paramAddr, translatedParams); continue; @@ -920,7 +926,7 @@ static llvm::Function *emitObjCPartialApplicationForwarder(IRGenModule &IGM, auto &callee = emission.getCallee(); auto resultType = callee.getOrigFunctionType()->getDirectFormalResultsType(); - subIGF.emitScalarReturn(resultType, result); + subIGF.emitScalarReturn(resultType, result, true /*isSwiftCCReturn*/); } return fwd; diff --git a/lib/IRGen/GenType.cpp b/lib/IRGen/GenType.cpp index 8bd07346ca285..6c5f1c930bb58 100644 --- a/lib/IRGen/GenType.cpp +++ b/lib/IRGen/GenType.cpp @@ -43,6 +43,7 @@ #include "ReferenceTypeInfo.h" #include "ScalarTypeInfo.h" #include "WeakTypeInfo.h" +#include "NativeConventionSchema.h" using namespace swift; using namespace irgen; @@ -81,6 +82,13 @@ ExplosionSchema TypeInfo::getSchema() const { return schema; } +TypeInfo::~TypeInfo() { + if (nativeReturnSchema) + delete nativeReturnSchema; + if (nativeParameterSchema) + delete nativeParameterSchema; +} + Address TypeInfo::getAddressForPointer(llvm::Value *ptr) const { assert(ptr->getType()->getPointerElementType() == StorageType); return Address(ptr, StorageAlignment); @@ -98,6 +106,20 @@ bool TypeInfo::isKnownEmpty(ResilienceExpansion expansion) const { return false; } +const NativeConventionSchema & +TypeInfo::nativeReturnValueSchema(IRGenModule &IGM) const { + if (nativeReturnSchema == nullptr) + nativeReturnSchema = new NativeConventionSchema(IGM, this, true); + return *nativeReturnSchema; +} + +const NativeConventionSchema & +TypeInfo::nativeParameterValueSchema(IRGenModule &IGM) const { + if (nativeParameterSchema == nullptr) + nativeParameterSchema = new NativeConventionSchema(IGM, this, false); + return *nativeParameterSchema; +} + /// Copy a value from one object to a new object, directly taking /// responsibility for anything it might have. This is like C++ /// move-initialization, except the old object will not be destroyed. @@ -152,7 +174,8 @@ void LoadableTypeInfo::addScalarToAggLowering(IRGenModule &IGM, SwiftAggLowering &lowering, llvm::Type *type, Size offset, Size storageSize) { - lowering.addTypedData(type, offset.asCharUnits(), storageSize.asCharUnits()); + lowering.addTypedData(type, offset.asCharUnits(), + offset.asCharUnits() + storageSize.asCharUnits()); } static llvm::Constant *asSizeConstant(IRGenModule &IGM, Size size) { @@ -549,7 +572,7 @@ namespace { void addToAggLowering(IRGenModule &IGM, SwiftAggLowering &lowering, Size offset) const override { lowering.addOpaqueData(offset.asCharUnits(), - getFixedSize().asCharUnits()); + (offset + getFixedSize()).asCharUnits()); } void packIntoEnumPayload(IRGenFunction &IGF, @@ -1598,15 +1621,6 @@ llvm::PointerType *IRGenModule::isSingleIndirectValue(SILType type) { return nullptr; } -/// Determine whether this type requires an indirect result. -llvm::PointerType *IRGenModule::requiresIndirectResult(SILType type) { - auto &ti = getTypeInfo(type); - ExplosionSchema schema = ti.getSchema(); - if (schema.requiresIndirectResult(*this)) - return ti.getStorageType()->getPointerTo(); - return nullptr; -} - /// Determine whether this type is known to be POD. 
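The addToAggLowering / addScalarToAggLowering fixes above all correct the same slip: clang's SwiftAggLowering takes byte ranges, that is a begin and an end offset, not a begin offset plus a size. A short illustration with hypothetical offsets (lowering is a clang::CodeGen::swiftcall::SwiftAggLowering and ctx an llvm::LLVMContext, both placeholders):

// A 4-byte float stored at offset 8 of the aggregate:
lowering.addTypedData(llvm::Type::getFloatTy(ctx),
                      clang::CharUnits::fromQuantity(8),    // begin
                      clang::CharUnits::fromQuantity(12));  // end = begin + store size
// A 2-byte opaque blob (e.g. extra tag bits) right after it:
lowering.addOpaqueData(clang::CharUnits::fromQuantity(12),  // begin
                       clang::CharUnits::fromQuantity(14)); // end, not a size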
bool IRGenModule::isPOD(SILType type, ResilienceExpansion expansion) { if (type.is()) return false; diff --git a/lib/IRGen/IRGenFunction.h b/lib/IRGen/IRGenFunction.h index 071c28cc8e6cc..b23d42d2f3cbb 100644 --- a/lib/IRGen/IRGenFunction.h +++ b/lib/IRGen/IRGenFunction.h @@ -96,7 +96,8 @@ class IRGenFunction { //--- Function prologue and epilogue ------------------------------------------- public: Explosion collectParameters(); - void emitScalarReturn(SILType resultTy, Explosion &scalars); + void emitScalarReturn(SILType resultTy, Explosion &scalars, + bool isSwiftCCReturn); void emitScalarReturn(llvm::Type *resultTy, Explosion &scalars); void emitBBForReturn(); diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp index c1d2a684fe3d7..d3359bac9a53a 100644 --- a/lib/IRGen/IRGenModule.cpp +++ b/lib/IRGen/IRGenModule.cpp @@ -28,6 +28,7 @@ #include "clang/Basic/TargetInfo.h" #include "clang/CodeGen/CodeGenABITypes.h" #include "clang/CodeGen/ModuleBuilder.h" +#include "clang/CodeGen/SwiftCallingConv.h" #include "clang/Lex/Preprocessor.h" #include "clang/Lex/PreprocessorOptions.h" #include "clang/Lex/HeaderSearch.h" @@ -387,10 +388,17 @@ IRGenModule::IRGenModule(IRGenerator &irgen, else RegisterPreservingCC = DefaultCC; + SwiftCC = SWIFT_LLVM_CC(SwiftCC); + UseSwiftCC = (SwiftCC == llvm::CallingConv::Swift); + if (IRGen.Opts.DebugInfoKind > IRGenDebugInfoKind::None) DebugInfo = new IRGenDebugInfo(IRGen.Opts, *CI, *this, Module, SF); initClangTypeConverter(); + + IsSwiftErrorInRegister = + clang::CodeGen::swiftcall::isSwiftErrorLoweredInRegister( + ClangCodeGen->CGM()); } IRGenModule::~IRGenModule() { diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h index 28a25ec539956..486718e936bb4 100644 --- a/lib/IRGen/IRGenModule.h +++ b/lib/IRGen/IRGenModule.h @@ -393,6 +393,9 @@ class IRGenModule { /// Should we add value names to local IR values? bool EnableValueNames = false; + // Is swifterror returned in a register by the target ABI. + bool IsSwiftErrorInRegister; + llvm::Type *VoidTy; /// void (usually {}) llvm::IntegerType *Int1Ty; /// i1 llvm::IntegerType *Int8Ty; /// i8 @@ -473,6 +476,8 @@ class IRGenModule { llvm::CallingConv::ID C_CC; /// standard C calling convention llvm::CallingConv::ID DefaultCC; /// default calling convention llvm::CallingConv::ID RegisterPreservingCC; /// lightweight calling convention + llvm::CallingConv::ID SwiftCC; /// swift calling convention + bool UseSwiftCC; llvm::FunctionType *getAssociatedTypeMetadataAccessFunctionTy(); llvm::FunctionType *getAssociatedTypeWitnessTableAccessFunctionTy(); @@ -591,7 +596,6 @@ class IRGenModule { ExplosionSchema getSchema(SILType T); unsigned getExplosionSize(SILType T); llvm::PointerType *isSingleIndirectValue(SILType T); - llvm::PointerType *requiresIndirectResult(SILType T); bool isPOD(SILType type, ResilienceExpansion expansion); clang::CanQual getClangType(CanType type); clang::CanQual getClangType(SILType type); @@ -995,6 +999,12 @@ private: \ /// the binary. void setTrueConstGlobal(llvm::GlobalVariable *var); + /// Add the swiftself attribute. + void addSwiftSelfAttributes(llvm::AttributeSet &attrs, unsigned argIndex); + + /// Add the swifterror attribute. 
+ void addSwiftErrorAttributes(llvm::AttributeSet &attrs, unsigned argIndex); + private: llvm::Constant *getAddrOfLLVMVariable(LinkEntity entity, Alignment alignment, diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index b9989f63b3512..359e95510e676 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -68,6 +68,7 @@ #include "GenType.h" #include "IRGenDebugInfo.h" #include "IRGenModule.h" +#include "NativeConventionSchema.h" #include "ReferenceTypeInfo.h" #include "WeakTypeInfo.h" @@ -1170,15 +1171,22 @@ static void bindParameter(IRGenSILFunction &IGF, auto &loadableTI = cast(paramTI); // If the explosion must be passed indirectly, load the value from the // indirect address. - if (loadableTI.getSchema().requiresIndirectParameter(IGF.IGM)) { + auto &nativeSchema = paramTI.nativeParameterValueSchema(IGF.IGM); + if (nativeSchema.requiresIndirect()) { Address paramAddr = loadableTI.getAddressForPointer(allParamValues.claimNext()); loadableTI.loadAsTake(IGF, paramAddr, paramValues); } else { - // Otherwise, we can just take the exploded arguments. - // FIXME: It doesn't necessarily make sense to pass all types using their - // explosion schema. - loadableTI.reexplode(IGF, allParamValues, paramValues); + if (!nativeSchema.empty()) { + // Otherwise, we map from the native convention to the type's explosion + // schema. + Explosion nativeParam; + allParamValues.transferInto(nativeParam, nativeSchema.size()); + paramValues = nativeSchema.mapFromNative(IGF.IGM, IGF, nativeParam, + param->getType()); + } else { + assert(paramTI.getSchema().empty()); + } } IGF.setLoweredExplosion(param, paramValues); return; @@ -1202,10 +1210,11 @@ static void emitEntryPointArgumentsNativeCC(IRGenSILFunction &IGF, auto funcTy = IGF.CurSILFn->getLoweredFunctionType(); // Map the indirect return if present. - ArrayRef params - = emitEntryPointIndirectReturn(IGF, entry, allParamValues, funcTy, - [&](SILType retType) -> bool { - return IGF.IGM.requiresIndirectResult(retType); + ArrayRef params = emitEntryPointIndirectReturn( + IGF, entry, allParamValues, funcTy, [&](SILType retType) -> bool { + auto &schema = + IGF.IGM.getTypeInfo(retType).nativeReturnValueSchema(IGF.IGM); + return schema.requiresIndirect(); }); // The witness method CC passes Self as a final argument. @@ -2327,7 +2336,11 @@ static void emitReturnInst(IRGenSILFunction &IGF, retTI.initialize(IGF, result, IGF.IndirectReturn); IGF.Builder.CreateRetVoid(); } else { - IGF.emitScalarReturn(resultTy, result); + auto funcLang = IGF.CurSILFn->getLoweredFunctionType()->getLanguage(); + auto swiftCCReturn = funcLang == SILFunctionLanguage::Swift; + assert(swiftCCReturn || + funcLang == SILFunctionLanguage::C && "Need to handle all cases"); + IGF.emitScalarReturn(resultTy, result, swiftCCReturn); } } @@ -3211,6 +3224,10 @@ void IRGenSILFunction::visitStoreInst(swift::StoreInst *i) { /// Emit the artificial error result argument. void IRGenSILFunction::emitErrorResultVar(SILResultInfo ErrorInfo, DebugValueInst *DbgValue) { + // We don't need a shadow error variable for debugging on ABI's that return + // swifterror in a register. 
+ if (IGM.IsSwiftErrorInRegister) + return; auto ErrorResultSlot = getErrorResultSlot(IGM.silConv.getSILType(ErrorInfo)); SILDebugVariable Var = DbgValue->getVarInfo(); auto Storage = emitShadowCopy(ErrorResultSlot.getAddress(), getDebugScope(), diff --git a/lib/IRGen/NativeConventionSchema.h b/lib/IRGen/NativeConventionSchema.h new file mode 100644 index 0000000000000..76da00facb9f7 --- /dev/null +++ b/lib/IRGen/NativeConventionSchema.h @@ -0,0 +1,85 @@ +//===--- NativeConventionSchema.h - R-Value Schema for SwiftCC -*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// A schema that describes the explosion of values for passing according to the +// native calling convention. +// +//===----------------------------------------------------------------------===// +#ifndef SWIFT_IRGEN_NATIVECONVENTIONSCHEMA_H +#define SWIFT_IRGEN_NATIVECONVENTIONSCHEMA_H + +#include "clang/CodeGen/SwiftCallingConv.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/SmallVector.h" +#include "IRGen.h" +#include "IRGenFunction.h" + +namespace swift { +namespace irgen { + +class NativeConventionSchema { + clang::CodeGen::swiftcall::SwiftAggLowering Lowering; + bool RequiresIndirect; + +public: + using EnumerationCallback = + clang::CodeGen::swiftcall::SwiftAggLowering::EnumerationCallback; + + NativeConventionSchema(IRGenModule &IGM, const TypeInfo *TI, bool isResult); + + NativeConventionSchema() = delete; + NativeConventionSchema(const NativeConventionSchema &) = delete; + NativeConventionSchema &operator=(const NativeConventionSchema&) = delete; + + bool requiresIndirect() const { return RequiresIndirect; } + bool empty() const { return Lowering.empty(); } + + llvm::Type *getExpandedType(IRGenModule &IGM) const; + + /// The number of components in the schema. + unsigned size() const; + + void enumerateComponents(EnumerationCallback callback) const { + Lowering.enumerateComponents(callback); + } + + /// Map from a non-native explosion to an explosion that follows the native + /// calling convention's schema. + Explosion mapIntoNative(IRGenModule &IGM, IRGenFunction &IGF, + Explosion &fromNonNative, SILType type) const; + + /// Map form a native explosion that follows the native calling convention's + /// schema to an non-native explosion whose schema is described by + /// type.getSchema(). + Explosion mapFromNative(IRGenModule &IGM, IRGenFunction &IGF, + Explosion &native, SILType type) const; + + /// Return a pair of structs that can be used to load/store the components of + /// the native schema from/to the memory representation as defined by the + /// value's loadable type info. + /// The second layout is only necessary if there are overlapping components in + /// the legal type sequence. It contains the non-integer components of + /// overlapped components of the legal type sequence. + /// + /// \p ExpandedTyIndices is a map from the non-array type elements of the + /// returned struct types (viewed concatenated) to the index in the expanded + /// type. 
+ std::pair<llvm::StructType *, llvm::StructType *> + getCoercionTypes(IRGenModule &IGM, + SmallVectorImpl<unsigned> &expandedTyIndicesMap) const; +}; + + +} // end namespace irgen +} // end namespace swift + +#endif diff --git a/lib/IRGen/ScalarTypeInfo.h b/lib/IRGen/ScalarTypeInfo.h index d40838e6e63b5..d2471e6888b27 100644 --- a/lib/IRGen/ScalarTypeInfo.h +++ b/lib/IRGen/ScalarTypeInfo.h @@ -199,10 +199,11 @@ class SingleScalarTypeInfo : public ScalarTypeInfo { void addToAggLowering(IRGenModule &IGM, SwiftAggLowering &lowering, Size offset) const override { - LoadableTypeInfo::addScalarToAggLowering(IGM, lowering, - asDerived().getScalarType(), - offset, - asDerived().Derived::getFixedSize()); + // Can't use getFixedSize because it returns the alloc size, not the store + // size. + LoadableTypeInfo::addScalarToAggLowering( + IGM, lowering, asDerived().getScalarType(), offset, + Size(IGM.DataLayout.getTypeStoreSize(asDerived().getScalarType()))); } }; diff --git a/lib/IRGen/TypeInfo.h b/lib/IRGen/TypeInfo.h index 20c9f60d536a4..557a97328dc15 100644 --- a/lib/IRGen/TypeInfo.h +++ b/lib/IRGen/TypeInfo.h @@ -44,6 +44,7 @@ namespace irgen { class IRGenModule; class Explosion; class ExplosionSchema; + class NativeConventionSchema; enum OnHeap_t : unsigned char; class OwnedAddress; class RValue; @@ -89,7 +90,8 @@ class TypeInfo { IsBitwiseTakable_t bitwiseTakable, IsFixedSize_t alwaysFixedSize, SpecialTypeInfoKind stik) - : NextConverted(0), StorageType(Type), StorageAlignment(A), + : NextConverted(0), StorageType(Type), nativeReturnSchema(nullptr), + nativeParameterSchema(nullptr), StorageAlignment(A), POD(pod), BitwiseTakable(bitwiseTakable), AlwaysFixedSize(alwaysFixedSize), STIK(stik), SubclassKind(InvalidSubclassKind) { @@ -102,7 +104,7 @@ class TypeInfo { } public: - virtual ~TypeInfo() = default; + virtual ~TypeInfo(); /// Unsafely cast this to the given subtype. template <class T> const T &as() const { @@ -115,6 +117,9 @@ class TypeInfo { /// non-fixed types, this is really useful only for forming pointers to it. llvm::Type *StorageType; + mutable NativeConventionSchema *nativeReturnSchema; + mutable NativeConventionSchema *nativeParameterSchema; + /// The storage alignment of this type in bytes. This is never zero /// for a completely-converted type. Alignment StorageAlignment; @@ -483,6 +488,12 @@ class TypeInfo { virtual void initializeArrayWithTakeBackToFront(IRGenFunction &IGF, Address dest, Address src, llvm::Value *count, SILType T) const; + + /// Get the native (ABI) convention for a return value of this type. + const NativeConventionSchema &nativeReturnValueSchema(IRGenModule &IGM) const; + + /// Get the native (ABI) convention for a parameter value of this type.
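+  ///
+  /// Typical use when binding a parameter in a function prologue (a sketch;
+  /// mirrors bindParameter in IRGenSIL.cpp):
+  /// \code
+  ///   auto &schema = paramTI.nativeParameterValueSchema(IGM);
+  ///   if (schema.requiresIndirect()) {
+  ///     // The value was passed by address; load it from the pointer argument.
+  ///   } else if (!schema.empty()) {
+  ///     // Claim schema.size() values from the incoming explosion and map
+  ///     // them back to the type's own explosion schema.
+  ///     paramValues = schema.mapFromNative(IGM, IGF, nativeParam, paramType);
+  ///   }
+  /// \endcode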
+ const NativeConventionSchema &nativeParameterValueSchema(IRGenModule &IGM) const; }; } // end namespace irgen diff --git a/stdlib/private/StdlibUnittestFoundationExtras/UnavailableFoundationMethodThunks.mm b/stdlib/private/StdlibUnittestFoundationExtras/UnavailableFoundationMethodThunks.mm index 2500743b1ad2d..517e257736e28 100644 --- a/stdlib/private/StdlibUnittestFoundationExtras/UnavailableFoundationMethodThunks.mm +++ b/stdlib/private/StdlibUnittestFoundationExtras/UnavailableFoundationMethodThunks.mm @@ -11,7 +11,9 @@ //===----------------------------------------------------------------------===// #include +#include "swift/Runtime/Config.h" +SWIFT_CC(swift) extern "C" void swift_stdlib_NSArray_getObjects(NSArray NS_RELEASES_ARGUMENT *_Nonnull nsArray, id *objects, NSUInteger rangeLocation, @@ -20,6 +22,7 @@ [nsArray release]; } +SWIFT_CC(swift) extern "C" void swift_stdlib_NSDictionary_getObjects(NSDictionary *_Nonnull nsDictionary, id *objects, id *keys) { diff --git a/stdlib/public/SDK/Foundation/DataThunks.m b/stdlib/public/SDK/Foundation/DataThunks.m index 55bd554f31188..dad06f7e0cdf7 100644 --- a/stdlib/public/SDK/Foundation/DataThunks.m +++ b/stdlib/public/SDK/Foundation/DataThunks.m @@ -21,14 +21,17 @@ extern const NSDataDeallocator NSDataDeallocatorFree; extern const NSDataDeallocator NSDataDeallocatorNone; +SWIFT_CC(swift) void __NSDataInvokeDeallocatorVM(void *mem, NSUInteger length) { NSDataDeallocatorVM(mem, length); } +SWIFT_CC(swift) void __NSDataInvokeDeallocatorUnmap(void *mem, NSUInteger length) { NSDataDeallocatorUnmap(mem, length); } +SWIFT_CC(swift) void __NSDataInvokeDeallocatorFree(void *mem, NSUInteger length) { NSDataDeallocatorFree(mem, length); } diff --git a/stdlib/public/SDK/Foundation/FileManagerThunks.m b/stdlib/public/SDK/Foundation/FileManagerThunks.m index 176df6c3ffd9e..375bc2f550f2e 100644 --- a/stdlib/public/SDK/Foundation/FileManagerThunks.m +++ b/stdlib/public/SDK/Foundation/FileManagerThunks.m @@ -11,7 +11,9 @@ //===----------------------------------------------------------------------===// #import +#include "swift/Runtime/Config.h" +SWIFT_CC(swift) extern /*"C"*/ NS_RETURNS_RETAINED id NS_Swift_NSFileManager_replaceItemAtURL_withItemAtURL_backupItemName_options( NSFileManager *NS_RELEASES_ARGUMENT _Nonnull self_, diff --git a/stdlib/public/SDK/Foundation/Hashing.m b/stdlib/public/SDK/Foundation/Hashing.m index 817e8acf5d564..2cc5772000ae4 100644 --- a/stdlib/public/SDK/Foundation/Hashing.m +++ b/stdlib/public/SDK/Foundation/Hashing.m @@ -13,13 +13,16 @@ // THIS IS JUST TEMPORARY TO AVOID LOCKSTEP ISSUES WITH COREFOUNDATION #import +#include "swift/Runtime/Config.h" #define HASHFACTOR 2654435761U +SWIFT_CC(swift) CFHashCode __CFHashInt(long i) { return ((i > 0) ? 
(CFHashCode)(i) : (CFHashCode)(-i)) * HASHFACTOR; } +SWIFT_CC(swift) CFHashCode __CFHashDouble(double d) { double dInt; if (d < 0) d = -d; @@ -27,3 +30,10 @@ CFHashCode __CFHashDouble(double d) { CFHashCode integralHash = HASHFACTOR * (CFHashCode)fmod(dInt, (double)ULONG_MAX); return (CFHashCode)(integralHash + (CFHashCode)((d - dInt) * ULONG_MAX)); } + +extern CFHashCode CFHashBytes(uint8_t *bytes, long len); + +SWIFT_CC(swift) +CFHashCode __CFHashBytes(uint8_t *bytes, long len) { + return CFHashBytes(bytes, len); +} diff --git a/stdlib/public/SDK/Foundation/Hashing.swift b/stdlib/public/SDK/Foundation/Hashing.swift index 9c7d952a0e204..6224de83fee2f 100644 --- a/stdlib/public/SDK/Foundation/Hashing.swift +++ b/stdlib/public/SDK/Foundation/Hashing.swift @@ -18,5 +18,5 @@ internal func __CFHashInt(_ i: Int) -> CFHashCode @_silgen_name("__CFHashDouble") internal func __CFHashDouble(_ d: Double) -> CFHashCode -@_silgen_name("CFHashBytes") +@_silgen_name("__CFHashBytes") internal func CFHashBytes(_ bytes: UnsafeMutablePointer?, _ length: Int) -> CFHashCode diff --git a/stdlib/public/SDK/Foundation/IndexSetThunks.m b/stdlib/public/SDK/Foundation/IndexSetThunks.m index d046ee5e09887..8b87a033086ea 100644 --- a/stdlib/public/SDK/Foundation/IndexSetThunks.m +++ b/stdlib/public/SDK/Foundation/IndexSetThunks.m @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #import +#include "swift/Runtime/Config.h" @interface NSIndexSet (NSRanges) - (NSUInteger)rangeCount; @@ -25,6 +26,7 @@ extern NSUInteger __NSIndexSetRangeCount(id NS_RELEASES_ARGUMENT __nonnull self_ return result; } +SWIFT_CC(swift) extern void __NSIndexSetRangeAtIndex(id NS_RELEASES_ARGUMENT __nonnull self_, NSUInteger rangeIndex, NSUInteger * __nonnull location, NSUInteger * __nonnull length) { NSIndexSet *indexSet = self_; NSRange result = [indexSet rangeAtIndex:rangeIndex]; @@ -33,6 +35,7 @@ extern void __NSIndexSetRangeAtIndex(id NS_RELEASES_ARGUMENT __nonnull self_, NS *length = result.length; } +SWIFT_CC(swift) extern NSUInteger __NSIndexSetIndexOfRangeContainingIndex(id NS_RELEASES_ARGUMENT __nonnull self_, NSUInteger index) { NSIndexSet *indexSet = self_; NSUInteger result = [indexSet _indexOfRangeContainingIndex:index]; diff --git a/stdlib/public/runtime/Casting.cpp b/stdlib/public/runtime/Casting.cpp index 525a7b0e5ade3..262c789cf8999 100644 --- a/stdlib/public/runtime/Casting.cpp +++ b/stdlib/public/runtime/Casting.cpp @@ -2701,25 +2701,28 @@ struct _ObjectiveCBridgeableWitnessTable { const _ObjectiveCBridgeableWitnessTable *witnessTable); // func _bridgeToObjectiveC() -> _ObjectiveCType + SWIFT_CC(swift) HeapObject *(*bridgeToObjectiveC)( - OpaqueValue *self, const Metadata *Self, + SWIFT_CONTEXT OpaqueValue *self, const Metadata *Self, const _ObjectiveCBridgeableWitnessTable *witnessTable); // class func _forceBridgeFromObjectiveC(x: _ObjectiveCType, // inout result: Self?) + SWIFT_CC(swift) void (*forceBridgeFromObjectiveC)( HeapObject *sourceValue, OpaqueValue *result, - const Metadata *self, + SWIFT_CONTEXT const Metadata *self, const Metadata *selfType, const _ObjectiveCBridgeableWitnessTable *witnessTable); // class func _conditionallyBridgeFromObjectiveC(x: _ObjectiveCType, // inout result: Self?) 
-> Bool + SWIFT_CC(swift) bool (*conditionallyBridgeFromObjectiveC)( HeapObject *sourceValue, OpaqueValue *result, - const Metadata *self, + SWIFT_CONTEXT const Metadata *self, const Metadata *selfType, const _ObjectiveCBridgeableWitnessTable *witnessTable); }; @@ -3221,6 +3224,7 @@ bool _swift_isClassOrObjCExistentialType(const Metadata *value, return swift_isClassOrObjCExistentialTypeImpl(T); } +SWIFT_CC(swift) const Metadata *swift::_swift_class_getSuperclass(const Metadata *theClass) { if (const ClassMetadata *classType = theClass->getClassObject()) if (classHasSuperclass(classType)) diff --git a/stdlib/public/runtime/ErrorObject.h b/stdlib/public/runtime/ErrorObject.h index d6d5e80e03e43..74c4774f0359b 100644 --- a/stdlib/public/runtime/ErrorObject.h +++ b/stdlib/public/runtime/ErrorObject.h @@ -214,6 +214,7 @@ void swift_unexpectedError(SwiftError *object); #if SWIFT_OBJC_INTEROP /// Initialize an Error box to make it usable as an NSError instance. +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT id swift_bridgeErrorToNSError(SwiftError *errorObject); diff --git a/stdlib/public/runtime/ErrorObject.mm b/stdlib/public/runtime/ErrorObject.mm index 5bee48ef2b03a..5340610defed8 100644 --- a/stdlib/public/runtime/ErrorObject.mm +++ b/stdlib/public/runtime/ErrorObject.mm @@ -474,9 +474,11 @@ static id _swift_bridgeErrorToNSError_(SwiftError *errorObject) { return ns; } +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT -auto *_swift_bridgeErrorToNSError = _swift_bridgeErrorToNSError_; +id (*_swift_bridgeErrorToNSError)(SwiftError*) = _swift_bridgeErrorToNSError_; +SWIFT_CC(swift) id swift::swift_bridgeErrorToNSError(SwiftError *errorObject) { return _swift_bridgeErrorToNSError(errorObject); diff --git a/stdlib/public/runtime/ErrorObjectNative.cpp b/stdlib/public/runtime/ErrorObjectNative.cpp index bcf5c9c05a3fd..42b245b1c2e8d 100644 --- a/stdlib/public/runtime/ErrorObjectNative.cpp +++ b/stdlib/public/runtime/ErrorObjectNative.cpp @@ -46,7 +46,7 @@ _getErrorAllocatedSizeAndAlignmentMask(const Metadata *type) { } /// Destructor for an Error box. -static void _destroyErrorObject(HeapObject *obj) { +static SWIFT_CC(swift) void _destroyErrorObject(SWIFT_CONTEXT HeapObject *obj) { auto error = static_cast(obj); // Destroy the value inside. diff --git a/stdlib/public/runtime/HeapObject.cpp b/stdlib/public/runtime/HeapObject.cpp index 26966bd927ccd..75f2fb4d9ff1b 100644 --- a/stdlib/public/runtime/HeapObject.cpp +++ b/stdlib/public/runtime/HeapObject.cpp @@ -97,6 +97,7 @@ swift::swift_verifyEndOfLifetime(HeapObject *object) { /// \brief Allocate a reference-counted object on the heap that /// occupies bytes of maximally-aligned storage. The object is /// uninitialized except for its header. +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT HeapObject* swift_bufferAllocate( HeapMetadata const* bufferType, size_t size, size_t alignMask) @@ -110,7 +111,7 @@ intptr_t swift_bufferHeaderSize() { return sizeof(HeapObject); } namespace { /// Heap object destructor for a generic box allocated with swift_allocBox. -static void destroyGenericBox(HeapObject *o) { +static SWIFT_CC(swift) void destroyGenericBox(SWIFT_CONTEXT HeapObject *o) { auto metadata = static_cast(o->metadata); // Destroy the object inside. 
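+  // The boxed value is projected out of the box and destroyed in place.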
auto *value = metadata->project(o); @@ -152,12 +153,11 @@ class BoxCacheEntry { static SimpleGlobalCache Boxes; -SWIFT_CC(swift) BoxPair::Return swift::swift_allocBox(const Metadata *type) { return SWIFT_RT_ENTRY_REF(swift_allocBox)(type); } -SWIFT_CC(swift) SWIFT_RT_ENTRY_IMPL_VISIBILITY +SWIFT_RT_ENTRY_IMPL_VISIBILITY extern "C" BoxPair::Return SWIFT_RT_ENTRY_IMPL(swift_allocBox)(const Metadata *type) { // Get the heap metadata for the box. diff --git a/stdlib/public/runtime/Leaks.h b/stdlib/public/runtime/Leaks.h index f26a15f1fbf8b..940a5b0a9a046 100644 --- a/stdlib/public/runtime/Leaks.h +++ b/stdlib/public/runtime/Leaks.h @@ -29,9 +29,9 @@ namespace swift { struct HeapObject; } -SWIFT_RUNTIME_EXPORT LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED void swift_leaks_startTrackingObjects(const char *); -SWIFT_RUNTIME_EXPORT LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED int swift_leaks_stopTrackingObjects(const char *); SWIFT_RUNTIME_EXPORT LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED void swift_leaks_startTrackingObject(swift::HeapObject *); diff --git a/stdlib/public/runtime/Leaks.mm b/stdlib/public/runtime/Leaks.mm index 472a54d7d621a..79a7c43d99362 100644 --- a/stdlib/public/runtime/Leaks.mm +++ b/stdlib/public/runtime/Leaks.mm @@ -80,6 +80,7 @@ static id __swift_leaks_allocWithZone(id self, SEL _cmd, id zone) { return result; } +SWIFT_CC(swift) extern "C" void swift_leaks_startTrackingObjects(const char *name) { pthread_mutex_lock(&LeaksMutex); @@ -159,6 +160,7 @@ static void dumpObjCHeapObjects() { } } +SWIFT_CC(swift) extern "C" int swift_leaks_stopTrackingObjects(const char *name) { pthread_mutex_lock(&LeaksMutex); unsigned Result = TrackedSwiftObjects->size() + TrackedObjCObjects->size(); diff --git a/stdlib/public/runtime/MetadataLookup.cpp b/stdlib/public/runtime/MetadataLookup.cpp index 3ec27781a3bae..a1d0253b937d3 100644 --- a/stdlib/public/runtime/MetadataLookup.cpp +++ b/stdlib/public/runtime/MetadataLookup.cpp @@ -253,6 +253,7 @@ _classByName(const llvm::StringRef typeName) { /// \param typeName The name of a class in the form: . /// \return Returns the metadata of the type, if found. +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT const Metadata * swift_getTypeByName(const char *typeName, size_t typeNameLength) { diff --git a/stdlib/public/runtime/Reflection.mm b/stdlib/public/runtime/Reflection.mm index a1a4d361eb7a2..9045de5322beb 100644 --- a/stdlib/public/runtime/Reflection.mm +++ b/stdlib/public/runtime/Reflection.mm @@ -66,6 +66,7 @@ - (id)debugQuickLookObject; struct String; +SWIFT_CC(swift) extern "C" void swift_stringFromUTF8InRawMemory(String *out, const char *start, intptr_t len); @@ -1140,6 +1141,7 @@ static Mirror ObjC_getMirrorForSuperclass(Class sup, /// /// This function consumes 'value', following Swift's +1 convention for "in" /// arguments. +SWIFT_CC(swift) MirrorReturn swift::swift_reflectAny(OpaqueValue *value, const Metadata *T) { const Metadata *mirrorType; const OpaqueValue *cMirrorValue; diff --git a/stdlib/public/runtime/SwiftObject.mm b/stdlib/public/runtime/SwiftObject.mm index dcbf12436173c..e2b483c26d9c4 100644 --- a/stdlib/public/runtime/SwiftObject.mm +++ b/stdlib/public/runtime/SwiftObject.mm @@ -389,6 +389,7 @@ - (BOOL)isNSValue__ { return NO; } /// reference-counting. 
The metadata is known to correspond to a class /// type, but note that does not imply being known to be a ClassMetadata /// due to the existence of ObjCClassWrapper. +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT bool swift_objc_class_usesNativeSwiftReferenceCounting(const Metadata *theClass) { @@ -1454,6 +1455,7 @@ static bool usesNativeSwiftReferenceCounting_nonNull( #if SWIFT_OBJC_INTEROP +SWIFT_CC(swift) SWIFT_RUNTIME_EXPORT ClassExtents::Return swift_objc_class_unknownGetInstanceExtents(const ClassMetadata* c) { diff --git a/test/ClangImporter/CoreGraphics_test.swift b/test/ClangImporter/CoreGraphics_test.swift index 9a37b0ff8ec43..3dfc85c0b6dfb 100644 --- a/test/ClangImporter/CoreGraphics_test.swift +++ b/test/ClangImporter/CoreGraphics_test.swift @@ -7,7 +7,7 @@ import CoreGraphics // CHECK: [[SWITCHTABLE:@.*]] = private unnamed_addr constant [8 x i64] [i64 0, i64 12, i64 23, i64 34, i64 45, i64 55, i64 67, i64 71] -// CHECK-LABEL: define i64 {{.*}}testEnums{{.*}} { +// CHECK-LABEL: define swiftcc i64 {{.*}}testEnums{{.*}} { public func testEnums(_ model: CGColorSpaceModel) -> Int { switch model { case .unknown : return 0 @@ -26,7 +26,7 @@ public func testEnums(_ model: CGColorSpaceModel) -> Int { // CHECK: ret i64 [[LOAD]] } -// CHECK-LABEL: define void {{.*}}rotationAround{{.*}} { +// CHECK-LABEL: define swiftcc void {{.*}}rotationAround{{.*}} { // Get a transform that will rotate around a given offset public func rotationAround(offset: CGPoint, angle: CGFloat, transform: CGAffineTransform = .identity) -> CGAffineTransform { @@ -42,7 +42,7 @@ public func rotationAround(offset: CGPoint, angle: CGFloat, // CHECK: ret void } -// CHECK-LABEL: define void {{.*}}trace{{.*}} { +// CHECK-LABEL: define swiftcc void {{.*}}trace{{.*}} { public func trace(in context: CGContext, path: CGPath) { let red = CGColor(red: 1, green: 0, blue: 0, alpha: 1) context.saveGState() @@ -60,7 +60,7 @@ public func trace(in context: CGContext, path: CGPath) { // CHECK: ret void } -// CHECK-LABEL: define void {{.*}}pdfOperations{{.*}} { +// CHECK-LABEL: define swiftcc void {{.*}}pdfOperations{{.*}} { public func pdfOperations(_ context: CGContext) { context.beginPDFPage(nil) context.endPDFPage() @@ -74,7 +74,7 @@ public func pdfOperations(_ context: CGContext) { // Test some more recently renamed APIs -// CHECK-LABEL: define void {{.*}}testColorRenames{{.*}} { +// CHECK-LABEL: define swiftcc void {{.*}}testColorRenames{{.*}} { public func testColorRenames(color: CGColor, intent: CGColorRenderingIntent) { let colorSpace = CGColorSpace(name: CGColorSpace.sRGB)! @@ -87,7 +87,7 @@ public func testColorRenames(color: CGColor, // CHECK: ret void } -// CHECK-LABEL: define void {{.*}}testRenames{{.*}} { +// CHECK-LABEL: define swiftcc void {{.*}}testRenames{{.*}} { public func testRenames(transform: CGAffineTransform, context: CGContext, point: CGPoint, size: CGSize, rect: CGRect, image: CGImage, diff --git a/test/ClangImporter/attr-swift_private.swift b/test/ClangImporter/attr-swift_private.swift index af7c74da318ec..0bd1079d588a2 100644 --- a/test/ClangImporter/attr-swift_private.swift +++ b/test/ClangImporter/attr-swift_private.swift @@ -19,7 +19,7 @@ import SwiftPrivateAttr // half of a module, or from an overlay. At that point we should test that these // are available in that case and /not/ in the normal import case. 
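The FileCheck changes below, like those in the other IRGen tests in this patch, are all the same mechanical update: with the Swift calling convention enabled, IRGen marks Swift function definitions and the calls to them with LLVM's swiftcc, so patterns such as "define void" become "define swiftcc void". As a rough, standalone sketch of what that marker means at the LLVM API level (illustrative only, not the compiler's own code; the module and function names are made up):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

// Builds a module whose textual IR contains "define swiftcc void @caller()"
// and a matching "call swiftcc void @callee()".
int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("swiftcc-demo", Ctx);
  auto *FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
                                       /*isVarArg=*/false);

  // A swiftcc declaration to call.
  auto *Callee = llvm::Function::Create(FnTy, llvm::Function::ExternalLinkage,
                                        "callee", &M);
  Callee->setCallingConv(llvm::CallingConv::Swift);

  // A swiftcc definition that calls it.
  auto *Caller = llvm::Function::Create(FnTy, llvm::Function::ExternalLinkage,
                                        "caller", &M);
  Caller->setCallingConv(llvm::CallingConv::Swift);
  llvm::IRBuilder<> B(llvm::BasicBlock::Create(Ctx, "entry", Caller));
  auto *Call = B.CreateCall(Callee);
  // The convention on the call site has to match the callee's declaration.
  Call->setCallingConv(llvm::CallingConv::Swift);
  B.CreateRetVoid();

  M.print(llvm::outs(), nullptr);
  return 0;
}

In LLVM IR a call whose calling convention differs from its callee's is undefined behavior, which is why the call patterns in these tests gain the swiftcc marker alongside the defines.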
-// CHECK-LABEL: define{{( protected)?}} void @{{.+}}12testProperty +// CHECK-LABEL: define{{( protected)?}} swiftcc void @{{.+}}12testProperty public func testProperty(_ foo: Foo) { // CHECK: @"\01L_selector(setPrivValue:)" _ = foo.__privValue @@ -34,7 +34,7 @@ public func testProperty(_ foo: Foo) { #endif } -// CHECK-LABEL: define{{( protected)?}} void @{{.+}}11testMethods +// CHECK-LABEL: define{{( protected)?}} swiftcc void @{{.+}}11testMethods public func testMethods(_ foo: Foo) { // CHECK: @"\01L_selector(noArgs)" foo.__noArgs() @@ -44,7 +44,7 @@ public func testMethods(_ foo: Foo) { foo.__twoArgs(1, other: 2) } -// CHECK-LABEL: define{{( protected)?}} void @{{.+}}16testInitializers +// CHECK-LABEL: define{{( protected)?}} swiftcc void @{{.+}}16testInitializers public func testInitializers() { // Checked below; look for "CSo3Bar". _ = Bar(__noArgs: ()) @@ -53,7 +53,7 @@ public func testInitializers() { _ = Bar(__: 1) } -// CHECK-LABEL: define{{( protected)?}} void @{{.+}}18testFactoryMethods +// CHECK-LABEL: define{{( protected)?}} swiftcc void @{{.+}}18testFactoryMethods public func testFactoryMethods() { // CHECK: @"\01L_selector(fooWithOneArg:)" _ = Foo(__oneArg: 1) @@ -70,7 +70,7 @@ public func testSubscript(_ foo: Foo) { } #endif -// CHECK-LABEL: define{{( protected)?}} void @{{.+}}12testTopLevel +// CHECK-LABEL: define{{( protected)?}} swiftcc void @{{.+}}12testTopLevel public func testTopLevel() { // Checked below; look for "PrivFooSub". let foo = __PrivFooSub() diff --git a/test/ClangImporter/ctypes_ir.swift b/test/ClangImporter/ctypes_ir.swift index 9ac47b04b1ec7..9a0cc9dcbe905 100644 --- a/test/ClangImporter/ctypes_ir.swift +++ b/test/ClangImporter/ctypes_ir.swift @@ -4,19 +4,19 @@ import ctypes -// CHECK-LABEL: define hidden void @_T09ctypes_ir9testColoryyF +// CHECK-LABEL: define hidden swiftcc void @_T09ctypes_ir9testColoryyF func testColor() { // CHECK: store i32 1 var c : Color = green } -// CHECK-LABEL: define hidden void @_T09ctypes_ir12testAnonEnumyyF +// CHECK-LABEL: define hidden swiftcc void @_T09ctypes_ir12testAnonEnumyyF func testAnonEnum() { // CHECK: store i64 30064771073 var a = AnonConst2 } -// CHECK-LABEL: define hidden void @_T09ctypes_ir17testAnonEnumSmallyyF +// CHECK-LABEL: define hidden swiftcc void @_T09ctypes_ir17testAnonEnumSmallyyF func testAnonEnumSmall() { // CHECK: store i64 17 var a = AnonConstSmall2 @@ -27,7 +27,7 @@ func testStructWithFlexibleArray(_ s : StructWithFlexibleArray) { } // Make sure flexible array struct member isn't represented in IR function signature as i0 (or at all). 
rdar://problem/18510461 -// CHECK-LABEL: define hidden void @_T09ctypes_ir27testStructWithFlexibleArrayySC0defG0VF(i32) +// CHECK-LABEL: define hidden swiftcc void @_T09ctypes_ir27testStructWithFlexibleArrayySC0defG0VF(i32) typealias EightUp = (Int8, Int8, Int8, Int8, Int8, Int8, Int8, Int8) diff --git a/test/ClangImporter/objc_ir.swift b/test/ClangImporter/objc_ir.swift index 46ec684f66467..e81f1ab5075e0 100644 --- a/test/ClangImporter/objc_ir.swift +++ b/test/ClangImporter/objc_ir.swift @@ -18,7 +18,7 @@ import ObjCIRExtras // CHECK: @"\01L_selector_data(method:separateExtMethod:)" = private global [26 x i8] c"method:separateExtMethod:\00", section "__TEXT,__objc_methname,cstring_literals" // Instance method invocation -// CHECK: define hidden void @_T07objc_ir15instanceMethodsySo1BCF([[B]]* +// CHECK: define hidden swiftcc void @_T07objc_ir15instanceMethodsySo1BCF([[B]]* func instanceMethods(_ b: B) { // CHECK: load i8*, i8** @"\01L_selector(method:withFloat:)" // CHECK: call i32 bitcast (void ()* @objc_msgSend to i32 @@ -28,7 +28,7 @@ func instanceMethods(_ b: B) { i = i + b.method(1, with: 2.5 as Double) } -// CHECK: define hidden void @_T07objc_ir16extensionMethodsySo1BC1b_tF +// CHECK: define hidden swiftcc void @_T07objc_ir16extensionMethodsySo1BC1b_tF func extensionMethods(b b: B) { // CHECK: load i8*, i8** @"\01L_selector(method:separateExtMethod:)", align 8 // CHECK: [[T0:%.*]] = call i8* bitcast (void ()* @objc_msgSend to i8* @@ -38,7 +38,7 @@ func extensionMethods(b b: B) { b.method(1, separateExtMethod:1.5) } -// CHECK: define hidden void @_T07objc_ir19initCallToAllocInitys5Int32V1i_tF +// CHECK: define hidden swiftcc void @_T07objc_ir19initCallToAllocInitys5Int32V1i_tF func initCallToAllocInit(i i: CInt) { // CHECK: call {{.*}} @_T0So1BCSQyABGs5Int32V3int_tcfC @@ -49,7 +49,7 @@ func initCallToAllocInit(i i: CInt) { // CHECK: call [[OPAQUE:%.*]]* @objc_allocWithZone // Indexed subscripting -// CHECK: define hidden void @_T07objc_ir19indexedSubscriptingySo1BC1b_Si3idxSo1AC1atF +// CHECK: define hidden swiftcc void @_T07objc_ir19indexedSubscriptingySo1BC1b_Si3idxSo1AC1atF func indexedSubscripting(b b: B, idx: Int, a: A) { // CHECK: load i8*, i8** @"\01L_selector(setObject:atIndexedSubscript:)", align 8 b[idx] = a @@ -58,7 +58,7 @@ func indexedSubscripting(b b: B, idx: Int, a: A) { var a2 = b[idx] as! A } -// CHECK: define hidden void @_T07objc_ir17keyedSubscriptingySo1BC1b_So1AC3idxAG1atF +// CHECK: define hidden swiftcc void @_T07objc_ir17keyedSubscriptingySo1BC1b_So1AC3idxAG1atF func keyedSubscripting(b b: B, idx: A, a: A) { // CHECK: load i8*, i8** @"\01L_selector(setObject:forKeyedSubscript:)" b[a] = a @@ -66,7 +66,7 @@ func keyedSubscripting(b b: B, idx: A, a: A) { var a2 = b[a] as! A } -// CHECK: define hidden void @_T07objc_ir14propertyAccessySo1BC1b_tF +// CHECK: define hidden swiftcc void @_T07objc_ir14propertyAccessySo1BC1b_tF func propertyAccess(b b: B) { // CHECK: load i8*, i8** @"\01L_selector(counter)" // CHECK: load i8*, i8** @"\01L_selector(setCounter:)" @@ -79,7 +79,7 @@ func propertyAccess(b b: B) { B.sharedCounter = B.sharedCounter + 1 } -// CHECK: define hidden [[B]]* @_T07objc_ir8downcastSo1BCSo1AC1a_tF( +// CHECK: define hidden swiftcc [[B]]* @_T07objc_ir8downcastSo1BCSo1AC1a_tF( func downcast(a a: A) -> B { // CHECK: [[CLASS:%.*]] = load %objc_class*, %objc_class** @"OBJC_CLASS_REF_$_B" // CHECK: [[T0:%.*]] = call %objc_class* @swift_rt_swift_getInitializedObjCClass(%objc_class* [[CLASS]]) @@ -88,12 +88,12 @@ func downcast(a a: A) -> B { return a as! 
B } -// CHECK: define hidden void @_T07objc_ir19almostSubscriptableySo06AlmostD0C3as1_So1AC1atF +// CHECK: define hidden swiftcc void @_T07objc_ir19almostSubscriptableySo06AlmostD0C3as1_So1AC1atF func almostSubscriptable(as1 as1: AlmostSubscriptable, a: A) { as1.objectForKeyedSubscript(a) } -// CHECK: define hidden void @_T07objc_ir13protocolTypesySo7NSMinceC1a_So9NSRuncing_p1btF(%CSo7NSMince*, %objc_object*) {{.*}} { +// CHECK: define hidden swiftcc void @_T07objc_ir13protocolTypesySo7NSMinceC1a_So9NSRuncing_p1btF(%CSo7NSMince*, %objc_object*) {{.*}} { func protocolTypes(a a: NSMince, b: NSRuncing) { // - (void)eatWith:(id )runcer; a.eat(with: b) @@ -101,7 +101,7 @@ func protocolTypes(a a: NSMince, b: NSRuncing) { // CHECK: call void bitcast (void ()* @objc_msgSend to void ([[OPAQUE:%.*]]*, i8*, i8*)*)([[OPAQUE:%.*]]* {{%.*}}, i8* [[SEL]], i8* {{%.*}}) } -// CHECK-LABEL: define hidden void @_T07objc_ir6getsetySo8FooProto_p1p_tF(%objc_object*) {{.*}} { +// CHECK-LABEL: define hidden swiftcc void @_T07objc_ir6getsetySo8FooProto_p1p_tF(%objc_object*) {{.*}} { func getset(p p: FooProto) { // CHECK: load i8*, i8** @"\01L_selector(bar)" // CHECK: load i8*, i8** @"\01L_selector(setBar:)" @@ -109,7 +109,7 @@ func getset(p p: FooProto) { p.bar = prop } -// CHECK-LABEL: define hidden %swift.type* @_T07objc_ir16protocolMetatypeSo8FooProto_pXpSoAC_p1p_tF(%objc_object*) {{.*}} { +// CHECK-LABEL: define hidden swiftcc %swift.type* @_T07objc_ir16protocolMetatypeSo8FooProto_pXpSoAC_p1p_tF(%objc_object*) {{.*}} { func protocolMetatype(p: FooProto) -> FooProto.Type { // CHECK: = call %swift.type* @swift_getObjectType(%objc_object* %0) // CHECK-NOT: {{retain|release}} @@ -126,7 +126,7 @@ class Impl: FooProto, AnotherProto { @objc var bar: Int32 = 0 } -// CHECK-LABEL: define hidden %swift.type* @_T07objc_ir27protocolCompositionMetatypeSo12AnotherProto_So03FooG0pXpAA4ImplC1p_tF(%C7objc_ir4Impl*) {{.*}} { +// CHECK-LABEL: define hidden swiftcc %swift.type* @_T07objc_ir27protocolCompositionMetatypeSo12AnotherProto_So03FooG0pXpAA4ImplC1p_tF(%C7objc_ir4Impl*) {{.*}} { func protocolCompositionMetatype(p: Impl) -> (FooProto & AnotherProto).Type { // CHECK: = getelementptr inbounds %C7objc_ir4Impl, %C7objc_ir4Impl* %0, i32 0, i32 0, i32 0 // CHECK-NOT: {{retain|release}} @@ -139,7 +139,7 @@ func protocolCompositionMetatype(p: Impl) -> (FooProto & AnotherProto).Type { return type } // CHECK: } -// CHECK-LABEL: define hidden %swift.type* @_T07objc_ir28protocolCompositionMetatype2So12AnotherProto_So03FooG0pXpAA4ImplC1p_tF(%C7objc_ir4Impl*) {{.*}} { +// CHECK-LABEL: define hidden swiftcc %swift.type* @_T07objc_ir28protocolCompositionMetatype2So12AnotherProto_So03FooG0pXpAA4ImplC1p_tF(%C7objc_ir4Impl*) {{.*}} { func protocolCompositionMetatype2(p: Impl) -> (FooProto & AnotherProto).Type { // CHECK: = getelementptr inbounds %C7objc_ir4Impl, %C7objc_ir4Impl* %0, i32 0, i32 0, i32 0 // CHECK-NOT: {{retain|release}} @@ -152,7 +152,7 @@ func protocolCompositionMetatype2(p: Impl) -> (FooProto & AnotherProto).Type { return type } // CHECK: } -// CHECK-LABEL: define hidden void @_T07objc_ir17pointerPropertiesySo14PointerWrapperCF(%CSo14PointerWrapper*) {{.*}} { +// CHECK-LABEL: define hidden swiftcc void @_T07objc_ir17pointerPropertiesySo14PointerWrapperCF(%CSo14PointerWrapper*) {{.*}} { func pointerProperties(_ obj: PointerWrapper) { // CHECK: load i8*, i8** @"\01L_selector(setVoidPtr:)" // CHECK: load i8*, i8** @"\01L_selector(setIntPtr:)" @@ -162,17 +162,17 @@ func pointerProperties(_ obj: PointerWrapper) { obj.idPtr = nil as 
AutoreleasingUnsafeMutablePointer? } -// CHECK-LABEL: define hidden void @_T07objc_ir16strangeSelectorsySo13SwiftNameTestCF(%CSo13SwiftNameTest*) {{.*}} { +// CHECK-LABEL: define hidden swiftcc void @_T07objc_ir16strangeSelectorsySo13SwiftNameTestCF(%CSo13SwiftNameTest*) {{.*}} { func strangeSelectors(_ obj: SwiftNameTest) { // CHECK: load i8*, i8** @"\01L_selector(:b:)" obj.empty(a: 0, b: 0) } -// CHECK-LABEL: define hidden void @_T07objc_ir20customFactoryMethodsyyF() {{.*}} { +// CHECK-LABEL: define hidden swiftcc void @_T07objc_ir20customFactoryMethodsyyF() {{.*}} { func customFactoryMethods() { - // CHECK: call %CSo13SwiftNameTest* @_T0So13SwiftNameTestCAByt10dummyParam_tcfCTO - // CHECK: call %CSo13SwiftNameTest* @_T0So13SwiftNameTestCABypSg2cc_tcfCTO - // CHECK: call %CSo13SwiftNameTest* @_T0So13SwiftNameTestCABs5Int32V5empty_tcfCTO + // CHECK: call swiftcc %CSo13SwiftNameTest* @_T0So13SwiftNameTestCAByt10dummyParam_tcfCTO + // CHECK: call swiftcc %CSo13SwiftNameTest* @_T0So13SwiftNameTestCABypSg2cc_tcfCTO + // CHECK: call swiftcc %CSo13SwiftNameTest* @_T0So13SwiftNameTestCABs5Int32V5empty_tcfCTO _ = SwiftNameTest(dummyParam: ()) _ = SwiftNameTest(cc: nil) _ = SwiftNameTest(empty: 0) @@ -187,13 +187,13 @@ func customFactoryMethods() { _ = SwiftNameTest.empty(1, 2) do { - // CHECK: call %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_tKcfCTO - // CHECK: call %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5errortKcfCTO - // CHECK: call %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5erroryyc5blocktKcfCTO - // CHECK: call %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_yyc5blocktKcfCTO - // CHECK: call %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_tKcfCTO - // CHECK: call %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yyc5blocktKcfCTO - // CHECK: call %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByyc5block_tKcfCTO + // CHECK: call swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_tKcfCTO + // CHECK: call swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5errortKcfCTO + // CHECK: call swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5erroryyc5blocktKcfCTO + // CHECK: call swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_yyc5blocktKcfCTO + // CHECK: call swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_tKcfCTO + // CHECK: call swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yyc5blocktKcfCTO + // CHECK: call swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByyc5block_tKcfCTO _ = try SwiftNameTestError(error: ()) _ = try SwiftNameTestError(aa: nil, error: ()) _ = try SwiftNameTestError(aa: nil, error: (), block: {}) @@ -215,46 +215,46 @@ func customFactoryMethods() { } } -// CHECK-LABEL: define linkonce_odr hidden %CSo13SwiftNameTest* @_T0So13SwiftNameTestCAByt10dummyParam_tcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo13SwiftNameTest* @_T0So13SwiftNameTestCAByt10dummyParam_tcfCTO // CHECK: load i8*, i8** @"\01L_selector(b)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo13SwiftNameTest* @_T0So13SwiftNameTestCABypSg2cc_tcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo13SwiftNameTest* @_T0So13SwiftNameTestCABypSg2cc_tcfCTO // CHECK: load i8*, i8** @"\01L_selector(c:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_tKcfCTO 
+// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_tKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err1:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5errortKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5errortKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err2:error:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5erroryyc5blocktKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yt5erroryyc5blocktKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err3:error:callback:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_yyc5blocktKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByt5error_yyc5blocktKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err4:callback:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_tKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_tKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err5:error:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yyc5blocktKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCABypSg2aa_yyc5blocktKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err6:error:callback:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByyc5block_tKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo18SwiftNameTestError* @_T0So18SwiftNameTestErrorCAByyc5block_tKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err7:callback:)" // CHECK: } -// CHECK-LABEL: define hidden void @_T07objc_ir29customFactoryMethodsInheritedyyF() {{.*}} { +// CHECK-LABEL: define hidden swiftcc void @_T07objc_ir29customFactoryMethodsInheritedyyF() {{.*}} { func customFactoryMethodsInherited() { - // CHECK: call %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCAByt10dummyParam_tcfCTO - // CHECK: call %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCABypSg2cc_tcfCTO + // CHECK: call swiftcc %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCAByt10dummyParam_tcfCTO + // CHECK: call swiftcc %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCABypSg2cc_tcfCTO _ = SwiftNameTestSub(dummyParam: ()) _ = SwiftNameTestSub(cc: nil) @@ -266,13 +266,13 @@ func customFactoryMethodsInherited() { _ = SwiftNameTestSub.xx(nil, bb: nil) do { - // CHECK: call %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_tKcfCTO - // CHECK: call %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5errortKcfCTO - // CHECK: call %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5erroryyc5blocktKcfCTO - // CHECK: call %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_yyc5blocktKcfCTO - // CHECK: call %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_tKcfCTO - // CHECK: call %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yyc5blocktKcfCTO - // CHECK: call %CSo21SwiftNameTestErrorSub* 
@_T0So21SwiftNameTestErrorSubCAByyc5block_tKcfCTO + // CHECK: call swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_tKcfCTO + // CHECK: call swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5errortKcfCTO + // CHECK: call swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5erroryyc5blocktKcfCTO + // CHECK: call swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_yyc5blocktKcfCTO + // CHECK: call swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_tKcfCTO + // CHECK: call swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yyc5blocktKcfCTO + // CHECK: call swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByyc5block_tKcfCTO _ = try SwiftNameTestErrorSub(error: ()) _ = try SwiftNameTestErrorSub(aa: nil, error: ()) _ = try SwiftNameTestErrorSub(aa: nil, error: (), block: {}) @@ -294,39 +294,39 @@ func customFactoryMethodsInherited() { } } -// CHECK-LABEL: define linkonce_odr hidden %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCAByt10dummyParam_tcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCAByt10dummyParam_tcfCTO // CHECK: load i8*, i8** @"\01L_selector(b)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCABypSg2cc_tcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo16SwiftNameTestSub* @_T0So16SwiftNameTestSubCABypSg2cc_tcfCTO // CHECK: load i8*, i8** @"\01L_selector(c:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_tKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_tKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err1:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5errortKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5errortKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err2:error:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5erroryyc5blocktKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yt5erroryyc5blocktKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err3:error:callback:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_yyc5blocktKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByt5error_yyc5blocktKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err4:callback:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_tKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_tKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err5:error:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yyc5blocktKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCABypSg2aa_yyc5blocktKcfCTO // CHECK: 
load i8*, i8** @"\01L_selector(err6:error:callback:)" // CHECK: } -// CHECK-LABEL: define linkonce_odr hidden %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByyc5block_tKcfCTO +// CHECK-LABEL: define linkonce_odr hidden swiftcc %CSo21SwiftNameTestErrorSub* @_T0So21SwiftNameTestErrorSubCAByyc5block_tKcfCTO // CHECK: load i8*, i8** @"\01L_selector(err7:callback:)" // CHECK: } diff --git a/test/DebugInfo/Errors.swift b/test/DebugInfo/Errors.swift index 4702ff0ea08f0..033febf2ed041 100644 --- a/test/DebugInfo/Errors.swift +++ b/test/DebugInfo/Errors.swift @@ -1,4 +1,5 @@ // RUN: %target-swift-frontend %s -emit-ir -g -o - | %FileCheck %s +// REQUIRES: CPU=i386 class Obj {} enum MyError : Error { @@ -6,8 +7,11 @@ enum MyError : Error { case WithObj(Obj) } +// i386 does not pass swifterror in a register. To support debugging of the +// thrown error we create a shadow stack location holding the address of the +// location that holds the pointer to the error instead. func simple(_ placeholder: Int64) throws -> () { - // CHECK: define {{.*}}void @_TF6Errors6simpleFzVs5Int64T_(i64, %swift.refcounted*, %swift.error**) + // CHECK: define {{.*}}void @_TF6Errors6simpleFzVs5Int64T_(i64, %swift.refcounted* swiftself, %swift.error**) // CHECK: call void @llvm.dbg.declare // CHECK: call void @llvm.dbg.declare({{.*}}, metadata ![[ERROR:[0-9]+]], metadata ![[DEREF:[0-9]+]]) // CHECK: ![[ERROR]] = !DILocalVariable(name: "$error", arg: 2, diff --git a/test/DebugInfo/LinetableArtificialFn.swift b/test/DebugInfo/LinetableArtificialFn.swift index 820d15bf2921a..7c6bf28e9cb5a 100644 --- a/test/DebugInfo/LinetableArtificialFn.swift +++ b/test/DebugInfo/LinetableArtificialFn.swift @@ -2,8 +2,8 @@ // Verify that a helper function that is generated on-the-fly does // not mess up the linetable of the calling function. 
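+// The i2048 literal is now spilled to a stack slot and passed by pointer to
+// the swiftcc builtin integer literal initializer, which is what the CHECK
+// lines below verify.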
- -// CHECK: _TFSiCfT22_builtinIntegerLiteralBi2048__Si{{.*}}(i2048 -2) +// CHECK: store i2048 10, i2048* [[STKLOC:%.*]], align +// CHECK: call swiftcc {{(i32|i64)}} @_TFSiCfT22_builtinIntegerLiteralBi2048__Si(i2048* {{.*}} [[STKLOC]] // CHECK: store {{(i32|i64)}} {{.*}}getelementptr // CHECK: store {{(i32|i64)}} {{.*}}getelementptr{{.*}}, !dbg ![[DBG:[0-9]+]] // CHECK-NOT: ![[DBG]] = !{i32 0, i32 0, diff --git a/test/DebugInfo/ProtocolContainer.swift b/test/DebugInfo/ProtocolContainer.swift index 0288a71f65589..d0e41855b6a7b 100644 --- a/test/DebugInfo/ProtocolContainer.swift +++ b/test/DebugInfo/ProtocolContainer.swift @@ -11,7 +11,7 @@ class AClass : AProtocol { init() { x = 0xDEADBEEF } func print() { markUsed("x = \(x)")} } -// CHECK: define hidden void @_T017ProtocolContainer3foo{{[_0-9a-zA-Z]*}}F +// CHECK: define hidden {{.*}}void @_T017ProtocolContainer3foo{{[_0-9a-zA-Z]*}}F // CHECK-NEXT: entry: // CHECK-NEXT: %[[X:.*]] = alloca %P17ProtocolContainer9AProtocol_, align {{(4|8)}} // CHECK: call void @llvm.dbg.declare(metadata %P17ProtocolContainer9AProtocol_* %[[X]], metadata ![[XMD:.*]], metadata !{{[0-9]+}}) diff --git a/test/DebugInfo/closure-args.swift b/test/DebugInfo/closure-args.swift index 8a797f57e402f..fbbe6001b2aad 100644 --- a/test/DebugInfo/closure-args.swift +++ b/test/DebugInfo/closure-args.swift @@ -10,7 +10,7 @@ func main() -> Void var out_only = 2013 var backward_ptr = - // CHECK: define linkonce_odr hidden i1 @_T04mainAAyyFSbSS_SStcfU_( + // CHECK: define linkonce_odr hidden {{.*}} i1 @_T04mainAAyyFSbSS_SStcfU_( // CHECK: %[[RANDOM_STR_ADDR:.*]] = alloca %SS*, align {{(4|8)}} // CHECK: store %SS* %{{.*}}, %SS** %[[RANDOM_STR_ADDR]], align {{(4|8)}} // CHECK-NEXT: call void @llvm.dbg.declare(metadata %SS** %[[RANDOM_STR_ADDR]], metadata !{{.*}}, metadata !{{[0-9]+}}), !dbg diff --git a/test/DebugInfo/generic_arg2.swift b/test/DebugInfo/generic_arg2.swift index 2cd9b9a94cfbc..49746f1d1d563 100644 --- a/test/DebugInfo/generic_arg2.swift +++ b/test/DebugInfo/generic_arg2.swift @@ -1,6 +1,6 @@ // RUN: %target-swift-frontend -Xllvm -new-mangling-for-tests %s -emit-ir -g -o - | %FileCheck %s -// CHECK: define hidden void @_T012generic_arg25ClassC3foo{{.*}}, %swift.type* %U +// CHECK: define hidden swiftcc void @_T012generic_arg25ClassC3foo{{.*}}, %swift.type* %U // CHECK: call void @llvm.dbg.declare(metadata %swift.opaque** %y.addr, metadata ![[U:.*]], metadata !{{[0-9]+}}) // Make sure there is no conflicting dbg.value for this variable.x // CHECK-NOT: dbg.value{{.*}}metadata ![[U]] diff --git a/test/DebugInfo/generic_enum_closure.swift b/test/DebugInfo/generic_enum_closure.swift index 6ef7726991924..8150452f20535 100644 --- a/test/DebugInfo/generic_enum_closure.swift +++ b/test/DebugInfo/generic_enum_closure.swift @@ -5,7 +5,7 @@ struct CErrorOr { var value : T? init(x : __CurrentErrno) { - // CHECK: define hidden void @_T020generic_enum_closure8CErrorOrVACyxGAA14__CurrentErrnoV1x_tcfC + // CHECK: define hidden {{.*}}void @_T020generic_enum_closure8CErrorOrVACyxGAA14__CurrentErrnoV1x_tcfC // CHECK-NOT: define // This is a SIL-level debug_value_addr instruction. 
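+    // The {{.*}} in the define pattern above leaves room for the swiftcc
+    // marker that Swift functions now carry.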
// CHECK: call void @llvm.dbg.value({{.*}}, metadata ![[SELF:.*]], metadata !{{[0-9]+}}) diff --git a/test/DebugInfo/initializer.swift b/test/DebugInfo/initializer.swift index 98a6f1a38690a..0322f2d7391d5 100644 --- a/test/DebugInfo/initializer.swift +++ b/test/DebugInfo/initializer.swift @@ -7,11 +7,11 @@ protocol Named { } // initializer.Person.init (initializer.Person.Type)() -> initializer.Person -// CHECK: define hidden %C11initializer6Person* @_T011initializer6PersonCACycfc(%C11initializer6Person*) {{.*}} { +// CHECK: define hidden {{.*}}%C11initializer6Person* @_T011initializer6PersonCACycfc(%C11initializer6Person*{{.*}}) {{.*}} { // initializer.Person.__allocating_init (initializer.Person.Type)() -> initializer.Person -// CHECK: define hidden %C11initializer6Person* @_T011initializer6PersonCACycfC(%swift.type*) {{.*}} { -// CHECK: call %C11initializer6Person* @_T011initializer6PersonCACycfc(%C11initializer6Person* %3), !dbg ![[ALLOCATING_INIT:.*]] +// CHECK: define hidden {{.*}}%C11initializer6Person* @_T011initializer6PersonCACycfC(%swift.type*{{.*}}) {{.*}} { +// CHECK: call {{.*}}%C11initializer6Person* @_T011initializer6PersonCACycfc(%C11initializer6Person* {{.*}}%3), !dbg ![[ALLOCATING_INIT:.*]] // CHECK-DAG: ![[ALLOCATING_INIT]] = !DILocation(line: 0, scope class Person : Named { diff --git a/test/DebugInfo/inout.swift b/test/DebugInfo/inout.swift index 1076b6b24bc55..54c79e89b61b7 100644 --- a/test/DebugInfo/inout.swift +++ b/test/DebugInfo/inout.swift @@ -8,7 +8,7 @@ func Close(_ fn: () -> Int64) { fn() } typealias MyFloat = Float -// CHECK: define hidden void @_TF5inout13modifyFooHeap +// CHECK: define hidden {{.*}}void @_TF5inout13modifyFooHeap // CHECK: %[[ALLOCA:.*]] = alloca %Vs5Int64* // CHECK: call void @llvm.dbg.declare(metadata // CHECK-SAME: %[[ALLOCA]], metadata ![[A:[0-9]+]] diff --git a/test/DebugInfo/iuo_arg.swift b/test/DebugInfo/iuo_arg.swift index 86a422148a5a6..b9dd70d391f14 100644 --- a/test/DebugInfo/iuo_arg.swift +++ b/test/DebugInfo/iuo_arg.swift @@ -13,7 +13,7 @@ class CIFilter { } class MyClass { - // CHECK: define hidden %C7iuo_arg7UIImage* @_T07iuo_arg7MyClassC11filterImageAA7UIImageCSQyAFG_SbtF + // CHECK: define hidden {{.*}} %C7iuo_arg7UIImage* @_T07iuo_arg7MyClassC11filterImageAA7UIImageCSQyAFG_SbtF func filterImage(_ image: UIImage!, _ doSomething:Bool) -> UIImage { // Test that image is in an alloca, but not an indirect location. diff --git a/test/DebugInfo/letstring.swift b/test/DebugInfo/letstring.swift index 3a0f68749edb8..d0e7279213670 100644 --- a/test/DebugInfo/letstring.swift +++ b/test/DebugInfo/letstring.swift @@ -4,7 +4,7 @@ class UIWindow {} class AppDelegate { var window: UIWindow? - // CHECK: define hidden i1 {{.*}}11AppDelegateC1f + // CHECK: define hidden {{.*}}i1 {{.*}}11AppDelegateC1f func f() -> Bool { // Test for -O0 shadow copies. // CHECK: call void @llvm.dbg.declare({{.*}}, metadata ![[B:.*]], metadata !{{[0-9]+}}) diff --git a/test/DebugInfo/linetable-cleanups.swift b/test/DebugInfo/linetable-cleanups.swift index e4f6b0bf82a1a..d4759bcc1f586 100644 --- a/test/DebugInfo/linetable-cleanups.swift +++ b/test/DebugInfo/linetable-cleanups.swift @@ -17,12 +17,12 @@ func main() { markUsed("element = \(element)") } markUsed("Done with the for loop") -// CHECK: call void @_T04main8markUsedyxlF +// CHECK: call {{.*}}void @_T04main8markUsedyxlF // CHECK: br label // CHECK: