diff --git a/Directory.Build.props b/Directory.Build.props
index 7e5c028ecfa209..084308a0dd1fc1 100644
--- a/Directory.Build.props
+++ b/Directory.Build.props
@@ -192,6 +192,8 @@
false
false
+
+ false
true
diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml
index 24fbe6c2292204..f6166255017ba9 100644
--- a/eng/Version.Details.xml
+++ b/eng/Version.Details.xml
@@ -1,5 +1,5 @@
-
+
https://github.com/dotnet/icu
@@ -41,91 +41,91 @@
https://github.com/dotnet/llvm-project
da5dd054a531e6fea65643b7e754285b73eab433
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
https://github.com/dotnet/runtime-assets
@@ -263,33 +263,33 @@
https://github.com/dotnet/llvm-project
da5dd054a531e6fea65643b7e754285b73eab433
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
https://github.com/dotnet/xharness
@@ -303,9 +303,9 @@
https://github.com/dotnet/xharness
e85bb14e85357ab678c2bcb0b6f2bac634fdd49b
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
@@ -331,29 +331,29 @@
https://github.com/dotnet/runtime-assets
1cfc6ba21d0377b51f17eac4fdc2557f7b1e8693
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
-
+
https://github.com/dotnet/dotnet
- 85778473549347b3e4bad3ea009e9438df7b11bb
+ d60c3fe894af16cd15dc86420af0fc9d02be4997
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
diff --git a/eng/Versions.props b/eng/Versions.props
index 6fe9ec30dcc4b7..ea479c8ea1d7e6 100644
--- a/eng/Versions.props
+++ b/eng/Versions.props
@@ -36,17 +36,17 @@
- 10.0.0-preview.25260.104
+ 10.0.0-preview.25269.109
- 5.0.0-1.25260.104
+ 5.0.0-1.25269.109
- 5.0.0-1.25260.104
- 5.0.0-1.25260.104
- 5.0.0-1.25260.104
+ 5.0.0-1.25269.109
+ 5.0.0-1.25269.109
+ 5.0.0-1.25269.109
- 10.0.100-preview.5.25260.104
+ 10.0.100-preview.5.25269.109
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 2.9.2-beta.25260.104
- 10.0.0-beta.25260.104
- 2.9.2-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
- 10.0.0-beta.25260.104
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 2.9.2-beta.25269.109
+ 10.0.0-beta.25269.109
+ 2.9.2-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
+ 10.0.0-beta.25269.109
1.4.0
6.0.0-preview.1.102
- 10.0.0-preview.5.25260.104
+ 10.0.0-preview.5.25269.109
6.0.0
- 10.0.0-preview.5.25260.104
- 10.0.0-preview.5.25260.104
+ 10.0.0-preview.5.25269.109
+ 10.0.0-preview.5.25269.109
6.0.0
4.6.1
@@ -128,16 +128,16 @@
8.0.0
8.0.1
5.0.0
- 10.0.0-preview.5.25260.104
- 10.0.0-preview.5.25260.104
+ 10.0.0-preview.5.25269.109
+ 10.0.0-preview.5.25269.109
6.0.0
5.0.0
5.0.0
5.0.0
7.0.0
- 10.0.0-preview.5.25260.104
+ 10.0.0-preview.5.25269.109
7.0.0
- 10.0.0-preview.5.25260.104
+ 10.0.0-preview.5.25269.109
8.0.0
4.5.1
@@ -180,7 +180,7 @@
2.0.0
17.10.0-beta1.24272.1
- 2.0.0-beta5.25260.104
+ 2.0.0-beta5.25269.109
3.1.16
2.1.0
2.0.3
@@ -226,7 +226,7 @@
9.0.0-preview-20241010.1
- 0.11.5-alpha.25260.104
+ 0.11.5-alpha.25269.109
10.0.0-preview.5.25261.1
@@ -258,7 +258,7 @@
Note: when the name is updated, make sure to update dependency name in eng/pipelines/common/xplat-setup.yml
like - DarcDependenciesChanged.Microsoft_NET_Workload_Emscripten_Current_Manifest-10_0_100_Transport
-->
- 10.0.100-preview.5.25260.104
+ 10.0.100-preview.5.25269.109
$(MicrosoftNETWorkloadEmscriptenCurrentManifest100100TransportVersion)
1.1.87-gba258badda
diff --git a/eng/common/build.ps1 b/eng/common/build.ps1
index 6b3be1916fcaf5..ae2309e312d789 100644
--- a/eng/common/build.ps1
+++ b/eng/common/build.ps1
@@ -127,7 +127,7 @@ function Build {
/p:Deploy=$deploy `
/p:Test=$test `
/p:Pack=$pack `
- /p:DotNetBuildRepo=$productBuild `
+ /p:DotNetBuild=$productBuild `
/p:IntegrationTest=$integrationTest `
/p:PerformanceTest=$performanceTest `
/p:Sign=$sign `
diff --git a/eng/common/build.sh b/eng/common/build.sh
index 36fba82a37930d..da906da202626d 100755
--- a/eng/common/build.sh
+++ b/eng/common/build.sh
@@ -129,14 +129,14 @@ while [[ $# > 0 ]]; do
-pack)
pack=true
;;
- -sourcebuild|-sb)
+ -sourcebuild|-source-build|-sb)
build=true
source_build=true
product_build=true
restore=true
pack=true
;;
- -productBuild|-pb)
+ -productbuild|-product-build|-pb)
build=true
product_build=true
restore=true
@@ -241,7 +241,7 @@ function Build {
/p:RepoRoot="$repo_root" \
/p:Restore=$restore \
/p:Build=$build \
- /p:DotNetBuildRepo=$product_build \
+ /p:DotNetBuild=$product_build \
/p:DotNetBuildSourceOnly=$source_build \
/p:Rebuild=$rebuild \
/p:Test=$test \
diff --git a/eng/common/core-templates/steps/source-build.yml b/eng/common/core-templates/steps/source-build.yml
index f2a0f347fdd67a..0dde553c3ebfbb 100644
--- a/eng/common/core-templates/steps/source-build.yml
+++ b/eng/common/core-templates/steps/source-build.yml
@@ -51,13 +51,12 @@ steps:
${{ coalesce(parameters.platform.buildScript, './build.sh') }} --ci \
--configuration $buildConfig \
--restore --build --pack -bl \
+ --source-build \
${{ parameters.platform.buildArguments }} \
$internalRuntimeDownloadArgs \
$targetRidArgs \
$baseRidArgs \
$portableBuildArgs \
- /p:DotNetBuildSourceOnly=true \
- /p:DotNetBuildRepo=true \
displayName: Build
- template: /eng/common/core-templates/steps/publish-pipeline-artifacts.yml
diff --git a/eng/common/darc-init.sh b/eng/common/darc-init.sh
index 36dbd45e1ce866..e889f439b8dc99 100755
--- a/eng/common/darc-init.sh
+++ b/eng/common/darc-init.sh
@@ -68,7 +68,7 @@ function InstallDarcCli {
fi
fi
- local arcadeServicesSource="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json"
+ local arcadeServicesSource="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-eng/nuget/v3/index.json"
echo "Installing Darc CLI version $darcVersion..."
echo "You may need to restart your command shell if this is the first dotnet tool you have installed."
diff --git a/eng/common/templates/steps/vmr-sync.yml b/eng/common/templates/steps/vmr-sync.yml
new file mode 100644
index 00000000000000..599afb6186b8fd
--- /dev/null
+++ b/eng/common/templates/steps/vmr-sync.yml
@@ -0,0 +1,207 @@
+### These steps synchronize new code from product repositories into the VMR (https://github.com/dotnet/dotnet).
+### They initialize the darc CLI and pull the new updates.
+### Changes are applied locally onto the already cloned VMR (located in $vmrPath).
+
+parameters:
+- name: targetRef
+ displayName: Target revision in dotnet/ to synchronize
+ type: string
+ default: $(Build.SourceVersion)
+
+- name: vmrPath
+ displayName: Path where the dotnet/dotnet is checked out to
+ type: string
+ default: $(Agent.BuildDirectory)/vmr
+
+- name: additionalSyncs
+ displayName: Optional list of package names whose repo's source will also be synchronized in the local VMR, e.g. NuGet.Protocol
+ type: object
+ default: []
+
+steps:
+- checkout: vmr
+ displayName: Clone dotnet/dotnet
+ path: vmr
+ clean: true
+
+- checkout: self
+ displayName: Clone $(Build.Repository.Name)
+ path: repo
+ fetchDepth: 0
+
+# This step is needed so that when we get a detached HEAD / shallow clone,
+# we still pull the commit into the temporary repo clone to use it during the sync.
+# Also unshallow the clone so that forwardflow command would work.
+- script: |
+ git branch repo-head
+ git rev-parse HEAD
+ displayName: Label PR commit
+ workingDirectory: $(Agent.BuildDirectory)/repo
+
+- script: |
+ vmr_sha=$(grep -oP '(?<=Sha=")[^"]*' $(Agent.BuildDirectory)/repo/eng/Version.Details.xml)
+ echo "##vso[task.setvariable variable=vmr_sha]$vmr_sha"
+ displayName: Obtain the vmr sha from Version.Details.xml (Unix)
+ condition: ne(variables['Agent.OS'], 'Windows_NT')
+ workingDirectory: $(Agent.BuildDirectory)/repo
+
+- powershell: |
+ [xml]$xml = Get-Content -Path $(Agent.BuildDirectory)/repo/eng/Version.Details.xml
+ $vmr_sha = $xml.SelectSingleNode("//Source").Sha
+ Write-Output "##vso[task.setvariable variable=vmr_sha]$vmr_sha"
+ displayName: Obtain the vmr sha from Version.Details.xml (Windows)
+ condition: eq(variables['Agent.OS'], 'Windows_NT')
+ workingDirectory: $(Agent.BuildDirectory)/repo
+
+- script: |
+ git fetch --all
+ git checkout $(vmr_sha)
+ displayName: Checkout VMR at correct sha for repo flow
+ workingDirectory: ${{ parameters.vmrPath }}
+
+- script: |
+ git config --global user.name "dotnet-maestro[bot]"
+ git config --global user.email "dotnet-maestro[bot]@users.noreply.github.com"
+ displayName: Set git author to dotnet-maestro[bot]
+ workingDirectory: ${{ parameters.vmrPath }}
+
+- script: |
+ ./eng/common/vmr-sync.sh \
+ --vmr ${{ parameters.vmrPath }} \
+ --tmp $(Agent.TempDirectory) \
+ --azdev-pat '$(dn-bot-all-orgs-code-r)' \
+ --ci \
+ --debug
+
+ if [ "$?" -ne 0 ]; then
+ echo "##vso[task.logissue type=error]Failed to synchronize the VMR"
+ exit 1
+ fi
+ displayName: Sync repo into VMR (Unix)
+ condition: ne(variables['Agent.OS'], 'Windows_NT')
+ workingDirectory: $(Agent.BuildDirectory)/repo
+
+- script: |
+ git config --global diff.astextplain.textconv echo
+ git config --system core.longpaths true
+ displayName: Configure Windows git (longpaths, astextplain)
+ condition: eq(variables['Agent.OS'], 'Windows_NT')
+
+- powershell: |
+ ./eng/common/vmr-sync.ps1 `
+ -vmr ${{ parameters.vmrPath }} `
+ -tmp $(Agent.TempDirectory) `
+ -azdevPat '$(dn-bot-all-orgs-code-r)' `
+ -ci `
+ -debugOutput
+
+ if ($LASTEXITCODE -ne 0) {
+ echo "##vso[task.logissue type=error]Failed to synchronize the VMR"
+ exit 1
+ }
+ displayName: Sync repo into VMR (Windows)
+ condition: eq(variables['Agent.OS'], 'Windows_NT')
+ workingDirectory: $(Agent.BuildDirectory)/repo
+
+- ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
+ - task: CopyFiles@2
+ displayName: Collect failed patches
+ condition: failed()
+ inputs:
+ SourceFolder: '$(Agent.TempDirectory)'
+ Contents: '*.patch'
+ TargetFolder: '$(Build.ArtifactStagingDirectory)/FailedPatches'
+
+ - publish: '$(Build.ArtifactStagingDirectory)/FailedPatches'
+ artifact: $(System.JobDisplayName)_FailedPatches
+ displayName: Upload failed patches
+ condition: failed()
+
+- ${{ each assetName in parameters.additionalSyncs }}:
+ # The vmr-sync script ends up staging files in the local VMR so we have to commit those
+ - script:
+ git commit --allow-empty -am "Forward-flow $(Build.Repository.Name)"
+ displayName: Commit local VMR changes
+ workingDirectory: ${{ parameters.vmrPath }}
+
+ - script: |
+ set -ex
+
+ echo "Searching for details of asset ${{ assetName }}..."
+
+ # Use darc to get dependencies information
+ dependencies=$(./.dotnet/dotnet darc get-dependencies --name '${{ assetName }}' --ci)
+
+ # Extract repository URL and commit hash
+ repository=$(echo "$dependencies" | grep 'Repo:' | sed 's/Repo:[[:space:]]*//' | head -1)
+
+ if [ -z "$repository" ]; then
+ echo "##vso[task.logissue type=error]Asset ${{ assetName }} not found in the dependency list"
+ exit 1
+ fi
+
+ commit=$(echo "$dependencies" | grep 'Commit:' | sed 's/Commit:[[:space:]]*//' | head -1)
+
+ echo "Updating the VMR from $repository / $commit..."
+ cd ..
+ git clone $repository ${{ assetName }}
+ cd ${{ assetName }}
+ git checkout $commit
+ git branch "sync/$commit"
+
+ ./eng/common/vmr-sync.sh \
+ --vmr ${{ parameters.vmrPath }} \
+ --tmp $(Agent.TempDirectory) \
+ --azdev-pat '$(dn-bot-all-orgs-code-r)' \
+ --ci \
+ --debug
+
+ if [ "$?" -ne 0 ]; then
+ echo "##vso[task.logissue type=error]Failed to synchronize the VMR"
+ exit 1
+ fi
+ displayName: Sync ${{ assetName }} into the VMR (Unix)
+ condition: ne(variables['Agent.OS'], 'Windows_NT')
+ workingDirectory: $(Agent.BuildDirectory)/repo
+
+ - powershell: |
+ $ErrorActionPreference = 'Stop'
+
+ Write-Host "Searching for details of asset ${{ assetName }}..."
+
+ $dependencies = .\.dotnet\dotnet darc get-dependencies --name '${{ assetName }}' --ci
+
+ $repository = $dependencies | Select-String -Pattern 'Repo:\s+([^\s]+)' | Select-Object -First 1
+ $repository -match 'Repo:\s+([^\s]+)' | Out-Null
+ $repository = $matches[1]
+
+ if ($repository -eq $null) {
+ Write-Error "Asset ${{ assetName }} not found in the dependency list"
+ exit 1
+ }
+
+ $commit = $dependencies | Select-String -Pattern 'Commit:\s+([^\s]+)' | Select-Object -First 1
+ $commit -match 'Commit:\s+([^\s]+)' | Out-Null
+ $commit = $matches[1]
+
+ Write-Host "Updating the VMR from $repository / $commit..."
+ cd ..
+ git clone $repository ${{ assetName }}
+ cd ${{ assetName }}
+ git checkout $commit
+ git branch "sync/$commit"
+
+ .\eng\common\vmr-sync.ps1 `
+ -vmr ${{ parameters.vmrPath }} `
+ -tmp $(Agent.TempDirectory) `
+ -azdevPat '$(dn-bot-all-orgs-code-r)' `
+ -ci `
+ -debugOutput
+
+ if ($LASTEXITCODE -ne 0) {
+ echo "##vso[task.logissue type=error]Failed to synchronize the VMR"
+ exit 1
+ }
+ displayName: Sync ${{ assetName }} into the VMR (Windows)
+ condition: eq(variables['Agent.OS'], 'Windows_NT')
+ workingDirectory: $(Agent.BuildDirectory)/repo
diff --git a/eng/common/templates/vmr-build-pr.yml b/eng/common/templates/vmr-build-pr.yml
new file mode 100644
index 00000000000000..670cf32c3bd1fa
--- /dev/null
+++ b/eng/common/templates/vmr-build-pr.yml
@@ -0,0 +1,33 @@
+trigger: none
+pr:
+ branches:
+ include:
+ - main
+ - release/*
+ paths:
+ exclude:
+ - documentation/*
+ - README.md
+ - CODEOWNERS
+
+variables:
+- template: /eng/common/templates/variables/pool-providers.yml@self
+
+- name: skipComponentGovernanceDetection # we run CG on internal builds only
+ value: true
+
+- name: Codeql.Enabled # we run CodeQL on internal builds only
+ value: false
+
+resources:
+ repositories:
+ - repository: vmr
+ type: github
+ name: dotnet/dotnet
+ endpoint: dotnet
+
+stages:
+- template: /eng/pipelines/templates/stages/vmr-build.yml@vmr
+ parameters:
+ isBuiltFromVmr: false
+ scope: lite
diff --git a/eng/common/tools.ps1 b/eng/common/tools.ps1
index 7373e5305465d7..5f40a3f8238a8b 100644
--- a/eng/common/tools.ps1
+++ b/eng/common/tools.ps1
@@ -68,8 +68,6 @@ $ErrorActionPreference = 'Stop'
# True if the build is a product build
[bool]$productBuild = if (Test-Path variable:productBuild) { $productBuild } else { $false }
-[String[]]$properties = if (Test-Path variable:properties) { $properties } else { @() }
-
function Create-Directory ([string[]] $path) {
New-Item -Path $path -Force -ItemType 'Directory' | Out-Null
}
@@ -853,7 +851,7 @@ function MSBuild-Core() {
# When running on Azure Pipelines, override the returned exit code to avoid double logging.
# Skip this when the build is a child of the VMR orchestrator build.
- if ($ci -and $env:SYSTEM_TEAMPROJECT -ne $null -and !$productBuild -and -not($properties -like "*DotNetBuildRepo=true*")) {
+ if ($ci -and $env:SYSTEM_TEAMPROJECT -ne $null -and !$productBuild) {
Write-PipelineSetResult -Result "Failed" -Message "msbuild execution failed."
# Exiting with an exit code causes the azure pipelines task to log yet another "noise" error
# The above Write-PipelineSetResult will cause the task to be marked as failure without adding yet another error
diff --git a/eng/common/tools.sh b/eng/common/tools.sh
index cc007b1f15ad05..25f5932eee982a 100755
--- a/eng/common/tools.sh
+++ b/eng/common/tools.sh
@@ -507,7 +507,7 @@ function MSBuild-Core {
# When running on Azure Pipelines, override the returned exit code to avoid double logging.
# Skip this when the build is a child of the VMR orchestrator build.
- if [[ "$ci" == true && -n ${SYSTEM_TEAMPROJECT:-} && "$product_build" != true && "$properties" != *"DotNetBuildRepo=true"* ]]; then
+ if [[ "$ci" == true && -n ${SYSTEM_TEAMPROJECT:-} && "$product_build" != true ]]; then
Write-PipelineSetResult -result "Failed" -message "msbuild execution failed."
# Exiting with an exit code causes the azure pipelines task to log yet another "noise" error
# The above Write-PipelineSetResult will cause the task to be marked as failure without adding yet another error
diff --git a/eng/common/vmr-sync.ps1 b/eng/common/vmr-sync.ps1
new file mode 100755
index 00000000000000..8c3c91ce8dede9
--- /dev/null
+++ b/eng/common/vmr-sync.ps1
@@ -0,0 +1,138 @@
+<#
+.SYNOPSIS
+
+This script is used for synchronizing the current repository into a local VMR.
+It pulls the current repository's code into the specified VMR directory for local testing or
+Source-Build validation.
+
+.DESCRIPTION
+
+The tooling used for synchronization will clone the VMR repository into a temporary folder if
+it does not already exist. These clones can be reused in future synchronizations, so it is
+recommended to dedicate a folder for this to speed up re-runs.
+
+.EXAMPLE
+ Synchronize current repository into a local VMR:
+ ./vmr-sync.ps1 -vmrDir "$HOME/repos/dotnet" -tmpDir "$HOME/repos/tmp"
+
+.PARAMETER tmpDir
+Required. Path to the temporary folder where repositories will be cloned
+
+.PARAMETER vmrBranch
+Optional. Branch of the 'dotnet/dotnet' repo to synchronize. The VMR will be checked out to this branch
+
+.PARAMETER azdevPat
+Optional. Azure DevOps PAT to use for cloning private repositories.
+
+.PARAMETER vmrDir
+Optional. Path to the dotnet/dotnet repository. When null, gets cloned to the temporary folder
+
+.PARAMETER debugOutput
+Optional. Enables debug logging in the darc vmr command.
+
+.PARAMETER ci
+Optional. Denotes that the script is running in a CI environment.
+#>
+param (
+ [Parameter(Mandatory=$true, HelpMessage="Path to the temporary folder where repositories will be cloned")]
+ [string][Alias('t', 'tmp')]$tmpDir,
+ [string][Alias('b', 'branch')]$vmrBranch,
+ [string]$remote,
+ [string]$azdevPat,
+ [string][Alias('v', 'vmr')]$vmrDir,
+ [switch]$ci,
+ [switch]$debugOutput
+)
+
+function Fail {
+ Write-Host "> $($args[0])" -ForegroundColor 'Red'
+}
+
+function Highlight {
+ Write-Host "> $($args[0])" -ForegroundColor 'Cyan'
+}
+
+$verbosity = 'verbose'
+if ($debugOutput) {
+ $verbosity = 'debug'
+}
+# Validation
+
+if (-not $tmpDir) {
+ Fail "Missing -tmpDir argument. Please specify the path to the temporary folder where the repositories will be cloned"
+ exit 1
+}
+
+# Sanitize the input
+
+if (-not $vmrDir) {
+ $vmrDir = Join-Path $tmpDir 'dotnet'
+}
+
+if (-not (Test-Path -Path $tmpDir -PathType Container)) {
+ New-Item -ItemType Directory -Path $tmpDir | Out-Null
+}
+
+# Prepare the VMR
+
+if (-not (Test-Path -Path $vmrDir -PathType Container)) {
+ Highlight "Cloning 'dotnet/dotnet' into $vmrDir.."
+ git clone https://github.com/dotnet/dotnet $vmrDir
+
+ if ($vmrBranch) {
+ git -C $vmrDir switch -c $vmrBranch
+ }
+}
+else {
+ if ((git -C $vmrDir diff --quiet) -eq $false) {
+ Fail "There are changes in the working tree of $vmrDir. Please commit or stash your changes"
+ exit 1
+ }
+
+ if ($vmrBranch) {
+ Highlight "Preparing $vmrDir"
+ git -C $vmrDir checkout $vmrBranch
+ git -C $vmrDir pull
+ }
+}
+
+Set-StrictMode -Version Latest
+
+# Prepare darc
+
+Highlight 'Installing .NET, preparing the tooling..'
+. .\eng\common\tools.ps1
+$dotnetRoot = InitializeDotNetCli -install:$true
+$dotnet = "$dotnetRoot\dotnet.exe"
+& "$dotnet" tool restore
+
+Highlight "Starting the synchronization of VMR.."
+
+# Synchronize the VMR
+$darcArgs = (
+ "darc", "vmr", "forwardflow",
+ "--tmp", $tmpDir,
+ "--$verbosity",
+ $vmrDir
+)
+
+if ($ci) {
+ $darcArgs += ("--ci")
+}
+
+if ($azdevPat) {
+ $darcArgs += ("--azdev-pat", $azdevPat)
+}
+
+& "$dotnet" $darcArgs
+
+if ($LASTEXITCODE -eq 0) {
+ Highlight "Synchronization succeeded"
+}
+else {
+ Fail "Synchronization of repo to VMR failed!"
+ Fail "'$vmrDir' is left in its last state (re-run of this script will reset it)."
+ Fail "Please inspect the logs which contain path to the failing patch file (use -debugOutput to get all the details)."
+ Fail "Once you make changes to the conflicting VMR patch, commit it locally and re-run this script."
+ exit 1
+}
diff --git a/eng/common/vmr-sync.sh b/eng/common/vmr-sync.sh
new file mode 100755
index 00000000000000..86d77ccf5b4822
--- /dev/null
+++ b/eng/common/vmr-sync.sh
@@ -0,0 +1,205 @@
+#!/bin/bash
+
+### This script is used for synchronizing the current repository into a local VMR.
+### It pulls the current repository's code into the specified VMR directory for local testing or
+### Source-Build validation.
+###
+### The tooling used for synchronization will clone the VMR repository into a temporary folder if
+### it does not already exist. These clones can be reused in future synchronizations, so it is
+### recommended to dedicate a folder for this to speed up re-runs.
+###
+### USAGE:
+### Synchronize current repository into a local VMR:
+### ./vmr-sync.sh --tmp "$HOME/repos/tmp" "$HOME/repos/dotnet"
+###
+### Options:
+### -t, --tmp, --tmp-dir PATH
+### Required. Path to the temporary folder where repositories will be cloned
+###
+### -b, --branch, --vmr-branch BRANCH_NAME
+### Optional. Branch of the 'dotnet/dotnet' repo to synchronize. The VMR will be checked out to this branch
+###
+### --debug
+### Optional. Turns on the most verbose logging for the VMR tooling
+###
+### --remote name:URI
+### Optional. Additional remote to use during the synchronization
+### This can be used to synchronize to a commit from a fork of the repository
+### Example: 'runtime:https://github.com/yourfork/runtime'
+###
+### --azdev-pat
+### Optional. Azure DevOps PAT to use for cloning private repositories.
+###
+### -v, --vmr, --vmr-dir PATH
+### Optional. Path to the dotnet/dotnet repository. When null, gets cloned to the temporary folder
+
+source="${BASH_SOURCE[0]}"
+
+# resolve $source until the file is no longer a symlink
+while [[ -h "$source" ]]; do
+ scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
+ source="$(readlink "$source")"
+ # if $source was a relative symlink, we need to resolve it relative to the path where the
+ # symlink file was located
+ [[ $source != /* ]] && source="$scriptroot/$source"
+done
+scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
+
+function print_help () {
+ sed -n '/^### /,/^$/p' "$source" | cut -b 5-
+}
+
+COLOR_RED=$(tput setaf 1 2>/dev/null || true)
+COLOR_CYAN=$(tput setaf 6 2>/dev/null || true)
+COLOR_CLEAR=$(tput sgr0 2>/dev/null || true)
+COLOR_RESET=uniquesearchablestring
+FAILURE_PREFIX='> '
+
+function fail () {
+ echo "${COLOR_RED}$FAILURE_PREFIX${1//${COLOR_RESET}/${COLOR_RED}}${COLOR_CLEAR}" >&2
+}
+
+function highlight () {
+ echo "${COLOR_CYAN}$FAILURE_PREFIX${1//${COLOR_RESET}/${COLOR_CYAN}}${COLOR_CLEAR}"
+}
+
+tmp_dir=''
+vmr_dir=''
+vmr_branch=''
+additional_remotes=''
+verbosity=verbose
+azdev_pat=''
+ci=false
+
+while [[ $# -gt 0 ]]; do
+ opt="$(echo "$1" | tr "[:upper:]" "[:lower:]")"
+ case "$opt" in
+ -t|--tmp|--tmp-dir)
+ tmp_dir=$2
+ shift
+ ;;
+ -v|--vmr|--vmr-dir)
+ vmr_dir=$2
+ shift
+ ;;
+ -b|--branch|--vmr-branch)
+ vmr_branch=$2
+ shift
+ ;;
+ --remote)
+ additional_remotes="$additional_remotes $2"
+ shift
+ ;;
+ --azdev-pat)
+ azdev_pat=$2
+ shift
+ ;;
+ --ci)
+ ci=true
+ ;;
+ -d|--debug)
+ verbosity=debug
+ ;;
+ -h|--help)
+ print_help
+ exit 0
+ ;;
+ *)
+ fail "Invalid argument: $1"
+ print_help
+ exit 1
+ ;;
+ esac
+
+ shift
+done
+
+# Validation
+
+if [[ -z "$tmp_dir" ]]; then
+ fail "Missing --tmp-dir argument. Please specify the path to the temporary folder where the repositories will be cloned"
+ exit 1
+fi
+
+# Sanitize the input
+
+if [[ -z "$vmr_dir" ]]; then
+ vmr_dir="$tmp_dir/dotnet"
+fi
+
+if [[ ! -d "$tmp_dir" ]]; then
+ mkdir -p "$tmp_dir"
+fi
+
+if [[ "$verbosity" == "debug" ]]; then
+ set -x
+fi
+
+# Prepare the VMR
+
+if [[ ! -d "$vmr_dir" ]]; then
+ highlight "Cloning 'dotnet/dotnet' into $vmr_dir.."
+ git clone https://github.com/dotnet/dotnet "$vmr_dir"
+
+ if [[ -n "$vmr_branch" ]]; then
+ git -C "$vmr_dir" switch -c "$vmr_branch"
+ fi
+else
+ if ! git -C "$vmr_dir" diff --quiet; then
+ fail "There are changes in the working tree of $vmr_dir. Please commit or stash your changes"
+ exit 1
+ fi
+
+ if [[ -n "$vmr_branch" ]]; then
+ highlight "Preparing $vmr_dir"
+ git -C "$vmr_dir" checkout "$vmr_branch"
+ git -C "$vmr_dir" pull
+ fi
+fi
+
+set -e
+
+# Prepare darc
+
+highlight 'Installing .NET, preparing the tooling..'
+source "./eng/common/tools.sh"
+InitializeDotNetCli true
+dotnetDir=$( cd ./.dotnet/; pwd -P )
+dotnet=$dotnetDir/dotnet
+"$dotnet" tool restore
+
+highlight "Starting the synchronization of VMR.."
+set +e
+
+if [[ -n "$additional_remotes" ]]; then
+ additional_remotes="--additional-remotes $additional_remotes"
+fi
+
+if [[ -n "$azdev_pat" ]]; then
+ azdev_pat="--azdev-pat $azdev_pat"
+fi
+
+ci_arg=''
+if [[ "$ci" == "true" ]]; then
+ ci_arg="--ci"
+fi
+
+# Synchronize the VMR
+
+"$dotnet" darc vmr forwardflow \
+ --tmp "$tmp_dir" \
+ $azdev_pat \
+ --$verbosity \
+ $ci_arg \
+ $additional_remotes \
+ "$vmr_dir"
+
+if [[ $? == 0 ]]; then
+ highlight "Synchronization succeeded"
+else
+ fail "Synchronization of repo to VMR failed!"
+ fail "'$vmr_dir' is left in its last state (re-run of this script will reset it)."
+ fail "Please inspect the logs which contain path to the failing patch file (use --debug to get all the details)."
+ fail "Once you make changes to the conflicting VMR patch, commit it locally and re-run this script."
+ exit 1
+fi
diff --git a/global.json b/global.json
index 8616c8a6541983..0b348ef3b456fe 100644
--- a/global.json
+++ b/global.json
@@ -1,18 +1,18 @@
{
"sdk": {
- "version": "10.0.100-preview.3.25201.16",
+ "version": "10.0.100-preview.5.25265.106",
"allowPrerelease": true,
"rollForward": "major"
},
"tools": {
- "dotnet": "10.0.100-preview.3.25201.16"
+ "dotnet": "10.0.100-preview.5.25265.106"
},
"msbuild-sdks": {
- "Microsoft.DotNet.Arcade.Sdk": "10.0.0-beta.25260.104",
- "Microsoft.DotNet.Helix.Sdk": "10.0.0-beta.25260.104",
- "Microsoft.DotNet.SharedFramework.Sdk": "10.0.0-beta.25260.104",
+ "Microsoft.DotNet.Arcade.Sdk": "10.0.0-beta.25269.109",
+ "Microsoft.DotNet.Helix.Sdk": "10.0.0-beta.25269.109",
+ "Microsoft.DotNet.SharedFramework.Sdk": "10.0.0-beta.25269.109",
"Microsoft.Build.NoTargets": "3.7.0",
"Microsoft.Build.Traversal": "3.4.0",
- "Microsoft.NET.Sdk.IL": "10.0.0-preview.5.25260.104"
+ "Microsoft.NET.Sdk.IL": "10.0.0-preview.5.25269.109"
}
}
diff --git a/src/coreclr/inc/executableallocator.h b/src/coreclr/inc/executableallocator.h
index 973b950ad369bc..11caf3a6857d2d 100644
--- a/src/coreclr/inc/executableallocator.h
+++ b/src/coreclr/inc/executableallocator.h
@@ -182,9 +182,6 @@ class ExecutableAllocator
// Return true if double mapping is enabled.
static bool IsDoubleMappingEnabled();
- // Release memory allocated via DoubleMapping for either templates or normal double mapped data
- void ReleaseWorker(void* pRX, bool releaseTemplate);
-
// Initialize the allocator instance
bool Initialize();
@@ -265,18 +262,6 @@ class ExecutableAllocator
// Unmap the RW mapping at the specified address
void UnmapRW(void* pRW);
-
- // Allocate thunks from a template. pTemplate is the return value from CreateTemplate
- void* AllocateThunksFromTemplate(void *pTemplate, size_t templateSize);
-
- // Free a set of thunks allocated from templates. pThunks must have been returned from AllocateThunksFromTemplate
- void FreeThunksFromTemplate(void *pThunks, size_t templateSize);
-
- // Create a template
- // If templateInImage is not null, it will attempt to use it as the template, otherwise it will create an temporary in memory file to serve as the template
- // Some OS/Architectures may/may not be able to work with this, so this api is permitted to return NULL, and callers should have an alternate approach using
- // the codePageGenerator directly.
- void* CreateTemplate(void* templateInImage, size_t templateSize, void (*codePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size));
};
#define ExecutableWriterHolder ExecutableWriterHolderNoLog
diff --git a/src/coreclr/inc/loaderheap.h b/src/coreclr/inc/loaderheap.h
index d3040e0b4aa448..782f93cedc6264 100644
--- a/src/coreclr/inc/loaderheap.h
+++ b/src/coreclr/inc/loaderheap.h
@@ -455,19 +455,10 @@ class UnlockedLoaderHeap : public UnlockedLoaderHeapBase
static void WeGotAFaultNowWhat(UnlockedLoaderHeap *pHeap);
};
-struct InterleavedLoaderHeapConfig
-{
- uint32_t StubSize;
- void* Template;
- void (*CodePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size);
-};
-
-void InitializeLoaderHeapConfig(InterleavedLoaderHeapConfig *pConfig, size_t stubSize, void* templateInImage, void (*codePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size));
-
//===============================================================================
// This is the base class for InterleavedLoaderHeap It's used as a simple
// allocator for stubs in a scheme where each stub is a small fixed size, and is paired
-// with memory which is GetStubCodePageSize() bytes away. In addition there is an
+// with memory which is GetOSStubPageSize() bytes away. In addition there is an
// ability to free is via a "backout" mechanism that is not considered to have good performance.
//
//===============================================================================
@@ -501,13 +492,16 @@ class UnlockedInterleavedLoaderHeap : public UnlockedLoaderHeapBase
InterleavedStubFreeListNode *m_pFreeListHead;
- const InterleavedLoaderHeapConfig *m_pConfig;
+public:
+public:
+ void (*m_codePageGenerator)(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T size);
#ifndef DACCESS_COMPILE
protected:
UnlockedInterleavedLoaderHeap(
RangeList *pRangeList,
- const InterleavedLoaderHeapConfig *pConfig);
+ void (*codePageGenerator)(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T size),
+ DWORD dwGranularity);
virtual ~UnlockedInterleavedLoaderHeap();
#endif
@@ -1045,11 +1039,13 @@ class InterleavedLoaderHeap : public UnlockedInterleavedLoaderHeap
public:
InterleavedLoaderHeap(RangeList *pRangeList,
BOOL fUnlocked,
- const InterleavedLoaderHeapConfig *pConfig
+ void (*codePageGenerator)(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T size),
+ DWORD dwGranularity
)
: UnlockedInterleavedLoaderHeap(
pRangeList,
- pConfig),
+ codePageGenerator,
+ dwGranularity),
m_CriticalSection(fUnlocked ? NULL : CreateLoaderHeapLock())
{
WRAPPER_NO_CONTRACT;
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 24c85a77a0ae05..87ed627e5c8baf 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -27872,8 +27872,14 @@ GenTree* Compiler::gtNewSimdWithElementNode(
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
+ assert(op2->IsCnsIntOrI());
assert(varTypeIsArithmetic(op3));
+ ssize_t imm8 = op2->AsIntCon()->IconValue();
+ ssize_t count = simdSize / genTypeSize(simdBaseType);
+
+ assert((0 <= imm8) && (imm8 < count));
+
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
@@ -27939,20 +27945,6 @@ GenTree* Compiler::gtNewSimdWithElementNode(
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
- int immUpperBound = getSIMDVectorLength(simdSize, simdBaseType) - 1;
- bool rangeCheckNeeded = !op2->OperIsConst();
-
- if (!rangeCheckNeeded)
- {
- ssize_t imm8 = op2->AsIntCon()->IconValue();
- rangeCheckNeeded = (imm8 < 0) || (imm8 > immUpperBound);
- }
-
- if (rangeCheckNeeded)
- {
- op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
- }
-
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize);
}
diff --git a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp
index 08db22f5871c21..ae24a0ffdec609 100644
--- a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp
+++ b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp
@@ -1832,7 +1832,6 @@ void CodeGen::genBaseIntrinsic(GenTreeHWIntrinsic* node, insOpts instOptions)
GenTree* op1 = (node->GetOperandCount() >= 1) ? node->Op(1) : nullptr;
GenTree* op2 = (node->GetOperandCount() >= 2) ? node->Op(2) : nullptr;
- GenTree* op3 = (node->GetOperandCount() >= 3) ? node->Op(3) : nullptr;
genConsumeMultiOpOperands(node);
regNumber op1Reg = (op1 == nullptr) ? REG_NA : op1->GetRegNum();
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp
index 29fe70b74b62a6..72328592e3ac1a 100644
--- a/src/coreclr/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/jit/hwintrinsicxarch.cpp
@@ -4273,6 +4273,34 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
assert(sig->numArgs == 3);
GenTree* indexOp = impStackTop(1).val;
+ if (!indexOp->OperIsConst())
+ {
+ if (!opts.OptimizationEnabled())
+ {
+ // Only enable late stage rewriting if optimizations are enabled
+ // as we won't otherwise encounter a constant at the later point
+ return nullptr;
+ }
+
+ op3 = impPopStack().val;
+ op2 = impPopStack().val;
+ op1 = impSIMDPopStack();
+
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize);
+
+ retNode->AsHWIntrinsic()->SetMethodHandle(this, method R2RARG(*entryPoint));
+ break;
+ }
+
+ ssize_t imm8 = indexOp->AsIntCon()->IconValue();
+ ssize_t count = simdSize / genTypeSize(simdBaseType);
+
+ if ((imm8 >= count) || (imm8 < 0))
+ {
+ // Using software fallback if index is out of range (throw exception)
+ return nullptr;
+ }
+
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp
index 1c7d31f9eb400d..be2e6f619447b0 100644
--- a/src/coreclr/jit/rationalize.cpp
+++ b/src/coreclr/jit/rationalize.cpp
@@ -385,6 +385,57 @@ void Rationalizer::RewriteHWIntrinsicAsUserCall(GenTree** use, ArrayStackOperIsConst())
+ {
+ ssize_t imm8 = op2->AsIntCon()->IconValue();
+ ssize_t count = simdSize / genTypeSize(simdBaseType);
+
+ if ((imm8 >= count) || (imm8 < 0))
+ {
+ // Using software fallback if index is out of range (throw exception)
+ break;
+ }
+
+#if defined(TARGET_XARCH)
+ if (varTypeIsIntegral(simdBaseType))
+ {
+ if (varTypeIsLong(simdBaseType))
+ {
+ if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE41_X64))
+ {
+ break;
+ }
+ }
+ else if (!varTypeIsShort(simdBaseType))
+ {
+ if (!comp->compOpportunisticallyDependsOn(InstructionSet_SSE41))
+ {
+ break;
+ }
+ }
+ }
+#endif // TARGET_XARCH
+
+ result = comp->gtNewSimdWithElementNode(retType, op1, op2, op3, simdBaseJitType, simdSize);
+ break;
+ }
+ break;
+ }
+
default:
{
if (sigInfo.numArgs == 0)
diff --git a/src/coreclr/minipal/Unix/doublemapping.cpp b/src/coreclr/minipal/Unix/doublemapping.cpp
index 4a2516bea58484..b866da9f93e6f1 100644
--- a/src/coreclr/minipal/Unix/doublemapping.cpp
+++ b/src/coreclr/minipal/Unix/doublemapping.cpp
@@ -9,7 +9,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -26,11 +25,6 @@
#include "minipal.h"
#include "minipal/cpufeatures.h"
-#ifndef TARGET_APPLE
-#include
-#include
-#endif // TARGET_APPLE
-
#ifdef TARGET_APPLE
#include
@@ -259,320 +253,3 @@ bool VMToOSInterface::ReleaseRWMapping(void* pStart, size_t size)
{
return munmap(pStart, size) != -1;
}
-
-#ifndef TARGET_APPLE
-#define MAX_TEMPLATE_THUNK_TYPES 3 // Maximum number of times the CreateTemplate api can be called
-struct TemplateThunkMappingData
-{
- int fdImage;
- off_t offsetInFileOfStartOfSection;
- void* addrOfStartOfSection; // Always NULL if the template mapping data could not be initialized
- void* addrOfEndOfSection;
- bool imageTemplates;
- int templatesCreated;
- off_t nonImageTemplateCurrent;
-};
-
-struct InitializeTemplateThunkLocals
-{
- void* pTemplate;
- Dl_info info;
- TemplateThunkMappingData data;
-};
-
-static TemplateThunkMappingData *s_pThunkData = NULL;
-
-#ifdef FEATURE_MAP_THUNKS_FROM_IMAGE
-
-static Elf32_Word Elf32_WordMin(Elf32_Word left, Elf32_Word right)
-{
- return left < right ? left : right;
-}
-
-static int InitializeTemplateThunkMappingDataPhdrCallback(struct dl_phdr_info *info, size_t size, void *dataPtr)
-{
- InitializeTemplateThunkLocals *locals = (InitializeTemplateThunkLocals*)dataPtr;
-
- if ((void*)info->dlpi_addr == locals->info.dli_fbase)
- {
- for (size_t j = 0; j < info->dlpi_phnum; j++)
- {
- uint8_t* baseSectionAddr = (uint8_t*)locals->info.dli_fbase + info->dlpi_phdr[j].p_vaddr;
- if (locals->pTemplate < baseSectionAddr)
- {
- // Address is before the virtual address of this section begins
- continue;
- }
-
- // Since this is all in support of mapping code from the file, we need to ensure that the region we find
- // is actually present in the file.
- Elf32_Word sizeOfSectionWhichCanBeMapped = Elf32_WordMin(info->dlpi_phdr[j].p_filesz, info->dlpi_phdr[j].p_memsz);
-
- uint8_t* endAddressAllowedForTemplate = baseSectionAddr + sizeOfSectionWhichCanBeMapped;
- if (locals->pTemplate >= endAddressAllowedForTemplate)
- {
- // Template is after the virtual address of this section ends (or the mappable region of the file)
- continue;
- }
-
- // At this point, we have found the template section. Attempt to open the file, and record the various offsets for future use
-
- if (strlen(info->dlpi_name) == 0)
- {
- // This image cannot be directly referenced without capturing the argv[0] parameter
- return -1;
- }
-
- int fdImage = open(info->dlpi_name, O_RDONLY);
- if (fdImage == -1)
- {
- return -1; // Opening the image didn't work
- }
-
- locals->data.fdImage = fdImage;
- locals->data.offsetInFileOfStartOfSection = info->dlpi_phdr[j].p_offset;
- locals->data.addrOfStartOfSection = baseSectionAddr;
- locals->data.addrOfEndOfSection = baseSectionAddr + sizeOfSectionWhichCanBeMapped;
- locals->data.imageTemplates = true;
- return 1; // We have found the result. Abort further processing.
- }
- }
-
- // This isn't the interesting .so
- return 0;
-}
-#endif // FEATURE_MAP_THUNKS_FROM_IMAGE
-
-TemplateThunkMappingData *InitializeTemplateThunkMappingData(void* pTemplate)
-{
- InitializeTemplateThunkLocals locals;
- locals.pTemplate = pTemplate;
- locals.data.fdImage = 0;
- locals.data.offsetInFileOfStartOfSection = 0;
- locals.data.addrOfStartOfSection = NULL;
- locals.data.addrOfEndOfSection = NULL;
- locals.data.imageTemplates = false;
- locals.data.nonImageTemplateCurrent = 0;
- locals.data.templatesCreated = 0;
-
-#ifdef FEATURE_MAP_THUNKS_FROM_IMAGE
- if (dladdr(pTemplate, &locals.info) != 0)
- {
- dl_iterate_phdr(InitializeTemplateThunkMappingDataPhdrCallback, &locals);
- }
-#endif // FEATURE_MAP_THUNKS_FROM_IMAGE
-
- if (locals.data.addrOfStartOfSection == NULL)
- {
- // This is the detail of thunk data which indicates if we were able to compute the template mapping data from the image.
-
-#ifdef TARGET_FREEBSD
- int fd = shm_open(SHM_ANON, O_RDWR | O_CREAT, S_IRWXU);
-#elif defined(TARGET_LINUX) || defined(TARGET_ANDROID)
- int fd = memfd_create("doublemapper-template", MFD_CLOEXEC);
-#else
- int fd = -1;
-
-#ifndef TARGET_ANDROID
- // Bionic doesn't have shm_{open,unlink}
- // POSIX fallback
- if (fd == -1)
- {
- char name[24];
- sprintf(name, "/shm-dotnet-template-%d", getpid());
- name[sizeof(name) - 1] = '\0';
- shm_unlink(name);
- fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL | O_NOFOLLOW, 0600);
- shm_unlink(name);
- }
-#endif // !TARGET_ANDROID
-#endif
- if (fd != -1)
- {
- off_t maxFileSize = MAX_TEMPLATE_THUNK_TYPES * 0x10000; // The largest page size we support currently is 64KB.
- if (ftruncate(fd, maxFileSize) == -1) // Reserve a decent size chunk of logical memory for these things.
- {
- close(fd);
- }
- else
- {
- locals.data.fdImage = fd;
- locals.data.offsetInFileOfStartOfSection = 0;
- // We simulate the template thunk mapping data existing in mapped ram, by declaring that it exists at at
- // an address which is not NULL, and which is naturally aligned on the largest page size supported by any
- // architecture we support (0x10000). We do this, as the generalized logic here is designed around remapping
- // already mapped memory, and by doing this we are able to share that logic.
- locals.data.addrOfStartOfSection = (void*)0x10000;
- locals.data.addrOfEndOfSection = ((uint8_t*)locals.data.addrOfStartOfSection) + maxFileSize;
- locals.data.imageTemplates = false;
- }
- }
- }
-
-
- TemplateThunkMappingData *pAllocatedData = (TemplateThunkMappingData*)malloc(sizeof(TemplateThunkMappingData));
- *pAllocatedData = locals.data;
- TemplateThunkMappingData *pExpectedNull = NULL;
- if (__atomic_compare_exchange_n (&s_pThunkData, &pExpectedNull, pAllocatedData, false, __ATOMIC_RELEASE, __ATOMIC_RELAXED))
- {
- return pAllocatedData;
- }
- else
- {
- free(pAllocatedData);
- return __atomic_load_n(&s_pThunkData, __ATOMIC_ACQUIRE);
- }
-}
-#endif
-
-bool VMToOSInterface::AllocateThunksFromTemplateRespectsStartAddress()
-{
-#ifdef TARGET_APPLE
- return false;
-#else
- return true;
-#endif
-}
-
-void* VMToOSInterface::CreateTemplate(void* pImageTemplate, size_t templateSize, void (*codePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size))
-{
-#ifdef TARGET_APPLE
- return pImageTemplate;
-#elif defined(TARGET_X86)
- return NULL; // X86 doesn't support high performance relative addressing, which makes the template system not work
-#else
- if (pImageTemplate == NULL)
- return NULL;
-
- TemplateThunkMappingData* pThunkData = __atomic_load_n(&s_pThunkData, __ATOMIC_ACQUIRE);
- if (s_pThunkData == NULL)
- {
- pThunkData = InitializeTemplateThunkMappingData(pImageTemplate);
- }
-
- // Unable to create template mapping region
- if (pThunkData->addrOfStartOfSection == NULL)
- {
- return NULL;
- }
-
- int templatesCreated = __atomic_add_fetch(&pThunkData->templatesCreated, 1, __ATOMIC_SEQ_CST);
- assert(templatesCreated <= MAX_TEMPLATE_THUNK_TYPES);
-
- if (!pThunkData->imageTemplates)
- {
- // Need to allocate a memory mapped region to fill in the data
- off_t locationInFileToStoreGeneratedCode = __atomic_fetch_add((off_t*)&pThunkData->nonImageTemplateCurrent, (off_t)templateSize, __ATOMIC_SEQ_CST);
- void* mappedMemory = mmap(NULL, templateSize, PROT_READ | PROT_WRITE, MAP_SHARED, pThunkData->fdImage, locationInFileToStoreGeneratedCode);
- if (mappedMemory != MAP_FAILED)
- {
- codePageGenerator((uint8_t*)mappedMemory, (uint8_t*)mappedMemory, templateSize);
- munmap(mappedMemory, templateSize);
- return ((uint8_t*)pThunkData->addrOfStartOfSection) + locationInFileToStoreGeneratedCode;
- }
- else
- {
- return NULL;
- }
- }
- else
- {
- return pImageTemplate;
- }
-#endif
-}
-
-void* VMToOSInterface::AllocateThunksFromTemplate(void* pTemplate, size_t templateSize, void* pStartSpecification)
-{
-#ifdef TARGET_APPLE
- vm_address_t addr, taddr;
- vm_prot_t prot, max_prot;
- kern_return_t ret;
-
- // Allocate two contiguous ranges of memory: the first range will contain the stubs
- // and the second range will contain their data.
- do
- {
- ret = vm_allocate(mach_task_self(), &addr, templateSize * 2, VM_FLAGS_ANYWHERE);
- } while (ret == KERN_ABORTED);
-
- if (ret != KERN_SUCCESS)
- {
- return NULL;
- }
-
- do
- {
- ret = vm_remap(
- mach_task_self(), &addr, templateSize, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
- mach_task_self(), (vm_address_t)pTemplate, FALSE, &prot, &max_prot, VM_INHERIT_SHARE);
- } while (ret == KERN_ABORTED);
-
- if (ret != KERN_SUCCESS)
- {
- do
- {
- ret = vm_deallocate(mach_task_self(), addr, templateSize * 2);
- } while (ret == KERN_ABORTED);
-
- return NULL;
- }
- return (void*)addr;
-#else
- TemplateThunkMappingData* pThunkData = __atomic_load_n(&s_pThunkData, __ATOMIC_ACQUIRE);
- if (s_pThunkData == NULL)
- {
- pThunkData = InitializeTemplateThunkMappingData(pTemplate);
- }
-
- if (pThunkData->addrOfStartOfSection == NULL)
- {
- // This is the detail of thunk data which indicates if we were able to compute the template mapping data
- return NULL;
- }
-
- if (pTemplate < pThunkData->addrOfStartOfSection)
- {
- return NULL;
- }
-
- uint8_t* endOfTemplate = ((uint8_t*)pTemplate + templateSize);
- if (endOfTemplate > pThunkData->addrOfEndOfSection)
- return NULL;
-
- size_t sectionOffset = (uint8_t*)pTemplate - (uint8_t*)pThunkData->addrOfStartOfSection;
- off_t fileOffset = pThunkData->offsetInFileOfStartOfSection + sectionOffset;
-
- void *pStart = mmap(pStartSpecification, templateSize * 2, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | (pStartSpecification != NULL ? MAP_FIXED : 0), -1, 0);
- if (pStart == MAP_FAILED)
- {
- return NULL;
- }
-
- void *pStartCode = mmap(pStart, templateSize, PROT_READ | PROT_EXEC, MAP_PRIVATE | MAP_FIXED, pThunkData->fdImage, fileOffset);
- if (pStart != pStartCode)
- {
- munmap(pStart, templateSize * 2);
- return NULL;
- }
-
- return pStart;
-#endif
-}
-
-bool VMToOSInterface::FreeThunksFromTemplate(void* thunks, size_t templateSize)
-{
-#ifdef TARGET_APPLE
- kern_return_t ret;
-
- do
- {
- ret = vm_deallocate(mach_task_self(), (vm_address_t)thunks, templateSize * 2);
- } while (ret == KERN_ABORTED);
-
- return ret == KERN_SUCCESS ? true : false;
-#else
- munmap(thunks, templateSize * 2);
- return true;
-#endif
-}
diff --git a/src/coreclr/minipal/Windows/doublemapping.cpp b/src/coreclr/minipal/Windows/doublemapping.cpp
index f5f25f2bec92cc..9e8ddfed8e964d 100644
--- a/src/coreclr/minipal/Windows/doublemapping.cpp
+++ b/src/coreclr/minipal/Windows/doublemapping.cpp
@@ -210,23 +210,3 @@ bool VMToOSInterface::ReleaseRWMapping(void* pStart, size_t size)
{
return UnmapViewOfFile(pStart);
}
-
-void* VMToOSInterface::CreateTemplate(void* pImageTemplate, size_t templateSize, void (*codePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size))
-{
- return NULL;
-}
-
-bool VMToOSInterface::AllocateThunksFromTemplateRespectsStartAddress()
-{
- return false;
-}
-
-void* VMToOSInterface::AllocateThunksFromTemplate(void* pTemplate, size_t templateSize, void* pStart)
-{
- return NULL;
-}
-
-bool VMToOSInterface::FreeThunksFromTemplate(void* thunks, size_t templateSize)
-{
- return false;
-}
diff --git a/src/coreclr/minipal/minipal.h b/src/coreclr/minipal/minipal.h
index 01f497e60e6d7e..afecd9ce74dc72 100644
--- a/src/coreclr/minipal/minipal.h
+++ b/src/coreclr/minipal/minipal.h
@@ -75,41 +75,6 @@ class VMToOSInterface
// Return:
// true if it succeeded, false if it failed
static bool ReleaseRWMapping(void* pStart, size_t size);
-
- // Create a template for use by AllocateThunksFromTemplate
- // Parameters:
- // pImageTemplate - Address of start of template in the image for coreclr. (All addresses passed to the api in a process must be from the same module, if any call uses a pImageTemplate, all calls MUST)
- // templateSize - Size of the template
- // codePageGenerator - If the system is unable to use pImageTemplate, use this parameter to generate the code page instead
- //
- // Return:
- // NULL if creating the template fails
- // Non-NULL, a pointer to the template
- static void* CreateTemplate(void* pImageTemplate, size_t templateSize, void (*codePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size));
-
- // Indicate if the AllocateThunksFromTemplate function respects the pStart address passed to AllocateThunksFromTemplate on this platform
- // Return:
- // true if the parameter is respected, false if not
- static bool AllocateThunksFromTemplateRespectsStartAddress();
-
- // Allocate thunks from template
- // Parameters:
- // pTemplate - Value returned from CreateTemplate
- // templateSize - Size of the templates block in the image
- // pStart - Where to allocate (Specify NULL if no particular address is required). If non-null, this must be an address returned by ReserveDoubleMappedMemory
- //
- // Return:
- // NULL if the allocation fails
- // Non-NULL, a pointer to the allocated region.
- static void* AllocateThunksFromTemplate(void* pTemplate, size_t templateSize, void* pStart);
-
- // Free thunks allocated from template
- // Parameters:
- // pThunks - Address previously returned by AllocateThunksFromTemplate
- // templateSize - Size of the templates block in the image
- // Return:
- // true if it succeeded, false if it failed
- static bool FreeThunksFromTemplate(void* thunks, size_t templateSize);
};
#if defined(HOST_64BIT) && defined(FEATURE_CACHED_INTERFACE_DISPATCH)
diff --git a/src/coreclr/nativeaot/Runtime/unix/PalRedhawkUnix.cpp b/src/coreclr/nativeaot/Runtime/unix/PalRedhawkUnix.cpp
index 94ad25ceab8bdb..a928e7018da25f 100644
--- a/src/coreclr/nativeaot/Runtime/unix/PalRedhawkUnix.cpp
+++ b/src/coreclr/nativeaot/Runtime/unix/PalRedhawkUnix.cpp
@@ -526,7 +526,7 @@ REDHAWK_PALEXPORT UInt32_BOOL REDHAWK_PALAPI PalAllocateThunksFromTemplate(HANDL
vm_prot_t prot, max_prot;
kern_return_t ret;
- // Allocate two contiguous ranges of memory: the first range will contain the stubs
+ // Allocate two contiguous ranges of memory: the first range will contain the trampolines
// and the second range will contain their data.
do
{
diff --git a/src/coreclr/utilcode/executableallocator.cpp b/src/coreclr/utilcode/executableallocator.cpp
index 0242377072238c..d145ab03987a08 100644
--- a/src/coreclr/utilcode/executableallocator.cpp
+++ b/src/coreclr/utilcode/executableallocator.cpp
@@ -503,11 +503,6 @@ void* ExecutableAllocator::Commit(void* pStart, size_t size, bool isExecutable)
}
void ExecutableAllocator::Release(void* pRX)
-{
- ReleaseWorker(pRX, false /* this is the standard Release of normally allocated memory */);
-}
-
-void ExecutableAllocator::ReleaseWorker(void* pRX, bool releaseTemplate)
{
LIMITED_METHOD_CONTRACT;
@@ -553,19 +548,9 @@ void ExecutableAllocator::ReleaseWorker(void* pRX, bool releaseTemplate)
cachedMappingThatOverlaps = FindOverlappingCachedMapping(pBlock);
}
- if (releaseTemplate)
+ if (!VMToOSInterface::ReleaseDoubleMappedMemory(m_doubleMemoryMapperHandle, pRX, pBlock->offset, pBlock->size))
{
- if (!VMToOSInterface::FreeThunksFromTemplate(pRX, pBlock->size / 2))
- {
- g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the template mapped memory failed"));
- }
- }
- else
- {
- if (!VMToOSInterface::ReleaseDoubleMappedMemory(m_doubleMemoryMapperHandle, pRX, pBlock->offset, pBlock->size))
- {
- g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the double mapped memory failed"));
- }
+ g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the double mapped memory failed"));
}
// Put the released block into the free block list
pBlock->baseRX = NULL;
@@ -977,60 +962,3 @@ void ExecutableAllocator::UnmapRW(void* pRW)
g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the RW mapping failed"));
}
}
-
-void* ExecutableAllocator::AllocateThunksFromTemplate(void *pTemplate, size_t templateSize)
-{
- if (IsDoubleMappingEnabled() && VMToOSInterface::AllocateThunksFromTemplateRespectsStartAddress())
- {
- CRITSEC_Holder csh(m_CriticalSection);
-
- bool isFreeBlock;
- BlockRX* block = AllocateBlock(templateSize * 2, &isFreeBlock);
- if (block == NULL)
- {
- return NULL;
- }
-
- void* result = VMToOSInterface::ReserveDoubleMappedMemory(m_doubleMemoryMapperHandle, block->offset, templateSize * 2, 0, 0);
-
- if (result != NULL)
- {
- block->baseRX = result;
- AddRXBlock(block);
- }
- else
- {
- BackoutBlock(block, isFreeBlock);
- }
-
- void *pTemplateAddressAllocated = VMToOSInterface::AllocateThunksFromTemplate(pTemplate, templateSize, block->baseRX);
-
- if (pTemplateAddressAllocated == NULL)
- {
- ReleaseWorker(block->baseRX, false);
- }
-
- return pTemplateAddressAllocated;
- }
- else
- {
- return VMToOSInterface::AllocateThunksFromTemplate(pTemplate, templateSize, NULL);
- }
-}
-
-void ExecutableAllocator::FreeThunksFromTemplate(void *pThunks, size_t templateSize)
-{
- if (IsDoubleMappingEnabled() && VMToOSInterface::AllocateThunksFromTemplateRespectsStartAddress())
- {
- ReleaseWorker(pThunks, true /* This is a release of template allocated memory */);
- }
- else
- {
- VMToOSInterface::FreeThunksFromTemplate(pThunks, templateSize);
- }
-}
-
-void* ExecutableAllocator::CreateTemplate(void* templateInImage, size_t templateSize, void (*codePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size))
-{
- return VMToOSInterface::CreateTemplate(templateInImage, templateSize, codePageGenerator);
-}
diff --git a/src/coreclr/utilcode/interleavedloaderheap.cpp b/src/coreclr/utilcode/interleavedloaderheap.cpp
index 082e337caebda1..d908ea20c194db 100644
--- a/src/coreclr/utilcode/interleavedloaderheap.cpp
+++ b/src/coreclr/utilcode/interleavedloaderheap.cpp
@@ -33,13 +33,10 @@ namespace
UnlockedInterleavedLoaderHeap::UnlockedInterleavedLoaderHeap(
RangeList *pRangeList,
- const InterleavedLoaderHeapConfig *pConfig) :
+ void (*codePageGenerator)(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T size),
+ DWORD dwGranularity) :
UnlockedLoaderHeapBase(LoaderHeapImplementationKind::Interleaved),
- m_pEndReservedRegion(NULL),
- m_dwGranularity(pConfig->StubSize),
- m_pRangeList(pRangeList),
- m_pFreeListHead(NULL),
- m_pConfig(pConfig)
+ m_pFreeListHead(NULL)
{
CONTRACTL
{
@@ -49,7 +46,15 @@ UnlockedInterleavedLoaderHeap::UnlockedInterleavedLoaderHeap(
}
CONTRACTL_END;
+ m_pEndReservedRegion = NULL;
+
+ m_pRangeList = pRangeList;
+
_ASSERTE((GetStubCodePageSize() % GetOsPageSize()) == 0); // Stub code page size MUST be in increments of the page size. (Really it must be a power of 2 as well, but this is good enough)
+ m_dwGranularity = dwGranularity;
+
+ _ASSERTE(codePageGenerator != NULL);
+ m_codePageGenerator = codePageGenerator;
}
// ~LoaderHeap is not synchronised (obviously)
@@ -75,14 +80,7 @@ UnlockedInterleavedLoaderHeap::~UnlockedInterleavedLoaderHeap()
pVirtualAddress = pSearch->pVirtualAddress;
pNext = pSearch->pNext;
- if (m_pConfig->Template != NULL)
- {
- ExecutableAllocator::Instance()->FreeThunksFromTemplate(pVirtualAddress, GetStubCodePageSize());
- }
- else
- {
- ExecutableAllocator::Instance()->Release(pVirtualAddress);
- }
+ ExecutableAllocator::Instance()->Release(pVirtualAddress);
delete pSearch;
}
@@ -103,7 +101,6 @@ size_t UnlockedInterleavedLoaderHeap::GetBytesAvailReservedRegion()
BOOL UnlockedInterleavedLoaderHeap::CommitPages(void* pData, size_t dwSizeToCommitPart)
{
- _ASSERTE(m_pConfig->Template == NULL); // This path should only be used for LoaderHeaps which use the standard ExecutableAllocator functions
// Commit first set of pages, since it will contain the LoaderHeapBlock
{
void *pTemp = ExecutableAllocator::Instance()->Commit(pData, dwSizeToCommitPart, IsExecutable());
@@ -124,7 +121,7 @@ BOOL UnlockedInterleavedLoaderHeap::CommitPages(void* pData, size_t dwSizeToComm
}
ExecutableWriterHolder codePageWriterHolder((BYTE*)pData, dwSizeToCommitPart, ExecutableAllocator::DoNotAddToCache);
- m_pConfig->CodePageGenerator(codePageWriterHolder.GetRW(), (BYTE*)pData, dwSizeToCommitPart);
+ m_codePageGenerator(codePageWriterHolder.GetRW(), (BYTE*)pData, dwSizeToCommitPart);
FlushInstructionCache(GetCurrentProcess(), pData, dwSizeToCommitPart);
return TRUE;
@@ -140,8 +137,6 @@ BOOL UnlockedInterleavedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
}
CONTRACTL_END;
- _ASSERTE(m_pConfig->Template == NULL); // This path should only be used for LoaderHeaps which use the standard ExecutableAllocator functions
-
size_t dwSizeToReserve;
// Round to page size again
@@ -227,14 +222,6 @@ BOOL UnlockedInterleavedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
return TRUE;
}
-void ReleaseAllocatedThunks(BYTE* thunks)
-{
- ExecutableAllocator::Instance()->FreeThunksFromTemplate(thunks, GetStubCodePageSize());
-}
-
-using ThunkMemoryHolder = SpecializedWrapper;
-
-
// Get some more committed pages - either commit some more in the current reserved region, or, if it
// has run out, reserve another set of pages.
// Returns: FALSE if we can't get any more memory
@@ -250,57 +237,6 @@ BOOL UnlockedInterleavedLoaderHeap::GetMoreCommittedPages(size_t dwMinSize)
}
CONTRACTL_END;
- if (m_pConfig->Template != NULL)
- {
- ThunkMemoryHolder newAllocatedThunks = (BYTE*)ExecutableAllocator::Instance()->AllocateThunksFromTemplate(m_pConfig->Template, GetStubCodePageSize());
- if (newAllocatedThunks == NULL)
- {
- return FALSE;
- }
-
- NewHolder pNewBlock = new (nothrow) LoaderHeapBlock;
- if (pNewBlock == NULL)
- {
- return FALSE;
- }
-
- size_t dwSizeToReserve = GetStubCodePageSize() * 2;
-
- // Record reserved range in range list, if one is specified
- // Do this AFTER the commit - otherwise we'll have bogus ranges included.
- if (m_pRangeList != NULL)
- {
- if (!m_pRangeList->AddRange((const BYTE *) newAllocatedThunks,
- ((const BYTE *) newAllocatedThunks) + dwSizeToReserve,
- (void *) this))
- {
- return FALSE;
- }
- }
-
- m_dwTotalAlloc += dwSizeToReserve;
-
- pNewBlock.SuppressRelease();
- newAllocatedThunks.SuppressRelease();
-
- pNewBlock->dwVirtualSize = dwSizeToReserve;
- pNewBlock->pVirtualAddress = newAllocatedThunks;
- pNewBlock->pNext = m_pFirstBlock;
- pNewBlock->m_fReleaseMemory = TRUE;
-
- // Add to the linked list
- m_pFirstBlock = pNewBlock;
-
- m_pAllocPtr = (BYTE*)newAllocatedThunks;
- m_pPtrToEndOfCommittedRegion = m_pAllocPtr + GetStubCodePageSize();
- m_pEndReservedRegion = m_pAllocPtr + dwSizeToReserve; // For consistency with the non-template path m_pEndReservedRegion is after the end of the data area
- m_dwTotalAlloc += GetStubCodePageSize();
-
- return TRUE;
- }
-
- // From here, all work is only for the dynamically allocated InterleavedLoaderHeap path
-
// If we have memory we can use, what are you doing here!
_ASSERTE(dwMinSize > (SIZE_T)(m_pPtrToEndOfCommittedRegion - m_pAllocPtr));
@@ -538,13 +474,5 @@ void *UnlockedInterleavedLoaderHeap::UnlockedAllocStub(
return pResult;
}
-
-void InitializeLoaderHeapConfig(InterleavedLoaderHeapConfig *pConfig, size_t stubSize, void* templateInImage, void (*codePageGenerator)(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size))
-{
- pConfig->StubSize = (uint32_t)stubSize;
- pConfig->Template = ExecutableAllocator::Instance()->CreateTemplate(templateInImage, GetStubCodePageSize(), codePageGenerator);
- pConfig->CodePageGenerator = codePageGenerator;
-}
-
#endif // #ifndef DACCESS_COMPILE
diff --git a/src/coreclr/vm/amd64/thunktemplates.S b/src/coreclr/vm/amd64/thunktemplates.S
index 611556da202bb9..ebb0f6f67f193d 100644
--- a/src/coreclr/vm/amd64/thunktemplates.S
+++ b/src/coreclr/vm/amd64/thunktemplates.S
@@ -5,155 +5,9 @@
#include "unixasmmacros.inc"
#include "asmconstants.h"
-#ifdef FEATURE_MAP_THUNKS_FROM_IMAGE
-
-#define POINTER_SIZE 0x08
-
-#define THUNKS_MAP_SIZE 0x4000
-
-#define PAGE_SIZE 0x4000
-#define PAGE_SIZE_LOG2 14
-
-
-#define DATA_SLOT(stub, field, thunkSize, thunkTemplateName) C_FUNC(thunkTemplateName) + THUNKS_MAP_SIZE + stub##Data__##field + IN_PAGE_INDEX * thunkSize
-
-// ----------
-// StubPrecode
-// ----------
-
-#define STUB_PRECODE_CODESIZE 0x18 // 3 instructions, 13 bytes encoded + 11 bytes of padding
-#define STUB_PRECODE_DATASIZE 0x18 // 2 qwords + a BYTE
-.set STUB_PRECODE_NUM_THUNKS_PER_MAPPING,(THUNKS_MAP_SIZE / STUB_PRECODE_CODESIZE)
-
-.macro THUNKS_BLOCK_STUB_PRECODE
- IN_PAGE_INDEX = 0
- .rept STUB_PRECODE_NUM_THUNKS_PER_MAPPING
-
- mov r10, [rip + DATA_SLOT(StubPrecode, SecretParam, STUB_PRECODE_CODESIZE, StubPrecodeCodeTemplate)]
- jmp [rip + DATA_SLOT(StubPrecode, Target, STUB_PRECODE_CODESIZE, StubPrecodeCodeTemplate)]
- // The above is 13 bytes
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- IN_PAGE_INDEX = IN_PAGE_INDEX + 1
- .endr
-.endm
-
- .text
- .p2align PAGE_SIZE_LOG2
-LEAF_ENTRY StubPrecodeCodeTemplate
- THUNKS_BLOCK_STUB_PRECODE
-LEAF_END_MARKED StubPrecodeCodeTemplate, _TEXT
-
-// ----------
-// FixupPrecode
-// ----------
-
-#define FIXUP_PRECODE_CODESIZE 0x18
-#define FIXUP_PRECODE_DATASIZE 0x18 // 3 qwords
-.set FIXUP_PRECODE_NUM_THUNKS_PER_MAPPING,(THUNKS_MAP_SIZE / FIXUP_PRECODE_CODESIZE)
-
-.macro THUNKS_BLOCK_FIXUP_PRECODE
- IN_PAGE_INDEX = 0
- .rept FIXUP_PRECODE_NUM_THUNKS_PER_MAPPING
-
- jmp [rip + DATA_SLOT(FixupPrecode, Target, FIXUP_PRECODE_CODESIZE, FixupPrecodeCodeTemplate)]
- mov r10, [rip + DATA_SLOT(FixupPrecode, MethodDesc, FIXUP_PRECODE_CODESIZE, FixupPrecodeCodeTemplate)]
- jmp [rip + DATA_SLOT(FixupPrecode, PrecodeFixupThunk, FIXUP_PRECODE_CODESIZE, FixupPrecodeCodeTemplate)]
- // The above is 19 bytes
- int 3
- int 3
- int 3
- int 3
- int 3
- IN_PAGE_INDEX = IN_PAGE_INDEX + 1
- .endr
-.endm
-
- .text
- .p2align PAGE_SIZE_LOG2
-LEAF_ENTRY FixupPrecodeCodeTemplate
- THUNKS_BLOCK_FIXUP_PRECODE
- // We need 16 bytes of padding to pad this out
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
-LEAF_END_MARKED FixupPrecodeCodeTemplate, _TEXT
-
-// ----------
-// CallCountingStub
-// ----------
-
-#define CALLCOUNTING_CODESIZE 0x18
-#define CALLCOUNTING_DATASIZE 0x18 // 3 qwords
-.set CALLCOUNTING_NUM_THUNKS_PER_MAPPING, (THUNKS_MAP_SIZE / CALLCOUNTING_CODESIZE)
-.macro THUNKS_BLOCK_CALLCOUNTING
- IN_PAGE_INDEX = 0
- .rept CALLCOUNTING_NUM_THUNKS_PER_MAPPING
-
- mov rax,QWORD PTR [rip + DATA_SLOT(CallCountingStub, RemainingCallCountCell, CALLCOUNTING_CODESIZE, CallCountingStubCodeTemplate)]
- dec WORD PTR [rax]
- je 0f
- jmp QWORD PTR [rip + DATA_SLOT(CallCountingStub, TargetForMethod, CALLCOUNTING_CODESIZE, CallCountingStubCodeTemplate)]
- 0:
- jmp QWORD PTR [rip + DATA_SLOT(CallCountingStub, TargetForThresholdReached, CALLCOUNTING_CODESIZE, CallCountingStubCodeTemplate)]
- IN_PAGE_INDEX = IN_PAGE_INDEX + 1
- .endr
-.endm
-
- .text
- .p2align PAGE_SIZE_LOG2
-LEAF_ENTRY CallCountingStubCodeTemplate
- THUNKS_BLOCK_CALLCOUNTING
- // We need 16 bytes of padding to pad this out
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
- int 3
-LEAF_END_MARKED CallCountingStubCodeTemplate, _TEXT
-
-#endif
-
// STUB_PAGE_SIZE must match the behavior of GetStubCodePageSize() on this architecture/os
STUB_PAGE_SIZE = 16384
-#ifdef DATA_SLOT
-#undef DATA_SLOT
-#endif
-
#define DATA_SLOT(stub, field) C_FUNC(stub##Code) + STUB_PAGE_SIZE + stub##Data__##field
LEAF_ENTRY StubPrecodeCode, _TEXT
diff --git a/src/coreclr/vm/arm64/thunktemplates.S b/src/coreclr/vm/arm64/thunktemplates.S
index bbbc490854721e..df2abf7c29e0f7 100644
--- a/src/coreclr/vm/arm64/thunktemplates.S
+++ b/src/coreclr/vm/arm64/thunktemplates.S
@@ -4,117 +4,6 @@
#include "unixasmmacros.inc"
#include "asmconstants.h"
-#ifdef FEATURE_MAP_THUNKS_FROM_IMAGE
-#define POINTER_SIZE 0x08
-// Since Arm64 supports 4KB, 16KB and 64KB page sizes, as the templates is only defined for 16KB page size, this cannot be used
-// in a general purpose Linux environment. However it CAN be used on Apple platforms, which specify that 16KB is the system standard
-// page size.
-
-#define THUNKS_MAP_SIZE 0x4000
-
-#define PAGE_SIZE 0x4000
-#define PAGE_SIZE_LOG2 14
-
-
-#define DATA_SLOT(stub, field, thunkSize, thunkTemplateName) C_FUNC(thunkTemplateName) + THUNKS_MAP_SIZE + stub##Data__##field + IN_PAGE_INDEX * thunkSize
-
-// ----------
-// StubPrecode
-// ----------
-
-#define STUB_PRECODE_CODESIZE 0x18 // 3 instructions, 4 bytes each (and we also have 12 bytes of padding)
-#define STUB_PRECODE_DATASIZE 0x18 // 2 qwords + 1 byte
-.set STUB_PRECODE_NUM_THUNKS_PER_MAPPING, (THUNKS_MAP_SIZE / STUB_PRECODE_CODESIZE)
-
-.macro THUNKS_BLOCK_STUB_PRECODE
- IN_PAGE_INDEX = 0
- .rept STUB_PRECODE_NUM_THUNKS_PER_MAPPING
-
- ldr x10, DATA_SLOT(StubPrecode, Target, STUB_PRECODE_CODESIZE, StubPrecodeCodeTemplate)
- ldr x12, DATA_SLOT(StubPrecode, SecretParam, STUB_PRECODE_CODESIZE, StubPrecodeCodeTemplate)
- br x10
-
- brk 0xf000 // Stubs need to be 24-byte in size to allow for the data to be 2 pointers + 1 byte
- brk 0xf000 // Stubs need to be 24-byte in size to allow for the data to be 2 pointers + 1 byte
- brk 0xf000 // Stubs need to be 24-byte in size to allow for the data to be 2 pointers + 1 byte
-
- IN_PAGE_INDEX = IN_PAGE_INDEX + 1
- .endr
-.endm
-
- .text
- .p2align PAGE_SIZE_LOG2
-LEAF_ENTRY StubPrecodeCodeTemplate
- THUNKS_BLOCK_STUB_PRECODE
-LEAF_END_MARKED StubPrecodeCodeTemplate, _TEXT
-
-// ----------
-// FixupPrecode
-// ----------
-
-#define FIXUP_PRECODE_CODESIZE 0x18 // 5 instructions, 4 bytes each (and we also have 4 bytes of padding)
-#define FIXUP_PRECODE_DATASIZE 0x18 // 3 qwords
-.set FIXUP_PRECODE_NUM_THUNKS_PER_MAPPING,(THUNKS_MAP_SIZE / FIXUP_PRECODE_CODESIZE)
-
-.macro THUNKS_BLOCK_FIXUP_PRECODE
- IN_PAGE_INDEX = 0
- .rept FIXUP_PRECODE_NUM_THUNKS_PER_MAPPING
-
- ldr x11, DATA_SLOT(FixupPrecode, Target, FIXUP_PRECODE_CODESIZE, FixupPrecodeCodeTemplate)
- br x11
- ldr x12, DATA_SLOT(FixupPrecode, MethodDesc, FIXUP_PRECODE_CODESIZE, FixupPrecodeCodeTemplate)
- ldr x11, DATA_SLOT(FixupPrecode, PrecodeFixupThunk, FIXUP_PRECODE_CODESIZE, FixupPrecodeCodeTemplate)
- br x11
- brk 0xf000 // Stubs need to be 24-byte in size to allow for the data to be 3 pointers
-
- IN_PAGE_INDEX = IN_PAGE_INDEX + 1
- .endr
-.endm
-
- .text
- .p2align PAGE_SIZE_LOG2
-LEAF_ENTRY FixupPrecodeCodeTemplate
- THUNKS_BLOCK_FIXUP_PRECODE
-LEAF_END_MARKED FixupPrecodeCodeTemplate, _TEXT
-
-// ----------
-// CallCountingStub
-// ----------
-
-#define CALLCOUNTING_CODESIZE 0x28 // 5 instructions, 4 bytes each (and we also have 4 bytes of padding)
-#define CALLCOUNTING_DATASIZE 0x18 // 3 qwords
-.set CALLCOUNTING_NUM_THUNKS_PER_MAPPING, (THUNKS_MAP_SIZE / CALLCOUNTING_CODESIZE)
-
-.macro THUNKS_BLOCK_CALLCOUNTING
- IN_PAGE_INDEX = 0
- .rept CALLCOUNTING_NUM_THUNKS_PER_MAPPING
-
- ldr x9, DATA_SLOT(CallCountingStub, RemainingCallCountCell, CALLCOUNTING_CODESIZE, CallCountingStubCodeTemplate)
- ldrh w10, [x9]
- subs w10, w10, #1
- strh w10, [x9]
- beq 0f
- ldr x9, DATA_SLOT(CallCountingStub, TargetForMethod, CALLCOUNTING_CODESIZE, CallCountingStubCodeTemplate)
- br x9
-0:
- ldr x10, DATA_SLOT(CallCountingStub, TargetForThresholdReached, CALLCOUNTING_CODESIZE, CallCountingStubCodeTemplate)
- br x10
- brk 0xf000 // Stubs need to be 40-byte in size to allow for the data to be pointer aligned
-
- IN_PAGE_INDEX = IN_PAGE_INDEX + 1
- .endr
-.endm
-
- .text
- .p2align PAGE_SIZE_LOG2
-LEAF_ENTRY CallCountingStubCodeTemplate
- THUNKS_BLOCK_CALLCOUNTING
-LEAF_END_MARKED CallCountingStubCodeTemplate, _TEXT
-#endif
-
-#ifdef DATA_SLOT
-#undef DATA_SLOT
-#endif
#define DATA_SLOT(stub, field) . - (. - C_FUNC(stub##Code\STUB_PAGE_SIZE)) + \STUB_PAGE_SIZE + stub##Data__##field
.irp STUB_PAGE_SIZE, 16384, 32768, 65536
diff --git a/src/coreclr/vm/callcounting.cpp b/src/coreclr/vm/callcounting.cpp
index f5168fc0f799b1..0f26b7d4090096 100644
--- a/src/coreclr/vm/callcounting.cpp
+++ b/src/coreclr/vm/callcounting.cpp
@@ -293,14 +293,6 @@ void (*CallCountingStub::CallCountingStubCode)();
#ifndef DACCESS_COMPILE
-static InterleavedLoaderHeapConfig s_callCountingHeapConfig;
-
-#ifdef FEATURE_MAP_THUNKS_FROM_IMAGE
-extern "C" void CallCountingStubCodeTemplate();
-#else
-#define CallCountingStubCodeTemplate NULL
-#endif
-
void CallCountingStub::StaticInitialize()
{
#if defined(TARGET_ARM64) && defined(TARGET_UNIX)
@@ -318,22 +310,14 @@ void CallCountingStub::StaticInitialize()
EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Unsupported OS page size"));
}
#undef ENUM_PAGE_SIZE
-
- if (CallCountingStubCodeTemplate != NULL && pageSize != 0x4000)
- {
- // This should fail if the template is used on a platform which doesn't support the supported page size for templates
- ThrowHR(COR_E_EXECUTIONENGINE);
- }
#else
_ASSERTE((SIZE_T)((BYTE*)CallCountingStubCode_End - (BYTE*)CallCountingStubCode) <= CallCountingStub::CodeSize);
#endif
-
- InitializeLoaderHeapConfig(&s_callCountingHeapConfig, CallCountingStub::CodeSize, (void*)CallCountingStubCodeTemplate, CallCountingStub::GenerateCodePage);
}
#endif // DACCESS_COMPILE
-void CallCountingStub::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t pageSize)
+void CallCountingStub::GenerateCodePage(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T pageSize)
{
#ifdef TARGET_X86
int totalCodeSize = (pageSize / CallCountingStub::CodeSize) * CallCountingStub::CodeSize;
@@ -344,13 +328,13 @@ void CallCountingStub::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX,
// Set absolute addresses of the slots in the stub
BYTE* pCounterSlot = pageBaseRX + i + pageSize + offsetof(CallCountingStubData, RemainingCallCountCell);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(CallCountingStubCode_RemainingCallCountCell_Offset)) = pCounterSlot;
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(CallCountingStubCode_RemainingCallCountCell_Offset)) = pCounterSlot;
BYTE* pTargetSlot = pageBaseRX + i + pageSize + offsetof(CallCountingStubData, TargetForMethod);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(CallCountingStubCode_TargetForMethod_Offset)) = pTargetSlot;
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(CallCountingStubCode_TargetForMethod_Offset)) = pTargetSlot;
BYTE* pCountReachedZeroSlot = pageBaseRX + i + pageSize + offsetof(CallCountingStubData, TargetForThresholdReached);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(CallCountingStubCode_TargetForThresholdReached_Offset)) = pCountReachedZeroSlot;
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(CallCountingStubCode_TargetForThresholdReached_Offset)) = pCountReachedZeroSlot;
}
#else // TARGET_X86
FillStubCodePage(pageBase, (const void*)PCODEToPINSTR((PCODE)CallCountingStubCode), CallCountingStub::CodeSize, pageSize);
@@ -370,7 +354,7 @@ NOINLINE InterleavedLoaderHeap *CallCountingManager::CallCountingStubAllocator::
_ASSERTE(m_heap == nullptr);
- InterleavedLoaderHeap *heap = new InterleavedLoaderHeap(&m_heapRangeList, true /* fUnlocked */, &s_callCountingHeapConfig);
+ InterleavedLoaderHeap *heap = new InterleavedLoaderHeap(&m_heapRangeList, true /* fUnlocked */, CallCountingStub::GenerateCodePage, CallCountingStub::CodeSize);
m_heap = heap;
return heap;
}
@@ -491,7 +475,6 @@ CallCountingManager::~CallCountingManager()
}
#ifndef DACCESS_COMPILE
-
void CallCountingManager::StaticInitialize()
{
WRAPPER_NO_CONTRACT;
diff --git a/src/coreclr/vm/callcounting.h b/src/coreclr/vm/callcounting.h
index 59071aa51f140b..75a907f4d6ea3c 100644
--- a/src/coreclr/vm/callcounting.h
+++ b/src/coreclr/vm/callcounting.h
@@ -150,7 +150,7 @@ class CallCountingStub
static void StaticInitialize();
#endif // !DACCESS_COMPILE
- static void GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size);
+ static void GenerateCodePage(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T size);
PTR_CallCount GetRemainingCallCountCell() const;
PCODE GetTargetForMethod() const;
diff --git a/src/coreclr/vm/loaderallocator.cpp b/src/coreclr/vm/loaderallocator.cpp
index f31d2d068bbfb8..5fe3bb2faf2831 100644
--- a/src/coreclr/vm/loaderallocator.cpp
+++ b/src/coreclr/vm/loaderallocator.cpp
@@ -1208,7 +1208,8 @@ void LoaderAllocator::Init(BYTE *pExecutableHeapMemory)
m_pNewStubPrecodeHeap = new (&m_NewStubPrecodeHeapInstance) InterleavedLoaderHeap(
&m_stubPrecodeRangeList,
false /* fUnlocked */,
- &s_stubPrecodeHeapConfig);
+ StubPrecode::GenerateCodePage,
+ StubPrecode::CodeSize);
#if defined(FEATURE_STUBPRECODE_DYNAMIC_HELPERS) && defined(FEATURE_READYTORUN)
if (IsCollectible())
@@ -1218,12 +1219,14 @@ void LoaderAllocator::Init(BYTE *pExecutableHeapMemory)
m_pDynamicHelpersStubHeap = new (&m_DynamicHelpersHeapInstance) InterleavedLoaderHeap(
&m_dynamicHelpersRangeList,
false /* fUnlocked */,
- &s_stubPrecodeHeapConfig);
+ StubPrecode::GenerateCodePage,
+ StubPrecode::CodeSize);
#endif // defined(FEATURE_STUBPRECODE_DYNAMIC_HELPERS) && defined(FEATURE_READYTORUN)
m_pFixupPrecodeHeap = new (&m_FixupPrecodeHeapInstance) InterleavedLoaderHeap(&m_fixupPrecodeRangeList,
false /* fUnlocked */,
- &s_fixupStubPrecodeHeapConfig);
+ FixupPrecode::GenerateCodePage,
+ FixupPrecode::CodeSize);
// Initialize the EE marshaling data to NULL.
m_pMarshalingData = NULL;
diff --git a/src/coreclr/vm/precode.cpp b/src/coreclr/vm/precode.cpp
index 798e9849de3a6a..e3e3983e8716e1 100644
--- a/src/coreclr/vm/precode.cpp
+++ b/src/coreclr/vm/precode.cpp
@@ -15,11 +15,6 @@
#include "perfmap.h"
#endif
-InterleavedLoaderHeapConfig s_stubPrecodeHeapConfig;
-#ifdef HAS_FIXUP_PRECODE
-InterleavedLoaderHeapConfig s_fixupStubPrecodeHeapConfig;
-#endif
-
//==========================================================================================
// class Precode
//==========================================================================================
@@ -500,12 +495,6 @@ void (*StubPrecode::StubPrecodeCode)();
void (*StubPrecode::StubPrecodeCode_End)();
#endif
-#ifdef FEATURE_MAP_THUNKS_FROM_IMAGE
-extern "C" void StubPrecodeCodeTemplate();
-#else
-#define StubPrecodeCodeTemplate NULL
-#endif
-
void StubPrecode::StaticInitialize()
{
#if defined(TARGET_ARM64) && defined(TARGET_UNIX)
@@ -523,13 +512,6 @@ void StubPrecode::StaticInitialize()
default:
EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Unsupported OS page size"));
}
-
- if (StubPrecodeCodeTemplate != NULL && pageSize != 0x4000)
- {
- // This should fail if the template is used on a platform which doesn't support the supported page size for templates
- ThrowHR(COR_E_EXECUTIONENGINE);
- }
-
#undef ENUM_PAGE_SIZE
#else
_ASSERTE((SIZE_T)((BYTE*)StubPrecodeCode_End - (BYTE*)StubPrecodeCode) <= StubPrecode::CodeSize);
@@ -542,22 +524,21 @@ void StubPrecode::StaticInitialize()
_ASSERTE((*((BYTE*)PCODEToPINSTR((PCODE)StubPrecodeCode) + OFFSETOF_PRECODE_TYPE)) == StubPrecode::Type);
#endif
- InitializeLoaderHeapConfig(&s_stubPrecodeHeapConfig, StubPrecode::CodeSize, (void*)StubPrecodeCodeTemplate, StubPrecode::GenerateCodePage);
}
-void StubPrecode::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t pageSize)
+void StubPrecode::GenerateCodePage(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T pageSize)
{
#ifdef TARGET_X86
int totalCodeSize = (pageSize / StubPrecode::CodeSize) * StubPrecode::CodeSize;
for (int i = 0; i < totalCodeSize; i += StubPrecode::CodeSize)
{
- memcpy(pageBase + i, (const void*)StubPrecodeCode, (uint8_t*)StubPrecodeCode_End - (uint8_t*)StubPrecodeCode);
+ memcpy(pageBase + i, (const void*)StubPrecodeCode, (BYTE*)StubPrecodeCode_End - (BYTE*)StubPrecodeCode);
- uint8_t* pTargetSlot = pageBaseRX + i + pageSize + offsetof(StubPrecodeData, Target);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(StubPrecodeCode_Target_Offset)) = pTargetSlot;
+ BYTE* pTargetSlot = pageBaseRX + i + pageSize + offsetof(StubPrecodeData, Target);
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(StubPrecodeCode_Target_Offset)) = pTargetSlot;
BYTE* pMethodDescSlot = pageBaseRX + i + pageSize + offsetof(StubPrecodeData, SecretParam);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(StubPrecodeCode_MethodDesc_Offset)) = pMethodDescSlot;
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(StubPrecodeCode_MethodDesc_Offset)) = pMethodDescSlot;
}
#else // TARGET_X86
FillStubCodePage(pageBase, (const void*)PCODEToPINSTR((PCODE)StubPrecodeCode), StubPrecode::CodeSize, pageSize);
@@ -645,12 +626,6 @@ void (*FixupPrecode::FixupPrecodeCode)();
void (*FixupPrecode::FixupPrecodeCode_End)();
#endif
-#ifdef FEATURE_MAP_THUNKS_FROM_IMAGE
-extern "C" void FixupPrecodeCodeTemplate();
-#else
-#define FixupPrecodeCodeTemplate NULL
-#endif
-
void FixupPrecode::StaticInitialize()
{
#if defined(TARGET_ARM64) && defined(TARGET_UNIX)
@@ -670,12 +645,6 @@ void FixupPrecode::StaticInitialize()
EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Unsupported OS page size"));
}
#undef ENUM_PAGE_SIZE
-
- if (FixupPrecodeCodeTemplate != NULL && pageSize != 0x4000)
- {
- // This should fail if the template is used on a platform which doesn't support the supported page size for templates
- ThrowHR(COR_E_EXECUTIONENGINE);
- }
#else
_ASSERTE((SIZE_T)((BYTE*)FixupPrecodeCode_End - (BYTE*)FixupPrecodeCode) <= FixupPrecode::CodeSize);
#endif
@@ -686,11 +655,9 @@ void FixupPrecode::StaticInitialize()
#else
_ASSERTE(*((BYTE*)PCODEToPINSTR((PCODE)FixupPrecodeCode) + OFFSETOF_PRECODE_TYPE) == FixupPrecode::Type);
#endif
-
- InitializeLoaderHeapConfig(&s_fixupStubPrecodeHeapConfig, FixupPrecode::CodeSize, (void*)FixupPrecodeCodeTemplate, FixupPrecode::GenerateCodePage);
}
-void FixupPrecode::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t pageSize)
+void FixupPrecode::GenerateCodePage(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T pageSize)
{
#ifdef TARGET_X86
int totalCodeSize = (pageSize / FixupPrecode::CodeSize) * FixupPrecode::CodeSize;
@@ -698,14 +665,14 @@ void FixupPrecode::GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size
for (int i = 0; i < totalCodeSize; i += FixupPrecode::CodeSize)
{
memcpy(pageBase + i, (const void*)FixupPrecodeCode, FixupPrecode::CodeSize);
- uint8_t* pTargetSlot = pageBaseRX + i + pageSize + offsetof(FixupPrecodeData, Target);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(FixupPrecodeCode_Target_Offset)) = pTargetSlot;
+ BYTE* pTargetSlot = pageBaseRX + i + pageSize + offsetof(FixupPrecodeData, Target);
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(FixupPrecodeCode_Target_Offset)) = pTargetSlot;
BYTE* pMethodDescSlot = pageBaseRX + i + pageSize + offsetof(FixupPrecodeData, MethodDesc);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(FixupPrecodeCode_MethodDesc_Offset)) = pMethodDescSlot;
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(FixupPrecodeCode_MethodDesc_Offset)) = pMethodDescSlot;
BYTE* pPrecodeFixupThunkSlot = pageBaseRX + i + pageSize + offsetof(FixupPrecodeData, PrecodeFixupThunk);
- *(uint8_t**)(pageBase + i + SYMBOL_VALUE(FixupPrecodeCode_PrecodeFixupThunk_Offset)) = pPrecodeFixupThunkSlot;
+ *(BYTE**)(pageBase + i + SYMBOL_VALUE(FixupPrecodeCode_PrecodeFixupThunk_Offset)) = pPrecodeFixupThunkSlot;
}
#else // TARGET_X86
FillStubCodePage(pageBase, (const void*)PCODEToPINSTR((PCODE)FixupPrecodeCode), FixupPrecode::CodeSize, pageSize);
diff --git a/src/coreclr/vm/precode.h b/src/coreclr/vm/precode.h
index 64394d259e91a4..87570f217292a0 100644
--- a/src/coreclr/vm/precode.h
+++ b/src/coreclr/vm/precode.h
@@ -225,7 +225,7 @@ struct StubPrecode
pData->Target = (PCODE)target;
}
- static void GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size);
+ static void GenerateCodePage(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T size);
#endif // !DACCESS_COMPILE
};
@@ -428,7 +428,7 @@ struct FixupPrecode
static void StaticInitialize();
- static void GenerateCodePage(uint8_t* pageBase, uint8_t* pageBaseRX, size_t size);
+ static void GenerateCodePage(BYTE* pageBase, BYTE* pageBaseRX, SIZE_T size);
PTR_FixupPrecodeData GetData() const
{
@@ -861,9 +861,4 @@ struct PrecodeMachineDescriptor
};
#endif //DACCESS_COMPILE
-extern InterleavedLoaderHeapConfig s_stubPrecodeHeapConfig;
-#ifdef HAS_FIXUP_PRECODE
-extern InterleavedLoaderHeapConfig s_fixupStubPrecodeHeapConfig;
-#endif
-
#endif // __PRECODE_H__