diff --git a/Directory.Build.props b/Directory.Build.props
index 0b999b18d37dbe..1b441f7f653871 100644
--- a/Directory.Build.props
+++ b/Directory.Build.props
@@ -69,6 +69,7 @@
$(RepoRoot)LICENSE.TXT
+
true
diff --git a/docs/area-owners.md b/docs/area-owners.md
index 9eb3216fec37a0..1cdef1b4cb9829 100644
--- a/docs/area-owners.md
+++ b/docs/area-owners.md
@@ -14,7 +14,7 @@ Below table shows the combined area owners on this repository:
| area-HostModel | @vitek-karas @swaroop-sridhar | |
| area-ILTools-coreclr | @BruceForstall @dotnet/jit-contrib | |
| area-Infrastructure-coreclr | @jeffschwMSFT @jashook @trylek | |
-| area-Infrastructure-installer | @dleeapho @dagood | |
+| area-Infrastructure-installer | @dleeapho @NikolaMilosavljevic | |
| area-Infrastructure-libraries | @ViktorHofer @ericstj @safern @Anipik | Covers:
- Packaging
- Build and test infra for libraries in dotnet/runtime repo
- VS integration
|
| area-Infrastructure | @ViktorHofer @jeffschwMSFT @dleeapho | |
| area-Interop-coreclr | @jeffschwMSFT @AaronRobinsonMSFT | |
@@ -22,7 +22,7 @@ Below table shows the combined area owners on this repository:
| area-PAL-coreclr | @janvorli | |
| area-R2RDump-coreclr | @nattress | |
| area-ReadyToRun-coreclr | @nattress | |
-| area-Setup | @dagood @dleeapho | Distro-specific (Linux, Mac and Windows) setup packages and msi files |
+| area-Setup | @NikolaMilosavljevic @dleeapho | Distro-specific (Linux, Mac and Windows) setup packages and msi files |
| area-Single-File | @swaroop-sridhar | |
| area-SDK | @janvorli | General development issues and overlap with the SDK and CLI |
| area-Serialization | @StephenMolloy @HongGit | Packages:- System.Runtime.Serialization.Xml
- System.Runtime.Serialization.Json
- System.Private.DataContractSerialization
- System.Xml.XmlSerializer
Excluded:- System.Runtime.Serialization.Formatters
|
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/CMakeLists.txt b/docs/design/coreclr/profiling/sample-profilers/stacksampling/CMakeLists.txt
deleted file mode 100644
index 710fcfd12a2d53..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/CMakeLists.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-cmake_minimum_required (VERSION 3.14)
-
-project(CorProfiler)
-
-if(NOT WIN32)
- set(BASE_SOURCES )
- add_compile_options(-Wno-invalid-noreturn -Wno-pragma-pack -Wno-int-to-pointer-cast -fPIC -fms-extensions -DBIT64 -DPAL_STDCPP_COMPAT -DPLATFORM_UNIX -DHOST_64BIT -std=c++11)
- add_link_options(--no-undefined -pthread)
-
- include_directories($ENV{CORECLR_PATH}/src/pal/inc/rt $ENV{CORECLR_PATH}/src/pal/inc $ENV{CORECLR_PATH}/src/inc)
-endif(NOT WIN32)
-
-if (WIN32)
- set(BASE_SOURCES src/CorProfiler.def)
-endif(WIN32)
-
-include_directories($CORECLR_BIN/inc $ENV{CORECLR_PATH}/src/pal/prebuilt/inc)
-
-set(SOURCES ${BASE_SOURCES} src/ClassFactory.cpp src/CorProfiler.cpp src/dllmain.cpp src/sampler.cpp $ENV{CORECLR_PATH}/src/pal/prebuilt/idl/corprof_i.cpp)
-
-add_library(CorProfiler SHARED ${SOURCES})
\ No newline at end of file
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/build.cmd b/docs/design/coreclr/profiling/sample-profilers/stacksampling/build.cmd
deleted file mode 100644
index 331dcf5e05b527..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/build.cmd
+++ /dev/null
@@ -1,66 +0,0 @@
-@echo off
-setlocal
-
-if not defined BuildOS (
- set BuildOS=Windows
-)
-
-if not defined BuildArch (
- set BuildArch=x64
-)
-
-if not defined BuildType (
- set BuildType=Debug
-)
-
-if not defined CORECLR_PATH (
- set CORECLR_PATH=C:/git/runtime/src/coreclr
-)
-
-if not defined CORECLR_BIN (
- set CORECLR_BIN=C:/git/runtime/artifacts/bin/coreclr/%BuildOS%.%BuildArch%.%BuildType%
-)
-
-set VS_COM_CMD_PATH="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\Tools\VsDevCmd.bat"
-
-if not defined VS_CMD_PATH (
- if exist %VS_COM_CMD_PATH% (
- set VS_CMD_PATH=%VS_COM_CMD_PATH%
- ) else (
- echo No VS developer command prompt detected!
- goto :EOF
- )
-)
-
-echo CORECLR_PATH : %CORECLR_PATH%
-echo BuildOS : %BuildOS%
-echo BuildArch : %BuildArch%
-echo BuildType : %BuildType%
-echo VS PATH : %VS_CMD_PATH%
-
-echo.
-echo Building
-
-if not exist bin\ (
- mkdir bin
-)
-
-pushd bin
-
-cmake -G "Visual Studio 16 2019" ..\ -DCMAKE_BUILD_TYPE=Debug
-
-echo Calling VS Developer Command Prompt to build
-call %VS_CMD_PATH%
-
-msbuild -v:m CorProfiler.sln
-
-popd
-
-echo.
-echo.
-echo.
-echo Done building
-
-echo Copying binary to main directory
-copy /y bin\Debug\CorProfiler.dll .
-
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/build.sh b/docs/design/coreclr/profiling/sample-profilers/stacksampling/build.sh
deleted file mode 100644
index 78bb71c799fa5d..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/build.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-[ -z "${BuildOS:-}" ] && export BuildOS=Linux
-[ -z "${BuildArch:-}" ] && export BuildArch=x64
-[ -z "${BuildType:-}" ] && export BuildType=Debug
-
-[ -z "${CORECLR_PATH:-}" ] && export CORECLR_PATH=~/git/runtime/src/coreclr
-[ -z "${CORECLR_BIN:-}" ] && export CORECLR_BIN=~/git/runtime/artifacts/bin/coreclr/$BuildOS.$BuildArch.$BuildType
-
-printf ' CORECLR_PATH : %s\n' "$CORECLR_PATH"
-printf ' BuildOS : %s\n' "$BuildOS"
-printf ' BuildArch : %s\n' "$BuildArch"
-printf ' BuildType : %s\n' "$BuildType"
-
-printf ' Building ...'
-
-if [ ! -d "bin/" ]; then
- mkdir bin/
-fi
-
-pushd bin
-
-export CC=/usr/bin/clang
-export CXX=/usr/bin/clang++
-cmake ../ -DCMAKE_BUILD_TYPE=Debug
-
-make -j8
-
-popd
-
-printf ' Copying libCorProfiler.so to main directory\n'
-cp bin/libCorProfiler.so .
-
-printf 'Done.\n'
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/ClassFactory.cpp b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/ClassFactory.cpp
deleted file mode 100644
index 87db0a75b8b4c1..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/ClassFactory.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#include "ClassFactory.h"
-#include "CorProfiler.h"
-
-ClassFactory::ClassFactory() : refCount(0)
-{
-}
-
-ClassFactory::~ClassFactory()
-{
-}
-
-HRESULT STDMETHODCALLTYPE ClassFactory::QueryInterface(REFIID riid, void **ppvObject)
-{
- if (riid == IID_IUnknown || riid == IID_IClassFactory)
- {
- *ppvObject = this;
- this->AddRef();
- return S_OK;
- }
-
- *ppvObject = nullptr;
- return E_NOINTERFACE;
-}
-
-ULONG STDMETHODCALLTYPE ClassFactory::AddRef()
-{
- return std::atomic_fetch_add(&this->refCount, 1) + 1;
-}
-
-ULONG STDMETHODCALLTYPE ClassFactory::Release()
-{
- int count = std::atomic_fetch_sub(&this->refCount, 1) - 1;
- if (count <= 0)
- {
- delete this;
- }
-
- return count;
-}
-
-HRESULT STDMETHODCALLTYPE ClassFactory::CreateInstance(IUnknown *pUnkOuter, REFIID riid, void **ppvObject)
-{
- if (pUnkOuter != nullptr)
- {
- *ppvObject = nullptr;
- return CLASS_E_NOAGGREGATION;
- }
-
- CorProfiler* profiler = new CorProfiler();
- if (profiler == nullptr)
- {
- return E_FAIL;
- }
-
- return profiler->QueryInterface(riid, ppvObject);
-}
-
-HRESULT STDMETHODCALLTYPE ClassFactory::LockServer(BOOL fLock)
-{
- return S_OK;
-}
\ No newline at end of file
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/ClassFactory.h b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/ClassFactory.h
deleted file mode 100644
index 10368a604b49d6..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/ClassFactory.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#pragma once
-
-#include "unknwn.h"
-#include
-
-class ClassFactory : public IClassFactory
-{
-private:
- std::atomic refCount;
-public:
- ClassFactory();
- virtual ~ClassFactory();
- HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject) override;
- ULONG STDMETHODCALLTYPE AddRef(void) override;
- ULONG STDMETHODCALLTYPE Release(void) override;
- HRESULT STDMETHODCALLTYPE CreateInstance(IUnknown *pUnkOuter, REFIID riid, void **ppvObject) override;
- HRESULT STDMETHODCALLTYPE LockServer(BOOL fLock) override;
-};
\ No newline at end of file
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.cpp b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.cpp
deleted file mode 100644
index 8d29e1c22444f1..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.cpp
+++ /dev/null
@@ -1,510 +0,0 @@
-// Copyright (c) .NET Foundation and contributors. All rights reserved.
-// Licensed under the MIT license. See LICENSE file in the project root for full license information.
-
-#include "CorProfiler.h"
-#include "corhlpr.h"
-#include "profiler_pal.h"
-#include "sampler.h"
-#include
-#include
-
-using std::shared_ptr;
-
-CorProfiler::CorProfiler() :
- refCount(0),
- corProfilerInfo(nullptr),
- sampler(),
- jitEventCount(0)
-{
-
-}
-
-CorProfiler::~CorProfiler()
-{
- if (this->corProfilerInfo != nullptr)
- {
- this->corProfilerInfo->Release();
- this->corProfilerInfo = nullptr;
- }
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::Initialize(IUnknown *pICorProfilerInfoUnk)
-{
- printf("Initialize profiler!\n");
-
- HRESULT hr = pICorProfilerInfoUnk->QueryInterface(IID_ICorProfilerInfo10, (void**)&corProfilerInfo);
- if (hr != S_OK)
- {
- printf("Got HR %X from QI for ICorProfilerInfo4", hr);
- return E_FAIL;
- }
-
- corProfilerInfo->SetEventMask2(COR_PRF_ENABLE_STACK_SNAPSHOT | COR_PRF_MONITOR_JIT_COMPILATION, 0);
-
- sampler = shared_ptr(new Sampler(corProfilerInfo, this));
- sampler->Start();
-
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::Shutdown()
-{
- if (this->corProfilerInfo != nullptr)
- {
- this->corProfilerInfo->Release();
- this->corProfilerInfo = nullptr;
- }
-
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AppDomainCreationStarted(AppDomainID appDomainId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AppDomainCreationFinished(AppDomainID appDomainId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AppDomainShutdownStarted(AppDomainID appDomainId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AppDomainShutdownFinished(AppDomainID appDomainId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AssemblyLoadStarted(AssemblyID assemblyId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AssemblyLoadFinished(AssemblyID assemblyId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AssemblyUnloadStarted(AssemblyID assemblyId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::AssemblyUnloadFinished(AssemblyID assemblyId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ModuleLoadStarted(ModuleID moduleId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ModuleLoadFinished(ModuleID moduleId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ModuleUnloadStarted(ModuleID moduleId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ModuleUnloadFinished(ModuleID moduleId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ModuleAttachedToAssembly(ModuleID moduleId, AssemblyID AssemblyId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ClassLoadStarted(ClassID classId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ClassLoadFinished(ClassID classId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ClassUnloadStarted(ClassID classId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ClassUnloadFinished(ClassID classId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::FunctionUnloadStarted(FunctionID functionId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::JITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::JITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock)
-{
- ++jitEventCount;
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::JITCachedFunctionSearchStarted(FunctionID functionId, BOOL *pbUseCachedFunction)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::JITCachedFunctionSearchFinished(FunctionID functionId, COR_PRF_JIT_CACHE result)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::JITFunctionPitched(FunctionID functionId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::JITInlining(FunctionID callerId, FunctionID calleeId, BOOL *pfShouldInline)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ThreadCreated(ThreadID threadId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ThreadDestroyed(ThreadID threadId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ThreadAssignedToOSThread(ThreadID managedThreadId, DWORD osThreadId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingClientInvocationStarted()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingClientSendingMessage(GUID *pCookie, BOOL fIsAsync)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingClientReceivingReply(GUID *pCookie, BOOL fIsAsync)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingClientInvocationFinished()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingServerReceivingMessage(GUID *pCookie, BOOL fIsAsync)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingServerInvocationStarted()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingServerInvocationReturned()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RemotingServerSendingReply(GUID *pCookie, BOOL fIsAsync)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::UnmanagedToManagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ManagedToUnmanagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RuntimeSuspendStarted(COR_PRF_SUSPEND_REASON suspendReason)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RuntimeSuspendFinished()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RuntimeSuspendAborted()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RuntimeResumeStarted()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RuntimeResumeFinished()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RuntimeThreadSuspended(ThreadID threadId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RuntimeThreadResumed(ThreadID threadId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::MovedReferences(ULONG cMovedObjectIDRanges, ObjectID oldObjectIDRangeStart[], ObjectID newObjectIDRangeStart[], ULONG cObjectIDRangeLength[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ObjectAllocated(ObjectID objectId, ClassID classId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ObjectsAllocatedByClass(ULONG cClassCount, ClassID classIds[], ULONG cObjects[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ObjectReferences(ObjectID objectId, ClassID classId, ULONG cObjectRefs, ObjectID objectRefIds[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RootReferences(ULONG cRootRefs, ObjectID rootRefIds[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionThrown(ObjectID thrownObjectId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionSearchFunctionEnter(FunctionID functionId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionSearchFunctionLeave()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionSearchFilterEnter(FunctionID functionId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionSearchFilterLeave()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionSearchCatcherFound(FunctionID functionId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionOSHandlerEnter(UINT_PTR __unused)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionOSHandlerLeave(UINT_PTR __unused)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionUnwindFunctionEnter(FunctionID functionId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionUnwindFunctionLeave()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionUnwindFinallyEnter(FunctionID functionId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionUnwindFinallyLeave()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionCatcherEnter(FunctionID functionId, ObjectID objectId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionCatcherLeave()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::COMClassicVTableCreated(ClassID wrappedClassId, REFGUID implementedIID, void *pVTable, ULONG cSlots)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::COMClassicVTableDestroyed(ClassID wrappedClassId, REFGUID implementedIID, void *pVTable)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionCLRCatcherFound()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ExceptionCLRCatcherExecute()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ThreadNameChanged(ThreadID threadId, ULONG cchName, WCHAR name[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::GarbageCollectionStarted(int cGenerations, BOOL generationCollected[], COR_PRF_GC_REASON reason)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::SurvivingReferences(ULONG cSurvivingObjectIDRanges, ObjectID objectIDRangeStart[], ULONG cObjectIDRangeLength[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::GarbageCollectionFinished()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::FinalizeableObjectQueued(DWORD finalizerFlags, ObjectID objectID)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::RootReferences2(ULONG cRootRefs, ObjectID rootRefIds[], COR_PRF_GC_ROOT_KIND rootKinds[], COR_PRF_GC_ROOT_FLAGS rootFlags[], UINT_PTR rootIds[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::HandleCreated(GCHandleID handleId, ObjectID initialObjectId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::HandleDestroyed(GCHandleID handleId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::InitializeForAttach(IUnknown *pCorProfilerInfoUnk, void *pvClientData, UINT cbClientData)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ProfilerAttachComplete()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ProfilerDetachSucceeded()
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ReJITCompilationStarted(FunctionID functionId, ReJITID rejitId, BOOL fIsSafeToBlock)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::GetReJITParameters(ModuleID moduleId, mdMethodDef methodId, ICorProfilerFunctionControl *pFunctionControl)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ReJITCompilationFinished(FunctionID functionId, ReJITID rejitId, HRESULT hrStatus, BOOL fIsSafeToBlock)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ReJITError(ModuleID moduleId, mdMethodDef methodId, FunctionID functionId, HRESULT hrStatus)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::MovedReferences2(ULONG cMovedObjectIDRanges, ObjectID oldObjectIDRangeStart[], ObjectID newObjectIDRangeStart[], SIZE_T cObjectIDRangeLength[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::SurvivingReferences2(ULONG cSurvivingObjectIDRanges, ObjectID objectIDRangeStart[], SIZE_T cObjectIDRangeLength[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ConditionalWeakTableElementReferences(ULONG cRootRefs, ObjectID keyRefIds[], ObjectID valueRefIds[], GCHandleID rootIds[])
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::GetAssemblyReferences(const WCHAR *wszAssemblyPath, ICorProfilerAssemblyReferenceProvider *pAsmRefProvider)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::ModuleInMemorySymbolsUpdated(ModuleID moduleId)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::DynamicMethodJITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock, LPCBYTE ilHeader, ULONG cbILHeader)
-{
- return S_OK;
-}
-
-HRESULT STDMETHODCALLTYPE CorProfiler::DynamicMethodJITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock)
-{
- return S_OK;
-}
-
-bool CorProfiler::IsRuntimeExecutingManagedCode()
-{
- return jitEventCount.load() > 0;
-}
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.def b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.def
deleted file mode 100644
index ddb536cb3066f3..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.def
+++ /dev/null
@@ -1,4 +0,0 @@
-LIBRARY CorProfiler
-EXPORTS
- DllGetClassObject private
-
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.h b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.h
deleted file mode 100644
index 1d41f6bfd7a2d9..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/CorProfiler.h
+++ /dev/null
@@ -1,211 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include "cor.h"
-#include "corprof.h"
-#include "sampler.h"
-
-#define SHORT_LENGTH 32
-#define STRING_LENGTH 256
-#define LONG_LENGTH 1024
-
-template
-class COMPtrHolder
-{
-public:
- COMPtrHolder()
- {
- m_ptr = NULL;
- }
-
- COMPtrHolder(MetaInterface* ptr)
- {
- if (ptr != NULL)
- {
- ptr->AddRef();
- }
- m_ptr = ptr;
- }
-
- ~COMPtrHolder()
- {
- if (m_ptr != NULL)
- {
- m_ptr->Release();
- m_ptr = NULL;
- }
- }
- MetaInterface* operator->()
- {
- return m_ptr;
- }
-
- MetaInterface** operator&()
- {
- // _ASSERT(m_ptr == NULL);
- return &m_ptr;
- }
-
- operator MetaInterface*()
- {
- return m_ptr;
- }
-private:
- MetaInterface* m_ptr;
-};
-
-class CorProfiler : public ICorProfilerCallback8
-{
-private:
- std::atomic refCount;
- std::shared_ptr sampler;
-
- std::atomic jitEventCount;
-
-public:
- ICorProfilerInfo10* corProfilerInfo;
-
- CorProfiler();
- virtual ~CorProfiler();
- HRESULT STDMETHODCALLTYPE Initialize(IUnknown* pICorProfilerInfoUnk) override;
- HRESULT STDMETHODCALLTYPE Shutdown() override;
- HRESULT STDMETHODCALLTYPE AppDomainCreationStarted(AppDomainID appDomainId) override;
- HRESULT STDMETHODCALLTYPE AppDomainCreationFinished(AppDomainID appDomainId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE AppDomainShutdownStarted(AppDomainID appDomainId) override;
- HRESULT STDMETHODCALLTYPE AppDomainShutdownFinished(AppDomainID appDomainId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE AssemblyLoadStarted(AssemblyID assemblyId) override;
- HRESULT STDMETHODCALLTYPE AssemblyLoadFinished(AssemblyID assemblyId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE AssemblyUnloadStarted(AssemblyID assemblyId) override;
- HRESULT STDMETHODCALLTYPE AssemblyUnloadFinished(AssemblyID assemblyId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE ModuleLoadStarted(ModuleID moduleId) override;
- HRESULT STDMETHODCALLTYPE ModuleLoadFinished(ModuleID moduleId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE ModuleUnloadStarted(ModuleID moduleId) override;
- HRESULT STDMETHODCALLTYPE ModuleUnloadFinished(ModuleID moduleId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE ModuleAttachedToAssembly(ModuleID moduleId, AssemblyID AssemblyId) override;
- HRESULT STDMETHODCALLTYPE ClassLoadStarted(ClassID classId) override;
- HRESULT STDMETHODCALLTYPE ClassLoadFinished(ClassID classId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE ClassUnloadStarted(ClassID classId) override;
- HRESULT STDMETHODCALLTYPE ClassUnloadFinished(ClassID classId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE FunctionUnloadStarted(FunctionID functionId) override;
- HRESULT STDMETHODCALLTYPE JITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock) override;
- HRESULT STDMETHODCALLTYPE JITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock) override;
- HRESULT STDMETHODCALLTYPE JITCachedFunctionSearchStarted(FunctionID functionId, BOOL* pbUseCachedFunction) override;
- HRESULT STDMETHODCALLTYPE JITCachedFunctionSearchFinished(FunctionID functionId, COR_PRF_JIT_CACHE result) override;
- HRESULT STDMETHODCALLTYPE JITFunctionPitched(FunctionID functionId) override;
- HRESULT STDMETHODCALLTYPE JITInlining(FunctionID callerId, FunctionID calleeId, BOOL* pfShouldInline) override;
- HRESULT STDMETHODCALLTYPE ThreadCreated(ThreadID threadId) override;
- HRESULT STDMETHODCALLTYPE ThreadDestroyed(ThreadID threadId) override;
- HRESULT STDMETHODCALLTYPE ThreadAssignedToOSThread(ThreadID managedThreadId, DWORD osThreadId) override;
- HRESULT STDMETHODCALLTYPE RemotingClientInvocationStarted() override;
- HRESULT STDMETHODCALLTYPE RemotingClientSendingMessage(GUID* pCookie, BOOL fIsAsync) override;
- HRESULT STDMETHODCALLTYPE RemotingClientReceivingReply(GUID* pCookie, BOOL fIsAsync) override;
- HRESULT STDMETHODCALLTYPE RemotingClientInvocationFinished() override;
- HRESULT STDMETHODCALLTYPE RemotingServerReceivingMessage(GUID* pCookie, BOOL fIsAsync) override;
- HRESULT STDMETHODCALLTYPE RemotingServerInvocationStarted() override;
- HRESULT STDMETHODCALLTYPE RemotingServerInvocationReturned() override;
- HRESULT STDMETHODCALLTYPE RemotingServerSendingReply(GUID* pCookie, BOOL fIsAsync) override;
- HRESULT STDMETHODCALLTYPE UnmanagedToManagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason) override;
- HRESULT STDMETHODCALLTYPE ManagedToUnmanagedTransition(FunctionID functionId, COR_PRF_TRANSITION_REASON reason) override;
- HRESULT STDMETHODCALLTYPE RuntimeSuspendStarted(COR_PRF_SUSPEND_REASON suspendReason) override;
- HRESULT STDMETHODCALLTYPE RuntimeSuspendFinished() override;
- HRESULT STDMETHODCALLTYPE RuntimeSuspendAborted() override;
- HRESULT STDMETHODCALLTYPE RuntimeResumeStarted() override;
- HRESULT STDMETHODCALLTYPE RuntimeResumeFinished() override;
- HRESULT STDMETHODCALLTYPE RuntimeThreadSuspended(ThreadID threadId) override;
- HRESULT STDMETHODCALLTYPE RuntimeThreadResumed(ThreadID threadId) override;
- HRESULT STDMETHODCALLTYPE MovedReferences(ULONG cMovedObjectIDRanges, ObjectID oldObjectIDRangeStart[], ObjectID newObjectIDRangeStart[], ULONG cObjectIDRangeLength[]) override;
- HRESULT STDMETHODCALLTYPE ObjectAllocated(ObjectID objectId, ClassID classId) override;
- HRESULT STDMETHODCALLTYPE ObjectsAllocatedByClass(ULONG cClassCount, ClassID classIds[], ULONG cObjects[]) override;
- HRESULT STDMETHODCALLTYPE ObjectReferences(ObjectID objectId, ClassID classId, ULONG cObjectRefs, ObjectID objectRefIds[]) override;
- HRESULT STDMETHODCALLTYPE RootReferences(ULONG cRootRefs, ObjectID rootRefIds[]) override;
- HRESULT STDMETHODCALLTYPE ExceptionThrown(ObjectID thrownObjectId) override;
- HRESULT STDMETHODCALLTYPE ExceptionSearchFunctionEnter(FunctionID functionId) override;
- HRESULT STDMETHODCALLTYPE ExceptionSearchFunctionLeave() override;
- HRESULT STDMETHODCALLTYPE ExceptionSearchFilterEnter(FunctionID functionId) override;
- HRESULT STDMETHODCALLTYPE ExceptionSearchFilterLeave() override;
- HRESULT STDMETHODCALLTYPE ExceptionSearchCatcherFound(FunctionID functionId) override;
- HRESULT STDMETHODCALLTYPE ExceptionOSHandlerEnter(UINT_PTR __unused) override;
- HRESULT STDMETHODCALLTYPE ExceptionOSHandlerLeave(UINT_PTR __unused) override;
- HRESULT STDMETHODCALLTYPE ExceptionUnwindFunctionEnter(FunctionID functionId) override;
- HRESULT STDMETHODCALLTYPE ExceptionUnwindFunctionLeave() override;
- HRESULT STDMETHODCALLTYPE ExceptionUnwindFinallyEnter(FunctionID functionId) override;
- HRESULT STDMETHODCALLTYPE ExceptionUnwindFinallyLeave() override;
- HRESULT STDMETHODCALLTYPE ExceptionCatcherEnter(FunctionID functionId, ObjectID objectId) override;
- HRESULT STDMETHODCALLTYPE ExceptionCatcherLeave() override;
- HRESULT STDMETHODCALLTYPE COMClassicVTableCreated(ClassID wrappedClassId, REFGUID implementedIID, void* pVTable, ULONG cSlots) override;
- HRESULT STDMETHODCALLTYPE COMClassicVTableDestroyed(ClassID wrappedClassId, REFGUID implementedIID, void* pVTable) override;
- HRESULT STDMETHODCALLTYPE ExceptionCLRCatcherFound() override;
- HRESULT STDMETHODCALLTYPE ExceptionCLRCatcherExecute() override;
- HRESULT STDMETHODCALLTYPE ThreadNameChanged(ThreadID threadId, ULONG cchName, WCHAR name[]) override;
- HRESULT STDMETHODCALLTYPE GarbageCollectionStarted(int cGenerations, BOOL generationCollected[], COR_PRF_GC_REASON reason) override;
- HRESULT STDMETHODCALLTYPE SurvivingReferences(ULONG cSurvivingObjectIDRanges, ObjectID objectIDRangeStart[], ULONG cObjectIDRangeLength[]) override;
- HRESULT STDMETHODCALLTYPE GarbageCollectionFinished() override;
- HRESULT STDMETHODCALLTYPE FinalizeableObjectQueued(DWORD finalizerFlags, ObjectID objectID) override;
- HRESULT STDMETHODCALLTYPE RootReferences2(ULONG cRootRefs, ObjectID rootRefIds[], COR_PRF_GC_ROOT_KIND rootKinds[], COR_PRF_GC_ROOT_FLAGS rootFlags[], UINT_PTR rootIds[]) override;
- HRESULT STDMETHODCALLTYPE HandleCreated(GCHandleID handleId, ObjectID initialObjectId) override;
- HRESULT STDMETHODCALLTYPE HandleDestroyed(GCHandleID handleId) override;
- HRESULT STDMETHODCALLTYPE InitializeForAttach(IUnknown* pCorProfilerInfoUnk, void* pvClientData, UINT cbClientData) override;
- HRESULT STDMETHODCALLTYPE ProfilerAttachComplete() override;
- HRESULT STDMETHODCALLTYPE ProfilerDetachSucceeded() override;
- HRESULT STDMETHODCALLTYPE ReJITCompilationStarted(FunctionID functionId, ReJITID rejitId, BOOL fIsSafeToBlock) override;
- HRESULT STDMETHODCALLTYPE GetReJITParameters(ModuleID moduleId, mdMethodDef methodId, ICorProfilerFunctionControl* pFunctionControl) override;
- HRESULT STDMETHODCALLTYPE ReJITCompilationFinished(FunctionID functionId, ReJITID rejitId, HRESULT hrStatus, BOOL fIsSafeToBlock) override;
- HRESULT STDMETHODCALLTYPE ReJITError(ModuleID moduleId, mdMethodDef methodId, FunctionID functionId, HRESULT hrStatus) override;
- HRESULT STDMETHODCALLTYPE MovedReferences2(ULONG cMovedObjectIDRanges, ObjectID oldObjectIDRangeStart[], ObjectID newObjectIDRangeStart[], SIZE_T cObjectIDRangeLength[]) override;
- HRESULT STDMETHODCALLTYPE SurvivingReferences2(ULONG cSurvivingObjectIDRanges, ObjectID objectIDRangeStart[], SIZE_T cObjectIDRangeLength[]) override;
- HRESULT STDMETHODCALLTYPE ConditionalWeakTableElementReferences(ULONG cRootRefs, ObjectID keyRefIds[], ObjectID valueRefIds[], GCHandleID rootIds[]) override;
- HRESULT STDMETHODCALLTYPE GetAssemblyReferences(const WCHAR* wszAssemblyPath, ICorProfilerAssemblyReferenceProvider* pAsmRefProvider) override;
- HRESULT STDMETHODCALLTYPE ModuleInMemorySymbolsUpdated(ModuleID moduleId) override;
-
- HRESULT STDMETHODCALLTYPE DynamicMethodJITCompilationStarted(FunctionID functionId, BOOL fIsSafeToBlock, LPCBYTE ilHeader, ULONG cbILHeader) override;
- HRESULT STDMETHODCALLTYPE DynamicMethodJITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock) override;
-
- HRESULT STDMETHODCALLTYPE QueryInterface(REFIID riid, void **ppvObject) override
- {
- if (riid == __uuidof(ICorProfilerCallback8) ||
- riid == __uuidof(ICorProfilerCallback7) ||
- riid == __uuidof(ICorProfilerCallback6) ||
- riid == __uuidof(ICorProfilerCallback5) ||
- riid == __uuidof(ICorProfilerCallback4) ||
- riid == __uuidof(ICorProfilerCallback3) ||
- riid == __uuidof(ICorProfilerCallback2) ||
- riid == __uuidof(ICorProfilerCallback) ||
- riid == IID_IUnknown)
- {
- *ppvObject = this;
- this->AddRef();
- return S_OK;
- }
-
- *ppvObject = nullptr;
- return E_NOINTERFACE;
- }
-
- ULONG STDMETHODCALLTYPE AddRef(void) override
- {
- return std::atomic_fetch_add(&this->refCount, 1) + 1;
- }
-
- ULONG STDMETHODCALLTYPE Release(void) override
- {
- int count = std::atomic_fetch_sub(&this->refCount, 1) - 1;
-
- if (count <= 0)
- {
- delete this;
- }
-
- return count;
- }
-
- bool IsRuntimeExecutingManagedCode();
-};
\ No newline at end of file
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/dllmain.cpp b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/dllmain.cpp
deleted file mode 100644
index ce04d31387eaa8..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/dllmain.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#include "ClassFactory.h"
-
-const IID IID_IUnknown = { 0x00000000, 0x0000, 0x0000, { 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46 } };
-
-const IID IID_IClassFactory = { 0x00000001, 0x0000, 0x0000, { 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46 } };
-
-BOOL STDMETHODCALLTYPE DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved)
-{
- return TRUE;
-}
-
-extern "C" HRESULT STDMETHODCALLTYPE DllGetClassObject(REFCLSID rclsid, REFIID riid, LPVOID* ppv)
-{
- // {cf0d821e-299b-5307-a3d8-b283c03916dd}
- const GUID CLSID_CorProfiler = { 0xcf0d821e, 0x299b, 0x5307, { 0xa3, 0xd8, 0xb2, 0x83, 0xc0, 0x39, 0x16, 0xdd } };
-
- if (ppv == nullptr || rclsid != CLSID_CorProfiler)
- {
- return E_FAIL;
- }
-
- auto factory = new ClassFactory;
- if (factory == nullptr)
- {
- return E_FAIL;
- }
-
- return factory->QueryInterface(riid, ppv);
-}
-
-extern "C" HRESULT STDMETHODCALLTYPE DllCanUnloadNow()
-{
- return S_OK;
-}
\ No newline at end of file
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/profiler_pal.h b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/profiler_pal.h
deleted file mode 100644
index fc02e24e887d0f..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/profiler_pal.h
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#pragma once
-
-#ifndef WIN32
-#include
-#include "pal_mstypes.h"
-#include "pal.h"
-#include "ntimage.h"
-#include "corhdr.h"
-
-#define CoTaskMemAlloc(cb) malloc(cb)
-#define CoTaskMemFree(cb) free(cb)
-
-#define UINT_PTR_FORMAT "lx"
-
-#define PROFILER_STUB __attribute__((visibility("hidden"))) EXTERN_C void STDMETHODCALLTYPE
-
-#else
-#define PROFILER_STUB EXTERN_C void STDMETHODCALLTYPE
-#define UINT_PTR_FORMAT "llx"
-#endif
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/sampler.cpp b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/sampler.cpp
deleted file mode 100644
index 69ec930200f7b4..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/sampler.cpp
+++ /dev/null
@@ -1,380 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#include "CorProfiler.h"
-#include "sampler.h"
-#include
-#include
-#include
-#include
-#include
-#include
-
-using std::wstring_convert;
-using std::codecvt_utf8;
-using std::string;
-
-ManualEvent Sampler::s_waitEvent;
-Sampler *Sampler::s_instance = nullptr;
-
-HRESULT __stdcall DoStackSnapshotStackSnapShotCallbackWrapper(
- FunctionID funcId,
- UINT_PTR ip,
- COR_PRF_FRAME_INFO frameInfo,
- ULONG32 contextSize,
- BYTE context[],
- void* clientData)
-{
- return Sampler::Instance()->StackSnapshotCallback(funcId,
- ip,
- frameInfo,
- contextSize,
- context,
- clientData);
-}
-
-Sampler::Sampler(ICorProfilerInfo10* pProfInfo, CorProfiler *parent) :
- m_workerThread(DoSampling, pProfInfo, parent)
-{
- Sampler::s_instance = this;
-}
-
-// static
-void Sampler::DoSampling(ICorProfilerInfo10 *pProfInfo, CorProfiler *parent)
-{
- Sampler::Instance()->corProfilerInfo = parent->corProfilerInfo;
-
- pProfInfo->InitializeCurrentThread();
-
- while (true)
- {
- std::this_thread::sleep_for(std::chrono::milliseconds(100));
-
- s_waitEvent.Wait();
-
- if (!parent->IsRuntimeExecutingManagedCode())
- {
- printf("Runtime has not started executing managed code yet.\n");
- continue;
- }
-
- printf("Suspending runtime\n");
- HRESULT hr = pProfInfo->SuspendRuntime();
- if (FAILED(hr))
- {
- printf("Error suspending runtime... hr=0x%x \n", hr);
- continue;
- }
-
- ICorProfilerThreadEnum* threadEnum = nullptr;
- hr = pProfInfo->EnumThreads(&threadEnum);
- if (FAILED(hr))
- {
- printf("Error getting thread enumerator\n");
- continue;
- }
-
- ThreadID threadID;
- ULONG numReturned;
- while ((hr = threadEnum->Next(1, &threadID, &numReturned)) == S_OK)
- {
- printf("Starting stack walk for managed thread id=0x%" PRIx64 "\n", (uint64_t)threadID);
-
- hr = pProfInfo->DoStackSnapshot(threadID,
- DoStackSnapshotStackSnapShotCallbackWrapper,
- COR_PRF_SNAPSHOT_REGISTER_CONTEXT,
- NULL,
- NULL,
- 0);
- if (FAILED(hr))
- {
- if (hr == E_FAIL)
- {
- printf("Managed thread id=0x%" PRIx64 " has no managed frames to walk \n", (uint64_t)threadID);
- }
- else
- {
- printf("DoStackSnapshot for thread id=0x%" PRIx64 " failed with hr=0x%x \n", (uint64_t)threadID, hr);
- }
- }
-
- printf("Ending stack walk for managed thread id=0x%" PRIx64 "\n", (uint64_t)threadID);
- }
-
- printf("Resuming runtime\n");
- hr = pProfInfo->ResumeRuntime();
- if (FAILED(hr))
- {
- printf("ResumeRuntime failed with hr=0x%x \n", hr);
- }
- }
-}
-
-void Sampler::Start()
-{
- s_waitEvent.Signal();
-}
-
-void Sampler::Stop()
-{
- s_waitEvent.Reset();
-}
-
-HRESULT Sampler::StackSnapshotCallback(FunctionID funcId, UINT_PTR ip, COR_PRF_FRAME_INFO frameInfo, ULONG32 contextSize, BYTE context[], void* clientData)
-{
- WSTRING functionName = GetFunctionName(funcId, frameInfo);
-
-#if WIN32
- wstring_convert, wchar_t> convert;
-#else // WIN32
- wstring_convert, char16_t> convert;
-#endif // WIN32
-
- string printable = convert.to_bytes(functionName);
- printf(" %s (funcId=0x%" PRIx64 ")\n", printable.c_str(), (uint64_t)funcId);
- return S_OK;
-}
-
-WSTRING Sampler::GetModuleName(ModuleID modId)
-{
- WCHAR moduleFullName[STRING_LENGTH];
- ULONG nameLength = 0;
- AssemblyID assemID;
-
- if (modId == NULL)
- {
- printf("NULL modId passed to GetModuleName\n");
- return WSTR("Unknown");
- }
-
- HRESULT hr = corProfilerInfo->GetModuleInfo(modId,
- NULL,
- STRING_LENGTH,
- &nameLength,
- moduleFullName,
- &assemID);
- if (FAILED(hr))
- {
- printf("GetModuleInfo failed with hr=0x%x\n", hr);
- return WSTR("Unknown");
- }
-
- WCHAR *ptr = NULL;
- WCHAR *index = moduleFullName;
- // Find the last occurence of the \ character
- while (*index != 0)
- {
- if (*index == '\\' || *index == '/')
- {
- ptr = index;
- }
-
- ++index;
- }
-
- if (ptr == NULL)
- {
- return moduleFullName;
- }
- // Skip the last \ in the string
- ++ptr;
-
- WSTRING moduleName;
- while (*ptr != 0)
- {
- moduleName += *ptr;
- ++ptr;
- }
-
- return moduleName;
-}
-
-
-WSTRING Sampler::GetClassName(ClassID classId)
-{
- ModuleID modId;
- mdTypeDef classToken;
- ClassID parentClassID;
- ULONG32 nTypeArgs;
- ClassID typeArgs[SHORT_LENGTH];
- HRESULT hr = S_OK;
-
- if (classId == NULL)
- {
- printf("NULL classId passed to GetClassName\n");
- return WSTR("Unknown");
- }
-
- hr = corProfilerInfo->GetClassIDInfo2(classId,
- &modId,
- &classToken,
- &parentClassID,
- SHORT_LENGTH,
- &nTypeArgs,
- typeArgs);
- if (CORPROF_E_CLASSID_IS_ARRAY == hr)
- {
- // We have a ClassID of an array.
- return WSTR("ArrayClass");
- }
- else if (CORPROF_E_CLASSID_IS_COMPOSITE == hr)
- {
- // We have a composite class
- return WSTR("CompositeClass");
- }
- else if (CORPROF_E_DATAINCOMPLETE == hr)
- {
- // type-loading is not yet complete. Cannot do anything about it.
- return WSTR("DataIncomplete");
- }
- else if (FAILED(hr))
- {
- printf("GetClassIDInfo returned hr=0x%x for classID=0x%" PRIx64 "\n", hr, (uint64_t)classId);
- return WSTR("Unknown");
- }
-
- COMPtrHolder pMDImport;
- hr = corProfilerInfo->GetModuleMetaData(modId,
- (ofRead | ofWrite),
- IID_IMetaDataImport,
- (IUnknown **)&pMDImport );
- if (FAILED(hr))
- {
- printf("GetModuleMetaData failed with hr=0x%x\n", hr);
- return WSTR("Unknown");
- }
-
-
- WCHAR wName[LONG_LENGTH];
- DWORD dwTypeDefFlags = 0;
- hr = pMDImport->GetTypeDefProps(classToken,
- wName,
- LONG_LENGTH,
- NULL,
- &dwTypeDefFlags,
- NULL);
- if (FAILED(hr))
- {
- printf("GetTypeDefProps failed with hr=0x%x\n", hr);
- return WSTR("Unknown");
- }
-
- WSTRING name = GetModuleName(modId);
- name += WSTR(" ");
- name += wName;
-
- if (nTypeArgs > 0)
- {
- name += WSTR("<");
- }
-
- for(ULONG32 i = 0; i < nTypeArgs; i++)
- {
- name += GetClassName(typeArgs[i]);
-
- if ((i + 1) != nTypeArgs)
- {
- name += WSTR(", ");
- }
- }
-
- if (nTypeArgs > 0)
- {
- name += WSTR(">");
- }
-
- return name;
-}
-
-WSTRING Sampler::GetFunctionName(FunctionID funcID, const COR_PRF_FRAME_INFO frameInfo)
-{
- if (funcID == NULL)
- {
- return WSTR("Unknown_Native_Function");
- }
-
- ClassID classId = NULL;
- ModuleID moduleId = NULL;
- mdToken token = NULL;
- ULONG32 nTypeArgs = NULL;
- ClassID typeArgs[SHORT_LENGTH];
-
- HRESULT hr = corProfilerInfo->GetFunctionInfo2(funcID,
- frameInfo,
- &classId,
- &moduleId,
- &token,
- SHORT_LENGTH,
- &nTypeArgs,
- typeArgs);
- if (FAILED(hr))
- {
- printf("GetFunctionInfo2 failed with hr=0x%x\n", hr);
- }
-
- COMPtrHolder pIMDImport;
- hr = corProfilerInfo->GetModuleMetaData(moduleId,
- ofRead,
- IID_IMetaDataImport,
- (IUnknown **)&pIMDImport);
- if (FAILED(hr))
- {
- printf("GetModuleMetaData failed with hr=0x%x\n", hr);
- }
-
- WCHAR funcName[STRING_LENGTH];
- hr = pIMDImport->GetMethodProps(token,
- NULL,
- funcName,
- STRING_LENGTH,
- 0,
- 0,
- NULL,
- NULL,
- NULL,
- NULL);
- if (FAILED(hr))
- {
- printf("GetMethodProps failed with hr=0x%x\n", hr);
- }
-
- WSTRING name;
-
- // If the ClassID returned from GetFunctionInfo is 0, then the function is a shared generic function.
- if (classId != 0)
- {
- name += GetClassName(classId);
- }
- else
- {
- name += WSTR("SharedGenericFunction");
- }
-
- name += WSTR("::");
-
- name += funcName;
-
- // Fill in the type parameters of the generic method
- if (nTypeArgs > 0)
- {
- name += WSTR("<");
- }
-
- for(ULONG32 i = 0; i < nTypeArgs; i++)
- {
- name += GetClassName(typeArgs[i]);
-
- if ((i + 1) != nTypeArgs)
- {
- name += WSTR(", ");
- }
- }
-
- if (nTypeArgs > 0)
- {
- name += WSTR(">");
- }
-
- return name;
-}
diff --git a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/sampler.h b/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/sampler.h
deleted file mode 100644
index 8cd0fd17dd47e3..00000000000000
--- a/docs/design/coreclr/profiling/sample-profilers/stacksampling/src/sampler.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-
-#if WIN32
-#define WSTRING std::wstring
-#define WSTR(str) L##str
-#else // WIN32
-#define WSTRING std::u16string
-#define WSTR(str) u##str
-#endif // WIN32
-
-class CorProfiler;
-
-class ManualEvent
-{
-private:
- std::mutex m_mtx;
- std::condition_variable m_cv;
- bool m_set = false;
-
- static void DoNothing()
- {
-
- }
-
-public:
- ManualEvent() = default;
- ~ManualEvent() = default;
- ManualEvent(ManualEvent& other) = delete;
- ManualEvent(ManualEvent&& other) = delete;
- ManualEvent& operator= (ManualEvent& other) = delete;
- ManualEvent& operator= (ManualEvent&& other) = delete;
-
- void Wait(std::function spuriousCallback = DoNothing)
- {
- std::unique_lock lock(m_mtx);
- while (!m_set)
- {
- m_cv.wait(lock, [&]() { return m_set; });
- if (!m_set)
- {
- spuriousCallback();
- }
- }
- }
-
- void Signal()
- {
- std::unique_lock lock(m_mtx);
- m_set = true;
- }
-
- void Reset()
- {
- std::unique_lock lock(m_mtx);
- m_set = false;
- }
-};
-
-class Sampler
-{
-private:
- static Sampler* s_instance;
-
- std::thread m_workerThread;
- static ManualEvent s_waitEvent;
-
- ICorProfilerInfo10* corProfilerInfo;
-
- static void DoSampling(ICorProfilerInfo10* pProfInfo, CorProfiler *parent);
-
- WSTRING GetClassName(ClassID classId);
- WSTRING GetModuleName(ModuleID modId);
- WSTRING GetFunctionName(FunctionID funcID, const COR_PRF_FRAME_INFO frameInfo);
-public:
- static Sampler* Instance()
- {
- return s_instance;
- }
-
- Sampler(ICorProfilerInfo10* pProfInfo, CorProfiler *parent);
- ~Sampler() = default;
-
- void Start();
- void Stop();
-
- HRESULT StackSnapshotCallback(FunctionID funcId,
- UINT_PTR ip,
- COR_PRF_FRAME_INFO frameInfo,
- ULONG32 contextSize,
- BYTE context[],
- void* clientData);
-};
-
diff --git a/docs/design/features/OnStackReplacement.md b/docs/design/features/OnStackReplacement.md
new file mode 100644
index 00000000000000..905696875046ea
--- /dev/null
+++ b/docs/design/features/OnStackReplacement.md
@@ -0,0 +1,1111 @@
+# On Stack Replacement in the CLR
+
+Design Sketch and Prototype Assessment
+
+Andy Ayers
+
+Initial: 7 July 2019 —
+Revised: 25 February 2020
+
+## Overview
+
+On Stack Replacement allows the code executed by currently running methods to be
+changed in the middle of method execution, while those methods are active "on
+stack." This document describes design considerations and challenges involved in
+implementing basic On Stack Replacement for the CLR, presents the results of
+some prototype investigations, and describes how OSR might be used to re-host
+Edit and Continue and support more general transitions like deoptimization.
+
+* [Background](#1-Background)
+* [Design Principles](#2-Design-Principles)
+* [An Overview of OSR](#3-An-Overview-of-OSR)
+* [Complications](#4-Complications)
+* [The Prototype](#5-The-Prototype)
+* [Edit and Continue](#6-Edit-and-Continue)
+* [Deoptimization](#7-Deoptimization)
+* [References](#8-References)
+
+## 1. Background
+
+On Stack Replacement (hereafter _OSR_) refers to a set of techniques for
+migrating active stack frames from one version of code to another.
+
+The two versions of the code involved in OSR may arise from different program
+sources (as in Edit and Continue) or different approaches to compiling or
+executing a single program (say, unoptimized code versus optimized code). The
+goal of OSR is to transparently redirect execution from an old version of code
+into a new version, even when in the middle of executing the old version.
+
+Initial work on OSR was pioneered in Self [[1](#1)] as an approach for debugging
+optimized code. But in the years since, OSR has mainly seen adoption on
+platforms like Java [[2](#2), [3](#3)] and JavaScript that rely heavily on
+adaptive recompilation of code.
+
+The ability to adaptively recompile and switch code versions while methods are
+running provides some key advantages:
+
+* Platforms can offer both quick start up and excellent steady-state
+ performance, interpreting or quickly jitting to enable initial method
+ execution, and using OSR to update the methods with better performing or more
+ completely compiled versions as needed.
+
+* Platforms can take advantage of transient program facts and recover when those
+ facts no longer become true. For example, a platform may compile virtual or
+ interface calls as direct calls initially and use OSR to update to more
+ general versions of code when overriding methods or other interface
+ implementations arrive on scene.
+
+The CLR already supports various mechanisms for changing the code for a method
+in a runtime instance. Edit and Continue implements true OSR but is supported
+only on some architectures, works only when code is running under a managed
+debugger, and is supported only for unoptimized to unoptimized code. Profiler
+rejit and tiered compilation can update code used in future invocations of
+methods, but not code running in currently active methods.
+
+In this document we will vary a bit from the literature and use OSR to refer
+strictly to the case where we are transitioning execution **from** an
+unoptimized code instance (either to another unoptimized instance or an
+optimized instance). We will use _deoptimization_ (_deopt_) to describe the
+transition from an optimized code instance to some other code instance
+(typically to an unoptimized instance).
+
+We envision OSR as a technology that will allow us to enable tiered compilation
+by default: performance-critical applications will no longer risk seeing key
+methods trapped in unoptimized tier0 code, and straightforwardly written
+microbenchmarks (e.g. all code in main) will perform as expected, as no matter
+how they are coded, they will be able to transition to optimized code.
+
+OSR also provides key building blocks for an eventual implementation of deopt
+and the ability of our platforms to make strong speculative bets in code
+generation.
+
+In addition, OSR will also allow us to experiment with so-called _deferred
+compilation_, where the jit initially only compiles parts of methods that it
+believes likely to execute (say, based on heuristics or prior runs). If an
+uncompiled part of a method is reached at runtime, OSR can trigger recompilation
+of the missing part or recompilation of the entire method.
+
+The remainder of this document describes OSR in more detail, providing a design
+sketch and some key design choice points, the results and insights gained from
+creating a fully functional prototype, and a list of open issues and areas
+requiring further investigation. We will also mention _deopt_ in passing and
+describe why it presents a different and larger set of challenges.
+
+## 2. Design Principles
+
+As we consider proposals for implementing OSR, we will try and satisfy the
+following design principles:
+
+* Pay as you go. The costs of OSR should be limited to methods that can benefit
+ from OSR, and where possible, paid largely when OSR actually happens.
+
+* Impose few restrictions on optimized codegen. We should not have to restrict
+ or dumb down optimized codegen to allow transitions to it via OSR
+
+* Anticipate likely changes in jit codegen strategy. We should support enabling
+ some optimizations (devirtualization, early branch pruning, some expression
+ opts) at Tier0 without having to radically alter our approach.
+
+* Design for testability. We should be able to force OSR transitions wherever
+ possible and with alternative stress strategies.
+
+* Full diagnostic experience. OSR should not inhibit user ability to debug or
+ reason about logical behavior of their programs. OSR activities should be
+ tracked via suitable eventing mechanisms.
+
+## 3. An Overview of OSR
+
+OSR enables transitioning from older unoptimized code to new code
+while the old code is active in some stack frames. An implementation
+must come up with solutions to several related sub problems, which we
+describe briefly here, and in more detail below.
+
+* **Patchpoints** : Identify where in the original method OSR is possible.
+We will use the term _patchpoint_ to describe a particular location in a
+method's code that supports OSR transitions.
+* **Triggers** : Determine what will trigger an OSR transition
+* **Alternatives** : Have means to prepare a suitable alternative code
+version covering all or part of the method (loops, for instance), and
+having one or possibly many entry points.
+* **Transitions**: Remap the stack frame(s) as needed to carry out the
+OSR transition
+
+### 3.1 Patchpoints
+
+A _patchpoint_ is a point in a version of code where OSR is possible.
+Patchpoints are similar in many ways to GC safepoints. At a patchpoint, the live
+state of the ongoing computation must be identifiable (for a GC safepoint, only
+the live GC references need be so identified). All live registers and stack
+slots must be enumerable, and logically described in terms of concepts visible
+in the IL. Additional state like the return address, implicit arguments, and so
+on must also be accounted for.
+
+As with GC safepoints, patchpoints can be handled in a _fully interruptible_
+manner where most any instruction boundary is a patchpoint, or a _partially
+interruptible_ manner, where only some instruction boundaries are patchpoints.
+Also, as with GC, it is acceptable (if suboptimal) to over-identify the live
+state at a patch point. For instance, the live set can include values that never
+end up being consumed by the new method (the upshot here is that we can simply
+decide all the visible IL state is live everywhere, and so avoid running
+liveness analysis in Tier0.)
+
+Also, as with GC safepoints, it is desirable to keep the volume of information
+that must be retained to describe patchpoints to a minimum. Most methods
+executions will never undergo OSR transition and so the information generated
+will never be consulted. To try and keep OSR a _pay as you go_ technique, it is
+important that this information be cheap to generate and store.
+
+#### 3.1.1 Choosing Patchpoints
+
+Most commonly, patchpoints are chosen to be the places in the code that are
+targets of loop back edges. This is a partially interruptible scheme. This
+ensures that no loop in the method can iterate without hitting a patchpoint, and
+so that the method itself cannot execute indefinitely between patchpoints. Note
+by this rule, methods that do not contain any loops will not have any
+patchpoints.
+
+From a compilation standpoint, it would be ideal if patchpoints were also IL
+stack empty points, as this tends to minimize and regularize the live state.
+However, there is no guarantee that execution of a method will reach stack empty
+points with any frequency. So, a fully general patchpoint mechanism must handle
+the case where the evaluation stack is not empty. However, it may be acceptable
+to only allow patchpoints at stack empty points, as loops that execute with
+non-empty evaluation stacks are likely rare.
+
+It is also beneficial if patchpoint selection works via a fairly simple set of
+rules, and here we propose that using the set of _lexical back edges_ or
+backwards branches in IL is a reasonable choice. These can be identified by a
+single scan over a method's IL.
+
+When generating unoptimized code, it is thus sufficient to note the target of
+any backwards branch in IL, the set of those locations (filtered to just the
+subset where the IL stack is empty) are the candidate patchpoints in the method.
+
+We can also rely on the fact that in our current unoptimized code, no IL state
+is kept in registers across IL stack empty points—all the IL state is
+stored in the native stack frame. This means that each patchpoint's live state
+description is the same—the set of stack frame locations holding the IL
+state.
+
+So, with the above restrictions, a single patchpoint descriptor suffices for the
+entire method (analogous to the concept of _untracked_ GC lifetimes in the GC
+info). Further, this information is a superset of the current GC info, so the
+additional data needed to describe a patchpoint is simply the set of live non-GC
+slots on the native stack frame.
+
+[Note: more general schemes like _deopt_ will require something more
+sophisticated.]
+
+#### 3.1.2 Option I: non-stack empty patchpoints
+
+If it turns out we must also allow patchpoints at non-stack empty points, then
+some per-patchpoint state will be needed to map the logical state of the
+evaluation stack into actual stack slots on the methods frame. This state will
+vary from patchpoint to patchpoint.
+
+#### 3.1.3 Option II: fully interruptible patchpoints
+
+Patchpoints can be much more fine-grained, at any block boundary or even within
+blocks, so long as the correspondence of the generated code to the inspiring IL
+is well understood. However fine-grained patchpoints in our proposed version of
+OSR do not seem to offer much in the way of advantages, given that we are also
+proposing synchronous triggers and transitions, and transitioning from
+unoptimized code. A fine-grained patchpoint mechanism would require more
+metadata to describe each transition point.
+
+#### 3.1.4 The Prototype
+
+In the prototype, patchpoints are the set of IL boundaries in a method that are
+stack-empty and the targets of lexical back edges. The live state of the
+original method is just the IL-visible locals and arguments, plus a few special
+values found in certain frames (GS Cookie, etc).
+
+### 3.2 Triggers
+
+When OSR is used to enable transfer control from an unoptimized method into
+optimized code, the most natural trigger is a count of the number of times a
+patchpoint in the method is reached. Once a threshold is reached at a
+patchpoint, the system can begin preparation of the alternative code version
+that will work for that patchpoint.
+
+This counting can be done fairly efficiently, at least in comparison to the
+ambient unoptimized code in the method, by using counters on the local frame.
+When the threshold is reached, control can transfer to a local policy block;
+this can check whether an alternative version needs to be prepared, is already
+being prepared, or is ready for transfer. Since this policy logic is common to
+all patchpoints it most likely should be encapsulated as a helper. In
+pseudocode:
+
+```
+Patchpoint: // each assigned a dense set of IDs
+
+ if (++counter[ppID] > threshold) call PatchpointHelper(ppID)
+```
+The helper can use the return address to determine which patchpoint is making
+the request. To keep overheads manageable, we might instead want to down-count
+and pass the counter address to the helper.
+```
+Patchpoint: // each assigned a dense set of IDs
+
+ if (--counter[ppID] <= 0) call PatchpointHelper(ppID, &counter[ppID])
+```
+The helper logic would be similar to the following:
+```
+PatchpointHelper(int ppID, int* counter)
+{
+ void* patchpointSite = _ReturnAddress();
+ PPState s = GetState(patchpointSite);
+
+ switch (s)
+ {
+ case Unknown:
+ *counter = initialThreshold;
+ SetState(s, Active);
+ return;
+
+ case Active:
+ *counter = checkThreshold;
+ SetState(s, Pending);
+ RequestAlternative(ppID);
+ return;
+
+ case Pending:
+ *counter = checkThreshold;
+ return;
+
+ case Ready:
+ Transition(...); // does not return
+ }
+}
+```
+Here `RequestAlternative` would queue up a request to produce the alternative
+code version; when that request completes the patchpoint state would be set to
+Ready. So the cost for a patchpoint would be an initial helper call (to set the
+Active threshold), then counting, then a second helper call (to request and set
+the pending threshold), then counting, and, depending on how long the request
+took, more callbacks in pending state.
+
+Note that just because a patchpoint is hit often enough to reach Active state,
+there is no guarantee that the patchpoint will be reached again in the future.
+So, it is possible to trigger alternative version compilations that end up never
+getting used, if those alternative versions are patchpoint specific. In a
+pathological case a method might have an entire sequence of patchpoints that
+reach Active state and trigger alternative versions, none of which ever get
+used.
+
+In this scheme, the local frame of the method would have one local counter per
+patchpoint.
+
+#### 3.2.1 Option I: one global counter per patchpoint
+
+Instead of keeping the counters on the local frame, they could be kept in global
+storage associated with the method, to give an absolute count of patchpoint
+frequency over all invocations of the method. This would help trigger
+transitions in methods in use across multiple threads or methods that are a weak
+mixture of iteration and recursion. Because there would now be shared counter
+state, we'd have to think though how to handle the concurrent access. Likely
+we'd implement something like we do for IBC and have a method fetch and locally
+cache the address of its counter vector locally in the prolog.
+
+#### 3.2.2 Option II: shared counters
+
+Alternatively, all patchpoints in a method could share one counter slot (either
+local or global), this would save space but would lead to somewhat more frequent
+callbacks into the runtime and slightly higher likelihood that useless
+alternatives would be created.
+
+#### 3.2.3 Option III: synchronous OSR
+
+Independent of the counter scheme, the runtime could also block and
+synchronously produce and then transition to the alternative version. This would
+eliminate the potential for wasted alternates (though depending on other
+choices, we still might produce multiple alternates for a method). It would also
+hold up progress of the app, as the thread could just as well continue executing
+the unoptimized code past the patchpoint. We might consider transitioning to
+synchronous OSR selectively for methods that have a track record of generating
+useless versions. This is entirely a runtime policy and would not impact jitted
+codegen.
+
+Note: If OSR is used for EnC or for _deopt_ when an invariant changes, then
+synchronous transitions are required as in general, the old method cannot safely
+execute past a patchpoint. If the delay from jitting code is a concern it may be
+possible to fall back to an interpreter for a time while the new version of the
+method is jitted, though this would require that the system also support
+OSR-style transitions from interpreted methods to compiled methods...
+
+#### 3.2.4 Option IV: share counter space with Tiered Compilation
+
+A final option here is to use global counters and also add a counter at method
+entry. The entry counter could be used for two purposes: first to trigger tiered
+jitting of the entire method, and second, to help normalize the per-patchpoint
+counters so as to provide relative profile weights for the blocks in the method
+when it is rejitted (either via tiering or OSR). We note that the set of
+observation points from patchpoint counters is fairly sparse (not as detailed as
+what we get from IBC, say) but it may be sufficient to build a reasonable
+profile.
+
+#### 3.2.5 The Prototype
+
+In the prototype OSR transitions are synchronous; there is one local patchpoint
+counter per frame shared by all patchpoints; patchpoint IDs are IL offsets.
+
+### 3.3 Alternative Versions
+
+When a patchpoint is hit often enough, the runtime should produce an alternative
+version of the code that can be transitioned to at that patchpoint.
+
+There are several choice points for alternatives:
+
+* Whether to tailor the alternative code specifically to that patchpoint or have
+ the alternative handle multiple (or perhaps all) the patchpoints in a method.
+ We'll call the former a single-entry alternative, and the latter
+ multi-entry alternatives (and, in the limit, whole-method alternatives).
+
+* Whether the alternative version encompasses the remainder of the method, or
+ just some part of the method. We'll call these whole and partial
+ alternatives.
+
+* If a partial method alternative, whether the part of the method compiled
+ includes the entire remainder of the method, or just some fragment that
+ includes the patchpoint (say the enclosing loop nest).
+
+* Whether or not the alternative entry points include the code to build up the
+ alternative stack frames, or setup of the new frame happens via some runtime
+ logic.
+
+* Whether or not the alternate version is tailored to the actual runtime state
+ at the point of the trigger. For instance, specific argument or local values,
+ or actual types.
+
+The partial alternatives obviously are special versions that can only be used by
+OSR. The whole method alternative could also be conceivably used as the
+optimized version of the method, but the additional entry points may result some
+loss of optimizations. So, in general, the OSR alternatives are likely distinct
+from the Tier-1 versions of methods and are used only for active frame
+transitions. New calls to methods can be handled via the existing tiering
+mechanisms.
+
+[Note there are some interesting dynamics here that may warrant further
+consideration. A method that is seldomly called with a hot loop will eventually
+trigger both OSR (from the loop) and Tier1 recompilation (from the calls). We
+might consider deferring tiered recompilation for such methods, as the
+unoptimized versions can readily transition to OSR alternates in code that
+matters for performance.]
+
+Taken together there are various combinations of these alternatives that make
+sense, and various tradeoffs to consider. We explore a few of these below.
+
+#### 3.3.1 Option 1: Partial Method with Transition Prolog
+
+In this option, the runtime invokes the jit with a method, IL offset, and the
+original method mapping of stack frame state to IL state at that offset. The jit
+uses the logical PC (IL offset) to determine the scope of the alternative
+fragment. Here the scope is the IL in the method reachable from the patchpoint.
+
+For the entry point it creates a specialized transition prolog that sets up a
+normal frame, and takes the values of the locals from the old stack frame and
+copies them to the new stack slots, and pushes any live evaluation stack
+arguments. Arguments passed in registers are restored to the right registers.
+Control then transfers to the IL offset of the patchpoint. Any IL in the method
+not reachable from the patchpoint is dead code and can be removed (including the
+original method entry point). This new partial method is then jitted more or
+less normally (modulo the generation of the special prolog).
+
+It might be possible to express this new prolog in IL or something similar. At
+any rate it seems likely the impact on the jit overall can be mostly localized
+to the importer and prolog generation stages and the rest of the jit would
+operate more or less as it does today.
+
+This alternative version can be used any time the original method reaches the
+inspiring patchpoint.
+
+#### 3.3.2 Option 2: Partial Tailored Method with Transition Prolog
+
+If the runtime also passes the triggering stack frame to the jit, the jit can
+incorporate the values in that frame (or information derived from the frame
+values) into the alternative method codegen. This creates a tailored alternative
+that can only be used at this patchpoint from this specific original method
+invocation. The potential benefit here is that the code in the method may be
+more optimizable with the additional context, and since OSR alternatives are
+likely to be lightly used there may not be much downside to specializing exactly
+for this trigger instance. This alternative likely implies synchronous OSR.
+
+#### 3.3.3 Option 3: Full Method with Multiple Entry Points
+
+Instead of generating an alternative that can only be used to transition from
+one specific patchpoint, the alternative method can offer multiple entry points
+to allow transition from some or all of the patchpoints in the original method.
+
+Note: After thinking about this a bit more, I think we can implement this
+variant without needing multiple prologs—instead we can pass the IL offset
+of the OSR entry point as a hidden argument to the OSR method, and have a switch
+on that argument in the first body block to jump to the right place in the
+method. This might be a viable option to control the potential explosion of OSR
+variants for methods with many patchpoints. This method would still be OSR
+specific—that is, it could not also serve as a normally callable Tier1
+method.
+
+#### 3.3.4 Option 4: Method Fragment
+
+If the alternative method is just a fragment of the entire method, then in
+addition to a specialized entry point, the jit will have to create specialized
+exit points that either transition back to the unoptimized method, or else use
+synchronous OSR to invoke jitting of the method code that comes after the
+fragment.
+
+#### 3.3.5 Prototype
+
+The prototype generates partial methods with transition prolog. Per 4.1 below,
+the OSR method frame incorporates the (live portion of the) original method
+frame instead of supplanting it.
+
+### 3.4 Transitions
+
+A transition can happen once an OSR capable method reaches a patchpoint where a
+suitable alternative version is ready. Because transitions will likely require
+changes in stack frame size it is much simpler to consider transitions only for
+methods at the top of the stack. This means that methods that are invoked
+recursively may be transitioned by OSR gradually as the stack unwinds.
+
+Abstractly, the actual transition could work something like the following: the
+runtime would copy the top stack frame into temporary storage, then carefully
+unwind the current frame. Then the alternative method would be put in place and
+invoked, being passed the copy of the original frame as an argument.
+
+However, the presence of original frame addresses and values derived from those
+addresses in the original frame's live state complicates matters (more on this
+in [Section 4.1](#Addresses-of-Locals)). So the OSR method needs to ensure that
+any "address-exposed" local ends up at the exact same stack location in the OSR
+frame as it did in the original method frame. The simplest way to accomplish
+this is to just leave the original frame in place, and have the OSR frame
+"incorporate" it as part of its frame.
+
+#### 3.4.1 The Prototype
+
+The original method conditionally calls to the patchpoint helper at
+patchpoints. The helper will return if there is no transition.
+
+For a transition, the helper will capture context and virtually unwind itself
+and the original method from the stack to recover callee-save register values
+live into the original method and then restore the callee FP and SP values into
+the context (preserving the original method frame); then set the context IP to
+the OSR method entry and restore context. OSR method will incorporate the
+original method frame as part of its frame.
+
+## 4 Complications
+
+### 4.1 Addresses of Locals
+
+If the live state at the patchpoint includes addresses of locals (or addresses
+of arguments, if the OSR transition pushes a new frame), either these addresses
+must be updated to properly reflect the new locations or the address-taken
+locals must end up in the same relative location in the frame. The jit might
+require some hardening to ensure that address of local is always properly
+described at patchpoints.
+
+Detection of address-taken locals (especially in a non-optimizing jit) may
+require some attention. We frequently see `ldloca` in IL that is consumed in a
+dereference before a stack empty point; such locals are transiently exposed but
+their addresses would not be live at our proposed set of patchpoints (note
+`ldflda` can cause similar issues if it exposes addresses of local struct
+fields).
+
+Arithmetic done on addresses of locals might not be stable across an OSR
+transition (that is, different values could be obtained for a given piece of
+code before and after the transition). While in general there is no guarantee
+about the values produced by this kind of code it is not unreasonable to expect
+that the value would not change over the lifetime of a given method's
+execution. It is not clear how much code might depend on this.
+
+This problem could be partially solved by requiring any address-taken local to
+appear at the same stack location in the alternative method frame and by
+requiring that the OSR frame supplant the original frame (this is how EnC
+works). In that case all address-taken locals would be at the same address.
+Ensuring that this is possible likely entails other restrictions like reserving
+a maximally large register save area for the original method.
+
+However, it seems simplest to just preserve the original method frame, or at
+least the portion of it that contains the live state, and allow the OSR method
+to access the original frame values, either as initial values or as the actual
+homes for that state.
+
+### 4.2 Localloc
+
+Methods with localloc pose similar challenges to those posed by methods with
+address taken locals. Room is made on the original method stack for the localloc
+storage, and a native pointer to that storage is part of the live state of the
+method. The live state may also include pointers and other values derived from
+that address. So, the alternative version must use that same location; a
+copy/fixup procedure to allow this storage to be relocated in some manner seems
+impractical.
+
+In addition, localloc makes describing the local frame more complex, as the size
+of the frame and the location of particular bits of live state can vary.
+Typically, the jit will use multiple frame pointers in a localloc frame to allow
+for relative addressing.
+
+In the most complex case, the original method will have executed one or more
+locallocs before hitting the patchpoint, and the OSR variant will then execute
+more locallocs. Such cases might require the OSR method to maintain 3 or more
+frame pointers.
+
+### 4.3 Funclets
+
+When control is executing in a funclet there are effectively two activation
+records on the stack that share a single frame: the parent frame and the
+funclet frame. The funclet frame is largely a stub frame and most of the frame
+state is kept in the parent frame.
+
+These two frames are not adjacent; they are separated by some number of runtime
+frames. This means it is going to be difficult for our system to handle
+patchpoints within funclets; even if we could update the code the funclet is
+running we would not be able to update the parent frame.
+
+The proposal here is to disallow patchpoints within funclets so that we do not
+attempt OSR transitions when the top of stack frame is a funclet frame. One
+hopes that performance critical loops rarely appear in catch or finally clauses.
+
+EnC has similar restrictions.
+
+### 4.4 GC
+
+There is a brief window of time during the transition where there are GC live
+values on both the original and alternative frames (and the original frame may
+have been copied off-stack). Since the transition is done via a runtime helper,
+it seems prudent to forbid GC during this part of the transition, which should
+be relatively brief.
+
+### 4.5 Diagnostics
+
+Alternative methods will never be called — they are only transitioned to
+by active original methods, so likely no special work is needed to make them
+compatible with the current profiler guarantees for IL modifications ("new
+invocations" of the method invoke the new version).
+
+We may need to update the mechanisms that the runtime uses to notify profilers
+of new native code versions of a method.
+
+The jit will generate the same debug info mappings as it does today, and so the
+debugging experience when debugging an alternative should be similar to the
+experience debugging a Tier1 method. Likewise, the code publishing aspects
+should be common, so for instance active breakpoints should get applied.
+
+[Note: I have verified this on simple examples using the VS debugger; a source
+breakpoint set in the original method is applied to the OSR method too.]
+
+We need to decide what happens if the debugger tries to use SetIP on an OSR
+method for an IL offset that is not within the range of IL compiled; likely
+we'll just have to fail the request.
+
+Breakpoints set at native code addresses won't transfer to the corresponding
+points in OSR methods. We have the same issue with Tiered compilation already.
+
+OSR (exclusive of EnC) will be disabled for debuggable code.
+
+Debugging through an OSR transition (say a single-step that triggers OSR) may
+require special consideration. This is something that needs further
+investigation.
+
+**Prototype: The OSR methods have somewhat unusual unwind records that may be
+confusing the (Windbg) debugger stack trace.**
+
+### 4.6 Proposed Tier-0 Optimizations
+
+We have been assuming up until this point that the original method was not
+optimized in any way, and so its live state is safely over-approximated by the
+values of all locals, arguments, evaluation stack entries. This means that any
+value truly live at a reachable patchpoint (capable of influencing future
+computation) is included in the live set. The reported live set might well be
+larger, of course. The alternative method will likely run liveness and pick from
+this set only the values it sees as truly live.
+
+This means that we can run optimizations in the original method so long as they
+do not alter the computation of the over-approximated live set at any
+patchpoint.
+
+The proposed Tier0 optimizations fit into this category, so long as we restrict
+patchpoints to stack-empty points: we may prune away unreachable code paths (say
+from HW intrinsic checks or provably true or false predicate evaluations&mdash;
+patchpoints in pruned sections would be unreachable) and simplify computations.
+Optimizing expressions may reduce the truly live set but so long as all stores
+to locals and args are kept live the base values needed for any alternate
+version of the code will be available.
+
+### 4.7 Alternative Method Optimizations
+
+In options where the alternative method has multiple entry points, one must be
+wary of early aggressive optimizations done when optimizing the alternative. The
+original version of the method may hit a patchpoint while executing code that
+can be optimized away by the more aggressive alternative method compiler (e.g.
+it may be executing a series of type equality tests in a generic method that the
+optimizing jit can evaluate at jit time). But with our simple patchpoint
+recognition algorithm the alternate compiler can quickly verify that the
+patchpoint IL offset is a viable entry point and ensure that the code at that
+offset is not optimized away. If it turns out that the entry point code is
+optimizable then we may choose to peel one iteration from the entry point loop
+(because with our patchpoint strategy, execution in the alternate method will
+immediately hit a loop top once it is out of the prolog) and allow the in-loop
+versions to be optimized.
+
+### 4.8 Prologs and Unwind
+
+The alternative version of the method will, in all likelihood, need to save and
+restore a different set of callee-saves registers than the original version. But
+since the original stack frame has already saved some registers, the alternative
+version prolog will either need to save a superset of those registers or else
+restore the value of some registers in its prolog. So, the alternative version
+needs to know which registers the original saved and where in the stack they are
+stored.
+
+If we want to preserve frame offsets for address-taken locals then we may face a
+conflict as altering the number of callee save slots may alter frame offsets for
+locals. One thought here is that we could perhaps implement a chained unwind
+scheme, where there is an initial prolog that emulates the original version
+prolog and duplicates its saves, and then a subsequent "shrink wrapped" prolog
+& epilog that saves any additional registers in a disjoint area.
+
+**Prototype:** When it is time to transition, the patchpoint helper virtually
+unwinds two frames from the stack—its own frame, and the frame for the
+original method. So the unwound context restores the callee saves done by the
+original method. That turns out to be sufficient.
+
+You might think the helper would need to carefully save all the register state
+on entry, but that's not the case. Because the original method is un-optimized,
+there isn't any live IL state in registers across the call to the patchpoint
+helper—all the live IL state for the method is on the original
+frame—so the argument and caller-save registers are dead at the
+patchpoint. Thus the only part of register state that is significant for ongoing
+computation is the callee-saves, which are recovered via virtual unwind, and the
+frame and stack pointers of the original method, which are likewise recovered by
+virtual unwind.
+
+With this context in hand, the helper then "calls" the OSR method by restoring
+the context. The OSR method performs its own callee-saves as needed, and
+recovers the arguments/IL state from the original frame.
+
+If we were to support patchpoints in optimized code things would be more
+complicated.
+
+### 4.9 Synchronized Methods
+
+OSR methods only need to add the code to release the synchronized method
+monitor. This must still be done in a try-finally to ensure release even on
+exceptional exit.
+
+### 4.10 Profile Enter/Leave Hooks
+
+OSR methods only need to support the method exit hook.
+
+## 5 The Prototype
+
+Based on the above, we developed a prototype implementation of OSR to gain
+experience, gather data, and test out assumptions.
+
+The prototype chose the following options:
+* Patchpoints: lexical back edge targets that are stack empty and not in try
+ regions; live state is all locals and args + specials (thus no liveness needed
+ at Tier0)
+* Trigger: one shared counter per frame. Initial value configurable at runtime.
+ Patchpoints decrement the counter and conditionally call the runtime helper if
+ the value is zero or negative.
+* Alternatives: partial method tailored to each patchpoint. OSR method
+ incorporates the original method frame.
+* Transition: synchronous—once the patchpoint has been hit often enough a
+ new alternative is jitted.
+
+The prototype works for x64 on Windows and Linux, and can pass the basic (pri0)
+tests suites with an aggressive transition policy (produce the OSR method and
+transition the first time each patchpoint is hit).
+
+### 5.1 Example Codegen
+
+Consider the following simple method:
+```C#
+ public static int F(int from, int to)
+ {
+ int result = 0;
+ for (int i = from; i < to; i++)
+ {
+ result += i;
+ }
+ return result;
+ }
+
+```
+Normal (Tier0, x64 windows) codegen for the method is:
+```asm
+; Tier-0 compilation
+
+G_M6138_IG01:
+ 55 push rbp
+ 4883EC10 sub rsp, 16
+ 488D6C2410 lea rbp, [rsp+10H]
+ 33C0 xor rax, rax
+ 8945FC mov dword ptr [rbp-04H], eax // result
+ 8945F8 mov dword ptr [rbp-08H], eax // i
+ 894D10 mov dword ptr [rbp+10H], ecx // from
+ 895518 mov dword ptr [rbp+18H], edx // to
+
+G_M6138_IG02:
+ 33C0 xor eax, eax
+ 8945FC mov dword ptr [rbp-04H], eax
+ 8B4510 mov eax, dword ptr [rbp+10H]
+ 8945F8 mov dword ptr [rbp-08H], eax
+ EB11 jmp SHORT G_M6138_IG04
+
+G_M6138_IG03:
+ 8B45FC mov eax, dword ptr [rbp-04H]
+ 0345F8 add eax, dword ptr [rbp-08H] // result += i
+ 8945FC mov dword ptr [rbp-04H], eax
+ 8B45F8 mov eax, dword ptr [rbp-08H]
+ FFC0 inc eax
+ 8945F8 mov dword ptr [rbp-08H], eax
+
+G_M6138_IG04:
+ 8B45F8 mov eax, dword ptr [rbp-08H]
+ 3B4518 cmp eax, dword ptr [rbp+18H]
+ 7CE7 jl SHORT G_M6138_IG03 // i < to ?
+ 8B45FC mov eax, dword ptr [rbp-04H]
+
+G_M6138_IG05:
+ 488D6500 lea rsp, [rbp]
+ 5D pop rbp
+ C3 ret
+```
+with OSR enabled (and patchpoint counter initial value = 2), this becomes:
+```asm
+; Tier-0 compilation + Patchpoints
+
+G_M6138_IG01:
+ 55 push rbp
+ 4883EC30 sub rsp, 48
+ 488D6C2430 lea rbp, [rsp+30H]
+ 33C0 xor rax, rax
+ 8945FC mov dword ptr [rbp-04H], eax // result
+ 8945F8 mov dword ptr [rbp-08H], eax // i
+ 894D10 mov dword ptr [rbp+10H], ecx // from
+ 895518 mov dword ptr [rbp+18H], edx // to
+
+G_M6138_IG02:
+ 33C9 xor ecx, ecx
+ 894DFC mov dword ptr [rbp-04H], ecx // result = 0
+ 8B4D10 mov ecx, dword ptr [rbp+10H]
+ 894DF8 mov dword ptr [rbp-08H], ecx // i = from
+ C745F002000000 mov dword ptr [rbp-10H], 2 // patchpointCounter = 2
+ EB2D jmp SHORT G_M6138_IG06
+
+G_M6138_IG03:
+ 8B4DF0 mov ecx, dword ptr [rbp-10H] // patchpointCounter--
+ FFC9 dec ecx
+ 894DF0 mov dword ptr [rbp-10H], ecx
+ 837DF000 cmp dword ptr [rbp-10H], 0 // ... > 0 ?
+ 7F0E jg SHORT G_M6138_IG05
+
+G_M6138_IG04: ;; bbWeight=0.01
+ 488D4DF0 lea rcx, bword ptr [rbp-10H] // &patchpointCounter
+ BA06000000 mov edx, 6 // ilOffset
+ E808CA465F call CORINFO_HELP_PATCHPOINT
+
+G_M6138_IG05:
+ 8B45FC mov eax, dword ptr [rbp-04H]
+ 0345F8 add eax, dword ptr [rbp-08H]
+ 8945FC mov dword ptr [rbp-04H], eax
+ 8B45F8 mov eax, dword ptr [rbp-08H]
+ FFC0 inc eax
+ 8945F8 mov dword ptr [rbp-08H], eax
+
+G_M6138_IG06:
+ 8B4DF8 mov ecx, dword ptr [rbp-08H]
+ 3B4D18 cmp ecx, dword ptr [rbp+18H]
+ 7CCB jl SHORT G_M6138_IG03
+ 8B45FC mov eax, dword ptr [rbp-04H]
+
+G_M6138_IG07:
+ 488D6500 lea rsp, [rbp]
+ 5D pop rbp
+ C3 ret
+```
+Because Tier0 is unoptimized code, the patchpoint sequence is currently
+unoptimized. This leads to a moderate amount of code bloat in methods with
+patchpoints. The overall code size impact of patchpoints (as measured by
+`jit-diff`) is around 2%, but this is an understatement of the impact to
+methods that have patchpoints, as most Tier0 methods won't require patchpoints.
+This is something that can be improved.
+
+The OSR method for this patchpoint is:
+```asm
+; Tier-1 compilation
+; OSR variant for entry point 0x6
+
+G_M6138_IG01:
+ 8B542450 mov edx, dword ptr [rsp+50H] // to
+ 8B4C2434 mov ecx, dword ptr [rsp+34H] // result
+ 8B442430 mov eax, dword ptr [rsp+30H] // i
+
+G_M6138_IG02: ;; bbWeight=8
+ 03C8 add ecx, eax
+ FFC0 inc eax
+ 3BC2 cmp eax, edx
+ 7CF8 jl SHORT G_M6138_IG02
+
+G_M6138_IG03:
+ 8BC1 mov eax, ecx
+
+G_M6138_IG04:
+ 4883C438 add rsp, 56
+ 5D pop rbp
+ C3 ret
+```
+Here the live state is `result`, `i`, and `to`. These are kept in registers and
+initialized in the prolog to the values they had in the original frame. The jit
+request for the OSR method includes `OSR_INFO` metadata describing the original
+method frame, so the jit can compute the correct addresses for original frame
+slots in the OSR method.
+
+Because the OSR method is entered with the original method frame still active,
+the OSR method has asymmetric prolog and epilog sequences. This is reflected in
+the unwind data for the OSR method by recording a "phantom prolog" to account
+for actions taken by the original method. These are at code offset 0 so happen
+"instantaneously" when the method is entered.
+```
+ UnwindCodes:
+ CodeOffset: 0x00 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 6 * 8 + 8 = 56 = 0x38
+ CodeOffset: 0x00 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rbp (5)
+```
+By way of comparison, here is the full Tier-1 version of the method.
+```asm
+G_M6138_IG01:
+
+G_M6138_IG02:
+ 33C0 xor eax, eax
+ 3BCA cmp ecx, edx
+ 7D08 jge SHORT G_M6138_IG04
+
+G_M6138_IG03: ;; bbWeight=4
+ 03C1 add eax, ecx
+ FFC1 inc ecx
+ 3BCA cmp ecx, edx
+ 7CF8 jl SHORT G_M6138_IG03
+
+G_M6138_IG04:
+ C3 ret
+```
+Note the inner loop codegen is very similar to the OSR variant. This is typical.
+It is often possible to diff the Tier1 and OSR codegen and see that the latter
+is just a partial version of the former, with different register usage and
+different stack offsets.
+
+### 5.2 More Complex Examples
+
+If the OSR method needs to save and restore registers, then the epilog will have
+two stack pointer adjustments: the first to reach the register save area on the
+OSR frame, the second to reach the saved RBP and return address on the original
+frame.
+
+For example:
+```asm
+ 4883C440 add rsp, 64
+ 5B pop rbx
+ 5E pop rsi
+ 5F pop rdi
+ 4883C448 add rsp, 72
+ 5D pop rbp
+ C3 ret
+```
+with unwind info:
+```
+ UnwindCodes:
+ CodeOffset: 0x07 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 7 * 8 + 8 = 64 = 0x40
+ CodeOffset: 0x03 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rbx (3)
+ CodeOffset: 0x02 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rsi (6)
+ CodeOffset: 0x01 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rdi (7)
+ CodeOffset: 0x00 UnwindOp: UWOP_ALLOC_SMALL (2) OpInfo: 8 * 8 + 8 = 72 = 0x48
+ CodeOffset: 0x00 UnwindOp: UWOP_PUSH_NONVOL (0) OpInfo: rbp (5)
+```
+
+If the OSR method needs to save RBP, we may see two RBP restores in the epilog;
+this does not appear to cause problems during execution, as the "last one wins"
+when unwinding.
+
+However, the debugger (at least windbg) may end up being confused; any tool
+simply following the RBP chain will see the original frame is still "linked"
+into the active stack.
+
+### 5.3 PatchpointInfo
+
+As noted above, when the jit is invoked to create the OSR method, it asks the
+runtime for some extra data:
+* The IL offset of the OSR entry point
+* `PatchpointInfo`: a description of the original method frame
+
+`PatchpointInfo` is produced by the jit when jitting the Tier0 method. It is
+allocated by the runtime similarly to other codegen metadata like GC info and
+unwind info and is likewise associated with the original method. When the
+runtime helper decides to kick off an OSR jit, it sets things up so that the jit
+can retrieve this data.
+
+Since the `PatchpointInfo` is produced and consumed by the jit its format is
+largely opaque to the runtime. It has the following general layout:
+```C++
+struct PatchpointInfo
+{
+ unsigned m_patchpointInfoSize;
+ unsigned m_ilSize;
+ unsigned m_numberOfLocals;
+ int m_fpToSpDelta;
+ int m_genericContextArgOffset;
+ int m_keptAliveThisOffset;
+ int m_securityCookieOffset;
+ int m_offsetAndExposureData[];
+};
+```
+The key values are the `fpToSpDelta` which describes the extent of the original
+frame, and the `offsetAndExposureData` which describe the offset of each local
+on the original frame.
+
+### 5.4 Performance Impact
+
+The prototype is mainly intended to show that OSR can be used to improve startup
+without compromising steady-state performance: with OSR, we can safely use the
+quick jit for almost all methods.
+
+We are currently evaluating the performance impact of OSR on some realistic
+scenarios.
+
+Initial data shows a general improvement in startup time, in particular for
+applications where startup was impacted by disabling quick-jitting of methods
+with loops (see dotnet/coreclr#24252).
+
+### 5.5 Prototype Limitations and Workarounds
+
+* x64 only
+* Struct promotion is currently disabled for OSR methods
+* No OSR for synchronized methods
+* No OSR for methods with profiler hooks
+* No OSR for methods with localloc
+* No OSR from "handler" regions (catch/finally/filter)
+
+The prototype trigger strategy is a hybrid: it has a per-frame local counter and
+a per-patchpoint global counter (kept by the runtime). This is probably
+something we need to re-assess.
+
+## 6 Edit and Continue
+
+As mentioned in the introduction, OSR is similar to Edit and Continue (EnC). EnC
+transitions from an original unoptimized version of a method to a new
+unoptimized version with slightly different IL. The CLR already supports EnC on
+some platforms, and we briefly review the current implementation here. Our main
+interest here is in edits to method IL for an active method, so we focus on that
+aspect.
+
+### 6.1 Current EnC Support
+
+Method modification in EnC works roughly as follows. A process being debugged is
+stopped at a breakpoint. The user makes a source edit and hits apply. The source
+edit is vetted by the language compiler as suitable for EnC and metadata edits
+are sent to the runtime via the debugger. For method modifications these edits
+create a new version of method IL. Any subsequent invocations of that method
+will use that new IL.
+
+To update currently active versions, the debugger also adds special breakpoints
+to the plausible patchpoints of the original method's native code. Execution
+then resumes. When one of those special breakpoints is hit, an original
+method's active frame is at the top of the stack, and the frame can be
+transitioned over to the new version. The remapping is done by using the debug
+information generated by the jit for both the old and new versions of the
+method. As part of this the runtime verifies that locals remain at the same
+addresses in the old and new stack frames (thus avoiding the complication noted
+earlier in [Section 4.1](#Addresses-of-Locals)).
+
+The jit is notified if a method is going to potentially be eligible for EnC and
+takes some precautions to ensure the EnC transition can be handled by the
+runtime: for instance, the jit will always save the same set of registers and
+use a frame pointer.
+
+So, for EnC, we see that:
+
+- The viable patchpoints are determined by the debugger (via
+ EnCSequencePointHelper). These are restricted to be stack empty points (since
+ debug info will not describe contents of the evaluation stack) that are not in
+ filter or handlers. They are a broader set than we envision needing for OSR.
+- The necessary mapping information (stack frame layout, native to IL mapping,
+ and native offsets of stack empty points) is present in the debug stream
+ generated by the jit for the original method.
+- The trigger is a set of special breakpoints placed into the original method
+ native code by the debugger when an edit is applied to the method.
+- When an EnC breakpoint is hit, the debugger can choose whether or not to
+ initiate a transition.
+- If the debugger initiates a transition, it is done synchronously: the new
+ version of the method is jitted if necessary and the currently active frame is
+ transitioned over to the new version, via ResumeInUpdatedFunction. Of interest
+ here are the lower-level methods used here to update the frame:
+ FixContextAndResume and FixContextForEnC.
+- The alternative version is a full method and can be used to transition from
+ any patchpoint in the original method.
+- The jit modifies its codegen somewhat to facilitate the transition. It does
+ not, however, explicitly model the alternate entry points.
+
+## 7 Deoptimization
+
+Up until this point we have been assuming the original method was not optimized
+or was optimized in a manner that did not alter its reported live state.
+
+More general optimizations break this property and so additional bookkeeping and
+some restrictions on optimizations may be necessary to allow OSR transitions
+from optimized code. We touch on this briefly below.
+
+Optimizations can either increase or decrease live state.
+
+For instance, unused computations can be removed, and unused local updates
+("dead stores") can be skipped. Registers holding no longer live locals can be
+reused for other values (as can stack slots, though the current jit does not do
+this).
+
+Other optimizations can increase the live state. The classic example is inlining
+— a call to a method is expanded inline, and so at patchpoints within the
+inline body, there are now arguments and locals to the original method, plus
+arguments and locals to the inline method. If we wish to make an OSR transition
+from such a patchpoint to say unoptimized code, we need to effectively undo the
+inlining, creating two frames (or more generally N frames) in place of the
+original frame, and two alternate methods (or N alternate methods).
+
+The general solution is to first ensure that the live state never decreases. The
+patchpoint locations are determined early, and any values truly live at a
+patchpoint at that initial stage of compilation are forced to remain live at
+that patchpoint always. So, some dead store elimination is inhibited, and some
+forms of code motion are inhibited (e.g. one cannot sink a store to a local out
+of a loop, as the patchpoint at loop top would not observe the updated value).
+
+With all the "naive" state guaranteed live at a patchpoint, and any additions to
+live state via inlining carefully tracked, one can transition from optimized
+code via OSR.
+
+Given the need to preserve address artifacts, this transition must be done
+gradually—first creating a frame for the innermost inlined method that
+extends the original frame, then, when this innermost method returns, creating a
+frame for the next innermost inlined method, and so on, until finally the
+root method frame returns and can clean up the optimized method frame as well.
+
+Each of these (presumably, unoptimized) deopt target methods will need to be
+custom-crafted to access the optimized method frame.
+
+This same consideration makes it challenging to implement deopt fallbacks to
+an interpreter; the interpreter will likewise need to keep some of its state
+in the original method frame.
+
+We currently don't have any need to transfer control out of jitted optimized
+code (Tier1), though one could potentially imagine supporting this to better
+debug optimized code. The really strong motivations for deoptimization may come
+about when the system is optimizing based on "currently true" information that
+has now become invalid.
+
+## 8 References
+
+1. U. Holzle, C. Chambers and D. Ungar, "Debugging Optimized
+ Code with Dynamic Deoptimization," in _ACM PLDI_, 1992.
+2. M. Paleczny, C. Vick and C. Click, "The Java Hotspot(tm)
+ Server Compiler," in _USENIX Java Virtual Machine Research and
+ Technology Symposium_, 2001.
+3. S. Fink and F. Qian, "Design, Implementation and
+ Evaluation of Adaptive Recompilation with On-Stack Replacement," in _In
+ International Symposium on Code Generation and Optimization (CGO)_, 2003.
diff --git a/docs/design/features/host-download-urls.md b/docs/design/features/host-download-urls.md
index c2253ed57fffed..eefbccb8738bcf 100644
--- a/docs/design/features/host-download-urls.md
+++ b/docs/design/features/host-download-urls.md
@@ -21,8 +21,8 @@ It's also part of the error when an SDK command is executed and there's no SDK i
```
## Install prerequisites link
-> https://go.microsoft.com/fwlink/?linkid=798306 for Windows
-> https://go.microsoft.com/fwlink/?linkid=2063366 for OSX
+> https://go.microsoft.com/fwlink/?linkid=798306 for Windows
+> https://go.microsoft.com/fwlink/?linkid=2063366 for OSX
> https://go.microsoft.com/fwlink/?linkid=2063370 for Linux
This URL is part of the error message when the host fails to load `hostfxr`.
@@ -45,7 +45,7 @@ This will happen when the host can't find `hostfxr`, typically when there's no .
In this case the URL will contain these parameters:
* `missing_runtime=true` - this marks the case of missing runtime.
-* `apphost_version=` - the version of the `apphost` which was used to create the executable for the app. For example `apphost_version=5.0.0-preview.2.20155.1`. Currently this is only included if the host is a GUI app - see dotnet/runtime#33569 to include it always.
+* `apphost_version=` - the version of the `apphost` which was used to create the executable for the app. For example `apphost_version=5.0.0-preview.2.20155.1`. This is included in all cases in 5.0 and above and it may be included in 3.1 only for GUI apps.
In this case the `apphost_version` parameter could be used by the website to determine the major version of the runtime to offer (using latest minor/patch). Also if there's a `gui=true` we should offer the `WindowsDesktop` runtime installer.
diff --git a/docs/workflow/building/mono/README.md b/docs/workflow/building/mono/README.md
index c751144bd28ec5..36db7c252e34f6 100644
--- a/docs/workflow/building/mono/README.md
+++ b/docs/workflow/building/mono/README.md
@@ -19,7 +19,7 @@ Once you've built the whole runtime and assuming you want to work with just mono
```
or on Windows,
```bat
-build.cmd --subsetCategory mono
+build.cmd -subsetCategory mono
```
When the build completes, product binaries will be dropped in the `artifacts\bin\mono\..` folder.
@@ -45,7 +45,7 @@ To generate nuget packages:
```
or on Windows,
```bat
-build.cmd --subsetCategory mono -pack (with optional release configuration)
+build.cmd -subsetCategory mono -pack (with optional release configuration)
```
The following packages will be created under `artifacts\packages\\Shipping`:
diff --git a/eng/Tools.props b/eng/Tools.props
index 4c399288ad250f..32fc0f6f6d4fcf 100644
--- a/eng/Tools.props
+++ b/eng/Tools.props
@@ -2,6 +2,9 @@
false
true
+
+ <_RepoToolManifest Condition="'$(ContinuousIntegrationBuild)' == 'true'" />
- 5.0.0-beta.20162.4
- 5.0.0-beta.20162.3
- 5.0.0-beta.20162.3
- 5.0.0-beta.20162.3
- 5.0.0-beta.20162.3
- 5.0.0-beta.20162.3
- 2.5.1-beta.20162.3
- 5.0.0-beta.20162.3
- 5.0.0-beta.20162.3
- 5.0.0-beta.20162.3
+ 5.0.0-beta.20168.2
+ 5.0.0-beta.20168.2
+ 5.0.0-beta.20168.2
+ 5.0.0-beta.20168.2
+ 5.0.0-beta.20168.2
+ 5.0.0-beta.20168.2
+ 2.5.1-beta.20168.2
+ 5.0.0-beta.20168.2
+ 5.0.0-beta.20168.2
+ 5.0.0-beta.20168.2
5.0.0-alpha.1.20080.9
5.0.0-alpha.1.20080.9
@@ -114,7 +114,7 @@
4.8.0
- 16.6.0-preview-20200316-01
+ 16.6.0-preview-20200318-01
2.4.1
2.0.5
12.0.3
@@ -123,7 +123,7 @@
3.1.0-preview-20200129.1
- 0.1.6-prerelease.20166.1
+ 0.1.6-prerelease.20169.1
6.0.1-alpha.1.20166.1
6.0.1-alpha.1.20166.1
diff --git a/eng/codeOptimization.targets b/eng/codeOptimization.targets
index cbd2474984592a..24e9c761407cfb 100644
--- a/eng/codeOptimization.targets
+++ b/eng/codeOptimization.targets
@@ -9,7 +9,7 @@
IBCMerge optimizations on Mac for now to unblock the offical build.
See issue https://github.com/dotnet/runtime/issues/33303
-->
- false
+ false
$__ToolchainDir/sysroot/androi
echo "Now to build coreclr, libraries and installers; run:"
echo ROOTFS_DIR=\$\(realpath $__ToolchainDir/sysroot\) ./build.sh --cross --arch $__BuildArch \
- --subsetCategory coreclr \
- --subsetCategory libraries \
+ --subsetCategory coreclr
+echo ROOTFS_DIR=\$\(realpath $__ToolchainDir/sysroot\) ./build.sh --cross --arch $__BuildArch \
+ --subsetCategory libraries
+echo ROOTFS_DIR=\$\(realpath $__ToolchainDir/sysroot\) ./build.sh --cross --arch $__BuildArch \
--subsetCategory installer
diff --git a/eng/common/templates/job/publish-build-assets.yml b/eng/common/templates/job/publish-build-assets.yml
index b722975f9c288f..055304ad89bf65 100644
--- a/eng/common/templates/job/publish-build-assets.yml
+++ b/eng/common/templates/job/publish-build-assets.yml
@@ -37,6 +37,12 @@ jobs:
- name: _BuildConfig
value: ${{ parameters.configuration }}
- group: Publish-Build-Assets
+ # Skip component governance and codesign validation for SDL. These jobs
+ # create no content.
+ - name: skipComponentGovernanceDetection
+ value: true
+ - name: runCodesignValidationInjection
+ value: false
steps:
- ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
diff --git a/eng/common/templates/post-build/common-variables.yml b/eng/common/templates/post-build/common-variables.yml
index 9505cf170f0fec..61488fd7679afd 100644
--- a/eng/common/templates/post-build/common-variables.yml
+++ b/eng/common/templates/post-build/common-variables.yml
@@ -90,3 +90,10 @@ variables:
value: https://dotnetclimsrc.blob.core.windows.net/dotnet/index.json
- name: InternalInstallersBlobFeedKey
value: $(dotnetclimsrc-access-key)
+
+ # Skip component governance and codesign validation for SDL. These jobs
+ # create no content.
+ - name: skipComponentGovernanceDetection
+ value: true
+ - name: runCodesignValidationInjection
+ value: false
\ No newline at end of file
diff --git a/eng/install-native-dependencies.sh b/eng/install-native-dependencies.sh
index a73d4918c521bb..e837b017c7cba1 100644
--- a/eng/install-native-dependencies.sh
+++ b/eng/install-native-dependencies.sh
@@ -23,8 +23,18 @@ elif [ "$1" = "OSX" ]; then
if [ "$?" != "0" ]; then
exit 1;
fi
+elif [ "$1" = "iOS" ]; then
+ brew update
+ brew upgrade
+ if [ "$?" != "0" ]; then
+ exit 1;
+ fi
+ brew install openssl autoconf automake libtool pkg-config python3
+ if [ "$?" != "0" ]; then
+ exit 1;
+ fi
else
- echo "Must pass \"Linux\" or \"OSX\" as first argument."
+ echo "Must pass \"Linux\", \"iOS\" or \"OSX\" as first argument."
exit 1
fi
diff --git a/eng/native/configureplatform.cmake b/eng/native/configureplatform.cmake
index 174cce725d557e..4fccb81a0c714a 100644
--- a/eng/native/configureplatform.cmake
+++ b/eng/native/configureplatform.cmake
@@ -1,15 +1,19 @@
include(CheckPIESupported)
include(${CMAKE_CURRENT_LIST_DIR}/functions.cmake)
+# If set, indicates that this is not an officially supported release
+# Keep in sync with IsPrerelease in Directory.Build.props
+set(PRERELEASE 1)
+
#----------------------------------------
# Detect and set platform variable names
# - for non-windows build platform & architecture is detected using inbuilt CMAKE variables and cross target component configure
# - for windows we use the passed in parameter to CMAKE to determine build arch
#----------------------------------------
set(CLR_CMAKE_HOST_OS ${CMAKE_SYSTEM_NAME})
-if(CLR_CMAKE_HOST_OS STREQUAL Linux)
+if(CLR_CMAKE_HOST_OS STREQUAL Linux OR CLR_CMAKE_HOST_OS STREQUAL Android)
set(CLR_CMAKE_HOST_UNIX 1)
- if(CLR_CROSS_COMPONENTS_BUILD)
+ if(CLR_CROSS_COMPONENTS_BUILD AND NOT CLR_CMAKE_HOST_OS STREQUAL Android)
# CMAKE_HOST_SYSTEM_PROCESSOR returns the value of `uname -p` on host.
if(CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL amd64)
if(CLR_CMAKE_TARGET_ARCH STREQUAL "arm" OR CLR_CMAKE_TARGET_ARCH STREQUAL "armel")
@@ -36,7 +40,7 @@ if(CLR_CMAKE_HOST_OS STREQUAL Linux)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l)
set(CLR_CMAKE_HOST_UNIX_ARM 1)
set(CLR_CMAKE_HOST_UNIX_ARMV7L 1)
- elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL arm)
+ elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL arm OR CMAKE_SYSTEM_PROCESSOR STREQUAL armv7-a)
set(CLR_CMAKE_HOST_UNIX_ARM 1)
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64)
set(CLR_CMAKE_HOST_UNIX_ARM64 1)
@@ -54,10 +58,12 @@ if(CLR_CMAKE_HOST_OS STREQUAL Linux)
set(LINUX_ID_FILE "${CMAKE_SYSROOT}${LINUX_ID_FILE}")
endif()
- execute_process(
- COMMAND bash -c "source ${LINUX_ID_FILE} && echo \$ID"
- OUTPUT_VARIABLE CLR_CMAKE_LINUX_ID
- OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(EXISTS ${LINUX_ID_FILE})
+ execute_process(
+ COMMAND bash -c "source ${LINUX_ID_FILE} && echo \$ID"
+ OUTPUT_VARIABLE CLR_CMAKE_LINUX_ID
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ endif()
if(DEFINED CLR_CMAKE_LINUX_ID)
if(CLR_CMAKE_LINUX_ID STREQUAL tizen)
@@ -66,12 +72,14 @@ if(CLR_CMAKE_HOST_OS STREQUAL Linux)
elseif(CLR_CMAKE_LINUX_ID STREQUAL alpine)
set(CLR_CMAKE_HOST_ALPINE_LINUX 1)
set(CLR_CMAKE_HOST_OS ${CLR_CMAKE_LINUX_ID})
- elseif(CLR_CMAKE_LINUX_ID STREQUAL android)
- set(CLR_CMAKE_HOST_ANDROID 1)
- set(CLR_CMAKE_HOST_OS ${CLR_CMAKE_LINUX_ID})
endif()
endif(DEFINED CLR_CMAKE_LINUX_ID)
-endif(CLR_CMAKE_HOST_OS STREQUAL Linux)
+
+ if(CLR_CMAKE_HOST_OS STREQUAL Android)
+ set(CLR_CMAKE_HOST_ANDROID 1)
+ set(CLR_CMAKE_HOST_OS ${CLR_CMAKE_HOST_OS})
+ endif()
+endif(CLR_CMAKE_HOST_OS STREQUAL Linux OR CLR_CMAKE_HOST_OS STREQUAL Android)
if(CLR_CMAKE_HOST_OS STREQUAL Darwin)
set(CLR_CMAKE_HOST_UNIX 1)
@@ -233,11 +241,11 @@ if(CLR_CMAKE_TARGET_OS STREQUAL alpine)
set(CLR_CMAKE_TARGET_ALPINE_LINUX 1)
endif(CLR_CMAKE_TARGET_OS STREQUAL alpine)
-if(CLR_CMAKE_TARGET_OS STREQUAL android)
+if(CLR_CMAKE_TARGET_OS STREQUAL Android)
set(CLR_CMAKE_TARGET_UNIX 1)
set(CLR_CMAKE_TARGET_LINUX 1)
set(CLR_CMAKE_TARGET_ANDROID 1)
-endif(CLR_CMAKE_TARGET_OS STREQUAL android)
+endif(CLR_CMAKE_TARGET_OS STREQUAL Android)
if(CLR_CMAKE_TARGET_OS STREQUAL Darwin)
set(CLR_CMAKE_TARGET_UNIX 1)
diff --git a/eng/native/configuretools.cmake b/eng/native/configuretools.cmake
index b59504a10e7616..b91237da89bfb7 100644
--- a/eng/native/configuretools.cmake
+++ b/eng/native/configuretools.cmake
@@ -1,6 +1,4 @@
-if(NOT CLR_CMAKE_CONFIGURE_PLATFORM_INCLUDED)
- message(FATAL_ERROR "configuretools.cmake needs to be included after configureplatform.cmake")
-endif()
+include(${CMAKE_CURRENT_LIST_DIR}/configureplatform.cmake)
# Get the version of the compiler that is in the file name for tool location.
set (CLR_CMAKE_COMPILER_FILE_NAME_VERSION "")
@@ -52,7 +50,9 @@ if(NOT WIN32)
if(NOT CLR_CMAKE_TARGET_DARWIN AND NOT CLR_CMAKE_TARGET_IOS)
locate_toolchain_exec(objdump CMAKE_OBJDUMP)
- if(CMAKE_CROSSCOMPILING AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD AND (CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l OR
+ if(CMAKE_SYSTEM_NAME STREQUAL Android)
+ set(TOOLSET_PREFIX ${ANDROID_TOOLCHAIN_PREFIX})
+ elseif(CMAKE_CROSSCOMPILING AND NOT DEFINED CLR_CROSS_COMPONENTS_BUILD AND (CMAKE_SYSTEM_PROCESSOR STREQUAL armv7l OR
CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm))
set(TOOLSET_PREFIX "${TOOLCHAIN}-")
else()
diff --git a/eng/native/functions.cmake b/eng/native/functions.cmake
index aa2c9411ab68d7..cc806fed263433 100644
--- a/eng/native/functions.cmake
+++ b/eng/native/functions.cmake
@@ -1,10 +1,10 @@
function(clr_unknown_arch)
if (WIN32)
- message(FATAL_ERROR "Only AMD64, ARM64, ARM and I386 are supported")
+ message(FATAL_ERROR "Only AMD64, ARM64, ARM and I386 are supported. Found: ${CMAKE_SYSTEM_PROCESSOR}")
elseif(CLR_CROSS_COMPONENTS_BUILD)
- message(FATAL_ERROR "Only AMD64, I386 host are supported for linux cross-architecture component")
+ message(FATAL_ERROR "Only AMD64, I386 host are supported for linux cross-architecture component. Found: ${CMAKE_SYSTEM_PROCESSOR}")
else()
- message(FATAL_ERROR "Only AMD64, ARM64 and ARM are supported")
+ message(FATAL_ERROR "Only AMD64, ARM64 and ARM are supported. Found: ${CMAKE_SYSTEM_PROCESSOR}")
endif()
endfunction()
diff --git a/eng/pipelines/common/platform-matrix.yml b/eng/pipelines/common/platform-matrix.yml
index 4858cc154478c6..38a01a7c5f5f36 100644
--- a/eng/pipelines/common/platform-matrix.yml
+++ b/eng/pipelines/common/platform-matrix.yml
@@ -198,6 +198,66 @@ jobs:
# asArray: []
# ${{ insert }}: ${{ parameters.jobParameters }}
+# iOS x64
+
+- ${{ if containsValue(parameters.platforms, 'iOS_x64') }}:
+ - template: xplat-setup.yml
+ parameters:
+ jobTemplate: ${{ parameters.jobTemplate }}
+ helixQueuesTemplate: ${{ parameters.helixQueuesTemplate }}
+ osGroup: iOS
+ archType: x64
+ platform: iOS_x64
+ jobParameters:
+ runtimeFlavor: mono
+ stagedBuild: ${{ parameters.stagedBuild }}
+ buildConfig: ${{ parameters.buildConfig }}
+ ${{ if eq(parameters.passPlatforms, true) }}:
+ platforms: ${{ parameters.platforms }}
+ helixQueueGroup: ${{ parameters.helixQueueGroup }}
+ managedTestBuildOsGroup: OSX
+ ${{ insert }}: ${{ parameters.jobParameters }}
+
+# iOS arm
+
+- ${{ if containsValue(parameters.platforms, 'iOS_arm') }}:
+ - template: xplat-setup.yml
+ parameters:
+ jobTemplate: ${{ parameters.jobTemplate }}
+ helixQueuesTemplate: ${{ parameters.helixQueuesTemplate }}
+ osGroup: iOS
+ archType: arm
+ platform: iOS_arm
+ jobParameters:
+ runtimeFlavor: mono
+ stagedBuild: ${{ parameters.stagedBuild }}
+ buildConfig: ${{ parameters.buildConfig }}
+ ${{ if eq(parameters.passPlatforms, true) }}:
+ platforms: ${{ parameters.platforms }}
+ helixQueueGroup: ${{ parameters.helixQueueGroup }}
+ managedTestBuildOsGroup: OSX
+ ${{ insert }}: ${{ parameters.jobParameters }}
+
+# iOS arm64
+
+- ${{ if containsValue(parameters.platforms, 'iOS_arm64') }}:
+ - template: xplat-setup.yml
+ parameters:
+ jobTemplate: ${{ parameters.jobTemplate }}
+ helixQueuesTemplate: ${{ parameters.helixQueuesTemplate }}
+ osGroup: iOS
+ archType: arm64
+ platform: iOS_arm64
+ jobParameters:
+ runtimeFlavor: mono
+ stagedBuild: ${{ parameters.stagedBuild }}
+ buildConfig: ${{ parameters.buildConfig }}
+ ${{ if eq(parameters.passPlatforms, true) }}:
+ platforms: ${{ parameters.platforms }}
+ helixQueueGroup: ${{ parameters.helixQueueGroup }}
+ managedTestBuildOsGroup: OSX
+ ${{ insert }}: ${{ parameters.jobParameters }}
+
# macOS x64
- ${{ if or(containsValue(parameters.platforms, 'OSX_x64'), eq(parameters.platformGroup, 'all')) }}:
diff --git a/eng/pipelines/common/xplat-setup.yml b/eng/pipelines/common/xplat-setup.yml
index 817443e041e4cf..d644907ae3d63d 100644
--- a/eng/pipelines/common/xplat-setup.yml
+++ b/eng/pipelines/common/xplat-setup.yml
@@ -87,6 +87,10 @@ jobs:
${{ if eq(parameters.osGroup, 'OSX') }}:
vmImage: 'macOS-10.14'
+ # Public OSX Build Pool
+ ${{ if eq(parameters.osGroup, 'iOS') }}:
+ vmImage: 'macOS-10.14'
+
# Official Build Windows Pool
${{ if and(eq(parameters.osGroup, 'Windows_NT'), ne(variables['System.TeamProject'], 'public')) }}:
name: NetCoreInternal-Pool
diff --git a/eng/pipelines/coreclr/jit-experimental.yml b/eng/pipelines/coreclr/jit-experimental.yml
new file mode 100644
index 00000000000000..e49b0ce4ca57ac
--- /dev/null
+++ b/eng/pipelines/coreclr/jit-experimental.yml
@@ -0,0 +1,41 @@
+trigger: none
+
+pr: none
+
+schedules:
+- cron: "0 22 * * 0,6"
+ displayName: Sat and Sun at 2:00 PM (UTC-8:00)
+ branches:
+ include:
+ - master
+ always: true
+
+jobs:
+#
+# Checkout repository
+#
+- template: /eng/pipelines/common/checkout-job.yml
+
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml
+ buildConfig: checked
+ platforms:
+ - Linux_x64
+ - Windows_NT_x64
+ jobParameters:
+ testGroup: jit-experimental
+
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/coreclr/templates/test-job.yml
+ buildConfig: checked
+ platforms:
+ - Linux_x64
+ - Windows_NT_x64
+ helixQueueGroup: ci
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+ managedOsxBuild: false
+ jobParameters:
+ testGroup: jit-experimental
+ liveLibrariesBuildConfig: Release
diff --git a/eng/pipelines/coreclr/templates/build-job.yml b/eng/pipelines/coreclr/templates/build-job.yml
index 83308bbcd06d9f..3274634f5a39d6 100644
--- a/eng/pipelines/coreclr/templates/build-job.yml
+++ b/eng/pipelines/coreclr/templates/build-job.yml
@@ -1,20 +1,20 @@
parameters:
- buildConfig: ''
archType: ''
+ buildConfig: ''
+ compilerName: 'clang'
+ condition: true
+ container: ''
+ crossrootfsDir: ''
+ isOfficialBuild: false
osGroup: ''
osSubgroup: ''
platform: ''
- container: ''
- testGroup: ''
- crossrootfsDir: ''
- timeoutInMinutes: ''
+ pool: ''
signBinaries: false
stagedBuild: false
+ testGroup: ''
+ timeoutInMinutes: ''
variables: {}
- pool: ''
- isOfficialBuild: false
- condition: true
- useGCC: false
### Product build
jobs:
@@ -33,10 +33,10 @@ jobs:
condition: ${{ parameters.condition }}
# Compute job name from template parameters
- ${{ if eq(parameters.useGCC, true) }}:
- name: ${{ format('coreclr_gcc_product_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
+ ${{ if eq(parameters.compilerName, 'gcc') }}:
name: ${{ format('coreclr_{0}_product_build_{1}{2}_{3}_{4}', parameters.compilerName, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
displayName: ${{ format('CoreCLR GCC Product Build {0}{1} {2} {3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
- ${{ if eq(parameters.useGCC, false) }}:
+ ${{ if eq(parameters.compilerName, 'clang') }}:
name: ${{ format('coreclr_product_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
displayName: ${{ format('CoreCLR Product Build {0}{1} {2} {3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
@@ -60,17 +60,13 @@ jobs:
- ${{ if eq(parameters.buildConfig, 'Release') }}:
- name: stripSymbolsArg
value: '-stripsymbols'
- - name: clangArg
- value: ''
- - name: gccArg
+ - name: compilerArg
value: ''
- name: publishLogsArtifactPrefix
value: 'BuildLogs_CoreCLR'
- - ${{ if eq(parameters.useGCC, true) }}:
- - name: gccArg
+ - ${{ if eq(parameters.compilerName, 'gcc') }}:
+ - name: compilerArg
value: '-gcc'
- - name: clangArg
- value: ''
- name: publishLogsArtifactPrefix
value: 'BuildLogs_CoreCLR_GCC'
# workaround for gcc directory not in PATH
@@ -78,20 +74,20 @@ jobs:
value: /opt/rh/devtoolset-7/root/usr/bin/gcc
- name: CLR_CXX
value: /opt/rh/devtoolset-7/root/usr/bin/g++
- - ${{ if and(ne(parameters.osGroup, 'Windows_NT'), not(parameters.useGCC)) }}:
- - name: clangArg
+ - ${{ if and(ne(parameters.osGroup, 'Windows_NT'), eq(parameters.compilerName, 'clang')) }}:
+ - name: compilerArg
value: '-clang9'
# Our FreeBSD doesn't yet detect available clang versions, so pass it explicitly.
- ${{ if eq(parameters.osGroup, 'FreeBSD') }}:
- - name: clangArg
+ - name: compilerArg
value: '-clang6.0'
# Building for x64 MUSL happens on Alpine Linux and we need to use the stable version available there
- ${{ if and(eq(parameters.osGroup, 'Linux'), eq(parameters.osSubgroup, '_musl'), eq(parameters.archType, 'x64')) }}:
- - name: clangArg
+ - name: compilerArg
value: ''
# AppleClang has different version scheme, so we let complier introspection pick up the available clang from PATH
- ${{ if eq(parameters.osGroup, 'OSX') }}:
- - name: clangArg
+ - name: compilerArg
value: ''
- ${{ if and(ne(variables['System.TeamProject'], 'public'), ne(variables['Build.Reason'], 'PullRequest')) }}:
# Variables used to publish packages to blob feed
@@ -141,7 +137,7 @@ jobs:
# Build CoreCLR Runtime
- ${{ if ne(parameters.osGroup, 'Windows_NT') }}:
- - script: $(coreClrRepoRootDir)build-runtime$(scriptExt) $(buildConfig) $(archType) $(crossArg) -ci $(clangArg) $(gccArg) $(stripSymbolsArg) $(officialBuildIdArg)
+ - script: $(coreClrRepoRootDir)build-runtime$(scriptExt) $(buildConfig) $(archType) $(crossArg) -ci $(compilerArg) $(stripSymbolsArg) $(officialBuildIdArg)
displayName: Build CoreCLR Runtime
- ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
- script: set __TestIntermediateDir=int&&$(coreClrRepoRootDir)build-runtime$(scriptExt) $(buildConfig) $(archType) -ci $(enforcePgoArg) $(officialBuildIdArg)
@@ -156,7 +152,7 @@ jobs:
displayName: Build managed product components and packages
# Build native test components
- - script: $(coreClrRepoRootDir)build-test$(scriptExt) skipmanaged $(buildConfig) $(archType) $(crossArg) $(priorityArg) $(clangArg) $(gccArg) skipgeneratelayout
+ - script: $(coreClrRepoRootDir)build-test$(scriptExt) skipmanaged $(buildConfig) $(archType) $(crossArg) $(priorityArg) $(compilerArg) skipgeneratelayout
displayName: Build native test components
# Sign on Windows
@@ -173,7 +169,7 @@ jobs:
condition: always()
# We only test on clang binaries.
- - ${{ if eq(parameters.useGCC, false) }}:
+ - ${{ if eq(parameters.compilerName, 'clang') }}:
# Publish product output directory for consumption by tests.
- template: /eng/pipelines/common/upload-artifact-step.yml
parameters:
diff --git a/eng/pipelines/coreclr/templates/format-job.yml b/eng/pipelines/coreclr/templates/format-job.yml
index 740585c6dfc612..e71b66b1f27d8e 100644
--- a/eng/pipelines/coreclr/templates/format-job.yml
+++ b/eng/pipelines/coreclr/templates/format-job.yml
@@ -26,7 +26,11 @@ jobs:
name: ${{ format('format_{0}{1}_{2}', parameters.osGroup, parameters.osSubgroup, parameters.archType) }}
displayName: ${{ format('Formatting {0}{1} {2}', parameters.osGroup, parameters.osSubgroup, parameters.archType) }}
helixType: 'format'
- pool: ${{ parameters.pool }}
+ ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
+ pool:
+ vmImage: 'windows-2019'
+ ${{ if ne(parameters.osGroup, 'Windows_NT') }}:
+ pool: ${{ parameters.pool }}
variables: ${{ parameters.variables }}
condition: ${{ parameters.condition }}
steps:
@@ -38,6 +42,12 @@ jobs:
version: '3.x'
includePreviewVersions: true
installationPath: $(Agent.ToolsDirectory)/dotnet
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '3.x'
+ addToPath: true
+ architecture: 'x64'
+ condition: ${{ eq(parameters.osGroup, 'Windows_NT') }}
- task: PythonScript@0
displayName: Run tests/scripts/format.py
inputs:
diff --git a/eng/pipelines/coreclr/templates/helix-queues-setup.yml b/eng/pipelines/coreclr/templates/helix-queues-setup.yml
index 12fb8ad042b3c5..e35a69e6f7d5b0 100644
--- a/eng/pipelines/coreclr/templates/helix-queues-setup.yml
+++ b/eng/pipelines/coreclr/templates/helix-queues-setup.yml
@@ -116,9 +116,8 @@ jobs:
# Windows_NT arm
- ${{ if eq(parameters.platform, 'Windows_NT_arm') }}:
- # Currently blocked by https://github.com/dotnet/runtime/issues/32320
- # - ${{ if and(eq(variables['System.TeamProject'], 'public'), in(parameters.jobParameters.helixQueueGroup, 'pr', 'ci', 'libraries')) }}:
- # - Windows.10.Arm64v8.Open
+ - ${{ if and(eq(variables['System.TeamProject'], 'public'), in(parameters.jobParameters.helixQueueGroup, 'pr', 'ci', 'libraries')) }}:
+ - Windows.10.Arm64v8.Open
- ${{ if eq(variables['System.TeamProject'], 'internal') }}:
- Windows.10.Arm64
diff --git a/eng/pipelines/coreclr/templates/run-test-job.yml b/eng/pipelines/coreclr/templates/run-test-job.yml
index ffe1e50a06bcc3..8be5c5eb49eab3 100644
--- a/eng/pipelines/coreclr/templates/run-test-job.yml
+++ b/eng/pipelines/coreclr/templates/run-test-job.yml
@@ -100,7 +100,7 @@ jobs:
# TODO: update these numbers as they were determined long ago
${{ if eq(parameters.testGroup, 'innerloop') }}:
timeoutInMinutes: 150
- ${{ if in(parameters.testGroup, 'outerloop') }}:
+ ${{ if in(parameters.testGroup, 'outerloop', 'jit-experimental') }}:
timeoutInMinutes: 270
${{ if in(parameters.testGroup, 'gc-longrunning', 'gc-simulator') }}:
timeoutInMinutes: 480
@@ -232,7 +232,7 @@ jobs:
${{ if and(ne(parameters.corefxTests, true), eq(parameters.testGroup, 'innerloop')) }}:
timeoutPerTestCollectionInMinutes: 30
timeoutPerTestInMinutes: 10
- ${{ if in(parameters.testGroup, 'outerloop') }}:
+ ${{ if in(parameters.testGroup, 'outerloop', 'jit-experimental') }}:
timeoutPerTestCollectionInMinutes: 120
timeoutPerTestInMinutes: 10
${{ if in(parameters.testGroup, 'gc-longrunning', 'gc-simulator') }}:
@@ -378,6 +378,13 @@ jobs:
scenarios:
- jitelthookenabled
- jitelthookenabled_tiered
+ ${{ if in(parameters.testGroup, 'jit-experimental') }}:
+ scenarios:
+ - jitosr
+ - jitosr_stress
+ - jitguardeddevirtualization
+ - jitehwritethru
+ - jitobjectstackallocation
# Publish Logs
- task: PublishPipelineArtifact@1
diff --git a/eng/pipelines/libraries/base-job.yml b/eng/pipelines/libraries/base-job.yml
index 85ea99f75c30f5..e24d0d3328bbc4 100644
--- a/eng/pipelines/libraries/base-job.yml
+++ b/eng/pipelines/libraries/base-job.yml
@@ -40,7 +40,8 @@ jobs:
- _BuildConfig: ${{ parameters.buildConfig }}
- _msbuildCommonParameters: ''
- - _stripSymbolsArg: ''
+ # rename this variable, due to collision with build-native.proj
+ - _stripSymbolsArgYaml: ''
- _runtimeOSArg: ''
- _finalFrameworkArg: ''
- _buildScript: $(_buildScriptFileName)$(scriptExt)
@@ -70,6 +71,10 @@ jobs:
- ${{ if eq(parameters.osGroup, 'WebAssembly') }}:
- _runtimeOSArg: -os ${{ parameters.osGroup }}
+ # force a value for OS when cross-building iOS on OSX
+ - ${{ if eq(parameters.osGroup, 'iOS') }}:
+ - _runtimeOSArg: -os ${{ parameters.osGroup }}
+
- ${{ if ne(parameters.framework, '') }}:
- _finalFrameworkArg: -framework ${{ parameters.framework }}
- _extraHelixArguments: /p:BuildTargetFramework=${{ parameters.framework }}
@@ -107,9 +112,9 @@ jobs:
- ${{ if ne(parameters.osGroup, 'Windows_NT') }}:
- _buildScript: ./$(_buildScriptFileName)$(scriptExt)
- ${{ if eq(parameters.isOfficialBuild, 'true') }}:
- - _stripSymbolsArg: -stripSymbols
+ - _stripSymbolsArgYaml: -stripSymbols
- - _buildArguments: -configuration ${{ parameters.buildConfig }} -ci -arch ${{ parameters.archType }} $(_finalFrameworkArg) $(_stripSymbolsArg) $(_testScopeArg) $(_warnAsErrorArg) $(_runtimeOSArg) $(_msbuildCommonParameters) $(_runtimeArtifactsPathArg) $(_crossBuildPropertyArg)
+ - _buildArguments: -configuration ${{ parameters.buildConfig }} -ci -arch ${{ parameters.archType }} $(_finalFrameworkArg) $(_stripSymbolsArgYaml) $(_testScopeArg) $(_warnAsErrorArg) $(_runtimeOSArg) $(_msbuildCommonParameters) $(_runtimeArtifactsPathArg) $(_crossBuildPropertyArg)
- ${{ parameters.variables }}
dependsOn:
diff --git a/eng/pipelines/mono/templates/build-job.yml b/eng/pipelines/mono/templates/build-job.yml
index d23bfded5a4e8f..e554e749a1c83c 100644
--- a/eng/pipelines/mono/templates/build-job.yml
+++ b/eng/pipelines/mono/templates/build-job.yml
@@ -67,6 +67,9 @@ jobs:
value: /p:OutputRid=linux-musl-${{ parameters.archType }}
- name: _PortableBuild
value: true
+ - ${{ if eq(parameters.osGroup, 'iOS') }}:
+ - name: osOverride
+ value: -os iOS
- ${{ parameters.variables }}
steps:
@@ -75,7 +78,7 @@ jobs:
# Linux builds use docker images with dependencies preinstalled,
# and FreeBSD builds use a build agent with dependencies
# preinstalled, so we only need this step for OSX and Windows.
- - ${{ if eq(parameters.osGroup, 'OSX') }}:
+ - ${{ if in(parameters.osGroup, 'OSX', 'iOS') }}:
- script: sh $(Build.SourcesDirectory)/eng/install-native-dependencies.sh $(osGroup)
displayName: Install native dependencies
- ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
@@ -85,10 +88,10 @@ jobs:
# Build
- ${{ if ne(parameters.osGroup, 'Windows_NT') }}:
- - script: ./mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) -ci /p:MonoEnableLLVM=${{ parameters.llvm }}
+ - script: ./mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) $(osOverride) -ci /p:MonoEnableLLVM=${{ parameters.llvm }}
displayName: Build product
- ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
- - script: mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) -ci /p:MonoEnableLLVM=${{ parameters.llvm }}
+ - script: mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) $(osOverride) -ci /p:MonoEnableLLVM=${{ parameters.llvm }}
displayName: Build product
# Publish product output directory for consumption by tests.
@@ -104,10 +107,10 @@ jobs:
# Build packages
- ${{ if and(ne(parameters.llvm, true), ne(parameters.osGroup, 'Windows_NT')) }}:
- - script: ./mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) -ci $(officialBuildIdArg) /p:MonoEnableLLVM=${{ parameters.llvm }} -pack $(OutputRidArg)
+ - script: ./mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) $(osOverride) -ci $(officialBuildIdArg) /p:MonoEnableLLVM=${{ parameters.llvm }} -pack $(OutputRidArg)
displayName: Build nupkg
- ${{ if and(ne(parameters.llvm, true), eq(parameters.osGroup, 'Windows_NT')) }}:
- - script: mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) -ci $(officialBuildIdArg) /p:MonoEnableLLVM=${{ parameters.llvm }} -pack $(OutputRidArg)
+ - script: mono$(scriptExt) -configuration $(buildConfig) -arch $(archType) $(osOverride) -ci $(officialBuildIdArg) /p:MonoEnableLLVM=${{ parameters.llvm }} -pack $(OutputRidArg)
displayName: Build nupkg
# Save packages using the prepare-signed-artifacts format.
diff --git a/eng/pipelines/mono/templates/xplat-job.yml b/eng/pipelines/mono/templates/xplat-job.yml
index 8d7465d7f88453..2cd61dc7e7f3e4 100644
--- a/eng/pipelines/mono/templates/xplat-job.yml
+++ b/eng/pipelines/mono/templates/xplat-job.yml
@@ -56,7 +56,7 @@ jobs:
agentOs: Ubuntu
${{ if eq(parameters.osGroup, 'FreeBSD') }}:
agentOs: FreeBSD
- ${{ if eq(parameters.osGroup, 'OSX') }}:
+ ${{ if in(parameters.osGroup, 'OSX', 'iOS') }}:
agentOs: MacOS
${{ if eq(parameters.osGroup, 'Windows_NT') }}:
agentOs: Windows_NT
diff --git a/eng/pipelines/runtime-official.yml b/eng/pipelines/runtime-official.yml
index 7619c2e533e1b8..a49a4b1bbda73e 100644
--- a/eng/pipelines/runtime-official.yml
+++ b/eng/pipelines/runtime-official.yml
@@ -69,6 +69,9 @@ stages:
runtimeFlavor: mono
buildConfig: release
platforms:
+ - iOS_x64
+ - iOS_arm
+ - iOS_arm64
- OSX_x64
- Linux_x64
- Linux_arm
@@ -104,6 +107,22 @@ stages:
isOfficialBuild: ${{ variables.isOfficialBuild }}
liveRuntimeBuildConfig: release
+ #
+ # Build libraries using live CoreLib from Mono
+ #
+ - template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/libraries/build-job.yml
+ buildConfig: Release
+ runtimeFlavor: mono
+ platforms:
+ - iOS_x64
+ - iOS_arm
+ - iOS_arm64
+ jobParameters:
+ isOfficialBuild: ${{ variables.isOfficialBuild }}
+ liveRuntimeBuildConfig: release
+
#
# Build libraries AllConfigurations for packages
#
diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml
index 4965c687d5ba2a..8f909825a4311b 100644
--- a/eng/pipelines/runtime.yml
+++ b/eng/pipelines/runtime.yml
@@ -154,7 +154,7 @@ jobs:
- Linux_x64
jobParameters:
testGroup: innerloop
- useGCC: true
+ compilerName: gcc
condition: >-
or(
eq(dependencies.checkout.outputs['SetPathVars_coreclr.containsChange'], true),
@@ -202,7 +202,8 @@ jobs:
#
# Build CoreCLR Formatting Job
-# Only when CoreCLR is changed
+# Only when CoreCLR is changed, and only in the 'master' branch (no release branches;
+# both CI and PR builds).
#
- template: /eng/pipelines/common/platform-matrix.yml
parameters:
@@ -212,9 +213,13 @@ jobs:
- Windows_NT_x64
jobParameters:
condition: >-
- or(
- eq(dependencies.checkout.outputs['SetPathVars_coreclr.containsChange'], true),
- eq(variables['isFullMatrix'], true))
+ and(
+ or(
+ eq(variables['Build.SourceBranchName'], 'master'),
+ eq(variables['System.PullRequest.TargetBranch'], 'master')),
+ or(
+ eq(dependencies.checkout.outputs['SetPathVars_coreclr.containsChange'], true),
+ eq(variables['isFullMatrix'], true)))
#
# Build Mono debug
@@ -226,6 +231,9 @@ jobs:
runtimeFlavor: mono
buildConfig: debug
platforms:
+ - iOS_x64
+ - iOS_arm
+ - iOS_arm64
- OSX_x64
- Linux_x64
- Linux_arm
@@ -253,6 +261,9 @@ jobs:
runtimeFlavor: mono
buildConfig: release
platforms:
+ - iOS_x64
+ - iOS_arm
+ - iOS_arm64
- OSX_x64
- Linux_x64
- Linux_arm
@@ -357,6 +368,34 @@ jobs:
- Windows_NT_x64
jobParameters:
liveRuntimeBuildConfig: release
+
+#
+# Build libraries using Mono CoreLib only
+#
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/libraries/build-job.yml
+ buildConfig: Release
+ runtimeFlavor: mono
+ platforms:
+ - iOS_arm
+ - iOS_arm64
+ - iOS_x64
+ jobParameters:
+ liveRuntimeBuildConfig: release
+
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/libraries/build-job.yml
+ buildConfig: Debug
+ runtimeFlavor: mono
+ platforms:
+ - iOS_arm
+ - iOS_arm64
+ - iOS_x64
+ jobParameters:
+ liveRuntimeBuildConfig: debug
+
#
# Libraries Build that only run when libraries is changed
#
diff --git a/global.json b/global.json
index 6d71700e679e06..d3834c617f6cef 100644
--- a/global.json
+++ b/global.json
@@ -5,17 +5,17 @@
"rollForward": "major"
},
"tools": {
- "dotnet": "5.0.100-preview.2.20157.1"
+ "dotnet": "5.0.100-preview.3.20168.11"
},
"native-tools": {
"cmake": "3.14.2",
- "python": "2.7.15"
+ "python3": "3.7.1"
},
"msbuild-sdks": {
- "Microsoft.DotNet.Build.Tasks.TargetFramework.Sdk": "5.0.0-beta.20162.3",
- "Microsoft.DotNet.Arcade.Sdk": "5.0.0-beta.20162.3",
- "Microsoft.DotNet.Build.Tasks.SharedFramework.Sdk": "5.0.0-beta.20162.3",
- "Microsoft.DotNet.Helix.Sdk": "5.0.0-beta.20162.3",
+ "Microsoft.DotNet.Build.Tasks.TargetFramework.Sdk": "5.0.0-beta.20168.2",
+ "Microsoft.DotNet.Arcade.Sdk": "5.0.0-beta.20168.2",
+ "Microsoft.DotNet.Build.Tasks.SharedFramework.Sdk": "5.0.0-beta.20168.2",
+ "Microsoft.DotNet.Helix.Sdk": "5.0.0-beta.20168.2",
"FIX-85B6-MERGE-9C38-CONFLICT": "1.0.0",
"Microsoft.NET.Sdk.IL": "5.0.0-alpha.1.20076.2",
"Microsoft.Build.NoTargets": "1.0.53",
diff --git a/src/coreclr/CMakeLists.txt b/src/coreclr/CMakeLists.txt
index 7e2b66caeec025..0e75f4473640dd 100644
--- a/src/coreclr/CMakeLists.txt
+++ b/src/coreclr/CMakeLists.txt
@@ -5,7 +5,7 @@ cmake_policy(SET CMP0042 NEW)
# Set the project name
project(CoreCLR)
-include(${CLR_ENG_NATIVE_DIR}/configureplatform.cmake)
+include(${CLR_ENG_NATIVE_DIR}/configuretools.cmake)
if (CLR_CMAKE_HOST_WIN32)
message(STATUS "VS_PLATFORM_TOOLSET is ${CMAKE_VS_PLATFORM_TOOLSET}")
@@ -30,7 +30,6 @@ if(CORECLR_SET_RPATH)
endif(CORECLR_SET_RPATH)
OPTION(CLR_CMAKE_ENABLE_CODE_COVERAGE "Enable code coverage" OFF)
-OPTION(CLR_CMAKE_WARNINGS_ARE_ERRORS "Warnings are errors" ON)
# Ensure other tools are present
if (CLR_CMAKE_HOST_WIN32)
@@ -110,11 +109,14 @@ else (CLR_CMAKE_HOST_WIN32)
endif()
endif(CLR_CMAKE_HOST_WIN32)
+if(CLR_CMAKE_TARGET_ANDROID)
+ add_definitions(-DTARGET_ANDROID)
+endif()
+
#----------------------------------------------------
# Configure compiler settings for environment
#----------------------------------------------------
include(configurecompiler.cmake)
-include(${CLR_ENG_NATIVE_DIR}/configuretools.cmake)
#----------------------------------------------------
# Cross target Component build specific configuration
@@ -141,6 +143,18 @@ endif(NOT CLR_CROSS_COMPONENTS_BUILD)
# - do not depend on clr's compile definitions
#-----------------------------------------
if(CLR_CMAKE_HOST_UNIX)
+ if(CLR_CMAKE_TARGET_ANDROID)
+ find_library(LZMA NAMES lzma)
+ if(LZMA STREQUAL LZMA-NOTFOUND)
+ message(FATAL_ERROR "Cannot find liblzma.")
+ endif(LZMA STREQUAL LZMA-NOTFOUND)
+
+ find_library(ANDROID_GLOB NAMES android-glob)
+ if(ANDROID_GLOB STREQUAL ANDROID_GLOB-NOTFOUND)
+ message(FATAL_ERROR "Cannot find android-glob.")
+ endif()
+ endif()
+
add_subdirectory(src/pal)
add_subdirectory(src/hosts)
endif(CLR_CMAKE_HOST_UNIX)
diff --git a/src/coreclr/build-runtime.sh b/src/coreclr/build-runtime.sh
index 0de4a51ce5f091..b22383cfaa7556 100755
--- a/src/coreclr/build-runtime.sh
+++ b/src/coreclr/build-runtime.sh
@@ -105,11 +105,6 @@ build_cross_architecture_components()
handle_arguments_local() {
case "$1" in
- ignorewarnings|-ignorewarnings)
- __IgnoreWarnings=1
- __CMakeArgs="-DCLR_CMAKE_WARNINGS_ARE_ERRORS=OFF $__CMakeArgs"
- ;;
-
nopgooptimize|-nopgooptimize)
__PgoOptimize=0
__SkipRestoreOptData=1
@@ -149,7 +144,6 @@ __RepoRootDir="$(cd "$__ProjectRoot"/../..; pwd -P)"
__BuildArch=
__BuildType=Debug
__CodeCoverage=0
-__IgnoreWarnings=0
# Set the various build properties here so that CMake and MSBuild can pick them up
__Compiler=clang
diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake
index a58e3af0d76043..2ea61f45e3177a 100644
--- a/src/coreclr/clrdefinitions.cmake
+++ b/src/coreclr/clrdefinitions.cmake
@@ -1,9 +1,5 @@
include(clrfeatures.cmake)
-# If set, indicates that this is not an officially supported release
-# Keep in sync with IsPrerelease in dir.props
-set(PRERELEASE 1)
-
# Features we're currently flighting, but don't intend to ship in officially supported releases
if (PRERELEASE)
add_definitions(-DFEATURE_UTF8STRING)
@@ -197,6 +193,9 @@ endif(FEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION)
add_definitions(-DFEATURE_SVR_GC)
add_definitions(-DFEATURE_SYMDIFF)
add_compile_definitions($<$>>:FEATURE_TIERED_COMPILATION>)
+if (CLR_CMAKE_TARGET_ARCH_AMD64)
+ add_compile_definitions($<$>>:FEATURE_ON_STACK_REPLACEMENT>)
+endif (CLR_CMAKE_TARGET_ARCH_AMD64)
if (CLR_CMAKE_TARGET_WIN32)
add_definitions(-DFEATURE_TYPEEQUIVALENCE)
endif(CLR_CMAKE_TARGET_WIN32)
diff --git a/src/coreclr/clrfeatures.cmake b/src/coreclr/clrfeatures.cmake
index 078b4e73ac8044..9025602a3c68c3 100644
--- a/src/coreclr/clrfeatures.cmake
+++ b/src/coreclr/clrfeatures.cmake
@@ -11,7 +11,7 @@ if(NOT DEFINED FEATURE_PERFTRACING AND FEATURE_EVENT_TRACE)
endif(NOT DEFINED FEATURE_PERFTRACING AND FEATURE_EVENT_TRACE)
if(NOT DEFINED FEATURE_DBGIPC)
- if(CLR_CMAKE_TARGET_UNIX AND (NOT CLR_CMAKE_TARGET_ANDROID))
+ if(CLR_CMAKE_TARGET_UNIX)
set(FEATURE_DBGIPC 1)
endif()
endif(NOT DEFINED FEATURE_DBGIPC)
diff --git a/src/coreclr/configurecompiler.cmake b/src/coreclr/configurecompiler.cmake
index 792ea045f05086..c460aad3908e81 100644
--- a/src/coreclr/configurecompiler.cmake
+++ b/src/coreclr/configurecompiler.cmake
@@ -288,10 +288,10 @@ if (CLR_CMAKE_HOST_UNIX)
endif()
endif(CLR_CMAKE_HOST_DARWIN)
- if (CLR_CMAKE_WARNINGS_ARE_ERRORS)
- # All warnings that are not explicitly disabled are reported as errors
+ # Suppress warnings-as-errors in release branches to reduce servicing churn
+ if (PRERELEASE)
add_compile_options(-Werror)
- endif(CLR_CMAKE_WARNINGS_ARE_ERRORS)
+ endif(PRERELEASE)
# Disabled common warnings
add_compile_options(-Wno-unused-variable)
diff --git a/src/coreclr/dir.common.props b/src/coreclr/dir.common.props
index 0758a5f404b915..5ab02aec8387c1 100644
--- a/src/coreclr/dir.common.props
+++ b/src/coreclr/dir.common.props
@@ -33,14 +33,12 @@
- $(MSBuildThisFileDirectory)..\..\
$(MSBuildThisFileDirectory)
- $(RootRepoDir)artifacts\obj\coreclr\$(MSBuildProjectName)\
+ $(RepoRoot)artifacts\obj\coreclr\$(MSBuildProjectName)\
$(ProjectDir)src\
- $(RootRepoDir)artifacts\
- $(RootBinDir)bin\coreclr\$(PlatformConfigPathPart)\
+ $(ArtifactsDir)bin\coreclr\$(PlatformConfigPathPart)\
false
$(PackageVersion)
-
- preview8
@@ -83,21 +79,6 @@
Portable
-
-
-
-
- coreclr
-
-
- true
-
-
-
-
- true
-
-
false
diff --git a/src/coreclr/src/CMakeLists.txt b/src/coreclr/src/CMakeLists.txt
index b6584b2de78301..262e6b7d021c3e 100644
--- a/src/coreclr/src/CMakeLists.txt
+++ b/src/coreclr/src/CMakeLists.txt
@@ -16,9 +16,9 @@ endif(CLR_CMAKE_TARGET_WIN32 AND FEATURE_EVENT_TRACE)
add_subdirectory(debug/dbgutil)
if(CLR_CMAKE_HOST_UNIX)
- if(CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86)
+ if(CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)
add_subdirectory(debug/createdump)
- endif(CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86)
+ endif(CLR_CMAKE_HOST_LINUX AND NOT CLR_CMAKE_HOST_UNIX_X86 AND NOT CLR_CMAKE_HOST_ANDROID)
# Include the dummy c++ include files
include_directories("pal/inc/rt/cpp")
diff --git a/src/coreclr/src/System.Private.CoreLib/ILLinkTrim.xml b/src/coreclr/src/System.Private.CoreLib/ILLinkTrim.xml
index 1dc8146af724d6..ff08dc62974c14 100644
--- a/src/coreclr/src/System.Private.CoreLib/ILLinkTrim.xml
+++ b/src/coreclr/src/System.Private.CoreLib/ILLinkTrim.xml
@@ -8,8 +8,6 @@
-
-
diff --git a/src/coreclr/src/System.Private.CoreLib/PinvokeAnalyzerExceptionList.analyzerdata b/src/coreclr/src/System.Private.CoreLib/PinvokeAnalyzerExceptionList.analyzerdata
index 5d45345491c369..141a5c2a73ce0d 100644
--- a/src/coreclr/src/System.Private.CoreLib/PinvokeAnalyzerExceptionList.analyzerdata
+++ b/src/coreclr/src/System.Private.CoreLib/PinvokeAnalyzerExceptionList.analyzerdata
@@ -5,3 +5,6 @@ normaliz.dll!NormalizeString
user32.dll!GetProcessWindowStation
user32.dll!GetUserObjectInformationW
+
+
+kernel32.dll!GetGeoInfo
\ No newline at end of file
diff --git a/src/coreclr/src/System.Private.CoreLib/src/Internal/Runtime/InteropServices/WindowsRuntime/ExceptionSupport.cs b/src/coreclr/src/System.Private.CoreLib/src/Internal/Runtime/InteropServices/WindowsRuntime/ExceptionSupport.cs
index 504f428f54d30e..c7feb47db624e7 100644
--- a/src/coreclr/src/System.Private.CoreLib/src/Internal/Runtime/InteropServices/WindowsRuntime/ExceptionSupport.cs
+++ b/src/coreclr/src/System.Private.CoreLib/src/Internal/Runtime/InteropServices/WindowsRuntime/ExceptionSupport.cs
@@ -3,6 +3,7 @@
// See the LICENSE file in the project root for more information.
using System;
+using System.Diagnostics.CodeAnalysis;
using System.Runtime.InteropServices.WindowsRuntime;
namespace Internal.Runtime.InteropServices.WindowsRuntime
@@ -13,6 +14,7 @@ public static class ExceptionSupport
/// Attach restricted error information to the exception if it may apply to that exception, returning
/// back the input value
///
+ [return: NotNullIfNotNull("e")]
public static Exception? AttachRestrictedErrorInfo(Exception? e)
{
// If there is no exception, then the restricted error info doesn't apply to it
diff --git a/src/coreclr/src/System.Private.CoreLib/src/System/GC.cs b/src/coreclr/src/System.Private.CoreLib/src/System/GC.cs
index c71747cb041f9e..fc8a9650f63b7e 100644
--- a/src/coreclr/src/System.Private.CoreLib/src/System/GC.cs
+++ b/src/coreclr/src/System.Private.CoreLib/src/System/GC.cs
@@ -84,8 +84,16 @@ public static GCMemoryInfo GetGCMemoryInfo()
[DllImport(RuntimeHelpers.QCall, CharSet = CharSet.Unicode)]
internal static extern int _EndNoGCRegion();
+ // keep in sync with GC_ALLOC_FLAGS in gcinterface.h
+ internal enum GC_ALLOC_FLAGS
+ {
+ GC_ALLOC_NO_FLAGS = 0,
+ GC_ALLOC_ZEROING_OPTIONAL = 16,
+ GC_ALLOC_PINNED_OBJECT_HEAP = 64,
+ };
+
[MethodImpl(MethodImplOptions.InternalCall)]
- internal static extern Array AllocateNewArray(IntPtr typeHandle, int length, bool zeroingOptional);
+ internal static extern Array AllocateNewArray(IntPtr typeHandle, int length, GC_ALLOC_FLAGS flags);
[MethodImpl(MethodImplOptions.InternalCall)]
private static extern int GetGenerationWR(IntPtr handle);
@@ -651,31 +659,74 @@ internal static void UnregisterMemoryLoadChangeNotification(Action notification)
}
///
- /// Skips zero-initialization of the array if possible.
- /// If T contains object references, the array is always zero-initialized.
+ /// Allocate an array while skipping zero-initialization if possible.
///
+ /// Specifies the type of the array element.
+ /// Specifies the length of the array.
+ /// Specifies whether the allocated array must be pinned.
+ ///
+ /// If pinned is set to true, must not be a reference type or a type that contains object references.
+ ///
[MethodImpl(MethodImplOptions.AggressiveInlining)] // forced to ensure no perf drop for small memory buffers (hot path)
- internal static T[] AllocateUninitializedArray(int length)
+ public static T[] AllocateUninitializedArray(int length, bool pinned = false)
{
- if (RuntimeHelpers.IsReferenceOrContainsReferences())
+ if (!pinned)
{
- return new T[length];
- }
+ if (RuntimeHelpers.IsReferenceOrContainsReferences())
+ {
+ return new T[length];
+ }
- // for debug builds we always want to call AllocateNewArray to detect AllocateNewArray bugs
+ // for debug builds we always want to call AllocateNewArray to detect AllocateNewArray bugs
#if !DEBUG
- // small arrays are allocated using `new[]` as that is generally faster.
- if (length < 2048 / Unsafe.SizeOf())
+ // small arrays are allocated using `new[]` as that is generally faster.
+ if (length < 2048 / Unsafe.SizeOf())
+ {
+ return new T[length];
+ }
+#endif
+ }
+ else if (RuntimeHelpers.IsReferenceOrContainsReferences())
{
- return new T[length];
+ ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(T));
}
-#endif
+
// kept outside of the small arrays hot path to have inlining without big size growth
- return AllocateNewUninitializedArray(length);
+ return AllocateNewUninitializedArray(length, pinned);
// remove the local function when https://github.com/dotnet/coreclr/issues/5329 is implemented
- static T[] AllocateNewUninitializedArray(int length)
- => Unsafe.As(AllocateNewArray(typeof(T[]).TypeHandle.Value, length, zeroingOptional: true));
+ static T[] AllocateNewUninitializedArray(int length, bool pinned)
+ {
+ GC_ALLOC_FLAGS flags = GC_ALLOC_FLAGS.GC_ALLOC_ZEROING_OPTIONAL;
+ if (pinned)
+ flags |= GC_ALLOC_FLAGS.GC_ALLOC_PINNED_OBJECT_HEAP;
+
+ return Unsafe.As(AllocateNewArray(typeof(T[]).TypeHandle.Value, length, flags));
+ }
+ }
+
+ ///
+ /// Allocate an array.
+ ///
+ /// Specifies the type of the array element.
+ /// Specifies the length of the array.
+ /// Specifies whether the allocated array must be pinned.
+ ///
+ /// If pinned is set to true, must not be a reference type or a type that contains object references.
+ ///
+ public static T[] AllocateArray(int length, bool pinned = false)
+ {
+ GC_ALLOC_FLAGS flags = GC_ALLOC_FLAGS.GC_ALLOC_NO_FLAGS;
+
+ if (pinned)
+ {
+ if (RuntimeHelpers.IsReferenceOrContainsReferences())
+ ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(T));
+
+ flags = GC_ALLOC_FLAGS.GC_ALLOC_PINNED_OBJECT_HEAP;
+ }
+
+ return Unsafe.As(AllocateNewArray(typeof(T[]).TypeHandle.Value, length, flags));
}
}
}
diff --git a/src/coreclr/src/ToolBox/SOS/CMakeLists.txt b/src/coreclr/src/ToolBox/SOS/CMakeLists.txt
index 1fb8efbfe57b29..e8de022432839e 100644
--- a/src/coreclr/src/ToolBox/SOS/CMakeLists.txt
+++ b/src/coreclr/src/ToolBox/SOS/CMakeLists.txt
@@ -5,4 +5,3 @@ if(CLR_CMAKE_TARGET_WIN32)
endif(CLR_CMAKE_TARGET_WIN32)
_install(FILES SOS_README.md DESTINATION .)
-_install(FILES SOS_README.md DESTINATION sharedFramework)
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp
index b8c006040c4f27..5a6f82f5a0ec15 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.cpp
@@ -326,6 +326,38 @@ bool CompileResult::repSetVars(CORINFO_METHOD_HANDLE* ftn, ULONG32* cVars, ICorD
return true;
}
+// Note - Ownership of patchpointInfo is transferred with this call. In replay icorjitinfo we should free it.
+void CompileResult::recSetPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ if (SetPatchpointInfo == nullptr)
+ SetPatchpointInfo = new LightWeightMap();
+
+ Agnostic_SetPatchpointInfo value;
+ value.index = (DWORD)SetPatchpointInfo->AddBuffer((const unsigned char*) patchpointInfo, patchpointInfo->PatchpointInfoSize());
+ SetPatchpointInfo->Add(0, value);
+}
+void CompileResult::dmpSetPatchpointInfo(DWORD key, const Agnostic_SetPatchpointInfo& value)
+{
+ PatchpointInfo* patchpointInfo = (PatchpointInfo*)SetPatchpointInfo->GetBuffer(value.index);
+ printf("SetPatchpointInfo key %u, index %u{", key, value.index);
+ // todo -- dump contents
+ printf("}");
+ SetPatchpointInfo->Unlock();
+}
+bool CompileResult::repSetPatchpointInfo(PatchpointInfo** patchpointInfo)
+{
+ if ((SetPatchpointInfo == nullptr) || (SetPatchpointInfo->GetCount() == 0))
+ {
+ *patchpointInfo = nullptr;
+ return false;
+ }
+
+ Agnostic_SetPatchpointInfo value;
+ value = SetPatchpointInfo->Get(0);
+ *patchpointInfo = (PatchpointInfo*)SetPatchpointInfo->GetBuffer(value.index);
+ return true;
+}
+
void CompileResult::recAllocGCInfo(size_t size, void* retval)
{
allocGCInfoDets.size = size;
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.h
index 9fa9e454462b65..8fce7ab8e9f168 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.h
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/compileresult.h
@@ -112,6 +112,10 @@ class CompileResult
DWORD cVars;
DWORD vars_offset;
};
+ struct Agnostic_SetPatchpointInfo
+ {
+ DWORD index;
+ };
struct Agnostic_CORINFO_EH_CLAUSE2
{
DWORD Flags;
@@ -200,6 +204,10 @@ class CompileResult
void dmpSetVars(DWORD key, const Agnostic_SetVars& value);
bool repSetVars(CORINFO_METHOD_HANDLE* ftn, ULONG32* cVars, ICorDebugInfo::NativeVarInfo** vars);
+ void recSetPatchpointInfo(PatchpointInfo* patchpointInfo);
+ void dmpSetPatchpointInfo(DWORD key, const Agnostic_SetPatchpointInfo& value);
+ bool repSetPatchpointInfo(PatchpointInfo** patchpointInfo);
+
void recAllocGCInfo(size_t size, void* retval);
void recAllocGCInfoCapture();
void dmpAllocGCInfo(DWORD key, const Agnostic_AllocGCInfo& value);
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/crlwmlist.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/crlwmlist.h
index 72fa54043a5484..b42077de7abe4f 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/crlwmlist.h
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/crlwmlist.h
@@ -41,6 +41,7 @@ LWM(SetEHcount, DWORD, DWORD)
LWM(SetEHinfo, DWORD, CompileResult::Agnostic_CORINFO_EH_CLAUSE2)
LWM(SetMethodAttribs, DWORDLONG, DWORD)
LWM(SetVars, DWORD, CompileResult::Agnostic_SetVars)
+LWM(SetPatchpointInfo, DWORD, CompileResult::Agnostic_SetPatchpointInfo)
#undef LWM
#undef DENSELWM
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
index ec468a1f04a821..bb642229cc75ad 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
@@ -180,6 +180,15 @@ void getGSCookie(GSCookie* pCookieVal, // OUT
GSCookie** ppCookieVal // OUT
);
+// Provide patchpoint info for the method currently being jitted.
+void setPatchpointInfo(
+ PatchpointInfo* patchpointInfo
+ );
+
+PatchpointInfo* getOSRInfo(
+ unsigned * ilOffset // OUT
+ );
+
/**********************************************************************************/
//
// ICorModuleInfo
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/lwmlist.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/lwmlist.h
index a7b4c019d5c92f..18abd23e676cf2 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/lwmlist.h
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/lwmlist.h
@@ -112,6 +112,7 @@ LWM(GetMethodSync, DWORDLONG, DLDL)
LWM(GetMethodVTableOffset, DWORDLONG, DDD)
LWM(GetNewArrHelper, DWORDLONG, DWORD)
LWM(GetNewHelper, Agnostic_GetNewHelper, DD)
+LWM(GetOSRInfo, DWORD, Agnostic_GetOSRInfo)
LWM(GetParentType, DWORDLONG, DWORDLONG)
LWM(GetProfilingHandle, DWORD, Agnostic_GetProfilingHandle)
LWM(GetReadyToRunHelper, GetReadyToRunHelper_TOKENin, GetReadyToRunHelper_TOKENout)
@@ -153,5 +154,6 @@ LWM(TryResolveToken, Agnostic_CORINFO_RESOLVED_TOKENin, TryResolveTokenValue)
LWM(SatisfiesClassConstraints, DWORDLONG, DWORD)
LWM(SatisfiesMethodConstraints, DLDL, DWORD)
+
#undef LWM
#undef DENSELWM
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
index 4ca96bd58a4c1e..a988f27211322a 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
@@ -727,6 +727,7 @@ void MethodContext::repCompileMethod(CORINFO_METHOD_INFO* info, unsigned* flags)
info->locals.pSig = (PCCOR_SIGNATURE)CompileMethod->GetBuffer(value.info.locals.pSig_Index);
info->locals.scope = (CORINFO_MODULE_HANDLE)value.info.locals.scope;
info->locals.token = (mdToken)value.info.locals.token;
+
*flags = (unsigned)value.flags;
DEBUG_REP(dmpCompileMethod(0, value));
}
@@ -1124,7 +1125,7 @@ void MethodContext::recGetJitFlags(CORJIT_FLAGS* jitFlags, DWORD sizeInBytes, DW
void MethodContext::dmpGetJitFlags(DWORD key, DD value)
{
CORJIT_FLAGS* jitflags = (CORJIT_FLAGS*)GetJitFlags->GetBuffer(value.A);
- printf("GetJitFlags key %u sizeInBytes-%u jitFlags-%016llX", key, value.B, jitflags->GetFlagsRaw());
+ printf("GetJitFlags key %u sizeInBytes-%u jitFlags-%016llX instructionSetFlags-%016llX", key, value.B, jitflags->GetFlagsRaw(), jitflags->GetInstructionSetFlagsRaw());
GetJitFlags->Unlock();
}
DWORD MethodContext::repGetJitFlags(CORJIT_FLAGS* jitFlags, DWORD sizeInBytes)
@@ -3965,6 +3966,39 @@ void MethodContext::repGetGSCookie(GSCookie* pCookieVal, GSCookie** ppCookieVal)
*ppCookieVal = (GSCookie*)value.B;
}
+void MethodContext::recGetOSRInfo(PatchpointInfo* patchpointInfo, unsigned* ilOffset)
+{
+ if (GetOSRInfo == nullptr)
+ {
+ GetOSRInfo = new LightWeightMap();
+ }
+
+ Agnostic_GetOSRInfo value;
+
+ value.index = (DWORD)GetOSRInfo->AddBuffer((const unsigned char*) patchpointInfo, patchpointInfo->PatchpointInfoSize());
+ value.ilOffset = *ilOffset;
+
+ // use 0 for key
+ DWORD key = 0;
+ GetOSRInfo->Add(key, value);
+ DEBUG_REC(dmpGetOSRInfo(key, value));
+}
+
+void MethodContext::dmpGetOSRInfo(DWORD key, const Agnostic_GetOSRInfo& value)
+{
+ // todo - dump patchpoint info?
+ printf("GetOSRInfo key %u, value patchpointInfo-%u {...} iloffset-%u\n",
+ key, value.index, value.ilOffset);
+}
+
+PatchpointInfo* MethodContext::repGetOSRInfo(unsigned* ilOffset)
+{
+ DWORD key = 0;
+ Agnostic_GetOSRInfo value = GetOSRInfo->Get(key);
+ *ilOffset = value.ilOffset;
+ return (PatchpointInfo*)GetOSRInfo->GetBuffer(value.index);
+}
+
void MethodContext::recGetClassModuleIdForStatics(CORINFO_CLASS_HANDLE cls,
CORINFO_MODULE_HANDLE* pModule,
void** ppIndirection,
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
index 0e7ee454a9ed4a..f87703d6d131b6 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
@@ -176,6 +176,11 @@ class MethodContext
DWORD targetAbi;
DWORD osType;
};
+ struct Agnostic_GetOSRInfo
+ {
+ DWORD index;
+ unsigned ilOffset;
+ };
struct Agnostic_GetFieldAddress
{
DWORDLONG ppIndirection;
@@ -1000,6 +1005,10 @@ class MethodContext
void dmpGetGSCookie(DWORD key, DLDL value);
void repGetGSCookie(GSCookie* pCookieVal, GSCookie** ppCookieVal);
+ void recGetOSRInfo(PatchpointInfo* patchpointInfo, unsigned* ilOffset);
+ void dmpGetOSRInfo(DWORD key, const Agnostic_GetOSRInfo& value);
+ PatchpointInfo* repGetOSRInfo(unsigned* ilOffset);
+
void recGetClassModuleIdForStatics(CORINFO_CLASS_HANDLE cls,
CORINFO_MODULE_HANDLE* pModule,
void** ppIndirection,
@@ -1335,7 +1344,7 @@ class MethodContext
};
// ********************* Please keep this up-to-date to ease adding more ***************
-// Highest packet number: 175
+// Highest packet number: 177
// *************************************************************************************
enum mcPackets
{
@@ -1446,6 +1455,7 @@ enum mcPackets
Packet_GetMethodVTableOffset = 78,
Packet_GetNewArrHelper = 79,
Packet_GetNewHelper = 80,
+ Packet_GetOSRInfo = 177, // Added 3/5/2020
Packet_GetParentType = 81,
Packet_GetPInvokeUnmanagedTarget = 82, // Retired 2/18/2020
Packet_GetProfilingHandle = 83,
@@ -1514,6 +1524,7 @@ enum mcPackets
PacketCR_SetEHinfo = 128,
PacketCR_SetMethodAttribs = 129,
PacketCR_SetVars = 130,
+ PacketCR_SetPatchpointInfo = 176, // Added 8/5/2019
PacketCR_RecordCallSite = 146, // Added 10/28/2013 - to support indirect calls
};
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h
index 3d6c7e23512d5b..a8723ce16b9f8d 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shared/runtimedetails.h
@@ -25,6 +25,7 @@
#include
#include
#include
+#include
/// Turn back on direct access to a few OS level things...
#undef HeapCreate
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
index c7235d08169210..603ebb6e54eeae 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
@@ -365,6 +365,23 @@ void interceptor_ICJI::getGSCookie(GSCookie* pCookieVal, // OUT
mc->recGetGSCookie(pCookieVal, ppCookieVal);
}
+// Provide patchpoint info for the method currently being jitted.
+void interceptor_ICJI::setPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ mc->cr->AddCall("setPatchpointInfo");
+ mc->cr->recSetPatchpointInfo(patchpointInfo); // Since the EE frees it, we must record it before it's sent to the EE.
+ original_ICorJitInfo->setPatchpointInfo(patchpointInfo);
+}
+
+// Get OSR info for the method currently being jitted
+PatchpointInfo* interceptor_ICJI::getOSRInfo(unsigned* ilOffset)
+{
+ mc->cr->AddCall("getOSRInfo");
+ PatchpointInfo* patchpointInfo = original_ICorJitInfo->getOSRInfo(ilOffset);
+ mc->recGetOSRInfo(patchpointInfo, ilOffset);
+ return patchpointInfo;
+}
+
/**********************************************************************************/
//
// ICorModuleInfo
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
index e2792149e87e28..4974c13bb39404 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
@@ -262,6 +262,20 @@ void interceptor_ICJI::getGSCookie(GSCookie* pCookieVal, // OUT
original_ICorJitInfo->getGSCookie(pCookieVal, ppCookieVal);
}
+// Provide patchpoint info for the method currently being jitted.
+void interceptor_ICJI::setPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ mcs->AddCall("setPatchpointInfo");
+ original_ICorJitInfo->setPatchpointInfo(patchpointInfo);
+}
+
+// Get OSR info for the method currently being jitted
+PatchpointInfo* interceptor_ICJI::getOSRInfo(unsigned* ilOffset)
+{
+ mcs->AddCall("getOSRInfo");
+ return original_ICorJitInfo->getOSRInfo(ilOffset);
+}
+
/**********************************************************************************/
//
// ICorModuleInfo
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
index 71b0fa5ba40e7a..0cf00e3b9c90e3 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
@@ -237,6 +237,19 @@ void interceptor_ICJI::getGSCookie(GSCookie* pCookieVal, // OUT
original_ICorJitInfo->getGSCookie(pCookieVal, ppCookieVal);
}
+
+// Provide patchpoint info for the method currently being jitted.
+void interceptor_ICJI::setPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ original_ICorJitInfo->setPatchpointInfo(patchpointInfo);
+}
+
+// Get OSR info for the method currently being jitted
+PatchpointInfo* interceptor_ICJI::getOSRInfo(unsigned* ilOffset)
+{
+ return original_ICorJitInfo->getOSRInfo(ilOffset);
+}
+
/**********************************************************************************/
//
// ICorModuleInfo
diff --git a/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp b/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
index c6085854efca6f..4b6b2d4aa73381 100644
--- a/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
+++ b/src/coreclr/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
@@ -288,6 +288,21 @@ void MyICJI::getGSCookie(GSCookie* pCookieVal, // OUT
jitInstance->mc->repGetGSCookie(pCookieVal, ppCookieVal);
}
+// Provide patchpoint info for the method currently being jitted.
+void MyICJI::setPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ jitInstance->mc->cr->AddCall("setPatchpointInfo");
+ jitInstance->mc->cr->recSetPatchpointInfo(patchpointInfo);
+ freeArray(patchpointInfo); // See note in recSetPatchpointInfo... we own destroying this array
+}
+
+// Get OSR info for the method currently being jitted
+PatchpointInfo* MyICJI::getOSRInfo(unsigned* ilOffset)
+{
+ jitInstance->mc->cr->AddCall("getOSRInfo");
+ return jitInstance->mc->repGetOSRInfo(ilOffset);
+}
+
/**********************************************************************************/
//
// ICorModuleInfo
diff --git a/src/coreclr/src/debug/daccess/daccess.cpp b/src/coreclr/src/debug/daccess/daccess.cpp
index fea12eb75fcacc..cf25b1c4eb5461 100644
--- a/src/coreclr/src/debug/daccess/daccess.cpp
+++ b/src/coreclr/src/debug/daccess/daccess.cpp
@@ -5615,12 +5615,7 @@ ClrDataAccess::Initialize(void)
// Thus, when DAC is initialized, initialize utilcode with the base address of the runtime loaded in the
// target process. This is similar to work done in CorDB::SetTargetCLR for mscordbi.
- // Initialize UtilCode for SxS scenarios
- CoreClrCallbacks cccallbacks;
- cccallbacks.m_hmodCoreCLR = (HINSTANCE)m_globalBase; // Base address of the runtime in the target process
- cccallbacks.m_pfnIEE = NULL;
- cccallbacks.m_pfnGetCORSystemDirectory = NULL;
- InitUtilcode(cccallbacks);
+ g_hmodCoreCLR = (HINSTANCE)m_globalBase; // Base address of the runtime in the target process
return S_OK;
}
diff --git a/src/coreclr/src/debug/dbgutil/CMakeLists.txt b/src/coreclr/src/debug/dbgutil/CMakeLists.txt
index 0ff9188e0c3511..bd96e9baa1e052 100644
--- a/src/coreclr/src/debug/dbgutil/CMakeLists.txt
+++ b/src/coreclr/src/debug/dbgutil/CMakeLists.txt
@@ -1,12 +1,11 @@
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
if(CLR_CMAKE_HOST_WIN32)
+ include_directories(${CLR_DIR}/src/inc/llvm)
#use static crt
add_definitions(-MT)
endif(CLR_CMAKE_HOST_WIN32)
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
-
-include_directories(${CLR_DIR}/src/inc/llvm)
-
add_definitions(-DPAL_STDCPP_COMPAT)
if(CLR_CMAKE_TARGET_ALPINE_LINUX)
diff --git a/src/coreclr/src/debug/di/rsmain.cpp b/src/coreclr/src/debug/di/rsmain.cpp
index f58026a889ed9a..5ab2d7d685f9e8 100644
--- a/src/coreclr/src/debug/di/rsmain.cpp
+++ b/src/coreclr/src/debug/di/rsmain.cpp
@@ -1433,11 +1433,7 @@ HRESULT Cordb::SetTargetCLR(HMODULE hmodTargetCLR)
// the same model because coreclr.dll isn't in this process and hmodTargetCLR
// is the debuggee target, not the coreclr.dll to bind utilcode to..
- CoreClrCallbacks cccallbacks;
- cccallbacks.m_hmodCoreCLR = hmodTargetCLR;
- cccallbacks.m_pfnIEE = NULL;
- cccallbacks.m_pfnGetCORSystemDirectory = NULL;
- InitUtilcode(cccallbacks);
+ g_hmodCoreCLR = hmodTargetCLR;
return S_OK;
}
diff --git a/src/coreclr/src/debug/ee/controller.h b/src/coreclr/src/debug/ee/controller.h
index 2775fe6f21aa5b..155b020b01c74b 100644
--- a/src/coreclr/src/debug/ee/controller.h
+++ b/src/coreclr/src/debug/ee/controller.h
@@ -1174,8 +1174,6 @@ class DebuggerController
virtual void DebuggerDetachClean();
public:
- static const BYTE *g_pMSCorEEStart, *g_pMSCorEEEnd;
-
static const BYTE *GetILPrestubDestination(const BYTE *prestub);
static const BYTE *GetILFunctionCode(MethodDesc *fd);
diff --git a/src/coreclr/src/debug/ee/debugger.cpp b/src/coreclr/src/debug/ee/debugger.cpp
index 93d0edc6838d34..cbf6df50871e5e 100644
--- a/src/coreclr/src/debug/ee/debugger.cpp
+++ b/src/coreclr/src/debug/ee/debugger.cpp
@@ -1751,7 +1751,7 @@ void Debugger::SendRawEvent(const DebuggerIPCEvent * pManagedEvent)
// The debugger can then use ReadProcessMemory to read through this array.
ULONG_PTR rgData [] = {
CLRDBG_EXCEPTION_DATA_CHECKSUM,
- (ULONG_PTR) g_pMSCorEE,
+ (ULONG_PTR) g_hThisInst,
(ULONG_PTR) pManagedEvent
};
@@ -5669,7 +5669,7 @@ bool Debugger::FirstChanceNativeException(EXCEPTION_RECORD *exception,
// Ignore any notification exceptions sent from code:Debugger.SendRawEvent.
// This is not a common case, but could happen in some cases described
// in SendRawEvent. Either way, Left-Side and VM should just ignore these.
- if (IsEventDebuggerNotification(exception, PTR_TO_CORDB_ADDRESS(g_pMSCorEE)))
+ if (IsEventDebuggerNotification(exception, PTR_TO_CORDB_ADDRESS(g_hThisInst)))
{
return true;
}
@@ -12363,7 +12363,7 @@ void Debugger::GetAndSendTransitionStubInfo(CORDB_ADDRESS_TYPE *stubAddress)
// If its not a stub, then maybe its an address in mscoree?
if (result == false)
{
- result = (IsIPInModule(g_pMSCorEE, (PCODE)stubAddress) == TRUE);
+ result = (IsIPInModule(g_hThisInst, (PCODE)stubAddress) == TRUE);
}
// This is a synchronous event (reply required)
diff --git a/src/coreclr/src/debug/ee/rcthread.cpp b/src/coreclr/src/debug/ee/rcthread.cpp
index 87111404374a5f..85186e080034fa 100644
--- a/src/coreclr/src/debug/ee/rcthread.cpp
+++ b/src/coreclr/src/debug/ee/rcthread.cpp
@@ -17,7 +17,6 @@
#include "securitywrapper.h"
#endif
#include
-#include
#include "eemessagebox.h"
diff --git a/src/coreclr/src/debug/inc/common.h b/src/coreclr/src/debug/inc/common.h
index 034d9bb241022e..e31ac3aac6c371 100644
--- a/src/coreclr/src/debug/inc/common.h
+++ b/src/coreclr/src/debug/inc/common.h
@@ -205,10 +205,6 @@ HRESULT FindNativeInfoInILVariableArray(DWORD dwIndex,
unsigned int nativeInfoCount,
ICorDebugInfo::NativeVarInfo *nativeInfo);
-
-#define VALIDATE_HEAP
-//HeapValidate(GetProcessHeap(), 0, NULL);
-
// struct DebuggerILToNativeMap: Holds the IL to Native offset map
// Great pains are taken to ensure that this each entry corresponds to the
// first IL instruction in a source line. It isn't actually a mapping
diff --git a/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt b/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt
index e86e91f6a69e88..0517bb50df3218 100644
--- a/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt
+++ b/src/coreclr/src/dlls/mscoree/coreclr/CMakeLists.txt
@@ -57,6 +57,10 @@ else(CLR_CMAKE_HOST_WIN32)
set(EXPORTS_LINKER_OPTION -Wl,-exported_symbols_list,${EXPORTS_FILE})
endif(CLR_CMAKE_TARGET_DARWIN)
+ if(CLR_CMAKE_TARGET_ANDROID AND CLR_CMAKE_HOST_ARCH_ARM)
+ set(EXPORTS_LINKER_OPTION "${EXPORTS_LINKER_OPTION} -Wl,--no-warn-shared-textrel")
+ endif()
+
endif (CLR_CMAKE_HOST_WIN32)
add_definitions(-DFX_VER_INTERNALNAME_STR=CoreCLR.dll)
diff --git a/src/coreclr/src/dlls/mscoree/mscoree.cpp b/src/coreclr/src/dlls/mscoree/mscoree.cpp
index 2a8c7875b87c34..d87d9bebc0ca02 100644
--- a/src/coreclr/src/dlls/mscoree/mscoree.cpp
+++ b/src/coreclr/src/dlls/mscoree/mscoree.cpp
@@ -20,15 +20,15 @@
#include
+// Globals
+extern HINSTANCE g_hThisInst;
+
// Locals.
BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error.
HINSTANCE hInst, // Instance handle of the loaded module.
DWORD dwReason, // Reason for loading.
LPVOID lpReserved); // Unused.
-// Globals.
-HINSTANCE g_hThisInst; // This library.
-
#ifndef CROSSGEN_COMPILE
//*****************************************************************************
// Handle lifetime of loaded library.
@@ -36,8 +36,6 @@ HINSTANCE g_hThisInst; // This library.
#include
-extern "C" IExecutionEngine* IEE();
-
#ifdef TARGET_WINDOWS
#include // for __security_init_cookie()
@@ -62,15 +60,11 @@ extern "C" BOOL WINAPI CoreDllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpRe
// Initialization" check and makes it pass.
__security_init_cookie();
- // It's critical that we invoke InitUtilCode() before the CRT initializes.
+ // It's critical that we initialize g_hmodCoreCLR before the CRT initializes.
// We have a lot of global ctors that will break if we let the CRT initialize without
// this step having been done.
- CoreClrCallbacks cccallbacks;
- cccallbacks.m_hmodCoreCLR = (HINSTANCE)hInstance;
- cccallbacks.m_pfnIEE = IEE;
- cccallbacks.m_pfnGetCORSystemDirectory = GetCORSystemDirectoryInternaL;
- InitUtilcode(cccallbacks);
+ g_hmodCoreCLR = (HINSTANCE)hInstance;
if (!(result = _CRT_INIT(hInstance, dwReason, lpReserved)))
{
@@ -115,15 +109,7 @@ BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved)
case DLL_PROCESS_ATTACH:
{
#ifndef TARGET_WINDOWS
- // It's critical that we invoke InitUtilCode() before the CRT initializes.
- // We have a lot of global ctors that will break if we let the CRT initialize without
- // this step having been done.
-
- CoreClrCallbacks cccallbacks;
- cccallbacks.m_hmodCoreCLR = (HINSTANCE)hInstance;
- cccallbacks.m_pfnIEE = IEE;
- cccallbacks.m_pfnGetCORSystemDirectory = GetCORSystemDirectoryInternaL;
- InitUtilcode(cccallbacks);
+ g_hmodCoreCLR = (HINSTANCE)hInstance;
#endif
// Save the module handle.
diff --git a/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp b/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp
index 8c18573c1b518e..1de4e817eab78a 100644
--- a/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp
+++ b/src/coreclr/src/dlls/mscorpe/ceefilegenwriter.cpp
@@ -17,10 +17,6 @@
#include
#include
-// Globals.
-HINSTANCE g_hThisInst; // This library.
-
-
#ifdef EMIT_FIXUPS
// Emitted PEFIXUP structure looks like this
@@ -941,178 +937,11 @@ HRESULT CeeFileGenWriter::emitExeMain()
return S_OK;
} // HRESULT CeeFileGenWriter::emitExeMain()
-
-HRESULT GetClrSystemDirectory(SString& pbuffer)
-{
- HRESULT hr = S_OK;
-
-
- PathString pPath;
- DWORD dwPath;
-
- _ASSERTE (g_hThisInst);
-
- dwPath = WszGetModuleFileName(g_hThisInst, pPath);
- if(dwPath == 0)
- {
- hr = HRESULT_FROM_GetLastErrorNA();
- return (hr);
- }
-
- return CopySystemDirectory(pPath, pbuffer);
-}
-
#ifndef TARGET_UNIX
-BOOL RunProcess(LPCWSTR tempResObj, LPCWSTR pszFilename, DWORD* pdwExitCode, PEWriter &pewriter)
-{
- BOOL fSuccess = FALSE;
-
- PROCESS_INFORMATION pi;
-
- PathString wszSystemDir;
- if (FAILED(GetClrSystemDirectory(wszSystemDir)))
- return FALSE;
-
- const WCHAR* wzMachine;
- if(pewriter.isIA64())
- wzMachine = L"IA64";
- else if(pewriter.isAMD64())
- wzMachine = L"AMD64";
- else if(pewriter.isARM())
- wzMachine = L"ARM";
- else
- wzMachine = L"IX86";
-
- // Res file, so convert it
- StackSString ssCmdLine;
-
- LPWSTR ext = PathFindExtension(pszFilename);
- if(*ext == NULL)
- {
- ssCmdLine.Printf(L"%scvtres.exe /NOLOGO /READONLY /MACHINE:%s \"/OUT:%s\" \"%s.\"",
- wszSystemDir.GetUnicode(),
- wzMachine,
- tempResObj,
- pszFilename);
- }
- else
- {
- ssCmdLine.Printf(L"%scvtres.exe /NOLOGO /READONLY /MACHINE:%s \"/OUT:%s\" \"%s\"",
- wszSystemDir.GetUnicode(),
- wzMachine,
- tempResObj,
- pszFilename);
- }
-
- STARTUPINFOW start;
- ZeroMemory(&start, sizeof(start));
- start.cb = sizeof(start);
- start.dwFlags = STARTF_USESHOWWINDOW;
- start.wShowWindow = SW_HIDE;
-
- fSuccess = WszCreateProcess(
- NULL,
- ssCmdLine.GetUnicode(),
- NULL,
- NULL,
- true,
- 0,
- 0,
- NULL,
- &start,
- &pi);
-
- // If process runs, wait for it to finish
- if (fSuccess) {
- WaitForSingleObject(pi.hProcess, INFINITE);
-
- GetExitCodeProcess(pi.hProcess, pdwExitCode);
-
- CloseHandle(pi.hProcess);
-
- CloseHandle(pi.hThread);
- }
- return fSuccess;
-} // BOOL RunProcess()
-
-// Ensure that pszFilename is an object file (not just a binary resource)
-// If we convert, then return obj filename in pszTempFilename
-HRESULT ConvertResource(const WCHAR * pszFilename, __in_ecount(cchTempFilename) WCHAR *pszTempFilename, size_t cchTempFilename, PEWriter &pewriter)
-{
- HANDLE hFile = WszCreateFile(pszFilename, GENERIC_READ,
- FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
-
-// failure
- if (!hFile || (hFile == INVALID_HANDLE_VALUE))
- {
- //dbprintf("Can't find resource files:%S\n", pszFilename);
- return HRESULT_FROM_GetLastError();
- }
-
-// Read first 4 bytes. If they're all 0, we have a win32 .res file which must be
-// converted. (So call CvtRes.exe). Else it's an obj file.
-
- DWORD dwCount = -1;
- DWORD dwData = -1;
-
- BOOL fRet = ReadFile(hFile,
- &dwData,
- 4,
- &dwCount,
- NULL
- );
-
- CloseHandle(hFile);
-
- if (!fRet) {
- //dbprintf("Invalid resource file:%S\n", pszFilename);
- return HRESULT_FROM_GetLastError();
- }
-
- if (dwData != 0)
- {
- return S_OK;
- }
-
- PathString tempResObj;
- PathString tempResPath;
-
- // Create the temp file where the temp path is at rather than where the application is at.
- if (!WszGetTempPath( tempResPath))
- {
- return HRESULT_FROM_GetLastError();
- }
-
- if (!WszGetTempFileName(tempResPath, L"RES", 0, tempResObj))
- {
- //dbprintf("GetTempFileName failed\n");
- return HRESULT_FROM_GetLastError();
- }
-
- DWORD dwExitCode;
- fRet = RunProcess(tempResObj, pszFilename, &dwExitCode, pewriter);
-
- if (!fRet)
- { // Couldn't run cvtres.exe
- return PostError(CEE_E_CVTRES_NOT_FOUND);
- }
- else if (dwExitCode != 0)
- { // CvtRes.exe ran, but failed
- return HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND);
- }
- else
- { // Conversion succesful, so return filename.
- wcscpy_s(pszTempFilename, cchTempFilename, tempResObj);
- }
-
- return S_OK;
-} // HRESULT ConvertResource()
-
-
// This function reads a resource file and emits it into the generated PE file.
// 1. We can only link resources in obj format. Must convert from .res to .obj
-// with CvtRes.exe.
+// with CvtRes.exe. See https://github.com/dotnet/runtime/issues/11412.
// 2. Must touch up all COFF relocs from .rsrc$01 (resource header) to .rsrc$02
// (resource raw data)
HRESULT CeeFileGenWriter::emitResourceSection()
@@ -1120,21 +949,7 @@ HRESULT CeeFileGenWriter::emitResourceSection()
if (m_resourceFileName == NULL)
return S_OK;
- // Make sure szResFileName is an obj, not just a .res; change name if we convert
- WCHAR szTempFileName[MAX_PATH+1];
- szTempFileName[0] = L'\0';
- HRESULT hr = ConvertResource(m_resourceFileName, szTempFileName,
- MAX_PATH+1, getPEWriter());
- if (FAILED(hr)) return hr;
-
- // Filename may change (if we convert .res to .obj), so have floating pointer
- const WCHAR* szResFileName;
- if (*szTempFileName)
- szResFileName = szTempFileName;
- else
- szResFileName = m_resourceFileName;
-
- _ASSERTE(szResFileName);
+ const WCHAR* szResFileName = m_resourceFileName;
// read the resource file and spit it out in the .rsrc section
@@ -1142,7 +957,7 @@ HRESULT CeeFileGenWriter::emitResourceSection()
HANDLE hMap = NULL;
IMAGE_FILE_HEADER *hMod = NULL;
- hr = S_OK;
+ HRESULT hr = S_OK;
struct Param
{
@@ -1436,9 +1251,6 @@ lDone: ;
CloseHandle(hMap);
if (hFile != INVALID_HANDLE_VALUE)
CloseHandle(hFile);
- if (szResFileName == szTempFileName)
- // delete temporary file if we created one
- WszDeleteFile(szResFileName);
return hr;
} // HRESULT CeeFileGenWriter::emitResourceSection()
@@ -1981,34 +1793,3 @@ HRESULT CeeFileGenWriter::TestEmitFixups()
}
#endif // TEST_EMIT_FIXUPS
#endif // EMIT_FIXUPS
-
-#ifndef FEATURE_MERGE_JIT_AND_ENGINE
-
-//*****************************************************************************
-// Handle lifetime of loaded library.
-//*****************************************************************************
-extern "C"
-BOOL WINAPI DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved)
-{
- switch (dwReason)
- {
- case DLL_PROCESS_ATTACH:
- { // Save the module handle.
- g_hThisInst = (HINSTANCE)hInstance;
- DisableThreadLibraryCalls((HMODULE)hInstance);
- }
- break;
- case DLL_PROCESS_DETACH:
- break;
- }
-
- return (true);
-} // BOOL WINAPI DllMain()
-
-
-HINSTANCE GetModuleInst()
-{
- return (g_hThisInst);
-} // HINSTANCE GetModuleInst()
-
-#endif //FEATURE_MERGE_JIT_AND_ENGINE
diff --git a/src/coreclr/src/gc/gc.cpp b/src/coreclr/src/gc/gc.cpp
index e547defba10736..e54a993d6fca88 100644
--- a/src/coreclr/src/gc/gc.cpp
+++ b/src/coreclr/src/gc/gc.cpp
@@ -12198,7 +12198,7 @@ void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
uint8_t* obj_start = acontext->alloc_ptr;
assert(start >= obj_start);
uint8_t* obj_end = obj_start + size - plug_skew;
- assert(obj_end > clear_start);
+ assert(obj_end >= clear_start);
// if clearing at the object start, clear the syncblock.
if(obj_start == start)
@@ -37204,7 +37204,9 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
#endif //_PREFAST_
#endif //MULTIPLE_HEAPS
- if (size >= loh_size_threshold || (flags & GC_ALLOC_LARGE_OBJECT_HEAP))
+ assert(size < loh_size_threshold || (flags & GC_ALLOC_LARGE_OBJECT_HEAP));
+
+ if (flags & GC_ALLOC_USER_OLD_HEAP)
{
// The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't
// support mis-aligned object headers so we can't support biased headers. Luckily for us
@@ -37213,7 +37215,8 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0);
ASSERT(65536 < loh_size_threshold);
- newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, loh_generation, acontext->alloc_bytes_uoh);
+ int gen_num = (flags & GC_ALLOC_PINNED_OBJECT_HEAP) ? poh_generation : loh_generation;
+ newAlloc = (Object*) hp->allocate_uoh_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, gen_num, acontext->alloc_bytes_uoh);
ASSERT(((size_t)newAlloc & 7) == 0);
#ifdef FEATURE_STRUCTALIGN
diff --git a/src/coreclr/src/gc/gcinterface.h b/src/coreclr/src/gc/gcinterface.h
index cd78561af31dc6..de6456d3924731 100644
--- a/src/coreclr/src/gc/gcinterface.h
+++ b/src/coreclr/src/gc/gcinterface.h
@@ -890,7 +890,7 @@ void updateGCShadow(Object** ptr, Object* val);
#define GC_CALL_INTERIOR 0x1
#define GC_CALL_PINNED 0x2
-//flags for IGCHeapAlloc(...)
+// keep in sync with GC_ALLOC_FLAGS in GC.cs
enum GC_ALLOC_FLAGS
{
GC_ALLOC_NO_FLAGS = 0,
@@ -901,6 +901,7 @@ enum GC_ALLOC_FLAGS
GC_ALLOC_ZEROING_OPTIONAL = 16,
GC_ALLOC_LARGE_OBJECT_HEAP = 32,
GC_ALLOC_PINNED_OBJECT_HEAP = 64,
+ GC_ALLOC_USER_OLD_HEAP = GC_ALLOC_LARGE_OBJECT_HEAP | GC_ALLOC_PINNED_OBJECT_HEAP,
};
inline GC_ALLOC_FLAGS operator|(GC_ALLOC_FLAGS a, GC_ALLOC_FLAGS b)
diff --git a/src/coreclr/src/gc/unix/gcenv.unix.cpp b/src/coreclr/src/gc/unix/gcenv.unix.cpp
index f32308c90106c8..855f2da02a9822 100644
--- a/src/coreclr/src/gc/unix/gcenv.unix.cpp
+++ b/src/coreclr/src/gc/unix/gcenv.unix.cpp
@@ -25,6 +25,8 @@
#undef min
#undef max
+#include
+
#if HAVE_SYS_TIME_H
#include
#else
diff --git a/src/coreclr/src/inc/CrstTypes.def b/src/coreclr/src/inc/CrstTypes.def
index 5bbf1b2d5198eb..06f900a79be900 100644
--- a/src/coreclr/src/inc/CrstTypes.def
+++ b/src/coreclr/src/inc/CrstTypes.def
@@ -351,6 +351,10 @@ End
Crst JitGenericHandleCache
End
+Crst JitPatchpoint
+ AcquiredBefore LoaderHeap
+End
+
Crst JitPerf
Unordered
End
diff --git a/src/coreclr/src/inc/MSCOREE.IDL b/src/coreclr/src/inc/MSCOREE.IDL
index bcf5b588b6fb6d..8a19345f86c163 100644
--- a/src/coreclr/src/inc/MSCOREE.IDL
+++ b/src/coreclr/src/inc/MSCOREE.IDL
@@ -10,11 +10,6 @@
** **
**************************************************************************************/
-cpp_quote("#define DECLARE_DEPRECATED ")
-cpp_quote("#define DEPRECATED_CLR_STDAPI STDAPI")
-
-cpp_quote("")
-
//
// Interface descriptions
//
@@ -301,7 +296,3 @@ interface ICLRRuntimeHost4 : ICLRRuntimeHost2
[in] BOOL fWaitUntilDone,
[out] int *pLatchedExitCode);
};
-
-cpp_quote("#undef DEPRECATED_CLR_STDAPI")
-cpp_quote("#undef DECLARE_DEPRECATED")
-cpp_quote("#undef DEPRECATED_CLR_API_MESG")
diff --git a/src/coreclr/src/inc/clrconfigvalues.h b/src/coreclr/src/inc/clrconfigvalues.h
index d4ec91f59f95d2..175a376b18f575 100644
--- a/src/coreclr/src/inc/clrconfigvalues.h
+++ b/src/coreclr/src/inc/clrconfigvalues.h
@@ -644,6 +644,16 @@ RETAIL_CONFIG_DWORD_INFO(INTERNAL_TC_DeleteCallCountingStubsAfter, W("TC_DeleteC
#endif
#endif
+///
+/// On-Stack Replacement
+///
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+RETAIL_CONFIG_DWORD_INFO(INTERNAL_OSR_CounterBump, W("OSR_CounterBump"), 1000, "Counter reload value when a patchpoint is hit")
+RETAIL_CONFIG_DWORD_INFO(INTERNAL_OSR_HitLimit, W("OSR_HitLimit"), 10, "Number of times a patchpoint must call back to trigger an OSR transition")
+CONFIG_DWORD_INFO(INTERNAL_OSR_LowId, W("OSR_LowId"), (DWORD)-1, "Low end of enabled patchpoint range (inclusive)");
+CONFIG_DWORD_INFO(INTERNAL_OSR_HighId, W("OSR_HighId"), 10000000, "High end of enabled patchpoint range (inclusive)");
+#endif
+
///
/// Entry point slot backpatch
///
diff --git a/src/coreclr/src/inc/clrhost.h b/src/coreclr/src/inc/clrhost.h
index 1292446b2926ee..e43d6ebb83aefc 100644
--- a/src/coreclr/src/inc/clrhost.h
+++ b/src/coreclr/src/inc/clrhost.h
@@ -56,20 +56,16 @@
#endif // _DEBUG
-IExecutionEngine *GetExecutionEngine();
-IEEMemoryManager *GetEEMemoryManager();
LPVOID ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect);
BOOL ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType);
SIZE_T ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength);
BOOL ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect);
-LPVOID ClrDebugAlloc (size_t size, LPCSTR pszFile, int iLineNo);
HANDLE ClrGetProcessHeap();
HANDLE ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize);
BOOL ClrHeapDestroy(HANDLE hHeap);
LPVOID ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, S_SIZE_T dwBytes);
BOOL ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem);
-BOOL ClrHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
HANDLE ClrGetProcessExecutableHeap();
@@ -78,8 +74,8 @@ extern int RFS_HashStack();
#endif
#ifndef SELF_NO_HOST
-LPVOID EEHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes);
-BOOL EEHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem);
+LPVOID ClrHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes);
+BOOL ClrHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem);
#endif
inline LPVOID ClrAllocInProcessHeap(DWORD dwFlags, S_SIZE_T dwBytes)
@@ -91,7 +87,7 @@ inline LPVOID ClrAllocInProcessHeap(DWORD dwFlags, S_SIZE_T dwBytes)
}
#ifndef SELF_NO_HOST
- return EEHeapAllocInProcessHeap(dwFlags, dwBytes.Value());
+ return ClrHeapAllocInProcessHeap(dwFlags, dwBytes.Value());
#else
#undef HeapAlloc
#undef GetProcessHeap
@@ -108,7 +104,7 @@ inline BOOL ClrFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem)
{
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
#ifndef SELF_NO_HOST
- return EEHeapFreeInProcessHeap(dwFlags, lpMem);
+ return ClrHeapFreeInProcessHeap(dwFlags, lpMem);
#else
#undef HeapFree
#undef GetProcessHeap
@@ -128,29 +124,10 @@ inline BOOL ClrFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem)
// critical section api
CRITSEC_COOKIE ClrCreateCriticalSection(CrstType type, CrstFlags flags);
-HRESULT ClrDeleteCriticalSection(CRITSEC_COOKIE cookie);
+void ClrDeleteCriticalSection(CRITSEC_COOKIE cookie);
void ClrEnterCriticalSection(CRITSEC_COOKIE cookie);
void ClrLeaveCriticalSection(CRITSEC_COOKIE cookie);
-// event api
-EVENT_COOKIE ClrCreateAutoEvent(BOOL bInitialState);
-EVENT_COOKIE ClrCreateManualEvent(BOOL bInitialState);
-void ClrCloseEvent(EVENT_COOKIE event);
-BOOL ClrSetEvent(EVENT_COOKIE event);
-BOOL ClrResetEvent(EVENT_COOKIE event);
-DWORD ClrWaitEvent(EVENT_COOKIE event, DWORD dwMilliseconds, BOOL bAlertable);
-
-// semaphore api
-SEMAPHORE_COOKIE ClrCreateSemaphore(DWORD dwInitial, DWORD dwMax);
-void ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore);
-BOOL ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore, LONG lReleaseCount, LONG *lpPreviousCount);
-DWORD ClrWaitSemaphore(SEMAPHORE_COOKIE semaphore, DWORD dwMilliseconds, BOOL bAlertable);
-
-// mutex api
-MUTEX_COOKIE ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,BOOL bInitialOwner,LPCTSTR lpName);
-void ClrCloseMutex(MUTEX_COOKIE mutex);
-BOOL ClrReleaseMutex(MUTEX_COOKIE mutex);
-DWORD ClrWaitForMutex(MUTEX_COOKIE mutex,DWORD dwMilliseconds,BOOL bAlertable);
DWORD ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable);
// Rather than use the above APIs directly, it is recommended that holder classes
@@ -163,94 +140,6 @@ typedef Holder, VoidClrDeleteCriticalSection, NULL> CRITSEC_AllocationHolder;
-class Event {
-public:
- Event ()
- : m_event(NULL)
- {STATIC_CONTRACT_LEAF;}
- ~Event ()
- {
- STATIC_CONTRACT_WRAPPER;
- CloseEvent();
- }
-
- void CreateAutoEvent(BOOL bInitialState)
- {
- STATIC_CONTRACT_WRAPPER;
- m_event = ClrCreateAutoEvent(bInitialState);
- }
- void CreateManualEvent(BOOL bInitialState)
- {
- STATIC_CONTRACT_WRAPPER;
- m_event = ClrCreateManualEvent(bInitialState);
- }
- void CloseEvent()
- {
- STATIC_CONTRACT_WRAPPER;
- if (m_event != NULL)
- ClrCloseEvent(m_event);
- m_event = NULL;
- }
-
- BOOL Set()
- {
- STATIC_CONTRACT_WRAPPER;
- return ClrSetEvent(m_event);
- }
- BOOL Reset()
- {
- STATIC_CONTRACT_WRAPPER;
- return ClrResetEvent(m_event);
- }
- DWORD Wait(DWORD dwMilliseconds, BOOL bAlertable)
- {
- STATIC_CONTRACT_WRAPPER;
- return ClrWaitEvent(m_event, dwMilliseconds, bAlertable);
- }
-
-private:
- EVENT_COOKIE m_event;
-};
-
-class Semaphore {
-public:
- Semaphore ()
- : m_semaphore(NULL)
- {STATIC_CONTRACT_LEAF;}
- ~Semaphore ()
- {
- STATIC_CONTRACT_WRAPPER;
- Close();
- }
-
- void Create(DWORD dwInitial, DWORD dwMax)
- {
- STATIC_CONTRACT_WRAPPER;
- m_semaphore = ClrCreateSemaphore(dwInitial, dwMax);
- }
- void Close()
- {
- STATIC_CONTRACT_WRAPPER;
- if (m_semaphore != NULL)
- ClrCloseSemaphore(m_semaphore);
- m_semaphore = NULL;
- }
-
- BOOL Release(LONG lReleaseCount, LONG* lpPreviousCount)
- {
- STATIC_CONTRACT_WRAPPER;
- return ClrReleaseSemaphore(m_semaphore, lReleaseCount, lpPreviousCount);
- }
- DWORD Wait(DWORD dwMilliseconds, BOOL bAlertable)
- {
- STATIC_CONTRACT_WRAPPER;
- return ClrWaitSemaphore(m_semaphore, dwMilliseconds, bAlertable);
- }
-
-private:
- SEMAPHORE_COOKIE m_semaphore;
-};
-
HMODULE GetCLRModule ();
extern thread_local int t_CantAllocCount;
diff --git a/src/coreclr/src/inc/clrinternal.idl b/src/coreclr/src/inc/clrinternal.idl
index 19fc7081184c9a..27a3c0a4536119 100644
--- a/src/coreclr/src/inc/clrinternal.idl
+++ b/src/coreclr/src/inc/clrinternal.idl
@@ -16,45 +16,6 @@ import "unknwn.idl";
// import mscoree.idl for BucketParameters definition
import "mscoree.idl";
-
-
-cpp_quote("#if 0")
-
-typedef struct _OSVERSIONINFOA {
- DWORD dwOSVersionInfoSize;
- DWORD dwMajorVersion;
- DWORD dwMinorVersion;
- DWORD dwBuildNumber;
- DWORD dwPlatformId;
- CHAR szCSDVersion[ 128 ]; // Maintenance string for PSS usage
-} OSVERSIONINFOA, *POSVERSIONINFOA, *LPOSVERSIONINFOA;
-
-typedef struct _OSVERSIONINFOW {
- DWORD dwOSVersionInfoSize;
- DWORD dwMajorVersion;
- DWORD dwMinorVersion;
- DWORD dwBuildNumber;
- DWORD dwPlatformId;
- WCHAR szCSDVersion[ 128 ]; // Maintenance string for PSS usage
-} OSVERSIONINFOW, *POSVERSIONINFOW, *LPOSVERSIONINFOW, RTL_OSVERSIONINFOW, *PRTL_OSVERSIONINFOW;
-#ifdef UNICODE
-typedef OSVERSIONINFOW OSVERSIONINFO;
-typedef POSVERSIONINFOW POSVERSIONINFO;
-typedef LPOSVERSIONINFOW LPOSVERSIONINFO;
-#else
-typedef OSVERSIONINFOA OSVERSIONINFO;
-typedef POSVERSIONINFOA POSVERSIONINFO;
-typedef LPOSVERSIONINFOA LPOSVERSIONINFO;
-#endif // UNICODE
-
-cpp_quote("#endif")
-
-// IID IExecutionEngine : uuid(7AF02DAC-2A33-494b-A09F-25E00A93C6F8)
-cpp_quote("EXTERN_GUID(IID_IExecutionEngine, 0x7AF02DAC, 0x2A33, 0x494b, 0xA0, 0x9F, 0x25, 0xE0, 0x0A, 0x93, 0xC6, 0xF8);")
-
-// IID IEEMemoryManager : uuid{17713B61-B59F-4e13-BAAF-91623DC8ADC0}
-cpp_quote("EXTERN_GUID(IID_IEEMemoryManager, 0x17713b61, 0xb59f, 0x4e13, 0xba, 0xaf, 0x91, 0x62, 0x3d, 0xc8, 0xad, 0xc0);")
-
// This ID is embedded in the CLRDEBUGINFO resource so that the shim can differentiate dlls which happen to be named
// clr.dll from official Microsoft clr.dll implementations. This is not intended to authenticate a CLR in a strong
// security sense but short of deliberate 3rd party spoofing it should provide a good identity.
@@ -92,9 +53,6 @@ cpp_quote("EXTERN_GUID(IID_IPrivateManagedExceptionReporting, 0xad76a023, 0x332d
// Interface for exposing services from the EE to other DLLs of the CLR.
//*****************************************************************************
typedef void * CRITSEC_COOKIE;
-typedef void * EVENT_COOKIE;
-typedef void * SEMAPHORE_COOKIE;
-typedef void * MUTEX_COOKIE;
typedef enum {
CRST_DEFAULT = 0x0,
@@ -116,145 +74,6 @@ typedef enum {
// this option will assert it in debug mode.
} CrstFlags;
-// Callback function for cleaning up TLS
-typedef VOID (__stdcall *PTLS_CALLBACK_FUNCTION)(PVOID);
-
-
-[
- uuid(7AF02DAC-2A33-494b-A09F-25E00A93C6F8),
- helpstring("CLR Coordination Interface"),
- pointer_default(unique),
- local
-]
-interface IExecutionEngine : IUnknown
-{
- // Critical Sections are sometimes exposed to the host and therefore need to be
- // reflected from all CLR DLLs to the EE.
- //
- // In addition, we always monitor interactions between the lock & the GC, based
- // on the GC mode in which the lock is acquired and we restrict what operations
- // are permitted while holding the lock based on this.
- //
- // Finally, we we rank all our locks to prevent deadlock across all the DLLs of
- // the CLR. This is the level argument to CreateLock.
- //
- // All usage of these locks must be exception-safe. To achieve this, we suggest
- // using Holders (see holder.h & crst.h). In fact, within the EE code cannot
- // hold locks except by using exception-safe holders.
-
- CRITSEC_COOKIE CreateLock([in] LPCSTR szTag, [in] LPCSTR level, [in] CrstFlags flags);
-
- void DestroyLock([in] CRITSEC_COOKIE lock);
-
- void AcquireLock([in] CRITSEC_COOKIE lock);
-
- void ReleaseLock([in] CRITSEC_COOKIE lock);
-
- EVENT_COOKIE CreateAutoEvent([in] BOOL bInitialState);
- EVENT_COOKIE CreateManualEvent([in] BOOL bInitialState);
- void CloseEvent([in] EVENT_COOKIE event);
- BOOL ClrSetEvent([in] EVENT_COOKIE event);
- BOOL ClrResetEvent([in] EVENT_COOKIE event);
- DWORD WaitForEvent([in] EVENT_COOKIE event, [in] DWORD dwMilliseconds, [in] BOOL bAlertable);
- DWORD WaitForSingleObject([in] HANDLE handle, [in] DWORD dwMilliseconds);
-
- // OS header file defines CreateSemaphore.
- SEMAPHORE_COOKIE ClrCreateSemaphore([in] DWORD dwInitial, [in] DWORD dwMax);
- void ClrCloseSemaphore([in] SEMAPHORE_COOKIE semaphore);
- DWORD ClrWaitForSemaphore([in] SEMAPHORE_COOKIE semaphore, [in] DWORD dwMilliseconds, [in] BOOL bAlertable);
- BOOL ClrReleaseSemaphore([in] SEMAPHORE_COOKIE semaphore, [in] LONG lReleaseCount, [in] LONG *lpPreviousCount);
-
- MUTEX_COOKIE ClrCreateMutex([in]LPSECURITY_ATTRIBUTES lpMutexAttributes, [in]BOOL bInitialOwner, [in]LPCTSTR lpName);
- DWORD ClrWaitForMutex([in] MUTEX_COOKIE mutex, [in] DWORD dwMilliseconds, [in] BOOL bAlertable);
- BOOL ClrReleaseMutex([in] MUTEX_COOKIE mutex);
- void ClrCloseMutex([in] MUTEX_COOKIE mutex);
-
- DWORD ClrSleepEx([in] DWORD dwMilliseconds, [in] BOOL bAlertable);
-
- BOOL ClrAllocationDisallowed();
-
- void GetLastThrownObjectExceptionFromThread([out] void **ppvException);
-
-}; // interface IExecutionEngine
-
-
-//*****************************************************************************
-// Interface for exposing memory services from the EE to other DLLs of the CLR.
-//*****************************************************************************
-
-cpp_quote("#if !defined(_WINNT_) && !defined(_NTMMAPI_)")
-typedef void* PMEMORY_BASIC_INFORMATION;
-cpp_quote("#endif")
-
-
-[
- uuid(17713B61-B59F-4e13-BAAF-91623DC8ADC0),
- helpstring("CLR Memory Manager Interface"),
- pointer_default(unique),
- local
-]
-interface IEEMemoryManager : IUnknown
-{
- LPVOID ClrVirtualAlloc(
- [in] LPVOID lpAddress, // region to reserve or commit
- [in] SIZE_T dwSize, // size of region
- [in] DWORD flAllocationType, // type of allocation
- [in] DWORD flProtect // type of access protection
- );
-
- BOOL ClrVirtualFree(
- [in] LPVOID lpAddress, // address of region
- [in] SIZE_T dwSize, // size of region
- [in] DWORD dwFreeType // operation type
- );
-
- SIZE_T ClrVirtualQuery(
- [in] const void* lpAddress, // address of region
- [in] PMEMORY_BASIC_INFORMATION lpBuffer, // information buffer
- [in] SIZE_T dwLength // size of buffer
- );
-
- BOOL ClrVirtualProtect(
- [in] LPVOID lpAddress, // region of committed pages
- [in] SIZE_T dwSize, // size of the region
- [in] DWORD flNewProtect, // desired access protection
- [in] DWORD* lpflOldProtect // old protection
- );
-
- HANDLE ClrGetProcessHeap();
-
- HANDLE ClrHeapCreate(
- [in] DWORD flOptions, // heap allocation attributes
- [in] SIZE_T dwInitialSize, // initial heap size
- [in] SIZE_T dwMaximumSize // maximum heap size
- );
-
- BOOL ClrHeapDestroy(
- [in] HANDLE hHeap // handle to heap
- );
-
- LPVOID ClrHeapAlloc(
- [in] HANDLE hHeap, // handle to private heap block
- [in] DWORD dwFlags, // heap allocation control
- [in] SIZE_T dwBytes // number of bytes to allocate
- );
-
- BOOL ClrHeapFree(
- [in] HANDLE hHeap, // handle to heap
- [in] DWORD dwFlags, // heap free options
- [in] LPVOID lpMem // pointer to memory
- );
-
- BOOL ClrHeapValidate(
- [in] HANDLE hHeap, // handle to heap
- [in] DWORD dwFlags, // heap access options
- [in] const void* lpMem // optional pointer to memory block
- );
-
- HANDLE ClrGetProcessExecutableHeap();
-
-}; // interface IEEMemoryManager
-
//********************************************************************************************
// Interface for exposing GetBucketParametersForCurrentException to Watson testing harness.
//********************************************************************************************
@@ -268,5 +87,3 @@ interface IPrivateManagedExceptionReporting : IUnknown
{
HRESULT GetBucketParametersForCurrentException([out]BucketParameters *pParams);
}
-
-
diff --git a/src/coreclr/src/inc/corinfo.h b/src/coreclr/src/inc/corinfo.h
index 0fa5edbac36cb7..900de9c32f5e01 100644
--- a/src/coreclr/src/inc/corinfo.h
+++ b/src/coreclr/src/inc/corinfo.h
@@ -631,6 +631,8 @@ enum CorInfoHelpFunc
CORINFO_HELP_STACK_PROBE, // Probes each page of the allocated stack frame
+ CORINFO_HELP_PATCHPOINT, // Notify runtime that code has reached a patchpoint
+
CORINFO_HELP_COUNT,
};
@@ -1084,6 +1086,11 @@ inline bool dontInline(CorInfoInline val) {
return(val < 0);
}
+// Patchpoint info is passed back and forth across the interface
+// but is opaque.
+
+struct PatchpointInfo;
+
// Cookie types consumed by the code generator (these are opaque values
// not inspected by the code generator):
@@ -2169,6 +2176,16 @@ class ICorStaticInfo
GSCookie ** ppCookieVal // OUT
) = 0;
+ // Provide patchpoint info for the method currently being jitted.
+ virtual void setPatchpointInfo(
+ PatchpointInfo* patchpointInfo
+ ) = 0;
+
+ // Get patchpoint info and il offset for the method currently being jitted.
+ virtual PatchpointInfo* getOSRInfo(
+ unsigned *ilOffset // [OUT] il offset of OSR entry point
+ ) = 0;
+
/**********************************************************************************/
//
// ICorModuleInfo
diff --git a/src/coreclr/src/inc/corinfoinstructionset.h b/src/coreclr/src/inc/corinfoinstructionset.h
new file mode 100644
index 00000000000000..d43d743134f449
--- /dev/null
+++ b/src/coreclr/src/inc/corinfoinstructionset.h
@@ -0,0 +1,276 @@
+
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+#ifndef CORINFOINSTRUCTIONSET_H
+#define CORINFOINSTRUCTIONSET_H
+
+enum CORINFO_InstructionSet
+{
+ InstructionSet_ILLEGAL = 0,
+ InstructionSet_NONE = 63,
+#ifdef TARGET_ARM64
+ InstructionSet_ArmBase=1,
+ InstructionSet_ArmBase_Arm64=2,
+ InstructionSet_AdvSimd=3,
+ InstructionSet_AdvSimd_Arm64=4,
+ InstructionSet_Aes=5,
+ InstructionSet_Crc32=6,
+ InstructionSet_Crc32_Arm64=7,
+ InstructionSet_Sha1=8,
+ InstructionSet_Sha256=9,
+ InstructionSet_Atomics=10,
+ InstructionSet_Vector64=11,
+ InstructionSet_Vector128=12,
+#endif // TARGET_ARM64
+#ifdef TARGET_AMD64
+ InstructionSet_SSE=1,
+ InstructionSet_SSE2=2,
+ InstructionSet_SSE3=3,
+ InstructionSet_SSSE3=4,
+ InstructionSet_SSE41=5,
+ InstructionSet_SSE42=6,
+ InstructionSet_AVX=7,
+ InstructionSet_AVX2=8,
+ InstructionSet_AES=9,
+ InstructionSet_BMI1=10,
+ InstructionSet_BMI2=11,
+ InstructionSet_FMA=12,
+ InstructionSet_LZCNT=13,
+ InstructionSet_PCLMULQDQ=14,
+ InstructionSet_POPCNT=15,
+ InstructionSet_Vector128=16,
+ InstructionSet_Vector256=17,
+ InstructionSet_BMI1_X64=18,
+ InstructionSet_BMI2_X64=19,
+ InstructionSet_LZCNT_X64=20,
+ InstructionSet_POPCNT_X64=21,
+ InstructionSet_SSE_X64=22,
+ InstructionSet_SSE2_X64=23,
+ InstructionSet_SSE41_X64=24,
+ InstructionSet_SSE42_X64=25,
+#endif // TARGET_AMD64
+#ifdef TARGET_X86
+ InstructionSet_SSE=1,
+ InstructionSet_SSE2=2,
+ InstructionSet_SSE3=3,
+ InstructionSet_SSSE3=4,
+ InstructionSet_SSE41=5,
+ InstructionSet_SSE42=6,
+ InstructionSet_AVX=7,
+ InstructionSet_AVX2=8,
+ InstructionSet_AES=9,
+ InstructionSet_BMI1=10,
+ InstructionSet_BMI2=11,
+ InstructionSet_FMA=12,
+ InstructionSet_LZCNT=13,
+ InstructionSet_PCLMULQDQ=14,
+ InstructionSet_POPCNT=15,
+ InstructionSet_Vector128=16,
+ InstructionSet_Vector256=17,
+ InstructionSet_BMI1_X64=18,
+ InstructionSet_BMI2_X64=19,
+ InstructionSet_LZCNT_X64=20,
+ InstructionSet_POPCNT_X64=21,
+ InstructionSet_SSE_X64=22,
+ InstructionSet_SSE2_X64=23,
+ InstructionSet_SSE41_X64=24,
+ InstructionSet_SSE42_X64=25,
+#endif // TARGET_X86
+
+};
+
+struct CORINFO_InstructionSetFlags
+{
+private:
+ uint64_t _flags = 0;
+public:
+ void AddInstructionSet(CORINFO_InstructionSet instructionSet)
+ {
+ _flags = _flags | (((uint64_t)1) << instructionSet);
+ }
+
+ void RemoveInstructionSet(CORINFO_InstructionSet instructionSet)
+ {
+ _flags = _flags & ~(((uint64_t)1) << instructionSet);
+ }
+
+ bool HasInstructionSet(CORINFO_InstructionSet instructionSet) const
+ {
+ return _flags & (((uint64_t)1) << instructionSet);
+ }
+
+ bool Equals(CORINFO_InstructionSetFlags other) const
+ {
+ return _flags == other._flags;
+ }
+
+ void Add(CORINFO_InstructionSetFlags other)
+ {
+ _flags |= other._flags;
+ }
+
+ bool IsEmpty() const
+ {
+ return _flags == 0;
+ }
+
+ void Reset()
+ {
+ _flags = 0;
+ }
+
+ void Set64BitInstructionSetVariants()
+ {
+#ifdef TARGET_ARM64
+ if (HasInstructionSet(InstructionSet_ArmBase))
+ AddInstructionSet(InstructionSet_ArmBase_Arm64);
+ if (HasInstructionSet(InstructionSet_AdvSimd))
+ AddInstructionSet(InstructionSet_AdvSimd_Arm64);
+ if (HasInstructionSet(InstructionSet_Crc32))
+ AddInstructionSet(InstructionSet_Crc32_Arm64);
+#endif // TARGET_ARM64
+#ifdef TARGET_AMD64
+ if (HasInstructionSet(InstructionSet_SSE))
+ AddInstructionSet(InstructionSet_SSE_X64);
+ if (HasInstructionSet(InstructionSet_SSE2))
+ AddInstructionSet(InstructionSet_SSE2_X64);
+ if (HasInstructionSet(InstructionSet_SSE41))
+ AddInstructionSet(InstructionSet_SSE41_X64);
+ if (HasInstructionSet(InstructionSet_SSE42))
+ AddInstructionSet(InstructionSet_SSE42_X64);
+ if (HasInstructionSet(InstructionSet_BMI1))
+ AddInstructionSet(InstructionSet_BMI1_X64);
+ if (HasInstructionSet(InstructionSet_BMI2))
+ AddInstructionSet(InstructionSet_BMI2_X64);
+ if (HasInstructionSet(InstructionSet_LZCNT))
+ AddInstructionSet(InstructionSet_LZCNT_X64);
+ if (HasInstructionSet(InstructionSet_POPCNT))
+ AddInstructionSet(InstructionSet_POPCNT_X64);
+#endif // TARGET_AMD64
+#ifdef TARGET_X86
+#endif // TARGET_X86
+
+ }
+
+ uint64_t GetFlagsRaw()
+ {
+ return _flags;
+ }
+
+ void SetFromFlagsRaw(uint64_t flags)
+ {
+ _flags = flags;
+ }
+};
+
+inline CORINFO_InstructionSetFlags EnsureInstructionSetFlagsAreValid(CORINFO_InstructionSetFlags input)
+{
+ CORINFO_InstructionSetFlags oldflags = input;
+ CORINFO_InstructionSetFlags resultflags = input;
+ do
+ {
+ oldflags = resultflags;
+#ifdef TARGET_ARM64
+ if (resultflags.HasInstructionSet(InstructionSet_ArmBase) && !resultflags.HasInstructionSet(InstructionSet_ArmBase_Arm64))
+ resultflags.RemoveInstructionSet(InstructionSet_ArmBase);
+ if (resultflags.HasInstructionSet(InstructionSet_AdvSimd) && !resultflags.HasInstructionSet(InstructionSet_AdvSimd_Arm64))
+ resultflags.RemoveInstructionSet(InstructionSet_AdvSimd);
+ if (resultflags.HasInstructionSet(InstructionSet_Crc32) && !resultflags.HasInstructionSet(InstructionSet_Crc32_Arm64))
+ resultflags.RemoveInstructionSet(InstructionSet_Crc32);
+ if (resultflags.HasInstructionSet(InstructionSet_AdvSimd) && !resultflags.HasInstructionSet(InstructionSet_ArmBase))
+ resultflags.RemoveInstructionSet(InstructionSet_AdvSimd);
+ if (resultflags.HasInstructionSet(InstructionSet_Aes) && !resultflags.HasInstructionSet(InstructionSet_ArmBase))
+ resultflags.RemoveInstructionSet(InstructionSet_Aes);
+ if (resultflags.HasInstructionSet(InstructionSet_Crc32) && !resultflags.HasInstructionSet(InstructionSet_ArmBase))
+ resultflags.RemoveInstructionSet(InstructionSet_Crc32);
+ if (resultflags.HasInstructionSet(InstructionSet_Sha1) && !resultflags.HasInstructionSet(InstructionSet_ArmBase))
+ resultflags.RemoveInstructionSet(InstructionSet_Sha1);
+ if (resultflags.HasInstructionSet(InstructionSet_Sha256) && !resultflags.HasInstructionSet(InstructionSet_ArmBase))
+ resultflags.RemoveInstructionSet(InstructionSet_Sha256);
+#endif // TARGET_ARM64
+#ifdef TARGET_AMD64
+ if (resultflags.HasInstructionSet(InstructionSet_SSE) && !resultflags.HasInstructionSet(InstructionSet_SSE_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE2) && !resultflags.HasInstructionSet(InstructionSet_SSE2_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE41) && !resultflags.HasInstructionSet(InstructionSet_SSE41_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE41);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE42) && !resultflags.HasInstructionSet(InstructionSet_SSE42_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE42);
+ if (resultflags.HasInstructionSet(InstructionSet_BMI1) && !resultflags.HasInstructionSet(InstructionSet_BMI1_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_BMI1);
+ if (resultflags.HasInstructionSet(InstructionSet_BMI2) && !resultflags.HasInstructionSet(InstructionSet_BMI2_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_BMI2);
+ if (resultflags.HasInstructionSet(InstructionSet_LZCNT) && !resultflags.HasInstructionSet(InstructionSet_LZCNT_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_LZCNT);
+ if (resultflags.HasInstructionSet(InstructionSet_POPCNT) && !resultflags.HasInstructionSet(InstructionSet_POPCNT_X64))
+ resultflags.RemoveInstructionSet(InstructionSet_POPCNT);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE2) && !resultflags.HasInstructionSet(InstructionSet_SSE))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE3) && !resultflags.HasInstructionSet(InstructionSet_SSE2))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE3);
+ if (resultflags.HasInstructionSet(InstructionSet_SSSE3) && !resultflags.HasInstructionSet(InstructionSet_SSE3))
+ resultflags.RemoveInstructionSet(InstructionSet_SSSE3);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE41) && !resultflags.HasInstructionSet(InstructionSet_SSSE3))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE41);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE42) && !resultflags.HasInstructionSet(InstructionSet_SSE41))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE42);
+ if (resultflags.HasInstructionSet(InstructionSet_AVX) && !resultflags.HasInstructionSet(InstructionSet_SSE42))
+ resultflags.RemoveInstructionSet(InstructionSet_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet_AVX2) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_AVX2);
+ if (resultflags.HasInstructionSet(InstructionSet_AES) && !resultflags.HasInstructionSet(InstructionSet_SSE2))
+ resultflags.RemoveInstructionSet(InstructionSet_AES);
+ if (resultflags.HasInstructionSet(InstructionSet_BMI1) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_BMI1);
+ if (resultflags.HasInstructionSet(InstructionSet_BMI2) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_BMI2);
+ if (resultflags.HasInstructionSet(InstructionSet_FMA) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_FMA);
+ if (resultflags.HasInstructionSet(InstructionSet_PCLMULQDQ) && !resultflags.HasInstructionSet(InstructionSet_SSE2))
+ resultflags.RemoveInstructionSet(InstructionSet_PCLMULQDQ);
+ if (resultflags.HasInstructionSet(InstructionSet_POPCNT) && !resultflags.HasInstructionSet(InstructionSet_SSE42))
+ resultflags.RemoveInstructionSet(InstructionSet_POPCNT);
+#endif // TARGET_AMD64
+#ifdef TARGET_X86
+ if (resultflags.HasInstructionSet(InstructionSet_SSE2) && !resultflags.HasInstructionSet(InstructionSet_SSE))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE3) && !resultflags.HasInstructionSet(InstructionSet_SSE2))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE3);
+ if (resultflags.HasInstructionSet(InstructionSet_SSSE3) && !resultflags.HasInstructionSet(InstructionSet_SSE3))
+ resultflags.RemoveInstructionSet(InstructionSet_SSSE3);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE41) && !resultflags.HasInstructionSet(InstructionSet_SSSE3))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE41);
+ if (resultflags.HasInstructionSet(InstructionSet_SSE42) && !resultflags.HasInstructionSet(InstructionSet_SSE41))
+ resultflags.RemoveInstructionSet(InstructionSet_SSE42);
+ if (resultflags.HasInstructionSet(InstructionSet_AVX) && !resultflags.HasInstructionSet(InstructionSet_SSE42))
+ resultflags.RemoveInstructionSet(InstructionSet_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet_AVX2) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_AVX2);
+ if (resultflags.HasInstructionSet(InstructionSet_AES) && !resultflags.HasInstructionSet(InstructionSet_SSE2))
+ resultflags.RemoveInstructionSet(InstructionSet_AES);
+ if (resultflags.HasInstructionSet(InstructionSet_BMI1) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_BMI1);
+ if (resultflags.HasInstructionSet(InstructionSet_BMI2) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_BMI2);
+ if (resultflags.HasInstructionSet(InstructionSet_FMA) && !resultflags.HasInstructionSet(InstructionSet_AVX))
+ resultflags.RemoveInstructionSet(InstructionSet_FMA);
+ if (resultflags.HasInstructionSet(InstructionSet_PCLMULQDQ) && !resultflags.HasInstructionSet(InstructionSet_SSE2))
+ resultflags.RemoveInstructionSet(InstructionSet_PCLMULQDQ);
+ if (resultflags.HasInstructionSet(InstructionSet_POPCNT) && !resultflags.HasInstructionSet(InstructionSet_SSE42))
+ resultflags.RemoveInstructionSet(InstructionSet_POPCNT);
+#endif // TARGET_X86
+
+ } while (!oldflags.Equals(resultflags));
+ return resultflags;
+}
+
+
+
+#endif // CORINFOINSTRUCTIONSET_H
diff --git a/src/coreclr/src/inc/corjit.h b/src/coreclr/src/inc/corjit.h
index 47b6858238415d..857fa5bc2e1155 100644
--- a/src/coreclr/src/inc/corjit.h
+++ b/src/coreclr/src/inc/corjit.h
@@ -89,7 +89,6 @@ extern "C" void __stdcall jitStartup(ICorJitHost* host);
class ICorJitCompiler;
class ICorJitInfo;
-struct IEEMemoryManager;
extern "C" ICorJitCompiler* __stdcall getJit();
diff --git a/src/coreclr/src/inc/corjitflags.h b/src/coreclr/src/inc/corjitflags.h
index 1982e0c7b93f64..eac6f9277909da 100644
--- a/src/coreclr/src/inc/corjitflags.h
+++ b/src/coreclr/src/inc/corjitflags.h
@@ -17,6 +17,8 @@
#ifndef _COR_JIT_FLAGS_H_
#define _COR_JIT_FLAGS_H_
+#include "corinfoinstructionset.h"
+
class CORJIT_FLAGS
{
public:
@@ -40,7 +42,6 @@ class CORJIT_FLAGS
CORJIT_FLAG_TARGET_P4 = 9,
CORJIT_FLAG_USE_FCOMI = 10, // Generated code may use fcomi(p) instruction
CORJIT_FLAG_USE_CMOV = 11, // Generated code may use cmov instruction
- CORJIT_FLAG_USE_SSE2 = 12, // Generated code may use SSE-2 instructions
#else // !defined(TARGET_X86)
@@ -52,21 +53,12 @@ class CORJIT_FLAGS
#endif // !defined(TARGET_X86)
- CORJIT_FLAG_UNUSED6 = 13,
-
- #if defined(TARGET_X86) || defined(TARGET_AMD64)
-
- CORJIT_FLAG_USE_AVX = 14,
- CORJIT_FLAG_USE_AVX2 = 15,
- CORJIT_FLAG_USE_AVX_512 = 16,
-
- #else // !defined(TARGET_X86) && !defined(TARGET_AMD64)
+ CORJIT_FLAG_OSR = 13, // Generate alternate method for On Stack Replacement
CORJIT_FLAG_UNUSED7 = 14,
CORJIT_FLAG_UNUSED8 = 15,
CORJIT_FLAG_UNUSED9 = 16,
- #endif // !defined(TARGET_X86) && !defined(TARGET_AMD64)
#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)
CORJIT_FLAG_FEATURE_SIMD = 17,
@@ -106,57 +98,6 @@ class CORJIT_FLAGS
CORJIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method
-#if defined(TARGET_ARM64)
-
- CORJIT_FLAG_HAS_ARM64_AES = 43, // ID_AA64ISAR0_EL1.AES is 1 or better
- CORJIT_FLAG_HAS_ARM64_ATOMICS = 44, // ID_AA64ISAR0_EL1.Atomic is 2 or better
- CORJIT_FLAG_HAS_ARM64_CRC32 = 45, // ID_AA64ISAR0_EL1.CRC32 is 1 or better
- CORJIT_FLAG_HAS_ARM64_DCPOP = 46, // ID_AA64ISAR1_EL1.DPB is 1 or better
- CORJIT_FLAG_HAS_ARM64_DP = 47, // ID_AA64ISAR0_EL1.DP is 1 or better
- CORJIT_FLAG_HAS_ARM64_FCMA = 48, // ID_AA64ISAR1_EL1.FCMA is 1 or better
- CORJIT_FLAG_HAS_ARM64_FP = 49, // ID_AA64PFR0_EL1.FP is 0 or better
- CORJIT_FLAG_HAS_ARM64_FP16 = 50, // ID_AA64PFR0_EL1.FP is 1 or better
- CORJIT_FLAG_HAS_ARM64_JSCVT = 51, // ID_AA64ISAR1_EL1.JSCVT is 1 or better
- CORJIT_FLAG_HAS_ARM64_LRCPC = 52, // ID_AA64ISAR1_EL1.LRCPC is 1 or better
- CORJIT_FLAG_HAS_ARM64_PMULL = 53, // ID_AA64ISAR0_EL1.AES is 2 or better
- CORJIT_FLAG_HAS_ARM64_SHA1 = 54, // ID_AA64ISAR0_EL1.SHA1 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SHA256 = 55, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SHA512 = 56, // ID_AA64ISAR0_EL1.SHA2 is 2 or better
- CORJIT_FLAG_HAS_ARM64_SHA3 = 57, // ID_AA64ISAR0_EL1.SHA3 is 1 or better
- CORJIT_FLAG_HAS_ARM64_ADVSIMD = 58, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better
- CORJIT_FLAG_HAS_ARM64_ADVSIMD_V81 = 59, // ID_AA64ISAR0_EL1.RDM is 1 or better
- CORJIT_FLAG_HAS_ARM64_ADVSIMD_FP16 = 60, // ID_AA64PFR0_EL1.AdvSIMD is 1 or better
- CORJIT_FLAG_HAS_ARM64_SM3 = 61, // ID_AA64ISAR0_EL1.SM3 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SM4 = 62, // ID_AA64ISAR0_EL1.SM4 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SVE = 63 // ID_AA64PFR0_EL1.SVE is 1 or better
-
-#elif defined(TARGET_X86) || defined(TARGET_AMD64)
-
- CORJIT_FLAG_USE_SSE3 = 43,
- CORJIT_FLAG_USE_SSSE3 = 44,
- CORJIT_FLAG_USE_SSE41 = 45,
- CORJIT_FLAG_USE_SSE42 = 46,
- CORJIT_FLAG_USE_AES = 47,
- CORJIT_FLAG_USE_BMI1 = 48,
- CORJIT_FLAG_USE_BMI2 = 49,
- CORJIT_FLAG_USE_FMA = 50,
- CORJIT_FLAG_USE_LZCNT = 51,
- CORJIT_FLAG_USE_PCLMULQDQ = 52,
- CORJIT_FLAG_USE_POPCNT = 53,
- CORJIT_FLAG_UNUSED23 = 54,
- CORJIT_FLAG_UNUSED24 = 55,
- CORJIT_FLAG_UNUSED25 = 56,
- CORJIT_FLAG_UNUSED26 = 57,
- CORJIT_FLAG_UNUSED27 = 58,
- CORJIT_FLAG_UNUSED28 = 59,
- CORJIT_FLAG_UNUSED29 = 60,
- CORJIT_FLAG_UNUSED30 = 61,
- CORJIT_FLAG_UNUSED31 = 62,
- CORJIT_FLAG_UNUSED32 = 63
-
-
-#else // !defined(TARGET_ARM64) &&!defined(TARGET_X86) && !defined(TARGET_AMD64)
-
CORJIT_FLAG_UNUSED12 = 43,
CORJIT_FLAG_UNUSED13 = 44,
CORJIT_FLAG_UNUSED14 = 45,
@@ -178,8 +119,6 @@ class CORJIT_FLAGS
CORJIT_FLAG_UNUSED30 = 61,
CORJIT_FLAG_UNUSED31 = 62,
CORJIT_FLAG_UNUSED32 = 63
-
-#endif // !defined(TARGET_ARM64) &&!defined(TARGET_X86) && !defined(TARGET_AMD64)
};
CORJIT_FLAGS()
@@ -198,11 +137,28 @@ class CORJIT_FLAGS
CORJIT_FLAGS(const CORJIT_FLAGS& other)
{
corJitFlags = other.corJitFlags;
+ instructionSetFlags = other.instructionSetFlags;
}
void Reset()
{
corJitFlags = 0;
+ instructionSetFlags.Reset();
+ }
+
+ void Set(CORINFO_InstructionSet instructionSet)
+ {
+ instructionSetFlags.AddInstructionSet(instructionSet);
+ }
+
+ void Clear(CORINFO_InstructionSet instructionSet)
+ {
+ instructionSetFlags.RemoveInstructionSet(instructionSet);
+ }
+
+ void Set64BitInstructionSetVariants()
+ {
+ instructionSetFlags.Set64BitInstructionSetVariants();
}
void Set(CorJitFlag flag)
@@ -223,16 +179,17 @@ class CORJIT_FLAGS
void Add(const CORJIT_FLAGS& other)
{
corJitFlags |= other.corJitFlags;
+ instructionSetFlags.Add(other.instructionSetFlags);
}
- void Remove(const CORJIT_FLAGS& other)
+ bool IsEmpty() const
{
- corJitFlags &= ~other.corJitFlags;
+ return corJitFlags == 0 && instructionSetFlags.IsEmpty();
}
- bool IsEmpty() const
+ void EnsureValidInstructionSetSupport()
{
- return corJitFlags == 0;
+ instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags);
}
// DO NOT USE THIS FUNCTION! (except in very restricted special cases)
@@ -241,9 +198,16 @@ class CORJIT_FLAGS
return corJitFlags;
}
+ // DO NOT USE THIS FUNCTION! (except in very restricted special cases)
+ unsigned __int64 GetInstructionSetFlagsRaw()
+ {
+ return instructionSetFlags.GetFlagsRaw();
+ }
+
private:
unsigned __int64 corJitFlags;
+ CORINFO_InstructionSetFlags instructionSetFlags;
};
diff --git a/src/coreclr/src/inc/crsttypes.h b/src/coreclr/src/inc/crsttypes.h
index 3638826f769d32..98d24c2efafcae 100644
--- a/src/coreclr/src/inc/crsttypes.h
+++ b/src/coreclr/src/inc/crsttypes.h
@@ -89,88 +89,89 @@ enum CrstType
CrstJit = 70,
CrstJitGenericHandleCache = 71,
CrstJitInlineTrackingMap = 72,
- CrstJitPerf = 73,
- CrstJumpStubCache = 74,
- CrstLeafLock = 75,
- CrstListLock = 76,
- CrstLoaderAllocator = 77,
- CrstLoaderAllocatorReferences = 78,
- CrstLoaderHeap = 79,
- CrstMda = 80,
- CrstMetadataTracker = 81,
- CrstMethodDescBackpatchInfoTracker = 82,
- CrstModIntPairList = 83,
- CrstModule = 84,
- CrstModuleFixup = 85,
- CrstModuleLookupTable = 86,
- CrstMulticoreJitHash = 87,
- CrstMulticoreJitManager = 88,
- CrstMUThunkHash = 89,
- CrstNativeBinderInit = 90,
- CrstNativeImageCache = 91,
- CrstNativeImageEagerFixups = 92,
- CrstNls = 93,
- CrstNotifyGdb = 94,
- CrstObjectList = 95,
- CrstOnEventManager = 96,
- CrstPatchEntryPoint = 97,
- CrstPEImage = 98,
- CrstPEImagePDBStream = 99,
- CrstPendingTypeLoadEntry = 100,
- CrstPinHandle = 101,
- CrstPinnedByrefValidation = 102,
- CrstProfilerGCRefDataFreeList = 103,
- CrstProfilingAPIStatus = 104,
- CrstPublisherCertificate = 105,
- CrstRCWCache = 106,
- CrstRCWCleanupList = 107,
- CrstRCWRefCache = 108,
- CrstReadyToRunEntryPointToMethodDescMap = 109,
- CrstReDacl = 110,
- CrstReflection = 111,
- CrstReJITGlobalRequest = 112,
- CrstRemoting = 113,
- CrstRetThunkCache = 114,
- CrstRWLock = 115,
- CrstSavedExceptionInfo = 116,
- CrstSaveModuleProfileData = 117,
- CrstSecurityStackwalkCache = 118,
- CrstSharedAssemblyCreate = 119,
- CrstSigConvert = 120,
- CrstSingleUseLock = 121,
- CrstSpecialStatics = 122,
- CrstSqmManager = 123,
- CrstStackSampler = 124,
- CrstStressLog = 125,
- CrstStrongName = 126,
- CrstStubCache = 127,
- CrstStubDispatchCache = 128,
- CrstStubUnwindInfoHeapSegments = 129,
- CrstSyncBlockCache = 130,
- CrstSyncHashLock = 131,
- CrstSystemBaseDomain = 132,
- CrstSystemDomain = 133,
- CrstSystemDomainDelayedUnloadList = 134,
- CrstThreadIdDispenser = 135,
- CrstThreadpoolEventCache = 136,
- CrstThreadpoolTimerQueue = 137,
- CrstThreadpoolWaitThreads = 138,
- CrstThreadpoolWorker = 139,
- CrstThreadStaticDataHashTable = 140,
- CrstThreadStore = 141,
- CrstTieredCompilation = 142,
- CrstTPMethodTable = 143,
- CrstTypeEquivalenceMap = 144,
- CrstTypeIDMap = 145,
- CrstUMEntryThunkCache = 146,
- CrstUMThunkHash = 147,
- CrstUniqueStack = 148,
- CrstUnresolvedClassLock = 149,
- CrstUnwindInfoTableLock = 150,
- CrstVSDIndirectionCellLock = 151,
- CrstWinRTFactoryCache = 152,
- CrstWrapperTemplate = 153,
- kNumberOfCrstTypes = 154
+ CrstJitPatchpoint = 73,
+ CrstJitPerf = 74,
+ CrstJumpStubCache = 75,
+ CrstLeafLock = 76,
+ CrstListLock = 77,
+ CrstLoaderAllocator = 78,
+ CrstLoaderAllocatorReferences = 79,
+ CrstLoaderHeap = 80,
+ CrstMda = 81,
+ CrstMetadataTracker = 82,
+ CrstMethodDescBackpatchInfoTracker = 83,
+ CrstModIntPairList = 84,
+ CrstModule = 85,
+ CrstModuleFixup = 86,
+ CrstModuleLookupTable = 87,
+ CrstMulticoreJitHash = 88,
+ CrstMulticoreJitManager = 89,
+ CrstMUThunkHash = 90,
+ CrstNativeBinderInit = 91,
+ CrstNativeImageCache = 92,
+ CrstNativeImageEagerFixups = 93,
+ CrstNls = 94,
+ CrstNotifyGdb = 95,
+ CrstObjectList = 96,
+ CrstOnEventManager = 97,
+ CrstPatchEntryPoint = 98,
+ CrstPEImage = 99,
+ CrstPEImagePDBStream = 100,
+ CrstPendingTypeLoadEntry = 101,
+ CrstPinHandle = 102,
+ CrstPinnedByrefValidation = 103,
+ CrstProfilerGCRefDataFreeList = 104,
+ CrstProfilingAPIStatus = 105,
+ CrstPublisherCertificate = 106,
+ CrstRCWCache = 107,
+ CrstRCWCleanupList = 108,
+ CrstRCWRefCache = 109,
+ CrstReadyToRunEntryPointToMethodDescMap = 110,
+ CrstReDacl = 111,
+ CrstReflection = 112,
+ CrstReJITGlobalRequest = 113,
+ CrstRemoting = 114,
+ CrstRetThunkCache = 115,
+ CrstRWLock = 116,
+ CrstSavedExceptionInfo = 117,
+ CrstSaveModuleProfileData = 118,
+ CrstSecurityStackwalkCache = 119,
+ CrstSharedAssemblyCreate = 120,
+ CrstSigConvert = 121,
+ CrstSingleUseLock = 122,
+ CrstSpecialStatics = 123,
+ CrstSqmManager = 124,
+ CrstStackSampler = 125,
+ CrstStressLog = 126,
+ CrstStrongName = 127,
+ CrstStubCache = 128,
+ CrstStubDispatchCache = 129,
+ CrstStubUnwindInfoHeapSegments = 130,
+ CrstSyncBlockCache = 131,
+ CrstSyncHashLock = 132,
+ CrstSystemBaseDomain = 133,
+ CrstSystemDomain = 134,
+ CrstSystemDomainDelayedUnloadList = 135,
+ CrstThreadIdDispenser = 136,
+ CrstThreadpoolEventCache = 137,
+ CrstThreadpoolTimerQueue = 138,
+ CrstThreadpoolWaitThreads = 139,
+ CrstThreadpoolWorker = 140,
+ CrstThreadStaticDataHashTable = 141,
+ CrstThreadStore = 142,
+ CrstTieredCompilation = 143,
+ CrstTPMethodTable = 144,
+ CrstTypeEquivalenceMap = 145,
+ CrstTypeIDMap = 146,
+ CrstUMEntryThunkCache = 147,
+ CrstUMThunkHash = 148,
+ CrstUniqueStack = 149,
+ CrstUnresolvedClassLock = 150,
+ CrstUnwindInfoTableLock = 151,
+ CrstVSDIndirectionCellLock = 152,
+ CrstWinRTFactoryCache = 153,
+ CrstWrapperTemplate = 154,
+ kNumberOfCrstTypes = 155
};
#endif // __CRST_TYPES_INCLUDED
@@ -254,6 +255,7 @@ int g_rgCrstLevelMap[] =
8, // CrstJit
0, // CrstJitGenericHandleCache
16, // CrstJitInlineTrackingMap
+ 3, // CrstJitPatchpoint
-1, // CrstJitPerf
6, // CrstJumpStubCache
0, // CrstLeafLock
@@ -413,6 +415,7 @@ LPCSTR g_rgCrstNameMap[] =
"CrstJit",
"CrstJitGenericHandleCache",
"CrstJitInlineTrackingMap",
+ "CrstJitPatchpoint",
"CrstJitPerf",
"CrstJumpStubCache",
"CrstLeafLock",
diff --git a/src/coreclr/src/inc/eventtracebase.h b/src/coreclr/src/inc/eventtracebase.h
index cde996a92185e2..a02d8f5fbda696 100644
--- a/src/coreclr/src/inc/eventtracebase.h
+++ b/src/coreclr/src/inc/eventtracebase.h
@@ -242,7 +242,7 @@ extern UINT32 g_nClrInstanceId;
#endif // defined(HOST_UNIX) && (defined(FEATURE_EVENT_TRACE) || defined(FEATURE_EVENTSOURCE_XPLAT))
-#if defined(FEATURE_PERFTRACING)
+#if defined(FEATURE_PERFTRACING) || defined(FEATURE_EVENTSOURCE_XPLAT)
/***************************************/
/* Tracing levels supported by CLR ETW */
@@ -397,7 +397,7 @@ class XplatEventLoggerConfiguration
NewArrayHolder _argument;
bool _isValid;
};
-#endif // FEATURE_PERFTRACING
+#endif // defined(FEATURE_PERFTRACING) || defined(FEATURE_EVENTSOURCE_XPLAT)
#if defined(HOST_UNIX) && (defined(FEATURE_EVENT_TRACE) || defined(FEATURE_EVENTSOURCE_XPLAT))
diff --git a/src/coreclr/src/inc/jithelpers.h b/src/coreclr/src/inc/jithelpers.h
index 8e92cbd410514f..cc726a9046414f 100644
--- a/src/coreclr/src/inc/jithelpers.h
+++ b/src/coreclr/src/inc/jithelpers.h
@@ -357,6 +357,8 @@
JITHELPER(CORINFO_HELP_STACK_PROBE, NULL, CORINFO_HELP_SIG_UNDEF)
#endif
+ JITHELPER(CORINFO_HELP_PATCHPOINT, JIT_Patchpoint, CORINFO_HELP_SIG_REG_ONLY)
+
#undef JITHELPER
#undef DYNAMICJITHELPER
#undef JITHELPER
diff --git a/src/coreclr/src/inc/patchpointinfo.h b/src/coreclr/src/inc/patchpointinfo.h
new file mode 100644
index 00000000000000..135ad0135a5496
--- /dev/null
+++ b/src/coreclr/src/inc/patchpointinfo.h
@@ -0,0 +1,145 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// --------------------------------------------------------------------------------
+// patchpointinfo.h
+// --------------------------------------------------------------------------------
+
+#include
+
+#ifndef _PATCHPOINTINFO_H_
+#define _PATCHPOINTINFO_H_
+
+// --------------------------------------------------------------------------------
+// Describes information needed to make an OSR transition
+// - location of Il-visible locals and other important state on the
+// original (Tier0) method frame
+// - total size of the original frame, and SP-FP delta
+//
+// Currently the patchpoint info is independent of the IL offset of the patchpoint.
+//
+// This data is produced when jitting a Tier0 method with OSR enabled, and consumed
+// by the Tier1/OSR jit request.
+//
+struct PatchpointInfo
+{
+ // Determine how much storage is needed to hold this info
+ static unsigned ComputeSize(unsigned localCount)
+ {
+ unsigned baseSize = sizeof(PatchpointInfo);
+ unsigned variableSize = localCount * sizeof(int);
+ unsigned totalSize = baseSize + variableSize;
+ return totalSize;
+ }
+
+ // Initialize
+ void Initialize(unsigned localCount, int fpToSpDelta)
+ {
+ m_fpToSpDelta = fpToSpDelta;
+ m_numberOfLocals = localCount;
+ m_genericContextArgOffset = -1;
+ m_keptAliveThisOffset = -1;
+ m_securityCookieOffset = -1;
+ }
+
+ // Total size of this patchpoint info record, in bytes
+ unsigned PatchpointInfoSize() const
+ {
+ return ComputeSize(m_numberOfLocals);
+ }
+
+ // FP to SP delta of the original method
+ int FpToSpDelta() const
+ {
+ return m_fpToSpDelta;
+ }
+
+ // Number of locals in the original method (including special locals)
+ unsigned NumberOfLocals() const
+ {
+ return m_numberOfLocals;
+ }
+
+ // Original method caller SP offset for generic context arg
+ int GenericContextArgOffset() const
+ {
+ return m_genericContextArgOffset;
+ }
+
+ void SetGenericContextArgOffset(int offset)
+ {
+ m_genericContextArgOffset = offset;
+ }
+
+ // Original method FP relative offset for kept-alive this
+ int KeptAliveThisOffset() const
+ {
+ return m_keptAliveThisOffset;
+ }
+
+ bool HasKeptAliveThis() const
+ {
+ return m_keptAliveThisOffset != -1;
+ }
+
+ void SetKeptAliveThisOffset(int offset)
+ {
+ m_keptAliveThisOffset = offset;
+ }
+
+ // Original method FP relative offset for security cookie
+ int SecurityCookieOffset() const
+ {
+ return m_securityCookieOffset;
+ }
+
+ bool HasSecurityCookie() const
+ {
+ return m_securityCookieOffset != -1;
+ }
+
+ void SetSecurityCookieOffset(int offset)
+ {
+ m_securityCookieOffset = offset;
+ }
+
+ // True if this local was address exposed in the original method
+ bool IsExposed(unsigned localNum) const
+ {
+ return ((m_offsetAndExposureData[localNum] & EXPOSURE_MASK) != 0);
+ }
+
+ void SetIsExposed(unsigned localNum)
+ {
+ m_offsetAndExposureData[localNum] |= EXPOSURE_MASK;
+ }
+
+ // FP relative offset of this local in the original method
+ int Offset(unsigned localNum) const
+ {
+ return (m_offsetAndExposureData[localNum] & ~EXPOSURE_MASK);
+ }
+
+ void SetOffset(unsigned localNum, int offset)
+ {
+ m_offsetAndExposureData[localNum] = offset;
+ }
+
+private:
+ enum
+ {
+ EXPOSURE_MASK = 0x1
+ };
+
+ unsigned m_numberOfLocals;
+ int m_fpToSpDelta;
+ int m_genericContextArgOffset;
+ int m_keptAliveThisOffset;
+ int m_securityCookieOffset;
+ int m_offsetAndExposureData[];
+};
+
+typedef DPTR(struct PatchpointInfo) PTR_PatchpointInfo;
+
+#endif // _PATCHPOINTINFO_H_
diff --git a/src/coreclr/src/inc/readytoruninstructionset.h b/src/coreclr/src/inc/readytoruninstructionset.h
new file mode 100644
index 00000000000000..6e6f2549f90440
--- /dev/null
+++ b/src/coreclr/src/inc/readytoruninstructionset.h
@@ -0,0 +1,38 @@
+
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+#ifndef READYTORUNINSTRUCTIONSET_H
+#define READYTORUNINSTRUCTIONSET_H
+enum ReadyToRunInstructionSet
+{
+ READYTORUN_INSTRUCTION_Sse=1,
+ READYTORUN_INSTRUCTION_Sse2=2,
+ READYTORUN_INSTRUCTION_Sse3=3,
+ READYTORUN_INSTRUCTION_Ssse3=4,
+ READYTORUN_INSTRUCTION_Sse41=5,
+ READYTORUN_INSTRUCTION_Sse42=6,
+ READYTORUN_INSTRUCTION_Avx=7,
+ READYTORUN_INSTRUCTION_Avx2=8,
+ READYTORUN_INSTRUCTION_Aes=9,
+ READYTORUN_INSTRUCTION_Bmi1=10,
+ READYTORUN_INSTRUCTION_Bmi2=11,
+ READYTORUN_INSTRUCTION_Fma=12,
+ READYTORUN_INSTRUCTION_Lzcnt=13,
+ READYTORUN_INSTRUCTION_Pclmulqdq=14,
+ READYTORUN_INSTRUCTION_Popcnt=15,
+ READYTORUN_INSTRUCTION_ArmBase=16,
+ READYTORUN_INSTRUCTION_AdvSimd=17,
+ READYTORUN_INSTRUCTION_Crc32=18,
+ READYTORUN_INSTRUCTION_Sha1=19,
+ READYTORUN_INSTRUCTION_Sha256=20,
+ READYTORUN_INSTRUCTION_Atomics=21,
+
+};
+
+#endif // READYTORUNINSTRUCTIONSET_H
diff --git a/src/coreclr/src/inc/slist.h b/src/coreclr/src/inc/slist.h
index f5d03126f9f9bd..87dec1dcd0bb5a 100644
--- a/src/coreclr/src/inc/slist.h
+++ b/src/coreclr/src/inc/slist.h
@@ -274,7 +274,7 @@ class SList
SLink *ret = SLink::FindAndRemove(m_pHead, GetLink(pObj), &prior);
if (ret == m_pTail)
- m_pTail = prior;
+ m_pTail = PTR_SLink(prior);
return GetObject(ret);
}
diff --git a/src/coreclr/src/inc/utilcode.h b/src/coreclr/src/inc/utilcode.h
index 45b5c4c59607cd..9283af18d62d1c 100644
--- a/src/coreclr/src/inc/utilcode.h
+++ b/src/coreclr/src/inc/utilcode.h
@@ -4713,50 +4713,7 @@ typedef HMODULE HMODULE_TGT;
BOOL IsIPInModule(HMODULE_TGT hModule, PCODE ip);
-//----------------------------------------------------------------------------------------
-// The runtime invokes InitUtilcode() in its dllmain and passes along all of the critical
-// callback pointers. For the desktop CLR, all DLLs loaded by the runtime must also call
-// InitUtilcode with the same callbacks as the runtime used. To achieve this, the runtime
-// calls a special initialization routine exposed by the loaded module with the callbacks,
-// which in turn calls InitUtilcode.
-//
-// This structure collects all of the critical callback info passed in InitUtilcode().
-//----------------------------------------------------------------------------------------
-struct CoreClrCallbacks
-{
- typedef IExecutionEngine* (* pfnIEE_t)();
- typedef HRESULT (* pfnGetCORSystemDirectory_t)(SString& pbuffer);
-
- HINSTANCE m_hmodCoreCLR;
- pfnIEE_t m_pfnIEE;
- pfnGetCORSystemDirectory_t m_pfnGetCORSystemDirectory;
-};
-
-
-// For DAC, we include this functionality only when EH SxS is enabled.
-
-//----------------------------------------------------------------------------------------
-// CoreCLR must invoke this before CRT initialization to ensure utilcode has all the callback
-// pointers it needs.
-//----------------------------------------------------------------------------------------
-VOID InitUtilcode(const CoreClrCallbacks &cccallbacks);
-CoreClrCallbacks const & GetClrCallbacks();
-
-//----------------------------------------------------------------------------------------
-// Stuff below is for utilcode.lib eyes only.
-//----------------------------------------------------------------------------------------
-
-// Stores callback pointers provided by InitUtilcode().
-extern CoreClrCallbacks g_CoreClrCallbacks;
-
-// Throws up a helpful dialog if InitUtilcode() wasn't called.
-#ifdef _DEBUG
-void OnUninitializedCoreClrCallbacks();
-#define VALIDATECORECLRCALLBACKS() if (g_CoreClrCallbacks.m_hmodCoreCLR == NULL) OnUninitializedCoreClrCallbacks()
-#else //_DEBUG
-#define VALIDATECORECLRCALLBACKS()
-#endif //_DEBUG
-
+extern HINSTANCE g_hmodCoreCLR;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
diff --git a/src/coreclr/src/inc/utsem.h b/src/coreclr/src/inc/utsem.h
index b24871d2325d78..0cacfcd940f6e7 100644
--- a/src/coreclr/src/inc/utsem.h
+++ b/src/coreclr/src/inc/utsem.h
@@ -46,18 +46,9 @@ class UTSemReadWrite
#endif //_DEBUG
private:
- Semaphore * GetReadWaiterSemaphore()
- {
- return m_pReadWaiterSemaphore;
- }
- Event * GetWriteWaiterEvent()
- {
- return m_pWriteWaiterEvent;
- }
-
Volatile m_dwFlag; // internal state, see implementation
- Semaphore * m_pReadWaiterSemaphore; // semaphore for awakening read waiters
- Event * m_pWriteWaiterEvent; // event for awakening write waiters
+ HANDLE m_hReadWaiterSemaphore; // semaphore for awakening read waiters
+ HANDLE m_hWriteWaiterEvent; // event for awakening write waiters
}; // class UTSemReadWrite
#endif // __UTSEM_H__
diff --git a/src/coreclr/src/jit/CMakeLists.txt b/src/coreclr/src/jit/CMakeLists.txt
index 2f4a3f201623de..5be8e3f96dab3e 100644
--- a/src/coreclr/src/jit/CMakeLists.txt
+++ b/src/coreclr/src/jit/CMakeLists.txt
@@ -67,6 +67,7 @@ set( JIT_SOURCES
objectalloc.cpp
optcse.cpp
optimizer.cpp
+ patchpoint.cpp
phase.cpp
rangecheck.cpp
rationalize.cpp
diff --git a/src/coreclr/src/jit/block.cpp b/src/coreclr/src/jit/block.cpp
index 01058ba2ab9f7b..fe186a3544102e 100644
--- a/src/coreclr/src/jit/block.cpp
+++ b/src/coreclr/src/jit/block.cpp
@@ -344,6 +344,14 @@ void BasicBlock::dspFlags()
{
printf("bwd ");
}
+ if (bbFlags & BBF_BACKWARD_JUMP_TARGET)
+ {
+ printf("bwd-target ");
+ }
+ if (bbFlags & BBF_PATCHPOINT)
+ {
+ printf("ppoint ");
+ }
if (bbFlags & BBF_RETLESS_CALL)
{
printf("retless ");
diff --git a/src/coreclr/src/jit/block.h b/src/coreclr/src/jit/block.h
index 15897374c565d8..94adae39c50345 100644
--- a/src/coreclr/src/jit/block.h
+++ b/src/coreclr/src/jit/block.h
@@ -440,14 +440,15 @@ struct BasicBlock : private LIR::Range
// BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a
// finally.
-#define BBF_CLONED_FINALLY_BEGIN 0x100000000 // First block of a cloned finally region
-#define BBF_CLONED_FINALLY_END 0x200000000 // Last block of a cloned finally region
-#define BBF_HAS_CALL 0x400000000 // BB contains a call
+#define BBF_CLONED_FINALLY_BEGIN 0x100000000 // First block of a cloned finally region
+#define BBF_CLONED_FINALLY_END 0x200000000 // Last block of a cloned finally region
+#define BBF_HAS_CALL 0x400000000 // BB contains a call
+#define BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY 0x800000000 // Block is dominated by exceptional entry.
+#define BBF_BACKWARD_JUMP_TARGET 0x1000000000 // Block is a target of a backward jump
+#define BBF_PATCHPOINT 0x2000000000 // Block is a patchpoint
// clang-format on
-#define BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY 0x800000000 // Block is dominated by exceptional entry.
-
// Flags that relate blocks to loop structure.
#define BBF_LOOP_FLAGS (BBF_LOOP_PREHEADER | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1)
diff --git a/src/coreclr/src/jit/codegenarm64.cpp b/src/coreclr/src/jit/codegenarm64.cpp
index 4970dbd6261d39..675fca49de84dd 100644
--- a/src/coreclr/src/jit/codegenarm64.cpp
+++ b/src/coreclr/src/jit/codegenarm64.cpp
@@ -5230,7 +5230,7 @@ void CodeGen::genArm64EmitterUnitTests()
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
- // Loads to /Stores from one, two, three, or four SIMD&FP registers
+ // Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
@@ -5413,7 +5413,7 @@ void CodeGen::genArm64EmitterUnitTests()
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
- // Loads to /Stores from one, two, three, or four SIMD&FP registers
+ // Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
@@ -5596,7 +5596,7 @@ void CodeGen::genArm64EmitterUnitTests()
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
- // Loads to /Stores from one, two, three, or four SIMD&FP registers
+ // Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
@@ -5779,7 +5779,7 @@ void CodeGen::genArm64EmitterUnitTests()
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
- // Loads to /Stores from one, two, three, or four SIMD&FP registers
+ // Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
@@ -5836,7 +5836,7 @@ void CodeGen::genArm64EmitterUnitTests()
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
- // Loads to /Stores from one, two, three, or four SIMD&FP registers
+ // Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
@@ -5893,7 +5893,7 @@ void CodeGen::genArm64EmitterUnitTests()
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
- // Loads to /Stores from one, two, three, or four SIMD&FP registers
+ // Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
diff --git a/src/coreclr/src/jit/codegencommon.cpp b/src/coreclr/src/jit/codegencommon.cpp
index 59de2fdd84214d..b9825d2ec9850b 100644
--- a/src/coreclr/src/jit/codegencommon.cpp
+++ b/src/coreclr/src/jit/codegencommon.cpp
@@ -28,6 +28,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "gcinfoencoder.h"
#endif
+#include "patchpointinfo.h"
+
/*****************************************************************************/
const BYTE genTypeSizes[] = {
@@ -2152,6 +2154,11 @@ void CodeGen::genGenerateMachineCode()
printf("; ReadyToRun compilation\n");
}
+ if (compiler->opts.IsOSR())
+ {
+ printf("; OSR variant for entry point 0x%x\n", compiler->info.compILEntry);
+ }
+
if ((compiler->opts.compFlags & CLFLG_MAXOPT) == CLFLG_MAXOPT)
{
printf("; optimized code\n");
@@ -4488,6 +4495,12 @@ void CodeGen::genEnregisterIncomingStackArgs()
}
#endif
+ // OSR handles this specially
+ if (compiler->opts.IsOSR())
+ {
+ return;
+ }
+
assert(compiler->compGeneratingProlog);
unsigned varNum = 0;
@@ -4588,6 +4601,20 @@ void CodeGen::genCheckUseBlockInit()
continue;
}
+ // Initialization of OSR locals must be handled specially
+ if (compiler->lvaIsOSRLocal(varNum))
+ {
+ varDsc->lvMustInit = 0;
+ continue;
+ }
+
+ // Likewise, initialization of the GS cookie is handled specially for OSR.
+ // Could do this for non-OSR too.. (likewise for the dummy)
+ if (compiler->opts.IsOSR() && varNum == compiler->lvaGSSecurityCookie)
+ {
+ continue;
+ }
+
if (!varDsc->lvIsInReg() && !varDsc->lvOnFrame)
{
noway_assert(varDsc->lvRefCnt() == 0);
@@ -6629,6 +6656,127 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg,
inst_ST_RV(ins_Store(TYP_I_IMPL), tempThis, 0, genGetZeroReg(initReg, pInitRegZeroed), TYP_I_IMPL);
}
}
+
+ // Initialize args and locals for OSR. Note this may include promoted fields.
+ if (compiler->opts.IsOSR())
+ {
+ PatchpointInfo* patchpointInfo = compiler->info.compPatchpointInfo;
+
+ // basic sanity checks (make sure we're OSRing the right method)
+ assert(patchpointInfo->NumberOfLocals() == compiler->info.compLocalsCount);
+
+ const int originalFrameSize = patchpointInfo->FpToSpDelta();
+ const unsigned patchpointInfoLen = patchpointInfo->NumberOfLocals();
+
+ for (unsigned varNum = 0; varNum < compiler->lvaCount; varNum++)
+ {
+ if (!compiler->lvaIsOSRLocal(varNum))
+ {
+ continue;
+ }
+
+ LclVarDsc* const varDsc = compiler->lvaGetDesc(varNum);
+
+ if (!varDsc->lvIsInReg())
+ {
+ JITDUMP("---OSR--- V%02u in memory\n", varNum);
+ continue;
+ }
+
+ if (!VarSetOps::IsMember(compiler, compiler->fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
+ {
+ JITDUMP("---OSR--- V%02u (reg) not live at entry\n", varNum);
+ continue;
+ }
+
+ int fieldOffset = 0;
+ unsigned lclNum = varNum;
+
+ if (varDsc->lvIsStructField)
+ {
+ lclNum = varDsc->lvParentLcl;
+ assert(lclNum < patchpointInfoLen);
+
+ fieldOffset = varDsc->lvFldOffset;
+ JITDUMP("---OSR--- V%02u is promoted field of V%02u at offset %d\n", varNum, lclNum, fieldOffset);
+ }
+
+ // Note we are always reading from the original frame here
+ const var_types lclTyp = genActualType(varDsc->lvType);
+ const emitAttr size = emitTypeSize(lclTyp);
+ const int stkOffs = patchpointInfo->Offset(lclNum) + fieldOffset;
+
+ // Original frames always use frame pointers, so
+ // stkOffs is the original frame-relative offset
+ // to the variable.
+ //
+ // We need to determine the stack or frame-pointer relative
+ // offset for this variable in the current frame.
+ //
+ // If current frame does not use a frame pointer, we need to
+ // add the SP-to-FP delta of this frame and the SP-to-FP delta
+ // of the original frame; that translates from this frame's
+ // stack pointer to the old frame's frame pointer.
+ //
+ // We then add the original frame's frame-pointer relative
+ // offset (note this offset is usually negative -- the stack
+ // grows down, so locals are below the frame pointer).
+ //
+ // /-----original frame-----/
+ // / return address /
+ // / saved RBP --+ / <--- Original frame ptr --+
+ // / ... | / |
+ // / ... (stkOffs) / |
+ // / ... | / |
+ // / variable --+ / |
+ // / ... / (original frame sp-fp delta)
+ // / ... / |
+ // /-----OSR frame ---------/ |
+ // / pseudo return address / --+
+ // / ... / |
+ // / ... / (this frame sp-fp delta)
+ // / ... / |
+ // /------------------------/ <--- Stack ptr --+
+ //
+ // If the current frame is using a frame pointer, we need to
+ // add the SP-to-FP delta of the original frame and then add
+ // the original frame's frame-pointer relative offset.
+ //
+ // /-----original frame-----/
+ // / return address /
+ // / saved RBP --+ / <--- Original frame ptr --+
+ // / ... | / |
+ // / ... (stkOffs) / |
+ // / ... | / |
+ // / variable --+ / |
+ // / ... / (original frame sp-fp delta)
+ // / ... / |
+ // /-----OSR frame ---------/ |
+ // / pseudo return address / --+
+ // / saved RBP / <--- Frame ptr --+
+ // / ... /
+ // / ... /
+ // / ... /
+ // /------------------------/
+
+ int offset = originalFrameSize + stkOffs;
+
+ if (isFramePointerUsed())
+ {
+ // also adjust for saved RBP on this frame
+ offset += TARGET_POINTER_SIZE;
+ }
+ else
+ {
+ offset += genSPtoFPdelta();
+ }
+
+ JITDUMP("---OSR--- V%02u (reg) old rbp offset %d old frame %d this frame sp-fp %d new offset %d (%02xH)\n",
+ varNum, stkOffs, originalFrameSize, genSPtoFPdelta(), offset, offset);
+
+ GetEmitter()->emitIns_R_AR(ins_Load(lclTyp), size, varDsc->GetRegNum(), genFramePointerReg(), offset);
+ }
+ }
}
/*-----------------------------------------------------------------------------
@@ -6642,6 +6790,12 @@ void CodeGen::genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg,
void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed)
{
+ // For OSR the original method has set this up for us.
+ if (compiler->opts.IsOSR())
+ {
+ return;
+ }
+
assert(compiler->compGeneratingProlog);
bool reportArg = compiler->lvaReportParamTypeArg();
@@ -7296,6 +7450,19 @@ void CodeGen::genFnProlog()
psiBegProlog();
}
+#if defined(TARGET_XARCH)
+ // For OSR there is a "phantom prolog" to account for the actions taken
+ // in the original frame that impact RBP and RSP on entry to the OSR method.
+ if (compiler->opts.IsOSR())
+ {
+ PatchpointInfo* patchpointInfo = compiler->info.compPatchpointInfo;
+ const int originalFrameSize = patchpointInfo->FpToSpDelta();
+
+ compiler->unwindPush(REG_FPBASE);
+ compiler->unwindAllocStack(originalFrameSize);
+ }
+#endif
+
#ifdef DEBUG
if (compiler->compJitHaltMethod())
@@ -7487,7 +7654,8 @@ void CodeGen::genFnProlog()
}
}
- assert((genInitStkLclCnt > 0) == hasUntrLcl);
+ // TODO-Cleanup: Add suitable assert for the OSR case.
+ assert(compiler->opts.IsOSR() || ((genInitStkLclCnt > 0) == hasUntrLcl));
#ifdef DEBUG
if (verbose)
@@ -7593,7 +7761,9 @@ void CodeGen::genFnProlog()
// This way, the varargs iterator will be able to retrieve the
// call arguments properly since both the arg regs and the stack allocated
// args will be contiguous.
- if (compiler->info.compIsVarArgs)
+ //
+ // OSR methods can skip this, as the setup is done by the original method.
+ if (compiler->info.compIsVarArgs && !compiler->opts.IsOSR())
{
GetEmitter()->spillIntArgRegsToShadowSlots();
}
@@ -7801,7 +7971,11 @@ void CodeGen::genFnProlog()
#ifdef PROFILING_SUPPORTED
// Insert a function entry callback for profiling, if requested.
- genProfilingEnterCallback(initReg, &initRegZeroed);
+ // OSR methods aren't called, so don't have enter hooks.
+ if (!compiler->opts.IsOSR())
+ {
+ genProfilingEnterCallback(initReg, &initRegZeroed);
+ }
#endif // PROFILING_SUPPORTED
@@ -7840,37 +8014,43 @@ void CodeGen::genFnProlog()
// Update the arg initial register locations.
compiler->lvaUpdateArgsWithInitialReg();
- FOREACH_REGISTER_FILE(regState)
+ // Home incoming arguments and generate any required inits.
+ // OSR handles this by moving the values from the original frame.
+ //
+ if (!compiler->opts.IsOSR())
{
- if (regState->rsCalleeRegArgMaskLiveIn)
+ FOREACH_REGISTER_FILE(regState)
{
- // If we need an extra register to shuffle around the incoming registers
- // we will use xtraReg (initReg) and set the xtraRegClobbered flag,
- // if we don't need to use the xtraReg then this flag will stay false
- //
- regNumber xtraReg;
- bool xtraRegClobbered = false;
-
- if (genRegMask(initReg) & RBM_ARG_REGS)
- {
- xtraReg = initReg;
- }
- else
+ if (regState->rsCalleeRegArgMaskLiveIn)
{
- xtraReg = REG_SCRATCH;
- initRegZeroed = false;
- }
+ // If we need an extra register to shuffle around the incoming registers
+ // we will use xtraReg (initReg) and set the xtraRegClobbered flag,
+ // if we don't need to use the xtraReg then this flag will stay false
+ //
+ regNumber xtraReg;
+ bool xtraRegClobbered = false;
- genFnPrologCalleeRegArgs(xtraReg, &xtraRegClobbered, regState);
+ if (genRegMask(initReg) & RBM_ARG_REGS)
+ {
+ xtraReg = initReg;
+ }
+ else
+ {
+ xtraReg = REG_SCRATCH;
+ initRegZeroed = false;
+ }
- if (xtraRegClobbered)
- {
- initRegZeroed = false;
+ genFnPrologCalleeRegArgs(xtraReg, &xtraRegClobbered, regState);
+
+ if (xtraRegClobbered)
+ {
+ initRegZeroed = false;
+ }
}
}
}
- // Home the incoming arguments
+ // Home the incoming arguments.
genEnregisterIncomingStackArgs();
/* Initialize any must-init registers variables now */
@@ -8440,6 +8620,24 @@ void CodeGen::genFnEpilog(BasicBlock* block)
}
genPopCalleeSavedRegisters();
+
+ // Extra OSR adjust to get to where RBP was saved by the original frame, and
+ // restore RBP.
+ //
+ // Note the other callee saves made in that frame are dead, the OSR method
+ // will save and restore what it needs.
+ if (compiler->opts.IsOSR())
+ {
+ PatchpointInfo* patchpointInfo = compiler->info.compPatchpointInfo;
+ const int originalFrameSize = patchpointInfo->FpToSpDelta();
+
+ // Use add since we know the SP-to-FP delta of the original method.
+ //
+ // If we ever allow the original method to have localloc this will
+ // need to change.
+ inst_RV_IV(INS_add, REG_SPBASE, originalFrameSize, EA_PTRSIZE);
+ inst_RV(INS_pop, REG_EBP, TYP_I_IMPL);
+ }
}
else
{
@@ -8471,9 +8669,11 @@ void CodeGen::genFnEpilog(BasicBlock* block)
if (compiler->compLocallocUsed)
{
+ // OSR not yet ready for localloc
+ assert(!compiler->opts.IsOSR());
+
// ESP may be variable if a localloc was actually executed. Reset it.
// lea esp, [ebp - compiler->compCalleeRegsPushed * REGSIZE_BYTES]
-
needLea = true;
}
else if (!regSet.rsRegsModified(RBM_CALLEE_SAVED))
@@ -8543,10 +8743,26 @@ void CodeGen::genFnEpilog(BasicBlock* block)
//
// Pop the callee-saved registers (if any)
//
-
genPopCalleeSavedRegisters();
#ifdef TARGET_AMD64
+ // Extra OSR adjust to get to where RBP was saved by the original frame.
+ //
+ // Note the other callee saves made in that frame are dead, the current method
+ // will save and restore what it needs.
+ if (compiler->opts.IsOSR())
+ {
+ PatchpointInfo* patchpointInfo = compiler->info.compPatchpointInfo;
+ const int originalFrameSize = patchpointInfo->FpToSpDelta();
+
+ // Use add since we know the SP-to-FP delta of the original method.
+ // We also need to skip over the slot where we pushed RBP.
+ //
+ // If we ever allow the original method to have localloc this will
+ // need to change.
+ inst_RV_IV(INS_add, REG_SPBASE, originalFrameSize + TARGET_POINTER_SIZE, EA_PTRSIZE);
+ }
+
assert(!needMovEspEbp); // "mov esp, ebp" is not allowed in AMD64 epilogs
#else // !TARGET_AMD64
if (needMovEspEbp)
diff --git a/src/coreclr/src/jit/codegenxarch.cpp b/src/coreclr/src/jit/codegenxarch.cpp
index 0ac2545a9ae6ae..d319979cb8884e 100644
--- a/src/coreclr/src/jit/codegenxarch.cpp
+++ b/src/coreclr/src/jit/codegenxarch.cpp
@@ -22,6 +22,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
+#include "patchpointinfo.h"
/*****************************************************************************
*
@@ -69,6 +70,12 @@ void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
return;
}
+ if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
+ {
+ // Security cookie is on original frame and was initialized there.
+ return;
+ }
+
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
diff --git a/src/coreclr/src/jit/compiler.cpp b/src/coreclr/src/jit/compiler.cpp
index efa4d187da4e50..3a3e961b34c4c4 100644
--- a/src/coreclr/src/jit/compiler.cpp
+++ b/src/coreclr/src/jit/compiler.cpp
@@ -22,6 +22,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "lower.h"
#include "stacklevelsetter.h"
#include "jittelemetry.h"
+#include "patchpointinfo.h"
#if defined(DEBUG)
// Column settings for COMPlus_JitDumpIR. We could(should) make these programmable.
@@ -2192,250 +2193,179 @@ void Compiler::compSetProcessor()
#endif // TARGET_X86
-// Instruction set flags for Intel hardware intrinsics
+ CORINFO_InstructionSetFlags instructionSetFlags = jitFlags.GetInstructionSetFlags();
+
#ifdef TARGET_XARCH
+ // Instruction set flags for Intel hardware intrinsics
opts.compSupportsISA = 0;
if (JitConfig.EnableHWIntrinsic())
{
// Dummy ISAs for simplifying the JIT code
- opts.setSupportedISA(InstructionSet_Vector128);
- opts.setSupportedISA(InstructionSet_Vector256);
- }
-
- if (JitConfig.EnableSSE())
- {
- opts.setSupportedISA(InstructionSet_SSE);
-#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_SSE_X64);
-#endif // TARGET_AMD64
-
- if (JitConfig.EnableSSE2())
- {
- opts.setSupportedISA(InstructionSet_SSE2);
-#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_SSE2_X64);
-#endif // TARGET_AMD64
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AES) && JitConfig.EnableAES())
- {
- opts.setSupportedISA(InstructionSet_AES);
- }
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_PCLMULQDQ) && JitConfig.EnablePCLMULQDQ())
- {
- opts.setSupportedISA(InstructionSet_PCLMULQDQ);
- }
-
- // We need to additionaly check that COMPlus_EnableSSE3_4 is set, as that
- // is a prexisting config flag that controls the SSE3+ ISAs
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE3) && JitConfig.EnableSSE3() && JitConfig.EnableSSE3_4())
- {
- opts.setSupportedISA(InstructionSet_SSE3);
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSSE3) && JitConfig.EnableSSSE3())
- {
- opts.setSupportedISA(InstructionSet_SSSE3);
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE41) && JitConfig.EnableSSE41())
- {
- opts.setSupportedISA(InstructionSet_SSE41);
-#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_SSE41_X64);
-#endif // TARGET_AMD64
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE42) && JitConfig.EnableSSE42())
- {
- opts.setSupportedISA(InstructionSet_SSE42);
-#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_SSE42_X64);
-#endif // TARGET_AMD64
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_POPCNT) && JitConfig.EnablePOPCNT())
- {
- opts.setSupportedISA(InstructionSet_POPCNT);
-#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_POPCNT_X64);
-#endif // TARGET_AMD64
- }
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX) && JitConfig.EnableAVX())
- {
- opts.setSupportedISA(InstructionSet_AVX);
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_FMA) && JitConfig.EnableFMA())
- {
- opts.setSupportedISA(InstructionSet_FMA);
- }
-
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2) && JitConfig.EnableAVX2())
- {
- opts.setSupportedISA(InstructionSet_AVX2);
- }
- }
- }
- }
- }
- }
- }
+ instructionSetFlags.AddInstructionSet(InstructionSet_Vector128);
+ instructionSetFlags.AddInstructionSet(InstructionSet_Vector256);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_LZCNT) && JitConfig.EnableLZCNT())
+ if (!JitConfig.EnableSSE())
{
- opts.setSupportedISA(InstructionSet_LZCNT);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE);
#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_LZCNT_X64);
-#endif // TARGET_AMD64
- }
-
- // We currently need to also check that AVX is supported as that controls the support for the VEX encoding
- // in the emitter.
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI1) && JitConfig.EnableBMI1() && compSupports(InstructionSet_AVX))
- {
- opts.setSupportedISA(InstructionSet_BMI1);
-#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_BMI1_X64);
-#endif // TARGET_AMD64
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE_X64);
+#endif
}
- // We currently need to also check that AVX is supported as that controls the support for the VEX encoding
- // in the emitter.
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI2) && JitConfig.EnableBMI2() && compSupports(InstructionSet_AVX))
+ if (!JitConfig.EnableSSE2())
{
- opts.setSupportedISA(InstructionSet_BMI2);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE2);
#ifdef TARGET_AMD64
- opts.setSupportedISA(InstructionSet_BMI2_X64);
-#endif // TARGET_AMD64
- }
-
- if (!compIsForInlining())
- {
- if (canUseVexEncoding())
- {
- codeGen->GetEmitter()->SetUseVEXEncoding(true);
- // Assume each JITted method does not contain AVX instruction at first
- codeGen->GetEmitter()->SetContainsAVX(false);
- codeGen->GetEmitter()->SetContains256bitAVX(false);
- }
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE2_X64);
+#endif
}
-#endif // TARGET_XARCH
-#if defined(TARGET_ARM64)
- if (JitConfig.EnableHWIntrinsic())
+ if (!JitConfig.EnableAES())
{
- // Dummy ISAs for simplifying the JIT code
- opts.setSupportedISA(InstructionSet_ArmBase);
- opts.setSupportedISA(InstructionSet_ArmBase_Arm64);
- opts.setSupportedISA(InstructionSet_Vector64);
- opts.setSupportedISA(InstructionSet_Vector128);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_AES);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_AES) && JitConfig.EnableArm64Aes())
+ if (!JitConfig.EnablePCLMULQDQ())
{
- opts.setSupportedISA(InstructionSet_Aes);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_PCLMULQDQ);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_ATOMICS) && JitConfig.EnableArm64Atomics())
+ // We need to additionally check that COMPlus_EnableSSE3_4 is set, as that
+ // is a preexisting config flag that controls the SSE3+ ISAs
+ if (!JitConfig.EnableSSE3() || !JitConfig.EnableSSE3_4())
{
- opts.setSupportedISA(InstructionSet_Atomics);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE3);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_CRC32) && JitConfig.EnableArm64Crc32())
+ if (!JitConfig.EnableSSSE3())
{
- opts.setSupportedISA(InstructionSet_Crc32);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSSE3);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_DCPOP) && JitConfig.EnableArm64Dcpop())
+ if (!JitConfig.EnableSSE41())
{
- opts.setSupportedISA(InstructionSet_Dcpop);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE41);
+#ifdef TARGET_AMD64
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE41_X64);
+#endif
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_DP) && JitConfig.EnableArm64Dp())
+ if (!JitConfig.EnableSSE42())
{
- opts.setSupportedISA(InstructionSet_Dp);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE42);
+#ifdef TARGET_AMD64
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_SSE42_X64);
+#endif
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_FCMA) && JitConfig.EnableArm64Fcma())
+ if (!JitConfig.EnablePOPCNT())
{
- opts.setSupportedISA(InstructionSet_Fcma);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_POPCNT);
+#ifdef TARGET_AMD64
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_POPCNT_X64);
+#endif
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_FP) && JitConfig.EnableArm64Fp())
+ if (!JitConfig.EnableAVX())
{
- opts.setSupportedISA(InstructionSet_Fp);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_AVX);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_FP16) && JitConfig.EnableArm64Fp16())
+ if (!JitConfig.EnableFMA())
{
- opts.setSupportedISA(InstructionSet_Fp16);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_FMA);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_JSCVT) && JitConfig.EnableArm64Jscvt())
+ if (!JitConfig.EnableAVX2())
{
- opts.setSupportedISA(InstructionSet_Jscvt);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_AVX2);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_LRCPC) && JitConfig.EnableArm64Lrcpc())
+ if (!JitConfig.EnableLZCNT())
{
- opts.setSupportedISA(InstructionSet_Lrcpc);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_LZCNT);
+#ifdef TARGET_AMD64
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_LZCNT_X64);
+#endif // TARGET_AMD64
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_PMULL) && JitConfig.EnableArm64Pmull())
+ if (!JitConfig.EnableBMI1())
{
- opts.setSupportedISA(InstructionSet_Pmull);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_BMI1);
+#ifdef TARGET_AMD64
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_BMI1_X64);
+#endif // TARGET_AMD64
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_SHA1) && JitConfig.EnableArm64Sha1())
+ if (!JitConfig.EnableBMI2())
{
- opts.setSupportedISA(InstructionSet_Sha1);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_BMI2);
+#ifdef TARGET_AMD64
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_BMI2_X64);
+#endif // TARGET_AMD64
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_SHA256) && JitConfig.EnableArm64Sha256())
+#endif // TARGET_XARCH
+#if defined(TARGET_ARM64)
+ if (JitConfig.EnableHWIntrinsic())
{
- opts.setSupportedISA(InstructionSet_Sha256);
+ // Dummy ISAs for simplifying the JIT code
+ instructionSetFlags.AddInstructionSet(InstructionSet_ArmBase);
+ instructionSetFlags.AddInstructionSet(InstructionSet_ArmBase_Arm64);
+ instructionSetFlags.AddInstructionSet(InstructionSet_Vector64);
+ instructionSetFlags.AddInstructionSet(InstructionSet_Vector128);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_SHA512) && JitConfig.EnableArm64Sha512())
+ if (!JitConfig.EnableArm64Aes())
{
- opts.setSupportedISA(InstructionSet_Sha512);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_Aes);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_SHA3) && JitConfig.EnableArm64Sha3())
+ if (!JitConfig.EnableArm64Atomics())
{
- opts.setSupportedISA(InstructionSet_Sha3);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_Atomics);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_ADVSIMD) && JitConfig.EnableArm64AdvSimd())
+ if (!JitConfig.EnableArm64Crc32())
{
- opts.setSupportedISA(InstructionSet_AdvSimd);
- opts.setSupportedISA(InstructionSet_AdvSimd_Arm64);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_Crc32);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_Crc32_Arm64);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_ADVSIMD_V81) && JitConfig.EnableArm64AdvSimd_v81())
+ if (!JitConfig.EnableArm64Sha1())
{
- opts.setSupportedISA(InstructionSet_AdvSimd_v81);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_Sha1);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_ADVSIMD_FP16) && JitConfig.EnableArm64AdvSimd_Fp16())
+ if (!JitConfig.EnableArm64Sha256())
{
- opts.setSupportedISA(InstructionSet_AdvSimd_Fp16);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_Sha256);
}
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_SM3) && JitConfig.EnableArm64Sm3())
+ if (!JitConfig.EnableArm64AdvSimd())
{
- opts.setSupportedISA(InstructionSet_Sm3);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_AdvSimd);
+ instructionSetFlags.RemoveInstructionSet(InstructionSet_AdvSimd_Arm64);
}
+#endif
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_SM4) && JitConfig.EnableArm64Sm4())
- {
- opts.setSupportedISA(InstructionSet_Sm4);
- }
+ instructionSetFlags = EnsureInstructionSetFlagsAreValid(instructionSetFlags);
+ opts.setSupportedISAs(jitFlags.GetInstructionSetFlags());
- if (jitFlags.IsSet(JitFlags::JIT_FLAG_HAS_ARM64_SVE) && JitConfig.EnableArm64Sve())
+#ifdef TARGET_XARCH
+ if (!compIsForInlining())
{
- opts.setSupportedISA(InstructionSet_Sve);
+ if (canUseVexEncoding())
+ {
+ codeGen->GetEmitter()->SetUseVEXEncoding(true);
+ // Assume each JITted method does not contain AVX instruction at first
+ codeGen->GetEmitter()->SetContainsAVX(false);
+ codeGen->GetEmitter()->SetContains256bitAVX(false);
+ }
}
-#endif
+#endif // TARGET_XARCH
}
#ifdef PROFILING_SUPPORTED
@@ -3228,6 +3158,11 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
printf("OPTIONS: Tier-1/FullOpts compilation, switched to MinOpts\n");
}
+ if (jitFlags->IsSet(JitFlags::JIT_FLAG_OSR))
+ {
+ printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry);
+ }
+
printf("OPTIONS: compCodeOpt = %s\n",
(opts.compCodeOpt == BLENDED_CODE)
? "BLENDED_CODE"
@@ -4278,6 +4213,10 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
//
DoPhase(this, PHASE_INDXCALL, &Compiler::fgTransformIndirectCalls);
+ // Expand any patchpoints
+ //
+ DoPhase(this, PHASE_PATCHPOINTS, &Compiler::fgTransformPatchpoints);
+
// PostImportPhase: cleanup inlinees
//
auto postImportPhase = [this]() {
@@ -4925,6 +4864,9 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
}
#endif
+ // Generate PatchpointInfo
+ generatePatchpointInfo();
+
RecordStateAtEndOfCompilation();
#ifdef FEATURE_TRACELOGGING
@@ -4952,6 +4894,86 @@ void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags
#endif // FUNC_INFO_LOGGING
}
+//------------------------------------------------------------------------
+// generatePatchpointInfo: allocate and fill in patchpoint info data,
+// and report it to the VM
+//
+void Compiler::generatePatchpointInfo()
+{
+ if (!doesMethodHavePatchpoints())
+ {
+ // Nothing to report
+ return;
+ }
+
+ // Patchpoints are only found in Tier0 code, which is unoptimized, and so
+ // should always have frame pointer.
+ assert(codeGen->isFramePointerUsed());
+
+ // Allocate patchpoint info storage from runtime, and fill in initial bits of data.
+ const unsigned patchpointInfoSize = PatchpointInfo::ComputeSize(info.compLocalsCount);
+ PatchpointInfo* const patchpointInfo = (PatchpointInfo*)info.compCompHnd->allocateArray(patchpointInfoSize);
+
+ // The +TARGET_POINTER_SIZE here is to account for the extra slot the runtime
+ // creates when it simulates calling the OSR method (the "pseudo return address" slot).
+ patchpointInfo->Initialize(info.compLocalsCount, codeGen->genSPtoFPdelta() + TARGET_POINTER_SIZE);
+
+ JITDUMP("--OSR-- FP-SP delta is %d\n", patchpointInfo->FpToSpDelta());
+
+ // We record offsets for all the "locals" here. Could restrict
+ // this to just the IL locals with some extra logic, and save a bit of space,
+ // but would need to adjust all consumers, too.
+ for (unsigned lclNum = 0; lclNum < info.compLocalsCount; lclNum++)
+ {
+ LclVarDsc* const varDsc = lvaGetDesc(lclNum);
+
+ // We expect all these to have stack homes, and be FP relative
+ assert(varDsc->lvOnFrame);
+ assert(varDsc->lvFramePointerBased);
+
+ // Record FramePtr relative offset (no localloc yet)
+ patchpointInfo->SetOffset(lclNum, varDsc->lvStkOffs);
+
+ // Note if IL stream contained an address-of that potentially leads to exposure.
+ // This bit of IL may be skipped by OSR partial importation.
+ if (varDsc->lvHasLdAddrOp)
+ {
+ patchpointInfo->SetIsExposed(lclNum);
+ }
+
+ JITDUMP("--OSR-- V%02u is at offset %d%s\n", lclNum, patchpointInfo->Offset(lclNum),
+ patchpointInfo->IsExposed(lclNum) ? " (exposed)" : "");
+ }
+
+ // Special offsets
+
+ if (lvaReportParamTypeArg() || lvaKeepAliveAndReportThis())
+ {
+ const int offset = lvaToCallerSPRelativeOffset(lvaCachedGenericContextArgOffset(), true);
+ patchpointInfo->SetGenericContextArgOffset(offset);
+ JITDUMP("--OSR-- cached generic context offset is CallerSP %d\n", patchpointInfo->GenericContextArgOffset());
+ }
+
+ if (lvaKeepAliveAndReportThis())
+ {
+ const int offset = lvaCachedGenericContextArgOffset();
+ patchpointInfo->SetKeptAliveThisOffset(offset);
+ JITDUMP("--OSR-- kept-alive this offset is FP %d\n", patchpointInfo->KeptAliveThisOffset());
+ }
+
+ if (compGSReorderStackLayout)
+ {
+ assert(lvaGSSecurityCookie != BAD_VAR_NUM);
+ LclVarDsc* const varDsc = lvaGetDesc(lvaGSSecurityCookie);
+ patchpointInfo->SetSecurityCookieOffset(varDsc->lvStkOffs);
+ JITDUMP("--OSR-- security cookie V%02u offset is FP %d\n", lvaGSSecurityCookie,
+ patchpointInfo->SecurityCookieOffset());
+ }
+
+ // Register this with the runtime.
+ info.compCompHnd->setPatchpointInfo(patchpointInfo);
+}
+
//------------------------------------------------------------------------
// ResetOptAnnotations: Clear annotations produced during global optimizations.
//
@@ -5208,6 +5230,19 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
info.compMethodHnd = methodHnd;
info.compMethodInfo = methodInfo;
+ if (compIsForInlining())
+ {
+ compileFlags->Clear(JitFlags::JIT_FLAG_OSR);
+ info.compILEntry = 0;
+ info.compPatchpointInfo = nullptr;
+ }
+ else if (compileFlags->IsSet(JitFlags::JIT_FLAG_OSR))
+ {
+ // Fetch OSR info from the runtime
+ info.compPatchpointInfo = info.compCompHnd->getOSRInfo(&info.compILEntry);
+ assert(info.compPatchpointInfo != nullptr);
+ }
+
virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI));
// compMatchedVM is set to true if both CPU/ABI and OS are matching the execution engine requirements
@@ -5248,28 +5283,11 @@ int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd,
// target default. Currently this is disabling all ARM64 architecture features except FP and SIMD, but this
// should be altered to possibly enable all of them, when they are known to all work.
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_AES);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_ATOMICS);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_CRC32);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_DCPOP);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_DP);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_FCMA);
- compileFlags->Set(JitFlags::JIT_FLAG_HAS_ARM64_FP);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_FP16);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_JSCVT);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_LRCPC);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_PMULL);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA1);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA256);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA512);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA3);
- compileFlags->Set(JitFlags::JIT_FLAG_HAS_ARM64_ADVSIMD);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_ADVSIMD_V81);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_ADVSIMD_FP16);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SM3);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SM4);
- compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SVE);
-
+ CORINFO_InstructionSetFlags defaultArm64Flags;
+ defaultArm64Flags.AddInstructionSet(InstructionSet_ArmBase);
+ defaultArm64Flags.AddInstructionSet(InstructionSet_AdvSimd);
+ defaultArm64Flags.Set64BitInstructionSetVariants();
+ compileFlags->SetInstructionSetFlags(defaultArm64Flags);
#endif // defined(TARGET_ARM64)
}
@@ -6021,9 +6039,9 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
#ifdef DEBUG
if ((JitConfig.DumpJittedMethods() == 1) && !compIsForInlining())
{
- printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s\n", Compiler::jitTotalMethodCompiled,
+ printf("Compiling %4d %s::%s, IL size = %u, hash=0x%08x %s%s%s\n", Compiler::jitTotalMethodCompiled,
info.compClassName, info.compMethodName, info.compILCodeSize, info.compMethodHash(),
- compGetTieringName(), compGetStressMessage());
+ compGetTieringName(), opts.IsOSR() ? " OSR" : "", compGetStressMessage());
}
if (compIsForInlining())
{
@@ -9185,3 +9203,36 @@ bool Compiler::killGCRefs(GenTree* tree)
return false;
}
+
+//------------------------------------------------------------------------
+// lvaIsOSRLocal: check if this local var is one that requires special
+// treatment for OSR compilations.
+//
+// Arguments:
+// varNum - variable of interest
+//
+// Return Value:
+// true - this is an OSR compile and this local requires special treatment
+// false - not an OSR compile, or not an interesting local for OSR
+
+bool Compiler::lvaIsOSRLocal(unsigned varNum)
+{
+ if (!opts.IsOSR())
+ {
+ return false;
+ }
+
+ if (varNum < info.compLocalsCount)
+ {
+ return true;
+ }
+
+ LclVarDsc* varDsc = lvaGetDesc(varNum);
+
+ if (varDsc->lvIsStructField)
+ {
+ return (varDsc->lvParentLcl < info.compLocalsCount);
+ }
+
+ return false;
+}
diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h
index 0beb639a7e9c9f..3ddb0cddd5d885 100644
--- a/src/coreclr/src/jit/compiler.h
+++ b/src/coreclr/src/jit/compiler.h
@@ -2406,6 +2406,8 @@ class Compiler
EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);
+ void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);
+
void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);
void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);
@@ -2888,7 +2890,9 @@ class Compiler
void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
char* gtGetLclVarName(unsigned lclNum);
- void gtDispLclVar(unsigned varNum, bool padForBiggestDisp = true);
+ void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
+ void gtDispLclVarStructType(unsigned lclNum);
+ void gtDispClassLayout(ClassLayout* layout, var_types type);
void gtDispStmt(Statement* stmt, const char* msg = nullptr);
void gtDispBlockStmts(BasicBlock* block);
void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, int listCount, char* bufp, unsigned bufLength);
@@ -3207,6 +3211,10 @@ class Compiler
int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
int lvaGetInitialSPRelativeOffset(unsigned varNum);
+ // True if this is an OSR compilation and this local is potentially
+ // located on the original method stack frame.
+ bool lvaIsOSRLocal(unsigned varNum);
+
//------------------------ For splitting types ----------------------------
void lvaInitTypeRef();
@@ -3529,8 +3537,7 @@ class Compiler
public:
void impInit();
-
- void impImport(BasicBlock* method);
+ void impImport();
CORINFO_CLASS_HANDLE impGetRefAnyClass();
CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
@@ -3671,7 +3678,7 @@ class Compiler
bool mustExpand);
protected:
- bool compSupportsHWIntrinsic(InstructionSet isa);
+ bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa);
GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
@@ -4146,6 +4153,7 @@ class Compiler
BasicBlock* fgFirstBB; // Beginning of the basic block list
BasicBlock* fgLastBB; // End of the basic block list
BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section
+ BasicBlock* fgEntryBB; // For OSR, the original method's entry point
#if defined(FEATURE_EH_FUNCLETS)
BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets)
#endif
@@ -4359,6 +4367,8 @@ class Compiler
void fgTransformIndirectCalls();
+ void fgTransformPatchpoints();
+
void fgInline();
void fgRemoveEmptyTry();
@@ -5268,11 +5278,10 @@ class Compiler
public:
void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt);
Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree);
+ Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree);
private:
void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt);
- Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree);
-
void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt);
Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree);
@@ -6370,6 +6379,7 @@ class Compiler
#define OMF_HAS_OBJSTACKALLOC 0x00000040 // Method contains an object allocated on the stack.
#define OMF_HAS_GUARDEDDEVIRT 0x00000080 // Method contains guarded devirtualization candidate
#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000100 // Method contains a runtime lookup to an expandable dictionary.
+#define OMF_HAS_PATCHPOINT 0x00000200 // Method contains patchpoints
bool doesMethodHaveFatPointer()
{
@@ -6426,6 +6436,16 @@ class Compiler
void addExpRuntimeLookupCandidate(GenTreeCall* call);
+ bool doesMethodHavePatchpoints()
+ {
+ return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0;
+ }
+
+ void setMethodHasPatchpoint()
+ {
+ optMethodFlags |= OMF_HAS_PATCHPOINT;
+ }
+
unsigned optMethodFlags;
bool doesMethodHaveNoReturnCalls()
@@ -8274,7 +8294,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
return false;
}
- bool compSupports(InstructionSet isa) const
+ bool compSupports(CORINFO_InstructionSet isa) const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return (opts.compSupportsISA & (1ULL << isa)) != 0;
@@ -8383,11 +8403,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
uint64_t compSupportsISA;
- void setSupportedISA(InstructionSet isa)
+#endif
+ void setSupportedISAs(CORINFO_InstructionSetFlags isas)
{
- compSupportsISA |= 1ULL << isa;
- }
+#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
+ compSupportsISA = isas.GetFlagsRaw();
#endif
+ }
unsigned compFlags; // method attributes
unsigned instrCount;
@@ -8469,6 +8491,18 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ bool IsOSR() const
+ {
+ return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
+ }
+#else
+ bool IsOSR() const
+ {
+ return false;
+ }
+#endif
+
// true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
// PInvoke transitions inline (e.g. when targeting CoreRT).
bool ShouldUsePInvokeHelpers()
@@ -8809,11 +8843,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// The following holds the class attributes for the method we're compiling.
unsigned compClassAttr;
- const BYTE* compCode;
- IL_OFFSET compILCodeSize; // The IL code size
- IL_OFFSET compILImportSize; // Estimated amount of IL actually imported
- UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
- // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
+ const BYTE* compCode;
+ IL_OFFSET compILCodeSize; // The IL code size
+ IL_OFFSET compILImportSize; // Estimated amount of IL actually imported
+ IL_OFFSET compILEntry; // The IL entry point (normally 0)
+ PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr)
+ UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This
+ // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if:
// (1) the code is not hot/cold split, and we issued less code than we expected, or
// (2) the code is hot/cold split, and we issued less code than we expected
// in the cold section (the hot section will always be padded out to compTotalHotCodeSize).
@@ -9097,6 +9133,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
ArenaAllocator* compGetArenaAllocator();
+ void generatePatchpointInfo();
+
#if MEASURE_MEM_ALLOC
static bool s_dspMemStats; // Display per-phase memory statistics for every function
#endif // MEASURE_MEM_ALLOC
diff --git a/src/coreclr/src/jit/compphases.h b/src/coreclr/src/jit/compphases.h
index f9e0684dad6871..cf0463c5f6e948 100644
--- a/src/coreclr/src/jit/compphases.h
+++ b/src/coreclr/src/jit/compphases.h
@@ -27,6 +27,7 @@
CompPhaseNameMacro(PHASE_PRE_IMPORT, "Pre-import", "PRE-IMP", false, -1, false)
CompPhaseNameMacro(PHASE_IMPORTATION, "Importation", "IMPORT", false, -1, true)
CompPhaseNameMacro(PHASE_INDXCALL, "Indirect call transform", "INDXCALL", false, -1, true)
+CompPhaseNameMacro(PHASE_PATCHPOINTS, "Expand patchpoints", "PPOINT", false, -1, true)
CompPhaseNameMacro(PHASE_POST_IMPORT, "Post-import", "POST-IMP", false, -1, false)
CompPhaseNameMacro(PHASE_IBCINSTR, "IBC instrumentation", "IBCINSTR", false, -1, false)
CompPhaseNameMacro(PHASE_MORPH_INIT, "Morph - Init", "MOR-INIT" ,false, -1, false)
diff --git a/src/coreclr/src/jit/ee_il_dll.cpp b/src/coreclr/src/jit/ee_il_dll.cpp
index 4ac4c24dfde058..78d3d8e59274aa 100644
--- a/src/coreclr/src/jit/ee_il_dll.cpp
+++ b/src/coreclr/src/jit/ee_il_dll.cpp
@@ -340,7 +340,7 @@ unsigned CILJit::getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags)
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT) && jitFlags.IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD) &&
- jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2))
+ jitFlags.GetInstructionSetFlags().HasInstructionSet(InstructionSet_AVX2))
{
// Since the ISAs can be disabled individually and since they are hierarchical in nature (that is
// disabling SSE also disables SSE2 through AVX2), we need to check each ISA in the hierarchy to
diff --git a/src/coreclr/src/jit/emitarm64.cpp b/src/coreclr/src/jit/emitarm64.cpp
index e31a9ab43b5bc4..2ce9ed6f3cddfb 100644
--- a/src/coreclr/src/jit/emitarm64.cpp
+++ b/src/coreclr/src/jit/emitarm64.cpp
@@ -229,8 +229,8 @@ void emitter::emitInsSanityCheck(instrDesc* id)
case IF_LS_2D: // LS_2D .Q.............. ....ssnnnnnttttt Vt Rn
case IF_LS_2E: // LS_2E .Q.............. ....ssnnnnnttttt Vt Rn
- case IF_LS_2F: // LS_2F .Q.............. ...Sssnnnnnttttt Vt[] Rn
- case IF_LS_2G: // LS_2G .Q.............. ...Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2F: // LS_2F .Q.............. xx.Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2G: // LS_2G .Q.............. xx.Sssnnnnnttttt Vt[] Rn
assert(isVectorRegister(id->idReg1()));
assert(isIntegerRegister(id->idReg2())); // SP
if (insOptsAnyArrangement(id->idInsOpt()))
@@ -959,8 +959,8 @@ bool emitter::emitInsMayWriteToGCReg(instrDesc* id)
case IF_LS_2C: // LS_2C .X.......X.iiiii iiiiP.nnnnnttttt Rt Rn imm(-256..+255) pre/post inc
case IF_LS_2D: // LS_2D .Q.............. ....ssnnnnnttttt Vt Rn
case IF_LS_2E: // LS_2E .Q.............. ....ssnnnnnttttt Vt Rn
- case IF_LS_2F: // LS_2F .Q.............. ...Sssnnnnnttttt Vt[] Rn
- case IF_LS_2G: // LS_2G .Q.............. ...Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2F: // LS_2F .Q.............. xx.Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2G: // LS_2G .Q.............. xx.Sssnnnnnttttt Vt[] Rn
case IF_LS_3A: // LS_3A .X.......X.mmmmm xxxS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
case IF_LS_3B: // LS_3B X............... .aaaaannnnnttttt Rt Ra Rn
case IF_LS_3C: // LS_3C X.........iiiiii iaaaaannnnnttttt Rt Ra Rn imm(im7,sh)
@@ -9762,8 +9762,8 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
dst += emitOutput_Instr(dst, code);
break;
- case IF_LS_2F: // LS_2F .Q.............. ...Sssnnnnnttttt Vt[] Rn
- case IF_LS_2G: // LS_2G .Q.............. ...Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2F: // LS_2F .Q.............. xx.Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2G: // LS_2G .Q.............. xx.Sssnnnnnttttt Vt[] Rn
elemsize = id->idOpSize();
index = id->idSmallCns();
code = emitInsCode(ins, fmt);
@@ -11645,8 +11645,8 @@ void emitter::emitDispIns(
}
break;
- case IF_LS_2F: // LS_2F .Q.............. ...Sssnnnnnttttt Vt[] Rn
- case IF_LS_2G: // LS_2G .Q.............. ...Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2F: // LS_2F .Q.............. xx.Sssnnnnnttttt Vt[] Rn
+ case IF_LS_2G: // LS_2G .Q.............. xx.Sssnnnnnttttt Vt[] Rn
registerListSize = insGetLoadStoreRegisterListSize(id->idIns());
elemsize = id->idOpSize();
emitDispVectorElemList(id->idReg1(), registerListSize, elemsize, id->idSmallCns(), true);
diff --git a/src/coreclr/src/jit/emitfmtsarm64.h b/src/coreclr/src/jit/emitfmtsarm64.h
index c0fd69edc05e59..401302695bb7ad 100644
--- a/src/coreclr/src/jit/emitfmtsarm64.h
+++ b/src/coreclr/src/jit/emitfmtsarm64.h
@@ -138,8 +138,8 @@ IF_DEF(LS_2D, IS_NONE, NONE) // LS_2D .Q.............. ....ssnnnnnttttt V
// Load single structure and replicate base register
IF_DEF(LS_2E, IS_NONE, NONE) // LS_2E .Q.............. ....ssnnnnnttttt Vt Rn Load/Store multiple structures post-indexed by an immediate
// Load single structure and replicate post-indexed by an immediate
-IF_DEF(LS_2F, IS_NONE, NONE) // LS_2F .Q.............. ...Sssnnnnnttttt Vt[] Rn Load/Store single structure base register
-IF_DEF(LS_2G, IS_NONE, NONE) // LS_2G .Q.............. ...Sssnnnnnttttt Vt[] Rn Load/Store single structure post-indexed by an immediate
+IF_DEF(LS_2F, IS_NONE, NONE) // LS_2F .Q.............. xx.Sssnnnnnttttt Vt[] Rn Load/Store single structure base register
+IF_DEF(LS_2G, IS_NONE, NONE) // LS_2G .Q.............. xx.Sssnnnnnttttt Vt[] Rn Load/Store single structure post-indexed by an immediate
IF_DEF(LS_3A, IS_NONE, NONE) // LS_3A .X.......X.mmmmm xxxS..nnnnnttttt Rt Rn Rm ext(Rm) LSL {}
IF_DEF(LS_3B, IS_NONE, NONE) // LS_3B X............... .aaaaannnnnddddd Rd Ra Rn
IF_DEF(LS_3C, IS_NONE, NONE) // LS_3C X.........iiiiii iaaaaannnnnddddd Rd Ra Rn imm(im7,sh)
diff --git a/src/coreclr/src/jit/emitxarch.cpp b/src/coreclr/src/jit/emitxarch.cpp
index f347c94d417642..bceab19a8f9804 100644
--- a/src/coreclr/src/jit/emitxarch.cpp
+++ b/src/coreclr/src/jit/emitxarch.cpp
@@ -1982,9 +1982,11 @@ inline UNATIVE_OFFSET emitter::emitInsSizeSV(code_t code, int var, int dsp)
LclVarDsc* varDsc = emitComp->lvaTable + var;
bool isRegPassedArg = varDsc->lvIsParam && varDsc->lvIsRegArg;
// Register passed args could have a stack offset of 0.
- noway_assert((int)offs < 0 || isRegPassedArg);
+ noway_assert((int)offs < 0 || isRegPassedArg || emitComp->opts.IsOSR());
#else // !UNIX_AMD64_ABI
- noway_assert((int)offs < 0);
+
+ // OSR transitioning to RBP frame currently can have mid-frame FP
+ noway_assert(((int)offs < 0) || emitComp->opts.IsOSR());
#endif // !UNIX_AMD64_ABI
}
diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp
index 63d80906cc6a8b..b7ccbcbda212dd 100644
--- a/src/coreclr/src/jit/flowgraph.cpp
+++ b/src/coreclr/src/jit/flowgraph.cpp
@@ -65,6 +65,7 @@ void Compiler::fgInit()
fgFirstBB = nullptr;
fgLastBB = nullptr;
fgFirstColdBlock = nullptr;
+ fgEntryBB = nullptr;
#if defined(FEATURE_EH_FUNCLETS)
fgFirstFuncletBB = nullptr;
@@ -4328,6 +4329,12 @@ bool Compiler::fgMayExplicitTailCall()
return false;
}
+ if (opts.IsReversePInvoke())
+ {
+ // Reverse P/Invoke
+ return false;
+ }
+
#if !FEATURE_FIXED_OUT_ARGS
if (info.compIsVarArgs)
{
@@ -5177,16 +5184,19 @@ void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, boo
}
}
-/*****************************************************************************
- *
- * Finally link up the bbJumpDest of the blocks together
- */
+//------------------------------------------------------------------------
+// fgMarkBackwardJump: mark blocks indicating there is a jump backwards in
+// IL, from a higher to lower IL offset.
+//
+// Arguments:
+// targetBlock -- target of the jump
+// sourceBlock -- source of the jump
-void Compiler::fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock)
+void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlock)
{
- noway_assert(startBlock->bbNum <= endBlock->bbNum);
+ noway_assert(targetBlock->bbNum <= sourceBlock->bbNum);
- for (BasicBlock* block = startBlock; block != endBlock->bbNext; block = block->bbNext)
+ for (BasicBlock* block = targetBlock; block != sourceBlock->bbNext; block = block->bbNext)
{
if ((block->bbFlags & BBF_BACKWARD_JUMP) == 0)
{
@@ -5194,6 +5204,8 @@ void Compiler::fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock)
compHasBackwardJump = true;
}
}
+
+ targetBlock->bbFlags |= BBF_BACKWARD_JUMP_TARGET;
}
/*****************************************************************************
@@ -5983,6 +5995,28 @@ void Compiler::fgFindBasicBlocks()
return;
}
+ // If we are doing OSR, add an entry block that simply branches to the right IL offset.
+ if (opts.IsOSR())
+ {
+ // Remember the original entry block in case this method is tail recursive.
+ fgEntryBB = fgLookupBB(0);
+
+ // Find the OSR entry block.
+ assert(info.compILEntry >= 0);
+ BasicBlock* bbTarget = fgLookupBB(info.compILEntry);
+
+ fgEnsureFirstBBisScratch();
+ fgFirstBB->bbJumpKind = BBJ_ALWAYS;
+ fgFirstBB->bbJumpDest = bbTarget;
+ fgAddRefPred(bbTarget, fgFirstBB);
+
+ JITDUMP("OSR: redirecting flow at entry via " FMT_BB " to " FMT_BB " (il offset 0x%x)\n", fgFirstBB->bbNum,
+ bbTarget->bbNum, info.compILEntry);
+
+ // rebuild lookup table... should be able to avoid this by leaving room up front.
+ fgInitBBLookup();
+ }
+
/* Mark all blocks within 'try' blocks as such */
if (info.compXcptnsCount == 0)
@@ -6859,7 +6893,7 @@ unsigned Compiler::fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNestin
void Compiler::fgImport()
{
- impImport(fgFirstBB);
+ impImport();
// Estimate how much of method IL was actually imported.
//
@@ -9693,118 +9727,238 @@ void Compiler::fgSimpleLowering()
#endif
}
-/*****************************************************************************
- *
- * Find and remove any basic blocks that are useless (e.g. they have not been
- * imported because they are not reachable, or they have been optimized away).
- */
-
+//------------------------------------------------------------------------
+// fgRemoveEmptyBlocks: clean up flow graph after importation
+//
+// Notes:
+//
+// Find and remove any basic blocks that are useless (e.g. they have not been
+// imported because they are not reachable, or they have been optimized away).
+//
+// Remove try regions where no blocks in the try were imported.
+// Update the end of try and handler regions where trailing blocks were not imported.
+// Update the start of try regions that were partially imported (OSR)
+//
void Compiler::fgRemoveEmptyBlocks()
{
+ JITDUMP("\n*************** In fgRemoveEmptyBlocks\n");
+
BasicBlock* cur;
BasicBlock* nxt;
- /* If we remove any blocks, we'll have to do additional work */
-
+ // If we remove any blocks, we'll have to do additional work
unsigned removedBlks = 0;
for (cur = fgFirstBB; cur != nullptr; cur = nxt)
{
- /* Get hold of the next block (in case we delete 'cur') */
-
+ // Get hold of the next block (in case we delete 'cur')
nxt = cur->bbNext;
- /* Should this block be removed? */
-
+ // Should this block be removed?
if (!(cur->bbFlags & BBF_IMPORTED))
{
noway_assert(cur->isEmpty());
if (ehCanDeleteEmptyBlock(cur))
{
- /* Mark the block as removed */
+ JITDUMP(FMT_BB " was not imported, marking as removed (%d)\n", cur->bbNum, removedBlks);
cur->bbFlags |= BBF_REMOVED;
-
- /* Remember that we've removed a block from the list */
-
removedBlks++;
-#ifdef DEBUG
- if (verbose)
- {
- printf(FMT_BB " was not imported, marked as removed (%d)\n", cur->bbNum, removedBlks);
- }
-#endif // DEBUG
-
- /* Drop the block from the list */
-
+ // Drop the block from the list.
+ //
+ // We rely on the fact that this does not clear out
+ // cur->bbNext or cur->bbPrev in the code that
+ // follows.
fgUnlinkBlock(cur);
}
else
{
- // We were prevented from deleting this block by EH normalization. Mark the block as imported.
+ // We were prevented from deleting this block by EH
+ // normalization. Mark the block as imported.
cur->bbFlags |= BBF_IMPORTED;
}
}
}
- /* If no blocks were removed, we're done */
-
+ // If no blocks were removed, we're done
if (removedBlks == 0)
{
return;
}
- /* Update all references in the exception handler table.
- * Mark the new blocks as non-removable.
- *
- * We may have made the entire try block unreachable.
- * Check for this case and remove the entry from the EH table.
- */
-
+ // Update all references in the exception handler table.
+ //
+ // We may have made the entire try block unreachable.
+ // Check for this case and remove the entry from the EH table.
+ //
+ // For OSR, just the initial part of a try range may become
+ // unreachable; if so we need to shrink the try range down
+ // to the portion that was imported.
unsigned XTnum;
EHblkDsc* HBtab;
- INDEBUG(unsigned delCnt = 0;)
+ unsigned delCnt = 0;
+ // Walk the EH regions from inner to outer
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
{
AGAIN:
- /* If the beginning of the try block was not imported, we
- * need to remove the entry from the EH table. */
+ // If start of a try region was not imported, then we either
+ // need to trim the region extent, or remove the region
+ // entirely.
+ //
+ // In normal importation, it is not valid to jump into the
+ // middle of a try, so if the try entry was not imported, the
+ // entire try can be removed.
+ //
+ // In OSR importation the entry patchpoint may be in the
+ // middle of a try, and we need to determine how much of the
+ // try ended up getting imported. Because of backwards
+ // branches we may end up importing the entire try even though
+ // execution starts in the middle.
+ //
+ // Note it is common in both cases for the ends of trys (and
+ // associated handlers) to end up not getting imported, so if
+ // the try region is not removed, we always check if we need
+ // to trim the ends.
+ //
if (HBtab->ebdTryBeg->bbFlags & BBF_REMOVED)
{
- noway_assert(!(HBtab->ebdTryBeg->bbFlags & BBF_IMPORTED));
-#ifdef DEBUG
- if (verbose)
+ // Usual case is that the entire try can be removed.
+ bool removeTryRegion = true;
+
+ if (opts.IsOSR())
{
- printf("Beginning of try block (" FMT_BB ") not imported "
- "- remove index #%u from the EH table\n",
- HBtab->ebdTryBeg->bbNum, XTnum + delCnt);
- }
- delCnt++;
-#endif // DEBUG
+ // For OSR we may need to trim the try region start.
+ //
+ // We rely on the fact that removed blocks have been snipped from
+ // the main block list, but that those removed blocks have kept
+ // their bbprev (and bbnext) links.
+ //
+ // Find the first unremoved block before the try entry block.
+ //
+ BasicBlock* const oldTryEntry = HBtab->ebdTryBeg;
+ BasicBlock* tryEntryPrev = oldTryEntry->bbPrev;
+ while ((tryEntryPrev != nullptr) && ((tryEntryPrev->bbFlags & BBF_REMOVED) != 0))
+ {
+ tryEntryPrev = tryEntryPrev->bbPrev;
+ }
- fgRemoveEHTableEntry(XTnum);
+ // Because we've added an unremovable scratch block as
+ // fgFirstBB, this backwards walk should always find
+ // some block.
+ assert(tryEntryPrev != nullptr);
- if (XTnum < compHndBBtabCount)
- {
- // There are more entries left to process, so do more. Note that
- // HBtab now points to the next entry, that we copied down to the
- // current slot. XTnum also stays the same.
- goto AGAIN;
+ // If there is a next block of this prev block, and that block is
+ // contained in the current try, we'd like to make that block
+ // the new start of the try, and keep the region.
+ BasicBlock* newTryEntry = tryEntryPrev->bbNext;
+ bool updateTryEntry = false;
+
+ if ((newTryEntry != nullptr) && bbInTryRegions(XTnum, newTryEntry))
+ {
+ // We want to trim the begin extent of the current try region to newTryEntry.
+ //
+ // This method is invoked after EH normalization, so we may need to ensure all
+ // try regions begin at blocks that are not the start or end of some other try.
+ //
+ // So, see if this block is already the start or end of some other EH region.
+ if (bbIsTryBeg(newTryEntry))
+ {
+ // We've already end-trimmed the inner try. Do the same now for the
+ // current try, so it is easier to detect when they mutually protect.
+ // (we will call this again later, which is harmless).
+ fgSkipRmvdBlocks(HBtab);
+
+ // If this try and the inner try form a "mutually protected try region"
+ // then we must continue to share the try entry block.
+ EHblkDsc* const HBinner = ehGetBlockTryDsc(newTryEntry);
+ assert(HBinner->ebdTryBeg == newTryEntry);
+
+ if (HBtab->ebdTryLast != HBinner->ebdTryLast)
+ {
+ updateTryEntry = true;
+ }
+ }
+ // Also, a try and handler cannot start at the same block
+ else if (bbIsHandlerBeg(newTryEntry))
+ {
+ updateTryEntry = true;
+ }
+
+ if (updateTryEntry)
+ {
+ // We need to trim the current try to begin at a different block. Normally
+ // this would be problematic as we don't have enough context to redirect
+ // all the incoming edges, but we know oldTryEntry is unreachable.
+ // So there are no incoming edges to worry about.
+ //
+ assert(!tryEntryPrev->bbFallsThrough());
+
+ // What follows is similar to fgNewBBInRegion, but we can't call that
+ // here as the oldTryEntry is no longer in the main bb list.
+ newTryEntry = bbNewBasicBlock(BBJ_NONE);
+ newTryEntry->bbFlags |= (BBF_IMPORTED | BBF_INTERNAL);
+
+ // Set the right EH region indices on this new block.
+ //
+ // Patchpoints currently cannot be inside handler regions,
+ // and so likewise the old and new try region entries.
+ assert(!oldTryEntry->hasHndIndex());
+ newTryEntry->setTryIndex(XTnum);
+ newTryEntry->clearHndIndex();
+ fgInsertBBafter(tryEntryPrev, newTryEntry);
+
+ JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n",
+ XTnum + delCnt, oldTryEntry->bbNum, newTryEntry->bbNum);
+ }
+ else
+ {
+ // We can just trim the try to newTryEntry as it is not part of some inner try or handler.
+ JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to " FMT_BB "\n", XTnum + delCnt,
+ oldTryEntry->bbNum, newTryEntry->bbNum);
+ }
+
+ // Update the handler table
+ fgSetTryBeg(HBtab, newTryEntry);
+
+ // Try entry blocks get specially marked and have special protection.
+ HBtab->ebdTryBeg->bbFlags |= BBF_DONT_REMOVE | BBF_TRY_BEG | BBF_HAS_LABEL;
+
+ // We are keeping this try region
+ removeTryRegion = false;
+ }
}
- break; // no more entries (we deleted the last one), so exit the loop
- }
+ if (removeTryRegion)
+ {
+ // In the dump, refer to the region by its original index.
+ JITDUMP("Try region #%u (" FMT_BB " -- " FMT_BB ") not imported, removing try from the EH table\n",
+ XTnum + delCnt, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum);
-/* At this point we know we have a valid try block */
+ delCnt++;
-#ifdef DEBUG
+ fgRemoveEHTableEntry(XTnum);
+
+ if (XTnum < compHndBBtabCount)
+ {
+ // There are more entries left to process, so do more. Note that
+ // HBtab now points to the next entry, that we copied down to the
+ // current slot. XTnum also stays the same.
+ goto AGAIN;
+ }
+
+ // no more entries (we deleted the last one), so exit the loop
+ break;
+ }
+ }
+
+ // If we get here, the try entry block was not removed.
+ // Check some invariants.
assert(HBtab->ebdTryBeg->bbFlags & BBF_IMPORTED);
assert(HBtab->ebdTryBeg->bbFlags & BBF_DONT_REMOVE);
-
assert(HBtab->ebdHndBeg->bbFlags & BBF_IMPORTED);
assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE);
@@ -9813,10 +9967,10 @@ void Compiler::fgRemoveEmptyBlocks()
assert(HBtab->ebdFilter->bbFlags & BBF_IMPORTED);
assert(HBtab->ebdFilter->bbFlags & BBF_DONT_REMOVE);
}
-#endif // DEBUG
+ // Finally, do region end trimming -- update try and handler ends to reflect removed blocks.
fgSkipRmvdBlocks(HBtab);
- } /* end of the for loop over XTnum */
+ }
// Renumber the basic blocks
JITDUMP("\nRenumbering the basic blocks for fgRemoveEmptyBlocks\n");
@@ -13222,6 +13376,10 @@ void Compiler::fgComputeCalledCount(BasicBlock::weight_t returnWeight)
{
fgFirstBB->bbFlags |= BBF_RUN_RARELY;
}
+ else
+ {
+ fgFirstBB->bbFlags &= ~BBF_RUN_RARELY;
+ }
}
#if DEBUG
@@ -20570,6 +20728,12 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb
return true;
}
+ // For OSR, we allow the firstBB to branch to the middle of a try.
+ if (comp->opts.IsOSR() && (blockPred == comp->fgFirstBB))
+ {
+ return true;
+ }
+
printf("Jump into the middle of try region: " FMT_BB " branches to " FMT_BB "\n", blockPred->bbNum, block->bbNum);
assert(!"Jump into middle of try region");
return false;
@@ -20752,6 +20916,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef
#endif // DEBUG
fgDebugCheckBlockLinks();
+ fgFirstBBisScratch();
if (fgBBcount > 10000 && expensiveDebugCheckLevel < 1)
{
diff --git a/src/coreclr/src/jit/gcencode.cpp b/src/coreclr/src/jit/gcencode.cpp
index 5d05f969a1f48d..a346b9fd1ab258 100644
--- a/src/coreclr/src/jit/gcencode.cpp
+++ b/src/coreclr/src/jit/gcencode.cpp
@@ -22,6 +22,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
#include "gcinfotypes.h"
+#include "patchpointinfo.h"
ReturnKind GCTypeToReturnKind(CorInfoGCType gcType)
{
@@ -3888,20 +3889,43 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz
assert(false);
}
- gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(
- compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
- compiler->isFramePointerUsed()),
- ctxtParamType);
+ int offset = 0;
+
+ if (compiler->opts.IsOSR())
+ {
+ PatchpointInfo* ppInfo = compiler->info.compPatchpointInfo;
+ offset = ppInfo->GenericContextArgOffset();
+ assert(offset != -1);
+ }
+ else
+ {
+ offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
+ compiler->isFramePointerUsed());
+ }
+
+ gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, ctxtParamType);
}
// As discussed above, handle the case where the generics context is obtained via
// the method table of "this".
else if (compiler->lvaKeepAliveAndReportThis())
{
assert(compiler->info.compThisArg != BAD_VAR_NUM);
- gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(
- compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
- compiler->isFramePointerUsed()),
- GENERIC_CONTEXTPARAM_THIS);
+
+ int offset = 0;
+
+ if (compiler->opts.IsOSR())
+ {
+ PatchpointInfo* ppInfo = compiler->info.compPatchpointInfo;
+ offset = ppInfo->GenericContextArgOffset();
+ assert(offset != -1);
+ }
+ else
+ {
+ offset = compiler->lvaToCallerSPRelativeOffset(compiler->lvaCachedGenericContextArgOffset(),
+ compiler->isFramePointerUsed());
+ }
+
+ gcInfoEncoderWithLog->SetGenericsInstContextStackSlot(offset, GENERIC_CONTEXTPARAM_THIS);
}
if (compiler->getNeedsGSSecurityCookie())
@@ -3909,12 +3933,27 @@ void GCInfo::gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSiz
assert(compiler->lvaGSSecurityCookie != BAD_VAR_NUM);
// The lv offset is FP-relative, and the using code expects caller-sp relative, so translate.
+ int offset = compiler->lvaGetCallerSPRelativeOffset(compiler->lvaGSSecurityCookie);
+
+ if (compiler->opts.IsOSR())
+ {
+ // The offset computed above already includes the OSR frame adjustment, plus the
+ // pop of the "pseudo return address" from the OSR frame.
+ //
+ // To get to caller-SP, we need to subtract off the original frame size and the
+ // pushed RA and RBP for that frame. But ppInfo's FpToSpDelta also accounts for the
+ // pseudo RA between the original method frame and the OSR frame. So the net adjustment
+ // is simply FpToSpDelta plus one register.
+ PatchpointInfo* ppInfo = compiler->info.compPatchpointInfo;
+ int adjustment = ppInfo->FpToSpDelta() + REGSIZE_BYTES;
+ offset -= adjustment;
+ JITDUMP("OSR cookie adjustment %d, final caller-SP offset %d\n", adjustment, offset);
+ }
+
// The code offset ranges assume that the GS Cookie slot is initialized in the prolog, and is valid
// through the remainder of the method. We will not query for the GS Cookie while we're in an epilog,
// so the question of where in the epilog it becomes invalid is moot.
- gcInfoEncoderWithLog->SetGSCookieStackSlot(compiler->lvaGetCallerSPRelativeOffset(
- compiler->lvaGSSecurityCookie),
- prologSize, methodSize);
+ gcInfoEncoderWithLog->SetGSCookieStackSlot(offset, prologSize, methodSize);
}
else if (compiler->lvaReportParamTypeArg() || compiler->lvaKeepAliveAndReportThis())
{
diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp
index d31a8709080d72..1f0d2e7bf3b02f 100644
--- a/src/coreclr/src/jit/gentree.cpp
+++ b/src/coreclr/src/jit/gentree.cpp
@@ -10040,18 +10040,7 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, __in __in_z _
if (layout != nullptr)
{
- if (layout->IsBlockLayout())
- {
- printf("<%u>", layout->GetSize());
- }
- else if (varTypeIsSIMD(tree->TypeGet()))
- {
- printf("<%s>", layout->GetClassName());
- }
- else
- {
- printf("<%s, %u>", layout->GetClassName(), layout->GetSize());
- }
+ gtDispClassLayout(layout, tree->TypeGet());
}
}
@@ -10413,6 +10402,69 @@ void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
}
}
+//------------------------------------------------------------------------
+// gtDispLclVarStructType: Print size and type information about a struct or lclBlk local variable.
+//
+// Arguments:
+// lclNum - The local var id.
+//
+void Compiler::gtDispLclVarStructType(unsigned lclNum)
+{
+ LclVarDsc* varDsc = lvaGetDesc(lclNum);
+ var_types type = varDsc->TypeGet();
+ if (type == TYP_STRUCT)
+ {
+ ClassLayout* layout = varDsc->GetLayout();
+ assert(layout != nullptr);
+ gtDispClassLayout(layout, type);
+ }
+ else if (type == TYP_LCLBLK)
+ {
+#if FEATURE_FIXED_OUT_ARGS
+ assert(lclNum == lvaOutgoingArgSpaceVar);
+ // Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until
+ // after we set it to something.
+ if (lvaOutgoingArgSpaceSize.HasFinalValue())
+ {
+ // A PhasedVar can't be directly used as an arg to a variadic function
+ unsigned value = lvaOutgoingArgSpaceSize;
+ printf("<%u> ", value);
+ }
+ else
+ {
+ printf(" "); // The value hasn't yet been determined
+ }
+#else
+ assert(!"Unknown size");
+ NO_WAY("Target doesn't support TYP_LCLBLK");
+#endif // FEATURE_FIXED_OUT_ARGS
+ }
+}
+
+//------------------------------------------------------------------------
+// gtDispClassLayout: Print size and type information about a layout.
+//
+// Arguments:
+// layout - the layout;
+// type - variable type, used to avoid printing size for SIMD nodes.
+//
+void Compiler::gtDispClassLayout(ClassLayout* layout, var_types type)
+{
+ assert(layout != nullptr);
+ if (layout->IsBlockLayout())
+ {
+ printf("<%u>", layout->GetSize());
+ }
+ else if (varTypeIsSIMD(type))
+ {
+ printf("<%s>", layout->GetClassName());
+ }
+ else
+ {
+ printf("<%s, %u>", layout->GetClassName(), layout->GetSize());
+ }
+}
+
/*****************************************************************************/
void Compiler::gtDispConst(GenTree* tree)
{
diff --git a/src/coreclr/src/jit/gschecks.cpp b/src/coreclr/src/jit/gschecks.cpp
index cc1bb6fc58021e..e31998d3b5a429 100644
--- a/src/coreclr/src/jit/gschecks.cpp
+++ b/src/coreclr/src/jit/gschecks.cpp
@@ -24,7 +24,7 @@ void Compiler::gsGSChecksInitCookie()
{
var_types type = TYP_I_IMPL;
- lvaGSSecurityCookie = lvaGrabTemp(false DEBUGARG("GSSecurityCookie"));
+ lvaGSSecurityCookie = lvaGrabTempWithImplicitUse(false DEBUGARG("GSSecurityCookie"));
// Prevent cookie init/check from being optimized
lvaSetVarAddrExposed(lvaGSSecurityCookie);
diff --git a/src/coreclr/src/jit/hwintrinsic.cpp b/src/coreclr/src/jit/hwintrinsic.cpp
index 0aef72951e3b90..4af653b766e6be 100644
--- a/src/coreclr/src/jit/hwintrinsic.cpp
+++ b/src/coreclr/src/jit/hwintrinsic.cpp
@@ -61,12 +61,11 @@ var_types Compiler::getBaseTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
{
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
- if (category == HW_Category_MemoryStore || HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) ||
- HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic))
+ if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic))
{
CORINFO_ARG_LIST_HANDLE arg = sig->args;
- if ((category == HW_Category_MemoryStore) || HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic))
+ if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic))
{
arg = info.compCompHnd->getArgNext(arg);
}
@@ -303,7 +302,7 @@ NamedIntrinsic HWIntrinsicInfo::lookupId(Compiler* comp,
const char* enclosingClassName)
{
// TODO-Throughput: replace sequential search by binary search
- InstructionSet isa = lookupIsa(className, enclosingClassName);
+ CORINFO_InstructionSet isa = lookupIsa(className, enclosingClassName);
if (isa == InstructionSet_ILLEGAL)
{
@@ -585,7 +584,7 @@ GenTree* Compiler::addRangeCheckIfNeeded(NamedIntrinsic intrinsic, GenTree* immO
//
// Return Value:
// true iff the given instruction set is supported in the current compilation.
-bool Compiler::compSupportsHWIntrinsic(InstructionSet isa)
+bool Compiler::compSupportsHWIntrinsic(CORINFO_InstructionSet isa)
{
return JitConfig.EnableHWIntrinsic() && (featureSIMD || HWIntrinsicInfo::isScalarIsa(isa)) &&
(
@@ -630,11 +629,11 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_SIG_INFO* sig,
bool mustExpand)
{
- InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic);
- HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
- int numArgs = sig->numArgs;
- var_types retType = JITtype2varType(sig->retType);
- var_types baseType = TYP_UNKNOWN;
+ CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsic);
+ HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
+ int numArgs = sig->numArgs;
+ var_types retType = JITtype2varType(sig->retType);
+ var_types baseType = TYP_UNKNOWN;
if ((retType == TYP_STRUCT) && featureSIMD)
{
diff --git a/src/coreclr/src/jit/hwintrinsic.h b/src/coreclr/src/jit/hwintrinsic.h
index 2931c51813e1ed..b80dce7f1c0b00 100644
--- a/src/coreclr/src/jit/hwintrinsic.h
+++ b/src/coreclr/src/jit/hwintrinsic.h
@@ -113,15 +113,15 @@ enum HWIntrinsicFlag : unsigned int
struct HWIntrinsicInfo
{
- NamedIntrinsic id;
- const char* name;
- InstructionSet isa;
- int ival;
- unsigned simdSize;
- int numArgs;
- instruction ins[10];
- HWIntrinsicCategory category;
- HWIntrinsicFlag flags;
+ NamedIntrinsic id;
+ const char* name;
+ CORINFO_InstructionSet isa;
+ int ival;
+ unsigned simdSize;
+ int numArgs;
+ instruction ins[10];
+ HWIntrinsicCategory category;
+ HWIntrinsicFlag flags;
static const HWIntrinsicInfo& lookup(NamedIntrinsic id);
@@ -129,7 +129,7 @@ struct HWIntrinsicInfo
const char* className,
const char* methodName,
const char* enclosingClassName);
- static InstructionSet lookupIsa(const char* className, const char* enclosingClassName);
+ static CORINFO_InstructionSet lookupIsa(const char* className, const char* enclosingClassName);
static unsigned lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORINFO_SIG_INFO* sig);
static int lookupNumArgs(const GenTreeHWIntrinsic* node);
@@ -138,8 +138,8 @@ struct HWIntrinsicInfo
static bool isImmOp(NamedIntrinsic id, const GenTree* op);
static bool isInImmRange(NamedIntrinsic id, int ival);
- static bool isFullyImplementedIsa(InstructionSet isa);
- static bool isScalarIsa(InstructionSet isa);
+ static bool isFullyImplementedIsa(CORINFO_InstructionSet isa);
+ static bool isScalarIsa(CORINFO_InstructionSet isa);
#ifdef TARGET_XARCH
static bool isAVX2GatherIntrinsic(NamedIntrinsic id);
@@ -157,7 +157,7 @@ struct HWIntrinsicInfo
return lookup(id).name;
}
- static InstructionSet lookupIsa(NamedIntrinsic id)
+ static CORINFO_InstructionSet lookupIsa(NamedIntrinsic id)
{
return lookup(id).isa;
}
diff --git a/src/coreclr/src/jit/hwintrinsicarm64.cpp b/src/coreclr/src/jit/hwintrinsicarm64.cpp
index 1f7dde990cc7d7..7f494ab96582f5 100644
--- a/src/coreclr/src/jit/hwintrinsicarm64.cpp
+++ b/src/coreclr/src/jit/hwintrinsicarm64.cpp
@@ -15,7 +15,7 @@
//
// Return Value:
// The 64-bit only InstructionSet associated with isa
-static InstructionSet Arm64VersionOfIsa(InstructionSet isa)
+static CORINFO_InstructionSet Arm64VersionOfIsa(CORINFO_InstructionSet isa)
{
switch (isa)
{
@@ -38,7 +38,7 @@ static InstructionSet Arm64VersionOfIsa(InstructionSet isa)
//
// Return Value:
// The InstructionSet associated with className
-static InstructionSet lookupInstructionSet(const char* className)
+static CORINFO_InstructionSet lookupInstructionSet(const char* className)
{
assert(className != nullptr);
@@ -99,7 +99,7 @@ static InstructionSet lookupInstructionSet(const char* className)
//
// Return Value:
// The InstructionSet associated with className and enclosingClassName
-InstructionSet HWIntrinsicInfo::lookupIsa(const char* className, const char* enclosingClassName)
+CORINFO_InstructionSet HWIntrinsicInfo::lookupIsa(const char* className, const char* enclosingClassName)
{
assert(className != nullptr);
@@ -153,7 +153,7 @@ bool HWIntrinsicInfo::isInImmRange(NamedIntrinsic id, int ival)
//
// Return Value:
// true if isa is supported; otherwise, false
-bool HWIntrinsicInfo::isFullyImplementedIsa(InstructionSet isa)
+bool HWIntrinsicInfo::isFullyImplementedIsa(CORINFO_InstructionSet isa)
{
switch (isa)
{
@@ -188,7 +188,7 @@ bool HWIntrinsicInfo::isFullyImplementedIsa(InstructionSet isa)
//
// Return Value:
// true if isa is scalar; otherwise, false
-bool HWIntrinsicInfo::isScalarIsa(InstructionSet isa)
+bool HWIntrinsicInfo::isScalarIsa(CORINFO_InstructionSet isa)
{
switch (isa)
{
diff --git a/src/coreclr/src/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/src/jit/hwintrinsiccodegenarm64.cpp
index 178f760a11f4d8..6853b59712243d 100644
--- a/src/coreclr/src/jit/hwintrinsiccodegenarm64.cpp
+++ b/src/coreclr/src/jit/hwintrinsiccodegenarm64.cpp
@@ -301,6 +301,10 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op2Reg, op3Reg, op1Reg);
break;
+ case NI_AdvSimd_Store:
+ GetEmitter()->emitIns_R_R(ins, emitSize, op2Reg, op1Reg, opt);
+ break;
+
default:
unreached();
}
diff --git a/src/coreclr/src/jit/hwintrinsiccodegenxarch.cpp b/src/coreclr/src/jit/hwintrinsiccodegenxarch.cpp
index 1ba67be0bb2d9e..cb5160e5cb9373 100644
--- a/src/coreclr/src/jit/hwintrinsiccodegenxarch.cpp
+++ b/src/coreclr/src/jit/hwintrinsiccodegenxarch.cpp
@@ -79,11 +79,11 @@ static bool genIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicC
//
void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
- HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
- int ival = HWIntrinsicInfo::lookupIval(intrinsicId);
- int numArgs = HWIntrinsicInfo::lookupNumArgs(node);
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
+ HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
+ int ival = HWIntrinsicInfo::lookupIval(intrinsicId);
+ int numArgs = HWIntrinsicInfo::lookupNumArgs(node);
assert(HWIntrinsicInfo::RequiresCodegen(intrinsicId));
diff --git a/src/coreclr/src/jit/hwintrinsiclistarm64.h b/src/coreclr/src/jit/hwintrinsiclistarm64.h
index 8be27939f4e13c..ac2a571b34e843 100644
--- a/src/coreclr/src/jit/hwintrinsiclistarm64.h
+++ b/src/coreclr/src/jit/hwintrinsiclistarm64.h
@@ -97,6 +97,7 @@ HARDWARE_INTRINSIC(AdvSimd, Or, -
HARDWARE_INTRINSIC(AdvSimd, OrNot, -1, -1, 2, {INS_orn, INS_orn, INS_orn, INS_orn, INS_orn, INS_orn, INS_orn, INS_orn, INS_orn, INS_orn}, HW_Category_SimpleSIMD, HW_Flag_NoContainment|HW_Flag_UnfixedSIMDSize)
HARDWARE_INTRINSIC(AdvSimd, PopCount, -1, -1, 1, {INS_cnt, INS_cnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SimpleSIMD, HW_Flag_NoContainment|HW_Flag_UnfixedSIMDSize)
HARDWARE_INTRINSIC(AdvSimd, SqrtScalar, -1, 8, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_fsqrt, INS_fsqrt}, HW_Category_SIMDScalar, HW_Flag_NoContainment)
+HARDWARE_INTRINSIC(AdvSimd, Store, -1, -1, 2, {INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_UnfixedSIMDSize|HW_Flag_SpecialCodeGen|HW_Flag_BaseTypeFromSecondArg)
HARDWARE_INTRINSIC(AdvSimd, Subtract, -1, -1, 2, {INS_sub, INS_sub, INS_sub, INS_sub, INS_sub, INS_sub, INS_sub, INS_sub, INS_fsub, INS_invalid}, HW_Category_SimpleSIMD, HW_Flag_NoContainment|HW_Flag_UnfixedSIMDSize)
HARDWARE_INTRINSIC(AdvSimd, SubtractScalar, -1, 8, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sub, INS_sub, INS_fsub, INS_fsub}, HW_Category_SIMDScalar, HW_Flag_NoContainment)
HARDWARE_INTRINSIC(AdvSimd, Xor, -1, -1, 2, {INS_eor, INS_eor, INS_eor, INS_eor, INS_eor, INS_eor, INS_eor, INS_eor, INS_eor, INS_eor}, HW_Category_SimpleSIMD, HW_Flag_NoContainment|HW_Flag_UnfixedSIMDSize|HW_Flag_Commutative)
diff --git a/src/coreclr/src/jit/hwintrinsiclistxarch.h b/src/coreclr/src/jit/hwintrinsiclistxarch.h
index 9cb005b8a0f7d8..de3add9c696272 100644
--- a/src/coreclr/src/jit/hwintrinsiclistxarch.h
+++ b/src/coreclr/src/jit/hwintrinsiclistxarch.h
@@ -155,13 +155,13 @@ HARDWARE_INTRINSIC(SSE_ReciprocalSqrtScalar, "ReciprocalS
HARDWARE_INTRINSIC(SSE_Shuffle, "Shuffle", SSE, -1, 16, 3, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_shufps, INS_invalid}, HW_Category_IMM, HW_Flag_FullRangeIMM)
HARDWARE_INTRINSIC(SSE_Sqrt, "Sqrt", SSE, -1, 16, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sqrtps, INS_invalid}, HW_Category_SimpleSIMD, HW_Flag_NoRMWSemantics)
HARDWARE_INTRINSIC(SSE_SqrtScalar, "SqrtScalar", SSE, -1, 16, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sqrtss, INS_invalid}, HW_Category_SIMDScalar, HW_Flag_CopyUpperBits)
-HARDWARE_INTRINSIC(SSE_Store, "Store", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movups, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE_StoreAligned, "StoreAligned", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movaps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE_StoreAlignedNonTemporal, "StoreAlignedNonTemporal", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movntps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
+HARDWARE_INTRINSIC(SSE_Store, "Store", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movups, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE_StoreAligned, "StoreAligned", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movaps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE_StoreAlignedNonTemporal, "StoreAlignedNonTemporal", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movntps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
HARDWARE_INTRINSIC(SSE_StoreFence, "StoreFence", SSE, -1, 0, 0, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_Special, HW_Flag_NoContainment|HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE_StoreHigh, "StoreHigh", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movhps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE_StoreLow, "StoreLow", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movlps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE_StoreScalar, "StoreScalar", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movss, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
+HARDWARE_INTRINSIC(SSE_StoreHigh, "StoreHigh", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movhps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE_StoreLow, "StoreLow", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movlps, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE_StoreScalar, "StoreScalar", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movss, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
HARDWARE_INTRINSIC(SSE_Subtract, "Subtract", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_subps, INS_invalid}, HW_Category_SimpleSIMD, HW_Flag_NoFlag)
HARDWARE_INTRINSIC(SSE_SubtractScalar, "SubtractScalar", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_subss, INS_invalid}, HW_Category_SIMDScalar, HW_Flag_CopyUpperBits)
HARDWARE_INTRINSIC(SSE_UnpackHigh, "UnpackHigh", SSE, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_unpckhps, INS_invalid}, HW_Category_SimpleSIMD, HW_Flag_NoFlag)
@@ -245,7 +245,7 @@ HARDWARE_INTRINSIC(SSE2_LoadHigh, "LoadHigh",
HARDWARE_INTRINSIC(SSE2_LoadLow, "LoadLow", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movlpd}, HW_Category_MemoryLoad, HW_Flag_NoRMWSemantics)
HARDWARE_INTRINSIC(SSE2_LoadScalarVector128, "LoadScalarVector128", SSE2, -1, 16, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movd, INS_movd, INS_movq, INS_movq, INS_invalid, INS_movsdsse2}, HW_Category_MemoryLoad, HW_Flag_NoRMWSemantics)
HARDWARE_INTRINSIC(SSE2_LoadVector128, "LoadVector128", SSE2, -1, 16, 1, {INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_invalid, INS_movupd}, HW_Category_MemoryLoad, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE2_MaskMove, "MaskMove", SSE2, -1, 16, 3, {INS_maskmovdqu, INS_maskmovdqu, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoContainment|HW_Flag_NoRMWSemantics)
+HARDWARE_INTRINSIC(SSE2_MaskMove, "MaskMove", SSE2, -1, 16, 3, {INS_maskmovdqu, INS_maskmovdqu, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoContainment|HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
HARDWARE_INTRINSIC(SSE2_Max, "Max", SSE2, -1, 16, 2, {INS_invalid, INS_pmaxub, INS_pmaxsw, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_maxpd}, HW_Category_SimpleSIMD, HW_Flag_Commutative)
HARDWARE_INTRINSIC(SSE2_MemoryFence, "MemoryFence", SSE2, -1, 0, 0, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_Special, HW_Flag_NoContainment|HW_Flag_NoRMWSemantics)
HARDWARE_INTRINSIC(SSE2_MaxScalar, "MaxScalar", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_maxsd}, HW_Category_SIMDScalar, HW_Flag_CopyUpperBits)
@@ -272,13 +272,13 @@ HARDWARE_INTRINSIC(SSE2_ShuffleHigh, "ShuffleHigh
HARDWARE_INTRINSIC(SSE2_ShuffleLow, "ShuffleLow", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_pshuflw, INS_pshuflw, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_IMM, HW_Flag_FullRangeIMM)
HARDWARE_INTRINSIC(SSE2_Sqrt, "Sqrt", SSE2, -1, 16, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sqrtpd}, HW_Category_SimpleSIMD, HW_Flag_NoRMWSemantics)
HARDWARE_INTRINSIC(SSE2_SqrtScalar, "SqrtScalar", SSE2, -1, 16, -1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sqrtsd}, HW_Category_SIMDScalar, HW_Flag_CopyUpperBits)
-HARDWARE_INTRINSIC(SSE2_Store, "Store", SSE2, -1, 16, 2, {INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_invalid, INS_movupd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE2_StoreAligned, "StoreAligned", SSE2, -1, 16, 2, {INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_invalid, INS_movapd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE2_StoreAlignedNonTemporal, "StoreAlignedNonTemporal", SSE2, -1, 16, 2, {INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_invalid, INS_movntpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE2_StoreHigh, "StoreHigh", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movhpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE2_StoreLow, "StoreLow", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movlpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(SSE2_StoreNonTemporal, "StoreNonTemporal", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movnti, INS_movnti, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_SpecialCodeGen)
-HARDWARE_INTRINSIC(SSE2_StoreScalar, "StoreScalar", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movq, INS_movq, INS_invalid, INS_movsdsse2}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
+HARDWARE_INTRINSIC(SSE2_Store, "Store", SSE2, -1, 16, 2, {INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_invalid, INS_movupd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE2_StoreAligned, "StoreAligned", SSE2, -1, 16, 2, {INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_invalid, INS_movapd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE2_StoreAlignedNonTemporal, "StoreAlignedNonTemporal", SSE2, -1, 16, 2, {INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_invalid, INS_movntpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE2_StoreHigh, "StoreHigh", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movhpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE2_StoreLow, "StoreLow", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movlpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE2_StoreNonTemporal, "StoreNonTemporal", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movnti, INS_movnti, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_SpecialCodeGen|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(SSE2_StoreScalar, "StoreScalar", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movq, INS_movq, INS_invalid, INS_movsdsse2}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
HARDWARE_INTRINSIC(SSE2_Subtract, "Subtract", SSE2, -1, 16, 2, {INS_psubb, INS_psubb, INS_psubw, INS_psubw, INS_psubd, INS_psubd, INS_psubq, INS_psubq, INS_invalid, INS_subpd}, HW_Category_SimpleSIMD, HW_Flag_NoFlag)
HARDWARE_INTRINSIC(SSE2_SubtractSaturate, "SubtractSaturate", SSE2, -1, 16, 2, {INS_psubsb, INS_psubusb, INS_psubsw, INS_psubusw, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SimpleSIMD, HW_Flag_NoFlag)
HARDWARE_INTRINSIC(SSE2_SubtractScalar, "SubtractScalar", SSE2, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_subsd}, HW_Category_SIMDScalar, HW_Flag_CopyUpperBits)
@@ -297,7 +297,7 @@ HARDWARE_INTRINSIC(SSE2_X64_ConvertToUInt64, "ConvertToUI
HARDWARE_INTRINSIC(SSE2_X64_ConvertScalarToVector128Double, "ConvertScalarToVector128Double", SSE2_X64, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_cvtsi2sd, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMDScalar, HW_Flag_SpecialCodeGen|HW_Flag_BaseTypeFromSecondArg)
HARDWARE_INTRINSIC(SSE2_X64_ConvertScalarToVector128Int64, "ConvertScalarToVector128Int64", SSE2_X64, -1, 16, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_mov_i2xmm, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMDScalar, HW_Flag_NoRMWSemantics|HW_Flag_SpecialCodeGen)
HARDWARE_INTRINSIC(SSE2_X64_ConvertScalarToVector128UInt64, "ConvertScalarToVector128UInt64", SSE2_X64, -1, 16, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_mov_i2xmm, INS_invalid, INS_invalid}, HW_Category_SIMDScalar, HW_Flag_NoRMWSemantics|HW_Flag_SpecialCodeGen)
-HARDWARE_INTRINSIC(SSE2_X64_StoreNonTemporal, "StoreNonTemporal", SSE2_X64, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movnti, INS_movnti, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_SpecialCodeGen)
+HARDWARE_INTRINSIC(SSE2_X64_StoreNonTemporal, "StoreNonTemporal", SSE2_X64, -1, 16, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_movnti, INS_movnti, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_SpecialCodeGen|HW_Flag_BaseTypeFromSecondArg)
// ***************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
// Intrinsic ID Function name ISA ival SIMD size NumArg instructions Category Flags
@@ -447,9 +447,9 @@ HARDWARE_INTRINSIC(AVX_RoundToPositiveInfinity, "RoundToPosi
HARDWARE_INTRINSIC(AVX_RoundToZero, "RoundToZero", AVX, 11, 32, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_roundps, INS_roundpd}, HW_Category_SimpleSIMD, HW_Flag_NoRMWSemantics)
HARDWARE_INTRINSIC(AVX_Shuffle, "Shuffle", AVX, -1, 32, 3, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_shufps, INS_shufpd}, HW_Category_IMM, HW_Flag_NoRMWSemantics|HW_Flag_FullRangeIMM)
HARDWARE_INTRINSIC(AVX_Sqrt, "Sqrt", AVX, -1, 32, 1, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sqrtps, INS_sqrtpd}, HW_Category_SimpleSIMD, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(AVX_Store, "Store", AVX, -1, 32, 2, {INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movups, INS_movupd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(AVX_StoreAligned, "StoreAligned", AVX, -1, 32, 2, {INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movaps, INS_movapd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
-HARDWARE_INTRINSIC(AVX_StoreAlignedNonTemporal, "StoreAlignedNonTemporal", AVX, -1, 32, 2, {INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntps, INS_movntpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics)
+HARDWARE_INTRINSIC(AVX_Store, "Store", AVX, -1, 32, 2, {INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movdqu, INS_movups, INS_movupd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(AVX_StoreAligned, "StoreAligned", AVX, -1, 32, 2, {INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movdqa, INS_movaps, INS_movapd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
+HARDWARE_INTRINSIC(AVX_StoreAlignedNonTemporal, "StoreAlignedNonTemporal", AVX, -1, 32, 2, {INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntdq, INS_movntps, INS_movntpd}, HW_Category_MemoryStore, HW_Flag_NoRMWSemantics|HW_Flag_BaseTypeFromSecondArg)
HARDWARE_INTRINSIC(AVX_Subtract, "Subtract", AVX, -1, 32, 2, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_subps, INS_subpd}, HW_Category_SimpleSIMD, HW_Flag_NoFlag)
HARDWARE_INTRINSIC(AVX_TestC, "TestC", AVX, -1, 0, 2, {INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_vtestps, INS_vtestpd}, HW_Category_SimpleSIMD, HW_Flag_UnfixedSIMDSize|HW_Flag_BaseTypeFromFirstArg)
HARDWARE_INTRINSIC(AVX_TestNotZAndNotC, "TestNotZAndNotC", AVX, -1, 0, 2, {INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_ptest, INS_vtestps, INS_vtestpd}, HW_Category_SimpleSIMD, HW_Flag_UnfixedSIMDSize|HW_Flag_BaseTypeFromFirstArg)
diff --git a/src/coreclr/src/jit/hwintrinsicxarch.cpp b/src/coreclr/src/jit/hwintrinsicxarch.cpp
index 6aa59aa2bb7f68..80b341f1c90ecb 100644
--- a/src/coreclr/src/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/src/jit/hwintrinsicxarch.cpp
@@ -15,7 +15,7 @@
//
// Return Value:
// The 64-bit only InstructionSet associated with isa
-static InstructionSet X64VersionOfIsa(InstructionSet isa)
+static CORINFO_InstructionSet X64VersionOfIsa(CORINFO_InstructionSet isa)
{
switch (isa)
{
@@ -48,7 +48,7 @@ static InstructionSet X64VersionOfIsa(InstructionSet isa)
//
// Return Value:
// The InstructionSet associated with className
-static InstructionSet lookupInstructionSet(const char* className)
+static CORINFO_InstructionSet lookupInstructionSet(const char* className)
{
assert(className != nullptr);
if (className[0] == 'A')
@@ -147,7 +147,7 @@ static InstructionSet lookupInstructionSet(const char* className)
//
// Return Value:
// The InstructionSet associated with className and enclosingClassName
-InstructionSet HWIntrinsicInfo::lookupIsa(const char* className, const char* enclosingClassName)
+CORINFO_InstructionSet HWIntrinsicInfo::lookupIsa(const char* className, const char* enclosingClassName)
{
assert(className != nullptr);
@@ -253,7 +253,7 @@ bool HWIntrinsicInfo::isAVX2GatherIntrinsic(NamedIntrinsic id)
//
// Return Value:
// true if isa is supported; otherwise, false
-bool HWIntrinsicInfo::isFullyImplementedIsa(InstructionSet isa)
+bool HWIntrinsicInfo::isFullyImplementedIsa(CORINFO_InstructionSet isa)
{
switch (isa)
{
@@ -302,7 +302,7 @@ bool HWIntrinsicInfo::isFullyImplementedIsa(InstructionSet isa)
//
// Return Value:
// true if isa is scalar; otherwise, false
-bool HWIntrinsicInfo::isScalarIsa(InstructionSet isa)
+bool HWIntrinsicInfo::isScalarIsa(CORINFO_InstructionSet isa)
{
switch (isa)
{
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index e553a179714f66..64470231697357 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -7349,11 +7349,18 @@ var_types Compiler::impImportCall(OPCODE opcode,
// Also, popping arguments in a varargs function is more work and NYI
// If we have a security object, we have to keep our frame around for callers
// to see any imperative security.
+ // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT
+ // at the end, so tailcalls should be disabled.
if (info.compFlags & CORINFO_FLG_SYNCH)
{
canTailCall = false;
szCanTailCallFailReason = "Caller is synchronized";
}
+ else if (opts.IsReversePInvoke())
+ {
+ canTailCall = false;
+ szCanTailCallFailReason = "Caller is Reverse P/Invoke";
+ }
#if !FEATURE_FIXED_OUT_ARGS
else if (info.compIsVarArgs)
{
@@ -8495,10 +8502,37 @@ var_types Compiler::impImportCall(OPCODE opcode,
if ((tailCallFlags != 0) && canTailCall && gtIsRecursiveCall(methHnd))
{
assert(verCurrentState.esStackDepth == 0);
+ BasicBlock* loopHead = nullptr;
+ if (opts.IsOSR())
+ {
+ // We might not have been planning on importing the method
+ // entry block, but now we must.
+
+ // We should have remembered the real method entry block.
+ assert(fgEntryBB != nullptr);
+
+ JITDUMP("\nOSR: found tail recursive call in the method, scheduling " FMT_BB " for importation\n",
+ fgEntryBB->bbNum);
+ impImportBlockPending(fgEntryBB);
+
+ // Note there is no explicit flow to this block yet,
+ // make sure it stays around until we actually try
+ // the optimization.
+ fgEntryBB->bbFlags |= BBF_DONT_REMOVE;
+
+ loopHead = fgEntryBB;
+ }
+ else
+ {
+ // For normal jitting we'll branch back to the firstBB; this
+ // should already be imported.
+ loopHead = fgFirstBB;
+ }
+
JITDUMP("\nFound tail recursive call in the method. Mark " FMT_BB " to " FMT_BB
" as having a backward branch.\n",
- fgFirstBB->bbNum, compCurBB->bbNum);
- fgMarkBackwardJump(fgFirstBB, compCurBB);
+ loopHead->bbNum, compCurBB->bbNum);
+ fgMarkBackwardJump(loopHead, compCurBB);
}
// Note: we assume that small return types are already normalized by the managed callee
@@ -10510,6 +10544,35 @@ void Compiler::impImportBlockCode(BasicBlock* block)
impBeginTreeList();
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+ // Are there any places in the method where we might add a patchpoint?
+ if (compHasBackwardJump)
+ {
+ // Are patchpoints enabled?
+ if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
+ {
+ // We don't inline at Tier0, if we do, we may need rethink our approach.
+ // Could probably support inlines that don't introduce flow.
+ assert(!compIsForInlining());
+
+ // Is the start of this block a suitable patchpoint?
+ // Current strategy is blocks that are stack-empty and backwards branch targets
+ if (block->bbFlags & BBF_BACKWARD_JUMP_TARGET && (verCurrentState.esStackDepth == 0))
+ {
+ block->bbFlags |= BBF_PATCHPOINT;
+ setMethodHasPatchpoint();
+ }
+ }
+ }
+ else
+ {
+ // Should not see backward branch targets w/o backwards branches
+ assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0);
+ }
+
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
/* Walk the opcodes that comprise the basic block */
const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
@@ -11429,6 +11492,11 @@ void Compiler::impImportBlockCode(BasicBlock* block)
BADCODE("Jmp not allowed in protected region");
}
+ if (opts.IsReversePInvoke())
+ {
+ BADCODE("Jmp not allowed in reverse P/Invoke");
+ }
+
if (verCurrentState.esStackDepth != 0)
{
BADCODE("Stack must be empty after CEE_JMPs");
@@ -16734,10 +16802,13 @@ void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
assert(HBtab->HasFaultHandler());
}
}
+ }
- /* Recursively process the handler block */
- BasicBlock* hndBegBB = HBtab->ebdHndBeg;
+ // Recursively process the handler block, if we haven't already done so.
+ BasicBlock* hndBegBB = HBtab->ebdHndBeg;
+ if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0))
+ {
// Construct the proper verification stack state
// either empty or one that contains just
// the Exception Object that we are dealing with
@@ -16773,18 +16844,22 @@ void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
// Queue up the handler for importing
//
impImportBlockPending(hndBegBB);
+ }
- if (HBtab->HasFilter())
- {
- /* @VERIFICATION : Ideally the end of filter state should get
- propagated to the catch handler, this is an incompleteness,
- but is not a security/compliance issue, since the only
- interesting state is the 'thisInit' state.
- */
+ // Process the filter block, if we haven't already done so.
+ if (HBtab->HasFilter())
+ {
+ /* @VERIFICATION : Ideally the end of filter state should get
+ propagated to the catch handler, this is an incompleteness,
+ but is not a security/compliance issue, since the only
+ interesting state is the 'thisInit' state.
+ */
- verCurrentState.esStackDepth = 0;
+ BasicBlock* filterBB = HBtab->ebdFilter;
- BasicBlock* filterBB = HBtab->ebdFilter;
+ if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0))
+ {
+ verCurrentState.esStackDepth = 0;
// push catch arg the stack, spill to a temp if necessary
// Note: can update HBtab->ebdFilter!
@@ -16794,7 +16869,9 @@ void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
impImportBlockPending(filterBB);
}
}
- else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
+
+ // This seems redundant ....??
+ if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
{
/* Recursively process the handler block */
@@ -17861,7 +17938,7 @@ void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* bl
* basic flowgraph has already been constructed and is passed in.
*/
-void Compiler::impImport(BasicBlock* method)
+void Compiler::impImport()
{
#ifdef DEBUG
if (verbose)
@@ -17923,21 +18000,45 @@ void Compiler::impImport(BasicBlock* method)
impPendingList = impPendingFree = nullptr;
- /* Add the entry-point to the worker-list */
+ // Skip leading internal blocks.
+ // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects.
+ //
+    // We expect a linear flow to the first non-internal block. But not necessarily straight-line flow.
+ BasicBlock* entryBlock = fgFirstBB;
- // Skip leading internal blocks. There can be one as a leading scratch BB, and more
- // from EH normalization.
- // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
- // out.
- for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
+ while (entryBlock->bbFlags & BBF_INTERNAL)
{
- // Treat these as imported.
- assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
- JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", method->bbNum);
- method->bbFlags |= BBF_IMPORTED;
+ JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum);
+ entryBlock->bbFlags |= BBF_IMPORTED;
+
+ if (entryBlock->bbJumpKind == BBJ_NONE)
+ {
+ entryBlock = entryBlock->bbNext;
+ }
+ else if (entryBlock->bbJumpKind == BBJ_ALWAYS)
+ {
+ // Only expected for OSR
+ assert(opts.IsOSR());
+ entryBlock = entryBlock->bbJumpDest;
+ }
+ else
+ {
+ assert(!"unexpected bbJumpKind in entry sequence");
+ }
}
- impImportBlockPending(method);
+ // Note for OSR we'd like to be able to verify this block must be
+ // stack empty, but won't know that until we've imported...so instead
+ // we'll BADCODE out if we mess up.
+ //
+ // (the concern here is that the runtime asks us to OSR a
+ // different IL version than the one that matched the method that
+ // triggered OSR). This should not happen but I might have the
+ // IL versioning stuff wrong.
+ //
+ // TODO: we also currently expect this block to be a join point,
+    // which we should verify when we find jump targets.
+ impImportBlockPending(entryBlock);
/* Import blocks in the worker-list until there are no more */
diff --git a/src/coreclr/src/jit/indirectcalltransformer.cpp b/src/coreclr/src/jit/indirectcalltransformer.cpp
index caec347798031c..6aadc37683a963 100644
--- a/src/coreclr/src/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/src/jit/indirectcalltransformer.cpp
@@ -681,11 +681,12 @@ class IndirectCallTransformer
}
//------------------------------------------------------------------------
- // CreateThen: create else block with direct call to method
+ // CreateThen: create then block with direct call to method
//
virtual void CreateThen()
{
thenBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, checkBlock);
+ thenBlock->bbFlags |= currBlock->bbFlags & BBF_SPLIT_GAINED;
InlineCandidateInfo* inlineInfo = origCall->gtInlineCandidateInfo;
CORINFO_CLASS_HANDLE clsHnd = inlineInfo->clsHandle;
@@ -758,7 +759,8 @@ class IndirectCallTransformer
//
virtual void CreateElse()
{
- elseBlock = CreateAndInsertBasicBlock(BBJ_NONE, thenBlock);
+ elseBlock = CreateAndInsertBasicBlock(BBJ_NONE, thenBlock);
+ elseBlock->bbFlags |= currBlock->bbFlags & BBF_SPLIT_GAINED;
GenTreeCall* call = origCall;
Statement* newStmt = compiler->gtNewStmt(call);
diff --git a/src/coreclr/src/jit/instr.h b/src/coreclr/src/jit/instr.h
index 0de79a4cc1387f..26ba6eec4ac016 100644
--- a/src/coreclr/src/jit/instr.h
+++ b/src/coreclr/src/jit/instr.h
@@ -292,71 +292,6 @@ enum emitAttr : unsigned
#define EmitSize(x) (EA_ATTR(genTypeSize(TypeGet(x))))
-enum InstructionSet
-{
- InstructionSet_ILLEGAL = 0,
-#ifdef TARGET_XARCH
- InstructionSet_Vector128,
- InstructionSet_Vector256,
- // Start linear order SIMD instruction sets
- // These ISAs have strictly generation to generation order.
- InstructionSet_SSE,
- InstructionSet_SSE2,
- InstructionSet_SSE3,
- InstructionSet_SSSE3,
- InstructionSet_SSE41,
- InstructionSet_SSE42,
- InstructionSet_AVX,
- InstructionSet_AVX2,
- // End linear order SIMD instruction sets.
- InstructionSet_AES,
- InstructionSet_BMI1,
- InstructionSet_BMI2,
- InstructionSet_FMA,
- InstructionSet_LZCNT,
- InstructionSet_PCLMULQDQ,
- InstructionSet_POPCNT,
- InstructionSet_BMI1_X64,
- InstructionSet_BMI2_X64,
- InstructionSet_LZCNT_X64,
- InstructionSet_POPCNT_X64,
- InstructionSet_SSE_X64,
- InstructionSet_SSE2_X64,
- InstructionSet_SSE41_X64,
- InstructionSet_SSE42_X64,
-#elif defined(TARGET_ARM)
- InstructionSet_NEON,
-#elif defined(TARGET_ARM64)
- InstructionSet_AdvSimd, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better
- InstructionSet_AdvSimd_Arm64,
- InstructionSet_AdvSimd_Fp16, // ID_AA64PFR0_EL1.AdvSIMD is 1 or better
- InstructionSet_AdvSimd_v81, // ID_AA64ISAR0_EL1.RDM is 1 or better
- InstructionSet_Aes, // ID_AA64ISAR0_EL1.AES is 1 or better
- InstructionSet_ArmBase,
- InstructionSet_ArmBase_Arm64,
- InstructionSet_Atomics, // ID_AA64ISAR0_EL1.Atomic is 2 or better
- InstructionSet_Crc32, // ID_AA64ISAR0_EL1.CRC32 is 1 or better
- InstructionSet_Crc32_Arm64,
- InstructionSet_Dcpop, // ID_AA64ISAR1_EL1.DPB is 1 or better
- InstructionSet_Dp, // ID_AA64ISAR0_EL1.DP is 1 or better
- InstructionSet_Fcma, // ID_AA64ISAR1_EL1.FCMA is 1 or better
- InstructionSet_Fp, // ID_AA64PFR0_EL1.FP is 0 or better
- InstructionSet_Fp16, // ID_AA64PFR0_EL1.FP is 1 or better
- InstructionSet_Jscvt, // ID_AA64ISAR1_EL1.JSCVT is 1 or better
- InstructionSet_Lrcpc, // ID_AA64ISAR1_EL1.LRCPC is 1 or better
- InstructionSet_Pmull, // ID_AA64ISAR0_EL1.AES is 2 or better
- InstructionSet_Sha1, // ID_AA64ISAR0_EL1.SHA1 is 1 or better
- InstructionSet_Sha256, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
- InstructionSet_Sha512, // ID_AA64ISAR0_EL1.SHA2 is 2 or better
- InstructionSet_Sha3, // ID_AA64ISAR0_EL1.SHA3 is 1 or better
- InstructionSet_Sm3, // ID_AA64ISAR0_EL1.SM3 is 1 or better
- InstructionSet_Sm4, // ID_AA64ISAR0_EL1.SM4 is 1 or better
- InstructionSet_Sve, // ID_AA64PFR0_EL1.SVE is 1 or better
- InstructionSet_Vector64,
- InstructionSet_Vector128,
-#endif
- InstructionSet_NONE // No instruction set is available indicating an invalid value
-};
// clang-format on
/*****************************************************************************/
diff --git a/src/coreclr/src/jit/instrsarm64.h b/src/coreclr/src/jit/instrsarm64.h
index 6958660ac68a32..f41a70466dcac8 100644
--- a/src/coreclr/src/jit/instrsarm64.h
+++ b/src/coreclr/src/jit/instrsarm64.h
@@ -87,65 +87,81 @@ INST6(sub, "sub", 0, 0, IF_EN6A, 0x4B000000, 0x4B000000, 0x4B200000,
// enum name FP LD/ST LS_2D LS_3F LS_2E LS_2F LS_3G LS_2G
INST6(ld1, "ld1", 0, LD, IF_EN6B, 0x0C407000, 0x0CC07000, 0x0CDF7000, 0x0D400000, 0x0DC00000, 0x0DDF0000)
+ // C7.2.170 LD1 (multiple structures, one register variant)
// ld1 {Vt},[Xn] LS_2D 0Q00110001000000 0111ssnnnnnttttt 0C40 7000 base register
// ld1 {Vt},[Xn],Xm LS_3F 0Q001100110mmmmm 0111ssnnnnnttttt 0CC0 7000 post-indexed by a register
// ld1 {Vt},[Xn],#imm LS_2E 0Q00110011011111 0111ssnnnnnttttt 0CDF 7000 post-indexed by an immediate
+ // C7.2.171 LD1 (single structure)
// ld1 {Vt}[],[Xn] LS_2F 0Q00110101000000 xx0Sssnnnnnttttt 0D40 0000 base register
// ld1 {Vt}[],[Xn],Xm LS_3G 0Q001101110mmmmm xx0Sssnnnnnttttt 0DC0 0000 post-indexed by a register
// ld1 {Vt}[],[Xn],#imm LS_2G 0Q00110111011111 xx0Sssnnnnnttttt 0DDF 0000 post-indexed by an immediate
INST6(ld2, "ld2", 0, LD, IF_EN6B, 0x0C408000, 0x0CC08000, 0x0CDF8000, 0x0D600000, 0x0DE00000, 0x0DFF0000)
+ // C7.2.173 LD2 (multiple structures)
// ld2 {Vt,Vt2},[Xn] LS_2D 0Q00110001000000 1000ssnnnnnttttt 0C40 8000 base register
// ld2 {Vt,Vt2},[Xn],Xm LS_3F 0Q001100110mmmmm 1000ssnnnnnttttt 0CC0 8000 post-indexed by a register
// ld2 {Vt,Vt2},[Xn],#imm LS_2E 0Q001100110mmmmm 1000ssnnnnnttttt 0CDF 8000 post-indexed by an immediate
+ // C7.2.174 LD2 (single structure)
// ld2 {Vt,Vt2}[],[Xn] LS_2F 0Q00110101100000 xx0Sssnnnnnttttt 0D60 0000 base register
// ld2 {Vt,Vt2}[],[Xn],Xm LS_3G 0Q001101111mmmmm xx0Sssnnnnnttttt 0DE0 0000 post-indexed by a register
// ld2 {Vt,Vt2}[],[Xn],#imm LS_2G 0Q00110111111111 xx0Sssnnnnnttttt 0DFF 0000 post-indexed by an immediate
INST6(ld3, "ld3", 0, LD, IF_EN6B, 0x0C404000, 0x0CC04000, 0x0CDF4000, 0x0D402000, 0x0DC02000, 0x0DDF2000)
+ // C7.2.176 LD3 (multiple structures)
// ld3 {Vt-Vt3},[Xn] LS_2D 0Q00110001000000 0100ssnnnnnttttt 0C40 4000 base register
// ld3 {Vt-Vt3},[Xn],Xm LS_3F 0Q001100110mmmmm 0100ssnnnnnttttt 0CC0 4000 post-indexed by a register
// ld3 {Vt-Vt3},[Xn],#imm LS_2E 0Q001100110mmmmm 0100ssnnnnnttttt 0CDF 4000 post-indexed by an immediate
+ // C7.2.177 LD3 (single structure)
// ld3 {Vt-Vt3}[],[Xn] LS_2F 0Q00110101000000 xx1Sssnnnnnttttt 0D40 2000 base register
// ld3 {Vt-Vt3}[],[Xn],Xm LS_3G 0Q001101110mmmmm xx1Sssnnnnnttttt 0DC0 2000 post-indexed by a register
// ld3 {Vt-Vt3}[],[Xn],#imm LS_2G 0Q00110111011111 xx1Sssnnnnnttttt 0DDF 2000 post-indexed by an immediate
INST6(ld4, "ld4", 0, LD, IF_EN6B, 0x0C400000, 0x0CC00000, 0x0CDF0000, 0x0D602000, 0x0DE02000, 0x0DFF2000)
+ // C7.2.179 LD4 (multiple structures)
// ld4 {Vt-Vt4},[Xn] LS_2D 0Q00110001000000 0000ssnnnnnttttt 0C40 0000 base register
// ld4 {Vt-Vt4},[Xn],Xm LS_3F 0Q001100110mmmmm 0000ssnnnnnttttt 0CC0 0000 post-indexed by a register
// ld4 {Vt-Vt4},[Xn],#imm LS_2E 0Q00110011011111 0000ssnnnnnttttt 0CDF 0000 post-indexed by an immediate
+ // C7.2.180 LD4 (single structure)
// ld4 {Vt-Vt4}[],[Xn] LS_2F 0Q00110101100000 xx1Sssnnnnnttttt 0D60 2000 base register
// ld4 {Vt-Vt4}[],[Xn],Xm LS_3G 0Q001101111mmmmm xx1Sssnnnnnttttt 0DE0 2000 post-indexed by a register
// ld4 {Vt-Vt4}[],[Xn],#imm LS_2G 0Q00110111111111 xx1Sssnnnnnttttt 0DFF 2000 post-indexed by an immediate
INST6(st1, "st1", 0, LD, IF_EN6B, 0x0C007000, 0x0C807000, 0x0C9F7000, 0x0D000000, 0x0D800000, 0x0D9F0000)
+ // C7.2.313 ST1 (multiple structures, one register variant)
// st1 {Vt},[Xn] LS_2D 0Q00110000000000 0111ssnnnnnttttt 0C00 7000 base register
// st1 {Vt},[Xn],Xm LS_3F 0Q001100100mmmmm 0111ssnnnnnttttt 0C80 7000 post-indexed by a register
// st1 {Vt},[Xn],#imm LS_2E 0Q00110010011111 0111ssnnnnnttttt 0C9F 7000 post-indexed by an immediate
+ // C7.2.314 ST1 (single structure)
// st1 {Vt}[],[Xn] LS_2F 0Q00110100000000 xx0Sssnnnnnttttt 0D00 0000 base register
// st1 {Vt}[],[Xn],Xm LS_3G 0Q001101100mmmmm xx0Sssnnnnnttttt 0D80 0000 post-indexed by a register
// st1 {Vt}[],[Xn],#imm LS_2G 0Q00110110011111 xx0Sssnnnnnttttt 0D9F 0000 post-indexed by an immediate
INST6(st2, "st2", 0, ST, IF_EN6B, 0x0C008000, 0x0C808000, 0x0C9F8000, 0x0D200000, 0x0DA00000, 0x0DBF0000)
+ // C7.2.315 ST2 (multiple structures)
// st2 {Vt,Vt2},[Xn] LS_2D 0Q00110000000000 1000ssnnnnnttttt 0C00 8000 base register
// st2 {Vt,Vt2},[Xn],Xm LS_3F 0Q001100100mmmmm 1000ssnnnnnttttt 0C80 8000 post-indexed by a register
// st2 {Vt,Vt2},[Xn],#imm LS_2E 0Q00110010011111 1000ssnnnnnttttt 0C9F 8000 post-indexed by an immediate
+ // C7.2.316 ST2 (single structure)
// st2 {Vt,Vt2}[],[Xn] LS_2F 0Q00110100100000 xx0Sssnnnnnttttt 0D20 0000 base register
// st2 {Vt,Vt2}[],[Xn],Xm LS_3G 0Q001101101mmmmm xx0Sssnnnnnttttt 0DA0 0000 post-indexed by a register
// st2 {Vt,Vt2}[],[Xn],#imm LS_2G 0Q00110110111111 xx0Sssnnnnnttttt 0DBF 0000 post-indexed by an immediate
INST6(st3, "st3", 0, ST, IF_EN6B, 0x0C004000, 0x0C804000, 0x0C9F4000, 0x0D002000, 0x0D802000, 0x0D9F2000)
+ // C7.2.317 ST3 (multiple structures)
// st3 {Vt-Vt3},[Xn] LS_2D 0Q00110000000000 0100ssnnnnnttttt 0C00 4000 base register
// st3 {Vt-Vt3},[Xn],Xm LS_3F 0Q001100100mmmmm 0100ssnnnnnttttt 0C80 4000 post-indexed by a register
// st3 {Vt-Vt3},[Xn],#imm LS_2E 0Q00110010011111 0100ssnnnnnttttt 0C9F 4000 post-indexed by an immediate
+ // C7.2.318 ST3 (single structure)
// st3 {Vt-Vt3}[],[Xn] LS_2F 0Q00110100000000 xx1Sssnnnnnttttt 0D00 2000 base register
// st3 {Vt-Vt3}[],[Xn],Xm LS_3G 0Q001101100mmmmm xx1Sssnnnnnttttt 0D80 2000 post-indexed by a register
// st3 {Vt-Vt3}[],[Xn],#imm LS_2G 0Q00110110011111 xx1Sssnnnnnttttt 0D9F 2000 post-indexed by an immediate
INST6(st4, "st4", 0, ST, IF_EN6B, 0x0C000000, 0x0C800000, 0x0C9F0000, 0x0D202000, 0x0DA02000, 0x0DBF2000)
+ // C7.2.319 ST4 (multiple structures)
// st4 {Vt-Vt4},[Xn] LS_2D 0Q00110000000000 0000ssnnnnnttttt 0C00 0000 base register
// st4 {Vt-Vt4},[Xn],Xm LS_3F 0Q001100100mmmmm 0000ssnnnnnttttt 0C80 0000 post-indexed by a register
// st4 {Vt-Vt4},[Xn],#imm LS_2E 0Q00110010011111 0000ssnnnnnttttt 0C9F 0000 post-indexed by an immediate
+ // C7.2.320 ST4 (single structure)
// st4 {Vt-Vt4}[],[Xn] LS_2F 0Q00110100100000 xx1Sssnnnnnttttt 0D20 2000 base register
// st4 {Vt-Vt4}[],[Xn],Xm LS_3G 0Q001101101mmmmm xx1Sssnnnnnttttt 0DA0 2000 post-indexed by a register
// st4 {Vt-Vt4}[],[Xn],#imm LS_2G 0Q00110110111111 xx1Sssnnnnnttttt 0DBF 2000 post-indexed by an immediate
@@ -438,51 +454,61 @@ INST3(mvn, "mvn", 0, 0, IF_EN3I, 0x2A2003E0, 0x2A2003E0, 0x2E205800)
// enum name FP LD/ST LS_2D LS_3F LS_2E
INST3(ld1_2regs,"ld1", 0,LD, IF_EN3J, 0x0C40A000, 0x0CC0A000, 0x0CDFA000)
+ // C7.2.170 LD1 (multiple structures, two registers variant)
// ld1 {Vt,Vt2},[Xn] LS_2D 0Q00110001000000 1010ssnnnnnttttt 0C40 A000 base register
// ld1 {Vt,Vt2},[Xn],Xm LS_3F 0Q001100110mmmmm 1010ssnnnnnttttt 0CC0 A000 post-indexed by a register
// ld1 {Vt,Vt2},[Xn],#imm LS_2E 0Q00110011011111 1010ssnnnnnttttt 0CDF A000 post-indexed by an immediate
INST3(ld1_3regs,"ld1", 0,LD, IF_EN3J, 0x0C406000, 0x0CC06000, 0x0CDF6000)
+ // C7.2.170 LD1 (multiple structures, three registers variant)
// ld1 {Vt-Vt3},[Xn] LS_2D 0Q00110001000000 0110ssnnnnnttttt 0C40 6000 base register
// ld1 {Vt-Vt3},[Xn],Xm LS_3F 0Q001100110mmmmm 0110ssnnnnnttttt 0CC0 6000 post-indexed by a register
// ld1 {Vt-Vt3},[Xn],#imm LS_2E 0Q00110011011111 0110ssnnnnnttttt 0CDF 6000 post-indexed by an immediate
INST3(ld1_4regs,"ld1", 0,LD, IF_EN3J, 0x0C402000, 0x0CC02000, 0x0CDF2000)
+ // C7.2.170 LD1 (multiple structures, four registers variant)
// ld1 {Vt-Vt4},[Xn] LS_2D 0Q00110001000000 0010ssnnnnnttttt 0C40 2000 base register
// ld1 {Vt-Vt4},[Xn],Xm LS_3F 0Q001100110mmmmm 0010ssnnnnnttttt 0CC0 2000 post-indexed by a register
// ld1 {Vt-Vt4},[Xn],#imm LS_2E 0Q00110011011111 0010ssnnnnnttttt 0CDF 2000 post-indexed by an immediate
INST3(st1_2regs,"st1", 0,ST, IF_EN3J, 0x0C00A000, 0x0C80A000, 0x0C9FA000)
+ // C7.2.313 ST1 (multiple structures, two registers variant)
// st1 {Vt,Vt2},[Xn] LS_2D 0Q00110000000000 1010ssnnnnnttttt 0C00 A000 base register
// st1 {Vt,Vt2},[Xn],Xm LS_3F 0Q001100100mmmmm 1010ssnnnnnttttt 0C80 A000 post-indexed by a register
// st1 {Vt,Vt2},[Xn],#imm LS_2E 0Q00110010011111 1010ssnnnnnttttt 0C9F A000 post-indexed by an immediate
INST3(st1_3regs,"st1", 0,ST, IF_EN3J, 0x0C006000, 0x0C806000, 0x0C9F6000)
+ // C7.2.313 ST1 (multiple structures, three registers variant)
// st1 {Vt-Vt3},[Xn] LS_2D 0Q00110000000000 0110ssnnnnnttttt 0C00 6000 base register
// st1 {Vt-Vt3},[Xn],Xm LS_3F 0Q001100100mmmmm 0110XXnnnnnttttt 0C80 6000 post-indexed by a register
// st1 {Vt-Vt3},[Xn],#imm LS_2E 0Q00110010011111 0110XXnnnnnttttt 0C9F 6000 post-indexed by an immediate
INST3(st1_4regs,"st1", 0,ST, IF_EN3J, 0x0C002000, 0x0C802000, 0x0C9F2000)
+ // C7.2.313 ST1 (multiple structures, four registers variant)
// st1 {Vt-Vt4},[Xn] LS_2D 0Q00110000000000 0010XXnnnnnttttt 0C00 2000 base register
// st1 {Vt-Vt4},[Xn],Xm LS_3F 0Q001100100mmmmm 0010XXnnnnnttttt 0C80 2000 post-indexed by a register
// st1 {Vt-Vt4},[Xn],#imm LS_2E 0Q00110010011111 0010XXnnnnnttttt 0C9F 2000 post-indexed by an immediate
INST3(ld1r, "ld1r", 0,LD, IF_EN3J, 0x0D40C000, 0x0DC0C000, 0x0DDFC000)
+ // C7.2.172 LD1R
// ld1r {Vt},[Xn] LS_2D 0Q00110101000000 1100ssnnnnnttttt 0D40 C000 base register
// ld1r {Vt},[Xn],Xm LS_3F 0Q001101110mmmmm 1100ssnnnnnttttt 0DC0 C000 post-indexed by a register
// ld1r {Vt},[Xn],#1 LS_2E 0Q00110111011111 1100ssnnnnnttttt 0DDF C000 post-indexed by an immediate
INST3(ld2r, "ld2r", 0,LD, IF_EN3J, 0x0D60C000, 0x0DE0C000, 0x0DFFC000)
+ // C7.2.175 LD2R
// ld2r {Vt,Vt2},[Xn] LS_2D 0Q00110101100000 1100ssnnnnnttttt 0D60 C000 base register
// ld2r {Vt,Vt2},[Xn],Xm LS_3F 0Q001101111mmmmm 1100ssnnnnnttttt 0DE0 C000 post-indexed by a register
// ld2r {Vt,Vt2},[Xn],#2 LS_2E 0Q00110111111111 1100ssnnnnnttttt 0DFF C000 post-indexed by an immediate
INST3(ld3r, "ld3r", 0,LD, IF_EN3J, 0x0D40E000, 0x0DC0E000, 0x0DDFE000)
+ // C7.2.178 LD3R
// ld3r {Vt-Vt3},[Xn] LS_2D 0Q00110101000000 1110ssnnnnnttttt 0D40 E000 base register
// ld3r {Vt-Vt3},[Xn],Xm LS_3F 0Q001101110mmmmm 1110ssnnnnnttttt 0DC0 E000 post-indexed by a register
// ld3r {Vt-Vt3},[Xn],#4 LS_2E 0Q00110111011111 1110ssnnnnnttttt 0DDF E000 post-indexed by an immediate
INST3(ld4r, "ld4r", 0,LD, IF_EN3J, 0x0D60E000, 0x0DE0E000, 0x0DFFE000)
+ // C7.2.181 LD4R
// ld4r {Vt-Vt4},[Xn] LS_2D 0Q00110101100000 1110ssnnnnnttttt 0D60 E000 base register
// ld4r {Vt-Vt4},[Xn],Xm LS_3F 0Q001101111mmmmm 1110ssnnnnnttttt 0DE0 E000 post-indexed by a register
// ld4r {Vt-Vt4},[Xn],#8 LS_2E 0Q00110111111111 1110ssnnnnnttttt 0DFF E000 post-indexed by an immediate
diff --git a/src/coreclr/src/jit/jitconfigvalues.h b/src/coreclr/src/jit/jitconfigvalues.h
index 70f65eb5ebf9d2..65b92b55ba1338 100644
--- a/src/coreclr/src/jit/jitconfigvalues.h
+++ b/src/coreclr/src/jit/jitconfigvalues.h
@@ -394,6 +394,11 @@ CONFIG_INTEGER(JitGuardedDevirtualizationGuessUniqueInterface, W("JitGuardedDevi
CONFIG_INTEGER(JitGuardedDevirtualizationGuessBestClass, W("JitGuardedDevirtualizationGuessBestClass"), 1)
#endif // DEBUG
+// Enable insertion of patchpoints into Tier0 methods with loops.
+CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0)
+// Initial patchpoint counter value used by jitted code
+CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000)
+
#if defined(DEBUG)
// JitFunctionFile: Name of a file that contains a list of functions. If the currently compiled function is in the
// file, certain other JIT config variables will be active. If the currently compiled function is not in the file,
diff --git a/src/coreclr/src/jit/jitee.h b/src/coreclr/src/jit/jitee.h
index 4f9df37e92de8c..405ef9a7d74219 100644
--- a/src/coreclr/src/jit/jitee.h
+++ b/src/coreclr/src/jit/jitee.h
@@ -27,7 +27,6 @@ class JitFlags
JIT_FLAG_TARGET_P4 = 9,
JIT_FLAG_USE_FCOMI = 10, // Generated code may use fcomi(p) instruction
JIT_FLAG_USE_CMOV = 11, // Generated code may use cmov instruction
- JIT_FLAG_USE_SSE2 = 12, // Generated code may use SSE-2 instructions
#else // !defined(TARGET_X86)
@@ -39,22 +38,12 @@ class JitFlags
#endif // !defined(TARGET_X86)
- JIT_FLAG_UNUSED6 = 13,
-
- #if defined(TARGET_X86) || defined(TARGET_AMD64)
-
- JIT_FLAG_USE_AVX = 14,
- JIT_FLAG_USE_AVX2 = 15,
- JIT_FLAG_USE_AVX_512 = 16,
-
- #else // !defined(TARGET_X86) && !defined(TARGET_AMD64)
+ JIT_FLAG_OSR = 13, // Generate alternate version for On Stack Replacement
JIT_FLAG_UNUSED7 = 14,
JIT_FLAG_UNUSED8 = 15,
JIT_FLAG_UNUSED9 = 16,
- #endif // !defined(TARGET_X86) && !defined(TARGET_AMD64)
-
#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)
JIT_FLAG_FEATURE_SIMD = 17,
#else
@@ -93,57 +82,6 @@ class JitFlags
JIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method
-#if defined(TARGET_ARM64)
-
- JIT_FLAG_HAS_ARM64_AES = 43, // ID_AA64ISAR0_EL1.AES is 1 or better
- JIT_FLAG_HAS_ARM64_ATOMICS = 44, // ID_AA64ISAR0_EL1.Atomic is 2 or better
- JIT_FLAG_HAS_ARM64_CRC32 = 45, // ID_AA64ISAR0_EL1.CRC32 is 1 or better
- JIT_FLAG_HAS_ARM64_DCPOP = 46, // ID_AA64ISAR1_EL1.DPB is 1 or better
- JIT_FLAG_HAS_ARM64_DP = 47, // ID_AA64ISAR0_EL1.DP is 1 or better
- JIT_FLAG_HAS_ARM64_FCMA = 48, // ID_AA64ISAR1_EL1.FCMA is 1 or better
- JIT_FLAG_HAS_ARM64_FP = 49, // ID_AA64PFR0_EL1.FP is 0 or better
- JIT_FLAG_HAS_ARM64_FP16 = 50, // ID_AA64PFR0_EL1.FP is 1 or better
- JIT_FLAG_HAS_ARM64_JSCVT = 51, // ID_AA64ISAR1_EL1.JSCVT is 1 or better
- JIT_FLAG_HAS_ARM64_LRCPC = 52, // ID_AA64ISAR1_EL1.LRCPC is 1 or better
- JIT_FLAG_HAS_ARM64_PMULL = 53, // ID_AA64ISAR0_EL1.AES is 2 or better
- JIT_FLAG_HAS_ARM64_SHA1 = 54, // ID_AA64ISAR0_EL1.SHA1 is 1 or better
- JIT_FLAG_HAS_ARM64_SHA256 = 55, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
- JIT_FLAG_HAS_ARM64_SHA512 = 56, // ID_AA64ISAR0_EL1.SHA2 is 2 or better
- JIT_FLAG_HAS_ARM64_SHA3 = 57, // ID_AA64ISAR0_EL1.SHA3 is 1 or better
- JIT_FLAG_HAS_ARM64_ADVSIMD = 58, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better
- JIT_FLAG_HAS_ARM64_ADVSIMD_V81 = 59, // ID_AA64ISAR0_EL1.RDM is 1 or better
- JIT_FLAG_HAS_ARM64_ADVSIMD_FP16 = 60, // ID_AA64PFR0_EL1.AdvSIMD is 1 or better
- JIT_FLAG_HAS_ARM64_SM3 = 61, // ID_AA64ISAR0_EL1.SM3 is 1 or better
- JIT_FLAG_HAS_ARM64_SM4 = 62, // ID_AA64ISAR0_EL1.SM4 is 1 or better
- JIT_FLAG_HAS_ARM64_SVE = 63 // ID_AA64PFR0_EL1.SVE is 1 or better
-
-#elif defined(TARGET_X86) || defined(TARGET_AMD64)
-
- JIT_FLAG_USE_SSE3 = 43,
- JIT_FLAG_USE_SSSE3 = 44,
- JIT_FLAG_USE_SSE41 = 45,
- JIT_FLAG_USE_SSE42 = 46,
- JIT_FLAG_USE_AES = 47,
- JIT_FLAG_USE_BMI1 = 48,
- JIT_FLAG_USE_BMI2 = 49,
- JIT_FLAG_USE_FMA = 50,
- JIT_FLAG_USE_LZCNT = 51,
- JIT_FLAG_USE_PCLMULQDQ = 52,
- JIT_FLAG_USE_POPCNT = 53,
- JIT_FLAG_UNUSED23 = 54,
- JIT_FLAG_UNUSED24 = 55,
- JIT_FLAG_UNUSED25 = 56,
- JIT_FLAG_UNUSED26 = 57,
- JIT_FLAG_UNUSED27 = 58,
- JIT_FLAG_UNUSED28 = 59,
- JIT_FLAG_UNUSED29 = 60,
- JIT_FLAG_UNUSED30 = 61,
- JIT_FLAG_UNUSED31 = 62,
- JIT_FLAG_UNUSED32 = 63
-
-
-#else // !defined(TARGET_ARM64) && !defined(TARGET_X86) && !defined(TARGET_AMD64)
-
JIT_FLAG_UNUSED12 = 43,
JIT_FLAG_UNUSED13 = 44,
JIT_FLAG_UNUSED14 = 45,
@@ -166,8 +104,6 @@ class JitFlags
JIT_FLAG_UNUSED31 = 62,
JIT_FLAG_UNUSED32 = 63
-#endif // !defined(TARGET_ARM64) && !defined(TARGET_X86) && !defined(TARGET_AMD64)
-
};
// clang-format on
@@ -187,29 +123,29 @@ class JitFlags
m_jitFlags = 0;
}
- void Set(JitFlag flag)
+ CORINFO_InstructionSetFlags GetInstructionSetFlags() const
{
- m_jitFlags |= 1ULL << (unsigned __int64)flag;
+ return m_instructionSetFlags;
}
- void Clear(JitFlag flag)
+ void SetInstructionSetFlags(CORINFO_InstructionSetFlags instructionSetFlags)
{
- m_jitFlags &= ~(1ULL << (unsigned __int64)flag);
+ m_instructionSetFlags = instructionSetFlags;
}
- bool IsSet(JitFlag flag) const
+ void Set(JitFlag flag)
{
- return (m_jitFlags & (1ULL << (unsigned __int64)flag)) != 0;
+ m_jitFlags |= 1ULL << (unsigned __int64)flag;
}
- void Add(const JitFlags& other)
+ void Clear(JitFlag flag)
{
- m_jitFlags |= other.m_jitFlags;
+ m_jitFlags &= ~(1ULL << (unsigned __int64)flag);
}
- void Remove(const JitFlags& other)
+ bool IsSet(JitFlag flag) const
{
- m_jitFlags &= ~other.m_jitFlags;
+ return (m_jitFlags & (1ULL << (unsigned __int64)flag)) != 0;
}
bool IsEmpty() const
@@ -222,8 +158,9 @@ class JitFlags
// We don't want to have to check every one, so we assume it is exactly the same values as the JitFlag
// values defined in this type.
m_jitFlags = flags.GetFlagsRaw();
+ m_instructionSetFlags.SetFromFlagsRaw(flags.GetInstructionSetFlagsRaw());
- C_ASSERT(sizeof(m_jitFlags) == sizeof(CORJIT_FLAGS));
+ C_ASSERT(sizeof(JitFlags) == sizeof(CORJIT_FLAGS));
#define FLAGS_EQUAL(a, b) C_ASSERT((unsigned)(a) == (unsigned)(b))
@@ -242,15 +179,6 @@ class JitFlags
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TARGET_P4, JIT_FLAG_TARGET_P4);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_FCOMI, JIT_FLAG_USE_FCOMI);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_CMOV, JIT_FLAG_USE_CMOV);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2, JIT_FLAG_USE_SSE2);
-
-#endif
-
-#if defined(TARGET_X86) || defined(TARGET_AMD64)
-
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX, JIT_FLAG_USE_AVX);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2, JIT_FLAG_USE_AVX2);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX_512, JIT_FLAG_USE_AVX_512);
#endif
@@ -290,50 +218,10 @@ class JitFlags
#endif // TARGET_ARM
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING, JIT_FLAG_NO_INLINING);
-
-#if defined(TARGET_ARM64)
-
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_AES, JIT_FLAG_HAS_ARM64_AES);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ATOMICS, JIT_FLAG_HAS_ARM64_ATOMICS);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_CRC32, JIT_FLAG_HAS_ARM64_CRC32);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_DCPOP, JIT_FLAG_HAS_ARM64_DCPOP);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_DP, JIT_FLAG_HAS_ARM64_DP);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FCMA, JIT_FLAG_HAS_ARM64_FCMA);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP, JIT_FLAG_HAS_ARM64_FP);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP16, JIT_FLAG_HAS_ARM64_FP16);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_JSCVT, JIT_FLAG_HAS_ARM64_JSCVT);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_LRCPC, JIT_FLAG_HAS_ARM64_LRCPC);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_PMULL, JIT_FLAG_HAS_ARM64_PMULL);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA1, JIT_FLAG_HAS_ARM64_SHA1);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA256, JIT_FLAG_HAS_ARM64_SHA256);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA512, JIT_FLAG_HAS_ARM64_SHA512);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA3, JIT_FLAG_HAS_ARM64_SHA3);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD, JIT_FLAG_HAS_ARM64_ADVSIMD);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD_V81, JIT_FLAG_HAS_ARM64_ADVSIMD_V81);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD_FP16, JIT_FLAG_HAS_ARM64_ADVSIMD_FP16);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SM3, JIT_FLAG_HAS_ARM64_SM3);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SM4, JIT_FLAG_HAS_ARM64_SM4);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SVE, JIT_FLAG_HAS_ARM64_SVE);
-
-#elif defined(TARGET_X86) || defined(TARGET_AMD64)
-
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3, JIT_FLAG_USE_SSE3);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_SSSE3, JIT_FLAG_USE_SSSE3);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE41, JIT_FLAG_USE_SSE41);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE42, JIT_FLAG_USE_SSE42);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_AES, JIT_FLAG_USE_AES);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_BMI1, JIT_FLAG_USE_BMI1);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_BMI2, JIT_FLAG_USE_BMI2);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_FMA, JIT_FLAG_USE_FMA);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_LZCNT, JIT_FLAG_USE_LZCNT);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_PCLMULQDQ, JIT_FLAG_USE_PCLMULQDQ);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_POPCNT, JIT_FLAG_USE_POPCNT);
-
-#endif // TARGET_X86 || TARGET_AMD64
-
#undef FLAGS_EQUAL
}
private:
- unsigned __int64 m_jitFlags;
+ unsigned __int64 m_jitFlags;
+ CORINFO_InstructionSetFlags m_instructionSetFlags;
};
diff --git a/src/coreclr/src/jit/jiteh.cpp b/src/coreclr/src/jit/jiteh.cpp
index b17785c50b800f..4c9f39ebd6e68e 100644
--- a/src/coreclr/src/jit/jiteh.cpp
+++ b/src/coreclr/src/jit/jiteh.cpp
@@ -1212,6 +1212,25 @@ EHblkDsc* Compiler::ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, Ba
return tryTab;
}
+/*****************************************************************************
+ * This method updates the value of ebdTryBeg
+ */
+
+void Compiler::fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg)
+{
+ assert(newTryBeg != nullptr);
+
+ // Check if we are going to change the existing value of ebdTryBeg
+ //
+ if (handlerTab->ebdTryBeg != newTryBeg)
+ {
+ // Update the EH table with the newTryBeg block
+ handlerTab->ebdTryBeg = newTryBeg;
+
+ JITDUMP("EH#%u: New first block of try: " FMT_BB "\n", ehGetIndex(handlerTab), handlerTab->ebdTryBeg->bbNum);
+ }
+}
+
/*****************************************************************************
* This method updates the value of ebdTryLast.
*/
diff --git a/src/coreclr/src/jit/lclvars.cpp b/src/coreclr/src/jit/lclvars.cpp
index f93062dff004c5..3806dcdf2b5582 100644
--- a/src/coreclr/src/jit/lclvars.cpp
+++ b/src/coreclr/src/jit/lclvars.cpp
@@ -21,6 +21,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "emit.h"
#include "register_arg_convention.h"
#include "jitstd/algorithm.h"
+#include "patchpointinfo.h"
/*****************************************************************************/
@@ -279,6 +280,17 @@ void Compiler::lvaInitTypeRef()
CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->locals, localsSig);
lvaSetClass(varNum, clsHnd);
}
+
+ if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varNum))
+ {
+ JITDUMP("-- V%02u is OSR exposed\n", varNum);
+ varDsc->lvHasLdAddrOp = 1;
+
+ if (varDsc->lvType != TYP_STRUCT)
+ {
+ lvaSetVarAddrExposed(varNum);
+ }
+ }
}
if ( // If there already exist unsafe buffers, don't mark more structs as unsafe
@@ -1028,6 +1040,14 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
lvaSetVarAddrExposed(varDscInfo->varNum);
#endif // !TARGET_X86
}
+
+ if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varDscInfo->varNum))
+ {
+ JITDUMP("-- V%02u is OSR exposed\n", varDscInfo->varNum);
+ varDsc->lvHasLdAddrOp = 1;
+ lvaSetVarAddrExposed(varDscInfo->varNum);
+ }
+
} // for each user arg
#ifdef TARGET_ARM
@@ -1830,6 +1850,13 @@ bool Compiler::StructPromotionHelper::CanPromoteStructVar(unsigned lclNum)
return false;
}
+ // TODO-CQ: enable promotion for OSR locals
+ if (compiler->lvaIsOSRLocal(lclNum))
+ {
+ JITDUMP(" struct promotion of V%02u is disabled because it is an OSR local\n", lclNum);
+ return false;
+ }
+
CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
return CanPromoteStructType(typeHnd);
}
@@ -4781,6 +4808,13 @@ void Compiler::lvaFixVirtualFrameOffsets()
assert(varDsc->lvFramePointerBased); // We always access it RBP-relative.
assert(!varDsc->lvMustInit); // It is never "must init".
varDsc->lvStkOffs = codeGen->genCallerSPtoInitialSPdelta() + lvaLclSize(lvaOutgoingArgSpaceVar);
+
+ // With OSR the new frame RBP points at the base of the new frame, but the virtual offsets
+ // are from the base of the old frame. Adjust.
+ if (opts.IsOSR())
+ {
+ varDsc->lvStkOffs -= info.compPatchpointInfo->FpToSpDelta();
+ }
}
#endif
@@ -4789,9 +4823,11 @@ void Compiler::lvaFixVirtualFrameOffsets()
#ifdef TARGET_XARCH
delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64
+ JITDUMP("--- delta bump %d for RA\n", REGSIZE_BYTES);
if (codeGen->doubleAlignOrFramePointerUsed())
{
+ JITDUMP("--- delta bump %d for FP\n", REGSIZE_BYTES);
delta += REGSIZE_BYTES; // pushed EBP (frame pointer)
}
#endif
@@ -4799,6 +4835,7 @@ void Compiler::lvaFixVirtualFrameOffsets()
if (!codeGen->isFramePointerUsed())
{
// pushed registers, return address, and padding
+ JITDUMP("--- delta bump %d for RSP frame\n", codeGen->genTotalFrameSize());
delta += codeGen->genTotalFrameSize();
}
#if defined(TARGET_ARM)
@@ -4811,10 +4848,21 @@ void Compiler::lvaFixVirtualFrameOffsets()
else
{
// FP is used.
+ JITDUMP("--- delta bump %d for RBP frame\n", codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta());
delta += codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta();
}
#endif // TARGET_AMD64
+ // For OSR, update the delta to reflect the current policy that
+ // RBP points at the base of the new frame, and RSP is relative to that RBP.
+ if (opts.IsOSR())
+ {
+ JITDUMP("--- delta bump %d for OSR\n", info.compPatchpointInfo->FpToSpDelta());
+ delta += info.compPatchpointInfo->FpToSpDelta();
+ }
+
+ JITDUMP("--- virtual stack offset to actual stack offset delta is %d\n", delta);
+
unsigned lclNum;
for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
{
@@ -4857,6 +4905,7 @@ void Compiler::lvaFixVirtualFrameOffsets()
if (doAssignStkOffs)
{
+ JITDUMP("-- V%02u was %d, now %d\n", lclNum, varDsc->lvStkOffs, varDsc->lvStkOffs + delta);
varDsc->lvStkOffs += delta;
#if DOUBLE_ALIGN
@@ -4876,8 +4925,9 @@ void Compiler::lvaFixVirtualFrameOffsets()
}
#endif
// On System V environments the stkOffs could be 0 for params passed in registers.
- assert(codeGen->isFramePointerUsed() ||
- varDsc->lvStkOffs >= 0); // Only EBP relative references can have negative offsets
+ //
+ // For normal methods only EBP relative references can have negative offsets.
+ assert(codeGen->isFramePointerUsed() || varDsc->lvStkOffs >= 0);
}
}
@@ -5593,7 +5643,9 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
*/
void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
{
- int stkOffs = 0;
+ int stkOffs = 0;
+ int originalFrameStkOffs = 0;
+ int originalFrameSize = 0;
// codeGen->isFramePointerUsed is set in regalloc phase. Initialize it to a guess for pre-regalloc layout.
if (lvaDoneFrameLayout <= PRE_REGALLOC_FRAME_LAYOUT)
{
@@ -5632,6 +5684,15 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
lvaTable[lvaRetAddrVar].lvStkOffs = stkOffs;
}
+ // If we are an OSR method, we "inherit" the frame of the original method,
+ // and the stack is already double aligned on entry (since the return address push
+ // and any special alignment push happened "before").
+ if (opts.IsOSR())
+ {
+ originalFrameSize = info.compPatchpointInfo->FpToSpDelta();
+ originalFrameStkOffs = stkOffs;
+ stkOffs -= originalFrameSize;
+ }
// TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other
// calleeregs. When you fix this, you'll also need to fix
// the assert at the bottom of this method
@@ -5714,10 +5775,16 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
// boundary we would have to use movups when offset turns out unaligned. Movaps is more
// performant than movups.
unsigned calleeFPRegsSavedSize = genCountBits(compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES;
- if (calleeFPRegsSavedSize > 0 && ((stkOffs % XMM_REGSIZE_BYTES) != 0))
+
+ // For OSR the alignment pad computation should not take the original frame into account.
+ // Original frame size includes the pseudo-saved RA and so is always = 8 mod 16.
+ const int offsetForAlign = -(stkOffs + originalFrameSize);
+
+ if ((calleeFPRegsSavedSize > 0) && ((offsetForAlign % XMM_REGSIZE_BYTES) != 0))
{
// Take care of alignment
- int alignPad = (int)AlignmentPad((unsigned)-stkOffs, XMM_REGSIZE_BYTES);
+ int alignPad = (int)AlignmentPad((unsigned)offsetForAlign, XMM_REGSIZE_BYTES);
+ assert(alignPad != 0);
stkOffs -= alignPad;
lvaIncrementFrameSize(alignPad);
}
@@ -5800,7 +5867,19 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
}
#endif // JIT32_GCENCODER
- if (lvaReportParamTypeArg())
+ // OSR methods use the original method slot for the cached kept alive this,
+ // so don't need to allocate a slot on the new frame.
+ if (opts.IsOSR())
+ {
+ if (lvaKeepAliveAndReportThis())
+ {
+ PatchpointInfo* ppInfo = info.compPatchpointInfo;
+ assert(ppInfo->HasKeptAliveThis());
+ int originalOffset = ppInfo->KeptAliveThisOffset();
+ lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset;
+ }
+ }
+ else if (lvaReportParamTypeArg())
{
#ifdef JIT32_GCENCODER
noway_assert(codeGen->isFramePointerUsed());
@@ -5844,7 +5923,11 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
if (compGSReorderStackLayout)
{
assert(getNeedsGSSecurityCookie());
- stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs);
+
+ if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie())
+ {
+ stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs);
+ }
}
/*
@@ -5937,7 +6020,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
In other words, we will not calculate the "base" address of the struct local if
the promotion type is PROMOTION_TYPE_FIELD_DEPENDENT.
*/
- if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
+ if (!opts.IsOSR() && lvaIsFieldOfDependentlyPromotedStruct(varDsc))
{
continue;
}
@@ -5958,6 +6041,29 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
allocateOnFrame = false;
}
+ // For OSR args and locals, we use the slots on the original frame.
+ //
+ // Note we must do this even for "non frame" locals, as we sometimes
+ // will refer to their memory homes.
+ if (lvaIsOSRLocal(lclNum))
+ {
+ // TODO-CQ: enable struct promotion for OSR locals; when that
+ // happens, figure out how to properly refer to the original
+ // frame slots for the promoted fields.
+ assert(!varDsc->lvIsStructField);
+
+ // Add the frame-pointer-relative offset of this OSR live local in the original frame
+ // to the offset of the original frame in our new frame.
+ int originalOffset = info.compPatchpointInfo->Offset(lclNum);
+ int offset = originalFrameStkOffs + originalOffset;
+
+ JITDUMP("---OSR--- V%02u (on old frame) old rbp offset %d old frame offset %d new virt offset %d\n",
+ lclNum, originalOffset, originalFrameStkOffs, offset);
+
+ lvaTable[lclNum].lvStkOffs = offset;
+ continue;
+ }
+
/* Ignore variables that are not on the stack frame */
if (!allocateOnFrame)
@@ -5979,7 +6085,21 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
}
else if (lvaGSSecurityCookie == lclNum && getNeedsGSSecurityCookie())
{
- continue; // This is allocated outside of this loop.
+ // Special case for OSR. If the original method had a cookie,
+ // we use its slot on the original frame.
+ if (opts.IsOSR() && info.compPatchpointInfo->HasSecurityCookie())
+ {
+ int originalOffset = info.compPatchpointInfo->SecurityCookieOffset();
+ int offset = originalFrameStkOffs + originalOffset;
+
+ JITDUMP("---OSR--- V%02u (on old frame, security cookie) old rbp offset %d old frame offset %d new "
+ "virt offset %d\n",
+ lclNum, originalOffset, originalFrameStkOffs, offset);
+
+ lvaTable[lclNum].lvStkOffs = offset;
+ }
+
+ continue;
}
// These need to be located as the very first variables (highest memory address)
@@ -6172,8 +6292,11 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
if (getNeedsGSSecurityCookie() && !compGSReorderStackLayout)
{
- // LOCALLOC used, but we have no unsafe buffer. Allocated cookie last, close to localloc buffer.
- stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs);
+ if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie())
+ {
+ // LOCALLOC used, but we have no unsafe buffer. Allocated cookie last, close to localloc buffer.
+ stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs);
+ }
}
if (tempsAllocated == false)
@@ -6303,7 +6426,8 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
pushedCount += 1; // pushed PC (return address)
#endif
- noway_assert(compLclFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE)));
+ noway_assert(compLclFrameSize + originalFrameSize ==
+ (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE)));
}
int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs)
@@ -6751,7 +6875,7 @@ void Compiler::lvaDumpFrameLocation(unsigned lclNum)
void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth)
{
- LclVarDsc* varDsc = lvaTable + lclNum;
+ LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types type = varDsc->TypeGet();
if (curState == INITIAL_FRAME_LAYOUT)
@@ -6760,30 +6884,7 @@ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t r
gtDispLclVar(lclNum);
printf(" %7s ", varTypeName(type));
- if (genTypeSize(type) == 0)
- {
-#if FEATURE_FIXED_OUT_ARGS
- if (lclNum == lvaOutgoingArgSpaceVar)
- {
- // Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until
- // after we set it to something.
- if (lvaOutgoingArgSpaceSize.HasFinalValue())
- {
- // A PhasedVar can't be directly used as an arg to a variadic function
- unsigned value = lvaOutgoingArgSpaceSize;
- printf("(%2d) ", value);
- }
- else
- {
- printf("(na) "); // The value hasn't yet been determined
- }
- }
- else
-#endif // FEATURE_FIXED_OUT_ARGS
- {
- printf("(%2d) ", lvaLclSize(lclNum));
- }
- }
+ gtDispLclVarStructType(lclNum);
}
else
{
@@ -7246,6 +7347,21 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased) const
{
assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
+ // TODO-Cleanup
+ //
+ // This currently should not be called for OSR as caller SP relative
+ // offsets computed below do not reflect the extra stack space
+ // taken up by the original method frame.
+ //
+ // We should make it work.
+ //
+ // Instead we record the needed offsets in the patchpoint info
+ // when doing the original method compile(see special offsets
+ // in generatePatchpointInfo) and consume those values in the OSR
+ // compile. If we fix this we may be able to reduce the size
+ // of the patchpoint info and have less special casing for these
+ // frame slots.
+
if (isFpBased)
{
offset += codeGen->genCallerSPtoFPdelta();
diff --git a/src/coreclr/src/jit/lower.cpp b/src/coreclr/src/jit/lower.cpp
index 580f2b6f71fa22..3d6e3799914d3b 100644
--- a/src/coreclr/src/jit/lower.cpp
+++ b/src/coreclr/src/jit/lower.cpp
@@ -1809,6 +1809,7 @@ void Lowering::LowerFastTailCall(GenTreeCall* call)
// Most of these checks are already done by importer or fgMorphTailCall().
// This serves as a double sanity check.
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
+ assert(!comp->opts.IsReversePInvoke()); // tail calls reverse pinvoke
assert(!call->IsUnmanaged()); // tail calls to unamanaged methods
assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
diff --git a/src/coreclr/src/jit/lsraarm64.cpp b/src/coreclr/src/jit/lsraarm64.cpp
index 6a695a1185a0a0..b9de46de1910b8 100644
--- a/src/coreclr/src/jit/lsraarm64.cpp
+++ b/src/coreclr/src/jit/lsraarm64.cpp
@@ -994,11 +994,11 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
//
int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
{
- NamedIntrinsic intrinsicId = intrinsicTree->gtHWIntrinsicId;
- var_types baseType = intrinsicTree->gtSIMDBaseType;
- InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
- HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
- int numArgs = HWIntrinsicInfo::lookupNumArgs(intrinsicTree);
+ NamedIntrinsic intrinsicId = intrinsicTree->gtHWIntrinsicId;
+ var_types baseType = intrinsicTree->gtSIMDBaseType;
+ CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
+ HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
+ int numArgs = HWIntrinsicInfo::lookupNumArgs(intrinsicTree);
GenTree* op1 = intrinsicTree->gtGetOp1();
GenTree* op2 = intrinsicTree->gtGetOp2();
diff --git a/src/coreclr/src/jit/lsrabuild.cpp b/src/coreclr/src/jit/lsrabuild.cpp
index c45aad77aec9d1..91ac9fdf32ba75 100644
--- a/src/coreclr/src/jit/lsrabuild.cpp
+++ b/src/coreclr/src/jit/lsrabuild.cpp
@@ -1732,15 +1732,7 @@ BasicBlock* getNonEmptyBlock(BasicBlock* block)
{
while (block != nullptr && block->GetFirstLIRNode() == nullptr)
{
- BasicBlock* nextBlock = block->bbNext;
- // Note that here we use the version of NumSucc that does not take a compiler.
- // That way this doesn't have to take a compiler, or be an instance method, e.g. of LinearScan.
- // If we have an empty block, it must have jump type BBJ_NONE or BBJ_ALWAYS, in which
- // case we don't need the version that takes a compiler.
- assert(block->NumSucc() == 1 && ((block->bbJumpKind == BBJ_ALWAYS) || (block->bbJumpKind == BBJ_NONE)));
- // sometimes the first block is empty and ends with an uncond branch
- // assert( block->GetSucc(0) == nextBlock);
- block = nextBlock;
+ block = block->GetUniqueSucc();
}
assert(block != nullptr && block->GetFirstLIRNode() != nullptr);
return block;
@@ -1785,12 +1777,21 @@ void LinearScan::insertZeroInitRefPositions()
Interval* interval = getIntervalForLocalVar(varIndex);
if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()))
{
+ varDsc->lvMustInit = true;
+
+ // OSR will handle init of locals and promoted fields thereof
+ if (compiler->lvaIsOSRLocal(compiler->lvaTrackedIndexToLclNum(varIndex)))
+ {
+ JITDUMP(" will be initialized by OSR\n");
+ // setIntervalAsSpilled(interval);
+ varDsc->lvMustInit = false;
+ }
+
JITDUMP(" creating ZeroInit\n");
GenTree* firstNode = getNonEmptyBlock(compiler->fgFirstBB)->firstNode();
RefPosition* pos =
newRefPosition(interval, MinLocation, RefTypeZeroInit, firstNode, allRegs(interval->registerType));
pos->setRegOptional(true);
- varDsc->lvMustInit = true;
}
else
{
diff --git a/src/coreclr/src/jit/lsraxarch.cpp b/src/coreclr/src/jit/lsraxarch.cpp
index 19c914e614d91b..2578cd7fbdc925 100644
--- a/src/coreclr/src/jit/lsraxarch.cpp
+++ b/src/coreclr/src/jit/lsraxarch.cpp
@@ -2286,11 +2286,11 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
//
int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
{
- NamedIntrinsic intrinsicId = intrinsicTree->gtHWIntrinsicId;
- var_types baseType = intrinsicTree->gtSIMDBaseType;
- InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
- HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
- int numArgs = HWIntrinsicInfo::lookupNumArgs(intrinsicTree);
+ NamedIntrinsic intrinsicId = intrinsicTree->gtHWIntrinsicId;
+ var_types baseType = intrinsicTree->gtSIMDBaseType;
+ CORINFO_InstructionSet isa = HWIntrinsicInfo::lookupIsa(intrinsicId);
+ HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
+ int numArgs = HWIntrinsicInfo::lookupNumArgs(intrinsicTree);
// Set the AVX Flags if this instruction may use VEX encoding for SIMD operations.
// Note that this may be true even if the ISA is not AVX (e.g. for platform-agnostic intrinsics
diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp
index a8b134e7332e27..e3be6833e0d3cd 100644
--- a/src/coreclr/src/jit/morph.cpp
+++ b/src/coreclr/src/jit/morph.cpp
@@ -7538,18 +7538,16 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
callType = TYP_I_IMPL;
}
- else if (howToReturnStruct == SPK_ByValueAsHfa)
+ else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType))
{
callType = TYP_FLOAT;
}
- assert((callType != TYP_UNKNOWN) && (callType != TYP_STRUCT));
+ assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
}
else
{
callType = origCallType;
}
- GenTree* zero = gtNewZeroConNode(callType);
- result = fgMorphTree(zero);
}
else
{
@@ -8549,14 +8547,26 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// Remove the call
fgRemoveStmt(block, lastStmt);
- // Set the loop edge. Ensure we have a scratch block and then target the
- // next block. Loop detection needs to see a pred out of the loop, so
- // mark the scratch block BBF_DONT_REMOVE to prevent empty block removal
- // on it.
- fgEnsureFirstBBisScratch();
- fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
+ // Set the loop edge.
+ if (opts.IsOSR())
+ {
+ // Todo: this may not look like a viable loop header.
+ // Might need the moral equivalent of a scratch BB.
+ block->bbJumpDest = fgEntryBB;
+ }
+ else
+ {
+ // Ensure we have a scratch block and then target the next
+ // block. Loop detection needs to see a pred out of the loop,
+ // so mark the scratch block BBF_DONT_REMOVE to prevent empty
+ // block removal on it.
+ fgEnsureFirstBBisScratch();
+ fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
+ block->bbJumpDest = fgFirstBB->bbNext;
+ }
+
+ // Finish hooking things up.
block->bbJumpKind = BBJ_ALWAYS;
- block->bbJumpDest = fgFirstBB->bbNext;
block->bbJumpDest->bbFlags |= BBF_JMP_TARGET;
fgAddRefPred(block->bbJumpDest, block);
block->bbFlags &= ~BBF_HAS_JMP;
@@ -14114,6 +14124,13 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
switch (oper)
{
case GT_ASG:
+ // Make sure we're allowed to do this.
+ if (optValnumCSE_phase)
+ {
+ // It is not safe to reorder/delete CSE's
+ break;
+ }
+
if (varTypeIsStruct(typ) && !tree->IsPhiDefn())
{
if (tree->OperIsCopyBlkOp())
@@ -14131,14 +14148,6 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
break;
}
- /* Make sure we're allowed to do this */
-
- if (optValnumCSE_phase)
- {
- // It is not safe to reorder/delete CSE's
- break;
- }
-
if (op2->gtFlags & GTF_ASG)
{
break;
diff --git a/src/coreclr/src/jit/patchpoint.cpp b/src/coreclr/src/jit/patchpoint.cpp
new file mode 100644
index 00000000000000..8527bd937188c5
--- /dev/null
+++ b/src/coreclr/src/jit/patchpoint.cpp
@@ -0,0 +1,244 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+//------------------------------------------------------------------------
+// PatchpointTransformer
+//
+// Insert patchpoint checks into Tier0 methods, based on locations identified
+// during importation (see impImportBlockCode).
+//
+// Policy decisions implemented here:
+//
+// * One counter per stack frame, regardless of the number of patchpoints.
+// * Shared counter value initialized to zero in prolog.
+// * Patchpoint trees fully expanded into jit IR. Deferring expansion could
+// lead to more compact code and lessen size overhead for Tier0.
+//
+// Workarounds and limitations:
+//
+// * no patchpoints in handler regions
+// * no patchpoints for localloc methods
+// * no patchpoints in try regions (workaround)
+// * no patchpoints for synchronized methods (workaround)
+//
+class PatchpointTransformer
+{
+ unsigned ppCounterLclNum;
+ const int HIGH_PROBABILITY = 99;
+ Compiler* compiler;
+
+public:
+ PatchpointTransformer(Compiler* compiler) : compiler(compiler)
+ {
+ ppCounterLclNum = compiler->lvaGrabTemp(true DEBUGARG("patchpoint counter"));
+ compiler->lvaTable[ppCounterLclNum].lvType = TYP_INT;
+ }
+
+ //------------------------------------------------------------------------
+ // Run: run transformation for each block.
+ //
+ // Returns:
+ // Number of patchpoints transformed.
+ int Run()
+ {
+ // If the first block is a patchpoint, insert a scratch block.
+ if (compiler->fgFirstBB->bbFlags & BBF_PATCHPOINT)
+ {
+ compiler->fgEnsureFirstBBisScratch();
+ }
+
+ BasicBlock* block = compiler->fgFirstBB;
+ TransformEntry(block);
+
+ int count = 0;
+ for (block = block->bbNext; block != nullptr; block = block->bbNext)
+ {
+ if (block->bbFlags & BBF_PATCHPOINT)
+ {
+ // If block is in a handler region, don't insert a patchpoint.
+ // We can't OSR from funclets.
+ //
+ // TODO: check this earlier, somehow, and fall back to fully
+ // optimizing the method (ala QJFL=0).
+ if (compiler->ehGetBlockHndDsc(block) != nullptr)
+ {
+ JITDUMP("Patchpoint: skipping patchpoint for " FMT_BB " as it is in a handler\n", block->bbNum);
+ continue;
+ }
+
+ JITDUMP("Patchpoint: instrumenting " FMT_BB "\n", block->bbNum);
+ assert(block != compiler->fgFirstBB);
+ TransformBlock(block);
+ count++;
+ }
+ }
+
+ return count;
+ }
+
+private:
+ //------------------------------------------------------------------------
+ // CreateAndInsertBasicBlock: ask compiler to create new basic block.
+ // and insert in into the basic block list.
+ //
+ // Arguments:
+ // jumpKind - jump kind for the new basic block
+ // insertAfter - basic block, after which compiler has to insert the new one.
+ //
+ // Return Value:
+ // new basic block.
+ BasicBlock* CreateAndInsertBasicBlock(BBjumpKinds jumpKind, BasicBlock* insertAfter)
+ {
+ BasicBlock* block = compiler->fgNewBBafter(jumpKind, insertAfter, true);
+ if ((insertAfter->bbFlags & BBF_INTERNAL) == 0)
+ {
+ block->bbFlags &= ~BBF_INTERNAL;
+ block->bbFlags |= BBF_IMPORTED;
+ }
+ return block;
+ }
+
+ //------------------------------------------------------------------------
+ // TransformBlock: expand current block to include patchpoint logic.
+ //
+ // S;
+ //
+ // ==>
+ //
+ // if (--ppCounter <= 0)
+ // {
+ // ppHelper(&ppCounter, ilOffset);
+ // }
+ // S;
+ //
+ void TransformBlock(BasicBlock* block)
+ {
+ // Capture the IL offset
+ IL_OFFSET ilOffset = block->bbCodeOffs;
+ assert(ilOffset != BAD_IL_OFFSET);
+
+ // Current block now becomes the test block
+ BasicBlock* remainderBlock = compiler->fgSplitBlockAtBeginning(block);
+ BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block);
+
+ // Update flow and flags
+ block->bbJumpKind = BBJ_COND;
+ block->bbJumpDest = remainderBlock;
+ helperBlock->bbFlags |= BBF_BACKWARD_JUMP;
+
+ // Update weights
+ remainderBlock->inheritWeight(block);
+ helperBlock->inheritWeightPercentage(block, 100 - HIGH_PROBABILITY);
+
+ // Fill in test block
+ //
+ // --ppCounter;
+ GenTree* ppCounterBefore = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT);
+ GenTree* ppCounterAfter = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT);
+ GenTree* one = compiler->gtNewIconNode(1, TYP_INT);
+ GenTree* ppCounterSub = compiler->gtNewOperNode(GT_SUB, TYP_INT, ppCounterBefore, one);
+ GenTree* ppCounterAsg = compiler->gtNewOperNode(GT_ASG, TYP_INT, ppCounterAfter, ppCounterSub);
+
+ compiler->fgNewStmtAtEnd(block, ppCounterAsg);
+
+ // if (ppCounter > 0), bypass helper call
+ GenTree* ppCounterUpdated = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT);
+ GenTree* zero = compiler->gtNewIconNode(0, TYP_INT);
+ GenTree* compare = compiler->gtNewOperNode(GT_GT, TYP_INT, ppCounterUpdated, zero);
+ GenTree* jmp = compiler->gtNewOperNode(GT_JTRUE, TYP_VOID, compare);
+
+ compiler->fgNewStmtAtEnd(block, jmp);
+
+ // Fill in helper block
+ //
+ // call PPHelper(&ppCounter, ilOffset)
+ GenTree* ilOffsetNode = compiler->gtNewIconNode(ilOffset, TYP_INT);
+ GenTree* ppCounterRef = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT);
+ GenTree* ppCounterAddr = compiler->gtNewOperNode(GT_ADDR, TYP_I_IMPL, ppCounterRef);
+ GenTreeCall::Use* helperArgs = compiler->gtNewCallArgs(ppCounterAddr, ilOffsetNode);
+ GenTreeCall* helperCall = compiler->gtNewHelperCallNode(CORINFO_HELP_PATCHPOINT, TYP_VOID, helperArgs);
+
+ compiler->fgNewStmtAtEnd(helperBlock, helperCall);
+ }
+
+ // TransformEntry: initialize the shared patchpoint counter in the entry block: ppCounter = initialCounterValue
+ void TransformEntry(BasicBlock* block)
+ {
+ assert((block->bbFlags & BBF_PATCHPOINT) == 0);
+
+ int initialCounterValue = JitConfig.TC_OnStackReplacement_InitialCounter();
+
+ if (initialCounterValue < 0)
+ {
+ initialCounterValue = 0;
+ }
+
+ GenTree* initialCounterNode = compiler->gtNewIconNode(initialCounterValue, TYP_INT);
+ GenTree* ppCounterRef = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT);
+ GenTree* ppCounterAsg = compiler->gtNewOperNode(GT_ASG, TYP_INT, ppCounterRef, initialCounterNode);
+
+ compiler->fgNewStmtNearEnd(block, ppCounterAsg);
+ }
+};
+
+//------------------------------------------------------------------------
+// fgTransformPatchpoints: expansion of patchpoints into control flow.
+//
+// Notes:
+//
+// Patchpoints are placed in the JIT IR during importation, and get expanded
+// here into normal JIT IR.
+//
+void Compiler::fgTransformPatchpoints()
+{
+ JITDUMP("\n*************** in fgTransformPatchpoints\n");
+
+ if (!doesMethodHavePatchpoints())
+ {
+ JITDUMP(" -- no patchpoints to transform\n");
+ return;
+ }
+
+ // We should only be adding patchpoints at Tier0, so should not be in an inlinee
+ assert(!compIsForInlining());
+
+ // We currently can't do OSR in methods with localloc.
+ // Such methods don't have a fixed relationship between frame and stack pointers.
+ //
+ // This is true whether or not the localloc was executed in the original method.
+ //
+ // TODO: handle this case, or else check this earlier and fall back to fully
+ // optimizing the method (ala QJFL=0).
+ if (compLocallocUsed)
+ {
+ JITDUMP(" -- unable to handle methods with localloc\n");
+ return;
+ }
+
+ // We currently can't do OSR in synchronized methods. We need to alter
+ // the logic in fgAddSyncMethodEnterExit for OSR to not try and obtain the
+ // monitor (since the original method will have done so) and set the monitor
+ // obtained flag to true (or reuse the original method slot value).
+ if ((info.compFlags & CORINFO_FLG_SYNCH) != 0)
+ {
+ JITDUMP(" -- unable to handle synchronized methods\n");
+ return;
+ }
+
+ if (opts.IsReversePInvoke())
+ {
+ JITDUMP(" -- unable to handle Reverse P/Invoke\n");
+ return;
+ }
+
+ PatchpointTransformer ppTransformer(this);
+ int count = ppTransformer.Run();
+ JITDUMP("\n*************** After fgTransformPatchpoints() [%d patchpoints transformed]\n", count);
+ INDEBUG(if (verbose) { fgDispBasicBlocks(true); });
+}
diff --git a/src/coreclr/src/jit/rangecheck.cpp b/src/coreclr/src/jit/rangecheck.cpp
index 14044bffc11575..3019824427f266 100644
--- a/src/coreclr/src/jit/rangecheck.cpp
+++ b/src/coreclr/src/jit/rangecheck.cpp
@@ -763,7 +763,7 @@ void RangeCheck::MergeAssertion(BasicBlock* block, GenTree* op, Range* pRange DE
assertions = block->bbAssertionIn;
}
- if (!BitVecOps::MayBeUninit(assertions))
+ if (!BitVecOps::MayBeUninit(assertions) && (m_pCompiler->GetAssertionCount() > 0))
{
// Perform the merge step to fine tune the range value.
MergeEdgeAssertions(op->AsLclVarCommon(), assertions, pRange);
@@ -889,7 +889,7 @@ Range RangeCheck::ComputeRangeForLocalDef(BasicBlock* block,
}
#endif
Range range = GetRange(ssaDef->GetBlock(), ssaDef->GetAssignment()->gtGetOp2(), monIncreasing DEBUGARG(indent));
- if (!BitVecOps::MayBeUninit(block->bbAssertionIn))
+ if (!BitVecOps::MayBeUninit(block->bbAssertionIn) && (m_pCompiler->GetAssertionCount() > 0))
{
JITDUMP("Merge assertions from " FMT_BB ":%s for assignment about [%06d]\n", block->bbNum,
BitVecOps::ToString(m_pCompiler->apTraits, block->bbAssertionIn),
diff --git a/src/coreclr/src/jit/simdcodegenxarch.cpp b/src/coreclr/src/jit/simdcodegenxarch.cpp
index d49fa6008451d8..8ca69e35be37ac 100644
--- a/src/coreclr/src/jit/simdcodegenxarch.cpp
+++ b/src/coreclr/src/jit/simdcodegenxarch.cpp
@@ -73,11 +73,6 @@ instruction CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_type
{
// AVX supports broadcast instructions to populate YMM reg with a single float/double value from memory.
// AVX2 supports broadcast instructions to populate YMM reg with a single value from memory or mm reg.
- // If we decide to use AVX2 only, we can remove this assert.
- if (!compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_USE_AVX2))
- {
- assert(baseType == TYP_FLOAT || baseType == TYP_DOUBLE);
- }
switch (baseType)
{
case TYP_FLOAT:
diff --git a/src/coreclr/src/jit/valuenum.cpp b/src/coreclr/src/jit/valuenum.cpp
index 66f5454f62a572..679e4089828c45 100644
--- a/src/coreclr/src/jit/valuenum.cpp
+++ b/src/coreclr/src/jit/valuenum.cpp
@@ -5794,9 +5794,17 @@ void Compiler::fgValueNumber()
// these are variables that are read before being initialized (at least on some control flow paths)
// if they are not must-init, then they get VNF_InitVal(i), as with the param case.)
- bool isZeroed = (info.compInitMem || varDsc->lvMustInit);
- ValueNum initVal = ValueNumStore::NoVN; // We must assign a new value to initVal
- var_types typ = varDsc->TypeGet();
+ bool isZeroed = (info.compInitMem || varDsc->lvMustInit);
+
+ // For OSR, locals or promoted fields of locals may be missing the initial def
+ // because of partial importation. We can't assume they are zero.
+ if (lvaIsOSRLocal(lclNum))
+ {
+ isZeroed = false;
+ }
+
+ ValueNum initVal = ValueNumStore::NoVN; // We must assign a new value to initVal
+ var_types typ = varDsc->TypeGet();
switch (typ)
{
@@ -6010,7 +6018,8 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk)
BasicBlock::MemoryPhiArg* phiArgs = blk->bbMemorySsaPhiFunc[memoryKind];
assert(phiArgs != BasicBlock::EmptyMemoryPhiDef);
// There should be > 1 args to a phi.
- assert(phiArgs->m_nextArg != nullptr);
+ // But OSR might leave around "dead" try entry blocks...
+ assert((phiArgs->m_nextArg != nullptr) || opts.IsOSR());
ValueNum phiAppVN = vnStore->VNForIntCon(phiArgs->GetSsaNum());
JITDUMP(" Building phi application: $%x = SSA# %d.\n", phiAppVN, phiArgs->GetSsaNum());
bool allSame = true;
diff --git a/src/coreclr/src/pal/prebuilt/idl/clrinternal_i.cpp b/src/coreclr/src/pal/prebuilt/idl/clrinternal_i.cpp
index 5c52018be7dae5..7cfb798aab89ee 100644
--- a/src/coreclr/src/pal/prebuilt/idl/clrinternal_i.cpp
+++ b/src/coreclr/src/pal/prebuilt/idl/clrinternal_i.cpp
@@ -61,12 +61,6 @@ typedef IID CLSID;
#endif // !_MIDL_USE_GUIDDEF_
-MIDL_DEFINE_GUID(IID, IID_IExecutionEngine,0x7AF02DAC,0x2A33,0x494b,0xA0,0x9F,0x25,0xE0,0x0A,0x93,0xC6,0xF8);
-
-
-MIDL_DEFINE_GUID(IID, IID_IEEMemoryManager,0x17713B61,0xB59F,0x4e13,0xBA,0xAF,0x91,0x62,0x3D,0xC8,0xAD,0xC0);
-
-
MIDL_DEFINE_GUID(IID, IID_IPrivateManagedExceptionReporting,0xAD76A023,0x332D,0x4298,0x80,0x01,0x07,0xAA,0x93,0x50,0xDC,0xA4);
#undef MIDL_DEFINE_GUID
diff --git a/src/coreclr/src/pal/prebuilt/inc/clrinternal.h b/src/coreclr/src/pal/prebuilt/inc/clrinternal.h
index 1afbb757f68631..e3625a7c312935 100644
--- a/src/coreclr/src/pal/prebuilt/inc/clrinternal.h
+++ b/src/coreclr/src/pal/prebuilt/inc/clrinternal.h
@@ -39,20 +39,6 @@
/* Forward Declarations */
-#ifndef __IExecutionEngine_FWD_DEFINED__
-#define __IExecutionEngine_FWD_DEFINED__
-typedef interface IExecutionEngine IExecutionEngine;
-
-#endif /* __IExecutionEngine_FWD_DEFINED__ */
-
-
-#ifndef __IEEMemoryManager_FWD_DEFINED__
-#define __IEEMemoryManager_FWD_DEFINED__
-typedef interface IEEMemoryManager IEEMemoryManager;
-
-#endif /* __IEEMemoryManager_FWD_DEFINED__ */
-
-
#ifndef __IPrivateManagedExceptionReporting_FWD_DEFINED__
#define __IPrivateManagedExceptionReporting_FWD_DEFINED__
typedef interface IPrivateManagedExceptionReporting IPrivateManagedExceptionReporting;
@@ -72,48 +58,6 @@ extern "C"{
/* interface __MIDL_itf_clrinternal_0000_0000 */
/* [local] */
-#if 0
-typedef struct _OSVERSIONINFOA
- {
- DWORD dwOSVersionInfoSize;
- DWORD dwMajorVersion;
- DWORD dwMinorVersion;
- DWORD dwBuildNumber;
- DWORD dwPlatformId;
- CHAR szCSDVersion[ 128 ];
- } OSVERSIONINFOA;
-
-typedef struct _OSVERSIONINFOA *POSVERSIONINFOA;
-
-typedef struct _OSVERSIONINFOA *LPOSVERSIONINFOA;
-
-typedef struct _OSVERSIONINFOW
- {
- DWORD dwOSVersionInfoSize;
- DWORD dwMajorVersion;
- DWORD dwMinorVersion;
- DWORD dwBuildNumber;
- DWORD dwPlatformId;
- WCHAR szCSDVersion[ 128 ];
- } OSVERSIONINFOW;
-
-typedef struct _OSVERSIONINFOW *POSVERSIONINFOW;
-
-typedef struct _OSVERSIONINFOW *LPOSVERSIONINFOW;
-
-typedef struct _OSVERSIONINFOW RTL_OSVERSIONINFOW;
-
-typedef struct _OSVERSIONINFOW *PRTL_OSVERSIONINFOW;
-
-typedef OSVERSIONINFOA OSVERSIONINFO;
-
-typedef POSVERSIONINFOA POSVERSIONINFO;
-
-typedef LPOSVERSIONINFOA LPOSVERSIONINFO;
-
-#endif
-EXTERN_GUID(IID_IExecutionEngine, 0x7AF02DAC, 0x2A33, 0x494b, 0xA0, 0x9F, 0x25, 0xE0, 0x0A, 0x93, 0xC6, 0xF8);
-EXTERN_GUID(IID_IEEMemoryManager, 0x17713b61, 0xb59f, 0x4e13, 0xba, 0xaf, 0x91, 0x62, 0x3d, 0xc8, 0xad, 0xc0);
EXTERN_GUID(CLR_ID_V4_DESKTOP, 0x267f3989, 0xd786, 0x4b9a, 0x9a, 0xf6, 0xd1, 0x9e, 0x42, 0xd5, 0x57, 0xec);
EXTERN_GUID(CLR_ID_CORECLR, 0x8CB8E075, 0x0A91, 0x408E, 0x92, 0x28, 0xD6, 0x6E, 0x00, 0xA3, 0xBF, 0xF6 );
EXTERN_GUID(CLR_ID_PHONE_CLR, 0xE7237E9C, 0x31C0, 0x488C, 0xAD, 0x48, 0x32, 0x4D, 0x3E, 0x7E, 0xD9, 0x2A);
@@ -121,13 +65,7 @@ EXTERN_GUID(CLR_ID_ONECORE_CLR, 0xb1ee760d, 0x6c4a, 0x4533, 0xba, 0x41, 0x6f, 0x
EXTERN_GUID(IID_IPrivateManagedExceptionReporting, 0xad76a023, 0x332d, 0x4298, 0x80, 0x01, 0x07, 0xaa, 0x93, 0x50, 0xdc, 0xa4);
typedef void *CRITSEC_COOKIE;
-typedef void *EVENT_COOKIE;
-
-typedef void *SEMAPHORE_COOKIE;
-
-typedef void *MUTEX_COOKIE;
-
-typedef /* [public][public] */
+typedef /* [public] */
enum __MIDL___MIDL_itf_clrinternal_0000_0000_0001
{
CRST_DEFAULT = 0,
@@ -142,555 +80,11 @@ enum __MIDL___MIDL_itf_clrinternal_0000_0000_0001
CRST_DEBUG_ONLY_CHECK_FORBID_SUSPEND_THREAD = 0x200
} CrstFlags;
-typedef VOID ( WINAPI *PTLS_CALLBACK_FUNCTION )(
- PVOID __MIDL____MIDL_itf_clrinternal_0000_00000000);
-
extern RPC_IF_HANDLE __MIDL_itf_clrinternal_0000_0000_v0_0_c_ifspec;
extern RPC_IF_HANDLE __MIDL_itf_clrinternal_0000_0000_v0_0_s_ifspec;
-#ifndef __IExecutionEngine_INTERFACE_DEFINED__
-#define __IExecutionEngine_INTERFACE_DEFINED__
-
-/* interface IExecutionEngine */
-/* [object][local][unique][helpstring][uuid] */
-
-
-EXTERN_C const IID IID_IExecutionEngine;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("7AF02DAC-2A33-494b-A09F-25E00A93C6F8")
- IExecutionEngine : public IUnknown
- {
- public:
- virtual CRITSEC_COOKIE STDMETHODCALLTYPE CreateLock(
- /* [in] */ LPCSTR szTag,
- /* [in] */ LPCSTR level,
- /* [in] */ CrstFlags flags) = 0;
-
- virtual void STDMETHODCALLTYPE DestroyLock(
- /* [in] */ CRITSEC_COOKIE lock) = 0;
-
- virtual void STDMETHODCALLTYPE AcquireLock(
- /* [in] */ CRITSEC_COOKIE lock) = 0;
-
- virtual void STDMETHODCALLTYPE ReleaseLock(
- /* [in] */ CRITSEC_COOKIE lock) = 0;
-
- virtual EVENT_COOKIE STDMETHODCALLTYPE CreateAutoEvent(
- /* [in] */ BOOL bInitialState) = 0;
-
- virtual EVENT_COOKIE STDMETHODCALLTYPE CreateManualEvent(
- /* [in] */ BOOL bInitialState) = 0;
-
- virtual void STDMETHODCALLTYPE CloseEvent(
- /* [in] */ EVENT_COOKIE event) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrSetEvent(
- /* [in] */ EVENT_COOKIE event) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrResetEvent(
- /* [in] */ EVENT_COOKIE event) = 0;
-
- virtual DWORD STDMETHODCALLTYPE WaitForEvent(
- /* [in] */ EVENT_COOKIE event,
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable) = 0;
-
- virtual DWORD STDMETHODCALLTYPE WaitForSingleObject(
- /* [in] */ HANDLE handle,
- /* [in] */ DWORD dwMilliseconds) = 0;
-
- virtual SEMAPHORE_COOKIE STDMETHODCALLTYPE ClrCreateSemaphore(
- /* [in] */ DWORD dwInitial,
- /* [in] */ DWORD dwMax) = 0;
-
- virtual void STDMETHODCALLTYPE ClrCloseSemaphore(
- /* [in] */ SEMAPHORE_COOKIE semaphore) = 0;
-
- virtual DWORD STDMETHODCALLTYPE ClrWaitForSemaphore(
- /* [in] */ SEMAPHORE_COOKIE semaphore,
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrReleaseSemaphore(
- /* [in] */ SEMAPHORE_COOKIE semaphore,
- /* [in] */ LONG lReleaseCount,
- /* [in] */ LONG *lpPreviousCount) = 0;
-
- virtual MUTEX_COOKIE STDMETHODCALLTYPE ClrCreateMutex(
- /* [in] */ LPSECURITY_ATTRIBUTES lpMutexAttributes,
- /* [in] */ BOOL bInitialOwner,
- /* [in] */ LPCTSTR lpName) = 0;
-
- virtual DWORD STDMETHODCALLTYPE ClrWaitForMutex(
- /* [in] */ MUTEX_COOKIE mutex,
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrReleaseMutex(
- /* [in] */ MUTEX_COOKIE mutex) = 0;
-
- virtual void STDMETHODCALLTYPE ClrCloseMutex(
- /* [in] */ MUTEX_COOKIE mutex) = 0;
-
- virtual DWORD STDMETHODCALLTYPE ClrSleepEx(
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrAllocationDisallowed( void) = 0;
-
- virtual void STDMETHODCALLTYPE GetLastThrownObjectExceptionFromThread(
- /* [out] */ void **ppvException) = 0;
-
- };
-
-
-#else /* C style interface */
-
- typedef struct IExecutionEngineVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IExecutionEngine * This,
- /* [in] */ REFIID riid,
- /* [annotation][iid_is][out] */
- _COM_Outptr_ void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IExecutionEngine * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IExecutionEngine * This);
-
- CRITSEC_COOKIE ( STDMETHODCALLTYPE *CreateLock )(
- IExecutionEngine * This,
- /* [in] */ LPCSTR szTag,
- /* [in] */ LPCSTR level,
- /* [in] */ CrstFlags flags);
-
- void ( STDMETHODCALLTYPE *DestroyLock )(
- IExecutionEngine * This,
- /* [in] */ CRITSEC_COOKIE lock);
-
- void ( STDMETHODCALLTYPE *AcquireLock )(
- IExecutionEngine * This,
- /* [in] */ CRITSEC_COOKIE lock);
-
- void ( STDMETHODCALLTYPE *ReleaseLock )(
- IExecutionEngine * This,
- /* [in] */ CRITSEC_COOKIE lock);
-
- EVENT_COOKIE ( STDMETHODCALLTYPE *CreateAutoEvent )(
- IExecutionEngine * This,
- /* [in] */ BOOL bInitialState);
-
- EVENT_COOKIE ( STDMETHODCALLTYPE *CreateManualEvent )(
- IExecutionEngine * This,
- /* [in] */ BOOL bInitialState);
-
- void ( STDMETHODCALLTYPE *CloseEvent )(
- IExecutionEngine * This,
- /* [in] */ EVENT_COOKIE event);
-
- BOOL ( STDMETHODCALLTYPE *ClrSetEvent )(
- IExecutionEngine * This,
- /* [in] */ EVENT_COOKIE event);
-
- BOOL ( STDMETHODCALLTYPE *ClrResetEvent )(
- IExecutionEngine * This,
- /* [in] */ EVENT_COOKIE event);
-
- DWORD ( STDMETHODCALLTYPE *WaitForEvent )(
- IExecutionEngine * This,
- /* [in] */ EVENT_COOKIE event,
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable);
-
- DWORD ( STDMETHODCALLTYPE *WaitForSingleObject )(
- IExecutionEngine * This,
- /* [in] */ HANDLE handle,
- /* [in] */ DWORD dwMilliseconds);
-
- SEMAPHORE_COOKIE ( STDMETHODCALLTYPE *ClrCreateSemaphore )(
- IExecutionEngine * This,
- /* [in] */ DWORD dwInitial,
- /* [in] */ DWORD dwMax);
-
- void ( STDMETHODCALLTYPE *ClrCloseSemaphore )(
- IExecutionEngine * This,
- /* [in] */ SEMAPHORE_COOKIE semaphore);
-
- DWORD ( STDMETHODCALLTYPE *ClrWaitForSemaphore )(
- IExecutionEngine * This,
- /* [in] */ SEMAPHORE_COOKIE semaphore,
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable);
-
- BOOL ( STDMETHODCALLTYPE *ClrReleaseSemaphore )(
- IExecutionEngine * This,
- /* [in] */ SEMAPHORE_COOKIE semaphore,
- /* [in] */ LONG lReleaseCount,
- /* [in] */ LONG *lpPreviousCount);
-
- MUTEX_COOKIE ( STDMETHODCALLTYPE *ClrCreateMutex )(
- IExecutionEngine * This,
- /* [in] */ LPSECURITY_ATTRIBUTES lpMutexAttributes,
- /* [in] */ BOOL bInitialOwner,
- /* [in] */ LPCTSTR lpName);
-
- DWORD ( STDMETHODCALLTYPE *ClrWaitForMutex )(
- IExecutionEngine * This,
- /* [in] */ MUTEX_COOKIE mutex,
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable);
-
- BOOL ( STDMETHODCALLTYPE *ClrReleaseMutex )(
- IExecutionEngine * This,
- /* [in] */ MUTEX_COOKIE mutex);
-
- void ( STDMETHODCALLTYPE *ClrCloseMutex )(
- IExecutionEngine * This,
- /* [in] */ MUTEX_COOKIE mutex);
-
- DWORD ( STDMETHODCALLTYPE *ClrSleepEx )(
- IExecutionEngine * This,
- /* [in] */ DWORD dwMilliseconds,
- /* [in] */ BOOL bAlertable);
-
- BOOL ( STDMETHODCALLTYPE *ClrAllocationDisallowed )(
- IExecutionEngine * This);
-
- void ( STDMETHODCALLTYPE *GetLastThrownObjectExceptionFromThread )(
- IExecutionEngine * This,
- /* [out] */ void **ppvException);
-
- END_INTERFACE
- } IExecutionEngineVtbl;
-
- interface IExecutionEngine
- {
- CONST_VTBL struct IExecutionEngineVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IExecutionEngine_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IExecutionEngine_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IExecutionEngine_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-#define IExecutionEngine_CreateLock(This,szTag,level,flags) \
- ( (This)->lpVtbl -> CreateLock(This,szTag,level,flags) )
-
-#define IExecutionEngine_DestroyLock(This,lock) \
- ( (This)->lpVtbl -> DestroyLock(This,lock) )
-
-#define IExecutionEngine_AcquireLock(This,lock) \
- ( (This)->lpVtbl -> AcquireLock(This,lock) )
-
-#define IExecutionEngine_ReleaseLock(This,lock) \
- ( (This)->lpVtbl -> ReleaseLock(This,lock) )
-
-#define IExecutionEngine_CreateAutoEvent(This,bInitialState) \
- ( (This)->lpVtbl -> CreateAutoEvent(This,bInitialState) )
-
-#define IExecutionEngine_CreateManualEvent(This,bInitialState) \
- ( (This)->lpVtbl -> CreateManualEvent(This,bInitialState) )
-
-#define IExecutionEngine_CloseEvent(This,event) \
- ( (This)->lpVtbl -> CloseEvent(This,event) )
-
-#define IExecutionEngine_ClrSetEvent(This,event) \
- ( (This)->lpVtbl -> ClrSetEvent(This,event) )
-
-#define IExecutionEngine_ClrResetEvent(This,event) \
- ( (This)->lpVtbl -> ClrResetEvent(This,event) )
-
-#define IExecutionEngine_WaitForEvent(This,event,dwMilliseconds,bAlertable) \
- ( (This)->lpVtbl -> WaitForEvent(This,event,dwMilliseconds,bAlertable) )
-
-#define IExecutionEngine_WaitForSingleObject(This,handle,dwMilliseconds) \
- ( (This)->lpVtbl -> WaitForSingleObject(This,handle,dwMilliseconds) )
-
-#define IExecutionEngine_ClrCreateSemaphore(This,dwInitial,dwMax) \
- ( (This)->lpVtbl -> ClrCreateSemaphore(This,dwInitial,dwMax) )
-
-#define IExecutionEngine_ClrCloseSemaphore(This,semaphore) \
- ( (This)->lpVtbl -> ClrCloseSemaphore(This,semaphore) )
-
-#define IExecutionEngine_ClrWaitForSemaphore(This,semaphore,dwMilliseconds,bAlertable) \
- ( (This)->lpVtbl -> ClrWaitForSemaphore(This,semaphore,dwMilliseconds,bAlertable) )
-
-#define IExecutionEngine_ClrReleaseSemaphore(This,semaphore,lReleaseCount,lpPreviousCount) \
- ( (This)->lpVtbl -> ClrReleaseSemaphore(This,semaphore,lReleaseCount,lpPreviousCount) )
-
-#define IExecutionEngine_ClrCreateMutex(This,lpMutexAttributes,bInitialOwner,lpName) \
- ( (This)->lpVtbl -> ClrCreateMutex(This,lpMutexAttributes,bInitialOwner,lpName) )
-
-#define IExecutionEngine_ClrWaitForMutex(This,mutex,dwMilliseconds,bAlertable) \
- ( (This)->lpVtbl -> ClrWaitForMutex(This,mutex,dwMilliseconds,bAlertable) )
-
-#define IExecutionEngine_ClrReleaseMutex(This,mutex) \
- ( (This)->lpVtbl -> ClrReleaseMutex(This,mutex) )
-
-#define IExecutionEngine_ClrCloseMutex(This,mutex) \
- ( (This)->lpVtbl -> ClrCloseMutex(This,mutex) )
-
-#define IExecutionEngine_ClrSleepEx(This,dwMilliseconds,bAlertable) \
- ( (This)->lpVtbl -> ClrSleepEx(This,dwMilliseconds,bAlertable) )
-
-#define IExecutionEngine_ClrAllocationDisallowed(This) \
- ( (This)->lpVtbl -> ClrAllocationDisallowed(This) )
-
-#define IExecutionEngine_GetLastThrownObjectExceptionFromThread(This,ppvException) \
- ( (This)->lpVtbl -> GetLastThrownObjectExceptionFromThread(This,ppvException) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IExecutionEngine_INTERFACE_DEFINED__ */
-
-
-/* interface __MIDL_itf_clrinternal_0000_0001 */
-/* [local] */
-
-#if !defined(_WINNT_) && !defined(_NTMMAPI_)
-typedef void *PMEMORY_BASIC_INFORMATION;
-
-#endif
-
-
-extern RPC_IF_HANDLE __MIDL_itf_clrinternal_0000_0001_v0_0_c_ifspec;
-extern RPC_IF_HANDLE __MIDL_itf_clrinternal_0000_0001_v0_0_s_ifspec;
-
-#ifndef __IEEMemoryManager_INTERFACE_DEFINED__
-#define __IEEMemoryManager_INTERFACE_DEFINED__
-
-/* interface IEEMemoryManager */
-/* [object][local][unique][helpstring][uuid] */
-
-
-EXTERN_C const IID IID_IEEMemoryManager;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-
- MIDL_INTERFACE("17713B61-B59F-4e13-BAAF-91623DC8ADC0")
- IEEMemoryManager : public IUnknown
- {
- public:
- virtual LPVOID STDMETHODCALLTYPE ClrVirtualAlloc(
- /* [in] */ LPVOID lpAddress,
- /* [in] */ SIZE_T dwSize,
- /* [in] */ DWORD flAllocationType,
- /* [in] */ DWORD flProtect) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrVirtualFree(
- /* [in] */ LPVOID lpAddress,
- /* [in] */ SIZE_T dwSize,
- /* [in] */ DWORD dwFreeType) = 0;
-
- virtual SIZE_T STDMETHODCALLTYPE ClrVirtualQuery(
- /* [in] */ const void *lpAddress,
- /* [in] */ PMEMORY_BASIC_INFORMATION lpBuffer,
- /* [in] */ SIZE_T dwLength) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrVirtualProtect(
- /* [in] */ LPVOID lpAddress,
- /* [in] */ SIZE_T dwSize,
- /* [in] */ DWORD flNewProtect,
- /* [in] */ DWORD *lpflOldProtect) = 0;
-
- virtual HANDLE STDMETHODCALLTYPE ClrGetProcessHeap( void) = 0;
-
- virtual HANDLE STDMETHODCALLTYPE ClrHeapCreate(
- /* [in] */ DWORD flOptions,
- /* [in] */ SIZE_T dwInitialSize,
- /* [in] */ SIZE_T dwMaximumSize) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrHeapDestroy(
- /* [in] */ HANDLE hHeap) = 0;
-
- virtual LPVOID STDMETHODCALLTYPE ClrHeapAlloc(
- /* [in] */ HANDLE hHeap,
- /* [in] */ DWORD dwFlags,
- /* [in] */ SIZE_T dwBytes) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrHeapFree(
- /* [in] */ HANDLE hHeap,
- /* [in] */ DWORD dwFlags,
- /* [in] */ LPVOID lpMem) = 0;
-
- virtual BOOL STDMETHODCALLTYPE ClrHeapValidate(
- /* [in] */ HANDLE hHeap,
- /* [in] */ DWORD dwFlags,
- /* [in] */ const void *lpMem) = 0;
-
- virtual HANDLE STDMETHODCALLTYPE ClrGetProcessExecutableHeap( void) = 0;
-
- };
-
-
-#else /* C style interface */
-
- typedef struct IEEMemoryManagerVtbl
- {
- BEGIN_INTERFACE
-
- HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
- IEEMemoryManager * This,
- /* [in] */ REFIID riid,
- /* [annotation][iid_is][out] */
- _COM_Outptr_ void **ppvObject);
-
- ULONG ( STDMETHODCALLTYPE *AddRef )(
- IEEMemoryManager * This);
-
- ULONG ( STDMETHODCALLTYPE *Release )(
- IEEMemoryManager * This);
-
- LPVOID ( STDMETHODCALLTYPE *ClrVirtualAlloc )(
- IEEMemoryManager * This,
- /* [in] */ LPVOID lpAddress,
- /* [in] */ SIZE_T dwSize,
- /* [in] */ DWORD flAllocationType,
- /* [in] */ DWORD flProtect);
-
- BOOL ( STDMETHODCALLTYPE *ClrVirtualFree )(
- IEEMemoryManager * This,
- /* [in] */ LPVOID lpAddress,
- /* [in] */ SIZE_T dwSize,
- /* [in] */ DWORD dwFreeType);
-
- SIZE_T ( STDMETHODCALLTYPE *ClrVirtualQuery )(
- IEEMemoryManager * This,
- /* [in] */ const void *lpAddress,
- /* [in] */ PMEMORY_BASIC_INFORMATION lpBuffer,
- /* [in] */ SIZE_T dwLength);
-
- BOOL ( STDMETHODCALLTYPE *ClrVirtualProtect )(
- IEEMemoryManager * This,
- /* [in] */ LPVOID lpAddress,
- /* [in] */ SIZE_T dwSize,
- /* [in] */ DWORD flNewProtect,
- /* [in] */ DWORD *lpflOldProtect);
-
- HANDLE ( STDMETHODCALLTYPE *ClrGetProcessHeap )(
- IEEMemoryManager * This);
-
- HANDLE ( STDMETHODCALLTYPE *ClrHeapCreate )(
- IEEMemoryManager * This,
- /* [in] */ DWORD flOptions,
- /* [in] */ SIZE_T dwInitialSize,
- /* [in] */ SIZE_T dwMaximumSize);
-
- BOOL ( STDMETHODCALLTYPE *ClrHeapDestroy )(
- IEEMemoryManager * This,
- /* [in] */ HANDLE hHeap);
-
- LPVOID ( STDMETHODCALLTYPE *ClrHeapAlloc )(
- IEEMemoryManager * This,
- /* [in] */ HANDLE hHeap,
- /* [in] */ DWORD dwFlags,
- /* [in] */ SIZE_T dwBytes);
-
- BOOL ( STDMETHODCALLTYPE *ClrHeapFree )(
- IEEMemoryManager * This,
- /* [in] */ HANDLE hHeap,
- /* [in] */ DWORD dwFlags,
- /* [in] */ LPVOID lpMem);
-
- BOOL ( STDMETHODCALLTYPE *ClrHeapValidate )(
- IEEMemoryManager * This,
- /* [in] */ HANDLE hHeap,
- /* [in] */ DWORD dwFlags,
- /* [in] */ const void *lpMem);
-
- HANDLE ( STDMETHODCALLTYPE *ClrGetProcessExecutableHeap )(
- IEEMemoryManager * This);
-
- END_INTERFACE
- } IEEMemoryManagerVtbl;
-
- interface IEEMemoryManager
- {
- CONST_VTBL struct IEEMemoryManagerVtbl *lpVtbl;
- };
-
-
-
-#ifdef COBJMACROS
-
-
-#define IEEMemoryManager_QueryInterface(This,riid,ppvObject) \
- ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
-
-#define IEEMemoryManager_AddRef(This) \
- ( (This)->lpVtbl -> AddRef(This) )
-
-#define IEEMemoryManager_Release(This) \
- ( (This)->lpVtbl -> Release(This) )
-
-
-#define IEEMemoryManager_ClrVirtualAlloc(This,lpAddress,dwSize,flAllocationType,flProtect) \
- ( (This)->lpVtbl -> ClrVirtualAlloc(This,lpAddress,dwSize,flAllocationType,flProtect) )
-
-#define IEEMemoryManager_ClrVirtualFree(This,lpAddress,dwSize,dwFreeType) \
- ( (This)->lpVtbl -> ClrVirtualFree(This,lpAddress,dwSize,dwFreeType) )
-
-#define IEEMemoryManager_ClrVirtualQuery(This,lpAddress,lpBuffer,dwLength) \
- ( (This)->lpVtbl -> ClrVirtualQuery(This,lpAddress,lpBuffer,dwLength) )
-
-#define IEEMemoryManager_ClrVirtualProtect(This,lpAddress,dwSize,flNewProtect,lpflOldProtect) \
- ( (This)->lpVtbl -> ClrVirtualProtect(This,lpAddress,dwSize,flNewProtect,lpflOldProtect) )
-
-#define IEEMemoryManager_ClrGetProcessHeap(This) \
- ( (This)->lpVtbl -> ClrGetProcessHeap(This) )
-
-#define IEEMemoryManager_ClrHeapCreate(This,flOptions,dwInitialSize,dwMaximumSize) \
- ( (This)->lpVtbl -> ClrHeapCreate(This,flOptions,dwInitialSize,dwMaximumSize) )
-
-#define IEEMemoryManager_ClrHeapDestroy(This,hHeap) \
- ( (This)->lpVtbl -> ClrHeapDestroy(This,hHeap) )
-
-#define IEEMemoryManager_ClrHeapAlloc(This,hHeap,dwFlags,dwBytes) \
- ( (This)->lpVtbl -> ClrHeapAlloc(This,hHeap,dwFlags,dwBytes) )
-
-#define IEEMemoryManager_ClrHeapFree(This,hHeap,dwFlags,lpMem) \
- ( (This)->lpVtbl -> ClrHeapFree(This,hHeap,dwFlags,lpMem) )
-
-#define IEEMemoryManager_ClrHeapValidate(This,hHeap,dwFlags,lpMem) \
- ( (This)->lpVtbl -> ClrHeapValidate(This,hHeap,dwFlags,lpMem) )
-
-#define IEEMemoryManager_ClrGetProcessExecutableHeap(This) \
- ( (This)->lpVtbl -> ClrGetProcessExecutableHeap(This) )
-
-#endif /* COBJMACROS */
-
-
-#endif /* C style interface */
-
-
-
-
-#endif /* __IEEMemoryManager_INTERFACE_DEFINED__ */
-
-
#ifndef __IPrivateManagedExceptionReporting_INTERFACE_DEFINED__
#define __IPrivateManagedExceptionReporting_INTERFACE_DEFINED__
diff --git a/src/coreclr/src/pal/prebuilt/inc/mscoree.h b/src/coreclr/src/pal/prebuilt/inc/mscoree.h
index 5eceabf8450b01..2d83809685411e 100644
--- a/src/coreclr/src/pal/prebuilt/inc/mscoree.h
+++ b/src/coreclr/src/pal/prebuilt/inc/mscoree.h
@@ -70,9 +70,6 @@ extern "C"{
/* interface __MIDL_itf_mscoree_0000_0000 */
/* [local] */
-#define DECLARE_DEPRECATED
-#define DEPRECATED_CLR_STDAPI STDAPI
-
struct IActivationFactory;
struct IHostControl;
@@ -873,10 +870,6 @@ EXTERN_C const IID IID_ICLRRuntimeHost4;
/* interface __MIDL_itf_mscoree_0000_0003 */
/* [local] */
-#undef DEPRECATED_CLR_STDAPI
-#undef DECLARE_DEPRECATED
-#undef DEPRECATED_CLR_API_MESG
-
extern RPC_IF_HANDLE __MIDL_itf_mscoree_0000_0003_v0_0_c_ifspec;
extern RPC_IF_HANDLE __MIDL_itf_mscoree_0000_0003_v0_0_s_ifspec;
diff --git a/src/coreclr/src/pal/src/CMakeLists.txt b/src/coreclr/src/pal/src/CMakeLists.txt
index 249936cd392708..37aee432577830 100644
--- a/src/coreclr/src/pal/src/CMakeLists.txt
+++ b/src/coreclr/src/pal/src/CMakeLists.txt
@@ -290,34 +290,11 @@ if(CLR_CMAKE_TARGET_LINUX)
pthread
rt
)
- endif()
-
- if(CLR_CMAKE_TARGET_ANDROID)
- find_library(LZMA NAMES lzma)
-
- if(LZMA STREQUAL LZMA-NOTFOUND)
- message(FATAL_ERROR "Cannot find liblzma.")
- endif(LZMA STREQUAL LZMA-NOTFOUND)
-
+ else(NOT CLR_CMAKE_TARGET_ANDROID)
target_link_libraries(coreclrpal
- gnustl_shared
- android-support
- android-glob
+ ${ANDROID_GLOB}
${LZMA})
- endif()
-
- if(CLR_MAKE_TARGET_ANDROID)
- find_library(ANDROID_SUPPORT NAMES android-support)
- find_library(ANDROID_GLOB NAMES android-glob)
-
- if(ANDROID_SUPPORT STREQUAL ANDROID_SUPPORT-NOTFOUND)
- message(FATAL_ERROR "Cannot find android-support.")
- endif()
-
- if(ANDROID_GLOB STREQUAL ANDROID_GLOB-NOTFOUND)
- message(FATAL_ERROR "Cannot find android-glob.")
- endif()
- endif()
+ endif(NOT CLR_CMAKE_TARGET_ANDROID)
target_link_libraries(coreclrpal
dl
diff --git a/src/coreclr/src/pal/src/misc/jitsupport.cpp b/src/coreclr/src/pal/src/misc/jitsupport.cpp
index 0da36ab8903a68..70123f8d18faaf 100644
--- a/src/coreclr/src/pal/src/misc/jitsupport.cpp
+++ b/src/coreclr/src/pal/src/misc/jitsupport.cpp
@@ -21,11 +21,11 @@ PAL_GetJitCpuCapabilityFlags(CORJIT_FLAGS *flags)
{
_ASSERTE(flags);
+ CORJIT_FLAGS &CPUCompileFlags = *flags;
#if defined(HOST_ARM64)
#if HAVE_AUXV_HWCAP_H
unsigned long hwCap = getauxval(AT_HWCAP);
- CORJIT_FLAGS &CPUCompileFlags = *flags;
// HWCAP_* flags are introduced by ARM into the Linux kernel as new extensions are published.
// For a given kernel, some of these flags may not be present yet.
// Use ifdef for each to allow for compilation with any vintage kernel.
@@ -34,95 +34,96 @@ PAL_GetJitCpuCapabilityFlags(CORJIT_FLAGS *flags)
// available, using the latest kernel for release should be sufficient.
#ifdef HWCAP_AES
if (hwCap & HWCAP_AES)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_AES);
+ CPUCompileFlags.Set(InstructionSet_Aes);
#endif
#ifdef HWCAP_ATOMICS
if (hwCap & HWCAP_ATOMICS)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ATOMICS);
+ CPUCompileFlags.Set(InstructionSet_Atomics);
#endif
#ifdef HWCAP_CRC32
if (hwCap & HWCAP_CRC32)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_CRC32);
+ CPUCompileFlags.Set(InstructionSet_Crc32);
#endif
#ifdef HWCAP_DCPOP
- if (hwCap & HWCAP_DCPOP)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_DCPOP);
+// if (hwCap & HWCAP_DCPOP)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_DCPOP);
#endif
#ifdef HWCAP_ASIMDDP
- if (hwCap & HWCAP_ASIMDDP)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_DP);
+// if (hwCap & HWCAP_ASIMDDP)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_DP);
#endif
#ifdef HWCAP_FCMA
- if (hwCap & HWCAP_FCMA)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FCMA);
+// if (hwCap & HWCAP_FCMA)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FCMA);
#endif
#ifdef HWCAP_FP
- if (hwCap & HWCAP_FP)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP);
+// if (hwCap & HWCAP_FP)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP);
#endif
#ifdef HWCAP_FPHP
- if (hwCap & HWCAP_FPHP)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP16);
+// if (hwCap & HWCAP_FPHP)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP16);
#endif
#ifdef HWCAP_JSCVT
- if (hwCap & HWCAP_JSCVT)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_JSCVT);
+// if (hwCap & HWCAP_JSCVT)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_JSCVT);
#endif
#ifdef HWCAP_LRCPC
- if (hwCap & HWCAP_LRCPC)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_LRCPC);
+// if (hwCap & HWCAP_LRCPC)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_LRCPC);
#endif
#ifdef HWCAP_PMULL
- if (hwCap & HWCAP_PMULL)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_PMULL);
+// if (hwCap & HWCAP_PMULL)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_PMULL);
#endif
#ifdef HWCAP_SHA1
if (hwCap & HWCAP_SHA1)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA1);
+ CPUCompileFlags.Set(InstructionSet_Sha1);
#endif
#ifdef HWCAP_SHA2
if (hwCap & HWCAP_SHA2)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA256);
+ CPUCompileFlags.Set(InstructionSet_Sha256);
#endif
#ifdef HWCAP_SHA512
- if (hwCap & HWCAP_SHA512)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA512);
+// if (hwCap & HWCAP_SHA512)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA512);
#endif
#ifdef HWCAP_SHA3
- if (hwCap & HWCAP_SHA3)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA3);
+// if (hwCap & HWCAP_SHA3)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA3);
#endif
#ifdef HWCAP_ASIMD
if (hwCap & HWCAP_ASIMD)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD);
+ CPUCompileFlags.Set(InstructionSet_AdvSimd);
#endif
#ifdef HWCAP_ASIMDRDM
- if (hwCap & HWCAP_ASIMDRDM)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD_V81);
+// if (hwCap & HWCAP_ASIMDRDM)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD_V81);
#endif
#ifdef HWCAP_ASIMDHP
- if (hwCap & HWCAP_ASIMDHP)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD_FP16);
+// if (hwCap & HWCAP_ASIMDHP)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD_FP16);
#endif
#ifdef HWCAP_SM3
- if (hwCap & HWCAP_SM3)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SM3);
+// if (hwCap & HWCAP_SM3)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SM3);
#endif
#ifdef HWCAP_SM4
- if (hwCap & HWCAP_SM4)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SM4);
+// if (hwCap & HWCAP_SM4)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SM4);
#endif
#ifdef HWCAP_SVE
- if (hwCap & HWCAP_SVE)
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SVE);
+// if (hwCap & HWCAP_SVE)
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SVE);
#endif
#else // !HAVE_AUXV_HWCAP_H
// CoreCLR SIMD and FP support is included in ARM64 baseline
// On exceptional basis platforms may leave out support, but CoreCLR does not
// yet support such platforms
// Set baseline flags if OS has not exposed mechanism for us to determine CPU capabilities
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD);
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP);
+ CPUCompileFlags.Set(InstructionSet_AdvSimd);
+// CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP);
#endif // HAVE_AUXV_HWCAP_H
#endif // defined(HOST_ARM64)
+ CPUCompileFlags.Set64BitInstructionSetVariants();
}
diff --git a/src/coreclr/src/pal/src/safecrt/mbusafecrt_internal.h b/src/coreclr/src/pal/src/safecrt/mbusafecrt_internal.h
index 83c1db99a80bf9..00136ecfe2839f 100644
--- a/src/coreclr/src/pal/src/safecrt/mbusafecrt_internal.h
+++ b/src/coreclr/src/pal/src/safecrt/mbusafecrt_internal.h
@@ -18,6 +18,8 @@
#ifndef MBUSAFECRT_INTERNAL_H
#define MBUSAFECRT_INTERNAL_H
+#define PAL_IMPLEMENTATION
+
#include "pal_mstypes.h"
#ifndef DLLEXPORT
diff --git a/src/coreclr/src/pal/src/thread/context.cpp b/src/coreclr/src/pal/src/thread/context.cpp
index d00f6888564793..a89bdb2649f0d4 100644
--- a/src/coreclr/src/pal/src/thread/context.cpp
+++ b/src/coreclr/src/pal/src/thread/context.cpp
@@ -203,7 +203,7 @@ BOOL CONTEXT_GetRegisters(DWORD processId, LPCONTEXT lpContext)
ucontext_t registers;
#if HAVE_PT_REGS
struct pt_regs ptrace_registers;
- if (ptrace((__ptrace_request)PT_GETREGS, processId, (caddr_t) &ptrace_registers, 0) == -1)
+ if (ptrace((__ptrace_request)PTRACE_GETREGS, processId, (caddr_t) &ptrace_registers, 0) == -1)
#elif HAVE_BSD_REGS_T
struct reg ptrace_registers;
if (PAL_PTRACE(PT_GETREGS, processId, &ptrace_registers, 0) == -1)
@@ -352,7 +352,7 @@ CONTEXT_SetThreadContext(
(CONTEXT_CONTROL | CONTEXT_INTEGER) & CONTEXT_AREA_MASK)
{
#if HAVE_PT_REGS
- if (ptrace((__ptrace_request)PT_GETREGS, dwProcessId, (caddr_t)&ptrace_registers, 0) == -1)
+ if (ptrace((__ptrace_request)PTRACE_GETREGS, dwProcessId, (caddr_t)&ptrace_registers, 0) == -1)
#elif HAVE_BSD_REGS_T
if (PAL_PTRACE(PT_GETREGS, dwProcessId, &ptrace_registers, 0) == -1)
#endif
@@ -383,7 +383,7 @@ CONTEXT_SetThreadContext(
#undef ASSIGN_REG
#if HAVE_PT_REGS
- if (ptrace((__ptrace_request)PT_SETREGS, dwProcessId, (caddr_t)&ptrace_registers, 0) == -1)
+ if (ptrace((__ptrace_request)PTRACE_SETREGS, dwProcessId, (caddr_t)&ptrace_registers, 0) == -1)
#elif HAVE_BSD_REGS_T
if (PAL_PTRACE(PT_SETREGS, dwProcessId, &ptrace_registers, 0) == -1)
#endif
diff --git a/src/coreclr/src/scripts/genEventing.py b/src/coreclr/src/scripts/genEventing.py
index 50fde02fbd02c6..69c96adf5a063f 100644
--- a/src/coreclr/src/scripts/genEventing.py
+++ b/src/coreclr/src/scripts/genEventing.py
@@ -639,7 +639,8 @@ def generatePlatformIndependentFiles(sClrEtwAllMan, incDir, etmDummyFile, extern
{
int const Level;
ULONGLONG const Keyword;
-} EVENT_DESCRIPTOR;""")
+} EVENT_DESCRIPTOR;
+""")
if not is_windows:
Clrproviders.write(eventpipe_trace_context_typedef) # define EVENTPIPE_TRACE_CONTEXT
diff --git a/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSet.cs b/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSet.cs
new file mode 100644
index 00000000000000..aff37abdf06aca
--- /dev/null
+++ b/src/coreclr/src/tools/Common/Internal/Runtime/ReadyToRunInstructionSet.cs
@@ -0,0 +1,134 @@
+
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+using System;
+using System.Runtime.InteropServices;
+using Internal.TypeSystem;
+
+namespace Internal.ReadyToRunConstants
+{
+ public enum ReadyToRunInstructionSet
+ {
+ Sse=1,
+ Sse2=2,
+ Sse3=3,
+ Ssse3=4,
+ Sse41=5,
+ Sse42=6,
+ Avx=7,
+ Avx2=8,
+ Aes=9,
+ Bmi1=10,
+ Bmi2=11,
+ Fma=12,
+ Lzcnt=13,
+ Pclmulqdq=14,
+ Popcnt=15,
+ ArmBase=16,
+ AdvSimd=17,
+ Crc32=18,
+ Sha1=19,
+ Sha256=20,
+ Atomics=21,
+
+ }
+
+ public static class ReadyToRunInstructionSetHelper
+ {
+ ReadyToRunInstructionSet? R2RInstructionSetFromJitInstructionSet(TargetArchitecture architecture, Internal.JitInterface.InstructionSet instructionSet)
+ {
+ switch (architecture)
+ {
+
+ case TargetArchitecture.ARM64:
+ {
+ switch (instructionSet)
+ {
+ case InstructionSet.ARM64_ArmBase: return ReadyToRunInstructionSet.ArmBase;
+ case InstructionSet.ARM64_ArmBase_Arm64: return ReadyToRunInstructionSet.ArmBase;
+ case InstructionSet.ARM64_AdvSimd: return ReadyToRunInstructionSet.AdvSimd;
+ case InstructionSet.ARM64_AdvSimd_Arm64: return ReadyToRunInstructionSet.AdvSimd;
+ case InstructionSet.ARM64_Aes: return ReadyToRunInstructionSet.Aes;
+ case InstructionSet.ARM64_Crc32: return ReadyToRunInstructionSet.Crc32;
+ case InstructionSet.ARM64_Crc32_Arm64: return ReadyToRunInstructionSet.Crc32;
+ case InstructionSet.ARM64_Sha1: return ReadyToRunInstructionSet.Sha1;
+ case InstructionSet.ARM64_Sha256: return ReadyToRunInstructionSet.Sha256;
+ case InstructionSet.ARM64_Atomics: return ReadyToRunInstructionSet.Atomics;
+ case InstructionSet.ARM64_Vector64: return null;
+ case InstructionSet.ARM64_Vector128: return null;
+
+ default: throw new Exception("Unknown instruction set");
+ }
+ }
+
+ case TargetArchitecture.X64:
+ {
+ switch (instructionSet)
+ {
+ case InstructionSet.X64_SSE: return ReadyToRunInstructionSet.Sse;
+ case InstructionSet.X64_SSE_X64: return ReadyToRunInstructionSet.Sse;
+ case InstructionSet.X64_SSE2: return ReadyToRunInstructionSet.Sse2;
+ case InstructionSet.X64_SSE2_X64: return ReadyToRunInstructionSet.Sse2;
+ case InstructionSet.X64_SSE3: return ReadyToRunInstructionSet.Sse3;
+ case InstructionSet.X64_SSSE3: return ReadyToRunInstructionSet.Ssse3;
+ case InstructionSet.X64_SSE41: return ReadyToRunInstructionSet.Sse41;
+ case InstructionSet.X64_SSE41_X64: return ReadyToRunInstructionSet.Sse41;
+ case InstructionSet.X64_SSE42: return ReadyToRunInstructionSet.Sse42;
+ case InstructionSet.X64_SSE42_X64: return ReadyToRunInstructionSet.Sse42;
+ case InstructionSet.X64_AVX: return ReadyToRunInstructionSet.Avx;
+ case InstructionSet.X64_AVX2: return ReadyToRunInstructionSet.Avx2;
+ case InstructionSet.X64_AES: return ReadyToRunInstructionSet.Aes;
+ case InstructionSet.X64_BMI1: return ReadyToRunInstructionSet.Bmi1;
+ case InstructionSet.X64_BMI1_X64: return ReadyToRunInstructionSet.Bmi1;
+ case InstructionSet.X64_BMI2: return ReadyToRunInstructionSet.Bmi2;
+ case InstructionSet.X64_BMI2_X64: return ReadyToRunInstructionSet.Bmi2;
+ case InstructionSet.X64_FMA: return ReadyToRunInstructionSet.Fma;
+ case InstructionSet.X64_LZCNT: return ReadyToRunInstructionSet.Lzcnt;
+ case InstructionSet.X64_LZCNT_X64: return ReadyToRunInstructionSet.Lzcnt;
+ case InstructionSet.X64_PCLMULQDQ: return ReadyToRunInstructionSet.Pclmulqdq;
+ case InstructionSet.X64_POPCNT: return ReadyToRunInstructionSet.Popcnt;
+ case InstructionSet.X64_POPCNT_X64: return ReadyToRunInstructionSet.Popcnt;
+ case InstructionSet.X64_Vector128: return null;
+ case InstructionSet.X64_Vector256: return null;
+
+ default: throw new Exception("Unknown instruction set");
+ }
+ }
+
+ case TargetArchitecture.X86:
+ {
+ switch (instructionSet)
+ {
+ case InstructionSet.X86_SSE: return ReadyToRunInstructionSet.Sse;
+ case InstructionSet.X86_SSE2: return ReadyToRunInstructionSet.Sse2;
+ case InstructionSet.X86_SSE3: return ReadyToRunInstructionSet.Sse3;
+ case InstructionSet.X86_SSSE3: return ReadyToRunInstructionSet.Ssse3;
+ case InstructionSet.X86_SSE41: return ReadyToRunInstructionSet.Sse41;
+ case InstructionSet.X86_SSE42: return ReadyToRunInstructionSet.Sse42;
+ case InstructionSet.X86_AVX: return ReadyToRunInstructionSet.Avx;
+ case InstructionSet.X86_AVX2: return ReadyToRunInstructionSet.Avx2;
+ case InstructionSet.X86_AES: return ReadyToRunInstructionSet.Aes;
+ case InstructionSet.X86_BMI1: return ReadyToRunInstructionSet.Bmi1;
+ case InstructionSet.X86_BMI2: return ReadyToRunInstructionSet.Bmi2;
+ case InstructionSet.X86_FMA: return ReadyToRunInstructionSet.Fma;
+ case InstructionSet.X86_LZCNT: return ReadyToRunInstructionSet.Lzcnt;
+ case InstructionSet.X86_PCLMULQDQ: return ReadyToRunInstructionSet.Pclmulqdq;
+ case InstructionSet.X86_POPCNT: return ReadyToRunInstructionSet.Popcnt;
+ case InstructionSet.X86_Vector128: return null;
+ case InstructionSet.X86_Vector256: return null;
+
+ default: throw new Exception("Unknown instruction set");
+ }
+ }
+
+ default: throw new Exception("Unknown architecture");
+ }
+ }
+ }
+}
diff --git a/src/coreclr/src/tools/Common/JitInterface/CorInfoBase.cs b/src/coreclr/src/tools/Common/JitInterface/CorInfoBase.cs
index 0a6672326e722f..661d016a0362c5 100644
--- a/src/coreclr/src/tools/Common/JitInterface/CorInfoBase.cs
+++ b/src/coreclr/src/tools/Common/JitInterface/CorInfoBase.cs
@@ -62,6 +62,10 @@ unsafe partial class CorInfoImpl
[UnmanagedFunctionPointerAttribute(default(CallingConvention))]
delegate void __getGSCookie(IntPtr _this, IntPtr* ppException, IntPtr* pCookieVal, IntPtr** ppCookieVal);
[UnmanagedFunctionPointerAttribute(default(CallingConvention))]
+ delegate void __setPatchpointInfo(IntPtr _this, IntPtr* ppException, PatchpointInfo* patchpointInfo);
+ [UnmanagedFunctionPointerAttribute(default(CallingConvention))]
+ delegate PatchpointInfo* __getOSRInfo(IntPtr _this, IntPtr* ppException, ref uint ilOffset);
+ [UnmanagedFunctionPointerAttribute(default(CallingConvention))]
delegate void __resolveToken(IntPtr _this, IntPtr* ppException, ref CORINFO_RESOLVED_TOKEN pResolvedToken);
[UnmanagedFunctionPointerAttribute(default(CallingConvention))]
delegate void __tryResolveToken(IntPtr _this, IntPtr* ppException, ref CORINFO_RESOLVED_TOKEN pResolvedToken);
@@ -689,6 +693,33 @@ static void _getGSCookie(IntPtr thisHandle, IntPtr* ppException, IntPtr* pCookie
}
}
+ static void _setPatchpointInfo(IntPtr thisHandle, IntPtr* ppException, PatchpointInfo* patchpointInfo)
+ {
+ var _this = GetThis(thisHandle);
+ try
+ {
+ _this.setPatchpointInfo(patchpointInfo);
+ }
+ catch (Exception ex)
+ {
+ *ppException = _this.AllocException(ex);
+ }
+ }
+
+ static PatchpointInfo* _getOSRInfo(IntPtr thisHandle, IntPtr* ppException, ref uint ilOffset)
+ {
+ var _this = GetThis(thisHandle);
+ try
+ {
+ return _this.getOSRInfo(ref ilOffset);
+ }
+ catch (Exception ex)
+ {
+ *ppException = _this.AllocException(ex);
+ return default(PatchpointInfo*);
+ }
+ }
+
static void _resolveToken(IntPtr thisHandle, IntPtr* ppException, ref CORINFO_RESOLVED_TOKEN pResolvedToken)
{
var _this = GetThis(thisHandle);
@@ -2659,9 +2690,17 @@ static uint _getJitFlags(IntPtr thisHandle, IntPtr* ppException, ref CORJIT_FLAG
static IntPtr GetUnmanagedCallbacks(out Object keepAlive)
{
+
-            IntPtr * callbacks = (IntPtr *)Marshal.AllocCoTaskMem(sizeof(IntPtr) * 167);
-            Object[] delegates = new Object[167];
+            IntPtr * callbacks = (IntPtr *)Marshal.AllocCoTaskMem(sizeof(IntPtr) * 169);
+            Object[] delegates = new Object[169];
+
+
var d0 = new __getMethodAttribs(_getMethodAttribs);
callbacks[0] = Marshal.GetFunctionPointerForDelegate(d0);
delegates[0] = d0;
@@ -2737,439 +2776,377 @@ static IntPtr GetUnmanagedCallbacks(out Object keepAlive)
var d24 = new __getGSCookie(_getGSCookie);
callbacks[24] = Marshal.GetFunctionPointerForDelegate(d24);
delegates[24] = d24;
- var d25 = new __resolveToken(_resolveToken);
+ var d25 = new __setPatchpointInfo(_setPatchpointInfo);
callbacks[25] = Marshal.GetFunctionPointerForDelegate(d25);
delegates[25] = d25;
- var d26 = new __tryResolveToken(_tryResolveToken);
+ var d26 = new __getOSRInfo(_getOSRInfo);
callbacks[26] = Marshal.GetFunctionPointerForDelegate(d26);
delegates[26] = d26;
- var d27 = new __findSig(_findSig);
+ var d27 = new __resolveToken(_resolveToken);
callbacks[27] = Marshal.GetFunctionPointerForDelegate(d27);
delegates[27] = d27;
- var d28 = new __findCallSiteSig(_findCallSiteSig);
+ var d28 = new __tryResolveToken(_tryResolveToken);
callbacks[28] = Marshal.GetFunctionPointerForDelegate(d28);
delegates[28] = d28;
- var d29 = new __getTokenTypeAsHandle(_getTokenTypeAsHandle);
+ var d29 = new __findSig(_findSig);
callbacks[29] = Marshal.GetFunctionPointerForDelegate(d29);
delegates[29] = d29;
- var d30 = new __isValidToken(_isValidToken);
+ var d30 = new __findCallSiteSig(_findCallSiteSig);
callbacks[30] = Marshal.GetFunctionPointerForDelegate(d30);
delegates[30] = d30;
- var d31 = new __isValidStringRef(_isValidStringRef);
+ var d31 = new __getTokenTypeAsHandle(_getTokenTypeAsHandle);
callbacks[31] = Marshal.GetFunctionPointerForDelegate(d31);
delegates[31] = d31;
- var d32 = new __getStringLiteral(_getStringLiteral);
+ var d32 = new __isValidToken(_isValidToken);
callbacks[32] = Marshal.GetFunctionPointerForDelegate(d32);
delegates[32] = d32;
- var d33 = new __asCorInfoType(_asCorInfoType);
+ var d33 = new __isValidStringRef(_isValidStringRef);
callbacks[33] = Marshal.GetFunctionPointerForDelegate(d33);
delegates[33] = d33;
- var d34 = new __getClassName(_getClassName);
+ var d34 = new __getStringLiteral(_getStringLiteral);
callbacks[34] = Marshal.GetFunctionPointerForDelegate(d34);
delegates[34] = d34;
- var d35 = new __getClassNameFromMetadata(_getClassNameFromMetadata);
+ var d35 = new __asCorInfoType(_asCorInfoType);
callbacks[35] = Marshal.GetFunctionPointerForDelegate(d35);
delegates[35] = d35;
- var d36 = new __getTypeInstantiationArgument(_getTypeInstantiationArgument);
+ var d36 = new __getClassName(_getClassName);
callbacks[36] = Marshal.GetFunctionPointerForDelegate(d36);
delegates[36] = d36;
- var d37 = new __appendClassName(_appendClassName);
+ var d37 = new __getClassNameFromMetadata(_getClassNameFromMetadata);
callbacks[37] = Marshal.GetFunctionPointerForDelegate(d37);
delegates[37] = d37;
- var d38 = new __isValueClass(_isValueClass);
+ var d38 = new __getTypeInstantiationArgument(_getTypeInstantiationArgument);
callbacks[38] = Marshal.GetFunctionPointerForDelegate(d38);
delegates[38] = d38;
- var d39 = new __canInlineTypeCheck(_canInlineTypeCheck);
+ var d39 = new __appendClassName(_appendClassName);
callbacks[39] = Marshal.GetFunctionPointerForDelegate(d39);
delegates[39] = d39;
- var d40 = new __getClassAttribs(_getClassAttribs);
+ var d40 = new __isValueClass(_isValueClass);
callbacks[40] = Marshal.GetFunctionPointerForDelegate(d40);
delegates[40] = d40;
- var d41 = new __isStructRequiringStackAllocRetBuf(_isStructRequiringStackAllocRetBuf);
+ var d41 = new __canInlineTypeCheck(_canInlineTypeCheck);
callbacks[41] = Marshal.GetFunctionPointerForDelegate(d41);
delegates[41] = d41;
- var d42 = new __getClassModule(_getClassModule);
+ var d42 = new __getClassAttribs(_getClassAttribs);
callbacks[42] = Marshal.GetFunctionPointerForDelegate(d42);
delegates[42] = d42;
- var d43 = new __getModuleAssembly(_getModuleAssembly);
+ var d43 = new __isStructRequiringStackAllocRetBuf(_isStructRequiringStackAllocRetBuf);
callbacks[43] = Marshal.GetFunctionPointerForDelegate(d43);
delegates[43] = d43;
- var d44 = new __getAssemblyName(_getAssemblyName);
+ var d44 = new __getClassModule(_getClassModule);
callbacks[44] = Marshal.GetFunctionPointerForDelegate(d44);
delegates[44] = d44;
- var d45 = new __LongLifetimeMalloc(_LongLifetimeMalloc);
+ var d45 = new __getModuleAssembly(_getModuleAssembly);
callbacks[45] = Marshal.GetFunctionPointerForDelegate(d45);
delegates[45] = d45;
- var d46 = new __LongLifetimeFree(_LongLifetimeFree);
+ var d46 = new __getAssemblyName(_getAssemblyName);
callbacks[46] = Marshal.GetFunctionPointerForDelegate(d46);
delegates[46] = d46;
- var d47 = new __getClassModuleIdForStatics(_getClassModuleIdForStatics);
+ var d47 = new __LongLifetimeMalloc(_LongLifetimeMalloc);
callbacks[47] = Marshal.GetFunctionPointerForDelegate(d47);
delegates[47] = d47;
- var d48 = new __getClassSize(_getClassSize);
+ var d48 = new __LongLifetimeFree(_LongLifetimeFree);
callbacks[48] = Marshal.GetFunctionPointerForDelegate(d48);
delegates[48] = d48;
- var d49 = new __getHeapClassSize(_getHeapClassSize);
+ var d49 = new __getClassModuleIdForStatics(_getClassModuleIdForStatics);
callbacks[49] = Marshal.GetFunctionPointerForDelegate(d49);
delegates[49] = d49;
- var d50 = new __canAllocateOnStack(_canAllocateOnStack);
+ var d50 = new __getClassSize(_getClassSize);
callbacks[50] = Marshal.GetFunctionPointerForDelegate(d50);
delegates[50] = d50;
- var d51 = new __getClassAlignmentRequirement(_getClassAlignmentRequirement);
+ var d51 = new __getHeapClassSize(_getHeapClassSize);
callbacks[51] = Marshal.GetFunctionPointerForDelegate(d51);
delegates[51] = d51;
- var d52 = new __getClassGClayout(_getClassGClayout);
+ var d52 = new __canAllocateOnStack(_canAllocateOnStack);
callbacks[52] = Marshal.GetFunctionPointerForDelegate(d52);
delegates[52] = d52;
- var d53 = new __getClassNumInstanceFields(_getClassNumInstanceFields);
+ var d53 = new __getClassAlignmentRequirement(_getClassAlignmentRequirement);
callbacks[53] = Marshal.GetFunctionPointerForDelegate(d53);
delegates[53] = d53;
- var d54 = new __getFieldInClass(_getFieldInClass);
+ var d54 = new __getClassGClayout(_getClassGClayout);
callbacks[54] = Marshal.GetFunctionPointerForDelegate(d54);
delegates[54] = d54;
- var d55 = new __checkMethodModifier(_checkMethodModifier);
+ var d55 = new __getClassNumInstanceFields(_getClassNumInstanceFields);
callbacks[55] = Marshal.GetFunctionPointerForDelegate(d55);
delegates[55] = d55;
- var d56 = new __getNewHelper(_getNewHelper);
+ var d56 = new __getFieldInClass(_getFieldInClass);
callbacks[56] = Marshal.GetFunctionPointerForDelegate(d56);
delegates[56] = d56;
- var d57 = new __getNewArrHelper(_getNewArrHelper);
+ var d57 = new __checkMethodModifier(_checkMethodModifier);
callbacks[57] = Marshal.GetFunctionPointerForDelegate(d57);
delegates[57] = d57;
- var d58 = new __getCastingHelper(_getCastingHelper);
+ var d58 = new __getNewHelper(_getNewHelper);
callbacks[58] = Marshal.GetFunctionPointerForDelegate(d58);
delegates[58] = d58;
- var d59 = new __getSharedCCtorHelper(_getSharedCCtorHelper);
+ var d59 = new __getNewArrHelper(_getNewArrHelper);
callbacks[59] = Marshal.GetFunctionPointerForDelegate(d59);
delegates[59] = d59;
- var d60 = new __getTypeForBox(_getTypeForBox);
+ var d60 = new __getCastingHelper(_getCastingHelper);
callbacks[60] = Marshal.GetFunctionPointerForDelegate(d60);
delegates[60] = d60;
- var d61 = new __getBoxHelper(_getBoxHelper);
+ var d61 = new __getSharedCCtorHelper(_getSharedCCtorHelper);
callbacks[61] = Marshal.GetFunctionPointerForDelegate(d61);
delegates[61] = d61;
- var d62 = new __getUnBoxHelper(_getUnBoxHelper);
+ var d62 = new __getTypeForBox(_getTypeForBox);
callbacks[62] = Marshal.GetFunctionPointerForDelegate(d62);
delegates[62] = d62;
- var d63 = new __getReadyToRunHelper(_getReadyToRunHelper);
+ var d63 = new __getBoxHelper(_getBoxHelper);
callbacks[63] = Marshal.GetFunctionPointerForDelegate(d63);
delegates[63] = d63;
- var d64 = new __getReadyToRunDelegateCtorHelper(_getReadyToRunDelegateCtorHelper);
+ var d64 = new __getUnBoxHelper(_getUnBoxHelper);
callbacks[64] = Marshal.GetFunctionPointerForDelegate(d64);
delegates[64] = d64;
- var d65 = new __getHelperName(_getHelperName);
+ var d65 = new __getReadyToRunHelper(_getReadyToRunHelper);
callbacks[65] = Marshal.GetFunctionPointerForDelegate(d65);
delegates[65] = d65;
- var d66 = new __initClass(_initClass);
+ var d66 = new __getReadyToRunDelegateCtorHelper(_getReadyToRunDelegateCtorHelper);
callbacks[66] = Marshal.GetFunctionPointerForDelegate(d66);
delegates[66] = d66;
- var d67 = new __classMustBeLoadedBeforeCodeIsRun(_classMustBeLoadedBeforeCodeIsRun);
+ var d67 = new __getHelperName(_getHelperName);
callbacks[67] = Marshal.GetFunctionPointerForDelegate(d67);
delegates[67] = d67;
- var d68 = new __getBuiltinClass(_getBuiltinClass);
+ var d68 = new __initClass(_initClass);
callbacks[68] = Marshal.GetFunctionPointerForDelegate(d68);
delegates[68] = d68;
- var d69 = new __getTypeForPrimitiveValueClass(_getTypeForPrimitiveValueClass);
+ var d69 = new __classMustBeLoadedBeforeCodeIsRun(_classMustBeLoadedBeforeCodeIsRun);
callbacks[69] = Marshal.GetFunctionPointerForDelegate(d69);
delegates[69] = d69;
- var d70 = new __getTypeForPrimitiveNumericClass(_getTypeForPrimitiveNumericClass);
+ var d70 = new __getBuiltinClass(_getBuiltinClass);
callbacks[70] = Marshal.GetFunctionPointerForDelegate(d70);
delegates[70] = d70;
- var d71 = new __canCast(_canCast);
+ var d71 = new __getTypeForPrimitiveValueClass(_getTypeForPrimitiveValueClass);
callbacks[71] = Marshal.GetFunctionPointerForDelegate(d71);
delegates[71] = d71;
- var d72 = new __areTypesEquivalent(_areTypesEquivalent);
+ var d72 = new __getTypeForPrimitiveNumericClass(_getTypeForPrimitiveNumericClass);
callbacks[72] = Marshal.GetFunctionPointerForDelegate(d72);
delegates[72] = d72;
- var d73 = new __compareTypesForCast(_compareTypesForCast);
+ var d73 = new __canCast(_canCast);
callbacks[73] = Marshal.GetFunctionPointerForDelegate(d73);
delegates[73] = d73;
- var d74 = new __compareTypesForEquality(_compareTypesForEquality);
+ var d74 = new __areTypesEquivalent(_areTypesEquivalent);
callbacks[74] = Marshal.GetFunctionPointerForDelegate(d74);
delegates[74] = d74;
- var d75 = new __mergeClasses(_mergeClasses);
+ var d75 = new __compareTypesForCast(_compareTypesForCast);
callbacks[75] = Marshal.GetFunctionPointerForDelegate(d75);
delegates[75] = d75;
- var d76 = new __isMoreSpecificType(_isMoreSpecificType);
+ var d76 = new __compareTypesForEquality(_compareTypesForEquality);
callbacks[76] = Marshal.GetFunctionPointerForDelegate(d76);
delegates[76] = d76;
- var d77 = new __getParentType(_getParentType);
+ var d77 = new __mergeClasses(_mergeClasses);
callbacks[77] = Marshal.GetFunctionPointerForDelegate(d77);
delegates[77] = d77;
- var d78 = new __getChildType(_getChildType);
+ var d78 = new __isMoreSpecificType(_isMoreSpecificType);
callbacks[78] = Marshal.GetFunctionPointerForDelegate(d78);
delegates[78] = d78;
- var d79 = new __satisfiesClassConstraints(_satisfiesClassConstraints);
+ var d79 = new __getParentType(_getParentType);
callbacks[79] = Marshal.GetFunctionPointerForDelegate(d79);
delegates[79] = d79;
- var d80 = new __isSDArray(_isSDArray);
+ var d80 = new __getChildType(_getChildType);
callbacks[80] = Marshal.GetFunctionPointerForDelegate(d80);
delegates[80] = d80;
- var d81 = new __getArrayRank(_getArrayRank);
+ var d81 = new __satisfiesClassConstraints(_satisfiesClassConstraints);
callbacks[81] = Marshal.GetFunctionPointerForDelegate(d81);
delegates[81] = d81;
- var d82 = new __getArrayInitializationData(_getArrayInitializationData);
+ var d82 = new __isSDArray(_isSDArray);
callbacks[82] = Marshal.GetFunctionPointerForDelegate(d82);
delegates[82] = d82;
- var d83 = new __canAccessClass(_canAccessClass);
+ var d83 = new __getArrayRank(_getArrayRank);
callbacks[83] = Marshal.GetFunctionPointerForDelegate(d83);
delegates[83] = d83;
- var d84 = new __getFieldName(_getFieldName);
+ var d84 = new __getArrayInitializationData(_getArrayInitializationData);
callbacks[84] = Marshal.GetFunctionPointerForDelegate(d84);
delegates[84] = d84;
- var d85 = new __getFieldClass(_getFieldClass);
+ var d85 = new __canAccessClass(_canAccessClass);
callbacks[85] = Marshal.GetFunctionPointerForDelegate(d85);
delegates[85] = d85;
- var d86 = new __getFieldType(_getFieldType);
+ var d86 = new __getFieldName(_getFieldName);
callbacks[86] = Marshal.GetFunctionPointerForDelegate(d86);
delegates[86] = d86;
- var d87 = new __getFieldOffset(_getFieldOffset);
+ var d87 = new __getFieldClass(_getFieldClass);
callbacks[87] = Marshal.GetFunctionPointerForDelegate(d87);
delegates[87] = d87;
- var d88 = new __getFieldInfo(_getFieldInfo);
+ var d88 = new __getFieldType(_getFieldType);
callbacks[88] = Marshal.GetFunctionPointerForDelegate(d88);
delegates[88] = d88;
- var d89 = new __isFieldStatic(_isFieldStatic);
+ var d89 = new __getFieldOffset(_getFieldOffset);
callbacks[89] = Marshal.GetFunctionPointerForDelegate(d89);
delegates[89] = d89;
- var d90 = new __getBoundaries(_getBoundaries);
+ var d90 = new __getFieldInfo(_getFieldInfo);
callbacks[90] = Marshal.GetFunctionPointerForDelegate(d90);
delegates[90] = d90;
- var d91 = new __setBoundaries(_setBoundaries);
+ var d91 = new __isFieldStatic(_isFieldStatic);
callbacks[91] = Marshal.GetFunctionPointerForDelegate(d91);
delegates[91] = d91;
- var d92 = new __getVars(_getVars);
+ var d92 = new __getBoundaries(_getBoundaries);
callbacks[92] = Marshal.GetFunctionPointerForDelegate(d92);
delegates[92] = d92;
- var d93 = new __setVars(_setVars);
+ var d93 = new __setBoundaries(_setBoundaries);
callbacks[93] = Marshal.GetFunctionPointerForDelegate(d93);
delegates[93] = d93;
- var d94 = new __allocateArray(_allocateArray);
+ var d94 = new __getVars(_getVars);
callbacks[94] = Marshal.GetFunctionPointerForDelegate(d94);
delegates[94] = d94;
- var d95 = new __freeArray(_freeArray);
+ var d95 = new __setVars(_setVars);
callbacks[95] = Marshal.GetFunctionPointerForDelegate(d95);
delegates[95] = d95;
- var d96 = new __getArgNext(_getArgNext);
+ var d96 = new __allocateArray(_allocateArray);
callbacks[96] = Marshal.GetFunctionPointerForDelegate(d96);
delegates[96] = d96;
- var d97 = new __getArgType(_getArgType);
+ var d97 = new __freeArray(_freeArray);
callbacks[97] = Marshal.GetFunctionPointerForDelegate(d97);
delegates[97] = d97;
- var d98 = new __getArgClass(_getArgClass);
+ var d98 = new __getArgNext(_getArgNext);
callbacks[98] = Marshal.GetFunctionPointerForDelegate(d98);
delegates[98] = d98;
- var d99 = new __getHFAType(_getHFAType);
+ var d99 = new __getArgType(_getArgType);
callbacks[99] = Marshal.GetFunctionPointerForDelegate(d99);
delegates[99] = d99;
- var d100 = new __GetErrorHRESULT(_GetErrorHRESULT);
+ var d100 = new __getArgClass(_getArgClass);
callbacks[100] = Marshal.GetFunctionPointerForDelegate(d100);
delegates[100] = d100;
- var d101 = new __GetErrorMessage(_GetErrorMessage);
+ var d101 = new __getHFAType(_getHFAType);
callbacks[101] = Marshal.GetFunctionPointerForDelegate(d101);
delegates[101] = d101;
- var d102 = new __FilterException(_FilterException);
+ var d102 = new __GetErrorHRESULT(_GetErrorHRESULT);
callbacks[102] = Marshal.GetFunctionPointerForDelegate(d102);
delegates[102] = d102;
- var d103 = new __HandleException(_HandleException);
+ var d103 = new __GetErrorMessage(_GetErrorMessage);
callbacks[103] = Marshal.GetFunctionPointerForDelegate(d103);
delegates[103] = d103;
- var d104 = new __ThrowExceptionForJitResult(_ThrowExceptionForJitResult);
+ var d104 = new __FilterException(_FilterException);
callbacks[104] = Marshal.GetFunctionPointerForDelegate(d104);
delegates[104] = d104;
- var d105 = new __ThrowExceptionForHelper(_ThrowExceptionForHelper);
+ var d105 = new __HandleException(_HandleException);
callbacks[105] = Marshal.GetFunctionPointerForDelegate(d105);
delegates[105] = d105;
- var d106 = new __runWithErrorTrap(_runWithErrorTrap);
+ var d106 = new __ThrowExceptionForJitResult(_ThrowExceptionForJitResult);
callbacks[106] = Marshal.GetFunctionPointerForDelegate(d106);
delegates[106] = d106;
- var d107 = new __getEEInfo(_getEEInfo);
+ var d107 = new __ThrowExceptionForHelper(_ThrowExceptionForHelper);
callbacks[107] = Marshal.GetFunctionPointerForDelegate(d107);
delegates[107] = d107;
- var d108 = new __getJitTimeLogFilename(_getJitTimeLogFilename);
+ var d108 = new __runWithErrorTrap(_runWithErrorTrap);
callbacks[108] = Marshal.GetFunctionPointerForDelegate(d108);
delegates[108] = d108;
- var d109 = new __getMethodDefFromMethod(_getMethodDefFromMethod);
+ var d109 = new __getEEInfo(_getEEInfo);
callbacks[109] = Marshal.GetFunctionPointerForDelegate(d109);
delegates[109] = d109;
- var d110 = new __getMethodName(_getMethodName);
+ var d110 = new __getJitTimeLogFilename(_getJitTimeLogFilename);
callbacks[110] = Marshal.GetFunctionPointerForDelegate(d110);
delegates[110] = d110;
- var d111 = new __getMethodNameFromMetadata(_getMethodNameFromMetadata);
+ var d111 = new __getMethodDefFromMethod(_getMethodDefFromMethod);
callbacks[111] = Marshal.GetFunctionPointerForDelegate(d111);
delegates[111] = d111;
- var d112 = new __getMethodHash(_getMethodHash);
+ var d112 = new __getMethodName(_getMethodName);
callbacks[112] = Marshal.GetFunctionPointerForDelegate(d112);
delegates[112] = d112;
- var d113 = new __findNameOfToken(_findNameOfToken);
+ var d113 = new __getMethodNameFromMetadata(_getMethodNameFromMetadata);
callbacks[113] = Marshal.GetFunctionPointerForDelegate(d113);
delegates[113] = d113;
- var d114 = new __getSystemVAmd64PassStructInRegisterDescriptor(_getSystemVAmd64PassStructInRegisterDescriptor);
+ var d114 = new __getMethodHash(_getMethodHash);
callbacks[114] = Marshal.GetFunctionPointerForDelegate(d114);
delegates[114] = d114;
- var d115 = new __getThreadTLSIndex(_getThreadTLSIndex);
+ var d115 = new __findNameOfToken(_findNameOfToken);
callbacks[115] = Marshal.GetFunctionPointerForDelegate(d115);
delegates[115] = d115;
- var d116 = new __getInlinedCallFrameVptr(_getInlinedCallFrameVptr);
+ var d116 = new __getSystemVAmd64PassStructInRegisterDescriptor(_getSystemVAmd64PassStructInRegisterDescriptor);
callbacks[116] = Marshal.GetFunctionPointerForDelegate(d116);
delegates[116] = d116;
- var d117 = new __getAddrOfCaptureThreadGlobal(_getAddrOfCaptureThreadGlobal);
+ var d117 = new __getThreadTLSIndex(_getThreadTLSIndex);
callbacks[117] = Marshal.GetFunctionPointerForDelegate(d117);
delegates[117] = d117;
- var d118 = new __getHelperFtn(_getHelperFtn);
+ var d118 = new __getInlinedCallFrameVptr(_getInlinedCallFrameVptr);
callbacks[118] = Marshal.GetFunctionPointerForDelegate(d118);
delegates[118] = d118;
- var d119 = new __getFunctionEntryPoint(_getFunctionEntryPoint);
+ var d119 = new __getAddrOfCaptureThreadGlobal(_getAddrOfCaptureThreadGlobal);
callbacks[119] = Marshal.GetFunctionPointerForDelegate(d119);
delegates[119] = d119;
- var d120 = new __getFunctionFixedEntryPoint(_getFunctionFixedEntryPoint);
+ var d120 = new __getHelperFtn(_getHelperFtn);
callbacks[120] = Marshal.GetFunctionPointerForDelegate(d120);
delegates[120] = d120;
- var d121 = new __getMethodSync(_getMethodSync);
+ var d121 = new __getFunctionEntryPoint(_getFunctionEntryPoint);
callbacks[121] = Marshal.GetFunctionPointerForDelegate(d121);
delegates[121] = d121;
- var d122 = new __getLazyStringLiteralHelper(_getLazyStringLiteralHelper);
+ var d122 = new __getFunctionFixedEntryPoint(_getFunctionFixedEntryPoint);
callbacks[122] = Marshal.GetFunctionPointerForDelegate(d122);
delegates[122] = d122;
- var d123 = new __embedModuleHandle(_embedModuleHandle);
+ var d123 = new __getMethodSync(_getMethodSync);
callbacks[123] = Marshal.GetFunctionPointerForDelegate(d123);
delegates[123] = d123;
- var d124 = new __embedClassHandle(_embedClassHandle);
+ var d124 = new __getLazyStringLiteralHelper(_getLazyStringLiteralHelper);
callbacks[124] = Marshal.GetFunctionPointerForDelegate(d124);
delegates[124] = d124;
- var d125 = new __embedMethodHandle(_embedMethodHandle);
+ var d125 = new __embedModuleHandle(_embedModuleHandle);
callbacks[125] = Marshal.GetFunctionPointerForDelegate(d125);
delegates[125] = d125;
- var d126 = new __embedFieldHandle(_embedFieldHandle);
+ var d126 = new __embedClassHandle(_embedClassHandle);
callbacks[126] = Marshal.GetFunctionPointerForDelegate(d126);
delegates[126] = d126;
- var d127 = new __embedGenericHandle(_embedGenericHandle);
+ var d127 = new __embedMethodHandle(_embedMethodHandle);
callbacks[127] = Marshal.GetFunctionPointerForDelegate(d127);
delegates[127] = d127;
- var d128 = new __getLocationOfThisType(_getLocationOfThisType);
+ var d128 = new __embedFieldHandle(_embedFieldHandle);
callbacks[128] = Marshal.GetFunctionPointerForDelegate(d128);
delegates[128] = d128;
- var d129 = new __getAddressOfPInvokeTarget(_getAddressOfPInvokeTarget);
+ var d129 = new __embedGenericHandle(_embedGenericHandle);
callbacks[129] = Marshal.GetFunctionPointerForDelegate(d129);
delegates[129] = d129;
- var d130 = new __GetCookieForPInvokeCalliSig(_GetCookieForPInvokeCalliSig);
+ var d130 = new __getLocationOfThisType(_getLocationOfThisType);
callbacks[130] = Marshal.GetFunctionPointerForDelegate(d130);
delegates[130] = d130;
- var d131 = new __canGetCookieForPInvokeCalliSig(_canGetCookieForPInvokeCalliSig);
+ var d131 = new __getAddressOfPInvokeTarget(_getAddressOfPInvokeTarget);
callbacks[131] = Marshal.GetFunctionPointerForDelegate(d131);
delegates[131] = d131;
- var d132 = new __getJustMyCodeHandle(_getJustMyCodeHandle);
+ var d132 = new __GetCookieForPInvokeCalliSig(_GetCookieForPInvokeCalliSig);
callbacks[132] = Marshal.GetFunctionPointerForDelegate(d132);
delegates[132] = d132;
- var d133 = new __GetProfilingHandle(_GetProfilingHandle);
+ var d133 = new __canGetCookieForPInvokeCalliSig(_canGetCookieForPInvokeCalliSig);
callbacks[133] = Marshal.GetFunctionPointerForDelegate(d133);
delegates[133] = d133;
- var d134 = new __getCallInfo(_getCallInfo);
+ var d134 = new __getJustMyCodeHandle(_getJustMyCodeHandle);
callbacks[134] = Marshal.GetFunctionPointerForDelegate(d134);
delegates[134] = d134;
- var d135 = new __canAccessFamily(_canAccessFamily);
+ var d135 = new __GetProfilingHandle(_GetProfilingHandle);
callbacks[135] = Marshal.GetFunctionPointerForDelegate(d135);
delegates[135] = d135;
- var d136 = new __isRIDClassDomainID(_isRIDClassDomainID);
+ var d136 = new __getCallInfo(_getCallInfo);
callbacks[136] = Marshal.GetFunctionPointerForDelegate(d136);
delegates[136] = d136;
- var d137 = new __getClassDomainID(_getClassDomainID);
+ var d137 = new __canAccessFamily(_canAccessFamily);
callbacks[137] = Marshal.GetFunctionPointerForDelegate(d137);
delegates[137] = d137;
- var d138 = new __getFieldAddress(_getFieldAddress);
+ var d138 = new __isRIDClassDomainID(_isRIDClassDomainID);
callbacks[138] = Marshal.GetFunctionPointerForDelegate(d138);
delegates[138] = d138;
- var d139 = new __getStaticFieldCurrentClass(_getStaticFieldCurrentClass);
+ var d139 = new __getClassDomainID(_getClassDomainID);
callbacks[139] = Marshal.GetFunctionPointerForDelegate(d139);
delegates[139] = d139;
- var d140 = new __getVarArgsHandle(_getVarArgsHandle);
+ var d140 = new __getFieldAddress(_getFieldAddress);
callbacks[140] = Marshal.GetFunctionPointerForDelegate(d140);
delegates[140] = d140;
- var d141 = new __canGetVarArgsHandle(_canGetVarArgsHandle);
+ var d141 = new __getStaticFieldCurrentClass(_getStaticFieldCurrentClass);
callbacks[141] = Marshal.GetFunctionPointerForDelegate(d141);
delegates[141] = d141;
- var d142 = new __constructStringLiteral(_constructStringLiteral);
+ var d142 = new __getVarArgsHandle(_getVarArgsHandle);
callbacks[142] = Marshal.GetFunctionPointerForDelegate(d142);
delegates[142] = d142;
- var d143 = new __emptyStringLiteral(_emptyStringLiteral);
+ var d143 = new __canGetVarArgsHandle(_canGetVarArgsHandle);
callbacks[143] = Marshal.GetFunctionPointerForDelegate(d143);
delegates[143] = d143;
- var d144 = new __getFieldThreadLocalStoreID(_getFieldThreadLocalStoreID);
+ var d144 = new __constructStringLiteral(_constructStringLiteral);
callbacks[144] = Marshal.GetFunctionPointerForDelegate(d144);
delegates[144] = d144;
- var d145 = new __setOverride(_setOverride);
+ var d145 = new __emptyStringLiteral(_emptyStringLiteral);
callbacks[145] = Marshal.GetFunctionPointerForDelegate(d145);
delegates[145] = d145;
- var d146 = new __addActiveDependency(_addActiveDependency);
+ var d146 = new __getFieldThreadLocalStoreID(_getFieldThreadLocalStoreID);
callbacks[146] = Marshal.GetFunctionPointerForDelegate(d146);
delegates[146] = d146;
- var d147 = new __GetDelegateCtor(_GetDelegateCtor);
+ var d147 = new __setOverride(_setOverride);
callbacks[147] = Marshal.GetFunctionPointerForDelegate(d147);
delegates[147] = d147;
- var d148 = new __MethodCompileComplete(_MethodCompileComplete);
+ var d148 = new __addActiveDependency(_addActiveDependency);
callbacks[148] = Marshal.GetFunctionPointerForDelegate(d148);
delegates[148] = d148;
- var d149 = new __getTailCallCopyArgsThunk(_getTailCallCopyArgsThunk);
+ var d149 = new __GetDelegateCtor(_GetDelegateCtor);
callbacks[149] = Marshal.GetFunctionPointerForDelegate(d149);
- delegates[149] = d149;
- var d150 = new __getTailCallHelpers(_getTailCallHelpers);
- callbacks[150] = Marshal.GetFunctionPointerForDelegate(d150);
- delegates[150] = d150;
- var d151 = new __convertPInvokeCalliToCall(_convertPInvokeCalliToCall);
- callbacks[151] = Marshal.GetFunctionPointerForDelegate(d151);
- delegates[151] = d151;
- var d152 = new __allocMem(_allocMem);
- callbacks[152] = Marshal.GetFunctionPointerForDelegate(d152);
- delegates[152] = d152;
- var d153 = new __reserveUnwindInfo(_reserveUnwindInfo);
- callbacks[153] = Marshal.GetFunctionPointerForDelegate(d153);
- delegates[153] = d153;
- var d154 = new __allocUnwindInfo(_allocUnwindInfo);
- callbacks[154] = Marshal.GetFunctionPointerForDelegate(d154);
- delegates[154] = d154;
- var d155 = new __allocGCInfo(_allocGCInfo);
- callbacks[155] = Marshal.GetFunctionPointerForDelegate(d155);
- delegates[155] = d155;
- var d156 = new __setEHcount(_setEHcount);
- callbacks[156] = Marshal.GetFunctionPointerForDelegate(d156);
- delegates[156] = d156;
- var d157 = new __setEHinfo(_setEHinfo);
- callbacks[157] = Marshal.GetFunctionPointerForDelegate(d157);
- delegates[157] = d157;
- var d158 = new __logMsg(_logMsg);
- callbacks[158] = Marshal.GetFunctionPointerForDelegate(d158);
- delegates[158] = d158;
- var d159 = new __doAssert(_doAssert);
- callbacks[159] = Marshal.GetFunctionPointerForDelegate(d159);
- delegates[159] = d159;
- var d160 = new __reportFatalError(_reportFatalError);
- callbacks[160] = Marshal.GetFunctionPointerForDelegate(d160);
- delegates[160] = d160;
- var d161 = new __allocMethodBlockCounts(_allocMethodBlockCounts);
- callbacks[161] = Marshal.GetFunctionPointerForDelegate(d161);
- delegates[161] = d161;
- var d162 = new __getMethodBlockCounts(_getMethodBlockCounts);
- callbacks[162] = Marshal.GetFunctionPointerForDelegate(d162);
- delegates[162] = d162;
- var d163 = new __recordCallSite(_recordCallSite);
- callbacks[163] = Marshal.GetFunctionPointerForDelegate(d163);
- delegates[163] = d163;
- var d164 = new __recordRelocation(_recordRelocation);
- callbacks[164] = Marshal.GetFunctionPointerForDelegate(d164);
- delegates[164] = d164;
- var d165 = new __getRelocTypeHint(_getRelocTypeHint);
- callbacks[165] = Marshal.GetFunctionPointerForDelegate(d165);
- delegates[165] = d165;
- var d166 = new __getExpectedTargetArchitecture(_getExpectedTargetArchitecture);
- callbacks[166] = Marshal.GetFunctionPointerForDelegate(d166);
- delegates[166] = d166;
- var d167 = new __getJitFlags(_getJitFlags);
- callbacks[167] = Marshal.GetFunctionPointerForDelegate(d167);
- delegates[167] = d167;
-
- keepAlive = delegates;
- return (IntPtr)callbacks;
- }
- }
-}
-
diff --git a/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs
index a7e726481e0527..0335fc436bb300 100644
--- a/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs
+++ b/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs
@@ -2861,13 +2861,36 @@ private uint getJitFlags(ref CORJIT_FLAGS flags, uint sizeInBytes)
if (targetArchitecture == TargetArchitecture.ARM && !_compilation.TypeSystemContext.Target.IsWindows)
flags.Set(CorJitFlag.CORJIT_FLAG_RELATIVE_CODE_RELOCS);
- if ((targetArchitecture == TargetArchitecture.X86
- || targetArchitecture == TargetArchitecture.X64)
+ if (targetArchitecture == TargetArchitecture.X86)
+ {
+ flags.Set(InstructionSet.X86_SSE);
+ flags.Set(InstructionSet.X86_SSE2);
+#if !READYTORUN
+ // This list needs to match the list of intrinsics we can generate detection code for
+ // in HardwareIntrinsicHelpers.EmitIsSupportedIL.
+#else
+ // For ReadyToRun, this list needs to match up with the behavior of FilterNamedIntrinsicMethodAttribs
+ // In particular, that this list of supported hardware will not generate non-SSE2 safe instruction
+ // sequences when paired with the behavior in FilterNamedIntrinsicMethodAttribs
+ if (isMethodDefinedInCoreLib())
+#endif
+ {
+ flags.Set(InstructionSet.X86_AES);
+ flags.Set(InstructionSet.X86_PCLMULQDQ);
+ flags.Set(InstructionSet.X86_SSE3);
+ flags.Set(InstructionSet.X86_SSSE3);
+ flags.Set(InstructionSet.X86_LZCNT);
#if READYTORUN
- && isMethodDefinedInCoreLib()
+ flags.Set(InstructionSet.X86_SSE41);
+ flags.Set(InstructionSet.X86_SSE42);
+ flags.Set(InstructionSet.X86_POPCNT);
#endif
- )
+ }
+ }
+ else if (targetArchitecture == TargetArchitecture.X64)
{
+ flags.Set(InstructionSet.X64_SSE);
+ flags.Set(InstructionSet.X64_SSE2);
#if !READYTORUN
// This list needs to match the list of intrinsics we can generate detection code for
// in HardwareIntrinsicHelpers.EmitIsSupportedIL.
@@ -2875,19 +2898,29 @@ private uint getJitFlags(ref CORJIT_FLAGS flags, uint sizeInBytes)
// For ReadyToRun, this list needs to match up with the behavior of FilterNamedIntrinsicMethodAttribs
// In particular, that this list of supported hardware will not generate non-SSE2 safe instruction
// sequences when paired with the behavior in FilterNamedIntrinsicMethodAttribs
+ if (isMethodDefinedInCoreLib())
#endif
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_AES);
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_PCLMULQDQ);
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_SSE3);
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_SSSE3);
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_LZCNT);
+ {
+ flags.Set(InstructionSet.X64_AES);
+ flags.Set(InstructionSet.X64_PCLMULQDQ);
+ flags.Set(InstructionSet.X64_SSE3);
+ flags.Set(InstructionSet.X64_SSSE3);
+ flags.Set(InstructionSet.X64_LZCNT);
#if READYTORUN
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_SSE41);
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_SSE42);
- flags.Set(CorJitFlag.CORJIT_FLAG_USE_POPCNT);
+ flags.Set(InstructionSet.X64_SSE41);
+ flags.Set(InstructionSet.X64_SSE42);
+ flags.Set(InstructionSet.X64_POPCNT);
#endif
+ }
+ }
+ else if (targetArchitecture == TargetArchitecture.ARM64)
+ {
+ flags.Set(InstructionSet.ARM64_ArmBase);
+ flags.Set(InstructionSet.ARM64_AdvSimd);
}
+ flags.Set64BitInstructionSetVariants(targetArchitecture);
+
if (this.MethodBeingCompiled.IsNativeCallable)
{
#if READYTORUN
diff --git a/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs b/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs
new file mode 100644
index 00000000000000..470faeb0713baa
--- /dev/null
+++ b/src/coreclr/src/tools/Common/JitInterface/CorInfoInstructionSet.cs
@@ -0,0 +1,317 @@
+
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using Internal.TypeSystem;
+
+namespace Internal.JitInterface
+{
+ public enum InstructionSet
+ {
+ ILLEGAL = 0,
+ NONE = 63,
+ ARM64_ArmBase=1,
+ ARM64_ArmBase_Arm64=2,
+ ARM64_AdvSimd=3,
+ ARM64_AdvSimd_Arm64=4,
+ ARM64_Aes=5,
+ ARM64_Crc32=6,
+ ARM64_Crc32_Arm64=7,
+ ARM64_Sha1=8,
+ ARM64_Sha256=9,
+ ARM64_Atomics=10,
+ ARM64_Vector64=11,
+ ARM64_Vector128=12,
+ X64_SSE=1,
+ X64_SSE2=2,
+ X64_SSE3=3,
+ X64_SSSE3=4,
+ X64_SSE41=5,
+ X64_SSE42=6,
+ X64_AVX=7,
+ X64_AVX2=8,
+ X64_AES=9,
+ X64_BMI1=10,
+ X64_BMI2=11,
+ X64_FMA=12,
+ X64_LZCNT=13,
+ X64_PCLMULQDQ=14,
+ X64_POPCNT=15,
+ X64_Vector128=16,
+ X64_Vector256=17,
+ X64_BMI1_X64=18,
+ X64_BMI2_X64=19,
+ X64_LZCNT_X64=20,
+ X64_POPCNT_X64=21,
+ X64_SSE_X64=22,
+ X64_SSE2_X64=23,
+ X64_SSE41_X64=24,
+ X64_SSE42_X64=25,
+ X86_SSE=1,
+ X86_SSE2=2,
+ X86_SSE3=3,
+ X86_SSSE3=4,
+ X86_SSE41=5,
+ X86_SSE42=6,
+ X86_AVX=7,
+ X86_AVX2=8,
+ X86_AES=9,
+ X86_BMI1=10,
+ X86_BMI2=11,
+ X86_FMA=12,
+ X86_LZCNT=13,
+ X86_PCLMULQDQ=14,
+ X86_POPCNT=15,
+ X86_Vector128=16,
+ X86_Vector256=17,
+ X86_BMI1_X64=18,
+ X86_BMI2_X64=19,
+ X86_LZCNT_X64=20,
+ X86_POPCNT_X64=21,
+ X86_SSE_X64=22,
+ X86_SSE2_X64=23,
+ X86_SSE41_X64=24,
+ X86_SSE42_X64=25,
+
+ }
+
+ public struct InstructionSetFlags
+ {
+ ulong _flags;
+
+ public void AddInstructionSet(InstructionSet instructionSet)
+ {
+ _flags = _flags | (((ulong)1) << (int)instructionSet);
+ }
+
+ public void RemoveInstructionSet(InstructionSet instructionSet)
+ {
+ _flags = _flags & ~(((ulong)1) << (int)instructionSet);
+ }
+
+ public bool HasInstructionSet(InstructionSet instructionSet)
+ {
+ return (_flags & (((ulong)1) << (int)instructionSet)) != 0;
+ }
+
+ public bool Equals(InstructionSetFlags other)
+ {
+ return _flags == other._flags;
+ }
+
+ public static InstructionSetFlags ExpandInstructionSetByImplication(TargetArchitecture architecture, InstructionSetFlags input)
+ {
+ InstructionSetFlags oldflags = input;
+ InstructionSetFlags resultflags = input;
+ do
+ {
+ oldflags = resultflags;
+ switch(architecture)
+ {
+
+ case TargetArchitecture.ARM64:
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_ArmBase))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_ArmBase_Arm64);
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_AdvSimd))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_AdvSimd_Arm64);
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_Crc32))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_Crc32_Arm64);
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_AdvSimd))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_ArmBase);
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_Aes))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_ArmBase);
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_Crc32))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_ArmBase);
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_Sha1))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_ArmBase);
+ if (resultflags.HasInstructionSet(InstructionSet.ARM64_Sha256))
+ resultflags.AddInstructionSet(InstructionSet.ARM64_ArmBase);
+ break;
+
+ case TargetArchitecture.X64:
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE2))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE2_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE41))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE41_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE42))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE42_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_BMI1))
+ resultflags.AddInstructionSet(InstructionSet.X64_BMI1_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_BMI2))
+ resultflags.AddInstructionSet(InstructionSet.X64_BMI2_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_LZCNT))
+ resultflags.AddInstructionSet(InstructionSet.X64_LZCNT_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_POPCNT))
+ resultflags.AddInstructionSet(InstructionSet.X64_POPCNT_X64);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE2))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE3))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSSE3))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE3);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE41))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSSE3);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_SSE42))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE41);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_AVX))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE42);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_AVX2))
+ resultflags.AddInstructionSet(InstructionSet.X64_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_AES))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_BMI1))
+ resultflags.AddInstructionSet(InstructionSet.X64_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_BMI2))
+ resultflags.AddInstructionSet(InstructionSet.X64_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_FMA))
+ resultflags.AddInstructionSet(InstructionSet.X64_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_PCLMULQDQ))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet.X64_POPCNT))
+ resultflags.AddInstructionSet(InstructionSet.X64_SSE42);
+ break;
+
+ case TargetArchitecture.X86:
+ if (resultflags.HasInstructionSet(InstructionSet.X86_SSE2))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_SSE3))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_SSSE3))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE3);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_SSE41))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSSE3);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_SSE42))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE41);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_AVX))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE42);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_AVX2))
+ resultflags.AddInstructionSet(InstructionSet.X86_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_AES))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_BMI1))
+ resultflags.AddInstructionSet(InstructionSet.X86_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_BMI2))
+ resultflags.AddInstructionSet(InstructionSet.X86_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_FMA))
+ resultflags.AddInstructionSet(InstructionSet.X86_AVX);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_PCLMULQDQ))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE2);
+ if (resultflags.HasInstructionSet(InstructionSet.X86_POPCNT))
+ resultflags.AddInstructionSet(InstructionSet.X86_SSE42);
+ break;
+
+ }
+ } while (!oldflags.Equals(resultflags));
+ return resultflags;
+ }
+
+ public static IEnumerable<KeyValuePair<string, InstructionSet>> ArchitectureToValidInstructionSets(TargetArchitecture architecture)
+ {
+ switch (architecture)
+ {
+
+ case TargetArchitecture.ARM64:
+ yield return new KeyValuePair<string, InstructionSet>("ArmBase", InstructionSet.ARM64_ArmBase);
+ yield return new KeyValuePair<string, InstructionSet>("AdvSimd", InstructionSet.ARM64_AdvSimd);
+ yield return new KeyValuePair<string, InstructionSet>("Aes", InstructionSet.ARM64_Aes);
+ yield return new KeyValuePair<string, InstructionSet>("Crc32", InstructionSet.ARM64_Crc32);
+ yield return new KeyValuePair<string, InstructionSet>("Sha1", InstructionSet.ARM64_Sha1);
+ yield return new KeyValuePair<string, InstructionSet>("Sha256", InstructionSet.ARM64_Sha256);
+ yield return new KeyValuePair<string, InstructionSet>("Atomics", InstructionSet.ARM64_Atomics);
+ yield return new KeyValuePair<string, InstructionSet>("Vector64", InstructionSet.ARM64_Vector64);
+ yield return new KeyValuePair<string, InstructionSet>("Vector128", InstructionSet.ARM64_Vector128);
+ break;
+
+ case TargetArchitecture.X64:
+ yield return new KeyValuePair<string, InstructionSet>("Sse", InstructionSet.X64_SSE);
+ yield return new KeyValuePair<string, InstructionSet>("Sse2", InstructionSet.X64_SSE2);
+ yield return new KeyValuePair<string, InstructionSet>("Sse3", InstructionSet.X64_SSE3);
+ yield return new KeyValuePair<string, InstructionSet>("Ssse3", InstructionSet.X64_SSSE3);
+ yield return new KeyValuePair<string, InstructionSet>("Sse41", InstructionSet.X64_SSE41);
+ yield return new KeyValuePair<string, InstructionSet>("Sse42", InstructionSet.X64_SSE42);
+ yield return new KeyValuePair<string, InstructionSet>("Avx", InstructionSet.X64_AVX);
+ yield return new KeyValuePair<string, InstructionSet>("Avx2", InstructionSet.X64_AVX2);
+ yield return new KeyValuePair<string, InstructionSet>("Aes", InstructionSet.X64_AES);
+ yield return new KeyValuePair<string, InstructionSet>("Bmi1", InstructionSet.X64_BMI1);
+ yield return new KeyValuePair<string, InstructionSet>("Bmi2", InstructionSet.X64_BMI2);
+ yield return new KeyValuePair<string, InstructionSet>("Fma", InstructionSet.X64_FMA);
+ yield return new KeyValuePair<string, InstructionSet>("Lzcnt", InstructionSet.X64_LZCNT);
+ yield return new KeyValuePair<string, InstructionSet>("Pclmulqdq", InstructionSet.X64_PCLMULQDQ);
+ yield return new KeyValuePair<string, InstructionSet>("Popcnt", InstructionSet.X64_POPCNT);
+ yield return new KeyValuePair<string, InstructionSet>("Vector128", InstructionSet.X64_Vector128);
+ yield return new KeyValuePair<string, InstructionSet>("Vector256", InstructionSet.X64_Vector256);
+ break;
+
+ case TargetArchitecture.X86:
+ yield return new KeyValuePair<string, InstructionSet>("Sse", InstructionSet.X86_SSE);
+ yield return new KeyValuePair<string, InstructionSet>("Sse2", InstructionSet.X86_SSE2);
+ yield return new KeyValuePair<string, InstructionSet>("Sse3", InstructionSet.X86_SSE3);
+ yield return new KeyValuePair<string, InstructionSet>("Ssse3", InstructionSet.X86_SSSE3);
+ yield return new KeyValuePair<string, InstructionSet>("Sse41", InstructionSet.X86_SSE41);
+ yield return new KeyValuePair<string, InstructionSet>("Sse42", InstructionSet.X86_SSE42);
+ yield return new KeyValuePair<string, InstructionSet>("Avx", InstructionSet.X86_AVX);
+ yield return new KeyValuePair<string, InstructionSet>("Avx2", InstructionSet.X86_AVX2);
+ yield return new KeyValuePair<string, InstructionSet>("Aes", InstructionSet.X86_AES);
+ yield return new KeyValuePair<string, InstructionSet>("Bmi1", InstructionSet.X86_BMI1);
+ yield return new KeyValuePair<string, InstructionSet>("Bmi2", InstructionSet.X86_BMI2);
+ yield return new KeyValuePair<string, InstructionSet>("Fma", InstructionSet.X86_FMA);
+ yield return new KeyValuePair<string, InstructionSet>("Lzcnt", InstructionSet.X86_LZCNT);
+ yield return new KeyValuePair<string, InstructionSet>("Pclmulqdq", InstructionSet.X86_PCLMULQDQ);
+ yield return new KeyValuePair<string, InstructionSet>("Popcnt", InstructionSet.X86_POPCNT);
+ yield return new KeyValuePair<string, InstructionSet>("Vector128", InstructionSet.X86_Vector128);
+ yield return new KeyValuePair<string, InstructionSet>("Vector256", InstructionSet.X86_Vector256);
+ break;
+
+ }
+ }
+
+ public void Set64BitInstructionSetVariants(TargetArchitecture architecture)
+ {
+ switch (architecture)
+ {
+
+ case TargetArchitecture.ARM64:
+ if (HasInstructionSet(InstructionSet.ARM64_ArmBase))
+ AddInstructionSet(InstructionSet.ARM64_ArmBase_Arm64);
+ if (HasInstructionSet(InstructionSet.ARM64_AdvSimd))
+ AddInstructionSet(InstructionSet.ARM64_AdvSimd_Arm64);
+ if (HasInstructionSet(InstructionSet.ARM64_Crc32))
+ AddInstructionSet(InstructionSet.ARM64_Crc32_Arm64);
+ break;
+
+ case TargetArchitecture.X64:
+ if (HasInstructionSet(InstructionSet.X64_SSE))
+ AddInstructionSet(InstructionSet.X64_SSE_X64);
+ if (HasInstructionSet(InstructionSet.X64_SSE2))
+ AddInstructionSet(InstructionSet.X64_SSE2_X64);
+ if (HasInstructionSet(InstructionSet.X64_SSE41))
+ AddInstructionSet(InstructionSet.X64_SSE41_X64);
+ if (HasInstructionSet(InstructionSet.X64_SSE42))
+ AddInstructionSet(InstructionSet.X64_SSE42_X64);
+ if (HasInstructionSet(InstructionSet.X64_BMI1))
+ AddInstructionSet(InstructionSet.X64_BMI1_X64);
+ if (HasInstructionSet(InstructionSet.X64_BMI2))
+ AddInstructionSet(InstructionSet.X64_BMI2_X64);
+ if (HasInstructionSet(InstructionSet.X64_LZCNT))
+ AddInstructionSet(InstructionSet.X64_LZCNT_X64);
+ if (HasInstructionSet(InstructionSet.X64_POPCNT))
+ AddInstructionSet(InstructionSet.X64_POPCNT_X64);
+ break;
+
+ case TargetArchitecture.X86:
+ break;
+
+ }
+ }
+ }
+}
diff --git a/src/coreclr/src/tools/Common/JitInterface/CorInfoTypes.cs b/src/coreclr/src/tools/Common/JitInterface/CorInfoTypes.cs
index 75f793a3b6f374..fba8baa7c3c455 100644
--- a/src/coreclr/src/tools/Common/JitInterface/CorInfoTypes.cs
+++ b/src/coreclr/src/tools/Common/JitInterface/CorInfoTypes.cs
@@ -5,6 +5,7 @@
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
+using Internal.TypeSystem;
namespace Internal.JitInterface
{
@@ -91,6 +92,10 @@ public struct CORINFO_JUST_MY_CODE_HANDLE_
public struct CORINFO_VarArgInfo
{
}
+
+ public struct PatchpointInfo
+ {
+ }
public enum _EXCEPTION_POINTERS
{ }
@@ -778,6 +783,11 @@ public enum CorJitFuncKind
CORJIT_FUNC_FILTER // a funclet associated with an EH filter
}
+ public unsafe struct CORINFO_OSR_INFO
+ {
+ public uint ILOffset;
+ public void* PatchpointInfo;
+ }
public unsafe struct CORINFO_METHOD_INFO
{
@@ -791,6 +801,7 @@ public unsafe struct CORINFO_METHOD_INFO
public CorInfoRegionKind regionKind;
public CORINFO_SIG_INFO args;
public CORINFO_SIG_INFO locals;
+ public CORINFO_OSR_INFO osrInfo;
}
//
// what type of code region we are in
@@ -1311,9 +1322,6 @@ public enum CorJitFlag : uint
CORJIT_FLAG_UNUSED4 = 11,
CORJIT_FLAG_UNUSED5 = 12,
CORJIT_FLAG_UNUSED6 = 13,
- CORJIT_FLAG_USE_AVX = 14,
- CORJIT_FLAG_USE_AVX2 = 15,
- CORJIT_FLAG_USE_AVX_512 = 16,
CORJIT_FLAG_FEATURE_SIMD = 17,
CORJIT_FLAG_MAKEFINALCODE = 18, // Use the final code generator, i.e., not the interpreter.
CORJIT_FLAG_READYTORUN = 19, // Use version-resilient code generation
@@ -1340,49 +1348,12 @@ public enum CorJitFlag : uint
CORJIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code
CORJIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
CORJIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method
-
-#region TARGET_ARM64
- CORJIT_FLAG_HAS_ARM64_AES = 43, // ID_AA64ISAR0_EL1.AES is 1 or better
- CORJIT_FLAG_HAS_ARM64_ATOMICS = 44, // ID_AA64ISAR0_EL1.Atomic is 2 or better
- CORJIT_FLAG_HAS_ARM64_CRC32 = 45, // ID_AA64ISAR0_EL1.CRC32 is 1 or better
- CORJIT_FLAG_HAS_ARM64_DCPOP = 46, // ID_AA64ISAR1_EL1.DPB is 1 or better
- CORJIT_FLAG_HAS_ARM64_DP = 47, // ID_AA64ISAR0_EL1.DP is 1 or better
- CORJIT_FLAG_HAS_ARM64_FCMA = 48, // ID_AA64ISAR1_EL1.FCMA is 1 or better
- CORJIT_FLAG_HAS_ARM64_FP = 49, // ID_AA64PFR0_EL1.FP is 0 or better
- CORJIT_FLAG_HAS_ARM64_FP16 = 50, // ID_AA64PFR0_EL1.FP is 1 or better
- CORJIT_FLAG_HAS_ARM64_JSCVT = 51, // ID_AA64ISAR1_EL1.JSCVT is 1 or better
- CORJIT_FLAG_HAS_ARM64_LRCPC = 52, // ID_AA64ISAR1_EL1.LRCPC is 1 or better
- CORJIT_FLAG_HAS_ARM64_PMULL = 53, // ID_AA64ISAR0_EL1.AES is 2 or better
- CORJIT_FLAG_HAS_ARM64_SHA1 = 54, // ID_AA64ISAR0_EL1.SHA1 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SHA256 = 55, // ID_AA64ISAR0_EL1.SHA2 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SHA512 = 56, // ID_AA64ISAR0_EL1.SHA2 is 2 or better
- CORJIT_FLAG_HAS_ARM64_SHA3 = 57, // ID_AA64ISAR0_EL1.SHA3 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SIMD = 58, // ID_AA64PFR0_EL1.AdvSIMD is 0 or better
- CORJIT_FLAG_HAS_ARM64_SIMD_V81 = 59, // ID_AA64ISAR0_EL1.RDM is 1 or better
- CORJIT_FLAG_HAS_ARM64_SIMD_FP16 = 60, // ID_AA64PFR0_EL1.AdvSIMD is 1 or better
- CORJIT_FLAG_HAS_ARM64_SM3 = 61, // ID_AA64ISAR0_EL1.SM3 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SM4 = 62, // ID_AA64ISAR0_EL1.SM4 is 1 or better
- CORJIT_FLAG_HAS_ARM64_SVE = 63, // ID_AA64PFR0_EL1.SVE is 1 or better
-#endregion
-
-#region x86/x64
- CORJIT_FLAG_USE_SSE3 = 43,
- CORJIT_FLAG_USE_SSSE3 = 44,
- CORJIT_FLAG_USE_SSE41 = 45,
- CORJIT_FLAG_USE_SSE42 = 46,
- CORJIT_FLAG_USE_AES = 47,
- CORJIT_FLAG_USE_BMI1 = 48,
- CORJIT_FLAG_USE_BMI2 = 49,
- CORJIT_FLAG_USE_FMA = 50,
- CORJIT_FLAG_USE_LZCNT = 51,
- CORJIT_FLAG_USE_PCLMULQDQ = 52,
- CORJIT_FLAG_USE_POPCNT = 53,
-#endregion
}
public struct CORJIT_FLAGS
{
private UInt64 _corJitFlags;
+ InstructionSetFlags _instructionSetFlags;
public void Reset()
{
@@ -1394,6 +1365,11 @@ public void Set(CorJitFlag flag)
_corJitFlags |= 1UL << (int)flag;
}
+ public void Set(InstructionSet instructionSet)
+ {
+ _instructionSetFlags.AddInstructionSet(instructionSet);
+ }
+
public void Clear(CorJitFlag flag)
{
_corJitFlags &= ~(1UL << (int)flag);
@@ -1404,19 +1380,9 @@ public bool IsSet(CorJitFlag flag)
return (_corJitFlags & (1UL << (int)flag)) != 0;
}
- public void Add(ref CORJIT_FLAGS other)
- {
- _corJitFlags |= other._corJitFlags;
- }
-
- public void Remove(ref CORJIT_FLAGS other)
- {
- _corJitFlags &= ~other._corJitFlags;
- }
-
- public bool IsEmpty()
+ public void Set64BitInstructionSetVariants(TargetArchitecture architecture)
{
- return _corJitFlags == 0;
+ _instructionSetFlags.Set64BitInstructionSetVariants(architecture);
}
}
}
diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
new file mode 100644
index 00000000000000..9c8404fe8969f0
--- /dev/null
+++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
@@ -0,0 +1,81 @@
+; Define the set of instruction sets available on a platform
+; Format is
+;
+; Add new instruction set
+; instructionset,<architecture>,<managed name>,<r2r name>,<r2r numeric value>,<jit name>
+;
+; Add jit 64bit architecture specific instruction set when instruction set is available
+; instructionset64bit,<architecture>,<jit name>
+;
+; Add an instruction set implication (i.e, if instruction set A is present, then instruction set B must be present too.)
+; implication,<architecture>,<jit name>,<implied jit name>
+;
+; Copy instruction sets defined for other architecture at this point in the file.
+; copyinstructionsets,<architecture to copy from>,<architecture to copy to>
+
+; Definition of X86 instruction sets
+
+definearch ,X86 ,32Bit ,X64
+instructionset ,X86 ,Sse , ,1 ,SSE
+instructionset ,X86 ,Sse2 , ,2 ,SSE2
+implication ,X86 ,SSE2 ,SSE
+instructionset ,X86 ,Sse3 , ,3 ,SSE3
+implication ,X86 ,SSE3 ,SSE2
+instructionset ,X86 ,Ssse3 , ,4 ,SSSE3
+implication ,X86 ,SSSE3 ,SSE3
+instructionset ,X86 ,Sse41 , ,5 ,SSE41
+implication ,X86 ,SSE41 ,SSSE3
+instructionset ,X86 ,Sse42 , ,6 ,SSE42
+implication ,X86 ,SSE42 ,SSE41
+instructionset ,X86 ,Avx , ,7 ,AVX
+implication ,X86 ,AVX ,SSE42
+instructionset ,X86 ,Avx2 , ,8 ,AVX2
+implication ,X86 ,AVX2 ,AVX
+instructionset ,X86 ,Aes , ,9 ,AES
+implication ,X86 ,AES ,SSE2
+instructionset ,X86 ,Bmi1 , ,10 ,BMI1
+implication ,X86 ,BMI1 ,AVX
+instructionset ,X86 ,Bmi2 , ,11 ,BMI2
+implication ,X86 ,BMI2 ,AVX
+instructionset ,X86 ,Fma , ,12 ,FMA
+implication ,X86 ,FMA ,AVX
+instructionset ,X86 ,Lzcnt , ,13 ,LZCNT
+instructionset ,X86 ,Pclmulqdq , ,14 ,PCLMULQDQ
+implication ,X86 ,PCLMULQDQ ,SSE2
+instructionset ,X86 ,Popcnt , ,15 ,POPCNT
+implication ,X86 ,POPCNT ,SSE42
+instructionset ,X86 , , , ,Vector128
+instructionset ,X86 , , , ,Vector256
+
+; Definition of X64 instruction sets (defined as copies of the X86 instruction sets via copyinstructionsets below)
+definearch ,X64 ,64Bit ,X64
+instructionset64bit,X86 ,BMI1
+instructionset64bit,X86 ,BMI2
+instructionset64bit,X86 ,LZCNT
+instructionset64bit,X86 ,POPCNT
+instructionset64bit,X86 ,SSE
+instructionset64bit,X86 ,SSE2
+instructionset64bit,X86 ,SSE41
+instructionset64bit,X86 ,SSE42
+
+copyinstructionsets,X86 ,X64
+
+; Definition of the Arm64 instruction sets
+definearch ,ARM64 ,64Bit ,Arm64
+instructionset ,ARM64 ,ArmBase , ,16 ,ArmBase
+instructionset64bit,ARM64 ,ArmBase
+instructionset ,ARM64 ,AdvSimd , ,17 ,AdvSimd
+instructionset64bit,ARM64 ,AdvSimd
+implication ,ARM64 ,AdvSimd ,ArmBase
+instructionset ,ARM64 ,Aes , ,9 ,Aes
+implication ,ARM64 ,Aes ,ArmBase
+instructionset ,ARM64 ,Crc32 , ,18 ,Crc32
+instructionset64bit,ARM64 ,Crc32
+implication ,ARM64 ,Crc32 ,ArmBase
+instructionset ,ARM64 ,Sha1 , ,19 ,Sha1
+implication ,ARM64 ,Sha1 ,ArmBase
+instructionset ,ARM64 ,Sha256 , ,20 ,Sha256
+implication ,ARM64 ,Sha256 ,ArmBase
+instructionset ,ARM64 , ,Atomics ,21 ,Atomics
+instructionset ,ARM64 , , , ,Vector64
+instructionset ,ARM64 , , , ,Vector128
diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetGenerator.cs b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetGenerator.cs
new file mode 100644
index 00000000000000..3d5ea118732e30
--- /dev/null
+++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetGenerator.cs
@@ -0,0 +1,645 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using System.IO;
+using System.Diagnostics;
+
+namespace Thunkerator
+{
+ public class InstructionSetGenerator
+ {
+ class InstructionSetInfo
+ {
+ public string Architecture { get; }
+ public string ManagedName { get; }
+ public string R2rName { get; }
+ public string R2rNumericValue { get; }
+ public string JitName { get; }
+
+ public InstructionSetInfo(string architecture, string managedName, string r2rName, string r2rNumericValue, string jitName)
+ {
+ Architecture = architecture;
+ ManagedName = managedName;
+ R2rName = String.IsNullOrEmpty(r2rName) ? managedName : r2rName;
+ R2rNumericValue = r2rNumericValue;
+ JitName = jitName;
+ }
+
+ public InstructionSetInfo(string architecture, InstructionSetInfo similarInstructionSet)
+ {
+ Architecture = architecture;
+ ManagedName = similarInstructionSet.ManagedName;
+ R2rName = similarInstructionSet.R2rName;
+ R2rNumericValue = similarInstructionSet.R2rNumericValue;
+ JitName = similarInstructionSet.JitName;
+ }
+
+ public string PublicName
+ {
+ get
+ {
+ if (!String.IsNullOrEmpty(ManagedName))
+ return ManagedName;
+ else if (!String.IsNullOrEmpty(R2rName))
+ return R2rName;
+ else
+ return JitName;
+ }
+ }
+ }
+
+ class InstructionSetImplication
+ {
+ public string Architecture { get; }
+ public string JitName { get; }
+ public string ImpliedJitName { get; }
+
+ public InstructionSetImplication(string architecture, string jitName, string impliedJitName)
+ {
+ Architecture = architecture;
+ JitName = jitName;
+ ImpliedJitName = impliedJitName;
+ }
+
+ public InstructionSetImplication(string architecture, InstructionSetImplication similarInstructionSet)
+ {
+ Architecture = architecture;
+ ImpliedJitName = similarInstructionSet.ImpliedJitName;
+ JitName = similarInstructionSet.JitName;
+ }
+ }
+
+ List<InstructionSetInfo> _instructionSets = new List<InstructionSetInfo>();
+ List<InstructionSetImplication> _implications = new List<InstructionSetImplication>();
+ Dictionary<string, HashSet<string>> _64bitVariants = new Dictionary<string, HashSet<string>>();
+ SortedDictionary<string, int> _r2rNamesByName = new SortedDictionary<string, int>();
+ SortedDictionary<int, string> _r2rNamesByNumber = new SortedDictionary<int, string>();
+ SortedSet<string> _architectures = new SortedSet<string>();
+ Dictionary<string, List<string>> _architectureJitNames = new Dictionary<string, List<string>>();
+ HashSet<string> _64BitArchitectures = new HashSet<string>();
+ Dictionary<string, string> _64BitVariantArchitectureJitNameSuffix = new Dictionary<string, string>();
+
+ void ArchitectureEncountered(string arch)
+ {
+ if (!_64bitVariants.ContainsKey(arch))
+ _64bitVariants.Add(arch, new HashSet<string>());
+ _architectures.Add(arch);
+ if (!_architectureJitNames.ContainsKey(arch))
+ _architectureJitNames.Add(arch, new List<string>());
+ }
+
+ void ValidateArchitectureEncountered(string arch)
+ {
+ if (!_architectures.Contains(arch))
+ throw new Exception("Architecture not defined");
+ }
+
+ private string ArchToIfDefArch(string arch)
+ {
+ if (arch == "X64")
+ return "AMD64";
+ return arch;
+ }
+
+
+ private string ArchToInstructionSetSuffixArch(string arch)
+ {
+ return _64BitVariantArchitectureJitNameSuffix[arch];
+ }
+
+ public bool ParseInput(TextReader tr)
+ {
+ int currentLineIndex = 1;
+ for (string currentLine = tr.ReadLine(); currentLine != null; currentLine = tr.ReadLine(), currentLineIndex++)
+ {
+ try
+ {
+ if (currentLine.Length == 0)
+ {
+ continue; // It's an empty line, ignore
+ }
+
+ if (currentLine[0] == ';')
+ {
+ continue; // It's a comment
+ }
+
+ string[] command = currentLine.Split(',');
+ for (int i = 0; i < command.Length; i++)
+ {
+ command[i] = command[i].Trim();
+ }
+ switch(command[0])
+ {
+ case "definearch":
+ if (command.Length != 4)
+ throw new Exception($"Incorrect number of args for definearch {command.Length}");
+ ArchitectureEncountered(command[1]);
+ if (command[2] == "64Bit")
+ {
+ _64BitArchitectures.Add(command[1]);
+ }
+ else if (command[2] != "32Bit")
+ {
+ throw new Exception("Architecture must be 32Bit or 64Bit");
+ }
+ _64BitVariantArchitectureJitNameSuffix[command[1]] = command[3];
+ break;
+ case "instructionset":
+ if (command.Length != 6)
+ throw new Exception("Incorrect number of args for instructionset");
+ ValidateArchitectureEncountered(command[1]);
+ _architectureJitNames[command[1]].Add(command[5]);
+ _instructionSets.Add(new InstructionSetInfo(command[1],command[2],command[3],command[4],command[5]));
+ break;
+ case "instructionset64bit":
+ if (command.Length != 3)
+ throw new Exception("Incorrect number of args for instructionset64bit");
+ ValidateArchitectureEncountered(command[1]);
+ _64bitVariants[command[1]].Add(command[2]);
+ _architectureJitNames[command[1]].Add(command[2] + "_" + ArchToInstructionSetSuffixArch(command[1]));
+ break;
+ case "implication":
+ if (command.Length != 4)
+ throw new Exception("Incorrect number of args for implication");
+ ValidateArchitectureEncountered(command[1]);
+ _implications.Add(new InstructionSetImplication(command[1],command[2], command[3]));
+ break;
+ case "copyinstructionsets":
+ if (command.Length != 3)
+ throw new Exception("Incorrect number of args for copyinstructionsets");
+ ValidateArchitectureEncountered(command[1]);
+ ValidateArchitectureEncountered(command[2]);
+ string arch = command[1];
+ string targetarch = command[2];
+ foreach (var val in _instructionSets.ToArray())
+ {
+ if (val.Architecture != arch)
+ continue;
+ _instructionSets.Add(new InstructionSetInfo(targetarch, val));
+ _architectureJitNames[targetarch].Add(val.JitName);
+ }
+ foreach (var val in _implications.ToArray())
+ {
+ if (val.Architecture != arch)
+ continue;
+ _implications.Add(new InstructionSetImplication(targetarch, val));
+ }
+ foreach (var val in _64bitVariants[arch])
+ {
+ _64bitVariants[targetarch].Add(val);
+ _architectureJitNames[targetarch].Add(val + "_" + ArchToInstructionSetSuffixArch(targetarch));
+ }
+ break;
+ default:
+ throw new Exception("Unknown command");
+ }
+ }
+ catch (Exception e)
+ {
+ Console.Error.WriteLine("Error parsing line {0} : {1}", currentLineIndex, e.Message);
+ return false;
+ }
+ }
+
+ foreach (var instructionSet in _instructionSets)
+ {
+ if (!String.IsNullOrEmpty(instructionSet.R2rName))
+ {
+ int r2rValue = Int32.Parse(instructionSet.R2rNumericValue);
+ if (_r2rNamesByName.ContainsKey(instructionSet.R2rName))
+ {
+ if (_r2rNamesByName[instructionSet.R2rName] != r2rValue)
+ throw new Exception("R2R name/number mismatch");
+ }
+ else
+ {
+ _r2rNamesByName.Add(instructionSet.R2rName, r2rValue);
+ _r2rNamesByNumber.Add(r2rValue, instructionSet.R2rName);
+ }
+ }
+ }
+
+ foreach (var architectureInfo in _architectureJitNames)
+ {
+ if (architectureInfo.Value.Count > 62)
+ {
+ throw new Exception("Too many instruction sets added. Scheme of using uint64_t as instruction mask will need updating");
+ }
+ }
+
+ return true;
+ }
+
+ public void WriteManagedReadyToRunInstructionSet(TextWriter tr)
+ {
+ // Write header
+ tr.Write(@"
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+using System;
+using System.Runtime.InteropServices;
+using Internal.TypeSystem;
+
+namespace Internal.ReadyToRunConstants
+{
+ public enum ReadyToRunInstructionSet
+ {
+");
+
+ foreach (var r2rEntry in _r2rNamesByNumber)
+ {
+ tr.WriteLine($" {r2rEntry.Value}={r2rEntry.Key},");
+ }
+ tr.Write(@"
+ }
+
+ public static class ReadyToRunInstructionSetHelper
+ {
+ ReadyToRunInstructionSet? R2RInstructionSetFromJitInstructionSet(TargetArchitecture architecture, Internal.JitInterface.InstructionSet instructionSet)
+ {
+ switch (architecture)
+ {
+");
+ foreach (string architecture in _architectures)
+ {
+ tr.Write($@"
+ case TargetArchitecture.{architecture}:
+ {{
+ switch (instructionSet)
+ {{
+");
+ foreach (var instructionSet in _instructionSets)
+ {
+ if (instructionSet.Architecture != architecture) continue;
+
+ string r2rEnumerationValue;
+ if (!String.IsNullOrEmpty(instructionSet.R2rName))
+ r2rEnumerationValue = $"ReadyToRunInstructionSet.{instructionSet.R2rName}";
+ else
+ r2rEnumerationValue = $"null";
+
+ tr.WriteLine($" case InstructionSet.{architecture}_{instructionSet.JitName}: return {r2rEnumerationValue};");
+ if (_64BitArchitectures.Contains(architecture) && _64bitVariants[architecture].Contains(instructionSet.JitName))
+ tr.WriteLine($" case InstructionSet.{architecture}_{instructionSet.JitName}_{ArchToInstructionSetSuffixArch(architecture)}: return {r2rEnumerationValue};");
+ }
+
+ tr.Write(@"
+ default: throw new Exception(""Unknown instruction set"");
+ }
+ }
+");
+ }
+
+ tr.Write(@"
+ default: throw new Exception(""Unknown architecture"");
+ }
+ }
+ }
+}
+");
+ }
+
+ public void WriteManagedJitInstructionSet(TextWriter tr)
+ {
+ // Write header
+ tr.Write(@"
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using Internal.TypeSystem;
+
+namespace Internal.JitInterface
+{
+ public enum InstructionSet
+ {
+ ILLEGAL = 0,
+ NONE = 63,
+");
+ foreach (string architecture in _architectures)
+ {
+ int counter = 1;
+ foreach (var jitName in _architectureJitNames[architecture])
+ {
+ tr.WriteLine($" {architecture}_{jitName}={counter++},");
+ }
+ }
+
+ tr.Write(@"
+ }
+
+ public struct InstructionSetFlags
+ {
+ ulong _flags;
+
+ public void AddInstructionSet(InstructionSet instructionSet)
+ {
+ _flags = _flags | (((ulong)1) << (int)instructionSet);
+ }
+
+ public void RemoveInstructionSet(InstructionSet instructionSet)
+ {
+ _flags = _flags & ~(((ulong)1) << (int)instructionSet);
+ }
+
+ public bool HasInstructionSet(InstructionSet instructionSet)
+ {
+ return (_flags & (((ulong)1) << (int)instructionSet)) != 0;
+ }
+
+ public bool Equals(InstructionSetFlags other)
+ {
+ return _flags == other._flags;
+ }
+
+ public static InstructionSetFlags ExpandInstructionSetByImplication(TargetArchitecture architecture, InstructionSetFlags input)
+ {
+ InstructionSetFlags oldflags = input;
+ InstructionSetFlags resultflags = input;
+ do
+ {
+ oldflags = resultflags;
+ switch(architecture)
+ {
+");
+ foreach (string architecture in _architectures)
+ {
+ tr.Write($@"
+ case TargetArchitecture.{architecture}:
+");
+ foreach (var instructionSet in _instructionSets)
+ {
+ if (instructionSet.Architecture != architecture) continue;
+ if (_64BitArchitectures.Contains(architecture) && _64bitVariants[architecture].Contains(instructionSet.JitName))
+ AddImplication(architecture, instructionSet.JitName, $"{instructionSet.JitName}_{ArchToInstructionSetSuffixArch(architecture)}");
+ }
+ foreach (var implication in _implications)
+ {
+ if (implication.Architecture != architecture) continue;
+ AddImplication(architecture, implication.JitName, implication.ImpliedJitName);
+ }
+ tr.WriteLine(" break;");
+ }
+
+ tr.Write(@"
+ }
+ } while (!oldflags.Equals(resultflags));
+ return resultflags;
+ }
+
+ public static IEnumerable<KeyValuePair<string, InstructionSet>> ArchitectureToValidInstructionSets(TargetArchitecture architecture)
+ {
+ switch (architecture)
+ {
+");
+ foreach (string architecture in _architectures)
+ {
+ tr.Write($@"
+ case TargetArchitecture.{architecture}:
+");
+ foreach (var instructionSet in _instructionSets)
+ {
+ if (instructionSet.Architecture != architecture) continue;
+ tr.WriteLine($" yield return new KeyValuePair<string, InstructionSet>(\"{instructionSet.PublicName}\", InstructionSet.{architecture}_{instructionSet.JitName});");
+ }
+ tr.WriteLine(" break;");
+ }
+ tr.Write(@"
+ }
+ }
+
+ public void Set64BitInstructionSetVariants(TargetArchitecture architecture)
+ {
+ switch (architecture)
+ {
+");
+ foreach (string architecture in _architectures)
+ {
+ tr.Write($@"
+ case TargetArchitecture.{architecture}:
+");
+ foreach (var instructionSet in _instructionSets)
+ {
+ if (instructionSet.Architecture != architecture) continue;
+
+ if (_64BitArchitectures.Contains(architecture) && _64bitVariants[architecture].Contains(instructionSet.JitName))
+ {
+ tr.WriteLine($" if (HasInstructionSet(InstructionSet.{architecture}_{instructionSet.JitName}))");
+ tr.WriteLine($" AddInstructionSet(InstructionSet.{architecture}_{instructionSet.JitName}_{ArchToInstructionSetSuffixArch(architecture)});");
+ }
+ }
+
+ tr.WriteLine(" break;");
+ }
+ tr.Write(@"
+ }
+ }
+ }
+}
+");
+ return;
+ void AddImplication(string architecture, string jitName, string impliedJitName)
+ {
+ tr.WriteLine($" if (resultflags.HasInstructionSet(InstructionSet.{architecture}_{jitName}))");
+ tr.WriteLine($" resultflags.AddInstructionSet(InstructionSet.{architecture}_{impliedJitName});");
+ }
+ }
+
+ public void WriteNativeCorInfoInstructionSet(TextWriter tr)
+ {
+ // Write header
+ tr.Write(@"
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+#ifndef CORINFOINSTRUCTIONSET_H
+#define CORINFOINSTRUCTIONSET_H
+
+enum CORINFO_InstructionSet
+{
+ InstructionSet_ILLEGAL = 0,
+ InstructionSet_NONE = 63,
+");
+ foreach (string architecture in _architectures)
+ {
+ tr.WriteLine($"#ifdef TARGET_{ArchToIfDefArch(architecture)}");
+ int counter = 1;
+ foreach (var jitName in _architectureJitNames[architecture])
+ {
+ tr.WriteLine($" InstructionSet_{jitName}={counter++},");
+ }
+ tr.WriteLine($"#endif // TARGET_{ArchToIfDefArch(architecture)}");
+ }
+ tr.Write(@"
+};
+
+struct CORINFO_InstructionSetFlags
+{
+private:
+ uint64_t _flags = 0;
+public:
+ void AddInstructionSet(CORINFO_InstructionSet instructionSet)
+ {
+ _flags = _flags | (((uint64_t)1) << instructionSet);
+ }
+
+ void RemoveInstructionSet(CORINFO_InstructionSet instructionSet)
+ {
+ _flags = _flags & ~(((uint64_t)1) << instructionSet);
+ }
+
+ bool HasInstructionSet(CORINFO_InstructionSet instructionSet) const
+ {
+ return _flags & (((uint64_t)1) << instructionSet);
+ }
+
+ bool Equals(CORINFO_InstructionSetFlags other) const
+ {
+ return _flags == other._flags;
+ }
+
+ void Add(CORINFO_InstructionSetFlags other)
+ {
+ _flags |= other._flags;
+ }
+
+ bool IsEmpty() const
+ {
+ return _flags == 0;
+ }
+
+ void Reset()
+ {
+ _flags = 0;
+ }
+
+ void Set64BitInstructionSetVariants()
+ {
+");
+ foreach (string architecture in _architectures)
+ {
+ tr.WriteLine($"#ifdef TARGET_{ArchToIfDefArch(architecture)}");
+ foreach (var instructionSet in _instructionSets)
+ {
+ if (instructionSet.Architecture != architecture) continue;
+
+ if (_64BitArchitectures.Contains(architecture) && _64bitVariants[architecture].Contains(instructionSet.JitName))
+ {
+ tr.WriteLine($" if (HasInstructionSet(InstructionSet_{instructionSet.JitName}))");
+ tr.WriteLine($" AddInstructionSet(InstructionSet_{instructionSet.JitName}_{ArchToInstructionSetSuffixArch(architecture)});");
+ }
+ }
+
+ tr.WriteLine($"#endif // TARGET_{ArchToIfDefArch(architecture)}");
+ }
+ tr.Write(@"
+ }
+
+ uint64_t GetFlagsRaw()
+ {
+ return _flags;
+ }
+
+ void SetFromFlagsRaw(uint64_t flags)
+ {
+ _flags = flags;
+ }
+};
+
+inline CORINFO_InstructionSetFlags EnsureInstructionSetFlagsAreValid(CORINFO_InstructionSetFlags input)
+{
+ CORINFO_InstructionSetFlags oldflags = input;
+ CORINFO_InstructionSetFlags resultflags = input;
+ do
+ {
+ oldflags = resultflags;
+");
+ foreach (string architecture in _architectures)
+ {
+ tr.WriteLine($"#ifdef TARGET_{ArchToIfDefArch(architecture)}");
+ foreach (var instructionSet in _instructionSets)
+ {
+ if (instructionSet.Architecture != architecture) continue;
+ if (_64BitArchitectures.Contains(architecture) && _64bitVariants[architecture].Contains(instructionSet.JitName))
+ AddImplication(architecture, instructionSet.JitName, $"{instructionSet.JitName}_{ArchToInstructionSetSuffixArch(architecture)}");
+ }
+ foreach (var implication in _implications)
+ {
+ if (implication.Architecture != architecture) continue;
+ AddImplication(architecture, implication.JitName, implication.ImpliedJitName);
+ }
+ tr.WriteLine($"#endif // TARGET_{ArchToIfDefArch(architecture)}");
+ }
+ tr.Write(@"
+ } while (!oldflags.Equals(resultflags));
+ return resultflags;
+}
+
+
+
+#endif // CORINFOINSTRUCTIONSET_H
+");
+ return;
+
+ void AddImplication(string architecture, string jitName, string impliedJitName)
+ {
+ tr.WriteLine($" if (resultflags.HasInstructionSet(InstructionSet_{jitName}) && !resultflags.HasInstructionSet(InstructionSet_{impliedJitName}))");
+ tr.WriteLine($" resultflags.RemoveInstructionSet(InstructionSet_{jitName});");
+ }
+ }
+
+ public void WriteNativeReadyToRunInstructionSet(TextWriter tr)
+ {
+ // Write header
+ tr.Write(@"
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// DO NOT EDIT THIS FILE! IT IS AUTOGENERATED
+// FROM /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/InstructionSetDesc.txt
+// using /src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+
+#ifndef READYTORUNINSTRUCTIONSET_H
+#define READYTORUNINSTRUCTIONSET_H
+enum ReadyToRunInstructionSet
+{
+");
+
+ foreach (var r2rEntry in _r2rNamesByNumber)
+ {
+ tr.WriteLine($" READYTORUN_INSTRUCTION_{r2rEntry.Value}={r2rEntry.Key},");
+ }
+ tr.Write(@"
+};
+
+#endif // READYTORUNINSTRUCTIONSET_H
+");
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/Program.cs b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/Program.cs
index 78b513211cc4a0..94fa40662716d2 100644
--- a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/Program.cs
+++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/Program.cs
@@ -493,16 +493,49 @@ class JitInterfaceWrapper
static void Main(string[] args)
{
- IEnumerable<FunctionDecl> functions = ParseInput(new StreamReader(args[0]));
- using (TextWriter tw = new StreamWriter(args[1]))
+ if (args[0] == "InstructionSetGenerator")
{
- Console.WriteLine("Generating {0}", args[1]);
- WriteManagedThunkInterface(tw, functions);
+ InstructionSetGenerator generator = new InstructionSetGenerator();
+ if (!generator.ParseInput(new StreamReader(args[1])))
+ return;
+
+ using (TextWriter tw = new StreamWriter(args[2]))
+ {
+ Console.WriteLine("Generating {0}", args[2]);
+ generator.WriteManagedReadyToRunInstructionSet(tw);
+ }
+
+ using (TextWriter tw = new StreamWriter(args[3]))
+ {
+ Console.WriteLine("Generating {0}", args[3]);
+ generator.WriteManagedJitInstructionSet(tw);
+ }
+
+ using (TextWriter tw = new StreamWriter(args[4]))
+ {
+ Console.WriteLine("Generating {0}", args[4]);
+ generator.WriteNativeCorInfoInstructionSet(tw);
+ }
+
+ using (TextWriter tw = new StreamWriter(args[5]))
+ {
+ Console.WriteLine("Generating {0}", args[5]);
+ generator.WriteNativeReadyToRunInstructionSet(tw);
+ }
}
- using (TextWriter tw = new StreamWriter(args[2]))
+ else
{
- Console.WriteLine("Generating {0}", args[2]);
- WriteNativeWrapperInterface(tw, functions);
+ IEnumerable<FunctionDecl> functions = ParseInput(new StreamReader(args[0]));
+ using (TextWriter tw = new StreamWriter(args[1]))
+ {
+ Console.WriteLine("Generating {0}", args[1]);
+ WriteManagedThunkInterface(tw, functions);
+ }
+ using (TextWriter tw = new StreamWriter(args[2]))
+ {
+ Console.WriteLine("Generating {0}", args[2]);
+ WriteNativeWrapperInterface(tw, functions);
+ }
}
}
}
diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt
index 64ffbc793d1453..826aa55ffafbf8 100644
--- a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt
+++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt
@@ -96,6 +96,7 @@ CORINFO_GENERICHANDLE_RESULT*,ref CORINFO_GENERICHANDLE_RESULT,void*
CORINFO_METHOD_INFO*,CORINFO_METHOD_INFO*,void*
CORINFO_FIELD_INFO*,CORINFO_FIELD_INFO*,void*
CORINFO_CALL_INFO*,CORINFO_CALL_INFO*,void*
+PatchpointInfo*,PatchpointInfo*,void*
DelegateCtorArgs*,ref DelegateCtorArgs,void*
ICorDynamicInfo*,IntPtr,void*
va_list,IntPtr
@@ -186,6 +187,8 @@ FUNCTIONS
void methodMustBeLoadedBeforeCodeIsRun( CORINFO_METHOD_HANDLE method );
CORINFO_METHOD_HANDLE mapMethodDeclToMethodImpl( CORINFO_METHOD_HANDLE method );
void getGSCookie( GSCookie * pCookieVal, GSCookie ** ppCookieVal );
+ void setPatchpointInfo(PatchpointInfo* patchpointInfo);
+ PatchpointInfo* getOSRInfo(unsigned * ilOffset);
void resolveToken(CORINFO_RESOLVED_TOKEN * pResolvedToken);
void tryResolveToken(CORINFO_RESOLVED_TOKEN * pResolvedToken);
void findSig( CORINFO_MODULE_HANDLE module, unsigned sigTOK, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO *sig );
diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
index 46328b52d08baf..f90ce4554e37a8 100644
--- a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
+++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.bat
@@ -1,2 +1,4 @@
-cd /d %~dp0
-dotnet run -- ThunkInput.txt ..\CorInfoBase.cs ..\..\..\crossgen2\jitinterface\jitinterface.h
\ No newline at end of file
+pushd %~dp0
+call ..\..\..\..\..\..\..\dotnet.cmd run -- ThunkInput.txt ..\CorInfoBase.cs ..\..\..\crossgen2\jitinterface\jitinterface.h
+call ..\..\..\..\..\..\..\dotnet.cmd run -- InstructionSetGenerator InstructionSetDesc.txt ..\..\Internal\Runtime\ReadyToRunInstructionSet.cs ..\CorInfoInstructionSet.cs ..\..\..\..\inc\corinfoinstructionset.h ..\..\..\..\inc\readytoruninstructionset.h
+popd
\ No newline at end of file
diff --git a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.sh b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.sh
index 59672c72d4156b..041e410ae98aa1 100755
--- a/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.sh
+++ b/src/coreclr/src/tools/Common/JitInterface/ThunkGenerator/gen.sh
@@ -1,3 +1,4 @@
#!/usr/bin/env bash
cd "$(dirname ${BASH_SOURCE[0]})"
-dotnet run -- ThunkInput.txt ../CorInfoBase.cs ../../../crossgen2/jitinterface/jitinterface.h
+../../../../../../../dotnet.sh run -- ThunkInput.txt ../CorInfoBase.cs ../../../crossgen2/jitinterface/jitinterface.h
+../../../../../../../dotnet.sh run -- InstructionSetGenerator InstructionSetDesc.txt ../../Internal/Runtime/ReadyToRunInstructionSet.cs ../CorInfoInstructionSet.cs ../../../../inc/corinfoinstructionset.h ../../../../inc/readytoruninstructionset.h
\ No newline at end of file
diff --git a/src/coreclr/src/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs b/src/coreclr/src/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
index d27fbf6b4806a9..dbd21a08347675 100644
--- a/src/coreclr/src/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
+++ b/src/coreclr/src/tools/Common/TypeSystem/Common/MetadataVirtualMethodAlgorithm.cs
@@ -419,7 +419,7 @@ private static void FindBaseUnificationGroup(MetadataType currentType, Unificati
foreach (MethodDesc memberMethod in unificationGroup)
{
MethodDesc nameSigMatchMemberMethod = FindMatchingVirtualMethodOnTypeByNameAndSigWithSlotCheck(memberMethod, currentType, reverseMethodSearch: true);
- if (nameSigMatchMemberMethod != null)
+ if (nameSigMatchMemberMethod != null && nameSigMatchMemberMethod != memberMethod)
{
if (separatedMethods == null)
separatedMethods = new MethodDescHashtable();
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/FieldFixupSignature.cs b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/FieldFixupSignature.cs
index 32cae03ba4016c..88abad8269ac0b 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/FieldFixupSignature.cs
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/FieldFixupSignature.cs
@@ -40,6 +40,11 @@ public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false)
EcmaModule targetModule = factory.SignatureContext.GetTargetModule(_fieldDesc);
SignatureContext innerContext = dataBuilder.EmitFixup(factory, _fixupKind, targetModule, factory.SignatureContext);
+ if (_fixupKind == ReadyToRunFixupKind.Check_FieldOffset)
+ {
+ dataBuilder.EmitInt(_fieldDesc.Offset.AsInt);
+ }
+
dataBuilder.EmitFieldSignature(_fieldDesc, innerContext);
}
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs
index 735ada49e61917..30191aad4e9737 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRunSymbolNodeFactory.cs
@@ -80,6 +80,14 @@ private void CreateNodeCaches()
);
});
+ _checkFieldOffsetCache = new NodeCache<FieldDesc, ISymbolNode>(key =>
+ {
+ return new PrecodeHelperImport(
+ _codegenNodeFactory,
+ new FieldFixupSignature(ReadyToRunFixupKind.Check_FieldOffset, key)
+ );
+ });
+
_interfaceDispatchCells = new NodeCache(cellKey =>
{
return new DelayLoadHelperMethodImport(
@@ -376,6 +384,13 @@ public ISymbolNode FieldOffset(FieldDesc fieldDesc)
return _fieldOffsetCache.GetOrAdd(fieldDesc);
}
+ private NodeCache<FieldDesc, ISymbolNode> _checkFieldOffsetCache;
+
+ public ISymbolNode CheckFieldOffset(FieldDesc fieldDesc)
+ {
+ return _checkFieldOffsetCache.GetOrAdd(fieldDesc);
+ }
+
private NodeCache _fieldBaseOffsetCache;
public ISymbolNode FieldBaseOffset(TypeDesc typeDesc)
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs
index 03be7bab7e8ea0..77781f0ba76067 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/Compiler/ReadyToRunCodegenCompilation.cs
@@ -70,6 +70,17 @@ protected Compilation(
public bool CanInline(MethodDesc caller, MethodDesc callee)
{
+ if (JitConfigProvider.Instance.HasFlag(CorJitFlag.CORJIT_FLAG_DEBUG_CODE))
+ {
+ // If the callee wants debuggable code, don't allow it to be inlined
+ return false;
+ }
+
+ if (callee.IsNoInlining)
+ {
+ return false;
+ }
+
// Check to see if the method requires a security object. This means they call demand and
// shouldn't be inlined.
if (callee.RequireSecObject)
@@ -77,6 +88,18 @@ public bool CanInline(MethodDesc caller, MethodDesc callee)
return false;
}
+ // If the method is MethodImpl'd by another method within the same type, then we have
+ // an issue that the importer will import the wrong body. In this case, we'll just
+ // disallow inlining because getFunctionEntryPoint will do the right thing.
+ if (callee.IsVirtual)
+ {
+ MethodDesc calleeMethodImpl = callee.OwningType.FindVirtualFunctionTargetMethodOnObjectType(callee);
+ if (calleeMethodImpl != callee)
+ {
+ return false;
+ }
+ }
+
return NodeFactory.CompilationModuleGroup.CanInline(caller, callee);
}
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
index 9c416513431dd0..a1e95d8a19cdca 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ILCompiler.ReadyToRun.csproj
@@ -234,6 +234,9 @@
JitInterface\CorInfoTypes.cs
+ <Compile Include="..\..\Common\JitInterface\CorInfoInstructionSet.cs">
+ <Link>JitInterface\CorInfoInstructionSet.cs</Link>
+ </Compile>
JitInterface\JitConfigProvider.cs
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
index 2e72b4c45fe6a9..08c0b884e5c859 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/JitInterface/CorInfoImpl.ReadyToRun.cs
@@ -2126,7 +2126,10 @@ private void EncodeFieldBaseOffset(FieldDesc field, CORINFO_FIELD_INFO* pResult,
{
if (pMT.IsValueType)
{
- throw new NotImplementedException("https://github.com/dotnet/runtime/issues/32630: ENCODE_CHECK_FIELD_OFFSET: root field check import");
+ // ENCODE_CHECK_FIELD_OFFSET
+ pResult->offset = 0;
+ pResult->fieldAccessor = CORINFO_FIELD_ACCESSOR.CORINFO_FIELD_INSTANCE_WITH_BASE;
+ pResult->fieldLookup = CreateConstLookupToSymbol(_compilation.SymbolNodeFactory.CheckFieldOffset(field));
}
else
{
@@ -2175,6 +2178,24 @@ private void getGSCookie(IntPtr* pCookieVal, IntPtr** ppCookieVal)
*ppCookieVal = (IntPtr *)ObjectToHandle(_compilation.NodeFactory.GetReadyToRunHelperCell(ReadyToRunHelper.GSCookie));
}
+ /// <summary>
+ /// Record patchpoint info for the method
+ /// </summary>
+ private void setPatchpointInfo(PatchpointInfo* patchpointInfo)
+ {
+ // No patchpoint info when prejitting
+ throw new NotImplementedException();
+ }
+
+ /// <summary>
+ /// Retrieve OSR info for the method
+ /// </summary>
+ private PatchpointInfo* getOSRInfo(ref uint ilOffset)
+ {
+ // No patchpoint info when prejitting
+ throw new NotImplementedException();
+ }
+
private void getMethodVTableOffset(CORINFO_METHOD_STRUCT_* method, ref uint offsetOfIndirection, ref uint offsetAfterIndirection, ref bool isRelative)
{ throw new NotImplementedException("getMethodVTableOffset"); }
private void expandRawHandleIntrinsic(ref CORINFO_RESOLVED_TOKEN pResolvedToken, ref CORINFO_GENERICHANDLE_RESULT pResult)
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs
index 944f8b288329bc..ac63ae303a1c09 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.ReadyToRun/ObjectWriter/R2RPEBuilder.cs
@@ -620,6 +620,7 @@ public static PEHeaderBuilder Create(Characteristics imageCharacteristics, DllCh
machine: target.MachineFromTarget(),
sectionAlignment: sectionAlignment,
fileAlignment: fileAlignment,
+ imageBase: imageBase,
majorLinkerVersion: PEHeaderConstants.MajorLinkerVersion,
minorLinkerVersion: PEHeaderConstants.MinorLinkerVersion,
majorOperatingSystemVersion: PEHeaderConstants.MajorOperatingSystemVersion,
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunReader.cs b/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunReader.cs
index 4f7c0dcd92cd12..fd60500cb9e3c9 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunReader.cs
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunReader.cs
@@ -386,7 +386,6 @@ private unsafe void Initialize(MetadataReader metadata)
throw new BadImageFormatException("The file is not a ReadyToRun image");
}
- Debug.Assert(!Composite);
_assemblyCache.Add(metadata);
DirectoryEntry r2rHeaderDirectory = PEReader.PEHeaders.CorHeader.ManagedNativeHeaderDirectory;
diff --git a/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs b/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
index e977f304fd11d9..b1425ba7637c3c 100644
--- a/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
+++ b/src/coreclr/src/tools/crossgen2/ILCompiler.Reflection.ReadyToRun/ReadyToRunSignature.cs
@@ -407,6 +407,23 @@ private SignatureDecoder(IAssemblyResolver options, MetadataReader metadataReade
_contextReader = contextReader;
}
+ ///
+ /// Construct the signature decoder by storing the image byte array and offset within the array.
+ /// This variant uses the outer global metadata reader
+ ///
+ /// Dump options and paths
+ /// Signature to parse
+ /// Signature offset within the signature byte array
+ /// Top-level signature context reader
+ private SignatureDecoder(IAssemblyResolver options, byte[] signature, int offset, ReadyToRunReader contextReader)
+ {
+ _metadataReader = contextReader.GetGlobalMetadataReader();
+ _options = options;
+ _image = signature;
+ _offset = offset;
+ _contextReader = contextReader;
+ }
+
///
/// Read a single byte from the signature stream and advances the current offset.
///
@@ -996,7 +1013,9 @@ private void ParseType(StringBuilder builder)
break;
case CorElementType.ELEMENT_TYPE_GENERICINST:
- ParseGenericTypeInstance(builder);
+ SignatureDecoder outerTypeDecoder = new SignatureDecoder(_options, _image, _offset, _contextReader);
+ outerTypeDecoder.ParseGenericTypeInstance(builder);
+ _offset = outerTypeDecoder._offset;
break;
case CorElementType.ELEMENT_TYPE_TYPEDBYREF:
diff --git a/src/coreclr/src/tools/crossgen2/crossgen2/crossgen2.csproj b/src/coreclr/src/tools/crossgen2/crossgen2/crossgen2.csproj
index f1e8aaece83e06..69357bba2b39a7 100644
--- a/src/coreclr/src/tools/crossgen2/crossgen2/crossgen2.csproj
+++ b/src/coreclr/src/tools/crossgen2/crossgen2/crossgen2.csproj
@@ -9,6 +9,7 @@
x64;x86
$(ArchGroup)
false
+ true
$(BinDir)/crossgen2
true
false
diff --git a/src/coreclr/src/tools/crossgen2/jitinterface/jitinterface.h b/src/coreclr/src/tools/crossgen2/jitinterface/jitinterface.h
index 39703fa0028da3..ec3554f679cc65 100644
--- a/src/coreclr/src/tools/crossgen2/jitinterface/jitinterface.h
+++ b/src/coreclr/src/tools/crossgen2/jitinterface/jitinterface.h
@@ -35,6 +35,8 @@ struct JitInterfaceCallbacks
void (* methodMustBeLoadedBeforeCodeIsRun)(void * thisHandle, CorInfoException** ppException, void* method);
void* (* mapMethodDeclToMethodImpl)(void * thisHandle, CorInfoException** ppException, void* method);
void (* getGSCookie)(void * thisHandle, CorInfoException** ppException, void* pCookieVal, void** ppCookieVal);
+ void (* setPatchpointInfo)(void * thisHandle, CorInfoException** ppException, void* patchpointInfo);
+ void* (* getOSRInfo)(void * thisHandle, CorInfoException** ppException, unsigned* ilOffset);
void (* resolveToken)(void * thisHandle, CorInfoException** ppException, void* pResolvedToken);
void (* tryResolveToken)(void * thisHandle, CorInfoException** ppException, void* pResolvedToken);
void (* findSig)(void * thisHandle, CorInfoException** ppException, void* module, unsigned sigTOK, void* context, void* sig);
@@ -408,6 +410,23 @@ class JitInterfaceWrapper
throw pException;
}
+ virtual void setPatchpointInfo(void* patchpointInfo)
+ {
+ CorInfoException* pException = nullptr;
+ _callbacks->setPatchpointInfo(_thisHandle, &pException, patchpointInfo);
+ if (pException != nullptr)
+ throw pException;
+ }
+
+ virtual void* getOSRInfo(unsigned* ilOffset)
+ {
+ CorInfoException* pException = nullptr;
+ void* _ret = _callbacks->getOSRInfo(_thisHandle, &pException, ilOffset);
+ if (pException != nullptr)
+ throw pException;
+ return _ret;
+ }
+
virtual void resolveToken(void* pResolvedToken)
{
CorInfoException* pException = nullptr;
diff --git a/src/coreclr/src/utilcode/ccomprc.cpp b/src/coreclr/src/utilcode/ccomprc.cpp
index 421fbc8aacac50..4e2c9b97a5ed74 100644
--- a/src/coreclr/src/utilcode/ccomprc.cpp
+++ b/src/coreclr/src/utilcode/ccomprc.cpp
@@ -15,6 +15,7 @@ __attribute__((visibility("default"))) DECLARE_NATIVE_STRING_RESOURCE_TABLE(NATI
#endif
#include "sstring.h"
#include "stringarraylist.h"
+#include "corpriv.h"
#include
@@ -672,19 +673,21 @@ HRESULT CCompRC::LoadLibraryThrows(HRESOURCEDLL * pHInst)
// The resources are embeded into the .exe itself for crossgen
*pHInst = GetModuleInst();
#else
+
+#ifdef SELF_NO_HOST
+ _ASSERTE(!"CCompRC::LoadLibraryThrows not implemented for SELF_NO_HOST");
+ hr = E_NOTIMPL;
+#else
PathString rcPath; // Path to resource DLL.
// Try first in the same directory as this dll.
- VALIDATECORECLRCALLBACKS();
-
-
- hr = g_CoreClrCallbacks.m_pfnGetCORSystemDirectory(rcPath);
+ hr = GetCORSystemDirectoryInternaL(rcPath);
if (FAILED(hr))
return hr;
hr = LoadLibraryHelper(pHInst, rcPath);
-
+#endif
#endif // CROSSGEN_COMPILE
diff --git a/src/coreclr/src/utilcode/clrhost.cpp b/src/coreclr/src/utilcode/clrhost.cpp
index 090b533563ae3a..23d6eeaf904886 100644
--- a/src/coreclr/src/utilcode/clrhost.cpp
+++ b/src/coreclr/src/utilcode/clrhost.cpp
@@ -10,11 +10,10 @@
#include "clrhost.h"
#include "utilcode.h"
#include "ex.h"
-#include "hostimpl.h"
#include "clrnt.h"
#include "contract.h"
-CoreClrCallbacks g_CoreClrCallbacks;
+HINSTANCE g_hmodCoreCLR;
thread_local int t_CantAllocCount;
@@ -114,38 +113,12 @@ HMODULE GetCLRModule ()
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_SUPPORTS_DAC; // DAC can call in here since we initialize the SxS callbacks in ClrDataAccess::Initialize.
-#ifdef DACCESS_COMPILE
- // For DAC, "g_CoreClrCallbacks" is populated in InitUtilCode when the latter is invoked
- // from ClrDataAccess::Initialize alongwith a reference to a structure allocated in the
- // host-process address space.
- //
- // This function will be invoked in the host when DAC uses SEHException::GetHr that calls into
- // IsComplusException, which calls into WasThrownByUs that calls GetCLRModule when EH SxS is enabled.
- // However, this function can also be executed within the target space as well.
- //
- // Since DACCop gives the warning to DACize this global that, actually, would be used only
- // in the respective address spaces and does not require marshalling, we need to ignore this
- // warning.
- DACCOP_IGNORE(UndacizedGlobalVariable, "g_CoreClrCallbacks has the dual mode DAC issue.");
-#endif // DACCESS_COMPILE
- VALIDATECORECLRCALLBACKS();
-
- // This is the normal coreclr case - we return the module handle that was captured in our DllMain.
-#ifdef DACCESS_COMPILE
- // For DAC, "g_CoreClrCallbacks" is populated in InitUtilCode when the latter is invoked
- // from ClrDataAccess::Initialize alongwith a reference to a structure allocated in the
- // host-process address space.
- //
- // This function will be invoked in the host when DAC uses SEHException::GetHr that calls into
- // IsComplusException, which calls into WasThrownByUs that calls GetCLRModule when EH SxS is enabled.
- // However, this function can also be executed within the target space as well.
- //
- // Since DACCop gives the warning to DACize this global that, actually, would be used only
- // in the respective address spaces and does not require marshalling, we need to ignore this
- // warning.
- DACCOP_IGNORE(UndacizedGlobalVariable, "g_CoreClrCallbacks has the dual mode DAC issue.");
-#endif // DACCESS_COMPILE
- return g_CoreClrCallbacks.m_hmodCoreCLR;
+ // You got here because the dll that included this copy of utilcode.lib.
+ // did not set g_hmodCoreCLR. The most likely cause is that you're running
+ // a dll (other than coreclr.dll) that links to utilcode.lib.
+ _ASSERTE(g_hmodCoreCLR != NULL);
+
+ return g_hmodCoreCLR;
}
@@ -276,65 +249,3 @@ LoadsTypeHolder::~LoadsTypeHolder()
}
#endif //defined(_DEBUG_IMPL) && defined(ENABLE_CONTRACTS_IMPL)
-
-
-//--------------------------------------------------------------------------
-// Side by side inproc support
-//
-// These are new abstractions designed to support loading multiple CLR
-// versions in the same process.
-//--------------------------------------------------------------------------
-
-
-//--------------------------------------------------------------------------
-// One-time initialized called by coreclr.dll in its dllmain.
-//--------------------------------------------------------------------------
-VOID InitUtilcode(CoreClrCallbacks const & cccallbacks)
-{
- //! WARNING: At the time this function is invoked, the C Runtime has NOT been fully initialized, let alone the CLR.
- //! So don't put in a runtime contract and don't invoke other functions in the CLR (not even _ASSERTE!)
-
- LIMITED_METHOD_CONTRACT;
-
- g_CoreClrCallbacks = cccallbacks;
-}
-
-CoreClrCallbacks const & GetClrCallbacks()
-{
- LIMITED_METHOD_CONTRACT;
-
- VALIDATECORECLRCALLBACKS();
- return g_CoreClrCallbacks;
-}
-
-#ifdef _DEBUG
-void OnUninitializedCoreClrCallbacks()
-{
- // Supports DAC since it can be called from GetCLRModule which supports DAC as well.
- LIMITED_METHOD_DAC_CONTRACT;
-
- // If you got here, the most likely cause of the failure is that you're loading some DLL
- // (other than coreclr.dll) that links to utilcode.lib, or that you're using a nohost
- // variant of utilcode.lib but hitting code that assumes there is a CLR in the process.
- //
- // It is expected that coreclr.dll
- // is the ONLY dll that links to utilcode libraries.
- //
- // If you must introduce a new dll that links to utilcode.lib, it is your responsibility
- // to ensure that that dll invoke InitUtilcode() and forward it the right data from the *correct*
- // loaded instance of coreclr. And you'll have to do without the CRT being initialized.
- //
- // Can't use an _ASSERTE here because even that's broken if we get to this point.
- MessageBoxW(0,
- W("g_CoreClrCallbacks not initialized."),
- W("\n\n")
- W("You got here because the dll that included this copy of utilcode.lib ")
- W("did not call InitUtilcode() The most likely cause is that you're running ")
- W("a dll (other than coreclr.dll) that links to utilcode.lib.")
- ,
- 0);
- _ASSERTE(FALSE);
- DebugBreak();
-}
-#endif // _DEBUG
-
diff --git a/src/coreclr/src/utilcode/clrhost_nodependencies.cpp b/src/coreclr/src/utilcode/clrhost_nodependencies.cpp
index d531182b8a43ea..c8b5377ecfe0ad 100644
--- a/src/coreclr/src/utilcode/clrhost_nodependencies.cpp
+++ b/src/coreclr/src/utilcode/clrhost_nodependencies.cpp
@@ -10,7 +10,6 @@
#include "clrhost.h"
#include "utilcode.h"
#include "ex.h"
-#include "hostimpl.h"
#include "clrnt.h"
#include "contract.h"
@@ -92,53 +91,6 @@ void FreeClrDebugState(LPVOID pTlsData)
#endif //_DEBUG
}
-// This is a drastic shutoff toggle that forces all new threads to fail their CLRInitDebugState calls.
-// We only invoke this if FLS can't allocate its master block, preventing us from tracking the shutoff
-// on a per-thread basis.
-BYTE* GetGlobalContractShutoffFlag()
-{
-#ifdef SELF_NO_HOST
-
- static BYTE gGlobalContractShutoffFlag = 0;
- return &gGlobalContractShutoffFlag;
-#else //!SELF_NO_HOST
- HINSTANCE hmod = GetCLRModule();
- if (!hmod)
- {
- return NULL;
- }
- typedef BYTE*(__stdcall * PGETSHUTOFFADDRFUNC)();
- PGETSHUTOFFADDRFUNC pGetContractShutoffFlagFunc = (PGETSHUTOFFADDRFUNC)GetProcAddress(hmod, "GetAddrOfContractShutoffFlag");
- if (!pGetContractShutoffFlagFunc)
- {
- return NULL;
- }
- return pGetContractShutoffFlagFunc();
-#endif //!SELF_NO_HOST
-}
-
-static BOOL AreContractsShutoff()
-{
- BYTE *pShutoff = GetGlobalContractShutoffFlag();
- if (!pShutoff)
- {
- return FALSE;
- }
- else
- {
- return 0 != *pShutoff;
- }
-}
-
-static VOID ShutoffContracts()
-{
- BYTE *pShutoff = GetGlobalContractShutoffFlag();
- if (pShutoff)
- {
- *pShutoff = 1;
- }
-}
-
//=============================================================================================
// Used to initialize the per-thread ClrDebugState. This is called once per thread (with
// possible exceptions for OOM scenarios.)
@@ -160,41 +112,34 @@ ClrDebugState *CLRInitDebugState()
ClrDebugState *pClrDebugState = NULL;
DbgStateLockData *pNewLockData = NULL;
- if (AreContractsShutoff())
- {
- pNewClrDebugState = NULL;
- }
- else
- {
- // Yuck. We cannot call the hosted allocator for ClrDebugState (it is impossible to maintain a guarantee
- // that none of code paths, many of them called conditionally, don't themselves trigger a ClrDebugState creation.)
- // We have to call the OS directly for this.
+ // Yuck. We cannot call the hosted allocator for ClrDebugState (it is impossible to maintain a guarantee
+ // that none of code paths, many of them called conditionally, don't themselves trigger a ClrDebugState creation.)
+ // We have to call the OS directly for this.
#undef HeapAlloc
#undef GetProcessHeap
- pNewClrDebugState = (ClrDebugState*)::HeapAlloc(GetProcessHeap(), 0, sizeof(ClrDebugState));
- if (pNewClrDebugState != NULL)
- {
- // Only allocate a DbgStateLockData if its owning ClrDebugState was successfully allocated
- pNewLockData = (DbgStateLockData *)::HeapAlloc(GetProcessHeap(), 0, sizeof(DbgStateLockData));
- }
+ pNewClrDebugState = (ClrDebugState*)::HeapAlloc(GetProcessHeap(), 0, sizeof(ClrDebugState));
+ if (pNewClrDebugState != NULL)
+ {
+ // Only allocate a DbgStateLockData if its owning ClrDebugState was successfully allocated
+ pNewLockData = (DbgStateLockData *)::HeapAlloc(GetProcessHeap(), 0, sizeof(DbgStateLockData));
+ }
#define GetProcessHeap() Dont_Use_GetProcessHeap()
#define HeapAlloc(hHeap, dwFlags, dwBytes) Dont_Use_HeapAlloc(hHeap, dwFlags, dwBytes)
- if ((pNewClrDebugState != NULL) && (pNewLockData != NULL))
- {
- // Both allocations succeeded, so initialize the structures, and have
- // pNewClrDebugState point to pNewLockData. If either of the allocations
- // failed, we'll use gBadClrDebugState for this thread, and free whichever of
- // pNewClrDebugState or pNewLockData actually did get allocated (if either did).
- // (See code in this function below, outside this block.)
-
- pNewClrDebugState->SetStartingValues();
- pNewClrDebugState->ViolationMaskSet( CanFreeMe );
- _ASSERTE(!(pNewClrDebugState->ViolationMask() & BadDebugState));
-
- pNewLockData->SetStartingValues();
- pNewClrDebugState->SetDbgStateLockData(pNewLockData);
- }
+ if ((pNewClrDebugState != NULL) && (pNewLockData != NULL))
+ {
+ // Both allocations succeeded, so initialize the structures, and have
+ // pNewClrDebugState point to pNewLockData. If either of the allocations
+ // failed, we'll use gBadClrDebugState for this thread, and free whichever of
+ // pNewClrDebugState or pNewLockData actually did get allocated (if either did).
+ // (See code in this function below, outside this block.)
+
+ pNewClrDebugState->SetStartingValues();
+ pNewClrDebugState->ViolationMaskSet( CanFreeMe );
+ _ASSERTE(!(pNewClrDebugState->ViolationMask() & BadDebugState));
+
+ pNewLockData->SetStartingValues();
+ pNewClrDebugState->SetDbgStateLockData(pNewLockData);
}
@@ -531,330 +476,3 @@ BOOL DbgIsExecutable(LPVOID lpMem, SIZE_T length)
}
#endif //_DEBUG
-
-
-
-
-// Access various ExecutionEngine support services, like a logical TLS that abstracts
-// fiber vs. thread issues. We obtain it from a DLL export via the shim.
-
-typedef IExecutionEngine * (__stdcall * IEE_FPTR) ();
-
-//
-// Access various ExecutionEngine support services, like a logical TLS that abstracts
-// fiber vs. thread issues.
-// From an IExecutionEngine is possible to get other services via QueryInterfaces such
-// as memory management
-//
-IExecutionEngine *g_pExecutionEngine = NULL;
-
-#ifdef SELF_NO_HOST
-BYTE g_ExecutionEngineInstance[sizeof(UtilExecutionEngine)];
-#endif
-
-
-IExecutionEngine *GetExecutionEngine()
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- SUPPORTS_DAC_HOST_ONLY;
-
- if (g_pExecutionEngine == NULL)
- {
- IExecutionEngine* pExecutionEngine;
-#ifdef SELF_NO_HOST
- // Create a local copy on the stack and then copy it over to the static instance.
- // This avoids race conditions caused by multiple initializations of vtable in the constructor
- UtilExecutionEngine local;
- memcpy((void*)&g_ExecutionEngineInstance, (void*)&local, sizeof(UtilExecutionEngine));
- pExecutionEngine = (IExecutionEngine*)(UtilExecutionEngine*)&g_ExecutionEngineInstance;
-#else
- // statically linked.
- VALIDATECORECLRCALLBACKS();
- pExecutionEngine = g_CoreClrCallbacks.m_pfnIEE();
-#endif // SELF_NO_HOST
-
- //We use an explicit memory barrier here so that the reference g_pExecutionEngine is valid when
- //it is used, This ia a requirement on platforms with weak memory model . We cannot use VolatileStore
- //because they are the same as normal assignment for DAC builds [see code:VOLATILE]
-
- MemoryBarrier();
- g_pExecutionEngine = pExecutionEngine;
- }
-
- // It's a bug to ask for the ExecutionEngine interface in scenarios where the
- // ExecutionEngine cannot be loaded.
- _ASSERTE(g_pExecutionEngine);
- return g_pExecutionEngine;
-} // GetExecutionEngine
-
-IEEMemoryManager * GetEEMemoryManager()
-{
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- SUPPORTS_DAC_HOST_ONLY;
-
- static IEEMemoryManager *pEEMemoryManager = NULL;
- if (NULL == pEEMemoryManager) {
- IExecutionEngine *pExecutionEngine = GetExecutionEngine();
- _ASSERTE(pExecutionEngine);
-
- // It is dangerous to pass a global pointer to QueryInterface. The pointer may be set
- // to NULL in the call. Imagine that thread 1 calls QI, and get a pointer. But before thread 1
- // returns the pointer to caller, thread 2 calls QI and the pointer is set to NULL.
- IEEMemoryManager *pEEMM;
- pExecutionEngine->QueryInterface(IID_IEEMemoryManager, (void**)&pEEMM);
- pEEMemoryManager = pEEMM;
- }
- // It's a bug to ask for the MemoryManager interface in scenarios where it cannot be loaded.
- _ASSERTE(pEEMemoryManager);
- return pEEMemoryManager;
-}
-
-// should return some error code or exception
-void SetExecutionEngine(IExecutionEngine *pExecutionEngine)
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
-
- _ASSERTE(pExecutionEngine && !g_pExecutionEngine);
- if (!g_pExecutionEngine) {
- g_pExecutionEngine = pExecutionEngine;
- g_pExecutionEngine->AddRef();
- }
-}
-
-CRITSEC_COOKIE ClrCreateCriticalSection(CrstType crstType, CrstFlags flags)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->CreateLock(NULL, (LPCSTR)crstType, flags);
-}
-
-HRESULT ClrDeleteCriticalSection(CRITSEC_COOKIE cookie)
-{
- WRAPPER_NO_CONTRACT;
- GetExecutionEngine()->DestroyLock(cookie);
- return S_OK;
-}
-
-void ClrEnterCriticalSection(CRITSEC_COOKIE cookie)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->AcquireLock(cookie);
-}
-
-void ClrLeaveCriticalSection(CRITSEC_COOKIE cookie)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ReleaseLock(cookie);
-}
-
-EVENT_COOKIE ClrCreateAutoEvent(BOOL bInitialState)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->CreateAutoEvent(bInitialState);
-}
-
-EVENT_COOKIE ClrCreateManualEvent(BOOL bInitialState)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->CreateManualEvent(bInitialState);
-}
-
-void ClrCloseEvent(EVENT_COOKIE event)
-{
- WRAPPER_NO_CONTRACT;
-
- GetExecutionEngine()->CloseEvent(event);
-}
-
-BOOL ClrSetEvent(EVENT_COOKIE event)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrSetEvent(event);
-}
-
-BOOL ClrResetEvent(EVENT_COOKIE event)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrResetEvent(event);
-}
-
-DWORD ClrWaitEvent(EVENT_COOKIE event, DWORD dwMilliseconds, BOOL bAlertable)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->WaitForEvent(event, dwMilliseconds, bAlertable);
-}
-
-SEMAPHORE_COOKIE ClrCreateSemaphore(DWORD dwInitial, DWORD dwMax)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrCreateSemaphore(dwInitial, dwMax);
-}
-
-void ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore)
-{
- WRAPPER_NO_CONTRACT;
-
- GetExecutionEngine()->ClrCloseSemaphore(semaphore);
-}
-
-BOOL ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore, LONG lReleaseCount, LONG *lpPreviousCount)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrReleaseSemaphore(semaphore, lReleaseCount, lpPreviousCount);
-}
-
-DWORD ClrWaitSemaphore(SEMAPHORE_COOKIE semaphore, DWORD dwMilliseconds, BOOL bAlertable)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrWaitForSemaphore(semaphore, dwMilliseconds, bAlertable);
-}
-
-MUTEX_COOKIE ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,
- BOOL bInitialOwner,
- LPCTSTR lpName)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrCreateMutex(lpMutexAttributes, bInitialOwner, lpName);
-}
-
-void ClrCloseMutex(MUTEX_COOKIE mutex)
-{
- WRAPPER_NO_CONTRACT;
-
- GetExecutionEngine()->ClrCloseMutex(mutex);
-}
-
-BOOL ClrReleaseMutex(MUTEX_COOKIE mutex)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrReleaseMutex(mutex);
-}
-
-DWORD ClrWaitForMutex(MUTEX_COOKIE mutex, DWORD dwMilliseconds, BOOL bAlertable)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrWaitForMutex(mutex, dwMilliseconds, bAlertable);
-}
-
-DWORD ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetExecutionEngine()->ClrSleepEx(dwMilliseconds, bAlertable);
-}
-
-LPVOID ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect)
-{
- WRAPPER_NO_CONTRACT;
-
- LPVOID result = GetEEMemoryManager()->ClrVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
- LOG((LF_EEMEM, LL_INFO100000, "ClrVirtualAlloc (0x%p, 0x%06x, 0x%06x, 0x%02x) = 0x%p\n", lpAddress, dwSize, flAllocationType, flProtect, result));
-
- return result;
-}
-
-BOOL ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType)
-{
- WRAPPER_NO_CONTRACT;
-
- LOG((LF_EEMEM, LL_INFO100000, "ClrVirtualFree (0x%p, 0x%06x, 0x%04x)\n", lpAddress, dwSize, dwFreeType));
- BOOL result = GetEEMemoryManager()->ClrVirtualFree(lpAddress, dwSize, dwFreeType);
-
- return result;
-}
-
-SIZE_T ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength)
-{
- WRAPPER_NO_CONTRACT;
-
- LOG((LF_EEMEM, LL_INFO100000, "ClrVirtualQuery (0x%p)\n", lpAddress));
- return GetEEMemoryManager()->ClrVirtualQuery(lpAddress, lpBuffer, dwLength);
-}
-
-BOOL ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect)
-{
- WRAPPER_NO_CONTRACT;
-
- LOG((LF_EEMEM, LL_INFO100000, "ClrVirtualProtect(0x%p, 0x%06x, 0x%02x)\n", lpAddress, dwSize, flNewProtect));
- return GetEEMemoryManager()->ClrVirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect);
-}
-
-HANDLE ClrGetProcessHeap()
-{
- WRAPPER_NO_CONTRACT;
-
- return GetEEMemoryManager()->ClrGetProcessHeap();
-}
-
-HANDLE ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetEEMemoryManager()->ClrHeapCreate(flOptions, dwInitialSize, dwMaximumSize);
-}
-
-BOOL ClrHeapDestroy(HANDLE hHeap)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetEEMemoryManager()->ClrHeapDestroy(hHeap);
-}
-
-LPVOID ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, S_SIZE_T dwBytes)
-{
- WRAPPER_NO_CONTRACT;
-
- if(dwBytes.IsOverflow()) return NULL;
-
- LPVOID result = GetEEMemoryManager()->ClrHeapAlloc(hHeap, dwFlags, dwBytes.Value());
-
- return result;
-}
-
-BOOL ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
-{
- WRAPPER_NO_CONTRACT;
-
- BOOL result = GetEEMemoryManager()->ClrHeapFree(hHeap, dwFlags, lpMem);
-
- return result;
-}
-
-BOOL ClrHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem)
-{
- WRAPPER_NO_CONTRACT;
-
- return GetEEMemoryManager()->ClrHeapValidate(hHeap, dwFlags, lpMem);
-}
-
-HANDLE ClrGetProcessExecutableHeap()
-{
- WRAPPER_NO_CONTRACT;
-
- return GetEEMemoryManager()->ClrGetProcessExecutableHeap();
-}
-
-void GetLastThrownObjectExceptionFromThread(void **ppvException)
-{
- WRAPPER_NO_CONTRACT;
-
- GetExecutionEngine()->GetLastThrownObjectExceptionFromThread(ppvException);
-}
diff --git a/src/coreclr/src/utilcode/ex.cpp b/src/coreclr/src/utilcode/ex.cpp
index d6690551a027c5..35bcd6c33652a3 100644
--- a/src/coreclr/src/utilcode/ex.cpp
+++ b/src/coreclr/src/utilcode/ex.cpp
@@ -30,7 +30,7 @@ GVAL_IMPL_INIT(HRESULT, g_hrFatalError, S_OK);
// Helper function to get an exception object from outside the exception. In
// the CLR, it may be from the Thread object. Non-CLR users have no thread object,
// and it will do nothing.
-void GetLastThrownObjectExceptionFromThread(void **ppvException);
+void GetLastThrownObjectExceptionFromThread(Exception **ppException);
Exception *Exception::g_OOMException = NULL;
@@ -865,7 +865,7 @@ Exception* DelegatingException::GetDelegate()
{
// .. get it now. NULL in case there isn't one and we take default action.
m_delegatedException = NULL;
- GetLastThrownObjectExceptionFromThread(reinterpret_cast(&m_delegatedException));
+ GetLastThrownObjectExceptionFromThread(&m_delegatedException);
}
return m_delegatedException;
diff --git a/src/coreclr/src/utilcode/hostimpl.cpp b/src/coreclr/src/utilcode/hostimpl.cpp
index 2354d18b8b74dc..6cfaf67c675790 100644
--- a/src/coreclr/src/utilcode/hostimpl.cpp
+++ b/src/coreclr/src/utilcode/hostimpl.cpp
@@ -2,237 +2,87 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-// ==++==
-//
-
-//
-//
-
-//
-// ==--==
-
#include "stdafx.h"
#include "mscoree.h"
#include "clrinternal.h"
-#include "hostimpl.h"
-
-// to avoid to include clrhost.h in this file
-#ifdef FAILPOINTS_ENABLED
-extern int RFS_HashStack();
-#endif
-
-#ifdef SELF_NO_HOST
-HANDLE (*g_fnGetExecutableHeapHandle)();
-#endif
+#include "clrhost.h"
+#include "ex.h"
thread_local size_t t_ThreadType;
-HRESULT STDMETHODCALLTYPE UtilExecutionEngine::QueryInterface(REFIID id, void **pInterface)
-{
- if (!pInterface)
- return E_POINTER;
-
- *pInterface = NULL;
-
- if (id == IID_IExecutionEngine)
- *pInterface = (IExecutionEngine *)this;
- else if (id == IID_IEEMemoryManager)
- *pInterface = (IEEMemoryManager *)this;
- else if (id == IID_IUnknown)
- *pInterface = (IUnknown *)(IExecutionEngine *)this;
- else
- return E_NOINTERFACE;
-
- AddRef();
- return S_OK;
-} // UtilExecutionEngine::QueryInterface
-
-//
-// lifetime of this object is that of the app it lives in so no point in AddRef/Release
-//
-ULONG STDMETHODCALLTYPE UtilExecutionEngine::AddRef()
-{
- return 1;
-}
-
-ULONG STDMETHODCALLTYPE UtilExecutionEngine::Release()
-{
- return 1;
-}
-
-CRITSEC_COOKIE STDMETHODCALLTYPE UtilExecutionEngine::CreateLock(LPCSTR szTag, LPCSTR level, CrstFlags flags)
+CRITSEC_COOKIE ClrCreateCriticalSection(CrstType crstType, CrstFlags flags)
{
CRITICAL_SECTION *cs = (CRITICAL_SECTION*)malloc(sizeof(CRITICAL_SECTION));
InitializeCriticalSection(cs);
return (CRITSEC_COOKIE)cs;
}
-void STDMETHODCALLTYPE UtilExecutionEngine::DestroyLock(CRITSEC_COOKIE lock)
-{
- _ASSERTE(lock);
- DeleteCriticalSection((CRITICAL_SECTION*)lock);
- free(lock);
-}
-
-void STDMETHODCALLTYPE UtilExecutionEngine::AcquireLock(CRITSEC_COOKIE lock)
-{
- _ASSERTE(lock);
- EnterCriticalSection((CRITICAL_SECTION*)lock);
-}
-
-void STDMETHODCALLTYPE UtilExecutionEngine::ReleaseLock(CRITSEC_COOKIE lock)
-{
- _ASSERTE(lock);
- LeaveCriticalSection((CRITICAL_SECTION*)lock);
-}
-
-EVENT_COOKIE STDMETHODCALLTYPE UtilExecutionEngine::CreateAutoEvent(BOOL bInitialState)
-{
- HANDLE handle = WszCreateEvent(NULL, FALSE, bInitialState, NULL);
- _ASSERTE(handle);
- return (EVENT_COOKIE)handle;
-}
-
-EVENT_COOKIE STDMETHODCALLTYPE UtilExecutionEngine::CreateManualEvent(BOOL bInitialState)
-{
- HANDLE handle = WszCreateEvent(NULL, TRUE, bInitialState, NULL);
- _ASSERTE(handle);
- return (EVENT_COOKIE)handle;
-}
-
-void STDMETHODCALLTYPE UtilExecutionEngine::CloseEvent(EVENT_COOKIE event)
-{
- _ASSERTE(event);
- CloseHandle((HANDLE)event);
-}
-
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrSetEvent(EVENT_COOKIE event)
-{
- _ASSERTE(event);
- return SetEvent((HANDLE)event);
-}
-
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrResetEvent(EVENT_COOKIE event)
-{
- _ASSERTE(event);
- return ResetEvent((HANDLE)event);
-}
-
-DWORD STDMETHODCALLTYPE UtilExecutionEngine::WaitForEvent(EVENT_COOKIE event, DWORD dwMilliseconds, BOOL bAlertable)
-{
- _ASSERTE(event);
- return WaitForSingleObjectEx((HANDLE)event, dwMilliseconds, bAlertable);
-}
-
-DWORD STDMETHODCALLTYPE UtilExecutionEngine::WaitForSingleObject(HANDLE handle, DWORD dwMilliseconds)
-{
- _ASSERTE(handle);
- return WaitForSingleObjectEx(handle, dwMilliseconds, FALSE);
-}
-
-SEMAPHORE_COOKIE STDMETHODCALLTYPE UtilExecutionEngine::ClrCreateSemaphore(DWORD dwInitial, DWORD dwMax)
-{
- HANDLE handle = WszCreateSemaphore(NULL, (LONG)dwInitial, (LONG)dwMax, NULL);
- _ASSERTE(handle);
- return (SEMAPHORE_COOKIE)handle;
-}
-
-void STDMETHODCALLTYPE UtilExecutionEngine::ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore)
-{
- _ASSERTE(semaphore);
- CloseHandle((HANDLE)semaphore);
-}
-
-DWORD STDMETHODCALLTYPE UtilExecutionEngine::ClrWaitForSemaphore(SEMAPHORE_COOKIE semaphore, DWORD dwMilliseconds, BOOL bAlertable)
-{
- _ASSERTE(semaphore);
- return WaitForSingleObjectEx((HANDLE)semaphore, dwMilliseconds, bAlertable);
-}
-
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore, LONG lReleaseCount, LONG *lpPreviousCount)
-{
- _ASSERTE(semaphore);
- return ReleaseSemaphore((HANDLE)semaphore, lReleaseCount, lpPreviousCount);
-}
-
-MUTEX_COOKIE STDMETHODCALLTYPE UtilExecutionEngine::ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,
- BOOL bInitialOwner,
- LPCTSTR lpName)
-{
- return (MUTEX_COOKIE)WszCreateMutex(lpMutexAttributes,bInitialOwner,lpName);
-}
-
-void STDMETHODCALLTYPE UtilExecutionEngine::ClrCloseMutex(MUTEX_COOKIE mutex)
-{
- _ASSERTE(mutex);
- CloseHandle((HANDLE)mutex);
-}
-
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrReleaseMutex(MUTEX_COOKIE mutex)
+void ClrDeleteCriticalSection(CRITSEC_COOKIE cookie)
{
- _ASSERTE(mutex);
- return ReleaseMutex((HANDLE)mutex);
+ _ASSERTE(cookie);
+ DeleteCriticalSection((CRITICAL_SECTION*)cookie);
+ free(cookie);
}
-DWORD STDMETHODCALLTYPE UtilExecutionEngine::ClrWaitForMutex(MUTEX_COOKIE mutex,
- DWORD dwMilliseconds,
- BOOL bAlertable)
+void ClrEnterCriticalSection(CRITSEC_COOKIE cookie)
{
- _ASSERTE(mutex);
- return WaitForSingleObjectEx ((HANDLE)mutex, dwMilliseconds, bAlertable);
+ _ASSERTE(cookie);
+ EnterCriticalSection((CRITICAL_SECTION*)cookie);
}
-DWORD STDMETHODCALLTYPE UtilExecutionEngine::ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable)
+void ClrLeaveCriticalSection(CRITSEC_COOKIE cookie)
{
- return SleepEx (dwMilliseconds, bAlertable);
+ _ASSERTE(cookie);
+ LeaveCriticalSection((CRITICAL_SECTION*)cookie);
}
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrAllocationDisallowed()
+DWORD ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable)
{
- return FALSE;
+ return SleepEx(dwMilliseconds, bAlertable);
}
-LPVOID STDMETHODCALLTYPE UtilExecutionEngine::ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect)
+LPVOID ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect)
{
#ifdef FAILPOINTS_ENABLED
- if (RFS_HashStack ())
- return NULL;
+ if (RFS_HashStack ())
+ return NULL;
#endif
return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
}
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType)
+BOOL ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType)
{
return VirtualFree(lpAddress, dwSize, dwFreeType);
}
-SIZE_T STDMETHODCALLTYPE UtilExecutionEngine::ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength)
+SIZE_T ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength)
{
return VirtualQuery(lpAddress, lpBuffer, dwLength);
}
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect)
+BOOL ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect)
{
return VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect);
}
-HANDLE STDMETHODCALLTYPE UtilExecutionEngine::ClrGetProcessHeap()
+#undef GetProcessHeap
+HANDLE ClrGetProcessHeap()
{
return GetProcessHeap();
}
+#define GetProcessHeap() Dont_Use_GetProcessHeap()
-HANDLE STDMETHODCALLTYPE UtilExecutionEngine::ClrGetProcessExecutableHeap()
+HANDLE ClrGetProcessExecutableHeap()
{
#ifndef CROSSGEN_COMPILE
- _ASSERTE(g_fnGetExecutableHeapHandle);
- return (g_fnGetExecutableHeapHandle != NULL) ? g_fnGetExecutableHeapHandle() : NULL;
+ return NULL;
#else
- return GetProcessHeap();
+ return ClrGetProcessHeap();
#endif
}
-HANDLE STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize)
+HANDLE ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize)
{
#ifdef TARGET_UNIX
return NULL;
@@ -241,7 +91,7 @@ HANDLE STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapCreate(DWORD flOptions, SIZ
#endif
}
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapDestroy(HANDLE hHeap)
+BOOL ClrHeapDestroy(HANDLE hHeap)
{
#ifdef TARGET_UNIX
return FALSE;
@@ -250,43 +100,33 @@ BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapDestroy(HANDLE hHeap)
#endif
}
-LPVOID STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes)
+#undef HeapAlloc
+LPVOID ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, S_SIZE_T dwBytes)
{
+ if (dwBytes.IsOverflow()) return NULL;
+
#ifdef FAILPOINTS_ENABLED
- if (RFS_HashStack ())
- return NULL;
+ if (RFS_HashStack())
+ return NULL;
#endif
- return HeapAlloc(hHeap, dwFlags, dwBytes);
-}
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
-{
- return HeapFree(hHeap, dwFlags, lpMem);
+ return HeapAlloc(hHeap, dwFlags, dwBytes.Value());
}
+#define HeapAlloc(hHeap, dwFlags, dwBytes) Dont_Use_HeapAlloc(hHeap, dwFlags, dwBytes)
-BOOL STDMETHODCALLTYPE UtilExecutionEngine::ClrHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem)
+#undef HeapFree
+BOOL ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
{
-#ifdef TARGET_UNIX
- return FALSE;
-#else
- return HeapValidate(hHeap, dwFlags, lpMem);
-#endif
+ return HeapFree(hHeap, dwFlags, lpMem);
}
-
+#define HeapFree(hHeap, dwFlags, lpMem) Dont_Use_HeapFree(hHeap, dwFlags, lpMem)
//------------------------------------------------------------------------------
// Helper function to get an exception from outside the exception. In
// the CLR, it may be from the Thread object. Non-CLR users have no thread object,
// and it will do nothing.
-void UtilExecutionEngine::GetLastThrownObjectExceptionFromThread(void **ppvException)
+void GetLastThrownObjectExceptionFromThread(Exception** ppException)
{
- // Declare class so we can declare Exception**
- class Exception;
-
- // Cast to our real type.
- Exception **ppException = reinterpret_cast(ppvException);
-
*ppException = NULL;
-} // UtilExecutionEngine::GetLastThrownObjectExceptionFromThread
-
+}
diff --git a/src/coreclr/src/utilcode/hostimpl.h b/src/coreclr/src/utilcode/hostimpl.h
deleted file mode 100644
index 326a3ee1eb2950..00000000000000
--- a/src/coreclr/src/utilcode/hostimpl.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-// ==++==
-//
-
-//
-//
-
-//
-// ==--==
-
-#ifndef __HOSTIMPL_H__
-#define __HOSTIMPL_H__
-
-#ifdef SELF_NO_HOST
-extern HANDLE g_ExecutableHeapHandle;
-#endif
-
-// We have an internal class that is used to make sure the hosting api
-// is forwarded to the os. This is a must for the shim because mscorwks
-// which normally contains the implementation of the hosting api has not
-// been loaded yet. In fact the shim is the one component responsible
-// for that loading
-class UtilExecutionEngine : public IExecutionEngine, public IEEMemoryManager
-{
-private:
-
- //***************************************************************************
- // IUnknown methods
- //***************************************************************************
-
- HRESULT STDMETHODCALLTYPE QueryInterface(REFIID id, void **pInterface);
- ULONG STDMETHODCALLTYPE AddRef();
- ULONG STDMETHODCALLTYPE Release();
-
- //***************************************************************************
- // IExecutionEngine methods for locking
- //***************************************************************************
-
- CRITSEC_COOKIE STDMETHODCALLTYPE CreateLock(LPCSTR szTag, LPCSTR level, CrstFlags flags);
- void STDMETHODCALLTYPE DestroyLock(CRITSEC_COOKIE lock);
- void STDMETHODCALLTYPE AcquireLock(CRITSEC_COOKIE lock);
- void STDMETHODCALLTYPE ReleaseLock(CRITSEC_COOKIE lock);
-
- EVENT_COOKIE STDMETHODCALLTYPE CreateAutoEvent(BOOL bInitialState);
- EVENT_COOKIE STDMETHODCALLTYPE CreateManualEvent(BOOL bInitialState);
- void STDMETHODCALLTYPE CloseEvent(EVENT_COOKIE event);
- BOOL STDMETHODCALLTYPE ClrSetEvent(EVENT_COOKIE event);
- BOOL STDMETHODCALLTYPE ClrResetEvent(EVENT_COOKIE event);
- DWORD STDMETHODCALLTYPE WaitForEvent(EVENT_COOKIE event, DWORD dwMilliseconds, BOOL bAlertable);
- DWORD STDMETHODCALLTYPE WaitForSingleObject(HANDLE handle, DWORD dwMilliseconds);
-
- SEMAPHORE_COOKIE STDMETHODCALLTYPE ClrCreateSemaphore(DWORD dwInitial, DWORD dwMax);
- void STDMETHODCALLTYPE ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore);
- DWORD STDMETHODCALLTYPE ClrWaitForSemaphore(SEMAPHORE_COOKIE semaphore, DWORD dwMilliseconds, BOOL bAlertable);
- BOOL STDMETHODCALLTYPE ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore, LONG lReleaseCount, LONG *lpPreviousCount);
-
- MUTEX_COOKIE STDMETHODCALLTYPE ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,
- BOOL bInitialOwner,
- LPCTSTR lpName);
- void STDMETHODCALLTYPE ClrCloseMutex(MUTEX_COOKIE mutex);
- BOOL STDMETHODCALLTYPE ClrReleaseMutex(MUTEX_COOKIE mutex);
- DWORD STDMETHODCALLTYPE ClrWaitForMutex(MUTEX_COOKIE mutex,
- DWORD dwMilliseconds,
- BOOL bAlertable);
-
- DWORD STDMETHODCALLTYPE ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable);
-
- BOOL STDMETHODCALLTYPE ClrAllocationDisallowed();
-
- void STDMETHODCALLTYPE GetLastThrownObjectExceptionFromThread(void **ppvException);
-
- //***************************************************************************
- // IEEMemoryManager methods for locking
- //***************************************************************************
- LPVOID STDMETHODCALLTYPE ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect);
- BOOL STDMETHODCALLTYPE ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType);
- SIZE_T STDMETHODCALLTYPE ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength);
- BOOL STDMETHODCALLTYPE ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect);
- HANDLE STDMETHODCALLTYPE ClrGetProcessHeap();
- HANDLE STDMETHODCALLTYPE ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize);
- BOOL STDMETHODCALLTYPE ClrHeapDestroy(HANDLE hHeap);
- LPVOID STDMETHODCALLTYPE ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes);
- BOOL STDMETHODCALLTYPE ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem);
- BOOL STDMETHODCALLTYPE ClrHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
- HANDLE STDMETHODCALLTYPE ClrGetProcessExecutableHeap();
-
-}; // class UtilExecutionEngine
-
-#endif //__HOSTIMPL_H__
diff --git a/src/coreclr/src/utilcode/log.cpp b/src/coreclr/src/utilcode/log.cpp
index b4907e325f8b5c..75097c574918cd 100644
--- a/src/coreclr/src/utilcode/log.cpp
+++ b/src/coreclr/src/utilcode/log.cpp
@@ -34,7 +34,7 @@
static DWORD LogFlags = 0;
static CQuickWSTR szLogFileName;
static HANDLE LogFileHandle = INVALID_HANDLE_VALUE;
-static volatile MUTEX_COOKIE LogFileMutex = 0;
+static volatile HANDLE LogFileMutex = 0;
static DWORD LogFacilityMask = LF_ALL;
static DWORD LogFacilityMask2 = 0;
static DWORD LogVMLevel = LL_INFO100;
@@ -160,7 +160,7 @@ VOID EnterLogLock()
if(LogFileMutex != 0)
{
DWORD status;
- status = ClrWaitForMutex(LogFileMutex, INFINITE, FALSE);
+ status = WaitForSingleObjectEx(LogFileMutex, INFINITE, FALSE);
_ASSERTE(WAIT_OBJECT_0 == status);
}
}
@@ -173,7 +173,7 @@ VOID LeaveLogLock()
if(LogFileMutex != 0)
{
BOOL success;
- success = ClrReleaseMutex(LogFileMutex);
+ success = ReleaseMutex(LogFileMutex);
_ASSERTE(success);
}
}
@@ -186,11 +186,11 @@ VOID InitializeLogging()
if (bLoggingInitialized)
return;
- MUTEX_COOKIE mutexCookie = ClrCreateMutex(NULL, FALSE, NULL);
- _ASSERTE(mutexCookie != 0);
- if (InterlockedCompareExchangeT(&LogFileMutex, mutexCookie, 0) != 0)
+ HANDLE mutex = WszCreateMutex(NULL, FALSE, NULL);
+ _ASSERTE(mutex != 0);
+ if (InterlockedCompareExchangeT(&LogFileMutex, mutex, 0) != 0)
{
- ClrCloseMutex(mutexCookie);
+ CloseHandle(mutex);
}
EnterLogLock();
diff --git a/src/coreclr/src/utilcode/longfilepathwrappers.cpp b/src/coreclr/src/utilcode/longfilepathwrappers.cpp
index 4a0bf3b86b424a..319685034c1a99 100644
--- a/src/coreclr/src/utilcode/longfilepathwrappers.cpp
+++ b/src/coreclr/src/utilcode/longfilepathwrappers.cpp
@@ -770,7 +770,7 @@ FindFirstFileExWrapper(
#ifdef HOST_WINDOWS
#if ! defined(DACCESS_COMPILE) && !defined(SELF_NO_HOST)
-extern HINSTANCE g_pMSCorEE;
+extern HINSTANCE g_hThisInst;
#endif// ! defined(DACCESS_COMPILE) && !defined(SELF_NO_HOST)
BOOL PAL_GetPALDirectoryWrapper(SString& pbuffer)
@@ -783,7 +783,7 @@ BOOL PAL_GetPALDirectoryWrapper(SString& pbuffer)
HINSTANCE hinst = NULL;
#if ! defined(DACCESS_COMPILE) && !defined(SELF_NO_HOST)
- hinst = g_pMSCorEE;
+ hinst = g_hThisInst;
#endif// ! defined(DACCESS_COMPILE) && !defined(SELF_NO_HOST)
#ifndef CROSSGEN_COMPILE
diff --git a/src/coreclr/src/utilcode/utsem.cpp b/src/coreclr/src/utilcode/utsem.cpp
index c5387d31755870..79c8b3abc6a23a 100644
--- a/src/coreclr/src/utilcode/utsem.cpp
+++ b/src/coreclr/src/utilcode/utsem.cpp
@@ -122,8 +122,8 @@ UTSemReadWrite::UTSemReadWrite()
#endif //SELF_NO_HOST && !CROSSGEN_COMPILE
m_dwFlag = 0;
- m_pReadWaiterSemaphore = NULL;
- m_pWriteWaiterEvent = NULL;
+ m_hReadWaiterSemaphore = NULL;
+ m_hWriteWaiterEvent = NULL;
}
@@ -143,11 +143,11 @@ UTSemReadWrite::~UTSemReadWrite()
_ASSERTE_MSG((m_dwFlag == (ULONG)0), "Destroying a UTSemReadWrite while in use");
- if (m_pReadWaiterSemaphore != NULL)
- delete m_pReadWaiterSemaphore;
+ if (m_hReadWaiterSemaphore != NULL)
+ CloseHandle(m_hReadWaiterSemaphore);
- if (m_pWriteWaiterEvent != NULL)
- delete m_pWriteWaiterEvent;
+ if (m_hWriteWaiterEvent != NULL)
+ CloseHandle(m_hWriteWaiterEvent);
}
//=======================================================================================
@@ -164,30 +164,17 @@ UTSemReadWrite::Init()
}
CONTRACTL_END;
- HRESULT hr = S_OK;
- _ASSERTE(m_pReadWaiterSemaphore == NULL);
- _ASSERTE(m_pWriteWaiterEvent == NULL);
+ _ASSERTE(m_hReadWaiterSemaphore == NULL);
+ _ASSERTE(m_hWriteWaiterEvent == NULL);
- EX_TRY
- {
- CONTRACT_VIOLATION(ThrowsViolation);
-
- m_pReadWaiterSemaphore = new Semaphore();
- m_pReadWaiterSemaphore->Create(0, MAXLONG);
+ m_hReadWaiterSemaphore = WszCreateSemaphore(NULL, 0, MAXLONG, NULL);
+ IfNullRet(m_hReadWaiterSemaphore);
- m_pWriteWaiterEvent = new Event();
- m_pWriteWaiterEvent->CreateAutoEvent(FALSE);
- }
- EX_CATCH
- {
- hr = E_OUTOFMEMORY;
- }
- EX_END_CATCH(SwallowAllExceptions)
- IfFailGo(hr);
+ m_hWriteWaiterEvent = WszCreateEvent(NULL, FALSE, FALSE, NULL);
+ IfNullRet(m_hWriteWaiterEvent);
-ErrExit:
- return hr;
+ return S_OK;
} // UTSemReadWrite::Init
/******************************************************************************
@@ -266,7 +253,7 @@ HRESULT UTSemReadWrite::LockRead()
{ // Try to add waiting reader and then wait for signal
if (dwFlag == InterlockedCompareExchangeT (&m_dwFlag, dwFlag + READWAITERS_INCR, dwFlag))
{
- m_pReadWaiterSemaphore->Wait(INFINITE, FALSE);
+ WaitForSingleObjectEx(m_hReadWaiterSemaphore, INFINITE, FALSE);
break;
}
}
@@ -354,7 +341,7 @@ HRESULT UTSemReadWrite::LockWrite()
{ // Try to add waiting writer and then wait for signal
if (dwFlag == InterlockedCompareExchangeT (&m_dwFlag, dwFlag + WRITEWAITERS_INCR, dwFlag))
{
- m_pWriteWaiterEvent->Wait(INFINITE, FALSE);
+ WaitForSingleObjectEx(m_hWriteWaiterEvent, INFINITE, FALSE);
break;
}
}
@@ -425,7 +412,7 @@ void UTSemReadWrite::UnlockRead()
dwFlag - READERS_INCR - WRITEWAITERS_INCR + WRITERS_INCR,
dwFlag))
{
- m_pWriteWaiterEvent->Set();
+ SetEvent(m_hWriteWaiterEvent);
break;
}
}
@@ -478,7 +465,7 @@ void UTSemReadWrite::UnlockWrite()
dwFlag - WRITERS_INCR - count * READWAITERS_INCR + count * READERS_INCR,
dwFlag))
{
- m_pReadWaiterSemaphore->Release(count, NULL);
+ ReleaseSemaphore(m_hReadWaiterSemaphore, count, NULL);
break;
}
}
@@ -489,7 +476,7 @@ void UTSemReadWrite::UnlockWrite()
// (remove a writer (us), remove a write waiter, add a writer
if (dwFlag == InterlockedCompareExchangeT (&m_dwFlag, dwFlag - WRITEWAITERS_INCR, dwFlag))
{
- m_pWriteWaiterEvent->Set();
+ SetEvent(m_hWriteWaiterEvent);
break;
}
}
diff --git a/src/coreclr/src/vm/CMakeLists.txt b/src/coreclr/src/vm/CMakeLists.txt
index 93a4d21b76c3e8..1e8fd924f61334 100644
--- a/src/coreclr/src/vm/CMakeLists.txt
+++ b/src/coreclr/src/vm/CMakeLists.txt
@@ -100,6 +100,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
methodtable.cpp
nativeimage.cpp
object.cpp
+ onstackreplacement.cpp
pefile.cpp
peimage.cpp
peimagelayout.cpp
@@ -207,6 +208,7 @@ set(VM_HEADERS_DAC_AND_WKS_COMMON
methodtable.inl
object.h
object.inl
+ onstackreplacement.h
pefile.h
pefile.inl
peimage.h
@@ -473,7 +475,6 @@ set(VM_HEADERS_WKS
gcenv.ee.h
gcenv.os.h
gchelpers.h
- hosting.h
ibclogger.h
ilmarshalers.h
interopconverter.h
diff --git a/src/coreclr/src/vm/ceemain.cpp b/src/coreclr/src/vm/ceemain.cpp
index e32fe3171f60f8..5f7965e0a0abf5 100644
--- a/src/coreclr/src/vm/ceemain.cpp
+++ b/src/coreclr/src/vm/ceemain.cpp
@@ -660,6 +660,7 @@ void EEStartupHelper()
CodeVersionManager::StaticInitialize();
TieredCompilationManager::StaticInitialize();
CallCountingManager::StaticInitialize();
+ OnStackReplacementManager::StaticInitialize();
InitThreadManager();
STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "Returned successfully from InitThreadManager");
@@ -795,7 +796,7 @@ void EEStartupHelper()
#ifndef TARGET_UNIX
{
// Record mscorwks geometry
- PEDecoder pe(g_pMSCorEE);
+ PEDecoder pe(g_hThisInst);
g_runtimeLoadedBaseAddress = (SIZE_T)pe.GetBase();
g_runtimeVirtualSize = (SIZE_T)pe.GetVirtualSize();
@@ -924,10 +925,12 @@ void EEStartupHelper()
hr = g_pGCHeap->Initialize();
IfFailGo(hr);
+#ifdef FEATURE_EVENT_TRACE
// Finish setting up rest of EventPipe - specifically enable SampleProfiler if it was requested at startup.
// SampleProfiler needs to cooperate with the GC which hasn't fully finished setting up in the first part of the
// EventPipe initialization, so this is done after the GC has been fully initialized.
EventPipe::FinishInitialize();
+#endif
// This isn't done as part of InitializeGarbageCollector() above because thread
// creation requires AppDomains to have been set up.
@@ -1868,10 +1871,6 @@ BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error.
// life of the DLL.
GetSystemInfo(&g_SystemInfo);
- // Remember module instance
- g_pMSCorEE = pParam->hInst;
-
-
// Set callbacks so that LoadStringRC knows which language our
// threads are in so that it can return the proper localized string.
// TODO: This shouldn't rely on the LCID (id), but only the name
@@ -1948,7 +1947,7 @@ BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error.
if (dwReason == DLL_THREAD_DETACH || dwReason == DLL_PROCESS_DETACH)
{
- CExecutionEngine::ThreadDetaching();
+ ThreadDetaching();
}
return TRUE;
}
diff --git a/src/coreclr/src/vm/ceemain.h b/src/coreclr/src/vm/ceemain.h
index bd926f6e2b98ba..3201503a3b7366 100644
--- a/src/coreclr/src/vm/ceemain.h
+++ b/src/coreclr/src/vm/ceemain.h
@@ -40,98 +40,8 @@ enum ShutdownCompleteAction
// Force shutdown of the EE
void ForceEEShutdown(ShutdownCompleteAction sca = SCA_ExitProcessWhenShutdownComplete);
-// Setup thread statics, including ClrDebugState and StressLog.
-void SetupTLSForThread(Thread* pThread);
-
-// We have an internal class that can be used to expose EE functionality to other CLR
-// DLLs, via the deliberately obscure IEE DLL exports from the shim and the EE
-// NOTE: This class must not ever contain any instance variables. The reason for
-// this is that the IEE function (corhost.cpp) relies on the fact that you
-// may initialize the object more than once without ill effects. If you
-// change this class so that this condition is violated, you must rewrite
-// how the g_pCEE and related variables are initialized.
-class CExecutionEngine : public IExecutionEngine, public IEEMemoryManager
-{
- friend struct _DacGlobals;
-
- //***************************************************************************
- // public API:
- //***************************************************************************
-public:
-
- // Notification of a DLL_THREAD_DETACH or a Thread Terminate.
- static void ThreadDetaching();
-
-private:
-
- //***************************************************************************
- // IUnknown methods
- //***************************************************************************
-
- HRESULT STDMETHODCALLTYPE QueryInterface(
- REFIID id,
- void **pInterface);
-
- ULONG STDMETHODCALLTYPE AddRef();
-
- ULONG STDMETHODCALLTYPE Release();
-
- //***************************************************************************
- // IExecutionEngine methods for locking
- //***************************************************************************
-
- CRITSEC_COOKIE STDMETHODCALLTYPE CreateLock(LPCSTR szTag, LPCSTR level, CrstFlags flags);
-
- void STDMETHODCALLTYPE DestroyLock(CRITSEC_COOKIE lock);
-
- void STDMETHODCALLTYPE AcquireLock(CRITSEC_COOKIE lock);
-
- void STDMETHODCALLTYPE ReleaseLock(CRITSEC_COOKIE lock);
-
- EVENT_COOKIE STDMETHODCALLTYPE CreateAutoEvent(BOOL bInitialState);
- EVENT_COOKIE STDMETHODCALLTYPE CreateManualEvent(BOOL bInitialState);
- void STDMETHODCALLTYPE CloseEvent(EVENT_COOKIE event);
- BOOL STDMETHODCALLTYPE ClrSetEvent(EVENT_COOKIE event);
- BOOL STDMETHODCALLTYPE ClrResetEvent(EVENT_COOKIE event);
- DWORD STDMETHODCALLTYPE WaitForEvent(EVENT_COOKIE event, DWORD dwMilliseconds, BOOL bAlertable);
- DWORD STDMETHODCALLTYPE WaitForSingleObject(HANDLE handle, DWORD dwMilliseconds);
-
- SEMAPHORE_COOKIE STDMETHODCALLTYPE ClrCreateSemaphore(DWORD dwInitial, DWORD dwMax);
- void STDMETHODCALLTYPE ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore);
- DWORD STDMETHODCALLTYPE ClrWaitForSemaphore(SEMAPHORE_COOKIE semaphore, DWORD dwMilliseconds, BOOL bAlertable);
- BOOL STDMETHODCALLTYPE ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore, LONG lReleaseCount, LONG *lpPreviousCount);
-
- MUTEX_COOKIE STDMETHODCALLTYPE ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,
- BOOL bInitialOwner,
- LPCTSTR lpName);
- void STDMETHODCALLTYPE ClrCloseMutex(MUTEX_COOKIE mutex);
- BOOL STDMETHODCALLTYPE ClrReleaseMutex(MUTEX_COOKIE mutex);
- DWORD STDMETHODCALLTYPE ClrWaitForMutex(MUTEX_COOKIE mutex,
- DWORD dwMilliseconds,
- BOOL bAlertable);
-
- DWORD STDMETHODCALLTYPE ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable);
-
- BOOL STDMETHODCALLTYPE ClrAllocationDisallowed();
-
- void STDMETHODCALLTYPE GetLastThrownObjectExceptionFromThread(void **ppvException);
-
- //***************************************************************************
- // IEEMemoryManager methods for locking
- //***************************************************************************
- LPVOID STDMETHODCALLTYPE ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect);
- BOOL STDMETHODCALLTYPE ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType);
- SIZE_T STDMETHODCALLTYPE ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength);
- BOOL STDMETHODCALLTYPE ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect);
- HANDLE STDMETHODCALLTYPE ClrGetProcessHeap();
- HANDLE STDMETHODCALLTYPE ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize);
- BOOL STDMETHODCALLTYPE ClrHeapDestroy(HANDLE hHeap);
- LPVOID STDMETHODCALLTYPE ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes);
- BOOL STDMETHODCALLTYPE ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem);
- BOOL STDMETHODCALLTYPE ClrHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
- HANDLE STDMETHODCALLTYPE ClrGetProcessExecutableHeap();
-
-};
+// Notification of a DLL_THREAD_DETACH or a Thread Terminate.
+void ThreadDetaching();
void SetLatchedExitCode (INT32 code);
INT32 GetLatchedExitCode (void);
diff --git a/src/coreclr/src/vm/clrex.cpp b/src/coreclr/src/vm/clrex.cpp
index 5ada2e67b41c65..5153f92dc68934 100644
--- a/src/coreclr/src/vm/clrex.cpp
+++ b/src/coreclr/src/vm/clrex.cpp
@@ -2489,7 +2489,7 @@ CLRLastThrownObjectException* CLRLastThrownObjectException::Validate()
// Helper function to get an exception from outside the exception.
// Create and return a LastThrownObjectException. Its virtual destructor
// will clean up properly.
-void GetLastThrownObjectExceptionFromThread_Internal(Exception **ppException)
+void GetLastThrownObjectExceptionFromThread(Exception **ppException)
{
CONTRACTL
{
@@ -2511,7 +2511,7 @@ void GetLastThrownObjectExceptionFromThread_Internal(Exception **ppException)
*ppException = NULL;
}
-} // void GetLastThrownObjectExceptionFromThread_Internal()
+} // void GetLastThrownObjectExceptionFromThread()
#endif // CROSSGEN_COMPILE
diff --git a/src/coreclr/src/vm/clrex.h b/src/coreclr/src/vm/clrex.h
index 34ca1c0823dd8c..16b32574f5a39f 100644
--- a/src/coreclr/src/vm/clrex.h
+++ b/src/coreclr/src/vm/clrex.h
@@ -195,7 +195,7 @@ class CLRException : public Exception
};
// prototype for helper function to get exception object from thread's LastThrownObject.
-void GetLastThrownObjectExceptionFromThread_Internal(Exception **ppException);
+void GetLastThrownObjectExceptionFromThread(Exception **ppException);
// ---------------------------------------------------------------------------
diff --git a/src/coreclr/src/vm/codeman.cpp b/src/coreclr/src/vm/codeman.cpp
index d3ca9d9c52d8a3..947bcf6eb6431f 100644
--- a/src/coreclr/src/vm/codeman.cpp
+++ b/src/coreclr/src/vm/codeman.cpp
@@ -1296,11 +1296,6 @@ void EEJitManager::SetCpuInfo()
CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_CMOV);
CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FCOMI);
}
-
- if (CPU_X86_USE_SSE2(cpuInfo.dwFeatures))
- {
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2);
- }
#endif // TARGET_X86
#if defined(TARGET_X86) || defined(TARGET_AMD64)
@@ -1372,46 +1367,48 @@ void EEJitManager::SetCpuInfo()
if ((buffer[15] & 0x06) == 0x06) // SSE & SSE2
{
+ CPUCompileFlags.Set(InstructionSet_SSE);
+ CPUCompileFlags.Set(InstructionSet_SSE2);
if ((buffer[11] & 0x02) != 0) // AESNI
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AES);
+ CPUCompileFlags.Set(InstructionSet_AES);
}
if ((buffer[8] & 0x02) != 0) // PCLMULQDQ
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_PCLMULQDQ);
+ CPUCompileFlags.Set(InstructionSet_PCLMULQDQ);
}
if ((buffer[8] & 0x01) != 0) // SSE3
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3);
+ CPUCompileFlags.Set(InstructionSet_SSE3);
if ((buffer[9] & 0x02) != 0) // SSSE3
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSSE3);
+ CPUCompileFlags.Set(InstructionSet_SSSE3);
if ((buffer[10] & 0x08) != 0) // SSE4.1
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE41);
+ CPUCompileFlags.Set(InstructionSet_SSE41);
if ((buffer[10] & 0x10) != 0) // SSE4.2
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE42);
+ CPUCompileFlags.Set(InstructionSet_SSE42);
if ((buffer[10] & 0x80) != 0) // POPCNT
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_POPCNT);
+ CPUCompileFlags.Set(InstructionSet_POPCNT);
}
if ((buffer[11] & 0x18) == 0x18) // AVX & OSXSAVE
{
if(DoesOSSupportAVX() && (xmmYmmStateSupport() == 1))
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX);
+ CPUCompileFlags.Set(InstructionSet_AVX);
if ((buffer[9] & 0x10) != 0) // FMA
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FMA);
+ CPUCompileFlags.Set(InstructionSet_FMA);
}
if (maxCpuId >= 0x07)
@@ -1420,7 +1417,7 @@ void EEJitManager::SetCpuInfo()
if ((buffer[4] & 0x20) != 0) // AVX2
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2);
+ CPUCompileFlags.Set(InstructionSet_AVX2);
}
}
}
@@ -1439,7 +1436,7 @@ void EEJitManager::SetCpuInfo()
if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SIMD16ByteOnly) != 0)
{
- CPUCompileFlags.Clear(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2);
+ CPUCompileFlags.Clear(InstructionSet_AVX2);
}
}
@@ -1449,14 +1446,16 @@ void EEJitManager::SetCpuInfo()
if ((buffer[4] & 0x08) != 0) // BMI1
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_BMI1);
+ CPUCompileFlags.Set(InstructionSet_BMI1);
}
if ((buffer[5] & 0x01) != 0) // BMI2
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_BMI2);
+ CPUCompileFlags.Set(InstructionSet_BMI2);
}
}
+
+ CPUCompileFlags.EnsureValidInstructionSetSupport();
}
DWORD maxCpuIdEx = getcpuid(0x80000000, buffer);
@@ -1471,7 +1470,7 @@ void EEJitManager::SetCpuInfo()
if ((buffer[8] & 0x20) != 0) // LZCNT
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_LZCNT);
+ CPUCompileFlags.Set(InstructionSet_LZCNT);
}
}
#endif // defined(TARGET_X86) || defined(TARGET_AMD64)
@@ -1486,23 +1485,25 @@ void EEJitManager::SetCpuInfo()
PAL_GetJitCpuCapabilityFlags(&CPUCompileFlags);
#elif defined(HOST_64BIT)
// FP and SIMD support are enabled by default
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_ADVSIMD);
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_FP);
+ CPUCompileFlags.Set(InstructionSet_ArmBase);
+ CPUCompileFlags.Set(InstructionSet_AdvSimd);
// PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE (30)
if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE))
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_AES);
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA1);
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_SHA256);
+ CPUCompileFlags.Set(InstructionSet_Aes);
+ CPUCompileFlags.Set(InstructionSet_Sha1);
+ CPUCompileFlags.Set(InstructionSet_Sha256);
}
// PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE (31)
if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE))
{
- CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_HAS_ARM64_CRC32);
+ CPUCompileFlags.Set(InstructionSet_Crc32);
}
#endif // HOST_64BIT
#endif // TARGET_ARM64
+ CPUCompileFlags.Set64BitInstructionSetVariants();
+
m_CPUCompileFlags = CPUCompileFlags;
}
@@ -3581,12 +3582,20 @@ BOOL EEJitManager::GetBoundariesAndVars(
if (pDebugInfo == NULL)
return FALSE;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ BOOL hasFlagByte = TRUE;
+#else
+ BOOL hasFlagByte = FALSE;
+#endif
+
// Uncompress. This allocates memory and may throw.
CompressDebugInfo::RestoreBoundariesAndVars(
fpNew, pNewData, // allocators
pDebugInfo, // input
- pcMap, ppMap,
- pcVars, ppVars); // output
+ pcMap, ppMap, // output
+ pcVars, ppVars, // output
+ hasFlagByte
+ );
return TRUE;
}
@@ -3608,9 +3617,15 @@ void CodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJ
this->pRealCodeHeader.EnumMem();
#endif // USE_INDIRECT_CODEHEADER
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ BOOL hasFlagByte = TRUE;
+#else
+ BOOL hasFlagByte = FALSE;
+#endif
+
if (this->GetDebugInfo() != NULL)
{
- CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo());
+ CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo(), hasFlagByte);
}
}
@@ -5521,8 +5536,9 @@ BOOL NativeImageJitManager::GetBoundariesAndVars(
CompressDebugInfo::RestoreBoundariesAndVars(
fpNew, pNewData, // allocators
pDebugInfo, // input
- pcMap, ppMap,
- pcVars, ppVars); // output
+ pcMap, ppMap, // output
+ pcVars, ppVars, // output
+ FALSE); // no patchpoint info
return TRUE;
}
@@ -6735,8 +6751,9 @@ BOOL ReadyToRunJitManager::GetBoundariesAndVars(
CompressDebugInfo::RestoreBoundariesAndVars(
fpNew, pNewData, // allocators
pDebugInfo, // input
- pcMap, ppMap,
- pcVars, ppVars); // output
+ pcMap, ppMap, // output
+ pcVars, ppVars, // output
+ FALSE); // no patchpoint info
return TRUE;
}
@@ -6760,7 +6777,7 @@ void ReadyToRunJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemory
if (pDebugInfo == NULL)
return;
- CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo);
+ CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo, FALSE);
}
#endif
diff --git a/src/coreclr/src/vm/codeman.h b/src/coreclr/src/vm/codeman.h
index 433ae895bc63ce..38a475cbd9a596 100644
--- a/src/coreclr/src/vm/codeman.h
+++ b/src/coreclr/src/vm/codeman.h
@@ -262,6 +262,7 @@ typedef struct _hpCodeHdr
{
SUPPORTS_DAC;
return pRealCodeHeader->phdrJitGCInfo;
+
}
PTR_MethodDesc GetMethodDesc()
{
diff --git a/src/coreclr/src/vm/codeversion.cpp b/src/coreclr/src/vm/codeversion.cpp
index af833fc2121ad1..6981e057986513 100644
--- a/src/coreclr/src/vm/codeversion.cpp
+++ b/src/coreclr/src/vm/codeversion.cpp
@@ -8,6 +8,7 @@
#include "common.h"
#include "codeversion.h"
+#include "patchpointinfo.h"
#ifdef FEATURE_CODE_VERSIONING
#include "threadsuspend.h"
@@ -52,7 +53,9 @@ NativeCodeVersionNode::NativeCodeVersionNode(
NativeCodeVersionId id,
MethodDesc* pMethodDesc,
ReJITID parentId,
- NativeCodeVersion::OptimizationTier optimizationTier)
+ NativeCodeVersion::OptimizationTier optimizationTier,
+ PatchpointInfo* patchpointInfo,
+ unsigned ilOffset)
:
m_pNativeCode(NULL),
m_pMethodDesc(pMethodDesc),
@@ -64,6 +67,10 @@ NativeCodeVersionNode::NativeCodeVersionNode(
#endif
#ifdef HAVE_GCCOVER
m_gcCover(PTR_NULL),
+#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ m_patchpointInfo(patchpointInfo),
+ m_ilOffset(ilOffset),
#endif
m_flags(0)
{}
@@ -153,6 +160,17 @@ void NativeCodeVersionNode::SetOptimizationTier(NativeCodeVersion::OptimizationT
#endif // FEATURE_TIERED_COMPILATION
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+PatchpointInfo* NativeCodeVersionNode::GetOSRInfo(unsigned * ilOffset)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ *ilOffset = m_ilOffset;
+ return m_patchpointInfo;
+}
+
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
#ifdef HAVE_GCCOVER
PTR_GCCoverageInfo NativeCodeVersionNode::GetGCCoverageInfo() const
@@ -334,6 +352,24 @@ void NativeCodeVersion::SetOptimizationTier(OptimizationTier tier)
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+PatchpointInfo * NativeCodeVersion::GetOSRInfo(unsigned * ilOffset)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetOSRInfo(ilOffset);
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+#endif
+
+
#ifdef HAVE_GCCOVER
PTR_GCCoverageInfo NativeCodeVersion::GetGCCoverageInfo() const
@@ -929,11 +965,14 @@ void ILCodeVersion::SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap)
HRESULT ILCodeVersion::AddNativeCodeVersion(
MethodDesc* pClosedMethodDesc,
NativeCodeVersion::OptimizationTier optimizationTier,
- NativeCodeVersion* pNativeCodeVersion)
+ NativeCodeVersion* pNativeCodeVersion,
+ PatchpointInfo* patchpointInfo,
+ unsigned ilOffset
+ )
{
LIMITED_METHOD_CONTRACT;
CodeVersionManager* pManager = GetModule()->GetCodeVersionManager();
- HRESULT hr = pManager->AddNativeCodeVersion(*this, pClosedMethodDesc, optimizationTier, pNativeCodeVersion);
+ HRESULT hr = pManager->AddNativeCodeVersion(*this, pClosedMethodDesc, optimizationTier, pNativeCodeVersion, patchpointInfo, ilOffset);
if (FAILED(hr))
{
_ASSERTE(hr == E_OUTOFMEMORY);
@@ -1555,7 +1594,9 @@ HRESULT CodeVersionManager::AddNativeCodeVersion(
ILCodeVersion ilCodeVersion,
MethodDesc* pClosedMethodDesc,
NativeCodeVersion::OptimizationTier optimizationTier,
- NativeCodeVersion* pNativeCodeVersion)
+ NativeCodeVersion* pNativeCodeVersion,
+ PatchpointInfo* patchpointInfo,
+ unsigned ilOffset)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(IsLockOwnedByCurrentThread());
@@ -1569,7 +1610,7 @@ HRESULT CodeVersionManager::AddNativeCodeVersion(
}
NativeCodeVersionId newId = pMethodVersioningState->AllocateVersionId();
- NativeCodeVersionNode* pNativeCodeVersionNode = new (nothrow) NativeCodeVersionNode(newId, pClosedMethodDesc, ilCodeVersion.GetVersionId(), optimizationTier);
+ NativeCodeVersionNode* pNativeCodeVersionNode = new (nothrow) NativeCodeVersionNode(newId, pClosedMethodDesc, ilCodeVersion.GetVersionId(), optimizationTier, patchpointInfo, ilOffset);
if (pNativeCodeVersionNode == NULL)
{
return E_OUTOFMEMORY;
diff --git a/src/coreclr/src/vm/codeversion.h b/src/coreclr/src/vm/codeversion.h
index 724d91a825246b..f318e2ec801598 100644
--- a/src/coreclr/src/vm/codeversion.h
+++ b/src/coreclr/src/vm/codeversion.h
@@ -37,6 +37,11 @@ class GCCoverageInfo;
typedef DPTR(class GCCoverageInfo) PTR_GCCoverageInfo;
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+struct PatchpointInfo;
+typedef DPTR(struct PatchpointInfo) PTR_PatchpointInfo;
+#endif
+
class NativeCodeVersion
{
#ifdef FEATURE_CODE_VERSIONING
@@ -71,6 +76,7 @@ class NativeCodeVersion
{
OptimizationTier0,
OptimizationTier1,
+ OptimizationTier1OSR,
OptimizationTierOptimized, // may do less optimizations than tier 1
};
#ifdef FEATURE_TIERED_COMPILATION
@@ -80,6 +86,10 @@ class NativeCodeVersion
#endif
#endif // FEATURE_TIERED_COMPILATION
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ PatchpointInfo * GetOSRInfo(unsigned * iloffset);
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
#ifdef HAVE_GCCOVER
PTR_GCCoverageInfo GetGCCoverageInfo() const;
void SetGCCoverageInfo(PTR_GCCoverageInfo gcCover);
@@ -165,7 +175,8 @@ class ILCodeVersion
void SetIL(COR_ILMETHOD* pIL);
void SetJitFlags(DWORD flags);
void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap);
- HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier,
+ NativeCodeVersion* pNativeCodeVersion, PatchpointInfo* patchpointInfo = NULL, unsigned ilOffset = 0);
HRESULT GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
HRESULT SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion);
#endif //DACCESS_COMPILE
@@ -244,7 +255,8 @@ class NativeCodeVersionNode
public:
#ifndef DACCESS_COMPILE
- NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId, NativeCodeVersion::OptimizationTier optimizationTier);
+ NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId, NativeCodeVersion::OptimizationTier optimizationTier,
+ PatchpointInfo* patchpointInfo, unsigned ilOffset);
#endif
PTR_MethodDesc GetMethodDesc() const;
@@ -270,6 +282,10 @@ class NativeCodeVersionNode
void SetGCCoverageInfo(PTR_GCCoverageInfo gcCover);
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ PatchpointInfo * GetOSRInfo(unsigned * ilOffset);
+#endif
+
private:
//union - could save a little memory?
//{
@@ -286,6 +302,10 @@ class NativeCodeVersionNode
#ifdef HAVE_GCCOVER
PTR_GCCoverageInfo m_gcCover;
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ PTR_PatchpointInfo m_patchpointInfo;
+ unsigned m_ilOffset;
+#endif
enum NativeCodeVersionNodeFlags
{
@@ -569,7 +589,8 @@ class CodeVersionManager
};
HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion);
- HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion,
+ PatchpointInfo* patchpointInfo = NULL, unsigned ilOffset = 0);
PCODE PublishVersionableCodeIfNecessary(
MethodDesc* pMethodDesc,
CallerGCMode callerGCMode,
diff --git a/src/coreclr/src/vm/compile.cpp b/src/coreclr/src/vm/compile.cpp
index c250d5b70d1a5c..cbd51fe0b1c0ec 100644
--- a/src/coreclr/src/vm/compile.cpp
+++ b/src/coreclr/src/vm/compile.cpp
@@ -1018,7 +1018,7 @@ void CEECompileInfo::CompressDebugInfo(
{
STANDARD_VM_CONTRACT;
- CompressDebugInfo::CompressBoundariesAndVars(pOffsetMapping, iOffsetMapping, pNativeVarInfo, iNativeVarInfo, pDebugInfoBuffer, NULL);
+ CompressDebugInfo::CompressBoundariesAndVars(pOffsetMapping, iOffsetMapping, pNativeVarInfo, iNativeVarInfo, NULL, pDebugInfoBuffer, NULL);
}
ICorJitHost* CEECompileInfo::GetJitHost()
diff --git a/src/coreclr/src/vm/comutilnative.cpp b/src/coreclr/src/vm/comutilnative.cpp
index 7d0dea14da052c..1f0e257a548865 100644
--- a/src/coreclr/src/vm/comutilnative.cpp
+++ b/src/coreclr/src/vm/comutilnative.cpp
@@ -1047,7 +1047,7 @@ FCIMPLEND
** zeroingOptional -> whether caller prefers to skip clearing the content of the array, if possible.
**Exceptions: IDS_EE_ARRAY_DIMENSIONS_EXCEEDED when size is too large. OOM if can't allocate.
==============================================================================*/
-FCIMPL3(Object*, GCInterface::AllocateNewArray, void* arrayTypeHandle, INT32 length, CLR_BOOL zeroingOptional)
+FCIMPL3(Object*, GCInterface::AllocateNewArray, void* arrayTypeHandle, INT32 length, INT32 flags)
{
CONTRACTL {
FCALL_CHECK;
@@ -1058,7 +1058,10 @@ FCIMPL3(Object*, GCInterface::AllocateNewArray, void* arrayTypeHandle, INT32 len
HELPER_METHOD_FRAME_BEGIN_RET_0();
- pRet = AllocateSzArray(arrayType, length, zeroingOptional ? GC_ALLOC_ZEROING_OPTIONAL : GC_ALLOC_NO_FLAGS);
+ //Only the following flags are used by GC.cs, so we'll just assert it here.
+ _ASSERTE((flags & ~(GC_ALLOC_ZEROING_OPTIONAL | GC_ALLOC_PINNED_OBJECT_HEAP)) == 0);
+
+ pRet = AllocateSzArray(arrayType, length, (GC_ALLOC_FLAGS)flags);
HELPER_METHOD_FRAME_END();
diff --git a/src/coreclr/src/vm/comutilnative.h b/src/coreclr/src/vm/comutilnative.h
index 56030c4382a81b..4493ec271326f1 100644
--- a/src/coreclr/src/vm/comutilnative.h
+++ b/src/coreclr/src/vm/comutilnative.h
@@ -128,7 +128,7 @@ class GCInterface {
static FCDECL0(INT64, GetAllocatedBytesForCurrentThread);
static FCDECL1(INT64, GetTotalAllocatedBytes, CLR_BOOL precise);
- static FCDECL3(Object*, AllocateNewArray, void* elementTypeHandle, INT32 length, CLR_BOOL zeroingOptional);
+ static FCDECL3(Object*, AllocateNewArray, void* elementTypeHandle, INT32 length, INT32 flags);
#ifdef FEATURE_BASICFREEZE
static
diff --git a/src/coreclr/src/vm/corhost.cpp b/src/coreclr/src/vm/corhost.cpp
index 08519c9fa92dce..763c82ddaa9db0 100644
--- a/src/coreclr/src/vm/corhost.cpp
+++ b/src/coreclr/src/vm/corhost.cpp
@@ -19,7 +19,6 @@
#include "eeconfig.h"
#include "dbginterface.h"
#include "ceemain.h"
-#include "hosting.h"
#include "eepolicy.h"
#include "clrex.h"
#include "comcallablewrapper.h"
@@ -1176,77 +1175,7 @@ HRESULT CorHost2::GetCLRControl(ICLRControl** pCLRControl)
return E_NOTIMPL;
}
-// This is the instance that exposes interfaces out to all the other DLLs of the CLR
-// so they can use our services for TLS, synchronization, memory allocation, etc.
-static BYTE g_CEEInstance[sizeof(CExecutionEngine)];
-static Volatile g_pCEE = NULL;
-
-extern "C" IExecutionEngine * __stdcall IEE()
-{
- LIMITED_METHOD_CONTRACT;
-
- // Unfortunately,we can't probe here. The probing system requires the
- // use of TLS, and in order to initialize TLS we need to call IEE.
-
- //BEGIN_ENTRYPOINT_VOIDRET;
-
-
- // The following code does NOT contain a race condition. The following code is BY DESIGN.
- // The issue is that we can have two separate threads inside this if statement, both of which are
- // initializing the g_CEEInstance variable (and subsequently updating g_pCEE). This works fine,
- // and will not cause an inconsistent state due to the fact that CExecutionEngine has no
- // local variables. If multiple threads make it inside this if statement, it will copy the same
- // bytes over g_CEEInstance and there will not be a time when there is an inconsistent state.
- if ( !g_pCEE )
- {
- // Create a local copy on the stack and then copy it over to the static instance.
- // This avoids race conditions caused by multiple initializations of vtable in the constructor
- CExecutionEngine local;
- memcpy(&g_CEEInstance, (void*)&local, sizeof(CExecutionEngine));
-
- g_pCEE = (IExecutionEngine*)(CExecutionEngine*)&g_CEEInstance;
- }
- //END_ENTRYPOINT_VOIDRET;
-
- return g_pCEE;
-}
-
-
-HRESULT STDMETHODCALLTYPE CExecutionEngine::QueryInterface(REFIID id, void **pInterface)
-{
- LIMITED_METHOD_CONTRACT;
-
- if (!pInterface)
- return E_POINTER;
-
- *pInterface = NULL;
-
- //CANNOTTHROWCOMPLUSEXCEPTION();
- if (id == IID_IExecutionEngine)
- *pInterface = (IExecutionEngine *)this;
- else if (id == IID_IEEMemoryManager)
- *pInterface = (IEEMemoryManager *)this;
- else if (id == IID_IUnknown)
- *pInterface = (IUnknown *)(IExecutionEngine *)this;
- else
- return E_NOINTERFACE;
-
- AddRef();
- return S_OK;
-} // HRESULT STDMETHODCALLTYPE CExecutionEngine::QueryInterface()
-
-
-ULONG STDMETHODCALLTYPE CExecutionEngine::AddRef()
-{
- LIMITED_METHOD_CONTRACT;
- return 1;
-}
-
-ULONG STDMETHODCALLTYPE CExecutionEngine::Release()
-{
- LIMITED_METHOD_CONTRACT;
- return 1;
-}
+///////////////////////////////////////////////////////////////////////////////
// Note: Sampling profilers also use this function to initialize TLS for a unmanaged
// sampling thread so that initialization can be done in advance to avoid deadlocks.
@@ -1270,10 +1199,7 @@ void SetupTLSForThread(Thread* pThread)
#ifdef ENABLE_CONTRACTS
// Profilers need the side effect of GetClrDebugState() to perform initialization
// in advance to avoid deadlocks. Refer to ProfToEEInterfaceImpl::InitializeCurrentThread
- ClrDebugState* pDebugState = ::GetClrDebugState();
-
- if (pThread)
- pThread->m_pClrDebugState = pDebugState;
+ ::GetClrDebugState();
#endif
}
@@ -1283,7 +1209,7 @@ void FreeClrDebugState(LPVOID pTlsData);
#endif
// Called here from a thread detach or from destruction of a Thread object.
-void CExecutionEngine::ThreadDetaching()
+void ThreadDetaching()
{
// Can not cause memory allocation during thread detach, so no real contracts.
STATIC_CONTRACT_NOTHROW;
@@ -1314,614 +1240,6 @@ void CExecutionEngine::ThreadDetaching()
#endif // ENABLE_CONTRACTS_IMPL
}
-CRITSEC_COOKIE STDMETHODCALLTYPE CExecutionEngine::CreateLock(LPCSTR szTag, LPCSTR level, CrstFlags flags)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- ENTRY_POINT;
- }
- CONTRACTL_END;
-
- CRITSEC_COOKIE cookie = NULL;
- BEGIN_ENTRYPOINT_VOIDRET;
- cookie = ::EECreateCriticalSection(*(CrstType*)&level, flags);
- END_ENTRYPOINT_VOIDRET;
- return cookie;
-}
-
-void STDMETHODCALLTYPE CExecutionEngine::DestroyLock(CRITSEC_COOKIE cookie)
-{
- WRAPPER_NO_CONTRACT;
- ::EEDeleteCriticalSection(cookie);
-}
-
-void STDMETHODCALLTYPE CExecutionEngine::AcquireLock(CRITSEC_COOKIE cookie)
-{
- WRAPPER_NO_CONTRACT;
- ::EEEnterCriticalSection(cookie);
-}
-
-void STDMETHODCALLTYPE CExecutionEngine::ReleaseLock(CRITSEC_COOKIE cookie)
-{
- WRAPPER_NO_CONTRACT;
- ::EELeaveCriticalSection(cookie);
-}
-
-// Locking routines supplied by the EE to the other DLLs of the CLR. In a _DEBUG
-// build of the EE, we poison the Crst as a poor man's attempt to do some argument
-// validation.
-#define POISON_BITS 3
-
-static inline EVENT_COOKIE CLREventToCookie(CLREvent * pEvent)
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE((((uintptr_t) pEvent) & POISON_BITS) == 0);
-#ifdef _DEBUG
- pEvent = (CLREvent *) (((uintptr_t) pEvent) | POISON_BITS);
-#endif
- return (EVENT_COOKIE) pEvent;
-}
-
-static inline CLREvent *CookieToCLREvent(EVENT_COOKIE cookie)
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS);
-#ifdef _DEBUG
- if (cookie)
- {
- cookie = (EVENT_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS);
- }
-#endif
- return (CLREvent *) cookie;
-}
-
-
-EVENT_COOKIE STDMETHODCALLTYPE CExecutionEngine::CreateAutoEvent(BOOL bInitialState)
-{
- CONTRACTL
- {
- THROWS;
- MODE_ANY;
- GC_NOTRIGGER;
- ENTRY_POINT;
- }
- CONTRACTL_END;
-
- EVENT_COOKIE event = NULL;
- BEGIN_ENTRYPOINT_THROWS;
- NewHolder pEvent(new CLREvent());
- pEvent->CreateAutoEvent(bInitialState);
- event = CLREventToCookie(pEvent);
- pEvent.SuppressRelease();
- END_ENTRYPOINT_THROWS;
-
- return event;
-}
-
-EVENT_COOKIE STDMETHODCALLTYPE CExecutionEngine::CreateManualEvent(BOOL bInitialState)
-{
- CONTRACTL
- {
- THROWS;
- MODE_ANY;
- GC_NOTRIGGER;
- ENTRY_POINT;
- }
- CONTRACTL_END;
-
- EVENT_COOKIE event = NULL;
- BEGIN_ENTRYPOINT_THROWS;
-
- NewHolder pEvent(new CLREvent());
- pEvent->CreateManualEvent(bInitialState);
- event = CLREventToCookie(pEvent);
- pEvent.SuppressRelease();
-
- END_ENTRYPOINT_THROWS;
-
- return event;
-}
-
-void STDMETHODCALLTYPE CExecutionEngine::CloseEvent(EVENT_COOKIE event)
-{
- WRAPPER_NO_CONTRACT;
- if (event) {
- CLREvent *pEvent = CookieToCLREvent(event);
- pEvent->CloseEvent();
- delete pEvent;
- }
-}
-
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrSetEvent(EVENT_COOKIE event)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
- if (event) {
- CLREvent *pEvent = CookieToCLREvent(event);
- return pEvent->Set();
- }
- return FALSE;
-}
-
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrResetEvent(EVENT_COOKIE event)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
- if (event) {
- CLREvent *pEvent = CookieToCLREvent(event);
- return pEvent->Reset();
- }
- return FALSE;
-}
-
-DWORD STDMETHODCALLTYPE CExecutionEngine::WaitForEvent(EVENT_COOKIE event,
- DWORD dwMilliseconds,
- BOOL bAlertable)
-{
- WRAPPER_NO_CONTRACT;
- if (event) {
- CLREvent *pEvent = CookieToCLREvent(event);
- return pEvent->Wait(dwMilliseconds,bAlertable);
- }
-
- if (GetThread() && bAlertable)
- ThrowHR(E_INVALIDARG);
- return WAIT_FAILED;
-}
-
-DWORD STDMETHODCALLTYPE CExecutionEngine::WaitForSingleObject(HANDLE handle,
- DWORD dwMilliseconds)
-{
- STATIC_CONTRACT_WRAPPER;
- return ::WaitForSingleObject(handle,dwMilliseconds);
-}
-
-static inline SEMAPHORE_COOKIE CLRSemaphoreToCookie(CLRSemaphore * pSemaphore)
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE((((uintptr_t) pSemaphore) & POISON_BITS) == 0);
-#ifdef _DEBUG
- pSemaphore = (CLRSemaphore *) (((uintptr_t) pSemaphore) | POISON_BITS);
-#endif
- return (SEMAPHORE_COOKIE) pSemaphore;
-}
-
-static inline CLRSemaphore *CookieToCLRSemaphore(SEMAPHORE_COOKIE cookie)
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS);
-#ifdef _DEBUG
- if (cookie)
- {
- cookie = (SEMAPHORE_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS);
- }
-#endif
- return (CLRSemaphore *) cookie;
-}
-
-
-SEMAPHORE_COOKIE STDMETHODCALLTYPE CExecutionEngine::ClrCreateSemaphore(DWORD dwInitial,
- DWORD dwMax)
-{
- CONTRACTL
- {
- THROWS;
- MODE_ANY;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
- NewHolder pSemaphore(new CLRSemaphore());
- pSemaphore->Create(dwInitial, dwMax);
- SEMAPHORE_COOKIE ret = CLRSemaphoreToCookie(pSemaphore);;
- pSemaphore.SuppressRelease();
- return ret;
-}
-
-void STDMETHODCALLTYPE CExecutionEngine::ClrCloseSemaphore(SEMAPHORE_COOKIE semaphore)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
- CLRSemaphore *pSemaphore = CookieToCLRSemaphore(semaphore);
- pSemaphore->Close();
- delete pSemaphore;
-}
-
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrReleaseSemaphore(SEMAPHORE_COOKIE semaphore,
- LONG lReleaseCount,
- LONG *lpPreviousCount)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
- CLRSemaphore *pSemaphore = CookieToCLRSemaphore(semaphore);
- return pSemaphore->Release(lReleaseCount,lpPreviousCount);
-}
-
-DWORD STDMETHODCALLTYPE CExecutionEngine::ClrWaitForSemaphore(SEMAPHORE_COOKIE semaphore,
- DWORD dwMilliseconds,
- BOOL bAlertable)
-{
- WRAPPER_NO_CONTRACT;
- CLRSemaphore *pSemaphore = CookieToCLRSemaphore(semaphore);
- return pSemaphore->Wait(dwMilliseconds,bAlertable);
-}
-
-static inline MUTEX_COOKIE CLRMutexToCookie(CLRMutex * pMutex)
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE((((uintptr_t) pMutex) & POISON_BITS) == 0);
-#ifdef _DEBUG
- pMutex = (CLRMutex *) (((uintptr_t) pMutex) | POISON_BITS);
-#endif
- return (MUTEX_COOKIE) pMutex;
-}
-
-static inline CLRMutex *CookieToCLRMutex(MUTEX_COOKIE cookie)
-{
- LIMITED_METHOD_CONTRACT;
- _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS);
-#ifdef _DEBUG
- if (cookie)
- {
- cookie = (MUTEX_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS);
- }
-#endif
- return (CLRMutex *) cookie;
-}
-
-
-MUTEX_COOKIE STDMETHODCALLTYPE CExecutionEngine::ClrCreateMutex(LPSECURITY_ATTRIBUTES lpMutexAttributes,
- BOOL bInitialOwner,
- LPCTSTR lpName)
-{
- CONTRACTL
- {
- NOTHROW;
- MODE_ANY;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
-
- MUTEX_COOKIE mutex = 0;
- CLRMutex *pMutex = new (nothrow) CLRMutex();
- if (pMutex)
- {
- EX_TRY
- {
- pMutex->Create(lpMutexAttributes, bInitialOwner, lpName);
- mutex = CLRMutexToCookie(pMutex);
- }
- EX_CATCH
- {
- delete pMutex;
- }
- EX_END_CATCH(SwallowAllExceptions);
- }
- return mutex;
-}
-
-void STDMETHODCALLTYPE CExecutionEngine::ClrCloseMutex(MUTEX_COOKIE mutex)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
- CLRMutex *pMutex = CookieToCLRMutex(mutex);
- pMutex->Close();
- delete pMutex;
-}
-
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrReleaseMutex(MUTEX_COOKIE mutex)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
- CLRMutex *pMutex = CookieToCLRMutex(mutex);
- return pMutex->Release();
-}
-
-DWORD STDMETHODCALLTYPE CExecutionEngine::ClrWaitForMutex(MUTEX_COOKIE mutex,
- DWORD dwMilliseconds,
- BOOL bAlertable)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
- CLRMutex *pMutex = CookieToCLRMutex(mutex);
- return pMutex->Wait(dwMilliseconds,bAlertable);
-}
-
-#undef ClrSleepEx
-DWORD STDMETHODCALLTYPE CExecutionEngine::ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable)
-{
- WRAPPER_NO_CONTRACT;
- return EESleepEx(dwMilliseconds,bAlertable);
-}
-#define ClrSleepEx EESleepEx
-
-#undef ClrAllocationDisallowed
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrAllocationDisallowed()
-{
- WRAPPER_NO_CONTRACT;
- return EEAllocationDisallowed();
-}
-#define ClrAllocationDisallowed EEAllocationDisallowed
-
-#undef ClrVirtualAlloc
-LPVOID STDMETHODCALLTYPE CExecutionEngine::ClrVirtualAlloc(LPVOID lpAddress,
- SIZE_T dwSize,
- DWORD flAllocationType,
- DWORD flProtect)
-{
- WRAPPER_NO_CONTRACT;
- return EEVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
-}
-#define ClrVirtualAlloc EEVirtualAlloc
-
-#undef ClrVirtualFree
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrVirtualFree(LPVOID lpAddress,
- SIZE_T dwSize,
- DWORD dwFreeType)
-{
- WRAPPER_NO_CONTRACT;
- return EEVirtualFree(lpAddress, dwSize, dwFreeType);
-}
-#define ClrVirtualFree EEVirtualFree
-
-#undef ClrVirtualQuery
-SIZE_T STDMETHODCALLTYPE CExecutionEngine::ClrVirtualQuery(LPCVOID lpAddress,
- PMEMORY_BASIC_INFORMATION lpBuffer,
- SIZE_T dwLength)
-{
- WRAPPER_NO_CONTRACT;
- return EEVirtualQuery(lpAddress, lpBuffer, dwLength);
-}
-#define ClrVirtualQuery EEVirtualQuery
-
-#if defined(_DEBUG) && !defined(TARGET_UNIX)
-static VolatilePtr s_pStartOfUEFSection = NULL;
-static VolatilePtr s_pEndOfUEFSectionBoundary = NULL;
-static Volatile s_dwProtection = 0;
-#endif // _DEBUG && !TARGET_UNIX
-
-#undef ClrVirtualProtect
-
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrVirtualProtect(LPVOID lpAddress,
- SIZE_T dwSize,
- DWORD flNewProtect,
- PDWORD lpflOldProtect)
-{
- WRAPPER_NO_CONTRACT;
-
- // Get the UEF installation details - we will use these to validate
- // that the calls to ClrVirtualProtect are not going to affect the UEF.
- //
- // The OS UEF invocation mechanism was updated. When a UEF is setup,the OS captures
- // the following details about it:
- // 1) Protection of the pages in which the UEF lives
- // 2) The size of the region in which the UEF lives
- // 3) The region's Allocation Base
- //
- // The OS verifies details surrounding the UEF before invocation. For security reasons
- // the page protection cannot change between SetUnhandledExceptionFilter and invocation.
- //
- // Prior to this change, the UEF lived in a common section of code_Seg, along with
- // JIT_PatchedCode. Thus, their pages have the same protection, they live
- // in the same region (and thus, its size is the same).
- //
- // In EEStartupHelper, when we setup the UEF and then invoke InitJitHelpers1 and InitJitHelpers2,
- // they perform some optimizations that result in the memory page protection being changed. When
- // the UEF is to be invoked, the OS does the check on the UEF's cached details against the current
- // memory pages. This check used to fail when on 64bit retail builds when JIT_PatchedCode was
- // aligned after the UEF with a different memory page protection (post the optimizations by InitJitHelpers).
- // Thus, the UEF was never invoked.
- //
- // To circumvent this, we put the UEF in its own section in the code segment so that any modifications
- // to memory pages will not affect the UEF details that the OS cached. This is done in Excep.cpp
- // using the "#pragma code_seg" directives.
- //
- // Below, we double check that:
- //
- // 1) the address being protected does not lie in the region of of the UEF.
- // 2) the section after UEF is not having the same memory protection as UEF section.
- //
- // We assert if either of the two conditions above are true.
-
-#if defined(_DEBUG) && !defined(TARGET_UNIX)
- // We do this check in debug/checked builds only
-
- // Do we have the UEF details?
- if (s_pEndOfUEFSectionBoundary.Load() == NULL)
- {
- // Get reference to MSCORWKS image in memory...
- PEDecoder pe(g_pMSCorEE);
-
- // Find the UEF section from the image
- IMAGE_SECTION_HEADER* pUEFSection = pe.FindSection(CLR_UEF_SECTION_NAME);
- _ASSERTE(pUEFSection != NULL);
- if (pUEFSection)
- {
- // We got our section - get the start of the section
- BYTE* pStartOfUEFSection = static_cast(pe.GetBase())+pUEFSection->VirtualAddress;
- s_pStartOfUEFSection = pStartOfUEFSection;
-
- // Now we need the protection attributes for the memory region in which the
- // UEF section is...
- MEMORY_BASIC_INFORMATION uefInfo;
- if (ClrVirtualQuery(pStartOfUEFSection, &uefInfo, sizeof(uefInfo)) != 0)
- {
- // Calculate how many pages does the UEF section take to get to the start of the
- // next section. We dont calculate this as
- //
- // pStartOfUEFSection + uefInfo.RegionSize
- //
- // because the section following UEF will also be included in the region size
- // if it has the same protection as the UEF section.
- DWORD dwUEFSectionPageCount = ((pUEFSection->Misc.VirtualSize + GetOsPageSize() - 1)/GetOsPageSize());
-
- BYTE* pAddressOfFollowingSection = pStartOfUEFSection + (GetOsPageSize() * dwUEFSectionPageCount);
-
- // Ensure that the section following us is having different memory protection
- MEMORY_BASIC_INFORMATION nextSectionInfo;
- _ASSERTE(ClrVirtualQuery(pAddressOfFollowingSection, &nextSectionInfo, sizeof(nextSectionInfo)) != 0);
- _ASSERTE(nextSectionInfo.Protect != uefInfo.Protect);
-
- // save the memory protection details
- s_dwProtection = uefInfo.Protect;
-
- // Get the end of the UEF section
- BYTE* pEndOfUEFSectionBoundary = pAddressOfFollowingSection - 1;
-
- // Set the end of UEF section boundary
- FastInterlockExchangePointer(s_pEndOfUEFSectionBoundary.GetPointer(), pEndOfUEFSectionBoundary);
- }
- else
- {
- _ASSERTE(!"Unable to get UEF Details!");
- }
- }
- }
-
- if (s_pEndOfUEFSectionBoundary.Load() != NULL)
- {
- // Is the protection being changed?
- if (flNewProtect != s_dwProtection)
- {
- // Is the target address NOT affecting the UEF ? Possible cases:
- // 1) Starts and ends before the UEF start
- // 2) Starts after the UEF start
-
- void* pEndOfRangeAddr = static_cast(lpAddress)+dwSize-1;
-
- _ASSERTE_MSG(((pEndOfRangeAddr < s_pStartOfUEFSection.Load()) || (lpAddress > s_pEndOfUEFSectionBoundary.Load())),
- "Do not virtual protect the section in which UEF lives!");
- }
- }
-#endif // _DEBUG && !TARGET_UNIX
-
- return EEVirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect);
-}
-#define ClrVirtualProtect EEVirtualProtect
-
-#undef ClrGetProcessHeap
-HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrGetProcessHeap()
-{
- WRAPPER_NO_CONTRACT;
- return EEGetProcessHeap();
-}
-#define ClrGetProcessHeap EEGetProcessHeap
-
-#undef ClrGetProcessExecutableHeap
-HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrGetProcessExecutableHeap()
-{
- WRAPPER_NO_CONTRACT;
- return EEGetProcessExecutableHeap();
-}
-#define ClrGetProcessExecutableHeap EEGetProcessExecutableHeap
-
-
-#undef ClrHeapCreate
-HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrHeapCreate(DWORD flOptions,
- SIZE_T dwInitialSize,
- SIZE_T dwMaximumSize)
-{
- WRAPPER_NO_CONTRACT;
- return EEHeapCreate(flOptions, dwInitialSize, dwMaximumSize);
-}
-#define ClrHeapCreate EEHeapCreate
-
-#undef ClrHeapDestroy
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrHeapDestroy(HANDLE hHeap)
-{
- WRAPPER_NO_CONTRACT;
- return EEHeapDestroy(hHeap);
-}
-#define ClrHeapDestroy EEHeapDestroy
-
-#undef ClrHeapAlloc
-LPVOID STDMETHODCALLTYPE CExecutionEngine::ClrHeapAlloc(HANDLE hHeap,
- DWORD dwFlags,
- SIZE_T dwBytes)
-{
- WRAPPER_NO_CONTRACT;
-
- return EEHeapAlloc(hHeap, dwFlags, dwBytes);
-}
-#define ClrHeapAlloc EEHeapAlloc
-
-#undef ClrHeapFree
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrHeapFree(HANDLE hHeap,
- DWORD dwFlags,
- LPVOID lpMem)
-{
- WRAPPER_NO_CONTRACT;
- return EEHeapFree(hHeap, dwFlags, lpMem);
-}
-#define ClrHeapFree EEHeapFree
-
-#undef ClrHeapValidate
-BOOL STDMETHODCALLTYPE CExecutionEngine::ClrHeapValidate(HANDLE hHeap,
- DWORD dwFlags,
- LPCVOID lpMem)
-{
- WRAPPER_NO_CONTRACT;
- return EEHeapValidate(hHeap, dwFlags, lpMem);
-}
-#define ClrHeapValidate EEHeapValidate
-
-//------------------------------------------------------------------------------
-// Helper function to get an exception object from outside the exception. In
-// the CLR, it may be from the Thread object. Non-CLR users have no thread object,
-// and it will do nothing.
-
-void CExecutionEngine::GetLastThrownObjectExceptionFromThread(void **ppvException)
-{
- WRAPPER_NO_CONTRACT;
-
- // Cast to our real type.
- Exception **ppException = reinterpret_cast(ppvException);
-
- // Try to get a better message.
- GetLastThrownObjectExceptionFromThread_Internal(ppException);
-
-} // HRESULT CExecutionEngine::GetLastThrownObjectExceptionFromThread()
HRESULT CorHost2::DllGetActivationFactory(DWORD appDomainID, LPCWSTR wszTypeName, IActivationFactory ** factory)
{
diff --git a/src/coreclr/src/vm/crst.h b/src/coreclr/src/vm/crst.h
index 68c1f6e9e4e195..17330f236e6d04 100644
--- a/src/coreclr/src/vm/crst.h
+++ b/src/coreclr/src/vm/crst.h
@@ -119,10 +119,9 @@ template
friend class ListLockBase;
template
friend class ListLockEntryBase;
-//friend class CExecutionEngine;
friend struct SavedExceptionInfo;
-friend void EEEnterCriticalSection(CRITSEC_COOKIE cookie);
-friend void EELeaveCriticalSection(CRITSEC_COOKIE cookie);
+friend void ClrEnterCriticalSection(CRITSEC_COOKIE cookie);
+friend void ClrLeaveCriticalSection(CRITSEC_COOKIE cookie);
friend class CodeVersionManager;
friend class Debugger;
diff --git a/src/coreclr/src/vm/debuginfostore.cpp b/src/coreclr/src/vm/debuginfostore.cpp
index 295dbe5f201bc5..2c9d17c22f17c4 100644
--- a/src/coreclr/src/vm/debuginfostore.cpp
+++ b/src/coreclr/src/vm/debuginfostore.cpp
@@ -8,6 +8,7 @@
#include "common.h"
#include "debuginfostore.h"
#include "nibblestream.h"
+#include "patchpointinfo.h"
#ifdef _DEBUG
@@ -440,6 +441,7 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
IN ULONG iOffsetMapping,
IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
IN ULONG iNativeVarInfo,
+ IN PatchpointInfo * patchpointInfo,
IN OUT SBuffer * pDebugInfoBuffer,
IN LoaderHeap * pLoaderHeap
)
@@ -451,6 +453,18 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
PRECONDITION((pDebugInfoBuffer != NULL) ^ (pLoaderHeap != NULL));
} CONTRACTL_END;
+ // Patchpoint info is currently uncompressed.
+ DWORD cbPatchpointInfo = 0;
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ if (patchpointInfo != NULL)
+ {
+ cbPatchpointInfo = patchpointInfo->PatchpointInfoSize();
+ }
+#else
+ _ASSERTE(patchpointInfo == NULL);
+#endif
+
// Actually do the compression. These will throw on oom.
NibbleWriter boundsBuffer;
DWORD cbBounds = 0;
@@ -479,7 +493,12 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
DWORD cbHeader;
PVOID pHeader = w.GetBlob(&cbHeader);
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ S_UINT32 cbFinalSize = S_UINT32(1) + S_UINT32(cbPatchpointInfo) + S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbVars);
+#else
S_UINT32 cbFinalSize = S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbVars);
+#endif
+
if (cbFinalSize.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
@@ -497,6 +516,22 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
BYTE *ptr = ptrStart;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+ // First byte is a flag byte:
+ // 0 - no patchpoint info
+ // 1 - patchpoint info
+
+ *ptr++ = (cbPatchpointInfo > 0) ? 1 : 0;
+
+ if (cbPatchpointInfo > 0)
+ {
+ memcpy(ptr, (BYTE*) patchpointInfo, cbPatchpointInfo);
+ ptr += cbPatchpointInfo;
+ }
+
+#endif
+
memcpy(ptr, pHeader, cbHeader);
ptr += cbHeader;
@@ -519,11 +554,6 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars(
#endif // DACCESS_COMPILE
-//-----------------------------------------------------------------------------
-// Compression routines
-// DAC only needs to run the uncompression routines.
-//-----------------------------------------------------------------------------
-
//-----------------------------------------------------------------------------
// Uncompression (restore) routines
//-----------------------------------------------------------------------------
@@ -535,7 +565,8 @@ void CompressDebugInfo::RestoreBoundariesAndVars(
OUT ULONG32 * pcMap, // number of entries in ppMap
OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
OUT ULONG32 *pcVars,
- OUT ICorDebugInfo::NativeVarInfo **ppVars
+ OUT ICorDebugInfo::NativeVarInfo **ppVars,
+ BOOL hasFlagByte
)
{
CONTRACTL
@@ -552,6 +583,28 @@ void CompressDebugInfo::RestoreBoundariesAndVars(
if (pcVars != NULL) *pcVars = 0;
if (ppVars != NULL) *ppVars = NULL;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ if (hasFlagByte)
+ {
+ // Check flag byte and skip over any patchpoint info
+ BYTE flagByte = *pDebugInfo;
+ pDebugInfo++;
+
+ if (flagByte == 1)
+ {
+ PTR_PatchpointInfo patchpointInfo = dac_cast(pDebugInfo);
+ pDebugInfo += patchpointInfo->PatchpointInfoSize();
+ }
+ else
+ {
+ _ASSERTE(flagByte == 0);
+ }
+ }
+
+#else
+ _ASSERTE(!hasFlagByte);
+#endif
+
NibbleReader r(pDebugInfo, 12 /* maximum size of compressed 2 UINT32s */);
ULONG cbBounds = r.ReadEncodedU32();
@@ -615,8 +668,41 @@ void CompressDebugInfo::RestoreBoundariesAndVars(
}
}
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+PatchpointInfo * CompressDebugInfo::RestorePatchpointInfo(IN PTR_BYTE pDebugInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ PTR_PatchpointInfo patchpointInfo = NULL;
+
+ // Check flag byte.
+ BYTE flagByte = *pDebugInfo;
+ pDebugInfo++;
+
+ if (flagByte == 1)
+ {
+ patchpointInfo = dac_cast(pDebugInfo);
+ }
+ else
+ {
+ _ASSERTE(flagByte == 0);
+ }
+
+ return patchpointInfo;
+}
+
+#endif
+
#ifdef DACCESS_COMPILE
-void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo)
+void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte)
{
CONTRACTL
{
@@ -626,6 +712,27 @@ void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE
}
CONTRACTL_END;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ if (hasFlagByte)
+ {
+ // Check flag byte and skip over any patchpoint info
+ BYTE flagByte = *pDebugInfo;
+ pDebugInfo++;
+
+ if (flagByte == 1)
+ {
+        PTR_PatchpointInfo patchpointInfo = dac_cast<PTR_PatchpointInfo>(pDebugInfo);
+ pDebugInfo += patchpointInfo->PatchpointInfoSize();
+ }
+ else
+ {
+ _ASSERTE(flagByte == 0);
+ }
+ }
+#else
+ _ASSERTE(!hasFlagByte);
+#endif
+
NibbleReader r(pDebugInfo, 12 /* maximum size of compressed 2 UINT32s */);
ULONG cbBounds = r.ReadEncodedU32();
diff --git a/src/coreclr/src/vm/debuginfostore.h b/src/coreclr/src/vm/debuginfostore.h
index 453e41fa907c10..518333705a9710 100644
--- a/src/coreclr/src/vm/debuginfostore.h
+++ b/src/coreclr/src/vm/debuginfostore.h
@@ -83,6 +83,7 @@ class CompressDebugInfo
IN ULONG iOffsetMapping,
IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo,
IN ULONG iNativeVarInfo,
+ IN PatchpointInfo * patchpointInfo,
IN OUT SBuffer * pDebugInfoBuffer,
IN LoaderHeap * pLoaderHeap
);
@@ -95,11 +96,18 @@ class CompressDebugInfo
OUT ULONG32 * pcMap, // number of entries in ppMap
OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array
OUT ULONG32 *pcVars,
- OUT ICorDebugInfo::NativeVarInfo **ppVars
+ OUT ICorDebugInfo::NativeVarInfo **ppVars,
+ BOOL hasFlagByte
);
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ static PatchpointInfo * RestorePatchpointInfo(
+ IN PTR_BYTE pDebugInfo
+ );
+#endif
+
#ifdef DACCESS_COMPILE
- static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo);
+ static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte);
#endif
};
diff --git a/src/coreclr/src/vm/dwreport.cpp b/src/coreclr/src/vm/dwreport.cpp
index dbc6bb3e65dc83..4cc5950f7fc9a6 100644
--- a/src/coreclr/src/vm/dwreport.cpp
+++ b/src/coreclr/src/vm/dwreport.cpp
@@ -143,7 +143,7 @@ BOOL RegisterOutOfProcessWatsonCallbacks()
if (SUCCEEDED(::GetCORSystemDirectoryInternaL(wszDACPath)))
{
wszDACPath.Append(wszDACName);
- hr = (*pFnWerRegisterRuntimeExceptionModule)(wszDACPath, (PDWORD)g_pMSCorEE);
+ hr = (*pFnWerRegisterRuntimeExceptionModule)(wszDACPath, (PDWORD)g_hThisInst);
}
else {
hr = E_FAIL;
diff --git a/src/coreclr/src/vm/eeconfig.cpp b/src/coreclr/src/vm/eeconfig.cpp
index 205730d8b57cbd..dc43f141eabf52 100644
--- a/src/coreclr/src/vm/eeconfig.cpp
+++ b/src/coreclr/src/vm/eeconfig.cpp
@@ -339,6 +339,11 @@ HRESULT EEConfig::Init()
tieredCompilation_DeleteCallCountingStubsAfter = 0;
#endif
+#if defined(FEATURE_ON_STACK_REPLACEMENT)
+ dwOSR_HitLimit = 10;
+ dwOSR_CounterBump = 5000;
+#endif
+
#ifndef CROSSGEN_COMPILE
backpatchEntryPointSlots = false;
#endif
@@ -1265,6 +1270,16 @@ fTrackDynamicMethodDebugInfo = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_
}
#endif
+#if defined(FEATURE_ON_STACK_REPLACEMENT)
+ dwOSR_HitLimit = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_OSR_HitLimit);
+ dwOSR_CounterBump = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_OSR_CounterBump);
+#endif
+
+#if defined(FEATURE_ON_STACK_REPLACEMENT) && defined(_DEBUG)
+ dwOSR_LowId = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_OSR_LowId);
+ dwOSR_HighId = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_OSR_HighId);
+#endif
+
#ifndef CROSSGEN_COMPILE
backpatchEntryPointSlots = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BackpatchEntryPointSlots) != 0;
#endif
diff --git a/src/coreclr/src/vm/eeconfig.h b/src/coreclr/src/vm/eeconfig.h
index 3359a4a33eb03e..e28f64834ebe32 100644
--- a/src/coreclr/src/vm/eeconfig.h
+++ b/src/coreclr/src/vm/eeconfig.h
@@ -290,6 +290,17 @@ class EEConfig
DWORD TieredCompilation_DeleteCallCountingStubsAfter() const { LIMITED_METHOD_CONTRACT; return tieredCompilation_DeleteCallCountingStubsAfter; }
#endif
+#if defined(FEATURE_ON_STACK_REPLACEMENT)
+ // OSR Config
+ DWORD OSR_CounterBump() const { LIMITED_METHOD_CONTRACT; return dwOSR_CounterBump; }
+ DWORD OSR_HitLimit() const { LIMITED_METHOD_CONTRACT; return dwOSR_HitLimit; }
+#endif
+
+#if defined(FEATURE_ON_STACK_REPLACEMENT) && defined(_DEBUG)
+ DWORD OSR_LowId() const { LIMITED_METHOD_CONTRACT; return dwOSR_LowId; }
+ DWORD OSR_HighId() const { LIMITED_METHOD_CONTRACT; return dwOSR_HighId; }
+#endif
+
#ifndef CROSSGEN_COMPILE
bool BackpatchEntryPointSlots() const { LIMITED_METHOD_CONTRACT; return backpatchEntryPointSlots; }
#endif
@@ -1023,6 +1034,16 @@ class EEConfig
DWORD tieredCompilation_DeleteCallCountingStubsAfter;
#endif
+#if defined(FEATURE_ON_STACK_REPLACEMENT)
+ DWORD dwOSR_HitLimit;
+ DWORD dwOSR_CounterBump;
+#endif
+
+#if defined(FEATURE_ON_STACK_REPLACEMENT) && defined(_DEBUG)
+ DWORD dwOSR_LowId;
+ DWORD dwOSR_HighId;
+#endif
+
#ifndef CROSSGEN_COMPILE
bool backpatchEntryPointSlots;
#endif
diff --git a/src/coreclr/src/vm/eecontract.cpp b/src/coreclr/src/vm/eecontract.cpp
index cf25adfa2b4f29..105bc36c8b0a8a 100644
--- a/src/coreclr/src/vm/eecontract.cpp
+++ b/src/coreclr/src/vm/eecontract.cpp
@@ -35,10 +35,7 @@ void EEContract::DoChecks(UINT testmask, __in_z const char *szFunction, __in_z c
// allow such calls.
BEGIN_GETTHREAD_ALLOWED_IN_NO_THROW_REGION;
m_pThread = GetThread();
- if (m_pThread != NULL)
- {
- m_pClrDebugState = m_pThread->GetClrDebugState();
- }
+ m_pClrDebugState = GetClrDebugState();
// Call our base DoChecks.
BaseContract::DoChecks(testmask, szFunction, szFile, lineNum);
@@ -256,16 +253,3 @@ void EEContract::DoChecks(UINT testmask, __in_z const char *szFunction, __in_z c
}
}
#endif // ENABLE_CONTRACTS
-
-
-BYTE* __stdcall GetAddrOfContractShutoffFlag()
-{
- LIMITED_METHOD_CONTRACT;
-
- // Exposed entrypoint where we cannot probe or do anything TLS
- // related
- static BYTE gContractShutoffFlag = 0;
-
- return &gContractShutoffFlag;
-}
-
diff --git a/src/coreclr/src/vm/eepolicy.cpp b/src/coreclr/src/vm/eepolicy.cpp
index d01c98883e5390..bec99146cd6017 100644
--- a/src/coreclr/src/vm/eepolicy.cpp
+++ b/src/coreclr/src/vm/eepolicy.cpp
@@ -1012,13 +1012,13 @@ void EEPolicy::LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage
addressString.Printf(W("%p"), pExceptionInfo? (UINT_PTR)pExceptionInfo->ExceptionRecord->ExceptionAddress : address);
// We should always have the reference to the runtime's instance
- _ASSERTE(g_pMSCorEE != NULL);
+ _ASSERTE(g_hThisInst != NULL);
// Setup the string to contain the runtime's base address. Thus, when customers report FEEE with just
// the event log entry containing this string, we can use the absolute and base addresses to determine
// where the fault happened inside the runtime.
SmallStackSString runtimeBaseAddressString;
- runtimeBaseAddressString.Printf(W("%p"), g_pMSCorEE);
+ runtimeBaseAddressString.Printf(W("%p"), g_hThisInst);
SmallStackSString exitCodeString;
exitCodeString.Printf(W("%x"), exitCode);
diff --git a/src/coreclr/src/vm/eventpipesession.h b/src/coreclr/src/vm/eventpipesession.h
index adcddb3bdc5043..cc86d9b9d26ae3 100644
--- a/src/coreclr/src/vm/eventpipesession.h
+++ b/src/coreclr/src/vm/eventpipesession.h
@@ -8,7 +8,6 @@
#ifdef FEATURE_PERFTRACING
#include "common.h"
-#include "hosting.h"
#include "threadsuspend.h"
class EventPipeBufferManager;
diff --git a/src/coreclr/src/vm/excep.cpp b/src/coreclr/src/vm/excep.cpp
index ecec22455d711b..e77851c4497d04 100644
--- a/src/coreclr/src/vm/excep.cpp
+++ b/src/coreclr/src/vm/excep.cpp
@@ -5110,7 +5110,7 @@ LONG EntryPointFilter(PEXCEPTION_POINTERS pExceptionInfo, PVOID _pData)
// Updated to be in its own code segment named CLR_UEF_SECTION_NAME to prevent
// "VirtualProtect" calls from affecting its pages and thus, its
// invocation. For details, see the comment within the implementation of
-// CExecutionEngine::ClrVirtualProtect.
+// ClrVirtualProtect.
//
// Parameters
// pExceptionInfo -- information about the exception
@@ -7359,7 +7359,7 @@ LONG WINAPI CLRVectoredExceptionHandlerPhase2(PEXCEPTION_POINTERS pExceptionInfo
CONTRACT_VIOLATION(TakesLockViolation);
fExternalException = (!ExecutionManager::IsManagedCode(GetIP(pExceptionInfo->ContextRecord)) &&
- !IsIPInModule(g_pMSCorEE, GetIP(pExceptionInfo->ContextRecord)));
+ !IsIPInModule(g_hThisInst, GetIP(pExceptionInfo->ContextRecord)));
}
if (fExternalException)
@@ -7526,7 +7526,7 @@ VEH_ACTION WINAPI CLRVectoredExceptionHandlerPhase3(PEXCEPTION_POINTERS pExcepti
if ((!fAVisOk) && !(pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING))
{
PCODE ip = (PCODE)GetIP(pContext);
- if (IsIPInModule(g_pMSCorEE, ip) || IsIPInModule(GCHeapUtilities::GetGCModule(), ip))
+ if (IsIPInModule(g_hThisInst, ip) || IsIPInModule(GCHeapUtilities::GetGCModule(), ip))
{
CONTRACT_VIOLATION(ThrowsViolation|FaultViolation);
@@ -7979,6 +7979,7 @@ LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo)
//
// 1) We have a valid Thread object (implies exception on managed thread)
// 2) Not a valid Thread object but the IP is in the execution engine (implies native thread within EE faulted)
+ // 3) The exception occurred in a GC marked location when no thread exists (i.e. reverse P/Invoke with NativeCallableAttribute).
if (pThread || fExceptionInEE)
{
if (!bIsGCMarker)
@@ -8066,6 +8067,11 @@ LONG WINAPI CLRVectoredExceptionHandlerShim(PEXCEPTION_POINTERS pExceptionInfo)
#endif // FEATURE_EH_FUNCLETS
}
+ else if (bIsGCMarker)
+ {
+ _ASSERTE(pThread == NULL);
+ result = EXCEPTION_CONTINUE_EXECUTION;
+ }
SetLastError(dwLastError);
diff --git a/src/coreclr/src/vm/excep.h b/src/coreclr/src/vm/excep.h
index b6fb51242dc1f2..33a616f81aa7d9 100644
--- a/src/coreclr/src/vm/excep.h
+++ b/src/coreclr/src/vm/excep.h
@@ -186,7 +186,7 @@ void UninstallUnhandledExceptionFilter();
// within a section, no matter where it was located - and for this case, we need the UEF code
// at the right location to ensure that we can check the memory protection of its following
// section so that shouldnt affect UEF's memory protection. For details, read the comment in
-// "CExecutionEngine::ClrVirtualProtect".
+// ClrVirtualProtect.
//
// Keeping UEF in its own section helps prevent code movement as BBT does not reorder
// sections. As per my understanding of the linker, ".text" section always comes first,
@@ -194,7 +194,7 @@ void UninstallUnhandledExceptionFilter();
// The order of user defined executable sections is typically defined by the linker
// in terms of which section it sees first. So, if there is another custom executable
// section that comes after UEF section, it can affect the UEF section and we will
-// assert about it in "CExecutionEngine::ClrVirtualProtect".
+// assert about it in ClrVirtualProtect.
#define CLR_UEF_SECTION_NAME ".CLR_UEF"
#endif //!defined(TARGET_UNIX)
LONG __stdcall COMUnhandledExceptionFilter(EXCEPTION_POINTERS *pExceptionInfo);
diff --git a/src/coreclr/src/vm/exceptionhandling.cpp b/src/coreclr/src/vm/exceptionhandling.cpp
index 655a102407c119..41b3627438aa3f 100644
--- a/src/coreclr/src/vm/exceptionhandling.cpp
+++ b/src/coreclr/src/vm/exceptionhandling.cpp
@@ -818,6 +818,8 @@ UINT_PTR ExceptionTracker::FinishSecondPass(
return uResumePC;
}
+void CleanUpForSecondPass(Thread* pThread, bool fIsSO, LPVOID MemoryStackFpForFrameChain, LPVOID MemoryStackFp);
+
// On CoreARM, the MemoryStackFp is ULONG when passed by RtlDispatchException,
// unlike its 64bit counterparts.
EXTERN_C EXCEPTION_DISPOSITION
@@ -974,7 +976,7 @@ ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord
BOOL fExternalException;
fExternalException = (!ExecutionManager::IsManagedCode(ip) &&
- !IsIPInModule(g_pMSCorEE, ip));
+ !IsIPInModule(g_hThisInst, ip));
if (fExternalException)
{
@@ -1262,6 +1264,22 @@ lExit: ;
if ((ExceptionContinueSearch == returnDisposition))
{
+ if (dwExceptionFlags & EXCEPTION_UNWINDING)
+ {
+ EECodeInfo codeInfo(pDispatcherContext->ControlPc);
+ if (codeInfo.IsValid())
+ {
+ GcInfoDecoder gcInfoDecoder(codeInfo.GetGCInfoToken(), DECODE_REVERSE_PINVOKE_VAR);
+ if (gcInfoDecoder.GetReversePInvokeFrameStackSlot() != NO_REVERSE_PINVOKE_FRAME)
+ {
+ // Exception is being propagated from a native callable method into its native caller.
+ // The explicit frame chain needs to be unwound at this boundary.
+ bool fIsSO = pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW;
+ CleanUpForSecondPass(pThread, fIsSO, (void*)MemoryStackFp, (void*)MemoryStackFp);
+ }
+ }
+ }
+
GCX_PREEMP_NO_DTOR();
}
@@ -5824,7 +5842,7 @@ BOOL IsSafeToUnwindFrameChain(Thread* pThread, LPVOID MemoryStackFpForFrameChain
// We're safe only if the managed method will be unwound also
    LPVOID managedSP = dac_cast<PTR_VOID>(GetRegdisplaySP(&rd));
- if (managedSP < MemoryStackFpForFrameChain)
+ if (managedSP <= MemoryStackFpForFrameChain)
{
return TRUE;
}
@@ -6841,7 +6859,7 @@ StackFrame ExceptionTracker::FindParentStackFrameHelper(CrawlFrame* pCF,
#if defined(DACCESS_COMPILE)
HMODULE_TGT hEE = DacGlobalBase();
#else // !DACCESS_COMPILE
- HMODULE_TGT hEE = g_pMSCorEE;
+ HMODULE_TGT hEE = g_hThisInst;
#endif // !DACCESS_COMPILE
fIsCallerInVM = IsIPInModule(hEE, callerIP);
#endif // TARGET_UNIX
diff --git a/src/coreclr/src/vm/gccover.cpp b/src/coreclr/src/vm/gccover.cpp
index c5801e259fec76..96cd53ce2c3f05 100644
--- a/src/coreclr/src/vm/gccover.cpp
+++ b/src/coreclr/src/vm/gccover.cpp
@@ -1400,7 +1400,16 @@ BOOL OnGcCoverageInterrupt(PCONTEXT regs)
}
Thread* pThread = GetThread();
- _ASSERTE(pThread);
+ if (!pThread)
+ {
+ // No thread at the moment so we aren't doing coverage for this function.
+ // This should only occur for methods with the NativeCallableAttribute,
+ // where the call could be coming from a thread unknown to the CLR and
+ // we haven't created a thread yet - see PreStubWorker_Preemptive().
+ _ASSERTE(pMD->HasNativeCallableAttribute());
+ RemoveGcCoverageInterrupt(instrPtr, savedInstrPtr);
+ return TRUE;
+ }
#if defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX)
// If we're unable to redirect, then we simply won't test GC at this
@@ -1452,6 +1461,7 @@ void DoGcStress (PCONTEXT regs, NativeCodeVersion nativeCodeVersion)
DWORD offset = codeInfo.GetRelOffset();
Thread *pThread = GetThread();
+ _ASSERTE(pThread);
if (!IsGcCoverageInterruptInstruction(instrPtr))
{
diff --git a/src/coreclr/src/vm/gchelpers.cpp b/src/coreclr/src/vm/gchelpers.cpp
index e9352bde7cde9c..5f375da096c2f9 100644
--- a/src/coreclr/src/vm/gchelpers.cpp
+++ b/src/coreclr/src/vm/gchelpers.cpp
@@ -42,14 +42,6 @@
//
//========================================================================
-#define ProfileTrackArrayAlloc(orObject) \
- OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);\
- GCPROTECT_BEGIN(objref);\
- ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());\
- GCPROTECT_END();\
- orObject = (ArrayBase *) OBJECTREFToObject(objref);
-
-
inline gc_alloc_context* GetThreadAllocContext()
{
WRAPPER_NO_CONTRACT;
@@ -281,7 +273,7 @@ bool ToLogOrNotToLog(size_t size, const char *typeName)
// this function is called on managed allocation path with unprotected Object*
// as a result LogAlloc cannot call anything that would toggle the GC mode else
// you'll introduce several GC holes!
-inline void LogAlloc(size_t size, MethodTable *pMT, Object* object)
+inline void LogAlloc(Object* object)
{
CONTRACTL
{
@@ -292,6 +284,9 @@ inline void LogAlloc(size_t size, MethodTable *pMT, Object* object)
CONTRACTL_END;
#ifdef LOGGING
+ MethodTable* pMT = object->GetMethodTable();
+ size_t size = object->GetSize();
+
if (LoggingOn(LF_GCALLOC, LL_INFO10))
{
LogSpewAlways("Allocated %5d bytes for %s_TYPE" FMT_ADDR FMT_CLASS "\n",
@@ -311,9 +306,44 @@ inline void LogAlloc(size_t size, MethodTable *pMT, Object* object)
#endif
}
#else
-#define LogAlloc(size, pMT, object)
+#define LogAlloc( object)
#endif
+// signals completion of the object to GC and sends events if necessary
+template <class TObj>
+void PublishObjectAndNotify(TObj* &orObject, GC_ALLOC_FLAGS flags)
+{
+ _ASSERTE(orObject->HasEmptySyncBlockInfo());
+
+ if (flags & GC_ALLOC_USER_OLD_HEAP)
+ {
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
+ }
+
+#ifdef _LOGALLOC
+ LogAlloc(orObject);
+#endif // _LOGALLOC
+
+ // Notify the profiler of the allocation
+ // do this after initializing bounds so callback has size information
+ if (TrackAllocations() ||
+ (TrackLargeAllocations() && flags & GC_ALLOC_LARGE_OBJECT_HEAP))
+ {
+ OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
+ GCPROTECT_BEGIN(objref);
+ ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
+ GCPROTECT_END();
+ orObject = (TObj*) OBJECTREFToObject(objref);
+ }
+
+#ifdef FEATURE_EVENT_TRACE
+ // Send ETW event for allocation
+ if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
+ {
+ ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
+ }
+#endif // FEATURE_EVENT_TRACE
+}
inline SIZE_T MaxArrayLength(SIZE_T componentSize)
{
@@ -324,7 +354,7 @@ inline SIZE_T MaxArrayLength(SIZE_T componentSize)
return (componentSize == 1) ? 0X7FFFFFC7 : 0X7FEFFFFF;
}
-OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 cElements, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 cElements, GC_ALLOC_FLAGS flags)
{
CONTRACTL{
THROWS;
@@ -334,10 +364,10 @@ OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 cElements, GC_ALLOC_FLAGS
MethodTable* pArrayMT = arrayType.AsMethodTable();
- return AllocateSzArray(pArrayMT, cElements, flags, bAllocateInLargeHeap);
+ return AllocateSzArray(pArrayMT, cElements, flags);
}
-OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS flags)
{
CONTRACTL{
THROWS;
@@ -345,6 +375,8 @@ OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS
MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
} CONTRACTL_END;
+ // IBC Log MethodTable access
+ g_IBCLogger.LogMethodTableAccess(pArrayMT);
SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));
_ASSERTE(pArrayMT->CheckInstanceActivated());
@@ -356,9 +388,6 @@ OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS
if (elemType == ELEMENT_TYPE_VOID)
COMPlusThrow(kArgumentException);
- // IBC Log MethodTable access
- g_IBCLogger.LogMethodTableAccess(pArrayMT);
-
if (cElements < 0)
COMPlusThrow(kOverflowException);
@@ -385,22 +414,21 @@ OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS
((DWORD)cElements >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
{
STRESS_LOG2(LF_GC, LL_INFO10, "Allocating double MD array of size %d and length %d to large object heap\n", totalSize, cElements);
- bAllocateInLargeHeap = TRUE;
+ flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
}
#endif
if (totalSize >= g_pConfig->GetGCLOHThreshold())
- {
- bAllocateInLargeHeap = TRUE;
- }
+ flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
- flags |= (pArrayMT->ContainsPointers() ? GC_ALLOC_CONTAINS_REF : GC_ALLOC_NO_FLAGS);
+ if (pArrayMT->ContainsPointers())
+ flags |= GC_ALLOC_CONTAINS_REF;
ArrayBase* orArray = NULL;
- if (bAllocateInLargeHeap)
+ if (flags & GC_ALLOC_USER_OLD_HEAP)
{
- orArray = (ArrayBase*)Alloc(totalSize, flags | GC_ALLOC_LARGE_OBJECT_HEAP);
- orArray->SetArrayMethodTableForLargeObject(pArrayMT);
+ orArray = (ArrayBase*)Alloc(totalSize, flags);
+ orArray->SetMethodTableForUOHObject(pArrayMT);
}
else
{
@@ -457,40 +485,14 @@ OBJECTREF AllocateSzArray(MethodTable* pArrayMT, INT32 cElements, GC_ALLOC_FLAGS
#endif
orArray = (ArrayBase*)Alloc(totalSize, flags);
}
- orArray->SetArrayMethodTable(pArrayMT);
+ orArray->SetMethodTable(pArrayMT);
}
// Initialize Object
orArray->m_NumComponents = cElements;
- bool bProfilerNotifyLargeAllocation = false;
-
- if (bAllocateInLargeHeap)
- {
- GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orArray);
- bProfilerNotifyLargeAllocation = TrackLargeAllocations();
- }
-
-#ifdef _LOGALLOC
- LogAlloc(totalSize, pArrayMT, orArray);
-#endif // _LOGALLOC
-
- // Notify the profiler of the allocation
- // do this after initializing bounds so callback has size information
- if (TrackAllocations() || bProfilerNotifyLargeAllocation)
- {
- ProfileTrackArrayAlloc(orArray);
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // Send ETW event for allocation
- if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
- {
- ETW::TypeSystemLog::SendObjectAllocatedEvent(orArray);
- }
-#endif // FEATURE_EVENT_TRACE
-
- return ObjectToOBJECTREF((Object *) orArray);
+ PublishObjectAndNotify(orArray, flags);
+ return ObjectToOBJECTREF((Object*)orArray);
}
void ThrowOutOfMemoryDimensionsExceeded()
@@ -511,7 +513,7 @@ void ThrowOutOfMemoryDimensionsExceeded()
//
// This is wrapper overload to handle TypeHandle arrayType
//
-OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags)
{
CONTRACTL
{
@@ -520,7 +522,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, G
MethodTable* pArrayMT = arrayType.AsMethodTable();
- return AllocateArrayEx(pArrayMT, pArgs, dwNumArgs, flags, bAllocateInLargeHeap);
+ return AllocateArrayEx(pArrayMT, pArgs, dwNumArgs, flags);
}
//
@@ -530,7 +532,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, G
// allocate sub-arrays and fill them in.
//
// For arrays with lower bounds, pBounds is <lower bound>, <length>, <lower bound>, <length>, ...
-OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap)
+OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags)
{
CONTRACTL {
THROWS;
@@ -540,8 +542,6 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
PRECONDITION(dwNumArgs > 0);
} CONTRACTL_END;
- ArrayBase * orArray = NULL;
-
#ifdef _DEBUG
if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
{
@@ -550,6 +550,15 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
}
#endif
+ // IBC Log MethodTable access
+ g_IBCLogger.LogMethodTableAccess(pArrayMT);
+ SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));
+
+    // keep original flags in case the call is recursive (jagged array case)
+    // the additional flags that we infer here, such as GC_ALLOC_CONTAINS_REF
+ // may not be applicable to inner arrays
+ GC_ALLOC_FLAGS flagsOriginal = flags;
+
_ASSERTE(pArrayMT->CheckInstanceActivated());
PREFIX_ASSUME(pArrayMT != NULL);
CorElementType kind = pArrayMT->GetInternalCorElementType();
@@ -562,11 +571,6 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
// Calculate the total number of elements in the array
UINT32 cElements;
-
- // IBC Log MethodTable access
- g_IBCLogger.LogMethodTableAccess(pArrayMT);
- SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));
-
SIZE_T componentSize = pArrayMT->GetComponentSize();
bool maxArrayDimensionLengthOverflow = false;
bool providedLowerBounds = false;
@@ -580,7 +584,7 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
if (rank == 1 && (dwNumArgs == 1 || pArgs[0] == 0))
{
TypeHandle szArrayType = ClassLoader::LoadArrayTypeThrowing(pArrayMT->GetArrayElementTypeHandle(), ELEMENT_TYPE_SZARRAY, 1);
- return AllocateSzArray(szArrayType, pArgs[dwNumArgs - 1], flags, bAllocateInLargeHeap);
+ return AllocateSzArray(szArrayType, pArgs[dwNumArgs - 1], flags);
}
providedLowerBounds = (dwNumArgs == 2*rank);
@@ -642,21 +646,21 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
(cElements >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
{
STRESS_LOG2(LF_GC, LL_INFO10, "Allocating double MD array of size %d and length %d to large object heap\n", totalSize, cElements);
- bAllocateInLargeHeap = TRUE;
+ flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
}
#endif
if (totalSize >= g_pConfig->GetGCLOHThreshold())
- {
- bAllocateInLargeHeap = TRUE;
- }
+ flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
- flags |= (pArrayMT->ContainsPointers() ? GC_ALLOC_CONTAINS_REF : GC_ALLOC_NO_FLAGS);
+ if (pArrayMT->ContainsPointers())
+ flags |= GC_ALLOC_CONTAINS_REF;
- if (bAllocateInLargeHeap)
+ ArrayBase* orArray = NULL;
+ if (flags & GC_ALLOC_USER_OLD_HEAP)
{
- orArray = (ArrayBase *) Alloc(totalSize, flags | GC_ALLOC_LARGE_OBJECT_HEAP);
- orArray->SetArrayMethodTableForLargeObject(pArrayMT);
+ orArray = (ArrayBase*)Alloc(totalSize, flags);
+ orArray->SetMethodTableForUOHObject(pArrayMT);
}
else
{
@@ -674,24 +678,11 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
}
#endif
orArray = (ArrayBase*)Alloc(totalSize, flags);
- orArray->SetArrayMethodTable(pArrayMT);
+ orArray->SetMethodTable(pArrayMT);
}
// Initialize Object
orArray->m_NumComponents = cElements;
-
- bool bProfilerNotifyLargeAllocation = false;
-
- if (bAllocateInLargeHeap)
- {
- GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orArray);
- bProfilerNotifyLargeAllocation = TrackLargeAllocations();
- }
-
-#ifdef _LOGALLOC
- LogAlloc(totalSize, pArrayMT, orArray);
-#endif // _LOGALLOC
-
if (kind == ELEMENT_TYPE_ARRAY)
{
INT32 *pCountsPtr = (INT32 *) orArray->GetBoundsPtr();
@@ -704,20 +695,7 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
}
}
- // Notify the profiler of the allocation
- // do this after initializing bounds so callback has size information
- if (TrackAllocations() || bProfilerNotifyLargeAllocation)
- {
- ProfileTrackArrayAlloc(orArray);
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // Send ETW event for allocation
- if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
- {
- ETW::TypeSystemLog::SendObjectAllocatedEvent(orArray);
- }
-#endif // FEATURE_EVENT_TRACE
+ PublishObjectAndNotify(orArray, flags);
if (kind != ELEMENT_TYPE_ARRAY)
{
@@ -741,7 +719,7 @@ OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs,
TypeHandle subArrayType = pArrayMT->GetArrayElementTypeHandle();
for (UINT32 i = 0; i < cElements; i++)
{
- OBJECTREF obj = AllocateArrayEx(subArrayType, &pArgs[1], dwNumArgs-1, flags, bAllocateInLargeHeap);
+ OBJECTREF obj = AllocateArrayEx(subArrayType, &pArgs[1], dwNumArgs-1, flagsOriginal);
outerArray->SetAt(i, obj);
}
@@ -848,7 +826,8 @@ OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle elementType, BOOL bAll
_ASSERTE(arrayType.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
#endif //_DEBUG
- return AllocateSzArray(arrayType, (INT32) cElements, GC_ALLOC_NO_FLAGS, bAllocateInLargeHeap);
+ GC_ALLOC_FLAGS flags = bAllocateInLargeHeap ? GC_ALLOC_LARGE_OBJECT_HEAP : GC_ALLOC_NO_FLAGS;
+ return AllocateSzArray(arrayType, (INT32) cElements, flags);
}
STRINGREF AllocateString( DWORD cchStringLength )
@@ -859,8 +838,6 @@ STRINGREF AllocateString( DWORD cchStringLength )
MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
} CONTRACTL_END;
- StringObject *orObject = NULL;
-
#ifdef _DEBUG
if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
{
@@ -876,50 +853,23 @@ STRINGREF AllocateString( DWORD cchStringLength )
if (cchStringLength > 0x3FFFFFDF)
ThrowOutOfMemory();
- SIZE_T ObjectSize = PtrAlign(StringObject::GetSize(cchStringLength));
- _ASSERTE(ObjectSize > cchStringLength);
+ SIZE_T totalSize = PtrAlign(StringObject::GetSize(cchStringLength));
+ _ASSERTE(totalSize > cchStringLength);
SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
- orObject = (StringObject *)Alloc( ObjectSize, GC_ALLOC_NO_FLAGS);
+ GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS;
+ if (totalSize >= g_pConfig->GetGCLOHThreshold())
+ flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
- // Object is zero-init already
- _ASSERTE( orObject->HasEmptySyncBlockInfo() );
+ StringObject* orString = (StringObject*)Alloc(totalSize, flags);
// Initialize Object
- //@TODO need to build a LARGE g_pStringMethodTable before
- orObject->SetMethodTable( g_pStringClass );
- orObject->SetStringLength( cchStringLength );
-
- bool bProfilerNotifyLargeAllocation = false;
- if (ObjectSize >= g_pConfig->GetGCLOHThreshold())
- {
- bProfilerNotifyLargeAllocation = TrackLargeAllocations();
- GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
- }
+ orString->SetMethodTable(g_pStringClass);
+ orString->SetStringLength(cchStringLength);
- // Notify the profiler of the allocation
- if (TrackAllocations() || bProfilerNotifyLargeAllocation)
- {
- OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
- GCPROTECT_BEGIN(objref);
- ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
- GCPROTECT_END();
-
- orObject = (StringObject *) OBJECTREFToObject(objref);
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // Send ETW event for allocation
- if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
- {
- ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
- }
-#endif // FEATURE_EVENT_TRACE
-
- LogAlloc(ObjectSize, g_pStringClass, orObject);
-
- return( ObjectToSTRINGREF(orObject) );
+ PublishObjectAndNotify(orString, flags);
+ return ObjectToSTRINGREF(orString);
}
#ifdef FEATURE_UTF8STRING
@@ -931,8 +881,6 @@ UTF8STRINGREF AllocateUtf8String(DWORD cchStringLength)
MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
} CONTRACTL_END;
- Utf8StringObject *orObject = NULL;
-
#ifdef _DEBUG
if (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP))
{
@@ -953,50 +901,23 @@ UTF8STRINGREF AllocateUtf8String(DWORD cchStringLength)
if (cchStringLength > 0x7FFFFFBF)
ThrowOutOfMemory();
- SIZE_T ObjectSize = PtrAlign(Utf8StringObject::GetSize(cchStringLength));
- _ASSERTE(ObjectSize > cchStringLength);
+ SIZE_T totalSize = PtrAlign(Utf8StringObject::GetSize(cchStringLength));
+ _ASSERTE(totalSize > cchStringLength);
SetTypeHandleOnThreadForAlloc(TypeHandle(g_pUtf8StringClass));
- orObject = (Utf8StringObject *)Alloc(ObjectSize, GC_ALLOC_NO_FLAGS);
+ GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS;
+ if (totalSize >= g_pConfig->GetGCLOHThreshold())
+ flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
- // Object is zero-init already
- _ASSERTE(orObject->HasEmptySyncBlockInfo());
+ Utf8StringObject* orString = (Utf8StringObject*)Alloc(totalSize, flags);
// Initialize Object
- orObject->SetMethodTable(g_pUtf8StringClass);
- orObject->SetLength(cchStringLength);
-
- bool bProfilerNotifyLargeAllocation = false;
-
- if (ObjectSize >= g_pConfig->GetGCLOHThreshold())
- {
- GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
- bProfilerNotifyLargeAllocation = TrackLargeAllocations();
- }
-
- // Notify the profiler of the allocation
- if (TrackAllocations() || bProfilerNotifyLargeAllocation)
- {
- OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
- GCPROTECT_BEGIN(objref);
- ProfilerObjectAllocatedCallback(objref, (ClassID)orObject->GetTypeHandle().AsPtr());
- GCPROTECT_END();
-
- orObject = (Utf8StringObject *)OBJECTREFToObject(objref);
- }
+ orString->SetMethodTable(g_pUtf8StringClass);
+ orString->SetLength(cchStringLength);
-#ifdef FEATURE_EVENT_TRACE
- // Send ETW event for allocation
- if (ETW::TypeSystemLog::IsHeapAllocEventEnabled())
- {
- ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
- }
-#endif // FEATURE_EVENT_TRACE
-
- LogAlloc(ObjectSize, g_pUtf8StringClass, orObject);
-
- return( ObjectToUTF8STRINGREF(orObject) );
+ PublishObjectAndNotify(orString, flags);
+ return ObjectToUTF8STRINGREF(orString);
}
#endif // FEATURE_UTF8STRING
@@ -1073,9 +994,16 @@ OBJECTREF AllocateObject(MethodTable *pMT
#endif // FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
#endif // FEATURE_COMINTEROP
{
- DWORD baseSize = pMT->GetBaseSize();
- GC_ALLOC_FLAGS flags = ((pMT->ContainsPointers() ? GC_ALLOC_CONTAINS_REF : GC_ALLOC_NO_FLAGS) |
- (pMT->HasFinalizer() ? GC_ALLOC_FINALIZE : GC_ALLOC_NO_FLAGS));
+ GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS;
+ if (pMT->ContainsPointers())
+ flags |= GC_ALLOC_CONTAINS_REF;
+
+ if (pMT->HasFinalizer())
+ flags |= GC_ALLOC_FINALIZE;
+
+ DWORD totalSize = pMT->GetBaseSize();
+ if (totalSize >= g_pConfig->GetGCLOHThreshold())
+ flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
#ifdef FEATURE_64BIT_ALIGNMENT
if (pMT->RequiresAlign8())
@@ -1089,50 +1017,22 @@ OBJECTREF AllocateObject(MethodTable *pMT
_ASSERTE(sizeof(Object) == 4);
flags |= GC_ALLOC_ALIGN8;
if (pMT->IsValueType())
- {
flags |= GC_ALLOC_ALIGN8_BIAS;
- }
}
#endif // FEATURE_64BIT_ALIGNMENT
- Object* orObject = (Object*)Alloc(baseSize, flags);
-
- // verify zero'd memory (at least for sync block)
- _ASSERTE( orObject->HasEmptySyncBlockInfo() );
+ Object* orObject = (Object*)Alloc(totalSize, flags);
- bool bProfilerNotifyLargeAllocation = false;
- if ((baseSize >= g_pConfig->GetGCLOHThreshold()))
+ if (flags & GC_ALLOC_USER_OLD_HEAP)
{
- orObject->SetMethodTableForLargeObject(pMT);
- bProfilerNotifyLargeAllocation = TrackLargeAllocations();
- GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
+ orObject->SetMethodTableForUOHObject(pMT);
}
else
{
orObject->SetMethodTable(pMT);
}
- // Notify the profiler of the allocation
- if (TrackAllocations() || bProfilerNotifyLargeAllocation)
- {
- OBJECTREF objref = ObjectToOBJECTREF((Object*)orObject);
- GCPROTECT_BEGIN(objref);
- ProfilerObjectAllocatedCallback(objref, (ClassID) orObject->GetTypeHandle().AsPtr());
- GCPROTECT_END();
-
- orObject = (Object *) OBJECTREFToObject(objref);
- }
-
-#ifdef FEATURE_EVENT_TRACE
- // Send ETW event for allocation
- if(ETW::TypeSystemLog::IsHeapAllocEventEnabled())
- {
- ETW::TypeSystemLog::SendObjectAllocatedEvent(orObject);
- }
-#endif // FEATURE_EVENT_TRACE
-
- LogAlloc(pMT->GetBaseSize(), pMT, orObject);
-
+ PublishObjectAndNotify(orObject, flags);
oref = OBJECTREF_TO_UNCHECKED_OBJECTREF(orObject);
}
@@ -1466,7 +1366,7 @@ void ErectWriteBarrierForMT(MethodTable **dst, MethodTable *ref)
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- BYTE *refObject = *(BYTE **)((MethodTable*)ref)->GetLoaderAllocatorObjectHandle();
+ BYTE *refObject = *(BYTE **)ref->GetLoaderAllocatorObjectHandle();
if((BYTE*) refObject >= g_ephemeral_low && (BYTE*) refObject < g_ephemeral_high)
{
// VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
diff --git a/src/coreclr/src/vm/gchelpers.h b/src/coreclr/src/vm/gchelpers.h
index 5fe51ce6c77df3..9cf70b12d78307 100644
--- a/src/coreclr/src/vm/gchelpers.h
+++ b/src/coreclr/src/vm/gchelpers.h
@@ -21,12 +21,12 @@
//========================================================================
// Allocate single-dimensional array given array type
-OBJECTREF AllocateSzArray(MethodTable *pArrayMT, INT32 length, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
-OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 length, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
+OBJECTREF AllocateSzArray(MethodTable *pArrayMT, INT32 length, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS);
+OBJECTREF AllocateSzArray(TypeHandle arrayType, INT32 length, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS);
// The main Array allocation routine, can do multi-dimensional
-OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
-OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS, BOOL bAllocateInLargeHeap = FALSE);
+OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS);
+OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags = GC_ALLOC_NO_FLAGS);
// Create a SD array of primitive types given an element type
OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements);
diff --git a/src/coreclr/src/vm/gctoclreventsink.cpp b/src/coreclr/src/vm/gctoclreventsink.cpp
index 1389d417737c2e..a9bc4bd7654d17 100644
--- a/src/coreclr/src/vm/gctoclreventsink.cpp
+++ b/src/coreclr/src/vm/gctoclreventsink.cpp
@@ -25,6 +25,7 @@ void GCToCLREventSink::FireDynamicEvent(const char* eventName, void* payload, ui
void GCToCLREventSink::FireGCStart_V2(uint32_t count, uint32_t depth, uint32_t reason, uint32_t type)
{
+#ifdef FEATURE_EVENT_TRACE
LIMITED_METHOD_CONTRACT;
ETW::GCLog::ETW_GC_INFO gcStartInfo;
@@ -33,6 +34,7 @@ void GCToCLREventSink::FireGCStart_V2(uint32_t count, uint32_t depth, uint32_t r
gcStartInfo.GCStart.Reason = static_cast(reason);
gcStartInfo.GCStart.Type = static_cast(type);
ETW::GCLog::FireGcStart(&gcStartInfo);
+#endif
}
void GCToCLREventSink::FireGCGenerationRange(uint8_t generation, void* rangeStart, uint64_t rangeUsedLength, uint64_t rangeReservedLength)
diff --git a/src/coreclr/src/vm/hosting.cpp b/src/coreclr/src/vm/hosting.cpp
index 78db9389dd7380..95cf5ee6e46319 100644
--- a/src/coreclr/src/vm/hosting.cpp
+++ b/src/coreclr/src/vm/hosting.cpp
@@ -3,12 +3,9 @@
// See the LICENSE file in the project root for more information.
//
-//
-
#include "common.h"
-#include "hosting.h"
#include "mscoree.h"
#include "corhost.h"
#include "threads.h"
@@ -20,7 +17,7 @@
HANDLE g_ExecutableHeapHandle = NULL;
#undef VirtualAlloc
-LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
+LPVOID ClrVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
CONTRACTL
{
NOTHROW;
@@ -36,7 +33,10 @@ LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, D
#ifdef _DEBUG
if (g_fEEStarted) {
- _ASSERTE (!EEAllocationDisallowed());
+ // On Debug build we make sure that a thread is not going to do memory allocation
+ // after it suspends another thread, since the other thread may be suspended while
+ // having OS Heap lock.
+ _ASSERTE (Thread::Debug_AllowCallout());
}
_ASSERTE (lpAddress || (dwSize % g_SystemInfo.dwAllocationGranularity) == 0);
#endif
@@ -89,7 +89,7 @@ LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, D
#define VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect) Dont_Use_VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect)
#undef VirtualFree
-BOOL EEVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
+BOOL ClrVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
CONTRACTL
{
NOTHROW;
@@ -102,7 +102,7 @@ BOOL EEVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
#define VirtualFree(lpAddress, dwSize, dwFreeType) Dont_Use_VirtualFree(lpAddress, dwSize, dwFreeType)
#undef VirtualQuery
-SIZE_T EEVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength)
+SIZE_T ClrVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength)
{
CONTRACTL
{
@@ -117,8 +117,14 @@ SIZE_T EEVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZ
}
#define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)
+#if defined(_DEBUG) && !defined(TARGET_UNIX)
+static VolatilePtr s_pStartOfUEFSection = NULL;
+static VolatilePtr s_pEndOfUEFSectionBoundary = NULL;
+static Volatile s_dwProtection = 0;
+#endif // _DEBUG && !TARGET_UNIX
+
#undef VirtualProtect
-BOOL EEVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect)
+BOOL ClrVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect)
{
CONTRACTL
{
@@ -127,14 +133,120 @@ BOOL EEVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWOR
}
CONTRACTL_END;
+ // Get the UEF installation details - we will use these to validate
+ // that the calls to ClrVirtualProtect are not going to affect the UEF.
+ //
+ // The OS UEF invocation mechanism was updated. When a UEF is set up, the OS captures
+ // the following details about it:
+ // 1) Protection of the pages in which the UEF lives
+ // 2) The size of the region in which the UEF lives
+ // 3) The region's Allocation Base
+ //
+ // The OS verifies details surrounding the UEF before invocation. For security reasons
+ // the page protection cannot change between SetUnhandledExceptionFilter and invocation.
+ //
+ // Prior to this change, the UEF lived in a common section of code_Seg, along with
+ // JIT_PatchedCode. Thus, their pages have the same protection, they live
+ // in the same region (and thus, its size is the same).
+ //
+ // In EEStartupHelper, when we setup the UEF and then invoke InitJitHelpers1 and InitJitHelpers2,
+ // they perform some optimizations that result in the memory page protection being changed. When
+ // the UEF is to be invoked, the OS does the check on the UEF's cached details against the current
+ // memory pages. This check used to fail when on 64bit retail builds when JIT_PatchedCode was
+ // aligned after the UEF with a different memory page protection (post the optimizations by InitJitHelpers).
+ // Thus, the UEF was never invoked.
+ //
+ // To circumvent this, we put the UEF in its own section in the code segment so that any modifications
+ // to memory pages will not affect the UEF details that the OS cached. This is done in Excep.cpp
+ // using the "#pragma code_seg" directives.
+ //
+ // Below, we double check that:
+ //
+ // 1) the address being protected does not lie in the region of the UEF.
+ // 2) the section after the UEF does not have the same memory protection as the UEF section.
+ //
+ // We assert if either of the two conditions above are true.
+
+#if defined(_DEBUG) && !defined(TARGET_UNIX)
+ // We do this check in debug/checked builds only
+
+ // Do we have the UEF details?
+ if (s_pEndOfUEFSectionBoundary.Load() == NULL)
{
- return ::VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect);
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ // Get reference to MSCORWKS image in memory...
+ PEDecoder pe(g_hThisInst);
+
+ // Find the UEF section from the image
+ IMAGE_SECTION_HEADER* pUEFSection = pe.FindSection(CLR_UEF_SECTION_NAME);
+ _ASSERTE(pUEFSection != NULL);
+ if (pUEFSection)
+ {
+ // We got our section - get the start of the section
+ BYTE* pStartOfUEFSection = static_cast(pe.GetBase()) + pUEFSection->VirtualAddress;
+ s_pStartOfUEFSection = pStartOfUEFSection;
+
+ // Now we need the protection attributes for the memory region in which the
+ // UEF section is...
+ MEMORY_BASIC_INFORMATION uefInfo;
+ if (ClrVirtualQuery(pStartOfUEFSection, &uefInfo, sizeof(uefInfo)) != 0)
+ {
+ // Calculate how many pages the UEF section takes to get to the start of the
+ // next section. We don't calculate this as
+ //
+ // pStartOfUEFSection + uefInfo.RegionSize
+ //
+ // because the section following UEF will also be included in the region size
+ // if it has the same protection as the UEF section.
+ DWORD dwUEFSectionPageCount = ((pUEFSection->Misc.VirtualSize + GetOsPageSize() - 1) / GetOsPageSize());
+
+ BYTE* pAddressOfFollowingSection = pStartOfUEFSection + (GetOsPageSize() * dwUEFSectionPageCount);
+
+ // Ensure that the section following us has different memory protection
+ MEMORY_BASIC_INFORMATION nextSectionInfo;
+ _ASSERTE(ClrVirtualQuery(pAddressOfFollowingSection, &nextSectionInfo, sizeof(nextSectionInfo)) != 0);
+ _ASSERTE(nextSectionInfo.Protect != uefInfo.Protect);
+
+ // save the memory protection details
+ s_dwProtection = uefInfo.Protect;
+
+ // Get the end of the UEF section
+ BYTE* pEndOfUEFSectionBoundary = pAddressOfFollowingSection - 1;
+
+ // Set the end of UEF section boundary
+ FastInterlockExchangePointer(s_pEndOfUEFSectionBoundary.GetPointer(), pEndOfUEFSectionBoundary);
+ }
+ else
+ {
+ _ASSERTE(!"Unable to get UEF Details!");
+ }
+ }
}
+
+ if (s_pEndOfUEFSectionBoundary.Load() != NULL)
+ {
+ // Is the protection being changed?
+ if (flNewProtect != s_dwProtection)
+ {
+ // Is the target address NOT affecting the UEF ? Possible cases:
+ // 1) Starts and ends before the UEF start
+ // 2) Starts after the UEF start
+
+ void* pEndOfRangeAddr = static_cast(lpAddress) + dwSize - 1;
+
+ _ASSERTE_MSG(((pEndOfRangeAddr < s_pStartOfUEFSection.Load()) || (lpAddress > s_pEndOfUEFSectionBoundary.Load())),
+ "Do not virtual protect the section in which UEF lives!");
+ }
+ }
+#endif // _DEBUG && !TARGET_UNIX
+
+ return ::VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect);
}
#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
#undef GetProcessHeap
-HANDLE EEGetProcessHeap()
+HANDLE ClrGetProcessHeap()
{
// Note: this can be called a little early for real contracts, so we use static contracts instead.
STATIC_CONTRACT_NOTHROW;
@@ -145,7 +257,7 @@ HANDLE EEGetProcessHeap()
#define GetProcessHeap() Dont_Use_GetProcessHeap()
#undef HeapCreate
-HANDLE EEHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize)
+HANDLE ClrHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize)
{
CONTRACTL
{
@@ -166,7 +278,7 @@ HANDLE EEHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize)
#define HeapCreate(flOptions, dwInitialSize, dwMaximumSize) Dont_Use_HeapCreate(flOptions, dwInitialSize, dwMaximumSize)
#undef HeapDestroy
-BOOL EEHeapDestroy(HANDLE hHeap)
+BOOL ClrHeapDestroy(HANDLE hHeap)
{
CONTRACTL
{
@@ -196,7 +308,7 @@ BOOL EEHeapDestroy(HANDLE hHeap)
#undef HeapAlloc
-LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes)
+LPVOID ClrHeapAlloc(HANDLE hHeap, DWORD dwFlags, S_SIZE_T dwBytes)
{
STATIC_CONTRACT_NOTHROW;
@@ -205,30 +317,30 @@ LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes)
return NULL;
#endif
+ if (dwBytes.IsOverflow()) return NULL;
+
{
LPVOID p = NULL;
#ifdef _DEBUG
// Store the heap handle to detect heap contamination
- p = ::HeapAlloc (hHeap, dwFlags, dwBytes + OS_HEAP_ALIGN);
+ p = ::HeapAlloc (hHeap, dwFlags, dwBytes.Value() + OS_HEAP_ALIGN);
if(p)
{
*((HANDLE*)p) = hHeap;
p = (BYTE*)p + OS_HEAP_ALIGN;
}
#else
- p = ::HeapAlloc (hHeap, dwFlags, dwBytes);
+ p = ::HeapAlloc (hHeap, dwFlags, dwBytes.Value());
#endif
if(p == NULL
- //under OOM, we might not be able to get Execution Engine and can't access stress log
- && GetExecutionEngine ()
// If we have not created StressLog ring buffer, we should not try to use it.
// StressLog is going to do a memory allocation. We may enter an endless loop.
&& StressLog::t_pCurrentThreadLog != NULL )
{
- STRESS_LOG_OOM_STACK(dwBytes);
+ STRESS_LOG_OOM_STACK(dwBytes.Value());
}
return p;
@@ -236,20 +348,20 @@ LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes)
}
#define HeapAlloc(hHeap, dwFlags, dwBytes) Dont_Use_HeapAlloc(hHeap, dwFlags, dwBytes)
-LPVOID EEHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes)
+LPVOID ClrHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes)
{
WRAPPER_NO_CONTRACT;
static HANDLE ProcessHeap = NULL;
if (ProcessHeap == NULL)
- ProcessHeap = EEGetProcessHeap();
+ ProcessHeap = ClrGetProcessHeap();
- return EEHeapAlloc(ProcessHeap,dwFlags,dwBytes);
+ return ClrHeapAlloc(ProcessHeap,dwFlags,S_SIZE_T(dwBytes));
}
#undef HeapFree
-BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
+BOOL ClrHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
@@ -281,7 +393,7 @@ BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
}
#define HeapFree(hHeap, dwFlags, lpMem) Dont_Use_HeapFree(hHeap, dwFlags, lpMem)
-BOOL EEHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem)
+BOOL ClrHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem)
{
CONTRACTL
{
@@ -294,29 +406,12 @@ BOOL EEHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem)
static HANDLE ProcessHeap = NULL;
if (ProcessHeap == NULL)
- ProcessHeap = EEGetProcessHeap();
+ ProcessHeap = ClrGetProcessHeap();
- return EEHeapFree(ProcessHeap,dwFlags,lpMem);
+ return ClrHeapFree(ProcessHeap,dwFlags,lpMem);
}
-
-#undef HeapValidate
-BOOL EEHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem) {
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
-
-#ifndef TARGET_UNIX
-
- {
- return ::HeapValidate(hHeap, dwFlags, lpMem);
- }
-#else // !TARGET_UNIX
- return TRUE;
-#endif // !TARGET_UNIX
-}
-#define HeapValidate(hHeap, dwFlags, lpMem) Dont_Use_HeapValidate(hHeap, dwFlags, lpMem)
-
-HANDLE EEGetProcessExecutableHeap() {
+HANDLE ClrGetProcessExecutableHeap() {
// Note: this can be called a little early for real contracts, so we use static contracts instead.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
@@ -363,7 +458,7 @@ HANDLE EEGetProcessExecutableHeap() {
#undef SleepEx
#undef Sleep
-DWORD EESleepEx(DWORD dwMilliseconds, BOOL bAlertable)
+DWORD ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable)
{
CONTRACTL
{
@@ -490,7 +585,7 @@ static inline Crst *CookieToCrst(CRITSEC_COOKIE cookie) {
return (Crst *) cookie;
}
-CRITSEC_COOKIE EECreateCriticalSection(CrstType crstType, CrstFlags flags) {
+CRITSEC_COOKIE ClrCreateCriticalSection(CrstType crstType, CrstFlags flags) {
CONTRACTL
{
NOTHROW;
@@ -518,7 +613,7 @@ CRITSEC_COOKIE EECreateCriticalSection(CrstType crstType, CrstFlags flags) {
return ret;
}
-void EEDeleteCriticalSection(CRITSEC_COOKIE cookie)
+void ClrDeleteCriticalSection(CRITSEC_COOKIE cookie)
{
CONTRACTL
{
@@ -533,7 +628,7 @@ void EEDeleteCriticalSection(CRITSEC_COOKIE cookie)
delete pCrst;
}
-DEBUG_NOINLINE void EEEnterCriticalSection(CRITSEC_COOKIE cookie) {
+DEBUG_NOINLINE void ClrEnterCriticalSection(CRITSEC_COOKIE cookie) {
// Entering a critical section has many different contracts
// depending on the flags used to initialize the critical section.
@@ -555,7 +650,7 @@ DEBUG_NOINLINE void EEEnterCriticalSection(CRITSEC_COOKIE cookie) {
pCrst->Enter();
}
-DEBUG_NOINLINE void EELeaveCriticalSection(CRITSEC_COOKIE cookie)
+DEBUG_NOINLINE void ClrLeaveCriticalSection(CRITSEC_COOKIE cookie)
{
CONTRACTL
{
@@ -571,18 +666,3 @@ DEBUG_NOINLINE void EELeaveCriticalSection(CRITSEC_COOKIE cookie)
pCrst->Leave();
}
-
-BOOL EEAllocationDisallowed()
-{
- WRAPPER_NO_CONTRACT;
-
-#ifdef _DEBUG
- // On Debug build we make sure that a thread is not going to do memory allocation
- // after it suspends another thread, since the another thread may be suspended while
- // having OS Heap lock.
- return !Thread::Debug_AllowCallout();
-#else
- return FALSE;
-#endif
-}
-
diff --git a/src/coreclr/src/vm/hosting.h b/src/coreclr/src/vm/hosting.h
deleted file mode 100644
index b4e6fd8ebc3864..00000000000000
--- a/src/coreclr/src/vm/hosting.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-//
-
-
-#ifndef __HOSTING_H__
-#define __HOSTING_H__
-
-#include "clrhost.h"
-
-#define ClrVirtualAlloc EEVirtualAlloc
-#define ClrVirtualFree EEVirtualFree
-#define ClrVirtualQuery EEVirtualQuery
-#define ClrVirtualProtect EEVirtualProtect
-#define ClrHeapCreate EEHeapCreate
-#define ClrHeapDestroy EEHeapDestroy
-#define ClrHeapAlloc EEHeapAlloc
-#define ClrHeapFree EEHeapFree
-#define ClrHeapValidate EEHeapValidate
-#define ClrCreateCriticalSection EECreateCriticalSection
-#define ClrDestroyCriticalSection EEDestroyCriticalSection
-#define ClrEnterCriticalSection EEEnterCriticalSection
-#define ClrLeaveCriticalSection EELeaveCriticalSection
-#define ClrSleepEx EESleepEx
-#define ClrTlsSetValue EETlsSetValue
-#define ClrTlsGetValue EETlsGetValue
-
-#define ClrAllocationDisallowed EEAllocationDisallowed
-
-// memory management function
-LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect);
-BOOL EEVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType);
-SIZE_T EEVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength);
-BOOL EEVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect);
-HANDLE EEGetProcessHeap();
-HANDLE EEHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize);
-BOOL EEHeapDestroy(HANDLE hHeap);
-LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes);
-BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem);
-BOOL EEHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
-
-BOOL EEAllocationDisallowed();
-HANDLE EEGetProcessExecutableHeap();
-
-// critical section functions
-CRITSEC_COOKIE EECreateCriticalSection(CrstType crstType, CrstFlags flags);
-void EEDeleteCriticalSection(CRITSEC_COOKIE cookie);
-void EEEnterCriticalSection(CRITSEC_COOKIE cookie);
-void EELeaveCriticalSection(CRITSEC_COOKIE cookie);
-
-DWORD EESleepEx(DWORD dwMilliseconds, BOOL bAlertable);
-
-#endif
-
diff --git a/src/coreclr/src/vm/i386/jitinterfacex86.cpp b/src/coreclr/src/vm/i386/jitinterfacex86.cpp
index 566668770ea207..edc3fa1270ab3e 100644
--- a/src/coreclr/src/vm/i386/jitinterfacex86.cpp
+++ b/src/coreclr/src/vm/i386/jitinterfacex86.cpp
@@ -717,7 +717,7 @@ void *JIT_TrialAlloc::GenAllocString(Flags flags)
// we need to load the method table for string from the global
- // mov ecx, [g_pStringMethodTable]
+ // mov ecx, [g_pStringClass]
sl.Emit16(0x0d8b);
sl.Emit32((int)(size_t)&g_pStringClass);
diff --git a/src/coreclr/src/vm/ilmarshalers.cpp b/src/coreclr/src/vm/ilmarshalers.cpp
index a9257e048bf705..ec846aaadc624b 100644
--- a/src/coreclr/src/vm/ilmarshalers.cpp
+++ b/src/coreclr/src/vm/ilmarshalers.cpp
@@ -4515,7 +4515,7 @@ void MngdNativeArrayMarshaler::DoClearNativeContents(MngdNativeArrayMarshaler* p
if (pMarshaler != NULL && pMarshaler->ClearOleArray != NULL)
{
- pMarshaler->ClearOleArray((BASEARRAYREF*)pManagedHome, *pNativeHome, cElements, pThis->m_pElementMT, pThis->m_pManagedMarshaler);
+ pMarshaler->ClearOleArray(*pNativeHome, cElements, pThis->m_pElementMT, pThis->m_pManagedMarshaler);
}
}
}
@@ -4733,15 +4733,13 @@ FCIMPL3(void, MngdFixedArrayMarshaler::ClearNativeContents, MngdFixedArrayMarsha
{
FCALL_CONTRACT;
- BASEARRAYREF arrayRef = (BASEARRAYREF)*pManagedHome;
-
- HELPER_METHOD_FRAME_BEGIN_1(arrayRef);
+ HELPER_METHOD_FRAME_BEGIN_0();
const OleVariant::Marshaler* pMarshaler = OleVariant::GetMarshalerForVarType(pThis->m_vt, FALSE);
if (pMarshaler != NULL && pMarshaler->ClearOleArray != NULL)
{
- pMarshaler->ClearOleArray(&arrayRef, pNativeHome, pThis->m_cElements, pThis->m_pElementMT, pThis->m_pManagedElementMarshaler);
+ pMarshaler->ClearOleArray(pNativeHome, pThis->m_cElements, pThis->m_pElementMT, pThis->m_pManagedElementMarshaler);
}
HELPER_METHOD_FRAME_END();
diff --git a/src/coreclr/src/vm/jithelpers.cpp b/src/coreclr/src/vm/jithelpers.cpp
index 6fcb1c82d6d671..8d64b2629fffa9 100644
--- a/src/coreclr/src/vm/jithelpers.cpp
+++ b/src/coreclr/src/vm/jithelpers.cpp
@@ -54,6 +54,7 @@
#include "runtimehandles.h"
#include "castcache.h"
+#include "onstackreplacement.h"
//========================================================================
//
@@ -2608,7 +2609,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, IN
_ASSERTE(allocPtr != nullptr);
ArrayBase *array = reinterpret_cast(allocPtr);
- array->SetArrayMethodTable(pArrayMT);
+ array->SetMethodTable(pArrayMT);
_ASSERTE(static_cast(componentCount) == componentCount);
array->m_NumComponents = static_cast(componentCount);
@@ -2667,7 +2668,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, I
_ASSERTE(allocPtr != nullptr);
ArrayBase *array = reinterpret_cast(allocPtr);
- array->SetArrayMethodTable(pArrayMT);
+ array->SetMethodTable(pArrayMT);
_ASSERTE(static_cast(componentCount) == componentCount);
array->m_NumComponents = static_cast(componentCount);
@@ -5004,6 +5005,331 @@ HCIMPL0(void, JIT_DebugLogLoopCloning)
}
HCIMPLEND
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+// Helper method to jit the OSR version of a method.
+//
+// Returns the address of the jitted code.
+// Returns NULL if osr method can't be created.
+static PCODE JitPatchpointWorker(MethodDesc* pMD, EECodeInfo& codeInfo, int ilOffset)
+{
+ PCODE osrVariant = NULL;
+
+ GCX_PREEMP();
+
+ // Fetch the patchpoint info for the current method
+ EEJitManager* jitMgr = ExecutionManager::GetEEJitManager();
+ CodeHeader* codeHdr = jitMgr->GetCodeHeaderFromStartAddress(codeInfo.GetStartAddress());
+ PTR_BYTE debugInfo = codeHdr->GetDebugInfo();
+ PatchpointInfo* patchpointInfo = CompressDebugInfo::RestorePatchpointInfo(debugInfo);
+
+ if (patchpointInfo == NULL)
+ {
+ // Unexpected, but not fatal
+ STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "JitPatchpointWorker: failed to restore patchpoint info for Method=0x%pM\n", pMD);
+ return NULL;
+ }
+
+ // Set up a new native code version for the OSR variant of this method.
+ NativeCodeVersion osrNativeCodeVersion;
+ {
+ CodeVersionManager::LockHolder codeVersioningLockHolder;
+
+ NativeCodeVersion currentNativeCodeVersion = codeInfo.GetNativeCodeVersion();
+ ILCodeVersion ilCodeVersion = currentNativeCodeVersion.GetILCodeVersion();
+ HRESULT hr = ilCodeVersion.AddNativeCodeVersion(pMD, NativeCodeVersion::OptimizationTier1OSR, &osrNativeCodeVersion, patchpointInfo, ilOffset);
+ if (FAILED(hr))
+ {
+ // Unexpected, but not fatal
+ STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "JitPatchpointWorker: failed to add native code version for Method=0x%pM\n", pMD);
+ return NULL;
+ }
+ }
+
+ // Invoke the jit to compile the OSR version
+ LOG((LF_TIEREDCOMPILATION, LL_INFO10, "JitPatchpointWorker: creating OSR version of Method=0x%pM (%s::%s) at offset %d\n",
+ pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, ilOffset));
+
+ PrepareCodeConfigBuffer configBuffer(osrNativeCodeVersion);
+ PrepareCodeConfig *config = configBuffer.GetConfig();
+ osrVariant = pMD->PrepareCode(config);
+
+ return osrVariant;
+}
+
+// Helper method wrapper to set up a frame so we can invoke methods that might GC
+HCIMPL3(PCODE, JIT_Patchpoint_Framed, MethodDesc* pMD, EECodeInfo& codeInfo, int ilOffset)
+{
+ PCODE result = NULL;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+ result = JitPatchpointWorker(pMD, codeInfo, ilOffset);
+
+ HELPER_METHOD_FRAME_END();
+
+ return result;
+}
+HCIMPLEND
+
+// Jit helper invoked at a patchpoint.
+//
+// Checks to see if this is a known patchpoint, if not,
+// an entry is added to the patchpoint table.
+//
+// When the patchpoint has been hit often enough to trigger
+// a transition, create an OSR method.
+//
+// Currently, counter is a pointer into the Tier0 method stack
+// frame so we have exclusive access.
+
+void JIT_Patchpoint(int* counter, int ilOffset)
+{
+ // This method may not return normally
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ // Patchpoint identity is the helper return address
+ PCODE ip = (PCODE)_ReturnAddress();
+
+ // Fetch or setup patchpoint info for this patchpoint.
+ EECodeInfo codeInfo(ip);
+ MethodDesc* pMD = codeInfo.GetMethodDesc();
+ LoaderAllocator* allocator = pMD->GetLoaderAllocator();
+ OnStackReplacementManager* manager = allocator->GetOnStackReplacementManager();
+ PerPatchpointInfo * ppInfo = manager->GetPerPatchpointInfo(ip);
+
+ // In the current prototype, counter is shared by all patchpoints
+ // in a method, so no matter what happens below, we don't want to
+ // impair those other patchpoints.
+ //
+ // One might be tempted, for instance, to set the counter for
+ // invalid or ignored patchpoints to some high value to reduce
+ // the amount of back and forth with the runtime, but this would
+ // lock out other patchpoints in the method.
+ //
+ // So we always reset the counter to the bump value.
+ //
+ // In the prototype, counter is a location in a stack frame,
+ // so we can update it without worrying about other threads.
+ const int counterBump = g_pConfig->OSR_CounterBump();
+ *counter = counterBump;
+
+#if _DEBUG
+ const int ppId = ppInfo->m_patchpointId;
+#endif
+
+ // Is this a patchpoint that was previously marked as invalid? If so, just return to the Tier0 method.
+ if ((ppInfo->m_flags & PerPatchpointInfo::patchpoint_invalid) == PerPatchpointInfo::patchpoint_invalid)
+ {
+ LOG((LF_TIEREDCOMPILATION, LL_INFO1000, "Jit_Patchpoint: invalid patchpoint [%d] (0x%p) in Method=0x%pM (%s::%s) at offset %d\n",
+ ppId, ip, pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, ilOffset));
+ return;
+ }
+
+ // See if we have an OSR method for this patchpoint.
+ PCODE osrMethodCode = ppInfo->m_osrMethodCode;
+ bool isNewMethod = false;
+
+ if (osrMethodCode == NULL)
+ {
+ // No OSR method yet, let's see if we should create one.
+ //
+ // First, optionally ignore some patchpoints to increase
+ // coverage (stress mode).
+ //
+ // Because there are multiple patchpoints in a method, and
+ // each OSR method covers the remainder of the method from
+ // that point until the method returns, if we trigger on an
+ // early patchpoint in a method, we may never see triggers on
+ // a later one.
+
+#ifdef _DEBUG
+ const int lowId = g_pConfig->OSR_LowId();
+ const int highId = g_pConfig->OSR_HighId();
+
+ if ((ppId < lowId) || (ppId > highId))
+ {
+ LOG((LF_TIEREDCOMPILATION, LL_INFO10, "Jit_Patchpoint: ignoring patchpoint [%d] (0x%p) in Method=0x%pM (%s::%s) at offset %d\n",
+ ppId, ip, pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, ilOffset));
+ return;
+ }
+#endif
+
+ // Second, only request the OSR method if this patchpoint has
+ // been hit often enough.
+ //
+ // Note the initial invocation of the helper depends on the
+ // initial counter value baked into jitted code (call this J);
+ // subsequent invocations depend on the counter bump (call
+ // this B).
+ //
+ // J and B may differ, so the total number of loop iterations
+ // before an OSR method is created is:
+ //
+ // J, if hitLimit <= 1;
+ // J + (hitLimit-1)* B, if hitLimit > 1;
+ //
+ // Current thinking is:
+ //
+ // J should be in the range of tens to hundreds, so that newly
+ // called Tier0 methods that already have OSR methods
+ // available can transition to OSR methods quickly, but
+ // methods called only a few times do not invoke this
+ // helper and so create no PerPatchpoint runtime state.
+ //
+ // B should be in the range of hundreds to thousands, so that
+ // we're not too eager to create OSR methods (since there is
+ // some jit cost), but are eager enough to transition before
+ // we run too much Tier0 code.
+ //
+ const int hitLimit = g_pConfig->OSR_HitLimit();
+ const int hitCount = InterlockedIncrement(&ppInfo->m_patchpointCount);
+ const int hitLogLevel = (hitCount == 1) ? LL_INFO10 : LL_INFO1000;
+
+ LOG((LF_TIEREDCOMPILATION, hitLogLevel, "Jit_Patchpoint: patchpoint [%d] (0x%p) hit %d in Method=0x%pM (%s::%s) [il offset %d] (limit %d)\n",
+ ppId, ip, hitCount, pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, ilOffset, hitLimit));
+
+ // Defer, if we haven't yet reached the limit
+ if (hitCount < hitLimit)
+ {
+ return;
+ }
+
+ // Third, make sure no other thread is trying to create the OSR method.
+ LONG oldFlags = ppInfo->m_flags;
+ if ((oldFlags & PerPatchpointInfo::patchpoint_triggered) == PerPatchpointInfo::patchpoint_triggered)
+ {
+ LOG((LF_TIEREDCOMPILATION, LL_INFO1000, "Jit_Patchpoint: AWAITING OSR method for patchpoint [%d] (0x%p)\n", ppId, ip));
+ return;
+ }
+
+ LONG newFlags = ppInfo->m_flags | PerPatchpointInfo::patchpoint_triggered;
+ BOOL triggerTransition = InterlockedCompareExchange(&ppInfo->m_flags, newFlags, oldFlags) == oldFlags;
+
+ if (!triggerTransition)
+ {
+ LOG((LF_TIEREDCOMPILATION, LL_INFO1000, "Jit_Patchpoint: (lost race) AWAITING OSR method for patchpoint [%d] (0x%p)\n", ppId, ip));
+ return;
+ }
+
+ // Time to create the OSR method.
+ //
+ // We currently do this synchronously. We could instead queue
+ // up a request on some worker thread, like we do for
+ // rejitting, and return control to the Tier0 method. It may
+ // eventually return here, if the patchpoint is hit often
+ // enough.
+ //
+ // There is a chance the async version will create methods
+ // that are never used (just like there is a chance that Tier1
+ // methods are never called).
+ //
+ // In this prototype we want to expose bugs in the jitted code
+ // for OSR methods, so we stick with synchronous creation.
+ LOG((LF_TIEREDCOMPILATION, LL_INFO10, "Jit_Patchpoint: patchpoint [%d] (0x%p) TRIGGER at count %d\n", ppId, ip, hitCount));
+
+ // Invoke the helper to build the OSR method
+ osrMethodCode = HCCALL3(JIT_Patchpoint_Framed, pMD, codeInfo, ilOffset);
+
+ // If that failed, mark the patchpoint as invalid.
+ if (osrMethodCode == NULL)
+ {
+ // Unexpected, but not fatal
+ STRESS_LOG4(LF_TIEREDCOMPILATION, LL_WARNING, "Jit_Patchpoint: patchpoint (0x%p) OSR method creation failed,"
+ " marking patchpoint invalid for Method=0x%pM il offset %d\n", ip, hitCount, pMD, ilOffset);
+
+ InterlockedOr(&ppInfo->m_flags, (LONG)PerPatchpointInfo::patchpoint_invalid);
+ return;
+ }
+
+ // We've successfully created the osr method; make it available.
+ _ASSERTE(ppInfo->m_osrMethodCode == NULL);
+ ppInfo->m_osrMethodCode = osrMethodCode;
+ isNewMethod = true;
+ }
+
+ // If we get here, we have code to transition to...
+ _ASSERTE(osrMethodCode != NULL);
+
+ Thread *pThread = GetThread();
+
+#ifdef FEATURE_HIJACK
+ // We can't crawl the stack of a thread that currently has a hijack pending
+ // (since the hijack routine won't be recognized by any code manager). So we
+ // Undo any hijack, the EE will re-attempt it later.
+ pThread->UnhijackThread();
+#endif
+
+ // Find context for the original method
+ CONTEXT frameContext;
+ frameContext.ContextFlags = CONTEXT_FULL;
+ RtlCaptureContext(&frameContext);
+
+ // Walk back to the original method frame
+ pThread->VirtualUnwindToFirstManagedCallFrame(&frameContext);
+
+ // Remember original method FP and SP because new method will inherit them.
+ UINT_PTR currentSP = GetSP(&frameContext);
+ UINT_PTR currentFP = GetFP(&frameContext);
+
+ // We expect to be back at the right IP
+ if ((UINT_PTR)ip != GetIP(&frameContext))
+ {
+ // Should be fatal
+ STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "Jit_Patchpoint: patchpoint (0x%p) TRANSITION"
+ " unexpected context IP 0x%p\n", ip, GetIP(&frameContext));
+ }
+
+ // Now unwind back to the original method caller frame.
+ EECodeInfo callerCodeInfo(GetIP(&frameContext));
+ frameContext.ContextFlags = CONTEXT_FULL;
+ ULONG_PTR establisherFrame = 0;
+ PVOID handlerData = NULL;
+ RtlVirtualUnwind(UNW_FLAG_NHANDLER, callerCodeInfo.GetModuleBase(), GetIP(&frameContext), callerCodeInfo.GetFunctionEntry(),
+ &frameContext, &handlerData, &establisherFrame, NULL);
+
+ // Now, set FP and SP back to the values they had just before this helper was called,
+ // since the new method must have access to the original method frame.
+ //
+ // TODO: if we access the patchpointInfo here, we can read out the FP-SP delta from there and
+ // use that to adjust the stack, likely saving some stack space.
+
+#if defined(TARGET_AMD64)
+ // If calls push the return address, we need to simulate that here, so the OSR
+ // method sees the "expected" SP misalignment on entry.
+ _ASSERTE(currentSP % 16 == 0);
+ currentSP -= 8;
+#endif
+
+ SetSP(&frameContext, currentSP);
+ frameContext.Rbp = currentFP;
+
+ // Note we can get here w/o triggering, if there is an existing OSR method and
+ // we hit the patchpoint.
+ const int transitionLogLevel = isNewMethod ? LL_INFO10 : LL_INFO1000;
+ LOG((LF_TIEREDCOMPILATION, transitionLogLevel, "Jit_Patchpoint: patchpoint [%d] (0x%p) TRANSITION to ip 0x%p\n", ppId, ip, osrMethodCode));
+
+ // Install new entry point as IP
+ SetIP(&frameContext, osrMethodCode);
+
+ // Transition!
+ RtlRestoreContext(&frameContext, NULL);
+}
+
+#else
+
+void JIT_Patchpoint(int* counter, int ilOffset)
+{
+ // Stub version if OSR feature is disabled
+ //
+ // Should not be called.
+
+ UNREACHABLE();
+}
+
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
//========================================================================
//
// INTEROP HELPERS
@@ -5177,7 +5503,7 @@ void InitJITHelpers2()
g_pJitGenericHandleCacheCrst.Init(CrstJitGenericHandleCache, CRST_UNSAFE_COOPGC);
- // Allocate and initialize the table
+ // Allocate and initialize the generic handle cache
NewHolder tempGenericHandleCache (new JitGenericHandleCache());
LockOwner sLock = {&g_pJitGenericHandleCacheCrst, IsOwnerOfCrst};
if (!tempGenericHandleCache->Init(59, &sLock))
@@ -5558,10 +5884,10 @@ void InitJitHelperLogging()
{
#ifdef TARGET_X86
- IMAGE_DOS_HEADER *pDOS = (IMAGE_DOS_HEADER *)g_pMSCorEE;
+ IMAGE_DOS_HEADER *pDOS = (IMAGE_DOS_HEADER *)g_hThisInst;
_ASSERTE(pDOS->e_magic == VAL16(IMAGE_DOS_SIGNATURE) && pDOS->e_lfanew != 0);
- IMAGE_NT_HEADERS *pNT = (IMAGE_NT_HEADERS*)((LPBYTE)g_pMSCorEE + VAL32(pDOS->e_lfanew));
+ IMAGE_NT_HEADERS *pNT = (IMAGE_NT_HEADERS*)((LPBYTE)g_hThisInst + VAL32(pDOS->e_lfanew));
#ifdef HOST_64BIT
_ASSERTE(pNT->Signature == VAL32(IMAGE_NT_SIGNATURE)
&& pNT->FileHeader.SizeOfOptionalHeader == VAL16(sizeof(IMAGE_OPTIONAL_HEADER64))
@@ -5665,7 +5991,7 @@ void InitJitHelperLogging()
#else // TARGET_X86
// Is the address in mscoree.dll at all? (All helpers are in
// mscoree.dll)
- if (dynamicHlpFunc->pfnHelper >= (LPBYTE*)g_pMSCorEE && dynamicHlpFunc->pfnHelper < (LPBYTE*)g_pMSCorEE + VAL32(pNT->OptionalHeader.SizeOfImage))
+ if (dynamicHlpFunc->pfnHelper >= (LPBYTE*)g_hThisInst && dynamicHlpFunc->pfnHelper < (LPBYTE*)g_hThisInst + VAL32(pNT->OptionalHeader.SizeOfImage))
{
// See note above. How do I get the size on x86 for a static method?
hlpFuncCount->helperSize = 0;
diff --git a/src/coreclr/src/vm/jitinterface.cpp b/src/coreclr/src/vm/jitinterface.cpp
index 458085486fbf72..60c4b413fab3de 100644
--- a/src/coreclr/src/vm/jitinterface.cpp
+++ b/src/coreclr/src/vm/jitinterface.cpp
@@ -7746,6 +7746,7 @@ getMethodInfoHelper(
&methInfo->locals,
ftn,
true);
+
} // getMethodInfoHelper
//---------------------------------------------------------------------------------------
@@ -10967,6 +10968,50 @@ void CEEJitInfo::setVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo
EE_TO_JIT_TRANSITION();
}
+void CEEJitInfo::setPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ // We receive ownership of the array
+ _ASSERTE(m_pPatchpointInfoFromJit == NULL);
+ m_pPatchpointInfoFromJit = patchpointInfo;
+#else
+ UNREACHABLE();
+#endif
+
+ EE_TO_JIT_TRANSITION();
+}
+
+PatchpointInfo* CEEJitInfo::getOSRInfo(unsigned* ilOffset)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ PatchpointInfo* result = NULL;
+ *ilOffset = 0;
+
+ JIT_TO_EE_TRANSITION();
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ result = m_pPatchpointInfoFromRuntime;
+ *ilOffset = m_ilOffset;
+#endif
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
void CEEJitInfo::CompressDebugInfo()
{
CONTRACTL {
@@ -10975,11 +11020,20 @@ void CEEJitInfo::CompressDebugInfo()
MODE_PREEMPTIVE;
} CONTRACTL_END;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ PatchpointInfo* patchpointInfo = m_pPatchpointInfoFromJit;
+#else
+ PatchpointInfo* patchpointInfo = NULL;
+#endif
+
// Don't track JIT info for DynamicMethods.
if (m_pMethodBeingCompiled->IsDynamicMethod() && !g_pConfig->GetTrackDynamicMethodDebugInfo())
+ {
+ _ASSERTE(patchpointInfo == NULL);
return;
+ }
- if (m_iOffsetMapping == 0 && m_iNativeVarInfo == 0)
+ if ((m_iOffsetMapping == 0) && (m_iNativeVarInfo == 0) && (patchpointInfo == NULL))
return;
JIT_TO_EE_TRANSITION();
@@ -10989,6 +11043,7 @@ void CEEJitInfo::CompressDebugInfo()
PTR_BYTE pDebugInfo = CompressDebugInfo::CompressBoundariesAndVars(
m_pOffsetMapping, m_iOffsetMapping,
m_pNativeVarInfo, m_iNativeVarInfo,
+ patchpointInfo,
NULL,
m_pMethodBeingCompiled->GetLoaderAllocator()->GetLowFrequencyHeap());
@@ -12126,7 +12181,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
info,
CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS,
nativeEntry,
- nativeSizeOfCode );
+ nativeSizeOfCode);
#ifdef FEATURE_STACK_SAMPLING
if (jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SAMPLING_JIT_BACKGROUND))
@@ -12688,9 +12743,11 @@ PCODE UnsafeJitFunction(PrepareCodeConfig* config,
flags = GetCompileFlags(ftn, flags, &methodInfo);
- // If the reverse P/Invoke flag is used, we aren't going to support
- // any tiered compilation.
- if (flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_REVERSE_PINVOKE))
+#ifdef FEATURE_TIERED_COMPILATION
+ // Clearing all tier flags and mark as optimized if the reverse P/Invoke
+ // flag is used and the function is eligible.
+ if (flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_REVERSE_PINVOKE)
+ && ftn->IsEligibleForTieredCompilation())
{
_ASSERTE(config->GetCallerGCMode() != CallerGCMode::Coop);
@@ -12698,10 +12755,9 @@ PCODE UnsafeJitFunction(PrepareCodeConfig* config,
flags.Clear(CORJIT_FLAGS::CORJIT_FLAG_TIER0);
flags.Clear(CORJIT_FLAGS::CORJIT_FLAG_TIER1);
-#ifdef FEATURE_TIERED_COMPILATION
config->SetJitSwitchedToOptimized();
-#endif // FEATURE_TIERED_COMPILATION
}
+#endif // FEATURE_TIERED_COMPILATION
#ifdef _DEBUG
if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION))
@@ -12748,6 +12804,16 @@ PCODE UnsafeJitFunction(PrepareCodeConfig* config,
jitInfo.SetReserveForJumpStubs(reserveForJumpStubs);
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ // If this is an OSR jit request, grab the OSR info so we can pass it to the jit
+ if (flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_OSR))
+ {
+ unsigned ilOffset = 0;
+ PatchpointInfo* patchpointInfo = nativeCodeVersion.GetOSRInfo(&ilOffset);
+ jitInfo.SetOSRInfo(patchpointInfo, ilOffset);
+ }
+#endif
+
MethodDesc * pMethodForSecurity = jitInfo.GetMethodForSecurity(ftnHnd);
//Since the check could trigger a demand, we have to do this every time.
@@ -13985,6 +14051,18 @@ void CEEInfo::setVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::N
UNREACHABLE(); // only called on derived class.
}
+void CEEInfo::setPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
+PatchpointInfo* CEEInfo::getOSRInfo(unsigned* ilOffset)
+{
+ LIMITED_METHOD_CONTRACT;
+ UNREACHABLE(); // only called on derived class.
+}
+
void* CEEInfo::getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */
void ** ppIndirection) /* OUT */
{
diff --git a/src/coreclr/src/vm/jitinterface.h b/src/coreclr/src/vm/jitinterface.h
index b77cd0cf15069e..742ce48f8b9ead 100644
--- a/src/coreclr/src/vm/jitinterface.h
+++ b/src/coreclr/src/vm/jitinterface.h
@@ -534,6 +534,9 @@ class CEEInfo : public ICorJitInfo
CORINFO_CLASS_HANDLE getBuiltinClass(CorInfoClassId classId);
void getGSCookie(GSCookie * pCookieVal, GSCookie ** ppCookieVal);
+ void setPatchpointInfo(PatchpointInfo* patchpointInfo);
+ PatchpointInfo* getOSRInfo(unsigned* ilOffset);
+
// "System.Int32" ==> CORINFO_TYPE_INT..
CorInfoType getTypeForPrimitiveValueClass(
CORINFO_CLASS_HANDLE cls
@@ -1310,6 +1313,15 @@ class CEEJitInfo : public CEEInfo
m_iNativeVarInfo = 0;
m_pNativeVarInfo = NULL;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ if (m_pPatchpointInfoFromJit != NULL)
+ delete [] ((BYTE*) m_pPatchpointInfoFromJit);
+
+ m_pPatchpointInfoFromJit = NULL;
+ m_pPatchpointInfoFromRuntime = NULL;
+ m_ilOffset = 0;
+#endif
+
#ifdef FEATURE_EH_FUNCLETS
m_moduleBase = NULL;
m_totalUnwindSize = 0;
@@ -1372,6 +1384,17 @@ class CEEJitInfo : public CEEInfo
}
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ // Called by the runtime to supply patchpoint information to the jit.
+ void SetOSRInfo(PatchpointInfo* patchpointInfo, unsigned ilOffset)
+ {
+ _ASSERTE(m_pPatchpointInfoFromRuntime == NULL);
+ _ASSERTE(patchpointInfo != NULL);
+ m_pPatchpointInfoFromRuntime = patchpointInfo;
+ m_ilOffset = ilOffset;
+ }
+#endif
+
CEEJitInfo(MethodDesc* fd, COR_ILMETHOD_DECODER* header,
EEJitManager* jm, bool fVerifyOnly, bool allowInlining = true)
: CEEInfo(fd, fVerifyOnly, allowInlining),
@@ -1399,6 +1422,11 @@ class CEEJitInfo : public CEEInfo
m_pOffsetMapping(NULL),
m_iNativeVarInfo(0),
m_pNativeVarInfo(NULL),
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ m_pPatchpointInfoFromJit(NULL),
+ m_pPatchpointInfoFromRuntime(NULL),
+ m_ilOffset(0),
+#endif
m_gphCache()
{
CONTRACTL
@@ -1425,6 +1453,12 @@ class CEEJitInfo : public CEEInfo
if (m_pNativeVarInfo != NULL)
delete [] ((BYTE*) m_pNativeVarInfo);
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ if (m_pPatchpointInfoFromJit != NULL)
+ delete [] ((BYTE*) m_pPatchpointInfoFromJit);
+#endif
+
}
// ICorDebugInfo stuff.
@@ -1460,6 +1494,9 @@ class CEEJitInfo : public CEEInfo
void BackoutJitData(EEJitManager * jitMgr);
+ void setPatchpointInfo(PatchpointInfo* patchpointInfo);
+ PatchpointInfo* getOSRInfo(unsigned* ilOffset);
+
protected :
EEJitManager* m_jitManager; // responsible for allocating memory
CodeHeader* m_CodeHeader; // descriptor for JITTED code
@@ -1495,6 +1532,12 @@ protected :
ULONG32 m_iNativeVarInfo;
ICorDebugInfo::NativeVarInfo * m_pNativeVarInfo;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ PatchpointInfo * m_pPatchpointInfoFromJit;
+ PatchpointInfo * m_pPatchpointInfoFromRuntime;
+ unsigned m_ilOffset;
+#endif
+
// The first time a call is made to CEEJitInfo::GetProfilingHandle() from this thread
// for this method, these values are filled in. Thereafter, these values are used
// in lieu of calling into the base CEEInfo::GetProfilingHandle() again. This protects the
diff --git a/src/coreclr/src/vm/loaderallocator.cpp b/src/coreclr/src/vm/loaderallocator.cpp
index d39cf0a2a44f9c..406f45a89e3290 100644
--- a/src/coreclr/src/vm/loaderallocator.cpp
+++ b/src/coreclr/src/vm/loaderallocator.cpp
@@ -58,6 +58,10 @@ LoaderAllocator::LoaderAllocator()
m_callCountingManager = NULL;
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ m_onStackReplacementManager = NULL;
+#endif
+
m_fGCPressure = false;
m_fTerminated = false;
m_fUnloaded = false;
@@ -1343,6 +1347,14 @@ void LoaderAllocator::Terminate()
}
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ if (m_onStackReplacementManager != NULL)
+ {
+ delete m_onStackReplacementManager;
+ m_onStackReplacementManager = NULL;
+ }
+#endif
+
// In collectible types we merge the low frequency and high frequency heaps
// So don't destroy them twice.
if ((m_pLowFrequencyHeap != NULL) && (m_pLowFrequencyHeap != m_pHighFrequencyHeap))
@@ -2019,3 +2031,34 @@ BOOL LoaderAllocator::InsertComInteropData(MethodTable* pMT, InteropMethodTableD
#endif // FEATURE_COMINTEROP
#endif // !DACCESS_COMPILE
+
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+#ifndef DACCESS_COMPILE
+PTR_OnStackReplacementManager LoaderAllocator::GetOnStackReplacementManager()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ INJECT_FAULT(COMPlusThrowOM(););
+ }
+ CONTRACTL_END;
+
+ if (m_onStackReplacementManager == NULL)
+ {
+ OnStackReplacementManager * newManager = new OnStackReplacementManager(this);
+
+ if (FastInterlockCompareExchangePointer(&m_onStackReplacementManager, newManager, NULL) != NULL)
+ {
+ // some thread swooped in and set the field
+ delete newManager;
+ }
+ }
+ _ASSERTE(m_onStackReplacementManager != NULL);
+ return m_onStackReplacementManager;
+}
+#endif // !DACCESS_COMPILE
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
diff --git a/src/coreclr/src/vm/loaderallocator.hpp b/src/coreclr/src/vm/loaderallocator.hpp
index 4938b9545ab217..ea3da4a9cbe602 100644
--- a/src/coreclr/src/vm/loaderallocator.hpp
+++ b/src/coreclr/src/vm/loaderallocator.hpp
@@ -23,6 +23,7 @@ class FuncPtrStubs;
#include "callcounting.h"
#include "methoddescbackpatchinfo.h"
#include "crossloaderallocatorhash.h"
+#include "onstackreplacement.h"
#define VPTRU_LoaderAllocator 0x3200
@@ -283,6 +284,10 @@ class LoaderAllocator
MethodDescBackpatchInfoTracker m_methodDescBackpatchInfoTracker;
#endif
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ PTR_OnStackReplacementManager m_onStackReplacementManager;
+#endif
+
#ifndef DACCESS_COMPILE
public:
@@ -611,6 +616,12 @@ class LoaderAllocator
return &m_methodDescBackpatchInfoTracker;
}
#endif
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+public:
+ PTR_OnStackReplacementManager GetOnStackReplacementManager();
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
}; // class LoaderAllocator
typedef VPTR(LoaderAllocator) PTR_LoaderAllocator;
diff --git a/src/coreclr/src/vm/method.hpp b/src/coreclr/src/vm/method.hpp
index 7efecae85e129e..9a558d293084bf 100644
--- a/src/coreclr/src/vm/method.hpp
+++ b/src/coreclr/src/vm/method.hpp
@@ -2038,6 +2038,9 @@ class PrepareCodeConfig
virtual BOOL SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse);
virtual COR_ILMETHOD* GetILHeader();
virtual CORJIT_FLAGS GetJitCompilationFlags();
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ virtual unsigned GetILOffset() const { return 0; }
+#endif
BOOL ProfilerRejectedPrecompiledCode();
BOOL ReadyToRunRejectedPrecompiledCode();
void SetProfilerRejectedPrecompiledCode();
@@ -2100,6 +2103,7 @@ class PrepareCodeConfig
Optimized,
QuickJitted,
OptimizedTier1,
+ OptimizedTier1OSR,
Count
};
diff --git a/src/coreclr/src/vm/object.h b/src/coreclr/src/vm/object.h
index a7d68c10796578..8a091514ad4bf9 100644
--- a/src/coreclr/src/vm/object.h
+++ b/src/coreclr/src/vm/object.h
@@ -149,33 +149,17 @@ class Object
m_pMethTab = pMT;
}
- VOID SetMethodTable(MethodTable *pMT
- DEBUG_ARG(BOOL bAllowArray = FALSE))
+ VOID SetMethodTable(MethodTable *pMT)
{
- LIMITED_METHOD_CONTRACT;
- m_pMethTab = pMT;
-
-#ifdef _DEBUG
- if (!bAllowArray)
- {
- AssertNotArray();
- }
-#endif // _DEBUG
+ WRAPPER_NO_CONTRACT;
+ RawSetMethodTable(pMT);
}
- VOID SetMethodTableForLargeObject(MethodTable *pMT
- DEBUG_ARG(BOOL bAllowArray = FALSE))
+ VOID SetMethodTableForUOHObject(MethodTable *pMT)
{
- // This function must be used if the allocation occurs on the large object heap, and the method table might be a collectible type
WRAPPER_NO_CONTRACT;
+ // This function must be used if the allocation occurs on a UOH heap, and the method table might be a collectible type
ErectWriteBarrierForMT(&m_pMethTab, pMT);
-
-#ifdef _DEBUG
- if (!bAllowArray)
- {
- AssertNotArray();
- }
-#endif // _DEBUG
}
#endif //!DACCESS_COMPILE
@@ -477,16 +461,6 @@ class Object
private:
VOID ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncBlock);
-
-#ifdef _DEBUG
- void AssertNotArray()
- {
- if (m_pMethTab->IsArray())
- {
- _ASSERTE(!"ArrayBase::SetArrayMethodTable/ArrayBase::SetArrayMethodTableForLargeObject should be used for arrays");
- }
- }
-#endif // _DEBUG
};
/*
@@ -547,8 +521,8 @@ class ArrayBase : public Object
friend class GCHeap;
friend class CObjectHeader;
friend class Object;
- friend OBJECTREF AllocateSzArray(MethodTable *pArrayMT, INT32 length, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap);
- friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags, BOOL bAllocateInLargeHeap);
+ friend OBJECTREF AllocateSzArray(MethodTable *pArrayMT, INT32 length, GC_ALLOC_FLAGS flags);
+ friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, GC_ALLOC_FLAGS flags);
friend FCDECL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
friend FCDECL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
friend class JIT_TrialAlloc;
@@ -575,20 +549,15 @@ class ArrayBase : public Object
// type is stored in the array or not
inline TypeHandle GetArrayElementTypeHandle() const;
- // Get the CorElementType for the elements in the array. Avoids creating a TypeHandle
+ // Get the CorElementType for the elements in the array. Avoids creating a TypeHandle
inline CorElementType GetArrayElementType() const;
inline unsigned GetRank() const;
- // Total element count for the array
+ // Total element count for the array
inline DWORD GetNumComponents() const;
-#ifndef DACCESS_COMPILE
- inline void SetArrayMethodTable(MethodTable *pArrayMT);
- inline void SetArrayMethodTableForLargeObject(MethodTable *pArrayMT);
-#endif // !DACCESS_COMPILE
-
- // Get pointer to elements, handles any number of dimensions
+ // Get pointer to elements, handles any number of dimensions
PTR_BYTE GetDataPtr(BOOL inGC = FALSE) const {
LIMITED_METHOD_CONTRACT;
SUPPORTS_DAC;
@@ -699,7 +668,7 @@ class PtrArray : public ArrayBase
{
friend class GCHeap;
friend class ClrDataAccess;
- friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, DWORD flags, BOOL bAllocateInLargeHeap);
+ friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, DWORD flags);
friend class JIT_TrialAlloc;
friend class CheckAsmOffsets;
diff --git a/src/coreclr/src/vm/object.inl b/src/coreclr/src/vm/object.inl
index 8ed202dc25b5ae..cac15829543a95 100644
--- a/src/coreclr/src/vm/object.inl
+++ b/src/coreclr/src/vm/object.inl
@@ -175,24 +175,6 @@ inline DWORD ArrayBase::GetNumComponents() const
return m_NumComponents;
}
-#ifndef DACCESS_COMPILE
-inline void ArrayBase::SetArrayMethodTable(MethodTable *pArrayMT)
-{
- LIMITED_METHOD_CONTRACT;
-
- SetMethodTable(pArrayMT
- DEBUG_ARG(TRUE));
-}
-
-inline void ArrayBase::SetArrayMethodTableForLargeObject(MethodTable *pArrayMT)
-{
- LIMITED_METHOD_CONTRACT;
-
- SetMethodTableForLargeObject(pArrayMT
- DEBUG_ARG(TRUE));
-}
-#endif // !DACCESS_COMPILE
-
inline /* static */ unsigned ArrayBase::GetDataPtrOffset(MethodTable* pMT)
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/coreclr/src/vm/olevariant.cpp b/src/coreclr/src/vm/olevariant.cpp
index 7be1d015db8a39..240b56427f7619 100644
--- a/src/coreclr/src/vm/olevariant.cpp
+++ b/src/coreclr/src/vm/olevariant.cpp
@@ -1835,7 +1835,7 @@ void OleVariant::MarshalIUnknownArrayComToOle(BASEARRAYREF *pComArray, void *ole
MarshalInterfaceArrayComToOleHelper(pComArray, oleArray, pElementMT, FALSE, cElements);
}
-void OleVariant::ClearInterfaceArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
+void OleVariant::ClearInterfaceArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
{
CONTRACTL
{
@@ -2009,7 +2009,7 @@ void OleVariant::MarshalBSTRArrayComToOle(BASEARRAYREF *pComArray, void *oleArra
GCPROTECT_END();
}
-void OleVariant::ClearBSTRArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
+void OleVariant::ClearBSTRArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
{
CONTRACTL
{
@@ -2112,7 +2112,7 @@ void OleVariant::MarshalNonBlittableRecordArrayComToOle(BASEARRAYREF *pComArray,
}
}
-void OleVariant::ClearNonBlittableRecordArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
+void OleVariant::ClearNonBlittableRecordArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
{
CONTRACTL
{
@@ -2124,20 +2124,15 @@ void OleVariant::ClearNonBlittableRecordArray(BASEARRAYREF* pComArray, void *ole
}
CONTRACTL_END;
- ASSERT_PROTECTED(pComArray);
-
SIZE_T elemSize = pInterfaceMT->GetNativeSize();
+ SIZE_T componentSize = TypeHandle(pInterfaceMT).MakeSZArray().GetMethodTable()->GetComponentSize();
BYTE *pOle = (BYTE *) oleArray;
BYTE *pOleEnd = pOle + elemSize * cElements;
- SIZE_T srcofs = *pComArray != NULL ? ArrayBase::GetDataPtrOffset((*pComArray)->GetMethodTable()) : 0;
while (pOle < pOleEnd)
{
- BYTE* managedData = (BYTE*)(*(LPVOID*)pComArray) + srcofs;
-
- MarshalStructViaILStubCode(pManagedMarshalerCode, managedData, pOle, StructMarshalStubs::MarshalOperation::Cleanup);
+ MarshalStructViaILStubCode(pManagedMarshalerCode, nullptr, pOle, StructMarshalStubs::MarshalOperation::Cleanup);
pOle += elemSize;
- srcofs += (*pComArray)->GetComponentSize();
}
}
@@ -2255,7 +2250,7 @@ void OleVariant::MarshalLPWSTRRArrayComToOle(BASEARRAYREF *pComArray, void *oleA
}
}
-void OleVariant::ClearLPWSTRArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
+void OleVariant::ClearLPWSTRArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
{
CONTRACTL
{
@@ -2392,7 +2387,7 @@ void OleVariant::MarshalLPSTRRArrayComToOle(BASEARRAYREF *pComArray, void *oleAr
}
}
-void OleVariant::ClearLPSTRArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
+void OleVariant::ClearLPSTRArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
{
CONTRACTL
{
@@ -2736,7 +2731,7 @@ void OleVariant::MarshalRecordArrayComToOle(BASEARRAYREF *pComArray, void *oleAr
}
-void OleVariant::ClearRecordArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_T cElements, MethodTable *pElementMT, PCODE pManagedMarshalerCode)
+void OleVariant::ClearRecordArray(void *oleArray, SIZE_T cElements, MethodTable *pElementMT, PCODE pManagedMarshalerCode)
{
CONTRACTL
{
@@ -2751,7 +2746,7 @@ void OleVariant::ClearRecordArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_
if (!pElementMT->IsBlittable())
{
_ASSERTE(pElementMT->HasLayout());
- ClearNonBlittableRecordArray(pComArray, oleArray, cElements, pElementMT, pManagedMarshalerCode);
+ ClearNonBlittableRecordArray(oleArray, cElements, pElementMT, pManagedMarshalerCode);
}
}
@@ -4072,7 +4067,7 @@ void OleVariant::MarshalVariantArrayComToOle(BASEARRAYREF *pComArray, void *oleA
GCPROTECT_END();
}
-void OleVariant::ClearVariantArray(BASEARRAYREF* pComArray, void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
+void OleVariant::ClearVariantArray(void *oleArray, SIZE_T cElements, MethodTable *pInterfaceMT, PCODE pManagedMarshalerCode)
{
CONTRACTL
{
diff --git a/src/coreclr/src/vm/olevariant.h b/src/coreclr/src/vm/olevariant.h
index 2cca65fadf8cbb..a09616d7818092 100644
--- a/src/coreclr/src/vm/olevariant.h
+++ b/src/coreclr/src/vm/olevariant.h
@@ -462,7 +462,7 @@ class OleVariant
BOOL fBestFitMapping, BOOL fThrowOnUnmappableChar,
BOOL fOleArrayIsValid,SIZE_T cElements,
PCODE pManagedMarshalerCode);
- void (*ClearOleArray)(BASEARRAYREF* pComArray, void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
+ void (*ClearOleArray)(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
};
static const Marshaler* GetMarshalerForVarType(VARTYPE vt, BOOL fThrow);
@@ -521,7 +521,7 @@ class OleVariant
MethodTable* pInterfaceMT, BOOL fBestFitMapping,
BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid,
SIZE_T cElements, PCODE pManagedMarshalerCode);
- static void ClearBSTRArray(BASEARRAYREF* comArray, void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
+ static void ClearBSTRArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
#endif // FEATURE_COMINTEROP
static void MarshalNonBlittableRecordArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
@@ -530,7 +530,7 @@ class OleVariant
MethodTable* pInterfaceMT, BOOL fBestFitMapping,
BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid,
SIZE_T cElements, PCODE pManagedMarshalerCode);
- static void ClearNonBlittableRecordArray(BASEARRAYREF* comArray, void* oleArray,
+ static void ClearNonBlittableRecordArray(void* oleArray,
SIZE_T cElements, MethodTable* pInterfaceMT,
PCODE pManagedMarshalerCode);
@@ -540,7 +540,7 @@ class OleVariant
MethodTable* pInterfaceMT, BOOL fBestFitMapping,
BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid,
SIZE_T cElements, PCODE pManagedMarshalerCode);
- static void ClearLPWSTRArray(BASEARRAYREF* comArray, void* oleArray,
+ static void ClearLPWSTRArray(void* oleArray,
SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
static void MarshalLPSTRArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
@@ -549,7 +549,7 @@ class OleVariant
MethodTable* pInterfaceMT, BOOL fBestFitMapping,
BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid,
SIZE_T cElements, PCODE pManagedMarshalerCode);
- static void ClearLPSTRArray(BASEARRAYREF* comArray, void* oleArray,
+ static void ClearLPSTRArray(void* oleArray,
SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
static void MarshalDateArrayOleToCom(void* oleArray, BASEARRAYREF* pComArray,
@@ -564,7 +564,7 @@ class OleVariant
BOOL fBestFitMapping, BOOL fThrowOnUnmappableChar,
BOOL fOleArrayValid,
SIZE_T cElements, PCODE pManagedMarshalerCode);
- static void ClearRecordArray(BASEARRAYREF* comArray, void* oleArray, SIZE_T cElements, MethodTable* pElementMT, PCODE pManagedMarshalerCode);
+ static void ClearRecordArray(void* oleArray, SIZE_T cElements, MethodTable* pElementMT, PCODE pManagedMarshalerCode);
#ifdef FEATURE_COMINTEROP
static HRESULT MarshalCommonOleRefVariantForObject(OBJECTREF *pObj, VARIANT *pOle);
@@ -574,7 +574,7 @@ class OleVariant
MethodTable* pInterfaceMT, BOOL fBestFitMapping,
BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid,
SIZE_T cElements, PCODE pManagedMarshalerCode);
- static void ClearInterfaceArray(BASEARRAYREF* comArray, void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
+ static void ClearInterfaceArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
static void MarshalBoolVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
@@ -623,7 +623,7 @@ class OleVariant
MethodTable* pInterfaceMT, BOOL fBestFitMapping,
BOOL fThrowOnUnmappableChar, BOOL fOleArrayValid,
SIZE_T cElements, PCODE pManagedMarshalerCode);
- static void ClearVariantArray(BASEARRAYREF* comArray, void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
+ static void ClearVariantArray(void* oleArray, SIZE_T cElements, MethodTable* pInterfaceMT, PCODE pManagedMarshalerCode);
#ifdef FEATURE_CLASSIC_COMINTEROP
static void MarshalArrayVariantOleToCom(VARIANT* pOleVariant, VariantData* pComVariant);
diff --git a/src/coreclr/src/vm/onstackreplacement.cpp b/src/coreclr/src/vm/onstackreplacement.cpp
new file mode 100644
index 00000000000000..4bb80efc5e94b2
--- /dev/null
+++ b/src/coreclr/src/vm/onstackreplacement.cpp
@@ -0,0 +1,84 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: onstackreplacement.cpp
+//
+// ===========================================================================
+
+#include "common.h"
+#include "onstackreplacement.h"
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+
+CrstStatic OnStackReplacementManager::s_lock;
+
+#if _DEBUG
+int OnStackReplacementManager::s_patchpointId = 0;
+#endif
+
+#ifndef DACCESS_COMPILE
+
+void OnStackReplacementManager::StaticInitialize()
+{
+ WRAPPER_NO_CONTRACT;
+ s_lock.Init(CrstJitPatchpoint, CrstFlags(CRST_UNSAFE_COOPGC));
+}
+
+OnStackReplacementManager::OnStackReplacementManager(LoaderAllocator * loaderAllocator) : m_allocator(loaderAllocator), m_jitPatchpointTable()
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LockOwner lock = {&s_lock, IsOwnerOfCrst};
+ m_jitPatchpointTable.Init(INITIAL_TABLE_SIZE, &lock, m_allocator->GetLowFrequencyHeap());
+}
+
+// Fetch or create patchpoint info for this patchpoint.
+PerPatchpointInfo* OnStackReplacementManager::GetPerPatchpointInfo(PCODE ip)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ PTR_PCODE ppId = dac_cast(ip);
+ PTR_PerPatchpointInfo ppInfo = NULL;
+
+ BOOL hasData = m_jitPatchpointTable.GetValueSpeculative(ppId, (HashDatum*)&ppInfo);
+
+ if (!hasData)
+ {
+ CrstHolder lock(&s_lock);
+ hasData = m_jitPatchpointTable.GetValue(ppId, (HashDatum*)&ppInfo);
+
+ if (!hasData)
+ {
+ void * pMem = m_allocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(PerPatchpointInfo)));
+ ppInfo = dac_cast(new (pMem) PerPatchpointInfo());
+ m_jitPatchpointTable.InsertValue(ppId, (HashDatum)ppInfo);
+
+#if _DEBUG
+ ppInfo->m_patchpointId = ++s_patchpointId;
+#endif
+
+ }
+ }
+
+ return ppInfo;
+}
+
+#endif // !DACCESS_COMPILE
+
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
+
diff --git a/src/coreclr/src/vm/onstackreplacement.h b/src/coreclr/src/vm/onstackreplacement.h
new file mode 100644
index 00000000000000..8ef3188cfc22a8
--- /dev/null
+++ b/src/coreclr/src/vm/onstackreplacement.h
@@ -0,0 +1,113 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: onstackreplacement.h
+//
+// ===========================================================================
+
+#ifndef ON_STACK_REPLACEMENT_H
+#define ON_STACK_REPLACEMENT_H
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+
+#include "daccess.h"
+#include "eehash.h"
+
+// PerPatchpointInfo is the runtime state tracked for each active patchpoint.
+//
+// A patchpoint becomes active when the JIT_HELP_PATCHPOINT helper is invoked
+// by jitted code.
+//
+struct PerPatchpointInfo
+{
+ PerPatchpointInfo() :
+ m_osrMethodCode(0),
+ m_patchpointCount(0),
+ m_flags(0)
+#if _DEBUG
+ , m_patchpointId(0)
+#endif
+ {
+ }
+
+ // Flag bits
+ enum
+ {
+ patchpoint_triggered = 0x1,
+ patchpoint_invalid = 0x2
+ };
+
+ // The OSR method entry point for this patchpoint.
+ // NULL if no method has yet been jitted, or jitting failed.
+ PCODE m_osrMethodCode;
+ // Number of times jitted code has called the helper at this patchpoint.
+ LONG m_patchpointCount;
+ // Status of this patchpoint
+ LONG m_flags;
+
+#if _DEBUG
+ int m_patchpointId;
+#endif
+};
+
+typedef DPTR(PerPatchpointInfo) PTR_PerPatchpointInfo;
+typedef EEPtrHashTable JitPatchpointTable;
+
+// OnStackReplacementManager keeps track of mapping from patchpoint id to
+// per patchpoint info.
+//
+// Patchpoint identity is currently the return address of the helper call
+// in the jitted code.
+//
+class OnStackReplacementManager
+{
+#if DACCESS_COMPILE
+public:
+ OnStackReplacementManager(LoaderAllocator *) {};
+#else
+public:
+ static void StaticInitialize();
+
+public:
+ OnStackReplacementManager(LoaderAllocator * loaderHeaAllocator);
+
+public:
+ PerPatchpointInfo* GetPerPatchpointInfo(PCODE ip);
+#endif // DACCESS_COMPILE
+
+private:
+
+ enum
+ {
+ INITIAL_TABLE_SIZE = 10
+ };
+
+ static CrstStatic s_lock;
+
+#if _DEBUG
+ static int s_patchpointId;
+#endif
+
+private:
+
+ PTR_LoaderAllocator m_allocator;
+ JitPatchpointTable m_jitPatchpointTable;
+};
+
+#else // FEATURE_ON_STACK_REPLACEMENT
+
+class OnStackReplacementManager
+{
+public:
+ static void StaticInitialize() {}
+public:
+
+ OnStackReplacementManager(LoaderAllocator *) {}
+};
+
+#endif // FEATURE_ON_STACK_REPLACEMENT
+
+typedef DPTR(OnStackReplacementManager) PTR_OnStackReplacementManager;
+
+#endif // ON_STACK_REPLACEMENT_H
diff --git a/src/coreclr/src/vm/prestub.cpp b/src/coreclr/src/vm/prestub.cpp
index d8544448c2f4b2..24c8dbd411e4b6 100644
--- a/src/coreclr/src/vm/prestub.cpp
+++ b/src/coreclr/src/vm/prestub.cpp
@@ -997,10 +997,13 @@ PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEn
// The profiler may have changed the code on the callback. Need to
// pick up the new code.
+ //
+ // (don't want this for OSR, need to see how it works)
COR_ILMETHOD_DECODER ilDecoderTemp;
COR_ILMETHOD_DECODER *pilHeader = GetAndVerifyILHeader(pConfig, &ilDecoderTemp);
*pFlags = pConfig->GetJitCompilationFlags();
PCODE pOtherCode = NULL;
+
EX_TRY
{
#ifndef CROSSGEN_COMPILE
@@ -1287,6 +1290,9 @@ PrepareCodeConfig::JitOptimizationTier PrepareCodeConfig::GetJitOptimizationTier
case NativeCodeVersion::OptimizationTier1:
return JitOptimizationTier::OptimizedTier1;
+ case NativeCodeVersion::OptimizationTier1OSR:
+ return JitOptimizationTier::OptimizedTier1OSR;
+
case NativeCodeVersion::OptimizationTierOptimized:
return JitOptimizationTier::Optimized;
@@ -1311,6 +1317,7 @@ const char *PrepareCodeConfig::GetJitOptimizationTierStr(PrepareCodeConfig *conf
case JitOptimizationTier::Optimized: return "Optimized";
case JitOptimizationTier::QuickJitted: return "QuickJitted";
case JitOptimizationTier::OptimizedTier1: return "OptimizedTier1";
+ case JitOptimizationTier::OptimizedTier1OSR: return "OptimizedTier1OSR";
default:
UNREACHABLE();
diff --git a/src/coreclr/src/vm/sampleprofiler.cpp b/src/coreclr/src/vm/sampleprofiler.cpp
index e3abbb4365e6b3..d93b47114fb1cc 100644
--- a/src/coreclr/src/vm/sampleprofiler.cpp
+++ b/src/coreclr/src/vm/sampleprofiler.cpp
@@ -6,7 +6,6 @@
#include "eventpipebuffermanager.h"
#include "eventpipeeventinstance.h"
#include "sampleprofiler.h"
-#include "hosting.h"
#include "threadsuspend.h"
#ifdef FEATURE_PERFTRACING
diff --git a/src/coreclr/src/vm/spinlock.cpp b/src/coreclr/src/vm/spinlock.cpp
index 00bb1a39818a95..7bae099c4c00f5 100644
--- a/src/coreclr/src/vm/spinlock.cpp
+++ b/src/coreclr/src/vm/spinlock.cpp
@@ -31,27 +31,12 @@ ULONG SpinLockProfiler::s_ulSpins [LOCK_TYPE_DEFAULT + 1] = { 0 };
SpinLock::SpinLock()
{
- // Global SpinLock variables will cause the constructor to be
- // called during DllInit, which means we cannot use full contracts
- // because we have not called InitUtilCode yet.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
m_Initialized = UnInitialized;
}
-
-SpinLock::~SpinLock()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- }
- CONTRACTL_END;
-
-}
-
void SpinLock::Init(LOCK_TYPE type, bool RequireCoopGC)
{
CONTRACTL
diff --git a/src/coreclr/src/vm/spinlock.h b/src/coreclr/src/vm/spinlock.h
index b79ab6b5c08ba1..aad85618b144b8 100644
--- a/src/coreclr/src/vm/spinlock.h
+++ b/src/coreclr/src/vm/spinlock.h
@@ -175,7 +175,6 @@ class SpinLock
public:
SpinLock ();
- ~SpinLock ();
//Init method, initialize lock and _DEBUG flags
void Init(LOCK_TYPE type, bool RequireCoopGC = FALSE);
diff --git a/src/coreclr/src/vm/threads.cpp b/src/coreclr/src/vm/threads.cpp
index 9af009fce16f3c..5025ad4c59150b 100644
--- a/src/coreclr/src/vm/threads.cpp
+++ b/src/coreclr/src/vm/threads.cpp
@@ -1005,10 +1005,6 @@ HRESULT Thread::DetachThread(BOOL fDLLThreadDetach)
SetThread(NULL);
SetAppDomain(NULL);
-#ifdef ENABLE_CONTRACTS_DATA
- m_pClrDebugState = NULL;
-#endif //ENABLE_CONTRACTS_DATA
-
FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead));
// Do not touch Thread object any more. It may be destroyed.
@@ -1337,7 +1333,6 @@ Thread::Thread()
#endif
#ifdef ENABLE_CONTRACTS
- m_pClrDebugState = NULL;
m_ulEnablePreemptiveGCCount = 0;
#endif
@@ -6118,7 +6113,7 @@ size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, siz
NULL
);
- if (((UINT_PTR)g_pMSCorEE) != uImageBase)
+ if (((UINT_PTR)g_hThisInst) != uImageBase)
{
break;
}
diff --git a/src/coreclr/src/vm/threads.h b/src/coreclr/src/vm/threads.h
index ebb37230484793..faab0567a813f8 100644
--- a/src/coreclr/src/vm/threads.h
+++ b/src/coreclr/src/vm/threads.h
@@ -1049,8 +1049,9 @@ class Thread
friend class DacDbiInterfaceImpl; // DacDbiInterfaceImpl::GetThreadHandle(HANDLE * phThread);
#endif // DACCESS_COMPILE
friend class ProfToEEInterfaceImpl; // HRESULT ProfToEEInterfaceImpl::GetHandleFromThread(ThreadID threadId, HANDLE *phThread);
+
friend void SetupTLSForThread(Thread* pThread);
- friend class UnC;
+
friend class CheckAsmOffsets;
friend class ExceptionTracker;
@@ -1913,14 +1914,6 @@ class Thread
bool DetectHandleILStubsForDebugger();
-#ifdef ENABLE_CONTRACTS
- ClrDebugState *GetClrDebugState()
- {
- LIMITED_METHOD_CONTRACT;
- return m_pClrDebugState;
- }
-#endif
-
//**************************************************************
// GC interaction
//**************************************************************
@@ -2057,29 +2050,33 @@ class Thread
void BeginNoTriggerGC(const char *szFile, int lineNum)
{
WRAPPER_NO_CONTRACT;
- m_pClrDebugState->IncrementGCNoTriggerCount();
+
+ ClrDebugState* pClrDebugState = GetClrDebugState();
+ pClrDebugState->IncrementGCNoTriggerCount();
+
if (PreemptiveGCDisabled())
{
- m_pClrDebugState->IncrementGCForbidCount();
+ pClrDebugState->IncrementGCForbidCount();
}
}
void EndNoTriggerGC()
{
WRAPPER_NO_CONTRACT;
- _ASSERTE(m_pClrDebugState->GetGCNoTriggerCount() != 0 || (m_pClrDebugState->ViolationMask() & BadDebugState));
- m_pClrDebugState->DecrementGCNoTriggerCount();
+ ClrDebugState* pClrDebugState = GetClrDebugState();
+
+ _ASSERTE(pClrDebugState->GetGCNoTriggerCount() != 0 || (pClrDebugState->ViolationMask() & BadDebugState));
+ pClrDebugState->DecrementGCNoTriggerCount();
- if (m_pClrDebugState->GetGCForbidCount())
+ if (pClrDebugState->GetGCForbidCount())
{
- m_pClrDebugState->DecrementGCForbidCount();
+ pClrDebugState->DecrementGCForbidCount();
}
}
void BeginForbidGC(const char *szFile, int lineNum)
{
WRAPPER_NO_CONTRACT;
- _ASSERTE(this == GetThread());
#ifdef PROFILING_SUPPORTED
_ASSERTE(PreemptiveGCDisabled()
|| CORProfilerPresent() || // This added to allow profiler to use GetILToNativeMapping
@@ -2094,7 +2091,6 @@ class Thread
void EndForbidGC()
{
WRAPPER_NO_CONTRACT;
- _ASSERTE(this == GetThread());
#ifdef PROFILING_SUPPORTED
_ASSERTE(PreemptiveGCDisabled() ||
CORProfilerPresent() || // This added to allow profiler to use GetILToNativeMapping
@@ -2109,43 +2105,45 @@ class Thread
BOOL GCNoTrigger()
{
WRAPPER_NO_CONTRACT;
- _ASSERTE(this == GetThread());
- if ( (GCViolation|BadDebugState) & m_pClrDebugState->ViolationMask() )
+ ClrDebugState* pClrDebugState = GetClrDebugState();
+ if ( (GCViolation|BadDebugState) & pClrDebugState->ViolationMask() )
{
return FALSE;
}
- return m_pClrDebugState->GetGCNoTriggerCount();
+ return pClrDebugState->GetGCNoTriggerCount();
}
BOOL GCForbidden()
{
WRAPPER_NO_CONTRACT;
- _ASSERTE(this == GetThread());
- if ( (GCViolation|BadDebugState) & m_pClrDebugState->ViolationMask())
+ ClrDebugState* pClrDebugState = GetClrDebugState();
+ if ( (GCViolation|BadDebugState) & pClrDebugState->ViolationMask())
{
return FALSE;
}
- return m_pClrDebugState->GetGCForbidCount();
+ return pClrDebugState->GetGCForbidCount();
}
BOOL RawGCNoTrigger()
{
LIMITED_METHOD_CONTRACT;
- if (m_pClrDebugState->ViolationMask() & BadDebugState)
+ ClrDebugState* pClrDebugState = GetClrDebugState();
+ if (pClrDebugState->ViolationMask() & BadDebugState)
{
return 0;
}
- return m_pClrDebugState->GetGCNoTriggerCount();
+ return pClrDebugState->GetGCNoTriggerCount();
}
BOOL RawGCForbidden()
{
LIMITED_METHOD_CONTRACT;
- if (m_pClrDebugState->ViolationMask() & BadDebugState)
+ ClrDebugState* pClrDebugState = GetClrDebugState();
+ if (pClrDebugState->ViolationMask() & BadDebugState)
{
return 0;
}
- return m_pClrDebugState->GetGCForbidCount();
+ return pClrDebugState->GetGCForbidCount();
}
#endif // ENABLE_CONTRACTS_IMPL
@@ -3495,8 +3493,6 @@ class Thread
private:
#ifdef ENABLE_CONTRACTS_DATA
- struct ClrDebugState *m_pClrDebugState; // Pointer to ClrDebugState for quick access
-
ULONG m_ulEnablePreemptiveGCCount;
#endif // _DEBUG
@@ -5829,8 +5825,7 @@ class GCForbid : AutoCleanupGCAssert
m_fConditional = fConditional;
if (m_fConditional)
{
- Thread *pThread = GetThread();
- m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_pClrDebugState = ::GetClrDebugState();
m_oldClrDebugState = *m_pClrDebugState;
m_pClrDebugState->ViolationMaskReset( GCViolation );
@@ -5854,8 +5849,7 @@ class GCForbid : AutoCleanupGCAssert
m_fConditional = TRUE;
- Thread *pThread = GetThread();
- m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_pClrDebugState = ::GetClrDebugState();
m_oldClrDebugState = *m_pClrDebugState;
m_pClrDebugState->ViolationMaskReset( GCViolation );
@@ -5914,7 +5908,7 @@ class GCNoTrigger
if (m_fConditional)
{
Thread * pThread = GetThreadNULLOk();
- m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_pClrDebugState = ::GetClrDebugState();
m_oldClrDebugState = *m_pClrDebugState;
m_pClrDebugState->ViolationMaskReset( GCViolation );
@@ -5941,7 +5935,7 @@ class GCNoTrigger
m_fConditional = TRUE;
Thread * pThread = GetThreadNULLOk();
- m_pClrDebugState = pThread ? pThread->GetClrDebugState() : ::GetClrDebugState();
+ m_pClrDebugState = ::GetClrDebugState();
m_oldClrDebugState = *m_pClrDebugState;
m_pClrDebugState->ViolationMaskReset( GCViolation );
diff --git a/src/coreclr/src/vm/tieredcompilation.cpp b/src/coreclr/src/vm/tieredcompilation.cpp
index ffd986a53bd24d..e2ff313e8f1f51 100644
--- a/src/coreclr/src/vm/tieredcompilation.cpp
+++ b/src/coreclr/src/vm/tieredcompilation.cpp
@@ -933,6 +933,12 @@ CORJIT_FLAGS TieredCompilationManager::GetJitFlags(NativeCodeVersion nativeCodeV
nativeCodeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized);
goto Optimized;
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+ case NativeCodeVersion::OptimizationTier1OSR:
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_OSR);
+ // fall through
+#endif
+
case NativeCodeVersion::OptimizationTier1:
flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1);
// fall through
diff --git a/src/coreclr/src/vm/vars.cpp b/src/coreclr/src/vm/vars.cpp
index ba0cac3dea478b..6c72710c50e2d0 100644
--- a/src/coreclr/src/vm/vars.cpp
+++ b/src/coreclr/src/vm/vars.cpp
@@ -30,7 +30,7 @@ const char g_psBaseLibrarySatelliteAssemblyName[] = CoreLibSatelliteName_A;
Volatile g_TrapReturningThreads;
-HINSTANCE g_pMSCorEE;
+HINSTANCE g_hThisInst;
BBSweep g_BBSweep;
#ifdef _DEBUG
@@ -178,8 +178,6 @@ int g_IGCTrimCommit = 0;
#endif
-BOOL g_fEnableETW = FALSE;
-
//
// Global state variable indicating if the EE is in its init phase.
//
@@ -221,12 +219,6 @@ bool dbg_fDrasticShutdown = false;
#endif
bool g_fInControlC = false;
-//
-//
-// IJW needs the shim HINSTANCE
-//
-HINSTANCE g_hInstShim = NULL;
-
#endif // #ifndef DACCESS_COMPILE
#ifdef DACCESS_COMPILE
diff --git a/src/coreclr/src/vm/vars.hpp b/src/coreclr/src/vm/vars.hpp
index 65bb74fff44e92..ef9c8dfe8cee55 100644
--- a/src/coreclr/src/vm/vars.hpp
+++ b/src/coreclr/src/vm/vars.hpp
@@ -347,7 +347,7 @@ GARY_DECL(TypeHandle, g_pPredefinedArrayTypes, ELEMENT_TYPE_MAX);
extern "C" Volatile g_TrapReturningThreads;
-EXTERN HINSTANCE g_pMSCorEE;
+EXTERN HINSTANCE g_hThisInst;
EXTERN BBSweep g_BBSweep;
EXTERN IBCLogger g_IBCLogger;
@@ -471,8 +471,6 @@ extern int g_IGCHoardVM;
extern int g_IGCTrimCommit;
#endif
-extern BOOL g_fEnableETW;
-
// Returns a BOOL to indicate if the runtime is active or not
BOOL IsRuntimeActive();
@@ -629,12 +627,6 @@ inline bool CORDebuggerAttached()
-
-//
-// IJW needs the shim HINSTANCE
-//
-EXTERN HINSTANCE g_hInstShim;
-
#ifndef TARGET_UNIX
GVAL_DECL(SIZE_T, g_runtimeLoadedBaseAddress);
GVAL_DECL(SIZE_T, g_runtimeVirtualSize);
diff --git a/src/coreclr/src/zap/zapinfo.cpp b/src/coreclr/src/zap/zapinfo.cpp
index 9163da06daf92d..534674fd5fc002 100644
--- a/src/coreclr/src/zap/zapinfo.cpp
+++ b/src/coreclr/src/zap/zapinfo.cpp
@@ -509,7 +509,7 @@ void ZapInfo::CompileMethod()
&m_currentMethodInfo,
CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS,
&pCode,
- &cCode );
+ &cCode);
if (FAILED(res))
{
// We will fall back to the "main" JIT on failure.
@@ -2998,6 +2998,18 @@ void ZapInfo::setVars(CORINFO_METHOD_HANDLE ftn,
return;
}
+void ZapInfo::setPatchpointInfo(PatchpointInfo* patchpointInfo)
+{
+ // No patchpoint info when prejitting
+ UNREACHABLE();
+}
+
+PatchpointInfo* ZapInfo::getOSRInfo(unsigned * ilOffset)
+{
+ // No patchpoint info when prejitting
+ UNREACHABLE();
+}
+
void * ZapInfo::allocateArray(size_t cBytes)
{
return new BYTE[cBytes];
diff --git a/src/coreclr/src/zap/zapinfo.h b/src/coreclr/src/zap/zapinfo.h
index 4ddcab11ea1e47..35f7e9e17bffea 100644
--- a/src/coreclr/src/zap/zapinfo.h
+++ b/src/coreclr/src/zap/zapinfo.h
@@ -707,6 +707,10 @@ class ZapInfo
void getGSCookie(GSCookie * pCookieVal,
GSCookie** ppCookieVal);
+
+ void setPatchpointInfo(PatchpointInfo * patchpointInfo);
+ PatchpointInfo * getOSRInfo(unsigned * ilOffset);
+
// ICorErrorInfo
HRESULT GetErrorHRESULT(struct _EXCEPTION_POINTERS *pExceptionPointers);
diff --git a/src/coreclr/src/zap/zapper.cpp b/src/coreclr/src/zap/zapper.cpp
index a11f605aa83801..70388bd50018bc 100644
--- a/src/coreclr/src/zap/zapper.cpp
+++ b/src/coreclr/src/zap/zapper.cpp
@@ -1181,10 +1181,17 @@ void Zapper::InitializeCompilerFlags(CORCOMPILE_VERSION_INFO * pVersionInfo)
}
// .NET Core requires SSE2.
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2);
-
#endif // TARGET_X86
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSE);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSE2);
+#endif
+#if defined(TARGET_ARM64)
+ m_pOpt->m_compilerFlags.Set(InstructionSet_ArmBase);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_AdvSimd);
+#endif
+
#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)
// If we're crossgenning CoreLib, allow generating non-VEX intrinsics. The generated code might
// not actually be supported by the processor at runtime so we compensate for it by
@@ -1198,21 +1205,24 @@ void Zapper::InitializeCompilerFlags(CORCOMPILE_VERSION_INFO * pVersionInfo)
m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD);
#if defined(TARGET_X86) || defined(TARGET_AMD64)
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AES);
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_PCLMULQDQ);
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3);
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSSE3);
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE41);
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE42);
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_POPCNT);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSE);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSE2);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_AES);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_PCLMULQDQ);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSE3);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSSE3);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSE41);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_SSE42);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_POPCNT);
// Leaving out CORJIT_FLAGS::CORJIT_FLAG_USE_AVX, CORJIT_FLAGS::CORJIT_FLAG_USE_FMA
// CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2, CORJIT_FLAGS::CORJIT_FLAG_USE_BMI1,
// CORJIT_FLAGS::CORJIT_FLAG_USE_BMI2 on purpose - these require VEX encodings
// and the JIT doesn't support generating code for methods with mixed encodings.
- m_pOpt->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_LZCNT);
+ m_pOpt->m_compilerFlags.Set(InstructionSet_LZCNT);
#endif // defined(TARGET_X86) || defined(TARGET_AMD64)
}
#endif // defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)
+ m_pOpt->m_compilerFlags.Set64BitInstructionSetVariants();
if ( m_pOpt->m_compilerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO)
&& m_pOpt->m_compilerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE)
diff --git a/src/coreclr/tests/CMakeLists.txt b/src/coreclr/tests/CMakeLists.txt
index 47ee1e3a9a4de3..1ea31cdd8a0357 100644
--- a/src/coreclr/tests/CMakeLists.txt
+++ b/src/coreclr/tests/CMakeLists.txt
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.14.2)
cmake_policy(SET CMP0042 NEW)
project(Tests)
-include(${CLR_ENG_NATIVE_DIR}/configureplatform.cmake)
+include(${CLR_ENG_NATIVE_DIR}/configuretools.cmake)
set(INC_PLATFORM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src/Common/Platform)
if (CLR_CMAKE_TARGET_WIN32)
@@ -12,7 +12,6 @@ endif()
# Include global configure settings
include(${CMAKE_CURRENT_SOURCE_DIR}/../configurecompiler.cmake)
-include(${CLR_ENG_NATIVE_DIR}/configuretools.cmake)
# Compile options
if (CLR_CMAKE_HOST_WIN32)
diff --git a/src/coreclr/tests/Directory.Build.props b/src/coreclr/tests/Directory.Build.props
index 7d7cf074c2f9c0..7e57e7a601e994 100644
--- a/src/coreclr/tests/Directory.Build.props
+++ b/src/coreclr/tests/Directory.Build.props
@@ -15,11 +15,10 @@
- $(MSBuildThisFileDirectory)..\..\..\artifacts\
- $(RootBinDir)bin\coreclr\$(TargetOS).$(BuildArch).$(BuildType)\
+ $(ArtifactsDir)bin\coreclr\$(TargetOS).$(BuildArch).$(BuildType)\
$(__TestWorkingDir)\
- $(RootBinDir)tests\coreclr\$(TargetOS).$(BuildArch).$(BuildType)\
+ $(ArtifactsDir)tests\coreclr\$(TargetOS).$(BuildArch).$(BuildType)\
$(__AltJitArch)
@@ -28,14 +27,14 @@
- $(RootBinDir)obj\
+ $(ArtifactsDir)obj\
$(BaseIntermediateOutputPath)\$(TargetOS).$(BuildArch).$(BuildType)
$(BaseIntermediateOutputPath)\coreclr\$(TargetOS).$(BuildArch).$(BuildType)
- $(RootBinDir)TargetingPack\
+ $(ArtifactsDir)TargetingPack\
diff --git a/src/coreclr/tests/dir.common.props b/src/coreclr/tests/dir.common.props
index c2560603383813..cece71183fbf88 100644
--- a/src/coreclr/tests/dir.common.props
+++ b/src/coreclr/tests/dir.common.props
@@ -17,10 +17,10 @@
- $(RootRepoDir)/artifacts/tests/coreclr/obj/$(OSPlatformConfig)/Managed/$(BuildProjectRelativeDir)
+ $(RepoRoot)/artifacts/tests/coreclr/obj/$(OSPlatformConfig)/Managed/$(BuildProjectRelativeDir)
$(BaseIntermediateOutputPath)
- $(RootRepoDir)/artifacts/tests/coreclr/$(OSPlatformConfig)/$(BuildProjectRelativeDir)
+ $(RepoRoot)/artifacts/tests/coreclr/$(OSPlatformConfig)/$(BuildProjectRelativeDir)
$(BaseOutputPath)
diff --git a/src/coreclr/tests/issues.targets b/src/coreclr/tests/issues.targets
index 1030d821f79929..e89dc91c3c05a4 100644
--- a/src/coreclr/tests/issues.targets
+++ b/src/coreclr/tests/issues.targets
@@ -967,9 +967,6 @@
needs triage
-
- needs triage
-
needs triage
diff --git a/src/coreclr/tests/override.targets b/src/coreclr/tests/override.targets
index b45306f93cc6b2..e51db2c91a3466 100644
--- a/src/coreclr/tests/override.targets
+++ b/src/coreclr/tests/override.targets
@@ -14,7 +14,7 @@
>
-
+
diff --git a/src/coreclr/tests/publishdependency.targets b/src/coreclr/tests/publishdependency.targets
index 13a4f730000602..bdb61e870d427b 100644
--- a/src/coreclr/tests/publishdependency.targets
+++ b/src/coreclr/tests/publishdependency.targets
@@ -21,7 +21,7 @@
- $(RootRepoDir)\artifacts\bin\coreclr\$(TargetOS).$(BuildArch).$(BuildType)
+ $(RepoRoot)\artifacts\bin\coreclr\$(TargetOS).$(BuildArch).$(BuildType)
$(ProductDestination)\ref
diff --git a/src/coreclr/tests/runtest.cmd b/src/coreclr/tests/runtest.cmd
index c80629d28e045b..bfe2925f2b713e 100644
--- a/src/coreclr/tests/runtest.cmd
+++ b/src/coreclr/tests/runtest.cmd
@@ -177,7 +177,7 @@ if defined RunInUnloadableContext (
set __RuntestPyArgs=%__RuntestPyArgs% --run_in_context
)
-set NEXTCMD=python "%__ProjectDir%\runtest.py" %__RuntestPyArgs%
+set NEXTCMD=python3 "%__ProjectDir%\runtest.py" %__RuntestPyArgs%
echo !NEXTCMD!
!NEXTCMD!
diff --git a/src/coreclr/tests/runtest.py b/src/coreclr/tests/runtest.py
index 7ae9c97e58dbf4..09fb8466af0dc3 100755
--- a/src/coreclr/tests/runtest.py
+++ b/src/coreclr/tests/runtest.py
@@ -1250,7 +1250,7 @@ def to_unicode(s):
return unicode(s, "utf-8")
else:
def to_unicode(s):
- return str(s, "utf-8")
+ return s
def find_test_from_name(host_os, test_location, test_name):
""" Given a test's name return the location on disk
diff --git a/src/coreclr/tests/src/Directory.Build.props b/src/coreclr/tests/src/Directory.Build.props
index 2d3ee4bf90cf8a..b70d49f4253fc8 100644
--- a/src/coreclr/tests/src/Directory.Build.props
+++ b/src/coreclr/tests/src/Directory.Build.props
@@ -24,11 +24,11 @@
- $(RootRepoDir)\artifacts\tests\coreclr
+ $(RepoRoot)\artifacts\tests\coreclr
$(__TestRootDir)
$(BaseOutputPath)\$(OSPlatformConfig)\
$(BaseOutputPathWithConfig)
- $(RootRepoDir)\artifacts\tests\coreclr\obj\$(OSPlatformConfig)\Managed\
+ $(RepoRoot)\artifacts\tests\coreclr\obj\$(OSPlatformConfig)\Managed\
$(__ManagedTestIntermediatesDir)\
<__NativeTestIntermediatesDir Condition="'$(__NativeTestIntermediatesDir)' == ''">$([System.IO.Path]::GetFullPath($(BaseOutputPathWithConfig)..\..\coreclr\obj\$(TargetOS).$(BuildArch).$(Configuration)\Native\))
$(MSBuildProjectName)\
diff --git a/src/coreclr/tests/src/GC/API/GC/AllocateUninitializedArray.cs b/src/coreclr/tests/src/GC/API/GC/AllocateUninitializedArray.cs
deleted file mode 100644
index 28dcd969ab5e4e..00000000000000
--- a/src/coreclr/tests/src/GC/API/GC/AllocateUninitializedArray.cs
+++ /dev/null
@@ -1,151 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-// Tests GC.Collect()
-
-using System;
-
-public class Test {
-
-
- public static int Main() {
- // allocate a bunch of SOH byte arrays and touch them.
- var r = new Random(1234);
- for (int i = 0; i < 10000; i++)
- {
- int size = r.Next(10000);
- var arr = AllocUninitialized.Call(size);
-
- if (size > 1)
- {
- arr[0] = 5;
- arr[size - 1] = 17;
- if (arr[0] != 5 || arr[size - 1] != 17)
- {
- Console.WriteLine("Scenario 1 for GC.AllocUninitialized() failed!");
- return 1;
- }
- }
- }
-
- // allocate a bunch of LOH int arrays and touch them.
- for (int i = 0; i < 1000; i++)
- {
- int size = r.Next(100000, 1000000);
- var arr = AllocUninitialized.Call(size);
-
- arr[0] = 5;
- arr[size - 1] = 17;
- if (arr[0] != 5 || arr[size - 1] != 17)
- {
- Console.WriteLine("Scenario 2 for GC.AllocUninitialized() failed!");
- return 1;
- }
- }
-
- // allocate a string array
- {
- int i = 100;
- var arr = AllocUninitialized.Call(i);
-
- arr[0] = "5";
- arr[i - 1] = "17";
- if (arr[0] != "5" || arr[i - 1] != "17")
- {
- Console.WriteLine("Scenario 3 for GC.AllocUninitialized() failed!");
- return 1;
- }
- }
-
- // allocate max size byte array
- {
- if (IntPtr.Size == 8)
- {
- int i = 0x7FFFFFC7;
- var arr = AllocUninitialized.Call(i);
-
- arr[0] = 5;
- arr[i - 1] = 17;
- if (arr[0] != 5 || arr[i - 1] != 17)
- {
- Console.WriteLine("Scenario 4 for GC.AllocUninitialized() failed!");
- return 1;
- }
- }
- }
-
- // negative size
- {
- int GetNegativeValue() => -1;
- int negativeSize = GetNegativeValue();
- Type expectedExceptionType = null;
-
- try
- {
- GC.KeepAlive(new byte[negativeSize]);
-
- Console.WriteLine("Scenario 5 Expected exception (new operator)!");
- return 1;
- }
- catch (Exception newOperatorEx)
- {
- expectedExceptionType = newOperatorEx.GetType();
- }
-
- try
- {
- var arr = AllocUninitialized.Call(-1);
-
- Console.WriteLine("Scenario 5 Expected exception (GC.AllocateUninitializedArray)!");
- return 1;
- }
- catch (Exception allocUninitializedEx) when (allocUninitializedEx.GetType() == expectedExceptionType)
- {
- // OK
- }
- catch (Exception other)
- {
- Console.WriteLine($"Scenario 5 Expected exception type mismatch: expected {expectedExceptionType}, but got {other.GetType()}!");
- return 1;
- }
- }
-
- // too large
- {
- try
- {
- var arr = AllocUninitialized.Call(int.MaxValue);
-
- Console.WriteLine("Scenario 6 Expected exception!");
- return 1;
- }
- catch (OutOfMemoryException)
- {
- }
- }
-
-
- Console.WriteLine("Test for GC.Collect() passed!");
- return 100;
- }
-
- //TODO: This should be removed once the API is public.
- static class AllocUninitialized
- {
- public static Func Call = (i) =>
- {
- // replace the stub with actual impl.
- Call = (Func)typeof(System.GC).
- GetMethod("AllocateUninitializedArray",
- bindingAttr: System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Static,
- binder: null,
- new Type[] { typeof(int) },
- modifiers: new System.Reflection.ParameterModifier[0]).
- MakeGenericMethod(new Type[] { typeof(T) }).
- CreateDelegate(typeof(Func));
-
- // call the impl.
- return Call(i);
- };
- }
-}
diff --git a/src/coreclr/tests/src/GC/API/GC/AllocateUninitializedArray.csproj b/src/coreclr/tests/src/GC/API/GC/AllocateUninitializedArray.csproj
deleted file mode 100644
index 41499bc43fdbfb..00000000000000
--- a/src/coreclr/tests/src/GC/API/GC/AllocateUninitializedArray.csproj
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
- Exe
- 0
-
-
-
- PdbOnly
-
-
-
-
-
diff --git a/src/coreclr/tests/src/Interop/COM/Dynamic/Server/DispatchImpl.cpp b/src/coreclr/tests/src/Interop/COM/Dynamic/Server/DispatchImpl.cpp
index 9885eacf0cc70b..39ade024a34dc4 100644
--- a/src/coreclr/tests/src/Interop/COM/Dynamic/Server/DispatchImpl.cpp
+++ b/src/coreclr/tests/src/Interop/COM/Dynamic/Server/DispatchImpl.cpp
@@ -26,7 +26,7 @@ DispatchImpl::DispatchImpl(GUID guid, void *instance, const wchar_t* tlb)
DispatchImpl::~DispatchImpl()
{
- if (_typeInfo != nullptr)
+ if (_typeLib != nullptr)
_typeLib->Release();
if (_typeInfo != nullptr)
@@ -44,8 +44,8 @@ HRESULT DispatchImpl::DoGetTypeInfo(UINT iTInfo, ITypeInfo** ppTInfo)
if (iTInfo != 0)
return DISP_E_BADINDEX;
- *ppTInfo = _typeInfo;
- return S_OK;
+ assert(_typeInfo != nullptr);
+ return _typeInfo->QueryInterface(__uuidof(*ppTInfo), (void**)ppTInfo);
}
HRESULT DispatchImpl::DoGetIDsOfNames(LPOLESTR* rgszNames, UINT cNames, DISPID* rgDispId)
diff --git a/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt b/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt
index 1aceaf5d91bfd5..ebb09a85e7b049 100644
--- a/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt
+++ b/src/coreclr/tests/src/Interop/PInvoke/Generics/CMakeLists.txt
@@ -13,6 +13,10 @@ elseif(CLR_CMAKE_TARGET_ARCH_ARM64)
add_definitions(-DTARGET_ARM64)
add_definitions(-DTARGET_ARMARCH)
endif()
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ # The ABI for passing parameters with 32-byte alignment has changed in GCC 4.6
+ add_compile_options(-Wno-psabi)
+endif()
set(SOURCES
GenericsNative.IUnknown.cpp
GenericsNative.NullableB.cpp
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_r.csproj b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_r.csproj
index 07a3c5940f5531..c2bc7b3491ee51 100644
--- a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_r.csproj
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_r.csproj
@@ -429,6 +429,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_ro.csproj b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_ro.csproj
index 10562a585f1869..a35c1c23146914 100644
--- a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_ro.csproj
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/AdvSimd_ro.csproj
@@ -429,6 +429,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Program.AdvSimd.cs b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Program.AdvSimd.cs
index dd20b98214422b..f45f02ceedc939 100644
--- a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Program.AdvSimd.cs
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Program.AdvSimd.cs
@@ -433,6 +433,26 @@ static Program()
["PopCount.Vector128.SByte"] = PopCount_Vector128_SByte,
["SqrtScalar.Vector64.Double"] = SqrtScalar_Vector64_Double,
["SqrtScalar.Vector64.Single"] = SqrtScalar_Vector64_Single,
+ ["Store.Vector64.Byte"] = Store_Vector64_Byte,
+ ["Store.Vector64.Double"] = Store_Vector64_Double,
+ ["Store.Vector64.Int16"] = Store_Vector64_Int16,
+ ["Store.Vector64.Int32"] = Store_Vector64_Int32,
+ ["Store.Vector64.Int64"] = Store_Vector64_Int64,
+ ["Store.Vector64.SByte"] = Store_Vector64_SByte,
+ ["Store.Vector64.Single"] = Store_Vector64_Single,
+ ["Store.Vector64.UInt16"] = Store_Vector64_UInt16,
+ ["Store.Vector64.UInt32"] = Store_Vector64_UInt32,
+ ["Store.Vector64.UInt64"] = Store_Vector64_UInt64,
+ ["Store.Vector128.Byte"] = Store_Vector128_Byte,
+ ["Store.Vector128.Double"] = Store_Vector128_Double,
+ ["Store.Vector128.Int16"] = Store_Vector128_Int16,
+ ["Store.Vector128.Int32"] = Store_Vector128_Int32,
+ ["Store.Vector128.Int64"] = Store_Vector128_Int64,
+ ["Store.Vector128.SByte"] = Store_Vector128_SByte,
+ ["Store.Vector128.Single"] = Store_Vector128_Single,
+ ["Store.Vector128.UInt16"] = Store_Vector128_UInt16,
+ ["Store.Vector128.UInt32"] = Store_Vector128_UInt32,
+ ["Store.Vector128.UInt64"] = Store_Vector128_UInt64,
["Subtract.Vector64.Byte"] = Subtract_Vector64_Byte,
["Subtract.Vector64.Int16"] = Subtract_Vector64_Int16,
["Subtract.Vector64.Int32"] = Subtract_Vector64_Int32,
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Byte.cs b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Byte.cs
new file mode 100644
index 00000000000000..0c53a2bccc2458
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Byte.cs
@@ -0,0 +1,459 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/******************************************************************************
+ * This file is auto-generated from a template file by the GenerateTests.csx *
+ * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
+ * changes, please update the corresponding template and run according to the *
+ * directions listed in the file. *
+ ******************************************************************************/
+
+using System;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+
+namespace JIT.HardwareIntrinsics.Arm
+{
+ public static partial class Program
+ {
+ private static void Store_Vector128_Byte()
+ {
+ var test = new StoreUnaryOpTest__Store_Vector128_Byte();
+
+ if (test.IsSupported)
+ {
+ // Validates basic functionality works, using Unsafe.Read
+ test.RunBasicScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates basic functionality works, using Load
+ test.RunBasicScenario_Load();
+ }
+
+ // Validates calling via reflection works, using Unsafe.Read
+ test.RunReflectionScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates calling via reflection works, using Load
+ test.RunReflectionScenario_Load();
+ }
+
+ // Validates passing a static member works
+ test.RunClsVarScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a static member works, using pinning and Load
+ test.RunClsVarScenario_Load();
+ }
+
+ // Validates passing a local works, using Unsafe.Read
+ test.RunLclVarScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a local works, using Load
+ test.RunLclVarScenario_Load();
+ }
+
+ // Validates passing the field of a local class works
+ test.RunClassLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local class works, using pinning and Load
+ test.RunClassLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a class works
+ test.RunClassFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a class works, using pinning and Load
+ test.RunClassFldScenario_Load();
+ }
+
+ // Validates passing the field of a local struct works
+ test.RunStructLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local struct works, using pinning and Load
+ test.RunStructLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a struct works
+ test.RunStructFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a struct works, using pinning and Load
+ test.RunStructFldScenario_Load();
+ }
+ }
+ else
+ {
+ // Validates we throw on unsupported hardware
+ test.RunUnsupportedScenario();
+ }
+
+ if (!test.Succeeded)
+ {
+ throw new Exception("One or more scenarios did not complete as expected.");
+ }
+ }
+ }
+
+ public sealed unsafe class StoreUnaryOpTest__Store_Vector128_Byte
+ {
+ private struct DataTable
+ {
+ private byte[] inArray1;
+ private byte[] outArray;
+
+ private GCHandle inHandle1;
+ private GCHandle outHandle;
+
+ private ulong alignment;
+
+ public DataTable(Byte[] inArray1, Byte[] outArray, int alignment)
+ {
+ int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf();
+ int sizeOfoutArray = outArray.Length * Unsafe.SizeOf();
+ if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
+ {
+ throw new ArgumentException("Invalid value of alignment");
+ }
+
+ this.inArray1 = new byte[alignment * 2];
+ this.outArray = new byte[alignment * 2];
+
+ this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
+ this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
+
+ this.alignment = (ulong)alignment;
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef(inArray1Ptr), ref Unsafe.As(ref inArray1[0]), (uint)sizeOfinArray1);
+ }
+
+ public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
+ public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
+
+ public void Dispose()
+ {
+ inHandle1.Free();
+ outHandle.Free();
+ }
+
+ private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
+ {
+ return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
+ }
+ }
+
+ private struct TestStruct
+ {
+ public Vector128 _fld1;
+
+ public static TestStruct Create()
+ {
+ var testStruct = new TestStruct();
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As, byte>(ref testStruct._fld1), ref Unsafe.As(ref _data1[0]), (uint)Unsafe.SizeOf>());
+
+ return testStruct;
+ }
+
+ public void RunStructFldScenario(StoreUnaryOpTest__Store_Vector128_Byte testClass)
+ {
+ AdvSimd.Store((Byte*)testClass._dataTable.outArrayPtr, _fld1);
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario_Load(StoreUnaryOpTest__Store_Vector128_Byte testClass)
+ {
+ fixed (Vector128* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Byte*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1)));
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+ }
+ }
+
+ private static readonly int LargestVectorSize = 16;
+
+ private static readonly int Op1ElementCount = Unsafe.SizeOf>() / sizeof(Byte);
+ private static readonly int RetElementCount = Unsafe.SizeOf>() / sizeof(Byte);
+
+ private static Byte[] _data1 = new Byte[Op1ElementCount];
+
+ private static Vector128 _clsVar1;
+
+ private Vector128 _fld1;
+
+ private DataTable _dataTable;
+
+ static StoreUnaryOpTest__Store_Vector128_Byte()
+ {
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As, byte>(ref _clsVar1), ref Unsafe.As(ref _data1[0]), (uint)Unsafe.SizeOf>());
+ }
+
+ public StoreUnaryOpTest__Store_Vector128_Byte()
+ {
+ Succeeded = true;
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As, byte>(ref _fld1), ref Unsafe.As(ref _data1[0]), (uint)Unsafe.SizeOf>());
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
+ _dataTable = new DataTable(_data1, new Byte[RetElementCount], LargestVectorSize);
+ }
+
+ public bool IsSupported => AdvSimd.IsSupported;
+
+ public bool Succeeded { get; set; }
+
+ public void RunBasicScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
+
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, Unsafe.Read>(_dataTable.inArray1Ptr));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunBasicScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
+
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Byte*), typeof(Vector128) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Byte*)),
+ Unsafe.Read>(_dataTable.inArray1Ptr) });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Byte*), typeof(Vector128) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Byte*)),
+ AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr))
+ });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
+
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, _clsVar1);
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
+
+ fixed (Vector128* pClsVar1 = &_clsVar1)
+ {
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pClsVar1)));
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunLclVarScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
+
+ var op1 = Unsafe.Read>(_dataTable.inArray1Ptr);
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunLclVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
+
+ var op1 = AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr));
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Byte();
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Byte();
+
+ fixed (Vector128* pFld1 = &test._fld1)
+ {
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunClassFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
+
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, _fld1);
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
+
+ fixed (Vector128* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(pFld1)));
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunStructLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Byte*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Byte*)(&test._fld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario(this);
+ }
+
+ public void RunStructFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario_Load(this);
+ }
+
+ public void RunUnsupportedScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
+
+ bool succeeded = false;
+
+ try
+ {
+ RunBasicScenario_UnsafeRead();
+ }
+ catch (PlatformNotSupportedException)
+ {
+ succeeded = true;
+ }
+
+ if (!succeeded)
+ {
+ Succeeded = false;
+ }
+ }
+
+ private void ValidateResult(Vector128 op1, void* result, [CallerMemberName] string method = "")
+ {
+ Byte[] inArray1 = new Byte[Op1ElementCount];
+ Byte[] outArray = new Byte[RetElementCount];
+
+ Unsafe.WriteUnaligned(ref Unsafe.As(ref inArray1[0]), op1);
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As(ref outArray[0]), ref Unsafe.AsRef(result), (uint)Unsafe.SizeOf>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
+ {
+ Byte[] inArray1 = new Byte[Op1ElementCount];
+ Byte[] outArray = new Byte[RetElementCount];
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As(ref inArray1[0]), ref Unsafe.AsRef(op1), (uint)Unsafe.SizeOf>());
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As(ref outArray[0]), ref Unsafe.AsRef(result), (uint)Unsafe.SizeOf>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "")
+ {
+ bool succeeded = true;
+
+ for (int i = 0; i < RetElementCount; i++)
+ {
+ if (firstOp[i] != result[i])
+ {
+ succeeded = false;
+ break;
+ }
+ }
+
+ if (!succeeded)
+ {
+ TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Store)}(Vector128): {method} failed:");
+ TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
+ TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
+ TestLibrary.TestFramework.LogInformation(string.Empty);
+
+ Succeeded = false;
+ }
+ }
+ }
+}
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Double.cs b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Double.cs
new file mode 100644
index 00000000000000..790527f659bfce
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Double.cs
@@ -0,0 +1,459 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/******************************************************************************
+ * This file is auto-generated from a template file by the GenerateTests.csx *
+ * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
+ * changes, please update the corresponding template and run according to the *
+ * directions listed in the file. *
+ ******************************************************************************/
+
+using System;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+
+namespace JIT.HardwareIntrinsics.Arm
+{
+ public static partial class Program
+ {
+ private static void Store_Vector128_Double()
+ {
+ var test = new StoreUnaryOpTest__Store_Vector128_Double();
+
+ if (test.IsSupported)
+ {
+ // Validates basic functionality works, using Unsafe.Read
+ test.RunBasicScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates basic functionality works, using Load
+ test.RunBasicScenario_Load();
+ }
+
+ // Validates calling via reflection works, using Unsafe.Read
+ test.RunReflectionScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates calling via reflection works, using Load
+ test.RunReflectionScenario_Load();
+ }
+
+ // Validates passing a static member works
+ test.RunClsVarScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a static member works, using pinning and Load
+ test.RunClsVarScenario_Load();
+ }
+
+ // Validates passing a local works, using Unsafe.Read
+ test.RunLclVarScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a local works, using Load
+ test.RunLclVarScenario_Load();
+ }
+
+ // Validates passing the field of a local class works
+ test.RunClassLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local class works, using pinning and Load
+ test.RunClassLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a class works
+ test.RunClassFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a class works, using pinning and Load
+ test.RunClassFldScenario_Load();
+ }
+
+ // Validates passing the field of a local struct works
+ test.RunStructLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local struct works, using pinning and Load
+ test.RunStructLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a struct works
+ test.RunStructFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a struct works, using pinning and Load
+ test.RunStructFldScenario_Load();
+ }
+ }
+ else
+ {
+ // Validates we throw on unsupported hardware
+ test.RunUnsupportedScenario();
+ }
+
+ if (!test.Succeeded)
+ {
+ throw new Exception("One or more scenarios did not complete as expected.");
+ }
+ }
+ }
+
+ public sealed unsafe class StoreUnaryOpTest__Store_Vector128_Double
+ {
+ private struct DataTable
+ {
+ private byte[] inArray1;
+ private byte[] outArray;
+
+ private GCHandle inHandle1;
+ private GCHandle outHandle;
+
+ private ulong alignment;
+
+ public DataTable(Double[] inArray1, Double[] outArray, int alignment)
+ {
+ int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf();
+ int sizeOfoutArray = outArray.Length * Unsafe.SizeOf();
+ if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
+ {
+ throw new ArgumentException("Invalid value of alignment");
+ }
+
+ this.inArray1 = new byte[alignment * 2];
+ this.outArray = new byte[alignment * 2];
+
+ this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
+ this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
+
+ this.alignment = (ulong)alignment;
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef(inArray1Ptr), ref Unsafe.As(ref inArray1[0]), (uint)sizeOfinArray1);
+ }
+
+ public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
+ public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
+
+ public void Dispose()
+ {
+ inHandle1.Free();
+ outHandle.Free();
+ }
+
+ private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
+ {
+ return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
+ }
+ }
+
+ private struct TestStruct
+ {
+ public Vector128 _fld1;
+
+ public static TestStruct Create()
+ {
+ var testStruct = new TestStruct();
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As, byte>(ref testStruct._fld1), ref Unsafe.As(ref _data1[0]), (uint)Unsafe.SizeOf>());
+
+ return testStruct;
+ }
+
+ public void RunStructFldScenario(StoreUnaryOpTest__Store_Vector128_Double testClass)
+ {
+ AdvSimd.Store((Double*)testClass._dataTable.outArrayPtr, _fld1);
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario_Load(StoreUnaryOpTest__Store_Vector128_Double testClass)
+ {
+ fixed (Vector128* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Double*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Double*)(pFld1)));
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+ }
+ }
+
+ private static readonly int LargestVectorSize = 16;
+
+ private static readonly int Op1ElementCount = Unsafe.SizeOf>() / sizeof(Double);
+ private static readonly int RetElementCount = Unsafe.SizeOf>() / sizeof(Double);
+
+ private static Double[] _data1 = new Double[Op1ElementCount];
+
+ private static Vector128 _clsVar1;
+
+ private Vector128 _fld1;
+
+ private DataTable _dataTable;
+
+ static StoreUnaryOpTest__Store_Vector128_Double()
+ {
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As, byte>(ref _clsVar1), ref Unsafe.As(ref _data1[0]), (uint)Unsafe.SizeOf>());
+ }
+
+ public StoreUnaryOpTest__Store_Vector128_Double()
+ {
+ Succeeded = true;
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As, byte>(ref _fld1), ref Unsafe.As(ref _data1[0]), (uint)Unsafe.SizeOf>());
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetDouble(); }
+ _dataTable = new DataTable(_data1, new Double[RetElementCount], LargestVectorSize);
+ }
+
+ public bool IsSupported => AdvSimd.IsSupported;
+
+ public bool Succeeded { get; set; }
+
+ public void RunBasicScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
+
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, Unsafe.Read>(_dataTable.inArray1Ptr));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunBasicScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
+
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Double*)(_dataTable.inArray1Ptr)));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Double*), typeof(Vector128) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Double*)),
+ Unsafe.Read>(_dataTable.inArray1Ptr) });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Double*), typeof(Vector128) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Double*)),
+ AdvSimd.LoadVector128((Double*)(_dataTable.inArray1Ptr))
+ });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
+
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, _clsVar1);
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
+
+ fixed (Vector128* pClsVar1 = &_clsVar1)
+ {
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Double*)(pClsVar1)));
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunLclVarScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
+
+ var op1 = Unsafe.Read>(_dataTable.inArray1Ptr);
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunLclVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
+
+ var op1 = AdvSimd.LoadVector128((Double*)(_dataTable.inArray1Ptr));
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Double();
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Double();
+
+ fixed (Vector128* pFld1 = &test._fld1)
+ {
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Double*)(pFld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunClassFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
+
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, _fld1);
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
+
+ fixed (Vector128* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Double*)(pFld1)));
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunStructLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Double*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Double*)(&test._fld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario(this);
+ }
+
+ public void RunStructFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario_Load(this);
+ }
+
+ public void RunUnsupportedScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
+
+ bool succeeded = false;
+
+ try
+ {
+ RunBasicScenario_UnsafeRead();
+ }
+ catch (PlatformNotSupportedException)
+ {
+ succeeded = true;
+ }
+
+ if (!succeeded)
+ {
+ Succeeded = false;
+ }
+ }
+
+ private void ValidateResult(Vector128 op1, void* result, [CallerMemberName] string method = "")
+ {
+ Double[] inArray1 = new Double[Op1ElementCount];
+ Double[] outArray = new Double[RetElementCount];
+
+ Unsafe.WriteUnaligned(ref Unsafe.As(ref inArray1[0]), op1);
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As(ref outArray[0]), ref Unsafe.AsRef(result), (uint)Unsafe.SizeOf>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
+ {
+ Double[] inArray1 = new Double[Op1ElementCount];
+ Double[] outArray = new Double[RetElementCount];
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As(ref inArray1[0]), ref Unsafe.AsRef(op1), (uint)Unsafe.SizeOf>());
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As(ref outArray[0]), ref Unsafe.AsRef(result), (uint)Unsafe.SizeOf>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(Double[] firstOp, Double[] result, [CallerMemberName] string method = "")
+ {
+ bool succeeded = true;
+
+ for (int i = 0; i < RetElementCount; i++)
+ {
+ if (BitConverter.DoubleToInt64Bits(firstOp[i]) != BitConverter.DoubleToInt64Bits(result[i]))
+ {
+ succeeded = false;
+ break;
+ }
+ }
+
+ if (!succeeded)
+ {
+ TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Store)}(Vector128): {method} failed:");
+ TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
+ TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
+ TestLibrary.TestFramework.LogInformation(string.Empty);
+
+ Succeeded = false;
+ }
+ }
+ }
+}
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int16.cs b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int16.cs
new file mode 100644
index 00000000000000..80289519e83dcb
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int16.cs
@@ -0,0 +1,459 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/******************************************************************************
+ * This file is auto-generated from a template file by the GenerateTests.csx *
+ * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
+ * changes, please update the corresponding template and run according to the *
+ * directions listed in the file. *
+ ******************************************************************************/
+
+using System;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+
+namespace JIT.HardwareIntrinsics.Arm
+{
+ public static partial class Program
+ {
+ private static void Store_Vector128_Int16()
+ {
+ var test = new StoreUnaryOpTest__Store_Vector128_Int16();
+
+ if (test.IsSupported)
+ {
+ // Validates basic functionality works, using Unsafe.Read
+ test.RunBasicScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates basic functionality works, using Load
+ test.RunBasicScenario_Load();
+ }
+
+ // Validates calling via reflection works, using Unsafe.Read
+ test.RunReflectionScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates calling via reflection works, using Load
+ test.RunReflectionScenario_Load();
+ }
+
+ // Validates passing a static member works
+ test.RunClsVarScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a static member works, using pinning and Load
+ test.RunClsVarScenario_Load();
+ }
+
+ // Validates passing a local works, using Unsafe.Read
+ test.RunLclVarScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a local works, using Load
+ test.RunLclVarScenario_Load();
+ }
+
+ // Validates passing the field of a local class works
+ test.RunClassLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local class works, using pinning and Load
+ test.RunClassLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a class works
+ test.RunClassFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a class works, using pinning and Load
+ test.RunClassFldScenario_Load();
+ }
+
+ // Validates passing the field of a local struct works
+ test.RunStructLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local struct works, using pinning and Load
+ test.RunStructLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a struct works
+ test.RunStructFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a struct works, using pinning and Load
+ test.RunStructFldScenario_Load();
+ }
+ }
+ else
+ {
+ // Validates we throw on unsupported hardware
+ test.RunUnsupportedScenario();
+ }
+
+ if (!test.Succeeded)
+ {
+ throw new Exception("One or more scenarios did not complete as expected.");
+ }
+ }
+ }
+
+ public sealed unsafe class StoreUnaryOpTest__Store_Vector128_Int16
+ {
+ private struct DataTable
+ {
+ private byte[] inArray1;
+ private byte[] outArray;
+
+ private GCHandle inHandle1;
+ private GCHandle outHandle;
+
+ private ulong alignment;
+
+ public DataTable(Int16[] inArray1, Int16[] outArray, int alignment)
+ {
+ int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
+ int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>();
+ if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
+ {
+ throw new ArgumentException("Invalid value of alignment");
+ }
+
+ this.inArray1 = new byte[alignment * 2];
+ this.outArray = new byte[alignment * 2];
+
+ this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
+ this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
+
+ this.alignment = (ulong)alignment;
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
+ }
+
+ public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
+ public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
+
+ public void Dispose()
+ {
+ inHandle1.Free();
+ outHandle.Free();
+ }
+
+ private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
+ {
+ return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
+ }
+ }
+
+ private struct TestStruct
+ {
+ public Vector128<Int16> _fld1;
+
+ public static TestStruct Create()
+ {
+ var testStruct = new TestStruct();
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
+
+ return testStruct;
+ }
+
+ public void RunStructFldScenario(StoreUnaryOpTest__Store_Vector128_Int16 testClass)
+ {
+ AdvSimd.Store((Int16*)testClass._dataTable.outArrayPtr, _fld1);
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario_Load(StoreUnaryOpTest__Store_Vector128_Int16 testClass)
+ {
+ fixed (Vector128<Int16>* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Int16*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Int16*)(pFld1)));
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+ }
+ }
+
+ private static readonly int LargestVectorSize = 16;
+
+ private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
+ private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
+
+ private static Int16[] _data1 = new Int16[Op1ElementCount];
+
+ private static Vector128<Int16> _clsVar1;
+
+ private Vector128<Int16> _fld1;
+
+ private DataTable _dataTable;
+
+ static StoreUnaryOpTest__Store_Vector128_Int16()
+ {
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
+ }
+
+ public StoreUnaryOpTest__Store_Vector128_Int16()
+ {
+ Succeeded = true;
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
+ _dataTable = new DataTable(_data1, new Int16[RetElementCount], LargestVectorSize);
+ }
+
+ public bool IsSupported => AdvSimd.IsSupported;
+
+ public bool Succeeded { get; set; }
+
+ public void RunBasicScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
+
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunBasicScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
+
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr)));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Int16*), typeof(Vector128<Int16>) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Int16*)),
+ Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr) });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Int16*), typeof(Vector128<Int16>) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Int16*)),
+ AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr))
+ });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
+
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, _clsVar1);
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
+
+ fixed (Vector128<Int16>* pClsVar1 = &_clsVar1)
+ {
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int16*)(pClsVar1)));
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunLclVarScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
+
+ var op1 = Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr);
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunLclVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
+
+ var op1 = AdvSimd.LoadVector128((Int16*)(_dataTable.inArray1Ptr));
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Int16();
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Int16();
+
+ fixed (Vector128<Int16>* pFld1 = &test._fld1)
+ {
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int16*)(pFld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunClassFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
+
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, _fld1);
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
+
+ fixed (Vector128<Int16>* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int16*)(pFld1)));
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunStructLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Int16*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int16*)(&test._fld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario(this);
+ }
+
+ public void RunStructFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario_Load(this);
+ }
+
+ public void RunUnsupportedScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
+
+ bool succeeded = false;
+
+ try
+ {
+ RunBasicScenario_UnsafeRead();
+ }
+ catch (PlatformNotSupportedException)
+ {
+ succeeded = true;
+ }
+
+ if (!succeeded)
+ {
+ Succeeded = false;
+ }
+ }
+
+ private void ValidateResult(Vector128<Int16> op1, void* result, [CallerMemberName] string method = "")
+ {
+ Int16[] inArray1 = new Int16[Op1ElementCount];
+ Int16[] outArray = new Int16[RetElementCount];
+
+ Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1);
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
+ {
+ Int16[] inArray1 = new Int16[Op1ElementCount];
+ Int16[] outArray = new Int16[RetElementCount];
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int16>>());
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(Int16[] firstOp, Int16[] result, [CallerMemberName] string method = "")
+ {
+ bool succeeded = true;
+
+ for (int i = 0; i < RetElementCount; i++)
+ {
+ if (firstOp[i] != result[i])
+ {
+ succeeded = false;
+ break;
+ }
+ }
+
+ if (!succeeded)
+ {
+ TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Store)}(Vector128<Int16>): {method} failed:");
+ TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
+ TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
+ TestLibrary.TestFramework.LogInformation(string.Empty);
+
+ Succeeded = false;
+ }
+ }
+ }
+}
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int32.cs b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int32.cs
new file mode 100644
index 00000000000000..d2e2aebd50ae3f
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int32.cs
@@ -0,0 +1,459 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/******************************************************************************
+ * This file is auto-generated from a template file by the GenerateTests.csx *
+ * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
+ * changes, please update the corresponding template and run according to the *
+ * directions listed in the file. *
+ ******************************************************************************/
+
+using System;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+
+namespace JIT.HardwareIntrinsics.Arm
+{
+ public static partial class Program
+ {
+ private static void Store_Vector128_Int32()
+ {
+ var test = new StoreUnaryOpTest__Store_Vector128_Int32();
+
+ if (test.IsSupported)
+ {
+ // Validates basic functionality works, using Unsafe.Read
+ test.RunBasicScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates basic functionality works, using Load
+ test.RunBasicScenario_Load();
+ }
+
+ // Validates calling via reflection works, using Unsafe.Read
+ test.RunReflectionScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates calling via reflection works, using Load
+ test.RunReflectionScenario_Load();
+ }
+
+ // Validates passing a static member works
+ test.RunClsVarScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a static member works, using pinning and Load
+ test.RunClsVarScenario_Load();
+ }
+
+ // Validates passing a local works, using Unsafe.Read
+ test.RunLclVarScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a local works, using Load
+ test.RunLclVarScenario_Load();
+ }
+
+ // Validates passing the field of a local class works
+ test.RunClassLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local class works, using pinning and Load
+ test.RunClassLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a class works
+ test.RunClassFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a class works, using pinning and Load
+ test.RunClassFldScenario_Load();
+ }
+
+ // Validates passing the field of a local struct works
+ test.RunStructLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local struct works, using pinning and Load
+ test.RunStructLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a struct works
+ test.RunStructFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a struct works, using pinning and Load
+ test.RunStructFldScenario_Load();
+ }
+ }
+ else
+ {
+ // Validates we throw on unsupported hardware
+ test.RunUnsupportedScenario();
+ }
+
+ if (!test.Succeeded)
+ {
+ throw new Exception("One or more scenarios did not complete as expected.");
+ }
+ }
+ }
+
+ public sealed unsafe class StoreUnaryOpTest__Store_Vector128_Int32
+ {
+ private struct DataTable
+ {
+ private byte[] inArray1;
+ private byte[] outArray;
+
+ private GCHandle inHandle1;
+ private GCHandle outHandle;
+
+ private ulong alignment;
+
+ public DataTable(Int32[] inArray1, Int32[] outArray, int alignment)
+ {
+ int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
+ int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
+ if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
+ {
+ throw new ArgumentException("Invalid value of alignment");
+ }
+
+ this.inArray1 = new byte[alignment * 2];
+ this.outArray = new byte[alignment * 2];
+
+ this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
+ this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
+
+ this.alignment = (ulong)alignment;
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
+ }
+
+ public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
+ public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
+
+ public void Dispose()
+ {
+ inHandle1.Free();
+ outHandle.Free();
+ }
+
+ private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
+ {
+ return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
+ }
+ }
+
+ private struct TestStruct
+ {
+ public Vector128<Int32> _fld1;
+
+ public static TestStruct Create()
+ {
+ var testStruct = new TestStruct();
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
+
+ return testStruct;
+ }
+
+ public void RunStructFldScenario(StoreUnaryOpTest__Store_Vector128_Int32 testClass)
+ {
+ AdvSimd.Store((Int32*)testClass._dataTable.outArrayPtr, _fld1);
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario_Load(StoreUnaryOpTest__Store_Vector128_Int32 testClass)
+ {
+ fixed (Vector128<Int32>* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Int32*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Int32*)(pFld1)));
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+ }
+ }
+
+ private static readonly int LargestVectorSize = 16;
+
+ private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
+ private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
+
+ private static Int32[] _data1 = new Int32[Op1ElementCount];
+
+ private static Vector128<Int32> _clsVar1;
+
+ private Vector128<Int32> _fld1;
+
+ private DataTable _dataTable;
+
+ static StoreUnaryOpTest__Store_Vector128_Int32()
+ {
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
+ }
+
+ public StoreUnaryOpTest__Store_Vector128_Int32()
+ {
+ Succeeded = true;
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
+ _dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize);
+ }
+
+ public bool IsSupported => AdvSimd.IsSupported;
+
+ public bool Succeeded { get; set; }
+
+ public void RunBasicScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
+
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunBasicScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
+
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Int32*), typeof(Vector128<Int32>) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Int32*)),
+ Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr) });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
+
+ typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Int32*), typeof(Vector128<Int32>) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Int32*)),
+ AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr))
+ });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
+
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, _clsVar1);
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
+
+ fixed (Vector128<Int32>* pClsVar1 = &_clsVar1)
+ {
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int32*)(pClsVar1)));
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunLclVarScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
+
+ var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunLclVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
+
+ var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr));
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Int32();
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Int32();
+
+ fixed (Vector128<Int32>* pFld1 = &test._fld1)
+ {
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int32*)(pFld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunClassFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
+
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, _fld1);
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
+
+ fixed (Vector128<Int32>* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int32*)(pFld1)));
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunStructLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Int32*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int32*)(&test._fld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario(this);
+ }
+
+ public void RunStructFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario_Load(this);
+ }
+
+ public void RunUnsupportedScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
+
+ bool succeeded = false;
+
+ try
+ {
+ RunBasicScenario_UnsafeRead();
+ }
+ catch (PlatformNotSupportedException)
+ {
+ succeeded = true;
+ }
+
+ if (!succeeded)
+ {
+ Succeeded = false;
+ }
+ }
+
+ private void ValidateResult(Vector128<Int32> op1, void* result, [CallerMemberName] string method = "")
+ {
+ Int32[] inArray1 = new Int32[Op1ElementCount];
+ Int32[] outArray = new Int32[RetElementCount];
+
+ Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
+ {
+ Int32[] inArray1 = new Int32[Op1ElementCount];
+ Int32[] outArray = new Int32[RetElementCount];
+
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
+ Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(Int32[] firstOp, Int32[] result, [CallerMemberName] string method = "")
+ {
+ bool succeeded = true;
+
+ for (int i = 0; i < RetElementCount; i++)
+ {
+ if (firstOp[i] != result[i])
+ {
+ succeeded = false;
+ break;
+ }
+ }
+
+ if (!succeeded)
+ {
+ TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Store)}(Vector128<Int32>): {method} failed:");
+ TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
+ TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
+ TestLibrary.TestFramework.LogInformation(string.Empty);
+
+ Succeeded = false;
+ }
+ }
+ }
+}
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int64.cs b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int64.cs
new file mode 100644
index 00000000000000..8cfa92632a40a3
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.Int64.cs
@@ -0,0 +1,459 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/******************************************************************************
+ * This file is auto-generated from a template file by the GenerateTests.csx *
+ * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
+ * changes, please update the corresponding template and run according to the *
+ * directions listed in the file. *
+ ******************************************************************************/
+
+using System;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+
+namespace JIT.HardwareIntrinsics.Arm
+{
+ public static partial class Program
+ {
+ private static void Store_Vector128_Int64()
+ {
+ var test = new StoreUnaryOpTest__Store_Vector128_Int64();
+
+ if (test.IsSupported)
+ {
+ // Validates basic functionality works, using Unsafe.Read
+ test.RunBasicScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates basic functionality works, using Load
+ test.RunBasicScenario_Load();
+ }
+
+ // Validates calling via reflection works, using Unsafe.Read
+ test.RunReflectionScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates calling via reflection works, using Load
+ test.RunReflectionScenario_Load();
+ }
+
+ // Validates passing a static member works
+ test.RunClsVarScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a static member works, using pinning and Load
+ test.RunClsVarScenario_Load();
+ }
+
+ // Validates passing a local works, using Unsafe.Read
+ test.RunLclVarScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a local works, using Load
+ test.RunLclVarScenario_Load();
+ }
+
+ // Validates passing the field of a local class works
+ test.RunClassLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local class works, using pinning and Load
+ test.RunClassLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a class works
+ test.RunClassFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a class works, using pinning and Load
+ test.RunClassFldScenario_Load();
+ }
+
+ // Validates passing the field of a local struct works
+ test.RunStructLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local struct works, using pinning and Load
+ test.RunStructLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a struct works
+ test.RunStructFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a struct works, using pinning and Load
+ test.RunStructFldScenario_Load();
+ }
+ }
+ else
+ {
+ // Validates we throw on unsupported hardware
+ test.RunUnsupportedScenario();
+ }
+
+ if (!test.Succeeded)
+ {
+ throw new Exception("One or more scenarios did not complete as expected.");
+ }
+ }
+ }
+
+ public sealed unsafe class StoreUnaryOpTest__Store_Vector128_Int64
+ {
+ private struct DataTable
+ {
+ private byte[] inArray1;
+ private byte[] outArray;
+
+ private GCHandle inHandle1;
+ private GCHandle outHandle;
+
+ private ulong alignment;
+
+ public DataTable(Int64[] inArray1, Int64[] outArray, int alignment)
+ {
+                int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
+                int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>();
+ if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
+ {
+ throw new ArgumentException("Invalid value of alignment");
+ }
+
+ this.inArray1 = new byte[alignment * 2];
+ this.outArray = new byte[alignment * 2];
+
+ this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
+ this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
+
+ this.alignment = (ulong)alignment;
+
+                Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
+ }
+
+ public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
+ public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
+
+ public void Dispose()
+ {
+ inHandle1.Free();
+ outHandle.Free();
+ }
+
+ private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
+ {
+ return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
+ }
+ }
+
+ private struct TestStruct
+ {
+            public Vector128<Int64> _fld1;
+
+ public static TestStruct Create()
+ {
+ var testStruct = new TestStruct();
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
+                Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
+
+ return testStruct;
+ }
+
+ public void RunStructFldScenario(StoreUnaryOpTest__Store_Vector128_Int64 testClass)
+ {
+ AdvSimd.Store((Int64*)testClass._dataTable.outArrayPtr, _fld1);
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario_Load(StoreUnaryOpTest__Store_Vector128_Int64 testClass)
+ {
+                fixed (Vector128<Int64>* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Int64*)testClass._dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)));
+
+ testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
+ }
+ }
+ }
+
+ private static readonly int LargestVectorSize = 16;
+
+        private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
+        private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
+
+ private static Int64[] _data1 = new Int64[Op1ElementCount];
+
+        private static Vector128<Int64> _clsVar1;
+
+        private Vector128<Int64> _fld1;
+
+ private DataTable _dataTable;
+
+ static StoreUnaryOpTest__Store_Vector128_Int64()
+ {
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
+            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
+ }
+
+ public StoreUnaryOpTest__Store_Vector128_Int64()
+ {
+ Succeeded = true;
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
+            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
+
+ for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
+ _dataTable = new DataTable(_data1, new Int64[RetElementCount], LargestVectorSize);
+ }
+
+ public bool IsSupported => AdvSimd.IsSupported;
+
+ public bool Succeeded { get; set; }
+
+ public void RunBasicScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
+
+            AdvSimd.Store((Int64*)_dataTable.outArrayPtr, Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunBasicScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
+
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)));
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
+
+            typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Int64*), typeof(Vector128<Int64>) })
+                         .Invoke(null, new object[] {
+                                 Pointer.Box(_dataTable.outArrayPtr, typeof(Int64*)),
+                                 Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr) });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunReflectionScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
+
+            typeof(AdvSimd).GetMethod(nameof(AdvSimd.Store), new Type[] { typeof(Int64*), typeof(Vector128<Int64>) })
+ .Invoke(null, new object[] {
+ Pointer.Box(_dataTable.outArrayPtr, typeof(Int64*)),
+ AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr))
+ });
+
+ ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
+
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, _clsVar1);
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClsVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
+
+            fixed (Vector128<Int64>* pClsVar1 = &_clsVar1)
+ {
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pClsVar1)));
+
+ ValidateResult(_clsVar1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunLclVarScenario_UnsafeRead()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
+
+            var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr);
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunLclVarScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
+
+ var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr));
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, op1);
+
+ ValidateResult(op1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Int64();
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
+
+ var test = new StoreUnaryOpTest__Store_Vector128_Int64();
+
+            fixed (Vector128<Int64>* pFld1 = &test._fld1)
+ {
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunClassFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
+
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, _fld1);
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunClassFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
+
+            fixed (Vector128<Int64>* pFld1 = &_fld1)
+ {
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(pFld1)));
+
+ ValidateResult(_fld1, _dataTable.outArrayPtr);
+ }
+ }
+
+ public void RunStructLclFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, test._fld1);
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructLclFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
+
+ var test = TestStruct.Create();
+ AdvSimd.Store((Int64*)_dataTable.outArrayPtr, AdvSimd.LoadVector128((Int64*)(&test._fld1)));
+
+ ValidateResult(test._fld1, _dataTable.outArrayPtr);
+ }
+
+ public void RunStructFldScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario(this);
+ }
+
+ public void RunStructFldScenario_Load()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
+
+ var test = TestStruct.Create();
+ test.RunStructFldScenario_Load(this);
+ }
+
+ public void RunUnsupportedScenario()
+ {
+ TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
+
+ bool succeeded = false;
+
+ try
+ {
+ RunBasicScenario_UnsafeRead();
+ }
+ catch (PlatformNotSupportedException)
+ {
+ succeeded = true;
+ }
+
+ if (!succeeded)
+ {
+ Succeeded = false;
+ }
+ }
+
+        private void ValidateResult(Vector128<Int64> op1, void* result, [CallerMemberName] string method = "")
+ {
+ Int64[] inArray1 = new Int64[Op1ElementCount];
+ Int64[] outArray = new Int64[RetElementCount];
+
+            Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
+            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
+ {
+ Int64[] inArray1 = new Int64[Op1ElementCount];
+ Int64[] outArray = new Int64[RetElementCount];
+
+            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>());
+            Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>());
+
+ ValidateResult(inArray1, outArray, method);
+ }
+
+ private void ValidateResult(Int64[] firstOp, Int64[] result, [CallerMemberName] string method = "")
+ {
+ bool succeeded = true;
+
+ for (int i = 0; i < RetElementCount; i++)
+ {
+ if (firstOp[i] != result[i])
+ {
+ succeeded = false;
+ break;
+ }
+ }
+
+ if (!succeeded)
+ {
+                TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.Store)}(Vector128<Int64>): {method} failed:");
+ TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
+ TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
+ TestLibrary.TestFramework.LogInformation(string.Empty);
+
+ Succeeded = false;
+ }
+ }
+ }
+}
diff --git a/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.SByte.cs b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.SByte.cs
new file mode 100644
index 00000000000000..77aa5d0679564f
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/HardwareIntrinsics/Arm/AdvSimd/Store.Vector128.SByte.cs
@@ -0,0 +1,459 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+/******************************************************************************
+ * This file is auto-generated from a template file by the GenerateTests.csx *
+ * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
+ * changes, please update the corresponding template and run according to the *
+ * directions listed in the file. *
+ ******************************************************************************/
+
+using System;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.Arm;
+
+namespace JIT.HardwareIntrinsics.Arm
+{
+ public static partial class Program
+ {
+ private static void Store_Vector128_SByte()
+ {
+ var test = new StoreUnaryOpTest__Store_Vector128_SByte();
+
+ if (test.IsSupported)
+ {
+ // Validates basic functionality works, using Unsafe.Read
+ test.RunBasicScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates basic functionality works, using Load
+ test.RunBasicScenario_Load();
+ }
+
+ // Validates calling via reflection works, using Unsafe.Read
+ test.RunReflectionScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates calling via reflection works, using Load
+ test.RunReflectionScenario_Load();
+ }
+
+ // Validates passing a static member works
+ test.RunClsVarScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a static member works, using pinning and Load
+ test.RunClsVarScenario_Load();
+ }
+
+ // Validates passing a local works, using Unsafe.Read
+ test.RunLclVarScenario_UnsafeRead();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing a local works, using Load
+ test.RunLclVarScenario_Load();
+ }
+
+ // Validates passing the field of a local class works
+ test.RunClassLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local class works, using pinning and Load
+ test.RunClassLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a class works
+ test.RunClassFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a class works, using pinning and Load
+ test.RunClassFldScenario_Load();
+ }
+
+ // Validates passing the field of a local struct works
+ test.RunStructLclFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing the field of a local struct works, using pinning and Load
+ test.RunStructLclFldScenario_Load();
+ }
+
+ // Validates passing an instance member of a struct works
+ test.RunStructFldScenario();
+
+ if (AdvSimd.IsSupported)
+ {
+ // Validates passing an instance member of a struct works, using pinning and Load
+ test.RunStructFldScenario_Load();
+ }
+ }
+ else
+ {
+ // Validates we throw on unsupported hardware
+ test.RunUnsupportedScenario();
+ }
+
+ if (!test.Succeeded)
+ {
+ throw new Exception("One or more scenarios did not complete as expected.");
+ }
+ }
+ }
+
+ public sealed unsafe class StoreUnaryOpTest__Store_Vector128_SByte
+ {
+ private struct DataTable
+ {
+ private byte[] inArray1;
+ private byte[] outArray;
+
+ private GCHandle inHandle1;
+ private GCHandle outHandle;
+
+ private ulong alignment;
+
+ public DataTable(SByte[] inArray1, SByte[] outArray, int alignment)
+ {
+                int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>();
+ int sizeOfoutArray = outArray.Length * Unsafe.SizeOf