From 9ceafad81f9c68f16c6be2e11ff609ce9f228ae9 Mon Sep 17 00:00:00 2001 From: Scott Mosier Date: Tue, 6 Oct 2015 16:58:31 -0700 Subject: [PATCH 1/2] Partially hooked up GC sources to Runtime sources. Finalizer code is not yet hooked up. --- src/Native/Runtime/CommonMacros.h | 36 ++- src/Native/Runtime/CommonTypes.h | 2 + src/Native/Runtime/Crst.cpp | 2 +- src/Native/Runtime/DebugEventSource.cpp | 2 +- src/Native/Runtime/FinalizerHelpers.cpp | 31 ++- src/Native/Runtime/GCHelpers.cpp | 17 +- src/Native/Runtime/GcStressControl.cpp | 2 +- src/Native/Runtime/HandleTableHelpers.cpp | 8 +- src/Native/Runtime/MiscHelpers.cpp | 2 +- src/Native/Runtime/ObjectLayout.cpp | 2 +- src/Native/Runtime/OptionalFieldsRuntime.cpp | 2 +- src/Native/Runtime/PalRedhawkFunctions.h | 251 +++++++++++++++++++ src/Native/Runtime/Profiling.cpp | 2 +- src/Native/Runtime/RHCodeMan.cpp | 2 +- src/Native/Runtime/RWLock.cpp | 2 +- src/Native/Runtime/RuntimeInstance.cpp | 2 +- src/Native/Runtime/SectionMethodList.cpp | 2 +- src/Native/Runtime/StackFrameIterator.cpp | 2 +- src/Native/Runtime/TargetPtrs.h | 3 +- src/Native/Runtime/assert.cpp | 2 +- src/Native/Runtime/banned.h | 6 + src/Native/Runtime/daccess.h | 5 +- src/Native/Runtime/eetype.cpp | 2 +- src/Native/Runtime/gcdump.cpp | 4 +- src/Native/Runtime/module.cpp | 2 +- src/Native/Runtime/portable.cpp | 2 +- src/Native/Runtime/{common.h => rhcommon.h} | 0 src/Native/Runtime/startup.cpp | 2 +- src/Native/Runtime/stressLog.cpp | 2 +- src/Native/Runtime/thread.cpp | 4 +- src/Native/Runtime/threadstore.cpp | 2 +- src/Native/gc/env/gcenv.h | 202 +++++---------- src/Native/gc/env/gcenv.windows.cpp | 1 + src/Native/gc/gc.cpp | 4 +- src/Native/gc/gc.h | 7 +- src/Native/gc/gceewks.cpp | 1 + src/Native/gc/gcimpl.h | 4 +- src/Native/gc/gcobject.h | 149 +++++++++++ src/Native/gc/gcwks.cpp | 1 + src/Native/gc/handletable.cpp | 24 ++ src/Native/gc/handletable.h | 1 + src/Native/gc/objecthandle.cpp | 112 +++++++-- src/Native/gc/objecthandle.h | 8 +- 43 files 
changed, 694 insertions(+), 225 deletions(-) create mode 100644 src/Native/Runtime/PalRedhawkFunctions.h create mode 100644 src/Native/Runtime/banned.h rename src/Native/Runtime/{common.h => rhcommon.h} (100%) create mode 100644 src/Native/gc/gcobject.h diff --git a/src/Native/Runtime/CommonMacros.h b/src/Native/Runtime/CommonMacros.h index ec58f0d8335..d8e826727a8 100644 --- a/src/Native/Runtime/CommonMacros.h +++ b/src/Native/Runtime/CommonMacros.h @@ -41,6 +41,7 @@ char (*COUNTOF_helper(_CountofType (&_Array)[_SizeOfArray]))[_SizeOfArray]; #define offsetof(s,m) (UIntNative)( (IntNative)&reinterpret_cast((((s *)0)->m)) ) #endif // offsetof +#ifndef GCENV_INCLUDED #define FORCEINLINE __forceinline inline UIntNative ALIGN_UP(UIntNative val, UIntNative alignment); @@ -54,6 +55,7 @@ inline T* ALIGN_DOWN(T* val, UIntNative alignment); inline bool IS_ALIGNED(UIntNative val, UIntNative alignment); template inline bool IS_ALIGNED(T* val, UIntNative alignment); +#endif // GCENV_INCLUDED #ifndef DACCESS_COMPILE // @@ -96,21 +98,38 @@ EXTERN_C int __cdecl memcmp(const void *,const void *,size_t); #if defined(_AMD64_) -#define DATA_ALIGNMENT 8 -#define OS_PAGE_SIZE 0x1000 #define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB) #define LOG2_PTRSIZE 3 #define POINTER_SIZE 8 #elif defined(_X86_) +#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB) +#define LOG2_PTRSIZE 2 +#define POINTER_SIZE 4 + +#elif defined(_ARM_) + +#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB) +#define LOG2_PTRSIZE 2 +#define POINTER_SIZE 4 + +#else +#error Unsupported target architecture +#endif + +#ifndef GCENV_INCLUDED +#if defined(_AMD64_) + +#define DATA_ALIGNMENT 8 +#define OS_PAGE_SIZE 0x1000 + +#elif defined(_X86_) + #define DATA_ALIGNMENT 4 #ifndef OS_PAGE_SIZE #define OS_PAGE_SIZE 0x1000 #endif -#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB) -#define LOG2_PTRSIZE 2 -#define POINTER_SIZE 4 #elif defined(_ARM_) @@ 
-118,13 +137,11 @@ EXTERN_C int __cdecl memcmp(const void *,const void *,size_t); #ifndef OS_PAGE_SIZE #define OS_PAGE_SIZE 0x1000 #endif -#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB) -#define LOG2_PTRSIZE 2 -#define POINTER_SIZE 4 #else #error Unsupported target architecture #endif +#endif // GCENV_INCLUDED // // Define an unmanaged function called from managed code that needs to execute in co-operative GC mode. (There @@ -169,5 +186,6 @@ bool inline FitsInI4(__int64 val) { return val == (__int64)(__int32)val; } - +#ifndef GCENV_INCLUDED #define C_ASSERT(e) typedef char __C_ASSERT__[(e)?1:-1] +#endif // GCENV_INCLUDED diff --git a/src/Native/Runtime/CommonTypes.h b/src/Native/Runtime/CommonTypes.h index 042273c1f54..1bd6954652c 100644 --- a/src/Native/Runtime/CommonTypes.h +++ b/src/Native/Runtime/CommonTypes.h @@ -35,7 +35,9 @@ typedef UInt32 UInt32_BOOL; // windows 4-byte BOOL, 0 -> false, #define UInt32_FALSE 0 #define UInt32_TRUE 1 +#ifndef GCENV_INCLUDED #define UNREFERENCED_PARAMETER(P) (P) +#endif // GCENV_INCLUDED #define NULL 0 diff --git a/src/Native/Runtime/Crst.cpp b/src/Native/Runtime/Crst.cpp index 53ad5b97576..220e9eea34f 100644 --- a/src/Native/Runtime/Crst.cpp +++ b/src/Native/Runtime/Crst.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/DebugEventSource.cpp b/src/Native/Runtime/DebugEventSource.cpp index 53d66e94935..059ed667c11 100644 --- a/src/Native/Runtime/DebugEventSource.cpp +++ b/src/Native/Runtime/DebugEventSource.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/FinalizerHelpers.cpp b/src/Native/Runtime/FinalizerHelpers.cpp index 716de5411ce..c6ce37dd1a7 100644 --- a/src/Native/Runtime/FinalizerHelpers.cpp +++ b/src/Native/Runtime/FinalizerHelpers.cpp @@ -6,15 +6,28 @@ // // Unmanaged helpers called by the managed finalizer thread. // - -#include "gcrhenv.h" -#include "RuntimeInstance.h" +#include "common.h" +#include "gcenv.h" +#include "gc.h" +#include "commontypes.h" +#include "commonmacros.h" +#include "daccess.h" + +#include "slist.h" +#include "gcrhinterface.h" +#include "rwlock.h" +#include "runtimeinstance.h" #include "module.h" +#include "objectlayout.h" // Block the current thread until at least one object needs to be finalized (returns true) or memory is low // (returns false and the finalizer thread should initiate a garbage collection). EXTERN_C REDHAWK_API UInt32_BOOL __cdecl RhpWaitForFinalizerRequest() { +#ifdef USE_PORTABLE_HELPERS + ASSERT(!"@TODO: FINALIZER THREAD NYI"); + return FALSE; +#else // We can wait for two events; finalization queue has been populated and low memory resource notification. // But if the latter is signalled we shouldn't wait on it again immediately -- if the garbage collection // the finalizer thread initiates as a result is not sufficient to remove the low memory condition the @@ -30,8 +43,9 @@ EXTERN_C REDHAWK_API UInt32_BOOL __cdecl RhpWaitForFinalizerRequest() // two second timeout expires. do { - HANDLE rgWaitHandles[] = { pHeap->GetFinalizerEvent(), pHeap->GetLowMemoryNotificationEvent() }; - UInt32 cWaitHandles = (fLastEventWasLowMemory || pHeap->GetLowMemoryNotificationEvent() == NULL) ? 1 : 2; + HANDLE lowMemEvent = pHeap->GetLowMemoryNotificationEvent(); + HANDLE rgWaitHandles[] = { pHeap->GetFinalizerEvent(), lowMemEvent }; + UInt32 cWaitHandles = (fLastEventWasLowMemory || (lowMemEvent == NULL)) ? 
1 : 2; UInt32 uTimeout = fLastEventWasLowMemory ? 2000 : INFINITE; UInt32 uResult = PalWaitForMultipleObjectsEx(cWaitHandles, rgWaitHandles, FALSE, uTimeout, FALSE); @@ -59,14 +73,20 @@ EXTERN_C REDHAWK_API UInt32_BOOL __cdecl RhpWaitForFinalizerRequest() return FALSE; } } while (true); +#endif } // Indicate that the current round of finalizations is complete. EXTERN_C REDHAWK_API void __cdecl RhpSignalFinalizationComplete() { +#ifdef USE_PORTABLE_HELPERS + ASSERT(!"@TODO: FINALIZER THREAD NYI"); +#else GCHeap::GetGCHeap()->SignalFinalizationDone(TRUE); +#endif } +#ifdef FEATURE_PREMORTEM_FINALIZATION // Enable a last pass of the finalizer during (clean) runtime shutdown. Specify the number of milliseconds // we'll wait before giving up a proceeding with the shutdown (INFINITE is an allowable value). COOP_PINVOKE_HELPER(void, RhEnableShutdownFinalization, (UInt32 uiTimeout)) @@ -80,6 +100,7 @@ COOP_PINVOKE_HELPER(UInt8, RhHasShutdownStarted, ()) { return g_fShutdownHasStarted ? 1 : 0; } +#endif // FEATURE_PREMORTEM_FINALIZATION // // The following helpers are special in that they interact with internal GC state or directly manipulate diff --git a/src/Native/Runtime/GCHelpers.cpp b/src/Native/Runtime/GCHelpers.cpp index 25b091ec350..601c2c4c8ff 100644 --- a/src/Native/Runtime/GCHelpers.cpp +++ b/src/Native/Runtime/GCHelpers.cpp @@ -7,14 +7,23 @@ // Unmanaged helpers exposed by the System.GC managed class. // -#include "gcrhenv.h" +#include "common.h" +#include "gcenv.h" +#include "gc.h" +#include "commontypes.h" +#include "commonmacros.h" #include "restrictedcallouts.h" +#include "daccess.h" +#include "targetptrs.h" +#include "eetype.h" +#include "objectlayout.h" + + COOP_PINVOKE_HELPER(void, RhSuppressFinalize, (OBJECTREF refObj)) { if (!refObj->get_EEType()->HasFinalizer()) return; - GCHeap::GetGCHeap()->SetFinalizationRun(refObj); } @@ -24,7 +33,11 @@ EXTERN_C REDHAWK_API void __cdecl RhWaitForPendingFinalizers(BOOL allowReentrant // called in cooperative mode. 
ASSERT(!GetThread()->PreemptiveGCDisabled()); +#ifdef USE_PORTABLE_HELPERS + ASSERT(!"@TODO: FINALIZER THREAD NYI"); +#else GCHeap::GetGCHeap()->FinalizerThreadWait(INFINITE, allowReentrantWait); +#endif } COOP_PINVOKE_HELPER(Int32, RhGetMaxGcGeneration, ()) diff --git a/src/Native/Runtime/GcStressControl.cpp b/src/Native/Runtime/GcStressControl.cpp index 73c710cb099..f6ff731d96a 100644 --- a/src/Native/Runtime/GcStressControl.cpp +++ b/src/Native/Runtime/GcStressControl.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #if defined(FEATURE_GC_STRESS) & !defined(DACCESS_COMPILE) diff --git a/src/Native/Runtime/HandleTableHelpers.cpp b/src/Native/Runtime/HandleTableHelpers.cpp index 7f84e093a0b..d9e1e7ecb14 100644 --- a/src/Native/Runtime/HandleTableHelpers.cpp +++ b/src/Native/Runtime/HandleTableHelpers.cpp @@ -10,10 +10,14 @@ // binder has special knowledge of these methods and doesn't generate the normal code to transition out of the // runtime prior to the call. // - -#include "gcrhenv.h" +#include "common.h" +#include "gcenv.h" +#include "objecthandle.h" +#include "commontypes.h" +#include "commonmacros.h" #include "restrictedcallouts.h" + COOP_PINVOKE_HELPER(OBJECTHANDLE, RhpHandleAlloc, (Object *pObject, int type)) { return CreateTypedHandle(g_HandleTableMap.pBuckets[0]->pTable[GetCurrentThreadHomeHeapNumber()], pObject, type); diff --git a/src/Native/Runtime/MiscHelpers.cpp b/src/Native/Runtime/MiscHelpers.cpp index 83883c3a33b..b54280d815c 100644 --- a/src/Native/Runtime/MiscHelpers.cpp +++ b/src/Native/Runtime/MiscHelpers.cpp @@ -7,7 +7,7 @@ // Miscellaneous unmanaged helpers called by managed code. 
// -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/ObjectLayout.cpp b/src/Native/Runtime/ObjectLayout.cpp index d2e988ed2f2..2cfffc9e4a7 100644 --- a/src/Native/Runtime/ObjectLayout.cpp +++ b/src/Native/Runtime/ObjectLayout.cpp @@ -6,7 +6,7 @@ // // Implementations of functions dealing with object layout related types. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/OptionalFieldsRuntime.cpp b/src/Native/Runtime/OptionalFieldsRuntime.cpp index f1d2af990fb..193b1d271ae 100644 --- a/src/Native/Runtime/OptionalFieldsRuntime.cpp +++ b/src/Native/Runtime/OptionalFieldsRuntime.cpp @@ -7,7 +7,7 @@ // Implementations of methods of OptionalFields which are used only at runtime (i.e. reading field values). // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #else // DACCESS_COMPILE diff --git a/src/Native/Runtime/PalRedhawkFunctions.h b/src/Native/Runtime/PalRedhawkFunctions.h new file mode 100644 index 00000000000..e22baca0bfc --- /dev/null +++ b/src/Native/Runtime/PalRedhawkFunctions.h @@ -0,0 +1,251 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+// + +extern "C" UInt16 __stdcall CaptureStackBackTrace(UInt32, UInt32, void*, UInt32*); +inline UInt16 PalCaptureStackBackTrace(UInt32 arg1, UInt32 arg2, void* arg3, UInt32* arg4) +{ + return CaptureStackBackTrace(arg1, arg2, arg3, arg4); +} + +extern "C" UInt32_BOOL __stdcall CloseHandle(HANDLE); +inline UInt32_BOOL PalCloseHandle(HANDLE arg1) +{ + return CloseHandle(arg1); +} + +extern "C" UInt32_BOOL __stdcall CreateDirectoryW(LPCWSTR, LPSECURITY_ATTRIBUTES); +inline UInt32_BOOL PalCreateDirectoryW(LPCWSTR arg1, LPSECURITY_ATTRIBUTES arg2) +{ + return CreateDirectoryW(arg1, arg2); +} + +extern "C" void __stdcall DeleteCriticalSection(CRITICAL_SECTION *); +inline void PalDeleteCriticalSection(CRITICAL_SECTION * arg1) +{ + DeleteCriticalSection(arg1); +} + +extern "C" UInt32_BOOL __stdcall DuplicateHandle(HANDLE, HANDLE, HANDLE, HANDLE *, UInt32, UInt32_BOOL, UInt32); +inline UInt32_BOOL PalDuplicateHandle(HANDLE arg1, HANDLE arg2, HANDLE arg3, HANDLE * arg4, UInt32 arg5, UInt32_BOOL arg6, UInt32 arg7) +{ + return DuplicateHandle(arg1, arg2, arg3, arg4, arg5, arg6, arg7); +} + +extern "C" void __stdcall EnterCriticalSection(CRITICAL_SECTION *); +inline void PalEnterCriticalSection(CRITICAL_SECTION * arg1) +{ + EnterCriticalSection(arg1); +} + +extern "C" void __stdcall ExitProcess(UInt32); +inline void PalExitProcess(UInt32 arg1) +{ + ExitProcess(arg1); +} + +extern "C" UInt32 __stdcall FlsAlloc(PFLS_CALLBACK_FUNCTION); +inline UInt32 PalFlsAlloc(PFLS_CALLBACK_FUNCTION arg1) +{ + return FlsAlloc(arg1); +} + +extern "C" void * __stdcall FlsGetValue(UInt32); +inline void * PalFlsGetValue(UInt32 arg1) +{ + return FlsGetValue(arg1); +} + +extern "C" UInt32_BOOL __stdcall FlsSetValue(UInt32, void *); +inline UInt32_BOOL PalFlsSetValue(UInt32 arg1, void * arg2) +{ + return FlsSetValue(arg1, arg2); +} + +extern "C" UInt32_BOOL __stdcall FlushFileBuffers(HANDLE); +inline UInt32_BOOL PalFlushFileBuffers(HANDLE arg1) +{ + return FlushFileBuffers(arg1); +} + +extern "C" 
void __stdcall FlushProcessWriteBuffers(); +inline void PalFlushProcessWriteBuffers() +{ + FlushProcessWriteBuffers(); +} + +extern "C" HANDLE __stdcall GetCurrentProcess(); +inline HANDLE PalGetCurrentProcess() +{ + return GetCurrentProcess(); +} + +extern "C" UInt32 __stdcall GetCurrentProcessId(); +inline UInt32 PalGetCurrentProcessId() +{ + return GetCurrentProcessId(); +} + +extern "C" HANDLE __stdcall GetCurrentThread(); +inline HANDLE PalGetCurrentThread() +{ + return GetCurrentThread(); +} + +extern "C" UInt32 __stdcall GetCurrentThreadId(); +inline UInt32 PalGetCurrentThreadId() +{ + return GetCurrentThreadId(); +} + +extern "C" UInt32 __stdcall GetEnvironmentVariableW(LPCWSTR, LPWSTR, UInt32); +inline UInt32 PalGetEnvironmentVariableW(LPCWSTR arg1, LPWSTR arg2, UInt32 arg3) +{ + return GetEnvironmentVariableW(arg1, arg2, arg3); +} + +extern "C" UInt32 __stdcall GetLastError(); +inline UInt32 PalGetLastError() +{ + return GetLastError(); +} + +extern "C" void __stdcall GetNativeSystemInfo(SYSTEM_INFO *); +inline void PalGetNativeSystemInfo(SYSTEM_INFO * arg1) +{ + GetNativeSystemInfo(arg1); +} + +extern "C" void * __stdcall GetProcAddress(HANDLE, char *); +inline void * PalGetProcAddress(HANDLE arg1, char * arg2) +{ + return GetProcAddress(arg1, arg2); +} + +extern "C" HANDLE __stdcall GetProcessHeap(); +inline HANDLE PalGetProcessHeap() +{ + return GetProcessHeap(); +} + +extern "C" void __stdcall GetSystemTimeAsFileTime(FILETIME *); +inline void PalGetSystemTimeAsFileTime(FILETIME * arg1) +{ + GetSystemTimeAsFileTime(arg1); +} + +extern "C" UInt64 __stdcall GetTickCount64(); +inline UInt64 PalGetTickCount64() +{ + return GetTickCount64(); +} + +extern "C" void* __stdcall HeapAlloc(HANDLE, UInt32, UIntNative); +inline void* PalHeapAlloc(HANDLE arg1, UInt32 arg2, UIntNative arg3) +{ + return HeapAlloc(arg1, arg2, arg3); +} + +extern "C" UInt32_BOOL __stdcall HeapFree(HANDLE, UInt32, void *); +inline UInt32_BOOL PalHeapFree(HANDLE arg1, UInt32 arg2, void * 
arg3) +{ + return HeapFree(arg1, arg2, arg3); +} + +extern "C" UInt32_BOOL __stdcall InitializeCriticalSectionEx(CRITICAL_SECTION *, UInt32, UInt32); +inline UInt32_BOOL PalInitializeCriticalSectionEx(CRITICAL_SECTION * arg1, UInt32 arg2, UInt32 arg3) +{ + return InitializeCriticalSectionEx(arg1, arg2, arg3); +} + +extern "C" UInt32_BOOL __stdcall IsDebuggerPresent(); +inline UInt32_BOOL PalIsDebuggerPresent() +{ + return IsDebuggerPresent(); +} + +extern "C" void __stdcall LeaveCriticalSection(CRITICAL_SECTION *); +inline void PalLeaveCriticalSection(CRITICAL_SECTION * arg1) +{ + LeaveCriticalSection(arg1); +} + +extern "C" HANDLE __stdcall LoadLibraryExW(WCHAR *, HANDLE, UInt32); +inline HANDLE PalLoadLibraryExW(WCHAR * arg1, HANDLE arg2, UInt32 arg3) +{ + return LoadLibraryExW(arg1, arg2, arg3); +} + +extern "C" UInt32_BOOL __stdcall QueryPerformanceCounter(LARGE_INTEGER *); +inline UInt32_BOOL PalQueryPerformanceCounter(LARGE_INTEGER * arg1) +{ + return QueryPerformanceCounter(arg1); +} + +extern "C" UInt32_BOOL __stdcall QueryPerformanceFrequency(LARGE_INTEGER *); +inline UInt32_BOOL PalQueryPerformanceFrequency(LARGE_INTEGER * arg1) +{ + return QueryPerformanceFrequency(arg1); +} + +extern "C" void __stdcall RaiseException(UInt32, UInt32, UInt32, const UInt32 *); +inline void PalRaiseException(UInt32 arg1, UInt32 arg2, UInt32 arg3, const UInt32 * arg4) +{ + RaiseException(arg1, arg2, arg3, arg4); +} + +extern "C" void __stdcall RaiseFailFastException(PEXCEPTION_RECORD, PCONTEXT, UInt32); +inline void PalRaiseFailFastException(PEXCEPTION_RECORD arg1, PCONTEXT arg2, UInt32 arg3) +{ + RaiseFailFastException(arg1, arg2, arg3); +} + +extern "C" UInt32_BOOL __stdcall ReleaseMutex(HANDLE); +inline UInt32_BOOL PalReleaseMutex(HANDLE arg1) +{ + return ReleaseMutex(arg1); +} + +extern "C" UInt32_BOOL __stdcall ResetEvent(HANDLE); +inline UInt32_BOOL PalResetEvent(HANDLE arg1) +{ + return ResetEvent(arg1); +} + +extern "C" UInt32_BOOL __stdcall SetEvent(HANDLE); +inline 
UInt32_BOOL PalSetEvent(HANDLE arg1) +{ + return SetEvent(arg1); +} + +extern "C" UInt32_BOOL __stdcall SetFilePointerEx(HANDLE, LARGE_INTEGER, LARGE_INTEGER *, UInt32); +inline UInt32_BOOL PalSetFilePointerEx(HANDLE arg1, LARGE_INTEGER arg2, LARGE_INTEGER * arg3, UInt32 arg4) +{ + return SetFilePointerEx(arg1, arg2, arg3, arg4); +} + +extern "C" void __stdcall TerminateProcess(HANDLE, UInt32); +inline void PalTerminateProcess(HANDLE arg1, UInt32 arg2) +{ + TerminateProcess(arg1, arg2); +} + +extern "C" UInt32 __stdcall WaitForMultipleObjectsEx(UInt32, HANDLE *, UInt32_BOOL, UInt32, UInt32_BOOL); +inline UInt32 PalWaitForMultipleObjectsEx(UInt32 arg1, HANDLE * arg2, UInt32_BOOL arg3, UInt32 arg4, UInt32_BOOL arg5) +{ + return WaitForMultipleObjectsEx(arg1, arg2, arg3, arg4, arg5); +} + +extern "C" UInt32 __stdcall WaitForSingleObjectEx(HANDLE, UInt32, UInt32_BOOL); +inline UInt32 PalWaitForSingleObjectEx(HANDLE arg1, UInt32 arg2, UInt32_BOOL arg3) +{ + return WaitForSingleObjectEx(arg1, arg2, arg3); +} + +extern "C" UInt32_BOOL __stdcall WriteFile(HANDLE, const void *, UInt32, UInt32 *, LPOVERLAPPED); +inline UInt32_BOOL PalWriteFile(HANDLE arg1, const void * arg2, UInt32 arg3, UInt32 * arg4, LPOVERLAPPED arg5) +{ + return WriteFile(arg1, arg2, arg3, arg4, arg5); +} + diff --git a/src/Native/Runtime/Profiling.cpp b/src/Native/Runtime/Profiling.cpp index be8e23ebf1e..cc58fa464db 100644 --- a/src/Native/Runtime/Profiling.cpp +++ b/src/Native/Runtime/Profiling.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/RHCodeMan.cpp b/src/Native/Runtime/RHCodeMan.cpp index ced11157717..600a84b5f69 100644 --- a/src/Native/Runtime/RHCodeMan.cpp +++ b/src/Native/Runtime/RHCodeMan.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/RWLock.cpp b/src/Native/Runtime/RWLock.cpp index 8c8a47134a7..8b7de632fff 100644 --- a/src/Native/Runtime/RWLock.cpp +++ b/src/Native/Runtime/RWLock.cpp @@ -6,7 +6,7 @@ // // RWLock.cpp -- adapted from CLR SimpleRWLock.cpp // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/RuntimeInstance.cpp b/src/Native/Runtime/RuntimeInstance.cpp index 65bd659c4c7..4a419b0f21c 100644 --- a/src/Native/Runtime/RuntimeInstance.cpp +++ b/src/Native/Runtime/RuntimeInstance.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/SectionMethodList.cpp b/src/Native/Runtime/SectionMethodList.cpp index 3b3268e5e98..086fc67cd8a 100644 --- a/src/Native/Runtime/SectionMethodList.cpp +++ b/src/Native/Runtime/SectionMethodList.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/StackFrameIterator.cpp b/src/Native/Runtime/StackFrameIterator.cpp index 49df0cd5b10..40f35f7e8f1 100644 --- a/src/Native/Runtime/StackFrameIterator.cpp +++ b/src/Native/Runtime/StackFrameIterator.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/TargetPtrs.h b/src/Native/Runtime/TargetPtrs.h index 467934e882b..e20d1f97c48 100644 --- a/src/Native/Runtime/TargetPtrs.h +++ b/src/Native/Runtime/TargetPtrs.h @@ -113,9 +113,10 @@ typedef UInt32 UIntTarget; #error unexpected target architecture #endif - +#ifndef GCENV_INCLUDED typedef PTR_UInt8 TgtPTR_UInt8; typedef PTR_UInt32 TgtPTR_UInt32; +#endif // GCENV_INCLUDED typedef void * TgtPTR_Void; typedef PTR_EEType TgtPTR_EEType; typedef PTR_GenericInstanceDesc TgtPTR_GenericInstanceDesc; diff --git a/src/Native/Runtime/assert.cpp b/src/Native/Runtime/assert.cpp index 33c627ce917..0e18eaf43de 100644 --- a/src/Native/Runtime/assert.cpp +++ b/src/Native/Runtime/assert.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #include "commontypes.h" #include "commonmacros.h" #include "palredhawkcommon.h" diff --git a/src/Native/Runtime/banned.h b/src/Native/Runtime/banned.h new file mode 100644 index 00000000000..c552cfdde67 --- /dev/null +++ b/src/Native/Runtime/banned.h @@ -0,0 +1,6 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
See LICENSE file in the project root for full license information. +// + +// placeholder file \ No newline at end of file diff --git a/src/Native/Runtime/daccess.h b/src/Native/Runtime/daccess.h index bcc8e15d17c..d13a781a7dd 100644 --- a/src/Native/Runtime/daccess.h +++ b/src/Native/Runtime/daccess.h @@ -2171,7 +2171,9 @@ typedef void** PTR_PTR_VOID; #define S16PTR(type) type* #define S16PTRMAX(type, maxChars) type* +#ifndef GCENV_INCLUDED #define PTR_TO_TADDR(ptr) (reinterpret_cast(ptr)) +#endif // GCENV_INCLUDED #define GFN_TADDR(name) (reinterpret_cast(&(name))) #define GVAL_ADDR(g) (&(g)) @@ -2310,6 +2312,7 @@ inline Tgt dac_cast(Src src) // //---------------------------------------------------------------------------- +#ifndef GCENV_INCLUDED #define SPTR_DECL(type, var) _SPTR_DECL(type*, PTR_##type, var) #define SPTR_IMPL(type, cls, var) _SPTR_IMPL(type*, PTR_##type, cls, var) #define SPTR_IMPL_INIT(type, cls, var, init) _SPTR_IMPL_INIT(type*, PTR_##type, cls, var, init) @@ -2318,7 +2321,7 @@ inline Tgt dac_cast(Src src) #define GPTR_DECL(type, var) _GPTR_DECL(type*, PTR_##type, var) #define GPTR_IMPL(type, var) _GPTR_IMPL(type*, PTR_##type, var) #define GPTR_IMPL_INIT(type, var, init) _GPTR_IMPL_INIT(type*, PTR_##type, var, init) - +#endif // GCENV_INCLUDED // If you want to marshal a single instance of an ArrayDPtr over to the host and // return a pointer to it, you can use this function. However, this is unsafe because diff --git a/src/Native/Runtime/eetype.cpp b/src/Native/Runtime/eetype.cpp index 6259a7518de..9d763516a78 100644 --- a/src/Native/Runtime/eetype.cpp +++ b/src/Native/Runtime/eetype.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/gcdump.cpp b/src/Native/Runtime/gcdump.cpp index b374d1587ec..67253c1e899 100644 --- a/src/Native/Runtime/gcdump.cpp +++ b/src/Native/Runtime/gcdump.cpp @@ -11,9 +11,9 @@ * or may be persisted by a managed native code compiler conforming * to the standard code-manager spec. */ -#include "common.h" +#include "rhcommon.h" -#if defined(_DEBUG) || defined(DACCESS_COMPILE) +#if (defined(_DEBUG) || defined(DACCESS_COMPILE)) && !defined(USE_PORTABLE_HELPERS) #include "gcrhenv.h" // @TODO: move off of gcrhenv.h #include "gcinfo.h" diff --git a/src/Native/Runtime/module.cpp b/src/Native/Runtime/module.cpp index 7d9a3976a57..8046f67c68c 100644 --- a/src/Native/Runtime/module.cpp +++ b/src/Native/Runtime/module.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/portable.cpp b/src/Native/Runtime/portable.cpp index 26e4239ef3e..eb3c69b9f09 100644 --- a/src/Native/Runtime/portable.cpp +++ b/src/Native/Runtime/portable.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// -#include "common.h" +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" diff --git a/src/Native/Runtime/common.h b/src/Native/Runtime/rhcommon.h similarity index 100% rename from src/Native/Runtime/common.h rename to src/Native/Runtime/rhcommon.h diff --git a/src/Native/Runtime/startup.cpp b/src/Native/Runtime/startup.cpp index a5a71f7a550..87e8ecfd541 100644 --- a/src/Native/Runtime/startup.cpp +++ b/src/Native/Runtime/startup.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/Runtime/stressLog.cpp b/src/Native/Runtime/stressLog.cpp index 7bb96a8a8e4..31eb1594bce 100644 --- a/src/Native/Runtime/stressLog.cpp +++ b/src/Native/Runtime/stressLog.cpp @@ -8,7 +8,7 @@ // StressLog infrastructure // --------------------------------------------------------------------------- -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #include "sospriv.h" diff --git a/src/Native/Runtime/thread.cpp b/src/Native/Runtime/thread.cpp index 3e82770241c..acf771f9c9a 100644 --- a/src/Native/Runtime/thread.cpp +++ b/src/Native/Runtime/thread.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE @@ -266,8 +266,10 @@ PTR_ExInfo Thread::GetCurExInfo() void Thread::Construct() { +#ifndef USE_PORTABLE_HELPERS C_ASSERT(OFFSETOF__Thread__m_pTransitionFrame == (offsetof(Thread, m_pTransitionFrame))); +#endif // USE_PORTABLE_HELPERS m_numDynamicTypesTlsCells = 0; m_pDynamicTypesTlsCells = NULL; diff --git a/src/Native/Runtime/threadstore.cpp b/src/Native/Runtime/threadstore.cpp index 17ae10d03a9..6f7a77f92ea 100644 --- a/src/Native/Runtime/threadstore.cpp +++ b/src/Native/Runtime/threadstore.cpp @@ -2,7 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // -#include "common.h" +#include "rhcommon.h" #ifdef DACCESS_COMPILE #include "gcrhenv.h" #endif // DACCESS_COMPILE diff --git a/src/Native/gc/env/gcenv.h b/src/Native/gc/env/gcenv.h index c5ed07efce6..1aa4bd2e593 100644 --- a/src/Native/gc/env/gcenv.h +++ b/src/Native/gc/env/gcenv.h @@ -10,6 +10,8 @@ #define FEATURE_REDHAWK 1 #define FEATURE_CONSERVATIVE_GC 1 +#define GCENV_INCLUDED + #ifndef _MSC_VER #define __stdcall #define __forceinline inline @@ -508,151 +510,10 @@ void FastInterlockAnd(uint32_t volatile *p, uint32_t msk); #define CALLER_LIMITS_SPINNING 0 bool __SwitchToThread (uint32_t dwSleepMSec, uint32_t dwSwitchCount); -//------------------------------------------------------------------------------------------------- -// -// Low-level types describing GC object layouts. -// - -// Bits stolen from the sync block index that the GC/HandleTable knows about (currently these are at the same -// positions as the mainline runtime but we can change this below when it becomes apparent how Redhawk will -// handle sync blocks). 
-#define BIT_SBLK_GC_RESERVE 0x20000000 -#define BIT_SBLK_FINALIZER_RUN 0x40000000 - -// The sync block index header (small structure that immediately precedes every object in the GC heap). Only -// the GC uses this so far, and only to store a couple of bits of information. -class ObjHeader -{ -private: -#if defined(_WIN64) - uint32_t m_uAlignpad; -#endif // _WIN64 - uint32_t m_uSyncBlockValue; - -public: - uint32_t GetBits() { return m_uSyncBlockValue; } - void SetBit(uint32_t uBit) { FastInterlockOr(&m_uSyncBlockValue, uBit); } - void ClrBit(uint32_t uBit) { FastInterlockAnd(&m_uSyncBlockValue, ~uBit); } - void SetGCBit() { m_uSyncBlockValue |= BIT_SBLK_GC_RESERVE; } - void ClrGCBit() { m_uSyncBlockValue &= ~BIT_SBLK_GC_RESERVE; } -}; - -#define MTFlag_ContainsPointers 1 -#define MTFlag_HasFinalizer 2 -#define MTFlag_IsArray 4 - -class MethodTable -{ -public: - uint16_t m_componentSize; - uint16_t m_flags; - uint32_t m_baseSize; - - MethodTable * m_pRelatedType; - -public: - void InitializeFreeObject() - { - m_baseSize = 3 * sizeof(void *); - m_componentSize = 1; - m_flags = 0; - } - - uint32_t GetBaseSize() - { - return m_baseSize; - } - - uint16_t RawGetComponentSize() - { - return m_componentSize; - } - - bool ContainsPointers() - { - return (m_flags & MTFlag_ContainsPointers) != 0; - } - - bool ContainsPointersOrCollectible() - { - return ContainsPointers(); - } - - bool HasComponentSize() - { - return m_componentSize != 0; - } - - bool HasFinalizer() - { - return (m_flags & MTFlag_HasFinalizer) != 0; - } - - bool HasCriticalFinalizer() - { - return false; - } - - bool IsArray() - { - return (m_flags & MTFlag_IsArray) != 0; - } - - MethodTable * GetParent() - { - _ASSERTE(!IsArray()); - return m_pRelatedType; - } - - bool SanityCheck() - { - return true; - } -}; -#define EEType MethodTable - -class Object -{ - MethodTable * m_pMethTab; - -public: - ObjHeader * GetHeader() - { - return ((ObjHeader *)this) - 1; - } - - MethodTable * RawGetMethodTable() const - 
{ - return m_pMethTab; - } - - void RawSetMethodTable(MethodTable * pMT) - { - m_pMethTab = pMT; - } - - void SetMethodTable(MethodTable * pMT) - { - m_pMethTab = pMT; - } -}; -#define MIN_OBJECT_SIZE (2*sizeof(BYTE*) + sizeof(ObjHeader)) - -class ArrayBase : public Object -{ - DWORD m_dwLength; - -public: - DWORD GetNumComponents() - { - return m_dwLength; - } - - static SIZE_T GetOffsetOfNumComponents() - { - return offsetof(ArrayBase, m_dwLength); - } -}; +class ObjHeader; +class MethodTable; +class Object; +class ArrayBase; // Various types used to refer to object references or handles. This will get more complex if we decide // Redhawk wants to wrap object references in the debug build. @@ -1369,3 +1230,54 @@ class SystemDomain AppDomain *DefaultDomain() { return NULL; } DWORD GetTotalNumSizedRefHandles() { return 0; } }; + +#ifdef STRESS_HEAP +namespace GCStressPolicy +{ + static volatile int32_t s_cGcStressDisables; + + inline bool IsEnabled() { return s_cGcStressDisables == 0; } + inline void GlobalDisable() { FastInterlockIncrement(&s_cGcStressDisables); } + inline void GlobalEnable() { FastInterlockDecrement(&s_cGcStressDisables); } +} + +enum gcs_trigger_points +{ + cfg_any, +}; + +template +class GCStress +{ +public: + static inline bool IsEnabled() + { + return g_pConfig->GetGCStressLevel() != 0; + } +}; +#endif // STRESS_HEAP + +#ifdef VERIFY_HEAP +class SyncBlockCache; + +extern SyncBlockCache g_sSyncBlockCache; + +class SyncBlockCache +{ +public: + static SyncBlockCache *GetSyncBlockCache() { return &g_sSyncBlockCache; } + void GCWeakPtrScan(void *pCallback, LPARAM pCtx, int dummy) + { + UNREFERENCED_PARAMETER(pCallback); + UNREFERENCED_PARAMETER(pCtx); + UNREFERENCED_PARAMETER(dummy); + } + void GCDone(uint32_t demoting, int max_gen) + { + UNREFERENCED_PARAMETER(demoting); + UNREFERENCED_PARAMETER(max_gen); + } + void VerifySyncTableEntry() {} +}; + +#endif // VERIFY_HEAP diff --git a/src/Native/gc/env/gcenv.windows.cpp 
b/src/Native/gc/env/gcenv.windows.cpp index 9d05fcb2725..6d2cf6888ae 100644 --- a/src/Native/gc/env/gcenv.windows.cpp +++ b/src/Native/gc/env/gcenv.windows.cpp @@ -342,3 +342,4 @@ bool PalHasCapability(PalCapability capability) // TODO: Implement for background GC return false; } + diff --git a/src/Native/gc/gc.cpp b/src/Native/gc/gc.cpp index 6e74047d349..f66ea47812d 100644 --- a/src/Native/gc/gc.cpp +++ b/src/Native/gc/gc.cpp @@ -34432,9 +34432,9 @@ void gc_heap::do_pre_gc() { #ifdef BACKGROUND_GC full_gc_counts[gc_type_background]++; -#ifdef STRESS_HEAP +#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK) GCHeap::gc_stress_fgcs_in_bgc = 0; -#endif // STRESS_HEAP +#endif // STRESS_HEAP && !FEATURE_REDHAWK #endif // BACKGROUND_GC } else diff --git a/src/Native/gc/gc.h b/src/Native/gc/gc.h index 9eefb512a17..8d178931c67 100644 --- a/src/Native/gc/gc.h +++ b/src/Native/gc/gc.h @@ -597,12 +597,7 @@ class GCHeap { // static if since restricting for all heaps is fine virtual size_t GetValidSegmentSize(BOOL large_seg = FALSE) = 0; - - static BOOL IsLargeObject(MethodTable *mt) { - WRAPPER_NO_CONTRACT; - - return mt->GetBaseSize() >= LARGE_OBJECT_SIZE; - } + static BOOL IsLargeObject(MethodTable *mt); static unsigned GetMaxGeneration() { LIMITED_METHOD_DAC_CONTRACT; diff --git a/src/Native/gc/gceewks.cpp b/src/Native/gc/gceewks.cpp index 0d765cdc7a8..69efe972580 100644 --- a/src/Native/gc/gceewks.cpp +++ b/src/Native/gc/gceewks.cpp @@ -8,6 +8,7 @@ #include "common.h" #include "gcenv.h" +#include "gcobject.h" #include "gc.h" #include "gcscan.h" diff --git a/src/Native/gc/gcimpl.h b/src/Native/gc/gcimpl.h index 86de9cac1a1..68ef306b5f8 100644 --- a/src/Native/gc/gcimpl.h +++ b/src/Native/gc/gcimpl.h @@ -160,7 +160,7 @@ class GCHeap : public ::GCHeap DWORD flags=0); // Find the relocation address for an object - PER_HEAP_ISOLATED void Relocate (Object** object, + PER_HEAP_ISOLATED void Relocate (Object** object, ScanContext* sc, DWORD flags=0); @@ -226,7 +226,7 @@ class 
GCHeap : public ::GCHeap BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers); BOOL ShouldRestartFinalizerWatchDog(); - void SetCardsAfterBulkCopy( Object**, size_t); + void SetCardsAfterBulkCopy( Object**, size_t); #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) void WalkObject (Object* obj, walk_fn fn, void* context); #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) diff --git a/src/Native/gc/gcobject.h b/src/Native/gc/gcobject.h new file mode 100644 index 00000000000..327f5e4910a --- /dev/null +++ b/src/Native/gc/gcobject.h @@ -0,0 +1,149 @@ +// +// Copyright (c) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. +// + +//------------------------------------------------------------------------------------------------- +// +// Low-level types describing GC object layouts. +// + +// Bits stolen from the sync block index that the GC/HandleTable knows about (currently these are at the same +// positions as the mainline runtime but we can change this below when it becomes apparent how Redhawk will +// handle sync blocks). +#define BIT_SBLK_GC_RESERVE 0x20000000 +#define BIT_SBLK_FINALIZER_RUN 0x40000000 + +// The sync block index header (small structure that immediately precedes every object in the GC heap). Only +// the GC uses this so far, and only to store a couple of bits of information. 
+class ObjHeader +{ +private: +#if defined(_WIN64) + uint32_t m_uAlignpad; +#endif // _WIN64 + uint32_t m_uSyncBlockValue; + +public: + uint32_t GetBits() { return m_uSyncBlockValue; } + void SetBit(uint32_t uBit) { FastInterlockOr(&m_uSyncBlockValue, uBit); } + void ClrBit(uint32_t uBit) { FastInterlockAnd(&m_uSyncBlockValue, ~uBit); } + void SetGCBit() { m_uSyncBlockValue |= BIT_SBLK_GC_RESERVE; } + void ClrGCBit() { m_uSyncBlockValue &= ~BIT_SBLK_GC_RESERVE; } +}; + +#define MTFlag_ContainsPointers 1 +#define MTFlag_HasFinalizer 2 +#define MTFlag_IsArray 4 + +class MethodTable +{ +public: + uint16_t m_componentSize; + uint16_t m_flags; + uint32_t m_baseSize; + + MethodTable * m_pRelatedType; + +public: + void InitializeFreeObject() + { + m_baseSize = 3 * sizeof(void *); + m_componentSize = 1; + m_flags = 0; + } + + uint32_t GetBaseSize() + { + return m_baseSize; + } + + uint16_t RawGetComponentSize() + { + return m_componentSize; + } + + bool ContainsPointers() + { + return (m_flags & MTFlag_ContainsPointers) != 0; + } + + bool ContainsPointersOrCollectible() + { + return ContainsPointers(); + } + + bool HasComponentSize() + { + return m_componentSize != 0; + } + + bool HasFinalizer() + { + return (m_flags & MTFlag_HasFinalizer) != 0; + } + + bool HasCriticalFinalizer() + { + return false; + } + + bool IsArray() + { + return (m_flags & MTFlag_IsArray) != 0; + } + + MethodTable * GetParent() + { + _ASSERTE(!IsArray()); + return m_pRelatedType; + } + + bool SanityCheck() + { + return true; + } +}; + +class Object +{ + MethodTable * m_pMethTab; + +public: + ObjHeader * GetHeader() + { + return ((ObjHeader *)this) - 1; + } + + MethodTable * RawGetMethodTable() const + { + return m_pMethTab; + } + + void RawSetMethodTable(MethodTable * pMT) + { + m_pMethTab = pMT; + } + + void SetMethodTable(MethodTable * pMT) + { + m_pMethTab = pMT; + } +}; +#define MIN_OBJECT_SIZE (2*sizeof(BYTE*) + sizeof(ObjHeader)) + +class ArrayBase : public Object +{ + DWORD m_dwLength; + 
+public: + DWORD GetNumComponents() + { + return m_dwLength; + } + + static SIZE_T GetOffsetOfNumComponents() + { + return offsetof(ArrayBase, m_dwLength); + } +}; diff --git a/src/Native/gc/gcwks.cpp b/src/Native/gc/gcwks.cpp index eb1d3140f70..b8921a58ea1 100644 --- a/src/Native/gc/gcwks.cpp +++ b/src/Native/gc/gcwks.cpp @@ -8,6 +8,7 @@ #include "common.h" #include "gcenv.h" +#include "gcobject.h" #include "gc.h" #include "gcscan.h" diff --git a/src/Native/gc/handletable.cpp b/src/Native/gc/handletable.cpp index c3d566f8fad..cb233dfa89d 100644 --- a/src/Native/gc/handletable.cpp +++ b/src/Native/gc/handletable.cpp @@ -669,6 +669,30 @@ void HndSetHandleExtraInfo(OBJECTHANDLE handle, UINT uType, LPARAM lExtraInfo) *pUserData = lExtraInfo; } } + +/* +* HndCompareExchangeHandleExtraInfo +* +* Stores owner data with handle. +* +*/ +LPARAM HndCompareExchangeHandleExtraInfo(OBJECTHANDLE handle, UINT uType, LPARAM lOldExtraInfo, LPARAM lNewExtraInfo) +{ + WRAPPER_NO_CONTRACT; + + // fetch the user data slot for this handle if we have the right type + LPARAM *pUserData = HandleValidateAndFetchUserDataPointer(handle, uType); + + // is there a slot? 
+ if (pUserData) + { + // yes - attempt to store the info + return (LPARAM)_FastInterlockCompareExchangePointer((PVOID*)pUserData, (PVOID)lNewExtraInfo, (PVOID)lOldExtraInfo); + } + + _ASSERTE(!"Shouldn't be trying to call HndCompareExchangeHandleExtraInfo on handle types without extra info"); + return NULL; +} #endif // !DACCESS_COMPILE /* diff --git a/src/Native/gc/handletable.h b/src/Native/gc/handletable.h index 10ca9468fa3..41ff462935a 100644 --- a/src/Native/gc/handletable.h +++ b/src/Native/gc/handletable.h @@ -84,6 +84,7 @@ void HndDestroyHandles(HHANDLETABLE hTable, UINT uType, const OBJECTH * owner data associated with handles */ void HndSetHandleExtraInfo(OBJECTHANDLE handle, UINT uType, LPARAM lExtraInfo); +LPARAM HndCompareExchangeHandleExtraInfo(OBJECTHANDLE handle, UINT uType, LPARAM lOldExtraInfo, LPARAM lNewExtraInfo); #endif // !DACCESS_COMPILE LPARAM HndGetHandleExtraInfo(OBJECTHANDLE handle); diff --git a/src/Native/gc/objecthandle.cpp b/src/Native/gc/objecthandle.cpp index 8e07e9e20f2..1969452e3a1 100644 --- a/src/Native/gc/objecthandle.cpp +++ b/src/Native/gc/objecthandle.cpp @@ -17,6 +17,12 @@ #include "gc.h" #include "gcscan.h" +#ifdef FEATURE_REDHAWK +#include "commontypes.h" +#include "commonmacros.h" +#include "restrictedcallouts.h" +#endif // FEATURE_REDHAWK + #include "objecthandle.h" #include "handletablepriv.h" @@ -46,7 +52,7 @@ struct VARSCANINFO { LPARAM lEnableMask; // mask of types to trace HANDLESCANPROC pfnTrace; // tracing function to use - LPARAM lp2; // second parameter + LPARAM lp2; // second parameter }; @@ -73,7 +79,7 @@ void CALLBACK VariableTraceDispatcher(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pEx } } -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) /* * Scan callback for tracing ref-counted handles. 
* @@ -97,11 +103,15 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pExtraInf if (!HndIsNullOrDestroyedHandle(pObj) && !GCHeap::GetGCHeap()->IsPromoted(pObj)) { +#ifdef FEATURE_REDHAWK + BOOL fIsActive = RestrictedCallouts::InvokeRefCountedHandleCallbacks(pObj); +#else // FEATURE_REDHAWK //@todo optimize the access to the ref-count ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObj); _ASSERTE(pWrap != NULL); BOOL fIsActive = pWrap->IsWrapperActive(); +#endif // FEATURE_REDHAWK if (fIsActive) { _ASSERTE(lp2); @@ -113,7 +123,7 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pExtraInf // Assert this object wasn't relocated since we are passing a temporary object's address. _ASSERTE(pOldObj == pObj); } -#endif // FEATURE_COMINTEROP +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pExtraInfo, LPARAM lp1, LPARAM lp2) { @@ -446,14 +456,28 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, LPARAM break; case HNDTYPE_VARIABLE: -#if 0 // this feature appears to be unused for now - rootFlags |= COR_PRF_GC_ROOT_VARIABLE; +#ifdef FEATURE_REDHAWK + { + // Set the appropriate ETW flags for the current strength of this variable handle + UINT nVarHandleType = GetVariableHandleType(handle); + if (((nVarHandleType & VHT_WEAK_SHORT) != 0) || + ((nVarHandleType & VHT_WEAK_LONG) != 0)) + { + rootFlags |= kEtwGCRootFlagsWeakRef; + } + if ((nVarHandleType & VHT_PINNED) != 0) + { + rootFlags |= kEtwGCRootFlagsPinning; + } + + // No special ETW flag for strong handles (VHT_STRONG) + } #else _ASSERTE(!"Variable handle encountered"); #endif break; -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) case HNDTYPE_REFCOUNTED: rootFlags |= kEtwGCRootFlagsRefCounted; if (*pRef != NULL) @@ -463,7 +487,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, LPARAM 
rootFlags |= kEtwGCRootFlagsWeakRef; } break; -#endif // FEATURE_COMINTEROP +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK } _UNCHECKED_OBJECTREF pSec = NULL; @@ -948,6 +972,17 @@ OBJECTHANDLE CreateVariableHandle(HHANDLETABLE hTable, OBJECTREF object, UINT ty return HndCreateHandle(hTable, HNDTYPE_VARIABLE, object, (LPARAM)type); } +/* +* GetVariableHandleType. +* +* Retrieves the dynamic type of a variable-strength handle. +*/ +UINT GetVariableHandleType(OBJECTHANDLE handle) +{ + WRAPPER_NO_CONTRACT; + + return (UINT)HndGetHandleExtraInfo(handle); +} /* * UpdateVariableHandleType. @@ -981,6 +1016,23 @@ void UpdateVariableHandleType(OBJECTHANDLE handle, UINT type) HndSetHandleExtraInfo(handle, HNDTYPE_VARIABLE, (LPARAM)type); } +/* +* CompareExchangeVariableHandleType. +* +* Changes the dynamic type of a variable-strength handle. Unlike UpdateVariableHandleType we assume that the +* types have already been validated. +*/ +UINT CompareExchangeVariableHandleType(OBJECTHANDLE handle, UINT oldType, UINT newType) +{ + WRAPPER_NO_CONTRACT; + + // verify that we are being asked to get/set valid types + _ASSERTE(IS_VALID_VHT_VALUE(oldType) && IS_VALID_VHT_VALUE(newType)); + + // attempt to store the type in the handle's extra info + return (UINT)HndCompareExchangeHandleExtraInfo(handle, HNDTYPE_VARIABLE, (LPARAM)oldType, (LPARAM)newType); +} + /* * TraceVariableHandles. 
@@ -1122,7 +1174,7 @@ void Ref_TraceNormalRoots(UINT condemned, UINT maxgen, ScanContext* sc, Ref_prom // promote objects pointed to by variable handles whose dynamic type is VHT_STRONG TraceVariableHandles(PromoteObject, LPARAM(sc), LPARAM(fn), VHT_STRONG, condemned, maxgen, flags); -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) // don't scan ref-counted handles during concurrent phase as the clean-up of CCWs can race with AD unload and cause AV's if (!sc->concurrent) { @@ -1141,7 +1193,7 @@ void Ref_TraceNormalRoots(UINT condemned, UINT maxgen, ScanContext* sc, Ref_prom walk = walk->pNext; } } -#endif // FEATURE_COMINTEROP +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK } #ifdef FEATURE_COMINTEROP @@ -1184,9 +1236,9 @@ void Ref_CheckReachable(UINT condemned, UINT maxgen, LPARAM lp1) UINT types[] = { HNDTYPE_WEAK_LONG, -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) HNDTYPE_REFCOUNTED, -#endif // FEATURE_COMINTEROP +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK }; // check objects pointed to by short weak handles @@ -1534,8 +1586,10 @@ void Ref_UpdatePointers(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promot HNDTYPE_WEAK_SHORT, HNDTYPE_WEAK_LONG, HNDTYPE_STRONG, -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) HNDTYPE_REFCOUNTED, +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK +#ifdef FEATURE_COMINTEROP HNDTYPE_WEAK_WINRT, #endif // FEATURE_COMINTEROP HNDTYPE_SIZEDREF, @@ -1547,12 +1601,12 @@ void Ref_UpdatePointers(UINT condemned, UINT maxgen, ScanContext* sc, Ref_promot HandleTableMap *walk = &g_HandleTableMap; while (walk) { for (UINT i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) - if (walk->pBuckets[i] != NULL) - { - HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; - if (hTable) - HndScanHandlesForGC(hTable, UpdatePointer, LPARAM(sc), LPARAM(fn), types, _countof(types), condemned, maxgen, flags); - } + if 
(walk->pBuckets[i] != NULL) + { + HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; + if (hTable) + HndScanHandlesForGC(hTable, UpdatePointer, LPARAM(sc), LPARAM(fn), types, _countof(types), condemned, maxgen, flags); + } walk = walk->pNext; } @@ -1578,10 +1632,12 @@ void Ref_ScanPointersForProfilerAndETW(UINT maxgen, LPARAM lp1) HNDTYPE_WEAK_SHORT, HNDTYPE_WEAK_LONG, HNDTYPE_STRONG, -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) HNDTYPE_REFCOUNTED, +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK +#ifdef FEATURE_COMINTEROP HNDTYPE_WEAK_WINRT, -#endif // FEATURE_COMINTEROP, +#endif // FEATURE_COMINTEROP HNDTYPE_PINNED, // HNDTYPE_VARIABLE, HNDTYPE_ASYNCPINNED, @@ -1671,8 +1727,10 @@ void Ref_AgeHandles(UINT condemned, UINT maxgen, LPARAM lp1) HNDTYPE_PINNED, HNDTYPE_VARIABLE, -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) HNDTYPE_REFCOUNTED, +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK +#ifdef FEATURE_COMINTEROP HNDTYPE_WEAK_WINRT, #endif // FEATURE_COMINTEROP HNDTYPE_ASYNCPINNED, @@ -1712,8 +1770,10 @@ void Ref_RejuvenateHandles(UINT condemned, UINT maxgen, LPARAM lp1) HNDTYPE_PINNED, HNDTYPE_VARIABLE, -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) HNDTYPE_REFCOUNTED, +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK +#ifdef FEATURE_COMINTEROP HNDTYPE_WEAK_WINRT, #endif // FEATURE_COMINTEROP HNDTYPE_ASYNCPINNED, @@ -1752,8 +1812,10 @@ void Ref_VerifyHandleTable(UINT condemned, UINT maxgen, ScanContext* sc) HNDTYPE_PINNED, HNDTYPE_VARIABLE, -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) HNDTYPE_REFCOUNTED, +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK +#ifdef FEATURE_COMINTEROP HNDTYPE_WEAK_WINRT, #endif // FEATURE_COMINTEROP HNDTYPE_ASYNCPINNED, @@ -1781,8 +1843,8 @@ int GetCurrentThreadHomeHeapNumber() { WRAPPER_NO_CONTRACT; - if (!GCHeap::IsGCHeapInitialized()) - return 0; + if 
(!GCHeap::IsGCHeapInitialized()) + return 0; return GCHeap::GetGCHeap()->GetHomeHeapNumber(); } diff --git a/src/Native/gc/objecthandle.h b/src/Native/gc/objecthandle.h index 6371c7ae725..5267d7bc92a 100644 --- a/src/Native/gc/objecthandle.h +++ b/src/Native/gc/objecthandle.h @@ -119,7 +119,7 @@ */ #define HNDTYPE_VARIABLE (4) -#ifdef FEATURE_COMINTEROP +#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) /* * REFCOUNTED HANDLES * @@ -131,7 +131,7 @@ * */ #define HNDTYPE_REFCOUNTED (5) -#endif // FEATURE_COMINTEROP +#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK /* @@ -149,7 +149,7 @@ * * */ -#define HNDTYPE_DEPENDENT (6) +#define HNDTYPE_DEPENDENT (6) /* * PINNED HANDLES for asynchronous operation @@ -439,7 +439,9 @@ inline void DestroyDependentHandle(OBJECTHANDLE handle) #ifndef DACCESS_COMPILE OBJECTHANDLE CreateVariableHandle(HHANDLETABLE hTable, OBJECTREF object, UINT type); +UINT GetVariableHandleType(OBJECTHANDLE handle); void UpdateVariableHandleType(OBJECTHANDLE handle, UINT type); +UINT CompareExchangeVariableHandleType(OBJECTHANDLE handle, UINT oldType, UINT newType); inline void DestroyVariableHandle(OBJECTHANDLE handle) { From 015b60a6ae813714a6dc576c36df4637880ab897 Mon Sep 17 00:00:00 2001 From: Scott Mosier Date: Wed, 14 Oct 2015 09:59:44 -0700 Subject: [PATCH 2/2] Additional GC-to-EE hookup. Provide specialized gcenv.h for CoreRT. Some refactoring of related gcenv stuff. Wire up finalizer code. Add runtime-specific scanning code. 
--- src/Native/Runtime/Crst.cpp | 5 + src/Native/Runtime/Crst.h | 80 +- src/Native/Runtime/EHHelpers.cpp | 1 + src/Native/Runtime/FinalizerHelpers.cpp | 4 - src/Native/Runtime/GCHelpers.cpp | 14 +- src/Native/Runtime/HandleTableHelpers.cpp | 2 - src/Native/Runtime/InstanceStore.cpp | 1 + src/Native/Runtime/ObjectLayout.h | 23 + src/Native/Runtime/PalRedhawk.h | 12 + src/Native/Runtime/PalRedhawkCommon.cpp | 4 +- src/Native/Runtime/PalRedhawkCommon.h | 2 + src/Native/Runtime/PalRedhawkFunctions.h | 38 +- src/Native/Runtime/RWLock.cpp | 4 - src/Native/Runtime/RestrictedCallouts.cpp | 1 + src/Native/Runtime/RhConfig.cpp | 1 + src/Native/Runtime/SpinLock.h | 2 - src/Native/Runtime/SyncClean.cpp | 1 + src/Native/Runtime/allocheap.cpp | 1 + src/Native/Runtime/dllmain.cpp | 1 + src/Native/Runtime/event.cpp | 3 +- src/Native/Runtime/event.h | 2 +- src/Native/Runtime/eventtrace.h | 9 + src/Native/Runtime/gcdump.cpp | 7 +- src/Native/Runtime/gcenv.h | 205 +++ src/Native/Runtime/gcrhenv.cpp | 1160 +++++++++++++++++ src/Native/Runtime/gcrhscan.cpp | 425 ++++++ src/Native/Runtime/portable.cpp | 1 + src/Native/Runtime/rhcommon.h | 4 +- src/Native/Runtime/threadstore.cpp | 3 - src/Native/gc/env/{gcenv.h => gcenv.base.h} | 261 +--- .../gc/{gcobject.h => env/gcenv.object.h} | 0 src/Native/gc/env/gcenv.sync.h | 158 +++ src/Native/gc/env/gcenv.windows.cpp | 194 ++- src/Native/gc/gc.cpp | 38 +- src/Native/gc/gc.h | 1 + src/Native/gc/gcee.cpp | 60 +- src/Native/gc/gceewks.cpp | 1 - src/Native/gc/gcimpl.h | 9 +- src/Native/gc/gcscan.cpp | 13 +- src/Native/gc/gcwks.cpp | 1 - src/Native/gc/objecthandle.cpp | 4 +- 41 files changed, 2313 insertions(+), 443 deletions(-) create mode 100644 src/Native/Runtime/gcenv.h create mode 100644 src/Native/Runtime/gcrhenv.cpp create mode 100644 src/Native/Runtime/gcrhscan.cpp rename src/Native/gc/env/{gcenv.h => gcenv.base.h} (86%) rename src/Native/gc/{gcobject.h => env/gcenv.object.h} (100%) create mode 100644 src/Native/gc/env/gcenv.sync.h diff --git 
a/src/Native/Runtime/Crst.cpp b/src/Native/Runtime/Crst.cpp index 220e9eea34f..e2af2652ef7 100644 --- a/src/Native/Runtime/Crst.cpp +++ b/src/Native/Runtime/Crst.cpp @@ -16,6 +16,11 @@ #include "crst.h" #endif // !DACCESS_COMPILE +bool EEThreadId::IsSameThread() +{ + return PalGetCurrentThreadId() == m_uiId; +} + void CrstStatic::Init(CrstType eType, CrstFlags eFlags) { #ifndef DACCESS_COMPILE diff --git a/src/Native/Runtime/Crst.h b/src/Native/Runtime/Crst.h index b8806673080..f1e7c100e25 100644 --- a/src/Native/Runtime/Crst.h +++ b/src/Native/Runtime/Crst.h @@ -8,13 +8,13 @@ class EEThreadId { public: - EEThreadId(UInt32 uiId) : m_uiId(uiId) {} + EEThreadId(uint32_t uiId) : m_uiId(uiId) {} #ifndef DACCESS_COMPILE - bool IsSameThread() { return PalGetCurrentThreadId() == m_uiId; } + bool IsSameThread(); #endif private: - UInt32 m_uiId; + uint32_t m_uiId; }; @@ -44,7 +44,11 @@ enum CrstType enum CrstFlags { - CRST_DEFAULT = 0x0, + CRST_DEFAULT = 0x0, + CRST_REENTRANCY = 0x0, + CRST_UNSAFE_SAMELEVEL = 0x0, + CRST_UNSAFE_ANYMODE = 0x0, + CRST_DEBUGGER_THREAD = 0x0, }; // Static version of Crst with no default constructor (user must call Init() before use). @@ -54,6 +58,8 @@ class CrstStatic void Init(CrstType eType, CrstFlags eFlags = CRST_DEFAULT); bool InitNoThrow(CrstType eType, CrstFlags eFlags = CRST_DEFAULT) { Init(eType, eFlags); return true; } void Destroy(); + void Enter() { CrstStatic::Enter(this); } + void Leave() { CrstStatic::Leave(this); } static void Enter(CrstStatic *pCrst); static void Leave(CrstStatic *pCrst); #if defined(_DEBUG) @@ -64,8 +70,8 @@ class CrstStatic private: CRITICAL_SECTION m_sCritSec; #if defined(_DEBUG) - UInt32 m_uiOwnerId; - static const UInt32 UNOWNED = 0; + uint32_t m_uiOwnerId; + static const uint32_t UNOWNED = 0; #endif // _DEBUG }; @@ -79,14 +85,62 @@ class Crst : public CrstStatic }; // Holder for a Crst instance. 
-class CrstHolder : public Holder +class CrstHolder { + CrstStatic * m_pLock; + public: - CrstHolder(CrstStatic *pCrst, bool fTake = true) : Holder(pCrst, fTake) {} - ~CrstHolder() {} + CrstHolder(CrstStatic * pLock) + : m_pLock(pLock) + { + m_pLock->Enter(); + } + + ~CrstHolder() + { + m_pLock->Leave(); + } }; -// The CLR has split the Crst holders into CrstHolder which only supports acquire on construction/release on -// destruction semantics and CrstHolderWithState, with the old, fully flexible semantics. We don't support the -// split yet so both types are equivalent. -typedef CrstHolder CrstHolderWithState; +class CrstHolderWithState +{ + CrstStatic * m_pLock; + bool m_fAcquired; + +public: + CrstHolderWithState(CrstStatic * pLock, bool fAcquire = true) + : m_pLock(pLock), m_fAcquired(fAcquire) + { + if (fAcquire) + m_pLock->Enter(); + } + + ~CrstHolderWithState() + { + if (m_fAcquired) + m_pLock->Leave(); + } + + void Acquire() + { + if (!m_fAcquired) + { + m_pLock->Enter(); + m_fAcquired = true; + } + } + + void Release() + { + if (m_fAcquired) + { + m_pLock->Leave(); + m_fAcquired = false; + } + } + + CrstStatic * GetValue() + { + return m_pLock; + } +}; diff --git a/src/Native/Runtime/EHHelpers.cpp b/src/Native/Runtime/EHHelpers.cpp index a377594d605..a77ee165f9b 100644 --- a/src/Native/Runtime/EHHelpers.cpp +++ b/src/Native/Runtime/EHHelpers.cpp @@ -3,6 +3,7 @@ // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// #ifndef DACCESS_COMPILE +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" #include "commonmacros.h" diff --git a/src/Native/Runtime/FinalizerHelpers.cpp b/src/Native/Runtime/FinalizerHelpers.cpp index c6ce37dd1a7..442a5a1c209 100644 --- a/src/Native/Runtime/FinalizerHelpers.cpp +++ b/src/Native/Runtime/FinalizerHelpers.cpp @@ -9,16 +9,12 @@ #include "common.h" #include "gcenv.h" #include "gc.h" -#include "commontypes.h" -#include "commonmacros.h" -#include "daccess.h" #include "slist.h" #include "gcrhinterface.h" #include "rwlock.h" #include "runtimeinstance.h" #include "module.h" -#include "objectlayout.h" // Block the current thread until at least one object needs to be finalized (returns true) or memory is low // (returns false and the finalizer thread should initiate a garbage collection). diff --git a/src/Native/Runtime/GCHelpers.cpp b/src/Native/Runtime/GCHelpers.cpp index 601c2c4c8ff..8977917fdc0 100644 --- a/src/Native/Runtime/GCHelpers.cpp +++ b/src/Native/Runtime/GCHelpers.cpp @@ -10,15 +10,17 @@ #include "common.h" #include "gcenv.h" #include "gc.h" -#include "commontypes.h" -#include "commonmacros.h" #include "restrictedcallouts.h" -#include "daccess.h" -#include "targetptrs.h" -#include "eetype.h" -#include "objectlayout.h" +#include "gcrhinterface.h" +#include "palredhawkcommon.h" +#include "slist.h" +#include "varint.h" +#include "regdisplay.h" +#include "stackframeiterator.h" + +#include "thread.h" COOP_PINVOKE_HELPER(void, RhSuppressFinalize, (OBJECTREF refObj)) { diff --git a/src/Native/Runtime/HandleTableHelpers.cpp b/src/Native/Runtime/HandleTableHelpers.cpp index d9e1e7ecb14..424b8ca6e50 100644 --- a/src/Native/Runtime/HandleTableHelpers.cpp +++ b/src/Native/Runtime/HandleTableHelpers.cpp @@ -13,8 +13,6 @@ #include "common.h" #include "gcenv.h" #include "objecthandle.h" -#include "commontypes.h" -#include "commonmacros.h" #include "restrictedcallouts.h" diff --git a/src/Native/Runtime/InstanceStore.cpp 
b/src/Native/Runtime/InstanceStore.cpp index 6b6e1a057c9..bc307e4ec54 100644 --- a/src/Native/Runtime/InstanceStore.cpp +++ b/src/Native/Runtime/InstanceStore.cpp @@ -2,6 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" #include "commonmacros.h" diff --git a/src/Native/Runtime/ObjectLayout.h b/src/Native/Runtime/ObjectLayout.h index fd53e6ae275..1502abdfd0c 100644 --- a/src/Native/Runtime/ObjectLayout.h +++ b/src/Native/Runtime/ObjectLayout.h @@ -34,6 +34,10 @@ class ObjHeader //------------------------------------------------------------------------------------------------- static UIntNative const SYNC_BLOCK_SKEW = sizeof(void *); +class EEType; +typedef DPTR(class EEType) PTR_EEType; +class MethodTable; + //------------------------------------------------------------------------------------------------- class Object { @@ -53,6 +57,24 @@ class Object size_t GetSize(); #endif + + // + // Adapter methods for GC code so that GC and runtime code can use the same type. + // These methods are deprecated -- only use from existing GC code. 
+ // + MethodTable * RawGetMethodTable() const + { + return (MethodTable*)m_pEEType; + } + void RawSetMethodTable(MethodTable * pMT) + { + m_pEEType = (EEType *)pMT; + } + void SetMethodTable(MethodTable * pMT) + { + m_pEEType = (EEType *)pMT; + } + ////// End adaptor methods }; typedef DPTR(Object) PTR_Object; typedef DPTR(PTR_Object) PTR_PTR_Object; @@ -66,6 +88,7 @@ static UIntNative const REFERENCE_SIZE = sizeof(Object *); //------------------------------------------------------------------------------------------------- class Array : public Object { + friend class ArrayBase; friend class AsmOffsets; UInt32 m_Length; diff --git a/src/Native/Runtime/PalRedhawk.h b/src/Native/Runtime/PalRedhawk.h index d58c99b2ea4..dfca3bad46f 100644 --- a/src/Native/Runtime/PalRedhawk.h +++ b/src/Native/Runtime/PalRedhawk.h @@ -106,6 +106,18 @@ struct SYSTEM_INFO UInt16 wProcessorRevision; }; +// defined in gcrhenv.cpp +bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount); + +// @TODO: also declared in gcenv.h +struct GCSystemInfo +{ + uint32_t dwNumberOfProcessors; + uint32_t dwPageSize; + uint32_t dwAllocationGranularity; +}; +extern GCSystemInfo g_SystemInfo; + struct OSVERSIONINFOEXW { UInt32 dwOSVersionInfoSize; diff --git a/src/Native/Runtime/PalRedhawkCommon.cpp b/src/Native/Runtime/PalRedhawkCommon.cpp index 4682cea6401..9488d1631e4 100644 --- a/src/Native/Runtime/PalRedhawkCommon.cpp +++ b/src/Native/Runtime/PalRedhawkCommon.cpp @@ -34,11 +34,9 @@ #ifdef APP_LOCAL_RUNTIME #ifdef _DEBUG -EXTERN_C { - HANDLE WINAPI GetStdHandle( +EXTERN_C WINBASEAPI HANDLE WINAPI GetStdHandle( _In_ DWORD nStdHandle ); -} #endif // _DEBUG #endif // APP_LOCAL_RUNTIME diff --git a/src/Native/Runtime/PalRedhawkCommon.h b/src/Native/Runtime/PalRedhawkCommon.h index 51e0c8d7230..02644b07438 100644 --- a/src/Native/Runtime/PalRedhawkCommon.h +++ b/src/Native/Runtime/PalRedhawkCommon.h @@ -15,6 +15,7 @@ #ifndef __PAL_REDHAWK_COMMON_INCLUDED #define __PAL_REDHAWK_COMMON_INCLUDED 
+#ifndef GCENV_INCLUDED // We define the notion of capabilities: optional functionality that the PAL may expose. Use // PalHasCapability() with the constants below to determine what is supported at runtime. enum PalCapability @@ -23,6 +24,7 @@ enum PalCapability LowMemoryNotificationCapability = 0x00000002, // CreateMemoryResourceNotification() and friends GetCurrentProcessorNumberCapability = 0x00000004, // GetCurrentProcessorNumber() }; +#endif // !GCENV_INCLUDED #define DECLSPEC_ALIGN(x) __declspec(align(x)) diff --git a/src/Native/Runtime/PalRedhawkFunctions.h b/src/Native/Runtime/PalRedhawkFunctions.h index e22baca0bfc..71c87bb9efe 100644 --- a/src/Native/Runtime/PalRedhawkFunctions.h +++ b/src/Native/Runtime/PalRedhawkFunctions.h @@ -111,12 +111,6 @@ inline UInt32 PalGetLastError() return GetLastError(); } -extern "C" void __stdcall GetNativeSystemInfo(SYSTEM_INFO *); -inline void PalGetNativeSystemInfo(SYSTEM_INFO * arg1) -{ - GetNativeSystemInfo(arg1); -} - extern "C" void * __stdcall GetProcAddress(HANDLE, char *); inline void * PalGetProcAddress(HANDLE arg1, char * arg2) { @@ -129,11 +123,6 @@ inline HANDLE PalGetProcessHeap() return GetProcessHeap(); } -extern "C" void __stdcall GetSystemTimeAsFileTime(FILETIME *); -inline void PalGetSystemTimeAsFileTime(FILETIME * arg1) -{ - GetSystemTimeAsFileTime(arg1); -} extern "C" UInt64 __stdcall GetTickCount64(); inline UInt64 PalGetTickCount64() @@ -195,12 +184,6 @@ inline void PalRaiseException(UInt32 arg1, UInt32 arg2, UInt32 arg3, const UInt3 RaiseException(arg1, arg2, arg3, arg4); } -extern "C" void __stdcall RaiseFailFastException(PEXCEPTION_RECORD, PCONTEXT, UInt32); -inline void PalRaiseFailFastException(PEXCEPTION_RECORD arg1, PCONTEXT arg2, UInt32 arg3) -{ - RaiseFailFastException(arg1, arg2, arg3); -} - extern "C" UInt32_BOOL __stdcall ReleaseMutex(HANDLE); inline UInt32_BOOL PalReleaseMutex(HANDLE arg1) { @@ -243,9 +226,28 @@ inline UInt32 PalWaitForSingleObjectEx(HANDLE arg1, UInt32 arg2, UInt32_BOOL 
arg return WaitForSingleObjectEx(arg1, arg2, arg3); } +#ifdef PAL_REDHAWK_INCLUDED +extern "C" void __stdcall GetNativeSystemInfo(SYSTEM_INFO *); +inline void PalGetNativeSystemInfo(SYSTEM_INFO * arg1) +{ + GetNativeSystemInfo(arg1); +} + +extern "C" void __stdcall GetSystemTimeAsFileTime(FILETIME *); +inline void PalGetSystemTimeAsFileTime(FILETIME * arg1) +{ + GetSystemTimeAsFileTime(arg1); +} + +extern "C" void __stdcall RaiseFailFastException(PEXCEPTION_RECORD, PCONTEXT, UInt32); +inline void PalRaiseFailFastException(PEXCEPTION_RECORD arg1, PCONTEXT arg2, UInt32 arg3) +{ + RaiseFailFastException(arg1, arg2, arg3); +} + extern "C" UInt32_BOOL __stdcall WriteFile(HANDLE, const void *, UInt32, UInt32 *, LPOVERLAPPED); inline UInt32_BOOL PalWriteFile(HANDLE arg1, const void * arg2, UInt32 arg3, UInt32 * arg4, LPOVERLAPPED arg5) { return WriteFile(arg1, arg2, arg3, arg4, arg5); } - +#endif diff --git a/src/Native/Runtime/RWLock.cpp b/src/Native/Runtime/RWLock.cpp index 8b7de632fff..df17e8d87ae 100644 --- a/src/Native/Runtime/RWLock.cpp +++ b/src/Native/Runtime/RWLock.cpp @@ -105,10 +105,6 @@ ReaderWriterLock::ReaderWriterLock() : #ifndef DACCESS_COMPILE -// defined in gcrhenv.cpp -UInt32_BOOL __SwitchToThread(UInt32 dwSleepMSec, UInt32 dwSwitchCount); -extern SYSTEM_INFO g_SystemInfo; - // Attempt to take the read lock, but do not wait if a writer has the lock. // Release the lock if successfully acquired. Returns true if the lock was // taken and released. Returns false if a writer had the lock. diff --git a/src/Native/Runtime/RestrictedCallouts.cpp b/src/Native/Runtime/RestrictedCallouts.cpp index 3b5943cc34a..a4958773f2e 100644 --- a/src/Native/Runtime/RestrictedCallouts.cpp +++ b/src/Native/Runtime/RestrictedCallouts.cpp @@ -8,6 +8,7 @@ // RestrictedCallouts.h for more detail. 
// +#include "rhcommon.h" #include "commontypes.h" #include "commonmacros.h" #include "daccess.h" diff --git a/src/Native/Runtime/RhConfig.cpp b/src/Native/Runtime/RhConfig.cpp index e2aa9e43029..77da48694c0 100644 --- a/src/Native/Runtime/RhConfig.cpp +++ b/src/Native/Runtime/RhConfig.cpp @@ -3,6 +3,7 @@ // Licensed under the MIT license. See LICENSE file in the project root for full license information. // #ifndef DACCESS_COMPILE +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" #include "commonmacros.h" diff --git a/src/Native/Runtime/SpinLock.h b/src/Native/Runtime/SpinLock.h index 35e3afac003..78d4de52547 100644 --- a/src/Native/Runtime/SpinLock.h +++ b/src/Native/Runtime/SpinLock.h @@ -5,8 +5,6 @@ #ifndef __SPINLOCK_H__ #define __SPINLOCK_H__ -UInt32_BOOL __SwitchToThread(UInt32 dwSleepMSec, UInt32 dwSwitchCount); - // #SwitchToThreadSpinning // // If you call __SwitchToThread in a loop waiting for a condition to be met, diff --git a/src/Native/Runtime/SyncClean.cpp b/src/Native/Runtime/SyncClean.cpp index 785a5f8ba13..b9f9d9d38a5 100644 --- a/src/Native/Runtime/SyncClean.cpp +++ b/src/Native/Runtime/SyncClean.cpp @@ -2,6 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" #include "forward_declarations.h" diff --git a/src/Native/Runtime/allocheap.cpp b/src/Native/Runtime/allocheap.cpp index 3f97fe0b31b..ce2ae822f10 100644 --- a/src/Native/Runtime/allocheap.cpp +++ b/src/Native/Runtime/allocheap.cpp @@ -2,6 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
// +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" #include "commonmacros.h" diff --git a/src/Native/Runtime/dllmain.cpp b/src/Native/Runtime/dllmain.cpp index 12295eab337..e30ea925b06 100644 --- a/src/Native/Runtime/dllmain.cpp +++ b/src/Native/Runtime/dllmain.cpp @@ -2,6 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" #include "commonmacros.h" diff --git a/src/Native/Runtime/event.cpp b/src/Native/Runtime/event.cpp index 0bc07055984..617c3bcabb3 100644 --- a/src/Native/Runtime/event.cpp +++ b/src/Native/Runtime/event.cpp @@ -2,6 +2,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. // +#include "rhcommon.h" #include "commontypes.h" #include "daccess.h" #include "commonmacros.h" @@ -79,7 +80,7 @@ bool CLREventStatic::Reset() return PalResetEvent(m_hEvent); } -UInt32 CLREventStatic::Wait(UInt32 dwMilliseconds, bool bAlertable, bool bAllowReentrantWait) +uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable, bool bAllowReentrantWait) { UInt32 result = WAIT_FAILED; diff --git a/src/Native/Runtime/event.h b/src/Native/Runtime/event.h index 0fdc256da70..3d8ae815c12 100644 --- a/src/Native/Runtime/event.h +++ b/src/Native/Runtime/event.h @@ -13,7 +13,7 @@ class CLREventStatic bool IsValid() const; bool Set(); bool Reset(); - UInt32 Wait(UInt32 dwMilliseconds, bool bAlertable, bool bAllowReentrantWait = false); + uint32_t Wait(uint32_t dwMilliseconds, bool bAlertable, bool bAllowReentrantWait = false); HANDLE GetOSEvent(); private: diff --git a/src/Native/Runtime/eventtrace.h b/src/Native/Runtime/eventtrace.h index c322bc30a73..b405e8faa56 100644 --- a/src/Native/Runtime/eventtrace.h +++ b/src/Native/Runtime/eventtrace.h @@ 
-269,6 +269,15 @@ namespace ETW // 1 means the notification was due to allocation; 0 means it was due to other factors. ULONG Alloc; } GCFullNotify; + + typedef enum _GC_ROOT_KIND { + GC_ROOT_STACK = 0, + GC_ROOT_FQ = 1, + GC_ROOT_HANDLES = 2, + GC_ROOT_OLDER = 3, + GC_ROOT_SIZEDREF = 4, + GC_ROOT_OVERFLOW = 5 + } GC_ROOT_KIND; } ETW_GC_INFO, *PETW_GC_INFO; #ifdef FEATURE_EVENT_TRACE diff --git a/src/Native/Runtime/gcdump.cpp b/src/Native/Runtime/gcdump.cpp index 67253c1e899..5a11d7ff3c9 100644 --- a/src/Native/Runtime/gcdump.cpp +++ b/src/Native/Runtime/gcdump.cpp @@ -11,11 +11,12 @@ * or may be persisted by a managed native code compiler conforming * to the standard code-manager spec. */ -#include "rhcommon.h" +#include "common.h" -#if (defined(_DEBUG) || defined(DACCESS_COMPILE)) && !defined(USE_PORTABLE_HELPERS) +#if (defined(_DEBUG) || defined(DACCESS_COMPILE)) -#include "gcrhenv.h" // @TODO: move off of gcrhenv.h +#include "gcenv.h" +#include "varint.h" #include "gcinfo.h" #include "gcdump.h" diff --git a/src/Native/Runtime/gcenv.h b/src/Native/Runtime/gcenv.h new file mode 100644 index 00000000000..87af654ae59 --- /dev/null +++ b/src/Native/Runtime/gcenv.h @@ -0,0 +1,205 @@ +// +// Copyright (c) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+// +#define FEATURE_PREMORTEM_FINALIZATION + +#include "gcenv.base.h" + +#include "crst.h" +#include "event.h" +#include "commontypes.h" +#include "commonmacros.h" +#include "daccess.h" +#include "targetptrs.h" +#include "eetype.h" +#include "objectlayout.h" + + +// Adapter for GC's view of Array +class ArrayBase : Array +{ +public: + DWORD GetNumComponents() + { + return m_Length; + } + + static SIZE_T GetOffsetOfNumComponents() + { + return offsetof(ArrayBase, m_Length); + } +}; + +// +// ----------------------------------------------------------------------------------------------------------- +// +// Bridge GC/HandleTable's version of MethodTable to Redhawk's EEType. Neither component tries to access any +// fields of MethodTable directly so this is mostly just a case of providing all the CLR-style accessors they +// need implemented on top of EEType functionality (we can simply recast the 'this' pointer into an EEType +// pointer). +// +// ****** NOTE: Do NOT attempt to add fields or virtual methods to this class! The pointer passed in 'this' +// ****** really does point to an EEType (there's no such thing as a MethodTable structure in RH). 
+// +class MethodTable +{ +public: + UInt32 GetBaseSize() { return ((EEType*)this)->get_BaseSize(); } + UInt16 GetComponentSize() { return ((EEType*)this)->get_ComponentSize(); } + UInt16 RawGetComponentSize() { return ((EEType*)this)->get_ComponentSize(); } + UInt32 ContainsPointers() { return ((EEType*)this)->HasReferenceFields(); } + UInt32 ContainsPointersOrCollectible() { return ((EEType*)this)->HasReferenceFields(); } + UInt32_BOOL HasComponentSize() const { return TRUE; } +#ifdef FEATURE_PREMORTEM_FINALIZATION + UInt32_BOOL HasFinalizer() { return ((EEType*)this)->HasFinalizer(); } + UInt32_BOOL HasCriticalFinalizer() { return FALSE; } +#endif // FEATURE_PREMORTEM_FINALIZATION +#ifdef FEATURE_STRUCTALIGN +#ifdef FEATURE_BARTOK + UInt32 GetRequiredAlignment() const { return ((EEType*)this)->get_BaseAlignment(); } +#else // FEATURE_BARTOK + UInt32 GetRequiredAlignment() const { return sizeof(void*); } +#endif // FEATURE_BARTOK +#endif // FEATURE_STRUCTALIGN + UInt32_BOOL SanityCheck() { return ((EEType*)this)->Validate(); } +}; + +class EEConfig +{ + BYTE m_gcStressMode; + +public: + enum HeapVerifyFlags { + HEAPVERIFY_NONE = 0, + HEAPVERIFY_GC = 1, // Verify the heap at beginning and end of GC + HEAPVERIFY_BARRIERCHECK = 2, // Verify the brick table + HEAPVERIFY_SYNCBLK = 4, // Verify sync block scanning + + // the following options can be used to mitigate some of the overhead introduced + // by heap verification. some options might cause heap verification to be less + // effective depending on the scenario. + + HEAPVERIFY_NO_RANGE_CHECKS = 0x10, // Excludes checking if an OBJECTREF is within the bounds of the managed heap + HEAPVERIFY_NO_MEM_FILL = 0x20, // Excludes filling unused segment portions with fill pattern + HEAPVERIFY_POST_GC_ONLY = 0x40, // Performs heap verification post-GCs only (instead of before and after each GC) + HEAPVERIFY_DEEP_ON_COMPACT = 0x80 // Performs deep object verification only on compacting GCs. 
+ }; + + typedef enum { + CONFIG_SYSTEM, + CONFIG_APPLICATION, + CONFIG_SYSTEMONLY + } ConfigSearch; + + enum GCStressFlags { + GCSTRESS_NONE = 0, + GCSTRESS_ALLOC = 1, // GC on all allocs and 'easy' places + GCSTRESS_TRANSITION = 2, // GC on transitions to preemptive GC + GCSTRESS_INSTR_JIT = 4, // GC on every allowable JITed instr + GCSTRESS_INSTR_NGEN = 8, // GC on every allowable NGEN instr + GCSTRESS_UNIQUE = 16, // GC only on a unique stack trace + }; + + // This is treated like a constructor--it is not allowed to fail. We have it like this because we don't + // have a CRT to run a static constructor for us. For now, at least, we don't want to do any heavy-weight + // snooping of the environment to control any of these settings, so don't add any code like that here. + void Construct() + { + m_gcStressMode = GCSTRESS_NONE; + } + + uint32_t ShouldInjectFault(uint32_t faultType) const { UNREFERENCED_PARAMETER(faultType); return FALSE; } + + int GetHeapVerifyLevel(); + bool IsHeapVerifyEnabled() { return GetHeapVerifyLevel() != 0; } + + GCStressFlags GetGCStressLevel() const { return (GCStressFlags) m_gcStressMode; } + void SetGCStressLevel(int val) { m_gcStressMode = (BYTE) val;} + bool IsGCStressMix() const { return false; } + + int GetGCtraceStart() const { return 0; } + int GetGCtraceEnd () const { return 0; }//1000000000; } + int GetGCtraceFac () const { return 0; } + int GetGCprnLvl () const { return 0; } + bool IsGCBreakOnOOMEnabled() const { return false; } + int GetGCgen0size () const { return 0; } + void SetGCgen0size (int iSize) { UNREFERENCED_PARAMETER(iSize); } + int GetSegmentSize () const { return 0; } + void SetSegmentSize (int iSize) { UNREFERENCED_PARAMETER(iSize); } + int GetGCconcurrent(); + void SetGCconcurrent(int val) { UNREFERENCED_PARAMETER(val); } + int GetGCLatencyMode() const { return 1; } + int GetGCForceCompact() const { return 0; } + int GetGCRetainVM () const { return 0; } + int GetGCTrimCommit() const { return 0; } + int 
GetGCLOHCompactionMode() const { return 0; } + + bool GetGCAllowVeryLargeObjects () const { return false; } + + // We need conservative GC enabled for some edge cases around ICastable support. This doesn't have much + // impact, it just makes the GC slightly more flexible in dealing with interior references (e.g. we can + // conservatively report an interior reference inside a GC free object or in the non-valid tail of the + // heap). + bool GetGCConservative() const { return true; } +}; +extern EEConfig* g_pConfig; + +#ifdef VERIFY_HEAP +class SyncBlockCache; + +extern SyncBlockCache g_sSyncBlockCache; + +class SyncBlockCache +{ +public: + static SyncBlockCache *GetSyncBlockCache() { return &g_sSyncBlockCache; } + void GCWeakPtrScan(void *pCallback, LPARAM pCtx, int dummy) + { + UNREFERENCED_PARAMETER(pCallback); + UNREFERENCED_PARAMETER(pCtx); + UNREFERENCED_PARAMETER(dummy); + } + void GCDone(uint32_t demoting, int max_gen) + { + UNREFERENCED_PARAMETER(demoting); + UNREFERENCED_PARAMETER(max_gen); + } + void VerifySyncTableEntry() {} +}; + +#endif // VERIFY_HEAP + +// +// ----------------------------------------------------------------------------------------------------------- +// +// Support for shutdown finalization, which is off by default but can be enabled by the class library. +// + +// If true runtime shutdown will attempt to finalize all finalizable objects (even those still rooted). +extern bool g_fPerformShutdownFinalization; + +// Time to wait (in milliseconds) for the above finalization to complete before giving up and proceeding with +// shutdown. Can specify INFINITE for no timeout. +extern UInt32 g_uiShutdownFinalizationTimeout; + +// Flag set to true once we've begun shutdown (and before shutdown finalization begins). This is exported to +// the class library so that managed code can tell when it is safe to access other objects from finalizers. 
+extern bool g_fShutdownHasStarted; + + +#ifdef DACCESS_COMPILE + +// The DAC uses DebuggerEnumGcRefContext in place of a GCCONTEXT when doing reference +// enumeration. The GC passes through additional data in the ScanContext which the debugger +// neither has nor needs. While we could refactor the GC code to make an interface +// with less coupling, that might affect perf or make integration messier. Instead +// we use some typedefs so DAC and runtime can get strong yet distinct types. + +typedef Thread::ScanCallbackData EnumGcRefScanContext; +typedef void EnumGcRefCallbackFunc(PTR_PTR_Object, EnumGcRefScanContext* callbackData, DWORD flags); + +#else +typedef promote_func EnumGcRefCallbackFunc; +typedef ScanContext EnumGcRefScanContext; +#endif diff --git a/src/Native/Runtime/gcrhenv.cpp b/src/Native/Runtime/gcrhenv.cpp new file mode 100644 index 00000000000..61c9294ce55 --- /dev/null +++ b/src/Native/Runtime/gcrhenv.cpp @@ -0,0 +1,1160 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. +// + +// +// This module provides data storage and implementations needed by gcrhenv.h to help provide an isolated build +// and runtime environment in which GC and HandleTable code can exist with minimal modifications from the CLR +// mainline. See gcrhenv.h for a more detailed explanation of how this all fits together. 
+// + +#include "common.h" + +#include "gcenv.h" +#include "gc.h" +#include "restrictedcallouts.h" + +#include "palredhawkcommon.h" + +#include "gcrhinterface.h" + +#include "slist.h" +#include "varint.h" +#include "regdisplay.h" +#include "stackframeiterator.h" + +#include "thread.h" + +#include "module.h" +#include "rwlock.h" +#include "runtimeinstance.h" +#include "objecthandle.h" +#include "eetype.inl" +#include "rhconfig.h" + +#include "threadstore.h" + +#include "gcdesc.h" +#include "syncclean.hpp" + +typedef void(__stdcall *PFLS_CALLBACK_FUNCTION) (void* lpFlsData); + +#include "PalRedhawkFunctions.h" + +#define USE_CLR_CACHE_SIZE_BEHAVIOR + +EXTERN_C UInt32_BOOL __stdcall PalSwitchToThread(); +EXTERN_C void __stdcall PalSleep(UInt32 milliseconds); + +typedef UInt32(__stdcall *BackgroundCallback)(_In_opt_ void* pCallbackContext); +EXTERN_C UInt32_BOOL __stdcall PalStartFinalizerThread(_In_ BackgroundCallback callback, _In_opt_ void* pCallbackContext); +bool StartFinalizerThread(); + + +#ifndef DACCESS_COMPILE + +// Undo the definitions of any macros set up for GC code which conflict with our usage of PAL APIs below. +#undef GetCurrentThreadId +#undef DebugBreak + +// +// ----------------------------------------------------------------------------------------------------------- +// +// Various global data cells the GC and/or HandleTable rely on. Some are just here to enable easy compilation: +// their value doesn't matter since it won't be consumed at runtime. Others we may have to initialize to some +// reasonable value. A few we might have to manage through the lifetime of the runtime. Each is considered on +// a case by case basis. +// + +#endif // !DACCESS_COMPILE + +#if 0 // @TODO_SDM: in gccommon.cpp +// The one and only GC heap in use. Initialized in RedhawkGCInterface::InitializeSubsystems() below. 
+GPTR_IMPL_INIT(GCHeap, g_pGCHeap, NULL); +#endif + +#if 0 // @TODO_SDM: in gcenv.windows.cpp +// Special MethodTable used by the GC to mark free list entries. Initialized in +// RedhawkGCInterface::InitializeSubsystems() below. +GPTR_IMPL_INIT(MethodTable, g_pFreeObjectMethodTable, NULL); +#endif + +#ifndef DACCESS_COMPILE + +#if 0 // @TODO_SDM: in gccommon.cpp +// Hosting API related (zero OK). +LONG g_bLowMemoryFromHost = 0; +#endif // 0 + +#if 0 // @TODO_SDM: in gcenv.windows.cpp +// Signals when threads can potentially be hijacked on their way back into the runtime. +// todo: thread suspension, runtime entry/exit story needs to be worked out +UInt32_BOOL g_TrapReturningThreads = 0; + +#ifdef FEATURE_PREMORTEM_FINALIZATION +// Used during shutdown, don't need to worry about this (yet). +bool g_fFinalizerRunOnShutDown = false; +#endif // FEATURE_PREMORTEM_FINALIZATION +#endif + +// +// Simplified EEConfig -- It is just a static member, which statically initializes to the default values and +// has no dynamic initialization. Some settings may change at runtime, however. (Example: gcstress is +// enabled via a compiled-in call from a given managed module, not through snooping an environment setting.) +// + +static EEConfig s_sDummyConfig; +EEConfig* g_pConfig = &s_sDummyConfig; + +int EEConfig::GetHeapVerifyLevel() +{ + return g_pRhConfig->GetHeapVerify(); +} + +int EEConfig::GetGCconcurrent() +{ + return !g_pRhConfig->GetDisableBGC(); +} + +// A few settings are now backed by the cut-down version of Redhawk configuration values. +static RhConfig g_sRhConfig; +RhConfig * g_pRhConfig = &g_sRhConfig; + + +#ifdef FEATURE_ETW +// +// ----------------------------------------------------------------------------------------------------------- +// +// The automatically generated part of the Redhawk ETW infrastructure (EtwEvents.h) calls the following +// function whenever the system enables or disables tracing for this provider. 
+// + +UInt32 EtwCallback(UInt32 IsEnabled, RH_ETW_CONTEXT * pContext) +{ + if (IsEnabled && + (pContext->RegistrationHandle == Microsoft_Windows_Redhawk_GC_PrivateHandle) && + GCHeap::IsGCHeapInitialized()) + { + FireEtwGCSettings(GCHeap::GetGCHeap()->GetValidSegmentSize(FALSE), + GCHeap::GetGCHeap()->GetValidSegmentSize(TRUE), + GCHeap::IsServerHeap()); + GCHeap::GetGCHeap()->TraceGCSegments(); + } + + // Special check for the runtime provider's GCHeapCollectKeyword. Profilers + // flick this to force a full GC. + if (IsEnabled && + (pContext->RegistrationHandle == Microsoft_Windows_Redhawk_GC_PublicHandle) && + GCHeap::IsGCHeapInitialized() && + ((pContext->MatchAnyKeyword & CLR_GCHEAPCOLLECT_KEYWORD) != 0)) + { + // Profilers may (optionally) specify extra data in the filter parameter + // to log with the GCStart event. + LONGLONG l64ClientSequenceNumber = 0; + if ((pContext->FilterData != NULL) && + (pContext->FilterData->Type == 1) && + (pContext->FilterData->Size == sizeof(l64ClientSequenceNumber))) + { + l64ClientSequenceNumber = *(LONGLONG *) (pContext->FilterData->Ptr); + } + ETW::GCLog::ForceGC(l64ClientSequenceNumber); + } + + return 0; +} +#endif // FEATURE_ETW + +// +// ----------------------------------------------------------------------------------------------------------- +// +// The rest of Redhawk needs to be able to talk to the GC/HandleTable code (to initialize it, allocate +// objects etc.) without pulling in the entire adaptation layer provided by this file and gcrhenv.h. To this +// end the rest of Redhawk talks to us via a simple interface described in gcrhinterface.h. We provide the +// implementation behind those APIs here. +// + +// Perform any runtime-startup initialization needed by the GC, HandleTable or environmental code in gcrhenv. +// The boolean parameter should be true if a server GC is required and false for workstation. Returns true on +// success or false if a subsystem failed to initialize. 
+ +// static +bool RedhawkGCInterface::InitializeSubsystems(GCType gcType) +{ + g_pConfig->Construct(); + +#ifdef FEATURE_ETW + MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context.IsEnabled = FALSE; + MICROSOFT_WINDOWS_REDHAWK_GC_PUBLIC_PROVIDER_Context.IsEnabled = FALSE; + + // Register the Redhawk event provider with the system. + RH_ETW_REGISTER_Microsoft_Windows_Redhawk_GC_Private(); + RH_ETW_REGISTER_Microsoft_Windows_Redhawk_GC_Public(); + + MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_Redhawk_GC_PrivateHandle; + MICROSOFT_WINDOWS_REDHAWK_GC_PUBLIC_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_Redhawk_GC_PublicHandle; +#endif // FEATURE_ETW + + InitializeSystemInfo(); + + // Initialize the special EEType used to mark free list entries in the GC heap. + EEType *pFreeObjectType = new EEType(); //@TODO: remove 'new' + pFreeObjectType->InitializeAsGcFreeType(); + + // Place the pointer to this type in a global cell (typed as the structurally equivalent MethodTable + // that the GC understands). + g_pFreeObjectMethodTable = (MethodTable *)pFreeObjectType; + + // Set the GC heap type. + bool fUseServerGC = (gcType == GCType_Server); + GCHeap::InitializeHeapType(fUseServerGC); + + // Create the GC heap itself. + GCHeap *pGCHeap = GCHeap::CreateGCHeap(); + if (!pGCHeap) + return false; + + // Initialize the GC subsystem. + HRESULT hr = pGCHeap->Initialize(); + if (FAILED(hr)) + return false; + + if (!FinalizerThread::Initialize()) + return false; + + // Initialize HandleTable. + if (!Ref_Initialize()) + return false; + + return true; +} + +// Allocate an object on the GC heap. +// pThread - current Thread +// cbSize - size in bytes of the final object +// uFlags - GC type flags (see gc.h GC_ALLOC_*) +// pEEType - type of the object +// Returns a pointer to the object allocated or NULL on failure. 
+ +// static +void* RedhawkGCInterface::Alloc(Thread *pThread, UIntNative cbSize, UInt32 uFlags, EEType *pEEType) +{ + ASSERT(GCHeap::UseAllocationContexts()); + ASSERT(!pThread->IsDoNotTriggerGcSet()); + + // Save the EEType for instrumentation purposes. + SetLastAllocEEType(pEEType); + + Object * pObject; +#ifdef FEATURE_64BIT_ALIGNMENT + if (uFlags & GC_ALLOC_ALIGN8) + pObject = GCHeap::GetGCHeap()->AllocAlign8(pThread->GetAllocContext(), cbSize, uFlags); + else +#endif // FEATURE_64BIT_ALIGNMENT + pObject = GCHeap::GetGCHeap()->Alloc(pThread->GetAllocContext(), cbSize, uFlags); + + // NOTE: we cannot call PublishObject here because the object isn't initialized! + + return pObject; +} + +// returns the object pointer for caller's convenience +COOP_PINVOKE_HELPER(void*, RhpPublishObject, (void* pObject, UIntNative cbSize)) +{ + ASSERT(cbSize >= LARGE_OBJECT_SIZE); + GCHeap::GetGCHeap()->PublishObject((BYTE*)pObject); + return pObject; +} + +#if 0 // @TODO: This is unused, why is it here? + +// Allocate an object on the large GC heap. Used when you want to force an allocation on the large heap +// that wouldn't normally go there (e.g. objects containing double fields). +// cbSize - size in bytes of the final object +// uFlags - GC type flags (see gc.h GC_ALLOC_*) +// Returns a pointer to the object allocated or NULL on failure. + +// static +void* RedhawkGCInterface::AllocLarge(UIntNative cbSize, UInt32 uFlags) +{ + ASSERT(!GetThread()->IsDoNotTriggerGcSet()); + Object * pObject = GCHeap::GetGCHeap()->AllocLHeap(cbSize, uFlags); + // NOTE: we cannot call PublishObject here because the object isn't initialized! + return pObject; +} +#endif + +// static +void RedhawkGCInterface::InitAllocContext(alloc_context * pAllocContext) +{ + // NOTE: This method is currently unused because the thread's alloc_context is initialized via + // static initialization of tls_CurrentThread. 
If the initial contents of the alloc_context + // ever change, then a matching change will need to be made to the tls_CurrentThread static + // initializer. + + pAllocContext->init(); +} + +// static +void RedhawkGCInterface::ReleaseAllocContext(alloc_context * pAllocContext) +{ + GCHeap::GetGCHeap()->FixAllocContext(pAllocContext, FALSE, NULL, NULL); +} + +// static +void RedhawkGCInterface::WaitForGCCompletion() +{ + ASSERT(GCHeap::IsGCHeapInitialized()); + + GCHeap::GetGCHeap()->WaitUntilGCComplete(); +} + +#endif // !DACCESS_COMPILE + +// +// ----------------------------------------------------------------------------------------------------------- +// +// AppDomain emulation. We don't have these in Redhawk so instead we emulate the bare minimum of the API +// touched by the GC/HandleTable and pretend we have precisely one (default) appdomain. +// + +// Used by DAC, but since this just exposes [System|App]Domain::GetIndex we can just keep a local copy. + +SystemDomain g_sSystemDomain; +AppDomain g_sDefaultDomain; + +#ifndef DACCESS_COMPILE + +// +// ----------------------------------------------------------------------------------------------------------- +// +// Trivial sync block cache. Will no doubt be replaced with a real implementation soon. +// + +SyncBlockCache g_sSyncBlockCache; + + +//------------------------------------------------------------------------------------------------- +// Used only by GC initialization, this initializes the EEType used to mark free entries in the GC heap. It +// should be an array type with a component size of one (so the GC can easily size it as appropriate) and +// should be marked as not containing any references. The rest of the fields don't matter: the GC does not +// query them and the rest of the runtime will never hold a reference to free object. 
+ +void EEType::InitializeAsGcFreeType() +{ + m_usComponentSize = 1; + m_usFlags = ParameterizedEEType; + m_uBaseSize = sizeof(Array) + SYNC_BLOCK_SKEW; +} + +#endif // !DACCESS_COMPILE + +extern void GcEnumObject(PTR_OBJECTREF pObj, UInt32 flags, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc); +extern void GcEnumObjectsConservatively(PTR_OBJECTREF pLowerBound, PTR_OBJECTREF pUpperBound, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc); +extern void GcBulkEnumObjects(PTR_OBJECTREF pObjs, DWORD cObjs, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc); + +struct EnumGcRefContext : GCEnumContext +{ + EnumGcRefCallbackFunc * f; + EnumGcRefScanContext * sc; +}; + +static void EnumGcRefsCallback(void * hCallback, PTR_PTR_VOID pObject, UInt32 flags) +{ + EnumGcRefContext * pCtx = (EnumGcRefContext *)hCallback; + + GcEnumObject((PTR_OBJECTREF)pObject, flags, pCtx->f, pCtx->sc); +} + +// static +void RedhawkGCInterface::EnumGcRefs(ICodeManager * pCodeManager, + MethodInfo * pMethodInfo, + UInt32 codeOffset, + REGDISPLAY * pRegisterSet, + void * pfnEnumCallback, + void * pvCallbackData) +{ + EnumGcRefContext ctx; + ctx.pCallback = EnumGcRefsCallback; + ctx.f = (EnumGcRefCallbackFunc *)pfnEnumCallback; + ctx.sc = (EnumGcRefScanContext *)pvCallbackData; + + pCodeManager->EnumGcRefs(pMethodInfo, + codeOffset, + pRegisterSet, + &ctx); +} + +// static +void RedhawkGCInterface::EnumGcRefsInRegionConservatively(PTR_RtuObjectRef pLowerBound, + PTR_RtuObjectRef pUpperBound, + void * pfnEnumCallback, + void * pvCallbackData) +{ + GcEnumObjectsConservatively((PTR_OBJECTREF)pLowerBound, (PTR_OBJECTREF)pUpperBound, (EnumGcRefCallbackFunc *)pfnEnumCallback, (EnumGcRefScanContext *)pvCallbackData); +} + +// static +void RedhawkGCInterface::EnumGcRef(PTR_RtuObjectRef pRef, GCRefKind kind, void * pfnEnumCallback, void * pvCallbackData) +{ + ASSERT((GCRK_Object == kind) || (GCRK_Byref == kind)); + + DWORD flags = 0; + + if (kind == GCRK_Byref) 
+ { + flags |= GC_CALL_INTERIOR; + } + + GcEnumObject((PTR_OBJECTREF)pRef, flags, (EnumGcRefCallbackFunc *)pfnEnumCallback, (EnumGcRefScanContext *)pvCallbackData); +} + +#ifndef DACCESS_COMPILE + +// static +void RedhawkGCInterface::BulkEnumGcObjRef(PTR_RtuObjectRef pRefs, UInt32 cRefs, void * pfnEnumCallback, void * pvCallbackData) +{ + GcBulkEnumObjects((PTR_OBJECTREF)pRefs, cRefs, (EnumGcRefCallbackFunc *)pfnEnumCallback, (EnumGcRefScanContext *)pvCallbackData); +} + +// static +void RedhawkGCInterface::GarbageCollect(UInt32 uGeneration, UInt32 uMode) +{ + ASSERT(!GetThread()->IsDoNotTriggerGcSet()); + GCHeap::GetGCHeap()->GarbageCollect(uGeneration, FALSE, uMode); +} + +// static +GcSegmentHandle RedhawkGCInterface::RegisterFrozenSection(void * pSection, UInt32 SizeSection) +{ + segment_info seginfo; + + seginfo.pvMem = pSection; + seginfo.ibFirstObject = sizeof(ObjHeader); + seginfo.ibAllocated = SizeSection; + seginfo.ibCommit = seginfo.ibAllocated; + seginfo.ibReserved = seginfo.ibAllocated; + + return (GcSegmentHandle)GCHeap::GetGCHeap()->RegisterFrozenSegment(&seginfo); +} + +// static +void RedhawkGCInterface::UnregisterFrozenSection(GcSegmentHandle segment) +{ +#if 1 // @TODO: only used for unload? + ASSERT(!"NYI - UnregisterFrozenSection"); +#else + GCHeap::GetGCHeap()->UnregisterFrozenSegment((segment_handle)segment); +#endif +} + +EXTERN_C UInt32_BOOL g_fGcStressStarted = UInt32_FALSE; // UInt32_BOOL because asm code reads it +#ifdef FEATURE_GC_STRESS +// static +void RedhawkGCInterface::StressGc() +{ + if (!g_fGcStressStarted || GetThread()->IsSuppressGcStressSet() || GetThread()->IsDoNotTriggerGcSet()) + { + return; + } + + GarbageCollect((UInt32) -1, collection_blocking); +} +#endif // FEATURE_GC_STRESS + + +#ifdef FEATURE_GC_STRESS +COOP_PINVOKE_HELPER(void, RhpInitializeGcStress, ()) +{ + g_fGcStressStarted = UInt32_TRUE; + g_pConfig->SetGCStressLevel(EEConfig::GCSTRESS_INSTR_NGEN); // this is the closest CLR equivalent to what we do. 
+ GetRuntimeInstance()->EnableGcPollStress(); +} +#endif // FEATURE_GC_STRESS + +#endif // !DACCESS_COMPILE + +// +// Support for scanning the GC heap, objects and roots. +// + +// The value of the following globals determines whether a callback is made for every live object at the end +// of a garbage collection. Only one callback/context pair can be active for any given collection, so setting +// these has to be co-ordinated carefully, see RedhawkGCInterface::ScanHeap below. +GcScanObjectFunction g_pfnHeapScan = NULL; // Function to call for every live object at the end of a GC +void * g_pvHeapScanContext = NULL; // User context passed on each call to the function above + +// +// Initiate a full garbage collection and call the specified function with the given context for each object +// that remains alive on the heap at the end of the collection (note that the function will be called while +// the GC still has cooperative threads suspended). +// +// If a GC is in progress (or another caller is in the process of scheduling a similar scan) we'll wait our +// turn and then initiate a further collection. +// +// static +void RedhawkGCInterface::ScanHeap(GcScanObjectFunction pfnScanCallback, void *pContext) +{ +#ifndef DACCESS_COMPILE + // Carefully attempt to set the global callback function (careful in that we won't overwrite another scan + // that's being scheduled or in-progress). If someone beat us to it back off and wait for the + // corresponding GC to complete. + while (FastInterlockCompareExchangePointer(&g_pfnHeapScan, pfnScanCallback, NULL) != NULL) + { + // Wait in pre-emptive mode to avoid stalling another thread that's attempting a collection. + Thread * pCurThread = GetThread(); + ASSERT(pCurThread->PreemptiveGCDisabled()); + pCurThread->EnablePreemptiveGC(); + + // Give the other thread some time to get the collection going. 
+ if (PalSwitchToThread() == 0) + PalSleep(1); + + // Wait for the collection to complete (if the other thread didn't manage to schedule it yet we'll + // just end up going round the loop again). + WaitForGCCompletion(); + + // Come back into co-operative mode. + pCurThread->DisablePreemptiveGC(); + } + + // We should never end up overwriting someone else's callback context when we won the race to set the + // callback function pointer. + ASSERT(g_pvHeapScanContext == NULL); + g_pvHeapScanContext = pContext; + + // Initiate a full garbage collection (0xffffffff == all generations). + GarbageCollect(0xffffffff, collection_blocking); + WaitForGCCompletion(); + + // Release our hold on the global scanning pointers. + g_pvHeapScanContext = NULL; + FastInterlockExchangePointer(&g_pfnHeapScan, NULL); +#else + UNREFERENCED_PARAMETER(pfnScanCallback); + UNREFERENCED_PARAMETER(pContext); +#endif // DACCESS_COMPILE +} + +// Enumerate every reference field in an object, calling back to the specified function with the given context +// for each such reference found. +// static +void RedhawkGCInterface::ScanObject(void *pObject, GcScanObjectFunction pfnScanCallback, void *pContext) +{ +#if !defined(DACCESS_COMPILE) && (defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)) + GCHeap::GetGCHeap()->WalkObject((Object*)pObject, (walk_fn)pfnScanCallback, pContext); +#else + UNREFERENCED_PARAMETER(pObject); + UNREFERENCED_PARAMETER(pfnScanCallback); + UNREFERENCED_PARAMETER(pContext); +#endif // DACCESS_COMPILE +} + +// When scanning for object roots we use existing GC APIs used for object promotion and moving. We use an +// adapter callback to transform the promote function signature used for these methods into something simpler +// that avoids exposing unnecessary implementation details. 
The pointer to a ScanContext normally passed to +// promotion functions is actually a pointer to the structure below which serves to recall the actual function +// pointer and context for the real context. +struct ScanRootsContext +{ + GcScanRootFunction m_pfnCallback; + void * m_pContext; +}; + +// Callback with a EnumGcRefCallbackFunc signature that forwards the call to a callback with a GcScanFunction signature +// and its own context. +void ScanRootsCallbackWrapper(Object** pObject, EnumGcRefScanContext* pContext, DWORD dwFlags) +{ + UNREFERENCED_PARAMETER(dwFlags); + + ScanRootsContext * pRealContext = (ScanRootsContext*)pContext; + + (*pRealContext->m_pfnCallback)((void**)&pObject, pRealContext->m_pContext); +} + +// Enumerate all the object roots located on the specified thread's stack. It is only safe to call this from +// the context of a GC. +// +// static +void RedhawkGCInterface::ScanStackRoots(Thread *pThread, GcScanRootFunction pfnScanCallback, void *pContext) +{ +#ifndef DACCESS_COMPILE + ScanRootsContext sContext; + sContext.m_pfnCallback = pfnScanCallback; + sContext.m_pContext = pContext; + + pThread->GcScanRoots(ScanRootsCallbackWrapper, &sContext); +#else + UNREFERENCED_PARAMETER(pThread); + UNREFERENCED_PARAMETER(pfnScanCallback); + UNREFERENCED_PARAMETER(pContext); +#endif // !DACCESS_COMPILE +} + +// Enumerate all the object roots located in statics. It is only safe to call this from the context of a GC. +// +// static +void RedhawkGCInterface::ScanStaticRoots(GcScanRootFunction pfnScanCallback, void *pContext) +{ +#ifndef DACCESS_COMPILE + ScanRootsContext sContext; + sContext.m_pfnCallback = pfnScanCallback; + sContext.m_pContext = pContext; + + GetRuntimeInstance()->EnumAllStaticGCRefs(ScanRootsCallbackWrapper, &sContext); +#else + UNREFERENCED_PARAMETER(pfnScanCallback); + UNREFERENCED_PARAMETER(pContext); +#endif // !DACCESS_COMPILE +} + +// Enumerate all the object roots located in handle tables. 
It is only safe to call this from the context of a +// GC. +// +// static +void RedhawkGCInterface::ScanHandleTableRoots(GcScanRootFunction pfnScanCallback, void *pContext) +{ +#if !defined(DACCESS_COMPILE) && (defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)) + ScanRootsContext sContext; + sContext.m_pfnCallback = pfnScanCallback; + sContext.m_pContext = pContext; + Ref_ScanPointers(2, 2, (EnumGcRefScanContext*)&sContext, ScanRootsCallbackWrapper); +#else + UNREFERENCED_PARAMETER(pfnScanCallback); + UNREFERENCED_PARAMETER(pContext); +#endif // !DACCESS_COMPILE +} + +#ifndef DACCESS_COMPILE + +// This may only be called from a point at which the runtime is suspended. Currently, this +// is used by the VSD infrastructure on a SyncClean::CleanUp callback from the GC when +// a collection is complete. +bool RedhawkGCInterface::IsScanInProgress() +{ + // Only allow callers that have no RH thread or are in cooperative mode; i.e., don't + // call this in preemptive mode, as the result would not be reliable in multi-threaded + // environments. + ASSERT(GetThread() == NULL || GetThread()->PreemptiveGCDisabled()); + return g_pfnHeapScan != NULL; +} + +// This may only be called from a point at which the runtime is suspended. Currently, this +// is used by the VSD infrastructure on a SyncClean::CleanUp callback from the GC when +// a collection is complete. +GcScanObjectFunction RedhawkGCInterface::GetCurrentScanCallbackFunction() +{ + ASSERT(IsScanInProgress()); + return g_pfnHeapScan; +} + +// This may only be called from a point at which the runtime is suspended. Currently, this +// is used by the VSD infrastructure on a SyncClean::CleanUp callback from the GC when +// a collection is complete. 
+void* RedhawkGCInterface::GetCurrentScanContext() +{ + ASSERT(IsScanInProgress()); + return g_pvHeapScanContext; +} + +UInt32 RedhawkGCInterface::GetGCDescSize(void * pType) +{ + MethodTable * pMT = (MethodTable *)pType; + + if (!pMT->ContainsPointersOrCollectible()) + return 0; + + return (UInt32)CGCDesc::GetCGCDescFromMT(pMT)->GetSize(); +} + +COOP_PINVOKE_HELPER(void, RhpCopyObjectContents, (Object* pobjDest, Object* pobjSrc)) +{ + SIZE_T cbDest = pobjDest->GetSize() - sizeof(ObjHeader); + SIZE_T cbSrc = pobjSrc->GetSize() - sizeof(ObjHeader); + if (cbSrc != cbDest) + return; + + memcpy(pobjDest, pobjSrc, cbDest); + GCHeap::GetGCHeap()->SetCardsAfterBulkCopy((Object**) pobjDest, cbDest); +} + +// Move memory, in a way that is compatible with a move onto the heap, but +// does not require the destination pointer to be on the heap. +EXTERN_C void REDHAWK_CALLCONV RhpBulkWriteBarrier(void* pMemStart, UInt32 cbMemSize); + +COOP_PINVOKE_HELPER(void, RhBulkMoveWithWriteBarrier, (BYTE* pDest, BYTE* pSrc, int cbDest)) +{ + memmove(pDest, pSrc, cbDest); + // Use RhpBulkWriteBarrier here instead of SetCardsAfterBulkCopy as RhpBulkWriteBarrier + // is both faster, and is compatible with a destination that isn't the GC heap. + RhpBulkWriteBarrier(pDest, cbDest); +} + +COOP_PINVOKE_HELPER(void, RhpBox, (Object * pObj, void * pData)) +{ + EEType * pEEType = pObj->get_EEType(); + + // Can box value types only (which also implies no finalizers). + ASSERT(pEEType->get_IsValueType() && !pEEType->HasFinalizer()); + + // cbObject includes ObjHeader (sync block index) and the EEType* field from Object and is rounded up to + // suit GC allocation alignment requirements. cbFields on the other hand is just the raw size of the field + // data. 
+ SIZE_T cbFieldPadding = pEEType->get_ValueTypeFieldPadding(); + SIZE_T cbObject = pEEType->get_BaseSize(); + SIZE_T cbFields = cbObject - (sizeof(ObjHeader) + sizeof(EEType*) + cbFieldPadding); + + UInt8 * pbFields = (UInt8*)pObj + sizeof(EEType*); + + // Copy the unboxed value type data into the new object. + memcpy(pbFields, pData, cbFields); + + // Perform any write barriers necessary for embedded reference fields. + if (pEEType->HasReferenceFields()) + GCHeap::GetGCHeap()->SetCardsAfterBulkCopy((Object**)pbFields, cbFields); +} + +COOP_PINVOKE_HELPER(void, RhUnbox, (Object * pObj, void * pData, EEType * pUnboxToEEType)) +{ + // When unboxing to a Nullable the input object may be null. + if (pObj == NULL) + { + ASSERT(pUnboxToEEType && pUnboxToEEType->IsNullable()); + + // The first field of the Nullable is a Boolean which we must set to false in this case to indicate no + // value is present. + *(Boolean*)pData = FALSE; + + // Clear the value (in case there were GC references we wish to stop reporting). + EEType * pEEType = pUnboxToEEType->GetNullableType(); + SIZE_T cbFieldPadding = pEEType->get_ValueTypeFieldPadding(); + SIZE_T cbFields = pEEType->get_BaseSize() - (sizeof(ObjHeader) + sizeof(EEType*) + cbFieldPadding); + memset((UInt8*)pData + pUnboxToEEType->GetNullableValueOffset(), 0, cbFields); + + return; + } + + EEType * pEEType = pObj->get_EEType(); + + // Can unbox value types only. + ASSERT(pEEType->get_IsValueType()); + + // A special case is that we can unbox a value type T into a Nullable. It's the only case where + // pUnboxToEEType is useful. + ASSERT((pUnboxToEEType == NULL) || pEEType->IsEquivalentTo(pUnboxToEEType) || pUnboxToEEType->IsNullable()); + if (pUnboxToEEType && pUnboxToEEType->IsNullable()) + { + ASSERT(pUnboxToEEType->GetNullableType()->IsEquivalentTo(pEEType)); + + // Set the first field of the Nullable to true to indicate the value is present. 
+ *(Boolean*)pData = TRUE; + + // Adjust the data pointer so that it points at the value field in the Nullable. + pData = (UInt8*)pData + pUnboxToEEType->GetNullableValueOffset(); + } + + SIZE_T cbFieldPadding = pEEType->get_ValueTypeFieldPadding(); + SIZE_T cbFields = pEEType->get_BaseSize() - (sizeof(ObjHeader) + sizeof(EEType*) + cbFieldPadding); + UInt8 * pbFields = (UInt8*)pObj + sizeof(EEType*); + + // Copy the boxed fields into the new location. + memcpy(pData, pbFields, cbFields); + + // Perform any write barriers necessary for embedded reference fields. SetCardsAfterBulkCopy doesn't range + // check the address we pass it and in this case we don't know whether pData really points into the GC + // heap or not. If we call it with an address outside of the GC range we could end up setting a card + // outside of the allocated range of the card table, i.e. corrupt memory. + if (pEEType->HasReferenceFields() && (pData >= g_lowest_address) && (pData < g_highest_address)) + GCHeap::GetGCHeap()->SetCardsAfterBulkCopy((Object**)pData, cbFields); +} + +#endif + +// +// ----------------------------------------------------------------------------------------------------------- +// +// Support for shutdown finalization, which is off by default but can be enabled by the class library. +// + +// If true runtime shutdown will attempt to finalize all finalizable objects (even those still rooted). +bool g_fPerformShutdownFinalization = false; + +// Time to wait (in milliseconds) for the above finalization to complete before giving up and proceeding with +// shutdown. Can specify INFINITE for no timeout. +UInt32 g_uiShutdownFinalizationTimeout = 0; + +// Flag set to true once we've begun shutdown (and before shutdown finalization begins). This is exported to +// the class library so that managed code can tell when it is safe to access other objects from finalizers. 
+bool g_fShutdownHasStarted = false; + +// If the class library has requested it, call this method on clean shutdown (i.e. return from Main) to +// perform a final pass of finalization where all finalizable objects are processed regardless of whether +// they are still rooted. +// static +void RedhawkGCInterface::ShutdownFinalization() +{ + FinalizerThread::WatchDog(); +} + +// Thread static representing the last allocation. +// This is used to log the type information for each slow allocation. +EEType * RedhawkGCInterface::tls_pLastAllocationEEType = NULL; + +// Get the last allocation for this thread. +EEType * RedhawkGCInterface::GetLastAllocEEType() +{ + return tls_pLastAllocationEEType; +} + +// Set the last allocation for this thread. +void RedhawkGCInterface::SetLastAllocEEType(EEType * pEEType) +{ + tls_pLastAllocationEEType = pEEType; +} + +Thread * GetThread() +{ + return ThreadStore::GetCurrentThread(); +} + +void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason) +{ +#ifdef FEATURE_EVENT_TRACE + ETW::GCLog::ETW_GC_INFO Info; + Info.SuspendEE.Reason = reason; + Info.SuspendEE.GcCount = (((reason == SUSPEND_FOR_GC) || (reason == SUSPEND_FOR_GC_PREP)) ? 
+ (UInt32)GCHeap::GetGCHeap()->GetGcCount() : (UInt32)-1); +#endif // FEATURE_EVENT_TRACE + + FireEtwGCSuspendEEBegin_V1(Info.SuspendEE.Reason, Info.SuspendEE.GcCount, GetClrInstanceId()); + + g_TrapReturningThreads = TRUE; + GCHeap::GetGCHeap()->SetGCInProgress(TRUE); + + GetThreadStore()->SuspendAllThreads(GCHeap::GetGCHeap()->GetWaitForGCEvent()); + + FireEtwGCSuspendEEEnd_V1(GetClrInstanceId()); + +#ifdef APP_LOCAL_RUNTIME + // now is a good opportunity to retry starting the finalizer thread + StartFinalizerThread(); +#endif +} + +void GCToEEInterface::RestartEE(bool bFinishedGC) +{ + FireEtwGCRestartEEBegin_V1(GetClrInstanceId()); + + SyncClean::CleanUp(); + + GetThreadStore()->ResumeAllThreads(GCHeap::GetGCHeap()->GetWaitForGCEvent()); + GCHeap::GetGCHeap()->SetGCInProgress(FALSE); + + g_TrapReturningThreads = FALSE; // @TODO: map this to something meaningful in the new algorithm + + FireEtwGCRestartEEEnd_V1(GetClrInstanceId()); +} + +void GCToEEInterface::ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc) +{ + // TODO: Implement - Scan stack roots on given thread +} + +void GCToEEInterface::ScanStaticGCRefsOpportunistically(promote_func* fn, ScanContext* sc) +{ +} + +void GCToEEInterface::GcStartWork(int condemned, int max_gen) +{ + // Invoke any registered callouts for the start of the collection. + RestrictedCallouts::InvokeGcCallouts(GCRC_StartCollection, condemned); +} + +// EE can perform post stack scanning action, while the user threads are still suspended +void GCToEEInterface::AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc) +{ + // Invoke any registered callouts for the end of the mark phase. + RestrictedCallouts::InvokeGcCallouts(GCRC_AfterMarkPhase, condemned); +} + +void GCToEEInterface::GcBeforeBGCSweepWork() +{ +} + +void GCToEEInterface::GcDone(int condemned) +{ + // Invoke any registered callouts for the end of the collection. 
+ RestrictedCallouts::InvokeGcCallouts(GCRC_EndCollection, condemned); +} + +// Thread functions +bool GCToEEInterface::IsPreemptiveGCDisabled(Thread * pThread) +{ + return pThread->PreemptiveGCDisabled(); +} + +void GCToEEInterface::EnablePreemptiveGC(Thread * pThread) +{ + return pThread->EnablePreemptiveGC(); +} + +void GCToEEInterface::DisablePreemptiveGC(Thread * pThread) +{ + pThread->DisablePreemptiveGC(); +} + +void GCToEEInterface::SetGCSpecial(Thread * pThread) +{ + pThread->SetGCSpecial(true); +} + +alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread) +{ + return pThread->GetAllocContext(); +} + +bool GCToEEInterface::CatchAtSafePoint(Thread * pThread) +{ + return pThread->CatchAtSafePoint(); +} + +// does not acquire thread store lock +void GCToEEInterface::AttachCurrentThread() +{ + ThreadStore::AttachCurrentThread(false); +} + +Thread * GCToEEInterface::GetThreadList(Thread * pThread) +{ + ASSERT(!"Intentionally not implemented"); // not used on this runtime + return nullptr; +} + + +bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext) +{ + // TODO: Implement for background GC + return false; +} + +bool IsGCSpecialThread() +{ + // TODO: Implement for background GC + return false; +} + +#ifdef FEATURE_PREMORTEM_FINALIZATION +Thread * g_pFinalizerThread = nullptr; +CLREventStatic* hEventFinalizer = nullptr; +CLREventStatic* hEventFinalizerDone = nullptr; + +// Finalizer method implemented by redhawkm. +extern "C" void __cdecl ProcessFinalizers(); + +// Unmanaged front-end to the finalizer thread. We require this because at the point the GC creates the +// finalizer thread we're still executing the DllMain for RedhawkU. At that point we can't run managed code +// successfully (in particular module initialization code has not run for RedhawkM). 
Instead this method waits +// for the first finalization request (by which time everything must be up and running) and kicks off the +// managed portion of the thread at that point. +UInt32 WINAPI FinalizerStart(void* pContext) +{ + HANDLE hFinalizerEvent = (HANDLE)pContext; + + ThreadStore::AttachCurrentThread(); + Thread * pThread = GetThread(); + + // Disallow gcstress on this thread to work around the current implementation's limitation that it will + // get into an infinite loop if performed on the finalizer thread. + pThread->SetSuppressGcStress(); + + FinalizerThread::SetFinalizerThread(pThread); + + // Wait for a finalization request. + UInt32 uResult = PalWaitForSingleObjectEx(hFinalizerEvent, INFINITE, FALSE); + ASSERT(uResult == WAIT_OBJECT_0); + + // Since we just consumed the request (and the event is auto-reset) we must set the event again so the + // managed finalizer code will immediately start processing the queue when we run it. + UInt32_BOOL fResult = PalSetEvent(hFinalizerEvent); + ASSERT(fResult); + + // Run the managed portion of the finalizer. Until we implement (non-process) shutdown this call will + // never return. + + ProcessFinalizers(); + + ASSERT(!"Finalizer thread should never return"); + return 0; +} + +bool StartFinalizerThread() +{ +#ifdef APP_LOCAL_RUNTIME + + // + // On app-local runtimes, if we're running with the fallback PAL code (meaning we don't have IManagedRuntimeServices) + // then we use the WinRT ThreadPool to create the finalizer thread. This might fail at startup, if the current thread + // hasn't been CoInitialized. So we need to retry this later. We use fFinalizerThreadCreated to track whether we've + // successfully created the finalizer thread yet, and also as a sort of lock to make sure two threads don't try + // to create the finalizer thread at the same time. 
+ // + static volatile Int32 fFinalizerThreadCreated; + + if (FastInterlockExchange(&fFinalizerThreadCreated, 1) != 1) + { + if (!PalStartFinalizerThread(FinalizerStart, (void*)hEventFinalizer->GetOSEvent())) + { + // Need to try again another time... + FastInterlockExchange(&fFinalizerThreadCreated, 0); + } + } + + // We always return true, so the GC can start even if we failed. + return true; + +#else // APP_LOCAL_RUNTIME + + // + // If this isn't an app-local runtime, then the PAL will just call CreateThread directly, which should succeed + // under normal circumstances. + // + if (PalStartFinalizerThread(FinalizerStart, (void*)hEventFinalizer->GetOSEvent())) + return true; + else + return false; + +#endif // APP_LOCAL_RUNTIME +} + +bool FinalizerThread::Initialize() +{ + // Allocate the events the GC expects the finalizer thread to have. The hEventFinalizer event is signalled + // by the GC whenever it completes a collection where it found otherwise unreachable finalizable objects. + // The hEventFinalizerDone event is set by the finalizer thread every time it wakes up and drains the + // queue of finalizable objects. It's mainly used by GC.WaitForPendingFinalizers(). The + // hEventFinalizerToShutDown and hEventShutDownToFinalizer are used to synchronize the main thread and the + // finalizer during the optional final finalization pass at shutdown. + hEventFinalizerDone = new CLREventStatic(); + hEventFinalizerDone->CreateManualEvent(FALSE); + hEventFinalizer = new CLREventStatic(); + hEventFinalizer->CreateAutoEvent(FALSE); + + // Create the finalizer thread itself. 
+ if (!StartFinalizerThread()) + return false; + + return true; +} + +void FinalizerThread::SetFinalizerThread(Thread * pThread) +{ + g_pFinalizerThread = pThread; +} + +void FinalizerThread::EnableFinalization() +{ + // Signal to finalizer thread that there are objects to finalize + hEventFinalizer->Set(); +} + +void FinalizerThread::SignalFinalizationDone(bool fFinalizer) +{ + hEventFinalizerDone->Set(); +} + +bool FinalizerThread::HaveExtraWorkForFinalizer() +{ + return g_pFinalizerThread->HaveExtraWorkForFinalizer(); +} + +bool FinalizerThread::IsCurrentThreadFinalizer() +{ + return GetThread() == g_pFinalizerThread; +} + + +// This is called during runtime shutdown to perform a final finalization run with all potentially +// finalizable objects being finalized (as if their roots had all been cleared). The default behaviour is to +// skip this step, the classlib has to make an explicit request for this functionality and also specifies the +// maximum amount of time it will let the finalization take before we will give up and just let the shutdown +// proceed. +bool FinalizerThread::WatchDog() +{ + // Set the flag indicating that shutdown has started. This is only of interest to managed code running + // finalizers as it lets them know when it is no longer safe to access other objects (which from this + // point on can be finalized even if you hold a reference to them). + g_fShutdownHasStarted = true; + + if (g_fPerformShutdownFinalization) + { +#ifdef BACKGROUND_GC + // Switch off concurrent GC if necessary. + gc_heap::gc_can_use_concurrent = FALSE; + + if (pGenGCHeap->settings.concurrent) + pGenGCHeap->background_gc_wait(); +#endif //BACKGROUND_GC + + DWORD dwTimeout = g_uiShutdownFinalizationTimeout; + + // Wait for any outstanding finalization run to complete. Time this initial operation so that it forms + // part of the overall timeout budget. 
+ DWORD dwStartTime = GetTickCount(); + Wait(dwTimeout); + DWORD dwEndTime = GetTickCount(); + + // In the exceedingly rare case that the tick count wrapped then we'll just reset the timeout to its + // initial value. Otherwise we'll subtract the time we waited from the timeout budget (being mindful + // of the fact that we might have waited slightly longer than the timeout specified). + if (dwTimeout != INFINITE) + { + if (dwEndTime < dwStartTime) + dwTimeout = g_uiShutdownFinalizationTimeout; + else + dwTimeout -= min(dwTimeout, dwEndTime - dwStartTime); + + if (dwTimeout == 0) + return false; + } + + // Inform the GC that all finalizable objects should now be placed in the queue for finalization. FALSE + // here means we don't hold the finalizer lock (so the routine will take it for us). + GCHeap::GetGCHeap()->SetFinalizeQueueForShutdown(FALSE); + + // Wait for the finalizer to process all of these objects. + Wait(dwTimeout); + + if (dwTimeout == INFINITE) + return true; + + // Do a zero timeout wait of the finalizer done event to determine if we timed out above (we don't + // want to modify the signature of GCHeap::FinalizerThreadWait to return this data since that bleeds + // into a CLR visible change to gc.h which is not really worth it for this minor case). + return hEventFinalizerDone->Wait(0, FALSE) == WAIT_OBJECT_0; + } + + return true; +} + +void FinalizerThread::Wait(DWORD timeout, bool allowReentrantWait) +{ + // Can't call this from the finalizer thread itself. + if (!IsCurrentThreadFinalizer()) + { + // Clear any current indication that a finalization pass is finished and wake the finalizer thread up + // (if there's no work to do it'll set the done event immediately). + hEventFinalizerDone->Reset(); + EnableFinalization(); + +#ifdef APP_LOCAL_RUNTIME + // We may have failed to create the finalizer thread at startup. + // Try again now. + StartFinalizerThread(); +#endif + + // Wait for the finalizer thread to get back to us. 
+ hEventFinalizerDone->Wait(timeout, false, allowReentrantWait); + } +} + +#endif // FEATURE_PREMORTEM_FINALIZATION \ No newline at end of file diff --git a/src/Native/Runtime/gcrhscan.cpp b/src/Native/Runtime/gcrhscan.cpp new file mode 100644 index 00000000000..fb1cb5bcb16 --- /dev/null +++ b/src/Native/Runtime/gcrhscan.cpp @@ -0,0 +1,425 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. +// +#include "common.h" + +#include "gcenv.h" +#include "gcscan.h" +#include "gc.h" +#include "objecthandle.h" + +#include "restrictedcallouts.h" + + +#include "palredhawkcommon.h" + +#include "gcrhinterface.h" + +#include "slist.h" +#include "varint.h" +#include "regdisplay.h" +#include "stackframeiterator.h" + +#include "thread.h" + +#include "module.h" +#include "rwlock.h" +#include "runtimeinstance.h" +#include "threadstore.h" + +// todo: remove this hack (brain-dead logging). +#define PalPrintf __noop + +//#define CATCH_GC //catches exception during GC +#ifdef DACCESS_COMPILE +SVAL_IMPL_INIT(LONG, CNameSpace, m_GcStructuresInvalidCnt, 1); +#else //DACCESS_COMPILE +VOLATILE(LONG) CNameSpace::m_GcStructuresInvalidCnt = 1; +#endif //DACCESS_COMPILE + +BOOL CNameSpace::GetGcRuntimeStructuresValid () +{ + _ASSERTE ((LONG)m_GcStructuresInvalidCnt >= 0); + return (LONG)m_GcStructuresInvalidCnt == 0; +} + +#ifndef DACCESS_COMPILE + +VOID CNameSpace::GcStartDoWork() +{ + PalPrintf("CNameSpace::GcStartDoWork\n"); +} + +/* + * Scan for dead weak pointers + */ + +typedef promote_func EnumGcRefCallbackFunc; +typedef ScanContext EnumGcRefScanContext; + +VOID CNameSpace::GcWeakPtrScan( EnumGcRefCallbackFunc* fn, int condemned, int max_gen, EnumGcRefScanContext* sc ) +{ + PalPrintf("CNameSpace::GcWeakPtrScan\n"); + Ref_CheckReachable(condemned, max_gen, (LPARAM)sc); + Ref_ScanDependentHandlesForClearing(condemned, max_gen, sc, fn); +} + +static void CALLBACK 
CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, LPARAM *pExtraInfo, LPARAM lp1, LPARAM lp2) +{ + LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef))); + + Object **pRef = (Object **)pObjRef; + if (!GCHeap::GetGCHeap()->IsPromoted(*pRef)) + { + LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef))); + + *pRef = NULL; + } + else + { + LOG((LF_GC, LL_INFO1000000, "reachable " LOG_OBJECT_CLASS(*pObjRef))); + } +} + +VOID CNameSpace::GcWeakPtrScanBySingleThread( int condemned, int max_gen, EnumGcRefScanContext* sc ) +{ + PalPrintf("CNameSpace::GcWeakPtrScanBySingleThread\n"); + SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(&CheckPromoted, (LPARAM)sc, 0); +} + +VOID CNameSpace::GcShortWeakPtrScan(EnumGcRefCallbackFunc* fn, int condemned, int max_gen, + EnumGcRefScanContext* sc) +{ + PalPrintf("CNameSpace::GcShortWeakPtrScan\n"); + Ref_CheckAlive(condemned, max_gen, (LPARAM)sc); +} + + +void EnumAllStaticGCRefs(EnumGcRefCallbackFunc * fn, EnumGcRefScanContext * sc) +{ + GetRuntimeInstance()->EnumAllStaticGCRefs(fn, sc); +} + + +/* + * Scan all stack roots in this 'namespace' + */ + +VOID CNameSpace::GcScanRoots(EnumGcRefCallbackFunc * fn, int condemned, int max_gen, + EnumGcRefScanContext * sc /*, GCHeap * Hp */) +{ + PalPrintf("CNameSpace::GcScanRoots\n"); + + // STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Phase = %s\n", sc->promotion ? "promote" : "relocate"); + + FOREACH_THREAD(pThread) + { + // Skip "GC Special" threads which are really background workers that will never have any roots. + if (pThread->IsGCSpecial()) + continue; + +#if !defined (ISOLATED_HEAPS) + // @TODO: it is very bizarre that this IsThreadUsingAllocationContextHeap takes a copy of the + // allocation context instead of a reference or a pointer to it. This seems very wasteful given how + // large the alloc_context is. 
+ if (!GCHeap::GetGCHeap()->IsThreadUsingAllocationContextHeap(pThread->GetAllocContext(), + sc->thread_number)) + { + // STRESS_LOG2(LF_GC|LF_GCROOTS, LL_INFO100, "{ Scan of Thread %p (ID = %x) declined by this heap\n", + // pThread, pThread->GetThreadId()); + } + else +#endif + { + STRESS_LOG1(LF_GC|LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p\n", pThread); + sc->thread_under_crawl = pThread; +#if defined(FEATURE_EVENT_TRACE) && !defined(DACCESS_COMPILE) + sc->dwEtwRootKind = kEtwGCRootKindStack; +#endif + pThread->GcScanRoots(fn, sc); + +#if defined(FEATURE_EVENT_TRACE) && !defined(DACCESS_COMPILE) + sc->dwEtwRootKind = kEtwGCRootKindOther; +#endif + STRESS_LOG1(LF_GC|LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p }\n", pThread); + } + } + END_FOREACH_THREAD + + sc->thread_under_crawl = NULL; + + if ((!GCHeap::IsServerHeap() || sc->thread_number == 0) ||(condemned == max_gen && sc->promotion)) + { +#if defined(FEATURE_EVENT_TRACE) && !defined(DACCESS_COMPILE) + sc->dwEtwRootKind = kEtwGCRootStatic; +#endif + EnumAllStaticGCRefs(fn, sc); + } +} + +/* + * Scan all handle roots in this 'namespace' + */ + + +VOID CNameSpace::GcScanHandles (EnumGcRefCallbackFunc* fn, int condemned, int max_gen, + EnumGcRefScanContext* sc) +{ + PalPrintf("CNameSpace::GcScanHandles\n"); + + STRESS_LOG1(LF_GC|LF_GCROOTS, LL_INFO10, "GcScanHandles (Promotion Phase = %d)\n", sc->promotion); + if (sc->promotion) + { + Ref_TracePinningRoots(condemned, max_gen, sc, fn); + Ref_TraceNormalRoots(condemned, max_gen, sc, fn); + } + else + { + Ref_UpdatePointers(condemned, max_gen, sc, fn); + Ref_UpdatePinnedPointers(condemned, max_gen, sc, fn); + Ref_ScanDependentHandlesForRelocation(condemned, max_gen, sc, fn); + } +} + +#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) + +/* + * Scan all handle roots in this 'namespace' for profiling + */ + +VOID CNameSpace::GcScanHandlesForProfilerAndETW (int max_gen, EnumGcRefScanContext* sc) +{ + LOG((LF_GC|LF_GCROOTS, LL_INFO10, 
"Profiler Root Scan Phase, Handles\n")); + Ref_ScanPointersForProfilerAndETW(max_gen, (LPARAM)sc); +} + +/* + * Scan dependent handles in this 'namespace' for profiling + */ +void CNameSpace::GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc) +{ + LIMITED_METHOD_CONTRACT; + + LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, DependentHandles\n")); + Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc); +} + +#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) + +void CNameSpace::GcRuntimeStructuresValid (BOOL bValid) +{ + if (!bValid) + { + LONG result; + result = FastInterlockIncrement(&m_GcStructuresInvalidCnt); + _ASSERTE (result > 0); + } + else + { + LONG result; + result = FastInterlockDecrement(&m_GcStructuresInvalidCnt); + _ASSERTE (result >= 0); + } +} + +void CNameSpace::GcDemote (int condemned, int max_gen, EnumGcRefScanContext* sc) +{ + PalPrintf("CNameSpace::GcDemote\n"); + Ref_RejuvenateHandles (condemned, max_gen, (LPARAM)sc); + if (!GCHeap::IsServerHeap() || sc->thread_number == 0) + SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen); +} + +void CNameSpace::GcPromotionsGranted (int condemned, int max_gen, EnumGcRefScanContext* sc) +{ + PalPrintf("CNameSpace::GcPromotionsGranted\n"); + Ref_AgeHandles(condemned, max_gen, (LPARAM)sc); + if (!GCHeap::IsServerHeap() || sc->thread_number == 0) + SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen); +} + + +void CNameSpace::GcFixAllocContexts (void* arg, void *heap) +{ + PalPrintf("CNameSpace::GcFixAllocContexts\n"); + if (GCHeap::UseAllocationContexts()) + { + FOREACH_THREAD(thread) + { + GCHeap::GetGCHeap()->FixAllocContext(thread->GetAllocContext(), FALSE, arg, heap); + } + END_FOREACH_THREAD + } +} + +void CNameSpace::GcEnumAllocContexts (enum_alloc_context_func* fn) +{ + PalPrintf("CNameSpace::GcEnumAllocContexts\n"); + + if (GCHeap::UseAllocationContexts()) + { + FOREACH_THREAD(thread) + { + (*fn) (thread->GetAllocContext()); + } + 
END_FOREACH_THREAD + } +} + +size_t CNameSpace::AskForMoreReservedMemory (size_t old_size, size_t need_size) +{ + PalPrintf("CNameSpace::AskForMoreReservedMemory\n"); + + return old_size + need_size; +} + +void CNameSpace::VerifyHandleTable(int condemned, int max_gen, EnumGcRefScanContext *sc) +{ + PalPrintf("CNameSpace::VerifyHandleTable\n"); + + Ref_VerifyHandleTable(condemned, max_gen, sc); +} + +#endif //!DACCESS_COMPILE + +void PromoteCarefully(PTR_PTR_Object obj, UInt32 flags, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc) +{ + // + // Sanity check that the flags contain only these three values + // + assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0); + + // + // Sanity check that GC_CALL_INTERIOR FLAG is set + // + assert(flags & GC_CALL_INTERIOR); + + // If the object reference points into the stack, we + // must not promote it, the GC cannot handle these. + if (pSc->thread_under_crawl->IsWithinStackBounds(*obj)) + return; + + fnGcEnumRef(obj, pSc, flags); +} + +void GcEnumObject(PTR_PTR_Object ppObj, UInt32 flags, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc) +{ + // + // Sanity check that the flags contain only these three values + // + assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0); + + // for interior pointers, we optimize the case in which + // it points into the current threads stack area + // + if (flags & GC_CALL_INTERIOR) + PromoteCarefully (ppObj, flags, fnGcEnumRef, pSc); + else + fnGcEnumRef(ppObj, pSc, flags); +} + +void GcBulkEnumObjects(PTR_PTR_Object pObjs, UInt32 cObjs, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc) +{ + PTR_PTR_Object ppObj = pObjs; + + for (UInt32 i = 0; i < cObjs; i++) + fnGcEnumRef(ppObj++, pSc, 0); +} + +// Scan a contiguous range of memory and report everything that looks like it could be a GC reference as a +// pinned interior reference. 
Pinned in case we are wrong (so the GC won't try to move the object and thus +// corrupt the original memory value by relocating it). Interior since we (a) can't easily tell whether a +// real reference is interior or not and interior is the more conservative choice that will work for both and +// (b) because it might not be a real GC reference at all and in that case falsely listing the reference as +// non-interior will cause the GC to make assumptions and crash quite quickly. +void GcEnumObjectsConservatively(PTR_PTR_Object ppLowerBound, PTR_PTR_Object ppUpperBound, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc) +{ + // Only report potential references in the promotion phase. Since we report everything as pinned there + // should be no work to do in the relocation phase. + if (pSc->promotion) + { + for (PTR_PTR_Object ppObj = ppLowerBound; ppObj < ppUpperBound; ppObj++) + { + // Only report values that lie in the GC heap range. This doesn't conclusively guarantee that the + // value is a GC heap reference but it's a cheap check that weeds out a lot of spurious values. + PTR_Object pObj = *ppObj; + if (((PTR_UInt8)pObj >= g_lowest_address) && ((PTR_UInt8)pObj <= g_highest_address)) + fnGcEnumRef(ppObj, pSc, GC_CALL_INTERIOR|GC_CALL_PINNED); + } + } +} + +#ifndef DACCESS_COMPILE + +// +// Dependent handle promotion scan support +// + +// This method is called first during the mark phase. Its job is to set up the context for further scanning +// (remembering the scan parameters the GC gives us and initializing some state variables we use to determine +// whether further scans will be required or not). +// +// This scan is not guaranteed to return complete results due to the GC context in which we are called. In +// particular it is possible, due to either a mark stack overflow or unsynchronized operation in server GC +// mode, that not all reachable objects will be reported as promoted yet. 
However, the operations we perform +// will still be correct and this scan allows us to spot a common optimization where no dependent handles are +// due for retirement in this particular GC. This is an important optimization to take advantage of since +// synchronizing the GC to calculate complete results is a costly operation. +void CNameSpace::GcDhInitialScan(EnumGcRefCallbackFunc* fn, int condemned, int max_gen, EnumGcRefScanContext* sc) +{ + // We allocate space for dependent handle scanning context during Ref_Initialize. Under server GC there + // are actually as many contexts as heaps (and CPUs). Ref_GetDependentHandleContext() retrieves the + // correct context for the current GC thread based on the ScanContext passed to us by the GC. + DhContext *pDhContext = Ref_GetDependentHandleContext(sc); + + // Record GC callback parameters in the DH context so that the GC doesn't continually have to pass the + // same data to each call. + pDhContext->m_pfnPromoteFunction = fn; + pDhContext->m_iCondemned = condemned; + pDhContext->m_iMaxGen = max_gen; + pDhContext->m_pScanContext = sc; + + // Look for dependent handle whose primary has been promoted but whose secondary has not. Promote the + // secondary in those cases. Additionally this scan sets the m_fUnpromotedPrimaries and m_fPromoted state + // flags in the DH context. The m_fUnpromotedPrimaries flag is the most interesting here: if this flag is + // false after the scan then it doesn't matter how many object promotions might currently be missing since + // there are no secondary objects that are currently unpromoted anyway. This is the (hopefully common) + // circumstance under which we don't have to perform any costly additional re-scans. + Ref_ScanDependentHandlesForPromotion(pDhContext); +} + +// This method is called after GcDhInitialScan and before each subsequent scan (GcDhReScan below). It +// determines whether any handles are left that have unpromoted secondaries. 
+bool CNameSpace::GcDhUnpromotedHandlesExist(EnumGcRefScanContext* sc) +{ + // Locate our dependent handle context based on the GC context. + DhContext *pDhContext = Ref_GetDependentHandleContext(sc); + + return pDhContext->m_fUnpromotedPrimaries; +} + +// Perform a re-scan of dependent handles, promoting secondaries associated with newly promoted primaries as +// above. We may still need to call this multiple times since promotion of a secondary late in the table could +// promote a primary earlier in the table. Also, GC graph promotions are not guaranteed to be complete by the +// time the promotion callback returns (the mark stack can overflow). As a result the GC might have to call +// this method in a loop. The scan records state that lets us know when to terminate (no further handles to +// be promoted or no promotions in the last scan). Returns true if at least one object was promoted as a +// result of the scan. +bool CNameSpace::GcDhReScan(EnumGcRefScanContext* sc) +{ + // Locate our dependent handle context based on the GC context. 
+ DhContext *pDhContext = Ref_GetDependentHandleContext(sc); + + return Ref_ScanDependentHandlesForPromotion(pDhContext); +} + +// +// Sized refs support (not supported on Redhawk) +// + +void CNameSpace::GcScanSizedRefs(EnumGcRefCallbackFunc* fn, int condemned, int max_gen, EnumGcRefScanContext* sc) +{ +} + +#endif // !DACCESS_COMPILE diff --git a/src/Native/Runtime/portable.cpp b/src/Native/Runtime/portable.cpp index eb3c69b9f09..e6bd07fe734 100644 --- a/src/Native/Runtime/portable.cpp +++ b/src/Native/Runtime/portable.cpp @@ -251,3 +251,4 @@ COOP_PINVOKE_HELPER(void, RhTypeCast_CheckVectorElemAddr, ()) { ASSERT_UNCONDITIONALLY("NYI"); } + diff --git a/src/Native/Runtime/rhcommon.h b/src/Native/Runtime/rhcommon.h index fa25c09c632..dba0818bdad 100644 --- a/src/Native/Runtime/rhcommon.h +++ b/src/Native/Runtime/rhcommon.h @@ -13,4 +13,6 @@ // // For our DAC build, we precompile gcrhenv.h because it is extremely large (~3MB of text). For non-DAC // builds, we do not do this because the majority of the files have more constrained #includes. 
-// \ No newline at end of file +// + +#include "stdint.h" diff --git a/src/Native/Runtime/threadstore.cpp b/src/Native/Runtime/threadstore.cpp index 6f7a77f92ea..3e789ed3f0e 100644 --- a/src/Native/Runtime/threadstore.cpp +++ b/src/Native/Runtime/threadstore.cpp @@ -268,9 +268,6 @@ void ThreadStore::UnlockThreadStore() m_Lock.ReleaseReadLock(); } -// defined in gcrhenv.cpp -extern SYSTEM_INFO g_SystemInfo; - void ThreadStore::SuspendAllThreads(CLREventStatic* pCompletionEvent) { Thread * pThisThread = GetCurrentThreadIfAvailable(); diff --git a/src/Native/gc/env/gcenv.h b/src/Native/gc/env/gcenv.base.h similarity index 86% rename from src/Native/gc/env/gcenv.h rename to src/Native/gc/env/gcenv.base.h index 1aa4bd2e593..707e3af0b67 100644 --- a/src/Native/gc/env/gcenv.h +++ b/src/Native/gc/env/gcenv.base.h @@ -638,62 +638,10 @@ ClrVirtualProtect( // struct alloc_context; - -class Thread -{ - uint32_t m_fPreemptiveGCDisabled; - uintptr_t m_alloc_context[16]; // Reserve enough space to fix allocation context - - friend class ThreadStore; - Thread * m_pNext; - -public: - Thread() - { - } - - bool PreemptiveGCDisabled() - { - return !!m_fPreemptiveGCDisabled; - } - - void EnablePreemptiveGC() - { - m_fPreemptiveGCDisabled = false; - } - - void DisablePreemptiveGC() - { - m_fPreemptiveGCDisabled = true; - } - - alloc_context* GetAllocContext() - { - return (alloc_context *)&m_alloc_context; - } - - void SetGCSpecial(bool fGCSpecial) - { - } - - bool CatchAtSafePoint() - { - // This is only called by the GC on a background GC worker thread that's explicitly interested in letting - // a foreground GC proceed at that point. So it's always safe to return true. 
- return true; - } -}; +class Thread; Thread * GetThread(); -class ThreadStore -{ -public: - static Thread * GetThreadList(Thread * pThread); - - static void AttachCurrentThread(bool fAcquireThreadStoreLock); -}; - struct ScanContext; typedef void promote_func(PTR_PTR_Object, ScanContext*, unsigned); @@ -743,17 +691,33 @@ class GCToEEInterface static void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2) { } static void SyncBlockCacheDemote(int max_gen) { } static void SyncBlockCachePromotionsGranted(int max_gen) { } + + // Thread functions + static bool IsPreemptiveGCDisabled(Thread * pThread); + static void EnablePreemptiveGC(Thread * pThread); + static void DisablePreemptiveGC(Thread * pThread); + static void SetGCSpecial(Thread * pThread); + static alloc_context * GetAllocContext(Thread * pThread); + static bool CatchAtSafePoint(Thread * pThread); + + // ThreadStore functions + static void AttachCurrentThread(); // does not acquire thread store lock + static Thread * GetThreadList(Thread * pThread); }; class FinalizerThread { public: + static bool Initialize(); static void EnableFinalization(); - static bool HaveExtraWorkForFinalizer() - { - return false; - } + static bool HaveExtraWorkForFinalizer(); + + static bool IsCurrentThreadFinalizer(); + static void Wait(DWORD timeout, bool allowReentrantWait = false); + static bool WatchDog(); + static void SignalFinalizationDone(bool fFinalizer); + static void SetFinalizerThread(Thread * pThread); }; typedef uint32_t (__stdcall *BackgroundCallback)(void* pCallbackContext); @@ -889,6 +853,7 @@ inline void StompWriteBarrierResize(BOOL bReqUpperBoundsCheck) { } +#if 0 // runtime-specific // ----------------------------------------------------------------------------------------------------------- // Config file enumulation // @@ -947,6 +912,7 @@ class EEConfig }; extern EEConfig * g_pConfig; +#endif class CLRConfig { @@ -999,163 +965,6 @@ class CLRConfig } }; - -// 
----------------------------------------------------------------------------------------------------------- -// -// Helper classes expected by the GC -// - -class EEThreadId -{ -public: - EEThreadId(UINT32 uiId) : m_uiId(uiId) {} - bool IsSameThread() - { - return m_uiId == GetCurrentThreadId(); - } - -private: - UINT32 m_uiId; -}; - -#define CRST_REENTRANCY 0 -#define CRST_UNSAFE_SAMELEVEL 0 -#define CRST_UNSAFE_ANYMODE 0 -#define CRST_DEBUGGER_THREAD 0 -#define CRST_DEFAULT 0 - -#define CrstHandleTable 0 - -typedef int CrstFlags; -typedef int CrstType; - -class CrstStatic -{ - CRITICAL_SECTION m_cs; -#ifdef _DEBUG - UINT32 m_holderThreadId; -#endif - -public: - bool InitNoThrow(CrstType eType, CrstFlags eFlags = CRST_DEFAULT) - { - UnsafeInitializeCriticalSection(&m_cs); - return true; - } - - void Destroy() - { - UnsafeDeleteCriticalSection(&m_cs); - } - - void Enter() - { - UnsafeEEEnterCriticalSection(&m_cs); -#ifdef _DEBUG - m_holderThreadId = GetCurrentThreadId(); -#endif - } - - void Leave() - { -#ifdef _DEBUG - m_holderThreadId = 0; -#endif - UnsafeEELeaveCriticalSection(&m_cs); - } - -#ifdef _DEBUG - EEThreadId GetHolderThreadId() - { - return m_holderThreadId; - } - - bool OwnedByCurrentThread() - { - return GetHolderThreadId().IsSameThread(); - } -#endif -}; - -class CLREventStatic -{ -public: - void CreateManualEvent(bool bInitialState); - void CreateAutoEvent(bool bInitialState); - void CreateOSManualEvent(bool bInitialState); - void CreateOSAutoEvent(bool bInitialState); - void CloseEvent(); - bool IsValid() const; - bool Set(); - bool Reset(); - uint32_t Wait(uint32_t dwMilliseconds, bool bAlertable); - -private: - HANDLE m_hEvent; - bool m_fInitialized; -}; - -class CrstHolder -{ - CrstStatic * m_pLock; - -public: - CrstHolder(CrstStatic * pLock) - : m_pLock(pLock) - { - m_pLock->Enter(); - } - - ~CrstHolder() - { - m_pLock->Leave(); - } -}; - -class CrstHolderWithState -{ - CrstStatic * m_pLock; - bool m_fAcquired; - -public: - 
CrstHolderWithState(CrstStatic * pLock, bool fAcquire = true) - : m_pLock(pLock), m_fAcquired(fAcquire) - { - if (fAcquire) - m_pLock->Enter(); - } - - ~CrstHolderWithState() - { - if (m_fAcquired) - m_pLock->Leave(); - } - - void Acquire() - { - if (!m_fAcquired) - { - m_pLock->Enter(); - m_fAcquired = true; - } - } - - void Release() - { - if (m_fAcquired) - { - m_pLock->Leave(); - m_fAcquired = false; - } - } - - CrstStatic * GetValue() - { - return m_pLock; - } -}; - - template class NewHolder { @@ -1257,27 +1066,3 @@ class GCStress }; #endif // STRESS_HEAP -#ifdef VERIFY_HEAP -class SyncBlockCache; - -extern SyncBlockCache g_sSyncBlockCache; - -class SyncBlockCache -{ -public: - static SyncBlockCache *GetSyncBlockCache() { return &g_sSyncBlockCache; } - void GCWeakPtrScan(void *pCallback, LPARAM pCtx, int dummy) - { - UNREFERENCED_PARAMETER(pCallback); - UNREFERENCED_PARAMETER(pCtx); - UNREFERENCED_PARAMETER(dummy); - } - void GCDone(uint32_t demoting, int max_gen) - { - UNREFERENCED_PARAMETER(demoting); - UNREFERENCED_PARAMETER(max_gen); - } - void VerifySyncTableEntry() {} -}; - -#endif // VERIFY_HEAP diff --git a/src/Native/gc/gcobject.h b/src/Native/gc/env/gcenv.object.h similarity index 100% rename from src/Native/gc/gcobject.h rename to src/Native/gc/env/gcenv.object.h diff --git a/src/Native/gc/env/gcenv.sync.h b/src/Native/gc/env/gcenv.sync.h new file mode 100644 index 00000000000..e3abacb07e8 --- /dev/null +++ b/src/Native/gc/env/gcenv.sync.h @@ -0,0 +1,158 @@ +// +// Copyright (c) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+// + +// ----------------------------------------------------------------------------------------------------------- +// +// Helper classes expected by the GC +// +class EEThreadId +{ +public: + EEThreadId(UINT32 uiId) : m_uiId(uiId) {} + bool IsSameThread() + { + return m_uiId == GetCurrentThreadId(); + } + +private: + UINT32 m_uiId; +}; + +#define CRST_REENTRANCY 0 +#define CRST_UNSAFE_SAMELEVEL 0 +#define CRST_UNSAFE_ANYMODE 0 +#define CRST_DEBUGGER_THREAD 0 +#define CRST_DEFAULT 0 + +#define CrstHandleTable 0 + +typedef int CrstFlags; +typedef int CrstType; + +class CrstStatic +{ + CRITICAL_SECTION m_cs; +#ifdef _DEBUG + UINT32 m_holderThreadId; +#endif + +public: + bool InitNoThrow(CrstType eType, CrstFlags eFlags = CRST_DEFAULT) + { + UnsafeInitializeCriticalSection(&m_cs); + return true; + } + + void Destroy() + { + UnsafeDeleteCriticalSection(&m_cs); + } + + void Enter() + { + UnsafeEEEnterCriticalSection(&m_cs); +#ifdef _DEBUG + m_holderThreadId = GetCurrentThreadId(); +#endif + } + + void Leave() + { +#ifdef _DEBUG + m_holderThreadId = 0; +#endif + UnsafeEELeaveCriticalSection(&m_cs); + } + +#ifdef _DEBUG + EEThreadId GetHolderThreadId() + { + return m_holderThreadId; + } + + bool OwnedByCurrentThread() + { + return GetHolderThreadId().IsSameThread(); + } +#endif +}; + +class CrstHolder +{ + CrstStatic * m_pLock; + +public: + CrstHolder(CrstStatic * pLock) + : m_pLock(pLock) + { + m_pLock->Enter(); + } + + ~CrstHolder() + { + m_pLock->Leave(); + } +}; + +class CrstHolderWithState +{ + CrstStatic * m_pLock; + bool m_fAcquired; + +public: + CrstHolderWithState(CrstStatic * pLock, bool fAcquire = true) + : m_pLock(pLock), m_fAcquired(fAcquire) + { + if (fAcquire) + m_pLock->Enter(); + } + + ~CrstHolderWithState() + { + if (m_fAcquired) + m_pLock->Leave(); + } + + void Acquire() + { + if (!m_fAcquired) + { + m_pLock->Enter(); + m_fAcquired = true; + } + } + + void Release() + { + if (m_fAcquired) + { + m_pLock->Leave(); + m_fAcquired = false; + } + } + + 
CrstStatic * GetValue() + { + return m_pLock; + } +}; + +class CLREventStatic +{ +public: + void CreateManualEvent(bool bInitialState); + void CreateAutoEvent(bool bInitialState); + void CreateOSManualEvent(bool bInitialState); + void CreateOSAutoEvent(bool bInitialState); + void CloseEvent(); + bool IsValid() const; + bool Set(); + bool Reset(); + uint32_t Wait(uint32_t dwMilliseconds, bool bAlertable); + +private: + HANDLE m_hEvent; + bool m_fInitialized; +}; diff --git a/src/Native/gc/env/gcenv.windows.cpp b/src/Native/gc/env/gcenv.windows.cpp index 6d2cf6888ae..e2a666cbdee 100644 --- a/src/Native/gc/env/gcenv.windows.cpp +++ b/src/Native/gc/env/gcenv.windows.cpp @@ -102,87 +102,6 @@ void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX) } } -void CLREventStatic::CreateManualEvent(bool bInitialState) -{ - m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL); - m_fInitialized = true; -} - -void CLREventStatic::CreateAutoEvent(bool bInitialState) -{ - m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL); - m_fInitialized = true; -} - -void CLREventStatic::CreateOSManualEvent(bool bInitialState) -{ - m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL); - m_fInitialized = true; -} - -void CLREventStatic::CreateOSAutoEvent (bool bInitialState) -{ - m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL); - m_fInitialized = true; -} - -void CLREventStatic::CloseEvent() -{ - if (m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE) - { - CloseHandle(m_hEvent); - m_hEvent = INVALID_HANDLE_VALUE; - } -} - -bool CLREventStatic::IsValid() const -{ - return m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE; -} - -bool CLREventStatic::Set() -{ - if (!m_fInitialized) - return false; - return !!SetEvent(m_hEvent); -} - -bool CLREventStatic::Reset() -{ - if (!m_fInitialized) - return false; - return !!ResetEvent(m_hEvent); -} - -uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable) -{ - DWORD result = WAIT_FAILED; - - if (m_fInitialized) - { - bool 
disablePreemptive = false; - Thread * pCurThread = GetThread(); - - if (NULL != pCurThread) - { - if (pCurThread->PreemptiveGCDisabled()) - { - pCurThread->EnablePreemptiveGC(); - disablePreemptive = true; - } - } - - result = WaitForSingleObjectEx(m_hEvent, dwMilliseconds, bAlertable); - - if (disablePreemptive) - { - pCurThread->DisablePreemptiveGC(); - } - } - - return result; -} - bool __SwitchToThread(uint32_t dwSleepMSec, uint32_t dwSwitchCount) { SwitchToThread(); @@ -228,8 +147,6 @@ ClrVirtualProtect( MethodTable * g_pFreeObjectMethodTable; -EEConfig * g_pConfig; - GCSystemInfo g_SystemInfo; void InitializeSystemInfo() @@ -246,38 +163,106 @@ int32_t g_TrapReturningThreads; bool g_fFinalizerRunOnShutDown; -__declspec(thread) Thread * pCurrentThread; +void DestroyThread(Thread * pThread) +{ + // TODO: Implement +} -Thread * GetThread() +bool PalHasCapability(PalCapability capability) { - return pCurrentThread; + // TODO: Implement for background GC + return false; +} + +#if 0 // @TODO: Move this runtime-specific code to another file +EEConfig * g_pConfig; + +void CLREventStatic::CreateManualEvent(bool bInitialState) +{ + m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL); + m_fInitialized = true; } -Thread * g_pThreadList = NULL; +void CLREventStatic::CreateAutoEvent(bool bInitialState) +{ + m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL); + m_fInitialized = true; +} -Thread * ThreadStore::GetThreadList(Thread * pThread) +void CLREventStatic::CreateOSManualEvent(bool bInitialState) { - if (pThread == NULL) - return g_pThreadList; + m_hEvent = CreateEventW(NULL, TRUE, bInitialState, NULL); + m_fInitialized = true; +} - return pThread->m_pNext; +void CLREventStatic::CreateOSAutoEvent(bool bInitialState) +{ + m_hEvent = CreateEventW(NULL, FALSE, bInitialState, NULL); + m_fInitialized = true; } -void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock) +void CLREventStatic::CloseEvent() { - // TODO: Locks + if (m_fInitialized && m_hEvent 
!= INVALID_HANDLE_VALUE) + { + CloseHandle(m_hEvent); + m_hEvent = INVALID_HANDLE_VALUE; + } +} - Thread * pThread = new Thread(); - pThread->GetAllocContext()->init(); - pCurrentThread = pThread; +bool CLREventStatic::IsValid() const +{ + return m_fInitialized && m_hEvent != INVALID_HANDLE_VALUE; +} - pThread->m_pNext = g_pThreadList; - g_pThreadList = pThread; +bool CLREventStatic::Set() +{ + if (!m_fInitialized) + return false; + return !!SetEvent(m_hEvent); } -void DestroyThread(Thread * pThread) +bool CLREventStatic::Reset() { - // TODO: Implement + if (!m_fInitialized) + return false; + return !!ResetEvent(m_hEvent); +} + +uint32_t CLREventStatic::Wait(uint32_t dwMilliseconds, bool bAlertable) +{ + DWORD result = WAIT_FAILED; + + if (m_fInitialized) + { + bool disablePreemptive = false; + Thread * pCurThread = GetThread(); + + if (NULL != pCurThread) + { + if (GCToEEInterface::IsPreemptiveGCDisabled(pCurThread)) + { + GCToEEInterface::EnablePreemptiveGC(pCurThread); + disablePreemptive = true; + } + } + + result = WaitForSingleObjectEx(m_hEvent, dwMilliseconds, bAlertable); + + if (disablePreemptive) + { + GCToEEInterface::DisablePreemptiveGC(pCurThread); + } + } + + return result; +} + +__declspec(thread) Thread * pCurrentThread; + +Thread * GetThread() +{ + return pCurrentThread; } void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason) @@ -325,21 +310,20 @@ void FinalizerThread::EnableFinalization() // TODO: Implement for finalization } -bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext) +bool FinalizerThread::HaveExtraWorkForFinalizer() { - // TODO: Implement for background GC return false; } -bool IsGCSpecialThread() +bool PalStartBackgroundGCThread(BackgroundCallback callback, void* pCallbackContext) { // TODO: Implement for background GC return false; } -bool PalHasCapability(PalCapability capability) +bool IsGCSpecialThread() { // TODO: Implement for background GC return false; } - +#endif \ No newline 
at end of file diff --git a/src/Native/gc/gc.cpp b/src/Native/gc/gc.cpp index f66ea47812d..34107e10f3e 100644 --- a/src/Native/gc/gc.cpp +++ b/src/Native/gc/gc.cpp @@ -1442,12 +1442,12 @@ void WaitLongerNoInstru (int i) { // every 8th attempt: Thread *pCurThread = GetThread(); - BOOL bToggleGC = FALSE; + bool bToggleGC = false; if (pCurThread) { - bToggleGC = pCurThread->PreemptiveGCDisabled(); + bToggleGC = GCToEEInterface::IsPreemptiveGCDisabled(pCurThread); if (bToggleGC) - pCurThread->EnablePreemptiveGC(); + GCToEEInterface::EnablePreemptiveGC(pCurThread); } // if we're waiting for gc to finish, we should block immediately @@ -1473,10 +1473,10 @@ void WaitLongerNoInstru (int i) { if (bToggleGC || g_TrapReturningThreads) { - pCurThread->DisablePreemptiveGC(); + GCToEEInterface::DisablePreemptiveGC(pCurThread); if (!bToggleGC) { - pCurThread->EnablePreemptiveGC(); + GCToEEInterface::EnablePreemptiveGC(pCurThread); } } } @@ -1609,13 +1609,13 @@ void WaitLonger (int i // every 8th attempt: Thread *pCurThread = GetThread(); - BOOL bToggleGC = FALSE; + bool bToggleGC = false; if (pCurThread) { - bToggleGC = pCurThread->PreemptiveGCDisabled(); + bToggleGC = GCToEEInterface::IsPreemptiveGCDisabled(pCurThread); if (bToggleGC) { - pCurThread->EnablePreemptiveGC(); + GCToEEInterface::EnablePreemptiveGC(pCurThread); } else { @@ -1657,7 +1657,7 @@ void WaitLonger (int i #ifdef SYNCHRONIZATION_STATS (spin_lock->num_disable_preemptive_w)++; #endif //SYNCHRONIZATION_STATS - pCurThread->DisablePreemptiveGC(); + GCToEEInterface::DisablePreemptiveGC(pCurThread); } } } @@ -1735,13 +1735,13 @@ static void leave_spin_lock (GCSpinLock * spin_lock) BOOL gc_heap::enable_preemptive (Thread* current_thread) { - BOOL cooperative_mode = FALSE; + bool cooperative_mode = false; if (current_thread) { - cooperative_mode = current_thread->PreemptiveGCDisabled(); + cooperative_mode = GCToEEInterface::IsPreemptiveGCDisabled(current_thread); if (cooperative_mode) { - 
current_thread->EnablePreemptiveGC(); + GCToEEInterface::EnablePreemptiveGC(current_thread); } } @@ -1754,7 +1754,7 @@ void gc_heap::disable_preemptive (Thread* current_thread, BOOL restore_cooperati { if (restore_cooperative) { - current_thread->DisablePreemptiveGC(); + GCToEEInterface::DisablePreemptiveGC(current_thread); } } } @@ -24371,7 +24371,7 @@ DWORD __stdcall gc_heap::bgc_thread_stub (void* arg) // since now GC threads can be managed threads. ClrFlsSetThreadType (ThreadType_GC); assert (heap->bgc_thread != NULL); - heap->bgc_thread->SetGCSpecial(true); + GCToEEInterface::SetGCSpecial(heap->bgc_thread); STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY); // We commit the thread's entire stack to ensure we're robust in low memory conditions. @@ -24607,10 +24607,10 @@ void gc_heap::allow_fgc() { assert (bgc_thread == GetThread()); - if (bgc_thread->PreemptiveGCDisabled() && bgc_thread->CatchAtSafePoint()) + if (GCToEEInterface::IsPreemptiveGCDisabled(bgc_thread) && GCToEEInterface::CatchAtSafePoint(bgc_thread)) { - bgc_thread->EnablePreemptiveGC(); - bgc_thread->DisablePreemptiveGC(); + GCToEEInterface::EnablePreemptiveGC(bgc_thread); + GCToEEInterface::DisablePreemptiveGC(bgc_thread); } } @@ -35397,7 +35397,7 @@ void CFinalize::EnterFinalizeLock() { _ASSERTE(dbgOnly_IsSpecialEEThread() || GetThread() == 0 || - GetThread()->PreemptiveGCDisabled()); + GCToEEInterface::IsPreemptiveGCDisabled(GetThread())); retry: if (FastInterlockExchange (&lock, 0) >= 0) @@ -35424,7 +35424,7 @@ void CFinalize::LeaveFinalizeLock() { _ASSERTE(dbgOnly_IsSpecialEEThread() || GetThread() == 0 || - GetThread()->PreemptiveGCDisabled()); + GCToEEInterface::IsPreemptiveGCDisabled(GetThread())); #ifdef _DEBUG lockowner_threadid = (DWORD) -1; diff --git a/src/Native/gc/gc.h b/src/Native/gc/gc.h index 8d178931c67..ebc3f1a3638 100644 --- a/src/Native/gc/gc.h +++ b/src/Native/gc/gc.h @@ -616,6 +616,7 @@ class GCHeap { #ifdef FEATURE_BASICFREEZE // frozen segment management functions virtual 
segment_handle RegisterFrozenSegment(segment_info *pseginfo) = 0; + virtual void UnregisterFrozenSegment(segment_handle seg) = 0; #endif //FEATURE_BASICFREEZE // debug support diff --git a/src/Native/gc/gcee.cpp b/src/Native/gc/gcee.cpp index c8fdef17c04..b7ac37a2a94 100644 --- a/src/Native/gc/gcee.cpp +++ b/src/Native/gc/gcee.cpp @@ -678,16 +678,16 @@ void gc_heap::fire_etw_pin_object_event (BYTE* object, BYTE** ppObject) DWORD gc_heap::user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_out_ms) { Thread* pCurThread = NULL; - BOOL mode = FALSE; + bool mode = false; DWORD dwWaitResult = NOERROR; if (!no_mode_change) { pCurThread = GetThread(); - mode = pCurThread ? pCurThread->PreemptiveGCDisabled() : FALSE; + mode = pCurThread ? GCToEEInterface::IsPreemptiveGCDisabled(pCurThread) : false; if (mode) { - pCurThread->EnablePreemptiveGC(); + GCToEEInterface::EnablePreemptiveGC(pCurThread); } } @@ -695,7 +695,7 @@ DWORD gc_heap::user_thread_wait (CLREvent *event, BOOL no_mode_change, int time_ if (!no_mode_change && mode) { - pCurThread->DisablePreemptiveGC(); + GCToEEInterface::DisablePreemptiveGC(pCurThread); } return dwWaitResult; @@ -790,7 +790,7 @@ DWORD WINAPI gc_heap::rh_bgc_thread_stub(void * pContext) // should not be acquired as part of this operation. This is necessary because this thread is created in // the context of a garbage collection and the lock is already held by the GC. ASSERT(GCHeap::GetGCHeap()->IsGCInProgress()); - ThreadStore::AttachCurrentThread(false); + GCToEEInterface::AttachCurrentThread(); // Inform the GC which Thread* we are. 
pStartContext->m_pRealContext->bgc_thread = GetThread(); @@ -801,4 +801,54 @@ DWORD WINAPI gc_heap::rh_bgc_thread_stub(void * pContext) #endif // BACKGROUND_GC && FEATURE_REDHAWK +#ifdef FEATURE_BASICFREEZE +segment_handle GCHeap::RegisterFrozenSegment(segment_info *pseginfo) +{ + heap_segment * seg = new (nothrow) heap_segment; + if (!seg) + { + return NULL; + } + + BYTE* base_mem = (BYTE*)pseginfo->pvMem; + heap_segment_mem(seg) = base_mem + pseginfo->ibFirstObject; + heap_segment_allocated(seg) = base_mem + pseginfo->ibAllocated; + heap_segment_committed(seg) = base_mem + pseginfo->ibCommit; + heap_segment_reserved(seg) = base_mem + pseginfo->ibReserved; + heap_segment_next(seg) = 0; + heap_segment_used(seg) = heap_segment_allocated(seg); + heap_segment_plan_allocated(seg) = 0; + seg->flags = heap_segment_flags_readonly; + +#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS) + gc_heap* heap = gc_heap::g_heaps[0]; + heap_segment_heap(seg) = heap; +#else + gc_heap* heap = pGenGCHeap; +#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS + + if (heap->insert_ro_segment(seg) == FALSE) + { + delete seg; + return NULL; + } + + return reinterpret_cast< segment_handle >(seg); +} + +void GCHeap::UnregisterFrozenSegment(segment_handle seg) +{ +#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS) + gc_heap* heap = gc_heap::g_heaps[0]; +#else + gc_heap* heap = pGenGCHeap; +#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS + + heap->remove_ro_segment(reinterpret_cast(seg)); +} +#endif // FEATURE_BASICFREEZE + + #endif // !DACCESS_COMPILE + + diff --git a/src/Native/gc/gceewks.cpp b/src/Native/gc/gceewks.cpp index 69efe972580..0d765cdc7a8 100644 --- a/src/Native/gc/gceewks.cpp +++ b/src/Native/gc/gceewks.cpp @@ -8,7 +8,6 @@ #include "common.h" #include "gcenv.h" -#include "gcobject.h" #include "gc.h" #include "gcscan.h" diff --git a/src/Native/gc/gcimpl.h b/src/Native/gc/gcimpl.h index 68ef306b5f8..573e28d75f9 100644 --- a/src/Native/gc/gcimpl.h +++ b/src/Native/gc/gcimpl.h @@ 
-135,8 +135,8 @@ class GCHeap : public ::GCHeap int GetHomeHeapNumber (); bool IsThreadUsingAllocationContextHeap(alloc_context* acontext, int thread_number); int GetNumberOfHeaps (); - void HideAllocContext(alloc_context*); - void RevealAllocContext(alloc_context*); + void HideAllocContext(alloc_context*); + void RevealAllocContext(alloc_context*); static BOOL IsLargeObject(MethodTable *mt); @@ -180,7 +180,7 @@ class GCHeap : public ::GCHeap BOOL IsHeapPointer (void* object, BOOL small_heap_only = FALSE); #ifdef VERIFY_HEAP - void ValidateObjectMember (Object *obj); + void ValidateObjectMember (Object *obj); #endif //_DEBUG PER_HEAP size_t ApproxTotalBytesInUse(BOOL small_heap_only = FALSE); @@ -255,6 +255,7 @@ class GCHeap : public ::GCHeap #ifdef FEATURE_BASICFREEZE // frozen segment management functions virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo); + virtual void UnregisterFrozenSegment(segment_handle seg); #endif // FEATURE_BASICFREEZE void WaitUntilConcurrentGCComplete (); // Use in managd threads @@ -267,7 +268,7 @@ class GCHeap : public ::GCHeap void TemporaryEnableConcurrentGC(); void TemporaryDisableConcurrentGC(); BOOL IsConcurrentGCEnabled(); - + PER_HEAP_ISOLATED CLREvent *WaitForGCEvent; // used for syncing w/GC PER_HEAP_ISOLATED CFinalize* m_Finalize; diff --git a/src/Native/gc/gcscan.cpp b/src/Native/gc/gcscan.cpp index c5f48377584..7729ffac71a 100644 --- a/src/Native/gc/gcscan.cpp +++ b/src/Native/gc/gcscan.cpp @@ -178,11 +178,12 @@ VOID CNameSpace::GcScanRoots(promote_func* fn, int condemned, int max_gen, } Thread* pThread = NULL; - while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL) + while ((pThread = GCToEEInterface::GetThreadList(pThread)) != NULL) { STRESS_LOG2(LF_GC|LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId()); - if (GCHeap::GetGCHeap()->IsThreadUsingAllocationContextHeap(pThread->GetAllocContext(), sc->thread_number)) + if 
(GCHeap::GetGCHeap()->IsThreadUsingAllocationContextHeap( + GCToEEInterface::GetAllocContext(pThread), sc->thread_number)) { sc->thread_under_crawl = pThread; #ifdef FEATURE_EVENT_TRACE @@ -324,9 +325,9 @@ void CNameSpace::GcFixAllocContexts (void* arg, void *heap) if (GCHeap::UseAllocationContexts()) { Thread *thread = NULL; - while ((thread = ThreadStore::GetThreadList(thread)) != NULL) + while ((thread = GCToEEInterface::GetThreadList(thread)) != NULL) { - GCHeap::GetGCHeap()->FixAllocContext(thread->GetAllocContext(), FALSE, arg, heap); + GCHeap::GetGCHeap()->FixAllocContext(GCToEEInterface::GetAllocContext(thread), FALSE, arg, heap); } } } @@ -338,9 +339,9 @@ void CNameSpace::GcEnumAllocContexts (enum_alloc_context_func* fn) if (GCHeap::UseAllocationContexts()) { Thread *thread = NULL; - while ((thread = ThreadStore::GetThreadList(thread)) != NULL) + while ((thread = GCToEEInterface::GetThreadList(thread)) != NULL) { - (*fn) (thread->GetAllocContext()); + (*fn) (GCToEEInterface::GetAllocContext(thread)); } } } diff --git a/src/Native/gc/gcwks.cpp b/src/Native/gc/gcwks.cpp index b8921a58ea1..eb1d3140f70 100644 --- a/src/Native/gc/gcwks.cpp +++ b/src/Native/gc/gcwks.cpp @@ -8,7 +8,6 @@ #include "common.h" #include "gcenv.h" -#include "gcobject.h" #include "gc.h" #include "gcscan.h" diff --git a/src/Native/gc/objecthandle.cpp b/src/Native/gc/objecthandle.cpp index 1969452e3a1..342618515b6 100644 --- a/src/Native/gc/objecthandle.cpp +++ b/src/Native/gc/objecthandle.cpp @@ -18,8 +18,6 @@ #include "gcscan.h" #ifdef FEATURE_REDHAWK -#include "commontypes.h" -#include "commonmacros.h" #include "restrictedcallouts.h" #endif // FEATURE_REDHAWK @@ -477,7 +475,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, LPARAM #endif break; -#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) +#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_REDHAWK) case HNDTYPE_REFCOUNTED: rootFlags |= kEtwGCRootFlagsRefCounted; if (*pRef != NULL)