| 1 | // Licensed to the .NET Foundation under one or more agreements. |
| 2 | // The .NET Foundation licenses this file to you under the MIT license. |
| 3 | // See the LICENSE file in the project root for more information. |
| 4 | #ifndef __GCENV_BASE_INCLUDED__ |
| 5 | #define __GCENV_BASE_INCLUDED__ |
| 6 | // |
| 7 | // Sets up basic environment for CLR GC |
| 8 | // |
| 9 | |
| 10 | #ifdef _MSC_VER |
| 11 | #include <intrin.h> |
| 12 | #endif // _MSC_VER |
| 13 | |
| 14 | #define REDHAWK_PALIMPORT extern "C" |
| 15 | #define REDHAWK_PALAPI __stdcall |
| 16 | |
| 17 | #ifndef _MSC_VER |
| 18 | #define __stdcall |
| 19 | #ifdef __clang__ |
| 20 | #define __forceinline __attribute__((always_inline)) inline |
| 21 | #else // __clang__ |
| 22 | #define __forceinline inline |
| 23 | #endif // __clang__ |
| 24 | // [LOCALGC TODO] is there a better place for this? |
| 25 | #define NOINLINE __attribute__((noinline)) |
| 26 | #else // !_MSC_VER |
| 27 | #define NOINLINE __declspec(noinline) |
| 28 | #endif // _MSC_VER |
| 29 | |
| 30 | #ifndef SIZE_T_MAX |
| 31 | #define SIZE_T_MAX ((size_t)-1) |
| 32 | #endif |
| 33 | #ifndef SSIZE_T_MAX |
| 34 | #define SSIZE_T_MAX ((ptrdiff_t)(SIZE_T_MAX / 2)) |
| 35 | #endif |
| 36 | |
| 37 | #ifndef _INC_WINDOWS |
| 38 | // ----------------------------------------------------------------------------------------------------------- |
| 39 | // |
| 40 | // Aliases for Win32 types |
| 41 | // |
| 42 | |
| 43 | typedef int BOOL; |
| 44 | typedef uint32_t DWORD; |
| 45 | typedef uint64_t DWORD64; |
| 46 | typedef uint32_t ULONG; |
| 47 | |
| 48 | // ----------------------------------------------------------------------------------------------------------- |
| 49 | // HRESULT subset. |
| 50 | |
| 51 | #ifdef PLATFORM_UNIX |
| 52 | typedef int32_t HRESULT; |
| 53 | #else |
| 54 | // this must exactly match the typedef used by windows.h |
| 55 | typedef long HRESULT; |
| 56 | #endif |
| 57 | |
| 58 | #define SUCCEEDED(_hr) ((HRESULT)(_hr) >= 0) |
| 59 | #define FAILED(_hr) ((HRESULT)(_hr) < 0) |
| 60 | |
| 61 | inline HRESULT HRESULT_FROM_WIN32(unsigned long x) |
| 62 | { |
| 63 | return (HRESULT)(x) <= 0 ? (HRESULT)(x) : (HRESULT) (((x) & 0x0000FFFF) | (7 << 16) | 0x80000000); |
| 64 | } |
| 65 | |
| 66 | #define S_OK 0x0 |
| 67 | #define E_FAIL 0x80004005 |
| 68 | #define E_OUTOFMEMORY 0x8007000E |
| 69 | #define COR_E_EXECUTIONENGINE 0x80131506 |
| 70 | |
| 71 | #define NOERROR 0x0 |
| 72 | #define ERROR_TIMEOUT 1460 |
| 73 | |
| 74 | #define TRUE true |
| 75 | #define FALSE false |
| 76 | |
| 77 | #define CALLBACK __stdcall |
| 78 | #define FORCEINLINE __forceinline |
| 79 | |
| 80 | #define INFINITE 0xFFFFFFFF |
| 81 | |
| 82 | #define ZeroMemory(Destination,Length) memset((Destination),0,(Length)) |
| 83 | |
| 84 | #ifndef _countof |
| 85 | #define _countof(_array) (sizeof(_array)/sizeof(_array[0])) |
| 86 | #endif |
| 87 | |
| 88 | #ifndef min |
| 89 | #define min(a,b) (((a) < (b)) ? (a) : (b)) |
| 90 | #endif |
| 91 | |
| 92 | #ifndef max |
| 93 | #define max(a,b) (((a) > (b)) ? (a) : (b)) |
| 94 | #endif |
| 95 | |
| 96 | #define C_ASSERT(cond) static_assert( cond, #cond ) |
| 97 | |
| 98 | #define UNREFERENCED_PARAMETER(P) (void)(P) |
| 99 | |
| 100 | #ifdef PLATFORM_UNIX |
| 101 | #define _vsnprintf_s(string, sizeInBytes, count, format, args) vsnprintf(string, sizeInBytes, format, args) |
| 102 | #define sprintf_s snprintf |
| 103 | #define swprintf_s swprintf |
| 104 | #define _snprintf_s(string, sizeInBytes, count, format, ...) \ |
| 105 | snprintf(string, sizeInBytes, format, ## __VA_ARGS__) |
| 106 | #endif |
| 107 | |
| 108 | #ifdef UNICODE |
| 109 | #define _tcslen wcslen |
| 110 | #define _tcscpy wcscpy |
| 111 | #define _stprintf_s swprintf_s |
| 112 | #define _tfopen _wfopen |
| 113 | #else |
| 114 | #define _tcslen strlen |
| 115 | #define _tcscpy strcpy |
| 116 | #define _stprintf_s sprintf_s |
| 117 | #define _tfopen fopen |
| 118 | #endif |
| 119 | |
| 120 | #define WINAPI __stdcall |
| 121 | |
| 122 | typedef DWORD (WINAPI *PTHREAD_START_ROUTINE)(void* lpThreadParameter); |
| 123 | |
| 124 | #define WAIT_OBJECT_0 0 |
| 125 | #define WAIT_TIMEOUT 258 |
| 126 | #define WAIT_FAILED 0xFFFFFFFF |
| 127 | |
| 128 | #if defined(_MSC_VER) |
| 129 | #if defined(_ARM_) |
| 130 | |
| 131 | __forceinline void YieldProcessor() { } |
| 132 | extern "C" void __emit(const unsigned __int32 opcode); |
| 133 | #pragma intrinsic(__emit) |
| 134 | #define MemoryBarrier() { __emit(0xF3BF); __emit(0x8F5F); } |
| 135 | |
| 136 | #elif defined(_ARM64_) |
| 137 | |
| 138 | extern "C" void __yield(void); |
| 139 | #pragma intrinsic(__yield) |
| 140 | __forceinline void YieldProcessor() { __yield();} |
| 141 | |
| 142 | extern "C" void __dmb(const unsigned __int32 _Type); |
| 143 | #pragma intrinsic(__dmb) |
| 144 | #define MemoryBarrier() { __dmb(_ARM64_BARRIER_SY); } |
| 145 | |
| 146 | #elif defined(_AMD64_) |
| 147 | |
| 148 | extern "C" void |
| 149 | _mm_pause ( |
| 150 | void |
| 151 | ); |
| 152 | |
| 153 | extern "C" void |
| 154 | _mm_mfence ( |
| 155 | void |
| 156 | ); |
| 157 | |
| 158 | #pragma intrinsic(_mm_pause) |
| 159 | #pragma intrinsic(_mm_mfence) |
| 160 | |
| 161 | #define YieldProcessor _mm_pause |
| 162 | #define MemoryBarrier _mm_mfence |
| 163 | |
| 164 | #elif defined(_X86_) |
| 165 | |
| 166 | #define YieldProcessor() __asm { rep nop } |
| 167 | #define MemoryBarrier() MemoryBarrierImpl() |
| 168 | __forceinline void MemoryBarrierImpl() |
| 169 | { |
| 170 | int32_t Barrier; |
| 171 | __asm { |
| 172 | xchg Barrier, eax |
| 173 | } |
| 174 | } |
| 175 | |
| 176 | #else // !_ARM_ && !_AMD64_ && !_X86_ |
| 177 | #error Unsupported architecture |
| 178 | #endif |
| 179 | #else // _MSC_VER |
| 180 | |
| 181 | // Only clang defines __has_builtin, so we first test for a GCC define |
| 182 | // before using __has_builtin. |
| 183 | |
#if defined(__i386__) || defined(__x86_64__)

// Some GCC versions do not define __has_builtin at all; make it evaluate to
// "no" on those compilers so the feature tests below stay well-formed.
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

// clang added this intrinsic in 3.8; gcc added it by 4.7.1.
// NOTE: the predefined macro is __GNUC_MINOR__ (with trailing underscores) -
// a bare __GNUC_MINOR always evaluates to 0 in #if expressions, which made
// the previous version check unsatisfiable. Also test ">= 4.7" properly
// rather than requiring both major > 4 and minor > 7.
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) || __has_builtin(__builtin_ia32_pause)
#define YieldProcessor __builtin_ia32_pause
#endif

#if defined(__GNUC__) || __has_builtin(__builtin_ia32_mfence)
// clang has had this intrinsic since at least 3.0
// gcc has had this intrinsic since forever
#define MemoryBarrier __builtin_ia32_mfence
#endif // __has_builtin(__builtin_ia32_mfence)

// If we don't have intrinsics, we can do some inline asm instead.
#ifndef YieldProcessor
#define YieldProcessor() asm volatile ("pause")
#endif // YieldProcessor

#ifndef MemoryBarrier
#define MemoryBarrier() asm volatile ("mfence")
#endif // MemoryBarrier

#endif // defined(__i386__) || defined(__x86_64__)
| 208 | |
| 209 | #ifdef __aarch64__ |
| 210 | #define YieldProcessor() asm volatile ("yield") |
| 211 | #define MemoryBarrier __sync_synchronize |
| 212 | #endif // __aarch64__ |
| 213 | |
| 214 | #ifdef __arm__ |
| 215 | #define YieldProcessor() |
| 216 | #define MemoryBarrier __sync_synchronize |
| 217 | #endif // __arm__ |
| 218 | |
| 219 | #endif // _MSC_VER |
| 220 | |
| 221 | #ifdef _MSC_VER |
| 222 | #pragma intrinsic(_BitScanForward) |
| 223 | #pragma intrinsic(_BitScanReverse) |
| 224 | #if _WIN64 |
| 225 | #pragma intrinsic(_BitScanForward64) |
| 226 | #pragma intrinsic(_BitScanReverse64) |
| 227 | #endif |
| 228 | #endif // _MSC_VER |
| 229 | |
| 230 | // Cross-platform wrapper for the _BitScanForward compiler intrinsic. |
| 231 | // A value is unconditionally stored through the bitIndex argument, |
| 232 | // but callers should only rely on it when the function returns TRUE; |
| 233 | // otherwise, the stored value is undefined and varies by implementation |
| 234 | // and hardware platform. |
// Cross-platform wrapper for the _BitScanForward compiler intrinsic.
// Stores the index of the lowest set bit of mask through bitIndex.
// A value is unconditionally stored through bitIndex, but callers should
// only rely on it when the function returns nonzero; otherwise, the stored
// value is undefined and varies by implementation and hardware platform.
inline uint8_t BitScanForward(uint32_t *bitIndex, uint32_t mask)
{
#ifdef _MSC_VER
    return _BitScanForward((unsigned long*)bitIndex, mask);
#else // _MSC_VER
    // __builtin_ffs returns 1 + the index of the least significant set bit,
    // or 0 when the argument is zero.
    int onePlusIndex = __builtin_ffs(mask);
    *bitIndex = static_cast<uint32_t>(onePlusIndex - 1);
    // Testing the mask (rather than onePlusIndex) against zero lets both
    // GCC and Clang generate better, smaller code.
    return mask != 0;
#endif // _MSC_VER
}
| 247 | |
| 248 | // Cross-platform wrapper for the _BitScanForward64 compiler intrinsic. |
| 249 | // A value is unconditionally stored through the bitIndex argument, |
| 250 | // but callers should only rely on it when the function returns TRUE; |
| 251 | // otherwise, the stored value is undefined and varies by implementation |
| 252 | // and hardware platform. |
// Cross-platform wrapper for the _BitScanForward64 compiler intrinsic.
// Stores the index of the lowest set bit of mask through bitIndex.
// A value is unconditionally stored through bitIndex, but callers should
// only rely on it when the function returns nonzero; otherwise, the stored
// value is undefined and varies by implementation and hardware platform.
inline uint8_t BitScanForward64(uint32_t *bitIndex, uint64_t mask)
{
#ifdef _MSC_VER
#if _WIN64
    return _BitScanForward64((unsigned long*)bitIndex, mask);
#else
    // 32-bit MSVC has no 64-bit intrinsic; scan the low word first and
    // fall back to the high word, biasing its index by 32.
    uint32_t loWord = static_cast<uint32_t>(mask);
    uint8_t found = BitScanForward(bitIndex, loWord);
    if (found == 0)
    {
        uint32_t hiWord = static_cast<uint32_t>(mask >> 32);
        uint32_t hiIndex = 0;
        found = BitScanForward(&hiIndex, hiWord);
        if (found != 0)
        {
            *bitIndex = hiIndex + 32;
        }
    }
    return found;
#endif // _WIN64
#else
    // __builtin_ffsll returns 1 + the index of the least significant set bit,
    // or 0 when the argument is zero.
    int onePlusIndex = __builtin_ffsll(mask);
    *bitIndex = static_cast<uint32_t>(onePlusIndex - 1);
    // Testing the mask (rather than onePlusIndex) against zero lets both
    // GCC and Clang generate better, smaller code.
    return mask != 0;
#endif // _MSC_VER
}
| 285 | |
| 286 | // Cross-platform wrapper for the _BitScanReverse compiler intrinsic. |
// Cross-platform wrapper for the _BitScanReverse compiler intrinsic.
// Stores the index of the highest set bit of mask through bitIndex; the
// stored value is only meaningful when the function returns nonzero
// (i.e. when mask != 0).
inline uint8_t BitScanReverse(uint32_t *bitIndex, uint32_t mask)
{
#ifdef _MSC_VER
    return _BitScanReverse((unsigned long*)bitIndex, mask);
#else // _MSC_VER
    // Use __builtin_clz (operates on unsigned int) rather than __builtin_clzl:
    // on LP64 platforms unsigned long is 64 bits wide, so __builtin_clzl would
    // count 32 extra leading zeros and "31 - lzcount" would yield a bogus
    // (wrapped-negative) index for every input.
    //
    // The result of __builtin_clz is undefined when mask is zero,
    // but it's still OK to call the intrinsic in that case (just don't use the output).
    // Unconditionally calling the intrinsic in this way allows the compiler to
    // emit branchless code for this function when possible (depending on how the
    // intrinsic is implemented for the target platform).
    int lzcount = __builtin_clz(mask);
    *bitIndex = static_cast<uint32_t>(31 - lzcount);
    // Checking the mask against zero generates better, smaller code on both
    // GCC and Clang than testing the intrinsic's result.
    return mask != 0;
#endif // _MSC_VER
}
| 302 | |
| 303 | // Cross-platform wrapper for the _BitScanReverse64 compiler intrinsic. |
// Cross-platform wrapper for the _BitScanReverse64 compiler intrinsic.
// Stores the index of the highest set bit of mask through bitIndex; the
// stored value is only meaningful when the function returns nonzero.
inline uint8_t BitScanReverse64(uint32_t *bitIndex, uint64_t mask)
{
#ifdef _MSC_VER
#if _WIN64
    return _BitScanReverse64((unsigned long*)bitIndex, mask);
#else
    // 32-bit MSVC has no 64-bit intrinsic; pick whichever 32-bit half holds
    // the highest set bit and scan just that half with _BitScanReverse,
    // biasing the index by 32 when it came from the upper half.
    uint32_t hiWord = static_cast<uint32_t>(mask >> 32);
    if (hiWord != 0)
    {
        uint8_t found = _BitScanReverse((unsigned long*)bitIndex, hiWord);
        *bitIndex += 32;
        return found;
    }
    return _BitScanReverse((unsigned long*)bitIndex, static_cast<uint32_t>(mask));
#endif // _WIN64
#else
    // The result of __builtin_clzll is undefined when mask is zero, but it's
    // still OK to call the intrinsic in that case (just don't use the output).
    // Calling it unconditionally lets the compiler emit branchless code when
    // the target platform allows it.
    int leadingZeros = __builtin_clzll(mask);
    *bitIndex = static_cast<uint32_t>(63 - leadingZeros);
    // Testing the mask against zero generates better, smaller code on both
    // GCC and Clang than testing the intrinsic's result.
    return mask != 0;
#endif // _MSC_VER
}
| 335 | |
| 336 | // Aligns a size_t to the specified alignment. Alignment must be a power |
| 337 | // of two. |
// Rounds val up to the nearest multiple of alignment. Alignment must be a
// power of two.
inline size_t ALIGN_UP(size_t val, size_t alignment)
{
    // A power of two has exactly one bit set, so x & (x - 1) must be zero.
    assert((alignment & (alignment - 1)) == 0);
    size_t mask = alignment - 1;
    size_t result = (val + mask) & ~mask;
    // Guard against wrap-around when val is close to SIZE_T_MAX.
    assert(result >= val);
    return result;
}
| 346 | |
| 347 | // Aligns a pointer to the specified alignment. Alignment must be a power |
| 348 | // of two. |
| 349 | inline uint8_t* ALIGN_UP(uint8_t* ptr, size_t alignment) |
| 350 | { |
| 351 | size_t as_size_t = reinterpret_cast<size_t>(ptr); |
| 352 | return reinterpret_cast<uint8_t*>(ALIGN_UP(as_size_t, alignment)); |
| 353 | } |
| 354 | |
| 355 | // Aligns a size_t to the specified alignment by rounding down. Alignment must |
| 356 | // be a power of two. |
// Rounds val down to the nearest multiple of alignment. Alignment must be a
// power of two.
inline size_t ALIGN_DOWN(size_t val, size_t alignment)
{
    // A power of two has exactly one bit set, so x & (x - 1) must be zero.
    assert((alignment & (alignment - 1)) == 0);
    return val & ~(alignment - 1);
}
| 364 | |
| 365 | // Aligns a pointer to the specified alignment by rounding down. Alignment |
| 366 | // must be a power of two. |
| 367 | inline uint8_t* ALIGN_DOWN(uint8_t* ptr, size_t alignment) |
| 368 | { |
| 369 | size_t as_size_t = reinterpret_cast<size_t>(ptr); |
| 370 | return reinterpret_cast<uint8_t*>(ALIGN_DOWN(as_size_t, alignment)); |
| 371 | } |
| 372 | |
| 373 | // Aligns a void pointer to the specified alignment by rounding down. Alignment |
| 374 | // must be a power of two. |
| 375 | inline void* ALIGN_DOWN(void* ptr, size_t alignment) |
| 376 | { |
| 377 | size_t as_size_t = reinterpret_cast<size_t>(ptr); |
| 378 | return reinterpret_cast<void*>(ALIGN_DOWN(as_size_t, alignment)); |
| 379 | } |
| 380 | |
// Returns a pseudo-random integer in [0, max). max must be positive:
// rand() % 0 is undefined behavior (division by zero), and a negative
// modulus yields implementation-dependent results.
// NOTE: rand() % max carries slight modulo bias; acceptable for the cheap
// jitter the GC needs here.
inline int GetRandomInt(int max)
{
    assert(max > 0);
    return rand() % max;
}
| 385 | |
// Mirrors the layout of the Win32 PROCESSOR_NUMBER structure: identifies a
// logical processor by its processor group and its number within that group.
typedef struct _PROCESSOR_NUMBER {
    uint16_t Group;    // processor group index
    uint8_t Number;    // processor index within the group
    uint8_t Reserved;  // padding to match the Win32 layout
} PROCESSOR_NUMBER, *PPROCESSOR_NUMBER;
| 391 | |
| 392 | #endif // _INC_WINDOWS |
| 393 | |
| 394 | // ----------------------------------------------------------------------------------------------------------- |
| 395 | // |
| 396 | // The subset of the contract code required by the GC/HandleTable sources. If Redhawk moves to support |
| 397 | // contracts these local definitions will disappear and be replaced by real implementations. |
| 398 | // |
| 399 | |
| 400 | #define LEAF_CONTRACT |
| 401 | #define LIMITED_METHOD_CONTRACT |
| 402 | #define LIMITED_METHOD_DAC_CONTRACT |
| 403 | #define WRAPPER_CONTRACT |
| 404 | #define WRAPPER_NO_CONTRACT |
| 405 | #define STATIC_CONTRACT_LEAF |
| 406 | #define STATIC_CONTRACT_DEBUG_ONLY |
| 407 | #define STATIC_CONTRACT_NOTHROW |
| 408 | #define STATIC_CONTRACT_CAN_TAKE_LOCK |
| 409 | #define STATIC_CONTRACT_SO_TOLERANT |
| 410 | #define STATIC_CONTRACT_GC_NOTRIGGER |
| 411 | #define STATIC_CONTRACT_MODE_COOPERATIVE |
| 412 | #define CONTRACTL |
| 413 | #define CONTRACT(_expr) |
| 414 | #define CONTRACT_VOID |
| 415 | #define THROWS |
| 416 | #define NOTHROW |
| 417 | #define INSTANCE_CHECK |
| 418 | #define MODE_COOPERATIVE |
| 419 | #define MODE_ANY |
| 420 | #define SO_INTOLERANT |
| 421 | #define SO_TOLERANT |
| 422 | #define GC_TRIGGERS |
| 423 | #define GC_NOTRIGGER |
| 424 | #define CAN_TAKE_LOCK |
| 425 | #define SUPPORTS_DAC |
| 426 | #define FORBID_FAULT |
| 427 | #define CONTRACTL_END |
| 428 | #define CONTRACT_END |
| 429 | #define TRIGGERSGC() |
| 430 | #define WRAPPER(_contract) |
| 431 | #define DISABLED(_contract) |
| 432 | #define INJECT_FAULT(_expr) |
| 433 | #define INJECTFAULT_GCHEAP 0x2 |
| 434 | #define FAULT_NOT_FATAL() |
| 435 | #define BEGIN_DEBUG_ONLY_CODE |
| 436 | #define END_DEBUG_ONLY_CODE |
| 437 | #define BEGIN_GETTHREAD_ALLOWED |
| 438 | #define END_GETTHREAD_ALLOWED |
| 439 | #define LEAF_DAC_CONTRACT |
| 440 | #define PRECONDITION(_expr) |
| 441 | #define POSTCONDITION(_expr) |
| 442 | #define RETURN return |
| 443 | #define CONDITIONAL_CONTRACT_VIOLATION(_violation, _expr) |
| 444 | |
| 445 | // ----------------------------------------------------------------------------------------------------------- |
| 446 | // |
| 447 | // Data access macros |
| 448 | // |
| 449 | typedef uintptr_t TADDR; |
| 450 | #define PTR_TO_TADDR(ptr) ((TADDR)(ptr)) |
| 451 | |
| 452 | #define DPTR(type) type* |
| 453 | #define SPTR(type) type* |
| 454 | typedef DPTR(size_t) PTR_size_t; |
| 455 | typedef DPTR(uint8_t) PTR_uint8_t; |
| 456 | |
| 457 | // ----------------------------------------------------------------------------------------------------------- |
| 458 | |
| 459 | #define DATA_ALIGNMENT sizeof(uintptr_t) |
| 460 | #define RAW_KEYWORD(x) x |
| 461 | #define DECLSPEC_ALIGN(x) __declspec(align(x)) |
| 462 | #ifndef _ASSERTE |
| 463 | #define _ASSERTE(_expr) ASSERT(_expr) |
| 464 | #endif |
| 465 | #define CONSISTENCY_CHECK(_expr) ASSERT(_expr) |
| 466 | #define PREFIX_ASSUME(cond) ASSERT(cond) |
| 467 | #define EEPOLICY_HANDLE_FATAL_ERROR(error) ASSERT(!"EEPOLICY_HANDLE_FATAL_ERROR") |
| 468 | #define UI64(_literal) _literal##ULL |
| 469 | |
// Forward declarations of runtime types referenced by the GC sources.
// (A stray, name-less "class ;" declaration previously appeared here; it is
// a syntax error and has been removed.)
class MethodTable;
class Object;
class ArrayBase;
| 474 | |
| 475 | // Various types used to refer to object references or handles. This will get more complex if we decide |
| 476 | // Redhawk wants to wrap object references in the debug build. |
| 477 | typedef DPTR(Object) PTR_Object; |
| 478 | typedef DPTR(PTR_Object) PTR_PTR_Object; |
| 479 | |
| 480 | typedef PTR_Object OBJECTREF; |
| 481 | typedef PTR_PTR_Object PTR_OBJECTREF; |
| 482 | typedef PTR_Object _UNCHECKED_OBJECTREF; |
| 483 | typedef PTR_PTR_Object PTR_UNCHECKED_OBJECTREF; |
| 484 | |
| 485 | // With no object reference wrapping the following macros are very simple. |
| 486 | #define ObjectToOBJECTREF(_obj) (OBJECTREF)(_obj) |
| 487 | #define OBJECTREFToObject(_obj) (Object*)(_obj) |
| 488 | |
| 489 | #define VALIDATEOBJECTREF(_objref) (void)_objref; |
| 490 | |
| 491 | class Thread; |
| 492 | |
// Debug-only query stub: this environment has no notion of "special" EE
// threads, so the answer is always false.
inline bool dbgOnly_IsSpecialEEThread()
{
    return false;
}
| 497 | |
| 498 | #define ClrFlsSetThreadType(type) |
| 499 | |
| 500 | // |
| 501 | // Performance logging |
| 502 | // |
| 503 | |
| 504 | #define COUNTER_ONLY(x) |
| 505 | |
| 506 | //#include "etmdummy.h" |
| 507 | //#define ETW_EVENT_ENABLED(e,f) false |
| 508 | |
namespace ETW
{
    // Minimal stand-in for the ETW GC root-kind enumeration; categorizes
    // where a GC root was found when reporting roots through tracing.
    typedef enum _GC_ROOT_KIND {
        GC_ROOT_STACK = 0,      // thread stack root
        GC_ROOT_FQ = 1,         // finalize queue root
        GC_ROOT_HANDLES = 2,    // handle table root
        GC_ROOT_OLDER = 3,      // older-generation (cross-generational) root
        GC_ROOT_SIZEDREF = 4,   // sized-ref handle root
        GC_ROOT_OVERFLOW = 5    // mark-stack overflow root
    } GC_ROOT_KIND;
};
| 520 | |
// Returns true when val can be represented losslessly in a single unsigned
// byte, i.e. when 0 <= val <= 255.
inline bool FitsInU1(uint64_t val)
{
    return val <= 0xFF;
}
| 525 | |
| 526 | // ----------------------------------------------------------------------------------------------------------- |
| 527 | // |
| 528 | // AppDomain emulation. The we don't have these in Redhawk so instead we emulate the bare minimum of the API |
| 529 | // touched by the GC/HandleTable and pretend we have precisely one (default) appdomain. |
| 530 | // |
| 531 | |
| 532 | #define RH_DEFAULT_DOMAIN_ID 1 |
| 533 | |
// Emulated AppDomain index. This environment pretends there is exactly one
// (default) appdomain, so a default-constructed index always refers to
// RH_DEFAULT_DOMAIN_ID (1).
struct ADIndex
{
    DWORD m_dwIndex;  // appdomain index; the single default domain is id 1

    // Default-construct an index referring to the default domain.
    ADIndex () : m_dwIndex(RH_DEFAULT_DOMAIN_ID) {}
    // Wrap an explicit domain id (explicit to avoid accidental DWORD conversion).
    explicit ADIndex (DWORD id) : m_dwIndex(id) {}
    BOOL operator==(const ADIndex& ad) const { return m_dwIndex == ad.m_dwIndex; }
    BOOL operator!=(const ADIndex& ad) const { return m_dwIndex != ad.m_dwIndex; }
};
| 543 | |
| 544 | #endif // __GCENV_BASE_INCLUDED__ |
| 545 | |