// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#include <cstdint>
#include <cstddef>
#include <cassert>
#include <memory>
#include <pthread.h>
#include <signal.h>

#include "config.h"
#include "common.h"

#include "gcenv.structs.h"
#include "gcenv.base.h"
#include "gcenv.os.h"
#include "gcenv.unix.inl"
#include "volatile.h"

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#error "sys/time.h required by GC PAL for the time being"
#endif // HAVE_SYS_TIME_H

#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#else
#error "sys/mman.h required by GC PAL"
#endif // HAVE_SYS_MMAN_H

#ifdef __linux__
#include <sys/syscall.h>
#endif // __linux__

#include <time.h>   // nanosleep
#include <sched.h>  // sched_yield
#include <errno.h>
#include <unistd.h> // sysconf
#include "globals.h"
#include "cgroup.h"

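// On ARM and ARM64, the number of online processors can change at runtime as the
// kernel powers cores up and down (e.g. on big.LITTLE systems), so use the
// configured processor count there instead of the online count.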
#if defined(_ARM_) || defined(_ARM64_)
#define SYSCONF_GET_NUMPROCS _SC_NPROCESSORS_CONF
#else
#define SYSCONF_GET_NUMPROCS _SC_NPROCESSORS_ONLN
#endif

// The cached number of logical CPUs observed.
static uint32_t g_logicalCpuCount = 0;

// Helper memory page used by FlushProcessWriteBuffers.
static uint8_t* g_helperPage = 0;

// Mutex to make FlushProcessWriteBuffers thread safe.
static pthread_mutex_t g_flushProcessWriteBuffersMutex;

size_t GetRestrictedPhysicalMemoryLimit();
bool GetPhysicalMemoryUsed(size_t* val);
bool GetCpuLimit(uint32_t* val);

static size_t g_RestrictedPhysicalMemoryLimit = 0;

uint32_t g_pageSizeUnixInl = 0;

// Initialize the interface implementation
// Return:
//  true if it has succeeded, false if it has failed
bool GCToOSInterface::Initialize()
{
    long pageSize = sysconf(_SC_PAGE_SIZE);

    // Fall back to the conventional 4 KiB page size if sysconf fails.
    g_pageSizeUnixInl = uint32_t((pageSize > 0) ? pageSize : 0x1000);

    // Calculate and cache the number of processors on this machine
    int cpuCount = sysconf(SYSCONF_GET_NUMPROCS);
    if (cpuCount == -1)
    {
        return false;
    }

    g_logicalCpuCount = cpuCount;

    assert(g_helperPage == 0);

    g_helperPage = static_cast<uint8_t*>(mmap(0, OS_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));

    if (g_helperPage == MAP_FAILED)
    {
        return false;
    }
    // Verify that the g_helperPage is really aligned to the OS_PAGE_SIZE
    assert((((size_t)g_helperPage) & (OS_PAGE_SIZE - 1)) == 0);

    // Locking the page ensures that it stays in memory during the two mprotect
    // calls in the FlushProcessWriteBuffers below. If the page was unmapped between
    // those calls, they would not have the expected effect of generating IPI.
    int status = mlock(g_helperPage, OS_PAGE_SIZE);

    if (status != 0)
    {
        return false;
    }

    status = pthread_mutex_init(&g_flushProcessWriteBuffersMutex, NULL);
    if (status != 0)
    {
        munlock(g_helperPage, OS_PAGE_SIZE);
        return false;
    }

#if HAVE_MACH_ABSOLUTE_TIME
    kern_return_t machRet;
    if ((machRet = mach_timebase_info(&g_TimebaseInfo)) != KERN_SUCCESS)
    {
        return false;
    }
#endif // HAVE_MACH_ABSOLUTE_TIME

    InitializeCGroup();

    return true;
}

// Shutdown the interface implementation
void GCToOSInterface::Shutdown()
{
    int ret = munlock(g_helperPage, OS_PAGE_SIZE);
    assert(ret == 0);
    ret = pthread_mutex_destroy(&g_flushProcessWriteBuffersMutex);
    assert(ret == 0);

    munmap(g_helperPage, OS_PAGE_SIZE);

    CleanupCGroup();
}

// Get numeric id of the current thread if possible on the
// current platform. It is intended for logging purposes only.
// Return:
//  Numeric id of the current thread, as best we can retrieve it.
uint64_t GCToOSInterface::GetCurrentThreadIdForLogging()
{
#if defined(__linux__)
    return (uint64_t)syscall(SYS_gettid);
#elif HAVE_PTHREAD_GETTHREADID_NP
    return (uint64_t)pthread_getthreadid_np();
#elif HAVE_PTHREAD_THREADID_NP
    unsigned long long tid;
    pthread_threadid_np(pthread_self(), &tid);
    return (uint64_t)tid;
#else
    // Fallback in case we don't know how to get integer thread id on the current platform
    return (uint64_t)pthread_self();
#endif
}

// Get the ID of the current process.
uint32_t GCToOSInterface::GetCurrentProcessId()
{
    return getpid();
}

// Set ideal affinity for the current thread
// Parameters:
//  affinity - ideal processor affinity for the thread
// Return:
//  true if it has succeeded, false if it has failed
bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity)
{
    // TODO(segilles)
    return false;
}

// Get the number of the current processor
uint32_t GCToOSInterface::GetCurrentProcessorNumber()
{
#if HAVE_SCHED_GETCPU
    int processorNumber = sched_getcpu();
    assert(processorNumber != -1);
    return processorNumber;
#else
    return 0;
#endif
}

// Check if the OS supports getting current processor number
bool GCToOSInterface::CanGetCurrentProcessorNumber()
{
    return HAVE_SCHED_GETCPU;
}

// Flush write buffers of processors that are executing threads of the current process
void GCToOSInterface::FlushProcessWriteBuffers()
{
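    // Note: this relies on the kernel broadcasting an inter-processor interrupt (IPI)
    // when page protections change. On newer Linux kernels the membarrier(2) syscall
    // can achieve the same process-wide barrier without the page-protection trick;
    // the mprotect approach is kept here because it is portable across Unix systems.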
    int status = pthread_mutex_lock(&g_flushProcessWriteBuffersMutex);
    assert(status == 0 && "Failed to lock the flushProcessWriteBuffersMutex lock");

    // Changing a helper memory page protection from read/write to no access
    // causes the OS to issue IPI to flush TLBs on all processors. This also
    // results in flushing the processor buffers.
    status = mprotect(g_helperPage, OS_PAGE_SIZE, PROT_READ | PROT_WRITE);
    assert(status == 0 && "Failed to change helper page protection to read / write");

    // Ensure that the page is dirty before we change the protection so that
    // we prevent the OS from skipping the global TLB flush.
    __sync_add_and_fetch((size_t*)g_helperPage, 1);

    status = mprotect(g_helperPage, OS_PAGE_SIZE, PROT_NONE);
    assert(status == 0 && "Failed to change helper page protection to no access");

    status = pthread_mutex_unlock(&g_flushProcessWriteBuffersMutex);
    assert(status == 0 && "Failed to unlock the flushProcessWriteBuffersMutex lock");
}

// Break into a debugger. Uses a compiler intrinsic if one is available,
// otherwise raises a SIGTRAP.
void GCToOSInterface::DebugBreak()
{
    // __has_builtin is only defined by clang. GCC doesn't have a debug
    // trap intrinsic anyway.
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif // __has_builtin

#if __has_builtin(__builtin_debugtrap)
    __builtin_debugtrap();
#else
    raise(SIGTRAP);
#endif
}

// Causes the calling thread to sleep for the specified number of milliseconds
// Parameters:
//  sleepMSec - time to sleep before switching to another thread
void GCToOSInterface::Sleep(uint32_t sleepMSec)
{
    if (sleepMSec == 0)
    {
        return;
    }

    timespec requested;
    requested.tv_sec = sleepMSec / tccSecondsToMilliSeconds;
    requested.tv_nsec = (sleepMSec - requested.tv_sec * tccSecondsToMilliSeconds) * tccMilliSecondsToNanoSeconds;

    // nanosleep returns -1 with errno set to EINTR when interrupted by a signal;
    // resume sleeping for the remaining time in that case.
    timespec remaining;
    while ((nanosleep(&requested, &remaining) == -1) && (errno == EINTR))
    {
        requested = remaining;
    }
}

// Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
// Parameters:
//  switchCount - number of times the YieldThread was called in a loop
void GCToOSInterface::YieldThread(uint32_t switchCount)
{
    int ret = sched_yield();

    // sched_yield never fails on Linux, unclear about other OSes
    assert(ret == 0);
}

// Reserve virtual memory range.
// Parameters:
//  size      - size of the virtual memory range
//  alignment - requested memory alignment, 0 means no specific alignment requested
//  flags     - flags to control special settings like write watching
// Return:
//  Starting virtual address of the reserved range
void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
{
    assert(!(flags & VirtualReserveFlags::WriteWatch) && "WriteWatch not supported on Unix");
    if (alignment == 0)
    {
        alignment = OS_PAGE_SIZE;
    }

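    // Over-allocate by (alignment - OS_PAGE_SIZE) so that an address aligned to
    // the requested boundary is guaranteed to fall inside the mapping (mmap itself
    // only guarantees page alignment). The unused head and tail of the mapping are
    // unmapped below.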
    size_t alignedSize = size + (alignment - OS_PAGE_SIZE);
    void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);

    if (pRetVal != MAP_FAILED)
    {
        void * pAlignedRetVal = (void *)(((size_t)pRetVal + (alignment - 1)) & ~(alignment - 1));
        size_t startPadding = (size_t)pAlignedRetVal - (size_t)pRetVal;
        if (startPadding != 0)
        {
            int ret = munmap(pRetVal, startPadding);
            assert(ret == 0);
        }

        size_t endPadding = alignedSize - (startPadding + size);
        if (endPadding != 0)
        {
            int ret = munmap((void *)((size_t)pAlignedRetVal + size), endPadding);
            assert(ret == 0);
        }

        return pAlignedRetVal;
    }

    // mmap reports failure via MAP_FAILED ((void*)-1), not NULL, so surface it as nullptr.
    return nullptr;
}
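
// Worked example (assuming 4 KiB pages): VirtualReserve(n, 65536, 0) maps
// n + 60 KiB, trims the unaligned head and the excess tail, and returns an
// address on a 64 KiB boundary.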

// Release virtual memory range previously reserved using VirtualReserve
// Parameters:
//  address - starting virtual address
//  size    - size of the virtual memory range
// Return:
//  true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualRelease(void* address, size_t size)
{
    int ret = munmap(address, size);

    return (ret == 0);
}

// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
//  address - starting virtual address
//  size    - size of the virtual memory range
// Return:
//  true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
{
    assert(node == NUMA_NODE_UNDEFINED && "Numa allocation is not ported to local GC on unix yet");
    return mprotect(address, size, PROT_WRITE | PROT_READ) == 0;
}

// Decommit virtual memory range.
// Parameters:
//  address - starting virtual address
//  size    - size of the virtual memory range
// Return:
//  true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
{
    // TODO: This can fail, however the GC does not handle the failure gracefully.
    // Explicitly calling mmap instead of mprotect here makes it much more clear
    // to the operating system that we no longer need these pages. Also, the GC
    // depends on recommitted pages being zeroed out.
    return mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != MAP_FAILED;
}

// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
// longer of interest, but it should not be decommitted.
// Parameters:
//  address - starting virtual address
//  size    - size of the virtual memory range
//  unlock  - true if the memory range should also be unlocked
// Return:
//  true if it has succeeded, false if it has failed
bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
{
    int st;
#if HAVE_MADV_FREE
    // Try to use MADV_FREE if supported. It tells the kernel that the application doesn't
    // need the pages in the range. Freeing the pages can be delayed until a memory pressure
    // occurs.
    st = madvise(address, size, MADV_FREE);
    if (st != 0)
#endif
    {
        // In case the MADV_FREE is not supported, use MADV_DONTNEED
        st = madvise(address, size, MADV_DONTNEED);
    }

    return (st == 0);
}

// Check if the OS supports write watching
bool GCToOSInterface::SupportsWriteWatch()
{
    return false;
}

// Reset the write tracking state for the specified virtual memory range.
// Parameters:
//  address - starting virtual address
//  size    - size of the virtual memory range
void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
{
    assert(!"should never call ResetWriteWatch on Unix");
}

// Retrieve addresses of the pages that are written to in a region of virtual memory
// Parameters:
//  resetState         - true indicates to reset the write tracking state
//  address            - starting virtual address
//  size               - size of the virtual memory range
//  pageAddresses      - buffer that receives an array of page addresses in the memory region
//  pageAddressesCount - on input, size of the lpAddresses array, in array elements
//                       on output, the number of page addresses that are returned in the array.
// Return:
//  true if it has succeeded, false if it has failed
bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
{
    assert(!"should never call GetWriteWatch on Unix");
    return false;
}

// Get size of the largest cache on the processor die
// Parameters:
//  trueSize - true to return true cache size, false to return scaled up size based on
//             the processor architecture
// Return:
//  Size of the cache
size_t GCToOSInterface::GetCacheSizePerLogicalCpu(bool trueSize)
{
    // TODO(segilles) processor detection
    return 0;
}

// Sets the calling thread's affinity to only run on the processor specified
// in the GCThreadAffinity structure.
// Parameters:
//  affinity - The requested affinity for the calling thread. At most one processor
//             can be provided.
// Return:
//  true if setting the affinity was successful, false otherwise.
bool GCToOSInterface::SetThreadAffinity(GCThreadAffinity* affinity)
{
    // [LOCALGC TODO] Thread affinity for unix
    return false;
}

// Boosts the calling thread's thread priority to a level higher than the default
// for new threads.
// Parameters:
//  None.
// Return:
//  true if the priority boost was successful, false otherwise.
bool GCToOSInterface::BoostThreadPriority()
{
    // [LOCALGC TODO] Thread priority for unix
    return false;
}

/*++
Function:
  GetFullAffinityMask

Get affinity mask for the specified number of processors with all
the processors enabled.
--*/
static uintptr_t GetFullAffinityMask(int cpuCount)
{
    // Shifting a uintptr_t by its full bit width is undefined behavior, so
    // saturate the mask when cpuCount covers every bit (e.g. 64 CPUs on a
    // 64-bit platform).
    if (cpuCount >= (int)(sizeof(uintptr_t) * 8))
    {
        return ~(uintptr_t)0;
    }

    return ((uintptr_t)1 << cpuCount) - 1;
}

// Get affinity mask of the current process
// Parameters:
//  processMask - affinity mask for the specified process
//  systemMask  - affinity mask for the system
// Return:
//  true if it has succeeded, false if it has failed
// Remarks:
//  A process affinity mask is a bit vector in which each bit represents the processors that
//  a process is allowed to run on. A system affinity mask is a bit vector in which each bit
//  represents the processors that are configured into a system.
//  A process affinity mask is a subset of the system affinity mask. A process is only allowed
//  to run on the processors configured into a system. Therefore, the process affinity mask cannot
//  specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor.
bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processAffinityMask, uintptr_t* systemAffinityMask)
{
    if (g_logicalCpuCount > 64)
    {
        *processAffinityMask = 0;
        *systemAffinityMask = 0;
        return true;
    }

    uintptr_t systemMask = GetFullAffinityMask(g_logicalCpuCount);

#if HAVE_SCHED_GETAFFINITY

    int pid = getpid();
    cpu_set_t cpuSet;
    int st = sched_getaffinity(pid, sizeof(cpu_set_t), &cpuSet);
    if (st == 0)
    {
        uintptr_t processMask = 0;

        for (uint32_t i = 0; i < g_logicalCpuCount; i++)
        {
            if (CPU_ISSET(i, &cpuSet))
            {
                processMask |= ((uintptr_t)1) << i;
            }
        }

        *processAffinityMask = processMask;
        *systemAffinityMask = systemMask;
        return true;
    }
    else if (errno == EINVAL)
    {
        // There are more processors than can fit in a cpu_set_t;
        // return zero in both masks.
        *processAffinityMask = 0;
        *systemAffinityMask = 0;
        return true;
    }
    else
    {
        // We should not get any of the errors that the sched_getaffinity can return since none
        // of them applies for the current thread, so this is an unexpected kind of failure.
        return false;
    }

#else // HAVE_SCHED_GETAFFINITY

    // There is no API to manage thread affinity, so let's return both affinity masks
    // with all the CPUs on the system set.
    *systemAffinityMask = systemMask;
    *processAffinityMask = systemMask;
    return true;

#endif // HAVE_SCHED_GETAFFINITY
}

// Get number of processors assigned to the current process
// Return:
//  The number of processors
uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
{
    uintptr_t pmask, smask;
    uint32_t cpuLimit;

    if (!GetCurrentProcessAffinityMask(&pmask, &smask))
        return 1;

    pmask &= smask;

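    // Count the set bits with Kernighan's method: pmask &= (pmask - 1) clears
    // the lowest set bit on each iteration, so the loop runs once per processor
    // in the mask.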
    int count = 0;
    while (pmask)
    {
        pmask &= (pmask - 1);
        count++;
    }

    // GetProcessAffinityMask can return pmask=0 and smask=0 on systems with more
    // than 64 processors, which would leave us with a count of 0. Since the GC
    // expects there to be at least one processor to run on (and thus at least one
    // heap), we'll return 64 here if count is 0, since there are likely a ton of
    // processors available in that case. The GC also cannot (currently) handle
    // the case where there are more than 64 processors, so we will return a
    // maximum of 64 here.
    if (count == 0 || count > 64)
        count = 64;

    if (GetCpuLimit(&cpuLimit) && cpuLimit < count)
        count = cpuLimit;

    return count;
}

// Return the size of the user-mode portion of the virtual address space of this process.
// Return:
//  non zero if it has succeeded, 0 if it has failed
size_t GCToOSInterface::GetVirtualMemoryLimit()
{
#ifdef BIT64
    // There is no API to get the total virtual address space size on
    // Unix, so we use a constant value representing 128TB, which is
    // the approximate size of total user virtual address space on
    // the currently supported Unix systems.
    static const uint64_t _128TB = (1ull << 47);
    return _128TB;
#else
    return (size_t)-1;
#endif
}

// Get the physical memory that this process can use.
// Return:
//  non zero if it has succeeded, 0 if it has failed
// Remarks:
//  If a process runs with a restricted memory limit, it returns the limit. If there's no limit
//  specified, it returns amount of actual physical memory.
uint64_t GCToOSInterface::GetPhysicalMemoryLimit()
{
    size_t restricted_limit;
    // The limit was not cached
    if (g_RestrictedPhysicalMemoryLimit == 0)
    {
        restricted_limit = GetRestrictedPhysicalMemoryLimit();
        VolatileStore(&g_RestrictedPhysicalMemoryLimit, restricted_limit);
    }
    restricted_limit = g_RestrictedPhysicalMemoryLimit;

    if (restricted_limit != 0 && restricted_limit != SIZE_T_MAX)
        return restricted_limit;

    long pages = sysconf(_SC_PHYS_PAGES);
    if (pages == -1)
    {
        return 0;
    }

    long pageSize = sysconf(_SC_PAGE_SIZE);
    if (pageSize == -1)
    {
        return 0;
    }

    // Multiply in 64-bit to avoid overflowing a 32-bit long on large-memory systems.
    return (uint64_t)pages * (uint64_t)pageSize;
}

// Get memory status
// Parameters:
//  memory_load         - A number between 0 and 100 that specifies the approximate percentage of physical memory
//                        that is in use (0 indicates no memory use and 100 indicates full memory use).
//  available_physical  - The amount of physical memory currently available, in bytes.
//  available_page_file - The maximum amount of memory the current process can commit, in bytes.
void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file)
{
    if (memory_load != nullptr || available_physical != nullptr)
    {
        uint64_t total = GetPhysicalMemoryLimit();

        uint64_t available = 0;
        uint32_t load = 0;
        size_t used;

        // Get the physical memory in use - from it, we can get the physical memory available.
        // We do this only when we have the total physical memory available.
        if (total > 0 && GetPhysicalMemoryUsed(&used))
        {
            available = total > used ? total - used : 0;
            load = (uint32_t)(((float)used * 100) / (float)total);
        }

        if (memory_load != nullptr)
            *memory_load = load;
        if (available_physical != nullptr)
            *available_physical = available;
    }

    if (available_page_file != nullptr)
        *available_page_file = 0;
}

// Get a high precision performance counter
// Return:
//  The counter value
int64_t GCToOSInterface::QueryPerformanceCounter()
{
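    // A hedged note: clock_gettime(CLOCK_MONOTONIC) would be immune to wall-clock
    // adjustments (e.g. NTP) and is an option if this counter ever needs to be
    // strictly monotonic; gettimeofday is used here for portability.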
    // TODO: This is not a particularly efficient implementation - we certainly could
    // do much more specific platform-dependent versions if we find that this method
    // runs hot. However, most likely it does not.
    struct timeval tv;
    if (gettimeofday(&tv, NULL) == -1)
    {
        assert(!"gettimeofday() failed");
        // TODO (segilles) unconditional asserts
        return 0;
    }
    return (int64_t)tv.tv_sec * (int64_t)tccSecondsToMicroSeconds + (int64_t)tv.tv_usec;
}

// Get a frequency of the high precision performance counter
// Return:
//  The counter frequency
int64_t GCToOSInterface::QueryPerformanceFrequency()
{
    // The counter frequency of gettimeofday is in microseconds.
    return tccSecondsToMicroSeconds;
}

// Get a time stamp with a low precision
// Return:
//  Time stamp in milliseconds
uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
{
    // TODO(segilles) this is pretty naive, we can do better
    uint64_t retval = 0;
    struct timeval tv;
    if (gettimeofday(&tv, NULL) == 0)
    {
        retval = (tv.tv_sec * tccSecondsToMilliSeconds) + (tv.tv_usec / tccMilliSecondsToMicroSeconds);
    }
    else
    {
        assert(!"gettimeofday() failed\n");
    }

    return retval;
}

// Gets the total number of processors on the machine, not taking
// into account current process affinity.
// Return:
//  Number of processors on the machine
uint32_t GCToOSInterface::GetTotalProcessorCount()
{
    // Calculated in GCToOSInterface::Initialize using
    // sysconf(SYSCONF_GET_NUMPROCS)
    return g_logicalCpuCount;
}

bool GCToOSInterface::CanEnableGCNumaAware()
{
    return false;
}

bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
{
    assert(!"Numa has not been ported to local GC for unix");
    return false;
}

bool GCToOSInterface::CanEnableGCCPUGroups()
{
    return false;
}

void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
{
    assert(!"CpuGroup has not been ported to local GC for unix");
}

// Initialize the critical section
void CLRCriticalSection::Initialize()
{
    int st = pthread_mutex_init(&m_cs.mutex, NULL);
    assert(st == 0);
}

// Destroy the critical section
void CLRCriticalSection::Destroy()
{
    int st = pthread_mutex_destroy(&m_cs.mutex);
    assert(st == 0);
}

// Enter the critical section. Blocks until the section can be entered.
void CLRCriticalSection::Enter()
{
    pthread_mutex_lock(&m_cs.mutex);
}

// Leave the critical section
void CLRCriticalSection::Leave()
{
    pthread_mutex_unlock(&m_cs.mutex);
}