// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"
#if defined(HOST_OS_ANDROID) || defined(HOST_OS_LINUX) ||                     \
    defined(HOST_OS_MACOS)

#include "vm/virtual_memory.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/isolate.h"

// #define VIRTUAL_MEMORY_LOGGING 1
#if defined(VIRTUAL_MEMORY_LOGGING)
#define LOG_INFO(msg, ...) OS::PrintErr(msg, ##__VA_ARGS__)
#else
#define LOG_INFO(msg, ...)
#endif  // defined(VIRTUAL_MEMORY_LOGGING)

namespace dart {

// standard MAP_FAILED causes "error: use of old-style cast" as it
// defines MAP_FAILED as ((void *) -1)
#undef MAP_FAILED
#define MAP_FAILED reinterpret_cast<void*>(-1)

DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);

#if defined(HOST_OS_LINUX)
DECLARE_FLAG(bool, generate_perf_events_symbols);
DECLARE_FLAG(bool, generate_perf_jitdump);
#endif

uword VirtualMemory::page_size_ = 0;

intptr_t VirtualMemory::CalculatePageSize() {
  const intptr_t page_size = getpagesize();
  ASSERT(page_size != 0);
  ASSERT(Utils::IsPowerOfTwo(page_size));
  return page_size;
}

void VirtualMemory::Init() {
  if (page_size_ != 0) {
    // Already initialized.
    return;
  }

  page_size_ = CalculatePageSize();

#if defined(DUAL_MAPPING_SUPPORTED)
// Perf is Linux-specific and the flags aren't defined in Product.
#if defined(HOST_OS_LINUX) && !defined(PRODUCT)
  // Perf interacts strangely with memfds, leading it to sometimes collect
  // garbled return addresses.
  if (FLAG_generate_perf_events_symbols || FLAG_generate_perf_jitdump) {
    LOG_INFO(
        "Dual code mapping disabled to generate perf events or jitdump.\n");
    FLAG_dual_map_code = false;
    return;
  }
#endif

  // Detect a dual-mapping exec permission limitation on some platforms,
  // such as docker containers, and disable dual mapping in that case.
  // Also detect missing support for the memfd_create syscall.
  if (FLAG_dual_map_code) {
    intptr_t size = PageSize();
    intptr_t alignment = 256 * 1024;  // e.g. heap page size.
    VirtualMemory* vm = AllocateAligned(size, alignment, true, "memfd-test");
    if (vm == NULL) {
      LOG_INFO("memfd_create not supported; disabling dual mapping of code.\n");
      FLAG_dual_map_code = false;
      return;
    }
    void* region = reinterpret_cast<void*>(vm->region_.start());
    void* alias = reinterpret_cast<void*>(vm->alias_.start());
    if (region == alias ||
        mprotect(region, size, PROT_READ) != 0 ||  // Remove PROT_WRITE.
        mprotect(alias, size, PROT_READ | PROT_EXEC) != 0) {  // Add PROT_EXEC.
      LOG_INFO("mprotect fails; disabling dual mapping of code.\n");
      FLAG_dual_map_code = false;
    }
    delete vm;
  }
#endif  // defined(DUAL_MAPPING_SUPPORTED)
}

bool VirtualMemory::DualMappingEnabled() {
  return FLAG_dual_map_code;
}

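// Unmaps the page-aligned address range [start, end). A failed munmap is
// fatal, since a leftover mapping could silently alias later allocations.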
static void unmap(uword start, uword end) {
  ASSERT(start <= end);
  uword size = end - start;
  if (size == 0) {
    return;
  }

  if (munmap(reinterpret_cast<void*>(start), size) != 0) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL2("munmap error: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
  }
}

#if defined(DUAL_MAPPING_SUPPORTED)
// Do not leak file descriptors to child processes.
#if !defined(MFD_CLOEXEC)
#define MFD_CLOEXEC 0x0001U
#endif

// Wrapper to call memfd_create syscall.
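// Invoked via syscall() because older C libraries do not provide a
// memfd_create wrapper (glibc gained one in 2.27).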
static inline int memfd_create(const char* name, unsigned int flags) {
#if !defined(__NR_memfd_create)
  errno = ENOSYS;
  return -1;
#else
  return syscall(__NR_memfd_create, name, flags);
#endif
}

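// Maps a size-byte view of fd at an address aligned to `alignment`. mmap only
// guarantees page alignment, so this over-reserves PROT_NONE address space,
// places the real mapping with MAP_FIXED at the first aligned address inside
// the reservation, and trims the unused head and tail. For example (addresses
// illustrative only), a reservation starting at 0x7f0000041000 with alignment
// 0x40000 puts the fixed mapping at 0x7f0000080000.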
static void* MapAligned(int fd,
                        int prot,
                        intptr_t size,
                        intptr_t alignment,
                        intptr_t allocated_size) {
  void* address =
      mmap(NULL, allocated_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  LOG_INFO("mmap(NULL, 0x%" Px ", PROT_NONE, ...): %p\n", allocated_size,
           address);
  if (address == MAP_FAILED) {
    return NULL;
  }

  const uword base = reinterpret_cast<uword>(address);
  const uword aligned_base = Utils::RoundUp(base, alignment);

  // Guarantee the alignment by mapping at a fixed address inside the above
  // mapping. The overlapping region is automatically discarded from the
  // above mapping; the non-overlapping head and tail are unmapped manually.
  address = mmap(reinterpret_cast<void*>(aligned_base), size, prot,
                 MAP_SHARED | MAP_FIXED, fd, 0);
  LOG_INFO("mmap(0x%" Px ", 0x%" Px ", %u, ...): %p\n", aligned_base, size,
           prot, address);
  if (address == MAP_FAILED) {
    unmap(base, base + allocated_size);
    return NULL;
  }
  ASSERT(address == reinterpret_cast<void*>(aligned_base));
  unmap(base, aligned_base);
  unmap(aligned_base + size, base + allocated_size);
  return address;
}
#endif  // defined(DUAL_MAPPING_SUPPORTED)

VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                              intptr_t alignment,
                                              bool is_executable,
                                              const char* name) {
  // When FLAG_write_protect_code is active, code memory (indicated by
  // is_executable = true) is allocated as non-executable and later
  // changed to executable via VirtualMemory::Protect.
  //
  // If FLAG_dual_map_code is active, the executable mapping will be mapped RX
  // immediately and never changes protection until it is eventually unmapped.
  ASSERT(Utils::IsAligned(size, PageSize()));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, PageSize()));
  ASSERT(name != nullptr);
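  // Reserving alignment - PageSize() extra bytes suffices: mmap returns a
  // page-aligned base, and rounding it up to `alignment` consumes at most
  // alignment - PageSize() bytes, so an aligned size-byte block always fits.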
  const intptr_t allocated_size = size + alignment - PageSize();
#if defined(DUAL_MAPPING_SUPPORTED)
  const bool dual_mapping =
      is_executable && FLAG_write_protect_code && FLAG_dual_map_code;
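  // Dual mapping creates two views of the same underlying memfd region: a
  // writable (RW) view for code installation and an executable (RX) alias
  // for execution, so no single mapping is ever writable and executable at
  // the same time.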
  if (dual_mapping) {
    int fd = memfd_create(name, MFD_CLOEXEC);
    if (fd == -1) {
      return NULL;
    }
    if (ftruncate(fd, size) == -1) {
      close(fd);
      return NULL;
    }
    const int region_prot = PROT_READ | PROT_WRITE;
    void* region_ptr =
        MapAligned(fd, region_prot, size, alignment, allocated_size);
    if (region_ptr == NULL) {
      close(fd);
      return NULL;
    }
    MemoryRegion region(region_ptr, size);
    // The alias mapping is RX and stays that way until it is eventually
    // unmapped.
    //
    // DUAL_MAPPING_SUPPORTED is false on HOST_OS_MACOS, hence support for
    // MAP_JIT is not required here.
    const int alias_prot = PROT_READ | PROT_EXEC;
    void* alias_ptr =
        MapAligned(fd, alias_prot, size, alignment, allocated_size);
    close(fd);
    if (alias_ptr == NULL) {
      const uword region_base = reinterpret_cast<uword>(region_ptr);
      unmap(region_base, region_base + size);
      return NULL;
    }
    ASSERT(region_ptr != alias_ptr);
    MemoryRegion alias(alias_ptr, size);
    return new VirtualMemory(region, alias, region);
  }
#endif  // defined(DUAL_MAPPING_SUPPORTED)

  const int prot =
      PROT_READ | PROT_WRITE |
      ((is_executable && !FLAG_write_protect_code) ? PROT_EXEC : 0);

#if defined(DUAL_MAPPING_SUPPORTED)
  // Try to use memfd for single-mapped regions too, so they will have an
  // associated name for memory attribution. Skip if FLAG_dual_map_code is
  // false, which happens if we detected memfd wasn't working in Init above.
  if (FLAG_dual_map_code) {
    int fd = memfd_create(name, MFD_CLOEXEC);
    if (fd == -1) {
      return NULL;
    }
    if (ftruncate(fd, size) == -1) {
      close(fd);
      return NULL;
    }
    void* region_ptr = MapAligned(fd, prot, size, alignment, allocated_size);
    close(fd);
    if (region_ptr == NULL) {
      return NULL;
    }
    MemoryRegion region(region_ptr, size);
    return new VirtualMemory(region, region);
  }
#endif

  int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if (defined(HOST_OS_MACOS) && !defined(HOST_OS_IOS))
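  // macOS 10.14 (Mojave) and later expect executable mappings to be created
  // with MAP_JIT, presumably tied to the hardened runtime's restrictions on
  // writable-executable memory.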
  if (is_executable && IsAtLeastOS10_14()) {
    map_flags |= MAP_JIT;
  }
#endif  // defined(HOST_OS_MACOS)
  void* address = mmap(NULL, allocated_size, prot, map_flags, -1, 0);
  LOG_INFO("mmap(NULL, 0x%" Px ", %u, ...): %p\n", allocated_size, prot,
           address);
  if (address == MAP_FAILED) {
    return NULL;
  }

  const uword base = reinterpret_cast<uword>(address);
  const uword aligned_base = Utils::RoundUp(base, alignment);

  unmap(base, aligned_base);
  unmap(aligned_base + size, base + allocated_size);

  MemoryRegion region(reinterpret_cast<void*>(aligned_base), size);
  return new VirtualMemory(region, region);
}

VirtualMemory::~VirtualMemory() {
  if (vm_owns_region()) {
    unmap(reserved_.start(), reserved_.end());
    const intptr_t alias_offset = AliasOffset();
    if (alias_offset != 0) {
      unmap(reserved_.start() + alias_offset, reserved_.end() + alias_offset);
    }
  }
}

void VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
  const uword start = reinterpret_cast<uword>(address);
  unmap(start, start + size);
}

void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
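  // Protections on code pages may only be changed by the mutator thread, or
  // by another thread while the mutator is parked at a safepoint, to avoid
  // racing with code that is executing or being installed.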
#if defined(DEBUG)
  Thread* thread = Thread::Current();
  ASSERT(thread == nullptr || thread->IsMutatorThread() ||
         thread->isolate() == nullptr ||
         thread->isolate()->mutator_thread()->IsAtSafepoint());
#endif
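  // mprotect requires a page-aligned start address, so round down; the
  // length need not be aligned, since protection changes apply to every page
  // overlapping [start_address, end_address).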
  uword start_address = reinterpret_cast<uword>(address);
  uword end_address = start_address + size;
  uword page_address = Utils::RoundDown(start_address, PageSize());
  int prot = 0;
  switch (mode) {
    case kNoAccess:
      prot = PROT_NONE;
      break;
    case kReadOnly:
      prot = PROT_READ;
      break;
    case kReadWrite:
      prot = PROT_READ | PROT_WRITE;
      break;
    case kReadExecute:
      prot = PROT_READ | PROT_EXEC;
      break;
    case kReadWriteExecute:
      prot = PROT_READ | PROT_WRITE | PROT_EXEC;
      break;
  }
  if (mprotect(reinterpret_cast<void*>(page_address),
               end_address - page_address, prot) != 0) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) failed\n", page_address,
             end_address - page_address, prot);
    FATAL2("mprotect error: %d (%s)", error,
           Utils::StrError(error, error_buf, kBufferSize));
  }
  LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) ok\n", page_address,
           end_address - page_address, prot);
}

}  // namespace dart

#endif  // defined(HOST_OS_ANDROID ... HOST_OS_LINUX ... HOST_OS_MACOS)