// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"
#if defined(HOST_OS_FUCHSIA)

#include "vm/virtual_memory.h"

#include <sys/mman.h>
#include <unistd.h>
#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

#include "platform/assert.h"
#include "vm/allocation.h"
#include "vm/growable_array.h"
#include "vm/isolate.h"
#include "vm/lockers.h"
#include "vm/memory_region.h"
#include "vm/os.h"
#include "vm/os_thread.h"

// #define VIRTUAL_MEMORY_LOGGING 1
#if defined(VIRTUAL_MEMORY_LOGGING)
#define LOG_ERR(msg, ...) \
  OS::PrintErr("VMVM: %s:%d: " msg, __FILE__, __LINE__, ##__VA_ARGS__)
#define LOG_INFO(msg, ...) \
  OS::PrintErr("VMVM: %s:%d: " msg, __FILE__, __LINE__, ##__VA_ARGS__)
#else
#define LOG_ERR(msg, ...)
#define LOG_INFO(msg, ...)
#endif  // defined(VIRTUAL_MEMORY_LOGGING)

namespace dart {

DECLARE_FLAG(bool, dual_map_code);
DECLARE_FLAG(bool, write_protect_code);

uword VirtualMemory::page_size_ = 0;

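// Queries the system page size; all reservations and protection changes
// below operate on multiples of this granularity.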
intptr_t VirtualMemory::CalculatePageSize() {
  const intptr_t page_size = getpagesize();
  ASSERT(page_size != 0);
  ASSERT(Utils::IsPowerOfTwo(page_size));
  return page_size;
}

void VirtualMemory::Init() {
  page_size_ = CalculatePageSize();
}

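// Unmaps the range [start, end) from |vmar|. A zero-length range is a no-op;
// any other failure is treated as fatal.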
static void Unmap(zx_handle_t vmar, uword start, uword end) {
  ASSERT(start <= end);
  const uword size = end - start;
  if (size == 0) {
    return;
  }

  zx_status_t status = zx_vmar_unmap(vmar, start, size);
  if (status != ZX_OK) {
    FATAL1("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
  }
}

bool VirtualMemory::DualMappingEnabled() {
  return FLAG_dual_map_code;
}

VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                              intptr_t alignment,
                                              bool is_executable,
                                              const char* name) {
  // When FLAG_write_protect_code is active, code memory (indicated by
  // is_executable = true) is allocated as non-executable and later
  // changed to executable via VirtualMemory::Protect, which requires
  // ZX_RIGHT_EXECUTE on the underlying VMO.
  //
  // If FLAG_dual_map_code is active, the executable mapping is mapped RX
  // immediately and its protection never changes until it is eventually
  // unmapped.
  //
  // In addition, dual mapping of the same underlying code memory is provided.
  const bool dual_mapping =
      is_executable && FLAG_write_protect_code && FLAG_dual_map_code;

  ASSERT(Utils::IsAligned(size, page_size_));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, page_size_));

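  // zx_vmar_map encodes the requested alignment as a power-of-two exponent in
  // the ZX_VM_ALIGN bits of the options word, so shift log2(alignment) up to
  // ZX_VM_ALIGN_BASE.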
  const zx_vm_option_t align_flag = Utils::ShiftForPowerOfTwo(alignment)
                                    << ZX_VM_ALIGN_BASE;
  ASSERT((ZX_VM_ALIGN_1KB <= align_flag) && (align_flag <= ZX_VM_ALIGN_4GB));

  zx_handle_t vmar = zx_vmar_root_self();
  zx_handle_t vmo = ZX_HANDLE_INVALID;
  zx_status_t status = zx_vmo_create(size, 0u, &vmo);
  if (status != ZX_OK) {
    LOG_ERR("zx_vmo_create(0x%lx) failed: %s\n", size,
            zx_status_get_string(status));
    return NULL;
  }

  if (name != NULL) {
    zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
  }

  if (is_executable) {
    // Add the ZX_RIGHT_EXECUTE right to the VMO so that it can be mapped
    // into memory as executable (now or later).
    status = zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo);
    if (status != ZX_OK) {
      LOG_ERR("zx_vmo_replace_as_executable() failed: %s\n",
              zx_status_get_string(status));
      return NULL;
    }
  }

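  // The primary mapping is always readable and writable; it is additionally
  // executable only when code pages are not write-protected, since otherwise
  // execute permission is granted later via VirtualMemory::Protect.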
  const zx_vm_option_t region_options =
      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | align_flag |
      ((is_executable && !FLAG_write_protect_code) ? ZX_VM_PERM_EXECUTE : 0);
  uword base;
  status = zx_vmar_map(vmar, region_options, 0, vmo, 0u, size, &base);
  LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", region_options, base, size);
  if (status != ZX_OK) {
    LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", region_options, base,
            size, zx_status_get_string(status));
    return NULL;
  }
  void* region_ptr = reinterpret_cast<void*>(base);
  MemoryRegion region(region_ptr, size);

  VirtualMemory* result;

  if (dual_mapping) {
    // This mapping is RX from the start and keeps that protection until it is
    // eventually unmapped.
    const zx_vm_option_t alias_options =
        ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE | align_flag;
    status = zx_vmar_map(vmar, alias_options, 0, vmo, 0u, size, &base);
    LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", alias_options, base, size);
    if (status != ZX_OK) {
      LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", alias_options, base,
              size, zx_status_get_string(status));
      const uword region_base = reinterpret_cast<uword>(region_ptr);
      Unmap(vmar, region_base, region_base + size);
      return NULL;
    }
    void* alias_ptr = reinterpret_cast<void*>(base);
    ASSERT(region_ptr != alias_ptr);
    MemoryRegion alias(alias_ptr, size);
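    // The RW view is tracked as the reserved region; the RX alias is released
    // separately via AliasOffset() in the destructor.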
    result = new VirtualMemory(region, alias, region);
  } else {
    result = new VirtualMemory(region, region, region);
  }
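  // The mappings keep the VMO alive, so the creation handle is no longer
  // needed and can be closed here.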
  zx_handle_close(vmo);
  return result;
}

VirtualMemory::~VirtualMemory() {
  // Reserved region may be empty due to VirtualMemory::Truncate.
  if (vm_owns_region() && reserved_.size() != 0) {
    Unmap(zx_vmar_root_self(), reserved_.start(), reserved_.end());
    LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n", reserved_.start(),
             reserved_.size());

    const intptr_t alias_offset = AliasOffset();
    if (alias_offset != 0) {
      Unmap(zx_vmar_root_self(), reserved_.start() + alias_offset,
            reserved_.end() + alias_offset);
      LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n",
               reserved_.start() + alias_offset, reserved_.size());
    }
  }
}

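// Returns the pages in [address, address + size) to the system while leaving
// the rest of the enclosing reservation mapped.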
void VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
  const uword start = reinterpret_cast<uword>(address);
  Unmap(zx_vmar_root_self(), start, start + size);
  LOG_INFO("zx_vmar_unmap(0x%p, 0x%lx) success\n", address, size);
}

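// Changes the protection of the pages covering [address, address + size).
// With FLAG_write_protect_code this is how code pages are toggled between
// writable (RW) and executable (RX).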
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
#if defined(DEBUG)
  Thread* thread = Thread::Current();
  ASSERT(thread == nullptr || thread->IsMutatorThread() ||
         thread->isolate() == nullptr ||
         thread->isolate()->mutator_thread()->IsAtSafepoint());
#endif
  const uword start_address = reinterpret_cast<uword>(address);
  const uword end_address = start_address + size;
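  // zx_vmar_protect requires a page-aligned base, so round the start address
  // down to a page boundary and protect through end_address.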
  const uword page_address = Utils::RoundDown(start_address, PageSize());
  uint32_t prot = 0;
  switch (mode) {
    case kNoAccess:
      prot = 0;
      break;
    case kReadOnly:
      prot = ZX_VM_PERM_READ;
      break;
    case kReadWrite:
      prot = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
      break;
    case kReadExecute:
      prot = ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
      break;
    case kReadWriteExecute:
      prot = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
      break;
  }
  zx_status_t status = zx_vmar_protect(zx_vmar_root_self(), prot, page_address,
                                       end_address - page_address);
  LOG_INFO("zx_vmar_protect(%u, 0x%lx, 0x%lx)\n", prot, page_address,
           end_address - page_address);
  if (status != ZX_OK) {
    FATAL3("zx_vmar_protect(0x%lx, 0x%lx) failed: %s\n", page_address,
           end_address - page_address, zx_status_get_string(status));
  }
}

}  // namespace dart

#endif  // defined(HOST_OS_FUCHSIA)