// Copyright (c) 2018, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_INTERPRETER_H_
#define RUNTIME_VM_INTERPRETER_H_

#include "vm/globals.h"
#if !defined(DART_PRECOMPILED_RUNTIME)

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/allocation.h"
#include "vm/compiler/method_recognizer.h"
#include "vm/constants_kbc.h"
#include "vm/tagged_pointer.h"

namespace dart {

class Array;
class Code;
class Function;
class InterpreterSetjmpBuffer;
class Isolate;
class ObjectPointerVisitor;
class Thread;

class LookupCache : public ValueObject {
 public:
  LookupCache() {
    ASSERT(Utils::IsPowerOfTwo(sizeof(Entry)));
    ASSERT(Utils::IsPowerOfTwo(kNumEntries));
    Clear();
  }

  void Clear();
  bool Lookup(intptr_t receiver_cid,
              StringPtr function_name,
              ArrayPtr arguments_descriptor,
              FunctionPtr* target) const;
  void Insert(intptr_t receiver_cid,
              StringPtr function_name,
              ArrayPtr arguments_descriptor,
              FunctionPtr target);

 private:
  struct Entry {
    intptr_t receiver_cid;
    StringPtr function_name;
    ArrayPtr arguments_descriptor;
    FunctionPtr target;
  };

  static const intptr_t kNumEntries = 1024;
  static const intptr_t kTableMask = kNumEntries - 1;

  Entry entries_[kNumEntries];
};
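
// Illustrative fast-path use of the cache for an instance call (a sketch
// only; the interpreter's real call sequence lives in the implementation,
// and ResolveTarget below is a hypothetical stand-in for slow-path method
// resolution):
//
//   FunctionPtr target;
//   if (!cache.Lookup(receiver_cid, name, args_desc, &target)) {
//     target = ResolveTarget(receiver_cid, name, args_desc);  // Slow path.
//     cache.Insert(receiver_cid, name, args_desc, target);
//   }
//   // target is now ready to be invoked for this receiver class.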

// Interpreter intrinsic handler. It is invoked on entry to an intrinsified
// function via the Intrinsic bytecode, before the frame is set up.
// If the handler returns true, the Intrinsic bytecode acts as a return
// instruction, returning the value stored in *result. Otherwise, the
// interpreter proceeds to execute the body of the function.
typedef bool (*IntrinsicHandler)(Thread* thread,
                                 ObjectPtr* FP,
                                 ObjectPtr* result);
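
// A minimal handler might look like the following sketch. It is purely
// illustrative: it assumes, for illustration only, that the receiver sits
// in slot FP[0] on entry; real handlers are defined and registered by the
// interpreter implementation.
//
//   static bool ReturnReceiverIntrinsic(Thread* thread,
//                                       ObjectPtr* FP,
//                                       ObjectPtr* result) {
//     *result = FP[0];  // Assumed receiver slot (see note above).
//     return true;      // The Intrinsic bytecode now acts as a return.
//   }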

class Interpreter {
 public:
  static const uword kInterpreterStackUnderflowSize = 0x80;
  // The entry frame pc marker must be non-zero (a valid exception handler pc).
  static const word kEntryFramePcMarker = -1;

  Interpreter();
  ~Interpreter();

  // The currently executing Interpreter instance, which is associated with
  // the current isolate.
  static Interpreter* Current();

  // Low address (KBC stack grows up).
  uword stack_base() const { return stack_base_; }
  // Limit for StackOverflowError.
  uword overflow_stack_limit() const { return overflow_stack_limit_; }
  // High address (KBC stack grows up).
  uword stack_limit() const { return stack_limit_; }
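
  // Given the grows-up layout noted above, these addresses satisfy
  //   stack_base() <= overflow_stack_limit() < stack_limit(),
  // with slack kept above the overflow limit so a StackOverflowError can
  // still be raised after the soft limit has been crossed.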

  // Returns true if the interpreter's stack contains the given frame.
  // TODO(regis): We should rely on a new thread vm_tag to identify an
  // interpreter frame and not need this HasFrame() method.
  bool HasFrame(uword frame) const {
    return frame >= stack_base() && frame < stack_limit();
  }

  // Identify an entry frame by looking at its pc marker value.
  static bool IsEntryFrameMarker(const KBCInstr* pc) {
    return reinterpret_cast<word>(pc) == kEntryFramePcMarker;
  }

  ObjectPtr Call(const Function& function,
                 const Array& arguments_descriptor,
                 const Array& arguments,
                 Thread* thread);

  ObjectPtr Call(FunctionPtr function,
                 ArrayPtr argdesc,
                 intptr_t argc,
                 ObjectPtr const* argv,
                 Thread* thread);
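
  // Typical invocation from VM runtime code (a sketch; it assumes that
  // `function`, `arguments_descriptor`, and `arguments` are live handles
  // for a function that has bytecode):
  //
  //   ObjectPtr result = Interpreter::Current()->Call(
  //       function, arguments_descriptor, arguments, Thread::Current());
  //
  // The returned value may be a raw Error object (e.g. for an unhandled
  // exception), which the caller is expected to check for and propagate.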

  void JumpToFrame(uword pc, uword sp, uword fp, Thread* thread);

  uword get_sp() const { return reinterpret_cast<uword>(fp_); }  // Yes, fp_.
  uword get_fp() const { return reinterpret_cast<uword>(fp_); }
  uword get_pc() const { return reinterpret_cast<uword>(pc_); }

  void Unexit(Thread* thread);

  void VisitObjectPointers(ObjectPointerVisitor* visitor);
  void ClearLookupCache() { lookup_cache_.Clear(); }

#ifndef PRODUCT
  void set_is_debugging(bool value) { is_debugging_ = value; }
  bool is_debugging() const { return is_debugging_; }
#endif  // !PRODUCT

 private:
  uintptr_t* stack_;
  uword stack_base_;
  uword overflow_stack_limit_;
  uword stack_limit_;
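
  // Current frame pointer and bytecode pc of the executing interpreter.
  // Declared volatile so that their values survive the longjmp performed
  // when an exception unwinds interpreter frames (see
  // InterpreterSetjmpBuffer).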
  ObjectPtr* volatile fp_;
  const KBCInstr* volatile pc_;
  DEBUG_ONLY(uint64_t icount_;)

  InterpreterSetjmpBuffer* last_setjmp_buffer_;

  ObjectPoolPtr pp_;  // Pool Pointer.
  ArrayPtr argdesc_;  // Arguments Descriptor: used to pass information between
                      // call instruction and the function entry.
  ObjectPtr special_[KernelBytecode::kSpecialIndexCount];

  LookupCache lookup_cache_;

  void Exit(Thread* thread,
            ObjectPtr* base,
            ObjectPtr* exit_frame,
            const KBCInstr* pc);

  bool Invoke(Thread* thread,
              ObjectPtr* call_base,
              ObjectPtr* call_top,
              const KBCInstr** pc,
              ObjectPtr** FP,
              ObjectPtr** SP);

  bool InvokeCompiled(Thread* thread,
                      FunctionPtr function,
                      ObjectPtr* call_base,
                      ObjectPtr* call_top,
                      const KBCInstr** pc,
                      ObjectPtr** FP,
                      ObjectPtr** SP);

  bool InvokeBytecode(Thread* thread,
                      FunctionPtr function,
                      ObjectPtr* call_base,
                      ObjectPtr* call_top,
                      const KBCInstr** pc,
                      ObjectPtr** FP,
                      ObjectPtr** SP);

  bool InstanceCall(Thread* thread,
                    StringPtr target_name,
                    ObjectPtr* call_base,
                    ObjectPtr* call_top,
                    const KBCInstr** pc,
                    ObjectPtr** FP,
                    ObjectPtr** SP);

  bool CopyParameters(Thread* thread,
                      const KBCInstr** pc,
                      ObjectPtr** FP,
                      ObjectPtr** SP,
                      const intptr_t num_fixed_params,
                      const intptr_t num_opt_pos_params,
                      const intptr_t num_opt_named_params);

  bool AssertAssignable(Thread* thread,
                        const KBCInstr* pc,
                        ObjectPtr* FP,
                        ObjectPtr* call_top,
                        ObjectPtr* args,
                        SubtypeTestCachePtr cache);
  template <bool is_getter>
  bool AssertAssignableField(Thread* thread,
                             const KBCInstr* pc,
                             ObjectPtr* FP,
                             ObjectPtr* SP,
                             InstancePtr instance,
                             FieldPtr field,
                             InstancePtr value);

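  // Allocation helpers used by bytecode handlers to box values and create
  // objects. Note (an inference from the shared signature shape, not a
  // guarantee stated elsewhere in this header): each helper returns true on
  // success and false when the allocation entered the runtime and an
  // exception is being propagated instead.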
  bool AllocateMint(Thread* thread,
                    int64_t value,
                    const KBCInstr* pc,
                    ObjectPtr* FP,
                    ObjectPtr* SP);
  bool AllocateDouble(Thread* thread,
                      double value,
                      const KBCInstr* pc,
                      ObjectPtr* FP,
                      ObjectPtr* SP);
  bool AllocateFloat32x4(Thread* thread,
                         simd128_value_t value,
                         const KBCInstr* pc,
                         ObjectPtr* FP,
                         ObjectPtr* SP);
  bool AllocateFloat64x2(Thread* thread,
                         simd128_value_t value,
                         const KBCInstr* pc,
                         ObjectPtr* FP,
                         ObjectPtr* SP);
  bool AllocateArray(Thread* thread,
                     TypeArgumentsPtr type_args,
                     ObjectPtr length,
                     const KBCInstr* pc,
                     ObjectPtr* FP,
                     ObjectPtr* SP);
  bool AllocateContext(Thread* thread,
                       intptr_t num_variables,
                       const KBCInstr* pc,
                       ObjectPtr* FP,
                       ObjectPtr* SP);
  bool AllocateClosure(Thread* thread,
                       const KBCInstr* pc,
                       ObjectPtr* FP,
                       ObjectPtr* SP);

#if defined(DEBUG)
  // Returns true if tracing of executed instructions is enabled.
  bool IsTracingExecution() const;

  // Prints bytecode instruction at given pc for instruction tracing.
  void TraceInstruction(const KBCInstr* pc) const;

  bool IsWritingTraceFile() const;
  void FlushTraceBuffer();
  void WriteInstructionToTrace(const KBCInstr* pc);

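  // Trace output file and a running count of bytes written to it.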
  void* trace_file_;
  uint64_t trace_file_bytes_written_;

  static const intptr_t kTraceBufferSizeInBytes = 10 * KB;
  static const intptr_t kTraceBufferInstrs =
      kTraceBufferSizeInBytes / sizeof(KBCInstr);
  KBCInstr* trace_buffer_;
  intptr_t trace_buffer_idx_;
#endif  // defined(DEBUG)

  // Longjmp support for exceptions.
  InterpreterSetjmpBuffer* last_setjmp_buffer() { return last_setjmp_buffer_; }
  void set_last_setjmp_buffer(InterpreterSetjmpBuffer* buffer) {
    last_setjmp_buffer_ = buffer;
  }

#ifndef PRODUCT
  bool is_debugging_ = false;
#endif  // !PRODUCT

  bool supports_unboxed_doubles_;
  bool supports_unboxed_simd128_;

  friend class InterpreterSetjmpBuffer;

  DISALLOW_COPY_AND_ASSIGN(Interpreter);
};

}  // namespace dart

#endif  // !defined(DART_PRECOMPILED_RUNTIME)

#endif  // RUNTIME_VM_INTERPRETER_H_