/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_GRAPHKIT_HPP
#define SHARE_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class BarrierSetC2;
class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*         _env;         // Compilation environment
  PhaseGVN&      _gvn;         // Some optimizations while parsing
  SafePointNode* _map;         // Parser map from JVM to Nodes
  SafePointNode* _exceptions;  // Parser map(s) for exception state(s)
  int            _bci;         // JVM Bytecode Pointer
  ciMethod*      _method;      // JVM Current Method
  BarrierSetC2*  _barrier_set;

 private:
  int            _sp;          // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode* map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                  // empty constructor
  GraphKit(JVMState* jvms);    // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse() const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*    env()               const { return _env; }
  PhaseGVN& gvn()               const { return _gvn; }
  void*     barrier_set_state() const { return C->barrier_set_state(); }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*     null() const { return zerocon(T_OBJECT); }
  Node*     top()  const { return C->top(); }
  RootNode* root() const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)       const { return _gvn.intcon(con); }
  Node* longcon(jlong con)     const { return _gvn.longcon(con); }
  Node* makecon(const Type* t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  jint find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)
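
  // Illustrative sketch (not part of the API): probing for a parse-time
  // constant before committing to a generic subgraph; 'shift' is an
  // assumed int-typed Node* already in the graph:
  //
  //   jint con = find_int_con(shift, -1);
  //   if (con >= 0 && con < 32) {
  //     // emit a subgraph specialized for the known shift amount
  //   } else {
  //     // fall back to the general case
  //   }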

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*  map() const            { return _map; }
  bool            has_exceptions() const { return _exceptions != NULL; }
  JVMState*       jvms() const           { return map_not_null()->_jvms; }
  int             sp() const             { return _sp; }
  int             bci() const            { return _bci; }
  Bytecodes::Code java_bc() const;
  ciMethod*       method() const         { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
  void set_sp(int sp)            { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)             { set_sp(sp() + i); }
  void dec_sp(int i)             { set_sp(sp() - i); }
  void set_bci(int bci)          { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Report whether an exception oop has been saved in the given map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);
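
  // Illustrative sketch of the usual plumbing; 'ex_oop' is an assumed
  // exception oop Node produced on some path:
  //
  //   SafePointNode* ex_map = make_exception_state(ex_oop);
  //   add_exception_state(ex_map);  // queue it, commoning by exception type
  //   ...
  //   SafePointNode* all = combine_and_pop_all_exception_states();
  //   if (all != NULL) {
  //     Node* ex = use_exception_state(all);  // re-adopt the merged state
  //     push_ex_oop(ex);                      // place it where a handler expects it
  //   }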

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, and return the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
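
  // Illustrative sketch (assumed names): forming a field address at a
  // compile-time byte offset, and an element address from an already
  // scaled offset Node; 'obj', 'ary' and 'scaled_offset' are assumptions:
  //
  //   Node* field_adr = basic_plus_adr(obj, field_offset);  // base == ptr == obj
  //   Node* elem_adr  = basic_plus_adr(ary, scaled_offset); // variable offset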

  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)               { return _gvn.transform(new IfTrueNode(iff)); }
  Node* IfFalse(IfNode* iff)              { return _gvn.transform(new IfFalseNode(iff)); }

  Node* AddI(Node* l, Node* r)            { return _gvn.transform(new AddINode(l, r)); }
  Node* SubI(Node* l, Node* r)            { return _gvn.transform(new SubINode(l, r)); }
  Node* MulI(Node* l, Node* r)            { return _gvn.transform(new MulINode(l, r)); }
  Node* DivI(Node* ctl, Node* l, Node* r) { return _gvn.transform(new DivINode(ctl, l, r)); }

  Node* AndI(Node* l, Node* r)            { return _gvn.transform(new AndINode(l, r)); }
  Node* OrI(Node* l, Node* r)             { return _gvn.transform(new OrINode(l, r)); }
  Node* XorI(Node* l, Node* r)            { return _gvn.transform(new XorINode(l, r)); }

  Node* MaxI(Node* l, Node* r)            { return _gvn.transform(new MaxINode(l, r)); }
  Node* MinI(Node* l, Node* r)            { return _gvn.transform(new MinINode(l, r)); }

  Node* LShiftI(Node* l, Node* r)         { return _gvn.transform(new LShiftINode(l, r)); }
  Node* RShiftI(Node* l, Node* r)         { return _gvn.transform(new RShiftINode(l, r)); }
  Node* URShiftI(Node* l, Node* r)        { return _gvn.transform(new URShiftINode(l, r)); }

  Node* CmpI(Node* l, Node* r)            { return _gvn.transform(new CmpINode(l, r)); }
  Node* CmpL(Node* l, Node* r)            { return _gvn.transform(new CmpLNode(l, r)); }
  Node* CmpP(Node* l, Node* r)            { return _gvn.transform(new CmpPNode(l, r)); }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)   { return _gvn.transform(new AddPNode(b, a, o)); }
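
  // Illustrative sketch: "(a + b) >> 1" over assumed int-typed Nodes 'a'
  // and 'b'; each shortcut runs its new node through _gvn.transform, so
  // the result may fold to a constant at parse time:
  //
  //   Node* avg = RShiftI(AddI(a, b), intcon(1));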

  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvI2UL(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);

  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false,
                          Node* *null_control = NULL,
                          bool speculative = false);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null());
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           "wrong type: %s", type2name(value->bottom_type()->basic_type()));
    return null_check_common(value, T_LONG);
  }
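
  // Illustrative sketch: the guard typically wanted before an integer
  // division ('num' and 'div' are assumed int-typed Nodes):
  //
  //   Node* d = zero_check_int(div);  // throws/deopts on a zero divisor
  //   if (stopped())  return;         // the division turned out to be dead
  //   Node* q = DivI(control(), num, d);
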
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null());
  }

  // Check if value is null and abort if it is
  Node* must_be_not_null(Node* value, bool do_replace_in_map);

  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  // If safe_for_replace, then we can replace the value with the cast
  // in the parsing map (the cast is guaranteed to dominate the map)
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false,
                       bool safe_for_replace = false,
                       bool speculative = false);

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data, bool& speculating);

  void guard_klass_being_initialized(Node* klass);
  void guard_init_thread(Node* klass);

  void clinit_barrier(ciInstanceKlass* ik, ciMethod* context);

  // Check for unique class for receiver at call
  ciKlass* profile_has_unique_klass() {
    ciCallProfile profile = method()->call_profile_at_bci(bci());
    if (profile.count() >= 0 &&  // no cast failures here
        profile.has_receiver(0) &&
        profile.morphism() == 1) {
      return profile.receiver(0);
    }
    return NULL;
  }

  // record type from profiling with the type system
  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind);
  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
  void record_profiled_parameters_for_speculation();
  void record_profiled_return_for_speculation();
  Node* record_profiled_receiver_for_speculation(Node* n);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciKlass* require_klass,
                                     ciKlass* spec,
                                     bool safe_for_replace);

  // Cast obj to type and emit guard unless we had too many traps here already
  Node* maybe_cast_profiled_obj(Node* obj,
                                ciKlass* type,
                                bool not_null = false);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(local(i+0));  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }
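
  // Illustrative sketch: a long binary op on the expression stack; the
  // placeholder half is popped first (AddLNode comes from opto/addnode.hpp):
  //
  //   Node* b = pop_pair();
  //   Node* a = pop_pair();
  //   push_pair(_gvn.transform(new AddLNode(a, b)));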

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(n);       // T_INT, ...
    else if (n_size == 2)  push_pair(n);  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(_map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(_map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(_map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control(Node* c)            { map_not_null()->set_control(c); }
  void set_i_o(Node* c)                { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(_map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(_map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr* tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr)         { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx)    { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr* tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr)         { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note: require_atomic_access is useful only with T_LONG.)
  //
  // We choose the unordered semantics by default because we have
  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
  // of volatile fields.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     mo, control_dependency, require_atomic_access,
                     unaligned, mismatched, unsafe);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                  bool require_atomic_access = false, bool unaligned = false,
                  bool mismatched = false, bool unsafe = false);
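
  // Illustrative sketch: an unordered int load from a fixed field offset
  // ('obj' is an assumed non-null oop Node, 'off' an assumed byte offset):
  //
  //   Node* adr = basic_plus_adr(obj, off);
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT,
  //                         MemNode::unordered);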

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the clients of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           mo, require_atomic_access,
                           unaligned, mismatched, unsafe);
  }
  // This is the base version, which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        MemNode::MemOrd mo,
                        bool require_atomic_access = false,
                        bool unaligned = false,
                        bool mismatched = false,
                        bool unsafe = false);
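
  // Illustrative sketch, mirroring the load example above ('adr', 'val'
  // and 'adr_type' assumed); a volatile field store would instead pass
  // MemNode::release and be followed by the appropriate memory barrier:
  //
  //   store_to_memory(control(), adr, val, T_INT, adr_type, MemNode::unordered);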

  // Perform decorated accesses

  Node* access_store_at(Node* obj,   // containing obj
                        Node* adr,   // actual address to store val at
                        const TypePtr* adr_type,
                        Node* val,
                        const Type* val_type,
                        BasicType bt,
                        DecoratorSet decorators);

  Node* access_load_at(Node* obj,   // containing obj
                       Node* adr,   // actual address to load val at
                       const TypePtr* adr_type,
                       const Type* val_type,
                       BasicType bt,
                       DecoratorSet decorators);

  Node* access_load(Node* adr,   // actual address to load val at
                    const Type* val_type,
                    BasicType bt,
                    DecoratorSet decorators);

  Node* access_atomic_cmpxchg_val_at(Node* obj,
                                     Node* adr,
                                     const TypePtr* adr_type,
                                     int alias_idx,
                                     Node* expected_val,
                                     Node* new_val,
                                     const Type* value_type,
                                     BasicType bt,
                                     DecoratorSet decorators);

  Node* access_atomic_cmpxchg_bool_at(Node* obj,
                                      Node* adr,
                                      const TypePtr* adr_type,
                                      int alias_idx,
                                      Node* expected_val,
                                      Node* new_val,
                                      const Type* value_type,
                                      BasicType bt,
                                      DecoratorSet decorators);

  Node* access_atomic_xchg_at(Node* obj,
                              Node* adr,
                              const TypePtr* adr_type,
                              int alias_idx,
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators);

  Node* access_atomic_add_at(Node* obj,
                             Node* adr,
                             const TypePtr* adr_type,
                             int alias_idx,
                             Node* new_val,
                             const Type* value_type,
                             BasicType bt,
                             DecoratorSet decorators);

  void access_clone(Node* src, Node* dst, Node* size, bool is_array);

  Node* access_resolve(Node* n, DecoratorSet decorators);
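
  // Illustrative sketch: a decorated in-heap oop store, which lets the
  // current GC's BarrierSetC2 expand whatever barriers it requires
  // (decorator names from oops/accessDecorators.hpp; other names assumed):
  //
  //   Node* adr = basic_plus_adr(obj, off);
  //   access_store_at(obj, adr, adr_type, val, val_type, T_OBJECT,
  //                   IN_HEAP | MO_UNORDERED);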

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL,
                              // Optional control dependency (for example, on range check)
                              Node* ctrl = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
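
  // Illustrative sketch: loading ary[idx] from an int[] ('ary' and 'idx'
  // are assumed Nodes; any required range check is emitted separately):
  //
  //   const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
  //   Node* elem = load_array_element(control(), ary, idx, arytype);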

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char* name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    // The callsite signature can differ from that of the actual method
    // being called (i.e., at _linkTo* sites); always use the callsite signature.
    ciMethod* declared_method = method()->get_method_at_bci(bci());
    const int nargs = declared_method->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false, bool deoptimize = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);

  // helper functions for statistics
  void increment_counter(address counter_addr);  // increment a debug counter
  void increment_counter(Node* counter_addr);    // increment a debug counter

  // Bail out to the interpreter right now.
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
                           Deoptimization::DeoptAction action,
                           ciKlass* klass = NULL, const char* reason_string = NULL,
                           bool must_throw = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
  }
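
  // Illustrative sketch: deoptimize and reinterpret when a class seen at
  // this bci is not loaded yet ('klass' assumed; the enum values come
  // from runtime/deoptimization.hpp):
  //
  //   uncommon_trap(Deoptimization::Reason_unloaded,
  //                 Deoptimization::Action_reinterpret,
  //                 klass, "unloaded klass");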

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  bool too_many_traps_or_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_traps_or_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

 public:
  // Helper functions to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP      = 1,   // CallLeafNoFPNode
    RC_NO_IO      = 2,   // do not hook IO edges
    RC_NO_LEAF    = 4,   // CallStaticJavaNode
    RC_MUST_THROW = 8,   // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,  // input memory is same as output
    RC_UNCOMMON   = 32,  // freq. expected to be like uncommon trap
    RC_LEAF       = 0    // null value: no flags set
  };
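
  // Illustrative sketch: a leaf call with no FP use and narrowed memory
  // effects; the TypeFunc accessor and entry point shown are placeholders,
  // not real runtime entries:
  //
  //   make_runtime_call(RC_LEAF | RC_NO_FP | RC_NARROW_MEM,
  //                     some_call_Type(),   // hypothetical TypeFunc*
  //                     some_entry_point,   // hypothetical address
  //                     "some_leaf_call", adr_type, parm0);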

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj, bool deoptimize = false);

  // Helper functions to build synchronizations
  int next_monitor();
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  void insert_store_load_for_barrier();
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc* slow_call_type,
                      Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node* subobj, Node* superkls, bool safe_for_replace = false);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode.
  Node* gen_checkcast(Node* subobj, Node* superkls,
                      Node* *failure_control = NULL);

  Node* gen_subtype_check(Node* subklass, Node* superklass) {
    MergeMemNode* mem = merged_memory();
    Node* ctrl = control();
    Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, &_gvn);
    set_control(ctrl);
    return n;
  }

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // Inexact type check used for predicted calls.
  Node* subtype_check_receiver(Node* receiver, ciKlass* klass,
                               Node** casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool deoptimize_on_exception = false);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL,
                     bool deoptimize_on_exception = false);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL,
                  bool deoptimize_on_exception = false);

  // java.lang.String helpers
  Node* load_String_length(Node* str, bool set_ctrl);
  Node* load_String_value(Node* str, bool set_ctrl);
  Node* load_String_coder(Node* str, bool set_ctrl);
  void store_String_value(Node* str, Node* value);
  void store_String_coder(Node* str, Node* value);
  Node* capture_memory(const TypePtr* src_type, const TypePtr* dst_type);
  Node* compress_string(Node* src, const TypeAryPtr* src_type, Node* dst, Node* count);
  void inflate_string(Node* src, Node* dst, const TypeAryPtr* dst_type, Node* count);
  void inflate_string_slow(Node* src, Node* dst, Node* start, Node* count);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));           // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.transform(iff);                             // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);       // Range-check and Null-check removal is later
    return iff;
  }
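
  // Illustrative sketch: a complete two-way test over assumed int Nodes
  // 'a' and 'b' (PROB_FAIR and COUNT_UNKNOWN are the usual C2
  // probability/count constants):
  //
  //   Node* cmp = CmpI(a, b);
  //   Node* bol = Bool(cmp, BoolTest::lt);
  //   IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* taken     = IfTrue(iff);   // a <  b
  //   Node* not_taken = IfFalse(iff);  // a >= b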

  // Insert a loop predicate into the graph
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  Node* make_constant_from_field(ciField* field, Node* obj);
};

// Helper class to support building of control flow branches.  Upon
// creation the map and sp at bci are cloned and restored upon
// destruction.  Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};
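
// Illustrative sketch: trap on the failing side of a test and continue
// along the passing side ('bol' is an assumed BoolNode result; PROB_MAX
// is the usual C2 branch-probability constant):
//
//   { BuildCutout unless(this, bol, PROB_MAX);
//     uncommon_trap(Deoptimization::Reason_unreached,
//                   Deoptimization::Action_reinterpret);
//   }
//   // main flow continues here, on the "then" side of 'bol'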

// Helper class to preserve the original _reexecute bit and _sp and
// restore them afterwards.
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                _kit;
  uint                     _sp;
  JVMState::ReexecuteState _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_OPTO_GRAPHKIT_HPP