/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }
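  // Illustrative (hypothetical) case for the mismatch trap above: if the
  // holder class was recompiled so that a field this method reads with
  // getfield is now static, the resolved field no longer matches the
  // bytecode, and per the JVM spec the interpreter must throw
  // IncompatibleClassChangeError when the bytecode reexecutes.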

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }
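  // Rationale, as far as the surrounding code suggests: a CallSite's target
  // may have been constant-folded into compiled code under a dependency, so
  // an inline compiled store to it could bypass the invalidation machinery.
  // Trapping keeps such updates on the interpreter's slow, safe path.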

  if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped())  return;
  }

  assert(field->will_link(method(), bc()), "getfield: typeflow responsibility");

  // Note: We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
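    // Stack layout reminder: for a getfield the receiver is on top (depth 0);
    // for a putfield the value to be stored sits above the receiver, so the
    // receiver is at a depth equal to the value's slot count (2 for a long
    // or double, 1 otherwise).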
    obj = null_check(peek(obj_depth));
    // Was the null exception detected at compile time?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();

  // Does this field have a constant value? If so, just push the value.
  if (field->is_constant() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }
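  // For instance (hypothetical Java source), a getstatic of
  //   static final int LIMIT = 42;
  // folds to an integer constant here, and a read of a @Stable field that
  // already holds a non-default value may likewise be replaced by a constant.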

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;
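  // Memory-ordering note: tagging a volatile access with MO_SEQ_CST leaves
  // it to the barrier-set access machinery to emit whatever fences or
  // instruction forms the platform needs; plain field accesses stay
  // MO_UNORDERED and can be scheduled freely.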

  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;

  if (is_obj) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }

  Node* ld = access_load_at(obj, adr, adr_type, type, bt, decorators);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
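  // Two-slot values (long, double) occupy a stack pair, hence pop_pair();
  // everything else, including oops, is a single slot.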

  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;

  // Store the value.
  const Type* field_type;
  if (!field->type()->is_loaded()) {
    field_type = TypeInstPtr::BOTTOM;
  } else {
    if (is_obj) {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    } else {
      field_type = Type::BOTTOM;
    }
  }
  access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);

  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multiple-copy-atomic (e.g. PPC64), a barrier must
    // be issued in constructors that contain such stores. See do_exits() in
    // parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
        // Preserve allocation ptr to create precedent edge to it in membar
        // generated on exit from constructor.
        // Can't bind stable with its allocation, only record allocation for final field.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
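    // Safe-publication sketch (hypothetical Java source): for
    //   class Point { final int x; Point(int x) { this.x = x; } }
    // the store to x is recorded here so that do_exits() can emit a release
    // barrier on constructor exit, keeping the write from floating past the
    // point where the new Point becomes visible to other threads.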
  }
}

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon-trap when the element class is not loaded: we need the loaded
  // class for the rest of the graph, but we must not initialize the
  // container class here (see the Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
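// Shape of the expansion (a sketch): new int[3][5] becomes one length-3
// object-array allocation plus three recursive int[5] allocations, each
// stored into the outer array at base_offset + i * heapOopSize. All but the
// last dimension must be known constants; only the innermost length may
// vary, as in new Object[2][nonConLen] above.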
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      access_store_at(array, eaddr, adr_type, elem, elemtype, T_OBJECT, IN_HEAP | IS_ARRAY);
    }
  }
  return array;
}


void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int expand_count = 1;        // count of allocations in the expansion
  int expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }
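  // Worked example (assuming the limits are not hit): for new T[2][3][n],
  // j=0 gives fanout 2 and count 3; j=1 gives fanout 6 and count 9. So the
  // expansion would emit 9 allocations in total, and the innermost length n
  // never needs to be a constant.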

  // We can emulate the multianewarray with a series of [a]newarray
  // allocations if there is only one dimension, or if all non-final
  // dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for a one-dimensional multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when the AllocateArray node for the newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  };
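  // Case 1 is unreachable because the one-dimensional form was already
  // handled by the expansion path above; dimensions beyond 5 leave
  // fun == NULL and take the generic multianewarrayN call below.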
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a java array for dimension sizes
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the dimension lengths
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type: We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays. (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}