| 1 | // Licensed to the .NET Foundation under one or more agreements. |
| 2 | // The .NET Foundation licenses this file to you under the MIT license. |
| 3 | // See the LICENSE file in the project root for more information. |
| 4 | |
| 5 | /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 6 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 7 | XX XX |
| 8 | XX Inline functions XX |
| 9 | XX XX |
| 10 | XX XX |
| 11 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 12 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 13 | */ |
| 14 | |
| 15 | #ifndef _COMPILER_HPP_ |
| 16 | #define _COMPILER_HPP_ |
| 17 | |
| 18 | #include "emit.h" // for emitter::emitAddLabel |
| 19 | |
| 20 | #include "bitvec.h" |
| 21 | |
| 22 | #include "compilerbitsettraits.hpp" |
| 23 | |
| 24 | /* |
| 25 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 26 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 27 | XX XX |
| 28 | XX Miscellaneous utility functions. Some of these are defined in Utils.cpp XX |
| 29 | XX XX |
| 30 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 31 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 32 | */ |
| 33 | |
| 34 | /*****************************************************************************/ |
| 35 | /*****************************************************************************/ |
| 36 | |
| 37 | inline bool getInlinePInvokeEnabled() |
| 38 | { |
| 39 | #ifdef DEBUG |
| 40 | return JitConfig.JitPInvokeEnabled() && !JitConfig.StressCOMCall(); |
| 41 | #else |
| 42 | return true; |
| 43 | #endif |
| 44 | } |
| 45 | |
| 46 | inline bool getInlinePInvokeCheckEnabled() |
| 47 | { |
| 48 | #ifdef DEBUG |
| 49 | return JitConfig.JitPInvokeCheckEnabled() != 0; |
| 50 | #else |
| 51 | return false; |
| 52 | #endif |
| 53 | } |
| 54 | |
| 55 | // Enforce float narrowing for buggy compilers (notably pre-Whidbey VC) |
| 56 | inline float forceCastToFloat(double d) |
| 57 | { |
| 58 | Volatile<float> f = (float)d; |
| 59 | return f; |
| 60 | } |
| 61 | |
| 62 | // Enforce UInt32 narrowing for buggy compilers (notably Whidbey Beta 2 LKG) |
| 63 | inline UINT32 forceCastToUInt32(double d) |
| 64 | { |
| 65 | Volatile<UINT32> u = (UINT32)d; |
| 66 | return u; |
| 67 | } |
| 68 | |
| 69 | enum RoundLevel |
| 70 | { |
| 71 | ROUND_NEVER = 0, // Never round |
| 72 | ROUND_CMP_CONST = 1, // Round values compared against constants |
| 73 | ROUND_CMP = 2, // Round comparands and return values |
| 74 | ROUND_ALWAYS = 3, // Round always |
| 75 | |
| 76 | COUNT_ROUND_LEVEL, |
| 77 | DEFAULT_ROUND_LEVEL = ROUND_NEVER |
| 78 | }; |
| 79 | |
| 80 | inline RoundLevel getRoundFloatLevel() |
| 81 | { |
| 82 | #ifdef DEBUG |
| 83 | return (RoundLevel)JitConfig.JitRoundFloat(); |
| 84 | #else |
| 85 | return DEFAULT_ROUND_LEVEL; |
| 86 | #endif |
| 87 | } |
| 88 | |
| 89 | /*****************************************************************************/ |
| 90 | /***************************************************************************** |
| 91 | * |
| 92 | * Return the lowest bit that is set (that is, a mask that includes just the lowest set bit) |
| 93 | */ |
| 94 | |
| 95 | template <typename T> |
| 96 | inline T genFindLowestBit(T value) |
| 97 | { |
| 98 | return (value & (0 - value)); |
| 99 | } |
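|  | // A worked example of the two's-complement trick above: for value == 0b01101000, |
|  | // (0 - value) == ...10011000, so (value & (0 - value)) == 0b00001000 -- a mask of |
|  | // just the lowest set bit. For value == 0 the result is 0. |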
| 100 | |
| 101 | /*****************************************************************************/ |
| 102 | /***************************************************************************** |
| 103 | * |
| 104 | * Return the highest bit that is set (that is, a mask that includes just the highest bit). |
| 105 | * TODO-ARM64-Throughput: we should convert these to use the _BitScanReverse() / _BitScanReverse64() |
| 106 | * compiler intrinsics, but our CRT header file intrin.h doesn't define these for ARM64 yet. |
| 107 | */ |
| 108 | |
| 109 | inline unsigned int genFindHighestBit(unsigned int mask) |
| 110 | { |
| 111 | assert(mask != 0); |
| 112 | unsigned int bit = 1U << ((sizeof(unsigned int) * 8) - 1); // start looking at the top |
| 113 | while ((bit & mask) == 0) |
| 114 | { |
| 115 | bit >>= 1; |
| 116 | } |
| 117 | return bit; |
| 118 | } |
| 119 | |
| 120 | inline unsigned __int64 genFindHighestBit(unsigned __int64 mask) |
| 121 | { |
| 122 | assert(mask != 0); |
| 123 | unsigned __int64 bit = 1ULL << ((sizeof(unsigned __int64) * 8) - 1); // start looking at the top |
| 124 | while ((bit & mask) == 0) |
| 125 | { |
| 126 | bit >>= 1; |
| 127 | } |
| 128 | return bit; |
| 129 | } |
| 130 | |
| 131 | #if 0 |
| 132 | // TODO-ARM64-Cleanup: These should probably be the implementation, when intrin.h is updated for ARM64 |
| 133 | inline |
| 134 | unsigned int genFindHighestBit(unsigned int mask) |
| 135 | { |
| 136 | assert(mask != 0); |
| 137 | unsigned int index; |
| 138 | _BitScanReverse(&index, mask); |
| 139 | return 1L << index; |
| 140 | } |
| 141 | |
| 142 | inline |
| 143 | unsigned __int64 genFindHighestBit(unsigned __int64 mask) |
| 144 | { |
| 145 | assert(mask != 0); |
| 146 | unsigned int index; |
| 147 | _BitScanReverse64(&index, mask); |
| 148 | return 1LL << index; |
| 149 | } |
| 150 | #endif // 0 |
| 151 | |
| 152 | /***************************************************************************** |
| 153 | * |
| 154 | * Return true if the given value (of any integral type) has exactly zero or one bits set. |
| 155 | */ |
| 156 | |
| 157 | template <typename T> |
| 158 | inline BOOL genMaxOneBit(T value) |
| 159 | { |
| 160 | return (value & (value - 1)) == 0; |
| 161 | } |
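|  | // The (value & (value - 1)) trick clears the lowest set bit: subtracting 1 flips |
|  | // the lowest set bit and every bit below it, so the result is zero exactly when |
|  | // no other bit was set. E.g. 0b1000 & 0b0111 == 0, while 0b1010 & 0b1001 == 0b1000 != 0. |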
| 162 | |
| 163 | /***************************************************************************** |
| 164 | * |
| 165 | * Return true if the given 32-bit value has exactly zero or one bits set. |
| 166 | */ |
| 167 | |
| 168 | inline BOOL genMaxOneBit(unsigned value) |
| 169 | { |
| 170 | return (value & (value - 1)) == 0; |
| 171 | } |
| 172 | |
| 173 | /***************************************************************************** |
| 174 | * |
| 175 | * Return true if the given 64-bit value has exactly one bit set. |
| 176 | */ |
| 177 | |
| 178 | template <typename T> |
| 179 | inline bool genExactlyOneBit(T value) |
| 180 | { |
| 181 | return ((value != 0) && genMaxOneBit(value)); |
| 182 | } |
| 183 | |
| 184 | /***************************************************************************** |
| 185 | * |
| 186 | * Return true if the given 32-bit value has exactly one bit set. |
| 187 | */ |
| 188 | |
| 189 | inline bool genExactlyOneBit(unsigned value) |
| 190 | { |
| 191 | return ((value != 0) && genMaxOneBit(value)); |
| 192 | } |
| 193 | |
| 194 | /***************************************************************************** |
| 195 | * |
| 196 | * Given a value that has exactly one bit set, return the position of that |
| 197 | * bit, in other words return the logarithm in base 2 of the given value. |
| 198 | */ |
| 199 | inline unsigned genLog2(unsigned value) |
| 200 | { |
| 201 | return BitPosition(value); |
| 202 | } |
| 203 | |
| 204 | // Given an unsigned 64-bit value, returns the lower 32 bits in unsigned format |
| 205 | // |
| 206 | inline unsigned ulo32(unsigned __int64 value) |
| 207 | { |
| 208 | return static_cast<unsigned>(value); |
| 209 | } |
| 210 | |
| 211 | // Given an unsigned 64-bit value, returns the upper 32 bits in unsigned format |
| 212 | // |
| 213 | inline unsigned uhi32(unsigned __int64 value) |
| 214 | { |
| 215 | return static_cast<unsigned>(value >> 32); |
| 216 | } |
| 217 | |
| 218 | /***************************************************************************** |
| 219 | * |
| 220 | * Given a value that has exactly one bit set, return the position of that |
| 221 | * bit, in other words return the logarithm in base 2 of the given value. |
| 222 | */ |
| 223 | |
| 224 | inline unsigned genLog2(unsigned __int64 value) |
| 225 | { |
| 226 | unsigned lo32 = ulo32(value); |
| 227 | unsigned hi32 = uhi32(value); |
| 228 | |
| 229 | if (lo32 != 0) |
| 230 | { |
| 231 | assert(hi32 == 0); |
| 232 | return genLog2(lo32); |
| 233 | } |
| 234 | else |
| 235 | { |
| 236 | return genLog2(hi32) + 32; |
| 237 | } |
| 238 | } |
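|  | // For example, genLog2(1ULL << 40) sees lo32 == 0 and hi32 == (1U << 8), |
|  | // so it returns genLog2(hi32) + 32 == 8 + 32 == 40. |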
| 239 | |
| 240 | /***************************************************************************** |
| 241 | * |
| 242 | * Return the lowest bit that is set in the given register mask. |
| 243 | */ |
| 244 | |
| 245 | inline regMaskTP genFindLowestReg(regMaskTP value) |
| 246 | { |
| 247 | return (regMaskTP)genFindLowestBit(value); |
| 248 | } |
| 249 | |
| 250 | /***************************************************************************** |
| 251 | * |
| 252 | * A rather simple routine that counts the number of set bits in a given number. |
| 253 | */ |
| 254 | |
| 255 | template <typename T> |
| 256 | inline unsigned genCountBits(T bits) |
| 257 | { |
| 258 | unsigned cnt = 0; |
| 259 | |
| 260 | while (bits) |
| 261 | { |
| 262 | cnt++; |
| 263 | bits -= genFindLowestBit(bits); |
| 264 | } |
| 265 | |
| 266 | return cnt; |
| 267 | } |
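|  | // Each iteration removes one set bit via genFindLowestBit, so e.g. |
|  | // genCountBits(0b1011) loops three times and returns 3. |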
| 268 | |
| 269 | /***************************************************************************** |
| 270 | * |
| 271 | * Given three masks (value, end, start), returns the bits of value that lie |
| 272 | * strictly between the set bit of start and the set bit of end. |
| 273 | * |
| 274 | * value[bitNum(end) - 1, bitNum(start) + 1] |
| 275 | */ |
| 276 | |
| 277 | inline unsigned __int64 BitsBetween(unsigned __int64 value, unsigned __int64 end, unsigned __int64 start) |
| 278 | { |
| 279 | assert(start != 0); |
| 280 | assert(start < end); |
| 281 | assert((start & (start - 1)) == 0); |
| 282 | assert((end & (end - 1)) == 0); |
| 283 | |
| 284 | return value & ~((start - 1) | start) & // Ones to the left of set bit in the start mask. |
| 285 | (end - 1); // Ones to the right of set bit in the end mask. |
| 286 | } |
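|  | // For example, with start == 0x02 (bit 1) and end == 0x40 (bit 6), |
|  | // ~((start - 1) | start) == ~0x03 and (end - 1) == 0x3F, so the result is |
|  | // value & 0x3C, i.e. bits 2 through 5 of value. |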
| 287 | |
| 288 | /*****************************************************************************/ |
| 289 | |
| 290 | inline bool jitIsScaleIndexMul(size_t val) |
| 291 | { |
| 292 | switch (val) |
| 293 | { |
| 294 | case 1: |
| 295 | case 2: |
| 296 | case 4: |
| 297 | case 8: |
| 298 | return true; |
| 299 | |
| 300 | default: |
| 301 | return false; |
| 302 | } |
| 303 | } |
| 304 | |
| 305 | // Returns "tree" iff "val" is a valid addressing mode scale shift amount on |
| 306 | // the target architecture. |
| 307 | inline bool jitIsScaleIndexShift(ssize_t val) |
| 308 | { |
| 309 | // It happens that this is the right test for all our current targets: x86, x64 and ARM. |
| 310 | // This test would become target-dependent if we added a new target with a different constraint. |
| 311 | return 0 < val && val < 4; |
| 312 | } |
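|  | // That is, shift amounts 1, 2 and 3 are accepted, matching the scale factors |
|  | // 2, 4 and 8 tested by jitIsScaleIndexMul above (scale 1 needs no shift). |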
| 313 | |
| 314 | /***************************************************************************** |
| 315 | * Returns true if value is between [start..end). |
| 316 | * The comparison is inclusive of start, exclusive of end. |
| 317 | */ |
| 318 | |
| 319 | /* static */ |
| 320 | inline bool Compiler::jitIsBetween(unsigned value, unsigned start, unsigned end) |
| 321 | { |
| 322 | return start <= value && value < end; |
| 323 | } |
| 324 | |
| 325 | /***************************************************************************** |
| 326 | * Returns true if value is between [start..end]. |
| 327 | * The comparison is inclusive of both start and end. |
| 328 | */ |
| 329 | |
| 330 | /* static */ |
| 331 | inline bool Compiler::jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end) |
| 332 | { |
| 333 | return start <= value && value <= end; |
| 334 | } |
| 335 | |
| 336 | /****************************************************************************************** |
| 337 | * Return the EH descriptor for the given region index. |
| 338 | */ |
| 339 | inline EHblkDsc* Compiler::ehGetDsc(unsigned regionIndex) |
| 340 | { |
| 341 | assert(regionIndex < compHndBBtabCount); |
| 342 | return &compHndBBtab[regionIndex]; |
| 343 | } |
| 344 | |
| 345 | /****************************************************************************************** |
| 346 | * Return the EH descriptor index of the enclosing try, for the given region index. |
| 347 | */ |
| 348 | inline unsigned Compiler::ehGetEnclosingTryIndex(unsigned regionIndex) |
| 349 | { |
| 350 | return ehGetDsc(regionIndex)->ebdEnclosingTryIndex; |
| 351 | } |
| 352 | |
| 353 | /****************************************************************************************** |
| 354 | * Return the EH descriptor index of the enclosing handler, for the given region index. |
| 355 | */ |
| 356 | inline unsigned Compiler::ehGetEnclosingHndIndex(unsigned regionIndex) |
| 357 | { |
| 358 | return ehGetDsc(regionIndex)->ebdEnclosingHndIndex; |
| 359 | } |
| 360 | |
| 361 | /****************************************************************************************** |
| 362 | * Return the EH index given a region descriptor. |
| 363 | */ |
| 364 | inline unsigned Compiler::ehGetIndex(EHblkDsc* ehDsc) |
| 365 | { |
| 366 | assert(compHndBBtab <= ehDsc && ehDsc < compHndBBtab + compHndBBtabCount); |
| 367 | return (unsigned)(ehDsc - compHndBBtab); |
| 368 | } |
| 369 | |
| 370 | /****************************************************************************************** |
| 371 | * Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of |
| 372 | * (or nullptr if this block is not in a 'try' region). |
| 373 | */ |
| 374 | inline EHblkDsc* Compiler::ehGetBlockTryDsc(BasicBlock* block) |
| 375 | { |
| 376 | if (!block->hasTryIndex()) |
| 377 | { |
| 378 | return nullptr; |
| 379 | } |
| 380 | |
| 381 | return ehGetDsc(block->getTryIndex()); |
| 382 | } |
| 383 | |
| 384 | /****************************************************************************************** |
| 385 | * Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of |
| 386 | * (or nullptr if this block is not in a filter or handler region). |
| 387 | */ |
| 388 | inline EHblkDsc* Compiler::ehGetBlockHndDsc(BasicBlock* block) |
| 389 | { |
| 390 | if (!block->hasHndIndex()) |
| 391 | { |
| 392 | return nullptr; |
| 393 | } |
| 394 | |
| 395 | return ehGetDsc(block->getHndIndex()); |
| 396 | } |
| 397 | |
| 398 | #if FEATURE_EH_FUNCLETS |
| 399 | |
| 400 | /***************************************************************************** |
| 401 | * Get the FuncInfoDsc for the funclet we are currently generating code for. |
| 402 | * This is only valid during codegen. |
| 403 | * |
| 404 | */ |
| 405 | inline FuncInfoDsc* Compiler::funCurrentFunc() |
| 406 | { |
| 407 | return funGetFunc(compCurrFuncIdx); |
| 408 | } |
| 409 | |
| 410 | /***************************************************************************** |
| 411 | * Change which funclet we are currently generating code for. |
| 412 | * This is only valid after funclets are created. |
| 413 | * |
| 414 | */ |
| 415 | inline void Compiler::funSetCurrentFunc(unsigned funcIdx) |
| 416 | { |
| 417 | assert(fgFuncletsCreated); |
| 418 | assert(FitsIn<unsigned short>(funcIdx)); |
| 419 | noway_assert(funcIdx < compFuncInfoCount); |
| 420 | compCurrFuncIdx = (unsigned short)funcIdx; |
| 421 | } |
| 422 | |
| 423 | /***************************************************************************** |
| 424 | * Get the FuncInfoDsc for the given funclet. |
| 425 | * This is only valid after funclets are created. |
| 426 | * |
| 427 | */ |
| 428 | inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) |
| 429 | { |
| 430 | assert(fgFuncletsCreated); |
| 431 | assert(funcIdx < compFuncInfoCount); |
| 432 | return &compFuncInfos[funcIdx]; |
| 433 | } |
| 434 | |
| 435 | /***************************************************************************** |
| 436 | * Get the funcIdx for the EH funclet that begins with block. |
| 437 | * This is only valid after funclets are created. |
| 438 | * It is only valid for blocks marked with BBF_FUNCLET_BEG because |
| 439 | * otherwise we would have to do a more expensive check to determine |
| 440 | * if this should return the filter funclet or the filter handler funclet. |
| 441 | * |
| 442 | */ |
| 443 | inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) |
| 444 | { |
| 445 | assert(fgFuncletsCreated); |
| 446 | assert(block->bbFlags & BBF_FUNCLET_BEG); |
| 447 | |
| 448 | EHblkDsc* eh = ehGetDsc(block->getHndIndex()); |
| 449 | unsigned int funcIdx = eh->ebdFuncIndex; |
| 450 | if (eh->ebdHndBeg != block) |
| 451 | { |
| 452 | // If this is a filter EH clause, but we want the funclet |
| 453 | // for the filter (not the filter handler), it is the previous one |
| 454 | noway_assert(eh->HasFilter()); |
| 455 | noway_assert(eh->ebdFilter == block); |
| 456 | assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); |
| 457 | assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); |
| 458 | assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); |
| 459 | funcIdx--; |
| 460 | } |
| 461 | |
| 462 | return funcIdx; |
| 463 | } |
| 464 | |
| 465 | #else // !FEATURE_EH_FUNCLETS |
| 466 | |
| 467 | /***************************************************************************** |
| 468 | * Get the FuncInfoDsc for the funclet we are currently generating code for. |
| 469 | * This is only valid during codegen. For non-funclet platforms, this is |
| 470 | * always the root function. |
| 471 | * |
| 472 | */ |
| 473 | inline FuncInfoDsc* Compiler::funCurrentFunc() |
| 474 | { |
| 475 | return &compFuncInfoRoot; |
| 476 | } |
| 477 | |
| 478 | /***************************************************************************** |
| 479 | * Change which funclet we are currently generating code for. |
| 480 | * This is only valid after funclets are created. |
| 481 | * |
| 482 | */ |
| 483 | inline void Compiler::funSetCurrentFunc(unsigned funcIdx) |
| 484 | { |
| 485 | assert(funcIdx == 0); |
| 486 | } |
| 487 | |
| 488 | /***************************************************************************** |
| 489 | * Get the FuncInfoDsc for the given funclet. |
| 490 | * This is only valid after funclets are created. |
| 491 | * |
| 492 | */ |
| 493 | inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) |
| 494 | { |
| 495 | assert(funcIdx == 0); |
| 496 | return &compFuncInfoRoot; |
| 497 | } |
| 498 | |
| 499 | /***************************************************************************** |
| 500 | * No funclets, so always 0. |
| 501 | * |
| 502 | */ |
| 503 | inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) |
| 504 | { |
| 505 | return 0; |
| 506 | } |
| 507 | |
| 508 | #endif // !FEATURE_EH_FUNCLETS |
| 509 | |
| 510 | //------------------------------------------------------------------------------ |
| 511 | // genRegNumFromMask : Maps a single register mask to a register number. |
| 512 | // |
| 513 | // Arguments: |
| 514 | // mask - the register mask |
| 515 | // |
| 516 | // Return Value: |
| 517 | // The number of the register contained in the mask. |
| 518 | // |
| 519 | // Assumptions: |
| 520 | // The mask contains one and only one register. |
| 521 | |
| 522 | inline regNumber genRegNumFromMask(regMaskTP mask) |
| 523 | { |
| 524 | assert(mask != 0); // Must have one bit set, so can't have a mask of zero |
| 525 | |
| 526 | /* Convert the mask to a register number */ |
| 527 | |
| 528 | regNumber regNum = (regNumber)genLog2(mask); |
| 529 | |
| 530 | /* Make sure we got it right */ |
| 531 | |
| 532 | assert(genRegMask(regNum) == mask); |
| 533 | |
| 534 | return regNum; |
| 535 | } |
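|  | // For example, a mask with only bit 3 set yields (regNumber)genLog2(0x8), |
|  | // i.e. register number 3; which architectural register that denotes is |
|  | // target-specific. |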
| 536 | |
| 537 | //------------------------------------------------------------------------------ |
| 538 | // genSmallTypeCanRepresentValue: Checks if a value can be represented by a given small type. |
| 539 | // |
| 540 | // Arguments: |
| 541 | // value - the value to check |
| 542 | // type - the type |
| 543 | // |
| 544 | // Return Value: |
| 545 | // True if the value is representable, false otherwise. |
| 546 | |
| 547 | inline bool genSmallTypeCanRepresentValue(var_types type, ssize_t value) |
| 548 | { |
| 549 | switch (type) |
| 550 | { |
| 551 | case TYP_UBYTE: |
| 552 | case TYP_BOOL: |
| 553 | return FitsIn<UINT8>(value); |
| 554 | case TYP_BYTE: |
| 555 | return FitsIn<INT8>(value); |
| 556 | case TYP_USHORT: |
| 557 | return FitsIn<UINT16>(value); |
| 558 | case TYP_SHORT: |
| 559 | return FitsIn<INT16>(value); |
| 560 | default: |
| 561 | unreached(); |
| 562 | } |
| 563 | } |
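|  | // For example, genSmallTypeCanRepresentValue(TYP_BYTE, 200) is false |
|  | // (INT8 holds -128..127), while genSmallTypeCanRepresentValue(TYP_UBYTE, 200) |
|  | // is true (UINT8 holds 0..255). |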
| 564 | |
| 565 | /***************************************************************************** |
| 566 | * |
| 567 | * Return the size in bytes of the given type. |
| 568 | */ |
| 569 | |
| 570 | extern const BYTE genTypeSizes[TYP_COUNT]; |
| 571 | |
| 572 | template <class T> |
| 573 | inline unsigned genTypeSize(T type) |
| 574 | { |
| 575 | assert((unsigned)TypeGet(type) < _countof(genTypeSizes)); |
| 576 | |
| 577 | return genTypeSizes[TypeGet(type)]; |
| 578 | } |
| 579 | |
| 580 | /***************************************************************************** |
| 581 | * |
| 582 | * Return the "stack slot count" of the given type. |
| 583 | * Returns 1 for 32-bit types and 2 for 64-bit types. |
| 584 | */ |
| 585 | |
| 586 | extern const BYTE genTypeStSzs[TYP_COUNT]; |
| 587 | |
| 588 | inline unsigned genTypeStSz(var_types type) |
| 589 | { |
| 590 | assert((unsigned)type < _countof(genTypeStSzs)); |
| 591 | |
| 592 | return genTypeStSzs[type]; |
| 593 | } |
| 594 | |
| 600 | /***************************************************************************** |
| 601 | * |
| 602 | * The following function maps a 'precise' type to an actual type as seen |
| 603 | * by the VM (for example, 'byte' maps to 'int'). |
| 604 | */ |
| 605 | |
| 606 | extern const BYTE genActualTypes[TYP_COUNT]; |
| 607 | |
| 608 | inline var_types genActualType(var_types type) |
| 609 | { |
| 610 | /* Spot check to make certain the table is in synch with the enum */ |
| 611 | |
| 612 | assert(genActualTypes[TYP_DOUBLE] == TYP_DOUBLE); |
| 613 | assert(genActualTypes[TYP_REF] == TYP_REF); |
| 614 | |
| 615 | assert((unsigned)type < sizeof(genActualTypes)); |
| 616 | return (var_types)genActualTypes[type]; |
| 617 | } |
| 618 | |
| 619 | /*****************************************************************************/ |
| 620 | |
| 621 | inline var_types genUnsignedType(var_types type) |
| 622 | { |
| 623 | /* Force signed types into corresponding unsigned type */ |
| 624 | |
| 625 | switch (type) |
| 626 | { |
| 627 | case TYP_BYTE: |
| 628 | type = TYP_UBYTE; |
| 629 | break; |
| 630 | case TYP_SHORT: |
| 631 | type = TYP_USHORT; |
| 632 | break; |
| 633 | case TYP_INT: |
| 634 | type = TYP_UINT; |
| 635 | break; |
| 636 | case TYP_LONG: |
| 637 | type = TYP_ULONG; |
| 638 | break; |
| 639 | default: |
| 640 | break; |
| 641 | } |
| 642 | |
| 643 | return type; |
| 644 | } |
| 645 | |
| 646 | /*****************************************************************************/ |
| 647 | |
| 648 | inline var_types genSignedType(var_types type) |
| 649 | { |
| 650 | /* Force non-small unsigned type into corresponding signed type */ |
| 651 | /* Note that we leave the small types alone */ |
| 652 | |
| 653 | switch (type) |
| 654 | { |
| 655 | case TYP_UINT: |
| 656 | type = TYP_INT; |
| 657 | break; |
| 658 | case TYP_ULONG: |
| 659 | type = TYP_LONG; |
| 660 | break; |
| 661 | default: |
| 662 | break; |
| 663 | } |
| 664 | |
| 665 | return type; |
| 666 | } |
| 667 | |
| 668 | /***************************************************************************** |
| 669 | * Can this type be passed as a parameter in a register? |
| 670 | */ |
| 671 | |
| 672 | inline bool isRegParamType(var_types type) |
| 673 | { |
| 674 | #if defined(_TARGET_X86_) |
| 675 | return (type <= TYP_INT || type == TYP_REF || type == TYP_BYREF); |
| 676 | #else // !_TARGET_X86_ |
| 677 | return true; |
| 678 | #endif // !_TARGET_X86_ |
| 679 | } |
| 680 | |
| 681 | #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) |
| 682 | /*****************************************************************************/ |
| 683 | // Returns true if 'type' is a struct that can be enregistered for call args |
| 684 | // or can be returned by value in multiple registers. |
| 685 | // if 'type' is not a struct the return value will be false. |
| 686 | // |
| 687 | // Arguments: |
| 688 | // type - the basic jit var_type for the item being queried |
| 689 | // typeClass - the handle for the struct when 'type' is TYP_STRUCT |
| 690 | // typeSize - Out param (if non-null) is updated with the size of 'type'. |
| 691 | //    forReturn - this is true when we are asking about a GT_RETURN context; |
| 692 | // this is false when we are asking about an argument context |
| 693 | //    isVarArg  - whether or not this is a vararg fixed arg or variable argument; |
| 694 | //                if so, on ARM64 Windows getArgTypeForStruct will ignore HFA types |
| 696 | // |
| 697 | inline bool Compiler::VarTypeIsMultiByteAndCanEnreg( |
| 698 | var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg) |
| 699 | { |
| 700 | bool result = false; |
| 701 | unsigned size = 0; |
| 702 | |
| 703 | if (varTypeIsStruct(type)) |
| 704 | { |
| 705 | size = info.compCompHnd->getClassSize(typeClass); |
| 706 | if (forReturn) |
| 707 | { |
| 708 | structPassingKind howToReturnStruct; |
| 709 | type = getReturnTypeForStruct(typeClass, &howToReturnStruct, size); |
| 710 | } |
| 711 | else |
| 712 | { |
| 713 | structPassingKind howToPassStruct; |
| 714 | type = getArgTypeForStruct(typeClass, &howToPassStruct, isVarArg, size); |
| 715 | } |
| 716 | if (type != TYP_UNKNOWN) |
| 717 | { |
| 718 | result = true; |
| 719 | } |
| 720 | } |
| 721 | else |
| 722 | { |
| 723 | size = genTypeSize(type); |
| 724 | } |
| 725 | |
| 726 | if (typeSize != nullptr) |
| 727 | { |
| 728 | *typeSize = size; |
| 729 | } |
| 730 | |
| 731 | return result; |
| 732 | } |
| 733 | #endif //_TARGET_AMD64_ || _TARGET_ARM64_ |
| 734 | |
| 735 | /*****************************************************************************/ |
| 736 | |
| 737 | #ifdef DEBUG |
| 738 | |
| 739 | inline const char* varTypeGCstring(var_types type) |
| 740 | { |
| 741 | switch (type) |
| 742 | { |
| 743 | case TYP_REF: |
| 744 | return "gcr" ; |
| 745 | case TYP_BYREF: |
| 746 | return "byr" ; |
| 747 | default: |
| 748 | return "non" ; |
| 749 | } |
| 750 | } |
| 751 | |
| 752 | #endif |
| 753 | |
| 754 | /*****************************************************************************/ |
| 755 | |
| 756 | const char* varTypeName(var_types); |
| 757 | |
| 758 | /***************************************************************************** |
| 759 | * |
| 760 | * Helpers to pull big-endian values out of a byte stream. |
| 761 | */ |
| 762 | |
| 763 | inline unsigned genGetU1(const BYTE* addr) |
| 764 | { |
| 765 | return addr[0]; |
| 766 | } |
| 767 | |
| 768 | inline signed genGetI1(const BYTE* addr) |
| 769 | { |
| 770 | return (signed char)addr[0]; |
| 771 | } |
| 772 | |
| 773 | inline unsigned genGetU2(const BYTE* addr) |
| 774 | { |
| 775 | return (addr[0] << 8) | addr[1]; |
| 776 | } |
| 777 | |
| 778 | inline signed genGetI2(const BYTE* addr) |
| 779 | { |
| 780 | return (signed short)((addr[0] << 8) | addr[1]); |
| 781 | } |
| 782 | |
| 783 | inline unsigned genGetU4(const BYTE* addr) |
| 784 | { |
| 785 | return (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; |
| 786 | } |
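|  | // For example, for the big-endian byte stream { 0x12, 0x34, 0x56, 0x78 }, |
|  | // genGetU4 returns 0x12345678 and genGetU2 returns 0x1234. |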
| 787 | |
| 788 | /*****************************************************************************/ |
| 789 | // Helpers to pull little-endian values out of a byte stream. |
| 790 | |
| 791 | inline unsigned __int8 getU1LittleEndian(const BYTE* ptr) |
| 792 | { |
| 793 | return *(UNALIGNED unsigned __int8*)ptr; |
| 794 | } |
| 795 | |
| 796 | inline unsigned __int16 getU2LittleEndian(const BYTE* ptr) |
| 797 | { |
| 798 | return GET_UNALIGNED_VAL16(ptr); |
| 799 | } |
| 800 | |
| 801 | inline unsigned __int32 getU4LittleEndian(const BYTE* ptr) |
| 802 | { |
| 803 | return GET_UNALIGNED_VAL32(ptr); |
| 804 | } |
| 805 | |
| 806 | inline signed __int8 getI1LittleEndian(const BYTE* ptr) |
| 807 | { |
| 808 | return *(UNALIGNED signed __int8*)ptr; |
| 809 | } |
| 810 | |
| 811 | inline signed __int16 getI2LittleEndian(const BYTE* ptr) |
| 812 | { |
| 813 | return GET_UNALIGNED_VAL16(ptr); |
| 814 | } |
| 815 | |
| 816 | inline signed __int32 getI4LittleEndian(const BYTE* ptr) |
| 817 | { |
| 818 | return GET_UNALIGNED_VAL32(ptr); |
| 819 | } |
| 820 | |
| 821 | inline signed __int64 getI8LittleEndian(const BYTE* ptr) |
| 822 | { |
| 823 | return GET_UNALIGNED_VAL64(ptr); |
| 824 | } |
| 825 | |
| 826 | inline float getR4LittleEndian(const BYTE* ptr) |
| 827 | { |
| 828 | __int32 val = getI4LittleEndian(ptr); |
| 829 | return *(float*)&val; |
| 830 | } |
| 831 | |
| 832 | inline double getR8LittleEndian(const BYTE* ptr) |
| 833 | { |
| 834 | __int64 val = getI8LittleEndian(ptr); |
| 835 | return *(double*)&val; |
| 836 | } |
| 837 | |
| 838 | /***************************************************************************** |
| 839 | * |
| 840 | * Return the normalized index to use in the EXPSET_TP for the CSE with |
| 841 | * the given CSE index. |
| 842 | * Each GenTree has the following field: |
| 843 | * signed char gtCSEnum; // 0 or the CSE index (negated if def) |
| 844 | * So zero is reserved to mean this node is not a CSE |
| 845 | * and positive values indicate CSE uses and negative values indicate CSE defs. |
| 846 | * The caller of this method must pass a non-zero positive value. |
| 847 | * This precondition is checked by the assert on the first line of this method. |
| 848 | */ |
| 849 | |
| 850 | inline unsigned int genCSEnum2bit(unsigned index) |
| 851 | { |
| 852 | assert((index > 0) && (index <= EXPSET_SZ)); |
| 853 | |
| 854 | return (index - 1); |
| 855 | } |
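|  | // For example, CSE index 1 (the first CSE) maps to bit position 0. |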
| 856 | |
| 857 | #ifdef DEBUG |
| 858 | const char* genES2str(BitVecTraits* traits, EXPSET_TP set); |
| 859 | const char* refCntWtd2str(unsigned refCntWtd); |
| 860 | #endif |
| 861 | |
| 862 | /* |
| 863 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 864 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 865 | XX GenTree XX |
| 866 | XX Inline functions XX |
| 867 | XX XX |
| 868 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 869 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 870 | */ |
| 871 | |
| 872 | void* GenTree::operator new(size_t sz, Compiler* comp, genTreeOps oper) |
| 873 | { |
| 874 | #if SMALL_TREE_NODES |
| 875 | size_t size = GenTree::s_gtNodeSizes[oper]; |
| 876 | #else |
| 877 | size_t size = TREE_NODE_SZ_LARGE; |
| 878 | #endif |
| 879 | |
| 880 | #if MEASURE_NODE_SIZE |
| 881 | genNodeSizeStats.genTreeNodeCnt += 1; |
| 882 | genNodeSizeStats.genTreeNodeSize += size; |
| 883 | genNodeSizeStats.genTreeNodeActualSize += sz; |
| 884 | |
| 885 | genNodeSizeStatsPerFunc.genTreeNodeCnt += 1; |
| 886 | genNodeSizeStatsPerFunc.genTreeNodeSize += size; |
| 887 | genNodeSizeStatsPerFunc.genTreeNodeActualSize += sz; |
| 888 | #endif // MEASURE_NODE_SIZE |
| 889 | |
| 890 | assert(size >= sz); |
| 891 | return comp->getAllocator(CMK_ASTNode).allocate<char>(size); |
| 892 | } |
| 893 | |
| 894 | // GenTree constructor |
| 895 | inline GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode)) |
| 896 | { |
| 897 | gtOper = oper; |
| 898 | gtType = type; |
| 899 | gtFlags = 0; |
| 900 | gtLIRFlags = 0; |
| 901 | #ifdef DEBUG |
| 902 | gtDebugFlags = 0; |
| 903 | #endif // DEBUG |
| 904 | #if FEATURE_ANYCSE |
| 905 | gtCSEnum = NO_CSE; |
| 906 | #endif // FEATURE_ANYCSE |
| 907 | #if ASSERTION_PROP |
| 908 | ClearAssertion(); |
| 909 | #endif |
| 910 | |
| 911 | gtNext = nullptr; |
| 912 | gtPrev = nullptr; |
| 913 | gtRegNum = REG_NA; |
| 914 | INDEBUG(gtRegTag = GT_REGTAG_NONE;) |
| 915 | |
| 916 | INDEBUG(gtCostsInitialized = false;) |
| 917 | |
| 918 | #ifdef DEBUG |
| 919 | #if SMALL_TREE_NODES |
| 920 | size_t size = GenTree::s_gtNodeSizes[oper]; |
| 921 | if (size == TREE_NODE_SZ_SMALL && !largeNode) |
| 922 | { |
| 923 | gtDebugFlags |= GTF_DEBUG_NODE_SMALL; |
| 924 | } |
| 925 | else if (size == TREE_NODE_SZ_LARGE || largeNode) |
| 926 | { |
| 927 | gtDebugFlags |= GTF_DEBUG_NODE_LARGE; |
| 928 | } |
| 929 | else |
| 930 | { |
| 931 | assert(!"bogus node size" ); |
| 932 | } |
| 933 | #endif |
| 934 | #endif |
| 935 | |
| 936 | #if COUNT_AST_OPERS |
| 937 | InterlockedIncrement(&s_gtNodeCounts[oper]); |
| 938 | #endif |
| 939 | |
| 940 | #ifdef DEBUG |
| 941 | gtSeqNum = 0; |
| 942 | gtTreeID = JitTls::GetCompiler()->compGenTreeID++; |
| 943 | gtVNPair.SetBoth(ValueNumStore::NoVN); |
| 944 | gtRegTag = GT_REGTAG_NONE; |
| 945 | gtOperSave = GT_NONE; |
| 946 | #endif |
| 947 | } |
| 948 | |
| 949 | /*****************************************************************************/ |
| 950 | |
| 951 | inline GenTreeStmt* Compiler::gtNewStmt(GenTree* expr, IL_OFFSETX offset) |
| 952 | { |
| 953 | /* NOTE - GT_STMT is now a small node in retail */ |
| 954 | |
| 955 | GenTreeStmt* stmt = new (this, GT_STMT) GenTreeStmt(expr, offset); |
| 956 | |
| 957 | return stmt; |
| 958 | } |
| 959 | |
| 960 | /*****************************************************************************/ |
| 961 | |
| 962 | inline GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications) |
| 963 | { |
| 964 | assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0); |
| 965 | assert((GenTree::OperKind(oper) & GTK_EXOP) == |
| 966 | 0); // Can't use this to construct any types that extend unary/binary operator. |
| 967 | assert(op1 != nullptr || oper == GT_PHI || oper == GT_RETFILT || oper == GT_NOP || |
| 968 | (oper == GT_RETURN && type == TYP_VOID)); |
| 969 | |
| 970 | if (doSimplifications) |
| 971 | { |
| 972 | // We do some simplifications here. |
| 973 | // If this gets to be too many, try a switch... |
| 974 | // TODO-Cleanup: With the factoring out of array bounds checks, it should not be the |
| 975 | // case that we need to check for the array index case here, but without this check |
| 976 | // we get failures (see for example jit\Directed\Languages\Python\test_methods_d.exe) |
| 977 | if (oper == GT_IND) |
| 978 | { |
| 979 | // IND(ADDR(IND(x))) == IND(x) |
| 980 | if (op1->gtOper == GT_ADDR) |
| 981 | { |
| 982 | if (op1->gtOp.gtOp1->gtOper == GT_IND && (op1->gtOp.gtOp1->gtFlags & GTF_IND_ARR_INDEX) == 0) |
| 983 | { |
| 984 | op1 = op1->gtOp.gtOp1->gtOp.gtOp1; |
| 985 | } |
| 986 | } |
| 987 | } |
| 988 | else if (oper == GT_ADDR) |
| 989 | { |
| 990 | // if "x" is not an array index, ADDR(IND(x)) == x |
| 991 | if (op1->gtOper == GT_IND && (op1->gtFlags & GTF_IND_ARR_INDEX) == 0) |
| 992 | { |
| 993 | return op1->gtOp.gtOp1; |
| 994 | } |
| 995 | } |
| 996 | } |
| 997 | |
| 998 | GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, nullptr); |
| 999 | |
| 1000 | // |
| 1001 | // the GT_ADDR of a Local Variable implies GTF_ADDR_ONSTACK |
| 1002 | // |
| 1003 | if ((oper == GT_ADDR) && (op1->OperGet() == GT_LCL_VAR)) |
| 1004 | { |
| 1005 | node->gtFlags |= GTF_ADDR_ONSTACK; |
| 1006 | } |
| 1007 | |
| 1008 | return node; |
| 1009 | } |
| 1010 | |
| 1011 | // Returns an opcode that is of the largest node size in use. |
| 1012 | inline genTreeOps LargeOpOpcode() |
| 1013 | { |
| 1014 | #if SMALL_TREE_NODES |
| 1015 | // Allocate a large node |
| 1016 | assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); |
| 1017 | #endif |
| 1018 | return GT_CALL; |
| 1019 | } |
| 1020 | |
| 1021 | /****************************************************************************** |
| 1022 | * |
| 1023 | * Used to create nodes that may later be morphed to another (big) operator. |
| 1024 | */ |
| 1025 | |
| 1026 | inline GenTree* Compiler::gtNewLargeOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2) |
| 1027 | { |
| 1028 | assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0); |
| 1029 | assert((GenTree::OperKind(oper) & GTK_EXOP) == |
| 1030 | 0); // Can't use this to construct any types that extend unary/binary operator. |
| 1031 | #if SMALL_TREE_NODES |
| 1032 | // Allocate a large node |
| 1033 | |
| 1034 | assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL); |
| 1035 | |
| 1036 | GenTree* node = new (this, LargeOpOpcode()) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); |
| 1037 | #else |
| 1038 | GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, op2); |
| 1039 | #endif |
| 1040 | |
| 1041 | return node; |
| 1042 | } |
| 1043 | |
| 1044 | /***************************************************************************** |
| 1045 | * |
| 1046 | * Allocates an integer constant entry that represents a handle (something |
| 1047 | * that may need to be fixed up). |
| 1048 | */ |
| 1049 | |
| 1050 | inline GenTree* Compiler::gtNewIconHandleNode(size_t value, unsigned flags, FieldSeqNode* fields) |
| 1051 | { |
| 1052 | GenTree* node; |
| 1053 | assert((flags & (GTF_ICON_HDL_MASK | GTF_ICON_FIELD_OFF)) != 0); |
| 1054 | |
| 1055 | // Interpret "fields == NULL" as "not a field." |
| 1056 | if (fields == nullptr) |
| 1057 | { |
| 1058 | fields = FieldSeqStore::NotAField(); |
| 1059 | } |
| 1060 | |
| 1061 | #if defined(LATE_DISASM) |
| 1062 | node = new (this, LargeOpOpcode()) GenTreeIntCon(TYP_I_IMPL, value, fields DEBUGARG(/*largeNode*/ true)); |
| 1063 | #else |
| 1064 | node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, value, fields); |
| 1065 | #endif |
| 1066 | node->gtFlags |= flags; |
| 1067 | return node; |
| 1068 | } |
| 1069 | |
| 1070 | /***************************************************************************** |
| 1071 | * |
| 1072 | * It may not be allowed to embed HANDLEs directly into the JITed code (e.g., |
| 1073 | * as arguments to JIT helpers). Get a corresponding value that can be embedded. |
| 1074 | * These are versions for each specific type of HANDLE |
| 1075 | */ |
| 1076 | |
| 1077 | inline GenTree* Compiler::gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd) |
| 1078 | { |
| 1079 | void *embedScpHnd, *pEmbedScpHnd; |
| 1080 | |
| 1081 | embedScpHnd = (void*)info.compCompHnd->embedModuleHandle(scpHnd, &pEmbedScpHnd); |
| 1082 | |
| 1083 | assert((!embedScpHnd) != (!pEmbedScpHnd)); |
| 1084 | |
| 1085 | return gtNewIconEmbHndNode(embedScpHnd, pEmbedScpHnd, GTF_ICON_SCOPE_HDL, scpHnd); |
| 1086 | } |
| 1087 | |
| 1088 | //----------------------------------------------------------------------------- |
| 1089 | |
| 1090 | inline GenTree* Compiler::gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd) |
| 1091 | { |
| 1092 | void *embedClsHnd, *pEmbedClsHnd; |
| 1093 | |
| 1094 | embedClsHnd = (void*)info.compCompHnd->embedClassHandle(clsHnd, &pEmbedClsHnd); |
| 1095 | |
| 1096 | assert((!embedClsHnd) != (!pEmbedClsHnd)); |
| 1097 | |
| 1098 | return gtNewIconEmbHndNode(embedClsHnd, pEmbedClsHnd, GTF_ICON_CLASS_HDL, clsHnd); |
| 1099 | } |
| 1100 | |
| 1101 | //----------------------------------------------------------------------------- |
| 1102 | |
| 1103 | inline GenTree* Compiler::gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd) |
| 1104 | { |
| 1105 | void *embedMethHnd, *pEmbedMethHnd; |
| 1106 | |
| 1107 | embedMethHnd = (void*)info.compCompHnd->embedMethodHandle(methHnd, &pEmbedMethHnd); |
| 1108 | |
| 1109 | assert((!embedMethHnd) != (!pEmbedMethHnd)); |
| 1110 | |
| 1111 | return gtNewIconEmbHndNode(embedMethHnd, pEmbedMethHnd, GTF_ICON_METHOD_HDL, methHnd); |
| 1112 | } |
| 1113 | |
| 1114 | //----------------------------------------------------------------------------- |
| 1115 | |
| 1116 | inline GenTree* Compiler::gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd) |
| 1117 | { |
| 1118 | void *embedFldHnd, *pEmbedFldHnd; |
| 1119 | |
| 1120 | embedFldHnd = (void*)info.compCompHnd->embedFieldHandle(fldHnd, &pEmbedFldHnd); |
| 1121 | |
| 1122 | assert((!embedFldHnd) != (!pEmbedFldHnd)); |
| 1123 | |
| 1124 | return gtNewIconEmbHndNode(embedFldHnd, pEmbedFldHnd, GTF_ICON_FIELD_HDL, fldHnd); |
| 1125 | } |
| 1126 | |
| 1127 | /*****************************************************************************/ |
| 1128 | |
| 1129 | //------------------------------------------------------------------------------ |
| 1130 | // gtNewHelperCallNode : Helper to create a helper call node. |
| 1131 | // |
| 1132 | // |
| 1133 | // Arguments: |
| 1134 | // helper - Call helper |
| 1135 | // type - Type of the node |
| 1136 | // args - Call args |
| 1137 | // |
| 1138 | // Return Value: |
| 1139 | // New CT_HELPER node |
| 1140 | |
| 1141 | inline GenTreeCall* Compiler::gtNewHelperCallNode(unsigned helper, var_types type, GenTreeArgList* args) |
| 1142 | { |
| 1143 | unsigned flags = s_helperCallProperties.NoThrow((CorInfoHelpFunc)helper) ? 0 : GTF_EXCEPT; |
| 1144 | GenTreeCall* result = gtNewCallNode(CT_HELPER, eeFindHelper(helper), type, args); |
| 1145 | result->gtFlags |= flags; |
| 1146 | |
| 1147 | #if DEBUG |
| 1148 | // Helper calls are never candidates. |
| 1149 | |
| 1150 | result->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; |
| 1151 | #endif |
| 1152 | |
| 1153 | return result; |
| 1154 | } |
| 1155 | |
| 1156 | //------------------------------------------------------------------------ |
| 1157 | // gtNewAllocObjNode: A little helper to create an object allocation node. |
| 1158 | // |
| 1159 | // Arguments: |
| 1160 | // helper - Value returned by ICorJitInfo::getNewHelper |
| 1161 | // helperHasSideEffects - True iff allocation helper has side effects |
| 1162 | // clsHnd - Corresponding class handle |
| 1163 | // type - Tree return type (e.g. TYP_REF) |
| 1164 | // op1 - Node containing an address of VtablePtr |
| 1165 | // |
| 1166 | // Return Value: |
| 1167 | // Returns GT_ALLOCOBJ node that will be later morphed into an |
| 1168 | // allocation helper call or local variable allocation on the stack. |
| 1169 | |
| 1170 | inline GenTreeAllocObj* Compiler::gtNewAllocObjNode( |
| 1171 | unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1) |
| 1172 | { |
| 1173 | GenTreeAllocObj* node = new (this, GT_ALLOCOBJ) GenTreeAllocObj(type, helper, helperHasSideEffects, clsHnd, op1); |
| 1174 | return node; |
| 1175 | } |
| 1176 | |
| 1177 | //------------------------------------------------------------------------ |
| 1178 | // gtNewRuntimeLookup: Helper to create a runtime lookup node |
| 1179 | // |
| 1180 | // Arguments: |
| 1181 | // hnd - generic handle being looked up |
| 1182 | // hndTyp - type of the generic handle |
| 1183 | // tree - tree for the lookup |
| 1184 | // |
| 1185 | // Return Value: |
| 1186 | // New GenTreeRuntimeLookup node. |
| 1187 | |
| 1188 | inline GenTree* Compiler::gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree) |
| 1189 | { |
| 1190 | assert(tree != nullptr); |
| 1191 | GenTree* node = new (this, GT_RUNTIMELOOKUP) GenTreeRuntimeLookup(hnd, hndTyp, tree); |
| 1192 | return node; |
| 1193 | } |
| 1194 | |
| 1195 | /*****************************************************************************/ |
| 1196 | |
| 1197 | inline GenTree* Compiler::gtNewCodeRef(BasicBlock* block) |
| 1198 | { |
| 1199 | GenTree* node = new (this, GT_LABEL) GenTreeLabel(block); |
| 1200 | return node; |
| 1201 | } |
| 1202 | |
| 1203 | /***************************************************************************** |
| 1204 | * |
| 1205 | * A little helper to create a data member reference node. |
| 1206 | */ |
| 1207 | |
| 1208 | inline GenTree* Compiler::gtNewFieldRef(var_types typ, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj, DWORD offset) |
| 1209 | { |
| 1210 | #if SMALL_TREE_NODES |
| 1211 | /* 'GT_FIELD' nodes may later get transformed into 'GT_IND' */ |
| 1212 | assert(GenTree::s_gtNodeSizes[GT_IND] <= GenTree::s_gtNodeSizes[GT_FIELD]); |
| 1213 | #endif // SMALL_TREE_NODES |
| 1214 | |
| 1215 | GenTree* tree = new (this, GT_FIELD) GenTreeField(typ, obj, fldHnd, offset); |
| 1216 | |
| 1217 | // If "obj" is the address of a local, note that a field of that struct local has been accessed. |
| 1218 | if (obj != nullptr && obj->OperGet() == GT_ADDR && varTypeIsStruct(obj->gtOp.gtOp1) && |
| 1219 | obj->gtOp.gtOp1->OperGet() == GT_LCL_VAR) |
| 1220 | { |
| 1221 | unsigned lclNum = obj->gtOp.gtOp1->gtLclVarCommon.gtLclNum; |
| 1222 | lvaTable[lclNum].lvFieldAccessed = 1; |
| 1223 | #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) |
| 1224 | // These structs are passed by reference; we should probably be able to treat these |
| 1225 | // as non-global refs, but downstream logic expects these to be marked this way. |
| 1226 | if (lvaTable[lclNum].lvIsParam) |
| 1227 | { |
| 1228 | tree->gtFlags |= GTF_GLOB_REF; |
| 1229 | } |
| 1230 | #endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) |
| 1231 | } |
| 1232 | else |
| 1233 | { |
| 1234 | tree->gtFlags |= GTF_GLOB_REF; |
| 1235 | } |
| 1236 | |
| 1237 | return tree; |
| 1238 | } |
| 1239 | |
| 1240 | /***************************************************************************** |
| 1241 | * |
| 1242 | * A little helper to create an array index node. |
| 1243 | */ |
| 1244 | |
| 1245 | inline GenTree* Compiler::gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp) |
| 1246 | { |
| 1247 | GenTreeIndex* gtIndx = new (this, GT_INDEX) GenTreeIndex(typ, arrayOp, indexOp, genTypeSize(typ)); |
| 1248 | |
| 1249 | return gtIndx; |
| 1250 | } |
| 1251 | |
| 1252 | //------------------------------------------------------------------------------ |
| 1253 | // gtNewArrLen : Helper to create an array length node. |
| 1254 | // |
| 1255 | // |
| 1256 | // Arguments: |
| 1257 | // typ - Type of the node |
| 1258 | // arrayOp - Array node |
| 1259 | // lenOffset - Offset of the length field |
| 1260 | // |
| 1261 | // Return Value: |
| 1262 | // New GT_ARR_LENGTH node |
| 1263 | |
| 1264 | inline GenTreeArrLen* Compiler::gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset) |
| 1265 | { |
| 1266 | GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(typ, arrayOp, lenOffset); |
| 1267 | static_assert_no_msg(GTF_ARRLEN_NONFAULTING == GTF_IND_NONFAULTING); |
| 1268 | arrLen->SetIndirExceptionFlags(this); |
| 1269 | return arrLen; |
| 1270 | } |
| 1271 | |
| 1272 | //------------------------------------------------------------------------------ |
| 1273 | // gtNewIndir : Helper to create an indirection node. |
| 1274 | // |
| 1275 | // Arguments: |
| 1276 | // typ - Type of the node |
| 1277 | // addr - Address of the indirection |
| 1278 | // |
| 1279 | // Return Value: |
| 1280 | // New GT_IND node |
| 1281 | |
| 1282 | inline GenTree* Compiler::gtNewIndir(var_types typ, GenTree* addr) |
| 1283 | { |
| 1284 | GenTree* indir = gtNewOperNode(GT_IND, typ, addr); |
| 1285 | indir->SetIndirExceptionFlags(this); |
| 1286 | return indir; |
| 1287 | } |
| 1288 | |
| 1289 | /***************************************************************************** |
| 1290 | * |
| 1291 | * Create (and check for) a "nothing" node, i.e. a node that doesn't produce |
| 1292 | * any code. We currently use a "nop" node of type void for this purpose. |
| 1293 | */ |
| 1294 | |
| 1295 | inline GenTree* Compiler::gtNewNothingNode() |
| 1296 | { |
| 1297 | return new (this, GT_NOP) GenTreeOp(GT_NOP, TYP_VOID); |
| 1298 | } |
| 1299 | /*****************************************************************************/ |
| 1300 | |
| 1301 | inline bool GenTree::IsNothingNode() const |
| 1302 | { |
| 1303 | return (gtOper == GT_NOP && gtType == TYP_VOID); |
| 1304 | } |
| 1305 | |
| 1306 | /***************************************************************************** |
| 1307 | * |
| 1308 | * Change the given node to a NOP - May be later changed to a GT_COMMA |
| 1309 | * |
| 1310 | *****************************************************************************/ |
| 1311 | |
| 1312 | inline void GenTree::gtBashToNOP() |
| 1313 | { |
| 1314 | ChangeOper(GT_NOP); |
| 1315 | |
| 1316 | gtType = TYP_VOID; |
| 1317 | gtOp.gtOp1 = gtOp.gtOp2 = nullptr; |
| 1318 | |
| 1319 | gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); |
| 1320 | } |
| 1321 | |
| 1322 | // Returns a new arg placeholder node. It does not do anything, but it has a type associated |
| 1323 | // with it so we can keep track of register arguments in lists associated with call nodes. |
| 1324 | |
| 1325 | inline GenTree* Compiler::gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd) |
| 1326 | { |
| 1327 | GenTree* node = new (this, GT_ARGPLACE) GenTreeArgPlace(type, clsHnd); |
| 1328 | return node; |
| 1329 | } |
| 1330 | |
| 1331 | /*****************************************************************************/ |
| 1332 | |
| 1333 | inline GenTree* Compiler::gtUnusedValNode(GenTree* expr) |
| 1334 | { |
| 1335 | return gtNewOperNode(GT_COMMA, TYP_VOID, expr, gtNewNothingNode()); |
| 1336 | } |
| 1337 | |
| 1338 | /***************************************************************************** |
| 1339 | * |
| 1340 | * A wrapper for gtSetEvalOrder and gtComputeFPlvls. |
| 1341 | * Necessary because the FP levels may need to be re-computed if we reverse |
| 1342 | * operands. |
| 1343 | */ |
| 1344 | |
| 1345 | inline void Compiler::gtSetStmtInfo(GenTree* stmt) |
| 1346 | { |
| 1347 | assert(stmt->gtOper == GT_STMT); |
| 1348 | GenTree* expr = stmt->gtStmt.gtStmtExpr; |
| 1349 | |
| 1350 | /* Recursively process the expression */ |
| 1351 | |
| 1352 | gtSetEvalOrder(expr); |
| 1353 | |
| 1354 | // Set the statement to have the same costs as the top node of the tree. |
| 1355 | stmt->CopyCosts(expr); |
| 1356 | } |
| 1357 | |
| 1358 | /*****************************************************************************/ |
| 1359 | #if SMALL_TREE_NODES |
| 1360 | /*****************************************************************************/ |
| 1361 | |
| 1362 | inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate) |
| 1363 | { |
| 1364 | assert(((gtDebugFlags & GTF_DEBUG_NODE_SMALL) != 0) != ((gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0)); |
| 1365 | |
| 1366 | /* Make sure the node isn't too small for the new operator */ |
| 1367 | |
| 1368 | assert(GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_SMALL || |
| 1369 | GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_LARGE); |
| 1370 | |
| 1371 | assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_LARGE); |
| 1372 | assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE)); |
| 1373 | |
| 1374 | #if defined(_HOST_64BIT_) && !defined(_TARGET_64BIT_) |
| 1375 | if (gtOper == GT_CNS_LNG && oper == GT_CNS_INT) |
| 1376 | { |
| 1377 | // When casting from LONG to INT, we need to force cast of the value, |
| 1378 | // if the host architecture represents INT and LONG with the same data size. |
| 1379 | gtLngCon.gtLconVal = (INT64)(INT32)gtLngCon.gtLconVal; |
| 1380 | } |
| 1381 | #endif // defined(_HOST_64BIT_) && !defined(_TARGET_64BIT_) |
| 1382 | |
| 1383 | SetOperRaw(oper); |
| 1384 | |
| 1385 | #ifdef DEBUG |
| 1386 | // Maintain the invariant that unary operators always have NULL gtOp2. |
| 1387 | // If we ever start explicitly allocating GenTreeUnOp nodes, we wouldn't be |
| 1388 | // able to do that (but if we did, we'd have to have a check in gtOp -- perhaps |
| 1389 | // a gtUnOp...) |
| 1390 | if (OperKind(oper) == GTK_UNOP) |
| 1391 | { |
| 1392 | gtOp.gtOp2 = nullptr; |
| 1393 | } |
| 1394 | #endif // DEBUG |
| 1395 | |
| 1396 | #if DEBUGGABLE_GENTREE |
| 1397 | // Until we eliminate SetOper/ChangeOper, we also change the vtable of the node, so that |
| 1398 | // it shows up correctly in the debugger. |
| 1399 | SetVtableForOper(oper); |
| 1400 | #endif // DEBUGGABLE_GENTREE |
| 1401 | |
| 1402 | if (oper == GT_CNS_INT) |
| 1403 | { |
| 1404 | gtIntCon.gtFieldSeq = nullptr; |
| 1405 | } |
| 1406 | |
| 1407 | #if defined(_TARGET_ARM_) |
| 1408 | if (oper == GT_MUL_LONG) |
| 1409 | { |
| 1410 | // We sometimes bash GT_MUL to GT_MUL_LONG, which converts it from GenTreeOp to GenTreeMultiRegOp. |
| 1411 | gtMultiRegOp.gtOtherReg = REG_NA; |
| 1412 | gtMultiRegOp.ClearOtherRegFlags(); |
| 1413 | } |
| 1414 | #endif |
| 1415 | |
| 1416 | if (vnUpdate == CLEAR_VN) |
| 1417 | { |
| 1418 | // Clear the ValueNum field as well. |
| 1419 | gtVNPair.SetBoth(ValueNumStore::NoVN); |
| 1420 | } |
| 1421 | } |
| 1422 | |
| 1423 | inline GenTreeCast* Compiler::gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType) |
| 1424 | { |
| 1425 | GenTreeCast* res = new (this, GT_CAST) GenTreeCast(typ, op1, fromUnsigned, castType); |
| 1426 | return res; |
| 1427 | } |
| 1428 | |
| 1429 | inline GenTreeCast* Compiler::gtNewCastNodeL(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType) |
| 1430 | { |
| 1431 | /* Some casts get transformed into 'GT_CALL' or 'GT_IND' nodes */ |
| 1432 | |
| 1433 | assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_CAST]); |
| 1434 | assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_IND]); |
| 1435 | |
| 1436 | /* Make a big node first and then change it to be GT_CAST */ |
| 1437 | |
| 1438 | GenTreeCast* res = |
| 1439 | new (this, LargeOpOpcode()) GenTreeCast(typ, op1, fromUnsigned, castType DEBUGARG(/*largeNode*/ true)); |
| 1440 | return res; |
| 1441 | } |
| 1442 | |
| 1443 | /*****************************************************************************/ |
| 1444 | #else // SMALL_TREE_NODES |
| 1445 | /*****************************************************************************/ |
| 1446 | |
| 1447 | inline void GenTree::InitNodeSize() |
| 1448 | { |
| 1449 | } |
| 1450 | |
| 1451 | inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate) |
| 1452 | { |
| 1453 | SetOperRaw(oper); |
| 1454 | |
| 1455 | if (vnUpdate == CLEAR_VN) |
| 1456 | { |
| 1457 | // Clear the ValueNum field. |
| 1458 | gtVNPair.SetBoth(ValueNumStore::NoVN); |
| 1459 | } |
| 1460 | } |
| 1461 | |
| 1462 | inline void GenTree::ReplaceWith(GenTree* src) |
| 1463 | { |
| 1464 | RecordOperBashing(OperGet(), src->OperGet()); // nop unless NODEBASH_STATS is enabled |
| 1465 | *this = *src; |
| 1466 | #ifdef DEBUG |
| 1467 | gtSeqNum = 0; |
| 1468 | #endif |
| 1469 | } |
| 1470 | |
| 1471 | inline GenTree* Compiler::gtNewCastNode(var_types typ, GenTree* op1, var_types castType) |
| 1472 | { |
| 1473 | GenTree* tree = gtNewOperNode(GT_CAST, typ, op1); |
| 1474 | tree->gtCast.gtCastType = castType; |
|  | return tree; |
| 1475 | } |
| 1476 | |
| 1477 | inline GenTree* Compiler::gtNewCastNodeL(var_types typ, GenTree* op1, var_types castType) |
| 1478 | { |
| 1479 | return gtNewCastNode(typ, op1, castType); |
| 1480 | } |
| 1481 | |
| 1482 | /*****************************************************************************/ |
| 1483 | #endif // SMALL_TREE_NODES |
| 1484 | /*****************************************************************************/ |
| 1485 | |
| 1486 | /*****************************************************************************/ |
| 1487 | |
| 1488 | inline void GenTree::SetOperRaw(genTreeOps oper) |
| 1489 | { |
| 1490 | // Please do not do anything here other than assign to gtOper (debug-only |
| 1491 | // code is OK, but should be kept to a minimum). |
| 1492 | RecordOperBashing(OperGet(), oper); // nop unless NODEBASH_STATS is enabled |
| 1493 | gtOper = oper; |
| 1494 | } |
| 1495 | |
| 1496 | inline void GenTree::SetOperResetFlags(genTreeOps oper) |
| 1497 | { |
| 1498 | SetOper(oper); |
| 1499 | gtFlags &= GTF_NODE_MASK; |
| 1500 | } |
| 1501 | |
| 1502 | inline void GenTree::ChangeOperConst(genTreeOps oper) |
| 1503 | { |
| 1504 | #ifdef _TARGET_64BIT_ |
| 1505 | assert(oper != GT_CNS_LNG); // We should never see a GT_CNS_LNG for a 64-bit target! |
| 1506 | #endif |
| 1507 | assert(OperIsConst(oper)); // use ChangeOper() instead |
| 1508 | SetOperResetFlags(oper); |
| 1509 | // Some constant subtypes have additional fields that must be initialized. |
| 1510 | if (oper == GT_CNS_INT) |
| 1511 | { |
| 1512 | gtIntCon.gtFieldSeq = FieldSeqStore::NotAField(); |
| 1513 | } |
| 1514 | } |
| 1515 | |
| 1516 | inline void GenTree::ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate) |
| 1517 | { |
| 1518 | assert(!OperIsConst(oper)); // use ChangeOperLeaf() instead |
| 1519 | |
| 1520 | unsigned mask = GTF_COMMON_MASK; |
| 1521 | if (this->OperIsIndirOrArrLength() && OperIsIndirOrArrLength(oper)) |
| 1522 | { |
| 1523 | mask |= GTF_IND_NONFAULTING; |
| 1524 | } |
| 1525 | SetOper(oper, vnUpdate); |
| 1526 | gtFlags &= mask; |
| 1527 | |
| 1528 | // Do "oper"-specific initializations... |
| 1529 | switch (oper) |
| 1530 | { |
| 1531 | case GT_LCL_FLD: |
| 1532 | gtLclFld.gtLclOffs = 0; |
| 1533 | gtLclFld.gtFieldSeq = FieldSeqStore::NotAField(); |
| 1534 | break; |
| 1535 | default: |
| 1536 | break; |
| 1537 | } |
| 1538 | } |
| 1539 | |
| 1540 | inline void GenTree::ChangeOperUnchecked(genTreeOps oper) |
| 1541 | { |
| 1542 | unsigned mask = GTF_COMMON_MASK; |
| 1543 | if (this->OperIsIndirOrArrLength() && OperIsIndirOrArrLength(oper)) |
| 1544 | { |
| 1545 | mask |= GTF_IND_NONFAULTING; |
| 1546 | } |
| 1547 | SetOperRaw(oper); // Trust the caller and don't use SetOper() |
| 1548 | gtFlags &= mask; |
| 1549 | } |
| 1550 | |
| 1551 | /***************************************************************************** |
| 1552 | * Returns true if the node is &var (created by ldarga and ldloca) |
| 1553 | */ |
| 1554 | |
| 1555 | inline bool GenTree::IsVarAddr() const |
| 1556 | { |
| 1557 | if (gtOper == GT_ADDR) |
| 1558 | { |
| 1559 | if (gtFlags & GTF_ADDR_ONSTACK) |
| 1560 | { |
| 1561 | assert((gtType == TYP_BYREF) || (gtType == TYP_I_IMPL)); |
| 1562 | return true; |
| 1563 | } |
| 1564 | } |
| 1565 | return false; |
| 1566 | } |
| 1567 | |
| 1568 | /***************************************************************************** |
| 1569 | * |
| 1570 | * Returns true if the node is of the "ovf" variety, for example, add.ovf.i1. |
| 1571 | * + gtOverflow() can only be called for valid operators (that is, we know it is one |
| 1572 | * of the operators which may have GTF_OVERFLOW set). |
| 1573 | * + gtOverflowEx() is more expensive, and should be called only if gtOper may be |
| 1574 | * an operator for which GTF_OVERFLOW is invalid. |
| 1575 | */ |
| 1576 | |
| 1577 | inline bool GenTree::gtOverflow() const |
| 1578 | { |
| 1579 | assert(OperMayOverflow()); |
| 1580 | |
| 1581 | if ((gtFlags & GTF_OVERFLOW) != 0) |
| 1582 | { |
| 1583 | assert(varTypeIsIntegral(TypeGet())); |
| 1584 | |
| 1585 | return true; |
| 1586 | } |
| 1587 | else |
| 1588 | { |
| 1589 | return false; |
| 1590 | } |
| 1591 | } |
| 1592 | |
| 1593 | inline bool GenTree::gtOverflowEx() const |
| 1594 | { |
| 1595 | return OperMayOverflow() && gtOverflow(); |
| 1596 | } |
| 1597 | |
| 1598 | /* |
| 1599 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 1600 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 1601 | XX LclVarsInfo XX |
| 1602 | XX Inline functions XX |
| 1603 | XX XX |
| 1604 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 1605 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 1606 | */ |
| 1607 | |
| 1608 | inline bool Compiler::lvaHaveManyLocals() const |
| 1609 | { |
| 1610 | return (lvaCount >= lclMAX_TRACKED); |
| 1611 | } |
| 1612 | |
| 1613 | /***************************************************************************** |
| 1614 | * |
| 1615 | * Allocate a temporary variable or a set of temp variables. |
| 1616 | */ |
| 1617 | |
| 1618 | inline unsigned Compiler::lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)) |
| 1619 | { |
| 1620 | if (compIsForInlining()) |
| 1621 | { |
| 1622 | // Grab the temp using Inliner's Compiler instance. |
| 1623 | Compiler* pComp = impInlineInfo->InlinerCompiler; // The Compiler instance for the caller (i.e. the inliner) |
| 1624 | |
| 1625 | if (pComp->lvaHaveManyLocals()) |
| 1626 | { |
// Don't create any more locals while inlining
| 1628 | compInlineResult->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS); |
| 1629 | } |
| 1630 | |
| 1631 | unsigned tmpNum = pComp->lvaGrabTemp(shortLifetime DEBUGARG(reason)); |
| 1632 | lvaTable = pComp->lvaTable; |
| 1633 | lvaCount = pComp->lvaCount; |
| 1634 | lvaTableCnt = pComp->lvaTableCnt; |
| 1635 | return tmpNum; |
| 1636 | } |
| 1637 | |
| 1638 | // You cannot allocate more space after frame layout! |
| 1639 | noway_assert(lvaDoneFrameLayout < Compiler::TENTATIVE_FRAME_LAYOUT); |
| 1640 | |
| 1641 | /* Check if the lvaTable has to be grown */ |
| 1642 | if (lvaCount + 1 > lvaTableCnt) |
| 1643 | { |
| 1644 | unsigned newLvaTableCnt = lvaCount + (lvaCount / 2) + 1; |
| 1645 | |
| 1646 | // Check for overflow |
| 1647 | if (newLvaTableCnt <= lvaCount) |
| 1648 | { |
| 1649 | IMPL_LIMITATION("too many locals" ); |
| 1650 | } |
| 1651 | |
| 1652 | LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt); |
| 1653 | |
| 1654 | memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable)); |
| 1655 | memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable)); |
| 1656 | |
| 1657 | for (unsigned i = lvaCount; i < newLvaTableCnt; i++) |
| 1658 | { |
| 1659 | new (&newLvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. |
| 1660 | } |
| 1661 | |
| 1662 | #ifdef DEBUG |
// Fill the old table with junk to help detect unintended uses of stale entries.
| 1664 | memset(lvaTable, JitConfig.JitDefaultFill(), lvaCount * sizeof(*lvaTable)); |
| 1665 | #endif |
| 1666 | |
| 1667 | lvaTableCnt = newLvaTableCnt; |
| 1668 | lvaTable = newLvaTable; |
| 1669 | } |
| 1670 | |
| 1671 | const unsigned tempNum = lvaCount; |
| 1672 | lvaCount++; |
| 1673 | |
| 1674 | // Initialize lvType, lvIsTemp and lvOnFrame |
| 1675 | lvaTable[tempNum].lvType = TYP_UNDEF; |
| 1676 | lvaTable[tempNum].lvIsTemp = shortLifetime; |
| 1677 | lvaTable[tempNum].lvOnFrame = true; |
| 1678 | |
| 1679 | // If we've started normal ref counting, bump the ref count of this |
| 1680 | // local, as we no longer do any incremental counting, and we presume |
| 1681 | // this new local will be referenced. |
| 1682 | if (lvaLocalVarRefCounted()) |
| 1683 | { |
| 1684 | if (opts.OptimizationDisabled()) |
| 1685 | { |
| 1686 | lvaTable[tempNum].lvImplicitlyReferenced = 1; |
| 1687 | } |
| 1688 | else |
| 1689 | { |
| 1690 | lvaTable[tempNum].setLvRefCnt(1); |
| 1691 | lvaTable[tempNum].setLvRefCntWtd(BB_UNITY_WEIGHT); |
| 1692 | } |
| 1693 | } |
| 1694 | |
| 1695 | #ifdef DEBUG |
| 1696 | lvaTable[tempNum].lvReason = reason; |
| 1697 | |
| 1698 | if (verbose) |
| 1699 | { |
| 1700 | printf("\nlvaGrabTemp returning %d (" , tempNum); |
| 1701 | gtDispLclVar(tempNum, false); |
| 1702 | printf(")%s called for %s.\n" , shortLifetime ? "" : " (a long lifetime temp)" , reason); |
| 1703 | } |
| 1704 | #endif // DEBUG |
| 1705 | |
| 1706 | return tempNum; |
| 1707 | } |
| 1708 | |
| 1709 | inline unsigned Compiler::lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)) |
| 1710 | { |
| 1711 | if (compIsForInlining()) |
| 1712 | { |
| 1713 | // Grab the temps using Inliner's Compiler instance. |
| 1714 | unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTemps(cnt DEBUGARG(reason)); |
| 1715 | |
| 1716 | lvaTable = impInlineInfo->InlinerCompiler->lvaTable; |
| 1717 | lvaCount = impInlineInfo->InlinerCompiler->lvaCount; |
| 1718 | lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; |
| 1719 | return tmpNum; |
| 1720 | } |
| 1721 | |
| 1722 | #ifdef DEBUG |
| 1723 | if (verbose) |
| 1724 | { |
| 1725 | printf("\nlvaGrabTemps(%d) returning %d..%d (long lifetime temps) called for %s" , cnt, lvaCount, |
| 1726 | lvaCount + cnt - 1, reason); |
| 1727 | } |
| 1728 | #endif |
| 1729 | |
| 1730 | // Could handle this... |
| 1731 | assert(!lvaLocalVarRefCounted()); |
| 1732 | |
| 1733 | // You cannot allocate more space after frame layout! |
| 1734 | noway_assert(lvaDoneFrameLayout < Compiler::TENTATIVE_FRAME_LAYOUT); |
| 1735 | |
| 1736 | /* Check if the lvaTable has to be grown */ |
| 1737 | if (lvaCount + cnt > lvaTableCnt) |
| 1738 | { |
| 1739 | unsigned newLvaTableCnt = lvaCount + max(lvaCount / 2 + 1, cnt); |
| 1740 | |
| 1741 | // Check for overflow |
| 1742 | if (newLvaTableCnt <= lvaCount) |
| 1743 | { |
| 1744 | IMPL_LIMITATION("too many locals" ); |
| 1745 | } |
| 1746 | |
| 1747 | LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt); |
| 1748 | |
| 1749 | memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable)); |
| 1750 | memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable)); |
| 1751 | for (unsigned i = lvaCount; i < newLvaTableCnt; i++) |
| 1752 | { |
| 1753 | new (&newLvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. |
| 1754 | } |
| 1755 | |
| 1756 | #ifdef DEBUG |
// Fill the old table with junk to help detect unintended uses of stale entries.
| 1758 | memset(lvaTable, JitConfig.JitDefaultFill(), lvaCount * sizeof(*lvaTable)); |
| 1759 | #endif |
| 1760 | |
| 1761 | lvaTableCnt = newLvaTableCnt; |
| 1762 | lvaTable = newLvaTable; |
| 1763 | } |
| 1764 | |
| 1765 | unsigned tempNum = lvaCount; |
| 1766 | |
| 1767 | while (cnt--) |
| 1768 | { |
| 1769 | lvaTable[lvaCount].lvType = TYP_UNDEF; // Initialize lvType, lvIsTemp and lvOnFrame |
| 1770 | lvaTable[lvaCount].lvIsTemp = false; |
| 1771 | lvaTable[lvaCount].lvOnFrame = true; |
| 1772 | lvaCount++; |
| 1773 | } |
| 1774 | |
| 1775 | return tempNum; |
| 1776 | } |
| 1777 | |
| 1778 | /***************************************************************************** |
| 1779 | * |
| 1780 | * Allocate a temporary variable which is implicitly used by code-gen |
| 1781 | * There will be no explicit references to the temp, and so it needs to |
| 1782 | * be forced to be kept alive, and not be optimized away. |
| 1783 | */ |
| 1784 | |
| 1785 | inline unsigned Compiler::lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)) |
| 1786 | { |
| 1787 | if (compIsForInlining()) |
| 1788 | { |
| 1789 | // Grab the temp using Inliner's Compiler instance. |
| 1790 | unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTempWithImplicitUse(shortLifetime DEBUGARG(reason)); |
| 1791 | |
| 1792 | lvaTable = impInlineInfo->InlinerCompiler->lvaTable; |
| 1793 | lvaCount = impInlineInfo->InlinerCompiler->lvaCount; |
| 1794 | lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; |
| 1795 | return tmpNum; |
| 1796 | } |
| 1797 | |
| 1798 | unsigned lclNum = lvaGrabTemp(shortLifetime DEBUGARG(reason)); |
| 1799 | |
| 1800 | LclVarDsc* varDsc = &lvaTable[lclNum]; |
| 1801 | |
| 1802 | // This will prevent it from being optimized away |
| 1803 | // TODO-CQ: We shouldn't have to go as far as to declare these |
| 1804 | // address-exposed -- DoNotEnregister should suffice? |
| 1805 | lvaSetVarAddrExposed(lclNum); |
| 1806 | |
| 1807 | // Note the implicit use |
| 1808 | varDsc->lvImplicitlyReferenced = 1; |
| 1809 | |
| 1810 | return lclNum; |
| 1811 | } |
| 1812 | |
| 1813 | /***************************************************************************** |
| 1814 | * |
| 1815 | * Increment the ref counts for a local variable |
| 1816 | */ |
| 1817 | |
| 1818 | inline void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler* comp, RefCountState state, bool propagate) |
| 1819 | { |
| 1820 | // In minopts and debug codegen, we don't maintain normal ref counts. |
| 1821 | if ((state == RCS_NORMAL) && comp->opts.OptimizationDisabled()) |
| 1822 | { |
// At a minimum, record that there is at least one reference.
| 1824 | lvImplicitlyReferenced = 1; |
| 1825 | return; |
| 1826 | } |
| 1827 | |
| 1828 | Compiler::lvaPromotionType promotionType = DUMMY_INIT(Compiler::PROMOTION_TYPE_NONE); |
| 1829 | if (varTypeIsStruct(lvType)) |
| 1830 | { |
| 1831 | promotionType = comp->lvaGetPromotionType(this); |
| 1832 | } |
| 1833 | |
| 1834 | // |
| 1835 | // Increment counts on the local itself. |
| 1836 | // |
| 1837 | if (lvType != TYP_STRUCT || promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT) |
| 1838 | { |
| 1839 | // |
| 1840 | // Increment lvRefCnt |
| 1841 | // |
| 1842 | int newRefCnt = lvRefCnt(state) + 1; |
| 1843 | if (newRefCnt == (unsigned short)newRefCnt) // lvRefCnt is an "unsigned short". Don't overflow it. |
| 1844 | { |
| 1845 | setLvRefCnt((unsigned short)newRefCnt, state); |
| 1846 | } |
| 1847 | |
| 1848 | // |
| 1849 | // Increment lvRefCntWtd |
| 1850 | // |
| 1851 | if (weight != 0) |
| 1852 | { |
| 1853 | // We double the weight of internal temps |
| 1854 | // |
| 1855 | if (lvIsTemp && (weight * 2 > weight)) |
| 1856 | { |
| 1857 | weight *= 2; |
| 1858 | } |
| 1859 | |
| 1860 | unsigned newWeight = lvRefCntWtd(state) + weight; |
| 1861 | if (newWeight >= lvRefCntWtd(state)) |
| 1862 | { // lvRefCntWtd is an "unsigned". Don't overflow it |
| 1863 | setLvRefCntWtd(newWeight, state); |
| 1864 | } |
| 1865 | else |
| 1866 | { // On overflow we assign ULONG_MAX |
| 1867 | setLvRefCntWtd(ULONG_MAX, state); |
| 1868 | } |
| 1869 | } |
| 1870 | } |
| 1871 | |
| 1872 | if (varTypeIsStruct(lvType) && propagate) |
| 1873 | { |
| 1874 | // For promoted struct locals, increment lvRefCnt on its field locals as well. |
| 1875 | if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT || |
| 1876 | promotionType == Compiler::PROMOTION_TYPE_DEPENDENT) |
| 1877 | { |
| 1878 | for (unsigned i = lvFieldLclStart; i < lvFieldLclStart + lvFieldCnt; ++i) |
| 1879 | { |
| 1880 | comp->lvaTable[i].incRefCnts(weight, comp, state, false); // Don't propagate |
| 1881 | } |
| 1882 | } |
| 1883 | } |
| 1884 | |
| 1885 | if (lvIsStructField && propagate) |
| 1886 | { |
| 1887 | // Depending on the promotion type, increment the ref count for the parent struct as well. |
| 1888 | promotionType = comp->lvaGetParentPromotionType(this); |
| 1889 | LclVarDsc* parentvarDsc = &comp->lvaTable[lvParentLcl]; |
| 1890 | assert(!parentvarDsc->lvRegStruct); |
| 1891 | if (promotionType == Compiler::PROMOTION_TYPE_DEPENDENT) |
| 1892 | { |
| 1893 | parentvarDsc->incRefCnts(weight, comp, state, false); // Don't propagate |
| 1894 | } |
| 1895 | } |
| 1896 | |
| 1897 | #ifdef DEBUG |
| 1898 | if (comp->verbose) |
| 1899 | { |
| 1900 | unsigned varNum = (unsigned)(this - comp->lvaTable); |
| 1901 | assert(&comp->lvaTable[varNum] == this); |
| 1902 | printf("New refCnts for V%02u: refCnt = %2u, refCntWtd = %s\n" , varNum, lvRefCnt(state), |
| 1903 | refCntWtd2str(lvRefCntWtd(state))); |
| 1904 | } |
| 1905 | #endif |
| 1906 | } |
| 1907 | |
| 1908 | /***************************************************************************** |
| 1909 | * |
| 1910 | * The following returns the mask of all tracked locals |
| 1911 | * referenced in a statement. |
| 1912 | */ |
| 1913 | |
| 1914 | inline VARSET_VALRET_TP Compiler::lvaStmtLclMask(GenTree* stmt) |
| 1915 | { |
| 1916 | GenTree* tree; |
| 1917 | unsigned varNum; |
| 1918 | LclVarDsc* varDsc; |
| 1919 | VARSET_TP lclMask(VarSetOps::MakeEmpty(this)); |
| 1920 | |
| 1921 | assert(stmt->gtOper == GT_STMT); |
| 1922 | assert(fgStmtListThreaded); |
| 1923 | |
| 1924 | for (tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext) |
| 1925 | { |
| 1926 | if (tree->gtOper != GT_LCL_VAR) |
| 1927 | { |
| 1928 | continue; |
| 1929 | } |
| 1930 | |
| 1931 | varNum = tree->gtLclVarCommon.gtLclNum; |
| 1932 | assert(varNum < lvaCount); |
| 1933 | varDsc = lvaTable + varNum; |
| 1934 | |
| 1935 | if (!varDsc->lvTracked) |
| 1936 | { |
| 1937 | continue; |
| 1938 | } |
| 1939 | |
| 1940 | VarSetOps::UnionD(this, lclMask, VarSetOps::MakeSingleton(this, varDsc->lvVarIndex)); |
| 1941 | } |
| 1942 | |
| 1943 | return lclMask; |
| 1944 | } |
| 1945 | |
| 1946 | /***************************************************************************** |
| 1947 | * Returns true if the lvType is a TYP_REF or a TYP_BYREF. |
| 1948 | * When the lvType is a TYP_STRUCT it searches the GC layout |
| 1949 | * of the struct and returns true iff it contains a GC ref. |
| 1950 | */ |
| 1951 | |
| 1952 | inline bool Compiler::lvaTypeIsGC(unsigned varNum) |
| 1953 | { |
| 1954 | if (lvaTable[varNum].TypeGet() == TYP_STRUCT) |
| 1955 | { |
assert(lvaTable[varNum].lvGcLayout != nullptr); // bits are initialized
| 1957 | return (lvaTable[varNum].lvStructGcCount != 0); |
| 1958 | } |
| 1959 | return (varTypeIsGC(lvaTable[varNum].TypeGet())); |
| 1960 | } |
| 1961 | |
| 1962 | /***************************************************************************** |
| 1963 | Is this a synchronized instance method? If so, we will need to report "this" |
| 1964 | in the GC information, so that the EE can release the object lock |
| 1965 | in case of an exception |
| 1966 | |
| 1967 | We also need to report "this" and keep it alive for all shared generic |
| 1968 | code that gets the actual generic context from the "this" pointer and |
| 1969 | has exception handlers. |
| 1970 | |
| 1971 | For example, if List<T>::m() is shared between T = object and T = string, |
| 1972 | then inside m() an exception handler "catch E<T>" needs to be able to fetch |
| 1973 | the 'this' pointer to find out what 'T' is in order to tell if we |
| 1974 | should catch the exception or not. |
| 1975 | */ |
| 1976 | |
| 1977 | inline bool Compiler::lvaKeepAliveAndReportThis() |
| 1978 | { |
| 1979 | if (info.compIsStatic || lvaTable[0].TypeGet() != TYP_REF) |
| 1980 | { |
| 1981 | return false; |
| 1982 | } |
| 1983 | |
| 1984 | const bool genericsContextIsThis = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0; |
| 1985 | |
| 1986 | #ifdef JIT32_GCENCODER |
| 1987 | |
| 1988 | if (info.compFlags & CORINFO_FLG_SYNCH) |
| 1989 | return true; |
| 1990 | |
| 1991 | if (genericsContextIsThis) |
| 1992 | { |
| 1993 | // TODO: Check if any of the exception clauses are |
| 1994 | // typed using a generic type. Else, we do not need to report this. |
| 1995 | if (info.compXcptnsCount > 0) |
| 1996 | return true; |
| 1997 | |
| 1998 | if (opts.compDbgCode) |
| 1999 | return true; |
| 2000 | |
| 2001 | if (lvaGenericsContextUseCount > 0) |
| 2002 | { |
| 2003 | JITDUMP("Reporting this as generic context: %u refs\n" , lvaGenericsContextUseCount); |
| 2004 | return true; |
| 2005 | } |
| 2006 | } |
| 2007 | #else // !JIT32_GCENCODER |
| 2008 | // If the generics context is the this pointer we need to report it if either |
| 2009 | // the VM requires us to keep the generics context alive or it is used in a look-up. |
| 2010 | // We keep it alive in the lookup scenario, even when the VM didn't ask us to, |
| 2011 | // because collectible types need the generics context when gc-ing. |
| 2012 | if (genericsContextIsThis) |
| 2013 | { |
| 2014 | const bool isUsed = lvaGenericsContextUseCount > 0; |
| 2015 | const bool mustKeep = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0; |
| 2016 | |
| 2017 | if (isUsed || mustKeep) |
| 2018 | { |
| 2019 | JITDUMP("Reporting this as generic context: %u refs%s\n" , lvaGenericsContextUseCount, |
| 2020 | mustKeep ? ", must keep" : "" ); |
| 2021 | |
| 2022 | return true; |
| 2023 | } |
| 2024 | } |
| 2025 | #endif |
| 2026 | |
| 2027 | return false; |
| 2028 | } |
| 2029 | |
| 2030 | /***************************************************************************** |
| 2031 | Similar to lvaKeepAliveAndReportThis |
| 2032 | */ |
| 2033 | |
| 2034 | inline bool Compiler::lvaReportParamTypeArg() |
| 2035 | { |
| 2036 | if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) |
| 2037 | { |
| 2038 | assert(info.compTypeCtxtArg != -1); |
| 2039 | |
| 2040 | // If the VM requires us to keep the generics context alive and report it (for example, if any catch |
| 2041 | // clause catches a type that uses a generic parameter of this method) this flag will be set. |
| 2042 | if (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) |
| 2043 | { |
| 2044 | return true; |
| 2045 | } |
| 2046 | |
| 2047 | // Otherwise, if an exact type parameter is needed in the body, report the generics context. |
| 2048 | // We do this because collectible types needs the generics context when gc-ing. |
| 2049 | if (lvaGenericsContextUseCount > 0) |
| 2050 | { |
| 2051 | return true; |
| 2052 | } |
| 2053 | } |
| 2054 | |
| 2055 | // Otherwise, we don't need to report it -- the generics context parameter is unused. |
| 2056 | return false; |
| 2057 | } |
| 2058 | |
| 2059 | //***************************************************************************** |
| 2060 | |
| 2061 | inline int Compiler::lvaCachedGenericContextArgOffset() |
| 2062 | { |
| 2063 | assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); |
| 2064 | |
| 2065 | return lvaCachedGenericContextArgOffs; |
| 2066 | } |
| 2067 | |
| 2068 | //------------------------------------------------------------------------ |
| 2069 | // lvaFrameAddress: Determine the stack frame offset of the given variable, |
| 2070 | // and how to generate an address to that stack frame. |
| 2071 | // |
| 2072 | // Arguments: |
| 2073 | // varNum - The variable to inquire about. Positive for user variables |
| 2074 | // or arguments, negative for spill-temporaries. |
| 2075 | // mustBeFPBased - [_TARGET_ARM_ only] True if the base register must be FP. |
// After FINAL_FRAME_LAYOUT, if false, the base register must be SP.
| 2077 | // pBaseReg - [_TARGET_ARM_ only] Out arg. *pBaseReg is set to the base |
| 2078 | // register to use. |
| 2079 | // addrModeOffset - [_TARGET_ARM_ only] The mode offset within the variable that we need to address. |
| 2080 | // For example, for a large struct local, and a struct field reference, this will be the offset |
| 2081 | // of the field. Thus, for V02 + 0x28, if V02 itself is at offset SP + 0x10 |
| 2082 | // then addrModeOffset is what gets added beyond that, here 0x28. |
| 2083 | // isFloatUsage - [_TARGET_ARM_ only] True if the instruction being generated is a floating |
| 2084 | // point instruction. This requires using floating-point offset restrictions. |
| 2085 | // Note that a variable can be non-float, e.g., struct, but accessed as a |
| 2086 | // float local field. |
| 2087 | // pFPbased - [non-_TARGET_ARM_] Out arg. Set *FPbased to true if the |
| 2088 | // variable is addressed off of FP, false if it's addressed |
| 2089 | // off of SP. |
| 2090 | // |
| 2091 | // Return Value: |
| 2092 | // Returns the variable offset from the given base register. |
| 2093 | // |
| 2094 | inline |
| 2095 | #ifdef _TARGET_ARM_ |
| 2096 | int |
| 2097 | Compiler::lvaFrameAddress( |
| 2098 | int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage) |
| 2099 | #else |
| 2100 | int |
| 2101 | Compiler::lvaFrameAddress(int varNum, bool* pFPbased) |
| 2102 | #endif |
| 2103 | { |
| 2104 | assert(lvaDoneFrameLayout != NO_FRAME_LAYOUT); |
| 2105 | |
| 2106 | int varOffset; |
| 2107 | bool FPbased; |
| 2108 | bool fConservative = false; |
| 2109 | if (varNum >= 0) |
| 2110 | { |
| 2111 | LclVarDsc* varDsc; |
| 2112 | |
| 2113 | assert((unsigned)varNum < lvaCount); |
| 2114 | varDsc = lvaTable + varNum; |
| 2115 | bool isPrespilledArg = false; |
| 2116 | #if defined(_TARGET_ARM_) && defined(PROFILING_SUPPORTED) |
| 2117 | isPrespilledArg = varDsc->lvIsParam && compIsProfilerHookNeeded() && |
| 2118 | lvaIsPreSpilled(varNum, codeGen->regSet.rsMaskPreSpillRegs(false)); |
| 2119 | #endif |
| 2120 | |
| 2121 | // If we have finished with register allocation, and this isn't a stack-based local, |
| 2122 | // check that this has a valid stack location. |
| 2123 | if (lvaDoneFrameLayout > REGALLOC_FRAME_LAYOUT && !varDsc->lvOnFrame) |
| 2124 | { |
| 2125 | #ifdef _TARGET_AMD64_ |
| 2126 | #ifndef UNIX_AMD64_ABI |
| 2127 | // On amd64, every param has a stack location, except on Unix-like systems. |
| 2128 | assert(varDsc->lvIsParam); |
| 2129 | #endif // UNIX_AMD64_ABI |
| 2130 | #else // !_TARGET_AMD64_ |
| 2131 | // For other targets, a stack parameter that is enregistered or prespilled |
| 2132 | // for profiling on ARM will have a stack location. |
| 2133 | assert((varDsc->lvIsParam && !varDsc->lvIsRegArg) || isPrespilledArg); |
| 2134 | #endif // !_TARGET_AMD64_ |
| 2135 | } |
| 2136 | |
| 2137 | FPbased = varDsc->lvFramePointerBased; |
| 2138 | |
| 2139 | #ifdef DEBUG |
| 2140 | #if FEATURE_FIXED_OUT_ARGS |
| 2141 | if ((unsigned)varNum == lvaOutgoingArgSpaceVar) |
| 2142 | { |
| 2143 | assert(FPbased == false); |
| 2144 | } |
| 2145 | else |
| 2146 | #endif |
| 2147 | { |
| 2148 | #if DOUBLE_ALIGN |
| 2149 | assert(FPbased == (isFramePointerUsed() || (genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg))); |
| 2150 | #else |
| 2151 | #ifdef _TARGET_X86_ |
| 2152 | assert(FPbased == isFramePointerUsed()); |
| 2153 | #endif |
| 2154 | #endif |
| 2155 | } |
| 2156 | #endif // DEBUG |
| 2157 | |
| 2158 | varOffset = varDsc->lvStkOffs; |
| 2159 | } |
else // It's a spill-temp
| 2161 | { |
| 2162 | FPbased = isFramePointerUsed(); |
| 2163 | if (lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT) |
| 2164 | { |
| 2165 | TempDsc* tmpDsc = codeGen->regSet.tmpFindNum(varNum); |
| 2166 | // The temp might be in use, since this might be during code generation. |
| 2167 | if (tmpDsc == nullptr) |
| 2168 | { |
| 2169 | tmpDsc = codeGen->regSet.tmpFindNum(varNum, RegSet::TEMP_USAGE_USED); |
| 2170 | } |
| 2171 | assert(tmpDsc != nullptr); |
| 2172 | varOffset = tmpDsc->tdTempOffs(); |
| 2173 | } |
| 2174 | else |
| 2175 | { |
| 2176 | // This value is an estimate until we calculate the |
| 2177 | // offset after the final frame layout |
| 2178 | // --------------------------------------------------- |
| 2179 | // : : |
| 2180 | // +-------------------------+ base --+ |
| 2181 | // | LR, ++N for ARM | | frameBaseOffset (= N) |
| 2182 | // +-------------------------+ | |
| 2183 | // | R11, ++N for ARM | <---FP | |
| 2184 | // +-------------------------+ --+ |
| 2185 | // | compCalleeRegsPushed - N| | lclFrameOffset |
| 2186 | // +-------------------------+ --+ |
| 2187 | // | lclVars | | |
| 2188 | // +-------------------------+ | |
| 2189 | // | tmp[MAX_SPILL_TEMP] | | |
| 2190 | // | tmp[1] | | |
| 2191 | // | tmp[0] | | compLclFrameSize |
| 2192 | // +-------------------------+ | |
| 2193 | // | outgoingArgSpaceSize | | |
| 2194 | // +-------------------------+ --+ |
| 2195 | // | | <---SP |
| 2196 | // : : |
| 2197 | // --------------------------------------------------- |
| 2198 | |
| 2199 | fConservative = true; |
| 2200 | if (!FPbased) |
| 2201 | { |
| 2202 | // Worst case stack based offset. |
| 2203 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 2204 | #if FEATURE_FIXED_OUT_ARGS |
| 2205 | int outGoingArgSpaceSize = lvaOutgoingArgSpaceSize; |
| 2206 | #else |
| 2207 | int outGoingArgSpaceSize = 0; |
| 2208 | #endif |
| 2209 | varOffset = outGoingArgSpaceSize + max(-varNum * TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); |
| 2210 | } |
| 2211 | else |
| 2212 | { |
| 2213 | // Worst case FP based offset. |
| 2214 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 2215 | |
| 2216 | #ifdef _TARGET_ARM_ |
| 2217 | varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta(); |
| 2218 | #else |
| 2219 | varOffset = -(codeGen->genTotalFrameSize()); |
| 2220 | #endif |
| 2221 | } |
| 2222 | } |
| 2223 | } |
| 2224 | |
| 2225 | #ifdef _TARGET_ARM_ |
| 2226 | if (FPbased) |
| 2227 | { |
| 2228 | if (mustBeFPBased) |
| 2229 | { |
| 2230 | *pBaseReg = REG_FPBASE; |
| 2231 | } |
| 2232 | // Change the Frame Pointer (R11)-based addressing to the SP-based addressing when possible because |
| 2233 | // it generates smaller code on ARM. See frame picture above for the math. |
| 2234 | else |
| 2235 | { |
// In the final frame layout phase we have no choice: we must stick with the
// FP-based or SP-based decision made in the earlier phase, since the
// instruction has already been selected. MinOpts will always reserve R10, so
// for MinOpts always use SP-based offsets, using R10 as necessary, for simplicity.
| 2240 | |
| 2241 | int spVarOffset = fConservative ? compLclFrameSize : varOffset + codeGen->genSPtoFPdelta(); |
| 2242 | int actualSPOffset = spVarOffset + addrModeOffset; |
| 2243 | int actualFPOffset = varOffset + addrModeOffset; |
| 2244 | int encodingLimitUpper = isFloatUsage ? 0x3FC : 0xFFF; |
| 2245 | int encodingLimitLower = isFloatUsage ? -0x3FC : -0xFF; |
| 2246 | |
| 2247 | // Use SP-based encoding. During encoding, we'll pick the best encoding for the actual offset we have. |
| 2248 | if (opts.MinOpts() || (actualSPOffset <= encodingLimitUpper)) |
| 2249 | { |
| 2250 | varOffset = spVarOffset; |
| 2251 | *pBaseReg = compLocallocUsed ? REG_SAVED_LOCALLOC_SP : REG_SPBASE; |
| 2252 | } |
| 2253 | // Use Frame Pointer (R11)-based encoding. |
| 2254 | else if ((encodingLimitLower <= actualFPOffset) && (actualFPOffset <= encodingLimitUpper)) |
| 2255 | { |
| 2256 | *pBaseReg = REG_FPBASE; |
| 2257 | } |
| 2258 | // Otherwise, use SP-based encoding. This is either (1) a small positive offset using a single movw, |
| 2259 | // (2) a large offset using movw/movt. In either case, we must have already reserved |
| 2260 | // the "reserved register", which will get used during encoding. |
| 2261 | else |
| 2262 | { |
| 2263 | varOffset = spVarOffset; |
| 2264 | *pBaseReg = compLocallocUsed ? REG_SAVED_LOCALLOC_SP : REG_SPBASE; |
| 2265 | } |
| 2266 | } |
| 2267 | } |
| 2268 | else |
| 2269 | { |
| 2270 | *pBaseReg = REG_SPBASE; |
| 2271 | } |
| 2272 | #else |
| 2273 | *pFPbased = FPbased; |
| 2274 | #endif |
| 2275 | |
| 2276 | return varOffset; |
| 2277 | } |
| 2278 | |
| 2279 | inline bool Compiler::lvaIsParameter(unsigned varNum) |
| 2280 | { |
| 2281 | LclVarDsc* varDsc; |
| 2282 | |
| 2283 | assert(varNum < lvaCount); |
| 2284 | varDsc = lvaTable + varNum; |
| 2285 | |
| 2286 | return varDsc->lvIsParam; |
| 2287 | } |
| 2288 | |
| 2289 | inline bool Compiler::lvaIsRegArgument(unsigned varNum) |
| 2290 | { |
| 2291 | LclVarDsc* varDsc; |
| 2292 | |
| 2293 | assert(varNum < lvaCount); |
| 2294 | varDsc = lvaTable + varNum; |
| 2295 | |
| 2296 | return varDsc->lvIsRegArg; |
| 2297 | } |
| 2298 | |
| 2299 | inline BOOL Compiler::lvaIsOriginalThisArg(unsigned varNum) |
| 2300 | { |
| 2301 | assert(varNum < lvaCount); |
| 2302 | |
| 2303 | BOOL isOriginalThisArg = (varNum == info.compThisArg) && (info.compIsStatic == false); |
| 2304 | |
| 2305 | #ifdef DEBUG |
| 2306 | if (isOriginalThisArg) |
| 2307 | { |
| 2308 | LclVarDsc* varDsc = lvaTable + varNum; |
| 2309 | // Should never write to or take the address of the original 'this' arg |
| 2310 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 2311 | |
| 2312 | #ifndef JIT32_GCENCODER |
| 2313 | // With the general encoder/decoder, when the original 'this' arg is needed as a generics context param, we |
| 2314 | // copy to a new local, and mark the original as DoNotEnregister, to |
| 2315 | // ensure that it is stack-allocated. It should not be the case that the original one can be modified -- it |
| 2316 | // should not be written to, or address-exposed. |
| 2317 | assert(!varDsc->lvHasILStoreOp && |
| 2318 | (!varDsc->lvAddrExposed || ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0))); |
| 2319 | #else |
| 2320 | assert(!varDsc->lvHasILStoreOp && !varDsc->lvAddrExposed); |
| 2321 | #endif |
| 2322 | } |
| 2323 | #endif |
| 2324 | |
| 2325 | return isOriginalThisArg; |
| 2326 | } |
| 2327 | |
| 2328 | inline BOOL Compiler::lvaIsOriginalThisReadOnly() |
| 2329 | { |
| 2330 | return lvaArg0Var == info.compThisArg; |
| 2331 | } |
| 2332 | |
| 2333 | /***************************************************************************** |
| 2334 | * |
| 2335 | * The following is used to detect the cases where the same local variable# |
| 2336 | * is used both as a long/double value and a 32-bit value and/or both as an |
| 2337 | * integer/address and a float value. |
| 2338 | */ |
| 2339 | |
| 2340 | /* static */ inline unsigned Compiler::lvaTypeRefMask(var_types type) |
| 2341 | { |
| 2342 | const static BYTE lvaTypeRefMasks[] = { |
| 2343 | #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) howUsed, |
| 2344 | #include "typelist.h" |
| 2345 | #undef DEF_TP |
| 2346 | }; |
| 2347 | |
| 2348 | assert((unsigned)type < sizeof(lvaTypeRefMasks)); |
| 2349 | assert(lvaTypeRefMasks[type] != 0); |
| 2350 | |
| 2351 | return lvaTypeRefMasks[type]; |
| 2352 | } |
| 2353 | |
| 2354 | /***************************************************************************** |
| 2355 | * |
| 2356 | * The following is used to detect the cases where the same local variable# |
| 2357 | * is used both as a long/double value and a 32-bit value and/or both as an |
| 2358 | * integer/address and a float value. |
| 2359 | */ |
| 2360 | |
| 2361 | inline var_types Compiler::lvaGetActualType(unsigned lclNum) |
| 2362 | { |
| 2363 | return genActualType(lvaGetRealType(lclNum)); |
| 2364 | } |
| 2365 | |
| 2366 | inline var_types Compiler::lvaGetRealType(unsigned lclNum) |
| 2367 | { |
| 2368 | return lvaTable[lclNum].TypeGet(); |
| 2369 | } |
| 2370 | |
| 2371 | /* |
| 2372 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2373 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2374 | XX Importer XX |
| 2375 | XX Inline functions XX |
| 2376 | XX XX |
| 2377 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2378 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2379 | */ |
| 2380 | |
| 2381 | inline unsigned Compiler::compMapILargNum(unsigned ILargNum) |
| 2382 | { |
| 2383 | assert(ILargNum < info.compILargsCount || tiVerificationNeeded); |
| 2384 | |
| 2385 | // Note that this works because if compRetBuffArg/compTypeCtxtArg/lvVarargsHandleArg are not present |
| 2386 | // they will be BAD_VAR_NUM (MAX_UINT), which is larger than any variable number. |
| 2387 | if (ILargNum >= info.compRetBuffArg) |
| 2388 | { |
| 2389 | ILargNum++; |
| 2390 | assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted. |
| 2391 | } |
| 2392 | |
| 2393 | if (ILargNum >= (unsigned)info.compTypeCtxtArg) |
| 2394 | { |
| 2395 | ILargNum++; |
| 2396 | assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted. |
| 2397 | } |
| 2398 | |
| 2399 | if (ILargNum >= (unsigned)lvaVarargsHandleArg) |
| 2400 | { |
| 2401 | ILargNum++; |
| 2402 | assert(ILargNum < info.compLocalsCount || tiVerificationNeeded); // compLocals count already adjusted. |
| 2403 | } |
| 2404 | |
| 2405 | assert(ILargNum < info.compArgsCount || tiVerificationNeeded); |
| 2406 | return (ILargNum); |
| 2407 | } |
| 2408 | |
| 2409 | //------------------------------------------------------------------------ |
| 2410 | // Compiler::mangleVarArgsType: Retype float types to their corresponding |
| 2411 | // : int/long types. |
| 2412 | // |
| 2413 | // Notes: |
| 2414 | // |
| 2415 | // The mangling of types will only occur for incoming vararg fixed arguments |
| 2416 | // on windows arm|64 or on armel (softFP). |
| 2417 | // |
| 2418 | // NO-OP for all other cases. |
| 2419 | // |
| 2420 | inline var_types Compiler::mangleVarArgsType(var_types type) |
| 2421 | { |
| 2422 | #if defined(_TARGET_ARMARCH_) |
| 2423 | if (opts.compUseSoftFP |
| 2424 | #if defined(_TARGET_WINDOWS_) |
| 2425 | || info.compIsVarArgs |
| 2426 | #endif // defined(_TARGET_WINDOWS_) |
| 2427 | ) |
| 2428 | { |
| 2429 | switch (type) |
| 2430 | { |
| 2431 | case TYP_FLOAT: |
| 2432 | return TYP_INT; |
| 2433 | case TYP_DOUBLE: |
| 2434 | return TYP_LONG; |
| 2435 | default: |
| 2436 | break; |
| 2437 | } |
| 2438 | } |
| 2439 | #endif // defined(_TARGET_ARMARCH_) |
| 2440 | return type; |
| 2441 | } |
| 2442 | |
| 2443 | // For CORECLR there is no vararg on System V systems. |
| 2444 | #if FEATURE_VARARG |
| 2445 | inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg) |
| 2446 | { |
| 2447 | #ifdef _TARGET_AMD64_ |
| 2448 | switch (floatReg) |
| 2449 | { |
| 2450 | case REG_XMM0: |
| 2451 | return REG_RCX; |
| 2452 | case REG_XMM1: |
| 2453 | return REG_RDX; |
| 2454 | case REG_XMM2: |
| 2455 | return REG_R8; |
| 2456 | case REG_XMM3: |
| 2457 | return REG_R9; |
| 2458 | default: |
| 2459 | unreached(); |
| 2460 | } |
| 2461 | #else // !_TARGET_AMD64_ |
| 2462 | // How will float args be passed for RyuJIT/x86? |
| 2463 | NYI("getCallArgIntRegister for RyuJIT/x86" ); |
| 2464 | return REG_NA; |
| 2465 | #endif // !_TARGET_AMD64_ |
| 2466 | } |
| 2467 | |
| 2468 | inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg) |
| 2469 | { |
| 2470 | #ifdef _TARGET_AMD64_ |
| 2471 | switch (intReg) |
| 2472 | { |
| 2473 | case REG_RCX: |
| 2474 | return REG_XMM0; |
| 2475 | case REG_RDX: |
| 2476 | return REG_XMM1; |
| 2477 | case REG_R8: |
| 2478 | return REG_XMM2; |
| 2479 | case REG_R9: |
| 2480 | return REG_XMM3; |
| 2481 | default: |
| 2482 | unreached(); |
| 2483 | } |
| 2484 | #else // !_TARGET_AMD64_ |
| 2485 | // How will float args be passed for RyuJIT/x86? |
| 2486 | NYI("getCallArgFloatRegister for RyuJIT/x86" ); |
| 2487 | return REG_NA; |
| 2488 | #endif // !_TARGET_AMD64_ |
| 2489 | } |
| 2490 | #endif // FEATURE_VARARG |
| 2491 | |
| 2492 | /* |
| 2493 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2494 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2495 | XX Register Allocator XX |
| 2496 | XX Inline functions XX |
| 2497 | XX XX |
| 2498 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2499 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2500 | */ |
| 2501 | |
| 2502 | /*****************************************************************************/ |
| 2503 | |
| 2504 | inline bool rpCanAsgOperWithoutReg(GenTree* op, bool lclvar) |
| 2505 | { |
| 2506 | var_types type; |
| 2507 | |
| 2508 | switch (op->OperGet()) |
| 2509 | { |
| 2510 | case GT_CNS_LNG: |
| 2511 | case GT_CNS_INT: |
| 2512 | return true; |
| 2513 | case GT_LCL_VAR: |
| 2514 | type = genActualType(op->TypeGet()); |
| 2515 | if (lclvar && ((type == TYP_INT) || (type == TYP_REF) || (type == TYP_BYREF))) |
| 2516 | { |
| 2517 | return true; |
| 2518 | } |
| 2519 | break; |
| 2520 | default: |
| 2521 | break; |
| 2522 | } |
| 2523 | |
| 2524 | return false; |
| 2525 | } |
| 2526 | |
| 2527 | /* |
| 2528 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2529 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2530 | XX XX |
| 2531 | XX FlowGraph XX |
| 2532 | XX Inline functions XX |
| 2533 | XX XX |
| 2534 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2535 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2536 | */ |
| 2537 | |
| 2538 | inline bool Compiler::compCanEncodePtrArgCntMax() |
| 2539 | { |
| 2540 | #ifdef JIT32_GCENCODER |
| 2541 | // DDB 204533: |
| 2542 | // The GC encoding for fully interruptible methods does not |
| 2543 | // support more than 1023 pushed arguments, so we have to |
| 2544 | // use a partially interruptible GC info/encoding. |
| 2545 | // |
| 2546 | return (fgPtrArgCntMax < MAX_PTRARG_OFS); |
| 2547 | #else // JIT32_GCENCODER |
| 2548 | return true; |
| 2549 | #endif |
| 2550 | } |
| 2551 | |
| 2552 | /***************************************************************************** |
| 2553 | * |
| 2554 | * Call the given function pointer for all nodes in the tree. The 'visitor' |
| 2555 | * fn should return one of the following values: |
| 2556 | * |
| 2557 | * WALK_ABORT stop walking and return immediately |
| 2558 | * WALK_CONTINUE continue walking |
| 2559 | * WALK_SKIP_SUBTREES don't walk any subtrees of the node just visited |
| 2560 | * |
| 2561 | * computeStack - true if we want to make stack visible to callback function |
| 2562 | */ |
| 2563 | |
| 2564 | inline Compiler::fgWalkResult Compiler::fgWalkTreePre( |
| 2565 | GenTree** pTree, fgWalkPreFn* visitor, void* callBackData, bool lclVarsOnly, bool computeStack) |
| 2566 | |
| 2567 | { |
| 2568 | fgWalkData walkData; |
| 2569 | |
| 2570 | walkData.compiler = this; |
| 2571 | walkData.wtprVisitorFn = visitor; |
| 2572 | walkData.pCallbackData = callBackData; |
| 2573 | walkData.parent = nullptr; |
| 2574 | walkData.wtprLclsOnly = lclVarsOnly; |
| 2575 | #ifdef DEBUG |
| 2576 | walkData.printModified = false; |
| 2577 | #endif |
| 2578 | |
| 2579 | fgWalkResult result; |
| 2580 | if (lclVarsOnly && computeStack) |
| 2581 | { |
| 2582 | GenericTreeWalker<true, true, false, true, true> walker(&walkData); |
| 2583 | result = walker.WalkTree(pTree, nullptr); |
| 2584 | } |
| 2585 | else if (lclVarsOnly) |
| 2586 | { |
| 2587 | GenericTreeWalker<false, true, false, true, true> walker(&walkData); |
| 2588 | result = walker.WalkTree(pTree, nullptr); |
| 2589 | } |
| 2590 | else if (computeStack) |
| 2591 | { |
| 2592 | GenericTreeWalker<true, true, false, false, true> walker(&walkData); |
| 2593 | result = walker.WalkTree(pTree, nullptr); |
| 2594 | } |
| 2595 | else |
| 2596 | { |
| 2597 | GenericTreeWalker<false, true, false, false, true> walker(&walkData); |
| 2598 | result = walker.WalkTree(pTree, nullptr); |
| 2599 | } |
| 2600 | |
| 2601 | #ifdef DEBUG |
| 2602 | if (verbose && walkData.printModified) |
| 2603 | { |
| 2604 | gtDispTree(*pTree); |
| 2605 | } |
| 2606 | #endif |
| 2607 | |
| 2608 | return result; |
| 2609 | } |
| 2610 | |
| 2611 | /***************************************************************************** |
| 2612 | * |
* Same as above, except the tree walk is performed in a depth-first fashion.
| 2614 | * The 'visitor' fn should return one of the following values: |
| 2615 | * |
| 2616 | * WALK_ABORT stop walking and return immediately |
| 2617 | * WALK_CONTINUE continue walking |
| 2618 | * |
| 2619 | * computeStack - true if we want to make stack visible to callback function |
| 2620 | */ |
| 2621 | |
| 2622 | inline Compiler::fgWalkResult Compiler::fgWalkTreePost(GenTree** pTree, |
| 2623 | fgWalkPostFn* visitor, |
| 2624 | void* callBackData, |
| 2625 | bool computeStack) |
| 2626 | { |
| 2627 | fgWalkData walkData; |
| 2628 | |
| 2629 | walkData.compiler = this; |
| 2630 | walkData.wtpoVisitorFn = visitor; |
| 2631 | walkData.pCallbackData = callBackData; |
| 2632 | walkData.parent = nullptr; |
| 2633 | |
| 2634 | fgWalkResult result; |
| 2635 | if (computeStack) |
| 2636 | { |
| 2637 | GenericTreeWalker<true, false, true, false, true> walker(&walkData); |
| 2638 | result = walker.WalkTree(pTree, nullptr); |
| 2639 | } |
| 2640 | else |
| 2641 | { |
| 2642 | GenericTreeWalker<false, false, true, false, true> walker(&walkData); |
| 2643 | result = walker.WalkTree(pTree, nullptr); |
| 2644 | } |
| 2645 | |
| 2646 | assert(result == WALK_CONTINUE || result == WALK_ABORT); |
| 2647 | |
| 2648 | return result; |
| 2649 | } |
| 2650 | |
| 2651 | /***************************************************************************** |
| 2652 | * |
| 2653 | * Call the given function pointer for all nodes in the tree. The 'visitor' |
| 2654 | * fn should return one of the following values: |
| 2655 | * |
| 2656 | * WALK_ABORT stop walking and return immediately |
| 2657 | * WALK_CONTINUE continue walking |
| 2658 | * WALK_SKIP_SUBTREES don't walk any subtrees of the node just visited |
| 2659 | */ |
| 2660 | |
| 2661 | inline Compiler::fgWalkResult Compiler::fgWalkTree(GenTree** pTree, |
| 2662 | fgWalkPreFn* preVisitor, |
| 2663 | fgWalkPreFn* postVisitor, |
| 2664 | void* callBackData) |
| 2665 | |
| 2666 | { |
| 2667 | fgWalkData walkData; |
| 2668 | |
| 2669 | walkData.compiler = this; |
| 2670 | walkData.wtprVisitorFn = preVisitor; |
| 2671 | walkData.wtpoVisitorFn = postVisitor; |
| 2672 | walkData.pCallbackData = callBackData; |
| 2673 | walkData.parent = nullptr; |
| 2674 | walkData.wtprLclsOnly = false; |
| 2675 | #ifdef DEBUG |
| 2676 | walkData.printModified = false; |
| 2677 | #endif |
| 2678 | |
| 2679 | fgWalkResult result; |
| 2680 | |
| 2681 | assert(preVisitor || postVisitor); |
| 2682 | |
| 2683 | if (preVisitor && postVisitor) |
| 2684 | { |
| 2685 | GenericTreeWalker<true, true, true, false, true> walker(&walkData); |
| 2686 | result = walker.WalkTree(pTree, nullptr); |
| 2687 | } |
| 2688 | else if (preVisitor) |
| 2689 | { |
| 2690 | GenericTreeWalker<true, true, false, false, true> walker(&walkData); |
| 2691 | result = walker.WalkTree(pTree, nullptr); |
| 2692 | } |
| 2693 | else |
| 2694 | { |
| 2695 | GenericTreeWalker<true, false, true, false, true> walker(&walkData); |
| 2696 | result = walker.WalkTree(pTree, nullptr); |
| 2697 | } |
| 2698 | |
| 2699 | #ifdef DEBUG |
| 2700 | if (verbose && walkData.printModified) |
| 2701 | { |
| 2702 | gtDispTree(*pTree); |
| 2703 | } |
| 2704 | #endif |
| 2705 | |
| 2706 | return result; |
| 2707 | } |
| 2708 | |
| 2709 | /***************************************************************************** |
| 2710 | * |
* Has this block been added to throw an inlined exception?
| 2712 | * Returns true if the block was added to throw one of: |
| 2713 | * range-check exception |
| 2714 | * argument exception (used by feature SIMD) |
| 2715 | * argument range-check exception (used by feature SIMD) |
| 2716 | * divide by zero exception (Not used on X86/X64) |
| 2717 | * null reference exception (Not currently used) |
| 2718 | * overflow exception |
| 2719 | */ |
| 2720 | |
| 2721 | inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) |
| 2722 | { |
| 2723 | if (!fgIsCodeAdded()) |
| 2724 | { |
| 2725 | return false; |
| 2726 | } |
| 2727 | |
| 2728 | if (!(block->bbFlags & BBF_INTERNAL) || block->bbJumpKind != BBJ_THROW) |
| 2729 | { |
| 2730 | return false; |
| 2731 | } |
| 2732 | |
| 2733 | GenTree* call = block->lastNode(); |
| 2734 | |
| 2735 | #ifdef DEBUG |
| 2736 | if (block->IsLIR()) |
| 2737 | { |
| 2738 | LIR::Range& blockRange = LIR::AsRange(block); |
| 2739 | for (LIR::Range::ReverseIterator node = blockRange.rbegin(), end = blockRange.rend(); node != end; ++node) |
| 2740 | { |
| 2741 | if (node->OperGet() == GT_CALL) |
| 2742 | { |
| 2743 | assert(*node == call); |
| 2744 | assert(node == blockRange.rbegin()); |
| 2745 | break; |
| 2746 | } |
| 2747 | } |
| 2748 | } |
| 2749 | #endif |
| 2750 | |
| 2751 | if (!call || (call->gtOper != GT_CALL)) |
| 2752 | { |
| 2753 | return false; |
| 2754 | } |
| 2755 | |
| 2756 | if (!((call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL)) || |
| 2757 | (call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWDIVZERO)) || |
| 2758 | (call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWNULLREF)) || |
| 2759 | (call->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_OVERFLOW)))) |
| 2760 | { |
| 2761 | return false; |
| 2762 | } |
| 2763 | |
| 2764 | // We can get to this point for blocks that we didn't create as throw helper blocks |
| 2765 | // under stress, with crazy flow graph optimizations. So, walk the fgAddCodeList |
| 2766 | // for the final determination. |
| 2767 | |
| 2768 | for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext) |
| 2769 | { |
| 2770 | if (block == add->acdDstBlk) |
| 2771 | { |
| 2772 | return add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW || |
| 2773 | add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN; |
| 2774 | } |
| 2775 | } |
| 2776 | |
| 2777 | // We couldn't find it in the fgAddCodeList |
| 2778 | return false; |
| 2779 | } |
| 2780 | |
| 2781 | #if !FEATURE_FIXED_OUT_ARGS |
| 2782 | |
| 2783 | /***************************************************************************** |
| 2784 | * |
| 2785 | * Return the stackLevel of the inserted block that throws exception |
| 2786 | * (by calling the EE helper). |
| 2787 | */ |
| 2788 | |
| 2789 | inline unsigned Compiler::fgThrowHlpBlkStkLevel(BasicBlock* block) |
| 2790 | { |
| 2791 | for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext) |
| 2792 | { |
| 2793 | if (block == add->acdDstBlk) |
| 2794 | { |
| 2795 | // Compute assert cond separately as assert macro cannot have conditional compilation directives. |
| 2796 | bool cond = |
| 2797 | (add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW || |
| 2798 | add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN); |
| 2799 | assert(cond); |
| 2800 | |
// TODO: bbTgtStkDepth is DEBUG-only.
// Should we use it regularly and avoid this search?
| 2803 | assert(block->bbTgtStkDepth == add->acdStkLvl); |
| 2804 | return add->acdStkLvl; |
| 2805 | } |
| 2806 | } |
| 2807 | |
| 2808 | noway_assert(!"fgThrowHlpBlkStkLevel should only be called if fgIsThrowHlpBlk() is true, but we can't find the " |
| 2809 | "block in the fgAddCodeList list" ); |
| 2810 | |
| 2811 | /* We couldn't find the basic block: it must not have been a throw helper block */ |
| 2812 | |
| 2813 | return 0; |
| 2814 | } |
| 2815 | |
| 2816 | #endif // !FEATURE_FIXED_OUT_ARGS |
| 2817 | |
| 2818 | /* |
| 2819 | Small inline function to change a given block to a throw block. |
| 2820 | |
| 2821 | */ |
| 2822 | inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) |
| 2823 | { |
| 2824 | // If we're converting a BBJ_CALLFINALLY block to a BBJ_THROW block, |
| 2825 | // then mark the subsequent BBJ_ALWAYS block as unreferenced. |
| 2826 | if (block->isBBCallAlwaysPair()) |
| 2827 | { |
| 2828 | BasicBlock* leaveBlk = block->bbNext; |
| 2829 | noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); |
| 2830 | |
| 2831 | leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; |
| 2832 | leaveBlk->bbRefs = 0; |
| 2833 | leaveBlk->bbPreds = nullptr; |
| 2834 | |
| 2835 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 2836 | // This function (fgConvertBBToThrowBB) can be called before the predecessor lists are created (e.g., in |
| 2837 | // fgMorph). The fgClearFinallyTargetBit() function to update the BBF_FINALLY_TARGET bit depends on these |
| 2838 | // predecessor lists. If there are no predecessor lists, we immediately clear all BBF_FINALLY_TARGET bits |
| 2839 | // (to allow subsequent dead code elimination to delete such blocks without asserts), and set a flag to |
| 2840 | // recompute them later, before they are required. |
| 2841 | if (fgComputePredsDone) |
| 2842 | { |
| 2843 | fgClearFinallyTargetBit(leaveBlk->bbJumpDest); |
| 2844 | } |
| 2845 | else |
| 2846 | { |
| 2847 | fgClearAllFinallyTargetBits(); |
| 2848 | fgNeedToAddFinallyTargetBits = true; |
| 2849 | } |
| 2850 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 2851 | } |
| 2852 | |
| 2853 | block->bbJumpKind = BBJ_THROW; |
| 2854 | block->bbSetRunRarely(); // any block with a throw is rare |
| 2855 | } |
| 2856 | |
| 2857 | /***************************************************************************** |
| 2858 | * |
| 2859 | * Return true if we've added any new basic blocks. |
| 2860 | */ |
| 2861 | |
| 2862 | inline bool Compiler::fgIsCodeAdded() |
| 2863 | { |
| 2864 | return fgAddCodeModf; |
| 2865 | } |
| 2866 | |
| 2867 | /***************************************************************************** |
| 2868 | Is the offset too big? |
| 2869 | */ |
| 2870 | inline bool Compiler::fgIsBigOffset(size_t offset) |
| 2871 | { |
| 2872 | return (offset > compMaxUncheckedOffsetForNullObject); |
| 2873 | } |
| 2874 | |
| 2875 | /* |
| 2876 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2877 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2878 | XX TempsInfo XX |
| 2879 | XX Inline functions XX |
| 2880 | XX XX |
| 2881 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2882 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 2883 | */ |
| 2884 | |
| 2885 | /*****************************************************************************/ |
| 2886 | |
| 2887 | /* static */ inline unsigned RegSet::tmpSlot(unsigned size) |
| 2888 | { |
| 2889 | noway_assert(size >= sizeof(int)); |
| 2890 | noway_assert(size <= TEMP_MAX_SIZE); |
| 2891 | assert((size % sizeof(int)) == 0); |
| 2892 | |
| 2893 | assert(size < UINT32_MAX); |
| 2894 | return size / sizeof(int) - 1; |
| 2895 | } |
| 2896 | |
| 2897 | /***************************************************************************** |
| 2898 | * |
| 2899 | * Finish allocating temps - should be called each time after a pass is made |
| 2900 | * over a function body. |
| 2901 | */ |
| 2902 | |
| 2903 | inline void RegSet::tmpEnd() |
| 2904 | { |
| 2905 | #ifdef DEBUG |
| 2906 | if (m_rsCompiler->verbose && (tmpCount > 0)) |
| 2907 | { |
| 2908 | printf("%d tmps used\n" , tmpCount); |
| 2909 | } |
| 2910 | #endif // DEBUG |
| 2911 | } |
| 2912 | |
| 2913 | /***************************************************************************** |
| 2914 | * |
| 2915 | * Shuts down the temp-tracking code. Should be called once per function |
| 2916 | * compiled. |
| 2917 | */ |
| 2918 | |
| 2919 | inline void RegSet::tmpDone() |
| 2920 | { |
| 2921 | #ifdef DEBUG |
| 2922 | unsigned count; |
| 2923 | TempDsc* temp; |
| 2924 | |
| 2925 | assert(tmpAllFree()); |
| 2926 | for (temp = tmpListBeg(), count = temp ? 1 : 0; temp; temp = tmpListNxt(temp), count += temp ? 1 : 0) |
| 2927 | { |
| 2928 | assert(temp->tdLegalOffset()); |
| 2929 | } |
| 2930 | |
| 2931 | // Make sure that all the temps were released |
| 2932 | assert(count == tmpCount); |
| 2933 | assert(tmpGetCount == 0); |
| 2934 | #endif // DEBUG |
| 2935 | } |
| 2936 | |
| 2937 | #ifdef DEBUG |
| 2938 | inline bool Compiler::shouldUseVerboseTrees() |
| 2939 | { |
| 2940 | return (JitConfig.JitDumpVerboseTrees() == 1); |
| 2941 | } |
| 2942 | |
| 2943 | inline bool Compiler::shouldUseVerboseSsa() |
| 2944 | { |
| 2945 | return (JitConfig.JitDumpVerboseSsa() == 1); |
| 2946 | } |
| 2947 | |
| 2948 | //------------------------------------------------------------------------ |
| 2949 | // shouldDumpASCIITrees: Should we use only ASCII characters for tree dumps? |
| 2950 | // |
| 2951 | // Notes: |
// This defaults to 1 in clrConfigValues.h
| 2953 | |
| 2954 | inline bool Compiler::shouldDumpASCIITrees() |
| 2955 | { |
| 2956 | return (JitConfig.JitDumpASCII() == 1); |
| 2957 | } |
| 2958 | |
| 2959 | /***************************************************************************** |
| 2960 | * Should we enable JitStress mode? |
| 2961 | * 0: No stress |
| 2962 | * !=2: Vary stress. Performance will be slightly/moderately degraded |
| 2963 | * 2: Check-all stress. Performance will be REALLY horrible |
| 2964 | */ |
| 2965 | |
| 2966 | inline DWORD getJitStressLevel() |
| 2967 | { |
| 2968 | return JitConfig.JitStress(); |
| 2969 | } |
| 2970 | |
| 2971 | /***************************************************************************** |
| 2972 | * Should we do the strict check for non-virtual call to the virtual method? |
| 2973 | */ |
| 2974 | |
| 2975 | inline DWORD StrictCheckForNonVirtualCallToVirtualMethod() |
| 2976 | { |
| 2977 | return JitConfig.JitStrictCheckForNonVirtualCallToVirtualMethod() == 1; |
| 2978 | } |
| 2979 | |
| 2980 | #endif // DEBUG |
| 2981 | |
| 2982 | /*****************************************************************************/ |
| 2983 | /* Map a register argument number ("RegArgNum") to a register number ("RegNum"). |
| 2984 | * A RegArgNum is in this range: |
| 2985 | * [0, MAX_REG_ARG) -- for integer registers |
| 2986 | * [0, MAX_FLOAT_REG_ARG) -- for floating point registers |
| 2987 | * Note that RegArgNum's are overlapping for integer and floating-point registers, |
| 2988 | * while RegNum's are not (for ARM anyway, though for x86, it might be different). |
* If we have a fixed return buffer register and are given its index,
* we return the fixed return buffer register.
| 2991 | */ |
| 2992 | |
| 2993 | inline regNumber genMapIntRegArgNumToRegNum(unsigned argNum) |
| 2994 | { |
| 2995 | if (hasFixedRetBuffReg() && (argNum == theFixedRetBuffArgNum())) |
| 2996 | { |
| 2997 | return theFixedRetBuffReg(); |
| 2998 | } |
| 2999 | |
| 3000 | assert(argNum < ArrLen(intArgRegs)); |
| 3001 | |
| 3002 | return intArgRegs[argNum]; |
| 3003 | } |
| 3004 | |
| 3005 | inline regNumber genMapFloatRegArgNumToRegNum(unsigned argNum) |
| 3006 | { |
| 3007 | #ifndef _TARGET_X86_ |
| 3008 | assert(argNum < ArrLen(fltArgRegs)); |
| 3009 | |
| 3010 | return fltArgRegs[argNum]; |
| 3011 | #else |
| 3012 | assert(!"no x86 float arg regs\n" ); |
| 3013 | return REG_NA; |
| 3014 | #endif |
| 3015 | } |
| 3016 | |
| 3017 | __forceinline regNumber genMapRegArgNumToRegNum(unsigned argNum, var_types type) |
| 3018 | { |
| 3019 | if (varTypeIsFloating(type)) |
| 3020 | { |
| 3021 | return genMapFloatRegArgNumToRegNum(argNum); |
| 3022 | } |
| 3023 | else |
| 3024 | { |
| 3025 | return genMapIntRegArgNumToRegNum(argNum); |
| 3026 | } |
| 3027 | } |
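|  | 
|  | // Illustrative sketch (not part of the build): on a target whose integer argument
|  | // registers are r0..r3 (ARM32, for example), the mapping behaves as follows. The
|  | // exact registers are target-dependent; the values here are assumptions for
|  | // illustration only.
|  | //
|  | //   regNumber r = genMapIntRegArgNumToRegNum(1);   // REG_R1
|  | //   r = genMapRegArgNumToRegNum(0, TYP_INT);       // REG_R0 (integer path)
|  | //   r = genMapRegArgNumToRegNum(0, TYP_FLOAT);     // first float arg register (float path)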
| 3028 | |
| 3029 | /*****************************************************************************/ |
| 3030 | /* Map a register argument number ("RegArgNum") to a register mask of the associated register. |
| 3031 |  * Note that for floating-point registers, only the low register of a register pair
| 3032 |  * (for a double on ARM) is returned.
| 3033 | */ |
| 3034 | |
| 3035 | inline regMaskTP genMapIntRegArgNumToRegMask(unsigned argNum) |
| 3036 | { |
| 3037 | assert(argNum < ArrLen(intArgMasks)); |
| 3038 | |
| 3039 | return intArgMasks[argNum]; |
| 3040 | } |
| 3041 | |
| 3042 | inline regMaskTP genMapFloatRegArgNumToRegMask(unsigned argNum) |
| 3043 | { |
| 3044 | #ifndef _TARGET_X86_ |
| 3045 | assert(argNum < ArrLen(fltArgMasks)); |
| 3046 | |
| 3047 | return fltArgMasks[argNum]; |
| 3048 | #else |
| 3049 |     assert(!"no x86 float arg regs\n");
| 3050 | return RBM_NONE; |
| 3051 | #endif |
| 3052 | } |
| 3053 | |
| 3054 | __forceinline regMaskTP genMapArgNumToRegMask(unsigned argNum, var_types type) |
| 3055 | { |
| 3056 | regMaskTP result; |
| 3057 | if (varTypeIsFloating(type)) |
| 3058 | { |
| 3059 | result = genMapFloatRegArgNumToRegMask(argNum); |
| 3060 | #ifdef _TARGET_ARM_ |
| 3061 | if (type == TYP_DOUBLE) |
| 3062 | { |
| 3063 | assert((result & RBM_DBL_REGS) != 0); |
| 3064 | result |= (result << 1); |
| 3065 | } |
| 3066 | #endif |
| 3067 | } |
| 3068 | else |
| 3069 | { |
| 3070 | result = genMapIntRegArgNumToRegMask(argNum); |
| 3071 | } |
| 3072 | return result; |
| 3073 | } |
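|  | 
|  | // Illustrative sketch (not part of the build): on ARM a TYP_DOUBLE occupies an
|  | // even/odd float register pair, and the "result |= (result << 1)" above widens the
|  | // mask to cover both halves. The register names below are assumptions for illustration:
|  | //
|  | //   // Suppose float RegArgNum 2 maps to f2 (mask RBM_F2):
|  | //   regMaskTP m = genMapArgNumToRegMask(2, TYP_DOUBLE); // m == (RBM_F2 | RBM_F3)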
| 3074 | |
| 3075 | /*****************************************************************************/ |
| 3076 | /* Map a register number ("RegNum") to a register argument number ("RegArgNum") |
| 3077 | * If we have a fixed return buffer register we return theFixedRetBuffArgNum |
| 3078 | */ |
| 3079 | |
| 3080 | inline unsigned genMapIntRegNumToRegArgNum(regNumber regNum) |
| 3081 | { |
| 3082 | assert(genRegMask(regNum) & fullIntArgRegMask()); |
| 3083 | |
| 3084 | switch (regNum) |
| 3085 | { |
| 3086 | case REG_ARG_0: |
| 3087 | return 0; |
| 3088 | #if MAX_REG_ARG >= 2 |
| 3089 | case REG_ARG_1: |
| 3090 | return 1; |
| 3091 | #if MAX_REG_ARG >= 3 |
| 3092 | case REG_ARG_2: |
| 3093 | return 2; |
| 3094 | #if MAX_REG_ARG >= 4 |
| 3095 | case REG_ARG_3: |
| 3096 | return 3; |
| 3097 | #if MAX_REG_ARG >= 5 |
| 3098 | case REG_ARG_4: |
| 3099 | return 4; |
| 3100 | #if MAX_REG_ARG >= 6 |
| 3101 | case REG_ARG_5: |
| 3102 | return 5; |
| 3103 | #if MAX_REG_ARG >= 7 |
| 3104 | case REG_ARG_6: |
| 3105 | return 6; |
| 3106 | #if MAX_REG_ARG >= 8 |
| 3107 | case REG_ARG_7: |
| 3108 | return 7; |
| 3109 | #endif |
| 3110 | #endif |
| 3111 | #endif |
| 3112 | #endif |
| 3113 | #endif |
| 3114 | #endif |
| 3115 | #endif |
| 3116 | default: |
| 3117 | // Check for the Arm64 fixed return buffer argument register |
| 3118 | if (hasFixedRetBuffReg() && (regNum == theFixedRetBuffReg())) |
| 3119 | { |
| 3120 | return theFixedRetBuffArgNum(); |
| 3121 | } |
| 3122 | else |
| 3123 | { |
| 3124 |                 assert(!"invalid register arg register");
| 3125 | return BAD_VAR_NUM; |
| 3126 | } |
| 3127 | } |
| 3128 | } |
| 3129 | |
| 3130 | inline unsigned genMapFloatRegNumToRegArgNum(regNumber regNum) |
| 3131 | { |
| 3132 | assert(genRegMask(regNum) & RBM_FLTARG_REGS); |
| 3133 | |
| 3134 | #ifdef _TARGET_ARM_ |
| 3135 | return regNum - REG_F0; |
| 3136 | #elif defined(_TARGET_ARM64_) |
| 3137 | return regNum - REG_V0; |
| 3138 | #elif defined(UNIX_AMD64_ABI) |
| 3139 | return regNum - REG_FLTARG_0; |
| 3140 | #else |
| 3141 | |
| 3142 | #if MAX_FLOAT_REG_ARG >= 1 |
| 3143 | switch (regNum) |
| 3144 | { |
| 3145 | case REG_FLTARG_0: |
| 3146 | return 0; |
| 3147 | #if MAX_FLOAT_REG_ARG >= 2
| 3148 |         case REG_FLTARG_1:
| 3149 |             return 1;
| 3150 | #if MAX_FLOAT_REG_ARG >= 3
| 3151 |         case REG_FLTARG_2:
| 3152 |             return 2;
| 3153 | #if MAX_FLOAT_REG_ARG >= 4
| 3154 |         case REG_FLTARG_3:
| 3155 |             return 3;
| 3156 | #if MAX_FLOAT_REG_ARG >= 5
| 3157 | case REG_FLTARG_4: |
| 3158 | return 4; |
| 3159 | #endif |
| 3160 | #endif |
| 3161 | #endif |
| 3162 | #endif |
| 3163 | default: |
| 3164 |             assert(!"invalid register arg register");
| 3165 | return BAD_VAR_NUM; |
| 3166 | } |
| 3167 | #else |
| 3168 |     assert(!"flt reg args not allowed");
| 3169 | return BAD_VAR_NUM; |
| 3170 | #endif |
| 3171 | #endif // !arm |
| 3172 | } |
| 3173 | |
| 3174 | inline unsigned genMapRegNumToRegArgNum(regNumber regNum, var_types type) |
| 3175 | { |
| 3176 | if (varTypeIsFloating(type)) |
| 3177 | { |
| 3178 | return genMapFloatRegNumToRegArgNum(regNum); |
| 3179 | } |
| 3180 | else |
| 3181 | { |
| 3182 | return genMapIntRegNumToRegArgNum(regNum); |
| 3183 | } |
| 3184 | } |
| 3185 | |
| 3186 | /*****************************************************************************/ |
| 3187 | /* Return a register mask with the first 'numRegs' argument registers set. |
| 3188 | */ |
| 3189 | |
| 3190 | inline regMaskTP genIntAllRegArgMask(unsigned numRegs) |
| 3191 | { |
| 3192 | assert(numRegs <= MAX_REG_ARG); |
| 3193 | |
| 3194 | regMaskTP result = RBM_NONE; |
| 3195 | for (unsigned i = 0; i < numRegs; i++) |
| 3196 | { |
| 3197 | result |= intArgMasks[i]; |
| 3198 | } |
| 3199 | return result; |
| 3200 | } |
| 3201 | |
| 3202 | inline regMaskTP genFltAllRegArgMask(unsigned numRegs) |
| 3203 | { |
| 3204 | assert(numRegs <= MAX_FLOAT_REG_ARG); |
| 3205 | |
| 3206 | regMaskTP result = RBM_NONE; |
| 3207 | for (unsigned i = 0; i < numRegs; i++) |
| 3208 | { |
| 3209 | result |= fltArgMasks[i]; |
| 3210 | } |
| 3211 | return result; |
| 3212 | } |
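|  | 
|  | // Illustrative sketch (not part of the build): these helpers simply OR together the
|  | // first 'numRegs' entries of the per-target argument-mask tables, e.g.:
|  | //
|  | //   regMaskTP im = genIntAllRegArgMask(2); // intArgMasks[0] | intArgMasks[1]
|  | //   regMaskTP fm = genFltAllRegArgMask(0); // RBM_NONE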
| 3213 | |
| 3214 | /* |
| 3215 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3216 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3217 | XX Liveness XX |
| 3218 | XX Inline functions XX |
| 3219 | XX XX |
| 3220 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3221 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3222 | */ |
| 3223 | |
| 3224 | template <bool ForCodeGen> |
| 3225 | inline void Compiler::compUpdateLife(VARSET_VALARG_TP newLife) |
| 3226 | { |
| 3227 | if (!VarSetOps::Equal(this, compCurLife, newLife)) |
| 3228 | { |
| 3229 | compChangeLife<ForCodeGen>(newLife); |
| 3230 | } |
| 3231 | #ifdef DEBUG |
| 3232 | else |
| 3233 | { |
| 3234 | if (verbose) |
| 3235 | { |
| 3236 |             printf("Liveness not changing: %s ", VarSetOps::ToString(this, compCurLife));
| 3237 |             dumpConvertedVarSet(this, compCurLife);
| 3238 |             printf("\n");
| 3239 | } |
| 3240 | } |
| 3241 | #endif // DEBUG |
| 3242 | } |
| 3243 | |
| 3244 | /***************************************************************************** |
| 3245 | * |
| 3246 | * We stash cookies in basic blocks for the code emitter; this call retrieves |
| 3247 | * the cookie associated with the given basic block. |
| 3248 | */ |
| 3249 | |
| 3250 | inline void* emitCodeGetCookie(BasicBlock* block) |
| 3251 | { |
| 3252 | assert(block); |
| 3253 | return block->bbEmitCookie; |
| 3254 | } |
| 3255 | |
| 3256 | /* |
| 3257 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3258 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3259 | XX Optimizer XX |
| 3260 | XX Inline functions XX |
| 3261 | XX XX |
| 3262 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3263 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3264 | */ |
| 3265 | |
| 3266 | #if LOCAL_ASSERTION_PROP |
| 3267 | |
| 3268 | /***************************************************************************** |
| 3269 | * |
| 3270 |  *  The following resets the value assignment table;
| 3271 |  *  it is used only during local assertion prop
| 3272 | */ |
| 3273 | |
| 3274 | inline void Compiler::optAssertionReset(AssertionIndex limit) |
| 3275 | { |
| 3276 | PREFAST_ASSUME(optAssertionCount <= optMaxAssertionCount); |
| 3277 | |
| 3278 | while (optAssertionCount > limit) |
| 3279 | { |
| 3280 | AssertionIndex index = optAssertionCount; |
| 3281 | AssertionDsc* curAssertion = optGetAssertion(index); |
| 3282 | optAssertionCount--; |
| 3283 | unsigned lclNum = curAssertion->op1.lcl.lclNum; |
| 3284 | assert(lclNum < lvaTableCnt); |
| 3285 | BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1); |
| 3286 | |
| 3287 | // |
| 3288 | // Find the Copy assertions |
| 3289 | // |
| 3290 | if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) && |
| 3291 | (curAssertion->op2.kind == O2K_LCLVAR_COPY)) |
| 3292 | { |
| 3293 | // |
| 3294 | // op2.lcl.lclNum no longer depends upon this assertion |
| 3295 | // |
| 3296 | lclNum = curAssertion->op2.lcl.lclNum; |
| 3297 | BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1); |
| 3298 | } |
| 3299 | } |
| 3300 | while (optAssertionCount < limit) |
| 3301 | { |
| 3302 | AssertionIndex index = ++optAssertionCount; |
| 3303 | AssertionDsc* curAssertion = optGetAssertion(index); |
| 3304 | unsigned lclNum = curAssertion->op1.lcl.lclNum; |
| 3305 | BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1); |
| 3306 | |
| 3307 | // |
| 3308 | // Check for Copy assertions |
| 3309 | // |
| 3310 | if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) && |
| 3311 | (curAssertion->op2.kind == O2K_LCLVAR_COPY)) |
| 3312 | { |
| 3313 | // |
| 3314 | // op2.lcl.lclNum now depends upon this assertion |
| 3315 | // |
| 3316 | lclNum = curAssertion->op2.lcl.lclNum; |
| 3317 | BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1); |
| 3318 | } |
| 3319 | } |
| 3320 | } |
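|  | 
|  | // Illustrative sketch (not part of the build): optAssertionReset can both shrink and
|  | // regrow the table while keeping the optAssertionDep bit vectors in sync, e.g.:
|  | //
|  | //   // With optAssertionCount == 5:
|  | //   optAssertionReset(0); // drops all 5 assertions and clears their dep bits
|  | //   optAssertionReset(3); // re-registers assertions 1..3 and re-adds their dep bits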
| 3321 | |
| 3322 | /***************************************************************************** |
| 3323 | * |
| 3324 | * The following removes the i-th entry in the value assignment table |
| 3325 | * used only during local assertion prop |
| 3326 | */ |
| 3327 | |
| 3328 | inline void Compiler::optAssertionRemove(AssertionIndex index) |
| 3329 | { |
| 3330 | assert(index > 0); |
| 3331 | assert(index <= optAssertionCount); |
| 3332 | PREFAST_ASSUME(optAssertionCount <= optMaxAssertionCount); |
| 3333 | |
| 3334 | AssertionDsc* curAssertion = optGetAssertion(index); |
| 3335 | |
| 3336 |     // Two cases to consider: if (index == optAssertionCount), then the last
| 3337 |     // entry in the table is to be removed; that happens automatically when
| 3338 |     // optAssertionCount is decremented, and we can just clear the optAssertionDep bits.
| 3339 |     // The other case is when index < optAssertionCount; here we overwrite the
| 3340 |     // index-th entry in the table with the data found at the end of the table.
| 3341 |     // Since we are reordering the table, the optAssertionDep bits need to be recreated:
| 3342 |     // calling optAssertionReset(0) and then optAssertionReset(newAssertionCount) will
| 3343 |     // correctly update the optAssertionDep bits.
| 3344 | // |
| 3345 | if (index == optAssertionCount) |
| 3346 | { |
| 3347 | unsigned lclNum = curAssertion->op1.lcl.lclNum; |
| 3348 | BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1); |
| 3349 | |
| 3350 | // |
| 3351 | // Check for Copy assertions |
| 3352 | // |
| 3353 | if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) && |
| 3354 | (curAssertion->op2.kind == O2K_LCLVAR_COPY)) |
| 3355 | { |
| 3356 | // |
| 3357 | // op2.lcl.lclNum no longer depends upon this assertion |
| 3358 | // |
| 3359 | lclNum = curAssertion->op2.lcl.lclNum; |
| 3360 | BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1); |
| 3361 | } |
| 3362 | |
| 3363 | optAssertionCount--; |
| 3364 | } |
| 3365 | else |
| 3366 | { |
| 3367 | AssertionDsc* lastAssertion = optGetAssertion(optAssertionCount); |
| 3368 | AssertionIndex newAssertionCount = optAssertionCount - 1; |
| 3369 | |
| 3370 |         optAssertionReset(0); // This makes optAssertionCount equal to 0
| 3371 | |
| 3372 | memcpy(curAssertion, // the entry to be removed |
| 3373 | lastAssertion, // last entry in the table |
| 3374 | sizeof(AssertionDsc)); |
| 3375 | |
| 3376 | optAssertionReset(newAssertionCount); |
| 3377 | } |
| 3378 | } |
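|  | 
|  | // Illustrative sketch (not part of the build): removing a non-final entry moves the
|  | // last entry into the vacated slot, so assertion indices are not stable, e.g.:
|  | //
|  | //   // With a table [A1, A2, A3] (optAssertionCount == 3):
|  | //   optAssertionRemove(2);
|  | //   // The table is now [A1, A3], and A3's old index (3) is no longer valid.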
| 3379 | #endif // LOCAL_ASSERTION_PROP |
| 3380 | |
| 3381 | inline void Compiler::LoopDsc::AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd) |
| 3382 | { |
| 3383 | if (lpFieldsModified == nullptr) |
| 3384 | { |
| 3385 | lpFieldsModified = |
| 3386 | new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::FieldHandleSet(comp->getAllocatorLoopHoist()); |
| 3387 | } |
| 3388 | lpFieldsModified->Set(fldHnd, true); |
| 3389 | } |
| 3390 | |
| 3391 | inline void Compiler::LoopDsc::AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd) |
| 3392 | { |
| 3393 | if (lpArrayElemTypesModified == nullptr) |
| 3394 | { |
| 3395 | lpArrayElemTypesModified = |
| 3396 | new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::ClassHandleSet(comp->getAllocatorLoopHoist()); |
| 3397 | } |
| 3398 | lpArrayElemTypesModified->Set(structHnd, true); |
| 3399 | } |
| 3400 | |
| 3401 | inline void Compiler::LoopDsc::VERIFY_lpIterTree() |
| 3402 | { |
| 3403 | #ifdef DEBUG |
| 3404 | assert(lpFlags & LPFLG_ITER); |
| 3405 | |
| 3406 | // iterTree should be "lcl ASG lcl <op> const" |
| 3407 | |
| 3408 | assert(lpIterTree->OperIs(GT_ASG)); |
| 3409 | |
| 3410 | GenTree* lhs = lpIterTree->gtOp.gtOp1; |
| 3411 | GenTree* rhs = lpIterTree->gtOp.gtOp2; |
| 3412 | assert(lhs->OperGet() == GT_LCL_VAR); |
| 3413 | |
| 3414 | switch (rhs->gtOper) |
| 3415 | { |
| 3416 | case GT_ADD: |
| 3417 | case GT_SUB: |
| 3418 | case GT_MUL: |
| 3419 | case GT_RSH: |
| 3420 | case GT_LSH: |
| 3421 | break; |
| 3422 | default: |
| 3423 |             assert(!"Unknown operator for loop increment");
| 3424 | } |
| 3425 | assert(rhs->gtOp.gtOp1->OperGet() == GT_LCL_VAR); |
| 3426 | assert(rhs->gtOp.gtOp1->AsLclVarCommon()->GetLclNum() == lhs->AsLclVarCommon()->GetLclNum()); |
| 3427 | assert(rhs->gtOp.gtOp2->OperGet() == GT_CNS_INT); |
| 3428 | #endif |
| 3429 | } |
| 3430 | |
| 3431 | //----------------------------------------------------------------------------- |
| 3432 | |
| 3433 | inline unsigned Compiler::LoopDsc::lpIterVar() |
| 3434 | { |
| 3435 | VERIFY_lpIterTree(); |
| 3436 | return lpIterTree->gtOp.gtOp1->gtLclVarCommon.gtLclNum; |
| 3437 | } |
| 3438 | |
| 3439 | //----------------------------------------------------------------------------- |
| 3440 | |
| 3441 | inline int Compiler::LoopDsc::lpIterConst() |
| 3442 | { |
| 3443 | VERIFY_lpIterTree(); |
| 3444 | GenTree* rhs = lpIterTree->gtOp.gtOp2; |
| 3445 | return (int)rhs->gtOp.gtOp2->gtIntCon.gtIconVal; |
| 3446 | } |
| 3447 | |
| 3448 | //----------------------------------------------------------------------------- |
| 3449 | |
| 3450 | inline genTreeOps Compiler::LoopDsc::lpIterOper() |
| 3451 | { |
| 3452 | VERIFY_lpIterTree(); |
| 3453 | GenTree* rhs = lpIterTree->gtOp.gtOp2; |
| 3454 | return rhs->OperGet(); |
| 3455 | } |
| 3456 | |
| 3457 | inline var_types Compiler::LoopDsc::lpIterOperType() |
| 3458 | { |
| 3459 | VERIFY_lpIterTree(); |
| 3460 | |
| 3461 | var_types type = lpIterTree->TypeGet(); |
| 3462 | assert(genActualType(type) == TYP_INT); |
| 3463 | |
| 3464 | if ((lpIterTree->gtFlags & GTF_UNSIGNED) && type == TYP_INT) |
| 3465 | { |
| 3466 | type = TYP_UINT; |
| 3467 | } |
| 3468 | |
| 3469 | return type; |
| 3470 | } |
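|  | 
|  | // Illustrative sketch (not part of the build): for a loop whose increment statement
|  | // is "i = i + 4" (an ASG whose RHS is "i ADD 4"), the accessors above decompose it as:
|  | //
|  | //   lpIterVar()   -> the local number of "i"
|  | //   lpIterOper()  -> GT_ADD
|  | //   lpIterConst() -> 4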
| 3471 | |
| 3472 | inline void Compiler::LoopDsc::VERIFY_lpTestTree() |
| 3473 | { |
| 3474 | #ifdef DEBUG |
| 3475 | assert(lpFlags & LPFLG_ITER); |
| 3476 | assert(lpTestTree); |
| 3477 | |
| 3478 | genTreeOps oper = lpTestTree->OperGet(); |
| 3479 | assert(GenTree::OperIsCompare(oper)); |
| 3480 | |
| 3481 | GenTree* iterator = nullptr; |
| 3482 | GenTree* limit = nullptr; |
| 3483 | if ((lpTestTree->gtOp.gtOp2->gtOper == GT_LCL_VAR) && (lpTestTree->gtOp.gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0) |
| 3484 | { |
| 3485 | iterator = lpTestTree->gtOp.gtOp2; |
| 3486 | limit = lpTestTree->gtOp.gtOp1; |
| 3487 | } |
| 3488 | else if ((lpTestTree->gtOp.gtOp1->gtOper == GT_LCL_VAR) && |
| 3489 | (lpTestTree->gtOp.gtOp1->gtFlags & GTF_VAR_ITERATOR) != 0) |
| 3490 | { |
| 3491 | iterator = lpTestTree->gtOp.gtOp1; |
| 3492 | limit = lpTestTree->gtOp.gtOp2; |
| 3493 | } |
| 3494 | else |
| 3495 | { |
| 3496 | // one of the nodes has to be the iterator |
| 3497 | assert(false); |
| 3498 | } |
| 3499 | |
| 3500 | if (lpFlags & LPFLG_CONST_LIMIT) |
| 3501 | { |
| 3502 | assert(limit->OperIsConst()); |
| 3503 | } |
| 3504 | if (lpFlags & LPFLG_VAR_LIMIT) |
| 3505 | { |
| 3506 | assert(limit->OperGet() == GT_LCL_VAR); |
| 3507 | } |
| 3508 | if (lpFlags & LPFLG_ARRLEN_LIMIT) |
| 3509 | { |
| 3510 | assert(limit->OperGet() == GT_ARR_LENGTH); |
| 3511 | } |
| 3512 | #endif |
| 3513 | } |
| 3514 | |
| 3515 | //----------------------------------------------------------------------------- |
| 3516 | |
| 3517 | inline bool Compiler::LoopDsc::lpIsReversed() |
| 3518 | { |
| 3519 | VERIFY_lpTestTree(); |
| 3520 | return ((lpTestTree->gtOp.gtOp2->gtOper == GT_LCL_VAR) && |
| 3521 | (lpTestTree->gtOp.gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0); |
| 3522 | } |
| 3523 | |
| 3524 | //----------------------------------------------------------------------------- |
| 3525 | |
| 3526 | inline genTreeOps Compiler::LoopDsc::lpTestOper() |
| 3527 | { |
| 3528 | VERIFY_lpTestTree(); |
| 3529 | genTreeOps op = lpTestTree->OperGet(); |
| 3530 | return lpIsReversed() ? GenTree::SwapRelop(op) : op; |
| 3531 | } |
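|  | 
|  | // Illustrative sketch (not part of the build): if the loop test was written as
|  | // "10 > i" rather than "i < 10", the iterator appears as op2 and the test is
|  | // "reversed"; lpTestOper() swaps the relop so callers always see the iterator on
|  | // the left:
|  | //
|  | //   // Test tree: GT_GT(10, i), with i flagged GTF_VAR_ITERATOR
|  | //   lpIsReversed() -> true
|  | //   lpTestOper()   -> GT_LT (i.e. "i < 10")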
| 3532 | |
| 3533 | //----------------------------------------------------------------------------- |
| 3534 | |
| 3535 | inline GenTree* Compiler::LoopDsc::lpIterator() |
| 3536 | { |
| 3537 | VERIFY_lpTestTree(); |
| 3538 | |
| 3539 | return lpIsReversed() ? lpTestTree->gtOp.gtOp2 : lpTestTree->gtOp.gtOp1; |
| 3540 | } |
| 3541 | |
| 3542 | //----------------------------------------------------------------------------- |
| 3543 | |
| 3544 | inline GenTree* Compiler::LoopDsc::lpLimit() |
| 3545 | { |
| 3546 | VERIFY_lpTestTree(); |
| 3547 | |
| 3548 | return lpIsReversed() ? lpTestTree->gtOp.gtOp1 : lpTestTree->gtOp.gtOp2; |
| 3549 | } |
| 3550 | |
| 3551 | //----------------------------------------------------------------------------- |
| 3552 | |
| 3553 | inline int Compiler::LoopDsc::lpConstLimit() |
| 3554 | { |
| 3555 | VERIFY_lpTestTree(); |
| 3556 | assert(lpFlags & LPFLG_CONST_LIMIT); |
| 3557 | |
| 3558 | GenTree* limit = lpLimit(); |
| 3559 | assert(limit->OperIsConst()); |
| 3560 | return (int)limit->gtIntCon.gtIconVal; |
| 3561 | } |
| 3562 | |
| 3563 | //----------------------------------------------------------------------------- |
| 3564 | |
| 3565 | inline unsigned Compiler::LoopDsc::lpVarLimit() |
| 3566 | { |
| 3567 | VERIFY_lpTestTree(); |
| 3568 | assert(lpFlags & LPFLG_VAR_LIMIT); |
| 3569 | |
| 3570 | GenTree* limit = lpLimit(); |
| 3571 | assert(limit->OperGet() == GT_LCL_VAR); |
| 3572 | return limit->gtLclVarCommon.gtLclNum; |
| 3573 | } |
| 3574 | |
| 3575 | //----------------------------------------------------------------------------- |
| 3576 | |
| 3577 | inline bool Compiler::LoopDsc::lpArrLenLimit(Compiler* comp, ArrIndex* index) |
| 3578 | { |
| 3579 | VERIFY_lpTestTree(); |
| 3580 | assert(lpFlags & LPFLG_ARRLEN_LIMIT); |
| 3581 | |
| 3582 | GenTree* limit = lpLimit(); |
| 3583 | assert(limit->OperGet() == GT_ARR_LENGTH); |
| 3584 | |
| 3585 | // Check if we have a.length or a[i][j].length |
| 3586 | if (limit->gtArrLen.ArrRef()->gtOper == GT_LCL_VAR) |
| 3587 | { |
| 3588 | index->arrLcl = limit->gtArrLen.ArrRef()->gtLclVarCommon.gtLclNum; |
| 3589 | index->rank = 0; |
| 3590 | return true; |
| 3591 | } |
| 3592 | // We have a[i].length, extract a[i] pattern. |
| 3593 | else if (limit->gtArrLen.ArrRef()->gtOper == GT_COMMA) |
| 3594 | { |
| 3595 | return comp->optReconstructArrIndex(limit->gtArrLen.ArrRef(), index, BAD_VAR_NUM); |
| 3596 | } |
| 3597 | return false; |
| 3598 | } |
| 3599 | |
| 3600 | /***************************************************************************** |
| 3601 | * Is "var" assigned in the loop "lnum" ? |
| 3602 | */ |
| 3603 | |
| 3604 | inline bool Compiler::optIsVarAssgLoop(unsigned lnum, unsigned var) |
| 3605 | { |
| 3606 | assert(lnum < optLoopCount); |
| 3607 | if (var < lclMAX_ALLSET_TRACKED) |
| 3608 | { |
| 3609 | ALLVARSET_TP vs(AllVarSetOps::MakeSingleton(this, var)); |
| 3610 | return optIsSetAssgLoop(lnum, vs) != 0; |
| 3611 | } |
| 3612 | else |
| 3613 | { |
| 3614 | return optIsVarAssigned(optLoopTable[lnum].lpHead->bbNext, optLoopTable[lnum].lpBottom, nullptr, var); |
| 3615 | } |
| 3616 | } |
| 3617 | |
| 3618 | /* |
| 3619 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3620 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3621 | XX XX |
| 3622 | XX Optimization activation rules XX |
| 3623 | XX XX |
| 3624 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3625 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3626 | */ |
| 3627 | |
| 3628 | // are we compiling for fast code, or are we compiling for blended code and |
| 3629 | // inside a loop? |
| 3630 | // We return true for BLENDED_CODE if the block's weight exceeds (BB_LOOP_WEIGHT / 2) * BB_UNITY_WEIGHT
| 3631 | inline bool Compiler::optFastCodeOrBlendedLoop(BasicBlock::weight_t bbWeight) |
| 3632 | { |
| 3633 | return (compCodeOpt() == FAST_CODE) || |
| 3634 | ((compCodeOpt() == BLENDED_CODE) && (bbWeight > (BB_LOOP_WEIGHT / 2 * BB_UNITY_WEIGHT))); |
| 3635 | } |
| 3636 | |
| 3637 | // are we running on an Intel Pentium 4?
| 3638 | inline bool Compiler::optPentium4(void) |
| 3639 | { |
| 3640 | return (info.genCPU == CPU_X86_PENTIUM_4); |
| 3641 | } |
| 3642 | |
| 3643 | // should we use add/sub instead of inc/dec? (faster on P4, but increases size) |
| 3644 | inline bool Compiler::optAvoidIncDec(BasicBlock::weight_t bbWeight) |
| 3645 | { |
| 3646 | return optPentium4() && optFastCodeOrBlendedLoop(bbWeight); |
| 3647 | } |
| 3648 | |
| 3649 | // should we try to replace integer multiplication with lea/add/shift sequences? |
| 3650 | inline bool Compiler::optAvoidIntMult(void) |
| 3651 | { |
| 3652 | return (compCodeOpt() != SMALL_CODE); |
| 3653 | } |
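|  | 
|  | // Illustrative sketch (not part of the build): when optAvoidIntMult() is true, a
|  | // multiply by a suitable small constant can be strength-reduced, for example on
|  | // x86/x64:
|  | //
|  | //   x * 5  ->  lea eax, [rcx + rcx*4]   ; one lea instead of an imul
|  | //
|  | // This trades a little code size for speed, which is why it is disabled for SMALL_CODE.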
| 3654 | |
| 3655 | /* |
| 3656 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3657 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3658 | XX EEInterface XX |
| 3659 | XX Inline functions XX |
| 3660 | XX XX |
| 3661 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3662 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3663 | */ |
| 3664 | |
| 3665 | extern var_types JITtype2varType(CorInfoType type); |
| 3666 | |
| 3667 | #include "ee_il_dll.hpp" |
| 3668 | |
| 3669 | inline CORINFO_METHOD_HANDLE Compiler::eeFindHelper(unsigned helper) |
| 3670 | { |
| 3671 | assert(helper < CORINFO_HELP_COUNT); |
| 3672 | |
| 3673 |     /* Helpers are marked by the fact that they are odd numbers;
| 3674 |      * we force this to be an odd number (and will shift it back to extract) */
| 3675 | |
| 3676 | return ((CORINFO_METHOD_HANDLE)(size_t)((helper << 2) + 1)); |
| 3677 | } |
| 3678 | |
| 3679 | inline CorInfoHelpFunc Compiler::eeGetHelperNum(CORINFO_METHOD_HANDLE method) |
| 3680 | { |
| 3681 | // Helpers are marked by the fact that they are odd numbers |
| 3682 | if (!(((size_t)method) & 1)) |
| 3683 | { |
| 3684 | return (CORINFO_HELP_UNDEF); |
| 3685 | } |
| 3686 | return ((CorInfoHelpFunc)(((size_t)method) >> 2)); |
| 3687 | } |
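|  | 
|  | // Illustrative sketch (not part of the build): the helper encoding round-trips
|  | // through the low bits of the handle; real method handles are aligned, so an odd
|  | // value never collides with one. ('realMethodHandle' below is hypothetical.)
|  | //
|  | //   CORINFO_METHOD_HANDLE h = eeFindHelper(5); // (5 << 2) + 1 == 0x15
|  | //   eeGetHelperNum(h);                         // (0x15 >> 2) == 5
|  | //   eeGetHelperNum(realMethodHandle);          // even value -> CORINFO_HELP_UNDEF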
| 3688 | |
| 3689 | inline Compiler::fgWalkResult Compiler::CountSharedStaticHelper(GenTree** pTree, fgWalkData* data) |
| 3690 | { |
| 3691 | if (Compiler::IsSharedStaticHelper(*pTree)) |
| 3692 | { |
| 3693 | int* pCount = (int*)data->pCallbackData; |
| 3694 | (*pCount)++; |
| 3695 | } |
| 3696 | |
| 3697 | return WALK_CONTINUE; |
| 3698 | } |
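|  | 
|  | // Illustrative sketch (not part of the build): CountSharedStaticHelper is shaped as
|  | // a pre-order tree-walk callback, with the running count passed via pCallbackData.
|  | // Assuming the usual fgWalkTreePre walker, and a hypothetical GenTree* 'tree':
|  | //
|  | //   int sharedHelperCount = 0;
|  | //   fgWalkTreePre(&tree, CountSharedStaticHelper, &sharedHelperCount);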
| 3699 | |
| 3700 | // TODO-Cleanup: Replace calls to IsSharedStaticHelper with new HelperCallProperties |
| 3701 | // |
| 3702 | |
| 3703 | inline bool Compiler::IsSharedStaticHelper(GenTree* tree) |
| 3704 | { |
| 3705 | if (tree->gtOper != GT_CALL || tree->gtCall.gtCallType != CT_HELPER) |
| 3706 | { |
| 3707 | return false; |
| 3708 | } |
| 3709 | |
| 3710 | CorInfoHelpFunc helper = eeGetHelperNum(tree->gtCall.gtCallMethHnd); |
| 3711 | |
| 3712 | bool result1 = |
| 3713 |         // More helpers being added to IsSharedStaticHelper (that have similar behaviors but are not true
| 3714 |         // SharedStaticHelpers)
| 3715 | helper == CORINFO_HELP_STRCNS || helper == CORINFO_HELP_BOX || |
| 3716 | |
| 3717 | // helpers being added to IsSharedStaticHelper |
| 3718 | helper == CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT || helper == CORINFO_HELP_GETSTATICFIELDADDR_TLS || |
| 3719 | helper == CORINFO_HELP_GETGENERICS_GCSTATIC_BASE || helper == CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE || |
| 3720 | helper == CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE || |
| 3721 | helper == CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE || |
| 3722 | |
| 3723 | helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE || helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE || |
| 3724 | helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR || |
| 3725 | helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR || |
| 3726 | helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS || |
| 3727 | helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS || |
| 3728 | helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE || |
| 3729 | helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE || |
| 3730 | helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR || |
| 3731 | helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR || |
| 3732 | helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS || |
| 3733 | helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS || |
| 3734 | #ifdef FEATURE_READYTORUN_COMPILER |
| 3735 | helper == CORINFO_HELP_READYTORUN_STATIC_BASE || helper == CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE || |
| 3736 | #endif |
| 3737 | helper == CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS; |
| 3738 | #if 0 |
| 3739 | // See above TODO-Cleanup |
| 3740 | bool result2 = s_helperCallProperties.IsPure(helper) && s_helperCallProperties.NonNullReturn(helper); |
| 3741 | assert (result1 == result2); |
| 3742 | #endif |
| 3743 | return result1; |
| 3744 | } |
| 3745 | |
| 3746 | inline bool Compiler::IsTreeAlwaysHoistable(GenTree* tree) |
| 3747 | { |
| 3748 | if (IsSharedStaticHelper(tree)) |
| 3749 | { |
| 3750 | return (GTF_CALL_HOISTABLE & tree->gtFlags) ? true : false; |
| 3751 | } |
| 3752 | else |
| 3753 | { |
| 3754 | return false; |
| 3755 | } |
| 3756 | } |
| 3757 | |
| 3758 | inline bool Compiler::IsGcSafePoint(GenTree* tree) |
| 3759 | { |
| 3760 | if (tree->IsCall()) |
| 3761 | { |
| 3762 | GenTreeCall* call = tree->AsCall(); |
| 3763 | if (!call->IsFastTailCall()) |
| 3764 | { |
| 3765 | if (call->gtCallType == CT_INDIRECT) |
| 3766 | { |
| 3767 | return true; |
| 3768 | } |
| 3769 | else if (call->gtCallType == CT_USER_FUNC) |
| 3770 | { |
| 3771 | if ((call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) == 0) |
| 3772 | { |
| 3773 | return true; |
| 3774 | } |
| 3775 | } |
| 3776 | // otherwise we have a CT_HELPER |
| 3777 | } |
| 3778 | } |
| 3779 | |
| 3780 | return false; |
| 3781 | } |
| 3782 | |
| 3783 | // |
| 3784 | // Note that we want to have two special FIELD_HANDLES that will both |
| 3785 | // be considered non-Data Offset handles |
| 3786 | // |
| 3787 | // The special values that we use are FLD_GLOBAL_DS and FLD_GLOBAL_FS |
| 3788 | // |
| 3789 | |
| 3790 | inline bool jitStaticFldIsGlobAddr(CORINFO_FIELD_HANDLE fldHnd) |
| 3791 | { |
| 3792 | return (fldHnd == FLD_GLOBAL_DS || fldHnd == FLD_GLOBAL_FS); |
| 3793 | } |
| 3794 | |
| 3795 | #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(FEATURE_TRACELOGGING) |
| 3796 | |
| 3797 | inline bool Compiler::eeIsNativeMethod(CORINFO_METHOD_HANDLE method) |
| 3798 | { |
| 3799 | return ((((size_t)method) & 0x2) == 0x2); |
| 3800 | } |
| 3801 | |
| 3802 | inline CORINFO_METHOD_HANDLE Compiler::eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method) |
| 3803 | { |
| 3804 | assert((((size_t)method) & 0x3) == 0x2); |
| 3805 | return (CORINFO_METHOD_HANDLE)(((size_t)method) & ~0x3); |
| 3806 | } |
| 3807 | #endif |
| 3808 | |
| 3809 | inline CORINFO_METHOD_HANDLE Compiler::eeMarkNativeTarget(CORINFO_METHOD_HANDLE method) |
| 3810 | { |
| 3811 | assert((((size_t)method) & 0x3) == 0); |
| 3812 | if (method == nullptr) |
| 3813 | { |
| 3814 | return method; |
| 3815 | } |
| 3816 | else |
| 3817 | { |
| 3818 | return (CORINFO_METHOD_HANDLE)(((size_t)method) | 0x2); |
| 3819 | } |
| 3820 | } |
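|  | 
|  | // Illustrative sketch (not part of the build): the low two bits of a method handle
|  | // distinguish the JIT's synthetic encodings from real (aligned) handles:
|  | //
|  | //   bits 1:0 == 00 -> ordinary CORINFO_METHOD_HANDLE
|  | //   bit  0   == 1  -> helper encoding (see eeFindHelper / eeGetHelperNum)
|  | //   bits 1:0 == 10 -> native-method marker (see eeMarkNativeTarget / eeIsNativeMethod)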
| 3821 | |
| 3822 | /* |
| 3823 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3824 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3825 | XX Compiler XX |
| 3826 | XX Inline functions XX |
| 3827 | XX XX |
| 3828 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3829 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 3830 | */ |
| 3831 | |
| 3832 | #ifndef DEBUG |
| 3833 | inline bool Compiler::compStressCompile(compStressArea stressArea, unsigned weightPercentage) |
| 3834 | { |
| 3835 | return false; |
| 3836 | } |
| 3837 | #endif |
| 3838 | |
| 3839 | inline ArenaAllocator* Compiler::compGetArenaAllocator() |
| 3840 | { |
| 3841 | return compArenaAllocator; |
| 3842 | } |
| 3843 | |
| 3844 | inline bool Compiler::compIsProfilerHookNeeded() |
| 3845 | { |
| 3846 | #ifdef PROFILING_SUPPORTED |
| 3847 | return compProfilerHookNeeded |
| 3848 |            // IL stubs are excluded by the VM, and we need to do the same even when running
| 3849 |            // under a COMPlus env hook to generate profiler hooks
| 3850 | || (opts.compJitELTHookEnabled && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)); |
| 3851 | #else // !PROFILING_SUPPORTED |
| 3852 | return false; |
| 3853 | #endif // !PROFILING_SUPPORTED |
| 3854 | } |
| 3855 | |
| 3856 | /***************************************************************************** |
| 3857 | * |
| 3858 | * Check for the special case where the object is the constant 0. |
| 3859 | * As we can't even fold the tree (null+fldOffs), we are left with |
| 3860 | * op1 and op2 both being a constant. This causes lots of problems. |
| 3861 | * We simply grab a temp and assign 0 to it and use it in place of the NULL. |
| 3862 | */ |
| 3863 | |
| 3864 | inline GenTree* Compiler::impCheckForNullPointer(GenTree* obj) |
| 3865 | { |
| 3866 |     /* If it is not a GC type, we will be able to fold it,
| 3867 |        so we don't need to do anything */
| 3868 | |
| 3869 | if (!varTypeIsGC(obj->TypeGet())) |
| 3870 | { |
| 3871 | return obj; |
| 3872 | } |
| 3873 | |
| 3874 | if (obj->gtOper == GT_CNS_INT) |
| 3875 | { |
| 3876 | assert(obj->gtType == TYP_REF || obj->gtType == TYP_BYREF); |
| 3877 | |
| 3878 | // We can see non-zero byrefs for RVA statics. |
| 3879 | if (obj->gtIntCon.gtIconVal != 0) |
| 3880 | { |
| 3881 | assert(obj->gtType == TYP_BYREF); |
| 3882 | return obj; |
| 3883 | } |
| 3884 | |
| 3885 |         unsigned tmp = lvaGrabTemp(true DEBUGARG("CheckForNullPointer"));
| 3886 | |
| 3887 | // We don't need to spill while appending as we are only assigning |
| 3888 | // NULL to a freshly-grabbed temp. |
| 3889 | |
| 3890 | impAssignTempGen(tmp, obj, (unsigned)CHECK_SPILL_NONE); |
| 3891 | |
| 3892 | obj = gtNewLclvNode(tmp, obj->gtType); |
| 3893 | } |
| 3894 | |
| 3895 | return obj; |
| 3896 | } |
| 3897 | |
| 3898 | /***************************************************************************** |
| 3899 | * |
| 3900 |  *  Check for the special case where the object is the method's original 'this' pointer.
| 3901 |  *  Note that the original 'this' pointer is always local var 0 for a non-static method,
| 3902 |  *  even if we might have created a copy of the 'this' pointer in lvaArg0Var.
| 3903 | */ |
| 3904 | |
| 3905 | inline bool Compiler::impIsThis(GenTree* obj) |
| 3906 | { |
| 3907 | if (compIsForInlining()) |
| 3908 | { |
| 3909 | return impInlineInfo->InlinerCompiler->impIsThis(obj); |
| 3910 | } |
| 3911 | else |
| 3912 | { |
| 3913 | return ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR) && lvaIsOriginalThisArg(obj->gtLclVarCommon.gtLclNum)); |
| 3914 | } |
| 3915 | } |
| 3916 | |
| 3917 | /***************************************************************************** |
| 3918 | * |
| 3919 | * Check to see if the delegate is created using "LDFTN <TOK>" or not. |
| 3920 | */ |
| 3921 | |
| 3922 | inline bool Compiler::impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr) |
| 3923 | { |
| 3924 | assert(newobjCodeAddr[0] == CEE_NEWOBJ); |
| 3925 | return (newobjCodeAddr - delegateCreateStart == 6 && // LDFTN <TOK> takes 6 bytes |
| 3926 | delegateCreateStart[0] == CEE_PREFIX1 && delegateCreateStart[1] == (CEE_LDFTN & 0xFF)); |
| 3927 | } |
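|  | 
|  | // Illustrative sketch (not part of the build): the 6-byte distance corresponds to the
|  | // IL encoding of LDFTN, a two-byte opcode followed by a 4-byte metadata token:
|  | //
|  | //   FE 06 <tok32>   ; CEE_PREFIX1, (CEE_LDFTN & 0xFF), token  -- 6 bytes
|  | //   73 <tok32>      ; the CEE_NEWOBJ that follows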
| 3928 | |
| 3929 | /***************************************************************************** |
| 3930 | * |
| 3931 | * Check to see if the delegate is created using "DUP LDVIRTFTN <TOK>" or not. |
| 3932 | */ |
| 3933 | |
| 3934 | inline bool Compiler::impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr) |
| 3935 | { |
| 3936 | assert(newobjCodeAddr[0] == CEE_NEWOBJ); |
| 3937 |     return (newobjCodeAddr - delegateCreateStart == 7 && // DUP LDVIRTFTN <TOK> takes 7 bytes
| 3938 | delegateCreateStart[0] == CEE_DUP && delegateCreateStart[1] == CEE_PREFIX1 && |
| 3939 | delegateCreateStart[2] == (CEE_LDVIRTFTN & 0xFF)); |
| 3940 | } |
|  | 
| 3941 | /*****************************************************************************
| 3942 | * |
| 3943 | * Returns true if the compiler instance is created for import only (verification). |
| 3944 | */ |
| 3945 | |
| 3946 | inline bool Compiler::compIsForImportOnly() |
| 3947 | { |
| 3948 | return opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY); |
| 3949 | } |
| 3950 | |
| 3951 | /***************************************************************************** |
| 3952 | * |
| 3953 | * Returns true if the compiler instance is created for inlining. |
| 3954 | */ |
| 3955 | |
| 3956 | inline bool Compiler::compIsForInlining() |
| 3957 | { |
| 3958 | return (impInlineInfo != nullptr); |
| 3959 | } |
| 3960 | |
| 3961 | /***************************************************************************** |
| 3962 | * |
| 3963 | * Check the inline result field in the compiler to see if inlining failed or not. |
| 3964 | */ |
| 3965 | |
| 3966 | inline bool Compiler::compDonotInline() |
| 3967 | { |
| 3968 | if (compIsForInlining()) |
| 3969 | { |
| 3970 | assert(compInlineResult != nullptr); |
| 3971 | return compInlineResult->IsFailure(); |
| 3972 | } |
| 3973 | else |
| 3974 | { |
| 3975 | return false; |
| 3976 | } |
| 3977 | } |
| 3978 | |
| 3979 | inline bool Compiler::impIsPrimitive(CorInfoType jitType) |
| 3980 | { |
| 3981 | return ((CORINFO_TYPE_BOOL <= jitType && jitType <= CORINFO_TYPE_DOUBLE) || jitType == CORINFO_TYPE_PTR); |
| 3982 | } |
| 3983 | |
| 3984 | /***************************************************************************** |
| 3985 | * |
| 3986 | * Get the promotion type of a struct local. |
| 3987 | */ |
| 3988 | |
| 3989 | inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(const LclVarDsc* varDsc) |
| 3990 | { |
| 3991 | assert(!varDsc->lvPromoted || varTypeIsPromotable(varDsc) || varDsc->lvUnusedStruct); |
| 3992 | |
| 3993 | if (!varDsc->lvPromoted) |
| 3994 | { |
| 3995 | // no struct promotion for this LclVar |
| 3996 | return PROMOTION_TYPE_NONE; |
| 3997 | } |
| 3998 | if (varDsc->lvDoNotEnregister) |
| 3999 | { |
| 4000 | // The struct is not enregistered |
| 4001 | return PROMOTION_TYPE_DEPENDENT; |
| 4002 | } |
| 4003 | if (!varDsc->lvIsParam) |
| 4004 | { |
| 4005 | // The struct is a register candidate |
| 4006 | return PROMOTION_TYPE_INDEPENDENT; |
| 4007 | } |
| 4008 | |
| 4009 |     // Has struct promotion for arguments been disabled using COMPlus_JitNoStructPromotion=2?
| 4010 | if (fgNoStructParamPromotion) |
| 4011 | { |
| 4012 | // The struct parameter is not enregistered |
| 4013 | return PROMOTION_TYPE_DEPENDENT; |
| 4014 | } |
| 4015 | |
| 4016 | // We have a parameter that could be enregistered |
| 4017 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 4018 | |
| 4019 | #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) |
| 4020 | |
| 4021 | // The struct parameter is a register candidate |
| 4022 | return PROMOTION_TYPE_INDEPENDENT; |
| 4023 | #else |
| 4024 | // The struct parameter is not enregistered |
| 4025 | return PROMOTION_TYPE_DEPENDENT; |
| 4026 | #endif |
| 4027 | } |
| 4028 | |
| 4029 | /***************************************************************************** |
| 4030 | * |
| 4031 | * Get the promotion type of a struct local. |
| 4032 | */ |
| 4033 | |
| 4034 | inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(unsigned varNum) |
| 4035 | { |
| 4036 | assert(varNum < lvaCount); |
| 4037 | return lvaGetPromotionType(&lvaTable[varNum]); |
| 4038 | } |
| 4039 | |
| 4040 | /***************************************************************************** |
| 4041 | * |
| 4042 | * Given a field local, get the promotion type of its parent struct local. |
| 4043 | */ |
| 4044 | |
| 4045 | inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(const LclVarDsc* varDsc) |
| 4046 | { |
| 4047 | assert(varDsc->lvIsStructField); |
| 4048 | assert(varDsc->lvParentLcl < lvaCount); |
| 4049 | |
| 4050 | lvaPromotionType promotionType = lvaGetPromotionType(varDsc->lvParentLcl); |
| 4051 | assert(promotionType != PROMOTION_TYPE_NONE); |
| 4052 | return promotionType; |
| 4053 | } |
| 4054 | |
| 4055 | /***************************************************************************** |
| 4056 | * |
| 4057 | * Given a field local, get the promotion type of its parent struct local. |
| 4058 | */ |
| 4059 | |
| 4060 | inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(unsigned varNum) |
| 4061 | { |
| 4062 | assert(varNum < lvaCount); |
| 4063 | return lvaGetParentPromotionType(&lvaTable[varNum]); |
| 4064 | } |
| 4065 | |
| 4066 | /***************************************************************************** |
| 4067 | * |
| 4068 | * Return true if the local is a field local of a promoted struct of type PROMOTION_TYPE_DEPENDENT. |
| 4069 | * Return false otherwise. |
| 4070 | */ |
| 4071 | |
| 4072 | inline bool Compiler::lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc) |
| 4073 | { |
| 4074 | if (!varDsc->lvIsStructField) |
| 4075 | { |
| 4076 | return false; |
| 4077 | } |
| 4078 | |
| 4079 | lvaPromotionType promotionType = lvaGetParentPromotionType(varDsc); |
| 4080 | if (promotionType == PROMOTION_TYPE_DEPENDENT) |
| 4081 | { |
| 4082 | return true; |
| 4083 | } |
| 4084 | |
| 4085 | assert(promotionType == PROMOTION_TYPE_INDEPENDENT); |
| 4086 | return false; |
| 4087 | } |
| 4088 | |
| 4089 | //------------------------------------------------------------------------ |
| 4090 | // lvaIsGCTracked: Determine whether this var should be reported |
| 4091 | // as tracked for GC purposes. |
| 4092 | // |
| 4093 | // Arguments: |
| 4094 | // varDsc - the LclVarDsc for the var in question. |
| 4095 | // |
| 4096 | // Return Value: |
| 4097 | // Returns true if the variable should be reported as tracked in the GC info. |
| 4098 | // |
| 4099 | // Notes: |
| 4100 | // This never returns true for struct variables, even if they are tracked. |
| 4101 | // This is because struct variables are never tracked as a whole for GC purposes. |
| 4102 | // It is up to the caller to ensure that the fields of struct variables are |
| 4103 | // correctly tracked. |
| 4104 | // On Amd64, we never GC-track fields of dependently promoted structs, even |
| 4105 | // though they may be tracked for optimization purposes. |
| 4106 | // It seems that on x86 and arm, we simply don't track these |
| 4107 | // fields, though I have not verified that. I attempted to make these GC-tracked, |
| 4108 | // but there was too much logic that depends on these being untracked, so changing |
| 4109 | // this would require non-trivial effort. |
| 4110 | |
| 4111 | inline bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc) |
| 4112 | { |
| 4113 | if (varDsc->lvTracked && (varDsc->lvType == TYP_REF || varDsc->lvType == TYP_BYREF)) |
| 4114 | { |
| 4115 |         // Stack parameters are always untracked w.r.t. GC reporting
| 4116 | const bool isStackParam = varDsc->lvIsParam && !varDsc->lvIsRegArg; |
| 4117 | #ifdef _TARGET_AMD64_ |
| 4118 | return !isStackParam && !lvaIsFieldOfDependentlyPromotedStruct(varDsc); |
| 4119 | #else // !_TARGET_AMD64_ |
| 4120 | return !isStackParam; |
| 4121 | #endif // !_TARGET_AMD64_ |
| 4122 | } |
| 4123 | else |
| 4124 | { |
| 4125 | return false; |
| 4126 | } |
| 4127 | } |
| 4128 | |
| 4129 | inline void Compiler::EndPhase(Phases phase) |
| 4130 | { |
| 4131 | #if defined(FEATURE_JIT_METHOD_PERF) |
| 4132 | if (pCompJitTimer != nullptr) |
| 4133 | { |
| 4134 | pCompJitTimer->EndPhase(this, phase); |
| 4135 | } |
| 4136 | #endif |
| 4137 | #if DUMP_FLOWGRAPHS |
| 4138 | fgDumpFlowGraph(phase); |
| 4139 | #endif // DUMP_FLOWGRAPHS |
| 4140 | previousCompletedPhase = phase; |
| 4141 | #ifdef DEBUG |
| 4142 | if (dumpIR) |
| 4143 | { |
| 4144 | if ((*dumpIRPhase == L'*') || (wcscmp(dumpIRPhase, PhaseShortNames[phase]) == 0)) |
| 4145 | { |
| 4146 |             printf("\n");
| 4147 |             printf("IR after %s (switch: %ls)\n", PhaseEnums[phase], PhaseShortNames[phase]);
| 4148 |             printf("\n");
| 4149 | |
| 4150 | if (dumpIRLinear) |
| 4151 | { |
| 4152 | dFuncIR(); |
| 4153 | } |
| 4154 | else if (dumpIRTrees) |
| 4155 | { |
| 4156 | dTrees(); |
| 4157 | } |
| 4158 | |
| 4159 | // If we are just dumping a single method and we have a request to exit |
| 4160 | // after dumping, do so now. |
| 4161 | |
| 4162 | if (dumpIRExit && ((*dumpIRPhase != L'*') || (phase == PHASE_EMIT_GCEH))) |
| 4163 | { |
| 4164 | exit(0); |
| 4165 | } |
| 4166 | } |
| 4167 | } |
| 4168 | #endif |
| 4169 | } |
| 4170 | |
| 4171 | /*****************************************************************************/ |
| 4172 | #if MEASURE_CLRAPI_CALLS |
| 4173 | |
| 4174 | inline void Compiler::CLRApiCallEnter(unsigned apix) |
| 4175 | { |
| 4176 | if (pCompJitTimer != nullptr) |
| 4177 | { |
| 4178 | pCompJitTimer->CLRApiCallEnter(apix); |
| 4179 | } |
| 4180 | } |
| 4181 | inline void Compiler::CLRApiCallLeave(unsigned apix) |
| 4182 | { |
| 4183 | if (pCompJitTimer != nullptr) |
| 4184 | { |
| 4185 | pCompJitTimer->CLRApiCallLeave(apix); |
| 4186 | } |
| 4187 | } |
| 4188 | |
| 4189 | inline void Compiler::CLR_API_Enter(API_ICorJitInfo_Names ename) |
| 4190 | { |
| 4191 | CLRApiCallEnter(ename); |
| 4192 | } |
| 4193 | |
| 4194 | inline void Compiler::CLR_API_Leave(API_ICorJitInfo_Names ename) |
| 4195 | { |
| 4196 | CLRApiCallLeave(ename); |
| 4197 | } |
| 4198 | |
| 4199 | #endif // MEASURE_CLRAPI_CALLS |
| 4200 | |
| 4201 | //------------------------------------------------------------------------------ |
| 4202 | // fgStructTempNeedsExplicitZeroInit : Check whether temp struct needs |
| 4203 | // explicit zero initialization in this basic block. |
| 4204 | // |
| 4205 | // Arguments: |
| 4206 | // varDsc - struct local var description |
| 4207 | // block - basic block to check |
| 4208 | // |
| 4209 | // Returns: |
| 4210 | // true if the struct temp needs explicit zero-initialization in this basic block; |
| 4211 | // false otherwise |
| 4212 | // |
| 4213 | // Notes: |
| 4214 | // If compInitMem is true, structs with GC pointer fields and long-lifetime structs |
| 4215 | // are fully zero-initialized in the prologue. Therefore, we don't need to insert |
| 4216 | // zero-initialization in this block if it is not in a loop. |
| 4217 | |
| 4218 | bool Compiler::fgStructTempNeedsExplicitZeroInit(LclVarDsc* varDsc, BasicBlock* block) |
| 4219 | { |
| 4220 | bool containsGCPtr = (varDsc->lvStructGcCount > 0); |
| 4221 | return (!info.compInitMem || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0) || (!containsGCPtr && varDsc->lvIsTemp)); |
| 4222 | } |
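|  | 
|  | // Illustrative sketch (not part of the build): with compInitMem true, a GC-pointer-
|  | // carrying struct temp in a block not reached by a backward jump was already zeroed
|  | // in the prologue, so no per-block initialization is needed:
|  | //
|  | //   // compInitMem == true, (block->bbFlags & BBF_BACKWARD_JUMP) == 0, lvStructGcCount > 0:
|  | //   fgStructTempNeedsExplicitZeroInit(varDsc, block); // -> false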
| 4223 | |
| 4224 | /*****************************************************************************/ |
| 4225 | ValueNum Compiler::GetUseAsgDefVNOrTreeVN(GenTree* op) |
| 4226 | { |
| 4227 | if (op->gtFlags & GTF_VAR_USEASG) |
| 4228 | { |
| 4229 | unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); |
| 4230 | unsigned ssaNum = GetSsaNumForLocalVarDef(op); |
| 4231 | return lvaTable[lclNum].GetPerSsaData(ssaNum)->m_vnPair.GetConservative(); |
| 4232 | } |
| 4233 | else |
| 4234 | { |
| 4235 | return op->gtVNPair.GetConservative(); |
| 4236 | } |
| 4237 | } |
| 4238 | |
| 4239 | /*****************************************************************************/ |
| 4240 | unsigned Compiler::GetSsaNumForLocalVarDef(GenTree* lcl) |
| 4241 | { |
| 4242 | // Address-taken variables don't have SSA numbers. |
| 4243 | if (!lvaInSsa(lcl->AsLclVarCommon()->gtLclNum)) |
| 4244 | { |
| 4245 | return SsaConfig::RESERVED_SSA_NUM; |
| 4246 | } |
| 4247 | |
| 4248 | if (lcl->gtFlags & GTF_VAR_USEASG) |
| 4249 | { |
| 4250 |         // It's a partial definition of a struct. "lcl" is both used and defined here;
| 4251 | // we've chosen in this case to annotate "lcl" with the SSA number (and VN) of the use, |
| 4252 | // and to store the SSA number of the def in a side table. |
| 4253 | unsigned ssaNum; |
| 4254 | // In case of a remorph (fgMorph) in CSE/AssertionProp after SSA phase, there |
| 4255 | // wouldn't be an entry for the USEASG portion of the indir addr, return |
| 4256 | // reserved. |
| 4257 | if (!GetOpAsgnVarDefSsaNums()->Lookup(lcl, &ssaNum)) |
| 4258 | { |
| 4259 | return SsaConfig::RESERVED_SSA_NUM; |
| 4260 | } |
| 4261 | return ssaNum; |
| 4262 | } |
| 4263 | else |
| 4264 | { |
| 4265 | return lcl->AsLclVarCommon()->gtSsaNum; |
| 4266 | } |
| 4267 | } |
| 4268 | |
| 4269 | template <typename TVisitor> |
| 4270 | void GenTree::VisitOperands(TVisitor visitor) |
| 4271 | { |
| 4272 | switch (OperGet()) |
| 4273 | { |
| 4274 | // Leaf nodes |
| 4275 | case GT_LCL_VAR: |
| 4276 | case GT_LCL_FLD: |
| 4277 | case GT_LCL_VAR_ADDR: |
| 4278 | case GT_LCL_FLD_ADDR: |
| 4279 | case GT_CATCH_ARG: |
| 4280 | case GT_LABEL: |
| 4281 | case GT_FTN_ADDR: |
| 4282 | case GT_RET_EXPR: |
| 4283 | case GT_CNS_INT: |
| 4284 | case GT_CNS_LNG: |
| 4285 | case GT_CNS_DBL: |
| 4286 | case GT_CNS_STR: |
| 4287 | case GT_MEMORYBARRIER: |
| 4288 | case GT_JMP: |
| 4289 | case GT_JCC: |
| 4290 | case GT_SETCC: |
| 4291 | case GT_NO_OP: |
| 4292 | case GT_START_NONGC: |
| 4293 | case GT_PROF_HOOK: |
| 4294 | #if !FEATURE_EH_FUNCLETS |
| 4295 | case GT_END_LFIN: |
| 4296 | #endif // !FEATURE_EH_FUNCLETS |
| 4297 | case GT_PHI_ARG: |
| 4298 | case GT_JMPTABLE: |
| 4299 | case GT_CLS_VAR: |
| 4300 | case GT_CLS_VAR_ADDR: |
| 4301 | case GT_ARGPLACE: |
| 4302 | case GT_PHYSREG: |
| 4303 | case GT_EMITNOP: |
| 4304 | case GT_PINVOKE_PROLOG: |
| 4305 | case GT_PINVOKE_EPILOG: |
| 4306 | case GT_IL_OFFSET: |
| 4307 | return; |
| 4308 | |
| 4309 | // Unary operators with an optional operand |
| 4310 | case GT_NOP: |
| 4311 | case GT_RETURN: |
| 4312 | case GT_RETFILT: |
| 4313 | if (this->AsUnOp()->gtOp1 == nullptr) |
| 4314 | { |
| 4315 | return; |
| 4316 | } |
| 4317 | __fallthrough; |
| 4318 | |
| 4319 | // Standard unary operators |
| 4320 | case GT_STORE_LCL_VAR: |
| 4321 | case GT_STORE_LCL_FLD: |
| 4322 | case GT_NOT: |
| 4323 | case GT_NEG: |
| 4324 | case GT_BSWAP: |
| 4325 | case GT_BSWAP16: |
| 4326 | case GT_COPY: |
| 4327 | case GT_RELOAD: |
| 4328 | case GT_ARR_LENGTH: |
| 4329 | case GT_CAST: |
| 4330 | case GT_BITCAST: |
| 4331 | case GT_CKFINITE: |
| 4332 | case GT_LCLHEAP: |
| 4333 | case GT_ADDR: |
| 4334 | case GT_IND: |
| 4335 | case GT_OBJ: |
| 4336 | case GT_BLK: |
| 4337 | case GT_BOX: |
| 4338 | case GT_ALLOCOBJ: |
| 4339 | case GT_INIT_VAL: |
| 4340 | case GT_JTRUE: |
| 4341 | case GT_SWITCH: |
| 4342 | case GT_NULLCHECK: |
| 4343 | case GT_PUTARG_REG: |
| 4344 | case GT_PUTARG_STK: |
| 4345 | #if FEATURE_ARG_SPLIT |
| 4346 | case GT_PUTARG_SPLIT: |
| 4347 | #endif // FEATURE_ARG_SPLIT |
| 4348 | case GT_RETURNTRAP: |
| 4349 | visitor(this->AsUnOp()->gtOp1); |
| 4350 | return; |
| 4351 | |
| 4352 | // Variadic nodes |
| 4353 | case GT_PHI: |
| 4354 | assert(this->AsUnOp()->gtOp1 != nullptr); |
| 4355 | this->AsUnOp()->gtOp1->VisitListOperands(visitor); |
| 4356 | return; |
| 4357 | |
| 4358 | case GT_FIELD_LIST: |
| 4359 | VisitListOperands(visitor); |
| 4360 | return; |
| 4361 | |
| 4362 | #ifdef FEATURE_SIMD |
| 4363 | case GT_SIMD: |
| 4364 | if (this->AsSIMD()->gtSIMDIntrinsicID == SIMDIntrinsicInitN) |
| 4365 | { |
| 4366 | assert(this->AsSIMD()->gtOp1 != nullptr); |
| 4367 | this->AsSIMD()->gtOp1->VisitListOperands(visitor); |
| 4368 | } |
| 4369 | else |
| 4370 | { |
| 4371 | VisitBinOpOperands<TVisitor>(visitor); |
| 4372 | } |
| 4373 | return; |
| 4374 | #endif // FEATURE_SIMD |
| 4375 | |
| 4376 | #ifdef FEATURE_HW_INTRINSICS |
| 4377 | case GT_HWIntrinsic: |
| 4378 | if ((this->AsHWIntrinsic()->gtOp1 != nullptr) && this->AsHWIntrinsic()->gtOp1->OperIsList()) |
| 4379 | { |
| 4380 | this->AsHWIntrinsic()->gtOp1->VisitListOperands(visitor); |
| 4381 | } |
| 4382 | else |
| 4383 | { |
| 4384 | VisitBinOpOperands<TVisitor>(visitor); |
| 4385 | } |
| 4386 | return; |
| 4387 | #endif // FEATURE_HW_INTRINSICS |
| 4388 | |
| 4389 | // Special nodes |
| 4390 | case GT_CMPXCHG: |
| 4391 | { |
| 4392 | GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg(); |
| 4393 | if (visitor(cmpXchg->gtOpLocation) == VisitResult::Abort) |
| 4394 | { |
| 4395 | return; |
| 4396 | } |
| 4397 | if (visitor(cmpXchg->gtOpValue) == VisitResult::Abort) |
| 4398 | { |
| 4399 | return; |
| 4400 | } |
| 4401 | visitor(cmpXchg->gtOpComparand); |
| 4402 | return; |
| 4403 | } |
| 4404 | |
| 4405 | case GT_ARR_BOUNDS_CHECK: |
| 4406 | #ifdef FEATURE_SIMD |
| 4407 | case GT_SIMD_CHK: |
| 4408 | #endif // FEATURE_SIMD |
| 4409 | #ifdef FEATURE_HW_INTRINSICS |
| 4410 | case GT_HW_INTRINSIC_CHK: |
| 4411 | #endif // FEATURE_HW_INTRINSICS |
| 4412 | { |
| 4413 | GenTreeBoundsChk* const boundsChk = this->AsBoundsChk(); |
| 4414 | if (visitor(boundsChk->gtIndex) == VisitResult::Abort) |
| 4415 | { |
| 4416 | return; |
| 4417 | } |
| 4418 | visitor(boundsChk->gtArrLen); |
| 4419 | return; |
| 4420 | } |
| 4421 | |
| 4422 | case GT_FIELD: |
| 4423 | if (this->AsField()->gtFldObj != nullptr) |
| 4424 | { |
| 4425 | visitor(this->AsField()->gtFldObj); |
| 4426 | } |
| 4427 | return; |
| 4428 | |
| 4429 | case GT_STMT: |
| 4430 | if (this->AsStmt()->gtStmtExpr != nullptr) |
| 4431 | { |
| 4432 | visitor(this->AsStmt()->gtStmtExpr); |
| 4433 | } |
| 4434 | return; |
| 4435 | |
| 4436 | case GT_ARR_ELEM: |
| 4437 | { |
| 4438 | GenTreeArrElem* const arrElem = this->AsArrElem(); |
| 4439 | if (visitor(arrElem->gtArrObj) == VisitResult::Abort) |
| 4440 | { |
| 4441 | return; |
| 4442 | } |
| 4443 | for (unsigned i = 0; i < arrElem->gtArrRank; i++) |
| 4444 | { |
| 4445 | if (visitor(arrElem->gtArrInds[i]) == VisitResult::Abort) |
| 4446 | { |
| 4447 | return; |
| 4448 | } |
| 4449 | } |
| 4450 | return; |
| 4451 | } |
| 4452 | |
| 4453 | case GT_ARR_OFFSET: |
| 4454 | { |
| 4455 | GenTreeArrOffs* const arrOffs = this->AsArrOffs(); |
| 4456 | if (visitor(arrOffs->gtOffset) == VisitResult::Abort) |
| 4457 | { |
| 4458 | return; |
| 4459 | } |
| 4460 | if (visitor(arrOffs->gtIndex) == VisitResult::Abort) |
| 4461 | { |
| 4462 | return; |
| 4463 | } |
| 4464 | visitor(arrOffs->gtArrObj); |
| 4465 | return; |
| 4466 | } |
| 4467 | |
| 4468 | case GT_DYN_BLK: |
| 4469 | { |
| 4470 | GenTreeDynBlk* const dynBlock = this->AsDynBlk(); |
| 4471 | if (visitor(dynBlock->gtOp1) == VisitResult::Abort) |
| 4472 | { |
| 4473 | return; |
| 4474 | } |
| 4475 | visitor(dynBlock->gtDynamicSize); |
| 4476 | return; |
| 4477 | } |
| 4478 | |
| 4479 | case GT_STORE_DYN_BLK: |
| 4480 | { |
| 4481 | GenTreeDynBlk* const dynBlock = this->AsDynBlk(); |
| 4482 | if (visitor(dynBlock->gtOp1) == VisitResult::Abort) |
| 4483 | { |
| 4484 | return; |
| 4485 | } |
| 4486 | if (visitor(dynBlock->gtOp2) == VisitResult::Abort) |
| 4487 | { |
| 4488 | return; |
| 4489 | } |
| 4490 | visitor(dynBlock->gtDynamicSize); |
| 4491 | return; |
| 4492 | } |
| 4493 | |
| 4494 | case GT_CALL: |
| 4495 | { |
| 4496 | GenTreeCall* const call = this->AsCall(); |
| 4497 | if ((call->gtCallObjp != nullptr) && (visitor(call->gtCallObjp) == VisitResult::Abort)) |
| 4498 | { |
| 4499 | return; |
| 4500 | } |
| 4501 | if ((call->gtCallArgs != nullptr) && (call->gtCallArgs->VisitListOperands(visitor) == VisitResult::Abort)) |
| 4502 | { |
| 4503 | return; |
| 4504 | } |
| 4505 | if ((call->gtCallLateArgs != nullptr) && |
| 4506 | (call->gtCallLateArgs->VisitListOperands(visitor)) == VisitResult::Abort) |
| 4507 | { |
| 4508 | return; |
| 4509 | } |
| 4510 | if (call->gtCallType == CT_INDIRECT) |
| 4511 | { |
| 4512 | if ((call->gtCallCookie != nullptr) && (visitor(call->gtCallCookie) == VisitResult::Abort)) |
| 4513 | { |
| 4514 | return; |
| 4515 | } |
| 4516 | if ((call->gtCallAddr != nullptr) && (visitor(call->gtCallAddr) == VisitResult::Abort)) |
| 4517 | { |
| 4518 | return; |
| 4519 | } |
| 4520 | } |
| 4521 | if ((call->gtControlExpr != nullptr)) |
| 4522 | { |
| 4523 | visitor(call->gtControlExpr); |
| 4524 | } |
| 4525 | return; |
| 4526 | } |
| 4527 | |
| 4528 | // Binary nodes |
| 4529 | default: |
| 4530 | assert(this->OperIsBinary()); |
| 4531 | VisitBinOpOperands<TVisitor>(visitor); |
| 4532 | return; |
| 4533 | } |
| 4534 | } |
| 4535 | |
| 4536 | template <typename TVisitor> |
| 4537 | GenTree::VisitResult GenTree::VisitListOperands(TVisitor visitor) |
| 4538 | { |
| 4539 | for (GenTreeArgList* node = this->AsArgList(); node != nullptr; node = node->Rest()) |
| 4540 | { |
| 4541 | if (visitor(node->gtOp1) == VisitResult::Abort) |
| 4542 | { |
| 4543 | return VisitResult::Abort; |
| 4544 | } |
| 4545 | } |
| 4546 | |
| 4547 | return VisitResult::Continue; |
| 4548 | } |
| 4549 | |
| 4550 | template <typename TVisitor> |
| 4551 | void GenTree::VisitBinOpOperands(TVisitor visitor) |
| 4552 | { |
| 4553 | assert(this->OperIsBinary()); |
| 4554 | |
| 4555 | GenTreeOp* const op = this->AsOp(); |
| 4556 | |
| 4557 | GenTree* const op1 = op->gtOp1; |
| 4558 | if ((op1 != nullptr) && (visitor(op1) == VisitResult::Abort)) |
| 4559 | { |
| 4560 | return; |
| 4561 | } |
| 4562 | |
| 4563 | GenTree* const op2 = op->gtOp2; |
| 4564 | if (op2 != nullptr) |
| 4565 | { |
| 4566 | visitor(op2); |
| 4567 | } |
| 4568 | } |
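|  | 
|  | // Illustrative sketch (not part of the build): VisitOperands accepts any callable
|  | // taking a GenTree* operand and returning a GenTree::VisitResult. For example,
|  | // counting a node's direct operands ('node' below is a hypothetical GenTree*):
|  | //
|  | //   unsigned operandCount = 0;
|  | //   node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult {
|  | //       operandCount++;
|  | //       return GenTree::VisitResult::Continue; // returning Abort stops the walk early
|  | //   });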
| 4569 | |
| 4570 | /***************************************************************************** |
| 4571 | * operator new |
| 4572 | * |
| 4573 |  *  Note that the compiler's allocator is an arena allocator that returns memory that is
| 4574 | * not zero-initialized and can contain data from a prior allocation lifetime. |
| 4575 | */ |
| 4576 | |
| 4577 | inline void* __cdecl operator new(size_t sz, Compiler* compiler, CompMemKind cmk) |
| 4578 | { |
| 4579 | return compiler->getAllocator(cmk).allocate<char>(sz); |
| 4580 | } |
| 4581 | |
| 4582 | inline void* __cdecl operator new[](size_t sz, Compiler* compiler, CompMemKind cmk) |
| 4583 | { |
| 4584 | return compiler->getAllocator(cmk).allocate<char>(sz); |
| 4585 | } |
| 4586 | |
| 4587 | inline void* __cdecl operator new(size_t sz, void* p, const jitstd::placement_t& /* syntax_difference */) |
| 4588 | { |
| 4589 | return p; |
| 4590 | } |
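|  | 
|  | // Illustrative sketch (not part of the build): these overloads let JIT code allocate
|  | // from the compiler's arena with a memory-kind tag, e.g.:
|  | //
|  | //   int* scratch = new (compiler, CMK_Unknown) int[4];
|  | //
|  | // Remember that the arena does not zero memory, so initialize before reading.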
| 4591 | |
| 4592 | /*****************************************************************************/ |
| 4593 | |
| 4594 | #ifdef DEBUG |
| 4595 | |
| 4596 | inline void printRegMask(regMaskTP mask) |
| 4597 | { |
| 4598 | printf(REG_MASK_ALL_FMT, mask); |
| 4599 | } |
| 4600 | |
| 4601 | inline char* regMaskToString(regMaskTP mask, Compiler* context) |
| 4602 | { |
| 4603 | const size_t cchRegMask = 24; |
| 4604 | char* regmask = new (context, CMK_Unknown) char[cchRegMask]; |
| 4605 | |
| 4606 | sprintf_s(regmask, cchRegMask, REG_MASK_ALL_FMT, mask); |
| 4607 | |
| 4608 | return regmask; |
| 4609 | } |
| 4610 | |
| 4611 | inline void printRegMaskInt(regMaskTP mask) |
| 4612 | { |
| 4613 | printf(REG_MASK_INT_FMT, (mask & RBM_ALLINT)); |
| 4614 | } |
| 4615 | |
| 4616 | inline char* regMaskIntToString(regMaskTP mask, Compiler* context) |
| 4617 | { |
| 4618 | const size_t cchRegMask = 24; |
| 4619 | char* regmask = new (context, CMK_Unknown) char[cchRegMask]; |
| 4620 | |
| 4621 | sprintf_s(regmask, cchRegMask, REG_MASK_INT_FMT, (mask & RBM_ALLINT)); |
| 4622 | |
| 4623 | return regmask; |
| 4624 | } |
| 4625 | |
| 4626 | #endif // DEBUG |
| 4627 | |
| 4628 | inline static bool StructHasOverlappingFields(DWORD attribs) |
| 4629 | { |
| 4630 | return ((attribs & CORINFO_FLG_OVERLAPPING_FIELDS) != 0); |
| 4631 | } |
| 4632 | |
| 4633 | inline static bool StructHasCustomLayout(DWORD attribs) |
| 4634 | { |
| 4635 | return ((attribs & CORINFO_FLG_CUSTOMLAYOUT) != 0); |
| 4636 | } |
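
// Illustrative use of the two predicates above (a sketch; getClassAttribs is
// the usual JIT-EE query, and structHandle is a hypothetical class handle):
//
//    DWORD attribs  = info.compCompHnd->getClassAttribs(structHandle);
//    bool  isTricky = StructHasOverlappingFields(attribs) || StructHasCustomLayout(attribs);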
| 4637 | |
| 4638 | /***************************************************************************** |
| 4639 | * This node should not be referenced by anyone now. Set its values to garbage |
| 4640 | * to catch extra references |
| 4641 | */ |
| 4642 | |
| 4643 | inline void DEBUG_DESTROY_NODE(GenTree* tree) |
| 4644 | { |
| 4645 | #ifdef DEBUG |
| 4646 | // printf("DEBUG_DESTROY_NODE for [0x%08x]\n", tree); |
| 4647 | |
| 4648 | // Save gtOper in case we want to find out what this node was |
| 4649 | tree->gtOperSave = tree->gtOper; |
| 4650 | |
| 4651 | tree->gtType = TYP_UNDEF; |
| 4652 | tree->gtFlags |= 0xFFFFFFFF & ~GTF_NODE_MASK; |
| 4653 | if (tree->OperIsSimple()) |
| 4654 | { |
| 4655 | tree->gtOp.gtOp1 = tree->gtOp.gtOp2 = nullptr; |
| 4656 | } |
| 4657 | // Must do this last, because the "gtOp" check above will fail otherwise. |
| 4658 | // Don't call SetOper, because GT_COUNT is not a valid value |
| 4659 | tree->gtOper = GT_COUNT; |
| 4660 | #endif |
| 4661 | } |
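
// Typical (illustrative) usage in a transformation: once a tree has been
// replaced by a simpler equivalent, poison the discarded node so that any
// lingering reference trips an assert in a checked build. The names below
// are hypothetical:
//
//    GenTree* newTree = SimplifiedReplacementFor(oldTree);
//    DEBUG_DESTROY_NODE(oldTree); // oldTree must not be referenced again
//    return newTree;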
| 4662 | |
| 4663 | //------------------------------------------------------------------------------ |
| 4664 | // lvRefCnt: access reference count for this local var |
| 4665 | // |
| 4666 | // Arguments: |
| 4667 | // state: the requestor's expected ref count state; defaults to RCS_NORMAL |
| 4668 | // |
| 4669 | // Return Value: |
| 4670 | // Ref count for the local. |
| 4671 | |
| 4672 | inline unsigned short LclVarDsc::lvRefCnt(RefCountState state) const |
| 4673 | { |
| 4674 | |
| 4675 | #if defined(DEBUG) |
| 4676 | assert(state != RCS_INVALID); |
| 4677 | Compiler* compiler = JitTls::GetCompiler(); |
| 4678 | assert(compiler->lvaRefCountState == state); |
| 4679 | #endif |
| 4680 | |
| 4681 | if (lvImplicitlyReferenced && (m_lvRefCnt == 0)) |
| 4682 | { |
| 4683 | return 1; |
| 4684 | } |
| 4685 | |
| 4686 | return m_lvRefCnt; |
| 4687 | } |
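
// For example (illustrative only, and assuming lvaRefCountState is RCS_NORMAL):
// a local marked lvImplicitlyReferenced reports a count of at least one even
// after its stored count is set to zero, which is why setLvRefCnt(Y) followed
// by lvRefCnt() need not return Y for such locals.
//
//    varDsc->setLvRefCnt(0);
//    varDsc->lvImplicitlyReferenced = 1;
//    assert(varDsc->lvRefCnt() == 1);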
| 4688 | |
| 4689 | //------------------------------------------------------------------------------ |
| 4690 | // incLvRefCnt: increment reference count for this local var |
| 4691 | // |
| 4692 | // Arguments: |
| 4693 | // delta: the amount of the increment |
| 4694 | // state: the requestor's expected ref count state; defaults to RCS_NORMAL |
| 4695 | // |
| 4696 | // Notes: |
// It is currently the caller's responsibility to ensure this increment
| 4698 | // will not cause overflow. |
| 4699 | |
| 4700 | inline void LclVarDsc::incLvRefCnt(unsigned short delta, RefCountState state) |
| 4701 | { |
| 4702 | |
| 4703 | #if defined(DEBUG) |
| 4704 | assert(state != RCS_INVALID); |
| 4705 | Compiler* compiler = JitTls::GetCompiler(); |
| 4706 | assert(compiler->lvaRefCountState == state); |
| 4707 | #endif |
| 4708 | |
| 4709 | unsigned short oldRefCnt = m_lvRefCnt; |
| 4710 | m_lvRefCnt += delta; |
| 4711 | assert(m_lvRefCnt >= oldRefCnt); |
| 4712 | } |
| 4713 | |
| 4714 | //------------------------------------------------------------------------------ |
| 4715 | // setLvRefCnt: set the reference count for this local var |
| 4716 | // |
| 4717 | // Arguments: |
| 4718 | // newValue: the desired new reference count |
| 4719 | // state: the requestor's expected ref count state; defaults to RCS_NORMAL |
| 4720 | // |
| 4721 | // Notes: |
| 4722 | // Generally after calling v->setLvRefCnt(Y), v->lvRefCnt() == Y. |
// However, this may not be true when v->lvImplicitlyReferenced == 1.
| 4724 | |
| 4725 | inline void LclVarDsc::setLvRefCnt(unsigned short newValue, RefCountState state) |
| 4726 | { |
| 4727 | |
| 4728 | #if defined(DEBUG) |
| 4729 | assert(state != RCS_INVALID); |
| 4730 | Compiler* compiler = JitTls::GetCompiler(); |
| 4731 | assert(compiler->lvaRefCountState == state); |
| 4732 | #endif |
| 4733 | |
| 4734 | m_lvRefCnt = newValue; |
| 4735 | } |
| 4736 | |
| 4737 | //------------------------------------------------------------------------------ |
// lvRefCntWtd: access weighted reference count for this local var
| 4739 | // |
| 4740 | // Arguments: |
| 4741 | // state: the requestor's expected ref count state; defaults to RCS_NORMAL |
| 4742 | // |
| 4743 | // Return Value: |
| 4744 | // Weighted ref count for the local. |
| 4745 | |
| 4746 | inline BasicBlock::weight_t LclVarDsc::lvRefCntWtd(RefCountState state) const |
| 4747 | { |
| 4748 | |
| 4749 | #if defined(DEBUG) |
| 4750 | assert(state != RCS_INVALID); |
| 4751 | Compiler* compiler = JitTls::GetCompiler(); |
| 4752 | assert(compiler->lvaRefCountState == state); |
| 4753 | #endif |
| 4754 | |
| 4755 | if (lvImplicitlyReferenced && (m_lvRefCntWtd == 0)) |
| 4756 | { |
| 4757 | return BB_UNITY_WEIGHT; |
| 4758 | } |
| 4759 | |
| 4760 | return m_lvRefCntWtd; |
| 4761 | } |
| 4762 | |
| 4763 | //------------------------------------------------------------------------------ |
| 4764 | // incLvRefCntWtd: increment weighted reference count for this local var |
| 4765 | // |
| 4766 | // Arguments: |
| 4767 | // delta: the amount of the increment |
| 4768 | // state: the requestor's expected ref count state; defaults to RCS_NORMAL |
| 4769 | // |
| 4770 | // Notes: |
// It is currently the caller's responsibility to ensure this increment
| 4772 | // will not cause overflow. |
| 4773 | |
| 4774 | inline void LclVarDsc::incLvRefCntWtd(BasicBlock::weight_t delta, RefCountState state) |
| 4775 | { |
| 4776 | |
| 4777 | #if defined(DEBUG) |
| 4778 | assert(state != RCS_INVALID); |
| 4779 | Compiler* compiler = JitTls::GetCompiler(); |
| 4780 | assert(compiler->lvaRefCountState == state); |
| 4781 | #endif |
| 4782 | |
| 4783 | BasicBlock::weight_t oldRefCntWtd = m_lvRefCntWtd; |
| 4784 | m_lvRefCntWtd += delta; |
| 4785 | assert(m_lvRefCntWtd >= oldRefCntWtd); |
| 4786 | } |
| 4787 | |
| 4788 | //------------------------------------------------------------------------------ |
| 4789 | // setLvRefCntWtd: set the weighted reference count for this local var |
| 4790 | // |
| 4791 | // Arguments: |
| 4792 | // newValue: the desired new weighted reference count |
| 4793 | // state: the requestor's expected ref count state; defaults to RCS_NORMAL |
| 4794 | // |
| 4795 | // Notes: |
| 4796 | // Generally after calling v->setLvRefCntWtd(Y), v->lvRefCntWtd() == Y. |
// However, this may not be true when v->lvImplicitlyReferenced == 1.
| 4798 | |
| 4799 | inline void LclVarDsc::setLvRefCntWtd(BasicBlock::weight_t newValue, RefCountState state) |
| 4800 | { |
| 4801 | |
| 4802 | #if defined(DEBUG) |
| 4803 | assert(state != RCS_INVALID); |
| 4804 | Compiler* compiler = JitTls::GetCompiler(); |
| 4805 | assert(compiler->lvaRefCountState == state); |
| 4806 | #endif |
| 4807 | |
| 4808 | m_lvRefCntWtd = newValue; |
| 4809 | } |
| 4810 | |
| 4811 | /*****************************************************************************/ |
| 4812 | #endif //_COMPILER_HPP_ |
| 4813 | /*****************************************************************************/ |
| 4814 | |