| 1 | // Licensed to the .NET Foundation under one or more agreements. |
| 2 | // The .NET Foundation licenses this file to you under the MIT license. |
| 3 | // See the LICENSE file in the project root for more information. |
| 4 | |
| 5 | /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 6 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
XX                                                                           XX
XX                                 Compiler                                  XX
XX                                                                           XX
| 10 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 11 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 12 | */ |
| 13 | #include "jitpch.h" |
| 14 | #ifdef _MSC_VER |
| 15 | #pragma hdrstop |
| 16 | #endif // _MSC_VER |
| 17 | #include "hostallocator.h" |
| 18 | #include "emit.h" |
| 19 | #include "ssabuilder.h" |
| 20 | #include "valuenum.h" |
| 21 | #include "rangecheck.h" |
| 22 | #include "lower.h" |
| 23 | #include "stacklevelsetter.h" |
| 24 | #include "jittelemetry.h" |
| 25 | |
| 26 | #if defined(DEBUG) |
// Column settings for COMPlus_JitDumpIR. We could (and probably should) make these configurable.
| 28 | #define COLUMN_OPCODE 30 |
| 29 | #define COLUMN_OPERANDS (COLUMN_OPCODE + 25) |
| 30 | #define COLUMN_KINDS 110 |
| 31 | #define COLUMN_FLAGS (COLUMN_KINDS + 32) |
| 32 | #endif |
| 33 | |
| 34 | #if defined(DEBUG) |
| 35 | unsigned Compiler::jitTotalMethodCompiled = 0; |
| 36 | #endif // defined(DEBUG) |
| 37 | |
| 38 | #if defined(DEBUG) |
| 39 | LONG Compiler::jitNestingLevel = 0; |
| 40 | #endif // defined(DEBUG) |
| 41 | |
| 42 | #ifdef ALT_JIT |
| 43 | // static |
| 44 | bool Compiler::s_pAltJitExcludeAssembliesListInitialized = false; |
| 45 | AssemblyNamesList2* Compiler::s_pAltJitExcludeAssembliesList = nullptr; |
| 46 | #endif // ALT_JIT |
| 47 | |
| 48 | #ifdef DEBUG |
| 49 | // static |
| 50 | bool Compiler::s_pJitDisasmIncludeAssembliesListInitialized = false; |
| 51 | AssemblyNamesList2* Compiler::s_pJitDisasmIncludeAssembliesList = nullptr; |
| 52 | #endif // DEBUG |
| 53 | |
| 54 | /***************************************************************************** |
| 55 | * |
| 56 | * Little helpers to grab the current cycle counter value; this is done |
| 57 | * differently based on target architecture, host toolchain, etc. The |
| 58 | * main thing is to keep the overhead absolutely minimal; in fact, on |
| 59 | * x86/x64 we use RDTSC even though it's not thread-safe; GetThreadCycles |
 *  (which is monotonic) is just too expensive.
| 61 | */ |
| 62 | #ifdef FEATURE_JIT_METHOD_PERF |
| 63 | |
| 64 | #if defined(_HOST_X86_) || defined(_HOST_AMD64_) |
| 65 | |
| 66 | #if defined(_MSC_VER) |
| 67 | |
| 68 | #include <intrin.h> |
| 69 | inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) |
| 70 | { |
| 71 | *cycleOut = __rdtsc(); |
| 72 | return true; |
| 73 | } |
| 74 | |
| 75 | #elif defined(__clang__) |
| 76 | |
| 77 | inline bool _our_GetThreadCycles(unsigned __int64* cycleOut) |
| 78 | { |
| 79 | uint32_t hi, lo; |
| 80 | __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi)); |
| 81 | *cycleOut = (static_cast<unsigned __int64>(hi) << 32) | static_cast<unsigned __int64>(lo); |
| 82 | return true; |
| 83 | } |
| 84 | |
| 85 | #else // neither _MSC_VER nor __clang__ |
| 86 | |
| 87 | // The following *might* work - might as well try. |
| 88 | #define _our_GetThreadCycles(cp) GetThreadCycles(cp) |
| 89 | |
| 90 | #endif |
| 91 | |
| 92 | #elif defined(_HOST_ARM_) || defined(_HOST_ARM64_) |
| 93 | |
| 94 | // If this doesn't work please see ../gc/gc.cpp for additional ARM |
| 95 | // info (and possible solutions). |
| 96 | #define _our_GetThreadCycles(cp) GetThreadCycles(cp) |
| 97 | |
| 98 | #else // not x86/x64 and not ARM |
| 99 | |
| 100 | // Don't know what this target is, but let's give it a try; if |
| 101 | // someone really wants to make this work, please add the right |
| 102 | // code here. |
| 103 | #define _our_GetThreadCycles(cp) GetThreadCycles(cp) |
| 104 | |
| 105 | #endif // which host OS |
| 106 | |
| 107 | #endif // FEATURE_JIT_METHOD_PERF |
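
// An illustrative sketch (not code used by the JIT itself) of how the helper
// above is intended to be used to time a region of work:
//
//   unsigned __int64 start = 0;
//   unsigned __int64 stop  = 0;
//   _our_GetThreadCycles(&start);
//   // ... the work being measured ...
//   _our_GetThreadCycles(&stop);
//   unsigned __int64 elapsedCycles = stop - start;
//
// Because RDTSC is neither serializing nor thread-safe, such measurements are
// approximate; that trade-off is deliberate, as noted above.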
| 108 | /*****************************************************************************/ |
| 109 | inline unsigned getCurTime() |
| 110 | { |
| 111 | SYSTEMTIME tim; |
| 112 | |
| 113 | GetSystemTime(&tim); |
| 114 | |
| 115 | return (((tim.wHour * 60) + tim.wMinute) * 60 + tim.wSecond) * 1000 + tim.wMilliseconds; |
| 116 | } |
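
// For example, at 13:05:30.250 (UTC) getCurTime() returns
// ((13 * 60 + 5) * 60 + 30) * 1000 + 250 = 47,130,250 milliseconds since
// midnight. The value wraps every day, so it is only suitable for coarse
// elapsed-time measurements.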
| 117 | |
| 118 | /*****************************************************************************/ |
| 119 | #ifdef DEBUG |
| 120 | /*****************************************************************************/ |
| 121 | |
| 122 | static FILE* jitSrcFilePtr; |
| 123 | |
| 124 | static unsigned jitCurSrcLine; |
| 125 | |
| 126 | void Compiler::JitLogEE(unsigned level, const char* fmt, ...) |
| 127 | { |
| 128 | va_list args; |
| 129 | |
| 130 | if (verbose) |
| 131 | { |
| 132 | va_start(args, fmt); |
| 133 | vflogf(jitstdout, fmt, args); |
| 134 | va_end(args); |
| 135 | } |
| 136 | |
| 137 | va_start(args, fmt); |
| 138 | vlogf(level, fmt, args); |
| 139 | va_end(args); |
| 140 | } |
| 141 | |
| 142 | void Compiler::compDspSrcLinesByLineNum(unsigned line, bool seek) |
| 143 | { |
| 144 | if (!jitSrcFilePtr) |
| 145 | { |
| 146 | return; |
| 147 | } |
| 148 | |
| 149 | if (jitCurSrcLine == line) |
| 150 | { |
| 151 | return; |
| 152 | } |
| 153 | |
| 154 | if (jitCurSrcLine > line) |
| 155 | { |
| 156 | if (!seek) |
| 157 | { |
| 158 | return; |
| 159 | } |
| 160 | |
| 161 | if (fseek(jitSrcFilePtr, 0, SEEK_SET) != 0) |
| 162 | { |
| 163 | printf("Compiler::compDspSrcLinesByLineNum: fseek returned an error.\n" ); |
| 164 | } |
| 165 | jitCurSrcLine = 0; |
| 166 | } |
| 167 | |
| 168 | if (!seek) |
| 169 | { |
| 170 | printf(";\n" ); |
| 171 | } |
| 172 | |
| 173 | do |
| 174 | { |
| 175 | char temp[128]; |
| 176 | size_t llen; |
| 177 | |
| 178 | if (!fgets(temp, sizeof(temp), jitSrcFilePtr)) |
| 179 | { |
| 180 | return; |
| 181 | } |
| 182 | |
| 183 | if (seek) |
| 184 | { |
| 185 | continue; |
| 186 | } |
| 187 | |
| 188 | llen = strlen(temp); |
| 189 | if (llen && temp[llen - 1] == '\n') |
| 190 | { |
| 191 | temp[llen - 1] = 0; |
| 192 | } |
| 193 | |
| 194 | printf("; %s\n" , temp); |
| 195 | } while (++jitCurSrcLine < line); |
| 196 | |
| 197 | if (!seek) |
| 198 | { |
| 199 | printf(";\n" ); |
| 200 | } |
| 201 | } |
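
// For example (illustrative): with jitCurSrcLine == 10, calling
// compDspSrcLinesByLineNum(14, false) prints the source lines at positions
// 10..13 prefixed with "; " and bracketed by ";" lines, while
// compDspSrcLinesByLineNum(14, true) merely advances jitCurSrcLine to 14
// without printing anything.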
| 202 | |
| 203 | /*****************************************************************************/ |
| 204 | |
| 205 | void Compiler::compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP) |
| 206 | { |
| 207 | static IPmappingDsc* nextMappingDsc; |
| 208 | static unsigned lastLine; |
| 209 | |
| 210 | if (!opts.dspLines) |
| 211 | { |
| 212 | return; |
| 213 | } |
| 214 | |
| 215 | if (curIP == 0) |
| 216 | { |
| 217 | if (genIPmappingList) |
| 218 | { |
| 219 | nextMappingDsc = genIPmappingList; |
| 220 | lastLine = jitGetILoffs(nextMappingDsc->ipmdILoffsx); |
| 221 | |
| 222 | unsigned firstLine = jitGetILoffs(nextMappingDsc->ipmdILoffsx); |
| 223 | |
| 224 | unsigned earlierLine = (firstLine < 5) ? 0 : firstLine - 5; |
| 225 | |
| 226 | compDspSrcLinesByLineNum(earlierLine, true); // display previous 5 lines |
| 227 | compDspSrcLinesByLineNum(firstLine, false); |
| 228 | } |
| 229 | else |
| 230 | { |
| 231 | nextMappingDsc = nullptr; |
| 232 | } |
| 233 | |
| 234 | return; |
| 235 | } |
| 236 | |
| 237 | if (nextMappingDsc) |
| 238 | { |
| 239 | UNATIVE_OFFSET offset = nextMappingDsc->ipmdNativeLoc.CodeOffset(genEmitter); |
| 240 | |
| 241 | if (offset <= curIP) |
| 242 | { |
| 243 | IL_OFFSET nextOffs = jitGetILoffs(nextMappingDsc->ipmdILoffsx); |
| 244 | |
| 245 | if (lastLine < nextOffs) |
| 246 | { |
| 247 | compDspSrcLinesByLineNum(nextOffs); |
| 248 | } |
| 249 | else |
| 250 | { |
| 251 | // This offset corresponds to a previous line. Rewind to that line |
| 252 | |
| 253 | compDspSrcLinesByLineNum(nextOffs - 2, true); |
| 254 | compDspSrcLinesByLineNum(nextOffs); |
| 255 | } |
| 256 | |
| 257 | lastLine = nextOffs; |
| 258 | nextMappingDsc = nextMappingDsc->ipmdNext; |
| 259 | } |
| 260 | } |
| 261 | } |
| 262 | |
| 263 | /*****************************************************************************/ |
| 264 | #endif // DEBUG |
| 265 | |
| 266 | /*****************************************************************************/ |
| 267 | #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS |
| 268 | |
static unsigned genMethodCnt;  // total number of methods JIT'ted
unsigned        genMethodICnt; // number of interruptible methods
unsigned        genMethodNCnt; // number of non-interruptible methods
static unsigned genSmallMethodsNeedingExtraMemoryCnt = 0;
| 273 | |
| 274 | #endif |
| 275 | |
| 276 | /*****************************************************************************/ |
| 277 | #if MEASURE_NODE_SIZE |
| 278 | NodeSizeStats genNodeSizeStats; |
| 279 | NodeSizeStats genNodeSizeStatsPerFunc; |
| 280 | |
| 281 | unsigned genTreeNcntHistBuckets[] = {10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 1000, 5000, 10000, 0}; |
| 282 | Histogram genTreeNcntHist(genTreeNcntHistBuckets); |
| 283 | |
| 284 | unsigned genTreeNsizHistBuckets[] = {1000, 5000, 10000, 50000, 100000, 500000, 1000000, 0}; |
| 285 | Histogram genTreeNsizHist(genTreeNsizHistBuckets); |
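
// A note on the bucket arrays used with Histogram in this file (a sketch of
// the convention; see the Histogram class for the authoritative behavior):
// each array lists ascending upper bounds and is terminated by 0. A recorded
// value is counted against the first bound it does not exceed, and values
// larger than the last bound fall into a final overflow bucket. For example,
// with buckets {10, 20, 0}, recording 7 counts toward "<= 10" and recording
// 25 counts toward the overflow bucket.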
| 286 | #endif // MEASURE_NODE_SIZE |
| 287 | |
| 288 | /*****************************************************************************/ |
| 289 | #if MEASURE_MEM_ALLOC |
| 290 | |
| 291 | unsigned memAllocHistBuckets[] = {64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; |
| 292 | Histogram memAllocHist(memAllocHistBuckets); |
| 293 | unsigned memUsedHistBuckets[] = {16, 32, 64, 128, 192, 256, 512, 1024, 4096, 8192, 0}; |
| 294 | Histogram memUsedHist(memUsedHistBuckets); |
| 295 | |
| 296 | #endif // MEASURE_MEM_ALLOC |
| 297 | |
| 298 | /***************************************************************************** |
| 299 | * |
| 300 | * Variables to keep track of total code amounts. |
| 301 | */ |
| 302 | |
| 303 | #if DISPLAY_SIZES |
| 304 | |
| 305 | size_t grossVMsize; // Total IL code size |
| 306 | size_t grossNCsize; // Native code + data size |
| 307 | size_t totalNCsize; // Native code + data + GC info size (TODO-Cleanup: GC info size only accurate for JIT32_GCENCODER) |
| 308 | size_t gcHeaderISize; // GC header size: interruptible methods |
| 309 | size_t gcPtrMapISize; // GC pointer map size: interruptible methods |
| 310 | size_t gcHeaderNSize; // GC header size: non-interruptible methods |
| 311 | size_t gcPtrMapNSize; // GC pointer map size: non-interruptible methods |
| 312 | |
| 313 | #endif // DISPLAY_SIZES |
| 314 | |
| 315 | /***************************************************************************** |
| 316 | * |
| 317 | * Variables to keep track of argument counts. |
| 318 | */ |
| 319 | |
| 320 | #if CALL_ARG_STATS |
| 321 | |
| 322 | unsigned argTotalCalls; |
| 323 | unsigned argHelperCalls; |
| 324 | unsigned argStaticCalls; |
| 325 | unsigned argNonVirtualCalls; |
| 326 | unsigned argVirtualCalls; |
| 327 | |
| 328 | unsigned argTotalArgs; // total number of args for all calls (including objectPtr) |
| 329 | unsigned argTotalDWordArgs; |
| 330 | unsigned argTotalLongArgs; |
| 331 | unsigned argTotalFloatArgs; |
| 332 | unsigned argTotalDoubleArgs; |
| 333 | |
| 334 | unsigned argTotalRegArgs; |
| 335 | unsigned argTotalTemps; |
| 336 | unsigned argTotalLclVar; |
| 337 | unsigned argTotalDeferred; |
| 338 | unsigned argTotalConst; |
| 339 | |
| 340 | unsigned argTotalObjPtr; |
| 341 | unsigned argTotalGTF_ASGinArgs; |
| 342 | |
| 343 | unsigned argMaxTempsPerMethod; |
| 344 | |
| 345 | unsigned argCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; |
| 346 | Histogram argCntTable(argCntBuckets); |
| 347 | |
| 348 | unsigned argDWordCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; |
| 349 | Histogram argDWordCntTable(argDWordCntBuckets); |
| 350 | |
| 351 | unsigned argDWordLngCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; |
| 352 | Histogram argDWordLngCntTable(argDWordLngCntBuckets); |
| 353 | |
| 354 | unsigned argTempsCntBuckets[] = {0, 1, 2, 3, 4, 5, 6, 10, 0}; |
| 355 | Histogram argTempsCntTable(argTempsCntBuckets); |
| 356 | |
| 357 | #endif // CALL_ARG_STATS |
| 358 | |
| 359 | /***************************************************************************** |
| 360 | * |
| 361 | * Variables to keep track of basic block counts. |
| 362 | */ |
| 363 | |
| 364 | #if COUNT_BASIC_BLOCKS |
| 365 | |
| 366 | // -------------------------------------------------- |
| 367 | // Basic block count frequency table: |
| 368 | // -------------------------------------------------- |
| 369 | // <= 1 ===> 26872 count ( 56% of total) |
| 370 | // 2 .. 2 ===> 669 count ( 58% of total) |
| 371 | // 3 .. 3 ===> 4687 count ( 68% of total) |
| 372 | // 4 .. 5 ===> 5101 count ( 78% of total) |
| 373 | // 6 .. 10 ===> 5575 count ( 90% of total) |
| 374 | // 11 .. 20 ===> 3028 count ( 97% of total) |
| 375 | // 21 .. 50 ===> 1108 count ( 99% of total) |
| 376 | // 51 .. 100 ===> 182 count ( 99% of total) |
| 377 | // 101 .. 1000 ===> 34 count (100% of total) |
| 378 | // 1001 .. 10000 ===> 0 count (100% of total) |
| 379 | // -------------------------------------------------- |
| 380 | |
| 381 | unsigned bbCntBuckets[] = {1, 2, 3, 5, 10, 20, 50, 100, 1000, 10000, 0}; |
| 382 | Histogram bbCntTable(bbCntBuckets); |
| 383 | |
| 384 | /* Histogram for the IL opcode size of methods with a single basic block */ |
| 385 | |
| 386 | unsigned bbSizeBuckets[] = {1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 0}; |
| 387 | Histogram bbOneBBSizeTable(bbSizeBuckets); |
| 388 | |
| 389 | #endif // COUNT_BASIC_BLOCKS |
| 390 | |
| 391 | /***************************************************************************** |
| 392 | * |
| 393 | * Used by optFindNaturalLoops to gather statistical information such as |
| 394 | * - total number of natural loops |
| 395 | * - number of loops with 1, 2, ... exit conditions |
 *   - number of loops that have an iterator (for-like)
| 397 | * - number of loops that have a constant iterator |
| 398 | */ |
| 399 | |
| 400 | #if COUNT_LOOPS |
| 401 | |
| 402 | unsigned totalLoopMethods; // counts the total number of methods that have natural loops |
| 403 | unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has |
| 404 | unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent |
| 405 | unsigned totalLoopCount; // counts the total number of natural loops |
| 406 | unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops |
| 407 | unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent |
unsigned iterLoopCount;       // counts the # of loops with an iterator (for-like)
unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < const)
unsigned constIterLoopCount;  // counts the # of loops with a constant iterator (for-like)
| 411 | bool hasMethodLoops; // flag to keep track if we already counted a method as having loops |
| 412 | unsigned loopsThisMethod; // counts the number of loops in the current method |
| 413 | bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. |
| 414 | |
| 415 | /* Histogram for number of loops in a method */ |
| 416 | |
| 417 | unsigned loopCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0}; |
| 418 | Histogram loopCountTable(loopCountBuckets); |
| 419 | |
| 420 | /* Histogram for number of loop exits */ |
| 421 | |
| 422 | unsigned loopExitCountBuckets[] = {0, 1, 2, 3, 4, 5, 6, 0}; |
| 423 | Histogram loopExitCountTable(loopExitCountBuckets); |
| 424 | |
| 425 | #endif // COUNT_LOOPS |
| 426 | |
| 427 | //------------------------------------------------------------------------ |
// getJitGCType: Given the VM's CorInfoGCType, convert it to the JIT's var_types
//
// Arguments:
//    gcType    - an enum value that originally came from an element
//                of the BYTE[] returned from getClassGClayout()
//
// Return Value:
//    The corresponding enum value from the JIT's var_types
//
// Notes:
//    The gcLayout of each field of a struct is returned from getClassGClayout()
//    as a BYTE[], but each BYTE element is actually a CorInfoGCType value.
//    Note that when we 'know' there is only one element in this array,
//    the JIT will often pass the address of a single BYTE instead of a BYTE[].
//
| 443 | |
| 444 | var_types Compiler::getJitGCType(BYTE gcType) |
| 445 | { |
| 446 | var_types result = TYP_UNKNOWN; |
| 447 | CorInfoGCType corInfoType = (CorInfoGCType)gcType; |
| 448 | |
| 449 | if (corInfoType == TYPE_GC_NONE) |
| 450 | { |
| 451 | result = TYP_I_IMPL; |
| 452 | } |
| 453 | else if (corInfoType == TYPE_GC_REF) |
| 454 | { |
| 455 | result = TYP_REF; |
| 456 | } |
| 457 | else if (corInfoType == TYPE_GC_BYREF) |
| 458 | { |
| 459 | result = TYP_BYREF; |
| 460 | } |
| 461 | else |
| 462 | { |
| 463 | noway_assert(!"Bad value of 'gcType'" ); |
| 464 | } |
| 465 | return result; |
| 466 | } |
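
// For example (illustrative): for a struct laid out as
// { object o; void* p; }, getClassGClayout() would report
// { TYPE_GC_REF, TYPE_GC_NONE }, which getJitGCType() maps to
// TYP_REF and TYP_I_IMPL respectively.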
| 467 | |
| 468 | #if FEATURE_MULTIREG_ARGS |
| 469 | //--------------------------------------------------------------------------- |
// getStructGcPtrsFromOp: Given a GenTree node of TYP_STRUCT that represents
//                        a pass-by-value argument, return the GC pointer layout
//                        for the pointer-sized fields
// Arguments:
//    op        - the operand of TYP_STRUCT that is passed by value
//    gcPtrsOut - an array of BYTEs that is written by this method;
//                it will contain the VM's CorInfoGCType values
//                for each pointer-sized field
// Return Value:
//    Two [or more] values are written into the gcPtrs array
//
// Note that for ARM64 there will always be exactly two pointer-sized fields
| 482 | |
| 483 | void Compiler::getStructGcPtrsFromOp(GenTree* op, BYTE* gcPtrsOut) |
| 484 | { |
| 485 | assert(op->TypeGet() == TYP_STRUCT); |
| 486 | |
| 487 | #ifdef _TARGET_ARM64_ |
| 488 | if (op->OperGet() == GT_OBJ) |
| 489 | { |
| 490 | CORINFO_CLASS_HANDLE objClass = op->gtObj.gtClass; |
| 491 | |
| 492 | int structSize = info.compCompHnd->getClassSize(objClass); |
| 493 | assert(structSize <= 2 * TARGET_POINTER_SIZE); |
| 494 | |
| 495 | BYTE gcPtrsTmp[2] = {TYPE_GC_NONE, TYPE_GC_NONE}; |
| 496 | |
| 497 | info.compCompHnd->getClassGClayout(objClass, &gcPtrsTmp[0]); |
| 498 | |
| 499 | gcPtrsOut[0] = gcPtrsTmp[0]; |
| 500 | gcPtrsOut[1] = gcPtrsTmp[1]; |
| 501 | } |
| 502 | else if (op->OperGet() == GT_LCL_VAR) |
| 503 | { |
| 504 | GenTreeLclVarCommon* varNode = op->AsLclVarCommon(); |
| 505 | unsigned varNum = varNode->gtLclNum; |
| 506 | assert(varNum < lvaCount); |
| 507 | LclVarDsc* varDsc = &lvaTable[varNum]; |
| 508 | |
| 509 | // At this point any TYP_STRUCT LclVar must be a 16-byte pass by value argument |
| 510 | assert(varDsc->lvSize() == 2 * TARGET_POINTER_SIZE); |
| 511 | |
| 512 | gcPtrsOut[0] = varDsc->lvGcLayout[0]; |
| 513 | gcPtrsOut[1] = varDsc->lvGcLayout[1]; |
| 514 | } |
| 515 | else |
| 516 | #endif |
| 517 | { |
| 518 | noway_assert(!"Unsupported Oper for getStructGcPtrsFromOp" ); |
| 519 | } |
| 520 | } |
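
// For example (illustrative, ARM64): for a 16-byte argument of type
// struct { object o; long l; } passed by value, gcPtrsOut receives
// { TYPE_GC_REF, TYPE_GC_NONE }, one entry per pointer-sized slot.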
| 521 | #endif // FEATURE_MULTIREG_ARGS |
| 522 | |
| 523 | #ifdef ARM_SOFTFP |
| 524 | //--------------------------------------------------------------------------- |
| 525 | // IsSingleFloat32Struct: |
| 526 | // Check if the given struct type contains only one float32 value type |
| 527 | // |
| 528 | // Arguments: |
| 529 | // clsHnd - the handle for the struct type |
| 530 | // |
| 531 | // Return Value: |
| 532 | // true if the given struct type contains only one float32 value type, |
| 533 | // false otherwise. |
| 534 | // |
| 535 | |
| 536 | bool Compiler::isSingleFloat32Struct(CORINFO_CLASS_HANDLE clsHnd) |
| 537 | { |
| 538 | for (;;) |
| 539 | { |
        // the entire class chain must consist of value types, each with exactly one field
| 541 | if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1) |
| 542 | { |
| 543 | return false; |
| 544 | } |
| 545 | |
| 546 | CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd; |
| 547 | CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); |
| 548 | CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd); |
| 549 | |
| 550 | switch (fieldType) |
| 551 | { |
| 552 | case CORINFO_TYPE_VALUECLASS: |
| 553 | clsHnd = *pClsHnd; |
| 554 | break; |
| 555 | |
| 556 | case CORINFO_TYPE_FLOAT: |
| 557 | return true; |
| 558 | |
| 559 | default: |
| 560 | return false; |
| 561 | } |
| 562 | } |
| 563 | } |
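
// For example (illustrative): given
//
//   struct Inner { float f; };
//   struct Outer { Inner i; };
//
// isSingleFloat32Struct returns true for both Inner and Outer, since the
// chain of single-field value types bottoms out in a single float32. It
// returns false as soon as a level has more than one field, is not a value
// class, or the leaf field is not CORINFO_TYPE_FLOAT.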
| 564 | #endif // ARM_SOFTFP |
| 565 | |
| 566 | //----------------------------------------------------------------------------- |
| 567 | // getPrimitiveTypeForStruct: |
| 568 | // Get the "primitive" type that is is used for a struct |
| 569 | // of size 'structSize'. |
| 570 | // We examine 'clsHnd' to check the GC layout of the struct and |
| 571 | // return TYP_REF for structs that simply wrap an object. |
| 572 | // If the struct is a one element HFA, we will return the |
| 573 | // proper floating point type. |
| 574 | // |
| 575 | // Arguments: |
| 576 | // structSize - the size of the struct type, cannot be zero |
//    clsHnd     - the handle for the struct type, used when the struct
//                 may be an HFA or when we need the GC layout for an object ref.
| 579 | // |
| 580 | // Return Value: |
| 581 | // The primitive type (i.e. byte, short, int, long, ref, float, double) |
| 582 | // used to pass or return structs of this size. |
| 583 | // If we shouldn't use a "primitive" type then TYP_UNKNOWN is returned. |
| 584 | // Notes: |
| 585 | // For 32-bit targets (X86/ARM32) the 64-bit TYP_LONG type is not |
| 586 | // considered a primitive type by this method. |
| 587 | // So a struct that wraps a 'long' is passed and returned in the |
| 588 | // same way as any other 8-byte struct |
| 589 | // For ARM32 if we have an HFA struct that wraps a 64-bit double |
| 590 | // we will return TYP_DOUBLE. |
| 591 | // |
| 592 | var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg) |
| 593 | { |
| 594 | assert(structSize != 0); |
| 595 | |
| 596 | var_types useType; |
| 597 | |
| 598 | switch (structSize) |
| 599 | { |
| 600 | case 1: |
| 601 | useType = TYP_BYTE; |
| 602 | break; |
| 603 | |
| 604 | case 2: |
| 605 | useType = TYP_SHORT; |
| 606 | break; |
| 607 | |
| 608 | #if !defined(_TARGET_XARCH_) || defined(UNIX_AMD64_ABI) |
| 609 | case 3: |
| 610 | useType = TYP_INT; |
| 611 | break; |
| 612 | |
| 613 | #endif // !_TARGET_XARCH_ || UNIX_AMD64_ABI |
| 614 | |
| 615 | #ifdef _TARGET_64BIT_ |
| 616 | case 4: |
| 617 | if (IsHfa(clsHnd)) |
| 618 | { |
                // With a structSize of 4 and IsHfa, it must be an HFA of one float
| 620 | useType = TYP_FLOAT; |
| 621 | } |
| 622 | else |
| 623 | { |
| 624 | useType = TYP_INT; |
| 625 | } |
| 626 | break; |
| 627 | |
| 628 | #if !defined(_TARGET_XARCH_) || defined(UNIX_AMD64_ABI) |
| 629 | case 5: |
| 630 | case 6: |
| 631 | case 7: |
| 632 | useType = TYP_I_IMPL; |
| 633 | break; |
| 634 | |
| 635 | #endif // !_TARGET_XARCH_ || UNIX_AMD64_ABI |
| 636 | #endif // _TARGET_64BIT_ |
| 637 | |
| 638 | case TARGET_POINTER_SIZE: |
| 639 | #ifdef ARM_SOFTFP |
            // For ARM_SOFTFP, HFA is unsupported, so we need to check in another way.
            // This matters only for size-4 structs, because bigger structs would be processed with a RetBuf.
| 642 | if (isSingleFloat32Struct(clsHnd)) |
| 643 | #else // !ARM_SOFTFP |
| 644 | if (IsHfa(clsHnd) |
| 645 | #if defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) |
| 646 | // Arm64 Windows VarArg methods arguments will not |
| 647 | // classify HFA types, they will need to be treated |
| 648 | // as if they are not HFA types. |
| 649 | && !isVarArg |
| 650 | #endif // defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) |
| 651 | ) |
| 652 | #endif // ARM_SOFTFP |
| 653 | { |
| 654 | #ifdef _TARGET_64BIT_ |
| 655 | var_types hfaType = GetHfaType(clsHnd); |
| 656 | |
                // With a structSize of 8 and IsHfa, we have two possibilities:
                // An HFA of one double or an HFA of two floats
| 659 | // |
| 660 | // Check and exclude the case of an HFA of two floats |
| 661 | if (hfaType == TYP_DOUBLE) |
| 662 | { |
| 663 | // We have an HFA of one double |
| 664 | useType = TYP_DOUBLE; |
| 665 | } |
| 666 | else |
| 667 | { |
| 668 | assert(hfaType == TYP_FLOAT); |
| 669 | |
| 670 | // We have an HFA of two floats |
| 671 | // This should be passed or returned in two FP registers |
| 672 | useType = TYP_UNKNOWN; |
| 673 | } |
| 674 | #else // a 32BIT target |
                // With a structSize of 4 and IsHfa, it must be an HFA of one float
| 676 | useType = TYP_FLOAT; |
| 677 | #endif // _TARGET_64BIT_ |
| 678 | } |
| 679 | else |
| 680 | { |
| 681 | BYTE gcPtr = 0; |
| 682 | // Check if this pointer-sized struct is wrapping a GC object |
| 683 | info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); |
| 684 | useType = getJitGCType(gcPtr); |
| 685 | } |
| 686 | break; |
| 687 | |
| 688 | #ifdef _TARGET_ARM_ |
| 689 | case 8: |
| 690 | if (IsHfa(clsHnd)) |
| 691 | { |
| 692 | var_types hfaType = GetHfaType(clsHnd); |
| 693 | |
                // With a structSize of 8 and IsHfa, we have two possibilities:
                // An HFA of one double or an HFA of two floats
| 696 | // |
| 697 | // Check and exclude the case of an HFA of two floats |
| 698 | if (hfaType == TYP_DOUBLE) |
| 699 | { |
| 700 | // We have an HFA of one double |
| 701 | useType = TYP_DOUBLE; |
| 702 | } |
| 703 | else |
| 704 | { |
| 705 | assert(hfaType == TYP_FLOAT); |
| 706 | |
| 707 | // We have an HFA of two floats |
| 708 | // This should be passed or returned in two FP registers |
| 709 | useType = TYP_UNKNOWN; |
| 710 | } |
| 711 | } |
| 712 | else |
| 713 | { |
| 714 | // We don't have an HFA |
| 715 | useType = TYP_UNKNOWN; |
| 716 | } |
| 717 | break; |
| 718 | #endif // _TARGET_ARM_ |
| 719 | |
| 720 | default: |
| 721 | useType = TYP_UNKNOWN; |
| 722 | break; |
| 723 | } |
| 724 | |
| 725 | return useType; |
| 726 | } |
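
// A few illustrative mappings (for a hypothetical 64-bit non-x86 target;
// the exact results are target- and ABI-dependent, as the #ifdefs above show):
//
//   struct { byte b; }            (size 1)  -> TYP_BYTE
//   struct { short s; }           (size 2)  -> TYP_SHORT
//   struct { float f; }  (an HFA) (size 4)  -> TYP_FLOAT
//   struct { object o; }          (size 8)  -> TYP_REF     (from the GC layout)
//   struct { float a; float b; }  (size 8)  -> TYP_UNKNOWN (needs two FP registers)
//   struct { long a; long b; }    (size 16) -> TYP_UNKNOWN (not a primitive)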
| 727 | |
| 728 | //----------------------------------------------------------------------------- |
// getArgTypeForStruct:
//     Get the type that is used to pass values of the given struct type.
//     The caller must already have retrieved the struct size, which is
//     passed as the (required) fourth argument; this avoids an extra
//     call to getClassSize(clsHnd).
//
// Arguments:
//    clsHnd       - the handle for the struct type
//    wbPassStruct - An "out" argument with information about how
//                   the struct is to be passed
//    isVarArg     - is vararg, used to ignore HFA types for Arm64 windows varargs
//    structSize   - the size of the struct type, cannot be zero
| 742 | // |
| 743 | // Return Value: |
| 744 | // For wbPassStruct you can pass a 'nullptr' and nothing will be written |
| 745 | // or returned for that out parameter. |
| 746 | // When *wbPassStruct is SPK_PrimitiveType this method's return value |
| 747 | // is the primitive type used to pass the struct. |
| 748 | // When *wbPassStruct is SPK_ByReference this method's return value |
| 749 | // is always TYP_UNKNOWN and the struct type is passed by reference to a copy |
| 750 | // When *wbPassStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value |
| 751 | // is always TYP_STRUCT and the struct type is passed by value either |
| 752 | // using multiple registers or on the stack. |
| 753 | // |
| 754 | // Assumptions: |
| 755 | // The size must be the size of the given type. |
| 756 | // The given class handle must be for a value type (struct). |
| 757 | // |
| 758 | // Notes: |
| 759 | // About HFA types: |
| 760 | // When the clsHnd is a one element HFA type we return the appropriate |
| 761 | // floating point primitive type and *wbPassStruct is SPK_PrimitiveType |
//    If there are two or more elements in the HFA type then this method's
| 763 | // return value is TYP_STRUCT and *wbPassStruct is SPK_ByValueAsHfa |
| 764 | // |
| 765 | var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, |
| 766 | structPassingKind* wbPassStruct, |
| 767 | bool isVarArg, |
| 768 | unsigned structSize) |
| 769 | { |
| 770 | var_types useType = TYP_UNKNOWN; |
| 771 | structPassingKind howToPassStruct = SPK_Unknown; // We must change this before we return |
| 772 | |
| 773 | assert(structSize != 0); |
| 774 | |
| 775 | // Determine if we can pass the struct as a primitive type. |
| 776 | // Note that on x86 we never pass structs as primitive types (unless the VM unwraps them for us). |
| 777 | #ifndef _TARGET_X86_ |
| 778 | #ifdef UNIX_AMD64_ABI |
| 779 | |
| 780 | // An 8-byte struct may need to be passed in a floating point register |
| 781 | // So we always consult the struct "Classifier" routine |
| 782 | // |
| 783 | SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; |
| 784 | eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); |
| 785 | |
| 786 | if (structDesc.passedInRegisters && (structDesc.eightByteCount != 1)) |
| 787 | { |
| 788 | // We can't pass this as a primitive type. |
| 789 | } |
| 790 | else if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) |
| 791 | { |
| 792 | // If this is passed as a floating type, use that. |
| 793 | // Otherwise, we'll use the general case - we don't want to use the "EightByteType" |
| 794 | // directly, because it returns `TYP_INT` for any integral type <= 4 bytes, and |
| 795 | // we need to preserve small types. |
| 796 | useType = GetEightByteType(structDesc, 0); |
| 797 | } |
| 798 | else |
| 799 | #endif // UNIX_AMD64_ABI |
| 800 | |
| 801 | // The largest primitive type is 8 bytes (TYP_DOUBLE) |
| 802 | // so we can skip calling getPrimitiveTypeForStruct when we |
| 803 | // have a struct that is larger than that. |
| 804 | // |
| 805 | if (structSize <= sizeof(double)) |
| 806 | { |
| 807 | // We set the "primitive" useType based upon the structSize |
| 808 | // and also examine the clsHnd to see if it is an HFA of count one |
| 809 | useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg); |
| 810 | } |
| 811 | |
| 812 | #endif // !_TARGET_X86_ |
| 813 | |
| 814 | // Did we change this struct type into a simple "primitive" type? |
| 815 | // |
| 816 | if (useType != TYP_UNKNOWN) |
| 817 | { |
| 818 | // Yes, we should use the "primitive" type in 'useType' |
| 819 | howToPassStruct = SPK_PrimitiveType; |
| 820 | } |
| 821 | else // We can't replace the struct with a "primitive" type |
| 822 | { |
| 823 | // See if we can pass this struct by value, possibly in multiple registers |
| 824 | // or if we should pass it by reference to a copy |
| 825 | // |
| 826 | if (structSize <= MAX_PASS_MULTIREG_BYTES) |
| 827 | { |
| 828 | // Structs that are HFA's are passed by value in multiple registers |
| 829 | if (IsHfa(clsHnd) |
| 830 | #if defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) |
| 831 | && !isVarArg // Arm64 Windows VarArg methods arguments will not |
| 832 | // classify HFA types, they will need to be treated |
| 833 | // as if they are not HFA types. |
| 834 | #endif // defined(_TARGET_WINDOWS_) && defined(_TARGET_ARM64_) |
| 835 | ) |
| 836 | { |
| 837 | // HFA's of count one should have been handled by getPrimitiveTypeForStruct |
| 838 | assert(GetHfaCount(clsHnd) >= 2); |
| 839 | |
                // set wbPassStruct and useType to indicate that this is passed by value as an HFA
                // using multiple registers
                // (when all of the parameter registers are used, then the stack will be used)
| 843 | howToPassStruct = SPK_ByValueAsHfa; |
| 844 | useType = TYP_STRUCT; |
| 845 | } |
| 846 | else // Not an HFA struct type |
| 847 | { |
| 848 | |
| 849 | #ifdef UNIX_AMD64_ABI |
| 850 | |
| 851 | // The case of (structDesc.eightByteCount == 1) should have already been handled |
| 852 | if ((structDesc.eightByteCount > 1) || !structDesc.passedInRegisters) |
| 853 | { |
                    // set wbPassStruct and useType to indicate that this is passed by value in multiple registers
                    // (when all of the parameter registers are used, then the stack will be used)
| 856 | howToPassStruct = SPK_ByValue; |
| 857 | useType = TYP_STRUCT; |
| 858 | } |
| 859 | else |
| 860 | { |
| 861 | assert(structDesc.eightByteCount == 0); |
| 862 | // Otherwise we pass this struct by reference to a copy |
                    // set wbPassStruct and useType to indicate that this is passed using one register
| 864 | // (by reference to a copy) |
| 865 | howToPassStruct = SPK_ByReference; |
| 866 | useType = TYP_UNKNOWN; |
| 867 | } |
| 868 | |
| 869 | #elif defined(_TARGET_ARM64_) |
| 870 | |
| 871 | // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct |
| 872 | assert(structSize > TARGET_POINTER_SIZE); |
| 873 | |
| 874 | // On ARM64 structs that are 9-16 bytes are passed by value in multiple registers |
| 875 | // |
| 876 | if (structSize <= (TARGET_POINTER_SIZE * 2)) |
| 877 | { |
                    // set wbPassStruct and useType to indicate that this is passed by value in multiple registers
                    // (when all of the parameter registers are used, then the stack will be used)
| 880 | howToPassStruct = SPK_ByValue; |
| 881 | useType = TYP_STRUCT; |
| 882 | } |
| 883 | else // a structSize that is 17-32 bytes in size |
| 884 | { |
| 885 | // Otherwise we pass this struct by reference to a copy |
                    // set wbPassStruct and useType to indicate that this is passed using one register
| 887 | // (by reference to a copy) |
| 888 | howToPassStruct = SPK_ByReference; |
| 889 | useType = TYP_UNKNOWN; |
| 890 | } |
| 891 | |
| 892 | #elif defined(_TARGET_X86_) || defined(_TARGET_ARM_) |
| 893 | |
| 894 | // Otherwise we pass this struct by value on the stack |
                // set wbPassStruct and useType to indicate that this is passed by value according to the X86/ARM32 ABI
| 896 | howToPassStruct = SPK_ByValue; |
| 897 | useType = TYP_STRUCT; |
| 898 | |
| 899 | #else // _TARGET_XXX_ |
| 900 | |
| 901 | noway_assert(!"Unhandled TARGET in getArgTypeForStruct (with FEATURE_MULTIREG_ARGS=1)" ); |
| 902 | |
| 903 | #endif // _TARGET_XXX_ |
| 904 | } |
| 905 | } |
| 906 | else // (structSize > MAX_PASS_MULTIREG_BYTES) |
| 907 | { |
| 908 | // We have a (large) struct that can't be replaced with a "primitive" type |
| 909 | // and can't be passed in multiple registers |
| 910 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 911 | |
| 912 | #if defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI) |
| 913 | |
| 914 | // Otherwise we pass this struct by value on the stack |
            // set wbPassStruct and useType to indicate that this is passed by value according to the X86/ARM32 ABI
| 916 | howToPassStruct = SPK_ByValue; |
| 917 | useType = TYP_STRUCT; |
| 918 | |
| 919 | #elif defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_) |
| 920 | |
| 921 | // Otherwise we pass this struct by reference to a copy |
            // set wbPassStruct and useType to indicate that this is passed using one register (by reference to a copy)
| 923 | howToPassStruct = SPK_ByReference; |
| 924 | useType = TYP_UNKNOWN; |
| 925 | |
| 926 | #else // _TARGET_XXX_ |
| 927 | |
| 928 | noway_assert(!"Unhandled TARGET in getArgTypeForStruct" ); |
| 929 | |
| 930 | #endif // _TARGET_XXX_ |
| 931 | } |
| 932 | } |
| 933 | |
| 934 | // 'howToPassStruct' must be set to one of the valid values before we return |
| 935 | assert(howToPassStruct != SPK_Unknown); |
| 936 | if (wbPassStruct != nullptr) |
| 937 | { |
| 938 | *wbPassStruct = howToPassStruct; |
| 939 | } |
| 940 | |
| 941 | return useType; |
| 942 | } |
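
// Illustrative examples (target-dependent; these assume ARM64, non-varargs):
//
//   struct { int a; }                  (size 4)  -> TYP_INT,     *wbPassStruct = SPK_PrimitiveType
//   struct { double a; double b; }     (HFA, 16) -> TYP_STRUCT,  *wbPassStruct = SPK_ByValueAsHfa
//   struct { long a; long b; }         (size 16) -> TYP_STRUCT,  *wbPassStruct = SPK_ByValue
//   struct { long a; long b; long c; } (size 24) -> TYP_UNKNOWN, *wbPassStruct = SPK_ByReference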
| 943 | |
| 944 | //----------------------------------------------------------------------------- |
| 945 | // getReturnTypeForStruct: |
| 946 | // Get the type that is used to return values of the given struct type. |
| 947 | // If you have already retrieved the struct size then it should be |
| 948 | // passed as the optional third argument, as this allows us to avoid |
| 949 | // an extra call to getClassSize(clsHnd) |
| 950 | // |
| 951 | // Arguments: |
| 952 | // clsHnd - the handle for the struct type |
| 953 | // wbReturnStruct - An "out" argument with information about how |
| 954 | // the struct is to be returned |
| 955 | // structSize - the size of the struct type, |
| 956 | // or zero if we should call getClassSize(clsHnd) |
| 957 | // |
| 958 | // Return Value: |
| 959 | // For wbReturnStruct you can pass a 'nullptr' and nothing will be written |
| 960 | // or returned for that out parameter. |
| 961 | // When *wbReturnStruct is SPK_PrimitiveType this method's return value |
| 962 | // is the primitive type used to return the struct. |
| 963 | // When *wbReturnStruct is SPK_ByReference this method's return value |
| 964 | // is always TYP_UNKNOWN and the struct type is returned using a return buffer |
| 965 | // When *wbReturnStruct is SPK_ByValue or SPK_ByValueAsHfa this method's return value |
| 966 | // is always TYP_STRUCT and the struct type is returned using multiple registers. |
| 967 | // |
| 968 | // Assumptions: |
| 969 | // The size must be the size of the given type. |
| 970 | // The given class handle must be for a value type (struct). |
| 971 | // |
| 972 | // Notes: |
| 973 | // About HFA types: |
| 974 | // When the clsHnd is a one element HFA type then this method's return |
| 975 | // value is the appropriate floating point primitive type and |
| 976 | // *wbReturnStruct is SPK_PrimitiveType. |
| 977 | // If there are two or more elements in the HFA type and the target supports |
| 978 | // multireg return types then the return value is TYP_STRUCT and |
| 979 | // *wbReturnStruct is SPK_ByValueAsHfa. |
//    Additionally, if there are two or more elements in the HFA type and
//    the target doesn't support multireg return types, then it is treated
//    as if it wasn't an HFA type.
| 983 | // About returning TYP_STRUCT: |
| 984 | // Whenever this method's return value is TYP_STRUCT it always means |
| 985 | // that multiple registers are used to return this struct. |
| 986 | // |
| 987 | var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, |
| 988 | structPassingKind* wbReturnStruct /* = nullptr */, |
| 989 | unsigned structSize /* = 0 */) |
| 990 | { |
| 991 | var_types useType = TYP_UNKNOWN; |
| 992 | structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return |
| 993 | bool canReturnInRegister = true; |
| 994 | |
| 995 | assert(clsHnd != NO_CLASS_HANDLE); |
| 996 | |
| 997 | if (structSize == 0) |
| 998 | { |
| 999 | structSize = info.compCompHnd->getClassSize(clsHnd); |
| 1000 | } |
| 1001 | assert(structSize > 0); |
| 1002 | |
| 1003 | #ifdef UNIX_AMD64_ABI |
| 1004 | // An 8-byte struct may need to be returned in a floating point register |
| 1005 | // So we always consult the struct "Classifier" routine |
| 1006 | // |
| 1007 | SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; |
| 1008 | eeGetSystemVAmd64PassStructInRegisterDescriptor(clsHnd, &structDesc); |
| 1009 | |
| 1010 | if (structDesc.eightByteCount == 1) |
| 1011 | { |
| 1012 | assert(structSize <= sizeof(double)); |
| 1013 | assert(structDesc.passedInRegisters); |
| 1014 | |
| 1015 | if (structDesc.eightByteClassifications[0] == SystemVClassificationTypeSSE) |
| 1016 | { |
            // If this is returned as a floating type, use that.
            // Otherwise, leave as TYP_UNKNOWN and we'll sort things out below.
| 1019 | useType = GetEightByteType(structDesc, 0); |
| 1020 | howToReturnStruct = SPK_PrimitiveType; |
| 1021 | } |
| 1022 | } |
| 1023 | else |
| 1024 | { |
| 1025 | // Return classification is not always size based... |
| 1026 | canReturnInRegister = structDesc.passedInRegisters; |
| 1027 | } |
| 1028 | |
| 1029 | #endif // UNIX_AMD64_ABI |
| 1030 | |
| 1031 | // Check for cases where a small struct is returned in a register |
| 1032 | // via a primitive type. |
| 1033 | // |
| 1034 | // The largest primitive type is 8 bytes (TYP_DOUBLE) |
| 1035 | // so we can skip calling getPrimitiveTypeForStruct when we |
| 1036 | // have a struct that is larger than that. |
| 1037 | if (canReturnInRegister && (useType == TYP_UNKNOWN) && (structSize <= sizeof(double))) |
| 1038 | { |
| 1039 | // We set the "primitive" useType based upon the structSize |
| 1040 | // and also examine the clsHnd to see if it is an HFA of count one |
| 1041 | // |
        // The ABI for struct returns in varArg methods is the same as in the normal case,
        // so pass false for isVarArg
| 1044 | useType = getPrimitiveTypeForStruct(structSize, clsHnd, /*isVararg=*/false); |
| 1045 | |
| 1046 | if (useType != TYP_UNKNOWN) |
| 1047 | { |
| 1048 | if (structSize == genTypeSize(useType)) |
| 1049 | { |
| 1050 | // Currently: 1, 2, 4, or 8 byte structs |
| 1051 | howToReturnStruct = SPK_PrimitiveType; |
| 1052 | } |
| 1053 | else |
| 1054 | { |
| 1055 | // Currently: 3, 5, 6, or 7 byte structs |
| 1056 | assert(structSize < genTypeSize(useType)); |
| 1057 | howToReturnStruct = SPK_EnclosingType; |
| 1058 | } |
| 1059 | } |
| 1060 | } |
| 1061 | |
| 1062 | #ifdef _TARGET_64BIT_ |
| 1063 | // Note this handles an odd case when FEATURE_MULTIREG_RET is disabled and HFAs are enabled |
| 1064 | // |
| 1065 | // getPrimitiveTypeForStruct will return TYP_UNKNOWN for a struct that is an HFA of two floats |
| 1066 | // because when HFA are enabled, normally we would use two FP registers to pass or return it |
| 1067 | // |
| 1068 | // But if we don't have support for multiple register return types, we have to change this. |
    // Since what we have is an 8-byte struct (float + float), we change useType to TYP_I_IMPL
    // so that the struct is instead returned in an 8-byte integer register.
| 1071 | // |
| 1072 | if ((FEATURE_MULTIREG_RET == 0) && (useType == TYP_UNKNOWN) && (structSize == (2 * sizeof(float))) && IsHfa(clsHnd)) |
| 1073 | { |
| 1074 | useType = TYP_I_IMPL; |
| 1075 | howToReturnStruct = SPK_PrimitiveType; |
| 1076 | } |
| 1077 | #endif |
| 1078 | |
| 1079 | // Did we change this struct type into a simple "primitive" type? |
| 1080 | if (useType != TYP_UNKNOWN) |
| 1081 | { |
| 1082 | // If so, we should have already set howToReturnStruct, too. |
| 1083 | assert(howToReturnStruct != SPK_Unknown); |
| 1084 | } |
| 1085 | else // We can't replace the struct with a "primitive" type |
| 1086 | { |
| 1087 | // See if we can return this struct by value, possibly in multiple registers |
| 1088 | // or if we should return it using a return buffer register |
| 1089 | // |
| 1090 | if ((FEATURE_MULTIREG_RET == 1) && (structSize <= MAX_RET_MULTIREG_BYTES)) |
| 1091 | { |
| 1092 | // Structs that are HFA's are returned in multiple registers |
| 1093 | if (IsHfa(clsHnd)) |
| 1094 | { |
| 1095 | // HFA's of count one should have been handled by getPrimitiveTypeForStruct |
| 1096 | assert(GetHfaCount(clsHnd) >= 2); |
| 1097 | |
                // set wbReturnStruct and useType to indicate that this is returned by value as an HFA
                // using multiple registers
| 1100 | howToReturnStruct = SPK_ByValueAsHfa; |
| 1101 | useType = TYP_STRUCT; |
| 1102 | } |
| 1103 | else // Not an HFA struct type |
| 1104 | { |
| 1105 | |
| 1106 | #ifdef UNIX_AMD64_ABI |
| 1107 | |
| 1108 | // The case of (structDesc.eightByteCount == 1) should have already been handled |
| 1109 | if (structDesc.eightByteCount > 1) |
| 1110 | { |
                    // set wbReturnStruct and useType to indicate that this is returned by value in multiple registers
| 1112 | howToReturnStruct = SPK_ByValue; |
| 1113 | useType = TYP_STRUCT; |
| 1114 | assert(structDesc.passedInRegisters == true); |
| 1115 | } |
| 1116 | else |
| 1117 | { |
| 1118 | assert(structDesc.eightByteCount == 0); |
| 1119 | // Otherwise we return this struct using a return buffer |
                    // set wbReturnStruct and useType to indicate that this is returned using a return buffer register
                    // (reference to a return buffer)
| 1122 | howToReturnStruct = SPK_ByReference; |
| 1123 | useType = TYP_UNKNOWN; |
| 1124 | assert(structDesc.passedInRegisters == false); |
| 1125 | } |
| 1126 | |
| 1127 | #elif defined(_TARGET_ARM64_) |
| 1128 | |
| 1129 | // Structs that are pointer sized or smaller should have been handled by getPrimitiveTypeForStruct |
| 1130 | assert(structSize > TARGET_POINTER_SIZE); |
| 1131 | |
| 1132 | // On ARM64 structs that are 9-16 bytes are returned by value in multiple registers |
| 1133 | // |
| 1134 | if (structSize <= (TARGET_POINTER_SIZE * 2)) |
| 1135 | { |
                    // set wbReturnStruct and useType to indicate that this is returned by value in multiple registers
| 1137 | howToReturnStruct = SPK_ByValue; |
| 1138 | useType = TYP_STRUCT; |
| 1139 | } |
| 1140 | else // a structSize that is 17-32 bytes in size |
| 1141 | { |
| 1142 | // Otherwise we return this struct using a return buffer |
                    // set wbReturnStruct and useType to indicate that this is returned using a return buffer register
                    // (reference to a return buffer)
| 1145 | howToReturnStruct = SPK_ByReference; |
| 1146 | useType = TYP_UNKNOWN; |
| 1147 | } |
| 1148 | |
| 1149 | #elif defined(_TARGET_ARM_) || defined(_TARGET_X86_) |
| 1150 | |
| 1151 | // Otherwise we return this struct using a return buffer |
                // set wbReturnStruct and useType to indicate that this is returned using a return buffer register
                // (reference to a return buffer)
| 1154 | howToReturnStruct = SPK_ByReference; |
| 1155 | useType = TYP_UNKNOWN; |
| 1156 | |
| 1157 | #else // _TARGET_XXX_ |
| 1158 | |
| 1159 | noway_assert(!"Unhandled TARGET in getReturnTypeForStruct (with FEATURE_MULTIREG_ARGS=1)" ); |
| 1160 | |
| 1161 | #endif // _TARGET_XXX_ |
| 1162 | } |
| 1163 | } |
| 1164 | else // (structSize > MAX_RET_MULTIREG_BYTES) || (FEATURE_MULTIREG_RET == 0) |
| 1165 | { |
| 1166 | // We have a (large) struct that can't be replaced with a "primitive" type |
| 1167 | // and can't be returned in multiple registers |
| 1168 | |
| 1169 | // We return this struct using a return buffer register |
            // set wbReturnStruct and useType to indicate that this is returned using a return buffer register
            // (reference to a return buffer)
| 1172 | howToReturnStruct = SPK_ByReference; |
| 1173 | useType = TYP_UNKNOWN; |
| 1174 | } |
| 1175 | } |
| 1176 | |
| 1177 | // 'howToReturnStruct' must be set to one of the valid values before we return |
| 1178 | assert(howToReturnStruct != SPK_Unknown); |
| 1179 | if (wbReturnStruct != nullptr) |
| 1180 | { |
| 1181 | *wbReturnStruct = howToReturnStruct; |
| 1182 | } |
| 1183 | |
| 1184 | return useType; |
| 1185 | } |
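
// Illustrative examples (target-dependent; these assume ARM64 with
// FEATURE_MULTIREG_RET enabled):
//
//   struct { int a; }                  (size 4)  -> TYP_INT,     *wbReturnStruct = SPK_PrimitiveType
//   a 5-byte struct (explicit layout)            -> TYP_I_IMPL,  *wbReturnStruct = SPK_EnclosingType
//   struct { float a; float b; }       (HFA, 8)  -> TYP_STRUCT,  *wbReturnStruct = SPK_ByValueAsHfa
//   struct { long a; long b; long c; } (size 24) -> TYP_UNKNOWN, *wbReturnStruct = SPK_ByReference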
| 1186 | |
| 1187 | /////////////////////////////////////////////////////////////////////////////// |
| 1188 | // |
// MEASURE_NOWAY: code to measure and rank dynamic occurrences of noway_assert.
| 1190 | // (Just the appearances of noway_assert, whether the assert is true or false.) |
| 1191 | // This might help characterize the cost of noway_assert in non-DEBUG builds, |
| 1192 | // or determine which noway_assert should be simple DEBUG-only asserts. |
| 1193 | // |
| 1194 | /////////////////////////////////////////////////////////////////////////////// |
| 1195 | |
| 1196 | #if MEASURE_NOWAY |
| 1197 | |
| 1198 | struct FileLine |
| 1199 | { |
| 1200 | char* m_file; |
| 1201 | unsigned m_line; |
| 1202 | char* m_condStr; |
| 1203 | |
| 1204 | FileLine() : m_file(nullptr), m_line(0), m_condStr(nullptr) |
| 1205 | { |
| 1206 | } |
| 1207 | |
| 1208 | FileLine(const char* file, unsigned line, const char* condStr) : m_line(line) |
| 1209 | { |
| 1210 | size_t newSize = (strlen(file) + 1) * sizeof(char); |
| 1211 | m_file = HostAllocator::getHostAllocator().allocate<char>(newSize); |
| 1212 | strcpy_s(m_file, newSize, file); |
| 1213 | |
| 1214 | newSize = (strlen(condStr) + 1) * sizeof(char); |
| 1215 | m_condStr = HostAllocator::getHostAllocator().allocate<char>(newSize); |
| 1216 | strcpy_s(m_condStr, newSize, condStr); |
| 1217 | } |
| 1218 | |
| 1219 | FileLine(const FileLine& other) |
| 1220 | { |
| 1221 | m_file = other.m_file; |
| 1222 | m_line = other.m_line; |
| 1223 | m_condStr = other.m_condStr; |
| 1224 | } |
| 1225 | |
| 1226 | // GetHashCode() and Equals() are needed by JitHashTable |
| 1227 | |
| 1228 | static unsigned GetHashCode(FileLine fl) |
| 1229 | { |
| 1230 | assert(fl.m_file != nullptr); |
| 1231 | unsigned code = fl.m_line; |
| 1232 | for (const char* p = fl.m_file; *p != '\0'; p++) |
| 1233 | { |
| 1234 | code += *p; |
| 1235 | } |
| 1236 | // Could also add condStr. |
| 1237 | return code; |
| 1238 | } |
| 1239 | |
| 1240 | static bool Equals(FileLine fl1, FileLine fl2) |
| 1241 | { |
| 1242 | return (fl1.m_line == fl2.m_line) && (0 == strcmp(fl1.m_file, fl2.m_file)); |
| 1243 | } |
| 1244 | }; |
| 1245 | |
| 1246 | typedef JitHashTable<FileLine, FileLine, size_t, HostAllocator> FileLineToCountMap; |
| 1247 | FileLineToCountMap* NowayAssertMap; |
| 1248 | |
| 1249 | void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr) |
| 1250 | { |
| 1251 | if (NowayAssertMap == nullptr) |
| 1252 | { |
| 1253 | NowayAssertMap = new (HostAllocator::getHostAllocator()) FileLineToCountMap(HostAllocator::getHostAllocator()); |
| 1254 | } |
| 1255 | FileLine fl(filename, line, condStr); |
| 1256 | size_t* pCount = NowayAssertMap->LookupPointer(fl); |
| 1257 | if (pCount == nullptr) |
| 1258 | { |
| 1259 | NowayAssertMap->Set(fl, 1); |
| 1260 | } |
| 1261 | else |
| 1262 | { |
| 1263 | ++(*pCount); |
| 1264 | } |
| 1265 | } |
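
// Illustrative flow (a sketch; the actual hook lives in the noway_assert
// machinery, not in this file): with MEASURE_NOWAY enabled, each evaluation
// of a noway_assert ends up invoking something like
//
//   RecordNowayAssertGlobal(__FILE__, __LINE__, "the condition text");
//
// which bumps the per-(file, line, condition) counter in NowayAssertMap.
// DisplayNowayAssertMap() later dumps those counts in descending order.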
| 1266 | |
| 1267 | void RecordNowayAssertGlobal(const char* filename, unsigned line, const char* condStr) |
| 1268 | { |
| 1269 | if ((JitConfig.JitMeasureNowayAssert() == 1) && (JitTls::GetCompiler() != nullptr)) |
| 1270 | { |
| 1271 | JitTls::GetCompiler()->RecordNowayAssert(filename, line, condStr); |
| 1272 | } |
| 1273 | } |
| 1274 | |
| 1275 | struct NowayAssertCountMap |
| 1276 | { |
| 1277 | size_t count; |
| 1278 | FileLine fl; |
| 1279 | |
| 1280 | NowayAssertCountMap() : count(0) |
| 1281 | { |
| 1282 | } |
| 1283 | |
| 1284 | static int __cdecl compare(const void* elem1, const void* elem2) |
| 1285 | { |
| 1286 | NowayAssertCountMap* e1 = (NowayAssertCountMap*)elem1; |
| 1287 | NowayAssertCountMap* e2 = (NowayAssertCountMap*)elem2; |
| 1288 | return (int)((ssize_t)e2->count - (ssize_t)e1->count); // sort in descending order |
| 1289 | } |
| 1290 | }; |
| 1291 | |
| 1292 | void DisplayNowayAssertMap() |
| 1293 | { |
| 1294 | if (NowayAssertMap != nullptr) |
| 1295 | { |
| 1296 | FILE* fout; |
| 1297 | |
| 1298 | LPCWSTR strJitMeasureNowayAssertFile = JitConfig.JitMeasureNowayAssertFile(); |
| 1299 | if (strJitMeasureNowayAssertFile != nullptr) |
| 1300 | { |
            fout = _wfopen(strJitMeasureNowayAssertFile, W("a"));
            if (fout == nullptr)
            {
                fprintf(jitstdout, "Failed to open JitMeasureNowayAssertFile \"%ws\"\n", strJitMeasureNowayAssertFile);
| 1305 | return; |
| 1306 | } |
| 1307 | } |
| 1308 | else |
| 1309 | { |
| 1310 | fout = jitstdout; |
| 1311 | } |
| 1312 | |
| 1313 | // Iterate noway assert map, create sorted table by occurrence, dump it. |
| 1314 | unsigned count = NowayAssertMap->GetCount(); |
| 1315 | NowayAssertCountMap* nacp = new NowayAssertCountMap[count]; |
| 1316 | unsigned i = 0; |
| 1317 | |
| 1318 | for (FileLineToCountMap::KeyIterator iter = NowayAssertMap->Begin(), end = NowayAssertMap->End(); |
| 1319 | !iter.Equal(end); ++iter) |
| 1320 | { |
| 1321 | nacp[i].count = iter.GetValue(); |
| 1322 | nacp[i].fl = iter.Get(); |
| 1323 | ++i; |
| 1324 | } |
| 1325 | |
| 1326 | qsort(nacp, count, sizeof(nacp[0]), NowayAssertCountMap::compare); |
| 1327 | |
| 1328 | if (fout == jitstdout) |
| 1329 | { |
| 1330 | // Don't output the header if writing to a file, since we'll be appending to existing dumps in that case. |
| 1331 | fprintf(fout, "\nnoway_assert counts:\n" ); |
| 1332 | fprintf(fout, "count, file, line, text\n" ); |
| 1333 | } |
| 1334 | |
| 1335 | for (i = 0; i < count; i++) |
| 1336 | { |
| 1337 | fprintf(fout, "%u, %s, %u, \"%s\"\n" , nacp[i].count, nacp[i].fl.m_file, nacp[i].fl.m_line, |
| 1338 | nacp[i].fl.m_condStr); |
| 1339 | } |
| 1340 | |
| 1341 | if (fout != jitstdout) |
| 1342 | { |
| 1343 | fclose(fout); |
| 1344 | fout = nullptr; |
| 1345 | } |
| 1346 | } |
| 1347 | } |
| 1348 | |
| 1349 | #endif // MEASURE_NOWAY |
| 1350 | |
| 1351 | /***************************************************************************** |
| 1352 | * variables to keep track of how many iterations we go in a dataflow pass |
| 1353 | */ |
| 1354 | |
| 1355 | #if DATAFLOW_ITER |
| 1356 | |
| 1357 | unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow |
| 1358 | unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow |
| 1359 | |
| 1360 | #endif // DATAFLOW_ITER |
| 1361 | |
| 1362 | #if MEASURE_BLOCK_SIZE |
| 1363 | size_t genFlowNodeSize; |
| 1364 | size_t genFlowNodeCnt; |
| 1365 | #endif // MEASURE_BLOCK_SIZE |
| 1366 | |
| 1367 | /*****************************************************************************/ |
| 1368 | // We keep track of methods we've already compiled. |
| 1369 | |
| 1370 | /***************************************************************************** |
| 1371 | * Declare the statics |
| 1372 | */ |
| 1373 | |
| 1374 | #ifdef DEBUG |
| 1375 | /* static */ |
| 1376 | unsigned Compiler::s_compMethodsCount = 0; // to produce unique label names |
| 1377 | #endif |
| 1378 | |
| 1379 | #if MEASURE_MEM_ALLOC |
| 1380 | /* static */ |
| 1381 | bool Compiler::s_dspMemStats = false; |
| 1382 | #endif |
| 1383 | |
| 1384 | #ifndef PROFILING_SUPPORTED |
| 1385 | const bool Compiler::Options::compNoPInvokeInlineCB = false; |
| 1386 | #endif |
| 1387 | |
| 1388 | /***************************************************************************** |
| 1389 | * |
| 1390 | * One time initialization code |
| 1391 | */ |
| 1392 | |
| 1393 | /* static */ |
| 1394 | void Compiler::compStartup() |
| 1395 | { |
| 1396 | #if DISPLAY_SIZES |
| 1397 | grossVMsize = grossNCsize = totalNCsize = 0; |
| 1398 | #endif // DISPLAY_SIZES |
| 1399 | |
| 1400 | /* Initialize the table of tree node sizes */ |
| 1401 | |
| 1402 | GenTree::InitNodeSize(); |
| 1403 | |
| 1404 | #ifdef JIT32_GCENCODER |
| 1405 | // Initialize the GC encoder lookup table |
| 1406 | |
| 1407 | GCInfo::gcInitEncoderLookupTable(); |
| 1408 | #endif |
| 1409 | |
| 1410 | /* Initialize the emitter */ |
| 1411 | |
| 1412 | emitter::emitInit(); |
| 1413 | |
| 1414 | // Static vars of ValueNumStore |
| 1415 | ValueNumStore::InitValueNumStoreStatics(); |
| 1416 | |
| 1417 | compDisplayStaticSizes(jitstdout); |
| 1418 | } |
| 1419 | |
| 1420 | /***************************************************************************** |
| 1421 | * |
| 1422 | * One time finalization code |
| 1423 | */ |
| 1424 | |
| 1425 | /* static */ |
| 1426 | void Compiler::compShutdown() |
| 1427 | { |
| 1428 | #ifdef ALT_JIT |
| 1429 | if (s_pAltJitExcludeAssembliesList != nullptr) |
| 1430 | { |
| 1431 | s_pAltJitExcludeAssembliesList->~AssemblyNamesList2(); // call the destructor |
| 1432 | s_pAltJitExcludeAssembliesList = nullptr; |
| 1433 | } |
| 1434 | #endif // ALT_JIT |
| 1435 | |
| 1436 | #ifdef DEBUG |
| 1437 | if (s_pJitDisasmIncludeAssembliesList != nullptr) |
| 1438 | { |
| 1439 | s_pJitDisasmIncludeAssembliesList->~AssemblyNamesList2(); // call the destructor |
| 1440 | s_pJitDisasmIncludeAssembliesList = nullptr; |
| 1441 | } |
| 1442 | #endif // DEBUG |
| 1443 | |
| 1444 | #if MEASURE_NOWAY |
| 1445 | DisplayNowayAssertMap(); |
| 1446 | #endif // MEASURE_NOWAY |
| 1447 | |
| 1448 | /* Shut down the emitter */ |
| 1449 | |
| 1450 | emitter::emitDone(); |
| 1451 | |
| 1452 | #if defined(DEBUG) || defined(INLINE_DATA) |
| 1453 | // Finish reading and/or writing inline xml |
| 1454 | InlineStrategy::FinalizeXml(); |
| 1455 | #endif // defined(DEBUG) || defined(INLINE_DATA) |
| 1456 | |
| 1457 | #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS |
| 1458 | if (genMethodCnt == 0) |
| 1459 | { |
| 1460 | return; |
| 1461 | } |
| 1462 | #endif |
| 1463 | |
| 1464 | #if NODEBASH_STATS |
| 1465 | GenTree::ReportOperBashing(jitstdout); |
| 1466 | #endif |
| 1467 | |
| 1468 | // Where should we write our statistics output? |
| 1469 | FILE* fout = jitstdout; |
| 1470 | |
| 1471 | #ifdef FEATURE_JIT_METHOD_PERF |
| 1472 | if (compJitTimeLogFilename != nullptr) |
| 1473 | { |
| 1474 | FILE* jitTimeLogFile = _wfopen(compJitTimeLogFilename, W("a" )); |
| 1475 | if (jitTimeLogFile != nullptr) |
| 1476 | { |
| 1477 | CompTimeSummaryInfo::s_compTimeSummary.Print(jitTimeLogFile); |
| 1478 | fclose(jitTimeLogFile); |
| 1479 | } |
| 1480 | } |
| 1481 | #endif // FEATURE_JIT_METHOD_PERF |
| 1482 | |
| 1483 | #if COUNT_AST_OPERS |
| 1484 | |
| 1485 | // Add up all the counts so that we can show percentages of total |
| 1486 | unsigned gtc = 0; |
| 1487 | for (unsigned op = 0; op < GT_COUNT; op++) |
| 1488 | gtc += GenTree::s_gtNodeCounts[op]; |
| 1489 | |
| 1490 | if (gtc > 0) |
| 1491 | { |
| 1492 | unsigned rem_total = gtc; |
| 1493 | unsigned rem_large = 0; |
| 1494 | unsigned rem_small = 0; |
| 1495 | |
| 1496 | unsigned tot_large = 0; |
| 1497 | unsigned tot_small = 0; |
| 1498 | |
| 1499 | fprintf(fout, "\nGenTree operator counts (approximate):\n\n" ); |
| 1500 | |
| 1501 | for (unsigned op = 0; op < GT_COUNT; op++) |
| 1502 | { |
| 1503 | unsigned siz = GenTree::s_gtTrueSizes[op]; |
| 1504 | unsigned cnt = GenTree::s_gtNodeCounts[op]; |
| 1505 | double pct = 100.0 * cnt / gtc; |
| 1506 | |
| 1507 | if (siz > TREE_NODE_SZ_SMALL) |
| 1508 | tot_large += cnt; |
| 1509 | else |
| 1510 | tot_small += cnt; |
| 1511 | |
| 1512 | // Let's not show anything below a threshold |
| 1513 | if (pct >= 0.5) |
| 1514 | { |
| 1515 | fprintf(fout, " GT_%-17s %7u (%4.1lf%%) %3u bytes each\n" , GenTree::OpName((genTreeOps)op), cnt, |
| 1516 | pct, siz); |
| 1517 | rem_total -= cnt; |
| 1518 | } |
| 1519 | else |
| 1520 | { |
| 1521 | if (siz > TREE_NODE_SZ_SMALL) |
| 1522 | rem_large += cnt; |
| 1523 | else |
| 1524 | rem_small += cnt; |
| 1525 | } |
| 1526 | } |
| 1527 | if (rem_total > 0) |
| 1528 | { |
| 1529 | fprintf(fout, " All other GT_xxx ... %7u (%4.1lf%%) ... %4.1lf%% small + %4.1lf%% large\n" , rem_total, |
| 1530 | 100.0 * rem_total / gtc, 100.0 * rem_small / gtc, 100.0 * rem_large / gtc); |
| 1531 | } |
| 1532 | fprintf(fout, " -----------------------------------------------------\n" ); |
| 1533 | fprintf(fout, " Total ....... %11u --ALL-- ... %4.1lf%% small + %4.1lf%% large\n" , gtc, |
| 1534 | 100.0 * tot_small / gtc, 100.0 * tot_large / gtc); |
| 1535 | fprintf(fout, "\n" ); |
| 1536 | } |
| 1537 | |
| 1538 | #endif // COUNT_AST_OPERS |
| 1539 | |
| 1540 | #if DISPLAY_SIZES |
| 1541 | |
| 1542 | if (grossVMsize && grossNCsize) |
| 1543 | { |
| 1544 | fprintf(fout, "\n" ); |
| 1545 | fprintf(fout, "--------------------------------------\n" ); |
| 1546 | fprintf(fout, "Function and GC info size stats\n" ); |
| 1547 | fprintf(fout, "--------------------------------------\n" ); |
| 1548 | |
| 1549 | fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n" , grossVMsize, grossNCsize, Target::g_tgtCPUName, |
| 1550 | 100 * grossNCsize / grossVMsize, "Total (excluding GC info)" ); |
| 1551 | |
| 1552 | fprintf(fout, "[%7u VM, %8u %6s %4u%%] %s\n" , grossVMsize, totalNCsize, Target::g_tgtCPUName, |
| 1553 | 100 * totalNCsize / grossVMsize, "Total (including GC info)" ); |
| 1554 | |
| 1555 | if (gcHeaderISize || gcHeaderNSize) |
| 1556 | { |
| 1557 | fprintf(fout, "\n" ); |
| 1558 | |
| 1559 | fprintf(fout, "GC tables : [%7uI,%7uN] %7u byt (%u%% of IL, %u%% of %s).\n" , |
| 1560 | gcHeaderISize + gcPtrMapISize, gcHeaderNSize + gcPtrMapNSize, totalNCsize - grossNCsize, |
| 1561 | 100 * (totalNCsize - grossNCsize) / grossVMsize, 100 * (totalNCsize - grossNCsize) / grossNCsize, |
| 1562 | Target::g_tgtCPUName); |
| 1563 | |
| 1564 | fprintf(fout, "GC headers : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n" , gcHeaderISize, |
| 1565 | gcHeaderNSize, gcHeaderISize + gcHeaderNSize, (float)gcHeaderISize / (genMethodICnt + 0.001), |
| 1566 | (float)gcHeaderNSize / (genMethodNCnt + 0.001), |
| 1567 | (float)(gcHeaderISize + gcHeaderNSize) / genMethodCnt); |
| 1568 | |
| 1569 | fprintf(fout, "GC ptr maps : [%7uI,%7uN] %7u byt, [%4.1fI,%4.1fN] %4.1f byt/meth\n" , gcPtrMapISize, |
| 1570 | gcPtrMapNSize, gcPtrMapISize + gcPtrMapNSize, (float)gcPtrMapISize / (genMethodICnt + 0.001), |
| 1571 | (float)gcPtrMapNSize / (genMethodNCnt + 0.001), |
| 1572 | (float)(gcPtrMapISize + gcPtrMapNSize) / genMethodCnt); |
| 1573 | } |
| 1574 | else |
| 1575 | { |
| 1576 | fprintf(fout, "\n" ); |
| 1577 | |
| 1578 | fprintf(fout, "GC tables take up %u bytes (%u%% of instr, %u%% of %6s code).\n" , |
| 1579 | totalNCsize - grossNCsize, 100 * (totalNCsize - grossNCsize) / grossVMsize, |
| 1580 | 100 * (totalNCsize - grossNCsize) / grossNCsize, Target::g_tgtCPUName); |
| 1581 | } |
| 1582 | |
| 1583 | #ifdef DEBUG |
| 1584 | #if DOUBLE_ALIGN |
| 1585 | fprintf(fout, "%u out of %u methods generated with double-aligned stack\n" , |
| 1586 | Compiler::s_lvaDoubleAlignedProcsCount, genMethodCnt); |
| 1587 | #endif |
| 1588 | #endif |
| 1589 | } |
| 1590 | |
| 1591 | #endif // DISPLAY_SIZES |
| 1592 | |
| 1593 | #if CALL_ARG_STATS |
| 1594 | compDispCallArgStats(fout); |
| 1595 | #endif |
| 1596 | |
| 1597 | #if COUNT_BASIC_BLOCKS |
| 1598 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1599 | fprintf(fout, "Basic block count frequency table:\n" ); |
| 1600 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1601 | bbCntTable.dump(fout); |
| 1602 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1603 | |
| 1604 | fprintf(fout, "\n" ); |
| 1605 | |
| 1606 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1607 | fprintf(fout, "IL method size frequency table for methods with a single basic block:\n" ); |
| 1608 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1609 | bbOneBBSizeTable.dump(fout); |
| 1610 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1611 | #endif // COUNT_BASIC_BLOCKS |
| 1612 | |
| 1613 | #if COUNT_LOOPS |
| 1614 | |
| 1615 | fprintf(fout, "\n" ); |
| 1616 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1617 | fprintf(fout, "Loop stats\n" ); |
| 1618 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1619 | fprintf(fout, "Total number of methods with loops is %5u\n" , totalLoopMethods); |
| 1620 | fprintf(fout, "Total number of loops is %5u\n" , totalLoopCount); |
| 1621 | fprintf(fout, "Maximum number of loops per method is %5u\n" , maxLoopsPerMethod); |
| 1622 | fprintf(fout, "# of methods overflowing nat loop table is %5u\n" , totalLoopOverflows); |
| 1623 | fprintf(fout, "Total number of 'unnatural' loops is %5u\n" , totalUnnatLoopCount); |
| 1624 | fprintf(fout, "# of methods overflowing unnat loop limit is %5u\n" , totalUnnatLoopOverflows); |
| 1625 | fprintf(fout, "Total number of loops with an iterator is %5u\n" , iterLoopCount); |
| 1626 | fprintf(fout, "Total number of loops with a simple iterator is %5u\n" , simpleTestLoopCount); |
| 1627 | fprintf(fout, "Total number of loops with a constant iterator is %5u\n" , constIterLoopCount); |
| 1628 | |
| 1629 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1630 | fprintf(fout, "Loop count frequency table:\n" ); |
| 1631 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1632 | loopCountTable.dump(fout); |
| 1633 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1634 | fprintf(fout, "Loop exit count frequency table:\n" ); |
| 1635 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1636 | loopExitCountTable.dump(fout); |
| 1637 | fprintf(fout, "--------------------------------------------------\n" ); |
| 1638 | |
| 1639 | #endif // COUNT_LOOPS |
| 1640 | |
| 1641 | #if DATAFLOW_ITER |
| 1642 | |
| 1643 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1644 | fprintf(fout, "Total number of iterations in the CSE dataflow loop is %5u\n" , CSEiterCount); |
| 1645 | fprintf(fout, "Total number of iterations in the CF dataflow loop is %5u\n" , CFiterCount); |
| 1646 | |
| 1647 | #endif // DATAFLOW_ITER |
| 1648 | |
| 1649 | #if MEASURE_NODE_SIZE |
| 1650 | |
| 1651 | fprintf(fout, "\n" ); |
| 1652 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1653 | fprintf(fout, "GenTree node allocation stats\n" ); |
| 1654 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1655 | |
| 1656 | fprintf(fout, "Allocated %6I64u tree nodes (%7I64u bytes total, avg %4I64u bytes per method)\n" , |
| 1657 | genNodeSizeStats.genTreeNodeCnt, genNodeSizeStats.genTreeNodeSize, |
| 1658 | genNodeSizeStats.genTreeNodeSize / genMethodCnt); |
| 1659 | |
| 1660 | fprintf(fout, "Allocated %7I64u bytes of unused tree node space (%3.2f%%)\n" , |
| 1661 | genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize, |
| 1662 | (float)(100 * (genNodeSizeStats.genTreeNodeSize - genNodeSizeStats.genTreeNodeActualSize)) / |
| 1663 | genNodeSizeStats.genTreeNodeSize); |
| 1664 | |
| 1665 | fprintf(fout, "\n" ); |
| 1666 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1667 | fprintf(fout, "Distribution of per-method GenTree node counts:\n" ); |
| 1668 | genTreeNcntHist.dump(fout); |
| 1669 | |
| 1670 | fprintf(fout, "\n" ); |
| 1671 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1672 | fprintf(fout, "Distribution of per-method GenTree node allocations (in bytes):\n" ); |
| 1673 | genTreeNsizHist.dump(fout); |
| 1674 | |
| 1675 | #endif // MEASURE_NODE_SIZE |
| 1676 | |
| 1677 | #if MEASURE_BLOCK_SIZE |
| 1678 | |
| 1679 | fprintf(fout, "\n" ); |
| 1680 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1681 | fprintf(fout, "BasicBlock and flowList/BasicBlockList allocation stats\n" ); |
| 1682 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1683 | |
| 1684 | fprintf(fout, "Allocated %6u basic blocks (%7u bytes total, avg %4u bytes per method)\n" , BasicBlock::s_Count, |
| 1685 | BasicBlock::s_Size, BasicBlock::s_Size / genMethodCnt); |
| 1686 | fprintf(fout, "Allocated %6u flow nodes (%7u bytes total, avg %4u bytes per method)\n" , genFlowNodeCnt, |
| 1687 | genFlowNodeSize, genFlowNodeSize / genMethodCnt); |
| 1688 | |
| 1689 | #endif // MEASURE_BLOCK_SIZE |
| 1690 | |
| 1691 | #if MEASURE_MEM_ALLOC |
| 1692 | |
| 1693 | if (s_dspMemStats) |
| 1694 | { |
| 1695 | fprintf(fout, "\nAll allocations:\n" ); |
| 1696 | ArenaAllocator::dumpAggregateMemStats(jitstdout); |
| 1697 | |
| 1698 | fprintf(fout, "\nLargest method:\n" ); |
| 1699 | ArenaAllocator::dumpMaxMemStats(jitstdout); |
| 1700 | |
| 1701 | fprintf(fout, "\n" ); |
| 1702 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1703 | fprintf(fout, "Distribution of total memory allocated per method (in KB):\n" ); |
| 1704 | memAllocHist.dump(fout); |
| 1705 | |
| 1706 | fprintf(fout, "\n" ); |
| 1707 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1708 | fprintf(fout, "Distribution of total memory used per method (in KB):\n" ); |
| 1709 | memUsedHist.dump(fout); |
| 1710 | } |
| 1711 | |
| 1712 | #endif // MEASURE_MEM_ALLOC |
| 1713 | |
| 1714 | #if LOOP_HOIST_STATS |
| 1715 | #ifdef DEBUG // In retail builds, always display loop hoist stats; in DEBUG, honor the config switch below |
| 1716 | if (JitConfig.DisplayLoopHoistStats() != 0) |
| 1717 | #endif // DEBUG |
| 1718 | { |
| 1719 | PrintAggregateLoopHoistStats(jitstdout); |
| 1720 | } |
| 1721 | #endif // LOOP_HOIST_STATS |
| 1722 | |
| 1723 | #if MEASURE_PTRTAB_SIZE |
| 1724 | |
| 1725 | fprintf(fout, "\n" ); |
| 1726 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1727 | fprintf(fout, "GC pointer table stats\n" ); |
| 1728 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1729 | |
| 1730 | fprintf(fout, "Reg pointer descriptor size (internal): %8u (avg %4u per method)\n" , GCInfo::s_gcRegPtrDscSize, |
| 1731 | GCInfo::s_gcRegPtrDscSize / genMethodCnt); |
| 1732 | |
| 1733 | fprintf(fout, "Total pointer table size: %8u (avg %4u per method)\n" , GCInfo::s_gcTotalPtrTabSize, |
| 1734 | GCInfo::s_gcTotalPtrTabSize / genMethodCnt); |
| 1735 | |
| 1736 | #endif // MEASURE_PTRTAB_SIZE |
| 1737 | |
| 1738 | #if MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES |
| 1739 | |
| 1740 | if (genMethodCnt != 0) |
| 1741 | { |
| 1742 | fprintf(fout, "\n" ); |
| 1743 | fprintf(fout, "A total of %6u methods compiled" , genMethodCnt); |
| 1744 | #if DISPLAY_SIZES |
| 1745 | if (genMethodICnt || genMethodNCnt) |
| 1746 | { |
| 1747 | fprintf(fout, " (%u interruptible, %u non-interruptible)" , genMethodICnt, genMethodNCnt); |
| 1748 | } |
| 1749 | #endif // DISPLAY_SIZES |
| 1750 | fprintf(fout, ".\n" ); |
| 1751 | } |
| 1752 | |
| 1753 | #endif // MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || MEASURE_PTRTAB_SIZE || DISPLAY_SIZES |
| 1754 | |
| 1755 | #if EMITTER_STATS |
| 1756 | emitterStats(fout); |
| 1757 | #endif |
| 1758 | |
| 1759 | #if MEASURE_FATAL |
| 1760 | fprintf(fout, "\n" ); |
| 1761 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1762 | fprintf(fout, "Fatal errors stats\n" ); |
| 1763 | fprintf(fout, "---------------------------------------------------\n" ); |
| 1764 | fprintf(fout, " badCode: %u\n" , fatal_badCode); |
| 1765 | fprintf(fout, " noWay: %u\n" , fatal_noWay); |
| 1766 | fprintf(fout, " NOMEM: %u\n" , fatal_NOMEM); |
| 1767 | fprintf(fout, " noWayAssertBody: %u\n" , fatal_noWayAssertBody); |
| 1768 | #ifdef DEBUG |
| 1769 | fprintf(fout, " noWayAssertBodyArgs: %u\n" , fatal_noWayAssertBodyArgs); |
| 1770 | #endif // DEBUG |
| 1771 | fprintf(fout, " NYI: %u\n" , fatal_NYI); |
| 1772 | #endif // MEASURE_FATAL |
| 1773 | } |
| 1774 | |
| 1775 | /***************************************************************************** |
| 1776 | * Display static data structure sizes. |
| 1777 | */ |
| 1778 | |
| 1779 | /* static */ |
| 1780 | void Compiler::compDisplayStaticSizes(FILE* fout) |
| 1781 | { |
| 1782 | |
| 1783 | #if MEASURE_NODE_SIZE |
| 1784 | GenTree::DumpNodeSizes(fout); |
| 1785 | #endif |
| 1786 | |
| 1787 | #if MEASURE_BLOCK_SIZE |
| 1788 | |
| 1789 | BasicBlock* bbDummy = nullptr; |
| 1790 | |
| 1791 | fprintf(fout, "\n" ); |
| 1792 | fprintf(fout, "Offset / size of bbNext = %3u / %3u\n" , offsetof(BasicBlock, bbNext), |
| 1793 | sizeof(bbDummy->bbNext)); |
| 1794 | fprintf(fout, "Offset / size of bbNum = %3u / %3u\n" , offsetof(BasicBlock, bbNum), |
| 1795 | sizeof(bbDummy->bbNum)); |
| 1796 | fprintf(fout, "Offset / size of bbPostOrderNum = %3u / %3u\n" , offsetof(BasicBlock, bbPostOrderNum), |
| 1797 | sizeof(bbDummy->bbPostOrderNum)); |
| 1798 | fprintf(fout, "Offset / size of bbRefs = %3u / %3u\n" , offsetof(BasicBlock, bbRefs), |
| 1799 | sizeof(bbDummy->bbRefs)); |
| 1800 | fprintf(fout, "Offset / size of bbFlags = %3u / %3u\n" , offsetof(BasicBlock, bbFlags), |
| 1801 | sizeof(bbDummy->bbFlags)); |
| 1802 | fprintf(fout, "Offset / size of bbWeight = %3u / %3u\n" , offsetof(BasicBlock, bbWeight), |
| 1803 | sizeof(bbDummy->bbWeight)); |
| 1804 | fprintf(fout, "Offset / size of bbJumpKind = %3u / %3u\n" , offsetof(BasicBlock, bbJumpKind), |
| 1805 | sizeof(bbDummy->bbJumpKind)); |
| 1806 | fprintf(fout, "Offset / size of bbJumpOffs = %3u / %3u\n" , offsetof(BasicBlock, bbJumpOffs), |
| 1807 | sizeof(bbDummy->bbJumpOffs)); |
| 1808 | fprintf(fout, "Offset / size of bbJumpDest = %3u / %3u\n" , offsetof(BasicBlock, bbJumpDest), |
| 1809 | sizeof(bbDummy->bbJumpDest)); |
| 1810 | fprintf(fout, "Offset / size of bbJumpSwt = %3u / %3u\n" , offsetof(BasicBlock, bbJumpSwt), |
| 1811 | sizeof(bbDummy->bbJumpSwt)); |
| 1812 | fprintf(fout, "Offset / size of bbEntryState = %3u / %3u\n" , offsetof(BasicBlock, bbEntryState), |
| 1813 | sizeof(bbDummy->bbEntryState)); |
| 1814 | fprintf(fout, "Offset / size of bbStkTempsIn = %3u / %3u\n" , offsetof(BasicBlock, bbStkTempsIn), |
| 1815 | sizeof(bbDummy->bbStkTempsIn)); |
| 1816 | fprintf(fout, "Offset / size of bbStkTempsOut = %3u / %3u\n" , offsetof(BasicBlock, bbStkTempsOut), |
| 1817 | sizeof(bbDummy->bbStkTempsOut)); |
| 1818 | fprintf(fout, "Offset / size of bbTryIndex = %3u / %3u\n" , offsetof(BasicBlock, bbTryIndex), |
| 1819 | sizeof(bbDummy->bbTryIndex)); |
| 1820 | fprintf(fout, "Offset / size of bbHndIndex = %3u / %3u\n" , offsetof(BasicBlock, bbHndIndex), |
| 1821 | sizeof(bbDummy->bbHndIndex)); |
| 1822 | fprintf(fout, "Offset / size of bbCatchTyp = %3u / %3u\n" , offsetof(BasicBlock, bbCatchTyp), |
| 1823 | sizeof(bbDummy->bbCatchTyp)); |
| 1824 | fprintf(fout, "Offset / size of bbStkDepth = %3u / %3u\n" , offsetof(BasicBlock, bbStkDepth), |
| 1825 | sizeof(bbDummy->bbStkDepth)); |
| 1826 | fprintf(fout, "Offset / size of bbFPinVars = %3u / %3u\n" , offsetof(BasicBlock, bbFPinVars), |
| 1827 | sizeof(bbDummy->bbFPinVars)); |
| 1828 | fprintf(fout, "Offset / size of bbPreds = %3u / %3u\n" , offsetof(BasicBlock, bbPreds), |
| 1829 | sizeof(bbDummy->bbPreds)); |
| 1830 | fprintf(fout, "Offset / size of bbReach = %3u / %3u\n" , offsetof(BasicBlock, bbReach), |
| 1831 | sizeof(bbDummy->bbReach)); |
| 1832 | fprintf(fout, "Offset / size of bbIDom = %3u / %3u\n" , offsetof(BasicBlock, bbIDom), |
| 1833 | sizeof(bbDummy->bbIDom)); |
| 1834 | fprintf(fout, "Offset / size of bbDfsNum = %3u / %3u\n" , offsetof(BasicBlock, bbDfsNum), |
| 1835 | sizeof(bbDummy->bbDfsNum)); |
| 1836 | fprintf(fout, "Offset / size of bbCodeOffs = %3u / %3u\n" , offsetof(BasicBlock, bbCodeOffs), |
| 1837 | sizeof(bbDummy->bbCodeOffs)); |
| 1838 | fprintf(fout, "Offset / size of bbCodeOffsEnd = %3u / %3u\n" , offsetof(BasicBlock, bbCodeOffsEnd), |
| 1839 | sizeof(bbDummy->bbCodeOffsEnd)); |
| 1840 | fprintf(fout, "Offset / size of bbVarUse = %3u / %3u\n" , offsetof(BasicBlock, bbVarUse), |
| 1841 | sizeof(bbDummy->bbVarUse)); |
| 1842 | fprintf(fout, "Offset / size of bbVarDef = %3u / %3u\n" , offsetof(BasicBlock, bbVarDef), |
| 1843 | sizeof(bbDummy->bbVarDef)); |
| 1844 | fprintf(fout, "Offset / size of bbLiveIn = %3u / %3u\n" , offsetof(BasicBlock, bbLiveIn), |
| 1845 | sizeof(bbDummy->bbLiveIn)); |
| 1846 | fprintf(fout, "Offset / size of bbLiveOut = %3u / %3u\n" , offsetof(BasicBlock, bbLiveOut), |
| 1847 | sizeof(bbDummy->bbLiveOut)); |
| 1848 | fprintf(fout, "Offset / size of bbMemorySsaPhiFunc = %3u / %3u\n" , offsetof(BasicBlock, bbMemorySsaPhiFunc), |
| 1849 | sizeof(bbDummy->bbMemorySsaPhiFunc)); |
| 1850 | fprintf(fout, "Offset / size of bbMemorySsaNumIn = %3u / %3u\n" , offsetof(BasicBlock, bbMemorySsaNumIn), |
| 1851 | sizeof(bbDummy->bbMemorySsaNumIn)); |
| 1852 | fprintf(fout, "Offset / size of bbMemorySsaNumOut = %3u / %3u\n" , offsetof(BasicBlock, bbMemorySsaNumOut), |
| 1853 | sizeof(bbDummy->bbMemorySsaNumOut)); |
| 1854 | fprintf(fout, "Offset / size of bbScope = %3u / %3u\n" , offsetof(BasicBlock, bbScope), |
| 1855 | sizeof(bbDummy->bbScope)); |
| 1856 | fprintf(fout, "Offset / size of bbCseGen = %3u / %3u\n" , offsetof(BasicBlock, bbCseGen), |
| 1857 | sizeof(bbDummy->bbCseGen)); |
| 1858 | fprintf(fout, "Offset / size of bbCseIn = %3u / %3u\n" , offsetof(BasicBlock, bbCseIn), |
| 1859 | sizeof(bbDummy->bbCseIn)); |
| 1860 | fprintf(fout, "Offset / size of bbCseOut = %3u / %3u\n" , offsetof(BasicBlock, bbCseOut), |
| 1861 | sizeof(bbDummy->bbCseOut)); |
| 1862 | |
| 1863 | fprintf(fout, "Offset / size of bbEmitCookie = %3u / %3u\n" , offsetof(BasicBlock, bbEmitCookie), |
| 1864 | sizeof(bbDummy->bbEmitCookie)); |
| 1865 | |
| 1866 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 1867 | fprintf(fout, "Offset / size of bbUnwindNopEmitCookie = %3u / %3u\n" , offsetof(BasicBlock, bbUnwindNopEmitCookie), |
| 1868 | sizeof(bbDummy->bbUnwindNopEmitCookie)); |
| 1869 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 1870 | |
| 1871 | #ifdef VERIFIER |
| 1872 | fprintf(fout, "Offset / size of bbStackIn = %3u / %3u\n" , offsetof(BasicBlock, bbStackIn), |
| 1873 | sizeof(bbDummy->bbStackIn)); |
| 1874 | fprintf(fout, "Offset / size of bbStackOut = %3u / %3u\n" , offsetof(BasicBlock, bbStackOut), |
| 1875 | sizeof(bbDummy->bbStackOut)); |
| 1876 | fprintf(fout, "Offset / size of bbTypesIn = %3u / %3u\n" , offsetof(BasicBlock, bbTypesIn), |
| 1877 | sizeof(bbDummy->bbTypesIn)); |
| 1878 | fprintf(fout, "Offset / size of bbTypesOut = %3u / %3u\n" , offsetof(BasicBlock, bbTypesOut), |
| 1879 | sizeof(bbDummy->bbTypesOut)); |
| 1880 | #endif // VERIFIER |
| 1881 | |
| 1882 | #ifdef DEBUG |
| 1883 | fprintf(fout, "Offset / size of bbLoopNum = %3u / %3u\n" , offsetof(BasicBlock, bbLoopNum), |
| 1884 | sizeof(bbDummy->bbLoopNum)); |
| 1885 | #endif // DEBUG |
| 1886 | |
| 1887 | fprintf(fout, "\n" ); |
| 1888 | fprintf(fout, "Size of BasicBlock = %3u\n" , sizeof(BasicBlock)); |
| 1889 | |
| 1890 | #endif // MEASURE_BLOCK_SIZE |
| 1891 | |
| 1892 | #if EMITTER_STATS |
| 1893 | emitterStaticStats(fout); |
| 1894 | #endif |
| 1895 | } |
| 1896 | |
| 1897 | /***************************************************************************** |
| 1898 | * |
| 1899 | * Constructor |
| 1900 | */ |
| 1901 | |
| 1902 | void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo) |
| 1903 | { |
| 1904 | assert(pAlloc); |
| 1905 | compArenaAllocator = pAlloc; |
| 1906 | |
| 1907 | // The inlinee Compiler object is only allocated when first needed. |
| 1908 | InlineeCompiler = nullptr; |
| 1909 | |
| 1910 | // Set the inline info. |
| 1911 | impInlineInfo = inlineInfo; |
| 1912 | |
| 1913 | eeInfoInitialized = false; |
| 1914 | |
| 1915 | compDoAggressiveInlining = false; |
| 1916 | |
| 1917 | if (compIsForInlining()) |
| 1918 | { |
| 1919 | m_inlineStrategy = nullptr; |
| 1920 | compInlineResult = inlineInfo->inlineResult; |
| 1921 | } |
| 1922 | else |
| 1923 | { |
| 1924 | m_inlineStrategy = new (this, CMK_Inlining) InlineStrategy(this); |
| 1925 | compInlineResult = nullptr; |
| 1926 | } |
| 1927 | |
| 1928 | #ifdef FEATURE_TRACELOGGING |
| 1929 | // Make sure JIT telemetry is initialized as soon as allocations can be made |
| 1930 | // but no later than a point where noway_asserts can be thrown. |
| 1931 | // 1. JIT telemetry could allocate some objects internally. |
| 1932 | // 2. NowayAsserts are tracked through telemetry. |
| 1933 | // Note: JIT telemetry could gather data when the compiler is not fully initialized, |
| 1934 | // so the compiler variables that telemetry uses must be initialized first, as below. |
| 1935 | assert((unsigned)PHASE_PRE_IMPORT == 0); |
| 1936 | previousCompletedPhase = PHASE_PRE_IMPORT; |
| 1937 | info.compILCodeSize = 0; |
| 1938 | info.compMethodHnd = nullptr; |
| 1939 | compJitTelemetry.Initialize(this); |
| 1940 | #endif |
| 1941 | |
| 1942 | #ifdef DEBUG |
| 1943 | bRangeAllowStress = false; |
| 1944 | #endif |
| 1945 | |
| 1946 | fgInit(); |
| 1947 | lvaInit(); |
| 1948 | |
| 1949 | if (!compIsForInlining()) |
| 1950 | { |
| 1951 | codeGen = getCodeGenerator(this); |
| 1952 | optInit(); |
| 1953 | hashBv::Init(this); |
| 1954 | |
| 1955 | compVarScopeMap = nullptr; |
| 1956 | |
| 1957 | // If this method were a real constructor for Compiler, these would |
| 1958 | // become method initializations. |
| 1959 | impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator()); |
| 1960 | impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator()); |
| 1961 | impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator()); |
| 1962 | |
| 1963 | lvMemoryPerSsaData = SsaDefArray<SsaMemDef>(); |
| 1964 | |
| 1965 | // |
| 1966 | // Initialize all the per-method statistics gathering data structures. |
| 1967 | // |
| 1968 | |
| 1969 | optLoopsCloned = 0; |
| 1970 | |
| 1971 | #if LOOP_HOIST_STATS |
| 1972 | m_loopsConsidered = 0; |
| 1973 | m_curLoopHasHoistedExpression = false; |
| 1974 | m_loopsWithHoistedExpressions = 0; |
| 1975 | m_totalHoistedExpressions = 0; |
| 1976 | #endif // LOOP_HOIST_STATS |
| 1977 | #if MEASURE_NODE_SIZE |
| 1978 | genNodeSizeStatsPerFunc.Init(); |
| 1979 | #endif // MEASURE_NODE_SIZE |
| 1980 | } |
| 1981 | else |
| 1982 | { |
| 1983 | codeGen = nullptr; |
| 1984 | } |
| 1985 | |
| 1986 | compJmpOpUsed = false; |
| 1987 | compLongUsed = false; |
| 1988 | compTailCallUsed = false; |
| 1989 | compLocallocUsed = false; |
| 1990 | compLocallocOptimized = false; |
| 1991 | compQmarkRationalized = false; |
| 1992 | compQmarkUsed = false; |
| 1993 | compFloatingPointUsed = false; |
| 1994 | compUnsafeCastUsed = false; |
| 1995 | |
| 1996 | compNeedsGSSecurityCookie = false; |
| 1997 | compGSReorderStackLayout = false; |
| 1998 | #if STACK_PROBES |
| 1999 | compStackProbePrologDone = false; |
| 2000 | #endif |
| 2001 | |
| 2002 | compGeneratingProlog = false; |
| 2003 | compGeneratingEpilog = false; |
| 2004 | |
| 2005 | compLSRADone = false; |
| 2006 | compRationalIRForm = false; |
| 2007 | |
| 2008 | #ifdef DEBUG |
| 2009 | compCodeGenDone = false; |
| 2010 | compRegSetCheckLevel = 0; |
| 2011 | opts.compMinOptsIsUsed = false; |
| 2012 | #endif |
| 2013 | opts.compMinOptsIsSet = false; |
| 2014 | |
| 2015 | // Used by fgFindJumpTargets for inlining heuristics. |
| 2016 | opts.instrCount = 0; |
| 2017 | |
| 2018 | // Used to track when we should consider running EarlyProp |
| 2019 | optMethodFlags = 0; |
| 2020 | |
| 2021 | #ifdef DEBUG |
| 2022 | m_nodeTestData = nullptr; |
| 2023 | m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS; |
| 2024 | #endif |
| 2025 | m_switchDescMap = nullptr; |
| 2026 | m_blockToEHPreds = nullptr; |
| 2027 | m_fieldSeqStore = nullptr; |
| 2028 | m_zeroOffsetFieldMap = nullptr; |
| 2029 | m_arrayInfoMap = nullptr; |
| 2030 | m_refAnyClass = nullptr; |
| 2031 | for (MemoryKind memoryKind : allMemoryKinds()) |
| 2032 | { |
| 2033 | m_memorySsaMap[memoryKind] = nullptr; |
| 2034 | } |
| 2035 | |
| 2036 | #ifdef DEBUG |
| 2037 | if (!compIsForInlining()) |
| 2038 | { |
| 2039 | compDoComponentUnitTestsOnce(); |
| 2040 | } |
| 2041 | #endif // DEBUG |
| 2042 | |
| 2043 | vnStore = nullptr; |
| 2044 | m_opAsgnVarDefSsaNums = nullptr; |
| 2045 | fgSsaPassesCompleted = 0; |
| 2046 | fgVNPassesCompleted = 0; |
| 2047 | |
| 2048 | // check that HelperCallProperties are initialized |
| 2049 | |
| 2050 | assert(s_helperCallProperties.IsPure(CORINFO_HELP_GETSHARED_GCSTATIC_BASE)); |
| 2051 | assert(!s_helperCallProperties.IsPure(CORINFO_HELP_GETFIELDOBJ)); // quick sanity check |
| 2052 | |
| 2053 | // We start with the flow graph in tree-order |
| 2054 | fgOrder = FGOrderTree; |
| 2055 | |
| 2056 | #ifdef FEATURE_SIMD |
| 2057 | m_simdHandleCache = nullptr; |
| 2058 | #endif // FEATURE_SIMD |
| 2059 | |
| 2060 | compUsesThrowHelper = false; |
| 2061 | } |
| 2062 | |
| 2063 | /***************************************************************************** |
| 2064 | * |
| 2065 | * Destructor |
| 2066 | */ |
| 2067 | |
| 2068 | void Compiler::compDone() |
| 2069 | { |
| 2070 | } |
| 2071 | |
| 2072 | void* Compiler::compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ |
| 2073 | void** ppIndirection) /* OUT */ |
| 2074 | { |
| 2075 | void* addr; |
| 2076 | |
| 2077 | if (info.compMatchedVM) |
| 2078 | { |
| 2079 | addr = info.compCompHnd->getHelperFtn(ftnNum, ppIndirection); |
| 2080 | } |
| 2081 | else |
| 2082 | { |
| 2083 | // If we don't have a matched VM, we won't get valid results when asking for a helper function. |
| 2084 | addr = UlongToPtr(0xCA11CA11); // "callcall" |
| 2085 | } |
| 2086 | |
| 2087 | return addr; |
| 2088 | } |
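| | |
| | // Usage note (an observation, not a contract): the 0xCA11CA11 value above is |
| | // only a recognizable placeholder for the altjit case where info.compMatchedVM |
| | // is false (e.g. cross-targeting a different platform); it is never a callable |
| | // address. |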
| 2089 | |
| 2090 | unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd) |
| 2091 | { |
| 2092 | var_types sigType = genActualType(JITtype2varType(cit)); |
| 2093 | unsigned sigSize; |
| 2094 | sigSize = genTypeSize(sigType); |
| 2095 | if (cit == CORINFO_TYPE_VALUECLASS) |
| 2096 | { |
| 2097 | sigSize = info.compCompHnd->getClassSize(clsHnd); |
| 2098 | } |
| 2099 | else if (cit == CORINFO_TYPE_REFANY) |
| 2100 | { |
| 2101 | sigSize = 2 * TARGET_POINTER_SIZE; |
| 2102 | } |
| 2103 | return sigSize; |
| 2104 | } |
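| | |
| | // For illustration, a rough sketch of the results (not exhaustive; |
| | // TARGET_POINTER_SIZE is 8 on 64-bit targets): |
| | //   compGetTypeSize(CORINFO_TYPE_INT, ...)        -> 4 |
| | //   compGetTypeSize(CORINFO_TYPE_BYTE, ...)       -> 4 (genActualType widens small ints) |
| | //   compGetTypeSize(CORINFO_TYPE_VALUECLASS, cls) -> getClassSize(cls) |
| | //   compGetTypeSize(CORINFO_TYPE_REFANY, ...)     -> 2 * TARGET_POINTER_SIZE |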
| 2105 | |
| 2106 | #ifdef DEBUG |
| 2107 | static bool DidComponentUnitTests = false; |
| 2108 | |
| 2109 | void Compiler::compDoComponentUnitTestsOnce() |
| 2110 | { |
| 2111 | if (!JitConfig.RunComponentUnitTests()) |
| 2112 | { |
| 2113 | return; |
| 2114 | } |
| 2115 | |
| 2116 | if (!DidComponentUnitTests) |
| 2117 | { |
| 2118 | DidComponentUnitTests = true; |
| 2119 | ValueNumStore::RunTests(this); |
| 2120 | BitSetSupport::TestSuite(getAllocatorDebugOnly()); |
| 2121 | } |
| 2122 | } |
| 2123 | |
| 2124 | //------------------------------------------------------------------------ |
| 2125 | // compGetJitDefaultFill: |
| 2126 | // |
| 2127 | // Return Value: |
| 2128 | // An unsigned char value used to initialize memory allocated by the JIT. |
| 2129 | // The default value is taken from COMPlus_JitDefaultFill; if it is not set, |
| 2130 | // the value will be 0xdd. When JitStress is active, a random value based |
| 2131 | // on the method hash is used. |
| 2132 | // |
| 2133 | // Notes: |
| 2134 | // Note that we can't use small values like zero, because we have some |
| 2135 | // asserts that can fire for such values. |
| 2136 | // |
| 2137 | unsigned char Compiler::compGetJitDefaultFill() |
| 2138 | { |
| 2139 | unsigned char defaultFill = (unsigned char)JitConfig.JitDefaultFill(); |
| 2140 | |
| 2141 | if ((this != nullptr) && (compStressCompile(STRESS_GENERIC_VARN, 50))) |
| 2142 | { |
| 2143 | unsigned temp; |
| 2144 | temp = info.compMethodHash(); |
| 2145 | temp = (temp >> 16) ^ temp; |
| 2146 | temp = (temp >> 8) ^ temp; |
| 2147 | temp = temp & 0xff; |
| 2148 | // asserts like this: assert(!IsUninitialized(stkLvl)); |
| 2149 | // mean that small values for defaultFill are problematic |
| 2150 | // so we make the value larger in that case. |
| 2151 | if (temp < 0x20) |
| 2152 | { |
| 2153 | temp |= 0x80; |
| 2154 | } |
| 2155 | defaultFill = (unsigned char)temp; |
| 2156 | } |
| 2157 | |
| 2158 | return defaultFill; |
| 2159 | } |
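| | |
| | // A hand-worked trace (assuming a stress compile and a hypothetical method hash |
| | // of 0x12345678): 0x12345678 -> 0x1234444C -> 0x12267008 -> 0x08; since 0x08 is |
| | // below 0x20 the high bit is set, giving a fill value of 0x88. |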
| 2160 | |
| 2161 | #endif // DEBUG |
| 2162 | |
| 2163 | /*****************************************************************************/ |
| 2164 | #ifdef DEBUG |
| 2165 | /*****************************************************************************/ |
| 2166 | |
| 2167 | VarName Compiler::compVarName(regNumber reg, bool isFloatReg) |
| 2168 | { |
| 2169 | if (isFloatReg) |
| 2170 | { |
| 2171 | assert(genIsValidFloatReg(reg)); |
| 2172 | } |
| 2173 | else |
| 2174 | { |
| 2175 | assert(genIsValidReg(reg)); |
| 2176 | } |
| 2177 | |
| 2178 | if ((info.compVarScopesCount > 0) && compCurBB && opts.varNames) |
| 2179 | { |
| 2180 | unsigned lclNum; |
| 2181 | LclVarDsc* varDsc; |
| 2182 | |
| 2183 | /* Look for the matching register */ |
| 2184 | for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) |
| 2185 | { |
| 2186 | /* Skip the variable if it is not in a register, or not in the register we're looking for. */ |
| 2187 | /* Also skip compiler-generated variables (i.e. slot# >= info.compVarScopesCount). */ |
| 2188 | if ((varDsc->lvRegister != 0) && (varDsc->lvRegNum == reg) && (varDsc->IsFloatRegType() || !isFloatReg) && |
| 2189 | (varDsc->lvSlotNum < info.compVarScopesCount)) |
| 2190 | { |
| 2191 | /* check if variable in that register is live */ |
| 2192 | if (VarSetOps::IsMember(this, compCurLife, varDsc->lvVarIndex)) |
| 2193 | { |
| 2194 | /* variable is live - find the corresponding slot */ |
| 2195 | VarScopeDsc* varScope = |
| 2196 | compFindLocalVar(varDsc->lvSlotNum, compCurBB->bbCodeOffs, compCurBB->bbCodeOffsEnd); |
| 2197 | if (varScope) |
| 2198 | { |
| 2199 | return varScope->vsdName; |
| 2200 | } |
| 2201 | } |
| 2202 | } |
| 2203 | } |
| 2204 | } |
| 2205 | |
| 2206 | return nullptr; |
| 2207 | } |
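| | |
| | // Note: a nullptr result simply means that no live, user-visible variable |
| | // currently occupies the register; compRegVarName below then falls back to |
| | // the plain register name. |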
| 2208 | |
| 2209 | const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloatReg) |
| 2210 | { |
| 2211 | |
| 2212 | #ifdef _TARGET_ARM_ |
| 2213 | isFloatReg = genIsValidFloatReg(reg); |
| 2214 | #endif |
| 2215 | |
| 2216 | if (displayVar && (reg != REG_NA)) |
| 2217 | { |
| 2218 | VarName varName = compVarName(reg, isFloatReg); |
| 2219 | |
| 2220 | if (varName) |
| 2221 | { |
| 2222 | const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; |
| 2223 | static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when there are |
| 2224 | // two consecutive calls before printing |
| 2225 | static int index = 0; // for circular index into the name array |
| 2226 | |
| 2227 | index = (index + 1) % 2; // circular reuse of index |
| 2228 | sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "%s'%s'" , getRegName(reg, isFloatReg), |
| 2229 | VarNameToStr(varName)); |
| 2230 | |
| 2231 | return nameVarReg[index]; |
| 2232 | } |
| 2233 | } |
| 2234 | |
| 2235 | /* no debug info required or no variable in that register |
| 2236 | -> return standard name */ |
| 2237 | |
| 2238 | return getRegName(reg, isFloatReg); |
| 2239 | } |
| 2240 | |
| 2241 | const char* Compiler::compRegNameForSize(regNumber reg, size_t size) |
| 2242 | { |
| 2243 | if (size == 0 || size >= 4) |
| 2244 | { |
| 2245 | return compRegVarName(reg, true); |
| 2246 | } |
| 2247 | |
| 2248 | // clang-format off |
| 2249 | static |
| 2250 | const char * sizeNames[][2] = |
| 2251 | { |
| 2252 | { "al" , "ax" }, |
| 2253 | { "cl" , "cx" }, |
| 2254 | { "dl" , "dx" }, |
| 2255 | { "bl" , "bx" }, |
| 2256 | #ifdef _TARGET_AMD64_ |
| 2257 | { "spl" , "sp" }, // ESP |
| 2258 | { "bpl" , "bp" }, // EBP |
| 2259 | { "sil" , "si" }, // ESI |
| 2260 | { "dil" , "di" }, // EDI |
| 2261 | { "r8b" , "r8w" }, |
| 2262 | { "r9b" , "r9w" }, |
| 2263 | { "r10b" , "r10w" }, |
| 2264 | { "r11b" , "r11w" }, |
| 2265 | { "r12b" , "r12w" }, |
| 2266 | { "r13b" , "r13w" }, |
| 2267 | { "r14b" , "r14w" }, |
| 2268 | { "r15b" , "r15w" }, |
| 2269 | #endif // _TARGET_AMD64_ |
| 2270 | }; |
| 2271 | // clang-format on |
| 2272 | |
| 2273 | assert(isByteReg(reg)); |
| 2274 | assert(genRegMask(reg) & RBM_BYTE_REGS); |
| 2275 | assert(size == 1 || size == 2); |
| 2276 | |
| 2277 | return sizeNames[reg][size - 1]; |
| 2278 | } |
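| | |
| | // Usage sketch (illustrative): for the first integer register this returns |
| | // "al" when size == 1 and "ax" when size == 2; size 0 or size >= 4 falls back |
| | // to the full register name via compRegVarName above. |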
| 2279 | |
| 2280 | const char* Compiler::compFPregVarName(unsigned fpReg, bool displayVar) |
| 2281 | { |
| 2282 | const int NAME_VAR_REG_BUFFER_LEN = 4 + 256 + 1; |
| 2283 | static char nameVarReg[2][NAME_VAR_REG_BUFFER_LEN]; // to avoid overwriting the buffer when there are two consecutive calls |
| 2284 | // before printing |
| 2285 | static int index = 0; // for circular index into the name array |
| 2286 | |
| 2287 | index = (index + 1) % 2; // circular reuse of index |
| 2288 | |
| 2289 | /* no debug info required or no variable in that register |
| 2290 | -> return standard name */ |
| 2291 | |
| 2292 | sprintf_s(nameVarReg[index], NAME_VAR_REG_BUFFER_LEN, "ST(%d)" , fpReg); |
| 2293 | return nameVarReg[index]; |
| 2294 | } |
| 2295 | |
| 2296 | const char* Compiler::compLocalVarName(unsigned varNum, unsigned offs) |
| 2297 | { |
| 2298 | unsigned i; |
| 2299 | VarScopeDsc* t; |
| 2300 | |
| 2301 | for (i = 0, t = info.compVarScopes; i < info.compVarScopesCount; i++, t++) |
| 2302 | { |
| 2303 | if (t->vsdVarNum != varNum) |
| 2304 | { |
| 2305 | continue; |
| 2306 | } |
| 2307 | |
| 2308 | if (offs >= t->vsdLifeBeg && offs < t->vsdLifeEnd) |
| 2309 | { |
| 2310 | return VarNameToStr(t->vsdName); |
| 2311 | } |
| 2312 | } |
| 2313 | |
| 2314 | return nullptr; |
| 2315 | } |
| 2316 | |
| 2317 | /*****************************************************************************/ |
| 2318 | #endif // DEBUG |
| 2319 | /*****************************************************************************/ |
| 2320 | |
| 2321 | void Compiler::compSetProcessor() |
| 2322 | { |
| 2323 | // |
| 2324 | // NOTE: This function needs to be kept in sync with EEJitManager::SetCpuInfo() in vm\codeman.cpp |
| 2325 | // |
| 2326 | |
| 2327 | const JitFlags& jitFlags = *opts.jitFlags; |
| 2328 | |
| 2329 | #if defined(_TARGET_ARM_) |
| 2330 | info.genCPU = CPU_ARM; |
| 2331 | #elif defined(_TARGET_ARM64_) |
| 2332 | info.genCPU = CPU_ARM64; |
| 2333 | #elif defined(_TARGET_AMD64_) |
| 2334 | info.genCPU = CPU_X64; |
| 2335 | #elif defined(_TARGET_X86_) |
| 2336 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4)) |
| 2337 | info.genCPU = CPU_X86_PENTIUM_4; |
| 2338 | else |
| 2339 | info.genCPU = CPU_X86; |
| 2340 | #endif |
| 2341 | |
| 2342 | // |
| 2343 | // Processor specific optimizations |
| 2344 | // |
| 2345 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 2346 | |
| 2347 | #ifdef _TARGET_AMD64_ |
| 2348 | opts.compUseFCOMI = false; |
| 2349 | opts.compUseCMOV = true; |
| 2350 | #elif defined(_TARGET_X86_) |
| 2351 | opts.compUseFCOMI = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_FCOMI); |
| 2352 | opts.compUseCMOV = jitFlags.IsSet(JitFlags::JIT_FLAG_USE_CMOV); |
| 2353 | |
| 2354 | #ifdef DEBUG |
| 2355 | if (opts.compUseFCOMI) |
| 2356 | opts.compUseFCOMI = !compStressCompile(STRESS_USE_FCOMI, 50); |
| 2357 | if (opts.compUseCMOV) |
| 2358 | opts.compUseCMOV = !compStressCompile(STRESS_USE_CMOV, 50); |
| 2359 | #endif // DEBUG |
| 2360 | |
| 2361 | #endif // _TARGET_X86_ |
| 2362 | |
| 2363 | // Instruction set flags for Intel hardware intrinsics |
| 2364 | #ifdef _TARGET_XARCH_ |
| 2365 | opts.compSupportsISA = 0; |
| 2366 | |
| 2367 | if (!jitFlags.IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 2368 | { |
| 2369 | #ifdef FEATURE_CORECLR |
| 2370 | if (JitConfig.EnableHWIntrinsic()) |
| 2371 | { |
| 2372 | opts.setSupportedISA(InstructionSet_Base); |
| 2373 | |
| 2374 | if (JitConfig.EnableSSE()) |
| 2375 | { |
| 2376 | opts.setSupportedISA(InstructionSet_SSE); |
| 2377 | #ifdef _TARGET_AMD64_ |
| 2378 | opts.setSupportedISA(InstructionSet_SSE_X64); |
| 2379 | #endif // _TARGET_AMD64_ |
| 2380 | |
| 2381 | if (JitConfig.EnableSSE2()) |
| 2382 | { |
| 2383 | opts.setSupportedISA(InstructionSet_SSE2); |
| 2384 | #ifdef _TARGET_AMD64_ |
| 2385 | opts.setSupportedISA(InstructionSet_SSE2_X64); |
| 2386 | #endif // _TARGET_AMD64_ |
| 2387 | |
| 2388 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AES) && JitConfig.EnableAES()) |
| 2389 | { |
| 2390 | opts.setSupportedISA(InstructionSet_AES); |
| 2391 | } |
| 2392 | |
| 2393 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_PCLMULQDQ) && JitConfig.EnablePCLMULQDQ()) |
| 2394 | { |
| 2395 | opts.setSupportedISA(InstructionSet_PCLMULQDQ); |
| 2396 | } |
| 2397 | |
| 2398 | // We need to additionally check that COMPlus_EnableSSE3_4 is set, as that |
| 2399 | // is a preexisting config flag that controls the SSE3+ ISAs |
| 2400 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE3) && JitConfig.EnableSSE3() && |
| 2401 | JitConfig.EnableSSE3_4()) |
| 2402 | { |
| 2403 | opts.setSupportedISA(InstructionSet_SSE3); |
| 2404 | |
| 2405 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSSE3) && JitConfig.EnableSSSE3()) |
| 2406 | { |
| 2407 | opts.setSupportedISA(InstructionSet_SSSE3); |
| 2408 | |
| 2409 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE41) && JitConfig.EnableSSE41()) |
| 2410 | { |
| 2411 | opts.setSupportedISA(InstructionSet_SSE41); |
| 2412 | #ifdef _TARGET_AMD64_ |
| 2413 | opts.setSupportedISA(InstructionSet_SSE41_X64); |
| 2414 | #endif // _TARGET_AMD64_ |
| 2415 | |
| 2416 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_SSE42) && JitConfig.EnableSSE42()) |
| 2417 | { |
| 2418 | opts.setSupportedISA(InstructionSet_SSE42); |
| 2419 | #ifdef _TARGET_AMD64_ |
| 2420 | opts.setSupportedISA(InstructionSet_SSE42_X64); |
| 2421 | #endif // _TARGET_AMD64_ |
| 2422 | |
| 2423 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_POPCNT) && JitConfig.EnablePOPCNT()) |
| 2424 | { |
| 2425 | opts.setSupportedISA(InstructionSet_POPCNT); |
| 2426 | #ifdef _TARGET_AMD64_ |
| 2427 | opts.setSupportedISA(InstructionSet_POPCNT_X64); |
| 2428 | #endif // _TARGET_AMD64_ |
| 2429 | } |
| 2430 | |
| 2431 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX) && JitConfig.EnableAVX()) |
| 2432 | { |
| 2433 | opts.setSupportedISA(InstructionSet_AVX); |
| 2434 | |
| 2435 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_FMA) && JitConfig.EnableFMA()) |
| 2436 | { |
| 2437 | opts.setSupportedISA(InstructionSet_FMA); |
| 2438 | } |
| 2439 | |
| 2440 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2) && JitConfig.EnableAVX2()) |
| 2441 | { |
| 2442 | opts.setSupportedISA(InstructionSet_AVX2); |
| 2443 | } |
| 2444 | } |
| 2445 | } |
| 2446 | } |
| 2447 | } |
| 2448 | } |
| 2449 | } |
| 2450 | } |
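| | |
| | // To summarize the nesting above: each SSE-family ISA is enabled only when its |
| | // predecessor is (SSE -> SSE2 -> SSE3 -> SSSE3 -> SSE4.1 -> SSE4.2); AES and |
| | // PCLMULQDQ additionally require SSE2, while POPCNT and AVX (with FMA and AVX2 |
| | // above it) require SSE4.2. LZCNT below is independent; BMI1/BMI2 also require |
| | // AVX for the VEX encoding. |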
| 2451 | |
| 2452 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_LZCNT) && JitConfig.EnableLZCNT()) |
| 2453 | { |
| 2454 | opts.setSupportedISA(InstructionSet_LZCNT); |
| 2455 | #ifdef _TARGET_AMD64_ |
| 2456 | opts.setSupportedISA(InstructionSet_LZCNT_X64); |
| 2457 | #endif // _TARGET_AMD64_ |
| 2458 | } |
| 2459 | |
| 2460 | // We currently need to also check that AVX is supported as that controls the support for the VEX encoding |
| 2461 | // in the emitter. |
| 2462 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI1) && JitConfig.EnableBMI1() && |
| 2463 | compSupports(InstructionSet_AVX)) |
| 2464 | { |
| 2465 | opts.setSupportedISA(InstructionSet_BMI1); |
| 2466 | #ifdef _TARGET_AMD64_ |
| 2467 | opts.setSupportedISA(InstructionSet_BMI1_X64); |
| 2468 | #endif // _TARGET_AMD64_ |
| 2469 | } |
| 2470 | |
| 2471 | // We currently need to also check that AVX is supported as that controls the support for the VEX encoding |
| 2472 | // in the emitter. |
| 2473 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_BMI2) && JitConfig.EnableBMI2() && |
| 2474 | compSupports(InstructionSet_AVX)) |
| 2475 | { |
| 2476 | opts.setSupportedISA(InstructionSet_BMI2); |
| 2477 | #ifdef _TARGET_AMD64_ |
| 2478 | opts.setSupportedISA(InstructionSet_BMI2_X64); |
| 2479 | #endif // _TARGET_AMD64_ |
| 2480 | } |
| 2481 | } |
| 2482 | #else // !FEATURE_CORECLR |
| 2483 | // If this is not FEATURE_CORECLR, the only flags supported by the VM are AVX and AVX2. |
| 2484 | // Furthermore, the only two configurations supported by the desktop JIT are SSE2 and AVX2, |
| 2485 | // so if the latter is set, we also check all the in-between options. |
| 2486 | // Note that the EnableSSE2 and EnableSSE flags are only checked by HW Intrinsic code, |
| 2487 | // so the System.Numerics.Vector support doesn't depend on those flags. |
| 2488 | // However, if any of these are disabled, we will not enable AVX2. |
| 2489 | // |
| 2490 | if (jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX) && jitFlags.IsSet(JitFlags::JIT_FLAG_USE_AVX2) && |
| 2491 | (JitConfig.EnableAVX2() != 0) && (JitConfig.EnableAVX() != 0) && (JitConfig.EnableSSE42() != 0) && |
| 2492 | (JitConfig.EnableSSE41() != 0) && (JitConfig.EnableSSSE3() != 0) && (JitConfig.EnableSSE3() != 0) && |
| 2493 | (JitConfig.EnableSSE2() != 0) && (JitConfig.EnableSSE() != 0) && (JitConfig.EnableSSE3_4() != 0)) |
| 2494 | { |
| 2495 | opts.setSupportedISA(InstructionSet_SSE); |
| 2496 | opts.setSupportedISA(InstructionSet_SSE2); |
| 2497 | opts.setSupportedISA(InstructionSet_SSE3); |
| 2498 | opts.setSupportedISA(InstructionSet_SSSE3); |
| 2499 | opts.setSupportedISA(InstructionSet_SSE41); |
| 2500 | opts.setSupportedISA(InstructionSet_SSE42); |
| 2501 | opts.setSupportedISA(InstructionSet_AVX); |
| 2502 | opts.setSupportedISA(InstructionSet_AVX2); |
| 2503 | } |
| 2504 | #endif // !FEATURE_CORECLR |
| 2505 | } |
| 2506 | |
| 2507 | if (!compIsForInlining()) |
| 2508 | { |
| 2509 | if (canUseVexEncoding()) |
| 2510 | { |
| 2511 | codeGen->getEmitter()->SetUseVEXEncoding(true); |
| 2512 | // Assume at first that each JITted method does not contain AVX instructions |
| 2513 | codeGen->getEmitter()->SetContainsAVX(false); |
| 2514 | codeGen->getEmitter()->SetContains256bitAVX(false); |
| 2515 | } |
| 2516 | } |
| 2517 | #endif // _TARGET_XARCH_ |
| 2518 | |
| 2519 | #if defined(_TARGET_ARM64_) |
| 2520 | // There is no JitFlag for the Base instruction set, so handle it manually |
| 2521 | opts.setSupportedISA(InstructionSet_Base); |
| 2522 | #define HARDWARE_INTRINSIC_CLASS(flag, isa) \ |
| 2523 | if (jitFlags.IsSet(JitFlags::flag)) \ |
| 2524 | opts.setSupportedISA(InstructionSet_##isa); |
| 2525 | #include "hwintrinsiclistArm64.h" |
| 2526 | |
| 2527 | #endif |
| 2528 | } |
| 2529 | |
| 2530 | #ifdef PROFILING_SUPPORTED |
| 2531 | // A dummy routine to receive Enter/Leave/Tailcall profiler callbacks. |
| 2532 | // These are used when COMPlus_JitEltHookEnabled=1 |
| 2533 | #ifdef _TARGET_AMD64_ |
| 2534 | void DummyProfilerELTStub(UINT_PTR ProfilerHandle, UINT_PTR callerSP) |
| 2535 | { |
| 2536 | return; |
| 2537 | } |
| 2538 | #else //! _TARGET_AMD64_ |
| 2539 | void DummyProfilerELTStub(UINT_PTR ProfilerHandle) |
| 2540 | { |
| 2541 | return; |
| 2542 | } |
| 2543 | #endif //!_TARGET_AMD64_ |
| 2544 | |
| 2545 | #endif // PROFILING_SUPPORTED |
| 2546 | |
| 2547 | bool Compiler::compIsFullTrust() |
| 2548 | { |
| 2549 | return (info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) == CORINFO_VERIFICATION_CAN_SKIP); |
| 2550 | } |
| 2551 | |
| 2552 | bool Compiler::compShouldThrowOnNoway( |
| 2553 | #ifdef FEATURE_TRACELOGGING |
| 2554 | const char* filename, unsigned line |
| 2555 | #endif |
| 2556 | ) |
| 2557 | { |
| 2558 | #ifdef FEATURE_TRACELOGGING |
| 2559 | compJitTelemetry.NotifyNowayAssert(filename, line); |
| 2560 | #endif |
| 2561 | |
| 2562 | // In min opts, we don't want the noway assert to go through the exception |
| 2563 | // path. Instead we want it to just silently go through codegen for |
| 2564 | // compat reasons. |
| 2565 | // If we are not in full trust, we should always fire for security. |
| 2566 | return !opts.MinOpts() || !compIsFullTrust(); |
| 2567 | } |
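| | |
| | // For example: a MinOpts compile of a fully trusted method returns false (the |
| | // noway condition is tolerated and codegen proceeds), while any compile that is |
| | // not in full trust returns true and takes the exception path. |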
| 2568 | |
| 2569 | // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. |
| 2570 | // I could add the decimal option to ConfigInteger or I could write a function to reinterpret this |
| 2571 | // value as the user intended. |
| 2572 | unsigned ReinterpretHexAsDecimal(unsigned in) |
| 2573 | { |
| 2574 | // ex: in: 0x100 returns: 100 |
| 2575 | unsigned result = 0; |
| 2576 | unsigned index = 1; |
| 2577 | |
| 2578 | // INT_MAX is the default (unset) value; pass it through unchanged |
| 2579 | if (in == INT_MAX) |
| 2580 | { |
| 2581 | return in; |
| 2582 | } |
| 2583 | |
| 2584 | while (in) |
| 2585 | { |
| 2586 | unsigned digit = in % 16; |
| 2587 | in >>= 4; |
| 2588 | assert(digit < 10); |
| 2589 | result += digit * index; |
| 2590 | index *= 10; |
| 2591 | } |
| 2592 | return result; |
| 2593 | } |
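| | |
| | // A hand-worked trace: for in = 0x123 the loop peels off the hex digits 3, 2, 1 |
| | // (low to high) and accumulates result = 3 + 2*10 + 1*100 = 123, i.e. the hex |
| | // literal 0x123 is reinterpreted as the decimal number 123. See the AltJitLimit |
| | // handling in compInitOptions below for a real call site. |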
| 2594 | |
| 2595 | void Compiler::compInitOptions(JitFlags* jitFlags) |
| 2596 | { |
| 2597 | memset(&opts, 0, sizeof(opts)); // zero all options first |
| 2598 | #ifdef UNIX_AMD64_ABI |
| 2599 | opts.compNeedToAlignFrame = false; |
| 2600 | #endif // UNIX_AMD64_ABI |
| 2601 | |
| 2602 | if (compIsForInlining()) |
| 2603 | { |
| 2604 | // The following flags are lost when inlining. (They are removed in |
| 2605 | // Compiler::fgInvokeInlineeCompiler().) |
| 2606 | assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)); |
| 2607 | assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)); |
| 2608 | assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)); |
| 2609 | assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC)); |
| 2610 | assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO)); |
| 2611 | |
| 2612 | assert(jitFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)); |
| 2613 | } |
| 2614 | |
| 2615 | opts.jitFlags = jitFlags; |
| 2616 | opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization |
| 2617 | |
| 2618 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || |
| 2619 | jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) |
| 2620 | { |
| 2621 | opts.compFlags = CLFLG_MINOPT; |
| 2622 | } |
| 2623 | // Don't optimize .cctors (except prejit) or if we're an inlinee |
| 2624 | else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && |
| 2625 | !compIsForInlining()) |
| 2626 | { |
| 2627 | opts.compFlags = CLFLG_MINOPT; |
| 2628 | } |
| 2629 | |
| 2630 | // Default value is to generate a blend of size and speed optimizations |
| 2631 | // |
| 2632 | opts.compCodeOpt = BLENDED_CODE; |
| 2633 | |
| 2634 | // If the EE sets SIZE_OPT or if we are compiling a Class constructor |
| 2635 | // we will optimize for code size at the expense of speed |
| 2636 | // |
| 2637 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) |
| 2638 | { |
| 2639 | opts.compCodeOpt = SMALL_CODE; |
| 2640 | } |
| 2641 | // |
| 2642 | // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size |
| 2643 | // |
| 2644 | else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || |
| 2645 | (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) |
| 2646 | { |
| 2647 | opts.compCodeOpt = FAST_CODE; |
| 2648 | assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); |
| 2649 | } |
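| | |
| | // Net effect: SMALL_CODE when the EE requests size optimization or for class |
| | // constructors, FAST_CODE for SPEED_OPT or full-opt Tier-1 compiles, and |
| | // BLENDED_CODE otherwise. |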
| 2650 | |
| 2651 | //------------------------------------------------------------------------- |
| 2652 | |
| 2653 | opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); |
| 2654 | opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); |
| 2655 | opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); |
| 2656 | |
| 2657 | #if REGEN_SHORTCUTS || REGEN_CALLPAT |
| 2658 | // We never want to have debugging enabled when regenerating GC encoding patterns |
| 2659 | opts.compDbgCode = false; |
| 2660 | opts.compDbgInfo = false; |
| 2661 | opts.compDbgEnC = false; |
| 2662 | #endif |
| 2663 | |
| 2664 | compSetProcessor(); |
| 2665 | |
| 2666 | #ifdef DEBUG |
| 2667 | opts.dspOrder = false; |
| 2668 | if (compIsForInlining()) |
| 2669 | { |
| 2670 | verbose = impInlineInfo->InlinerCompiler->verbose; |
| 2671 | } |
| 2672 | else |
| 2673 | { |
| 2674 | verbose = false; |
| 2675 | codeGen->setVerbose(false); |
| 2676 | } |
| 2677 | verboseTrees = verbose && shouldUseVerboseTrees(); |
| 2678 | verboseSsa = verbose && shouldUseVerboseSsa(); |
| 2679 | asciiTrees = shouldDumpASCIITrees(); |
| 2680 | opts.dspDiffable = compIsForInlining() ? impInlineInfo->InlinerCompiler->opts.dspDiffable : false; |
| 2681 | #endif |
| 2682 | |
| 2683 | opts.compNeedSecurityCheck = false; |
| 2684 | opts.altJit = false; |
| 2685 | |
| 2686 | #if defined(LATE_DISASM) && !defined(DEBUG) |
| 2687 | // For non-debug builds with the late disassembler built in, we currently always do late disassembly |
| 2688 | // (we have no way to determine when not to, since we don't have class/method names). |
| 2689 | // In the DEBUG case, this is initialized to false, below. |
| 2690 | opts.doLateDisasm = true; |
| 2691 | #endif |
| 2692 | |
| 2693 | #ifdef DEBUG |
| 2694 | |
| 2695 | const JitConfigValues::MethodSet* pfAltJit; |
| 2696 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 2697 | { |
| 2698 | pfAltJit = &JitConfig.AltJitNgen(); |
| 2699 | } |
| 2700 | else |
| 2701 | { |
| 2702 | pfAltJit = &JitConfig.AltJit(); |
| 2703 | } |
| 2704 | |
| 2705 | #ifdef ALT_JIT |
| 2706 | if (pfAltJit->contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 2707 | { |
| 2708 | opts.altJit = true; |
| 2709 | } |
| 2710 | |
| 2711 | unsigned altJitLimit = ReinterpretHexAsDecimal(JitConfig.AltJitLimit()); |
| 2712 | if (altJitLimit > 0 && Compiler::jitTotalMethodCompiled >= altJitLimit) |
| 2713 | { |
| 2714 | opts.altJit = false; |
| 2715 | } |
| 2716 | #endif // ALT_JIT |
| 2717 | |
| 2718 | #else // !DEBUG |
| 2719 | |
| 2720 | const char* altJitVal; |
| 2721 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 2722 | { |
| 2723 | altJitVal = JitConfig.AltJitNgen().list(); |
| 2724 | } |
| 2725 | else |
| 2726 | { |
| 2727 | altJitVal = JitConfig.AltJit().list(); |
| 2728 | } |
| 2729 | |
| 2730 | #ifdef ALT_JIT |
| 2731 | // In release mode, you either get all methods or no methods. You must use "*" as the parameter, or we ignore it. |
| 2732 | // You don't get to give a regular expression of methods to match. |
| 2733 | // (Partially, this is because we haven't computed and stored the method and class name except in debug, and it |
| 2734 | // might be expensive to do so.) |
| 2735 | if ((altJitVal != nullptr) && (strcmp(altJitVal, "*" ) == 0)) |
| 2736 | { |
| 2737 | opts.altJit = true; |
| 2738 | } |
| 2739 | #endif // ALT_JIT |
| 2740 | |
| 2741 | #endif // !DEBUG |
| 2742 | |
| 2743 | #ifdef ALT_JIT |
| 2744 | // Take care of COMPlus_AltJitExcludeAssemblies. |
| 2745 | if (opts.altJit) |
| 2746 | { |
| 2747 | // First, initialize the AltJitExcludeAssemblies list, but only do it once. |
| 2748 | if (!s_pAltJitExcludeAssembliesListInitialized) |
| 2749 | { |
| 2750 | const wchar_t* wszAltJitExcludeAssemblyList = JitConfig.AltJitExcludeAssemblies(); |
| 2751 | if (wszAltJitExcludeAssemblyList != nullptr) |
| 2752 | { |
| 2753 | // NOTE: The assembly name list is allocated in the process heap, not in the no-release heap, |
| 2754 | // which is reclaimed for every compilation. This is ok because we only allocate once, |
| 2755 | // due to the static. |
| 2756 | s_pAltJitExcludeAssembliesList = new (HostAllocator::getHostAllocator()) |
| 2757 | AssemblyNamesList2(wszAltJitExcludeAssemblyList, HostAllocator::getHostAllocator()); |
| 2758 | } |
| 2759 | s_pAltJitExcludeAssembliesListInitialized = true; |
| 2760 | } |
| 2761 | |
| 2762 | if (s_pAltJitExcludeAssembliesList != nullptr) |
| 2763 | { |
| 2764 | // We have an exclusion list. See if this method is in an assembly that is on the list. |
| 2765 | // Note that we check this for every method, since we might inline across modules, and |
| 2766 | // if the inlinee module is on the list, we don't want to use the altjit for it. |
| 2767 | const char* methodAssemblyName = info.compCompHnd->getAssemblyName( |
| 2768 | info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); |
| 2769 | if (s_pAltJitExcludeAssembliesList->IsInList(methodAssemblyName)) |
| 2770 | { |
| 2771 | opts.altJit = false; |
| 2772 | } |
| 2773 | } |
| 2774 | } |
| 2775 | #endif // ALT_JIT |
| 2776 | |
| 2777 | #ifdef DEBUG |
| 2778 | |
| 2779 | bool altJitConfig = !pfAltJit->isEmpty(); |
| 2780 | |
| 2781 | // If we have a non-empty AltJit config then we change all of these other |
| 2782 | // config values to refer only to the AltJit. Otherwise, a lot of COMPlus_* variables |
| 2783 | // would apply to both the altjit and the normal JIT, but we only care about |
| 2784 | // debugging the altjit if the COMPlus_AltJit configuration is set. |
| 2785 | // |
| 2786 | if (compIsForImportOnly() && (!altJitConfig || opts.altJit)) |
| 2787 | { |
| 2788 | if (JitConfig.JitImportBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 2789 | { |
| 2790 | assert(!"JitImportBreak reached" ); |
| 2791 | } |
| 2792 | } |
| 2793 | |
| 2794 | bool verboseDump = false; |
| 2795 | bool dumpIR = false; |
| 2796 | bool dumpIRTypes = false; |
| 2797 | bool dumpIRLocals = false; |
| 2798 | bool dumpIRRegs = false; |
| 2799 | bool dumpIRSsa = false; |
| 2800 | bool dumpIRValnums = false; |
| 2801 | bool dumpIRCosts = false; |
| 2802 | bool dumpIRFlags = false; |
| 2803 | bool dumpIRKinds = false; |
| 2804 | bool dumpIRNodes = false; |
| 2805 | bool dumpIRNoLists = false; |
| 2806 | bool dumpIRNoLeafs = false; |
| 2807 | bool dumpIRNoStmts = false; |
| 2808 | bool dumpIRTrees = false; |
| 2809 | bool dumpIRLinear = false; |
| 2810 | bool dumpIRDataflow = false; |
| 2811 | bool dumpIRBlockHeaders = false; |
| 2812 | bool dumpIRExit = false; |
| 2813 | LPCWSTR dumpIRPhase = nullptr; |
| 2814 | LPCWSTR dumpIRFormat = nullptr; |
| 2815 | |
| 2816 | if (!altJitConfig || opts.altJit) |
| 2817 | { |
| 2819 | |
| 2820 | // We should only enable 'verboseDump' when we are actually compiling a matching method |
| 2821 | // and not enable it when we are just considering inlining a matching method. |
| 2822 | // |
| 2823 | if (!compIsForInlining()) |
| 2824 | { |
| 2825 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 2826 | { |
| 2827 | if (JitConfig.NgenDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 2828 | { |
| 2829 | verboseDump = true; |
| 2830 | } |
| 2831 | unsigned ngenHashDumpVal = (unsigned)JitConfig.NgenHashDump(); |
| 2832 | if ((ngenHashDumpVal != (DWORD)-1) && (ngenHashDumpVal == info.compMethodHash())) |
| 2833 | { |
| 2834 | verboseDump = true; |
| 2835 | } |
| 2836 | if (JitConfig.NgenDumpIR().contains(info.compMethodName, info.compClassName, |
| 2837 | &info.compMethodInfo->args)) |
| 2838 | { |
| 2839 | dumpIR = true; |
| 2840 | } |
| 2841 | unsigned ngenHashDumpIRVal = (unsigned)JitConfig.NgenHashDumpIR(); |
| 2842 | if ((ngenHashDumpIRVal != (DWORD)-1) && (ngenHashDumpIRVal == info.compMethodHash())) |
| 2843 | { |
| 2844 | dumpIR = true; |
| 2845 | } |
| 2846 | dumpIRFormat = JitConfig.NgenDumpIRFormat(); |
| 2847 | dumpIRPhase = JitConfig.NgenDumpIRPhase(); |
| 2848 | } |
| 2849 | else |
| 2850 | { |
| 2851 | if (JitConfig.JitDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 2852 | { |
| 2853 | verboseDump = true; |
| 2854 | } |
| 2855 | unsigned jitHashDumpVal = (unsigned)JitConfig.JitHashDump(); |
| 2856 | if ((jitHashDumpVal != (DWORD)-1) && (jitHashDumpVal == info.compMethodHash())) |
| 2857 | { |
| 2858 | verboseDump = true; |
| 2859 | } |
| 2860 | if (JitConfig.JitDumpIR().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 2861 | { |
| 2862 | dumpIR = true; |
| 2863 | } |
| 2864 | unsigned jitHashDumpIRVal = (unsigned)JitConfig.JitHashDumpIR(); |
| 2865 | if ((jitHashDumpIRVal != (DWORD)-1) && (jitHashDumpIRVal == info.compMethodHash())) |
| 2866 | { |
| 2867 | dumpIR = true; |
| 2868 | } |
| 2869 | dumpIRFormat = JitConfig.JitDumpIRFormat(); |
| 2870 | dumpIRPhase = JitConfig.JitDumpIRPhase(); |
| 2871 | } |
| 2872 | } |
| 2873 | |
| 2874 | if (dumpIRPhase == nullptr) |
| 2875 | { |
dumpIRPhase = W("*");
| 2877 | } |
| 2878 | |
| 2879 | this->dumpIRPhase = dumpIRPhase; |
| 2880 | |
| 2881 | if (dumpIRFormat != nullptr) |
| 2882 | { |
| 2883 | this->dumpIRFormat = dumpIRFormat; |
| 2884 | } |
| 2885 | |
| 2886 | dumpIRTrees = false; |
| 2887 | dumpIRLinear = true; |
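// The format string is a comma-separated list of the specifiers parsed below.
// For example (a hypothetical setting): COMPlus_JitDumpIRFormat=linear,types,valnums
// requests a linear dump annotated with node types and value numbers.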
| 2888 | if (dumpIRFormat != nullptr) |
| 2889 | { |
| 2890 | for (LPCWSTR p = dumpIRFormat; (*p != 0);) |
| 2891 | { |
| 2892 | for (; (*p != 0); p++) |
| 2893 | { |
| 2894 | if (*p != L' ') |
| 2895 | { |
| 2896 | break; |
| 2897 | } |
| 2898 | } |
| 2899 | |
| 2900 | if (*p == 0) |
| 2901 | { |
| 2902 | break; |
| 2903 | } |
| 2904 | |
| 2905 | static bool dumpedHelp = false; |
| 2906 | |
| 2907 | if ((*p == L'?') && (!dumpedHelp)) |
| 2908 | { |
| 2909 | printf("*******************************************************************************\n" ); |
| 2910 | printf("\n" ); |
| 2911 | dFormatIR(); |
| 2912 | printf("\n" ); |
| 2913 | printf("\n" ); |
| 2914 | printf("Available specifiers (comma separated):\n" ); |
| 2915 | printf("\n" ); |
| 2916 | printf("? dump out value of COMPlus_JitDumpIRFormat and this list of values\n" ); |
| 2917 | printf("\n" ); |
| 2918 | printf("linear linear IR dump (default)\n" ); |
| 2919 | printf("tree tree IR dump (traditional)\n" ); |
| 2920 | printf("mixed intermingle tree dump with linear IR dump\n" ); |
| 2921 | printf("\n" ); |
| 2922 | printf("dataflow use data flow form of linear IR dump\n" ); |
| 2923 | printf("structural use structural form of linear IR dump\n" ); |
| 2924 | printf("all implies structural, include everything\n" ); |
| 2925 | printf("\n" ); |
| 2926 | printf("kinds include tree node kinds in dump, example: \"kinds=[LEAF][LOCAL]\"\n" ); |
| 2927 | printf("flags include tree node flags in dump, example: \"flags=[CALL][GLOB_REF]\" \n" ); |
| 2928 | printf("types includes tree node types in dump, example: \".int\"\n" ); |
| 2929 | printf("locals include local numbers and tracking numbers in dump, example: \"(V3,T1)\"\n" ); |
| 2930 | printf("regs include register assignments in dump, example: \"(rdx)\"\n" ); |
| 2931 | printf("ssa include SSA numbers in dump, example: \"<d:3>\" or \"<u:3>\"\n" ); |
| 2932 | printf("valnums include Value numbers in dump, example: \"<v:$c4>\" or \"<v:$c4,$c5>\"\n" ); |
| 2933 | printf("\n" ); |
| 2934 | printf("nolist exclude GT_LIST nodes from dump\n" ); |
| 2935 | printf("noleafs exclude LEAF nodes from dump (fold into operations)\n" ); |
| 2936 | printf("nostmts exclude GT_STMTS from dump (unless required by dependencies)\n" ); |
| 2937 | printf("\n" ); |
| 2938 | printf("blkhdrs include block headers\n" ); |
| 2939 | printf("exit exit program after last phase dump (used with single method)\n" ); |
| 2940 | printf("\n" ); |
| 2941 | printf("*******************************************************************************\n" ); |
| 2942 | dumpedHelp = true; |
| 2943 | } |
| 2944 | |
| 2945 | if (wcsncmp(p, W("types" ), 5) == 0) |
| 2946 | { |
| 2947 | dumpIRTypes = true; |
| 2948 | } |
| 2949 | |
| 2950 | if (wcsncmp(p, W("locals" ), 6) == 0) |
| 2951 | { |
| 2952 | dumpIRLocals = true; |
| 2953 | } |
| 2954 | |
| 2955 | if (wcsncmp(p, W("regs" ), 4) == 0) |
| 2956 | { |
| 2957 | dumpIRRegs = true; |
| 2958 | } |
| 2959 | |
| 2960 | if (wcsncmp(p, W("ssa" ), 3) == 0) |
| 2961 | { |
| 2962 | dumpIRSsa = true; |
| 2963 | } |
| 2964 | |
| 2965 | if (wcsncmp(p, W("valnums" ), 7) == 0) |
| 2966 | { |
| 2967 | dumpIRValnums = true; |
| 2968 | } |
| 2969 | |
| 2970 | if (wcsncmp(p, W("costs" ), 5) == 0) |
| 2971 | { |
| 2972 | dumpIRCosts = true; |
| 2973 | } |
| 2974 | |
| 2975 | if (wcsncmp(p, W("flags" ), 5) == 0) |
| 2976 | { |
| 2977 | dumpIRFlags = true; |
| 2978 | } |
| 2979 | |
| 2980 | if (wcsncmp(p, W("kinds" ), 5) == 0) |
| 2981 | { |
| 2982 | dumpIRKinds = true; |
| 2983 | } |
| 2984 | |
| 2985 | if (wcsncmp(p, W("nodes" ), 5) == 0) |
| 2986 | { |
| 2987 | dumpIRNodes = true; |
| 2988 | } |
| 2989 | |
| 2990 | if (wcsncmp(p, W("exit" ), 4) == 0) |
| 2991 | { |
| 2992 | dumpIRExit = true; |
| 2993 | } |
| 2994 | |
| 2995 | if (wcsncmp(p, W("nolists" ), 7) == 0) |
| 2996 | { |
| 2997 | dumpIRNoLists = true; |
| 2998 | } |
| 2999 | |
| 3000 | if (wcsncmp(p, W("noleafs" ), 7) == 0) |
| 3001 | { |
| 3002 | dumpIRNoLeafs = true; |
| 3003 | } |
| 3004 | |
| 3005 | if (wcsncmp(p, W("nostmts" ), 7) == 0) |
| 3006 | { |
| 3007 | dumpIRNoStmts = true; |
| 3008 | } |
| 3009 | |
| 3010 | if (wcsncmp(p, W("trees" ), 5) == 0) |
| 3011 | { |
| 3012 | dumpIRTrees = true; |
| 3013 | dumpIRLinear = false; |
| 3014 | } |
| 3015 | |
| 3016 | if (wcsncmp(p, W("structural" ), 10) == 0) |
| 3017 | { |
| 3018 | dumpIRLinear = true; |
| 3019 | dumpIRNoStmts = false; |
| 3020 | dumpIRNoLeafs = false; |
| 3021 | dumpIRNoLists = false; |
| 3022 | } |
| 3023 | |
| 3024 | if (wcsncmp(p, W("all" ), 3) == 0) |
| 3025 | { |
| 3026 | dumpIRLinear = true; |
| 3027 | dumpIRKinds = true; |
| 3028 | dumpIRFlags = true; |
| 3029 | dumpIRTypes = true; |
| 3030 | dumpIRLocals = true; |
| 3031 | dumpIRRegs = true; |
| 3032 | dumpIRSsa = true; |
| 3033 | dumpIRValnums = true; |
| 3034 | dumpIRCosts = true; |
| 3035 | dumpIRNoStmts = false; |
| 3036 | dumpIRNoLeafs = false; |
| 3037 | dumpIRNoLists = false; |
| 3038 | } |
| 3039 | |
| 3040 | if (wcsncmp(p, W("linear" ), 6) == 0) |
| 3041 | { |
| 3042 | dumpIRTrees = false; |
| 3043 | dumpIRLinear = true; |
| 3044 | } |
| 3045 | |
| 3046 | if (wcsncmp(p, W("mixed" ), 5) == 0) |
| 3047 | { |
| 3048 | dumpIRTrees = true; |
| 3049 | dumpIRLinear = true; |
| 3050 | } |
| 3051 | |
| 3052 | if (wcsncmp(p, W("dataflow" ), 8) == 0) |
| 3053 | { |
| 3054 | dumpIRDataflow = true; |
| 3055 | dumpIRNoLeafs = true; |
| 3056 | dumpIRNoLists = true; |
| 3057 | dumpIRNoStmts = true; |
| 3058 | } |
| 3059 | |
| 3060 | if (wcsncmp(p, W("blkhdrs" ), 7) == 0) |
| 3061 | { |
| 3062 | dumpIRBlockHeaders = true; |
| 3063 | } |
| 3064 | |
| 3065 | for (; (*p != 0); p++) |
| 3066 | { |
| 3067 | if (*p == L',') |
| 3068 | { |
| 3069 | p++; |
| 3070 | break; |
| 3071 | } |
| 3072 | } |
| 3073 | } |
| 3074 | } |
| 3075 | } |
| 3076 | |
| 3077 | if (verboseDump) |
| 3078 | { |
| 3079 | verbose = true; |
| 3080 | } |
| 3081 | |
| 3082 | if (dumpIR) |
| 3083 | { |
| 3084 | this->dumpIR = true; |
| 3085 | } |
| 3086 | |
| 3087 | if (dumpIRTypes) |
| 3088 | { |
| 3089 | this->dumpIRTypes = true; |
| 3090 | } |
| 3091 | |
| 3092 | if (dumpIRLocals) |
| 3093 | { |
| 3094 | this->dumpIRLocals = true; |
| 3095 | } |
| 3096 | |
| 3097 | if (dumpIRRegs) |
| 3098 | { |
| 3099 | this->dumpIRRegs = true; |
| 3100 | } |
| 3101 | |
| 3102 | if (dumpIRSsa) |
| 3103 | { |
| 3104 | this->dumpIRSsa = true; |
| 3105 | } |
| 3106 | |
| 3107 | if (dumpIRValnums) |
| 3108 | { |
| 3109 | this->dumpIRValnums = true; |
| 3110 | } |
| 3111 | |
| 3112 | if (dumpIRCosts) |
| 3113 | { |
| 3114 | this->dumpIRCosts = true; |
| 3115 | } |
| 3116 | |
| 3117 | if (dumpIRFlags) |
| 3118 | { |
| 3119 | this->dumpIRFlags = true; |
| 3120 | } |
| 3121 | |
| 3122 | if (dumpIRKinds) |
| 3123 | { |
| 3124 | this->dumpIRKinds = true; |
| 3125 | } |
| 3126 | |
| 3127 | if (dumpIRNodes) |
| 3128 | { |
| 3129 | this->dumpIRNodes = true; |
| 3130 | } |
| 3131 | |
| 3132 | if (dumpIRNoLists) |
| 3133 | { |
| 3134 | this->dumpIRNoLists = true; |
| 3135 | } |
| 3136 | |
| 3137 | if (dumpIRNoLeafs) |
| 3138 | { |
| 3139 | this->dumpIRNoLeafs = true; |
| 3140 | } |
| 3141 | |
| 3142 | if (dumpIRNoLeafs && dumpIRDataflow) |
| 3143 | { |
| 3144 | this->dumpIRDataflow = true; |
| 3145 | } |
| 3146 | |
| 3147 | if (dumpIRNoStmts) |
| 3148 | { |
| 3149 | this->dumpIRNoStmts = true; |
| 3150 | } |
| 3151 | |
| 3152 | if (dumpIRTrees) |
| 3153 | { |
| 3154 | this->dumpIRTrees = true; |
| 3155 | } |
| 3156 | |
| 3157 | if (dumpIRLinear) |
| 3158 | { |
| 3159 | this->dumpIRLinear = true; |
| 3160 | } |
| 3161 | |
| 3162 | if (dumpIRBlockHeaders) |
| 3163 | { |
| 3164 | this->dumpIRBlockHeaders = true; |
| 3165 | } |
| 3166 | |
| 3167 | if (dumpIRExit) |
| 3168 | { |
| 3169 | this->dumpIRExit = true; |
| 3170 | } |
| 3171 | |
| 3172 | #endif // DEBUG |
| 3173 | |
| 3174 | #ifdef FEATURE_SIMD |
// The minimum requirement for SIMD benefits is SSE2 on AMD64/x86.
| 3176 | featureSIMD = jitFlags->IsSet(JitFlags::JIT_FLAG_FEATURE_SIMD); |
| 3177 | setUsesSIMDTypes(false); |
| 3178 | #endif // FEATURE_SIMD |
| 3179 | |
| 3180 | if (compIsForImportOnly()) |
| 3181 | { |
| 3182 | return; |
| 3183 | } |
| 3184 | |
| 3185 | #if FEATURE_TAILCALL_OPT |
| 3186 | // By default opportunistic tail call optimization is enabled. |
| 3187 | // Recognition is done in the importer so this must be set for |
| 3188 | // inlinees as well. |
| 3189 | opts.compTailCallOpt = true; |
| 3190 | #endif // FEATURE_TAILCALL_OPT |
| 3191 | |
| 3192 | if (compIsForInlining()) |
| 3193 | { |
| 3194 | return; |
| 3195 | } |
| 3196 | |
// The rest of the opts fields that we initialize here
// should only be used when we generate code for the method.
// They should not be used when importing or inlining.
| 3200 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 3201 | |
| 3202 | #if FEATURE_TAILCALL_OPT |
| 3203 | opts.compTailCallLoopOpt = true; |
| 3204 | #endif // FEATURE_TAILCALL_OPT |
| 3205 | |
| 3206 | opts.genFPorder = true; |
| 3207 | opts.genFPopt = true; |
| 3208 | |
| 3209 | opts.instrCount = 0; |
| 3210 | opts.lvRefCount = 0; |
| 3211 | |
| 3212 | #ifdef PROFILING_SUPPORTED |
| 3213 | opts.compJitELTHookEnabled = false; |
| 3214 | #endif // PROFILING_SUPPORTED |
| 3215 | |
| 3216 | #ifdef DEBUG |
| 3217 | opts.dspInstrs = false; |
| 3218 | opts.dspEmit = false; |
| 3219 | opts.dspLines = false; |
| 3220 | opts.varNames = false; |
| 3221 | opts.dmpHex = false; |
| 3222 | opts.disAsm = false; |
| 3223 | opts.disAsmSpilled = false; |
| 3224 | opts.disDiffable = false; |
| 3225 | opts.dspCode = false; |
| 3226 | opts.dspEHTable = false; |
| 3227 | opts.dspDebugInfo = false; |
| 3228 | opts.dspGCtbls = false; |
| 3229 | opts.disAsm2 = false; |
| 3230 | opts.dspUnwind = false; |
| 3231 | opts.compLongAddress = false; |
| 3232 | opts.optRepeat = false; |
| 3233 | |
| 3234 | #ifdef LATE_DISASM |
| 3235 | opts.doLateDisasm = false; |
| 3236 | #endif // LATE_DISASM |
| 3237 | |
| 3238 | compDebugBreak = false; |
| 3239 | |
| 3240 | // If we have a non-empty AltJit config then we change all of these other |
| 3241 | // config values to refer only to the AltJit. |
| 3242 | // |
| 3243 | if (!altJitConfig || opts.altJit) |
| 3244 | { |
| 3245 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 3246 | { |
| 3247 | if ((JitConfig.NgenOrder() & 1) == 1) |
| 3248 | { |
| 3249 | opts.dspOrder = true; |
| 3250 | } |
| 3251 | |
| 3252 | if (JitConfig.NgenGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3253 | { |
| 3254 | opts.dspGCtbls = true; |
| 3255 | } |
| 3256 | |
| 3257 | if (JitConfig.NgenDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3258 | { |
| 3259 | opts.disAsm = true; |
| 3260 | } |
| 3261 | if (JitConfig.NgenDisasm().contains("SPILLED" , nullptr, nullptr)) |
| 3262 | { |
| 3263 | opts.disAsmSpilled = true; |
| 3264 | } |
| 3265 | |
| 3266 | if (JitConfig.NgenUnwindDump().contains(info.compMethodName, info.compClassName, |
| 3267 | &info.compMethodInfo->args)) |
| 3268 | { |
| 3269 | opts.dspUnwind = true; |
| 3270 | } |
| 3271 | |
| 3272 | if (JitConfig.NgenEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3273 | { |
| 3274 | opts.dspEHTable = true; |
| 3275 | } |
| 3276 | |
| 3277 | if (JitConfig.NgenDebugDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3278 | { |
| 3279 | opts.dspDebugInfo = true; |
| 3280 | } |
| 3281 | } |
| 3282 | else |
| 3283 | { |
| 3284 | bool disEnabled = true; |
| 3285 | |
| 3286 | // Setup assembly name list for disassembly, if not already set up. |
| 3287 | if (!s_pJitDisasmIncludeAssembliesListInitialized) |
| 3288 | { |
| 3289 | const wchar_t* assemblyNameList = JitConfig.JitDisasmAssemblies(); |
| 3290 | if (assemblyNameList != nullptr) |
| 3291 | { |
| 3292 | s_pJitDisasmIncludeAssembliesList = new (HostAllocator::getHostAllocator()) |
| 3293 | AssemblyNamesList2(assemblyNameList, HostAllocator::getHostAllocator()); |
| 3294 | } |
| 3295 | s_pJitDisasmIncludeAssembliesListInitialized = true; |
| 3296 | } |
| 3297 | |
| 3298 | // If we have an assembly name list for disassembly, also check this method's assembly. |
| 3299 | if (s_pJitDisasmIncludeAssembliesList != nullptr) |
| 3300 | { |
| 3301 | const char* assemblyName = info.compCompHnd->getAssemblyName( |
| 3302 | info.compCompHnd->getModuleAssembly(info.compCompHnd->getClassModule(info.compClassHnd))); |
| 3303 | |
| 3304 | if (!s_pJitDisasmIncludeAssembliesList->IsInList(assemblyName)) |
| 3305 | { |
| 3306 | disEnabled = false; |
| 3307 | } |
| 3308 | } |
| 3309 | |
| 3310 | if (disEnabled) |
| 3311 | { |
| 3312 | if ((JitConfig.JitOrder() & 1) == 1) |
| 3313 | { |
| 3314 | opts.dspOrder = true; |
| 3315 | } |
| 3316 | |
| 3317 | if (JitConfig.JitGCDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3318 | { |
| 3319 | opts.dspGCtbls = true; |
| 3320 | } |
| 3321 | |
| 3322 | if (JitConfig.JitDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3323 | { |
| 3324 | opts.disAsm = true; |
| 3325 | } |
| 3326 | |
| 3327 | if (JitConfig.JitDisasm().contains("SPILLED" , nullptr, nullptr)) |
| 3328 | { |
| 3329 | opts.disAsmSpilled = true; |
| 3330 | } |
| 3331 | |
| 3332 | if (JitConfig.JitUnwindDump().contains(info.compMethodName, info.compClassName, |
| 3333 | &info.compMethodInfo->args)) |
| 3334 | { |
| 3335 | opts.dspUnwind = true; |
| 3336 | } |
| 3337 | |
| 3338 | if (JitConfig.JitEHDump().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3339 | { |
| 3340 | opts.dspEHTable = true; |
| 3341 | } |
| 3342 | |
| 3343 | if (JitConfig.JitDebugDump().contains(info.compMethodName, info.compClassName, |
| 3344 | &info.compMethodInfo->args)) |
| 3345 | { |
| 3346 | opts.dspDebugInfo = true; |
| 3347 | } |
| 3348 | } |
| 3349 | } |
| 3350 | |
| 3351 | #ifdef LATE_DISASM |
| 3352 | if (JitConfig.JitLateDisasm().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3353 | opts.doLateDisasm = true; |
| 3354 | #endif // LATE_DISASM |
| 3355 | |
| 3356 | // This one applies to both Ngen/Jit Disasm output: COMPlus_JitDiffableDasm=1 |
| 3357 | if (JitConfig.DiffableDasm() != 0) |
| 3358 | { |
| 3359 | opts.disDiffable = true; |
| 3360 | opts.dspDiffable = true; |
| 3361 | } |
| 3362 | |
| 3363 | if (JitConfig.JitLongAddress() != 0) |
| 3364 | { |
| 3365 | opts.compLongAddress = true; |
| 3366 | } |
| 3367 | |
| 3368 | if (JitConfig.JitOptRepeat().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3369 | { |
| 3370 | opts.optRepeat = true; |
| 3371 | } |
| 3372 | } |
| 3373 | |
| 3374 | if (verboseDump) |
| 3375 | { |
| 3376 | opts.dspCode = true; |
| 3377 | opts.dspEHTable = true; |
| 3378 | opts.dspGCtbls = true; |
| 3379 | opts.disAsm2 = true; |
| 3380 | opts.dspUnwind = true; |
| 3381 | verbose = true; |
| 3382 | verboseTrees = shouldUseVerboseTrees(); |
| 3383 | verboseSsa = shouldUseVerboseSsa(); |
| 3384 | codeGen->setVerbose(true); |
| 3385 | } |
| 3386 | |
| 3387 | treesBeforeAfterMorph = (JitConfig.TreesBeforeAfterMorph() == 1); |
| 3388 | morphNum = 0; // Initialize the morphed-trees counting. |
| 3389 | |
| 3390 | expensiveDebugCheckLevel = JitConfig.JitExpensiveDebugCheckLevel(); |
| 3391 | if (expensiveDebugCheckLevel == 0) |
| 3392 | { |
| 3393 | // If we're in a stress mode that modifies the flowgraph, make 1 the default. |
| 3394 | if (fgStressBBProf() || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) |
| 3395 | { |
| 3396 | expensiveDebugCheckLevel = 1; |
| 3397 | } |
| 3398 | } |
| 3399 | |
| 3400 | if (verbose) |
| 3401 | { |
| 3402 | printf("****** START compiling %s (MethodHash=%08x)\n" , info.compFullName, info.compMethodHash()); |
| 3403 | printf("Generating code for %s %s\n" , Target::g_tgtPlatformName, Target::g_tgtCPUName); |
| 3404 | printf("" ); // in our logic this causes a flush |
| 3405 | } |
| 3406 | |
| 3407 | if (JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3408 | { |
| 3409 | assert(!"JitBreak reached" ); |
| 3410 | } |
| 3411 | |
| 3412 | unsigned jitHashBreakVal = (unsigned)JitConfig.JitHashBreak(); |
| 3413 | if ((jitHashBreakVal != (DWORD)-1) && (jitHashBreakVal == info.compMethodHash())) |
| 3414 | { |
| 3415 | assert(!"JitHashBreak reached" ); |
| 3416 | } |
| 3417 | |
| 3418 | if (verbose || |
| 3419 | JitConfig.JitDebugBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args) || |
| 3420 | JitConfig.JitBreak().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3421 | { |
| 3422 | compDebugBreak = true; |
| 3423 | } |
| 3424 | |
| 3425 | memset(compActiveStressModes, 0, sizeof(compActiveStressModes)); |
| 3426 | |
| 3427 | #endif // DEBUG |
| 3428 | |
| 3429 | //------------------------------------------------------------------------- |
| 3430 | |
| 3431 | #ifdef DEBUG |
| 3432 | assert(!codeGen->isGCTypeFixed()); |
| 3433 | opts.compGcChecks = (JitConfig.JitGCChecks() != 0) || compStressCompile(STRESS_GENERIC_VARN, 5); |
| 3434 | #endif |
| 3435 | |
| 3436 | #if defined(DEBUG) && defined(_TARGET_XARCH_) |
| 3437 | enum |
| 3438 | { |
| 3439 | STACK_CHECK_ON_RETURN = 0x1, |
| 3440 | STACK_CHECK_ON_CALL = 0x2, |
| 3441 | STACK_CHECK_ALL = 0x3 |
| 3442 | }; |
| 3443 | |
| 3444 | DWORD dwJitStackChecks = JitConfig.JitStackChecks(); |
| 3445 | if (compStressCompile(STRESS_GENERIC_VARN, 5)) |
| 3446 | { |
| 3447 | dwJitStackChecks = STACK_CHECK_ALL; |
| 3448 | } |
| 3449 | opts.compStackCheckOnRet = (dwJitStackChecks & DWORD(STACK_CHECK_ON_RETURN)) != 0; |
| 3450 | #if defined(_TARGET_X86_) |
| 3451 | opts.compStackCheckOnCall = (dwJitStackChecks & DWORD(STACK_CHECK_ON_CALL)) != 0; |
| 3452 | #endif // defined(_TARGET_X86_) |
| 3453 | #endif // defined(DEBUG) && defined(_TARGET_XARCH_) |
| 3454 | |
| 3455 | #if MEASURE_MEM_ALLOC |
| 3456 | s_dspMemStats = (JitConfig.DisplayMemStats() != 0); |
| 3457 | #endif |
| 3458 | |
| 3459 | #ifdef PROFILING_SUPPORTED |
| 3460 | opts.compNoPInvokeInlineCB = jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_NO_PINVOKE_INLINE); |
| 3461 | |
| 3462 | // Cache the profiler handle |
| 3463 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_PROF_ENTERLEAVE)) |
| 3464 | { |
| 3465 | BOOL hookNeeded; |
| 3466 | BOOL indirected; |
| 3467 | info.compCompHnd->GetProfilingHandle(&hookNeeded, &compProfilerMethHnd, &indirected); |
| 3468 | compProfilerHookNeeded = !!hookNeeded; |
| 3469 | compProfilerMethHndIndirected = !!indirected; |
| 3470 | } |
| 3471 | else |
| 3472 | { |
| 3473 | compProfilerHookNeeded = false; |
| 3474 | compProfilerMethHnd = nullptr; |
| 3475 | compProfilerMethHndIndirected = false; |
| 3476 | } |
| 3477 | |
| 3478 | // Honour COMPlus_JitELTHookEnabled only if VM has not asked us to generate profiler |
| 3479 | // hooks in the first place. That is, override VM only if it hasn't asked for a |
| 3480 | // profiler callback for this method. |
| 3481 | if (!compProfilerHookNeeded && (JitConfig.JitELTHookEnabled() != 0)) |
| 3482 | { |
| 3483 | opts.compJitELTHookEnabled = true; |
| 3484 | } |
| 3485 | |
| 3486 | // TBD: Exclude PInvoke stubs |
| 3487 | if (opts.compJitELTHookEnabled) |
| 3488 | { |
| 3489 | compProfilerMethHnd = (void*)DummyProfilerELTStub; |
| 3490 | compProfilerMethHndIndirected = false; |
| 3491 | } |
| 3492 | |
| 3493 | #endif // PROFILING_SUPPORTED |
| 3494 | |
| 3495 | #if FEATURE_TAILCALL_OPT |
| 3496 | const wchar_t* strTailCallOpt = JitConfig.TailCallOpt(); |
| 3497 | if (strTailCallOpt != nullptr) |
| 3498 | { |
| 3499 | opts.compTailCallOpt = (UINT)_wtoi(strTailCallOpt) != 0; |
| 3500 | } |
| 3501 | |
| 3502 | if (JitConfig.TailCallLoopOpt() == 0) |
| 3503 | { |
| 3504 | opts.compTailCallLoopOpt = false; |
| 3505 | } |
| 3506 | #endif |
| 3507 | |
| 3508 | opts.compScopeInfo = opts.compDbgInfo; |
| 3509 | |
| 3510 | #ifdef LATE_DISASM |
| 3511 | codeGen->getDisAssembler().disOpenForLateDisAsm(info.compMethodName, info.compClassName, |
| 3512 | info.compMethodInfo->args.pSig); |
| 3513 | #endif |
| 3514 | |
| 3515 | //------------------------------------------------------------------------- |
| 3516 | |
| 3517 | opts.compReloc = jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC); |
| 3518 | |
| 3519 | #ifdef DEBUG |
| 3520 | #if defined(_TARGET_XARCH_) |
| 3521 | // Whether encoding of absolute addr as PC-rel offset is enabled |
| 3522 | opts.compEnablePCRelAddr = (JitConfig.EnablePCRelAddr() != 0); |
| 3523 | #endif |
| 3524 | #endif // DEBUG |
| 3525 | |
| 3526 | opts.compProcedureSplitting = jitFlags->IsSet(JitFlags::JIT_FLAG_PROCSPLIT); |
| 3527 | |
| 3528 | #ifdef _TARGET_ARM64_ |
| 3529 | // TODO-ARM64-NYI: enable hot/cold splitting |
| 3530 | opts.compProcedureSplitting = false; |
| 3531 | #endif // _TARGET_ARM64_ |
| 3532 | |
| 3533 | #ifdef DEBUG |
| 3534 | opts.compProcedureSplittingEH = opts.compProcedureSplitting; |
| 3535 | #endif // DEBUG |
| 3536 | |
| 3537 | if (opts.compProcedureSplitting) |
| 3538 | { |
// Note that opts.compDbgCode is true under ngen for checked assemblies!
| 3540 | opts.compProcedureSplitting = !opts.compDbgCode; |
| 3541 | |
| 3542 | #ifdef DEBUG |
| 3543 | // JitForceProcedureSplitting is used to force procedure splitting on checked assemblies. |
| 3544 | // This is useful for debugging on a checked build. Note that we still only do procedure |
| 3545 | // splitting in the zapper. |
| 3546 | if (JitConfig.JitForceProcedureSplitting().contains(info.compMethodName, info.compClassName, |
| 3547 | &info.compMethodInfo->args)) |
| 3548 | { |
| 3549 | opts.compProcedureSplitting = true; |
| 3550 | } |
| 3551 | |
| 3552 | // JitNoProcedureSplitting will always disable procedure splitting. |
| 3553 | if (JitConfig.JitNoProcedureSplitting().contains(info.compMethodName, info.compClassName, |
| 3554 | &info.compMethodInfo->args)) |
| 3555 | { |
| 3556 | opts.compProcedureSplitting = false; |
| 3557 | } |
| 3558 | // |
| 3559 | // JitNoProcedureSplittingEH will disable procedure splitting in functions with EH. |
| 3560 | if (JitConfig.JitNoProcedureSplittingEH().contains(info.compMethodName, info.compClassName, |
| 3561 | &info.compMethodInfo->args)) |
| 3562 | { |
| 3563 | opts.compProcedureSplittingEH = false; |
| 3564 | } |
| 3565 | #endif |
| 3566 | } |
| 3567 | |
| 3568 | fgProfileBuffer = nullptr; |
| 3569 | fgProfileData_ILSizeMismatch = false; |
| 3570 | fgNumProfileRuns = 0; |
| 3571 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT)) |
| 3572 | { |
| 3573 | assert(!compIsForInlining()); |
| 3574 | HRESULT hr; |
| 3575 | hr = info.compCompHnd->getBBProfileData(info.compMethodHnd, &fgProfileBufferCount, &fgProfileBuffer, |
| 3576 | &fgNumProfileRuns); |
| 3577 | |
| 3578 | // a failed result that also has a non-NULL fgProfileBuffer |
| 3579 | // indicates that the ILSize for the method no longer matches |
| 3580 | // the ILSize for the method when profile data was collected. |
| 3581 | // |
| 3582 | // We will discard the IBC data in this case |
| 3583 | // |
| 3584 | if (FAILED(hr) && (fgProfileBuffer != nullptr)) |
| 3585 | { |
| 3586 | fgProfileData_ILSizeMismatch = true; |
| 3587 | fgProfileBuffer = nullptr; |
| 3588 | } |
| 3589 | #ifdef DEBUG |
| 3590 | // A successful result implies a non-NULL fgProfileBuffer |
| 3591 | // |
| 3592 | if (SUCCEEDED(hr)) |
| 3593 | { |
| 3594 | assert(fgProfileBuffer != nullptr); |
| 3595 | } |
| 3596 | |
| 3597 | // A failed result implies a NULL fgProfileBuffer |
| 3598 | // see implementation of Compiler::fgHaveProfileData() |
| 3599 | // |
| 3600 | if (FAILED(hr)) |
| 3601 | { |
| 3602 | assert(fgProfileBuffer == nullptr); |
| 3603 | } |
| 3604 | #endif |
| 3605 | } |
| 3606 | |
| 3607 | opts.compNeedStackProbes = false; |
| 3608 | |
| 3609 | #ifdef DEBUG |
| 3610 | if (JitConfig.StackProbesOverride() != 0 || compStressCompile(STRESS_GENERIC_VARN, 5)) |
| 3611 | { |
| 3612 | opts.compNeedStackProbes = true; |
| 3613 | } |
| 3614 | #endif |
| 3615 | |
| 3616 | #ifdef DEBUG |
| 3617 | // Now, set compMaxUncheckedOffsetForNullObject for STRESS_NULL_OBJECT_CHECK |
| 3618 | if (compStressCompile(STRESS_NULL_OBJECT_CHECK, 30)) |
| 3619 | { |
| 3620 | compMaxUncheckedOffsetForNullObject = (size_t)JitConfig.JitMaxUncheckedOffset(); |
| 3621 | if (verbose) |
| 3622 | { |
| 3623 | printf("STRESS_NULL_OBJECT_CHECK: compMaxUncheckedOffsetForNullObject=0x%X\n" , |
| 3624 | compMaxUncheckedOffsetForNullObject); |
| 3625 | } |
| 3626 | } |
| 3627 | |
| 3628 | if (verbose) |
| 3629 | { |
| 3630 | // If we are compiling for a specific tier, make that very obvious in the output. |
| 3631 | // Note that we don't expect multiple TIER flags to be set at one time, but there |
| 3632 | // is nothing preventing that. |
| 3633 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) |
| 3634 | { |
| 3635 | printf("OPTIONS: Tier-0 compilation (set COMPlus_TieredCompilation=0 to disable)\n" ); |
| 3636 | } |
| 3637 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1)) |
| 3638 | { |
| 3639 | printf("OPTIONS: Tier-1 compilation\n" ); |
| 3640 | } |
| 3641 | |
| 3642 | printf("OPTIONS: compCodeOpt = %s\n" , |
| 3643 | (opts.compCodeOpt == BLENDED_CODE) |
| 3644 | ? "BLENDED_CODE" |
| 3645 | : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" |
| 3646 | : (opts.compCodeOpt == FAST_CODE) ? "FAST_CODE" : "UNKNOWN_CODE" ); |
| 3647 | |
| 3648 | printf("OPTIONS: compDbgCode = %s\n" , dspBool(opts.compDbgCode)); |
| 3649 | printf("OPTIONS: compDbgInfo = %s\n" , dspBool(opts.compDbgInfo)); |
| 3650 | printf("OPTIONS: compDbgEnC = %s\n" , dspBool(opts.compDbgEnC)); |
| 3651 | printf("OPTIONS: compProcedureSplitting = %s\n" , dspBool(opts.compProcedureSplitting)); |
| 3652 | printf("OPTIONS: compProcedureSplittingEH = %s\n" , dspBool(opts.compProcedureSplittingEH)); |
| 3653 | |
| 3654 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) |
| 3655 | { |
| 3656 | printf("OPTIONS: using real profile data\n" ); |
| 3657 | } |
| 3658 | |
| 3659 | if (fgProfileData_ILSizeMismatch) |
| 3660 | { |
| 3661 | printf("OPTIONS: discarded IBC profile data due to mismatch in ILSize\n" ); |
| 3662 | } |
| 3663 | |
| 3664 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 3665 | { |
| 3666 | printf("OPTIONS: Jit invoked for ngen\n" ); |
| 3667 | } |
| 3668 | printf("OPTIONS: Stack probing is %s\n" , opts.compNeedStackProbes ? "ENABLED" : "DISABLED" ); |
| 3669 | } |
| 3670 | #endif |
| 3671 | |
| 3672 | opts.compGCPollType = GCPOLL_NONE; |
| 3673 | if (jitFlags->IsSet(JitFlags::JIT_FLAG_GCPOLL_CALLS)) |
| 3674 | { |
| 3675 | opts.compGCPollType = GCPOLL_CALL; |
| 3676 | } |
| 3677 | else if (jitFlags->IsSet(JitFlags::JIT_FLAG_GCPOLL_INLINE)) |
| 3678 | { |
| 3679 | // make sure that the EE didn't set both flags. |
| 3680 | assert(opts.compGCPollType == GCPOLL_NONE); |
| 3681 | opts.compGCPollType = GCPOLL_INLINE; |
| 3682 | } |
| 3683 | |
| 3684 | #ifdef PROFILING_SUPPORTED |
| 3685 | #ifdef UNIX_AMD64_ABI |
| 3686 | if (compIsProfilerHookNeeded()) |
| 3687 | { |
| 3688 | opts.compNeedToAlignFrame = true; |
| 3689 | } |
| 3690 | #endif // UNIX_AMD64_ABI |
| 3691 | #endif |
| 3692 | } |
| 3693 | |
| 3694 | #ifdef DEBUG |
| 3695 | |
| 3696 | bool Compiler::compJitHaltMethod() |
| 3697 | { |
| 3698 | /* This method returns true when we use an INS_BREAKPOINT to allow us to step into the generated native code */ |
/* Note that these two "Jit" environment variables also work for ngen images */
| 3700 | |
| 3701 | if (JitConfig.JitHalt().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3702 | { |
| 3703 | return true; |
| 3704 | } |
| 3705 | |
/* Use this Hash variant when there are a lot of methods with the same name and different signatures */
| 3707 | |
| 3708 | unsigned fJitHashHaltVal = (unsigned)JitConfig.JitHashHalt(); |
| 3709 | if ((fJitHashHaltVal != (unsigned)-1) && (fJitHashHaltVal == info.compMethodHash())) |
| 3710 | { |
| 3711 | return true; |
| 3712 | } |
| 3713 | |
| 3714 | return false; |
| 3715 | } |
| 3716 | |
| 3717 | /***************************************************************************** |
| 3718 | * Should we use a "stress-mode" for the given stressArea. We have different |
| 3719 | * areas to allow the areas to be mixed in different combinations in |
| 3720 | * different methods. |
| 3721 | * 'weight' indicates how often (as a percentage) the area should be stressed. |
| 3722 | * It should reflect the usefulness:overhead ratio. |
| 3723 | */ |
| 3724 | |
| 3725 | const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = { |
| 3726 | #define STRESS_MODE(mode) W("STRESS_") W(#mode), |
| 3727 | |
| 3728 | STRESS_MODES |
| 3729 | #undef STRESS_MODE |
| 3730 | }; |
| 3731 | |
| 3732 | bool Compiler::compStressCompile(compStressArea stressArea, unsigned weight) |
| 3733 | { |
| 3734 | unsigned hash; |
| 3735 | DWORD stressLevel; |
| 3736 | |
| 3737 | if (!bRangeAllowStress) |
| 3738 | { |
| 3739 | return false; |
| 3740 | } |
| 3741 | |
| 3742 | if (!JitConfig.JitStressOnly().isEmpty() && |
| 3743 | !JitConfig.JitStressOnly().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 3744 | { |
| 3745 | return false; |
| 3746 | } |
| 3747 | |
| 3748 | bool doStress = false; |
| 3749 | const wchar_t* strStressModeNames; |
| 3750 | |
| 3751 | // Does user explicitly prevent using this STRESS_MODE through the command line? |
| 3752 | const wchar_t* strStressModeNamesNot = JitConfig.JitStressModeNamesNot(); |
| 3753 | if ((strStressModeNamesNot != nullptr) && |
| 3754 | (wcsstr(strStressModeNamesNot, s_compStressModeNames[stressArea]) != nullptr)) |
| 3755 | { |
| 3756 | doStress = false; |
| 3757 | goto _done; |
| 3758 | } |
| 3759 | |
| 3760 | // Does user explicitly set this STRESS_MODE through the command line? |
| 3761 | strStressModeNames = JitConfig.JitStressModeNames(); |
| 3762 | if (strStressModeNames != nullptr) |
| 3763 | { |
| 3764 | if (wcsstr(strStressModeNames, s_compStressModeNames[stressArea]) != nullptr) |
| 3765 | { |
| 3766 | doStress = true; |
| 3767 | goto _done; |
| 3768 | } |
| 3769 | |
| 3770 | // This stress mode name did not match anything in the stress |
| 3771 | // mode whitelist. If user has requested only enable mode, |
| 3772 | // don't allow this stress mode to turn on. |
| 3773 | const bool onlyEnableMode = JitConfig.JitStressModeNamesOnly() != 0; |
| 3774 | |
| 3775 | if (onlyEnableMode) |
| 3776 | { |
| 3777 | doStress = false; |
| 3778 | goto _done; |
| 3779 | } |
| 3780 | } |
| 3781 | |
// 0: No stress (except when explicitly set in COMPlus_JitStressModeNames)
// !=2: Vary stress. Performance will be slightly/moderately degraded.
// 2: Check-all stress. Performance will be REALLY horrible.
| 3785 | stressLevel = getJitStressLevel(); |
| 3786 | |
| 3787 | assert(weight <= MAX_STRESS_WEIGHT); |
| 3788 | |
| 3789 | /* Check for boundary conditions */ |
| 3790 | |
| 3791 | if (stressLevel == 0 || weight == 0) |
| 3792 | { |
| 3793 | return false; |
| 3794 | } |
| 3795 | |
// Should we allow unlimited stress?
| 3797 | if (stressArea > STRESS_COUNT_VARN && stressLevel == 2) |
| 3798 | { |
| 3799 | return true; |
| 3800 | } |
| 3801 | |
| 3802 | if (weight == MAX_STRESS_WEIGHT) |
| 3803 | { |
| 3804 | doStress = true; |
| 3805 | goto _done; |
| 3806 | } |
| 3807 | |
| 3808 | // Get a hash which can be compared with 'weight' |
| 3809 | |
| 3810 | assert(stressArea != 0); |
| 3811 | hash = (info.compMethodHash() ^ stressArea ^ stressLevel) % MAX_STRESS_WEIGHT; |
| 3812 | |
| 3813 | assert(hash < MAX_STRESS_WEIGHT && weight <= MAX_STRESS_WEIGHT); |
| 3814 | doStress = (hash < weight); |
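// As a worked example: assuming MAX_STRESS_WEIGHT is 100, a weight of 30 makes
// roughly 30% of method hashes stress this area; folding stressArea and stressLevel
// into the hash varies which methods get picked for each area and level.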
| 3815 | |
| 3816 | _done: |
| 3817 | |
| 3818 | if (doStress && !compActiveStressModes[stressArea]) |
| 3819 | { |
| 3820 | if (verbose) |
| 3821 | { |
| 3822 | printf("\n\n*** JitStress: %ws ***\n\n" , s_compStressModeNames[stressArea]); |
| 3823 | } |
| 3824 | compActiveStressModes[stressArea] = 1; |
| 3825 | } |
| 3826 | |
| 3827 | return doStress; |
| 3828 | } |
| 3829 | |
| 3830 | #endif // DEBUG |
| 3831 | |
| 3832 | void Compiler::compInitDebuggingInfo() |
| 3833 | { |
| 3834 | assert(!compIsForInlining()); |
| 3835 | |
| 3836 | #ifdef DEBUG |
| 3837 | if (verbose) |
| 3838 | { |
| 3839 | printf("*************** In compInitDebuggingInfo() for %s\n" , info.compFullName); |
| 3840 | } |
| 3841 | #endif |
| 3842 | |
| 3843 | /*------------------------------------------------------------------------- |
| 3844 | * |
| 3845 | * Get hold of the local variable records, if there are any |
| 3846 | */ |
| 3847 | |
| 3848 | info.compVarScopesCount = 0; |
| 3849 | |
| 3850 | if (opts.compScopeInfo) |
| 3851 | { |
| 3852 | eeGetVars(); |
| 3853 | } |
| 3854 | |
| 3855 | compInitVarScopeMap(); |
| 3856 | |
| 3857 | if (opts.compScopeInfo || opts.compDbgCode) |
| 3858 | { |
| 3859 | compInitScopeLists(); |
| 3860 | } |
| 3861 | |
| 3862 | if (opts.compDbgCode && (info.compVarScopesCount > 0)) |
| 3863 | { |
| 3864 | /* Create a new empty basic block. fgExtendDbgLifetimes() may add |
| 3865 | initialization of variables which are in scope right from the |
| 3866 | start of the (real) first BB (and therefore artificially marked |
| 3867 | as alive) into this block. |
| 3868 | */ |
| 3869 | |
| 3870 | fgEnsureFirstBBisScratch(); |
| 3871 | |
| 3872 | fgInsertStmtAtEnd(fgFirstBB, gtNewNothingNode()); |
| 3873 | |
| 3874 | JITDUMP("Debuggable code - Add new %s to perform initialization of variables\n" , fgFirstBB->dspToString()); |
| 3875 | } |
| 3876 | |
| 3877 | /*------------------------------------------------------------------------- |
| 3878 | * |
| 3879 | * Read the stmt-offsets table and the line-number table |
| 3880 | */ |
| 3881 | |
| 3882 | info.compStmtOffsetsImplicit = ICorDebugInfo::NO_BOUNDARIES; |
| 3883 | |
| 3884 | // We can only report debug info for EnC at places where the stack is empty. |
| 3885 | // Actually, at places where there are not live temps. Else, we won't be able |
| 3886 | // to map between the old and the new versions correctly as we won't have |
| 3887 | // any info for the live temps. |
| 3888 | |
| 3889 | assert(!opts.compDbgEnC || !opts.compDbgInfo || |
| 3890 | 0 == (info.compStmtOffsetsImplicit & ~ICorDebugInfo::STACK_EMPTY_BOUNDARIES)); |
| 3891 | |
| 3892 | info.compStmtOffsetsCount = 0; |
| 3893 | |
| 3894 | if (opts.compDbgInfo) |
| 3895 | { |
| 3896 | /* Get hold of the line# records, if there are any */ |
| 3897 | |
| 3898 | eeGetStmtOffsets(); |
| 3899 | |
| 3900 | #ifdef DEBUG |
| 3901 | if (verbose) |
| 3902 | { |
| 3903 | printf("info.compStmtOffsetsCount = %d\n" , info.compStmtOffsetsCount); |
| 3904 | printf("info.compStmtOffsetsImplicit = %04Xh" , info.compStmtOffsetsImplicit); |
| 3905 | |
| 3906 | if (info.compStmtOffsetsImplicit) |
| 3907 | { |
| 3908 | printf(" ( " ); |
| 3909 | if (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) |
| 3910 | { |
| 3911 | printf("STACK_EMPTY " ); |
| 3912 | } |
| 3913 | if (info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) |
| 3914 | { |
| 3915 | printf("NOP " ); |
| 3916 | } |
| 3917 | if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) |
| 3918 | { |
| 3919 | printf("CALL_SITE " ); |
| 3920 | } |
| 3921 | printf(")" ); |
| 3922 | } |
| 3923 | printf("\n" ); |
| 3924 | IL_OFFSET* pOffs = info.compStmtOffsets; |
| 3925 | for (unsigned i = 0; i < info.compStmtOffsetsCount; i++, pOffs++) |
| 3926 | { |
| 3927 | printf("%02d) IL_%04Xh\n" , i, *pOffs); |
| 3928 | } |
| 3929 | } |
| 3930 | #endif |
| 3931 | } |
| 3932 | } |
| 3933 | |
| 3934 | void Compiler::compSetOptimizationLevel() |
| 3935 | { |
| 3936 | bool theMinOptsValue; |
| 3937 | #pragma warning(suppress : 4101) |
| 3938 | unsigned jitMinOpts; |
| 3939 | |
| 3940 | if (compIsForInlining()) |
| 3941 | { |
| 3942 | theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); |
| 3943 | goto _SetMinOpts; |
| 3944 | } |
| 3945 | |
| 3946 | theMinOptsValue = false; |
| 3947 | |
| 3948 | if (opts.compFlags == CLFLG_MINOPT) |
| 3949 | { |
| 3950 | JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n" , info.compFullName)); |
| 3951 | theMinOptsValue = true; |
| 3952 | } |
| 3953 | |
| 3954 | #ifdef DEBUG |
| 3955 | jitMinOpts = JitConfig.JitMinOpts(); |
| 3956 | |
| 3957 | if (!theMinOptsValue && (jitMinOpts > 0)) |
| 3958 | { |
// jitTotalMethodCompiled does not include the method that is being compiled now, so add 1.
| 3960 | unsigned methodCount = Compiler::jitTotalMethodCompiled + 1; |
| 3961 | unsigned methodCountMask = methodCount & 0xFFF; |
| 3962 | unsigned kind = (jitMinOpts & 0xF000000) >> 24; |
| 3963 | switch (kind) |
| 3964 | { |
| 3965 | default: |
| 3966 | if (jitMinOpts <= methodCount) |
| 3967 | { |
| 3968 | if (verbose) |
| 3969 | { |
| 3970 | printf(" Optimizations disabled by JitMinOpts and methodCount\n" ); |
| 3971 | } |
| 3972 | theMinOptsValue = true; |
| 3973 | } |
| 3974 | break; |
| 3975 | case 0xD: |
| 3976 | { |
| 3977 | unsigned firstMinopts = (jitMinOpts >> 12) & 0xFFF; |
| 3978 | unsigned secondMinopts = (jitMinOpts >> 0) & 0xFFF; |
| 3979 | |
| 3980 | if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) |
| 3981 | { |
| 3982 | if (verbose) |
| 3983 | { |
| 3984 | printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n" ); |
| 3985 | } |
| 3986 | theMinOptsValue = true; |
| 3987 | } |
| 3988 | } |
| 3989 | break; |
| 3990 | case 0xE: |
| 3991 | { |
| 3992 | unsigned startMinopts = (jitMinOpts >> 12) & 0xFFF; |
| 3993 | unsigned endMinopts = (jitMinOpts >> 0) & 0xFFF; |
| 3994 | |
| 3995 | if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) |
| 3996 | { |
| 3997 | if (verbose) |
| 3998 | { |
| 3999 | printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n" ); |
| 4000 | } |
| 4001 | theMinOptsValue = true; |
| 4002 | } |
| 4003 | } |
| 4004 | break; |
| 4005 | case 0xF: |
| 4006 | { |
| 4007 | unsigned bitsZero = (jitMinOpts >> 12) & 0xFFF; |
| 4008 | unsigned bitsOne = (jitMinOpts >> 0) & 0xFFF; |
| 4009 | |
| 4010 | if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) |
| 4011 | { |
| 4012 | if (verbose) |
| 4013 | { |
| 4014 | printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n" ); |
| 4015 | } |
| 4016 | theMinOptsValue = true; |
| 4017 | } |
| 4018 | } |
| 4019 | break; |
| 4020 | } |
| 4021 | } |
| 4022 | |
| 4023 | if (!theMinOptsValue) |
| 4024 | { |
| 4025 | if (JitConfig.JitMinOptsName().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 4026 | { |
| 4027 | theMinOptsValue = true; |
| 4028 | } |
| 4029 | } |
| 4030 | |
| 4031 | #if 0 |
| 4032 | // The code in this #if can be used to debug optimization issues according to method hash. |
| 4033 | // To use, uncomment, rebuild and set environment variables minoptshashlo and minoptshashhi. |
| 4034 | #ifdef DEBUG |
| 4035 | unsigned methHash = info.compMethodHash(); |
| 4036 | char* lostr = getenv("minoptshashlo" ); |
| 4037 | unsigned methHashLo = 0; |
| 4038 | if (lostr != nullptr) |
| 4039 | { |
| 4040 | sscanf_s(lostr, "%x" , &methHashLo); |
| 4041 | char* histr = getenv("minoptshashhi" ); |
| 4042 | unsigned methHashHi = UINT32_MAX; |
| 4043 | if (histr != nullptr) |
| 4044 | { |
| 4045 | sscanf_s(histr, "%x" , &methHashHi); |
| 4046 | if (methHash >= methHashLo && methHash <= methHashHi) |
| 4047 | { |
| 4048 | printf("MinOpts for method %s, hash = 0x%x.\n" , |
| 4049 | info.compFullName, info.compMethodHash()); |
| 4050 | printf("" ); // in our logic this causes a flush |
| 4051 | theMinOptsValue = true; |
| 4052 | } |
| 4053 | } |
| 4054 | } |
| 4055 | #endif |
| 4056 | #endif |
| 4057 | |
| 4058 | if (compStressCompile(STRESS_MIN_OPTS, 5)) |
| 4059 | { |
| 4060 | theMinOptsValue = true; |
| 4061 | } |
| 4062 | // For PREJIT we never drop down to MinOpts |
// unless CLFLG_MINOPT is set.
| 4064 | else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 4065 | { |
| 4066 | if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) |
| 4067 | { |
| 4068 | JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n" , info.compFullName)); |
| 4069 | theMinOptsValue = true; |
| 4070 | } |
| 4071 | else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) |
| 4072 | { |
| 4073 | JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n" , info.compFullName)); |
| 4074 | theMinOptsValue = true; |
| 4075 | } |
| 4076 | else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) |
| 4077 | { |
| 4078 | JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n" , info.compFullName)); |
| 4079 | theMinOptsValue = true; |
| 4080 | } |
| 4081 | else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) |
| 4082 | { |
| 4083 | JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n" , info.compFullName)); |
| 4084 | theMinOptsValue = true; |
| 4085 | } |
| 4086 | else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) |
| 4087 | { |
| 4088 | JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n" , info.compFullName)); |
| 4089 | theMinOptsValue = true; |
| 4090 | } |
| 4091 | if (theMinOptsValue == true) |
| 4092 | { |
| 4093 | JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " |
| 4094 | "%3d,%3d for method %s\n" , |
| 4095 | info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); |
| 4096 | if (JitConfig.JitBreakOnMinOpts() != 0) |
| 4097 | { |
| 4098 | assert(!"MinOpts enabled" ); |
| 4099 | } |
| 4100 | } |
| 4101 | } |
| 4102 | #else // !DEBUG |
| 4103 | // Retail check if we should force Minopts due to the complexity of the method |
| 4104 | // For PREJIT we never drop down to MinOpts |
// unless CLFLG_MINOPT is set.
| 4106 | if (!theMinOptsValue && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && |
| 4107 | ((DEFAULT_MIN_OPTS_CODE_SIZE < info.compILCodeSize) || (DEFAULT_MIN_OPTS_INSTR_COUNT < opts.instrCount) || |
| 4108 | (DEFAULT_MIN_OPTS_BB_COUNT < fgBBcount) || (DEFAULT_MIN_OPTS_LV_NUM_COUNT < lvaCount) || |
| 4109 | (DEFAULT_MIN_OPTS_LV_REF_COUNT < opts.lvRefCount))) |
| 4110 | { |
| 4111 | theMinOptsValue = true; |
| 4112 | } |
| 4113 | #endif // DEBUG |
| 4114 | |
| 4115 | JITLOG((LL_INFO10000, |
| 4116 | "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count %3d,%3d for method %s\n" , |
| 4117 | info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); |
| 4118 | |
| 4119 | #if 0 |
// The code in this #if has been useful in debugging loop cloning issues, by
// selectively enabling the loop cloning optimization according to
// method hash.
| 4123 | #ifdef DEBUG |
| 4124 | if (!theMinOptsValue) |
| 4125 | { |
| 4126 | unsigned methHash = info.compMethodHash(); |
| 4127 | char* lostr = getenv("opthashlo" ); |
| 4128 | unsigned methHashLo = 0; |
| 4129 | if (lostr != NULL) |
| 4130 | { |
| 4131 | sscanf_s(lostr, "%x" , &methHashLo); |
| 4132 | // methHashLo = (unsigned(atoi(lostr)) << 2); // So we don't have to use negative numbers. |
| 4133 | } |
| 4134 | char* histr = getenv("opthashhi" ); |
| 4135 | unsigned methHashHi = UINT32_MAX; |
| 4136 | if (histr != NULL) |
| 4137 | { |
| 4138 | sscanf_s(histr, "%x" , &methHashHi); |
| 4139 | // methHashHi = (unsigned(atoi(histr)) << 2); // So we don't have to use negative numbers. |
| 4140 | } |
| 4141 | if (methHash < methHashLo || methHash > methHashHi) |
| 4142 | { |
| 4143 | theMinOptsValue = true; |
| 4144 | } |
| 4145 | else |
| 4146 | { |
| 4147 | printf("Doing optimization in in %s (0x%x).\n" , info.compFullName, methHash); |
| 4148 | } |
| 4149 | } |
| 4150 | #endif |
| 4151 | #endif |
| 4152 | |
| 4153 | _SetMinOpts: |
| 4154 | |
| 4155 | // Set the MinOpts value |
| 4156 | opts.SetMinOpts(theMinOptsValue); |
| 4157 | |
| 4158 | #ifdef DEBUG |
| 4159 | if (verbose && !compIsForInlining()) |
| 4160 | { |
| 4161 | printf("OPTIONS: opts.MinOpts() == %s\n" , opts.MinOpts() ? "true" : "false" ); |
| 4162 | } |
| 4163 | #endif |
| 4164 | |
| 4165 | /* Control the optimizations */ |
| 4166 | |
| 4167 | if (opts.OptimizationDisabled()) |
| 4168 | { |
| 4169 | opts.compFlags &= ~CLFLG_MAXOPT; |
| 4170 | opts.compFlags |= CLFLG_MINOPT; |
| 4171 | } |
| 4172 | |
| 4173 | if (!compIsForInlining()) |
| 4174 | { |
| 4175 | codeGen->setFramePointerRequired(false); |
| 4176 | codeGen->setFrameRequired(false); |
| 4177 | |
| 4178 | if (opts.OptimizationDisabled()) |
| 4179 | { |
| 4180 | codeGen->setFrameRequired(true); |
| 4181 | } |
| 4182 | |
| 4183 | #if !defined(_TARGET_AMD64_) |
| 4184 | // The VM sets JitFlags::JIT_FLAG_FRAMED for two reasons: (1) the COMPlus_JitFramed variable is set, or |
| 4185 | // (2) the function is marked "noinline". The reason for #2 is that people mark functions |
// noinline to ensure they show up in a stack walk. But for AMD64, we don't need a frame
// pointer for the frame to show up in a stack walk.
| 4188 | if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_FRAMED)) |
| 4189 | codeGen->setFrameRequired(true); |
| 4190 | #endif |
| 4191 | |
| 4192 | if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELOC)) |
| 4193 | { |
| 4194 | codeGen->genAlignLoops = false; // loop alignment not supported for prejitted code |
| 4195 | |
| 4196 | // The zapper doesn't set JitFlags::JIT_FLAG_ALIGN_LOOPS, and there is |
| 4197 | // no reason for it to set it as the JIT doesn't currently support loop alignment |
| 4198 | // for prejitted images. (The JIT doesn't know the final address of the code, hence |
| 4199 | // it can't align code based on unknown addresses.) |
| 4200 | assert(!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALIGN_LOOPS)); |
| 4201 | } |
| 4202 | else |
| 4203 | { |
| 4204 | codeGen->genAlignLoops = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_ALIGN_LOOPS); |
| 4205 | } |
| 4206 | } |
| 4207 | |
| 4208 | info.compUnwrapContextful = opts.OptimizationEnabled(); |
| 4209 | |
| 4210 | fgCanRelocateEHRegions = true; |
| 4211 | } |
| 4212 | |
| 4213 | #ifdef _TARGET_ARMARCH_ |
| 4214 | // Function compRsvdRegCheck: |
// Given a curState to use for calculating the total frame size, this
// returns true if REG_OPT_RSVD should be reserved so that it can be used
// to form large offsets when accessing stack-based LclVars, including both
// the incoming and outgoing argument areas.
| 4219 | // |
| 4220 | // The method advances the frame layout state to curState by calling |
| 4221 | // lvaFrameSize(curState). |
| 4222 | // |
| 4223 | bool Compiler::compRsvdRegCheck(FrameLayoutState curState) |
| 4224 | { |
| 4225 | // Always do the layout even if returning early. Callers might |
| 4226 | // depend on us to do the layout. |
| 4227 | unsigned frameSize = lvaFrameSize(curState); |
| 4228 | JITDUMP("\n" |
| 4229 | "compRsvdRegCheck\n" |
| 4230 | " frame size = %6d\n" |
| 4231 | " compArgSize = %6d\n" , |
| 4232 | frameSize, compArgSize); |
| 4233 | |
| 4234 | if (opts.MinOpts()) |
| 4235 | { |
| 4236 | // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go |
| 4237 | // over the limit of SP and FP offset ranges due to large |
| 4238 | // temps. |
| 4239 | JITDUMP(" Returning true (MinOpts)\n\n" ); |
| 4240 | return true; |
| 4241 | } |
| 4242 | |
| 4243 | unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; |
| 4244 | if (compFloatingPointUsed) |
| 4245 | { |
| 4246 | calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; |
| 4247 | } |
| 4248 | calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. See genPushCalleeSavedRegisters |
| 4249 | |
| 4250 | noway_assert(frameSize >= calleeSavedRegMaxSz); |
| 4251 | |
| 4252 | #if defined(_TARGET_ARM64_) |
| 4253 | |
| 4254 | // TODO-ARM64-CQ: update this! |
| 4255 | JITDUMP(" Returning true (ARM64)\n\n" ); |
| 4256 | return true; // just always assume we'll need it, for now |
| 4257 | |
| 4258 | #else // _TARGET_ARM_ |
| 4259 | |
| 4260 | // frame layout: |
| 4261 | // |
| 4262 | // ... high addresses ... |
| 4263 | // frame contents size |
| 4264 | // ------------------- ------------------------ |
| 4265 | // inArgs compArgSize (includes prespill) |
| 4266 | // caller SP ---> |
| 4267 | // prespill |
| 4268 | // LR REGSIZE_BYTES |
| 4269 | // R11 ---> R11 REGSIZE_BYTES |
| 4270 | // callee saved regs CALLEE_SAVED_REG_MAXSZ (32 bytes) |
| 4271 | // optional saved fp regs CALLEE_SAVED_FLOAT_MAXSZ (64 bytes) |
| 4272 | // lclSize |
| 4273 | // incl. TEMPS MAX_SPILL_TEMP_SIZE |
| 4274 | // incl. outArgs |
| 4275 | // SP ---> |
| 4276 | // ... low addresses ... |
| 4277 | // |
| 4278 | // When codeGen->isFramePointerRequired is true, R11 will be established as a frame pointer. |
| 4279 | // We can then use R11 to access incoming args with positive offsets, and LclVars with |
| 4280 | // negative offsets. |
| 4281 | // |
| 4282 | // In functions with EH, in the non-funclet (or main) region, even though we will have a |
| 4283 | // frame pointer, we can use SP with positive offsets to access any or all locals or arguments |
| 4284 | // that we can reach with SP-relative encodings. The funclet region might require the reserved |
| 4285 | // register, since it must use offsets from R11 to access the parent frame. |
| 4286 | |
| 4287 | unsigned maxR11PositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; |
| 4288 | JITDUMP(" maxR11PositiveEncodingOffset = %6d\n" , maxR11PositiveEncodingOffset); |
| 4289 | |
| 4290 | // Floating point load/store instructions (VLDR/VSTR) can address up to -0x3FC from R11, but we |
| 4291 | // don't know if there are either no integer locals, or if we don't need large negative offsets |
| 4292 | // for the integer locals, so we must use the integer max negative offset, which is a |
| 4293 | // smaller (absolute value) number. |
| 4294 | unsigned maxR11NegativeEncodingOffset = 0x00FF; // This is a negative offset from R11. |
| 4295 | JITDUMP(" maxR11NegativeEncodingOffset = %6d\n" , maxR11NegativeEncodingOffset); |
| 4296 | |
| 4297 | // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. |
| 4298 | unsigned maxR11PositiveOffset = compArgSize + (2 * REGSIZE_BYTES) - 1; |
| 4299 | JITDUMP(" maxR11PositiveOffset = %6d\n" , maxR11PositiveOffset); |
| 4300 | |
| 4301 | // The value is positive, but represents a negative offset from R11. |
| 4302 | // frameSize includes callee-saved space for R11 and LR, which are at non-negative offsets from R11 |
| 4303 | // (+0 and +4, respectively), so don't include those in the max possible negative offset. |
| 4304 | assert(frameSize >= (2 * REGSIZE_BYTES)); |
| 4305 | unsigned maxR11NegativeOffset = frameSize - (2 * REGSIZE_BYTES); |
| 4306 | JITDUMP(" maxR11NegativeOffset = %6d\n" , maxR11NegativeOffset); |
| 4307 | |
| 4308 | if (codeGen->isFramePointerRequired()) |
| 4309 | { |
| 4310 | if (maxR11NegativeOffset > maxR11NegativeEncodingOffset) |
| 4311 | { |
| 4312 | JITDUMP(" Returning true (frame required and maxR11NegativeOffset)\n\n" ); |
| 4313 | return true; |
| 4314 | } |
| 4315 | if (maxR11PositiveOffset > maxR11PositiveEncodingOffset) |
| 4316 | { |
| 4317 | JITDUMP(" Returning true (frame required and maxR11PositiveOffset)\n\n" ); |
| 4318 | return true; |
| 4319 | } |
| 4320 | } |
| 4321 | |
| 4322 | // Now consider the SP based frame case. Note that we will use SP based offsets to access the stack in R11 based |
| 4323 | // frames in the non-funclet main code area. |
| 4324 | |
| 4325 | unsigned maxSPPositiveEncodingOffset = compFloatingPointUsed ? 0x03FC : 0x0FFF; |
| 4326 | JITDUMP(" maxSPPositiveEncodingOffset = %6d\n" , maxSPPositiveEncodingOffset); |
| 4327 | |
| 4328 | // -1 because otherwise we are computing the address just beyond the last argument, which we don't need to do. |
| 4329 | assert(compArgSize + frameSize > 0); |
| 4330 | unsigned maxSPPositiveOffset = compArgSize + frameSize - 1; |
| 4331 | |
| 4332 | if (codeGen->isFramePointerUsed()) |
| 4333 | { |
| 4334 | // We have a frame pointer, so we can use it to access part of the stack, even if SP can't reach those parts. |
| 4335 | // We will still generate SP-relative offsets if SP can reach. |
| 4336 | |
| 4337 | // First, check that the stack between R11 and SP can be fully reached, either via negative offset from FP |
| 4338 | // or positive offset from SP. Don't count stored R11 or LR, which are reached from positive offsets from FP. |
| 4339 | |
| 4340 | unsigned maxSPLocalsCombinedOffset = frameSize - (2 * REGSIZE_BYTES) - 1; |
| 4341 | JITDUMP(" maxSPLocalsCombinedOffset = %6d\n" , maxSPLocalsCombinedOffset); |
| 4342 | |
| 4343 | if (maxSPLocalsCombinedOffset > maxSPPositiveEncodingOffset) |
| 4344 | { |
| 4345 | // Can R11 help? |
| 4346 | unsigned maxRemainingLocalsCombinedOffset = maxSPLocalsCombinedOffset - maxSPPositiveEncodingOffset; |
| 4347 | JITDUMP(" maxRemainingLocalsCombinedOffset = %6d\n" , maxRemainingLocalsCombinedOffset); |
| 4348 | |
| 4349 | if (maxRemainingLocalsCombinedOffset > maxR11NegativeEncodingOffset) |
| 4350 | { |
| 4351 | JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach entire stack between them)\n\n" ); |
| 4352 | return true; |
| 4353 | } |
| 4354 | |
| 4355 | // Otherwise, yes, we can address the remaining parts of the locals frame with negative offsets from R11. |
| 4356 | } |
| 4357 | |
| 4358 | // Check whether either R11 or SP can access the arguments. |
| 4359 | if ((maxR11PositiveOffset > maxR11PositiveEncodingOffset) && |
| 4360 | (maxSPPositiveOffset > maxSPPositiveEncodingOffset)) |
| 4361 | { |
| 4362 | JITDUMP(" Returning true (frame pointer exists; R11 and SP can't reach all arguments)\n\n" ); |
| 4363 | return true; |
| 4364 | } |
| 4365 | } |
| 4366 | else |
| 4367 | { |
| 4368 | if (maxSPPositiveOffset > maxSPPositiveEncodingOffset) |
| 4369 | { |
| 4370 | JITDUMP(" Returning true (no frame pointer exists; SP can't reach all of frame)\n\n" ); |
| 4371 | return true; |
| 4372 | } |
| 4373 | } |
| 4374 | |
| 4375 | // We won't need to reserve REG_OPT_RSVD. |
| 4376 | // |
| 4377 | JITDUMP(" Returning false\n\n" ); |
| 4378 | return false; |
| 4379 | #endif // _TARGET_ARM_ |
| 4380 | } |
| 4381 | #endif // _TARGET_ARMARCH_ |
| 4382 | |
| 4383 | void Compiler::compFunctionTraceStart() |
| 4384 | { |
| 4385 | #ifdef DEBUG |
| 4386 | if (compIsForInlining()) |
| 4387 | { |
| 4388 | return; |
| 4389 | } |
| 4390 | |
| 4391 | if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) |
| 4392 | { |
| 4393 | LONG newJitNestingLevel = InterlockedIncrement(&Compiler::jitNestingLevel); |
| 4394 | if (newJitNestingLevel <= 0) |
| 4395 | { |
| 4396 | printf("{ Illegal nesting level %d }\n" , newJitNestingLevel); |
| 4397 | } |
| 4398 | |
| 4399 | for (LONG i = 0; i < newJitNestingLevel - 1; i++) |
| 4400 | { |
| 4401 | printf(" " ); |
| 4402 | } |
| 4403 | printf("{ Start Jitting %s (MethodHash=%08x)\n" , info.compFullName, |
| 4404 | info.compMethodHash()); /* } editor brace matching workaround for this printf */ |
| 4405 | } |
| 4406 | #endif // DEBUG |
| 4407 | } |
| 4408 | |
| 4409 | void Compiler::compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI) |
| 4410 | { |
| 4411 | #ifdef DEBUG |
| 4412 | assert(!compIsForInlining()); |
| 4413 | |
| 4414 | if ((JitConfig.JitFunctionTrace() != 0) && !opts.disDiffable) |
| 4415 | { |
| 4416 | LONG newJitNestingLevel = InterlockedDecrement(&Compiler::jitNestingLevel); |
| 4417 | if (newJitNestingLevel < 0) |
| 4418 | { |
| 4419 | printf("{ Illegal nesting level %d }\n" , newJitNestingLevel); |
| 4420 | } |
| 4421 | |
| 4422 | for (LONG i = 0; i < newJitNestingLevel; i++) |
| 4423 | { |
| 4424 | printf(" " ); |
| 4425 | } |
| 4426 | /* { editor brace-matching workaround for following printf */ |
| 4427 | printf("} Jitted Entry %03x at" FMT_ADDR "method %s size %08x%s\n" , Compiler::jitTotalMethodCompiled, |
| 4428 | DBG_ADDR(methodCodePtr), info.compFullName, methodCodeSize, |
| 4429 | isNYI ? " NYI" : (compIsForImportOnly() ? " import only" : "" )); |
| 4430 | } |
| 4431 | #endif // DEBUG |
| 4432 | } |
| 4433 | |
| 4434 | //********************************************************************************************* |
| 4435 | // #Phases |
| 4436 | // |
| 4437 | // This is the most interesting 'toplevel' function in the JIT. It goes through the operations of |
| 4438 | // importing, morphing, optimizations and code generation. This is called from the EE through the |
| 4439 | // code:CILJit::compileMethod function. |
| 4440 | // |
| 4441 | // For an overview of the structure of the JIT, see: |
| 4442 | // https://github.com/dotnet/coreclr/blob/master/Documentation/botr/ryujit-overview.md |
| 4443 | // |
| 4444 | void Compiler::compCompile(void** methodCodePtr, ULONG* methodCodeSize, JitFlags* compileFlags) |
| 4445 | { |
| 4446 | if (compIsForInlining()) |
| 4447 | { |
| 4448 | // Notify root instance that an inline attempt is about to import IL |
| 4449 | impInlineRoot()->m_inlineStrategy->NoteImport(); |
| 4450 | } |
| 4451 | |
| 4452 | hashBv::Init(this); |
| 4453 | |
| 4454 | VarSetOps::AssignAllowUninitRhs(this, compCurLife, VarSetOps::UninitVal()); |
| 4455 | |
| 4456 | /* The temp holding the secret stub argument is used by fgImport() when importing the intrinsic. */ |
| 4457 | |
| 4458 | if (info.compPublishStubParam) |
| 4459 | { |
| 4460 | assert(lvaStubArgumentVar == BAD_VAR_NUM); |
| 4461 | lvaStubArgumentVar = lvaGrabTempWithImplicitUse(false DEBUGARG("stub argument" )); |
| 4462 | lvaTable[lvaStubArgumentVar].lvType = TYP_I_IMPL; |
| 4463 | } |
| 4464 | |
| 4465 | EndPhase(PHASE_PRE_IMPORT); |
| 4466 | |
| 4467 | compFunctionTraceStart(); |
| 4468 | |
| 4469 | /* Convert the instrs in each basic block to a tree based intermediate representation */ |
| 4470 | |
| 4471 | fgImport(); |
| 4472 | |
| 4473 | assert(!fgComputePredsDone); |
| 4474 | if (fgCheapPredsValid) |
| 4475 | { |
| 4476 | // Remove cheap predecessors before inlining and fat call transformation; |
| 4477 | // allowing the cheap predecessor lists to be inserted causes problems |
| 4478 | // with splitting existing blocks. |
| 4479 | fgRemovePreds(); |
| 4480 | } |
| 4481 | |
| 4482 | // Transform indirect calls that require control flow expansion. |
| 4483 | fgTransformIndirectCalls(); |
| 4484 | |
| 4485 | EndPhase(PHASE_IMPORTATION); |
| 4486 | |
| 4487 | if (compIsForInlining()) |
| 4488 | { |
| 4489 | /* Quit inlining if fgImport() failed for any reason. */ |
| 4490 | |
| 4491 | if (!compDonotInline()) |
| 4492 | { |
| 4493 | /* Filter out unimported BBs */ |
| 4494 | |
| 4495 | fgRemoveEmptyBlocks(); |
| 4496 | |
| 4497 | // Update type of return spill temp if we have gathered |
| 4498 | // better info when importing the inlinee, and the return |
| 4499 | // spill temp is single def. |
| 4500 | if (fgNeedReturnSpillTemp()) |
| 4501 | { |
| 4502 | CORINFO_CLASS_HANDLE retExprClassHnd = impInlineInfo->retExprClassHnd; |
| 4503 | if (retExprClassHnd != nullptr) |
| 4504 | { |
| 4505 | LclVarDsc* returnSpillVarDsc = lvaGetDesc(lvaInlineeReturnSpillTemp); |
| 4506 | |
| 4507 | if (returnSpillVarDsc->lvSingleDef) |
| 4508 | { |
| 4509 | lvaUpdateClass(lvaInlineeReturnSpillTemp, retExprClassHnd, |
| 4510 | impInlineInfo->retExprClassHndIsExact); |
| 4511 | } |
| 4512 | } |
| 4513 | } |
| 4514 | } |
| 4515 | |
| 4516 | EndPhase(PHASE_POST_IMPORT); |
| 4517 | |
| 4518 | #ifdef FEATURE_JIT_METHOD_PERF |
| 4519 | if (pCompJitTimer != nullptr) |
| 4520 | { |
| 4521 | #if MEASURE_CLRAPI_CALLS |
| 4522 | EndPhase(PHASE_CLR_API); |
| 4523 | #endif |
| 4524 | pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, false); |
| 4525 | } |
| 4526 | #endif |
| 4527 | |
| 4528 | return; |
| 4529 | } |
| 4530 | |
| 4531 | assert(!compDonotInline()); |
| 4532 | |
| 4533 | // Maybe the caller was not interested in generating code |
| 4534 | if (compIsForImportOnly()) |
| 4535 | { |
| 4536 | compFunctionTraceEnd(nullptr, 0, false); |
| 4537 | return; |
| 4538 | } |
| 4539 | |
| 4540 | #if !FEATURE_EH |
| 4541 | // If we aren't yet supporting EH in a compiler bring-up, remove as many EH handlers as possible, so |
| 4542 | // we can pass tests that contain try/catch EH, but don't actually throw any exceptions. |
| 4543 | fgRemoveEH(); |
| 4544 | #endif // !FEATURE_EH |
| 4545 | |
| 4546 | if (compileFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) |
| 4547 | { |
| 4548 | fgInstrumentMethod(); |
| 4549 | } |
| 4550 | |
// We could allow ESP frames. Just need to reserve space for
// pushing EBP if the method becomes an EBP-frame after an edit.
// Note that requiring an EBP frame disallows double alignment. Thus if we change this
// we either have to disallow double alignment for E&C some other way or handle it in EETwain.
| 4555 | |
| 4556 | if (opts.compDbgEnC) |
| 4557 | { |
| 4558 | codeGen->setFramePointerRequired(true); |
| 4559 | |
// Since we need slots for the security object near ebp, it's not possible
// to add them after an Edit without shifting all the locals.
// So we just always reserve space for these slots in case an Edit adds them.
| 4563 | opts.compNeedSecurityCheck = true; |
| 4564 | |
| 4565 | // We don't care about localloc right now. If we do support it, |
| 4566 | // EECodeManager::FixContextForEnC() needs to handle it smartly |
| 4567 | // in case the localloc was actually executed. |
| 4568 | // |
| 4569 | // compLocallocUsed = true; |
| 4570 | } |
| 4571 | |
| 4572 | EndPhase(PHASE_POST_IMPORT); |
| 4573 | |
| 4574 | /* Initialize the BlockSet epoch */ |
| 4575 | |
| 4576 | NewBasicBlockEpoch(); |
| 4577 | |
| 4578 | /* Massage the trees so that we can generate code out of them */ |
| 4579 | |
| 4580 | fgMorph(); |
| 4581 | EndPhase(PHASE_MORPH_END); |
| 4582 | |
| 4583 | /* GS security checks for unsafe buffers */ |
| 4584 | if (getNeedsGSSecurityCookie()) |
| 4585 | { |
| 4586 | #ifdef DEBUG |
| 4587 | if (verbose) |
| 4588 | { |
| 4589 | printf("\n*************** -GS checks for unsafe buffers \n" ); |
| 4590 | } |
| 4591 | #endif |
| 4592 | |
| 4593 | gsGSChecksInitCookie(); |
| 4594 | |
| 4595 | if (compGSReorderStackLayout) |
| 4596 | { |
| 4597 | gsCopyShadowParams(); |
| 4598 | } |
| 4599 | |
| 4600 | #ifdef DEBUG |
| 4601 | if (verbose) |
| 4602 | { |
| 4603 | fgDispBasicBlocks(true); |
| 4604 | printf("\n" ); |
| 4605 | } |
| 4606 | #endif |
| 4607 | } |
| 4608 | EndPhase(PHASE_GS_COOKIE); |
| 4609 | |
| 4610 | /* Compute bbNum, bbRefs and bbPreds */ |
| 4611 | |
| 4612 | JITDUMP("\nRenumbering the basic blocks for fgComputePred\n" ); |
| 4613 | fgRenumberBlocks(); |
| 4614 | |
| 4615 | noway_assert(!fgComputePredsDone); // This is the first time full (not cheap) preds will be computed. |
| 4616 | fgComputePreds(); |
| 4617 | EndPhase(PHASE_COMPUTE_PREDS); |
| 4618 | |
| 4619 | /* If we need to emit GC Poll calls, mark the blocks that need them now. This is conservative and can |
| 4620 | * be optimized later. */ |
| 4621 | fgMarkGCPollBlocks(); |
| 4622 | EndPhase(PHASE_MARK_GC_POLL_BLOCKS); |
| 4623 | |
| 4624 | /* From this point on the flowgraph information such as bbNum, |
| 4625 | * bbRefs or bbPreds has to be kept updated */ |
| 4626 | |
| 4627 | // Compute the block and edge weights |
| 4628 | fgComputeBlockAndEdgeWeights(); |
| 4629 | EndPhase(PHASE_COMPUTE_EDGE_WEIGHTS); |
| 4630 | |
| 4631 | #if FEATURE_EH_FUNCLETS |
| 4632 | |
| 4633 | /* Create funclets from the EH handlers. */ |
| 4634 | |
| 4635 | fgCreateFunclets(); |
| 4636 | EndPhase(PHASE_CREATE_FUNCLETS); |
| 4637 | |
| 4638 | #endif // FEATURE_EH_FUNCLETS |
| 4639 | |
| 4640 | if (opts.OptimizationEnabled()) |
| 4641 | { |
| 4642 | optOptimizeLayout(); |
| 4643 | EndPhase(PHASE_OPTIMIZE_LAYOUT); |
| 4644 | |
| 4645 | // Compute reachability sets and dominators. |
| 4646 | fgComputeReachability(); |
| 4647 | EndPhase(PHASE_COMPUTE_REACHABILITY); |
| 4648 | } |
| 4649 | |
| 4650 | if (opts.OptimizationEnabled()) |
| 4651 | { |
| 4652 | /* Perform loop inversion (i.e. transform "while" loops into |
| 4653 | "repeat" loops) and discover and classify natural loops |
| 4654 | (e.g. mark iterative loops as such). Also marks loop blocks |
| 4655 | and sets bbWeight to the loop nesting levels |
| 4656 | */ |
| 4657 | |
| 4658 | optOptimizeLoops(); |
| 4659 | EndPhase(PHASE_OPTIMIZE_LOOPS); |
| 4660 | |
// Clone loops with optimization opportunities, and choose
// between the original and the clone based on a dynamically evaluated condition.
| 4663 | optCloneLoops(); |
| 4664 | EndPhase(PHASE_CLONE_LOOPS); |
| 4665 | |
| 4666 | /* Unroll loops */ |
| 4667 | optUnrollLoops(); |
| 4668 | EndPhase(PHASE_UNROLL_LOOPS); |
| 4669 | } |
| 4670 | |
| 4671 | #ifdef DEBUG |
| 4672 | fgDebugCheckLinks(); |
| 4673 | #endif |
| 4674 | |
| 4675 | /* Create the variable table (and compute variable ref counts) */ |
| 4676 | |
| 4677 | lvaMarkLocalVars(); |
| 4678 | EndPhase(PHASE_MARK_LOCAL_VARS); |
| 4679 | |
// IMPORTANT: after this point, every place where trees are modified or cloned
// must also update the local variable reference counts.
// You can test lvaLocalVarRefCounted() (asserted just below) to see whether
// the local variable ref counts must be updated.
//
| 4685 | assert(lvaLocalVarRefCounted()); |
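// For example, a phase that clones a tree after this point should bump the counts for
// the locals the clone references. A minimal sketch, assuming LclVarDsc::incRefCnts
// still has its (weight, compiler) signature:
//
//     GenTree* clone = gtCloneExpr(tree);
//     lvaGetDesc(lclNum)->incRefCnts(block->getBBWeight(this), this);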
| 4686 | |
| 4687 | if (opts.OptimizationEnabled()) |
| 4688 | { |
| 4689 | /* Optimize boolean conditions */ |
| 4690 | |
| 4691 | optOptimizeBools(); |
| 4692 | EndPhase(PHASE_OPTIMIZE_BOOLS); |
| 4693 | |
| 4694 | // optOptimizeBools() might have changed the number of blocks; the dominators/reachability might be bad. |
| 4695 | } |
| 4696 | |
| 4697 | /* Figure out the order in which operators are to be evaluated */ |
| 4698 | fgFindOperOrder(); |
| 4699 | EndPhase(PHASE_FIND_OPER_ORDER); |
| 4700 | |
| 4701 | // Weave the tree lists. Anyone who modifies the tree shapes after |
| 4702 | // this point is responsible for calling fgSetStmtSeq() to keep the |
| 4703 | // nodes properly linked. |
| 4704 | // This can create GC poll calls, and create new BasicBlocks (without updating dominators/reachability). |
| 4705 | fgSetBlockOrder(); |
| 4706 | EndPhase(PHASE_SET_BLOCK_ORDER); |
| 4707 | |
| 4708 | // IMPORTANT, after this point, every place where tree topology changes must redo evaluation |
| 4709 | // order (gtSetStmtInfo) and relink nodes (fgSetStmtSeq) if required. |
| 4710 | CLANG_FORMAT_COMMENT_ANCHOR; |
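// For example, a later phase that reshapes 'stmt' would typically follow up with
// (a sketch using the helpers named above):
//
//     gtSetStmtInfo(stmt); // recompute evaluation order and costs
//     fgSetStmtSeq(stmt);  // relink the execution-order (gtNext/gtPrev) list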
| 4711 | |
| 4712 | #ifdef DEBUG |
| 4713 | // Now we have determined the order of evaluation and the gtCosts for every node. |
| 4714 | // If verbose, dump the full set of trees here before the optimization phases mutate them |
| 4715 | // |
| 4716 | if (verbose) |
| 4717 | { |
| 4718 | fgDispBasicBlocks(true); // 'true' will call fgDumpTrees() after dumping the BasicBlocks |
| 4719 | printf("\n" ); |
| 4720 | } |
| 4721 | #endif |
| 4722 | |
| 4723 | // At this point we know if we are fully interruptible or not |
| 4724 | if (opts.OptimizationEnabled()) |
| 4725 | { |
| 4726 | bool doSsa = true; |
| 4727 | bool doEarlyProp = true; |
| 4728 | bool doValueNum = true; |
| 4729 | bool doLoopHoisting = true; |
| 4730 | bool doCopyProp = true; |
| 4731 | bool doAssertionProp = true; |
| 4732 | bool doRangeAnalysis = true; |
| 4733 | int iterations = 1; |
| 4734 | |
| 4735 | #if defined(OPT_CONFIG) |
| 4736 | doSsa = (JitConfig.JitDoSsa() != 0); |
| 4737 | doEarlyProp = doSsa && (JitConfig.JitDoEarlyProp() != 0); |
| 4738 | doValueNum = doSsa && (JitConfig.JitDoValueNumber() != 0); |
| 4739 | doLoopHoisting = doValueNum && (JitConfig.JitDoLoopHoisting() != 0); |
| 4740 | doCopyProp = doValueNum && (JitConfig.JitDoCopyProp() != 0); |
| 4741 | doAssertionProp = doValueNum && (JitConfig.JitDoAssertionProp() != 0); |
| 4742 | doRangeAnalysis = doAssertionProp && (JitConfig.JitDoRangeAnalysis() != 0); |
| 4743 | |
| 4744 | if (opts.optRepeat) |
| 4745 | { |
| 4746 | iterations = JitConfig.JitOptRepeatCount(); |
| 4747 | } |
| 4748 | #endif // defined(OPT_CONFIG) |
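// Each pass of the loop below runs the SSA-based pipeline once. When opts.optRepeat is
// set (driven by the JitOptRepeatCount config read above, plus an enabling JitOptRepeat
// knob whose exact name is an assumption here), ResetOptAnnotations and
// RecomputeLoopInfo give later iterations a clean slate.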
| 4749 | |
| 4750 | while (iterations > 0) |
| 4751 | { |
| 4752 | if (doSsa) |
| 4753 | { |
| 4754 | fgSsaBuild(); |
| 4755 | EndPhase(PHASE_BUILD_SSA); |
| 4756 | } |
| 4757 | |
| 4758 | if (doEarlyProp) |
| 4759 | { |
| 4760 | /* Propagate array length and rewrite getType() method call */ |
| 4761 | optEarlyProp(); |
| 4762 | EndPhase(PHASE_EARLY_PROP); |
| 4763 | } |
| 4764 | |
| 4765 | if (doValueNum) |
| 4766 | { |
| 4767 | fgValueNumber(); |
| 4768 | EndPhase(PHASE_VALUE_NUMBER); |
| 4769 | } |
| 4770 | |
| 4771 | if (doLoopHoisting) |
| 4772 | { |
| 4773 | /* Hoist invariant code out of loops */ |
| 4774 | optHoistLoopCode(); |
| 4775 | EndPhase(PHASE_HOIST_LOOP_CODE); |
| 4776 | } |
| 4777 | |
| 4778 | if (doCopyProp) |
| 4779 | { |
| 4780 | /* Perform VN based copy propagation */ |
| 4781 | optVnCopyProp(); |
| 4782 | EndPhase(PHASE_VN_COPY_PROP); |
| 4783 | } |
| 4784 | |
| 4785 | #if FEATURE_ANYCSE |
| 4786 | /* Remove common sub-expressions */ |
| 4787 | optOptimizeCSEs(); |
| 4788 | #endif // FEATURE_ANYCSE |
| 4789 | |
| 4790 | #if ASSERTION_PROP |
| 4791 | if (doAssertionProp) |
| 4792 | { |
| 4793 | /* Assertion propagation */ |
| 4794 | optAssertionPropMain(); |
| 4795 | EndPhase(PHASE_ASSERTION_PROP_MAIN); |
| 4796 | } |
| 4797 | |
| 4798 | if (doRangeAnalysis) |
| 4799 | { |
| 4800 | /* Optimize array index range checks */ |
| 4801 | RangeCheck rc(this); |
| 4802 | rc.OptimizeRangeChecks(); |
| 4803 | EndPhase(PHASE_OPTIMIZE_INDEX_CHECKS); |
| 4804 | } |
| 4805 | #endif // ASSERTION_PROP |
| 4806 | |
/* Update the flowgraph if we modified it during the optimization phase */
| 4808 | if (fgModified) |
| 4809 | { |
| 4810 | fgUpdateFlowGraph(); |
| 4811 | EndPhase(PHASE_UPDATE_FLOW_GRAPH); |
| 4812 | |
| 4813 | // Recompute the edge weight if we have modified the flow graph |
| 4814 | fgComputeEdgeWeights(); |
| 4815 | EndPhase(PHASE_COMPUTE_EDGE_WEIGHTS2); |
| 4816 | } |
| 4817 | |
| 4818 | // Iterate if requested, resetting annotations first. |
| 4819 | if (--iterations == 0) |
| 4820 | { |
| 4821 | break; |
| 4822 | } |
| 4823 | ResetOptAnnotations(); |
| 4824 | RecomputeLoopInfo(); |
| 4825 | } |
| 4826 | } |
| 4827 | |
| 4828 | #ifdef _TARGET_AMD64_ |
| 4829 | // Check if we need to add the Quirk for the PPP backward compat issue |
| 4830 | compQuirkForPPPflag = compQuirkForPPP(); |
| 4831 | #endif |
| 4832 | |
| 4833 | fgDetermineFirstColdBlock(); |
| 4834 | EndPhase(PHASE_DETERMINE_FIRST_COLD_BLOCK); |
| 4835 | |
| 4836 | #ifdef DEBUG |
| 4837 | fgDebugCheckLinks(compStressCompile(STRESS_REMORPH_TREES, 50)); |
| 4838 | |
| 4839 | // Stash the current estimate of the function's size if necessary. |
| 4840 | if (verbose) |
| 4841 | { |
| 4842 | compSizeEstimate = 0; |
| 4843 | compCycleEstimate = 0; |
| 4844 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 4845 | { |
| 4846 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->getNextStmt()) |
| 4847 | { |
| 4848 | compSizeEstimate += stmt->GetCostSz(); |
| 4849 | compCycleEstimate += stmt->GetCostEx(); |
| 4850 | } |
| 4851 | } |
| 4852 | } |
| 4853 | #endif |
| 4854 | |
| 4855 | // rationalize trees |
| 4856 | Rationalizer rat(this); // PHASE_RATIONALIZE |
| 4857 | rat.Run(); |
| 4858 | |
| 4859 | // Here we do "simple lowering". When the RyuJIT backend works for all |
| 4860 | // platforms, this will be part of the more general lowering phase. For now, though, we do a separate |
| 4861 | // pass of "final lowering." We must do this before (final) liveness analysis, because this creates |
| 4862 | // range check throw blocks, in which the liveness must be correct. |
| 4863 | fgSimpleLowering(); |
| 4864 | EndPhase(PHASE_SIMPLE_LOWERING); |
| 4865 | |
| 4866 | #ifdef DEBUG |
| 4867 | fgDebugCheckBBlist(); |
| 4868 | fgDebugCheckLinks(); |
| 4869 | #endif |
| 4870 | |
| 4871 | /* Enable this to gather statistical data such as |
| 4872 | * call and register argument info, flowgraph and loop info, etc. */ |
| 4873 | |
| 4874 | compJitStats(); |
| 4875 | |
| 4876 | #ifdef _TARGET_ARM_ |
| 4877 | if (compLocallocUsed) |
| 4878 | { |
| 4879 | // We reserve REG_SAVED_LOCALLOC_SP to store SP on entry for stack unwinding |
| 4880 | codeGen->regSet.rsMaskResvd |= RBM_SAVED_LOCALLOC_SP; |
| 4881 | } |
| 4882 | #endif // _TARGET_ARM_ |
| 4883 | |
| 4884 | /* Assign registers to variables, etc. */ |
| 4885 | |
| 4886 | /////////////////////////////////////////////////////////////////////////////// |
| 4887 | // Dominator and reachability sets are no longer valid. They haven't been |
| 4888 | // maintained up to here, and shouldn't be used (unless recomputed). |
| 4889 | /////////////////////////////////////////////////////////////////////////////// |
| 4890 | fgDomsComputed = false; |
| 4891 | |
| 4892 | /* Create LSRA before Lowering, this way Lowering can initialize the TreeNode Map */ |
| 4893 | m_pLinearScan = getLinearScanAllocator(this); |
| 4894 | |
| 4895 | /* Lower */ |
| 4896 | m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING |
| 4897 | m_pLowering->Run(); |
| 4898 | |
| 4899 | StackLevelSetter stackLevelSetter(this); // PHASE_STACK_LEVEL_SETTER |
| 4900 | stackLevelSetter.Run(); |
| 4901 | |
lvaTrackedFixed = true; // We cannot add any new tracked variables after this point.
| 4903 | |
| 4904 | /* Now that lowering is completed we can proceed to perform register allocation */ |
| 4905 | m_pLinearScan->doLinearScan(); |
| 4906 | EndPhase(PHASE_LINEAR_SCAN); |
| 4907 | |
| 4908 | // Copied from rpPredictRegUse() |
| 4909 | genFullPtrRegMap = (codeGen->genInterruptible || !codeGen->isFramePointerUsed()); |
| 4910 | |
| 4911 | #ifdef DEBUG |
| 4912 | fgDebugCheckLinks(); |
| 4913 | #endif |
| 4914 | |
| 4915 | /* Generate code */ |
| 4916 | |
| 4917 | codeGen->genGenerateCode(methodCodePtr, methodCodeSize); |
| 4918 | |
| 4919 | #ifdef FEATURE_JIT_METHOD_PERF |
| 4920 | if (pCompJitTimer) |
| 4921 | { |
| 4922 | #if MEASURE_CLRAPI_CALLS |
| 4923 | EndPhase(PHASE_CLR_API); |
| 4924 | #endif |
| 4925 | pCompJitTimer->Terminate(this, CompTimeSummaryInfo::s_compTimeSummary, true); |
| 4926 | } |
| 4927 | #endif |
| 4928 | |
| 4929 | RecordStateAtEndOfCompilation(); |
| 4930 | |
| 4931 | #ifdef FEATURE_TRACELOGGING |
| 4932 | compJitTelemetry.NotifyEndOfCompilation(); |
| 4933 | #endif |
| 4934 | |
| 4935 | #if defined(DEBUG) |
| 4936 | ++Compiler::jitTotalMethodCompiled; |
| 4937 | #endif // defined(DEBUG) |
| 4938 | |
| 4939 | compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); |
| 4940 | JITDUMP("Method code size: %d\n" , (unsigned)(*methodCodeSize)); |
| 4941 | |
| 4942 | #if FUNC_INFO_LOGGING |
| 4943 | if (compJitFuncInfoFile != nullptr) |
| 4944 | { |
| 4945 | assert(!compIsForInlining()); |
| 4946 | #ifdef DEBUG // We only have access to info.compFullName in DEBUG builds. |
| 4947 | fprintf(compJitFuncInfoFile, "%s\n" , info.compFullName); |
| 4948 | #elif FEATURE_SIMD |
| 4949 | fprintf(compJitFuncInfoFile, " %s\n" , eeGetMethodFullName(info.compMethodHnd)); |
| 4950 | #endif |
| 4951 | fprintf(compJitFuncInfoFile, "" ); // in our logic this causes a flush |
| 4952 | } |
| 4953 | #endif // FUNC_INFO_LOGGING |
| 4954 | } |
| 4955 | |
| 4956 | //------------------------------------------------------------------------ |
| 4957 | // ResetOptAnnotations: Clear annotations produced during global optimizations. |
| 4958 | // |
| 4959 | // Notes: |
| 4960 | // The intent of this method is to clear any information typically assumed |
| 4961 | // to be set only once; it is used between iterations when JitOptRepeat is |
| 4962 | // in effect. |
| 4963 | |
| 4964 | void Compiler::ResetOptAnnotations() |
| 4965 | { |
| 4966 | assert(opts.optRepeat); |
| 4967 | assert(JitConfig.JitOptRepeatCount() > 0); |
| 4968 | fgResetForSsa(); |
| 4969 | vnStore = nullptr; |
| 4970 | m_opAsgnVarDefSsaNums = nullptr; |
| 4971 | m_blockToEHPreds = nullptr; |
| 4972 | fgSsaPassesCompleted = 0; |
| 4973 | fgVNPassesCompleted = 0; |
| 4974 | |
| 4975 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 4976 | { |
| 4977 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->getNextStmt()) |
| 4978 | { |
| 4979 | stmt->gtFlags &= ~GTF_STMT_HAS_CSE; |
| 4980 | |
| 4981 | for (GenTree* tree = stmt->gtStmt.gtStmtList; tree != nullptr; tree = tree->gtNext) |
| 4982 | { |
| 4983 | tree->ClearVN(); |
| 4984 | tree->ClearAssertion(); |
| 4985 | tree->gtCSEnum = NO_CSE; |
| 4986 | } |
| 4987 | } |
| 4988 | } |
| 4989 | } |
| 4990 | |
| 4991 | //------------------------------------------------------------------------ |
| 4992 | // RecomputeLoopInfo: Recompute loop annotations between opt-repeat iterations. |
| 4993 | // |
| 4994 | // Notes: |
| 4995 | // The intent of this method is to update loop structure annotations, and those |
| 4996 | // they depend on; these annotations may have become stale during optimization, |
| 4997 | // and need to be up-to-date before running another iteration of optimizations. |
| 4998 | |
| 4999 | void Compiler::RecomputeLoopInfo() |
| 5000 | { |
| 5001 | assert(opts.optRepeat); |
| 5002 | assert(JitConfig.JitOptRepeatCount() > 0); |
| 5003 | // Recompute reachability sets, dominators, and loops. |
| 5004 | optLoopCount = 0; |
| 5005 | fgDomsComputed = false; |
| 5006 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 5007 | { |
| 5008 | block->bbFlags &= ~BBF_LOOP_FLAGS; |
| 5009 | } |
| 5010 | fgComputeReachability(); |
| 5011 | // Rebuild the loop tree annotations themselves. Since this is performed as |
| 5012 | // part of 'optOptimizeLoops', this will also re-perform loop rotation, but |
| 5013 | // not other optimizations, as the others are not part of 'optOptimizeLoops'. |
| 5014 | optOptimizeLoops(); |
| 5015 | } |
| 5016 | |
| 5017 | /*****************************************************************************/ |
| 5018 | void Compiler::ProcessShutdownWork(ICorStaticInfo* statInfo) |
| 5019 | { |
| 5020 | } |
| 5021 | |
| 5022 | #ifdef _TARGET_AMD64_ |
| 5023 | // Check if we need to add the Quirk for the PPP backward compat issue. |
| 5024 | // This Quirk addresses a compatibility issue between the new RyuJit and the previous JIT64. |
// A backward compatibility issue called 'PPP' exists where a PInvoke call passes a 32-byte struct
// into a native API which basically writes 48 bytes of data into the struct.
// With the stack frame layout used by the RyuJIT the extra 16 bytes written corrupts a
// caller-saved register and this leads to an A/V in the calling method.
// The older JIT64 jit compiler just happened to have a different stack layout and/or
// caller-saved register set so that it didn't hit the A/V in the caller.
// By increasing the amount of stack allocated for the struct by 32 bytes we can fix this.
| 5032 | // |
| 5033 | // Return true if we actually perform the Quirk, otherwise return false |
| 5034 | // |
| 5035 | bool Compiler::compQuirkForPPP() |
| 5036 | { |
| 5037 | if (lvaCount != 2) |
| 5038 | { // We require that there are exactly two locals |
| 5039 | return false; |
| 5040 | } |
| 5041 | |
| 5042 | if (compTailCallUsed) |
| 5043 | { // Don't try this quirk if a tail call was used |
| 5044 | return false; |
| 5045 | } |
| 5046 | |
| 5047 | bool hasOutArgs = false; |
| 5048 | LclVarDsc* varDscExposedStruct = nullptr; |
| 5049 | |
| 5050 | unsigned lclNum; |
| 5051 | LclVarDsc* varDsc; |
| 5052 | |
| 5053 | /* Look for struct locals that are address taken */ |
| 5054 | for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) |
| 5055 | { |
| 5056 | if (varDsc->lvIsParam) // It can't be a parameter |
| 5057 | { |
| 5058 | continue; |
| 5059 | } |
| 5060 | |
| 5061 | // We require that the OutgoingArg space lclVar exists |
| 5062 | if (lclNum == lvaOutgoingArgSpaceVar) |
| 5063 | { |
| 5064 | hasOutArgs = true; // Record that we saw it |
| 5065 | continue; |
| 5066 | } |
| 5067 | |
| 5068 | // Look for a 32-byte address exposed Struct and record its varDsc |
| 5069 | if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvAddrExposed && (varDsc->lvExactSize == 32)) |
| 5070 | { |
| 5071 | varDscExposedStruct = varDsc; |
| 5072 | } |
| 5073 | } |
| 5074 | |
// We only perform the Quirk when there are two locals:
// one of them is an address-exposed struct of size 32
// and the other is the outgoing arg space local.
//
| 5079 | if (hasOutArgs && (varDscExposedStruct != nullptr)) |
| 5080 | { |
| 5081 | #ifdef DEBUG |
| 5082 | if (verbose) |
| 5083 | { |
| 5084 | printf("\nAdding a backwards compatibility quirk for the 'PPP' issue\n" ); |
| 5085 | } |
| 5086 | #endif // DEBUG |
| 5087 | |
| 5088 | // Increase the exact size of this struct by 32 bytes |
| 5089 | // This fixes the PPP backward compat issue |
| 5090 | varDscExposedStruct->lvExactSize += 32; |
| 5091 | |
| 5092 | // Update the GC info to indicate that the padding area does |
| 5093 | // not contain any GC pointers. |
| 5094 | // |
| 5095 | // The struct is now 64 bytes. |
| 5096 | // |
| 5097 | // We're on x64 so this should be 8 pointer slots. |
| 5098 | assert((varDscExposedStruct->lvExactSize / TARGET_POINTER_SIZE) == 8); |
| 5099 | |
| 5100 | BYTE* oldGCPtrs = varDscExposedStruct->lvGcLayout; |
| 5101 | BYTE* newGCPtrs = getAllocator(CMK_LvaTable).allocate<BYTE>(8); |
| 5102 | |
| 5103 | for (int i = 0; i < 4; i++) |
| 5104 | { |
| 5105 | newGCPtrs[i] = oldGCPtrs[i]; |
| 5106 | } |
| 5107 | |
| 5108 | for (int i = 4; i < 8; i++) |
| 5109 | { |
| 5110 | newGCPtrs[i] = TYPE_GC_NONE; |
| 5111 | } |
| 5112 | |
| 5113 | varDscExposedStruct->lvGcLayout = newGCPtrs; |
| 5114 | |
| 5115 | return true; |
| 5116 | } |
| 5117 | return false; |
| 5118 | } |
| 5119 | #endif // _TARGET_AMD64_ |
| 5120 | |
| 5121 | /*****************************************************************************/ |
| 5122 | |
| 5123 | #ifdef DEBUG |
void* forceFrameJIT; // used to force a frame; useful for fastchecked debugging
| 5125 | |
| 5126 | bool Compiler::skipMethod() |
| 5127 | { |
| 5128 | static ConfigMethodRange fJitRange; |
| 5129 | fJitRange.EnsureInit(JitConfig.JitRange()); |
| 5130 | assert(!fJitRange.Error()); |
| 5131 | |
// Normally JitConfig.JitRange() is null, meaning we don't want to skip
// jitting any methods.
| 5134 | // |
| 5135 | // So, the logic below relies on the fact that a null range string |
| 5136 | // passed to ConfigMethodRange represents the set of all methods. |
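// Example (the exact range syntax is an assumption; see ConfigMethodRange for the
// parser):
//     COMPlus_JitRange=10-20,100-200
// would jit only methods whose method hash falls inside one of the listed ranges.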
| 5137 | |
| 5138 | if (!fJitRange.Contains(info.compCompHnd, info.compMethodHnd)) |
| 5139 | { |
| 5140 | return true; |
| 5141 | } |
| 5142 | |
| 5143 | if (JitConfig.JitExclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 5144 | { |
| 5145 | return true; |
| 5146 | } |
| 5147 | |
| 5148 | if (!JitConfig.JitInclude().isEmpty() && |
| 5149 | !JitConfig.JitInclude().contains(info.compMethodName, info.compClassName, &info.compMethodInfo->args)) |
| 5150 | { |
| 5151 | return true; |
| 5152 | } |
| 5153 | |
| 5154 | return false; |
| 5155 | } |
| 5156 | |
| 5157 | #endif |
| 5158 | |
| 5159 | /*****************************************************************************/ |
| 5160 | |
| 5161 | int Compiler::compCompile(CORINFO_METHOD_HANDLE methodHnd, |
| 5162 | CORINFO_MODULE_HANDLE classPtr, |
| 5163 | COMP_HANDLE compHnd, |
| 5164 | CORINFO_METHOD_INFO* methodInfo, |
| 5165 | void** methodCodePtr, |
| 5166 | ULONG* methodCodeSize, |
| 5167 | JitFlags* compileFlags) |
| 5168 | { |
| 5169 | #ifdef FEATURE_JIT_METHOD_PERF |
| 5170 | static bool checkedForJitTimeLog = false; |
| 5171 | |
| 5172 | pCompJitTimer = nullptr; |
| 5173 | |
| 5174 | if (!checkedForJitTimeLog) |
| 5175 | { |
| 5176 | // Call into VM to get the config strings. FEATURE_JIT_METHOD_PERF is enabled for |
| 5177 | // retail builds. Do not call the regular Config helper here as it would pull |
| 5178 | // in a copy of the config parser into the clrjit.dll. |
| 5179 | InterlockedCompareExchangeT(&Compiler::compJitTimeLogFilename, compHnd->getJitTimeLogFilename(), NULL); |
| 5180 | |
| 5181 | // At a process or module boundary clear the file and start afresh. |
| 5182 | JitTimer::PrintCsvHeader(); |
| 5183 | |
| 5184 | checkedForJitTimeLog = true; |
| 5185 | } |
| 5186 | if ((Compiler::compJitTimeLogFilename != nullptr) || (JitTimeLogCsv() != nullptr)) |
| 5187 | { |
| 5188 | pCompJitTimer = JitTimer::Create(this, methodInfo->ILCodeSize); |
| 5189 | } |
| 5190 | #endif // FEATURE_JIT_METHOD_PERF |
| 5191 | |
| 5192 | #ifdef DEBUG |
| 5193 | Compiler* me = this; |
| 5194 | forceFrameJIT = (void*)&me; // let us see the this pointer in fastchecked build |
| 5195 | // set this early so we can use it without relying on random memory values |
| 5196 | verbose = compIsForInlining() ? impInlineInfo->InlinerCompiler->verbose : false; |
| 5197 | |
| 5198 | this->dumpIR = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIR : false; |
| 5199 | this->dumpIRPhase = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRPhase : nullptr; |
| 5200 | this->dumpIRFormat = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRFormat : nullptr; |
| 5201 | this->dumpIRTypes = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRTypes : false; |
| 5202 | this->dumpIRLocals = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRLocals : false; |
| 5203 | this->dumpIRRegs = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRRegs : false; |
| 5204 | this->dumpIRSsa = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRSsa : false; |
| 5205 | this->dumpIRValnums = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRValnums : false; |
| 5206 | this->dumpIRCosts = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRCosts : false; |
| 5207 | this->dumpIRFlags = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRFlags : false; |
| 5208 | this->dumpIRKinds = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRKinds : false; |
| 5209 | this->dumpIRNodes = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNodes : false; |
| 5210 | this->dumpIRNoLists = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNoLists : false; |
| 5211 | this->dumpIRNoLeafs = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNoLeafs : false; |
| 5212 | this->dumpIRNoStmts = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRNoStmts : false; |
| 5213 | this->dumpIRTrees = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRTrees : false; |
| 5214 | this->dumpIRLinear = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRLinear : false; |
| 5215 | this->dumpIRDataflow = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRDataflow : false; |
| 5216 | this->dumpIRBlockHeaders = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRBlockHeaders : NULL; |
| 5217 | this->dumpIRExit = compIsForInlining() ? impInlineInfo->InlinerCompiler->dumpIRExit : NULL; |
| 5218 | |
| 5219 | #endif |
| 5220 | |
| 5221 | #if defined(DEBUG) || defined(INLINE_DATA) |
| 5222 | info.compMethodHashPrivate = 0; |
| 5223 | #endif // defined(DEBUG) || defined(INLINE_DATA) |
| 5224 | |
| 5225 | #if FUNC_INFO_LOGGING |
| 5226 | LPCWSTR tmpJitFuncInfoFilename = JitConfig.JitFuncInfoFile(); |
| 5227 | |
| 5228 | if (tmpJitFuncInfoFilename != nullptr) |
| 5229 | { |
| 5230 | LPCWSTR oldFuncInfoFileName = |
| 5231 | InterlockedCompareExchangeT(&compJitFuncInfoFilename, tmpJitFuncInfoFilename, NULL); |
| 5232 | if (oldFuncInfoFileName == nullptr) |
| 5233 | { |
| 5234 | assert(compJitFuncInfoFile == nullptr); |
| 5235 | compJitFuncInfoFile = _wfopen(compJitFuncInfoFilename, W("a" )); |
| 5236 | if (compJitFuncInfoFile == nullptr) |
| 5237 | { |
| 5238 | #if defined(DEBUG) && !defined(FEATURE_PAL) // no 'perror' in the PAL |
| 5239 | perror("Failed to open JitFuncInfoLogFile" ); |
| 5240 | #endif // defined(DEBUG) && !defined(FEATURE_PAL) |
| 5241 | } |
| 5242 | } |
| 5243 | } |
| 5244 | #endif // FUNC_INFO_LOGGING |
| 5245 | |
| 5246 | // if (s_compMethodsCount==0) setvbuf(jitstdout, NULL, _IONBF, 0); |
| 5247 | |
| 5248 | info.compCompHnd = compHnd; |
| 5249 | info.compMethodHnd = methodHnd; |
| 5250 | info.compMethodInfo = methodInfo; |
| 5251 | |
| 5252 | virtualStubParamInfo = new (this, CMK_Unknown) VirtualStubParamInfo(IsTargetAbi(CORINFO_CORERT_ABI)); |
| 5253 | |
| 5254 | // Do we have a matched VM? Or are we "abusing" the VM to help us do JIT work (such as using an x86 native VM |
| 5255 | // with an ARM-targeting "altjit"). |
| 5256 | info.compMatchedVM = IMAGE_FILE_MACHINE_TARGET == info.compCompHnd->getExpectedTargetArchitecture(); |
| 5257 | |
| 5258 | #if (defined(_TARGET_UNIX_) && !defined(_HOST_UNIX_)) || (!defined(_TARGET_UNIX_) && defined(_HOST_UNIX_)) |
| 5259 | // The host and target platforms don't match. This info isn't handled by the existing |
| 5260 | // getExpectedTargetArchitecture() JIT-EE interface method. |
| 5261 | info.compMatchedVM = false; |
| 5262 | #endif |
| 5263 | |
| 5264 | // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target |
| 5265 | // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64 |
| 5266 | // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need |
| 5267 | // to be of the same bitness.) In these cases, we need to fix up the JIT flags to be appropriate for |
| 5268 | // the target, as the VM's expected target may overlap bit flags with different meaning to our target. |
| 5269 | // Note that it might be better to do this immediately when setting the JIT flags in CILJit::compileMethod() |
| 5270 | // (when JitFlags::SetFromFlags() is called), but this is close enough. (To move this logic to |
| 5271 | // CILJit::compileMethod() would require moving the info.compMatchedVM computation there as well.) |
| 5272 | |
| 5273 | if (!info.compMatchedVM) |
| 5274 | { |
| 5275 | #if defined(_TARGET_ARM_) |
| 5276 | |
| 5277 | // Currently nothing needs to be done. There are no ARM flags that conflict with other flags. |
| 5278 | |
| 5279 | #endif // defined(_TARGET_ARM_) |
| 5280 | |
| 5281 | #if defined(_TARGET_ARM64_) |
| 5282 | |
| 5283 | // The x86/x64 architecture capabilities flags overlap with the ARM64 ones. Set a reasonable architecture |
| 5284 | // target default. Currently this is disabling all ARM64 architecture features except FP and SIMD, but this |
| 5285 | // should be altered to possibly enable all of them, when they are known to all work. |
| 5286 | |
| 5287 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_AES); |
| 5288 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_ATOMICS); |
| 5289 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_CRC32); |
| 5290 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_DCPOP); |
| 5291 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_DP); |
| 5292 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_FCMA); |
| 5293 | compileFlags->Set(JitFlags::JIT_FLAG_HAS_ARM64_FP); |
| 5294 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_FP16); |
| 5295 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_JSCVT); |
| 5296 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_LRCPC); |
| 5297 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_PMULL); |
| 5298 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA1); |
| 5299 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA256); |
| 5300 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA512); |
| 5301 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SHA3); |
| 5302 | compileFlags->Set(JitFlags::JIT_FLAG_HAS_ARM64_SIMD); |
| 5303 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SIMD_V81); |
| 5304 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SIMD_FP16); |
| 5305 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SM3); |
| 5306 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SM4); |
| 5307 | compileFlags->Clear(JitFlags::JIT_FLAG_HAS_ARM64_SVE); |
| 5308 | |
| 5309 | #endif // defined(_TARGET_ARM64_) |
| 5310 | } |
| 5311 | |
| 5312 | compMaxUncheckedOffsetForNullObject = eeGetEEInfo()->maxUncheckedOffsetForNullObject; |
| 5313 | |
| 5314 | // Set the context for token lookup. |
| 5315 | if (compIsForInlining()) |
| 5316 | { |
| 5317 | impTokenLookupContextHandle = impInlineInfo->tokenLookupContextHandle; |
| 5318 | |
| 5319 | assert(impInlineInfo->inlineCandidateInfo->clsHandle == compHnd->getMethodClass(methodHnd)); |
| 5320 | info.compClassHnd = impInlineInfo->inlineCandidateInfo->clsHandle; |
| 5321 | |
| 5322 | assert(impInlineInfo->inlineCandidateInfo->clsAttr == info.compCompHnd->getClassAttribs(info.compClassHnd)); |
| 5323 | // printf("%x != %x\n", impInlineInfo->inlineCandidateInfo->clsAttr, |
| 5324 | // info.compCompHnd->getClassAttribs(info.compClassHnd)); |
| 5325 | info.compClassAttr = impInlineInfo->inlineCandidateInfo->clsAttr; |
| 5326 | } |
| 5327 | else |
| 5328 | { |
| 5329 | impTokenLookupContextHandle = MAKE_METHODCONTEXT(info.compMethodHnd); |
| 5330 | |
| 5331 | info.compClassHnd = compHnd->getMethodClass(methodHnd); |
| 5332 | info.compClassAttr = info.compCompHnd->getClassAttribs(info.compClassHnd); |
| 5333 | } |
| 5334 | |
| 5335 | info.compProfilerCallback = false; // Assume false until we are told to hook this method. |
| 5336 | |
| 5337 | #if defined(DEBUG) || defined(LATE_DISASM) |
| 5338 | const char* classNamePtr; |
| 5339 | |
| 5340 | info.compMethodName = eeGetMethodName(methodHnd, &classNamePtr); |
| 5341 | unsigned len = (unsigned)roundUp(strlen(classNamePtr) + 1); |
| 5342 | info.compClassName = getAllocator(CMK_DebugOnly).allocate<char>(len); |
| 5343 | strcpy_s((char*)info.compClassName, len, classNamePtr); |
| 5344 | |
| 5345 | info.compFullName = eeGetMethodFullName(methodHnd); |
| 5346 | #endif // defined(DEBUG) || defined(LATE_DISASM) |
| 5347 | |
| 5348 | #ifdef DEBUG |
| 5349 | if (!compIsForInlining()) |
| 5350 | { |
| 5351 | JitTls::GetLogEnv()->setCompiler(this); |
| 5352 | } |
| 5353 | |
| 5354 | // Have we been told to be more selective in our Jitting? |
| 5355 | if (skipMethod()) |
| 5356 | { |
| 5357 | if (compIsForInlining()) |
| 5358 | { |
| 5359 | compInlineResult->NoteFatal(InlineObservation::CALLEE_MARKED_AS_SKIPPED); |
| 5360 | } |
| 5361 | return CORJIT_SKIPPED; |
| 5362 | } |
| 5363 | |
| 5364 | // Opt-in to jit stress based on method hash ranges. |
| 5365 | // |
| 5366 | // Note the default (with JitStressRange not set) is that all |
| 5367 | // methods will be subject to stress. |
| 5368 | static ConfigMethodRange fJitStressRange; |
| 5369 | fJitStressRange.EnsureInit(JitConfig.JitStressRange()); |
| 5370 | assert(!fJitStressRange.Error()); |
| 5371 | bRangeAllowStress = fJitStressRange.Contains(info.compCompHnd, info.compMethodHnd); |
| 5372 | |
| 5373 | #endif // DEBUG |
| 5374 | |
| 5375 | // Set this before the first 'BADCODE' |
| 5376 | // Skip verification where possible |
| 5377 | tiVerificationNeeded = !compileFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION); |
| 5378 | |
| 5379 | assert(!compIsForInlining() || !tiVerificationNeeded); // Inlinees must have been verified. |
| 5380 | |
| 5381 | // assume the code is verifiable unless proven otherwise |
| 5382 | tiIsVerifiableCode = TRUE; |
| 5383 | |
| 5384 | tiRuntimeCalloutNeeded = false; |
| 5385 | |
| 5386 | CorInfoInstantiationVerification instVerInfo = INSTVER_GENERIC_PASSED_VERIFICATION; |
| 5387 | |
| 5388 | if (!compIsForInlining() && tiVerificationNeeded) |
| 5389 | { |
| 5390 | instVerInfo = compHnd->isInstantiationOfVerifiedGeneric(methodHnd); |
| 5391 | |
| 5392 | if (tiVerificationNeeded && (instVerInfo == INSTVER_GENERIC_FAILED_VERIFICATION)) |
| 5393 | { |
| 5394 | CorInfoCanSkipVerificationResult canSkipVerificationResult = |
| 5395 | info.compCompHnd->canSkipMethodVerification(info.compMethodHnd); |
| 5396 | |
| 5397 | switch (canSkipVerificationResult) |
| 5398 | { |
| 5399 | case CORINFO_VERIFICATION_CANNOT_SKIP: |
| 5400 | // We cannot verify concrete instantiation. |
| 5401 | // We can only verify the typical/open instantiation |
| 5402 | // The VM should throw a VerificationException instead of allowing this. |
| 5403 | NO_WAY("Verification of closed instantiations is not supported" ); |
| 5404 | break; |
| 5405 | |
| 5406 | case CORINFO_VERIFICATION_CAN_SKIP: |
| 5407 | // The VM should first verify the open instantiation. If unverifiable code |
| 5408 | // is detected, it should pass in JitFlags::JIT_FLAG_SKIP_VERIFICATION. |
| 5409 | assert(!"The VM should have used JitFlags::JIT_FLAG_SKIP_VERIFICATION" ); |
| 5410 | tiVerificationNeeded = false; |
| 5411 | break; |
| 5412 | |
| 5413 | case CORINFO_VERIFICATION_RUNTIME_CHECK: |
| 5414 | // This is a concrete generic instantiation with unverifiable code, that also |
| 5415 | // needs a runtime callout. |
| 5416 | tiVerificationNeeded = false; |
| 5417 | tiRuntimeCalloutNeeded = true; |
| 5418 | break; |
| 5419 | |
| 5420 | case CORINFO_VERIFICATION_DONT_JIT: |
| 5421 | // We cannot verify concrete instantiation. |
| 5422 | // We can only verify the typical/open instantiation |
| 5423 | // The VM should throw a VerificationException instead of allowing this. |
| 5424 | BADCODE("NGEN of unverifiable transparent code is not supported" ); |
| 5425 | break; |
| 5426 | } |
| 5427 | } |
| 5428 | |
| 5429 | // load any constraints for verification, noting any cycles to be rejected by the verifying importer |
| 5430 | if (tiVerificationNeeded) |
| 5431 | { |
| 5432 | compHnd->initConstraintsForVerification(methodHnd, &info.hasCircularClassConstraints, |
| 5433 | &info.hasCircularMethodConstraints); |
| 5434 | } |
| 5435 | } |
| 5436 | |
| 5437 | /* Setup an error trap */ |
| 5438 | |
| 5439 | struct Param |
| 5440 | { |
| 5441 | Compiler* pThis; |
| 5442 | |
| 5443 | CORINFO_MODULE_HANDLE classPtr; |
| 5444 | COMP_HANDLE compHnd; |
| 5445 | CORINFO_METHOD_INFO* methodInfo; |
| 5446 | void** methodCodePtr; |
| 5447 | ULONG* methodCodeSize; |
| 5448 | JitFlags* compileFlags; |
| 5449 | |
| 5450 | CorInfoInstantiationVerification instVerInfo; |
| 5451 | int result; |
| 5452 | } param; |
| 5453 | param.pThis = this; |
| 5454 | param.classPtr = classPtr; |
| 5455 | param.compHnd = compHnd; |
| 5456 | param.methodInfo = methodInfo; |
| 5457 | param.methodCodePtr = methodCodePtr; |
| 5458 | param.methodCodeSize = methodCodeSize; |
| 5459 | param.compileFlags = compileFlags; |
| 5460 | param.instVerInfo = instVerInfo; |
| 5461 | param.result = CORJIT_INTERNALERROR; |
| 5462 | |
| 5463 | setErrorTrap(compHnd, Param*, pParam, ¶m) // ERROR TRAP: Start normal block |
| 5464 | { |
| 5465 | pParam->result = pParam->pThis->compCompileHelper(pParam->classPtr, pParam->compHnd, pParam->methodInfo, |
| 5466 | pParam->methodCodePtr, pParam->methodCodeSize, |
| 5467 | pParam->compileFlags, pParam->instVerInfo); |
| 5468 | } |
| 5469 | finallyErrorTrap() // ERROR TRAP: The following block handles errors |
| 5470 | { |
| 5471 | /* Cleanup */ |
| 5472 | |
| 5473 | if (compIsForInlining()) |
| 5474 | { |
| 5475 | goto DoneCleanUp; |
| 5476 | } |
| 5477 | |
| 5478 | /* Tell the emitter that we're done with this function */ |
| 5479 | |
| 5480 | genEmitter->emitEndCG(); |
| 5481 | |
| 5482 | DoneCleanUp: |
| 5483 | compDone(); |
| 5484 | } |
| 5485 | endErrorTrap() // ERROR TRAP: End |
| 5486 | |
| 5487 | return param.result; |
| 5488 | } |
| 5489 | |
| 5490 | #if defined(DEBUG) || defined(INLINE_DATA) |
| 5491 | unsigned Compiler::Info::compMethodHash() const |
| 5492 | { |
| 5493 | if (compMethodHashPrivate == 0) |
| 5494 | { |
| 5495 | compMethodHashPrivate = compCompHnd->getMethodHash(compMethodHnd); |
| 5496 | } |
| 5497 | return compMethodHashPrivate; |
| 5498 | } |
| 5499 | #endif // defined(DEBUG) || defined(INLINE_DATA) |
| 5500 | |
| 5501 | void Compiler::compCompileFinish() |
| 5502 | { |
| 5503 | #if defined(DEBUG) || MEASURE_NODE_SIZE || MEASURE_BLOCK_SIZE || DISPLAY_SIZES || CALL_ARG_STATS |
| 5504 | genMethodCnt++; |
| 5505 | #endif |
| 5506 | |
| 5507 | #if MEASURE_MEM_ALLOC |
| 5508 | { |
| 5509 | compArenaAllocator->finishMemStats(); |
| 5510 | memAllocHist.record((unsigned)((compArenaAllocator->getTotalBytesAllocated() + 1023) / 1024)); |
| 5511 | memUsedHist.record((unsigned)((compArenaAllocator->getTotalBytesUsed() + 1023) / 1024)); |
| 5512 | } |
| 5513 | |
| 5514 | #ifdef DEBUG |
| 5515 | if (s_dspMemStats || verbose) |
| 5516 | { |
| 5517 | printf("\nAllocations for %s (MethodHash=%08x)\n" , info.compFullName, info.compMethodHash()); |
| 5518 | compArenaAllocator->dumpMemStats(jitstdout); |
| 5519 | } |
| 5520 | #endif // DEBUG |
| 5521 | #endif // MEASURE_MEM_ALLOC |
| 5522 | |
| 5523 | #if LOOP_HOIST_STATS |
| 5524 | AddLoopHoistStats(); |
| 5525 | #endif // LOOP_HOIST_STATS |
| 5526 | |
| 5527 | #if MEASURE_NODE_SIZE |
| 5528 | genTreeNcntHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeCnt)); |
| 5529 | genTreeNsizHist.record(static_cast<unsigned>(genNodeSizeStatsPerFunc.genTreeNodeSize)); |
| 5530 | #endif |
| 5531 | |
| 5532 | #if defined(DEBUG) |
| 5533 | // Small methods should fit in ArenaAllocator::getDefaultPageSize(), or else |
| 5534 | // we should bump up ArenaAllocator::getDefaultPageSize() |
| 5535 | |
| 5536 | if ((info.compILCodeSize <= 32) && // Is it a reasonably small method? |
(info.compNativeCodeSize < 512) && // Some trivial methods generate huge native code, e.g. pushing a single huge
// struct
(impInlinedCodeSize <= 128) && // Is the inlining reasonably bounded?
| 5540 | // Small methods cannot meaningfully have a big number of locals |
| 5541 | // or arguments. We always track arguments at the start of |
| 5542 | // the prolog which requires memory |
| 5543 | (info.compLocalsCount <= 32) && (!opts.MinOpts()) && // We may have too many local variables, etc |
| 5544 | (getJitStressLevel() == 0) && // We need extra memory for stress |
| 5545 | !opts.optRepeat && // We need extra memory to repeat opts |
| 5546 | !compArenaAllocator->bypassHostAllocator() && // ArenaAllocator::getDefaultPageSize() is artificially low for |
| 5547 | // DirectAlloc |
| 5548 | // Factor of 2x is because data-structures are bigger under DEBUG |
| 5549 | (compArenaAllocator->getTotalBytesAllocated() > (2 * ArenaAllocator::getDefaultPageSize())) && |
| 5550 | // RyuJIT backend needs memory tuning! TODO-Cleanup: remove this case when memory tuning is complete. |
| 5551 | (compArenaAllocator->getTotalBytesAllocated() > (10 * ArenaAllocator::getDefaultPageSize())) && |
| 5552 | !verbose) // We allocate lots of memory to convert sets to strings for JitDump |
| 5553 | { |
| 5554 | genSmallMethodsNeedingExtraMemoryCnt++; |
| 5555 | |
| 5556 | // Less than 1% of all methods should run into this. |
| 5557 | // We cannot be more strict as there are always degenerate cases where we |
| 5558 | // would need extra memory (like huge structs as locals - see lvaSetStruct()). |
| 5559 | assert((genMethodCnt < 500) || (genSmallMethodsNeedingExtraMemoryCnt < (genMethodCnt / 100))); |
| 5560 | } |
| 5561 | #endif // DEBUG |
| 5562 | |
| 5563 | #if defined(DEBUG) || defined(INLINE_DATA) |
| 5564 | |
| 5565 | m_inlineStrategy->DumpData(); |
| 5566 | m_inlineStrategy->DumpXml(); |
| 5567 | |
| 5568 | #endif |
| 5569 | |
| 5570 | #ifdef DEBUG |
| 5571 | if (opts.dspOrder) |
| 5572 | { |
| 5573 | // mdMethodDef __stdcall CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod) |
| 5574 | mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); |
| 5575 | |
| 5576 | unsigned profCallCount = 0; |
| 5577 | if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) && fgHaveProfileData()) |
| 5578 | { |
| 5579 | assert(fgProfileBuffer[0].ILOffset == 0); |
| 5580 | profCallCount = fgProfileBuffer[0].ExecutionCount; |
| 5581 | } |
| 5582 | |
static bool headerPrinted = false;
| 5584 | if (!headerPrinted) |
| 5585 | { |
| 5586 | // clang-format off |
| 5587 | headerPrinted = true; |
| 5588 | printf(" | Profiled | Exec- | Method has | calls | Num |LclV |AProp| CSE | Reg |bytes | %3s code size | \n" , Target::g_tgtCPUName); |
| 5589 | printf(" mdToken | | RGN | Count | EH | FRM | LOOP | NRM | IND | BBs | Cnt | Cnt | Cnt | Alloc | IL | HOT | COLD | method name \n" ); |
| 5590 | printf("---------+-----+------+----------+----+-----+------+-----+-----+-----+-----+-----+-----+---------+------+-------+-------+-----------\n" ); |
| 5591 | // 06001234 | PRF | HOT | 219 | EH | ebp | LOOP | 15 | 6 | 12 | 17 | 12 | 8 | 28 p2 | 145 | 211 | 123 | System.Example(int) |
| 5592 | // clang-format on |
| 5593 | } |
| 5594 | |
| 5595 | printf("%08X | " , currentMethodToken); |
| 5596 | |
| 5597 | CorInfoRegionKind regionKind = info.compMethodInfo->regionKind; |
| 5598 | |
| 5599 | if (opts.altJit) |
| 5600 | { |
| 5601 | printf("ALT | " ); |
| 5602 | } |
| 5603 | else if (fgHaveProfileData()) |
| 5604 | { |
| 5605 | printf("PRF | " ); |
| 5606 | } |
| 5607 | else |
| 5608 | { |
| 5609 | printf(" | " ); |
| 5610 | } |
| 5611 | |
| 5612 | if (regionKind == CORINFO_REGION_NONE) |
| 5613 | { |
| 5614 | printf(" | " ); |
| 5615 | } |
| 5616 | else if (regionKind == CORINFO_REGION_HOT) |
| 5617 | { |
| 5618 | printf(" HOT | " ); |
| 5619 | } |
| 5620 | else if (regionKind == CORINFO_REGION_COLD) |
| 5621 | { |
| 5622 | printf("COLD | " ); |
| 5623 | } |
| 5624 | else if (regionKind == CORINFO_REGION_JIT) |
| 5625 | { |
| 5626 | printf(" JIT | " ); |
| 5627 | } |
| 5628 | else |
| 5629 | { |
| 5630 | printf("UNKN | " ); |
| 5631 | } |
| 5632 | |
| 5633 | printf("%8d | " , profCallCount); |
| 5634 | |
| 5635 | if (compHndBBtabCount > 0) |
| 5636 | { |
| 5637 | printf("EH | " ); |
| 5638 | } |
| 5639 | else |
| 5640 | { |
| 5641 | printf(" | " ); |
| 5642 | } |
| 5643 | |
| 5644 | if (rpFrameType == FT_EBP_FRAME) |
| 5645 | { |
| 5646 | printf("%3s | " , STR_FPBASE); |
| 5647 | } |
| 5648 | else if (rpFrameType == FT_ESP_FRAME) |
| 5649 | { |
| 5650 | printf("%3s | " , STR_SPBASE); |
| 5651 | } |
| 5652 | #if DOUBLE_ALIGN |
| 5653 | else if (rpFrameType == FT_DOUBLE_ALIGN_FRAME) |
| 5654 | { |
| 5655 | printf("dbl | " ); |
| 5656 | } |
| 5657 | #endif |
| 5658 | else // (rpFrameType == FT_NOT_SET) |
| 5659 | { |
| 5660 | printf("??? | " ); |
| 5661 | } |
| 5662 | |
| 5663 | if (fgHasLoops) |
| 5664 | { |
| 5665 | printf("LOOP |" ); |
| 5666 | } |
| 5667 | else |
| 5668 | { |
| 5669 | printf(" |" ); |
| 5670 | } |
| 5671 | |
| 5672 | printf(" %3d |" , optCallCount); |
| 5673 | printf(" %3d |" , optIndirectCallCount); |
| 5674 | printf(" %3d |" , fgBBcountAtCodegen); |
| 5675 | printf(" %3d |" , lvaCount); |
| 5676 | |
| 5677 | if (opts.MinOpts()) |
| 5678 | { |
| 5679 | printf(" MinOpts |" ); |
| 5680 | } |
| 5681 | else |
| 5682 | { |
| 5683 | printf(" %3d |" , optAssertionCount); |
| 5684 | #if FEATURE_ANYCSE |
| 5685 | printf(" %3d |" , optCSEcount); |
| 5686 | #else |
| 5687 | printf(" %3d |" , 0); |
| 5688 | #endif // FEATURE_ANYCSE |
| 5689 | } |
| 5690 | |
| 5691 | printf(" LSRA |" ); // TODO-Cleanup: dump some interesting LSRA stat into the order file? |
| 5692 | printf(" %4d |" , info.compMethodInfo->ILCodeSize); |
| 5693 | printf(" %5d |" , info.compTotalHotCodeSize); |
| 5694 | printf(" %5d |" , info.compTotalColdCodeSize); |
| 5695 | |
| 5696 | printf(" %s\n" , eeGetMethodFullName(info.compMethodHnd)); |
| 5697 | printf("" ); // in our logic this causes a flush |
| 5698 | } |
| 5699 | |
| 5700 | if (verbose) |
| 5701 | { |
| 5702 | printf("****** DONE compiling %s\n" , info.compFullName); |
| 5703 | printf("" ); // in our logic this causes a flush |
| 5704 | } |
| 5705 | |
| 5706 | // Only call _DbgBreakCheck when we are jitting, not when we are ngen-ing |
| 5707 | // For ngen the int3 or breakpoint instruction will be right at the |
| 5708 | // start of the ngen method and we will stop when we execute it. |
| 5709 | // |
| 5710 | if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 5711 | { |
| 5712 | if (compJitHaltMethod()) |
| 5713 | { |
| 5714 | #if !defined(_HOST_UNIX_) |
| 5715 | // TODO-UNIX: re-enable this when we have an OS that supports a pop-up dialog |
| 5716 | |
| 5717 | // Don't do an assert, but just put up the dialog box so we get just-in-time debugger |
| 5718 | // launching. When you hit 'retry' it will continue and naturally stop at the INT 3 |
| 5719 | // that the JIT put in the code |
| 5720 | _DbgBreakCheck(__FILE__, __LINE__, "JitHalt" ); |
| 5721 | #endif |
| 5722 | } |
| 5723 | } |
| 5724 | #endif // DEBUG |
| 5725 | } |
| 5726 | |
| 5727 | #ifdef PSEUDORANDOM_NOP_INSERTION |
// This is the zlib adler32 checksum; the source came from the Windows code base.
| 5729 | |
| 5730 | #define BASE 65521L // largest prime smaller than 65536 |
| 5731 | #define NMAX 5552 |
| 5732 | // NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 |
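// Sketch of why deferring the modulo is safe: starting from the largest reduced values
// s1 = s2 = BASE-1, after n bytes of 0xFF we have s2 <= 255*n*(n+1)/2 + (n+1)*(BASE-1),
// so the constraint above keeps both 32-bit accumulators from overflowing before the
// "%= BASE" at the end of each block; n = 5552 is the largest n satisfying it.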
| 5733 | |
| 5734 | #define DO1(buf, i) \ |
| 5735 | { \ |
| 5736 | s1 += buf[i]; \ |
| 5737 | s2 += s1; \ |
| 5738 | } |
| 5739 | #define DO2(buf, i) \ |
| 5740 | DO1(buf, i); \ |
| 5741 | DO1(buf, i + 1); |
| 5742 | #define DO4(buf, i) \ |
| 5743 | DO2(buf, i); \ |
| 5744 | DO2(buf, i + 2); |
| 5745 | #define DO8(buf, i) \ |
| 5746 | DO4(buf, i); \ |
| 5747 | DO4(buf, i + 4); |
| 5748 | #define DO16(buf) \ |
| 5749 | DO8(buf, 0); \ |
| 5750 | DO8(buf, 8); |
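// DO16 expands to sixteen DO1 updates over buf[0..15], so the hot loop below consumes
// 16 bytes per iteration without per-byte loop-control overhead.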
| 5751 | |
| 5752 | unsigned adler32(unsigned adler, char* buf, unsigned int len) |
| 5753 | { |
| 5754 | unsigned int s1 = adler & 0xffff; |
| 5755 | unsigned int s2 = (adler >> 16) & 0xffff; |
| 5756 | int k; |
| 5757 | |
| 5758 | if (buf == NULL) |
| 5759 | return 1L; |
| 5760 | |
| 5761 | while (len > 0) |
| 5762 | { |
| 5763 | k = len < NMAX ? len : NMAX; |
| 5764 | len -= k; |
| 5765 | while (k >= 16) |
| 5766 | { |
| 5767 | DO16(buf); |
| 5768 | buf += 16; |
| 5769 | k -= 16; |
| 5770 | } |
| 5771 | if (k != 0) |
| 5772 | do |
| 5773 | { |
| 5774 | s1 += *buf++; |
| 5775 | s2 += s1; |
| 5776 | } while (--k); |
| 5777 | s1 %= BASE; |
| 5778 | s2 %= BASE; |
| 5779 | } |
| 5780 | return (s2 << 16) | s1; |
| 5781 | } |
| 5782 | #endif |
| 5783 | |
| 5784 | unsigned getMethodBodyChecksum(__in_z char* code, int size) |
| 5785 | { |
| 5786 | #ifdef PSEUDORANDOM_NOP_INSERTION |
| 5787 | return adler32(0, code, size); |
| 5788 | #else |
| 5789 | return 0; |
| 5790 | #endif |
| 5791 | } |
| 5792 | |
| 5793 | int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, |
| 5794 | COMP_HANDLE compHnd, |
| 5795 | CORINFO_METHOD_INFO* methodInfo, |
| 5796 | void** methodCodePtr, |
| 5797 | ULONG* methodCodeSize, |
| 5798 | JitFlags* compileFlags, |
| 5799 | CorInfoInstantiationVerification instVerInfo) |
| 5800 | { |
| 5801 | CORINFO_METHOD_HANDLE methodHnd = info.compMethodHnd; |
| 5802 | |
| 5803 | info.compCode = methodInfo->ILCode; |
| 5804 | info.compILCodeSize = methodInfo->ILCodeSize; |
| 5805 | |
| 5806 | if (info.compILCodeSize == 0) |
| 5807 | { |
| 5808 | BADCODE("code size is zero" ); |
| 5809 | } |
| 5810 | |
| 5811 | if (compIsForInlining()) |
| 5812 | { |
| 5813 | #ifdef DEBUG |
| 5814 | unsigned methAttr_Old = impInlineInfo->inlineCandidateInfo->methAttr; |
| 5815 | unsigned methAttr_New = info.compCompHnd->getMethodAttribs(info.compMethodHnd); |
| 5816 | unsigned flagsToIgnore = CORINFO_FLG_DONT_INLINE | CORINFO_FLG_FORCEINLINE; |
| 5817 | assert((methAttr_Old & (~flagsToIgnore)) == (methAttr_New & (~flagsToIgnore))); |
| 5818 | #endif |
| 5819 | |
| 5820 | info.compFlags = impInlineInfo->inlineCandidateInfo->methAttr; |
| 5821 | } |
| 5822 | else |
| 5823 | { |
| 5824 | info.compFlags = info.compCompHnd->getMethodAttribs(info.compMethodHnd); |
| 5825 | #ifdef PSEUDORANDOM_NOP_INSERTION |
| 5826 | info.compChecksum = getMethodBodyChecksum((char*)methodInfo->ILCode, methodInfo->ILCodeSize); |
| 5827 | #endif |
| 5828 | } |
| 5829 | |
| 5830 | // compInitOptions will set the correct verbose flag. |
| 5831 | |
| 5832 | compInitOptions(compileFlags); |
| 5833 | |
| 5834 | #ifdef ALT_JIT |
| 5835 | if (!compIsForInlining() && !opts.altJit) |
| 5836 | { |
| 5837 | // We're an altjit, but the COMPlus_AltJit configuration did not say to compile this method, |
| 5838 | // so skip it. |
| 5839 | return CORJIT_SKIPPED; |
| 5840 | } |
| 5841 | #endif // ALT_JIT |
| 5842 | |
| 5843 | #ifdef DEBUG |
| 5844 | |
| 5845 | if (verbose) |
| 5846 | { |
| 5847 | printf("IL to import:\n" ); |
| 5848 | dumpILRange(info.compCode, info.compILCodeSize); |
| 5849 | } |
| 5850 | |
| 5851 | #endif |
| 5852 | |
| 5853 | // Check for COMPlus_AggressiveInlining |
| 5854 | if (JitConfig.JitAggressiveInlining()) |
| 5855 | { |
| 5856 | compDoAggressiveInlining = true; |
| 5857 | } |
| 5858 | |
| 5859 | if (compDoAggressiveInlining) |
| 5860 | { |
| 5861 | info.compFlags |= CORINFO_FLG_FORCEINLINE; |
| 5862 | } |
| 5863 | |
| 5864 | #ifdef DEBUG |
| 5865 | |
| 5866 | // Check for ForceInline stress. |
| 5867 | if (compStressCompile(STRESS_FORCE_INLINE, 0)) |
| 5868 | { |
| 5869 | info.compFlags |= CORINFO_FLG_FORCEINLINE; |
| 5870 | } |
| 5871 | |
| 5872 | if (compIsForInlining()) |
| 5873 | { |
        JITLOG((LL_INFO100000, "\nINLINER impTokenLookupContextHandle for %s is 0x%p.\n",
                eeGetMethodFullName(info.compMethodHnd), dspPtr(impTokenLookupContextHandle)));
| 5876 | } |
| 5877 | |
| 5878 | // Force verification if asked to do so |
| 5879 | if (JitConfig.JitForceVer()) |
| 5880 | { |
| 5881 | tiVerificationNeeded = (instVerInfo == INSTVER_NOT_INSTANTIATION); |
| 5882 | } |
| 5883 | |
| 5884 | if (tiVerificationNeeded) |
| 5885 | { |
        JITLOG((LL_INFO10000, "tiVerificationNeeded initially set to true for %s\n", info.compFullName));
| 5887 | } |
| 5888 | #endif // DEBUG |
| 5889 | |
| 5890 | /* Since tiVerificationNeeded can be turned off in the middle of |
| 5891 | compiling a method, and it might have caused blocks to be queued up |
| 5892 | for reimporting, impCanReimport can be used to check for reimporting. */ |
| 5893 | |
| 5894 | impCanReimport = (tiVerificationNeeded || compStressCompile(STRESS_CHK_REIMPORT, 15)); |
| 5895 | |
    // Need security prolog/epilog callouts when there is declarative security in the method.
| 5897 | tiSecurityCalloutNeeded = ((info.compFlags & CORINFO_FLG_NOSECURITYWRAP) == 0); |
| 5898 | |
| 5899 | if (tiSecurityCalloutNeeded || (info.compFlags & CORINFO_FLG_SECURITYCHECK)) |
| 5900 | { |
| 5901 | // We need to allocate the security object on the stack |
| 5902 | // when the method being compiled has a declarative security |
| 5903 | // (i.e. when CORINFO_FLG_NOSECURITYWRAP is reset for the current method). |
| 5904 | // This is also the case when we inject a prolog and epilog in the method. |
| 5905 | opts.compNeedSecurityCheck = true; |
| 5906 | } |
| 5907 | |
    /* Initialize a bunch of global values */
| 5909 | |
| 5910 | info.compScopeHnd = classPtr; |
| 5911 | info.compXcptnsCount = methodInfo->EHcount; |
| 5912 | info.compMaxStack = methodInfo->maxStack; |
| 5913 | compHndBBtab = nullptr; |
| 5914 | compHndBBtabCount = 0; |
| 5915 | compHndBBtabAllocCount = 0; |
| 5916 | |
| 5917 | info.compNativeCodeSize = 0; |
| 5918 | info.compTotalHotCodeSize = 0; |
| 5919 | info.compTotalColdCodeSize = 0; |
| 5920 | |
| 5921 | #ifdef DEBUG |
| 5922 | compCurBB = nullptr; |
| 5923 | lvaTable = nullptr; |
| 5924 | |
    // Reset the node and block ID counters
| 5926 | compGenTreeID = 0; |
| 5927 | compBasicBlockID = 0; |
| 5928 | #endif |
| 5929 | |
| 5930 | /* Initialize emitter */ |
| 5931 | |
| 5932 | if (!compIsForInlining()) |
| 5933 | { |
| 5934 | codeGen->getEmitter()->emitBegCG(this, compHnd); |
| 5935 | } |
| 5936 | |
| 5937 | info.compIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; |
| 5938 | |
| 5939 | info.compIsContextful = (info.compClassAttr & CORINFO_FLG_CONTEXTFUL) != 0; |
| 5940 | |
| 5941 | info.compPublishStubParam = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM); |
| 5942 | |
| 5943 | switch (methodInfo->args.getCallConv()) |
| 5944 | { |
| 5945 | case CORINFO_CALLCONV_VARARG: |
| 5946 | case CORINFO_CALLCONV_NATIVEVARARG: |
| 5947 | info.compIsVarArgs = true; |
| 5948 | break; |
| 5949 | case CORINFO_CALLCONV_DEFAULT: |
| 5950 | info.compIsVarArgs = false; |
| 5951 | break; |
| 5952 | default: |
| 5953 | BADCODE("bad calling convention" ); |
| 5954 | } |
| 5955 | info.compRetNativeType = info.compRetType = JITtype2varType(methodInfo->args.retType); |
| 5956 | |
| 5957 | info.compCallUnmanaged = 0; |
| 5958 | info.compLvFrameListRoot = BAD_VAR_NUM; |
| 5959 | |
| 5960 | info.compInitMem = ((methodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0); |
| 5961 | |
| 5962 | /* Allocate the local variable table */ |
| 5963 | |
| 5964 | lvaInitTypeRef(); |
| 5965 | |
| 5966 | if (!compIsForInlining()) |
| 5967 | { |
| 5968 | compInitDebuggingInfo(); |
| 5969 | } |
| 5970 | |
| 5971 | #ifdef DEBUG |
| 5972 | if (compIsForInlining()) |
| 5973 | { |
| 5974 | compBasicBlockID = impInlineInfo->InlinerCompiler->compBasicBlockID; |
| 5975 | } |
| 5976 | #endif |
| 5977 | |
| 5978 | const bool forceInline = !!(info.compFlags & CORINFO_FLG_FORCEINLINE); |
| 5979 | |
| 5980 | if (!compIsForInlining() && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 5981 | { |
| 5982 | // We're prejitting the root method. We also will analyze it as |
| 5983 | // a potential inline candidate. |
        InlineResult prejitResult(this, methodHnd, "prejit");
| 5985 | |
| 5986 | // Do the initial inline screen. |
| 5987 | impCanInlineIL(methodHnd, methodInfo, forceInline, &prejitResult); |
| 5988 | |
| 5989 | // Temporarily install the prejitResult as the |
| 5990 | // compInlineResult so it's available to fgFindJumpTargets |
| 5991 | // and can accumulate more observations as the IL is |
| 5992 | // scanned. |
| 5993 | // |
| 5994 | // We don't pass prejitResult in as a parameter to avoid |
| 5995 | // potential aliasing confusion -- the other call to |
| 5996 | // fgFindBasicBlocks may have set up compInlineResult and |
| 5997 | // the code in fgFindJumpTargets references that data |
| 5998 | // member extensively. |
| 5999 | assert(compInlineResult == nullptr); |
| 6000 | assert(impInlineInfo == nullptr); |
| 6001 | compInlineResult = &prejitResult; |
| 6002 | |
| 6003 | // Find the basic blocks. We must do this regardless of |
| 6004 | // inlineability, since we are prejitting this method. |
| 6005 | // |
| 6006 | // This will also update the status of this method as |
| 6007 | // an inline candidate. |
| 6008 | fgFindBasicBlocks(); |
| 6009 | |
| 6010 | // Undo the temporary setup. |
| 6011 | assert(compInlineResult == &prejitResult); |
| 6012 | compInlineResult = nullptr; |
| 6013 | |
| 6014 | // If still a viable, discretionary inline, assess |
| 6015 | // profitability. |
| 6016 | if (prejitResult.IsDiscretionaryCandidate()) |
| 6017 | { |
| 6018 | prejitResult.DetermineProfitability(methodInfo); |
| 6019 | } |
| 6020 | |
| 6021 | m_inlineStrategy->NotePrejitDecision(prejitResult); |
| 6022 | |
| 6023 | // Handle the results of the inline analysis. |
| 6024 | if (prejitResult.IsFailure()) |
| 6025 | { |
| 6026 | // This method is a bad inlinee according to our |
| 6027 | // analysis. We will let the InlineResult destructor |
| 6028 | // mark it as noinline in the prejit image to save the |
| 6029 | // jit some work. |
| 6030 | // |
| 6031 | // This decision better not be context-dependent. |
| 6032 | assert(prejitResult.IsNever()); |
| 6033 | } |
| 6034 | else |
| 6035 | { |
| 6036 | // This looks like a viable inline candidate. Since |
| 6037 | // we're not actually inlining, don't report anything. |
| 6038 | prejitResult.SetReported(); |
| 6039 | } |
| 6040 | } |
| 6041 | else |
| 6042 | { |
| 6043 | // We are jitting the root method, or inlining. |
| 6044 | fgFindBasicBlocks(); |
| 6045 | } |
| 6046 | |
| 6047 | // If we're inlining and the candidate is bad, bail out. |
| 6048 | if (compDonotInline()) |
| 6049 | { |
| 6050 | goto _Next; |
| 6051 | } |
| 6052 | |
| 6053 | compSetOptimizationLevel(); |
| 6054 | |
| 6055 | #if COUNT_BASIC_BLOCKS |
| 6056 | bbCntTable.record(fgBBcount); |
| 6057 | |
| 6058 | if (fgBBcount == 1) |
| 6059 | { |
| 6060 | bbOneBBSizeTable.record(methodInfo->ILCodeSize); |
| 6061 | } |
| 6062 | #endif // COUNT_BASIC_BLOCKS |
| 6063 | |
| 6064 | #ifdef DEBUG |
| 6065 | if (verbose) |
| 6066 | { |
| 6067 | printf("Basic block list for '%s'\n" , info.compFullName); |
| 6068 | fgDispBasicBlocks(); |
| 6069 | } |
| 6070 | #endif |
| 6071 | |
| 6072 | #ifdef DEBUG |
| 6073 | /* Give the function a unique number */ |
| 6074 | |
| 6075 | if (opts.disAsm || opts.dspEmit || verbose) |
| 6076 | { |
| 6077 | s_compMethodsCount = ~info.compMethodHash() & 0xffff; |
| 6078 | } |
| 6079 | else |
| 6080 | { |
| 6081 | s_compMethodsCount++; |
| 6082 | } |
| 6083 | #endif |
| 6084 | |
| 6085 | if (compIsForInlining()) |
| 6086 | { |
| 6087 | compInlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS, fgBBcount); |
| 6088 | |
| 6089 | if (compInlineResult->IsFailure()) |
| 6090 | { |
| 6091 | goto _Next; |
| 6092 | } |
| 6093 | } |
| 6094 | |
| 6095 | #ifdef DEBUG |
| 6096 | if (JitConfig.DumpJittedMethods() == 1 && !compIsForInlining()) |
| 6097 | { |
| 6098 | printf("Compiling %4d %s::%s, IL size = %u, hsh=0x%x\n" , Compiler::jitTotalMethodCompiled, info.compClassName, |
| 6099 | info.compMethodName, info.compILCodeSize, info.compMethodHash()); |
| 6100 | } |
| 6101 | if (compIsForInlining()) |
| 6102 | { |
| 6103 | compGenTreeID = impInlineInfo->InlinerCompiler->compGenTreeID; |
| 6104 | } |
| 6105 | #endif |
| 6106 | |
| 6107 | compCompile(methodCodePtr, methodCodeSize, compileFlags); |
| 6108 | |
| 6109 | #ifdef DEBUG |
| 6110 | if (compIsForInlining()) |
| 6111 | { |
| 6112 | impInlineInfo->InlinerCompiler->compGenTreeID = compGenTreeID; |
| 6113 | impInlineInfo->InlinerCompiler->compBasicBlockID = compBasicBlockID; |
| 6114 | } |
| 6115 | #endif |
| 6116 | |
| 6117 | _Next: |
| 6118 | |
| 6119 | if (compDonotInline()) |
| 6120 | { |
| 6121 | // Verify we have only one inline result in play. |
| 6122 | assert(impInlineInfo->inlineResult == compInlineResult); |
| 6123 | } |
| 6124 | |
| 6125 | if (!compIsForInlining()) |
| 6126 | { |
| 6127 | compCompileFinish(); |
| 6128 | |
        // Did we just compile for a target architecture that the VM isn't expecting? If so, the VM
        // can't use the generated code (and we had better be an AltJit!).
| 6131 | |
| 6132 | if (!info.compMatchedVM) |
| 6133 | { |
| 6134 | return CORJIT_SKIPPED; |
| 6135 | } |
| 6136 | |
| 6137 | #ifdef ALT_JIT |
| 6138 | #ifdef DEBUG |
| 6139 | if (JitConfig.RunAltJitCode() == 0) |
| 6140 | { |
| 6141 | return CORJIT_SKIPPED; |
| 6142 | } |
| 6143 | #endif // DEBUG |
| 6144 | #endif // ALT_JIT |
| 6145 | } |
| 6146 | |
| 6147 | /* Success! */ |
| 6148 | return CORJIT_OK; |
| 6149 | } |
| 6150 | |
| 6151 | //------------------------------------------------------------------------ |
| 6152 | // compFindLocalVarLinear: Linear search for variable's scope containing offset. |
| 6153 | // |
| 6154 | // Arguments: |
| 6155 | // varNum The variable number to search for in the array of scopes. |
| 6156 | // offs The offset value which should occur within the life of the variable. |
| 6157 | // |
| 6158 | // Return Value: |
| 6159 | // VarScopeDsc* of a matching variable that contains the offset within its life |
| 6160 | // begin and life end or nullptr when there is no match found. |
| 6161 | // |
// Description:
//    Linear search for a matching variable whose life begin and end contain
//    the offset.
//
// Note:
//    Usually called for scope count = 4. Could be called for values up to 8.
| 6169 | // |
| 6170 | VarScopeDsc* Compiler::compFindLocalVarLinear(unsigned varNum, unsigned offs) |
| 6171 | { |
| 6172 | for (unsigned i = 0; i < info.compVarScopesCount; i++) |
| 6173 | { |
| 6174 | VarScopeDsc* dsc = &info.compVarScopes[i]; |
| 6175 | if ((dsc->vsdVarNum == varNum) && (dsc->vsdLifeBeg <= offs) && (dsc->vsdLifeEnd > offs)) |
| 6176 | { |
| 6177 | return dsc; |
| 6178 | } |
| 6179 | } |
| 6180 | return nullptr; |
| 6181 | } |
| 6182 | |
| 6183 | //------------------------------------------------------------------------ |
| 6184 | // compFindLocalVar: Search for variable's scope containing offset. |
| 6185 | // |
| 6186 | // Arguments: |
| 6187 | // varNum The variable number to search for in the array of scopes. |
| 6188 | // offs The offset value which should occur within the life of the variable. |
| 6189 | // |
| 6190 | // Return Value: |
| 6191 | // VarScopeDsc* of a matching variable that contains the offset within its life |
| 6192 | // begin and life end. |
| 6193 | // or NULL if one couldn't be found. |
| 6194 | // |
| 6195 | // Description: |
| 6196 | // Linear search for matching variables with their life begin and end containing |
| 6197 | // the offset only when the scope count is < MAX_LINEAR_FIND_LCL_SCOPELIST, |
| 6198 | // else use the hashtable lookup. |
| 6199 | // |
| 6200 | VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned offs) |
| 6201 | { |
| 6202 | if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) |
| 6203 | { |
| 6204 | return compFindLocalVarLinear(varNum, offs); |
| 6205 | } |
| 6206 | else |
| 6207 | { |
| 6208 | VarScopeDsc* ret = compFindLocalVar(varNum, offs, offs); |
| 6209 | assert(ret == compFindLocalVarLinear(varNum, offs)); |
| 6210 | return ret; |
| 6211 | } |
| 6212 | } |
| 6213 | |
| 6214 | //------------------------------------------------------------------------ |
| 6215 | // compFindLocalVar: Search for variable's scope containing offset. |
| 6216 | // |
| 6217 | // Arguments: |
| 6218 | // varNum The variable number to search for in the array of scopes. |
| 6219 | // lifeBeg The life begin of the variable's scope |
| 6220 | // lifeEnd The life end of the variable's scope |
| 6221 | // |
| 6222 | // Return Value: |
| 6223 | // VarScopeDsc* of a matching variable that contains the offset within its life |
| 6224 | // begin and life end, or NULL if one couldn't be found. |
| 6225 | // |
| 6226 | // Description: |
| 6227 | // Following are the steps used: |
| 6228 | // 1. Index into the hashtable using varNum. |
| 6229 | // 2. Iterate through the linked list at index varNum to find a matching |
| 6230 | // var scope. |
| 6231 | // |
| 6232 | VarScopeDsc* Compiler::compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd) |
| 6233 | { |
| 6234 | assert(compVarScopeMap != nullptr); |
| 6235 | |
| 6236 | VarScopeMapInfo* info; |
| 6237 | if (compVarScopeMap->Lookup(varNum, &info)) |
| 6238 | { |
| 6239 | VarScopeListNode* list = info->head; |
| 6240 | while (list != nullptr) |
| 6241 | { |
| 6242 | if ((list->data->vsdLifeBeg <= lifeBeg) && (list->data->vsdLifeEnd > lifeEnd)) |
| 6243 | { |
| 6244 | return list->data; |
| 6245 | } |
| 6246 | list = list->next; |
| 6247 | } |
| 6248 | } |
| 6249 | return nullptr; |
| 6250 | } |
| 6251 | |
| 6252 | //------------------------------------------------------------------------- |
| 6253 | // compInitVarScopeMap: Create a scope map so it can be looked up by varNum |
| 6254 | // |
| 6255 | // Description: |
| 6256 | // Map.K => Map.V :: varNum => List(ScopeDsc) |
| 6257 | // |
//  Create a scope map that can be indexed by varNum and whose values can be
//  iterated to look for a matching scope, given either an offs or a
//  lifeBeg/lifeEnd pair.
| 6261 | // |
| 6262 | // Notes: |
| 6263 | // 1. Build the map only when we think linear search is slow, i.e., |
| 6264 | // MAX_LINEAR_FIND_LCL_SCOPELIST is large. |
| 6265 | // 2. Linked list preserves original array order. |
| 6266 | // |
| 6267 | void Compiler::compInitVarScopeMap() |
| 6268 | { |
| 6269 | if (info.compVarScopesCount < MAX_LINEAR_FIND_LCL_SCOPELIST) |
| 6270 | { |
| 6271 | return; |
| 6272 | } |
| 6273 | |
| 6274 | assert(compVarScopeMap == nullptr); |
| 6275 | |
| 6276 | compVarScopeMap = new (getAllocator()) VarNumToScopeDscMap(getAllocator()); |
| 6277 | |
    // Cap the table size at 599 (a prime) to limit huge allocations, e.g., for duplicated scopes on a single var.
| 6279 | compVarScopeMap->Reallocate(min(info.compVarScopesCount, 599)); |
| 6280 | |
| 6281 | for (unsigned i = 0; i < info.compVarScopesCount; ++i) |
| 6282 | { |
| 6283 | unsigned varNum = info.compVarScopes[i].vsdVarNum; |
| 6284 | |
| 6285 | VarScopeListNode* node = VarScopeListNode::Create(&info.compVarScopes[i], getAllocator()); |
| 6286 | |
        // Index by varNum; if a list already exists there, append "node" to it.
| 6288 | VarScopeMapInfo* info; |
| 6289 | if (compVarScopeMap->Lookup(varNum, &info)) |
| 6290 | { |
| 6291 | info->tail->next = node; |
| 6292 | info->tail = node; |
| 6293 | } |
| 6294 | // Create a new list. |
| 6295 | else |
| 6296 | { |
| 6297 | info = VarScopeMapInfo::Create(node, getAllocator()); |
| 6298 | compVarScopeMap->Set(varNum, info); |
| 6299 | } |
| 6300 | } |
| 6301 | } |
| 6302 | |
| 6303 | int __cdecl genCmpLocalVarLifeBeg(const void* elem1, const void* elem2) |
| 6304 | { |
| 6305 | return (*((VarScopeDsc**)elem1))->vsdLifeBeg - (*((VarScopeDsc**)elem2))->vsdLifeBeg; |
| 6306 | } |
| 6307 | |
| 6308 | int __cdecl genCmpLocalVarLifeEnd(const void* elem1, const void* elem2) |
| 6309 | { |
| 6310 | return (*((VarScopeDsc**)elem1))->vsdLifeEnd - (*((VarScopeDsc**)elem2))->vsdLifeEnd; |
| 6311 | } |
| 6312 | |
| 6313 | inline void Compiler::compInitScopeLists() |
| 6314 | { |
| 6315 | if (info.compVarScopesCount == 0) |
| 6316 | { |
| 6317 | compEnterScopeList = compExitScopeList = nullptr; |
| 6318 | return; |
| 6319 | } |
| 6320 | |
| 6321 | // Populate the 'compEnterScopeList' and 'compExitScopeList' lists |
| 6322 | |
| 6323 | compEnterScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; |
| 6324 | compExitScopeList = new (this, CMK_DebugInfo) VarScopeDsc*[info.compVarScopesCount]; |
| 6325 | |
| 6326 | for (unsigned i = 0; i < info.compVarScopesCount; i++) |
| 6327 | { |
| 6328 | compEnterScopeList[i] = compExitScopeList[i] = &info.compVarScopes[i]; |
| 6329 | } |
| 6330 | |
| 6331 | qsort(compEnterScopeList, info.compVarScopesCount, sizeof(*compEnterScopeList), genCmpLocalVarLifeBeg); |
| 6332 | qsort(compExitScopeList, info.compVarScopesCount, sizeof(*compExitScopeList), genCmpLocalVarLifeEnd); |
| 6333 | } |
| 6334 | |
| 6335 | void Compiler::compResetScopeLists() |
| 6336 | { |
| 6337 | if (info.compVarScopesCount == 0) |
| 6338 | { |
| 6339 | return; |
| 6340 | } |
| 6341 | |
| 6342 | assert(compEnterScopeList && compExitScopeList); |
| 6343 | |
| 6344 | compNextEnterScope = compNextExitScope = 0; |
| 6345 | } |
| 6346 | |
| 6347 | VarScopeDsc* Compiler::compGetNextEnterScope(unsigned offs, bool scan) |
| 6348 | { |
| 6349 | assert(info.compVarScopesCount); |
| 6350 | assert(compEnterScopeList && compExitScopeList); |
| 6351 | |
| 6352 | if (compNextEnterScope < info.compVarScopesCount) |
| 6353 | { |
| 6354 | assert(compEnterScopeList[compNextEnterScope]); |
| 6355 | unsigned nextEnterOff = compEnterScopeList[compNextEnterScope]->vsdLifeBeg; |
| 6356 | assert(scan || (offs <= nextEnterOff)); |
| 6357 | |
| 6358 | if (!scan) |
| 6359 | { |
| 6360 | if (offs == nextEnterOff) |
| 6361 | { |
| 6362 | return compEnterScopeList[compNextEnterScope++]; |
| 6363 | } |
| 6364 | } |
| 6365 | else |
| 6366 | { |
| 6367 | if (nextEnterOff <= offs) |
| 6368 | { |
| 6369 | return compEnterScopeList[compNextEnterScope++]; |
| 6370 | } |
| 6371 | } |
| 6372 | } |
| 6373 | |
| 6374 | return nullptr; |
| 6375 | } |
| 6376 | |
| 6377 | VarScopeDsc* Compiler::compGetNextExitScope(unsigned offs, bool scan) |
| 6378 | { |
| 6379 | assert(info.compVarScopesCount); |
| 6380 | assert(compEnterScopeList && compExitScopeList); |
| 6381 | |
| 6382 | if (compNextExitScope < info.compVarScopesCount) |
| 6383 | { |
| 6384 | assert(compExitScopeList[compNextExitScope]); |
| 6385 | unsigned nextExitOffs = compExitScopeList[compNextExitScope]->vsdLifeEnd; |
| 6386 | assert(scan || (offs <= nextExitOffs)); |
| 6387 | |
| 6388 | if (!scan) |
| 6389 | { |
| 6390 | if (offs == nextExitOffs) |
| 6391 | { |
| 6392 | return compExitScopeList[compNextExitScope++]; |
| 6393 | } |
| 6394 | } |
| 6395 | else |
| 6396 | { |
| 6397 | if (nextExitOffs <= offs) |
| 6398 | { |
| 6399 | return compExitScopeList[compNextExitScope++]; |
| 6400 | } |
| 6401 | } |
| 6402 | } |
| 6403 | |
| 6404 | return nullptr; |
| 6405 | } |
| 6406 | |
// This function calls the given enter/exit callbacks for all scopes whose
// boundaries lie between the current position in the scope lists and 'offset',
// ordered by IL offset.
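//
// For example (illustrative): with scope A = [0, 12), scope B = [4, 8), freshly
// reset lists, and offset = 16, the callbacks fire in IL-offset order:
//
//   enterScopeFn(A @ 0), enterScopeFn(B @ 4), exitScopeFn(B @ 8), exitScopeFn(A @ 12)
//
// leaving 'inScope' empty by the time 'offset' is reached.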
| 6410 | |
| 6411 | void Compiler::compProcessScopesUntil(unsigned offset, |
| 6412 | VARSET_TP* inScope, |
| 6413 | void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), |
| 6414 | void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)) |
| 6415 | { |
| 6416 | assert(offset != BAD_IL_OFFSET); |
| 6417 | assert(inScope != nullptr); |
| 6418 | |
| 6419 | bool foundExit = false, foundEnter = true; |
| 6420 | VarScopeDsc* scope; |
| 6421 | VarScopeDsc* nextExitScope = nullptr; |
| 6422 | VarScopeDsc* nextEnterScope = nullptr; |
| 6423 | unsigned offs = offset, curEnterOffs = 0; |
| 6424 | |
| 6425 | goto START_FINDING_SCOPES; |
| 6426 | |
| 6427 | // We need to determine the scopes which are open for the current block. |
| 6428 | // This loop walks over the missing blocks between the current and the |
| 6429 | // previous block, keeping the enter and exit offsets in lockstep. |
| 6430 | |
| 6431 | do |
| 6432 | { |
| 6433 | foundExit = foundEnter = false; |
| 6434 | |
| 6435 | if (nextExitScope) |
| 6436 | { |
| 6437 | (this->*exitScopeFn)(inScope, nextExitScope); |
| 6438 | nextExitScope = nullptr; |
| 6439 | foundExit = true; |
| 6440 | } |
| 6441 | |
| 6442 | offs = nextEnterScope ? nextEnterScope->vsdLifeBeg : offset; |
| 6443 | |
| 6444 | while ((scope = compGetNextExitScope(offs, true)) != nullptr) |
| 6445 | { |
| 6446 | foundExit = true; |
| 6447 | |
| 6448 | if (!nextEnterScope || scope->vsdLifeEnd > nextEnterScope->vsdLifeBeg) |
| 6449 | { |
| 6450 | // We overshot the last found Enter scope. Save the scope for later |
| 6451 | // and find an entering scope |
| 6452 | |
| 6453 | nextExitScope = scope; |
| 6454 | break; |
| 6455 | } |
| 6456 | |
| 6457 | (this->*exitScopeFn)(inScope, scope); |
| 6458 | } |
| 6459 | |
| 6460 | if (nextEnterScope) |
| 6461 | { |
| 6462 | (this->*enterScopeFn)(inScope, nextEnterScope); |
| 6463 | curEnterOffs = nextEnterScope->vsdLifeBeg; |
| 6464 | nextEnterScope = nullptr; |
| 6465 | foundEnter = true; |
| 6466 | } |
| 6467 | |
| 6468 | offs = nextExitScope ? nextExitScope->vsdLifeEnd : offset; |
| 6469 | |
| 6470 | START_FINDING_SCOPES: |
| 6471 | |
| 6472 | while ((scope = compGetNextEnterScope(offs, true)) != nullptr) |
| 6473 | { |
| 6474 | foundEnter = true; |
| 6475 | |
| 6476 | if ((nextExitScope && scope->vsdLifeBeg >= nextExitScope->vsdLifeEnd) || (scope->vsdLifeBeg > curEnterOffs)) |
| 6477 | { |
| 6478 | // We overshot the last found exit scope. Save the scope for later |
| 6479 | // and find an exiting scope |
| 6480 | |
| 6481 | nextEnterScope = scope; |
| 6482 | break; |
| 6483 | } |
| 6484 | |
| 6485 | (this->*enterScopeFn)(inScope, scope); |
| 6486 | |
| 6487 | if (!nextExitScope) |
| 6488 | { |
| 6489 | curEnterOffs = scope->vsdLifeBeg; |
| 6490 | } |
| 6491 | } |
| 6492 | } while (foundExit || foundEnter); |
| 6493 | } |
| 6494 | |
| 6495 | #if defined(DEBUG) |
| 6496 | |
| 6497 | void Compiler::compDispScopeLists() |
| 6498 | { |
| 6499 | unsigned i; |
| 6500 | |
| 6501 | printf("Local variable scopes = %d\n" , info.compVarScopesCount); |
| 6502 | |
| 6503 | if (info.compVarScopesCount) |
| 6504 | { |
| 6505 | printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n" ); |
| 6506 | } |
| 6507 | |
| 6508 | printf("Sorted by enter scope:\n" ); |
| 6509 | for (i = 0; i < info.compVarScopesCount; i++) |
| 6510 | { |
| 6511 | VarScopeDsc* varScope = compEnterScopeList[i]; |
| 6512 | assert(varScope); |
| 6513 | printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh" , i, varScope->vsdVarNum, varScope->vsdLVnum, |
| 6514 | VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), |
| 6515 | varScope->vsdLifeBeg, varScope->vsdLifeEnd); |
| 6516 | |
| 6517 | if (compNextEnterScope == i) |
| 6518 | { |
| 6519 | printf(" <-- next enter scope" ); |
| 6520 | } |
| 6521 | |
| 6522 | printf("\n" ); |
| 6523 | } |
| 6524 | |
| 6525 | printf("Sorted by exit scope:\n" ); |
| 6526 | for (i = 0; i < info.compVarScopesCount; i++) |
| 6527 | { |
| 6528 | VarScopeDsc* varScope = compExitScopeList[i]; |
| 6529 | assert(varScope); |
| 6530 | printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh" , i, varScope->vsdVarNum, varScope->vsdLVnum, |
| 6531 | VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), |
| 6532 | varScope->vsdLifeBeg, varScope->vsdLifeEnd); |
| 6533 | |
| 6534 | if (compNextExitScope == i) |
| 6535 | { |
| 6536 | printf(" <-- next exit scope" ); |
| 6537 | } |
| 6538 | |
| 6539 | printf("\n" ); |
| 6540 | } |
| 6541 | } |
| 6542 | |
| 6543 | void Compiler::compDispLocalVars() |
| 6544 | { |
| 6545 | printf("info.compVarScopesCount = %d\n" , info.compVarScopesCount); |
| 6546 | |
| 6547 | if (info.compVarScopesCount > 0) |
| 6548 | { |
| 6549 | printf(" \tVarNum \tLVNum \t Name \tBeg \tEnd\n" ); |
| 6550 | } |
| 6551 | |
| 6552 | for (unsigned i = 0; i < info.compVarScopesCount; i++) |
| 6553 | { |
| 6554 | VarScopeDsc* varScope = &info.compVarScopes[i]; |
| 6555 | printf("%2d: \t%02Xh \t%02Xh \t%10s \t%03Xh \t%03Xh\n" , i, varScope->vsdVarNum, varScope->vsdLVnum, |
| 6556 | VarNameToStr(varScope->vsdName) == nullptr ? "UNKNOWN" : VarNameToStr(varScope->vsdName), |
| 6557 | varScope->vsdLifeBeg, varScope->vsdLifeEnd); |
| 6558 | } |
| 6559 | } |
| 6560 | |
| 6561 | #endif // DEBUG |
| 6562 | |
| 6563 | /*****************************************************************************/ |
| 6564 | |
| 6565 | #if MEASURE_CLRAPI_CALLS |
| 6566 | |
| 6567 | struct WrapICorJitInfo : public ICorJitInfo |
| 6568 | { |
| 6569 | //------------------------------------------------------------------------ |
| 6570 | // WrapICorJitInfo::makeOne: allocate an instance of WrapICorJitInfo |
| 6571 | // |
| 6572 | // Arguments: |
| 6573 | // alloc - the allocator to get memory from for the instance |
| 6574 | // compile - the compiler instance |
| 6575 | // compHndRef - the ICorJitInfo handle from the EE; the caller's |
| 6576 | // copy may be replaced with a "wrapper" instance |
| 6577 | // |
| 6578 | // Return Value: |
| 6579 | // If the config flags indicate that ICorJitInfo should be wrapped, |
| 6580 | // we return the "wrapper" instance; otherwise we return "nullptr". |
| 6581 | |
| 6582 | static WrapICorJitInfo* makeOne(ArenaAllocator* alloc, Compiler* compiler, COMP_HANDLE& compHndRef /* INOUT */) |
| 6583 | { |
| 6584 | WrapICorJitInfo* wrap = nullptr; |
| 6585 | |
| 6586 | if (JitConfig.JitEECallTimingInfo() != 0) |
| 6587 | { |
| 6588 | // It's too early to use the default allocator, so we do this |
| 6589 | // in two steps to be safe (the constructor doesn't need to do |
| 6590 | // anything except fill in the vtable pointer, so we let the |
| 6591 | // compiler do it). |
| 6592 | void* inst = alloc->allocateMemory(roundUp(sizeof(WrapICorJitInfo))); |
| 6593 | if (inst != nullptr) |
| 6594 | { |
| 6595 | // If you get a build error here due to 'WrapICorJitInfo' being |
| 6596 | // an abstract class, it's very likely that the wrapper bodies |
| 6597 | // in ICorJitInfo_API_wrapper.hpp are no longer in sync with |
| 6598 | // the EE interface; please be kind and update the header file. |
| 6599 | wrap = new (inst, jitstd::placement_t()) WrapICorJitInfo(); |
| 6600 | |
| 6601 | wrap->wrapComp = compiler; |
| 6602 | |
| 6603 | // Save the real handle and replace it with our wrapped version. |
| 6604 | wrap->wrapHnd = compHndRef; |
| 6605 | compHndRef = wrap; |
| 6606 | } |
| 6607 | } |
| 6608 | |
| 6609 | return wrap; |
| 6610 | } |
| 6611 | |
| 6612 | private: |
| 6613 | Compiler* wrapComp; |
| 6614 | COMP_HANDLE wrapHnd; // the "real thing" |
| 6615 | |
| 6616 | public: |
| 6617 | #include "ICorJitInfo_API_wrapper.hpp" |
| 6618 | }; |
| 6619 | |
| 6620 | #endif // MEASURE_CLRAPI_CALLS |
| 6621 | |
| 6622 | /*****************************************************************************/ |
| 6623 | |
| 6624 | // Compile a single method |
| 6625 | |
| 6626 | int jitNativeCode(CORINFO_METHOD_HANDLE methodHnd, |
| 6627 | CORINFO_MODULE_HANDLE classPtr, |
| 6628 | COMP_HANDLE compHnd, |
| 6629 | CORINFO_METHOD_INFO* methodInfo, |
| 6630 | void** methodCodePtr, |
| 6631 | ULONG* methodCodeSize, |
| 6632 | JitFlags* compileFlags, |
| 6633 | void* inlineInfoPtr) |
| 6634 | { |
| 6635 | // |
| 6636 | // A non-NULL inlineInfo means we are compiling the inlinee method. |
| 6637 | // |
| 6638 | InlineInfo* inlineInfo = (InlineInfo*)inlineInfoPtr; |
| 6639 | |
| 6640 | bool jitFallbackCompile = false; |
| 6641 | START: |
| 6642 | int result = CORJIT_INTERNALERROR; |
| 6643 | |
| 6644 | ArenaAllocator* pAlloc = nullptr; |
| 6645 | ArenaAllocator alloc; |
| 6646 | |
| 6647 | #if MEASURE_CLRAPI_CALLS |
| 6648 | WrapICorJitInfo* wrapCLR = nullptr; |
| 6649 | #endif |
| 6650 | |
| 6651 | if (inlineInfo) |
| 6652 | { |
| 6653 | // Use inliner's memory allocator when compiling the inlinee. |
| 6654 | pAlloc = inlineInfo->InlinerCompiler->compGetArenaAllocator(); |
| 6655 | } |
| 6656 | else |
| 6657 | { |
| 6658 | pAlloc = &alloc; |
| 6659 | } |
| 6660 | |
| 6661 | Compiler* pComp; |
| 6662 | pComp = nullptr; |
| 6663 | |
| 6664 | struct Param |
| 6665 | { |
| 6666 | Compiler* pComp; |
| 6667 | ArenaAllocator* pAlloc; |
| 6668 | bool jitFallbackCompile; |
| 6669 | |
| 6670 | CORINFO_METHOD_HANDLE methodHnd; |
| 6671 | CORINFO_MODULE_HANDLE classPtr; |
| 6672 | COMP_HANDLE compHnd; |
| 6673 | CORINFO_METHOD_INFO* methodInfo; |
| 6674 | void** methodCodePtr; |
| 6675 | ULONG* methodCodeSize; |
| 6676 | JitFlags* compileFlags; |
| 6677 | InlineInfo* inlineInfo; |
| 6678 | #if MEASURE_CLRAPI_CALLS |
| 6679 | WrapICorJitInfo* wrapCLR; |
| 6680 | #endif |
| 6681 | |
| 6682 | int result; |
| 6683 | } param; |
| 6684 | param.pComp = nullptr; |
| 6685 | param.pAlloc = pAlloc; |
| 6686 | param.jitFallbackCompile = jitFallbackCompile; |
| 6687 | param.methodHnd = methodHnd; |
| 6688 | param.classPtr = classPtr; |
| 6689 | param.compHnd = compHnd; |
| 6690 | param.methodInfo = methodInfo; |
| 6691 | param.methodCodePtr = methodCodePtr; |
| 6692 | param.methodCodeSize = methodCodeSize; |
| 6693 | param.compileFlags = compileFlags; |
| 6694 | param.inlineInfo = inlineInfo; |
| 6695 | #if MEASURE_CLRAPI_CALLS |
| 6696 | param.wrapCLR = nullptr; |
| 6697 | #endif |
| 6698 | param.result = result; |
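
    // Note on the "Param" dance above: setErrorTrap is a setjmp-style construct,
    // so locals of this function cannot safely be referenced from inside the
    // protected regions below; everything they need is copied into 'param' and
    // accessed through the trap's parameter pointer. A minimal sketch of the
    // pattern (names hypothetical):
    //
    //   struct P { int arg; int result; } p;
    //   p.arg = arg;
    //   setErrorTrap(compHnd, P*, pP, &p)
    //   {
    //       pP->result = doWork(pP->arg); // touch state only via pP
    //   }
    //   endErrorTrap()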
| 6699 | |
| 6700 | setErrorTrap(compHnd, Param*, pParamOuter, ¶m) |
| 6701 | { |
| 6702 | setErrorTrap(nullptr, Param*, pParam, pParamOuter) |
| 6703 | { |
| 6704 | if (pParam->inlineInfo) |
| 6705 | { |
| 6706 | // Lazily create the inlinee compiler object |
| 6707 | if (pParam->inlineInfo->InlinerCompiler->InlineeCompiler == nullptr) |
| 6708 | { |
| 6709 | pParam->inlineInfo->InlinerCompiler->InlineeCompiler = |
| 6710 | (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); |
| 6711 | } |
| 6712 | |
| 6713 | // Use the inlinee compiler object |
| 6714 | pParam->pComp = pParam->inlineInfo->InlinerCompiler->InlineeCompiler; |
| 6715 | #ifdef DEBUG |
| 6716 | // memset(pParam->pComp, 0xEE, sizeof(Compiler)); |
| 6717 | #endif |
| 6718 | } |
| 6719 | else |
| 6720 | { |
            // Allocate the inliner compiler object
| 6722 | pParam->pComp = (Compiler*)pParam->pAlloc->allocateMemory(roundUp(sizeof(*pParam->pComp))); |
| 6723 | } |
| 6724 | |
| 6725 | #if MEASURE_CLRAPI_CALLS |
| 6726 | pParam->wrapCLR = WrapICorJitInfo::makeOne(pParam->pAlloc, pParam->pComp, pParam->compHnd); |
| 6727 | #endif |
| 6728 | |
| 6729 | // push this compiler on the stack (TLS) |
| 6730 | pParam->pComp->prevCompiler = JitTls::GetCompiler(); |
| 6731 | JitTls::SetCompiler(pParam->pComp); |
| 6732 | |
| 6733 | // PREFIX_ASSUME gets turned into ASSERT_CHECK and we cannot have it here |
| 6734 | #if defined(_PREFAST_) || defined(_PREFIX_) |
| 6735 | PREFIX_ASSUME(pParam->pComp != NULL); |
| 6736 | #else |
| 6737 | assert(pParam->pComp != nullptr); |
| 6738 | #endif |
| 6739 | |
| 6740 | pParam->pComp->compInit(pParam->pAlloc, pParam->inlineInfo); |
| 6741 | |
| 6742 | #ifdef DEBUG |
| 6743 | pParam->pComp->jitFallbackCompile = pParam->jitFallbackCompile; |
| 6744 | #endif |
| 6745 | |
| 6746 | // Now generate the code |
| 6747 | pParam->result = |
| 6748 | pParam->pComp->compCompile(pParam->methodHnd, pParam->classPtr, pParam->compHnd, pParam->methodInfo, |
| 6749 | pParam->methodCodePtr, pParam->methodCodeSize, pParam->compileFlags); |
| 6750 | } |
| 6751 | finallyErrorTrap() |
| 6752 | { |
| 6753 | Compiler* pCompiler = pParamOuter->pComp; |
| 6754 | |
        // If OOM is thrown when allocating memory for a pComp, we will end up here.
        // In that case, pComp (and hence pCompiler) will be nullptr.
        //
| 6758 | if (pCompiler != nullptr) |
| 6759 | { |
| 6760 | pCompiler->info.compCode = nullptr; |
| 6761 | |
| 6762 | // pop the compiler off the TLS stack only if it was linked above |
| 6763 | assert(JitTls::GetCompiler() == pCompiler); |
| 6764 | JitTls::SetCompiler(pCompiler->prevCompiler); |
| 6765 | } |
| 6766 | |
| 6767 | if (pParamOuter->inlineInfo == nullptr) |
| 6768 | { |
| 6769 | // Free up the allocator we were using |
| 6770 | pParamOuter->pAlloc->destroy(); |
| 6771 | } |
| 6772 | } |
| 6773 | endErrorTrap() |
| 6774 | } |
| 6775 | impJitErrorTrap() |
| 6776 | { |
| 6777 | // If we were looking at an inlinee.... |
| 6778 | if (inlineInfo != nullptr) |
| 6779 | { |
| 6780 | // Note that we failed to compile the inlinee, and that |
| 6781 | // there's no point trying to inline it again anywhere else. |
| 6782 | inlineInfo->inlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); |
| 6783 | } |
| 6784 | param.result = __errc; |
| 6785 | } |
| 6786 | endErrorTrap() |
| 6787 | |
| 6788 | result = param.result; |
| 6789 | |
| 6790 | if (!inlineInfo && (result == CORJIT_INTERNALERROR || result == CORJIT_RECOVERABLEERROR) && !jitFallbackCompile) |
| 6791 | { |
| 6792 | // If we failed the JIT, reattempt with debuggable code. |
| 6793 | jitFallbackCompile = true; |
| 6794 | |
| 6795 | // Update the flags for 'safer' code generation. |
| 6796 | compileFlags->Set(JitFlags::JIT_FLAG_MIN_OPT); |
| 6797 | compileFlags->Clear(JitFlags::JIT_FLAG_SIZE_OPT); |
| 6798 | compileFlags->Clear(JitFlags::JIT_FLAG_SPEED_OPT); |
| 6799 | |
| 6800 | goto START; |
| 6801 | } |
| 6802 | |
| 6803 | return result; |
| 6804 | } |
| 6805 | |
| 6806 | #if defined(UNIX_AMD64_ABI) |
| 6807 | |
//------------------------------------------------------------------------
// GetTypeFromClassificationAndSizes: Returns the type of the eightbyte,
//    accounting for the classification and size of the eightbyte.
//
// Arguments:
//    classType - classification type
//    size      - size of the eightbyte.
//
// static
| 6816 | var_types Compiler::GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size) |
| 6817 | { |
| 6818 | var_types type = TYP_UNKNOWN; |
| 6819 | switch (classType) |
| 6820 | { |
| 6821 | case SystemVClassificationTypeInteger: |
| 6822 | if (size == 1) |
| 6823 | { |
| 6824 | type = TYP_BYTE; |
| 6825 | } |
| 6826 | else if (size <= 2) |
| 6827 | { |
| 6828 | type = TYP_SHORT; |
| 6829 | } |
| 6830 | else if (size <= 4) |
| 6831 | { |
| 6832 | type = TYP_INT; |
| 6833 | } |
| 6834 | else if (size <= 8) |
| 6835 | { |
| 6836 | type = TYP_LONG; |
| 6837 | } |
| 6838 | else |
| 6839 | { |
| 6840 | assert(false && "GetTypeFromClassificationAndSizes Invalid Integer classification type." ); |
| 6841 | } |
| 6842 | break; |
| 6843 | case SystemVClassificationTypeIntegerReference: |
| 6844 | type = TYP_REF; |
| 6845 | break; |
| 6846 | case SystemVClassificationTypeIntegerByRef: |
| 6847 | type = TYP_BYREF; |
| 6848 | break; |
| 6849 | case SystemVClassificationTypeSSE: |
| 6850 | if (size <= 4) |
| 6851 | { |
| 6852 | type = TYP_FLOAT; |
| 6853 | } |
| 6854 | else if (size <= 8) |
| 6855 | { |
| 6856 | type = TYP_DOUBLE; |
| 6857 | } |
| 6858 | else |
| 6859 | { |
| 6860 | assert(false && "GetTypeFromClassificationAndSizes Invalid SSE classification type." ); |
| 6861 | } |
| 6862 | break; |
| 6863 | |
| 6864 | default: |
| 6865 | assert(false && "GetTypeFromClassificationAndSizes Invalid classification type." ); |
| 6866 | break; |
| 6867 | } |
| 6868 | |
| 6869 | return type; |
| 6870 | } |
| 6871 | |
| 6872 | //------------------------------------------------------------------- |
| 6873 | // GetEightByteType: Returns the type of eightbyte slot of a struct |
| 6874 | // |
| 6875 | // Arguments: |
| 6876 | // structDesc - struct classification description. |
| 6877 | // slotNum - eightbyte slot number for the struct. |
| 6878 | // |
| 6879 | // Return Value: |
| 6880 | // type of the eightbyte slot of the struct |
| 6881 | // |
| 6882 | // static |
| 6883 | var_types Compiler::GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, |
| 6884 | unsigned slotNum) |
| 6885 | { |
| 6886 | var_types eightByteType = TYP_UNDEF; |
| 6887 | unsigned len = structDesc.eightByteSizes[slotNum]; |
| 6888 | |
| 6889 | switch (structDesc.eightByteClassifications[slotNum]) |
| 6890 | { |
| 6891 | case SystemVClassificationTypeInteger: |
| 6892 | // See typelist.h for jit type definition. |
            // All types of size <= 4 bytes are of jit type TYP_INT here.
| 6894 | if (structDesc.eightByteSizes[slotNum] <= 4) |
| 6895 | { |
| 6896 | eightByteType = TYP_INT; |
| 6897 | } |
| 6898 | else if (structDesc.eightByteSizes[slotNum] <= 8) |
| 6899 | { |
| 6900 | eightByteType = TYP_LONG; |
| 6901 | } |
| 6902 | else |
| 6903 | { |
| 6904 | assert(false && "GetEightByteType Invalid Integer classification type." ); |
| 6905 | } |
| 6906 | break; |
| 6907 | case SystemVClassificationTypeIntegerReference: |
| 6908 | assert(len == REGSIZE_BYTES); |
| 6909 | eightByteType = TYP_REF; |
| 6910 | break; |
| 6911 | case SystemVClassificationTypeIntegerByRef: |
| 6912 | assert(len == REGSIZE_BYTES); |
| 6913 | eightByteType = TYP_BYREF; |
| 6914 | break; |
| 6915 | case SystemVClassificationTypeSSE: |
| 6916 | if (structDesc.eightByteSizes[slotNum] <= 4) |
| 6917 | { |
| 6918 | eightByteType = TYP_FLOAT; |
| 6919 | } |
| 6920 | else if (structDesc.eightByteSizes[slotNum] <= 8) |
| 6921 | { |
| 6922 | eightByteType = TYP_DOUBLE; |
| 6923 | } |
| 6924 | else |
| 6925 | { |
| 6926 | assert(false && "GetEightByteType Invalid SSE classification type." ); |
| 6927 | } |
| 6928 | break; |
| 6929 | default: |
| 6930 | assert(false && "GetEightByteType Invalid classification type." ); |
| 6931 | break; |
| 6932 | } |
| 6933 | |
| 6934 | return eightByteType; |
| 6935 | } |
| 6936 | |
| 6937 | //------------------------------------------------------------------------------------------------------ |
// GetStructTypeOffset: Gets the type and offset of each eightbyte of a struct for System V systems.
| 6939 | // |
| 6940 | // Arguments: |
| 6941 | // 'structDesc' - struct description |
| 6942 | // 'type0' - out param; returns the type of the first eightbyte. |
| 6943 | // 'type1' - out param; returns the type of the second eightbyte. |
| 6944 | // 'offset0' - out param; returns the offset of the first eightbyte. |
| 6945 | // 'offset1' - out param; returns the offset of the second eightbyte. |
| 6946 | // |
| 6947 | // static |
| 6948 | void Compiler::GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, |
| 6949 | var_types* type0, |
| 6950 | var_types* type1, |
| 6951 | unsigned __int8* offset0, |
| 6952 | unsigned __int8* offset1) |
| 6953 | { |
| 6954 | *offset0 = structDesc.eightByteOffsets[0]; |
| 6955 | *offset1 = structDesc.eightByteOffsets[1]; |
| 6956 | |
| 6957 | *type0 = TYP_UNKNOWN; |
| 6958 | *type1 = TYP_UNKNOWN; |
| 6959 | |
| 6960 | // Set the first eightbyte data |
| 6961 | if (structDesc.eightByteCount >= 1) |
| 6962 | { |
| 6963 | *type0 = GetEightByteType(structDesc, 0); |
| 6964 | } |
| 6965 | |
| 6966 | // Set the second eight byte data |
| 6967 | if (structDesc.eightByteCount == 2) |
| 6968 | { |
| 6969 | *type1 = GetEightByteType(structDesc, 1); |
| 6970 | } |
| 6971 | } |
| 6972 | |
| 6973 | //------------------------------------------------------------------------------------------------------ |
// GetStructTypeOffset: Gets the type and offset of each eightbyte of a struct for System V systems.
| 6975 | // |
| 6976 | // Arguments: |
| 6977 | // 'typeHnd' - type handle |
| 6978 | // 'type0' - out param; returns the type of the first eightbyte. |
| 6979 | // 'type1' - out param; returns the type of the second eightbyte. |
| 6980 | // 'offset0' - out param; returns the offset of the first eightbyte. |
| 6981 | // 'offset1' - out param; returns the offset of the second eightbyte. |
| 6982 | // |
| 6983 | void Compiler::GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, |
| 6984 | var_types* type0, |
| 6985 | var_types* type1, |
| 6986 | unsigned __int8* offset0, |
| 6987 | unsigned __int8* offset1) |
| 6988 | { |
| 6989 | SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; |
| 6990 | eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); |
| 6991 | assert(structDesc.passedInRegisters); |
| 6992 | GetStructTypeOffset(structDesc, type0, type1, offset0, offset1); |
| 6993 | } |
| 6994 | |
| 6995 | #endif // defined(UNIX_AMD64_ABI) |
| 6996 | |
| 6997 | /*****************************************************************************/ |
| 6998 | /*****************************************************************************/ |
| 6999 | |
| 7000 | #ifdef DEBUG |
| 7001 | Compiler::NodeToIntMap* Compiler::FindReachableNodesInNodeTestData() |
| 7002 | { |
| 7003 | NodeToIntMap* reachable = new (getAllocatorDebugOnly()) NodeToIntMap(getAllocatorDebugOnly()); |
| 7004 | |
| 7005 | if (m_nodeTestData == nullptr) |
| 7006 | { |
| 7007 | return reachable; |
| 7008 | } |
| 7009 | |
| 7010 | // Otherwise, iterate. |
| 7011 | |
| 7012 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 7013 | { |
| 7014 | for (GenTree* stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->gtNext) |
| 7015 | { |
| 7016 | for (GenTree* tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext) |
| 7017 | { |
| 7018 | TestLabelAndNum tlAndN; |
| 7019 | |
| 7020 | // For call nodes, translate late args to what they stand for. |
| 7021 | if (tree->OperGet() == GT_CALL) |
| 7022 | { |
| 7023 | GenTreeCall* call = tree->AsCall(); |
| 7024 | GenTreeArgList* args = call->gtCallArgs; |
| 7025 | unsigned i = 0; |
| 7026 | while (args != nullptr) |
| 7027 | { |
| 7028 | GenTree* arg = args->Current(); |
| 7029 | if (arg->gtFlags & GTF_LATE_ARG) |
| 7030 | { |
| 7031 | // Find the corresponding late arg. |
| 7032 | GenTree* lateArg = call->fgArgInfo->GetArgNode(i); |
| 7033 | if (GetNodeTestData()->Lookup(lateArg, &tlAndN)) |
| 7034 | { |
| 7035 | reachable->Set(lateArg, 0); |
| 7036 | } |
| 7037 | } |
| 7038 | i++; |
| 7039 | args = args->Rest(); |
| 7040 | } |
| 7041 | } |
| 7042 | |
| 7043 | if (GetNodeTestData()->Lookup(tree, &tlAndN)) |
| 7044 | { |
| 7045 | reachable->Set(tree, 0); |
| 7046 | } |
| 7047 | } |
| 7048 | } |
| 7049 | } |
| 7050 | return reachable; |
| 7051 | } |
| 7052 | |
| 7053 | void Compiler::TransferTestDataToNode(GenTree* from, GenTree* to) |
| 7054 | { |
| 7055 | TestLabelAndNum tlAndN; |
| 7056 | // We can't currently associate multiple annotations with a single node. |
| 7057 | // If we need to, we can fix this... |
| 7058 | |
| 7059 | // If the table is null, don't create it just to do the lookup, which would fail... |
| 7060 | if (m_nodeTestData != nullptr && GetNodeTestData()->Lookup(from, &tlAndN)) |
| 7061 | { |
| 7065 | TestLabelAndNum tlAndNTo; |
| 7066 | assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); |
| 7067 | |
| 7068 | GetNodeTestData()->Remove(from); |
| 7069 | GetNodeTestData()->Set(to, tlAndN); |
| 7070 | } |
| 7071 | } |
| 7072 | |
| 7073 | void Compiler::CopyTestDataToCloneTree(GenTree* from, GenTree* to) |
| 7074 | { |
| 7075 | if (m_nodeTestData == nullptr) |
| 7076 | { |
| 7077 | return; |
| 7078 | } |
| 7079 | if (from == nullptr) |
| 7080 | { |
| 7081 | assert(to == nullptr); |
| 7082 | return; |
| 7083 | } |
| 7084 | // Otherwise... |
| 7085 | TestLabelAndNum tlAndN; |
| 7086 | if (GetNodeTestData()->Lookup(from, &tlAndN)) |
| 7087 | { |
| 7088 | // We can't currently associate multiple annotations with a single node. |
| 7089 | // If we need to, we can fix this... |
| 7090 | TestLabelAndNum tlAndNTo; |
| 7091 | assert(!GetNodeTestData()->Lookup(to, &tlAndNTo)); |
| 7092 | GetNodeTestData()->Set(to, tlAndN); |
| 7093 | } |
| 7094 | // Now recurse, in parallel on both trees. |
| 7095 | |
| 7096 | genTreeOps oper = from->OperGet(); |
| 7097 | unsigned kind = from->OperKind(); |
| 7098 | assert(oper == to->OperGet()); |
| 7099 | |
    // Constant or leaf nodes have no children.
| 7101 | if (kind & (GTK_CONST | GTK_LEAF)) |
| 7102 | { |
| 7103 | return; |
| 7104 | } |
| 7105 | |
| 7106 | // Otherwise, is it a 'simple' unary/binary operator? |
| 7107 | |
| 7108 | if (kind & GTK_SMPOP) |
| 7109 | { |
| 7110 | if (from->gtOp.gtOp1 != nullptr) |
| 7111 | { |
| 7112 | assert(to->gtOp.gtOp1 != nullptr); |
| 7113 | CopyTestDataToCloneTree(from->gtOp.gtOp1, to->gtOp.gtOp1); |
| 7114 | } |
| 7115 | else |
| 7116 | { |
| 7117 | assert(to->gtOp.gtOp1 == nullptr); |
| 7118 | } |
| 7119 | |
| 7120 | if (from->gtGetOp2IfPresent() != nullptr) |
| 7121 | { |
| 7122 | assert(to->gtGetOp2IfPresent() != nullptr); |
| 7123 | CopyTestDataToCloneTree(from->gtGetOp2(), to->gtGetOp2()); |
| 7124 | } |
| 7125 | else |
| 7126 | { |
| 7127 | assert(to->gtGetOp2IfPresent() == nullptr); |
| 7128 | } |
| 7129 | |
| 7130 | return; |
| 7131 | } |
| 7132 | |
| 7133 | // Otherwise, see what kind of a special operator we have here. |
| 7134 | |
| 7135 | switch (oper) |
| 7136 | { |
| 7137 | case GT_STMT: |
| 7138 | CopyTestDataToCloneTree(from->gtStmt.gtStmtExpr, to->gtStmt.gtStmtExpr); |
| 7139 | return; |
| 7140 | |
| 7141 | case GT_CALL: |
| 7142 | CopyTestDataToCloneTree(from->gtCall.gtCallObjp, to->gtCall.gtCallObjp); |
| 7143 | CopyTestDataToCloneTree(from->gtCall.gtCallArgs, to->gtCall.gtCallArgs); |
| 7144 | CopyTestDataToCloneTree(from->gtCall.gtCallLateArgs, to->gtCall.gtCallLateArgs); |
| 7145 | |
| 7146 | if (from->gtCall.gtCallType == CT_INDIRECT) |
| 7147 | { |
| 7148 | CopyTestDataToCloneTree(from->gtCall.gtCallCookie, to->gtCall.gtCallCookie); |
| 7149 | CopyTestDataToCloneTree(from->gtCall.gtCallAddr, to->gtCall.gtCallAddr); |
| 7150 | } |
| 7151 | // The other call types do not have additional GenTree arguments. |
| 7152 | |
| 7153 | return; |
| 7154 | |
| 7155 | case GT_FIELD: |
| 7156 | CopyTestDataToCloneTree(from->gtField.gtFldObj, to->gtField.gtFldObj); |
| 7157 | return; |
| 7158 | |
| 7159 | case GT_ARR_ELEM: |
| 7160 | assert(from->gtArrElem.gtArrRank == to->gtArrElem.gtArrRank); |
| 7161 | for (unsigned dim = 0; dim < from->gtArrElem.gtArrRank; dim++) |
| 7162 | { |
| 7163 | CopyTestDataToCloneTree(from->gtArrElem.gtArrInds[dim], to->gtArrElem.gtArrInds[dim]); |
| 7164 | } |
| 7165 | CopyTestDataToCloneTree(from->gtArrElem.gtArrObj, to->gtArrElem.gtArrObj); |
| 7166 | return; |
| 7167 | |
| 7168 | case GT_CMPXCHG: |
| 7169 | CopyTestDataToCloneTree(from->gtCmpXchg.gtOpLocation, to->gtCmpXchg.gtOpLocation); |
| 7170 | CopyTestDataToCloneTree(from->gtCmpXchg.gtOpValue, to->gtCmpXchg.gtOpValue); |
| 7171 | CopyTestDataToCloneTree(from->gtCmpXchg.gtOpComparand, to->gtCmpXchg.gtOpComparand); |
| 7172 | return; |
| 7173 | |
| 7174 | case GT_ARR_BOUNDS_CHECK: |
| 7175 | #ifdef FEATURE_SIMD |
| 7176 | case GT_SIMD_CHK: |
| 7177 | #endif // FEATURE_SIMD |
| 7178 | #ifdef FEATURE_HW_INTRINSICS |
| 7179 | case GT_HW_INTRINSIC_CHK: |
| 7180 | #endif // FEATURE_HW_INTRINSICS |
| 7181 | CopyTestDataToCloneTree(from->gtBoundsChk.gtIndex, to->gtBoundsChk.gtIndex); |
| 7182 | CopyTestDataToCloneTree(from->gtBoundsChk.gtArrLen, to->gtBoundsChk.gtArrLen); |
| 7183 | return; |
| 7184 | |
| 7185 | default: |
| 7186 | unreached(); |
| 7187 | } |
| 7188 | } |
| 7189 | |
| 7190 | #endif // DEBUG |
| 7191 | |
| 7192 | /* |
| 7193 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 7194 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 7195 | XX XX |
| 7196 | XX jvc XX |
| 7197 | XX XX |
XX  Functions for the stand-alone version of the JIT.                       XX
| 7199 | XX XX |
| 7200 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 7201 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 7202 | */ |
| 7203 | |
| 7204 | /*****************************************************************************/ |
| 7205 | void codeGeneratorCodeSizeBeg() |
| 7206 | { |
| 7207 | } |
| 7208 | /*****************************************************************************/ |
| 7209 | |
| 7210 | /***************************************************************************** |
| 7211 | * |
| 7212 | * If any temporary tables are smaller than 'genMinSize2free' we won't bother |
| 7213 | * freeing them. |
| 7214 | */ |
| 7215 | |
| 7216 | const size_t genMinSize2free = 64; |
| 7217 | |
| 7218 | /*****************************************************************************/ |
| 7219 | |
| 7220 | /***************************************************************************** |
| 7221 | * |
| 7222 | * Used for counting pointer assignments. |
| 7223 | */ |
| 7224 | |
| 7225 | /*****************************************************************************/ |
| 7226 | void codeGeneratorCodeSizeEnd() |
| 7227 | { |
| 7228 | } |
| 7229 | /***************************************************************************** |
| 7230 | * |
| 7231 | * Gather statistics - mainly used for the standalone |
| 7232 | * Enable various #ifdef's to get the information you need |
| 7233 | */ |
| 7234 | |
| 7235 | void Compiler::compJitStats() |
| 7236 | { |
| 7237 | #if CALL_ARG_STATS |
| 7238 | |
| 7239 | /* Method types and argument statistics */ |
| 7240 | compCallArgStats(); |
| 7241 | #endif // CALL_ARG_STATS |
| 7242 | } |
| 7243 | |
| 7244 | #if CALL_ARG_STATS |
| 7245 | |
| 7246 | /***************************************************************************** |
| 7247 | * |
| 7248 | * Gather statistics about method calls and arguments |
| 7249 | */ |
| 7250 | |
| 7251 | void Compiler::compCallArgStats() |
| 7252 | { |
| 7253 | GenTree* args; |
| 7254 | GenTree* argx; |
| 7255 | |
| 7256 | BasicBlock* block; |
| 7257 | GenTree* stmt; |
| 7258 | GenTree* call; |
| 7259 | |
| 7260 | unsigned argNum; |
| 7261 | |
| 7262 | unsigned argDWordNum; |
| 7263 | unsigned argLngNum; |
| 7264 | unsigned argFltNum; |
| 7265 | unsigned argDblNum; |
| 7266 | |
| 7267 | unsigned regArgNum; |
| 7268 | unsigned regArgDeferred; |
| 7269 | unsigned regArgTemp; |
| 7270 | |
| 7271 | unsigned regArgLclVar; |
| 7272 | unsigned regArgConst; |
| 7273 | |
| 7274 | unsigned argTempsThisMethod = 0; |
| 7275 | |
| 7276 | assert(fgStmtListThreaded); |
| 7277 | |
| 7278 | for (block = fgFirstBB; block; block = block->bbNext) |
| 7279 | { |
| 7280 | for (stmt = block->bbTreeList; stmt; stmt = stmt->gtNext) |
| 7281 | { |
| 7282 | assert(stmt->gtOper == GT_STMT); |
| 7283 | |
| 7284 | for (call = stmt->gtStmt.gtStmtList; call; call = call->gtNext) |
| 7285 | { |
| 7286 | if (call->gtOper != GT_CALL) |
| 7287 | continue; |
| 7288 | |
                argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum =
                    argLngNum = argFltNum = argDblNum = 0;
| 7296 | |
| 7297 | argTotalCalls++; |
| 7298 | |
| 7299 | if (!call->gtCall.gtCallObjp) |
| 7300 | { |
| 7301 | if (call->gtCall.gtCallType == CT_HELPER) |
| 7302 | { |
| 7303 | argHelperCalls++; |
| 7304 | } |
| 7305 | else |
| 7306 | { |
| 7307 | argStaticCalls++; |
| 7308 | } |
| 7309 | } |
| 7310 | else |
| 7311 | { |
| 7312 | /* We have a 'this' pointer */ |
| 7313 | |
| 7314 | argDWordNum++; |
| 7315 | argNum++; |
| 7316 | regArgNum++; |
| 7317 | regArgDeferred++; |
| 7318 | argTotalObjPtr++; |
| 7319 | |
| 7320 | if (call->IsVirtual()) |
| 7321 | { |
| 7322 | /* virtual function */ |
| 7323 | argVirtualCalls++; |
| 7324 | } |
| 7325 | else |
| 7326 | { |
| 7327 | argNonVirtualCalls++; |
| 7328 | } |
| 7329 | } |
| 7330 | } |
| 7331 | } |
| 7332 | } |
| 7333 | |
| 7334 | argTempsCntTable.record(argTempsThisMethod); |
| 7335 | |
| 7336 | if (argMaxTempsPerMethod < argTempsThisMethod) |
| 7337 | { |
| 7338 | argMaxTempsPerMethod = argTempsThisMethod; |
| 7339 | } |
| 7340 | } |
| 7341 | |
| 7342 | /* static */ |
| 7343 | void Compiler::compDispCallArgStats(FILE* fout) |
| 7344 | { |
| 7345 | if (argTotalCalls == 0) |
| 7346 | return; |
| 7347 | |
| 7348 | fprintf(fout, "\n" ); |
| 7349 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7350 | fprintf(fout, "Call stats\n" ); |
| 7351 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7352 | fprintf(fout, "Total # of calls = %d, calls / method = %.3f\n\n" , argTotalCalls, |
| 7353 | (float)argTotalCalls / genMethodCnt); |
| 7354 | |
| 7355 | fprintf(fout, "Percentage of helper calls = %4.2f %%\n" , (float)(100 * argHelperCalls) / argTotalCalls); |
| 7356 | fprintf(fout, "Percentage of static calls = %4.2f %%\n" , (float)(100 * argStaticCalls) / argTotalCalls); |
| 7357 | fprintf(fout, "Percentage of virtual calls = %4.2f %%\n" , (float)(100 * argVirtualCalls) / argTotalCalls); |
| 7358 | fprintf(fout, "Percentage of non-virtual calls = %4.2f %%\n\n" , (float)(100 * argNonVirtualCalls) / argTotalCalls); |
| 7359 | |
| 7360 | fprintf(fout, "Average # of arguments per call = %.2f%%\n\n" , (float)argTotalArgs / argTotalCalls); |
| 7361 | |
| 7362 | fprintf(fout, "Percentage of DWORD arguments = %.2f %%\n" , (float)(100 * argTotalDWordArgs) / argTotalArgs); |
| 7363 | fprintf(fout, "Percentage of LONG arguments = %.2f %%\n" , (float)(100 * argTotalLongArgs) / argTotalArgs); |
| 7364 | fprintf(fout, "Percentage of FLOAT arguments = %.2f %%\n" , (float)(100 * argTotalFloatArgs) / argTotalArgs); |
| 7365 | fprintf(fout, "Percentage of DOUBLE arguments = %.2f %%\n\n" , (float)(100 * argTotalDoubleArgs) / argTotalArgs); |
| 7366 | |
| 7367 | if (argTotalRegArgs == 0) |
| 7368 | return; |
| 7369 | |
| 7370 | /* |
| 7371 | fprintf(fout, "Total deferred arguments = %d \n", argTotalDeferred); |
| 7372 | |
| 7373 | fprintf(fout, "Total temp arguments = %d \n\n", argTotalTemps); |
| 7374 | |
| 7375 | fprintf(fout, "Total 'this' arguments = %d \n", argTotalObjPtr); |
| 7376 | fprintf(fout, "Total local var arguments = %d \n", argTotalLclVar); |
| 7377 | fprintf(fout, "Total constant arguments = %d \n\n", argTotalConst); |
| 7378 | */ |
| 7379 | |
| 7380 | fprintf(fout, "\nRegister Arguments:\n\n" ); |
| 7381 | |
| 7382 | fprintf(fout, "Percentage of deferred arguments = %.2f %%\n" , (float)(100 * argTotalDeferred) / argTotalRegArgs); |
| 7383 | fprintf(fout, "Percentage of temp arguments = %.2f %%\n\n" , (float)(100 * argTotalTemps) / argTotalRegArgs); |
| 7384 | |
| 7385 | fprintf(fout, "Maximum # of temps per method = %d\n\n" , argMaxTempsPerMethod); |
| 7386 | |
| 7387 | fprintf(fout, "Percentage of ObjPtr arguments = %.2f %%\n" , (float)(100 * argTotalObjPtr) / argTotalRegArgs); |
| 7388 | // fprintf(fout, "Percentage of global arguments = %.2f %%\n", (float)(100 * argTotalDWordGlobEf) / |
| 7389 | // argTotalRegArgs); |
| 7390 | fprintf(fout, "Percentage of constant arguments = %.2f %%\n" , (float)(100 * argTotalConst) / argTotalRegArgs); |
| 7391 | fprintf(fout, "Percentage of lcl var arguments = %.2f %%\n\n" , (float)(100 * argTotalLclVar) / argTotalRegArgs); |
| 7392 | |
| 7393 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7394 | fprintf(fout, "Argument count frequency table (includes ObjPtr):\n" ); |
| 7395 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7396 | argCntTable.dump(fout); |
| 7397 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7398 | |
| 7399 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7400 | fprintf(fout, "DWORD argument count frequency table (w/o LONG):\n" ); |
| 7401 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7402 | argDWordCntTable.dump(fout); |
| 7403 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7404 | |
| 7405 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7406 | fprintf(fout, "Temps count frequency table (per method):\n" ); |
| 7407 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7408 | argTempsCntTable.dump(fout); |
| 7409 | fprintf(fout, "--------------------------------------------------\n" ); |
| 7410 | |
| 7411 | /* |
| 7412 | fprintf(fout, "--------------------------------------------------\n"); |
| 7413 | fprintf(fout, "DWORD argument count frequency table (w/ LONG):\n"); |
| 7414 | fprintf(fout, "--------------------------------------------------\n"); |
| 7415 | argDWordLngCntTable.dump(fout); |
| 7416 | fprintf(fout, "--------------------------------------------------\n"); |
| 7417 | */ |
| 7418 | } |
| 7419 | |
| 7420 | #endif // CALL_ARG_STATS |
| 7421 | |
| 7422 | // JIT time end to end, and by phases. |
| 7423 | |
| 7424 | #ifdef FEATURE_JIT_METHOD_PERF |
| 7425 | // Static variables |
| 7426 | CritSecObject CompTimeSummaryInfo::s_compTimeSummaryLock; |
| 7427 | CompTimeSummaryInfo CompTimeSummaryInfo::s_compTimeSummary; |
| 7428 | #if MEASURE_CLRAPI_CALLS |
| 7429 | double JitTimer::s_cyclesPerSec = CycleTimer::CyclesPerSecond(); |
| 7430 | #endif |
| 7431 | #endif // FEATURE_JIT_METHOD_PERF |
| 7432 | |
| 7433 | #if defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING) |
| 7434 | const char* PhaseNames[] = { |
| 7435 | #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) string_nm, |
| 7436 | #include "compphases.h" |
| 7437 | }; |
| 7438 | |
| 7439 | const char* PhaseEnums[] = { |
| 7440 | #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) #enum_nm, |
| 7441 | #include "compphases.h" |
| 7442 | }; |
| 7443 | |
| 7444 | const LPCWSTR PhaseShortNames[] = { |
| 7445 | #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) W(short_nm), |
| 7446 | #include "compphases.h" |
| 7447 | }; |
#endif // defined(FEATURE_JIT_METHOD_PERF) || DUMP_FLOWGRAPHS || defined(FEATURE_TRACELOGGING)
| 7449 | |
| 7450 | #ifdef FEATURE_JIT_METHOD_PERF |
| 7451 | bool PhaseHasChildren[] = { |
| 7452 | #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) hasChildren, |
| 7453 | #include "compphases.h" |
| 7454 | }; |
| 7455 | |
| 7456 | int PhaseParent[] = { |
| 7457 | #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) parent, |
| 7458 | #include "compphases.h" |
| 7459 | }; |
| 7460 | |
| 7461 | bool PhaseReportsIRSize[] = { |
| 7462 | #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) measureIR, |
| 7463 | #include "compphases.h" |
| 7464 | }; |
| 7465 | |
| 7466 | CompTimeInfo::CompTimeInfo(unsigned byteCodeBytes) |
| 7467 | : m_byteCodeBytes(byteCodeBytes) |
| 7468 | , m_totalCycles(0) |
| 7469 | , m_parentPhaseEndSlop(0) |
| 7470 | , m_timerFailure(false) |
| 7471 | #if MEASURE_CLRAPI_CALLS |
| 7472 | , m_allClrAPIcalls(0) |
| 7473 | , m_allClrAPIcycles(0) |
| 7474 | #endif |
| 7475 | { |
| 7476 | for (int i = 0; i < PHASE_NUMBER_OF; i++) |
| 7477 | { |
| 7478 | m_invokesByPhase[i] = 0; |
| 7479 | m_cyclesByPhase[i] = 0; |
| 7480 | #if MEASURE_CLRAPI_CALLS |
| 7481 | m_CLRinvokesByPhase[i] = 0; |
| 7482 | m_CLRcyclesByPhase[i] = 0; |
| 7483 | #endif |
| 7484 | } |
| 7485 | |
| 7486 | #if MEASURE_CLRAPI_CALLS |
| 7487 | assert(ARRAYSIZE(m_perClrAPIcalls) == API_ICorJitInfo_Names::API_COUNT); |
| 7488 | assert(ARRAYSIZE(m_perClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); |
| 7489 | assert(ARRAYSIZE(m_maxClrAPIcycles) == API_ICorJitInfo_Names::API_COUNT); |
| 7490 | for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) |
| 7491 | { |
| 7492 | m_perClrAPIcalls[i] = 0; |
| 7493 | m_perClrAPIcycles[i] = 0; |
| 7494 | m_maxClrAPIcycles[i] = 0; |
| 7495 | } |
| 7496 | #endif |
| 7497 | } |
| 7498 | |
| 7499 | bool CompTimeSummaryInfo::IncludedInFilteredData(CompTimeInfo& info) |
| 7500 | { |
| 7501 | return false; // info.m_byteCodeBytes < 10; |
| 7502 | } |
| 7503 | |
| 7504 | //------------------------------------------------------------------------ |
| 7505 | // CompTimeSummaryInfo::AddInfo: Record timing info from one compile. |
| 7506 | // |
| 7507 | // Arguments: |
| 7508 | // info - The timing information to record. |
| 7509 | // includePhases - If "true", the per-phase info in "info" is valid, |
| 7510 | // which means that a "normal" compile has ended; if |
| 7511 | // the value is "false" we are recording the results |
| 7512 | // of a partial compile (typically an import-only run |
| 7513 | // on behalf of the inliner) in which case the phase |
| 7514 | // info is not valid and so we only record EE call |
| 7515 | // overhead. |
| 7516 | void CompTimeSummaryInfo::AddInfo(CompTimeInfo& info, bool includePhases) |
| 7517 | { |
| 7518 | if (info.m_timerFailure) |
| 7519 | { |
| 7520 | return; // Don't update if there was a failure. |
| 7521 | } |
| 7522 | |
| 7523 | CritSecHolder timeLock(s_compTimeSummaryLock); |
| 7524 | |
| 7525 | if (includePhases) |
| 7526 | { |
| 7527 | bool includeInFiltered = IncludedInFilteredData(info); |
| 7528 | |
| 7529 | m_numMethods++; |
| 7530 | |
| 7531 | // Update the totals and maxima. |
| 7532 | m_total.m_byteCodeBytes += info.m_byteCodeBytes; |
| 7533 | m_maximum.m_byteCodeBytes = max(m_maximum.m_byteCodeBytes, info.m_byteCodeBytes); |
| 7534 | m_total.m_totalCycles += info.m_totalCycles; |
| 7535 | m_maximum.m_totalCycles = max(m_maximum.m_totalCycles, info.m_totalCycles); |
| 7536 | |
| 7537 | #if MEASURE_CLRAPI_CALLS |
| 7538 | // Update the CLR-API values. |
| 7539 | m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; |
| 7540 | m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); |
| 7541 | m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; |
| 7542 | m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); |
| 7543 | #endif |
| 7544 | |
| 7545 | if (includeInFiltered) |
| 7546 | { |
| 7547 | m_numFilteredMethods++; |
| 7548 | m_filtered.m_byteCodeBytes += info.m_byteCodeBytes; |
| 7549 | m_filtered.m_totalCycles += info.m_totalCycles; |
| 7550 | m_filtered.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; |
| 7551 | } |
| 7552 | |
| 7553 | for (int i = 0; i < PHASE_NUMBER_OF; i++) |
| 7554 | { |
| 7555 | m_total.m_invokesByPhase[i] += info.m_invokesByPhase[i]; |
| 7556 | m_total.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; |
| 7557 | |
| 7558 | #if MEASURE_CLRAPI_CALLS |
| 7559 | m_total.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; |
| 7560 | m_total.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; |
| 7561 | #endif |
| 7562 | |
| 7563 | if (includeInFiltered) |
| 7564 | { |
| 7565 | m_filtered.m_invokesByPhase[i] += info.m_invokesByPhase[i]; |
| 7566 | m_filtered.m_cyclesByPhase[i] += info.m_cyclesByPhase[i]; |
| 7567 | #if MEASURE_CLRAPI_CALLS |
| 7568 | m_filtered.m_CLRinvokesByPhase[i] += info.m_CLRinvokesByPhase[i]; |
| 7569 | m_filtered.m_CLRcyclesByPhase[i] += info.m_CLRcyclesByPhase[i]; |
| 7570 | #endif |
| 7571 | } |
| 7572 | m_maximum.m_cyclesByPhase[i] = max(m_maximum.m_cyclesByPhase[i], info.m_cyclesByPhase[i]); |
| 7573 | |
| 7574 | #if MEASURE_CLRAPI_CALLS |
| 7575 | m_maximum.m_CLRcyclesByPhase[i] = max(m_maximum.m_CLRcyclesByPhase[i], info.m_CLRcyclesByPhase[i]); |
| 7576 | #endif |
| 7577 | } |
| 7578 | m_total.m_parentPhaseEndSlop += info.m_parentPhaseEndSlop; |
| 7579 | m_maximum.m_parentPhaseEndSlop = max(m_maximum.m_parentPhaseEndSlop, info.m_parentPhaseEndSlop); |
| 7580 | } |
| 7581 | #if MEASURE_CLRAPI_CALLS |
| 7582 | else |
| 7583 | { |
| 7584 | m_totMethods++; |
| 7585 | |
| 7586 | // Update the "global" CLR-API values. |
| 7587 | m_total.m_allClrAPIcalls += info.m_allClrAPIcalls; |
| 7588 | m_maximum.m_allClrAPIcalls = max(m_maximum.m_allClrAPIcalls, info.m_allClrAPIcalls); |
| 7589 | m_total.m_allClrAPIcycles += info.m_allClrAPIcycles; |
| 7590 | m_maximum.m_allClrAPIcycles = max(m_maximum.m_allClrAPIcycles, info.m_allClrAPIcycles); |
| 7591 | |
| 7592 | // Update the per-phase CLR-API values. |
| 7593 | m_total.m_invokesByPhase[PHASE_CLR_API] += info.m_allClrAPIcalls; |
        m_maximum.m_invokesByPhase[PHASE_CLR_API] =
            max(m_maximum.m_invokesByPhase[PHASE_CLR_API], info.m_allClrAPIcalls);
| 7596 | m_total.m_cyclesByPhase[PHASE_CLR_API] += info.m_allClrAPIcycles; |
| 7597 | m_maximum.m_cyclesByPhase[PHASE_CLR_API] = |
| 7598 | max(m_maximum.m_cyclesByPhase[PHASE_CLR_API], info.m_allClrAPIcycles); |
| 7599 | } |
| 7600 | |
| 7601 | for (int i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) |
| 7602 | { |
| 7603 | m_total.m_perClrAPIcalls[i] += info.m_perClrAPIcalls[i]; |
| 7604 | m_maximum.m_perClrAPIcalls[i] = max(m_maximum.m_perClrAPIcalls[i], info.m_perClrAPIcalls[i]); |
| 7605 | |
| 7606 | m_total.m_perClrAPIcycles[i] += info.m_perClrAPIcycles[i]; |
| 7607 | m_maximum.m_perClrAPIcycles[i] = max(m_maximum.m_perClrAPIcycles[i], info.m_perClrAPIcycles[i]); |
| 7608 | |
| 7609 | m_maximum.m_maxClrAPIcycles[i] = max(m_maximum.m_maxClrAPIcycles[i], info.m_maxClrAPIcycles[i]); |
| 7610 | } |
| 7611 | #endif |
| 7612 | } |
| 7613 | |
| 7614 | // Static |
| 7615 | LPCWSTR Compiler::compJitTimeLogFilename = nullptr; |
| 7616 | |
| 7617 | void CompTimeSummaryInfo::Print(FILE* f) |
| 7618 | { |
| 7619 | if (f == nullptr) |
| 7620 | { |
| 7621 | return; |
| 7622 | } |
| 7623 | // Otherwise... |
| 7624 | double countsPerSec = CycleTimer::CyclesPerSecond(); |
| 7625 | if (countsPerSec == 0.0) |
| 7626 | { |
| 7627 | fprintf(f, "Processor does not have a high-frequency timer.\n" ); |
| 7628 | return; |
| 7629 | } |
| 7630 | |
    bool   extraInfo  = (JitConfig.JitEECallTimingInfo() != 0);
| 7632 | double totTime_ms = 0.0; |
| 7633 | |
| 7634 | fprintf(f, "JIT Compilation time report:\n" ); |
| 7635 | fprintf(f, " Compiled %d methods.\n" , m_numMethods); |
| 7636 | if (m_numMethods != 0) |
| 7637 | { |
| 7638 | fprintf(f, " Compiled %d bytecodes total (%d max, %8.2f avg).\n" , m_total.m_byteCodeBytes, |
| 7639 | m_maximum.m_byteCodeBytes, (double)m_total.m_byteCodeBytes / (double)m_numMethods); |
| 7640 | totTime_ms = ((double)m_total.m_totalCycles / countsPerSec) * 1000.0; |
| 7641 | fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n" , ((double)m_total.m_totalCycles / 1000000.0), |
| 7642 | totTime_ms); |
| 7643 | fprintf(f, " max: %10.3f Mcycles/%10.3f ms\n" , ((double)m_maximum.m_totalCycles) / 1000000.0, |
| 7644 | ((double)m_maximum.m_totalCycles / countsPerSec) * 1000.0); |
| 7645 | fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n" , |
| 7646 | ((double)m_total.m_totalCycles) / 1000000.0 / (double)m_numMethods, totTime_ms / (double)m_numMethods); |
| 7647 | |
| 7648 | const char* = "" ; |
| 7649 | const char* = "" ; |
| 7650 | #if MEASURE_CLRAPI_CALLS |
| 7651 | if (extraInfo) |
| 7652 | { |
| 7653 | extraHdr1 = " CLRs/meth % in CLR" ; |
| 7654 | extraHdr2 = "-----------------------" ; |
| 7655 | } |
| 7656 | #endif |
| 7657 | |
| 7658 | fprintf(f, "\n Total time by phases:\n" ); |
| 7659 | fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total max (ms)%s\n" , |
| 7660 | extraHdr1); |
| 7661 | fprintf(f, " ---------------------------------------------------------------------------------------%s\n" , |
| 7662 | extraHdr2); |
| 7663 | |
| 7664 | // Ensure that at least the names array and the Phases enum have the same number of entries: |
| 7665 | assert(_countof(PhaseNames) == PHASE_NUMBER_OF); |
| 7666 | for (int i = 0; i < PHASE_NUMBER_OF; i++) |
| 7667 | { |
| 7668 | double phase_tot_ms = (((double)m_total.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; |
| 7669 | double phase_max_ms = (((double)m_maximum.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; |
| 7670 | double phase_tot_pct = 100.0 * phase_tot_ms / totTime_ms; |
| 7671 | |
| 7672 | #if MEASURE_CLRAPI_CALLS |
| 7673 | // Skip showing CLR API call info if we didn't collect any |
| 7674 | if (i == PHASE_CLR_API && !extraInfo) |
| 7675 | continue; |
| 7676 | #endif |
| 7677 | |
| 7678 | // Indent nested phases, according to depth. |
| 7679 | int ancPhase = PhaseParent[i]; |
| 7680 | while (ancPhase != -1) |
| 7681 | { |
| 7682 | fprintf(f, " " ); |
| 7683 | ancPhase = PhaseParent[ancPhase]; |
| 7684 | } |
| 7685 | fprintf(f, " %-30s %6.2f %10.2f %9.3f %8.2f%% %8.3f" , PhaseNames[i], |
| 7686 | ((double)m_total.m_invokesByPhase[i]) / ((double)m_numMethods), |
                    ((double)m_total.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, phase_tot_pct, phase_max_ms);
| 7689 | |
| 7690 | #if MEASURE_CLRAPI_CALLS |
| 7691 | if (extraInfo && i != PHASE_CLR_API) |
| 7692 | { |
| 7693 | double nest_tot_ms = (((double)m_total.m_CLRcyclesByPhase[i]) / countsPerSec) * 1000.0; |
| 7694 | double nest_percent = nest_tot_ms * 100.0 / totTime_ms; |
| 7695 | double calls_per_fn = ((double)m_total.m_CLRinvokesByPhase[i]) / ((double)m_numMethods); |
| 7696 | |
| 7697 | if (nest_percent > 0.1 || calls_per_fn > 10) |
| 7698 | fprintf(f, " %5.1f %8.2f%%" , calls_per_fn, nest_percent); |
| 7699 | } |
| 7700 | #endif |
| 7701 | fprintf(f, "\n" ); |
| 7702 | } |
| 7703 | |
| 7704 | // Show slop if it's over a certain percentage of the total |
| 7705 | double pslop_pct = 100.0 * m_total.m_parentPhaseEndSlop * 1000.0 / countsPerSec / totTime_ms; |
| 7706 | if (pslop_pct >= 1.0) |
| 7707 | { |
| 7708 | fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " |
| 7709 | "%3.1f%% of total.\n\n" , |
| 7710 | m_total.m_parentPhaseEndSlop / 1000000.0, pslop_pct); |
| 7711 | } |
| 7712 | } |
| 7713 | if (m_numFilteredMethods > 0) |
| 7714 | { |
| 7715 | fprintf(f, " Compiled %d methods that meet the filter requirement.\n" , m_numFilteredMethods); |
| 7716 | fprintf(f, " Compiled %d bytecodes total (%8.2f avg).\n" , m_filtered.m_byteCodeBytes, |
| 7717 | (double)m_filtered.m_byteCodeBytes / (double)m_numFilteredMethods); |
| 7718 | double totTime_ms = ((double)m_filtered.m_totalCycles / countsPerSec) * 1000.0; |
| 7719 | fprintf(f, " Time: total: %10.3f Mcycles/%10.3f ms\n" , ((double)m_filtered.m_totalCycles / 1000000.0), |
| 7720 | totTime_ms); |
| 7721 | fprintf(f, " avg: %10.3f Mcycles/%10.3f ms\n" , |
| 7722 | ((double)m_filtered.m_totalCycles) / 1000000.0 / (double)m_numFilteredMethods, |
| 7723 | totTime_ms / (double)m_numFilteredMethods); |
| 7724 | |
| 7725 | fprintf(f, " Total time by phases:\n" ); |
| 7726 | fprintf(f, " PHASE inv/meth Mcycles time (ms) %% of total\n" ); |
| 7727 | fprintf(f, " --------------------------------------------------------------------------------------\n" ); |
| 7728 | // Ensure that at least the names array and the Phases enum have the same number of entries: |
| 7729 | assert(_countof(PhaseNames) == PHASE_NUMBER_OF); |
| 7730 | for (int i = 0; i < PHASE_NUMBER_OF; i++) |
| 7731 | { |
| 7732 | double phase_tot_ms = (((double)m_filtered.m_cyclesByPhase[i]) / countsPerSec) * 1000.0; |
| 7733 | // Indent nested phases, according to depth. |
| 7734 | int ancPhase = PhaseParent[i]; |
| 7735 | while (ancPhase != -1) |
| 7736 | { |
| 7737 | fprintf(f, " " ); |
| 7738 | ancPhase = PhaseParent[ancPhase]; |
| 7739 | } |
| 7740 | fprintf(f, " %-30s %5.2f %10.2f %9.3f %8.2f%%\n" , PhaseNames[i], |
| 7741 | ((double)m_filtered.m_invokesByPhase[i]) / ((double)m_numFilteredMethods), |
| 7742 | ((double)m_filtered.m_cyclesByPhase[i]) / 1000000.0, phase_tot_ms, |
| 7743 | (phase_tot_ms * 100.0 / totTime_ms)); |
| 7744 | } |
| 7745 | |
| 7746 | double fslop_ms = m_filtered.m_parentPhaseEndSlop * 1000.0 / countsPerSec; |
| 7747 | if (fslop_ms > 1.0) |
| 7748 | { |
| 7749 | fprintf(f, "\n 'End phase slop' should be very small (if not, there's unattributed time): %9.3f Mcycles = " |
| 7750 | "%3.1f%% of total.\n\n" , |
| 7751 | m_filtered.m_parentPhaseEndSlop / 1000000.0, fslop_ms); |
| 7752 | } |
| 7753 | } |
| 7754 | |
| 7755 | #if MEASURE_CLRAPI_CALLS |
| 7756 | if (m_total.m_allClrAPIcalls > 0 && m_total.m_allClrAPIcycles > 0) |
| 7757 | { |
| 7758 | fprintf(f, "\n" ); |
| 7759 | if (m_totMethods > 0) |
| 7760 | fprintf(f, " Imported %u methods.\n\n" , m_numMethods + m_totMethods); |
| 7761 | |
| 7762 | fprintf(f, " CLR API # calls total time max time avg time %% " |
| 7763 | "of total\n" ); |
| 7764 | fprintf(f, " -------------------------------------------------------------------------------" ); |
| 7765 | fprintf(f, "---------------------\n" ); |
| 7766 | |
| 7767 | static const char* APInames[] = { |
| 7768 | #define DEF_CLR_API(name) #name, |
| 7769 | #include "ICorJitInfo_API_names.h" |
| 7770 | }; |
| 7771 | |
| 7772 | unsigned shownCalls = 0; |
| 7773 | double shownMillis = 0.0; |
| 7774 | #ifdef DEBUG |
| 7775 | unsigned checkedCalls = 0; |
| 7776 | double checkedMillis = 0.0; |
| 7777 | #endif |
| 7778 | |
| 7779 | for (unsigned pass = 0; pass < 2; pass++) |
| 7780 | { |
| 7781 | for (unsigned i = 0; i < API_ICorJitInfo_Names::API_COUNT; i++) |
| 7782 | { |
| 7783 | unsigned calls = m_total.m_perClrAPIcalls[i]; |
| 7784 | if (calls == 0) |
| 7785 | continue; |
| 7786 | |
| 7787 | unsigned __int64 cycles = m_total.m_perClrAPIcycles[i]; |
| 7788 | double millis = 1000.0 * cycles / countsPerSec; |
| 7789 | |
| 7790 | // Don't show the small fry to keep the results manageable |
| 7791 | if (millis < 0.5) |
| 7792 | { |
| 7793 | // We always show the following API because it is always called |
| 7794 | // exactly once for each method and its body is the simplest one |
| 7795 | // possible (it just returns an integer constant), and therefore |
| 7796 | // it can be used to measure the overhead of adding the CLR API |
| 7797 | // timing code. Roughly speaking, on a 3GHz x64 box the overhead |
| 7798 | // per call should be around 40 ns when using RDTSC, compared to |
| 7799 | // about 140 ns when using GetThreadCycles() under Windows. |
| 7800 | if (i != API_ICorJitInfo_Names::API_getExpectedTargetArchitecture) |
| 7801 | continue; |
| 7802 | } |
| 7803 | |
| 7804 | // In the first pass we just compute the totals. |
| 7805 | if (pass == 0) |
| 7806 | { |
| 7807 | shownCalls += m_total.m_perClrAPIcalls[i]; |
| 7808 | shownMillis += millis; |
| 7809 | continue; |
| 7810 | } |
| 7811 | |
| 7812 | unsigned __int32 maxcyc = m_maximum.m_maxClrAPIcycles[i]; |
| 7813 | double max_ms = 1000.0 * maxcyc / countsPerSec; |
| 7814 | |
| 7815 | fprintf(f, " %-40s" , APInames[i]); // API name |
| 7816 | fprintf(f, " %8u %9.1f ms" , calls, millis); // #calls, total time |
| 7817 | fprintf(f, " %8.1f ms %8.1f ns" , max_ms, 1000000.0 * millis / calls); // max, avg time |
| 7818 | fprintf(f, " %5.1f%%\n" , 100.0 * millis / shownMillis); // % of total |
| 7819 | |
| 7820 | #ifdef DEBUG |
| 7821 | checkedCalls += m_total.m_perClrAPIcalls[i]; |
| 7822 | checkedMillis += millis; |
| 7823 | #endif |
| 7824 | } |
| 7825 | } |
| 7826 | |
| 7827 | #ifdef DEBUG |
| 7828 | assert(checkedCalls == shownCalls); |
| 7829 | assert(checkedMillis == shownMillis); |
| 7830 | #endif |
| 7831 | |
| 7832 | if (shownCalls > 0 || shownMillis > 0) |
| 7833 | { |
| 7834 | fprintf(f, " -------------------------" ); |
| 7835 | fprintf(f, "---------------------------------------------------------------------------\n" ); |
| 7836 | fprintf(f, " Total for calls shown above %8u %10.1f ms" , shownCalls, shownMillis); |
| 7837 | if (totTime_ms > 0.0) |
| 7838 | fprintf(f, " (%4.1lf%% of overall JIT time)" , shownMillis * 100.0 / totTime_ms); |
| 7839 | fprintf(f, "\n" ); |
| 7840 | } |
| 7841 | fprintf(f, "\n" ); |
| 7842 | } |
| 7843 | #endif |
| 7844 | |
| 7845 | fprintf(f, "\n" ); |
| 7846 | } |
| 7847 | |
| 7848 | JitTimer::JitTimer(unsigned byteCodeSize) : m_info(byteCodeSize) |
| 7849 | { |
| 7850 | #if MEASURE_CLRAPI_CALLS |
| 7851 | m_CLRcallInvokes = 0; |
| 7852 | m_CLRcallCycles = 0; |
| 7853 | #endif |
| 7854 | |
| 7855 | #ifdef DEBUG |
| 7856 | m_lastPhase = (Phases)-1; |
| 7857 | #if MEASURE_CLRAPI_CALLS |
| 7858 | m_CLRcallAPInum = -1; |
| 7859 | #endif |
| 7860 | #endif |
| 7861 | |
| 7862 | unsigned __int64 threadCurCycles; |
| 7863 | if (_our_GetThreadCycles(&threadCurCycles)) |
| 7864 | { |
| 7865 | m_start = threadCurCycles; |
| 7866 | m_curPhaseStart = threadCurCycles; |
| 7867 | } |
| 7868 | } |
| 7869 | |
| 7870 | void JitTimer::EndPhase(Compiler* compiler, Phases phase) |
| 7871 | { |
    // We currently re-run some phases, so the following assert doesn't hold.
    // assert((int)phase > (int)m_lastPhase); // We should end phases in increasing order.
| 7875 | |
| 7876 | unsigned __int64 threadCurCycles; |
| 7877 | if (_our_GetThreadCycles(&threadCurCycles)) |
| 7878 | { |
| 7879 | unsigned __int64 phaseCycles = (threadCurCycles - m_curPhaseStart); |
| 7880 | |
| 7881 | // If this is not a leaf phase, the assumption is that the last subphase must have just recently ended. |
| 7882 | // Credit the duration to "slop", the total of which should be very small. |
| 7883 | if (PhaseHasChildren[phase]) |
| 7884 | { |
| 7885 | m_info.m_parentPhaseEndSlop += phaseCycles; |
| 7886 | } |
| 7887 | else |
| 7888 | { |
| 7889 | // It is a leaf phase. Credit duration to it. |
| 7890 | m_info.m_invokesByPhase[phase]++; |
| 7891 | m_info.m_cyclesByPhase[phase] += phaseCycles; |
| 7892 | |
| 7893 | #if MEASURE_CLRAPI_CALLS |
| 7894 | // Record the CLR API timing info as well. |
| 7895 | m_info.m_CLRinvokesByPhase[phase] += m_CLRcallInvokes; |
| 7896 | m_info.m_CLRcyclesByPhase[phase] += m_CLRcallCycles; |
| 7897 | #endif |
| 7898 | |
| 7899 | // Credit the phase's ancestors, if any. |
| 7900 | int ancPhase = PhaseParent[phase]; |
| 7901 | while (ancPhase != -1) |
| 7902 | { |
| 7903 | m_info.m_cyclesByPhase[ancPhase] += phaseCycles; |
| 7904 | ancPhase = PhaseParent[ancPhase]; |
| 7905 | } |
| 7906 | |
| 7907 | #if MEASURE_CLRAPI_CALLS |
| 7908 | const Phases lastPhase = PHASE_CLR_API; |
| 7909 | #else |
| 7910 | const Phases lastPhase = PHASE_NUMBER_OF; |
| 7911 | #endif |
| 7912 | if (phase + 1 == lastPhase) |
| 7913 | { |
| 7914 | m_info.m_totalCycles = (threadCurCycles - m_start); |
| 7915 | } |
| 7916 | else |
| 7917 | { |
| 7918 | m_curPhaseStart = threadCurCycles; |
| 7919 | } |
| 7920 | } |
| 7921 | |
| 7922 | if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[phase]) |
| 7923 | { |
| 7924 | m_info.m_nodeCountAfterPhase[phase] = compiler->fgMeasureIR(); |
| 7925 | } |
| 7926 | else |
| 7927 | { |
| 7928 | m_info.m_nodeCountAfterPhase[phase] = 0; |
| 7929 | } |
| 7930 | } |
| 7931 | |
| 7932 | #ifdef DEBUG |
| 7933 | m_lastPhase = phase; |
| 7934 | #endif |
| 7935 | #if MEASURE_CLRAPI_CALLS |
| 7936 | m_CLRcallInvokes = 0; |
| 7937 | m_CLRcallCycles = 0; |
| 7938 | #endif |
| 7939 | } |
| 7940 | |
| 7941 | #if MEASURE_CLRAPI_CALLS |
| 7942 | |
| 7943 | //------------------------------------------------------------------------ |
| 7944 | // JitTimer::CLRApiCallEnter: Start the stopwatch for an EE call. |
| 7945 | // |
| 7946 | // Arguments: |
| 7947 | // apix - The API index - an "enum API_ICorJitInfo_Names" value. |
| 7948 | // |
| 7949 | |
| 7950 | void JitTimer::CLRApiCallEnter(unsigned apix) |
| 7951 | { |
| 7952 | assert(m_CLRcallAPInum == -1); // Nested calls not allowed |
| 7953 | m_CLRcallAPInum = apix; |
| 7954 | |
| 7955 | // If we can't get the cycles, we'll just ignore this call |
| 7956 | if (!_our_GetThreadCycles(&m_CLRcallStart)) |
| 7957 | m_CLRcallStart = 0; |
| 7958 | } |
| 7959 | |
| 7960 | //------------------------------------------------------------------------ |
| 7961 | // JitTimer::CLRApiCallLeave: compute / record time spent in an EE call. |
| 7962 | // |
| 7963 | // Arguments: |
| 7964 | // apix - The API's "enum API_ICorJitInfo_Names" value; this value |
| 7965 | // should match the value passed to the most recent call to |
| 7966 | // "CLRApiCallEnter" (i.e. these must come as matched pairs), |
| 7967 | // and they also may not nest. |
| 7968 | // |
| 7969 | |
| 7970 | void JitTimer::CLRApiCallLeave(unsigned apix) |
| 7971 | { |
| 7972 | // Make sure we're actually inside a measured CLR call. |
| 7973 | assert(m_CLRcallAPInum != -1); |
| 7974 | m_CLRcallAPInum = -1; |
| 7975 | |
| 7976 | // Ignore this one if we don't have a valid starting counter. |
| 7977 | if (m_CLRcallStart != 0) |
| 7978 | { |
| 7979 | if (JitConfig.JitEECallTimingInfo() != 0) |
| 7980 | { |
| 7981 | unsigned __int64 threadCurCycles; |
| 7982 | if (_our_GetThreadCycles(&threadCurCycles)) |
| 7983 | { |
| 7984 | // Compute the cycles spent in the call. |
| 7985 | threadCurCycles -= m_CLRcallStart; |
| 7986 | |
| 7987 | // Add the cycles to the 'phase' and bump its use count. |
| 7988 | m_info.m_cyclesByPhase[PHASE_CLR_API] += threadCurCycles; |
| 7989 | m_info.m_invokesByPhase[PHASE_CLR_API] += 1; |
| 7990 | |
| 7991 | // Add the values to the "per API" info. |
| 7992 | m_info.m_allClrAPIcycles += threadCurCycles; |
| 7993 | m_info.m_allClrAPIcalls += 1; |
| 7994 | |
| 7995 | m_info.m_perClrAPIcalls[apix] += 1; |
| 7996 | m_info.m_perClrAPIcycles[apix] += threadCurCycles; |
| 7997 | m_info.m_maxClrAPIcycles[apix] = max(m_info.m_maxClrAPIcycles[apix], (unsigned __int32)threadCurCycles); |
| 7998 | |
| 7999 | // Subtract the cycles from the enclosing phase by bumping its start time |
| 8000 | m_curPhaseStart += threadCurCycles; |
| 8001 | |
| 8002 | // Update the running totals. |
| 8003 | m_CLRcallInvokes += 1; |
| 8004 | m_CLRcallCycles += threadCurCycles; |
| 8005 | } |
| 8006 | } |
| 8007 | |
| 8008 | m_CLRcallStart = 0; |
| 8009 | } |
| 8010 | |
| 8011 | assert(m_CLRcallAPInum != -1); // No longer in this API call. |
| 8012 | m_CLRcallAPInum = -1; |
| 8013 | } |
| 8014 | |
| 8015 | #endif // MEASURE_CLRAPI_CALLS |
| 8016 | |
| 8017 | CritSecObject JitTimer::s_csvLock; |
| 8018 | |
| 8019 | LPCWSTR Compiler::JitTimeLogCsv() |
| 8020 | { |
| 8021 | LPCWSTR jitTimeLogCsv = JitConfig.JitTimeLogCsv(); |
| 8022 | return jitTimeLogCsv; |
| 8023 | } |
| 8024 | |
void JitTimer::PrintCsvHeader()
| 8026 | { |
| 8027 | LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); |
| 8028 | if (jitTimeLogCsv == nullptr) |
| 8029 | { |
| 8030 | return; |
| 8031 | } |
| 8032 | |
| 8033 | CritSecHolder csvLock(s_csvLock); |
| 8034 | |
| 8035 | FILE* fp = _wfopen(jitTimeLogCsv, W("a" )); |
| 8036 | if (fp != nullptr) |
| 8037 | { |
| 8038 | // Seek to the end of the file s.t. `ftell` doesn't lie to us on Windows |
| 8039 | fseek(fp, 0, SEEK_END); |
| 8040 | |
| 8041 | // Write the header if the file is empty |
| 8042 | if (ftell(fp) == 0) |
| 8043 | { |
| 8044 | fprintf(fp, "\"Method Name\"," ); |
| 8045 | fprintf(fp, "\"Assembly or SPMI Index\"," ); |
| 8046 | fprintf(fp, "\"IL Bytes\"," ); |
| 8047 | fprintf(fp, "\"Basic Blocks\"," ); |
| 8048 | fprintf(fp, "\"Min Opts\"," ); |
| 8049 | fprintf(fp, "\"Loops Cloned\"," ); |
| 8050 | |
| 8051 | for (int i = 0; i < PHASE_NUMBER_OF; i++) |
| 8052 | { |
| 8053 | fprintf(fp, "\"%s\"," , PhaseNames[i]); |
| 8054 | if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) |
| 8055 | { |
| 8056 | fprintf(fp, "\"Node Count After %s\"," , PhaseNames[i]); |
| 8057 | } |
| 8058 | } |
| 8059 | |
| 8060 | InlineStrategy::DumpCsvHeader(fp); |
| 8061 | |
| 8062 | fprintf(fp, "\"Executable Code Bytes\"," ); |
| 8063 | fprintf(fp, "\"GC Info Bytes\"," ); |
| 8064 | fprintf(fp, "\"Total Bytes Allocated\"," ); |
| 8065 | fprintf(fp, "\"Total Cycles\"," ); |
| 8066 | fprintf(fp, "\"CPS\"\n" ); |
| 8067 | } |
| 8068 | fclose(fp); |
| 8069 | } |
| 8070 | } |
| 8071 | |
| 8072 | extern ICorJitHost* g_jitHost; |
| 8073 | |
| 8074 | void JitTimer::PrintCsvMethodStats(Compiler* comp) |
| 8075 | { |
| 8076 | LPCWSTR jitTimeLogCsv = Compiler::JitTimeLogCsv(); |
| 8077 | if (jitTimeLogCsv == nullptr) |
| 8078 | { |
| 8079 | return; |
| 8080 | } |
| 8081 | |
| 8082 | // eeGetMethodFullName uses locks, so don't enter crit sec before this call. |
| 8083 | const char* methName = comp->eeGetMethodFullName(comp->info.compMethodHnd); |
| 8084 | |
| 8085 | // Try and access the SPMI index to report in the data set. |
| 8086 | // |
| 8087 | // If the jit is not hosted under SPMI this will return the |
| 8088 | // default value of zero. |
| 8089 | // |
| 8090 | // Query the jit host directly here instead of going via the |
| 8091 | // config cache, since value will change for each method. |
| 8092 | int index = g_jitHost->getIntConfigValue(W("SuperPMIMethodContextNumber" ), 0); |
| 8093 | |
| 8094 | CritSecHolder csvLock(s_csvLock); |
| 8095 | |
    FILE* fp = _wfopen(jitTimeLogCsv, W("a"));
    if (fp == nullptr)
    {
        return;
    }

    fprintf(fp, "\"%s\",", methName);
| 8098 | if (index != 0) |
| 8099 | { |
| 8100 | fprintf(fp, "%d," , index); |
| 8101 | } |
| 8102 | else |
| 8103 | { |
| 8104 | const char* methodAssemblyName = comp->info.compCompHnd->getAssemblyName( |
| 8105 | comp->info.compCompHnd->getModuleAssembly(comp->info.compCompHnd->getClassModule(comp->info.compClassHnd))); |
| 8106 | fprintf(fp, "\"%s\"," , methodAssemblyName); |
| 8107 | } |
| 8108 | fprintf(fp, "%u," , comp->info.compILCodeSize); |
| 8109 | fprintf(fp, "%u," , comp->fgBBcount); |
| 8110 | fprintf(fp, "%u," , comp->opts.MinOpts()); |
| 8111 | fprintf(fp, "%u," , comp->optLoopsCloned); |
| 8112 | unsigned __int64 totCycles = 0; |
| 8113 | for (int i = 0; i < PHASE_NUMBER_OF; i++) |
| 8114 | { |
| 8115 | if (!PhaseHasChildren[i]) |
| 8116 | { |
| 8117 | totCycles += m_info.m_cyclesByPhase[i]; |
| 8118 | } |
| 8119 | fprintf(fp, "%I64u," , m_info.m_cyclesByPhase[i]); |
| 8120 | |
| 8121 | if ((JitConfig.JitMeasureIR() != 0) && PhaseReportsIRSize[i]) |
| 8122 | { |
| 8123 | fprintf(fp, "%u," , m_info.m_nodeCountAfterPhase[i]); |
| 8124 | } |
| 8125 | } |
| 8126 | |
| 8127 | comp->m_inlineStrategy->DumpCsvData(fp); |
| 8128 | |
| 8129 | fprintf(fp, "%u," , comp->info.compNativeCodeSize); |
| 8130 | fprintf(fp, "%Iu," , comp->compInfoBlkSize); |
| 8131 | fprintf(fp, "%Iu," , comp->compGetArenaAllocator()->getTotalBytesAllocated()); |
| 8132 | fprintf(fp, "%I64u," , m_info.m_totalCycles); |
| 8133 | fprintf(fp, "%f\n" , CycleTimer::CyclesPerSecond()); |
| 8134 | fclose(fp); |
| 8135 | } |
| 8136 | |
| 8137 | // Completes the timing of the current method, and adds it to "sum". |
| 8138 | void JitTimer::Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases) |
| 8139 | { |
| 8140 | if (includePhases) |
| 8141 | { |
| 8142 | PrintCsvMethodStats(comp); |
| 8143 | } |
| 8144 | |
| 8145 | sum.AddInfo(m_info, includePhases); |
| 8146 | } |
| 8147 | #endif // FEATURE_JIT_METHOD_PERF |
| 8148 | |
| 8149 | #if LOOP_HOIST_STATS |
| 8150 | // Static fields. |
| 8151 | CritSecObject Compiler::s_loopHoistStatsLock; // Default constructor. |
| 8152 | unsigned Compiler::s_loopsConsidered = 0; |
| 8153 | unsigned Compiler::s_loopsWithHoistedExpressions = 0; |
| 8154 | unsigned Compiler::s_totalHoistedExpressions = 0; |
| 8155 | |
| 8156 | // static |
| 8157 | void Compiler::PrintAggregateLoopHoistStats(FILE* f) |
| 8158 | { |
| 8159 | fprintf(f, "\n" ); |
| 8160 | fprintf(f, "---------------------------------------------------\n" ); |
| 8161 | fprintf(f, "Loop hoisting stats\n" ); |
| 8162 | fprintf(f, "---------------------------------------------------\n" ); |
| 8163 | |
| 8164 | double pctWithHoisted = 0.0; |
| 8165 | if (s_loopsConsidered > 0) |
| 8166 | { |
| 8167 | pctWithHoisted = 100.0 * (double(s_loopsWithHoistedExpressions) / double(s_loopsConsidered)); |
| 8168 | } |
| 8169 | double exprsPerLoopWithExpr = 0.0; |
| 8170 | if (s_loopsWithHoistedExpressions > 0) |
| 8171 | { |
| 8172 | exprsPerLoopWithExpr = double(s_totalHoistedExpressions) / double(s_loopsWithHoistedExpressions); |
| 8173 | } |
| 8174 | fprintf(f, "Considered %d loops. Of these, we hoisted expressions out of %d (%6.2f%%).\n" , s_loopsConsidered, |
| 8175 | s_loopsWithHoistedExpressions, pctWithHoisted); |
| 8176 | fprintf(f, " A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n" , |
| 8177 | s_totalHoistedExpressions, exprsPerLoopWithExpr); |
| 8178 | } |
| 8179 | |
| 8180 | void Compiler::AddLoopHoistStats() |
| 8181 | { |
| 8182 | CritSecHolder statsLock(s_loopHoistStatsLock); |
| 8183 | |
| 8184 | s_loopsConsidered += m_loopsConsidered; |
| 8185 | s_loopsWithHoistedExpressions += m_loopsWithHoistedExpressions; |
| 8186 | s_totalHoistedExpressions += m_totalHoistedExpressions; |
| 8187 | } |
| 8188 | |
| 8189 | void Compiler::PrintPerMethodLoopHoistStats() |
| 8190 | { |
| 8191 | double pctWithHoisted = 0.0; |
| 8192 | if (m_loopsConsidered > 0) |
| 8193 | { |
| 8194 | pctWithHoisted = 100.0 * (double(m_loopsWithHoistedExpressions) / double(m_loopsConsidered)); |
| 8195 | } |
| 8196 | double exprsPerLoopWithExpr = 0.0; |
| 8197 | if (m_loopsWithHoistedExpressions > 0) |
| 8198 | { |
| 8199 | exprsPerLoopWithExpr = double(m_totalHoistedExpressions) / double(m_loopsWithHoistedExpressions); |
| 8200 | } |
| 8201 | printf("Considered %d loops. Of these, we hoisted expressions out of %d (%5.2f%%).\n" , m_loopsConsidered, |
| 8202 | m_loopsWithHoistedExpressions, pctWithHoisted); |
| 8203 | printf(" A total of %d expressions were hoisted, an average of %5.2f per loop-with-hoisted-expr.\n" , |
| 8204 | m_totalHoistedExpressions, exprsPerLoopWithExpr); |
| 8205 | } |
| 8206 | #endif // LOOP_HOIST_STATS |
| 8207 | |
| 8208 | //------------------------------------------------------------------------ |
| 8209 | // RecordStateAtEndOfInlining: capture timing data (if enabled) after |
// inlining has completed.
| 8211 | // |
| 8212 | // Note: |
| 8213 | // Records data needed for SQM and inlining data dumps. Should be |
| 8214 | // called after inlining is complete. (We do this after inlining |
| 8215 | // because this marks the last point at which the JIT is likely to |
| 8216 | // cause type-loading and class initialization). |
| 8217 | |
| 8218 | void Compiler::RecordStateAtEndOfInlining() |
| 8219 | { |
| 8220 | #if defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM) |
| 8221 | |
| 8222 | m_compCyclesAtEndOfInlining = 0; |
| 8223 | m_compTickCountAtEndOfInlining = 0; |
| 8224 | bool b = CycleTimer::GetThreadCyclesS(&m_compCyclesAtEndOfInlining); |
| 8225 | if (!b) |
| 8226 | { |
| 8227 | return; // We don't have a thread cycle counter. |
| 8228 | } |
| 8229 | m_compTickCountAtEndOfInlining = GetTickCount(); |
| 8230 | |
| 8231 | #endif // defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM) |
| 8232 | } |
| 8233 | |
| 8234 | //------------------------------------------------------------------------ |
| 8235 | // RecordStateAtEndOfCompilation: capture timing data (if enabled) after |
// compilation has completed.
| 8237 | |
| 8238 | void Compiler::RecordStateAtEndOfCompilation() |
| 8239 | { |
| 8240 | #if defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM) |
| 8241 | |
| 8242 | // Common portion |
| 8243 | m_compCycles = 0; |
| 8244 | unsigned __int64 compCyclesAtEnd; |
| 8245 | bool b = CycleTimer::GetThreadCyclesS(&compCyclesAtEnd); |
| 8246 | if (!b) |
| 8247 | { |
| 8248 | return; // We don't have a thread cycle counter. |
| 8249 | } |
| 8250 | assert(compCyclesAtEnd >= m_compCyclesAtEndOfInlining); |
| 8251 | |
| 8252 | m_compCycles = compCyclesAtEnd - m_compCyclesAtEndOfInlining; |
| 8253 | |
| 8254 | #endif // defined(DEBUG) || defined(INLINE_DATA) || defined(FEATURE_CLRSQM) |
| 8255 | |
| 8256 | #ifdef FEATURE_CLRSQM |
| 8257 | |
| 8258 | // SQM only portion |
| 8259 | unsigned __int64 mcycles64 = m_compCycles / ((unsigned __int64)1000000); |
| 8260 | unsigned mcycles; |
| 8261 | if (mcycles64 > UINT32_MAX) |
| 8262 | { |
| 8263 | mcycles = UINT32_MAX; |
| 8264 | } |
| 8265 | else |
| 8266 | { |
| 8267 | mcycles = (unsigned)mcycles64; |
| 8268 | } |
| 8269 | |
| 8270 | DWORD ticksAtEnd = GetTickCount(); |
| 8271 | assert(ticksAtEnd >= m_compTickCountAtEndOfInlining); |
| 8272 | DWORD compTicks = ticksAtEnd - m_compTickCountAtEndOfInlining; |
| 8273 | |
| 8274 | if (mcycles >= 1000) |
| 8275 | { |
| 8276 | info.compCompHnd->logSQMLongJitEvent(mcycles, compTicks, info.compILCodeSize, fgBBcount, opts.MinOpts(), |
| 8277 | info.compMethodHnd); |
| 8278 | } |
| 8279 | |
| 8280 | #endif // FEATURE_CLRSQM |
| 8281 | } |
| 8282 | |
| 8283 | #if FUNC_INFO_LOGGING |
| 8284 | // static |
| 8285 | LPCWSTR Compiler::compJitFuncInfoFilename = nullptr; |
| 8286 | |
| 8287 | // static |
| 8288 | FILE* Compiler::compJitFuncInfoFile = nullptr; |
| 8289 | #endif // FUNC_INFO_LOGGING |
| 8290 | |
| 8291 | #ifdef DEBUG |
| 8292 | |
// dumpConvertedVarSet() dumps a varset whose bits represent tracked variable
// indices: we convert each index to its variable number, sort the variable numbers, and
// print them as variable numbers. To do this, we use a temporary set indexed by
// variable number. We can't use the "all varset" type because it is still size-limited, and might
// not be big enough to handle all possible variable numbers.
| 8298 | void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars) |
| 8299 | { |
| 8300 | BYTE* pVarNumSet; // trivial set: one byte per varNum, 0 means not in set, 1 means in set. |
| 8301 | |
| 8302 | size_t varNumSetBytes = comp->lvaCount * sizeof(BYTE); |
| 8303 | pVarNumSet = (BYTE*)_alloca(varNumSetBytes); |
| 8304 | memset(pVarNumSet, 0, varNumSetBytes); // empty the set |
| 8305 | |
| 8306 | VarSetOps::Iter iter(comp, vars); |
| 8307 | unsigned varIndex = 0; |
| 8308 | while (iter.NextElem(&varIndex)) |
| 8309 | { |
| 8310 | unsigned varNum = comp->lvaTrackedToVarNum[varIndex]; |
| 8311 | assert(varNum < comp->lvaCount); |
| 8312 | pVarNumSet[varNum] = 1; // This varNum is in the set |
| 8313 | } |
| 8314 | |
| 8315 | bool first = true; |
| 8316 | printf("{" ); |
| 8317 | for (size_t varNum = 0; varNum < comp->lvaCount; varNum++) |
| 8318 | { |
| 8319 | if (pVarNumSet[varNum] == 1) |
| 8320 | { |
| 8321 | if (!first) |
| 8322 | { |
| 8323 | printf(" " ); |
| 8324 | } |
| 8325 | printf("V%02u" , varNum); |
| 8326 | first = false; |
| 8327 | } |
| 8328 | } |
| 8329 | printf("}" ); |
| 8330 | } |
| 8331 | |
| 8332 | /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 8333 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 8334 | XX XX |
| 8335 | XX Debugging helpers XX |
| 8336 | XX XX |
| 8337 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 8338 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 8339 | */ |
| 8340 | |
| 8341 | /*****************************************************************************/ |
| 8342 | /* The following functions are intended to be called from the debugger, to dump |
| 8343 | * various data structures. |
| 8344 | * |
| 8345 | * The versions that start with 'c' take a Compiler* as the first argument. |
| 8346 | * The versions that start with 'd' use the tlsCompiler, so don't require a Compiler*. |
| 8347 | * |
| 8348 | * Summary: |
| 8349 | * cBlock, dBlock : Display a basic block (call fgTableDispBasicBlock()). |
| 8350 | * cBlocks, dBlocks : Display all the basic blocks of a function (call fgDispBasicBlocks()). |
| 8351 | * cBlocksV, dBlocksV : Display all the basic blocks of a function (call fgDispBasicBlocks(true)). |
| 8352 | * "V" means "verbose", and will dump all the trees. |
| 8353 | * cTree, dTree : Display a tree (call gtDispTree()). |
| 8354 | * cTreeLIR, dTreeLIR : Display a tree in LIR form (call gtDispLIRNode()). |
| 8355 | * cTrees, dTrees : Display all the trees in a function (call fgDumpTrees()). |
| 8356 | * cEH, dEH : Display the EH handler table (call fgDispHandlerTab()). |
| 8357 | * cVar, dVar : Display a local variable given its number (call lvaDumpEntry()). |
| 8358 | * cVarDsc, dVarDsc : Display a local variable given a LclVarDsc* (call lvaDumpEntry()). |
| 8359 | * cVars, dVars : Display the local variable table (call lvaTableDump()). |
| 8360 | * cVarsFinal, dVarsFinal : Display the local variable table (call lvaTableDump(FINAL_FRAME_LAYOUT)). |
| 8361 | * cBlockCheapPreds, dBlockCheapPreds : Display a block's cheap predecessors (call block->dspCheapPreds()). |
| 8362 | * cBlockPreds, dBlockPreds : Display a block's predecessors (call block->dspPreds()). |
| 8363 | * cBlockSuccs, dBlockSuccs : Display a block's successors (call block->dspSuccs(compiler)). |
| 8364 | * cReach, dReach : Display all block reachability (call fgDispReach()). |
| 8365 | * cDoms, dDoms : Display all block dominators (call fgDispDoms()). |
| 8366 | * cLiveness, dLiveness : Display per-block variable liveness (call fgDispBBLiveness()). |
| 8367 | * cCVarSet, dCVarSet : Display a "converted" VARSET_TP: the varset is assumed to be tracked variable |
| 8368 | * indices. These are converted to variable numbers and sorted. (Calls |
| 8369 | * dumpConvertedVarSet()). |
| 8370 | * |
| 8371 | * cFuncIR, dFuncIR : Display all the basic blocks of a function in linear IR form. |
| 8372 | * cLoopIR, dLoopIR : Display a loop in linear IR form. |
| 8373 | * dLoopNumIR : Display a loop (given number) in linear IR form. |
| 8374 | * cBlockIR, dBlockIR : Display a basic block in linear IR form. |
| 8375 | * cTreeIR, dTreeIR : Display a tree in linear IR form. |
| 8376 | * dTabStopIR : Display spaces to the next tab stop column |
| 8377 | * cTreeTypeIR dTreeTypeIR : Display tree type |
| 8378 | * cTreeKindsIR dTreeKindsIR : Display tree kinds |
| 8379 | * cTreeFlagsIR dTreeFlagsIR : Display tree flags |
| 8380 | * cOperandIR dOperandIR : Display tree operand |
| 8381 | * cLeafIR dLeafIR : Display tree leaf |
| 8382 | * cIndirIR dIndirIR : Display indir tree as [t#] or [leaf] |
| 8383 | * cListIR dListIR : Display tree list |
| 8384 | * cSsaNumIR dSsaNumIR : Display SSA number as <u|d:#> |
| 8385 | * cValNumIR dValNumIR : Display Value number as <v{l|c}:#{,R}> |
| 8386 | * cDependsIR : Display dependencies of a tree DEP(t# ...) node |
| 8387 | * based on child comma tree nodes |
| 8388 | * dFormatIR : Display dump format specified on command line |
| 8389 | * |
| 8390 | * |
| 8391 | * The following don't require a Compiler* to work: |
| 8392 | * dRegMask : Display a regMaskTP (call dspRegMask(mask)). |
| 8393 | */ |
| 8394 | |
| 8395 | void cBlock(Compiler* comp, BasicBlock* block) |
| 8396 | { |
| 8397 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8398 | printf("===================================================================== *Block %u\n" , sequenceNumber++); |
| 8399 | comp->fgTableDispBasicBlock(block); |
| 8400 | } |
| 8401 | |
| 8402 | void cBlocks(Compiler* comp) |
| 8403 | { |
| 8404 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8405 | printf("===================================================================== *Blocks %u\n" , sequenceNumber++); |
| 8406 | comp->fgDispBasicBlocks(); |
| 8407 | } |
| 8408 | |
| 8409 | void cBlocksV(Compiler* comp) |
| 8410 | { |
| 8411 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8412 | printf("===================================================================== *BlocksV %u\n" , sequenceNumber++); |
| 8413 | comp->fgDispBasicBlocks(true); |
| 8414 | } |
| 8415 | |
| 8416 | void cTree(Compiler* comp, GenTree* tree) |
| 8417 | { |
| 8418 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8419 | printf("===================================================================== *Tree %u\n" , sequenceNumber++); |
| 8420 | comp->gtDispTree(tree, nullptr, ">>>" ); |
| 8421 | } |
| 8422 | |
| 8423 | void cTreeLIR(Compiler* comp, GenTree* tree) |
| 8424 | { |
| 8425 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8426 | printf("===================================================================== *TreeLIR %u\n" , sequenceNumber++); |
| 8427 | comp->gtDispLIRNode(tree); |
| 8428 | } |
| 8429 | |
| 8430 | void cTrees(Compiler* comp) |
| 8431 | { |
| 8432 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8433 | printf("===================================================================== *Trees %u\n" , sequenceNumber++); |
| 8434 | comp->fgDumpTrees(comp->fgFirstBB, nullptr); |
| 8435 | } |
| 8436 | |
| 8437 | void cEH(Compiler* comp) |
| 8438 | { |
| 8439 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8440 | printf("===================================================================== *EH %u\n" , sequenceNumber++); |
| 8441 | comp->fgDispHandlerTab(); |
| 8442 | } |
| 8443 | |
| 8444 | void cVar(Compiler* comp, unsigned lclNum) |
| 8445 | { |
| 8446 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8447 | printf("===================================================================== *Var %u\n" , sequenceNumber++); |
| 8448 | comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); |
| 8449 | } |
| 8450 | |
| 8451 | void cVarDsc(Compiler* comp, LclVarDsc* varDsc) |
| 8452 | { |
| 8453 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8454 | printf("===================================================================== *VarDsc %u\n" , sequenceNumber++); |
| 8455 | unsigned lclNum = (unsigned)(varDsc - comp->lvaTable); |
| 8456 | comp->lvaDumpEntry(lclNum, Compiler::FINAL_FRAME_LAYOUT); |
| 8457 | } |
| 8458 | |
| 8459 | void cVars(Compiler* comp) |
| 8460 | { |
| 8461 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8462 | printf("===================================================================== *Vars %u\n" , sequenceNumber++); |
| 8463 | comp->lvaTableDump(); |
| 8464 | } |
| 8465 | |
| 8466 | void cVarsFinal(Compiler* comp) |
| 8467 | { |
| 8468 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8469 | printf("===================================================================== *Vars %u\n" , sequenceNumber++); |
| 8470 | comp->lvaTableDump(Compiler::FINAL_FRAME_LAYOUT); |
| 8471 | } |
| 8472 | |
| 8473 | void cBlockCheapPreds(Compiler* comp, BasicBlock* block) |
| 8474 | { |
| 8475 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8476 | printf("===================================================================== *BlockCheapPreds %u\n" , |
| 8477 | sequenceNumber++); |
| 8478 | block->dspCheapPreds(); |
| 8479 | } |
| 8480 | |
| 8481 | void cBlockPreds(Compiler* comp, BasicBlock* block) |
| 8482 | { |
| 8483 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8484 | printf("===================================================================== *BlockPreds %u\n" , sequenceNumber++); |
| 8485 | block->dspPreds(); |
| 8486 | } |
| 8487 | |
| 8488 | void cBlockSuccs(Compiler* comp, BasicBlock* block) |
| 8489 | { |
| 8490 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8491 | printf("===================================================================== *BlockSuccs %u\n" , sequenceNumber++); |
| 8492 | block->dspSuccs(comp); |
| 8493 | } |
| 8494 | |
| 8495 | void cReach(Compiler* comp) |
| 8496 | { |
| 8497 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8498 | printf("===================================================================== *Reach %u\n" , sequenceNumber++); |
| 8499 | comp->fgDispReach(); |
| 8500 | } |
| 8501 | |
| 8502 | void cDoms(Compiler* comp) |
| 8503 | { |
| 8504 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8505 | printf("===================================================================== *Doms %u\n" , sequenceNumber++); |
| 8506 | comp->fgDispDoms(); |
| 8507 | } |
| 8508 | |
| 8509 | void cLiveness(Compiler* comp) |
| 8510 | { |
| 8511 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8512 | printf("===================================================================== *Liveness %u\n" , sequenceNumber++); |
| 8513 | comp->fgDispBBLiveness(); |
| 8514 | } |
| 8515 | |
| 8516 | void cCVarSet(Compiler* comp, VARSET_VALARG_TP vars) |
| 8517 | { |
| 8518 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8519 | printf("===================================================================== dCVarSet %u\n" , sequenceNumber++); |
| 8520 | dumpConvertedVarSet(comp, vars); |
| 8521 | printf("\n" ); // dumpConvertedVarSet() doesn't emit a trailing newline |
| 8522 | } |
| 8523 | |
| 8524 | void dBlock(BasicBlock* block) |
| 8525 | { |
| 8526 | cBlock(JitTls::GetCompiler(), block); |
| 8527 | } |
| 8528 | |
| 8529 | void dBlocks() |
| 8530 | { |
| 8531 | cBlocks(JitTls::GetCompiler()); |
| 8532 | } |
| 8533 | |
| 8534 | void dBlocksV() |
| 8535 | { |
| 8536 | cBlocksV(JitTls::GetCompiler()); |
| 8537 | } |
| 8538 | |
| 8539 | void dTree(GenTree* tree) |
| 8540 | { |
| 8541 | cTree(JitTls::GetCompiler(), tree); |
| 8542 | } |
| 8543 | |
| 8544 | void dTreeLIR(GenTree* tree) |
| 8545 | { |
| 8546 | cTreeLIR(JitTls::GetCompiler(), tree); |
| 8547 | } |
| 8548 | |
| 8549 | void dTrees() |
| 8550 | { |
| 8551 | cTrees(JitTls::GetCompiler()); |
| 8552 | } |
| 8553 | |
| 8554 | void dEH() |
| 8555 | { |
| 8556 | cEH(JitTls::GetCompiler()); |
| 8557 | } |
| 8558 | |
| 8559 | void dVar(unsigned lclNum) |
| 8560 | { |
| 8561 | cVar(JitTls::GetCompiler(), lclNum); |
| 8562 | } |
| 8563 | |
| 8564 | void dVarDsc(LclVarDsc* varDsc) |
| 8565 | { |
| 8566 | cVarDsc(JitTls::GetCompiler(), varDsc); |
| 8567 | } |
| 8568 | |
| 8569 | void dVars() |
| 8570 | { |
| 8571 | cVars(JitTls::GetCompiler()); |
| 8572 | } |
| 8573 | |
| 8574 | void dVarsFinal() |
| 8575 | { |
| 8576 | cVarsFinal(JitTls::GetCompiler()); |
| 8577 | } |
| 8578 | |
| 8579 | void dBlockPreds(BasicBlock* block) |
| 8580 | { |
| 8581 | cBlockPreds(JitTls::GetCompiler(), block); |
| 8582 | } |
| 8583 | |
| 8584 | void dBlockCheapPreds(BasicBlock* block) |
| 8585 | { |
| 8586 | cBlockCheapPreds(JitTls::GetCompiler(), block); |
| 8587 | } |
| 8588 | |
| 8589 | void dBlockSuccs(BasicBlock* block) |
| 8590 | { |
| 8591 | cBlockSuccs(JitTls::GetCompiler(), block); |
| 8592 | } |
| 8593 | |
| 8594 | void dReach() |
| 8595 | { |
| 8596 | cReach(JitTls::GetCompiler()); |
| 8597 | } |
| 8598 | |
| 8599 | void dDoms() |
| 8600 | { |
| 8601 | cDoms(JitTls::GetCompiler()); |
| 8602 | } |
| 8603 | |
| 8604 | void dLiveness() |
| 8605 | { |
| 8606 | cLiveness(JitTls::GetCompiler()); |
| 8607 | } |
| 8608 | |
| 8609 | void dCVarSet(VARSET_VALARG_TP vars) |
| 8610 | { |
| 8611 | cCVarSet(JitTls::GetCompiler(), vars); |
| 8612 | } |
| 8613 | |
| 8614 | void dRegMask(regMaskTP mask) |
| 8615 | { |
| 8616 | static unsigned sequenceNumber = 0; // separate calls with a number to indicate this function has been called |
| 8617 | printf("===================================================================== dRegMask %u\n" , sequenceNumber++); |
| 8618 | dspRegMask(mask); |
| 8619 | printf("\n" ); // dspRegMask() doesn't emit a trailing newline |
| 8620 | } |
| 8621 | |
| 8622 | void dBlockList(BasicBlockList* list) |
| 8623 | { |
| 8624 | printf("WorkList: " ); |
| 8625 | while (list != nullptr) |
| 8626 | { |
| 8627 | printf(FMT_BB " " , list->block->bbNum); |
| 8628 | list = list->next; |
| 8629 | } |
| 8630 | printf("\n" ); |
| 8631 | } |
| 8632 | |
// Global variables available in debug mode, set by the debug APIs below for finding
// Trees, Stmts, and/or Blocks using an id or bbNum.
// They can be used in the watch window, or as a way to get the address of fields for data breakpoints.
| 8636 | |
| 8637 | GenTree* dbTree; |
| 8638 | GenTreeStmt* dbStmt; |
| 8639 | BasicBlock* dbTreeBlock; |
| 8640 | BasicBlock* dbBlock; |
| 8641 | |
| 8642 | // Debug APIs for finding Trees, Stmts, and/or Blocks. |
| 8643 | // As a side effect, they set the debug variables above. |
| 8644 | |
| 8645 | GenTree* dFindTree(GenTree* tree, unsigned id) |
| 8646 | { |
| 8647 | GenTree* child; |
| 8648 | |
| 8649 | if (tree == nullptr) |
| 8650 | { |
| 8651 | return nullptr; |
| 8652 | } |
| 8653 | |
| 8654 | if (tree->gtTreeID == id) |
| 8655 | { |
| 8656 | dbTree = tree; |
| 8657 | return tree; |
| 8658 | } |
| 8659 | |
| 8660 | unsigned childCount = tree->NumChildren(); |
| 8661 | for (unsigned childIndex = 0; childIndex < childCount; childIndex++) |
| 8662 | { |
| 8663 | child = tree->GetChild(childIndex); |
| 8664 | child = dFindTree(child, id); |
| 8665 | if (child != nullptr) |
| 8666 | { |
| 8667 | return child; |
| 8668 | } |
| 8669 | } |
| 8670 | |
| 8671 | return nullptr; |
| 8672 | } |
| 8673 | |
| 8674 | GenTree* dFindTree(unsigned id) |
| 8675 | { |
| 8676 | Compiler* comp = JitTls::GetCompiler(); |
| 8677 | BasicBlock* block; |
| 8678 | GenTree* tree; |
| 8679 | |
| 8680 | dbTreeBlock = nullptr; |
| 8681 | dbTree = nullptr; |
| 8682 | |
| 8683 | for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) |
| 8684 | { |
| 8685 | for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 8686 | { |
| 8687 | tree = dFindTree(stmt, id); |
| 8688 | if (tree != nullptr) |
| 8689 | { |
| 8690 | dbTreeBlock = block; |
| 8691 | return tree; |
| 8692 | } |
| 8693 | } |
| 8694 | } |
| 8695 | |
| 8696 | return nullptr; |
| 8697 | } |
| 8698 | |
| 8699 | GenTreeStmt* dFindStmt(unsigned id) |
| 8700 | { |
| 8701 | Compiler* comp = JitTls::GetCompiler(); |
| 8702 | BasicBlock* block; |
| 8703 | |
| 8704 | dbStmt = nullptr; |
| 8705 | |
| 8706 | unsigned stmtId = 0; |
| 8707 | for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) |
| 8708 | { |
| 8709 | for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 8710 | { |
| 8711 | stmtId++; |
| 8712 | if (stmtId == id) |
| 8713 | { |
| 8714 | dbStmt = stmt; |
| 8715 | return stmt; |
| 8716 | } |
| 8717 | } |
| 8718 | } |
| 8719 | |
| 8720 | return nullptr; |
| 8721 | } |
| 8722 | |
| 8723 | BasicBlock* dFindBlock(unsigned bbNum) |
| 8724 | { |
| 8725 | Compiler* comp = JitTls::GetCompiler(); |
| 8726 | BasicBlock* block = nullptr; |
| 8727 | |
| 8728 | dbBlock = nullptr; |
| 8729 | for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) |
| 8730 | { |
| 8731 | if (block->bbNum == bbNum) |
| 8732 | { |
| 8733 | dbBlock = block; |
| 8734 | break; |
| 8735 | } |
| 8736 | } |
| 8737 | |
| 8738 | return block; |
| 8739 | } |
| 8740 | |
| 8741 | /***************************************************************************** |
| 8742 | * |
| 8743 | * COMPlus_JitDumpIR support - dump out function in linear IR form |
| 8744 | */ |
| 8745 | |
| 8746 | void cFuncIR(Compiler* comp) |
| 8747 | { |
| 8748 | BasicBlock* block; |
| 8749 | |
| 8750 | printf("Method %s::%s, hsh=0x%x\n" , comp->info.compClassName, comp->info.compMethodName, |
| 8751 | comp->info.compMethodHash()); |
| 8752 | |
| 8753 | printf("\n" ); |
| 8754 | |
| 8755 | for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext) |
| 8756 | { |
| 8757 | cBlockIR(comp, block); |
| 8758 | } |
| 8759 | } |
| 8760 | |
| 8761 | /***************************************************************************** |
| 8762 | * |
| 8763 | * COMPlus_JitDumpIR support - dump out the format specifiers from COMPlus_JitDumpIRFormat |
| 8764 | */ |
| 8765 | |
| 8766 | void dFormatIR() |
| 8767 | { |
| 8768 | Compiler* comp = JitTls::GetCompiler(); |
| 8769 | |
| 8770 | if (comp->dumpIRFormat != nullptr) |
| 8771 | { |
| 8772 | printf("COMPlus_JitDumpIRFormat=%ls" , comp->dumpIRFormat); |
| 8773 | } |
| 8774 | } |
| 8775 | |
| 8776 | /***************************************************************************** |
| 8777 | * |
| 8778 | * COMPlus_JitDumpIR support - dump out function in linear IR form |
| 8779 | */ |
| 8780 | |
| 8781 | void dFuncIR() |
| 8782 | { |
| 8783 | cFuncIR(JitTls::GetCompiler()); |
| 8784 | } |
| 8785 | |
| 8786 | /***************************************************************************** |
| 8787 | * |
| 8788 | * COMPlus_JitDumpIR support - dump out loop in linear IR form |
| 8789 | */ |
| 8790 | |
| 8791 | void cLoopIR(Compiler* comp, Compiler::LoopDsc* loop) |
| 8792 | { |
| 8793 | BasicBlock* blockHead = loop->lpHead; |
| 8794 | BasicBlock* blockFirst = loop->lpFirst; |
| 8795 | BasicBlock* blockTop = loop->lpTop; |
| 8796 | BasicBlock* blockEntry = loop->lpEntry; |
| 8797 | BasicBlock* blockBottom = loop->lpBottom; |
| 8798 | BasicBlock* blockExit = loop->lpExit; |
| 8799 | BasicBlock* blockLast = blockBottom->bbNext; |
| 8800 | BasicBlock* block; |
| 8801 | |
| 8802 | printf("LOOP\n" ); |
| 8803 | printf("\n" ); |
| 8804 | printf("HEAD " FMT_BB "\n" , blockHead->bbNum); |
| 8805 | printf("FIRST " FMT_BB "\n" , blockFirst->bbNum); |
| 8806 | printf("TOP " FMT_BB "\n" , blockTop->bbNum); |
| 8807 | printf("ENTRY " FMT_BB "\n" , blockEntry->bbNum); |
| 8808 | if (loop->lpExitCnt == 1) |
| 8809 | { |
| 8810 | printf("EXIT " FMT_BB "\n" , blockExit->bbNum); |
| 8811 | } |
| 8812 | else |
| 8813 | { |
| 8814 | printf("EXITS %u" , loop->lpExitCnt); |
| 8815 | } |
| 8816 | printf("BOTTOM " FMT_BB "\n" , blockBottom->bbNum); |
| 8817 | printf("\n" ); |
| 8818 | |
| 8819 | cBlockIR(comp, blockHead); |
| 8820 | for (block = blockFirst; ((block != nullptr) && (block != blockLast)); block = block->bbNext) |
| 8821 | { |
| 8822 | cBlockIR(comp, block); |
| 8823 | } |
| 8824 | } |
| 8825 | |
| 8826 | /***************************************************************************** |
| 8827 | * |
| 8828 | * COMPlus_JitDumpIR support - dump out loop in linear IR form |
| 8829 | */ |
| 8830 | |
| 8831 | void dLoopIR(Compiler::LoopDsc* loop) |
| 8832 | { |
| 8833 | cLoopIR(JitTls::GetCompiler(), loop); |
| 8834 | } |
| 8835 | |
| 8836 | /***************************************************************************** |
| 8837 | * |
| 8838 | * COMPlus_JitDumpIR support - dump out loop (given loop number) in linear IR form |
| 8839 | */ |
| 8840 | |
| 8841 | void dLoopNumIR(unsigned loopNum) |
| 8842 | { |
| 8843 | Compiler* comp = JitTls::GetCompiler(); |
| 8844 | |
| 8845 | if (loopNum >= comp->optLoopCount) |
| 8846 | { |
| 8847 | printf("loopNum %u out of range\n" ); |
| 8848 | return; |
| 8849 | } |
| 8850 | |
| 8851 | Compiler::LoopDsc* loop = &comp->optLoopTable[loopNum]; |
    cLoopIR(comp, loop);
| 8853 | } |
| 8854 | |
| 8855 | /***************************************************************************** |
| 8856 | * |
| 8857 | * COMPlus_JitDumpIR support - dump spaces to specified tab stop |
| 8858 | */ |
| 8859 | |
| 8860 | int dTabStopIR(int curr, int tabstop) |
| 8861 | { |
| 8862 | int chars = 0; |
| 8863 | |
| 8864 | if (tabstop <= curr) |
| 8865 | { |
| 8866 | chars += printf(" " ); |
| 8867 | } |
| 8868 | |
| 8869 | for (int i = curr; i < tabstop; i++) |
| 8870 | { |
| 8871 | chars += printf(" " ); |
| 8872 | } |
| 8873 | |
| 8874 | return chars; |
| 8875 | } |
| 8876 | |
| 8877 | void cNodeIR(Compiler* comp, GenTree* tree); |
| 8878 | |
| 8879 | /***************************************************************************** |
| 8880 | * |
| 8881 | * COMPlus_JitDumpIR support - dump out block in linear IR form |
| 8882 | */ |
| 8883 | |
| 8884 | void cBlockIR(Compiler* comp, BasicBlock* block) |
| 8885 | { |
| 8886 | bool noStmts = comp->dumpIRNoStmts; |
| 8887 | bool trees = comp->dumpIRTrees; |
| 8888 | |
| 8889 | if (comp->dumpIRBlockHeaders) |
| 8890 | { |
| 8891 | block->dspBlockHeader(comp); |
| 8892 | } |
| 8893 | else |
| 8894 | { |
| 8895 | printf(FMT_BB ":\n" , block->bbNum); |
| 8896 | } |
| 8897 | |
| 8898 | printf("\n" ); |
| 8899 | |
| 8900 | if (!block->IsLIR()) |
| 8901 | { |
| 8902 | for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 8903 | { |
| 8904 | // Print current stmt. |
| 8905 | |
| 8906 | if (trees) |
| 8907 | { |
| 8908 | cTree(comp, stmt); |
| 8909 | printf("\n" ); |
| 8910 | printf("=====================================================================\n" ); |
| 8911 | } |
| 8912 | |
| 8913 | if (comp->compRationalIRForm) |
| 8914 | { |
| 8915 | GenTree* tree; |
| 8916 | |
| 8917 | foreach_treenode_execution_order(tree, stmt) |
| 8918 | { |
| 8919 | cNodeIR(comp, tree); |
| 8920 | } |
| 8921 | } |
| 8922 | else |
| 8923 | { |
| 8924 | cTreeIR(comp, stmt); |
| 8925 | } |
| 8926 | |
| 8927 | if (!noStmts && !trees) |
| 8928 | { |
| 8929 | printf("\n" ); |
| 8930 | } |
| 8931 | } |
| 8932 | } |
| 8933 | else |
| 8934 | { |
| 8935 | for (GenTree* node = block->bbTreeList; node != nullptr; node = node->gtNext) |
| 8936 | { |
| 8937 | cNodeIR(comp, node); |
| 8938 | } |
| 8939 | } |
| 8940 | |
| 8941 | int chars = 0; |
| 8942 | |
| 8943 | chars += dTabStopIR(chars, COLUMN_OPCODE); |
| 8944 | |
| 8945 | chars += printf(" " ); |
| 8946 | switch (block->bbJumpKind) |
| 8947 | { |
| 8948 | case BBJ_EHFINALLYRET: |
| 8949 | chars += printf("BRANCH(EHFINALLYRET)" ); |
| 8950 | break; |
| 8951 | |
| 8952 | case BBJ_EHFILTERRET: |
| 8953 | chars += printf("BRANCH(EHFILTERRET)" ); |
| 8954 | break; |
| 8955 | |
| 8956 | case BBJ_EHCATCHRET: |
| 8957 | chars += printf("BRANCH(EHCATCHRETURN)" ); |
| 8958 | chars += dTabStopIR(chars, COLUMN_OPERANDS); |
| 8959 | chars += printf(" " FMT_BB, block->bbJumpDest->bbNum); |
| 8960 | break; |
| 8961 | |
| 8962 | case BBJ_THROW: |
| 8963 | chars += printf("BRANCH(THROW)" ); |
| 8964 | break; |
| 8965 | |
| 8966 | case BBJ_RETURN: |
| 8967 | chars += printf("BRANCH(RETURN)" ); |
| 8968 | break; |
| 8969 | |
| 8970 | case BBJ_NONE: |
| 8971 | // For fall-through blocks |
| 8972 | chars += printf("BRANCH(NONE)" ); |
| 8973 | break; |
| 8974 | |
| 8975 | case BBJ_ALWAYS: |
| 8976 | chars += printf("BRANCH(ALWAYS)" ); |
| 8977 | chars += dTabStopIR(chars, COLUMN_OPERANDS); |
| 8978 | chars += printf(" " FMT_BB, block->bbJumpDest->bbNum); |
| 8979 | if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS) |
| 8980 | { |
| 8981 | chars += dTabStopIR(chars, COLUMN_KINDS); |
| 8982 | chars += printf("; [KEEP_BBJ_ALWAYS]" ); |
| 8983 | } |
| 8984 | break; |
| 8985 | |
| 8986 | case BBJ_LEAVE: |
| 8987 | chars += printf("BRANCH(LEAVE)" ); |
| 8988 | chars += dTabStopIR(chars, COLUMN_OPERANDS); |
| 8989 | chars += printf(" " FMT_BB, block->bbJumpDest->bbNum); |
| 8990 | break; |
| 8991 | |
| 8992 | case BBJ_CALLFINALLY: |
| 8993 | chars += printf("BRANCH(CALLFINALLY)" ); |
| 8994 | chars += dTabStopIR(chars, COLUMN_OPERANDS); |
| 8995 | chars += printf(" " FMT_BB, block->bbJumpDest->bbNum); |
| 8996 | break; |
| 8997 | |
| 8998 | case BBJ_COND: |
| 8999 | chars += printf("BRANCH(COND)" ); |
| 9000 | chars += dTabStopIR(chars, COLUMN_OPERANDS); |
| 9001 | chars += printf(" " FMT_BB, block->bbJumpDest->bbNum); |
| 9002 | break; |
| 9003 | |
| 9004 | case BBJ_SWITCH: |
| 9005 | chars += printf("BRANCH(SWITCH)" ); |
| 9006 | chars += dTabStopIR(chars, COLUMN_OPERANDS); |
| 9007 | |
| 9008 | unsigned jumpCnt; |
| 9009 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 9010 | BasicBlock** jumpTab; |
| 9011 | jumpTab = block->bbJumpSwt->bbsDstTab; |
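            // Print each switch target. jumpCnt is at least 1 (a switch always
            // has at least its default target), so the do/while is safe.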
| 9012 | do |
| 9013 | { |
| 9014 | chars += printf("%c " FMT_BB, (jumpTab == block->bbJumpSwt->bbsDstTab) ? ' ' : ',', (*jumpTab)->bbNum); |
| 9015 | } while (++jumpTab, --jumpCnt); |
| 9016 | break; |
| 9017 | |
| 9018 | default: |
| 9019 | unreached(); |
| 9020 | break; |
| 9021 | } |
| 9022 | |
| 9023 | printf("\n" ); |
| 9024 | if (block->bbNext != nullptr) |
| 9025 | { |
| 9026 | printf("\n" ); |
| 9027 | } |
| 9028 | } |
| 9029 | |
| 9030 | /***************************************************************************** |
| 9031 | * |
| 9032 | * COMPlus_JitDumpIR support - dump out block in linear IR form |
| 9033 | */ |
| 9034 | |
| 9035 | void dBlockIR(BasicBlock* block) |
| 9036 | { |
| 9037 | cBlockIR(JitTls::GetCompiler(), block); |
| 9038 | } |
| 9039 | |
| 9040 | /***************************************************************************** |
| 9041 | * |
| 9042 | * COMPlus_JitDumpIR support - dump out tree node type for linear IR form |
| 9043 | */ |
| 9044 | |
| 9045 | int cTreeTypeIR(Compiler* comp, GenTree* tree) |
| 9046 | { |
| 9047 | int chars = 0; |
| 9048 | |
| 9049 | var_types type = tree->TypeGet(); |
| 9050 | |
| 9051 | const char* typeName = varTypeName(type); |
| 9052 | chars += printf(".%s" , typeName); |
| 9053 | |
| 9054 | return chars; |
| 9055 | } |
| 9056 | |
| 9057 | /***************************************************************************** |
| 9058 | * |
| 9059 | * COMPlus_JitDumpIR support - dump out tree node type for linear IR form |
| 9060 | */ |
| 9061 | |
| 9062 | int dTreeTypeIR(GenTree* tree) |
| 9063 | { |
| 9064 | int chars = cTreeTypeIR(JitTls::GetCompiler(), tree); |
| 9065 | |
| 9066 | return chars; |
| 9067 | } |
| 9068 | |
| 9069 | /***************************************************************************** |
| 9070 | * |
| 9071 | * COMPlus_JitDumpIR support - dump out tree node kind for linear IR form |
| 9072 | */ |
| 9073 | |
| 9074 | int cTreeKindsIR(Compiler* comp, GenTree* tree) |
| 9075 | { |
| 9076 | int chars = 0; |
| 9077 | |
| 9078 | unsigned kind = tree->OperKind(); |
| 9079 | |
| 9080 | chars += printf("kinds=" ); |
| 9081 | if (kind == GTK_SPECIAL) |
| 9082 | { |
| 9083 | chars += printf("[SPECIAL]" ); |
| 9084 | } |
| 9085 | if (kind & GTK_CONST) |
| 9086 | { |
| 9087 | chars += printf("[CONST]" ); |
| 9088 | } |
| 9089 | if (kind & GTK_LEAF) |
| 9090 | { |
| 9091 | chars += printf("[LEAF]" ); |
| 9092 | } |
| 9093 | if (kind & GTK_UNOP) |
| 9094 | { |
| 9095 | chars += printf("[UNOP]" ); |
| 9096 | } |
| 9097 | if (kind & GTK_BINOP) |
| 9098 | { |
| 9099 | chars += printf("[BINOP]" ); |
| 9100 | } |
| 9101 | if (kind & GTK_LOGOP) |
| 9102 | { |
| 9103 | chars += printf("[LOGOP]" ); |
| 9104 | } |
| 9105 | if (kind & GTK_COMMUTE) |
| 9106 | { |
| 9107 | chars += printf("[COMMUTE]" ); |
| 9108 | } |
| 9109 | if (kind & GTK_EXOP) |
| 9110 | { |
| 9111 | chars += printf("[EXOP]" ); |
| 9112 | } |
| 9113 | if (kind & GTK_LOCAL) |
| 9114 | { |
| 9115 | chars += printf("[LOCAL]" ); |
| 9116 | } |
| 9117 | if (kind & GTK_SMPOP) |
| 9118 | { |
| 9119 | chars += printf("[SMPOP]" ); |
| 9120 | } |
| 9121 | |
| 9122 | return chars; |
| 9123 | } |
| 9124 | |
| 9125 | /***************************************************************************** |
| 9126 | * |
| 9127 | * COMPlus_JitDumpIR support - dump out tree node kind for linear IR form |
| 9128 | */ |
| 9129 | |
| 9130 | int dTreeKindsIR(GenTree* tree) |
| 9131 | { |
| 9132 | int chars = cTreeKindsIR(JitTls::GetCompiler(), tree); |
| 9133 | |
| 9134 | return chars; |
| 9135 | } |
| 9136 | |
| 9137 | /***************************************************************************** |
| 9138 | * |
| 9139 | * COMPlus_JitDumpIR support - dump out tree node flags for linear IR form |
| 9140 | */ |
| 9141 | |
| 9142 | int cTreeFlagsIR(Compiler* comp, GenTree* tree) |
| 9143 | { |
| 9144 | int chars = 0; |
| 9145 | |
| 9146 | if (tree->gtFlags != 0) |
| 9147 | { |
| 9148 | chars += printf("flags=" ); |
| 9149 | |
| 9150 | // Node flags |
| 9151 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 9152 | |
| 9153 | #if defined(DEBUG) |
| 9154 | #if SMALL_TREE_NODES |
| 9155 | if (comp->dumpIRNodes) |
| 9156 | { |
| 9157 | if (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) |
| 9158 | { |
| 9159 | chars += printf("[NODE_LARGE]" ); |
| 9160 | } |
| 9161 | if (tree->gtDebugFlags & GTF_DEBUG_NODE_SMALL) |
| 9162 | { |
| 9163 | chars += printf("[NODE_SMALL]" ); |
| 9164 | } |
| 9165 | } |
| 9166 | #endif // SMALL_TREE_NODES |
| 9167 | if (tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) |
| 9168 | { |
| 9169 | chars += printf("[MORPHED]" ); |
| 9170 | } |
| 9171 | #endif // defined(DEBUG) |
| 9172 | |
| 9173 | if (tree->gtFlags & GTF_COLON_COND) |
| 9174 | { |
| 9175 | chars += printf("[COLON_COND]" ); |
| 9176 | } |
| 9177 | |
| 9178 | // Operator flags |
| 9179 | |
| 9180 | genTreeOps op = tree->OperGet(); |
| 9181 | switch (op) |
| 9182 | { |
| 9183 | |
| 9184 | case GT_LCL_VAR: |
| 9185 | case GT_LCL_VAR_ADDR: |
| 9186 | case GT_LCL_FLD: |
| 9187 | case GT_LCL_FLD_ADDR: |
| 9188 | case GT_STORE_LCL_FLD: |
| 9189 | case GT_STORE_LCL_VAR: |
| 9190 | if (tree->gtFlags & GTF_VAR_DEF) |
| 9191 | { |
| 9192 | chars += printf("[VAR_DEF]" ); |
| 9193 | } |
| 9194 | if (tree->gtFlags & GTF_VAR_USEASG) |
| 9195 | { |
| 9196 | chars += printf("[VAR_USEASG]" ); |
| 9197 | } |
| 9198 | if (tree->gtFlags & GTF_VAR_CAST) |
| 9199 | { |
| 9200 | chars += printf("[VAR_CAST]" ); |
| 9201 | } |
| 9202 | if (tree->gtFlags & GTF_VAR_ITERATOR) |
| 9203 | { |
| 9204 | chars += printf("[VAR_ITERATOR]" ); |
| 9205 | } |
| 9206 | if (tree->gtFlags & GTF_VAR_CLONED) |
| 9207 | { |
| 9208 | chars += printf("[VAR_CLONED]" ); |
| 9209 | } |
| 9210 | if (tree->gtFlags & GTF_VAR_DEATH) |
| 9211 | { |
| 9212 | chars += printf("[VAR_DEATH]" ); |
| 9213 | } |
| 9214 | if (tree->gtFlags & GTF_VAR_ARR_INDEX) |
| 9215 | { |
| 9216 | chars += printf("[VAR_ARR_INDEX]" ); |
| 9217 | } |
| 9218 | #if defined(DEBUG) |
| 9219 | if (tree->gtDebugFlags & GTF_DEBUG_VAR_CSE_REF) |
| 9220 | { |
| 9221 | chars += printf("[VAR_CSE_REF]" ); |
| 9222 | } |
| 9223 | #endif |
| 9224 | break; |
| 9225 | |
| 9226 | case GT_NOP: |
| 9227 | |
| 9228 | if (tree->gtFlags & GTF_NOP_DEATH) |
| 9229 | { |
| 9230 | chars += printf("[NOP_DEATH]" ); |
| 9231 | } |
| 9232 | break; |
| 9233 | |
| 9234 | case GT_NO_OP: |
| 9235 | break; |
| 9236 | |
| 9237 | case GT_FIELD: |
| 9238 | if (tree->gtFlags & GTF_FLD_VOLATILE) |
| 9239 | { |
| 9240 | chars += printf("[FLD_VOLATILE]" ); |
| 9241 | } |
| 9242 | break; |
| 9243 | |
| 9244 | case GT_INDEX: |
| 9245 | |
| 9246 | if (tree->gtFlags & GTF_INX_REFARR_LAYOUT) |
| 9247 | { |
| 9248 | chars += printf("[INX_REFARR_LAYOUT]" ); |
| 9249 | } |
| 9250 | if (tree->gtFlags & GTF_INX_STRING_LAYOUT) |
| 9251 | { |
| 9252 | chars += printf("[INX_STRING_LAYOUT]" ); |
| 9253 | } |
| 9254 | __fallthrough; |
| 9255 | case GT_INDEX_ADDR: |
| 9256 | if (tree->gtFlags & GTF_INX_RNGCHK) |
| 9257 | { |
| 9258 | chars += printf("[INX_RNGCHK]" ); |
| 9259 | } |
| 9260 | break; |
| 9261 | |
| 9262 | case GT_IND: |
| 9263 | case GT_STOREIND: |
| 9264 | |
| 9265 | if (tree->gtFlags & GTF_IND_VOLATILE) |
| 9266 | { |
| 9267 | chars += printf("[IND_VOLATILE]" ); |
| 9268 | } |
| 9269 | if (tree->gtFlags & GTF_IND_TGTANYWHERE) |
| 9270 | { |
| 9271 | chars += printf("[IND_TGTANYWHERE]" ); |
| 9272 | } |
| 9273 | if (tree->gtFlags & GTF_IND_TLS_REF) |
| 9274 | { |
| 9275 | chars += printf("[IND_TLS_REF]" ); |
| 9276 | } |
| 9277 | if (tree->gtFlags & GTF_IND_ASG_LHS) |
| 9278 | { |
| 9279 | chars += printf("[IND_ASG_LHS]" ); |
| 9280 | } |
| 9281 | if (tree->gtFlags & GTF_IND_UNALIGNED) |
| 9282 | { |
| 9283 | chars += printf("[IND_UNALIGNED]" ); |
| 9284 | } |
| 9285 | if (tree->gtFlags & GTF_IND_INVARIANT) |
| 9286 | { |
| 9287 | chars += printf("[IND_INVARIANT]" ); |
| 9288 | } |
| 9289 | break; |
| 9290 | |
| 9291 | case GT_CLS_VAR: |
| 9292 | |
| 9293 | if (tree->gtFlags & GTF_CLS_VAR_ASG_LHS) |
| 9294 | { |
| 9295 | chars += printf("[CLS_VAR_ASG_LHS]" ); |
| 9296 | } |
| 9297 | break; |
| 9298 | |
| 9299 | case GT_ADDR: |
| 9300 | |
| 9301 | if (tree->gtFlags & GTF_ADDR_ONSTACK) |
| 9302 | { |
| 9303 | chars += printf("[ADDR_ONSTACK]" ); |
| 9304 | } |
| 9305 | break; |
| 9306 | |
| 9307 | case GT_MUL: |
| 9308 | #if !defined(_TARGET_64BIT_) |
| 9309 | case GT_MUL_LONG: |
| 9310 | #endif |
| 9311 | |
| 9312 | if (tree->gtFlags & GTF_MUL_64RSLT) |
| 9313 | { |
| 9314 | chars += printf("[64RSLT]" ); |
| 9315 | } |
| 9316 | if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) |
| 9317 | { |
| 9318 | chars += printf("[ADDRMODE_NO_CSE]" ); |
| 9319 | } |
| 9320 | break; |
| 9321 | |
| 9322 | case GT_ADD: |
| 9323 | |
| 9324 | if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) |
| 9325 | { |
| 9326 | chars += printf("[ADDRMODE_NO_CSE]" ); |
| 9327 | } |
| 9328 | break; |
| 9329 | |
| 9330 | case GT_LSH: |
| 9331 | |
| 9332 | if (tree->gtFlags & GTF_ADDRMODE_NO_CSE) |
| 9333 | { |
| 9334 | chars += printf("[ADDRMODE_NO_CSE]" ); |
| 9335 | } |
| 9336 | break; |
| 9337 | |
| 9338 | case GT_MOD: |
| 9339 | case GT_UMOD: |
| 9340 | break; |
| 9341 | |
| 9342 | case GT_EQ: |
| 9343 | case GT_NE: |
| 9344 | case GT_LT: |
| 9345 | case GT_LE: |
| 9346 | case GT_GT: |
| 9347 | case GT_GE: |
| 9348 | |
| 9349 | if (tree->gtFlags & GTF_RELOP_NAN_UN) |
| 9350 | { |
| 9351 | chars += printf("[RELOP_NAN_UN]" ); |
| 9352 | } |
| 9353 | if (tree->gtFlags & GTF_RELOP_JMP_USED) |
| 9354 | { |
| 9355 | chars += printf("[RELOP_JMP_USED]" ); |
| 9356 | } |
| 9357 | if (tree->gtFlags & GTF_RELOP_QMARK) |
| 9358 | { |
| 9359 | chars += printf("[RELOP_QMARK]" ); |
| 9360 | } |
| 9361 | break; |
| 9362 | |
| 9363 | case GT_QMARK: |
| 9364 | |
| 9365 | if (tree->gtFlags & GTF_QMARK_CAST_INSTOF) |
| 9366 | { |
| 9367 | chars += printf("[QMARK_CAST_INSTOF]" ); |
| 9368 | } |
| 9369 | break; |
| 9370 | |
| 9371 | case GT_BOX: |
| 9372 | |
| 9373 | if (tree->gtFlags & GTF_BOX_VALUE) |
| 9374 | { |
| 9375 | chars += printf("[BOX_VALUE]" ); |
| 9376 | } |
| 9377 | break; |
| 9378 | |
| 9379 | case GT_CNS_INT: |
| 9380 | |
| 9381 | { |
| 9382 | unsigned handleKind = (tree->gtFlags & GTF_ICON_HDL_MASK); |
| 9383 | |
| 9384 | switch (handleKind) |
| 9385 | { |
| 9386 | |
| 9387 | case GTF_ICON_SCOPE_HDL: |
| 9388 | |
| 9389 | chars += printf("[ICON_SCOPE_HDL]" ); |
| 9390 | break; |
| 9391 | |
| 9392 | case GTF_ICON_CLASS_HDL: |
| 9393 | |
| 9394 | chars += printf("[ICON_CLASS_HDL]" ); |
| 9395 | break; |
| 9396 | |
| 9397 | case GTF_ICON_METHOD_HDL: |
| 9398 | |
| 9399 | chars += printf("[ICON_METHOD_HDL]" ); |
| 9400 | break; |
| 9401 | |
| 9402 | case GTF_ICON_FIELD_HDL: |
| 9403 | |
| 9404 | chars += printf("[ICON_FIELD_HDL]" ); |
| 9405 | break; |
| 9406 | |
| 9407 | case GTF_ICON_STATIC_HDL: |
| 9408 | |
| 9409 | chars += printf("[ICON_STATIC_HDL]" ); |
| 9410 | break; |
| 9411 | |
| 9412 | case GTF_ICON_STR_HDL: |
| 9413 | |
| 9414 | chars += printf("[ICON_STR_HDL]" ); |
| 9415 | break; |
| 9416 | |
| 9417 | case GTF_ICON_PSTR_HDL: |
| 9418 | |
| 9419 | chars += printf("[ICON_PSTR_HDL]" ); |
| 9420 | break; |
| 9421 | |
| 9422 | case GTF_ICON_PTR_HDL: |
| 9423 | |
| 9424 | chars += printf("[ICON_PTR_HDL]" ); |
| 9425 | break; |
| 9426 | |
| 9427 | case GTF_ICON_VARG_HDL: |
| 9428 | |
| 9429 | chars += printf("[ICON_VARG_HDL]" ); |
| 9430 | break; |
| 9431 | |
| 9432 | case GTF_ICON_PINVKI_HDL: |
| 9433 | |
| 9434 | chars += printf("[ICON_PINVKI_HDL]" ); |
| 9435 | break; |
| 9436 | |
| 9437 | case GTF_ICON_TOKEN_HDL: |
| 9438 | |
| 9439 | chars += printf("[ICON_TOKEN_HDL]" ); |
| 9440 | break; |
| 9441 | |
| 9442 | case GTF_ICON_TLS_HDL: |
| 9443 | |
| 9444 | chars += printf("[ICON_TLD_HDL]" ); |
| 9445 | break; |
| 9446 | |
| 9447 | case GTF_ICON_FTN_ADDR: |
| 9448 | |
| 9449 | chars += printf("[ICON_FTN_ADDR]" ); |
| 9450 | break; |
| 9451 | |
| 9452 | case GTF_ICON_CIDMID_HDL: |
| 9453 | |
| 9454 | chars += printf("[ICON_CIDMID_HDL]" ); |
| 9455 | break; |
| 9456 | |
| 9457 | case GTF_ICON_BBC_PTR: |
| 9458 | |
| 9459 | chars += printf("[ICON_BBC_PTR]" ); |
| 9460 | break; |
| 9461 | |
| 9462 | case GTF_ICON_FIELD_OFF: |
| 9463 | |
| 9464 | chars += printf("[ICON_FIELD_OFF]" ); |
| 9465 | break; |
| 9466 | } |
| 9467 | } |
| 9468 | break; |
| 9469 | |
| 9470 | case GT_OBJ: |
| 9471 | case GT_STORE_OBJ: |
| 9472 | if (tree->AsObj()->HasGCPtr()) |
| 9473 | { |
| 9474 | chars += printf("[BLK_HASGCPTR]" ); |
| 9475 | } |
| 9476 | __fallthrough; |
| 9477 | |
| 9478 | case GT_BLK: |
| 9479 | case GT_DYN_BLK: |
| 9480 | case GT_STORE_BLK: |
| 9481 | case GT_STORE_DYN_BLK: |
| 9482 | |
| 9483 | if (tree->gtFlags & GTF_BLK_VOLATILE) |
| 9484 | { |
| 9485 | chars += printf("[BLK_VOLATILE]" ); |
| 9486 | } |
| 9487 | if (tree->AsBlk()->IsUnaligned()) |
| 9488 | { |
| 9489 | chars += printf("[BLK_UNALIGNED]" ); |
| 9490 | } |
| 9491 | break; |
| 9492 | |
| 9493 | case GT_CALL: |
| 9494 | |
| 9495 | if (tree->gtFlags & GTF_CALL_UNMANAGED) |
| 9496 | { |
| 9497 | chars += printf("[CALL_UNMANAGED]" ); |
| 9498 | } |
| 9499 | if (tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) |
| 9500 | { |
| 9501 | chars += printf("[CALL_INLINE_CANDIDATE]" ); |
| 9502 | } |
| 9503 | if (!tree->AsCall()->IsVirtual()) |
| 9504 | { |
| 9505 | chars += printf("[CALL_NONVIRT]" ); |
| 9506 | } |
| 9507 | if (tree->AsCall()->IsVirtualVtable()) |
| 9508 | { |
| 9509 | chars += printf("[CALL_VIRT_VTABLE]" ); |
| 9510 | } |
| 9511 | if (tree->AsCall()->IsVirtualStub()) |
| 9512 | { |
| 9513 | chars += printf("[CALL_VIRT_STUB]" ); |
| 9514 | } |
| 9515 | if (tree->gtFlags & GTF_CALL_NULLCHECK) |
| 9516 | { |
| 9517 | chars += printf("[CALL_NULLCHECK]" ); |
| 9518 | } |
| 9519 | if (tree->gtFlags & GTF_CALL_POP_ARGS) |
| 9520 | { |
| 9521 | chars += printf("[CALL_POP_ARGS]" ); |
| 9522 | } |
| 9523 | if (tree->gtFlags & GTF_CALL_HOISTABLE) |
| 9524 | { |
| 9525 | chars += printf("[CALL_HOISTABLE]" ); |
| 9526 | } |
| 9527 | |
| 9528 | // More flags associated with calls. |
| 9529 | |
| 9530 | { |
| 9531 | GenTreeCall* call = tree->AsCall(); |
| 9532 | |
| 9533 | if (call->gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) |
| 9534 | { |
| 9535 | chars += printf("[CALL_M_EXPLICIT_TAILCALL]" ); |
| 9536 | } |
| 9537 | if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL) |
| 9538 | { |
| 9539 | chars += printf("[CALL_M_TAILCALL]" ); |
| 9540 | } |
| 9541 | if (call->gtCallMoreFlags & GTF_CALL_M_VARARGS) |
| 9542 | { |
| 9543 | chars += printf("[CALL_M_VARARGS]" ); |
| 9544 | } |
| 9545 | if (call->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) |
| 9546 | { |
| 9547 | chars += printf("[CALL_M_RETBUFFARG]" ); |
| 9548 | } |
| 9549 | if (call->gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) |
| 9550 | { |
| 9551 | chars += printf("[CALL_M_DELEGATE_INV]" ); |
| 9552 | } |
| 9553 | if (call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) |
| 9554 | { |
| 9555 | chars += printf("[CALL_M_NOGCCHECK]" ); |
| 9556 | } |
| 9557 | if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) |
| 9558 | { |
| 9559 | chars += printf("[CALL_M_SPECIAL_INTRINSIC]" ); |
| 9560 | } |
| 9561 | |
| 9562 | if (call->IsUnmanaged()) |
| 9563 | { |
| 9564 | if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) |
| 9565 | { |
| 9566 | chars += printf("[CALL_M_UNMGD_THISCALL]" ); |
| 9567 | } |
| 9568 | } |
| 9569 | else if (call->IsVirtualStub()) |
| 9570 | { |
| 9571 | if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) |
| 9572 | { |
| 9573 | chars += printf("[CALL_M_VIRTSTUB_REL_INDIRECT]" ); |
| 9574 | } |
| 9575 | } |
| 9576 | else if (!call->IsVirtual()) |
| 9577 | { |
| 9578 | if (call->gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) |
| 9579 | { |
| 9580 | chars += printf("[CALL_M_NONVIRT_SAME_THIS]" ); |
| 9581 | } |
| 9582 | } |
| 9583 | |
| 9584 | if (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) |
| 9585 | { |
| 9586 | chars += printf("[CALL_M_FRAME_VAR_DEATH]" ); |
| 9587 | } |
| 9588 | if (call->gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_HELPER) |
| 9589 | { |
| 9590 | chars += printf("[CALL_M_TAILCALL_VIA_HELPER]" ); |
| 9591 | } |
| 9592 | #if FEATURE_TAILCALL_OPT |
| 9593 | if (call->gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) |
| 9594 | { |
| 9595 | chars += printf("[CALL_M_IMPLICIT_TAILCALL]" ); |
| 9596 | } |
| 9597 | #endif |
| 9598 | if (call->gtCallMoreFlags & GTF_CALL_M_PINVOKE) |
| 9599 | { |
| 9600 | chars += printf("[CALL_M_PINVOKE]" ); |
| 9601 | } |
| 9602 | } |
| 9603 | break; |
| 9604 | |
| 9605 | case GT_STMT: |
| 9606 | |
| 9607 | if (tree->gtFlags & GTF_STMT_CMPADD) |
| 9608 | { |
| 9609 | chars += printf("[STMT_CMPADD]" ); |
| 9610 | } |
| 9611 | if (tree->gtFlags & GTF_STMT_HAS_CSE) |
| 9612 | { |
| 9613 | chars += printf("[STMT_HAS_CSE]" ); |
| 9614 | } |
| 9615 | break; |
| 9616 | |
| 9617 | default: |
| 9618 | |
| 9619 | { |
| 9620 | unsigned flags = (tree->gtFlags & (~(unsigned)(GTF_COMMON_MASK | GTF_OVERFLOW))); |
| 9621 | if (flags != 0) |
| 9622 | { |
| 9623 | chars += printf("[%08X]" , flags); |
| 9624 | } |
| 9625 | } |
| 9626 | break; |
| 9627 | } |
| 9628 | |
| 9629 | // Common flags. |
| 9630 | |
| 9631 | if (tree->gtFlags & GTF_ASG) |
| 9632 | { |
| 9633 | chars += printf("[ASG]" ); |
| 9634 | } |
| 9635 | if (tree->gtFlags & GTF_CALL) |
| 9636 | { |
| 9637 | chars += printf("[CALL]" ); |
| 9638 | } |
| 9639 | switch (op) |
| 9640 | { |
| 9641 | case GT_MUL: |
| 9642 | case GT_CAST: |
| 9643 | case GT_ADD: |
| 9644 | case GT_SUB: |
| 9645 | if (tree->gtFlags & GTF_OVERFLOW) |
| 9646 | { |
| 9647 | chars += printf("[OVERFLOW]" ); |
| 9648 | } |
| 9649 | break; |
| 9650 | default: |
| 9651 | break; |
| 9652 | } |
| 9653 | if (tree->gtFlags & GTF_EXCEPT) |
| 9654 | { |
| 9655 | chars += printf("[EXCEPT]" ); |
| 9656 | } |
| 9657 | if (tree->gtFlags & GTF_GLOB_REF) |
| 9658 | { |
| 9659 | chars += printf("[GLOB_REF]" ); |
| 9660 | } |
| 9661 | if (tree->gtFlags & GTF_ORDER_SIDEEFF) |
| 9662 | { |
| 9663 | chars += printf("[ORDER_SIDEEFF]" ); |
| 9664 | } |
| 9665 | if (tree->gtFlags & GTF_REVERSE_OPS) |
| 9666 | { |
| 9667 | if (op != GT_LCL_VAR) |
| 9668 | { |
| 9669 | chars += printf("[REVERSE_OPS]" ); |
| 9670 | } |
| 9671 | } |
| 9672 | if (tree->gtFlags & GTF_SPILLED) |
| 9673 | { |
| 9674 | chars += printf("[SPILLED_OPER]" ); |
| 9675 | } |
| 9676 | #if FEATURE_SET_FLAGS |
| 9677 | if (tree->gtFlags & GTF_SET_FLAGS) |
| 9678 | { |
| 9679 | if ((op != GT_IND) && (op != GT_STOREIND)) |
| 9680 | { |
| 9681 | chars += printf("[ZSF_SET_FLAGS]" ); |
| 9682 | } |
| 9683 | } |
| 9684 | #endif |
| 9685 | if (tree->gtFlags & GTF_IND_NONFAULTING) |
| 9686 | { |
| 9687 | if (tree->OperIsIndirOrArrLength()) |
| 9688 | { |
| 9689 | chars += printf("[IND_NONFAULTING]" ); |
| 9690 | } |
| 9691 | } |
| 9692 | if (tree->gtFlags & GTF_MAKE_CSE) |
| 9693 | { |
| 9694 | chars += printf("[MAKE_CSE]" ); |
| 9695 | } |
| 9696 | if (tree->gtFlags & GTF_DONT_CSE) |
| 9697 | { |
| 9698 | chars += printf("[DONT_CSE]" ); |
| 9699 | } |
| 9700 | if (tree->gtFlags & GTF_BOOLEAN) |
| 9701 | { |
| 9702 | chars += printf("[BOOLEAN]" ); |
| 9703 | } |
| 9704 | if (tree->gtFlags & GTF_UNSIGNED) |
| 9705 | { |
| 9706 | chars += printf("[SMALL_UNSIGNED]" ); |
| 9707 | } |
| 9708 | if (tree->gtFlags & GTF_LATE_ARG) |
| 9709 | { |
| 9710 | chars += printf("[SMALL_LATE_ARG]" ); |
| 9711 | } |
| 9712 | if (tree->gtFlags & GTF_SPILL) |
| 9713 | { |
| 9714 | chars += printf("[SPILL]" ); |
| 9715 | } |
| 9716 | if (tree->gtFlags & GTF_REUSE_REG_VAL) |
| 9717 | { |
| 9718 | if (op == GT_CNS_INT) |
| 9719 | { |
| 9720 | chars += printf("[REUSE_REG_VAL]" ); |
| 9721 | } |
| 9722 | } |
| 9723 | } |
| 9724 | |
| 9725 | return chars; |
| 9726 | } |
| 9727 | |
| 9728 | /***************************************************************************** |
| 9729 | * |
| 9730 | * COMPlus_JitDumpIR support - dump out tree node flags for linear IR form |
| 9731 | */ |
| 9732 | |
| 9733 | int dTreeFlagsIR(GenTree* tree) |
| 9734 | { |
| 9735 | int chars = cTreeFlagsIR(JitTls::GetCompiler(), tree); |
| 9736 | |
| 9737 | return chars; |
| 9738 | } |
| 9739 | |
| 9740 | /***************************************************************************** |
| 9741 | * |
| 9742 | * COMPlus_JitDumpIR support - dump out SSA number on tree node for linear IR form |
| 9743 | */ |
| 9744 | |
| 9745 | int cSsaNumIR(Compiler* comp, GenTree* tree) |
| 9746 | { |
| 9747 | int chars = 0; |
| 9748 | |
| 9749 | if (tree->gtLclVarCommon.HasSsaName()) |
| 9750 | { |
| 9751 | if (tree->gtFlags & GTF_VAR_USEASG) |
| 9752 | { |
| 9753 | assert(tree->gtFlags & GTF_VAR_DEF); |
| 9754 | chars += printf("<u:%d><d:%d>" , tree->gtLclVarCommon.gtSsaNum, comp->GetSsaNumForLocalVarDef(tree)); |
| 9755 | } |
| 9756 | else |
| 9757 | { |
| 9758 | chars += printf("<%s:%d>" , (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u" , tree->gtLclVarCommon.gtSsaNum); |
| 9759 | } |
| 9760 | } |
| 9761 | |
| 9762 | return chars; |
| 9763 | } |
| 9764 | |
| 9765 | /***************************************************************************** |
| 9766 | * |
| 9767 | * COMPlus_JitDumpIR support - dump out SSA number on tree node for linear IR form |
| 9768 | */ |
| 9769 | |
| 9770 | int dSsaNumIR(GenTree* tree) |
| 9771 | { |
| 9772 | int chars = cSsaNumIR(JitTls::GetCompiler(), tree); |
| 9773 | |
| 9774 | return chars; |
| 9775 | } |
| 9776 | |
| 9777 | /***************************************************************************** |
| 9778 | * |
| 9779 | * COMPlus_JitDumpIR support - dump out Value Number on tree node for linear IR form |
| 9780 | */ |
| 9781 | |
| 9782 | int cValNumIR(Compiler* comp, GenTree* tree) |
| 9783 | { |
| 9784 | int chars = 0; |
| 9785 | |
| 9786 | if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN) |
| 9787 | { |
| 9788 | assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN); |
| 9789 | ValueNumPair vnp = tree->gtVNPair; |
| 9790 | ValueNum vn; |
| 9791 | if (vnp.BothEqual()) |
| 9792 | { |
| 9793 | chars += printf("<v:" ); |
| 9794 | vn = vnp.GetLiberal(); |
| 9795 | chars += printf(FMT_VN, vn); |
| 9796 | if (ValueNumStore::isReservedVN(vn)) |
| 9797 | { |
| 9798 | chars += printf("R" ); |
| 9799 | } |
| 9800 | chars += printf(">" ); |
| 9801 | } |
| 9802 | else |
| 9803 | { |
| 9804 | vn = vnp.GetLiberal(); |
| 9805 | chars += printf("<v:" ); |
| 9806 | chars += printf(FMT_VN, vn); |
| 9807 | if (ValueNumStore::isReservedVN(vn)) |
| 9808 | { |
| 9809 | chars += printf("R" ); |
| 9810 | } |
| 9811 | chars += printf("," ); |
| 9812 | vn = vnp.GetConservative(); |
| 9813 | chars += printf(FMT_VN, vn); |
| 9814 | if (ValueNumStore::isReservedVN(vn)) |
| 9815 | { |
| 9816 | chars += printf("R" ); |
| 9817 | } |
| 9818 | chars += printf(">" ); |
| 9819 | } |
| 9820 | } |
| 9821 | |
| 9822 | return chars; |
| 9823 | } |
| 9824 | |
| 9825 | /***************************************************************************** |
| 9826 | * |
| 9827 | * COMPlus_JitDumpIR support - dump out Value Number on tree node for linear IR form |
| 9828 | */ |
| 9829 | |
| 9830 | int dValNumIR(GenTree* tree) |
| 9831 | { |
| 9832 | int chars = cValNumIR(JitTls::GetCompiler(), tree); |
| 9833 | |
| 9834 | return chars; |
| 9835 | } |
| 9836 | |
| 9837 | /***************************************************************************** |
| 9838 | * |
| 9839 | * COMPlus_JitDumpIR support - dump out tree leaf node for linear IR form |
| 9840 | */ |
| 9841 | |
| 9842 | int cLeafIR(Compiler* comp, GenTree* tree) |
| 9843 | { |
| 9844 | int chars = 0; |
| 9845 | genTreeOps op = tree->OperGet(); |
| 9846 | const char* ilKind = nullptr; |
| 9847 | const char* ilName = nullptr; |
| 9848 | unsigned ilNum = 0; |
| 9849 | unsigned lclNum = 0; |
| 9850 | bool hasSsa = false; |
| 9851 | |
| 9852 | switch (op) |
| 9853 | { |
| 9854 | |
| 9855 | case GT_PHI_ARG: |
| 9856 | case GT_LCL_VAR: |
| 9857 | case GT_LCL_VAR_ADDR: |
| 9858 | case GT_STORE_LCL_VAR: |
| 9859 | lclNum = tree->gtLclVarCommon.gtLclNum; |
| 9860 | comp->gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum); |
| 9861 | if (ilName != nullptr) |
| 9862 | { |
| 9863 | chars += printf("%s" , ilName); |
| 9864 | } |
| 9865 | else |
| 9866 | { |
| 9867 | LclVarDsc* varDsc = comp->lvaTable + lclNum; |
| 9868 | chars += printf("%s%d" , ilKind, ilNum); |
| 9869 | if (comp->dumpIRLocals) |
| 9870 | { |
| 9871 | chars += printf("(V%02u" , lclNum); |
| 9872 | if (varDsc->lvTracked) |
| 9873 | { |
| 9874 | chars += printf(":T%02u" , varDsc->lvVarIndex); |
| 9875 | } |
| 9876 | if (comp->dumpIRRegs) |
| 9877 | { |
| 9878 | if (varDsc->lvRegister) |
| 9879 | { |
| 9880 | chars += printf(":%s" , getRegName(varDsc->lvRegNum)); |
| 9881 | } |
| 9882 | else |
| 9883 | { |
| 9884 | switch (tree->GetRegTag()) |
| 9885 | { |
| 9886 | case GenTree::GT_REGTAG_REG: |
| 9887 | chars += printf(":%s" , comp->compRegVarName(tree->gtRegNum)); |
| 9888 | break; |
| 9889 | default: |
| 9890 | break; |
| 9891 | } |
| 9892 | } |
| 9893 | } |
| 9894 | chars += printf(")" ); |
| 9895 | } |
| 9896 | else if (comp->dumpIRRegs) |
| 9897 | { |
| 9898 | if (varDsc->lvRegister) |
| 9899 | { |
| 9900 | chars += printf("(%s)" , getRegName(varDsc->lvRegNum)); |
| 9901 | } |
| 9902 | else |
| 9903 | { |
| 9904 | switch (tree->GetRegTag()) |
| 9905 | { |
| 9906 | case GenTree::GT_REGTAG_REG: |
| 9907 | chars += printf("(%s)" , comp->compRegVarName(tree->gtRegNum)); |
| 9908 | break; |
| 9909 | default: |
| 9910 | break; |
| 9911 | } |
| 9912 | } |
| 9913 | } |
| 9914 | } |
| 9915 | |
| 9916 | hasSsa = true; |
| 9917 | break; |
| 9918 | |
| 9919 | case GT_LCL_FLD: |
| 9920 | case GT_LCL_FLD_ADDR: |
| 9921 | case GT_STORE_LCL_FLD: |
| 9922 | |
| 9923 | lclNum = tree->gtLclVarCommon.gtLclNum; |
| 9924 | comp->gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum); |
| 9925 | if (ilName != nullptr) |
| 9926 | { |
| 9927 | chars += printf("%s+%u" , ilName, tree->gtLclFld.gtLclOffs); |
| 9928 | } |
| 9929 | else |
| 9930 | { |
| 9931 | chars += printf("%s%d+%u" , ilKind, ilNum, tree->gtLclFld.gtLclOffs); |
| 9932 | LclVarDsc* varDsc = comp->lvaTable + lclNum; |
| 9933 | if (comp->dumpIRLocals) |
| 9934 | { |
| 9935 | chars += printf("(V%02u" , lclNum); |
| 9936 | if (varDsc->lvTracked) |
| 9937 | { |
| 9938 | chars += printf(":T%02u" , varDsc->lvVarIndex); |
| 9939 | } |
| 9940 | if (comp->dumpIRRegs) |
| 9941 | { |
| 9942 | if (varDsc->lvRegister) |
| 9943 | { |
| 9944 | chars += printf(":%s" , getRegName(varDsc->lvRegNum)); |
| 9945 | } |
| 9946 | else |
| 9947 | { |
| 9948 | switch (tree->GetRegTag()) |
| 9949 | { |
| 9950 | case GenTree::GT_REGTAG_REG: |
| 9951 | chars += printf(":%s" , comp->compRegVarName(tree->gtRegNum)); |
| 9952 | break; |
| 9953 | default: |
| 9954 | break; |
| 9955 | } |
| 9956 | } |
| 9957 | } |
| 9958 | chars += printf(")" ); |
| 9959 | } |
| 9960 | else if (comp->dumpIRRegs) |
| 9961 | { |
| 9962 | if (varDsc->lvRegister) |
| 9963 | { |
| 9964 | chars += printf("(%s)" , getRegName(varDsc->lvRegNum)); |
| 9965 | } |
| 9966 | else |
| 9967 | { |
| 9968 | switch (tree->GetRegTag()) |
| 9969 | { |
| 9970 | case GenTree::GT_REGTAG_REG: |
| 9971 | chars += printf("(%s)" , comp->compRegVarName(tree->gtRegNum)); |
| 9972 | break; |
| 9973 | default: |
| 9974 | break; |
| 9975 | } |
| 9976 | } |
| 9977 | } |
| 9978 | } |
| 9979 | |
| 9980 | // TODO: We probably want to expand field sequence. |
| 9981 | // gtDispFieldSeq(tree->gtLclFld.gtFieldSeq); |
| 9982 | |
| 9983 | hasSsa = true; |
| 9984 | break; |
| 9985 | |
| 9986 | case GT_CNS_INT: |
| 9987 | |
| 9988 | if (tree->IsIconHandle()) |
| 9989 | { |
| 9990 | #if 0 |
                // TODO: Commented out because sometimes the CLR throws
                // an exception when asking for the names of some handles.
| 9993 | // Need to investigate. |
| 9994 | |
| 9995 | const char* className; |
| 9996 | const char* fieldName; |
| 9997 | const char* methodName; |
| 9998 | const wchar_t* str; |
| 9999 | |
| 10000 | switch (tree->GetIconHandleFlag()) |
| 10001 | { |
| 10002 | |
| 10003 | case GTF_ICON_SCOPE_HDL: |
| 10004 | |
| 10005 | chars += printf("SCOPE(?)" ); |
| 10006 | break; |
| 10007 | |
| 10008 | case GTF_ICON_CLASS_HDL: |
| 10009 | |
| 10010 | className = comp->eeGetClassName((CORINFO_CLASS_HANDLE)tree->gtIntCon.gtIconVal); |
| 10011 | chars += printf("CLASS(%s)" , className); |
| 10012 | break; |
| 10013 | |
| 10014 | case GTF_ICON_METHOD_HDL: |
| 10015 | |
| 10016 | methodName = comp->eeGetMethodName((CORINFO_METHOD_HANDLE)tree->gtIntCon.gtIconVal, |
| 10017 | &className); |
| 10018 | chars += printf("METHOD(%s.%s)" , className, methodName); |
| 10019 | break; |
| 10020 | |
| 10021 | case GTF_ICON_FIELD_HDL: |
| 10022 | |
| 10023 | fieldName = comp->eeGetFieldName((CORINFO_FIELD_HANDLE)tree->gtIntCon.gtIconVal, |
| 10024 | &className); |
| 10025 | chars += printf("FIELD(%s.%s) " , className, fieldName); |
| 10026 | break; |
| 10027 | |
| 10028 | case GTF_ICON_STATIC_HDL: |
| 10029 | |
| 10030 | fieldName = comp->eeGetFieldName((CORINFO_FIELD_HANDLE)tree->gtIntCon.gtIconVal, |
| 10031 | &className); |
| 10032 | chars += printf("STATIC_FIELD(%s.%s)" , className, fieldName); |
| 10033 | break; |
| 10034 | |
| 10035 | case GTF_ICON_STR_HDL: |
| 10036 | |
| 10037 | str = comp->eeGetCPString(tree->gtIntCon.gtIconVal); |
| 10038 | chars += printf("\"%S\"" , str); |
| 10039 | break; |
| 10040 | |
| 10041 | case GTF_ICON_PSTR_HDL: |
| 10042 | |
| 10043 | chars += printf("PSTR(?)" ); |
| 10044 | break; |
| 10045 | |
| 10046 | case GTF_ICON_PTR_HDL: |
| 10047 | |
| 10048 | chars += printf("PTR(?)" ); |
| 10049 | break; |
| 10050 | |
| 10051 | case GTF_ICON_VARG_HDL: |
| 10052 | |
| 10053 | chars += printf("VARARG(?)" ); |
| 10054 | break; |
| 10055 | |
| 10056 | case GTF_ICON_PINVKI_HDL: |
| 10057 | |
| 10058 | chars += printf("PINVOKE(?)" ); |
| 10059 | break; |
| 10060 | |
| 10061 | case GTF_ICON_TOKEN_HDL: |
| 10062 | |
| 10063 | chars += printf("TOKEN(%08X)" , tree->gtIntCon.gtIconVal); |
| 10064 | break; |
| 10065 | |
| 10066 | case GTF_ICON_TLS_HDL: |
| 10067 | |
| 10068 | chars += printf("TLS(?)" ); |
| 10069 | break; |
| 10070 | |
| 10071 | case GTF_ICON_FTN_ADDR: |
| 10072 | |
| 10073 | chars += printf("FTN(?)" ); |
| 10074 | break; |
| 10075 | |
| 10076 | case GTF_ICON_CIDMID_HDL: |
| 10077 | |
| 10078 | chars += printf("CIDMID(?)" ); |
| 10079 | break; |
| 10080 | |
| 10081 | case GTF_ICON_BBC_PTR: |
| 10082 | |
| 10083 | chars += printf("BBC(?)" ); |
| 10084 | break; |
| 10085 | |
| 10086 | default: |
| 10087 | |
| 10088 | chars += printf("HANDLE(?)" ); |
| 10089 | break; |
| 10090 | } |
| 10091 | #else |
| 10092 | #ifdef _TARGET_64BIT_ |
| 10093 | if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0) |
| 10094 | { |
| 10095 | chars += printf("HANDLE(0x%llx)" , dspPtr(tree->gtIntCon.gtIconVal)); |
| 10096 | } |
| 10097 | else |
| 10098 | #endif |
| 10099 | { |
| 10100 | chars += printf("HANDLE(0x%0x)" , dspPtr(tree->gtIntCon.gtIconVal)); |
| 10101 | } |
| 10102 | #endif |
| 10103 | } |
| 10104 | else |
| 10105 | { |
| 10106 | if (tree->TypeGet() == TYP_REF) |
| 10107 | { |
| 10108 | assert(tree->gtIntCon.gtIconVal == 0); |
| 10109 | chars += printf("null" ); |
| 10110 | } |
| 10111 | #ifdef _TARGET_64BIT_ |
| 10112 | else if ((tree->gtIntCon.gtIconVal & 0xFFFFFFFF00000000LL) != 0) |
| 10113 | { |
| 10114 | chars += printf("0x%llx" , tree->gtIntCon.gtIconVal); |
| 10115 | } |
| 10116 | else |
| 10117 | #endif |
| 10118 | { |
| 10119 | chars += printf("%ld(0x%x)" , tree->gtIntCon.gtIconVal, tree->gtIntCon.gtIconVal); |
| 10120 | } |
| 10121 | } |
| 10122 | break; |
| 10123 | |
| 10124 | case GT_CNS_LNG: |
| 10125 | |
| 10126 | chars += printf("CONST(LONG)" ); |
| 10127 | break; |
| 10128 | |
| 10129 | case GT_CNS_DBL: |
| 10130 | |
| 10131 | chars += printf("CONST(DOUBLE)" ); |
| 10132 | break; |
| 10133 | |
| 10134 | case GT_CNS_STR: |
| 10135 | |
| 10136 | chars += printf("CONST(STR)" ); |
| 10137 | break; |
| 10138 | |
| 10139 | case GT_JMP: |
| 10140 | |
| 10141 | { |
| 10142 | const char* methodName; |
| 10143 | const char* className; |
| 10144 | |
| 10145 | methodName = comp->eeGetMethodName((CORINFO_METHOD_HANDLE)tree->gtVal.gtVal1, &className); |
| 10146 | chars += printf(" %s.%s" , className, methodName); |
| 10147 | } |
| 10148 | break; |
| 10149 | |
| 10150 | case GT_NO_OP: |
| 10151 | case GT_START_NONGC: |
| 10152 | case GT_PROF_HOOK: |
| 10153 | case GT_CATCH_ARG: |
| 10154 | case GT_MEMORYBARRIER: |
| 10155 | case GT_ARGPLACE: |
| 10156 | case GT_PINVOKE_PROLOG: |
| 10157 | case GT_JMPTABLE: |
| 10158 | // Do nothing. |
| 10159 | break; |
| 10160 | |
| 10161 | case GT_RET_EXPR: |
| 10162 | |
| 10163 | chars += printf("t%d" , tree->gtRetExpr.gtInlineCandidate->gtTreeID); |
| 10164 | break; |
| 10165 | |
| 10166 | case GT_PHYSREG: |
| 10167 | |
| 10168 | chars += printf("%s" , getRegName(tree->gtPhysReg.gtSrcReg, varTypeIsFloating(tree))); |
| 10169 | break; |
| 10170 | |
| 10171 | case GT_LABEL: |
| 10172 | |
| 10173 | if (tree->gtLabel.gtLabBB) |
| 10174 | { |
| 10175 | chars += printf(FMT_BB, tree->gtLabel.gtLabBB->bbNum); |
| 10176 | } |
| 10177 | else |
| 10178 | { |
| 10179 | chars += printf("BB?" ); |
| 10180 | } |
| 10181 | break; |
| 10182 | |
| 10183 | case GT_IL_OFFSET: |
| 10184 | |
| 10185 | if (tree->gtStmt.gtStmtILoffsx == BAD_IL_OFFSET) |
| 10186 | { |
| 10187 | chars += printf("?" ); |
| 10188 | } |
| 10189 | else |
| 10190 | { |
| 10191 | chars += printf("0x%x" , jitGetILoffs(tree->gtStmt.gtStmtILoffsx)); |
| 10192 | } |
| 10193 | break; |
| 10194 | |
| 10195 | case GT_CLS_VAR: |
| 10196 | case GT_CLS_VAR_ADDR: |
| 10197 | default: |
| 10198 | |
| 10199 | if (tree->OperIsLeaf()) |
| 10200 | { |
| 10201 | chars += printf("<leaf nyi: %s>" , tree->OpName(tree->OperGet())); |
| 10202 | } |
| 10203 | |
| 10204 | chars += printf("t%d" , tree->gtTreeID); |
| 10205 | break; |
| 10206 | } |
| 10207 | |
| 10208 | if (comp->dumpIRTypes) |
| 10209 | { |
| 10210 | chars += cTreeTypeIR(comp, tree); |
| 10211 | } |
| 10212 | if (comp->dumpIRValnums) |
| 10213 | { |
| 10214 | chars += cValNumIR(comp, tree); |
| 10215 | } |
| 10216 | if (hasSsa && comp->dumpIRSsa) |
| 10217 | { |
| 10218 | chars += cSsaNumIR(comp, tree); |
| 10219 | } |
| 10220 | |
| 10221 | return chars; |
| 10222 | } |
| 10223 | |
| 10224 | /***************************************************************************** |
| 10225 | * |
| 10226 | * COMPlus_JitDumpIR support - dump out tree leaf node for linear IR form |
| 10227 | */ |
| 10228 | |
| 10229 | int dLeafIR(GenTree* tree) |
| 10230 | { |
| 10231 | int chars = cLeafIR(JitTls::GetCompiler(), tree); |
| 10232 | |
| 10233 | return chars; |
| 10234 | } |
| 10235 | |
| 10236 | /***************************************************************************** |
| 10237 | * |
| 10238 | * COMPlus_JitDumpIR support - dump out tree indir node for linear IR form |
| 10239 | */ |
| 10240 | |
| 10241 | int cIndirIR(Compiler* comp, GenTree* tree) |
| 10242 | { |
| 10243 | assert(tree->gtOper == GT_IND); |
| 10244 | |
| 10245 | int chars = 0; |
| 10246 | GenTree* child; |
| 10247 | |
| 10248 | chars += printf("[" ); |
| 10249 | child = tree->GetChild(0); |
| 10250 | chars += cLeafIR(comp, child); |
| 10251 | chars += printf("]" ); |
| 10252 | |
| 10253 | return chars; |
| 10254 | } |
| 10255 | |
| 10256 | /***************************************************************************** |
| 10257 | * |
| 10258 | * COMPlus_JitDumpIR support - dump out tree indir node for linear IR form |
| 10259 | */ |
| 10260 | |
| 10261 | int dIndirIR(GenTree* tree) |
| 10262 | { |
| 10263 | int chars = cIndirIR(JitTls::GetCompiler(), tree); |
| 10264 | |
| 10265 | return chars; |
| 10266 | } |
| 10267 | |
| 10268 | /***************************************************************************** |
| 10269 | * |
| 10270 | * COMPlus_JitDumpIR support - dump out tree operand node for linear IR form |
| 10271 | */ |
| 10272 | |
| 10273 | int cOperandIR(Compiler* comp, GenTree* operand) |
| 10274 | { |
| 10275 | int chars = 0; |
| 10276 | |
| 10277 | if (operand == nullptr) |
| 10278 | { |
| 10279 | chars += printf("t?" ); |
| 10280 | return chars; |
| 10281 | } |
| 10282 | |
| 10283 | bool dumpTypes = comp->dumpIRTypes; |
| 10284 | bool dumpValnums = comp->dumpIRValnums; |
| 10285 | bool foldIndirs = comp->dumpIRDataflow; |
| 10286 | bool foldLeafs = comp->dumpIRNoLeafs; |
| 10287 | bool foldCommas = comp->dumpIRDataflow; |
| 10288 | bool dumpDataflow = comp->dumpIRDataflow; |
| 10289 | bool foldLists = comp->dumpIRNoLists; |
| 10290 | bool dumpRegs = comp->dumpIRRegs; |
| 10291 | |
| 10292 | genTreeOps op = operand->OperGet(); |
| 10293 | |
| 10294 | if (foldLeafs && operand->OperIsLeaf()) |
| 10295 | { |
| 10296 | if ((op == GT_ARGPLACE) && foldLists) |
| 10297 | { |
| 10298 | return chars; |
| 10299 | } |
| 10300 | chars += cLeafIR(comp, operand); |
| 10301 | } |
| 10302 | else if (dumpDataflow && (operand->OperIs(GT_ASG) || (op == GT_STORE_LCL_VAR) || (op == GT_STORE_LCL_FLD))) |
| 10303 | { |
| 10304 | operand = operand->GetChild(0); |
| 10305 | chars += cOperandIR(comp, operand); |
| 10306 | } |
| 10307 | else if ((op == GT_INDEX) && foldIndirs) |
| 10308 | { |
| 10309 | chars += printf("[t%d]" , operand->gtTreeID); |
| 10310 | if (dumpTypes) |
| 10311 | { |
| 10312 | chars += cTreeTypeIR(comp, operand); |
| 10313 | } |
| 10314 | if (dumpValnums) |
| 10315 | { |
| 10316 | chars += cValNumIR(comp, operand); |
| 10317 | } |
| 10318 | } |
| 10319 | else if ((op == GT_IND) && foldIndirs) |
| 10320 | { |
| 10321 | chars += cIndirIR(comp, operand); |
| 10322 | if (dumpTypes) |
| 10323 | { |
| 10324 | chars += cTreeTypeIR(comp, operand); |
| 10325 | } |
| 10326 | if (dumpValnums) |
| 10327 | { |
| 10328 | chars += cValNumIR(comp, operand); |
| 10329 | } |
| 10330 | } |
| 10331 | else if ((op == GT_COMMA) && foldCommas) |
| 10332 | { |
| 10333 | operand = operand->GetChild(1); |
| 10334 | chars += cOperandIR(comp, operand); |
| 10335 | } |
| 10336 | else if ((op == GT_LIST) && foldLists) |
| 10337 | { |
| 10338 | GenTree* list = operand; |
| 10339 | unsigned childCount = list->NumChildren(); |
| 10340 | |
| 10341 | operand = list->GetChild(0); |
| 10342 | int operandChars = cOperandIR(comp, operand); |
| 10343 | chars += operandChars; |
| 10344 | if (childCount > 1) |
| 10345 | { |
| 10346 | if (operandChars > 0) |
| 10347 | { |
| 10348 | chars += printf(", " ); |
| 10349 | } |
| 10350 | operand = list->GetChild(1); |
| 10351 | if (operand->gtOper == GT_LIST) |
| 10352 | { |
| 10353 | chars += cListIR(comp, operand); |
| 10354 | } |
| 10355 | else |
| 10356 | { |
| 10357 | chars += cOperandIR(comp, operand); |
| 10358 | } |
| 10359 | } |
| 10360 | } |
| 10361 | else |
| 10362 | { |
| 10363 | chars += printf("t%d" , operand->gtTreeID); |
| 10364 | if (dumpRegs) |
| 10365 | { |
| 10366 | regNumber regNum = operand->GetReg(); |
| 10367 | if (regNum != REG_NA) |
| 10368 | { |
| 10369 | chars += printf("(%s)" , getRegName(regNum)); |
| 10370 | } |
| 10371 | } |
| 10372 | if (dumpTypes) |
| 10373 | { |
| 10374 | chars += cTreeTypeIR(comp, operand); |
| 10375 | } |
| 10376 | if (dumpValnums) |
| 10377 | { |
| 10378 | chars += cValNumIR(comp, operand); |
| 10379 | } |
| 10380 | } |
| 10381 | |
| 10382 | return chars; |
| 10383 | } |
| 10384 | |
| 10385 | /***************************************************************************** |
| 10386 | * |
| 10387 | * COMPlus_JitDumpIR support - dump out tree operand node for linear IR form |
| 10388 | */ |
| 10389 | |
| 10390 | int dOperandIR(GenTree* operand) |
| 10391 | { |
| 10392 | int chars = cOperandIR(JitTls::GetCompiler(), operand); |
| 10393 | |
| 10394 | return chars; |
| 10395 | } |
| 10396 | |
| 10397 | /***************************************************************************** |
| 10398 | * |
| 10399 | * COMPlus_JitDumpIR support - dump out tree list of nodes for linear IR form |
| 10400 | */ |
| 10401 | |
| 10402 | int cListIR(Compiler* comp, GenTree* list) |
| 10403 | { |
| 10404 | int chars = 0; |
| 10405 | int operandChars; |
| 10406 | |
| 10407 | assert(list->gtOper == GT_LIST); |
| 10408 | |
| 10409 | GenTree* child; |
| 10410 | unsigned childCount; |
| 10411 | |
| 10412 | childCount = list->NumChildren(); |
| 10413 | assert(childCount == 1 || childCount == 2); |
| 10414 | |
| 10415 | operandChars = 0; |
| 10416 | for (unsigned childIndex = 0; childIndex < childCount; childIndex++) |
| 10417 | { |
| 10418 | if ((childIndex > 0) && (operandChars > 0)) |
| 10419 | { |
| 10420 | chars += printf(", " ); |
| 10421 | } |
| 10422 | |
| 10423 | child = list->GetChild(childIndex); |
| 10424 | operandChars = cOperandIR(comp, child); |
| 10425 | chars += operandChars; |
| 10426 | } |
| 10427 | |
| 10428 | return chars; |
| 10429 | } |
| 10430 | |
| 10431 | /***************************************************************************** |
| 10432 | * |
| 10433 | * COMPlus_JitDumpIR support - dump out tree list of nodes for linear IR form |
| 10434 | */ |
| 10435 | |
| 10436 | int dListIR(GenTree* list) |
| 10437 | { |
| 10438 | int chars = cListIR(JitTls::GetCompiler(), list); |
| 10439 | |
| 10440 | return chars; |
| 10441 | } |
| 10442 | |
| 10443 | /***************************************************************************** |
| 10444 | * |
| 10445 | * COMPlus_JitDumpIR support - dump out tree dependencies based on comma nodes for linear IR form |
| 10446 | */ |
| 10447 | |
| 10448 | int cDependsIR(Compiler* comp, GenTree* comma, bool* first) |
| 10449 | { |
| 10450 | int chars = 0; |
| 10451 | |
| 10452 | assert(comma->gtOper == GT_COMMA); |
| 10453 | |
| 10454 | GenTree* child; |
| 10455 | |
| 10456 | child = comma->GetChild(0); |
| 10457 | if (child->gtOper == GT_COMMA) |
| 10458 | { |
| 10459 | chars += cDependsIR(comp, child, first); |
| 10460 | } |
| 10461 | else |
| 10462 | { |
| 10463 | if (!(*first)) |
| 10464 | { |
| 10465 | chars += printf(", " ); |
| 10466 | } |
| 10467 | chars += printf("t%d" , child->gtTreeID); |
| 10468 | *first = false; |
| 10469 | } |
| 10470 | |
| 10471 | child = comma->GetChild(1); |
| 10472 | if (child->gtOper == GT_COMMA) |
| 10473 | { |
| 10474 | chars += cDependsIR(comp, child, first); |
| 10475 | } |
| 10476 | |
| 10477 | return chars; |
| 10478 | } |
| 10479 | |
| 10480 | /***************************************************************************** |
| 10481 | * |
| 10482 | * COMPlus_JitDumpIR support - dump out tree dependencies based on comma nodes for linear IR form |
| 10483 | */ |
| 10484 | |
| 10485 | int dDependsIR(GenTree* comma) |
| 10486 | { |
    bool first = true;
    int  chars = cDependsIR(JitTls::GetCompiler(), comma, &first);
| 10491 | |
| 10492 | return chars; |
| 10493 | } |
| 10494 | |
| 10495 | /***************************************************************************** |
| 10496 | * |
| 10497 | * COMPlus_JitDumpIR support - dump out tree node in linear IR form |
| 10498 | */ |
| 10499 | |
| 10500 | void cNodeIR(Compiler* comp, GenTree* tree) |
| 10501 | { |
| 10502 | bool foldLeafs = comp->dumpIRNoLeafs; |
| 10503 | bool foldIndirs = comp->dumpIRDataflow; |
| 10504 | bool foldLists = comp->dumpIRNoLists; |
| 10505 | bool dataflowView = comp->dumpIRDataflow; |
| 10506 | bool dumpTypes = comp->dumpIRTypes; |
| 10507 | bool dumpValnums = comp->dumpIRValnums; |
| 10508 | bool noStmts = comp->dumpIRNoStmts; |
| 10509 | genTreeOps op = tree->OperGet(); |
| 10510 | unsigned childCount = tree->NumChildren(); |
| 10511 | GenTree* child; |
| 10512 | |
| 10513 | // What are we skipping? |
| 10514 | |
| 10515 | if (tree->OperIsLeaf()) |
| 10516 | { |
| 10517 | if (foldLeafs) |
| 10518 | { |
| 10519 | return; |
| 10520 | } |
| 10521 | } |
| 10522 | else if (op == GT_IND) |
| 10523 | { |
| 10524 | if (foldIndirs) |
| 10525 | { |
| 10526 | return; |
| 10527 | } |
| 10528 | } |
| 10529 | else if (op == GT_LIST) |
| 10530 | { |
| 10531 | if (foldLists) |
| 10532 | { |
| 10533 | return; |
| 10534 | } |
| 10535 | } |
| 10536 | else if (op == GT_STMT) |
| 10537 | { |
| 10538 | if (noStmts) |
| 10539 | { |
| 10540 | if (dataflowView) |
| 10541 | { |
| 10542 | child = tree->GetChild(0); |
| 10543 | if (child->gtOper != GT_COMMA) |
| 10544 | { |
| 10545 | return; |
| 10546 | } |
| 10547 | } |
| 10548 | else |
| 10549 | { |
| 10550 | return; |
| 10551 | } |
| 10552 | } |
| 10553 | } |
| 10554 | else if (op == GT_COMMA) |
| 10555 | { |
| 10556 | if (dataflowView) |
| 10557 | { |
| 10558 | return; |
| 10559 | } |
| 10560 | } |
| 10561 | |
| 10562 | bool nodeIsValue = tree->IsValue(); |
| 10563 | |
| 10564 | // Dump tree id or dataflow destination. |
| 10565 | |
| 10566 | int chars = 0; |
| 10567 | |
| 10568 | // if (comp->compRationalIRForm) |
| 10569 | // { |
| 10570 | // chars += printf("R"); |
| 10571 | // } |
| 10572 | |
| 10573 | chars += printf(" " ); |
| 10574 | if (dataflowView && tree->OperIs(GT_ASG)) |
| 10575 | { |
| 10576 | child = tree->GetChild(0); |
| 10577 | chars += cOperandIR(comp, child); |
| 10578 | } |
| 10579 | else if (dataflowView && ((op == GT_STORE_LCL_VAR) || (op == GT_STORE_LCL_FLD))) |
| 10580 | { |
| 10581 | chars += cLeafIR(comp, tree); |
| 10582 | } |
| 10583 | else if (dataflowView && (op == GT_STOREIND)) |
| 10584 | { |
| 10585 | child = tree->GetChild(0); |
| 10586 | chars += printf("[" ); |
| 10587 | chars += cOperandIR(comp, child); |
| 10588 | chars += printf("]" ); |
| 10589 | if (dumpTypes) |
| 10590 | { |
| 10591 | chars += cTreeTypeIR(comp, tree); |
| 10592 | } |
| 10593 | if (dumpValnums) |
| 10594 | { |
| 10595 | chars += cValNumIR(comp, tree); |
| 10596 | } |
| 10597 | } |
| 10598 | else if (nodeIsValue) |
| 10599 | { |
| 10600 | chars += printf("t%d" , tree->gtTreeID); |
| 10601 | if (comp->dumpIRRegs) |
| 10602 | { |
| 10603 | regNumber regNum = tree->GetReg(); |
| 10604 | if (regNum != REG_NA) |
| 10605 | { |
| 10606 | chars += printf("(%s)" , getRegName(regNum)); |
| 10607 | } |
| 10608 | } |
| 10609 | if (dumpTypes) |
| 10610 | { |
| 10611 | chars += cTreeTypeIR(comp, tree); |
| 10612 | } |
| 10613 | if (dumpValnums) |
| 10614 | { |
| 10615 | chars += cValNumIR(comp, tree); |
| 10616 | } |
| 10617 | } |
| 10618 | |
    // Dump the opcode, and the tree ID if needed in the dataflow view.
| 10620 | |
| 10621 | chars += dTabStopIR(chars, COLUMN_OPCODE); |
| 10622 | const char* opName = tree->OpName(op); |
| 10623 | chars += printf(" %c %s" , nodeIsValue ? '=' : ' ', opName); |
| 10624 | |
| 10625 | if (dataflowView) |
| 10626 | { |
| 10627 | if (tree->OperIs(GT_ASG) || (op == GT_STORE_LCL_VAR) || (op == GT_STORE_LCL_FLD) || (op == GT_STOREIND)) |
| 10628 | { |
| 10629 | chars += printf("(t%d)" , tree->gtTreeID); |
| 10630 | } |
| 10631 | } |
| 10632 | |
    // Dump opcode modifiers to aid readability.
| 10634 | |
| 10635 | if (op == GT_CALL) |
| 10636 | { |
| 10637 | GenTreeCall* call = tree->AsCall(); |
| 10638 | |
| 10639 | if (call->gtCallType == CT_USER_FUNC) |
| 10640 | { |
| 10641 | if (call->IsVirtualStub()) |
| 10642 | { |
| 10643 | chars += printf(":VS" ); |
| 10644 | } |
| 10645 | else if (call->IsVirtualVtable()) |
| 10646 | { |
| 10647 | chars += printf(":VT" ); |
| 10648 | } |
| 10649 | else if (call->IsVirtual()) |
| 10650 | { |
| 10651 | chars += printf(":V" ); |
| 10652 | } |
| 10653 | } |
| 10654 | else if (call->gtCallType == CT_HELPER) |
| 10655 | { |
| 10656 | chars += printf(":H" ); |
| 10657 | } |
| 10658 | else if (call->gtCallType == CT_INDIRECT) |
| 10659 | { |
| 10660 | chars += printf(":I" ); |
| 10661 | } |
| 10662 | else if (call->IsUnmanaged()) |
| 10663 | { |
| 10664 | chars += printf(":U" ); |
| 10665 | } |
| 10666 | else |
| 10667 | { |
| 10668 | if (call->IsVirtualStub()) |
| 10669 | { |
| 10670 | chars += printf(":XVS" ); |
| 10671 | } |
| 10672 | else if (call->IsVirtualVtable()) |
| 10673 | { |
| 10674 | chars += printf(":XVT" ); |
| 10675 | } |
| 10676 | else |
| 10677 | { |
| 10678 | chars += printf(":?" ); |
| 10679 | } |
| 10680 | } |
| 10681 | |
| 10682 | if (call->IsUnmanaged()) |
| 10683 | { |
| 10684 | if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) |
| 10685 | { |
| 10686 | chars += printf(":T" ); |
| 10687 | } |
| 10688 | } |
| 10689 | |
| 10690 | if (tree->gtFlags & GTF_CALL_NULLCHECK) |
| 10691 | { |
| 10692 | chars += printf(":N" ); |
| 10693 | } |
| 10694 | } |
| 10695 | else if (op == GT_INTRINSIC) |
| 10696 | { |
| 10697 | CorInfoIntrinsics intrin = tree->gtIntrinsic.gtIntrinsicId; |
| 10698 | |
| 10699 | chars += printf(":" ); |
| 10700 | switch (intrin) |
| 10701 | { |
| 10702 | case CORINFO_INTRINSIC_Sin: |
| 10703 | chars += printf("Sin" ); |
| 10704 | break; |
| 10705 | case CORINFO_INTRINSIC_Cos: |
| 10706 | chars += printf("Cos" ); |
| 10707 | break; |
| 10708 | case CORINFO_INTRINSIC_Cbrt: |
| 10709 | chars += printf("Cbrt" ); |
| 10710 | break; |
| 10711 | case CORINFO_INTRINSIC_Sqrt: |
| 10712 | chars += printf("Sqrt" ); |
| 10713 | break; |
| 10714 | case CORINFO_INTRINSIC_Cosh: |
| 10715 | chars += printf("Cosh" ); |
| 10716 | break; |
| 10717 | case CORINFO_INTRINSIC_Sinh: |
| 10718 | chars += printf("Sinh" ); |
| 10719 | break; |
| 10720 | case CORINFO_INTRINSIC_Tan: |
| 10721 | chars += printf("Tan" ); |
| 10722 | break; |
| 10723 | case CORINFO_INTRINSIC_Tanh: |
| 10724 | chars += printf("Tanh" ); |
| 10725 | break; |
| 10726 | case CORINFO_INTRINSIC_Asin: |
| 10727 | chars += printf("Asin" ); |
| 10728 | break; |
| 10729 | case CORINFO_INTRINSIC_Asinh: |
| 10730 | chars += printf("Asinh" ); |
| 10731 | break; |
| 10732 | case CORINFO_INTRINSIC_Acos: |
| 10733 | chars += printf("Acos" ); |
| 10734 | break; |
| 10735 | case CORINFO_INTRINSIC_Acosh: |
| 10736 | chars += printf("Acosh" ); |
| 10737 | break; |
| 10738 | case CORINFO_INTRINSIC_Atan: |
| 10739 | chars += printf("Atan" ); |
| 10740 | break; |
| 10741 | case CORINFO_INTRINSIC_Atan2: |
| 10742 | chars += printf("Atan2" ); |
| 10743 | break; |
| 10744 | case CORINFO_INTRINSIC_Atanh: |
| 10745 | chars += printf("Atanh" ); |
| 10746 | break; |
| 10747 | case CORINFO_INTRINSIC_Log10: |
| 10748 | chars += printf("Log10" ); |
| 10749 | break; |
| 10750 | case CORINFO_INTRINSIC_Pow: |
| 10751 | chars += printf("Pow" ); |
| 10752 | break; |
| 10753 | case CORINFO_INTRINSIC_Exp: |
| 10754 | chars += printf("Exp" ); |
| 10755 | break; |
| 10756 | case CORINFO_INTRINSIC_Ceiling: |
| 10757 | chars += printf("Ceiling" ); |
| 10758 | break; |
| 10759 | case CORINFO_INTRINSIC_Floor: |
| 10760 | chars += printf("Floor" ); |
| 10761 | break; |
| 10762 | default: |
| 10763 | chars += printf("unknown(%d)" , intrin); |
| 10764 | break; |
| 10765 | } |
| 10766 | } |
| 10767 | |
    // Advance to the operands column.
| 10769 | |
| 10770 | chars += dTabStopIR(chars, COLUMN_OPERANDS); |
| 10771 | |
    // Dump operator-specific fields as operands.
| 10773 | |
| 10774 | switch (op) |
| 10775 | { |
| 10776 | default: |
| 10777 | break; |
| 10778 | case GT_FIELD: |
| 10779 | |
| 10780 | { |
| 10781 | const char* className = nullptr; |
| 10782 | const char* fieldName = comp->eeGetFieldName(tree->gtField.gtFldHnd, &className); |
| 10783 | |
| 10784 | chars += printf(" %s.%s" , className, fieldName); |
| 10785 | } |
| 10786 | break; |
| 10787 | |
| 10788 | case GT_CALL: |
| 10789 | |
| 10790 | if (tree->gtCall.gtCallType != CT_INDIRECT) |
| 10791 | { |
| 10792 | const char* methodName; |
| 10793 | const char* className; |
| 10794 | |
| 10795 | methodName = comp->eeGetMethodName(tree->gtCall.gtCallMethHnd, &className); |
| 10796 | |
| 10797 | chars += printf(" %s.%s" , className, methodName); |
| 10798 | } |
| 10799 | break; |
| 10800 | |
| 10801 | case GT_STORE_LCL_VAR: |
| 10802 | case GT_STORE_LCL_FLD: |
| 10803 | |
| 10804 | if (!dataflowView) |
| 10805 | { |
| 10806 | chars += printf(" " ); |
| 10807 | chars += cLeafIR(comp, tree); |
| 10808 | } |
| 10809 | break; |
| 10810 | |
| 10811 | case GT_LEA: |
| 10812 | |
| 10813 | GenTreeAddrMode* lea = tree->AsAddrMode(); |
| 10814 | GenTree* base = lea->Base(); |
| 10815 | GenTree* index = lea->Index(); |
| 10816 | unsigned scale = lea->gtScale; |
| 10817 | int offset = lea->Offset(); |
| 10818 | |
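            // Print the address mode as [base + index*scale + offset], omitting
            // any component that is absent (a bare offset is printed when there
            // is neither a base nor an index).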
| 10819 | chars += printf(" [" ); |
| 10820 | if (base != nullptr) |
| 10821 | { |
| 10822 | chars += cOperandIR(comp, base); |
| 10823 | } |
| 10824 | if (index != nullptr) |
| 10825 | { |
| 10826 | if (base != nullptr) |
| 10827 | { |
| 10828 | chars += printf("+" ); |
| 10829 | } |
| 10830 | chars += cOperandIR(comp, index); |
| 10831 | if (scale > 1) |
| 10832 | { |
| 10833 | chars += printf("*%u" , scale); |
| 10834 | } |
| 10835 | } |
| 10836 | if ((offset != 0) || ((base == nullptr) && (index == nullptr))) |
| 10837 | { |
| 10838 | if ((base != nullptr) || (index != nullptr)) |
| 10839 | { |
| 10840 | chars += printf("+" ); |
| 10841 | } |
| 10842 | chars += printf("%d" , offset); |
| 10843 | } |
| 10844 | chars += printf("]" ); |
| 10845 | break; |
| 10846 | } |
| 10847 | |
| 10848 | // Dump operands. |
| 10849 | |
| 10850 | if (tree->OperIsLeaf()) |
| 10851 | { |
| 10852 | chars += printf(" " ); |
| 10853 | chars += cLeafIR(comp, tree); |
| 10854 | } |
| 10855 | else if (op == GT_LEA) |
| 10856 | { |
| 10857 | // Already dumped it above. |
| 10858 | } |
| 10859 | else if (op == GT_PHI) |
| 10860 | { |
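        // A GT_PHI keeps its arguments in a GT_LIST chain hanging off gtOp1;
        // walk the chain and dump each phi argument as an operand.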
        if (tree->gtOp.gtOp1 != nullptr)
        {
            bool first = true;
            for (GenTreeArgList* args = tree->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
            {
                child = args->Current();
                if (!first)
                {
                    chars += printf(",");
                }
                first = false;
                chars += printf(" ");
                chars += cOperandIR(comp, child);
            }
| 10875 | } |
| 10876 | } |
| 10877 | else |
| 10878 | { |
| 10879 | bool hasComma = false; |
| 10880 | bool first = true; |
| 10881 | int operandChars = 0; |
| 10882 | for (unsigned childIndex = 0; childIndex < childCount; childIndex++) |
| 10883 | { |
| 10884 | child = tree->GetChild(childIndex); |
| 10885 | if (child == nullptr) |
| 10886 | { |
| 10887 | continue; |
| 10888 | } |
| 10889 | |
| 10890 | if (child->gtOper == GT_COMMA) |
| 10891 | { |
| 10892 | hasComma = true; |
| 10893 | } |
| 10894 | |
| 10895 | if (dataflowView && (childIndex == 0)) |
| 10896 | { |
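                // In dataflow view the destination (first operand) of an
                // assignment or indirect store was already dumped as the def,
                // so skip it here.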
| 10897 | if ((op == GT_ASG) || (op == GT_STOREIND)) |
| 10898 | { |
| 10899 | continue; |
| 10900 | } |
| 10901 | } |
| 10902 | |
            if (!first)
            {
                chars += printf(",");
            }
| 10907 | |
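            // Dump this child as an operand; when leaf folding is enabled,
            // argument placeholder (GT_ARGPLACE) nodes are skipped.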
| 10908 | bool isList = (child->gtOper == GT_LIST); |
| 10909 | if (!isList || !foldLists) |
| 10910 | { |
| 10911 | if (foldLeafs && (child->gtOper == GT_ARGPLACE)) |
| 10912 | { |
| 10913 | continue; |
| 10914 | } |
| 10915 | chars += printf(" " ); |
| 10916 | operandChars = cOperandIR(comp, child); |
| 10917 | chars += operandChars; |
| 10918 | if (operandChars > 0) |
| 10919 | { |
| 10920 | first = false; |
| 10921 | } |
| 10922 | } |
| 10923 | else |
| 10924 | { |
| 10925 | assert(isList); |
| 10926 | chars += printf(" " ); |
| 10927 | operandChars = cOperandIR(comp, child); |
| 10928 | chars += operandChars; |
| 10929 | if (operandChars > 0) |
| 10930 | { |
| 10931 | first = false; |
| 10932 | } |
| 10933 | } |
| 10934 | } |
| 10935 | |
| 10936 | if (dataflowView && hasComma) |
| 10937 | { |
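            // Dump the dependencies of any GT_COMMA children as an explicit
            // DEPS(...) list so the dataflow view remains complete.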
| 10938 | chars += printf(", DEPS(" ); |
| 10939 | first = true; |
| 10940 | for (unsigned childIndex = 0; childIndex < childCount; childIndex++) |
| 10941 | { |
| 10942 | child = tree->GetChild(childIndex); |
| 10943 | if (child->gtOper == GT_COMMA) |
| 10944 | { |
| 10945 | chars += cDependsIR(comp, child, &first); |
| 10946 | } |
| 10947 | } |
| 10948 | chars += printf(")" ); |
| 10949 | } |
| 10950 | } |
| 10951 | |
| 10952 | // Dump kinds, flags, costs |
| 10953 | |
    if (comp->dumpIRKinds || comp->dumpIRFlags || comp->dumpIRCosts)
    {
        chars += dTabStopIR(chars, COLUMN_KINDS);
        chars += printf(";");
        if (comp->dumpIRKinds)
        {
            chars += printf(" ");
            chars += cTreeKindsIR(comp, tree);
        }
        if (comp->dumpIRFlags && (tree->gtFlags != 0))
        {
            if (comp->dumpIRKinds)
            {
                chars += dTabStopIR(chars, COLUMN_FLAGS);
            }
            else
            {
                chars += printf(" ");
            }
            chars += cTreeFlagsIR(comp, tree);
        }
        if (comp->dumpIRCosts && tree->gtCostsInitialized)
        {
            chars += printf(" CostEx=%d, CostSz=%d", tree->GetCostEx(), tree->GetCostSz());
        }
    }

    printf("\n");
| 10982 | } |
| 10983 | |
| 10984 | /***************************************************************************** |
| 10985 | * |
| 10986 | * COMPlus_JitDumpIR support - dump out tree in linear IR form |
| 10987 | */ |
| 10988 | |
| 10989 | void cTreeIR(Compiler* comp, GenTree* tree) |
| 10990 | { |
| 10991 | bool foldLeafs = comp->dumpIRNoLeafs; |
| 10992 | bool foldIndirs = comp->dumpIRDataflow; |
| 10993 | bool foldLists = comp->dumpIRNoLists; |
| 10994 | bool dataflowView = comp->dumpIRDataflow; |
| 10995 | bool dumpTypes = comp->dumpIRTypes; |
| 10996 | bool dumpValnums = comp->dumpIRValnums; |
| 10997 | bool noStmts = comp->dumpIRNoStmts; |
| 10998 | genTreeOps op = tree->OperGet(); |
| 10999 | unsigned childCount = tree->NumChildren(); |
| 11000 | GenTree* child; |
| 11001 | |
| 11002 | // Recurse and dump trees that this node depends on. |
| 11003 | |
| 11004 | if (tree->OperIsLeaf()) |
| 11005 | { |
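        // Leaf nodes have no operands to dump first.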
| 11006 | } |
| 11007 | else if (tree->OperIsBinary() && tree->IsReverseOp()) |
| 11008 | { |
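        // GTF_REVERSE_OPS: the second operand is evaluated before the first,
        // so dump the children in execution order.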
| 11009 | child = tree->GetChild(1); |
| 11010 | cTreeIR(comp, child); |
| 11011 | child = tree->GetChild(0); |
| 11012 | cTreeIR(comp, child); |
| 11013 | } |
| 11014 | else if (op == GT_PHI) |
| 11015 | { |
| 11016 | // Don't recurse. |
| 11017 | } |
| 11018 | else |
| 11019 | { |
| 11020 | assert(!tree->IsReverseOp()); |
| 11021 | for (unsigned childIndex = 0; childIndex < childCount; childIndex++) |
| 11022 | { |
| 11023 | child = tree->GetChild(childIndex); |
| 11024 | if (child != nullptr) |
| 11025 | { |
| 11026 | cTreeIR(comp, child); |
| 11027 | } |
| 11028 | } |
| 11029 | } |
| 11030 | |
| 11031 | cNodeIR(comp, tree); |
| 11032 | } |
| 11033 | |
| 11034 | /***************************************************************************** |
| 11035 | * |
 *  COMPlus_JitDumpIR support - dump out tree in linear IR form, using the current thread's compiler
| 11037 | */ |
| 11038 | |
| 11039 | void dTreeIR(GenTree* tree) |
| 11040 | { |
| 11041 | cTreeIR(JitTls::GetCompiler(), tree); |
| 11042 | } |
| 11043 | |
| 11044 | #endif // DEBUG |
| 11045 | |
| 11046 | #if VARSET_COUNTOPS |
| 11047 | // static |
BitSetSupport::BitSetOpCounter Compiler::m_varsetOpCounter("VarSetOpCounts.log");
| 11049 | #endif |
| 11050 | #if ALLVARSET_COUNTOPS |
| 11051 | // static |
BitSetSupport::BitSetOpCounter Compiler::m_allvarsetOpCounter("AllVarSetOpCounts.log");
| 11053 | #endif |
| 11054 | |
| 11055 | // static |
| 11056 | HelperCallProperties Compiler::s_helperCallProperties; |
| 11057 | |
| 11058 | /*****************************************************************************/ |
| 11059 | /*****************************************************************************/ |
| 11060 | |
//------------------------------------------------------------------------
// killGCRefs:
// Given a tree node, determine whether it requires all GC refs to be
// spilled from callee-saved registers.
//
// Arguments:
//    tree - the tree for which we ask about GC refs.
//
// Return Value:
//    true - tree kills GC refs held in callee-saved registers
//    false - tree doesn't affect GC refs held in callee-saved registers
| 11072 | bool Compiler::killGCRefs(GenTree* tree) |
| 11073 | { |
| 11074 | if (tree->IsCall()) |
| 11075 | { |
| 11076 | GenTreeCall* call = tree->AsCall(); |
| 11077 | if (call->IsUnmanaged()) |
| 11078 | { |
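            // GC refs must not remain live in callee-saved registers across
            // an unmanaged call, so they all need to be spilled.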
| 11079 | return true; |
| 11080 | } |
| 11081 | |
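        // When PInvoke helpers are in use, the PInvoke-begin helper performs
        // the transition to native code, so treat it like an unmanaged call.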
| 11082 | if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_JIT_PINVOKE_BEGIN)) |
| 11083 | { |
| 11084 | assert(opts.ShouldUsePInvokeHelpers()); |
| 11085 | return true; |
| 11086 | } |
| 11087 | } |
| 11088 | return false; |
| 11089 | } |
| 11090 | |