| 1 | // Licensed to the .NET Foundation under one or more agreements. |
| 2 | // The .NET Foundation licenses this file to you under the MIT license. |
| 3 | // See the LICENSE file in the project root for more information. |
| 4 | |
| 5 | /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 6 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 7 | XX XX |
| 8 | XX FlowGraph XX |
| 9 | XX XX |
| 10 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 11 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
| 12 | */ |
| 13 | |
| 14 | #include "jitpch.h" |
| 15 | #ifdef _MSC_VER |
| 16 | #pragma hdrstop |
| 17 | #endif |
| 18 | |
| 19 | #include "allocacheck.h" // for alloca |
| 20 | #include "lower.h" // for LowerRange() |
| 21 | |
| 22 | /*****************************************************************************/ |
| 23 | |
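| | // fgInit: Initialize the Compiler's flow-graph state (basic block list pointers and counts, |
| | // predecessor/dominator/edge-weight flags, profile and add-code state) to its |
| | // "not yet computed" defaults. |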
| 24 | void Compiler::fgInit() |
| 25 | { |
| 26 | impInit(); |
| 27 | |
| 28 | /* Initialization for fgWalkTreePre() and fgWalkTreePost() */ |
| 29 | |
| 30 | fgFirstBBScratch = nullptr; |
| 31 | |
| 32 | #ifdef DEBUG |
| 33 | fgPrintInlinedMethods = JitConfig.JitPrintInlinedMethods() == 1; |
| 34 | #endif // DEBUG |
| 35 | |
| 36 | /* We haven't yet computed the bbPreds lists */ |
| 37 | fgComputePredsDone = false; |
| 38 | |
| 39 | /* We haven't yet computed the bbCheapPreds lists */ |
| 40 | fgCheapPredsValid = false; |
| 41 | |
| 42 | /* We haven't yet computed the edge weights */ |
| 43 | fgEdgeWeightsComputed = false; |
| 44 | fgHaveValidEdgeWeights = false; |
| 45 | fgSlopUsedInEdgeWeights = false; |
| 46 | fgRangeUsedInEdgeWeights = true; |
| 47 | fgNeedsUpdateFlowGraph = false; |
| 48 | fgCalledCount = BB_ZERO_WEIGHT; |
| 49 | |
| 50 | /* We haven't yet computed the dominator sets */ |
| 51 | fgDomsComputed = false; |
| 52 | |
| 53 | #ifdef DEBUG |
| 54 | fgReachabilitySetsValid = false; |
| 55 | #endif // DEBUG |
| 56 | |
| 57 | /* We don't know yet which loops will always execute calls */ |
| 58 | fgLoopCallMarked = false; |
| 59 | |
| 60 | /* We haven't created GC Poll blocks yet. */ |
| 61 | fgGCPollsCreated = false; |
| 62 | |
| 63 | /* Initialize the basic block list */ |
| 64 | |
| 65 | fgFirstBB = nullptr; |
| 66 | fgLastBB = nullptr; |
| 67 | fgFirstColdBlock = nullptr; |
| 68 | |
| 69 | #if FEATURE_EH_FUNCLETS |
| 70 | fgFirstFuncletBB = nullptr; |
| 71 | fgFuncletsCreated = false; |
| 72 | #endif // FEATURE_EH_FUNCLETS |
| 73 | |
| 74 | fgBBcount = 0; |
| 75 | |
| 76 | #ifdef DEBUG |
| 77 | fgBBcountAtCodegen = 0; |
| 78 | #endif // DEBUG |
| 79 | |
| 80 | fgBBNumMax = 0; |
| 81 | fgEdgeCount = 0; |
| 82 | fgDomBBcount = 0; |
| 83 | fgBBVarSetsInited = false; |
| 84 | fgReturnCount = 0; |
| 85 | |
| 86 | // Initialize BlockSet data. |
| 87 | fgCurBBEpoch = 0; |
| 88 | fgCurBBEpochSize = 0; |
| 89 | fgBBSetCountInSizeTUnits = 0; |
| 90 | |
| 91 | genReturnBB = nullptr; |
| 92 | |
| 93 | /* We haven't reached the global morphing phase */ |
| 94 | fgGlobalMorph = false; |
| 95 | fgModified = false; |
| 96 | |
| 97 | #ifdef DEBUG |
| 98 | fgSafeBasicBlockCreation = true; |
| 99 | #endif // DEBUG |
| 100 | |
| 101 | fgLocalVarLivenessDone = false; |
| 102 | |
| 103 | /* Statement list is not threaded yet */ |
| 104 | |
| 105 | fgStmtListThreaded = false; |
| 106 | |
| 107 | // Initialize the logic for adding code. This is used to insert code such |
| 108 | // as the code that raises an exception when an array range check fails. |
| 109 | |
| 110 | fgAddCodeList = nullptr; |
| 111 | fgAddCodeModf = false; |
| 112 | |
| 113 | for (int i = 0; i < SCK_COUNT; i++) |
| 114 | { |
| 115 | fgExcptnTargetCache[i] = nullptr; |
| 116 | } |
| 117 | |
| 118 | /* Keep track of the max count of pointer arguments */ |
| 119 | fgPtrArgCntMax = 0; |
| 120 | |
| 121 | /* This global flag is set whenever we remove a statement */ |
| 122 | fgStmtRemoved = false; |
| 123 | |
| 124 | /* This global flag is set whenever we add a throw block for a RngChk */ |
| 125 | fgRngChkThrowAdded = false; /* reset flag for fgIsCodeAdded() */ |
| 126 | |
| 127 | /* We will record a list of all BBJ_RETURN blocks here */ |
| 128 | fgReturnBlocks = nullptr; |
| 129 | |
| 130 | /* This is set by fgComputeReachability */ |
| 131 | fgEnterBlks = BlockSetOps::UninitVal(); |
| 132 | |
| 133 | #ifdef DEBUG |
| 134 | fgEnterBlksSetValid = false; |
| 135 | #endif // DEBUG |
| 136 | |
| 137 | #if !FEATURE_EH_FUNCLETS |
| 138 | ehMaxHndNestingCount = 0; |
| 139 | #endif // !FEATURE_EH_FUNCLETS |
| 140 | |
| 141 | /* Init the fgBigOffsetMorphingTemps to be BAD_VAR_NUM. */ |
| 142 | for (int i = 0; i < TYP_COUNT; i++) |
| 143 | { |
| 144 | fgBigOffsetMorphingTemps[i] = BAD_VAR_NUM; |
| 145 | } |
| 146 | |
| 147 | fgNoStructPromotion = false; |
| 148 | fgNoStructParamPromotion = false; |
| 149 | |
| 150 | optValnumCSE_phase = false; // referenced in fgMorphSmpOp() |
| 151 | |
| 152 | #ifdef DEBUG |
| 153 | fgNormalizeEHDone = false; |
| 154 | #endif // DEBUG |
| 155 | |
| 156 | #ifdef DEBUG |
| 157 | if (!compIsForInlining()) |
| 158 | { |
| 159 | if ((JitConfig.JitNoStructPromotion() & 1) == 1) |
| 160 | { |
| 161 | fgNoStructPromotion = true; |
| 162 | } |
| 163 | if ((JitConfig.JitNoStructPromotion() & 2) == 2) |
| 164 | { |
| 165 | fgNoStructParamPromotion = true; |
| 166 | } |
| 167 | } |
| 168 | #endif // DEBUG |
| 169 | |
| 170 | if (!compIsForInlining()) |
| 171 | { |
| 172 | m_promotedStructDeathVars = nullptr; |
| 173 | } |
| 174 | #ifdef FEATURE_SIMD |
| 175 | fgPreviousCandidateSIMDFieldAsgStmt = nullptr; |
| 176 | #endif |
| 177 | } |
| 178 | |
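| | //------------------------------------------------------------------------ |
| | // fgHaveProfileData: Check whether profile (IBC) data is available for this method. |
| | // |
| | // Return Value: |
| | //    true if a profile buffer was provided and this is not an inlining or |
| | //    import-only compilation; false otherwise. |
| | |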
| 179 | bool Compiler::fgHaveProfileData() |
| 180 | { |
| 181 | if (compIsForInlining() || compIsForImportOnly()) |
| 182 | { |
| 183 | return false; |
| 184 | } |
| 185 | |
| 186 | return (fgProfileBuffer != nullptr); |
| 187 | } |
| 188 | |
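| | //------------------------------------------------------------------------ |
| | // fgGetProfileWeightForBasicBlock: Look up the profiled execution count for the |
| | // basic block starting at the given IL offset. |
| | // |
| | // Arguments: |
| | //    offset   - IL offset of the block's first instruction. |
| | //    weightWB - [out] the weight found for that offset (zero if no entry matches). |
| | // |
| | // Return Value: |
| | //    true if a weight was written to *weightWB; false if no profile data is available. |
| | |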
| 189 | bool Compiler::fgGetProfileWeightForBasicBlock(IL_OFFSET offset, unsigned* weightWB) |
| 190 | { |
| 191 | noway_assert(weightWB != nullptr); |
| 192 | unsigned weight = 0; |
| 193 | |
| 194 | #ifdef DEBUG |
| 195 | unsigned hashSeed = fgStressBBProf(); |
| 196 | if (hashSeed != 0) |
| 197 | { |
| 198 | unsigned hash = (info.compMethodHash() * hashSeed) ^ (offset * 1027); |
| 199 | |
| 200 | // We need to especially stress the procedure splitting codepath. Therefore, |
| 201 | // about one third of the time we should return a weight of zero. |
| 202 | // Otherwise we should return some pseudo-random weight (usually between 0 and 288). |
| 203 | // Counting the zero products from the other branches as well, the code below gives a weight of zero about 44% of the time. |
| 204 | |
| 205 | if (hash % 3 == 0) |
| 206 | { |
| 207 | weight = 0; |
| 208 | } |
| 209 | else if (hash % 11 == 0) |
| 210 | { |
| 211 | weight = (hash % 23) * (hash % 29) * (hash % 31); |
| 212 | } |
| 213 | else |
| 214 | { |
| 215 | weight = (hash % 17) * (hash % 19); |
| 216 | } |
| 217 | |
| 218 | // The first block is never given a weight of zero |
| 219 | if ((offset == 0) && (weight == 0)) |
| 220 | { |
| 221 | weight = 1 + (hash % 5); |
| 222 | } |
| 223 | |
| 224 | *weightWB = weight; |
| 225 | return true; |
| 226 | } |
| 227 | #endif // DEBUG |
| 228 | |
| 229 | if (fgHaveProfileData() == false) |
| 230 | { |
| 231 | return false; |
| 232 | } |
| 233 | |
| 234 | noway_assert(!compIsForInlining()); |
| 235 | for (unsigned i = 0; i < fgProfileBufferCount; i++) |
| 236 | { |
| 237 | if (fgProfileBuffer[i].ILOffset == offset) |
| 238 | { |
| 239 | weight = fgProfileBuffer[i].ExecutionCount; |
| 240 | |
| 241 | *weightWB = weight; |
| 242 | return true; |
| 243 | } |
| 244 | } |
| 245 | |
| 246 | *weightWB = 0; |
| 247 | return true; |
| 248 | } |
| 249 | |
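| | //------------------------------------------------------------------------ |
| | // fgInstrumentMethod: Add basic-block profiling instrumentation to the method. |
| | // |
| | // Allocates one ProfileBuffer entry per imported, non-internal basic block, prepends a |
| | // counter-increment statement to each such block, and adds a call to the |
| | // CORINFO_HELP_BBT_FCN_ENTER method-entry helper at the end of the scratch entry block. |
| | // If the buffer cannot be allocated (E_NOTIMPL), only the method-entry callback is added. |
| | |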
| 250 | void Compiler::fgInstrumentMethod() |
| 251 | { |
| 252 | noway_assert(!compIsForInlining()); |
| 253 | |
| 254 | // Count the number of basic blocks in the method |
| 255 | |
| 256 | int countOfBlocks = 0; |
| 257 | BasicBlock* block; |
| 258 | for (block = fgFirstBB; (block != nullptr); block = block->bbNext) |
| 259 | { |
| 260 | if (!(block->bbFlags & BBF_IMPORTED) || (block->bbFlags & BBF_INTERNAL)) |
| 261 | { |
| 262 | continue; |
| 263 | } |
| 264 | countOfBlocks++; |
| 265 | } |
| 266 | |
| 267 | // Allocate the profile buffer |
| 268 | |
| 269 | ICorJitInfo::ProfileBuffer* bbProfileBufferStart; |
| 270 | |
| 271 | HRESULT res = info.compCompHnd->allocBBProfileBuffer(countOfBlocks, &bbProfileBufferStart); |
| 272 | |
| 273 | GenTree* stmt; |
| 274 | |
| 275 | if (!SUCCEEDED(res)) |
| 276 | { |
| 277 | // The E_NOTIMPL status is returned when we are profiling a generic method from a different assembly |
| 278 | if (res == E_NOTIMPL) |
| 279 | { |
| 280 | // In such cases we still want to add the method entry callback node |
| 281 | |
| 282 | GenTreeArgList* args = gtNewArgList(gtNewIconEmbMethHndNode(info.compMethodHnd)); |
| 283 | GenTree* call = gtNewHelperCallNode(CORINFO_HELP_BBT_FCN_ENTER, TYP_VOID, args); |
| 284 | |
| 285 | stmt = gtNewStmt(call); |
| 286 | } |
| 287 | else |
| 288 | { |
| 289 | noway_assert(!"Error: failed to allocate bbProfileBuffer"); |
| 290 | return; |
| 291 | } |
| 292 | } |
| 293 | else |
| 294 | { |
| 295 | // For each BasicBlock (non-Internal) |
| 296 | // 1. Assign the block's bbCodeOffs to the ILOffset field of this block's profile data. |
| 297 | // 2. Add an operation that increments the ExecutionCount field at the beginning of the block. |
| 298 | |
| 299 | // Each (non-Internal) block has its own ProfileBuffer tuple [ILOffset, ExecutionCount] |
| 300 | // To start we initialize our current one with the first one that we allocated |
| 301 | // |
| 302 | ICorJitInfo::ProfileBuffer* bbCurrentBlockProfileBuffer = bbProfileBufferStart; |
| 303 | |
| 304 | for (block = fgFirstBB; (block != nullptr); block = block->bbNext) |
| 305 | { |
| 306 | if (!(block->bbFlags & BBF_IMPORTED) || (block->bbFlags & BBF_INTERNAL)) |
| 307 | { |
| 308 | continue; |
| 309 | } |
| 310 | |
| 311 | // Assign the current block's IL offset into the profile data |
| 312 | bbCurrentBlockProfileBuffer->ILOffset = block->bbCodeOffs; |
| 313 | assert(bbCurrentBlockProfileBuffer->ExecutionCount == 0); // This value should already be zeroed out |
| 314 | |
| 315 | size_t addrOfCurrentExecutionCount = (size_t)&bbCurrentBlockProfileBuffer->ExecutionCount; |
| 316 | |
| 317 | // Read Basic-Block count value |
| 318 | GenTree* valueNode = |
| 319 | gtNewIndOfIconHandleNode(TYP_INT, addrOfCurrentExecutionCount, GTF_ICON_BBC_PTR, false); |
| 320 | |
| 321 | // Increment value by 1 |
| 322 | GenTree* rhsNode = gtNewOperNode(GT_ADD, TYP_INT, valueNode, gtNewIconNode(1)); |
| 323 | |
| 324 | // Write new Basic-Block count value |
| 325 | GenTree* lhsNode = gtNewIndOfIconHandleNode(TYP_INT, addrOfCurrentExecutionCount, GTF_ICON_BBC_PTR, false); |
| 326 | GenTree* asgNode = gtNewAssignNode(lhsNode, rhsNode); |
| 327 | |
| 328 | fgInsertStmtAtBeg(block, asgNode); |
| 329 | |
| 330 | // Advance to the next ProfileBuffer tuple [ILOffset, ExecutionCount] |
| 331 | bbCurrentBlockProfileBuffer++; |
| 332 | |
| 333 | // One less block |
| 334 | countOfBlocks--; |
| 335 | } |
| 336 | // Check that we allocated and initialized the same number of ProfileBuffer tuples |
| 337 | noway_assert(countOfBlocks == 0); |
| 338 | |
| 339 | // Add the method entry callback node |
| 340 | |
| 341 | GenTree* arg; |
| 342 | |
| 343 | #ifdef FEATURE_READYTORUN_COMPILER |
| 344 | if (opts.IsReadyToRun()) |
| 345 | { |
| 346 | mdMethodDef currentMethodToken = info.compCompHnd->getMethodDefFromMethod(info.compMethodHnd); |
| 347 | |
| 348 | CORINFO_RESOLVED_TOKEN resolvedToken; |
| 349 | resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd); |
| 350 | resolvedToken.tokenScope = info.compScopeHnd; |
| 351 | resolvedToken.token = currentMethodToken; |
| 352 | resolvedToken.tokenType = CORINFO_TOKENKIND_Method; |
| 353 | |
| 354 | info.compCompHnd->resolveToken(&resolvedToken); |
| 355 | |
| 356 | arg = impTokenToHandle(&resolvedToken); |
| 357 | } |
| 358 | else |
| 359 | #endif |
| 360 | { |
| 361 | arg = gtNewIconEmbMethHndNode(info.compMethodHnd); |
| 362 | } |
| 363 | |
| 364 | GenTreeArgList* args = gtNewArgList(arg); |
| 365 | GenTree* call = gtNewHelperCallNode(CORINFO_HELP_BBT_FCN_ENTER, TYP_VOID, args); |
| 366 | |
| 367 | // Get the address of the first block's ExecutionCount |
| 368 | size_t addrOfFirstExecutionCount = (size_t)&bbProfileBufferStart->ExecutionCount; |
| 369 | |
| 370 | // Read Basic-Block count value |
| 371 | GenTree* valueNode = gtNewIndOfIconHandleNode(TYP_INT, addrOfFirstExecutionCount, GTF_ICON_BBC_PTR, false); |
| 372 | |
| 373 | // Compare Basic-Block count value against zero |
| 374 | GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, valueNode, gtNewIconNode(0, TYP_INT)); |
| 375 | GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), call); |
| 376 | GenTree* cond = gtNewQmarkNode(TYP_VOID, relop, colon); |
| 377 | stmt = gtNewStmt(cond); |
| 378 | } |
| 379 | |
| 380 | fgEnsureFirstBBisScratch(); |
| 381 | |
| 382 | fgInsertStmtAtEnd(fgFirstBB, stmt); |
| 383 | } |
| 384 | |
| 385 | /***************************************************************************** |
| 386 | * |
| 387 | * Create a basic block and append it to the current BB list. |
| 388 | */ |
| 389 | |
| 390 | BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) |
| 391 | { |
| 392 | // This method must not be called after the exception table has been |
| 393 | // constructed, because it does not provide support for patching |
| 394 | // the exception table. |
| 395 | |
| 396 | noway_assert(compHndBBtabCount == 0); |
| 397 | |
| 398 | BasicBlock* block; |
| 399 | |
| 400 | /* Allocate the block descriptor */ |
| 401 | |
| 402 | block = bbNewBasicBlock(jumpKind); |
| 403 | noway_assert(block->bbJumpKind == jumpKind); |
| 404 | |
| 405 | /* Append the block to the end of the global basic block list */ |
| 406 | |
| 407 | if (fgFirstBB) |
| 408 | { |
| 409 | fgLastBB->setNext(block); |
| 410 | } |
| 411 | else |
| 412 | { |
| 413 | fgFirstBB = block; |
| 414 | block->bbPrev = nullptr; |
| 415 | } |
| 416 | |
| 417 | fgLastBB = block; |
| 418 | |
| 419 | return block; |
| 420 | } |
| 421 | |
| 422 | /***************************************************************************** |
| 423 | * |
| 424 | * Ensures that fgFirstBB is a scratch BasicBlock that we have added. |
| 425 | * This can be used to add initialization code (without worrying |
| 426 | * about other blocks jumping to it). |
| 427 | * |
| 428 | * Callers have to be careful that they do not mess up the order of things |
| 429 | * added to fgEnsureFirstBBisScratch in a way that changes semantics. |
| 430 | */ |
| 431 | |
| 432 | void Compiler::fgEnsureFirstBBisScratch() |
| 433 | { |
| 434 | // This method does not update predecessor lists and so must only be called before they are computed. |
| 435 | assert(!fgComputePredsDone); |
| 436 | |
| 437 | // Have we already allocated a scratch block? |
| 438 | |
| 439 | if (fgFirstBBisScratch()) |
| 440 | { |
| 441 | return; |
| 442 | } |
| 443 | |
| 444 | assert(fgFirstBBScratch == nullptr); |
| 445 | |
| 446 | BasicBlock* block = bbNewBasicBlock(BBJ_NONE); |
| 447 | |
| 448 | if (fgFirstBB != nullptr) |
| 449 | { |
| 450 | // If we have profile data, the new block will inherit fgFirstBB's weight |
| 451 | if (fgFirstBB->hasProfileWeight()) |
| 452 | { |
| 453 | block->inheritWeight(fgFirstBB); |
| 454 | } |
| 455 | |
| 456 | fgInsertBBbefore(fgFirstBB, block); |
| 457 | } |
| 458 | else |
| 459 | { |
| 460 | noway_assert(fgLastBB == nullptr); |
| 461 | fgFirstBB = block; |
| 462 | fgLastBB = block; |
| 463 | } |
| 464 | |
| 465 | noway_assert(fgLastBB != nullptr); |
| 466 | |
| 467 | block->bbFlags |= (BBF_INTERNAL | BBF_IMPORTED); |
| 468 | |
| 469 | fgFirstBBScratch = fgFirstBB; |
| 470 | |
| 471 | #ifdef DEBUG |
| 472 | if (verbose) |
| 473 | { |
| 474 | printf("New scratch " FMT_BB "\n", block->bbNum); |
| 475 | } |
| 476 | #endif |
| 477 | } |
| 478 | |
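| | //------------------------------------------------------------------------ |
| | // fgFirstBBisScratch: Return true if a scratch entry block has been allocated by |
| | // fgEnsureFirstBBisScratch (in which case it is fgFirstBB), asserting its expected invariants. |
| | |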
| 479 | bool Compiler::fgFirstBBisScratch() |
| 480 | { |
| 481 | if (fgFirstBBScratch != nullptr) |
| 482 | { |
| 483 | assert(fgFirstBBScratch == fgFirstBB); |
| 484 | assert(fgFirstBBScratch->bbFlags & BBF_INTERNAL); |
| 485 | assert(fgFirstBBScratch->countOfInEdges() == 1); |
| 486 | |
| 487 | // Normally, the first scratch block is a fall-through block. However, if the block after it was an empty |
| 488 | // BBJ_ALWAYS block, it might get removed, and the code that removes it will make the first scratch block |
| 489 | // a BBJ_ALWAYS block. |
| 490 | assert((fgFirstBBScratch->bbJumpKind == BBJ_NONE) || (fgFirstBBScratch->bbJumpKind == BBJ_ALWAYS)); |
| 491 | |
| 492 | return true; |
| 493 | } |
| 494 | else |
| 495 | { |
| 496 | return false; |
| 497 | } |
| 498 | } |
| 499 | |
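| | //------------------------------------------------------------------------ |
| | // fgBBisScratch: Return true if 'block' is the scratch entry block, i.e., it is |
| | // fgFirstBB and fgFirstBB is a scratch block. |
| | |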
| 500 | bool Compiler::fgBBisScratch(BasicBlock* block) |
| 501 | { |
| 502 | return fgFirstBBisScratch() && (block == fgFirstBB); |
| 503 | } |
| 504 | |
| 505 | #ifdef DEBUG |
| 506 | // Check to see if block contains a statement but don't spend more than a certain |
| 507 | // budget doing this per method compiled. |
| 508 | // If the budget is exceeded, return 'answerOnBoundExceeded' as the answer. |
| 509 | /* static */ |
| 510 | bool Compiler::fgBlockContainsStatementBounded(BasicBlock* block, GenTree* stmt, bool answerOnBoundExceeded /*= true*/) |
| 511 | { |
| 512 | const __int64 maxLinks = 1000000000; |
| 513 | |
| 514 | assert(stmt->gtOper == GT_STMT); |
| 515 | |
| 516 | __int64* numTraversed = &JitTls::GetCompiler()->compNumStatementLinksTraversed; |
| 517 | |
| 518 | if (*numTraversed > maxLinks) |
| 519 | { |
| 520 | return answerOnBoundExceeded; |
| 521 | } |
| 522 | |
| 523 | GenTree* curr = block->firstStmt(); |
| 524 | do |
| 525 | { |
| 526 | (*numTraversed)++; |
| 527 | if (curr == stmt) |
| 528 | { |
| 529 | break; |
| 530 | } |
| 531 | curr = curr->gtNext; |
| 532 | } while (curr); |
| 533 | return curr != nullptr; |
| 534 | } |
| 535 | #endif // DEBUG |
| 536 | |
| 537 | //------------------------------------------------------------------------ |
| 538 | // fgInsertStmtAtBeg: Insert the given tree or statement at the start of the given basic block. |
| 539 | // |
| 540 | // Arguments: |
| 541 | // block - The block into which 'stmt' will be inserted. |
| 542 | // stmt - The statement to be inserted. |
| 543 | // |
| 544 | // Return Value: |
| 545 | // Returns the (potentially newly created) GT_STMT node. |
| 546 | // |
| 547 | // Notes: |
| 548 | // If 'stmt' is not already a statement, a new statement is created from it. |
| 549 | // We always insert phi statements at the beginning. |
| 550 | // In other cases, if there are any phi assignments and/or an assignment of |
| 551 | // the GT_CATCH_ARG, we insert after those. |
| 552 | |
| 553 | GenTree* Compiler::fgInsertStmtAtBeg(BasicBlock* block, GenTree* stmt) |
| 554 | { |
| 555 | if (stmt->gtOper != GT_STMT) |
| 556 | { |
| 557 | stmt = gtNewStmt(stmt); |
| 558 | } |
| 559 | |
| 560 | GenTree* list = block->firstStmt(); |
| 561 | |
| 562 | if (!stmt->IsPhiDefnStmt()) |
| 563 | { |
| 564 | GenTree* insertBeforeStmt = block->FirstNonPhiDefOrCatchArgAsg(); |
| 565 | if (insertBeforeStmt != nullptr) |
| 566 | { |
| 567 | return fgInsertStmtBefore(block, insertBeforeStmt, stmt); |
| 568 | } |
| 569 | else if (list != nullptr) |
| 570 | { |
| 571 | return fgInsertStmtAtEnd(block, stmt); |
| 572 | } |
| 573 | // Otherwise, we will simply insert at the beginning, below. |
| 574 | } |
| 575 | |
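| | // Note: bbTreeList points to the first statement; the gtPrev links of statements form a |
| | // circular list (the first statement's gtPrev is the last statement), while the last |
| | // statement's gtNext is nullptr. |
| | |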
| 576 | /* The new tree will now be the first one of the block */ |
| 577 | |
| 578 | block->bbTreeList = stmt; |
| 579 | stmt->gtNext = list; |
| 580 | |
| 581 | /* Are there any statements in the block? */ |
| 582 | |
| 583 | if (list) |
| 584 | { |
| 585 | GenTree* last; |
| 586 | |
| 587 | /* There is at least one statement already */ |
| 588 | |
| 589 | last = list->gtPrev; |
| 590 | noway_assert(last && last->gtNext == nullptr); |
| 591 | |
| 592 | /* Insert the statement in front of the first one */ |
| 593 | |
| 594 | list->gtPrev = stmt; |
| 595 | stmt->gtPrev = last; |
| 596 | } |
| 597 | else |
| 598 | { |
| 599 | /* The block was completely empty */ |
| 600 | |
| 601 | stmt->gtPrev = stmt; |
| 602 | } |
| 603 | |
| 604 | return stmt; |
| 605 | } |
| 606 | |
| 607 | /***************************************************************************** |
| 608 | * |
| 609 | * Insert the given tree or statement at the end of the given basic block. |
| 610 | * Returns the (potentially) new GT_STMT node. |
| 611 | * If the block can be a conditional block, use fgInsertStmtNearEnd. |
| 612 | */ |
| 613 | |
| 614 | GenTreeStmt* Compiler::fgInsertStmtAtEnd(BasicBlock* block, GenTree* node) |
| 615 | { |
| 616 | GenTree* list = block->firstStmt(); |
| 617 | GenTreeStmt* stmt; |
| 618 | |
| 619 | if (node->gtOper != GT_STMT) |
| 620 | { |
| 621 | stmt = gtNewStmt(node); |
| 622 | } |
| 623 | else |
| 624 | { |
| 625 | stmt = node->AsStmt(); |
| 626 | } |
| 627 | |
| 628 | assert(stmt->gtNext == nullptr); // We don't set it, and it needs to be this after the insert |
| 629 | |
| 630 | if (list) |
| 631 | { |
| 632 | GenTree* last; |
| 633 | |
| 634 | /* There is at least one statement already */ |
| 635 | |
| 636 | last = list->gtPrev; |
| 637 | noway_assert(last && last->gtNext == nullptr); |
| 638 | |
| 639 | /* Append the statement after the last one */ |
| 640 | |
| 641 | last->gtNext = stmt; |
| 642 | stmt->gtPrev = last; |
| 643 | list->gtPrev = stmt; |
| 644 | } |
| 645 | else |
| 646 | { |
| 647 | /* The block is completely empty */ |
| 648 | |
| 649 | block->bbTreeList = stmt; |
| 650 | stmt->gtPrev = stmt; |
| 651 | } |
| 652 | |
| 653 | return stmt; |
| 654 | } |
| 655 | |
| 656 | /***************************************************************************** |
| 657 | * |
| 658 | * Insert the given tree or statement at the end of the given basic block, but before |
| 659 | * the GT_JTRUE, if present. |
| 660 | * Returns the (potentially) new GT_STMT node. |
| 661 | */ |
| 662 | |
| 663 | GenTreeStmt* Compiler::fgInsertStmtNearEnd(BasicBlock* block, GenTree* node) |
| 664 | { |
| 665 | GenTreeStmt* stmt; |
| 666 | |
| 667 | // This routine can only be used when in tree order. |
| 668 | assert(fgOrder == FGOrderTree); |
| 669 | |
| 670 | if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH) || (block->bbJumpKind == BBJ_RETURN)) |
| 671 | { |
| 672 | if (node->gtOper != GT_STMT) |
| 673 | { |
| 674 | stmt = gtNewStmt(node); |
| 675 | } |
| 676 | else |
| 677 | { |
| 678 | stmt = node->AsStmt(); |
| 679 | } |
| 680 | |
| 681 | GenTreeStmt* first = block->firstStmt(); |
| 682 | noway_assert(first); |
| 683 | GenTreeStmt* last = block->lastStmt(); |
| 684 | noway_assert(last && last->gtNext == nullptr); |
| 685 | GenTree* after = last->gtPrev; |
| 686 | |
| 687 | #if DEBUG |
| 688 | if (block->bbJumpKind == BBJ_COND) |
| 689 | { |
| 690 | noway_assert(last->gtStmtExpr->gtOper == GT_JTRUE); |
| 691 | } |
| 692 | else if (block->bbJumpKind == BBJ_RETURN) |
| 693 | { |
| 694 | noway_assert((last->gtStmtExpr->gtOper == GT_RETURN) || (last->gtStmtExpr->gtOper == GT_JMP) || |
| 695 | // BBJ_RETURN blocks in functions returning void do not get a GT_RETURN node if they |
| 696 | // have a .tail prefix (even if canTailCall returns false for these calls) |
| 697 | // code:Compiler::impImportBlockCode (search for the RET: label) |
| 698 | // Ditto for real tail calls (all code after them has been removed) |
| 699 | ((last->gtStmtExpr->gtOper == GT_CALL) && |
| 700 | ((info.compRetType == TYP_VOID) || last->gtStmtExpr->AsCall()->IsTailCall()))); |
| 701 | } |
| 702 | else |
| 703 | { |
| 704 | noway_assert(block->bbJumpKind == BBJ_SWITCH); |
| 705 | noway_assert(last->gtStmtExpr->gtOper == GT_SWITCH); |
| 706 | } |
| 707 | #endif // DEBUG |
| 708 | |
| 709 | /* Append 'stmt' before 'last' */ |
| 710 | |
| 711 | stmt->gtNext = last; |
| 712 | last->gtPrev = stmt; |
| 713 | |
| 714 | if (first == last) |
| 715 | { |
| 716 | /* There is only one stmt in the block */ |
| 717 | |
| 718 | block->bbTreeList = stmt; |
| 719 | stmt->gtPrev = last; |
| 720 | } |
| 721 | else |
| 722 | { |
| 723 | noway_assert(after && (after->gtNext == last)); |
| 724 | |
| 725 | /* Append 'stmt' after 'after' */ |
| 726 | |
| 727 | after->gtNext = stmt; |
| 728 | stmt->gtPrev = after; |
| 729 | } |
| 730 | |
| 731 | return stmt; |
| 732 | } |
| 733 | else |
| 734 | { |
| 735 | return fgInsertStmtAtEnd(block, node); |
| 736 | } |
| 737 | } |
| 738 | |
| 739 | /***************************************************************************** |
| 740 | * |
| 741 | * Insert the given statement "stmt" after GT_STMT node "insertionPoint". |
| 742 | * Returns the newly inserted GT_STMT node. |
| 743 | * Note that the gtPrev list of statement nodes is circular, but the gtNext list is not. |
| 744 | */ |
| 745 | |
| 746 | GenTree* Compiler::fgInsertStmtAfter(BasicBlock* block, GenTree* insertionPoint, GenTree* stmt) |
| 747 | { |
| 748 | assert(block->bbTreeList != nullptr); |
| 749 | noway_assert(insertionPoint->gtOper == GT_STMT); |
| 750 | noway_assert(stmt->gtOper == GT_STMT); |
| 751 | assert(fgBlockContainsStatementBounded(block, insertionPoint)); |
| 752 | assert(!fgBlockContainsStatementBounded(block, stmt, false)); |
| 753 | |
| 754 | if (insertionPoint->gtNext == nullptr) |
| 755 | { |
| 756 | // Ok, we want to insert after the last statement of the block. |
| 757 | stmt->gtNext = nullptr; |
| 758 | stmt->gtPrev = insertionPoint; |
| 759 | |
| 760 | insertionPoint->gtNext = stmt; |
| 761 | |
| 762 | // Update the backward link of the first statement of the block |
| 763 | // to point to the new last statement. |
| 764 | assert(block->bbTreeList->gtPrev == insertionPoint); |
| 765 | block->bbTreeList->gtPrev = stmt; |
| 766 | } |
| 767 | else |
| 768 | { |
| 769 | stmt->gtNext = insertionPoint->gtNext; |
| 770 | stmt->gtPrev = insertionPoint; |
| 771 | |
| 772 | insertionPoint->gtNext->gtPrev = stmt; |
| 773 | insertionPoint->gtNext = stmt; |
| 774 | } |
| 775 | |
| 776 | return stmt; |
| 777 | } |
| 778 | |
| 779 | // Insert the given tree or statement before GT_STMT node "insertionPoint". |
| 780 | // Returns the newly inserted GT_STMT node. |
| 781 | |
| 782 | GenTree* Compiler::fgInsertStmtBefore(BasicBlock* block, GenTree* insertionPoint, GenTree* stmt) |
| 783 | { |
| 784 | assert(block->bbTreeList != nullptr); |
| 785 | noway_assert(insertionPoint->gtOper == GT_STMT); |
| 786 | noway_assert(stmt->gtOper == GT_STMT); |
| 787 | assert(fgBlockContainsStatementBounded(block, insertionPoint)); |
| 788 | assert(!fgBlockContainsStatementBounded(block, stmt, false)); |
| 789 | |
| 790 | if (insertionPoint == block->bbTreeList) |
| 791 | { |
| 792 | // We're inserting before the first statement in the block. |
| 793 | GenTree* list = block->bbTreeList; |
| 794 | GenTree* last = list->gtPrev; |
| 795 | |
| 796 | stmt->gtNext = list; |
| 797 | stmt->gtPrev = last; |
| 798 | |
| 799 | block->bbTreeList = stmt; |
| 800 | list->gtPrev = stmt; |
| 801 | } |
| 802 | else |
| 803 | { |
| 804 | stmt->gtNext = insertionPoint; |
| 805 | stmt->gtPrev = insertionPoint->gtPrev; |
| 806 | |
| 807 | insertionPoint->gtPrev->gtNext = stmt; |
| 808 | insertionPoint->gtPrev = stmt; |
| 809 | } |
| 810 | |
| 811 | return stmt; |
| 812 | } |
| 813 | |
| 814 | /***************************************************************************** |
| 815 | * |
| 816 | * Insert the list of statements stmtList after the stmtAfter in block. |
| 817 | * Return the last statement of stmtList. |
| 818 | */ |
| 819 | |
| 820 | GenTree* Compiler::fgInsertStmtListAfter(BasicBlock* block, // the block where stmtAfter is in. |
| 821 | GenTree* stmtAfter, // the statement where stmtList should be inserted |
| 822 | // after. |
| 823 | GenTree* stmtList) |
| 824 | { |
| 825 | // Currently we only handle the case where both stmtAfter and stmtList are non-NULL. This makes everything easy. |
| 826 | noway_assert(stmtAfter && stmtAfter->gtOper == GT_STMT); |
| 827 | noway_assert(stmtList && stmtList->gtOper == GT_STMT); |
| 828 | |
| 829 | GenTree* stmtLast = stmtList->gtPrev; // Last statement in a non-empty list, circular in the gtPrev list. |
| 830 | noway_assert(stmtLast); |
| 831 | noway_assert(stmtLast->gtNext == nullptr); |
| 832 | |
| 833 | GenTree* stmtNext = stmtAfter->gtNext; |
| 834 | |
| 835 | if (!stmtNext) |
| 836 | { |
| 837 | stmtAfter->gtNext = stmtList; |
| 838 | stmtList->gtPrev = stmtAfter; |
| 839 | block->bbTreeList->gtPrev = stmtLast; |
| 840 | goto _Done; |
| 841 | } |
| 842 | |
| 843 | stmtAfter->gtNext = stmtList; |
| 844 | stmtList->gtPrev = stmtAfter; |
| 845 | |
| 846 | stmtLast->gtNext = stmtNext; |
| 847 | stmtNext->gtPrev = stmtLast; |
| 848 | |
| 849 | _Done: |
| 850 | |
| 851 | noway_assert(block->bbTreeList == nullptr || block->bbTreeList->gtPrev->gtNext == nullptr); |
| 852 | |
| 853 | return stmtLast; |
| 854 | } |
| 855 | |
| 856 | /* |
| 857 | Removes a block from the return block list |
| 858 | */ |
| 859 | void Compiler::fgRemoveReturnBlock(BasicBlock* block) |
| 860 | { |
| 861 | if (fgReturnBlocks == nullptr) |
| 862 | { |
| 863 | return; |
| 864 | } |
| 865 | |
| 866 | if (fgReturnBlocks->block == block) |
| 867 | { |
| 868 | // It's the 1st entry, assign new head of list. |
| 869 | fgReturnBlocks = fgReturnBlocks->next; |
| 870 | return; |
| 871 | } |
| 872 | |
| 873 | for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks->next != nullptr; retBlocks = retBlocks->next) |
| 874 | { |
| 875 | if (retBlocks->next->block == block) |
| 876 | { |
| 877 | // Found it; splice it out. |
| 878 | retBlocks->next = retBlocks->next->next; |
| 879 | return; |
| 880 | } |
| 881 | } |
| 882 | } |
| 883 | |
| 884 | //------------------------------------------------------------------------ |
| 885 | // fgGetPredForBlock: Find and return the predecessor edge corresponding to a given predecessor block. |
| 886 | // |
| 887 | // Arguments: |
| 888 | // block -- The block with the predecessor list to operate on. |
| 889 | // blockPred -- The predecessor block to find in the predecessor list. |
| 890 | // |
| 891 | // Return Value: |
| 892 | // The flowList edge corresponding to "blockPred". If "blockPred" is not in the predecessor list of "block", |
| 893 | // then returns nullptr. |
| 894 | // |
| 895 | // Assumptions: |
| 896 | // -- This only works on the full predecessor lists, not the cheap preds lists. |
| 897 | |
| 898 | flowList* Compiler::fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred) |
| 899 | { |
| 900 | assert(block); |
| 901 | assert(blockPred); |
| 902 | assert(!fgCheapPredsValid); |
| 903 | |
| 904 | flowList* pred; |
| 905 | |
| 906 | for (pred = block->bbPreds; pred != nullptr; pred = pred->flNext) |
| 907 | { |
| 908 | if (blockPred == pred->flBlock) |
| 909 | { |
| 910 | return pred; |
| 911 | } |
| 912 | } |
| 913 | |
| 914 | return nullptr; |
| 915 | } |
| 916 | |
| 917 | //------------------------------------------------------------------------ |
| 918 | // fgGetPredForBlock: Find and return the predecessor edge corresponding to a given predecessor block. |
| 919 | // Also returns the address of the pointer that points to this edge, to make it possible to remove this edge from the |
| 920 | // predecessor list without doing another linear search over the edge list. |
| 921 | // |
| 922 | // Arguments: |
| 923 | // block -- The block with the predecessor list to operate on. |
| 924 | // blockPred -- The predecessor block to find in the predecessor list. |
| 925 | // ptrToPred -- Out parameter: set to the address of the pointer that points to the returned predecessor edge. |
| 926 | // |
| 927 | // Return Value: |
| 928 | // The flowList edge corresponding to "blockPred". If "blockPred" is not in the predecessor list of "block", |
| 929 | // then returns nullptr. |
| 930 | // |
| 931 | // Assumptions: |
| 932 | // -- This only works on the full predecessor lists, not the cheap preds lists. |
| 933 | |
| 934 | flowList* Compiler::fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred) |
| 935 | { |
| 936 | assert(block); |
| 937 | assert(blockPred); |
| 938 | assert(ptrToPred); |
| 939 | assert(!fgCheapPredsValid); |
| 940 | |
| 941 | flowList** predPrevAddr; |
| 942 | flowList* pred; |
| 943 | |
| 944 | for (predPrevAddr = &block->bbPreds, pred = *predPrevAddr; pred != nullptr; |
| 945 | predPrevAddr = &pred->flNext, pred = *predPrevAddr) |
| 946 | { |
| 947 | if (blockPred == pred->flBlock) |
| 948 | { |
| 949 | *ptrToPred = predPrevAddr; |
| 950 | return pred; |
| 951 | } |
| 952 | } |
| 953 | |
| 954 | *ptrToPred = nullptr; |
| 955 | return nullptr; |
| 956 | } |
| 957 | |
| 958 | //------------------------------------------------------------------------ |
| 959 | // fgSpliceOutPred: Removes a predecessor edge for a block from the predecessor list. |
| 960 | // |
| 961 | // Arguments: |
| 962 | // block -- The block with the predecessor list to operate on. |
| 963 | // blockPred -- The predecessor block to remove from the predecessor list. It must be a predecessor of "block". |
| 964 | // |
| 965 | // Return Value: |
| 966 | // The flowList edge that was removed. |
| 967 | // |
| 968 | // Assumptions: |
| 969 | // -- "blockPred" must be a predecessor block of "block". |
| 970 | // -- This simply splices out the flowList object. It doesn't update block ref counts, handle duplicate counts, etc. |
| 971 | // For that, use fgRemoveRefPred() or fgRemoveAllRefPred(). |
| 972 | // -- This only works on the full predecessor lists, not the cheap preds lists. |
| 973 | // |
| 974 | // Notes: |
| 975 | // -- This must walk the predecessor list to find the block in question. If the predecessor edge |
| 976 | // is found using fgGetPredForBlock(), consider using the version that hands back the predecessor pointer |
| 977 | // address instead, to avoid this search. |
| 978 | // -- Marks fgModified = true, since the flow graph has changed. |
| 979 | |
| 980 | flowList* Compiler::fgSpliceOutPred(BasicBlock* block, BasicBlock* blockPred) |
| 981 | { |
| 982 | assert(!fgCheapPredsValid); |
| 983 | noway_assert(block->bbPreds); |
| 984 | |
| 985 | flowList* oldEdge = nullptr; |
| 986 | |
| 987 | // Is this the first block in the pred list? |
| 988 | if (blockPred == block->bbPreds->flBlock) |
| 989 | { |
| 990 | oldEdge = block->bbPreds; |
| 991 | block->bbPreds = block->bbPreds->flNext; |
| 992 | } |
| 993 | else |
| 994 | { |
| 995 | flowList* pred; |
| 996 | for (pred = block->bbPreds; (pred->flNext != nullptr) && (blockPred != pred->flNext->flBlock); |
| 997 | pred = pred->flNext) |
| 998 | { |
| 999 | // empty |
| 1000 | } |
| 1001 | oldEdge = pred->flNext; |
| 1002 | if (oldEdge == nullptr) |
| 1003 | { |
| 1004 | noway_assert(!"Should always find the blockPred"); |
| 1005 | } |
| 1006 | pred->flNext = pred->flNext->flNext; |
| 1007 | } |
| 1008 | |
| 1009 | // Any changes to the flow graph invalidate the dominator sets. |
| 1010 | fgModified = true; |
| 1011 | |
| 1012 | return oldEdge; |
| 1013 | } |
| 1014 | |
| 1015 | //------------------------------------------------------------------------ |
| 1016 | // fgAddRefPred: Increment block->bbRefs by one and add "blockPred" to the predecessor list of "block". |
| 1017 | // |
| 1018 | // Arguments: |
| 1019 | // block -- A block to operate on. |
| 1020 | // blockPred -- The predecessor block to add to the predecessor list. |
| 1021 | // oldEdge -- Optional (default: nullptr). If non-nullptr, and a new edge is created (and the dup count |
| 1022 | // of an existing edge is not just incremented), the edge weights are copied from this edge. |
| 1023 | // initializingPreds -- Optional (default: false). Only set to "true" when the initial preds computation is |
| 1024 | // happening. |
| 1025 | // |
| 1026 | // Return Value: |
| 1027 | // The flow edge representing the predecessor. |
| 1028 | // |
| 1029 | // Assumptions: |
| 1030 | // -- This only works on the full predecessor lists, not the cheap preds lists. |
| 1031 | // |
| 1032 | // Notes: |
| 1033 | // -- block->bbRefs is incremented by one to account for the increase in incoming edges. |
| 1034 | // -- block->bbRefs is adjusted even if preds haven't been computed. If preds haven't been computed, |
| 1035 | // the preds themselves aren't touched. |
| 1036 | // -- fgModified is set if a new flow edge is created (but not if an existing flow edge dup count is incremented), |
| 1037 | // indicating that the flow graph shape has changed. |
| 1038 | |
| 1039 | flowList* Compiler::fgAddRefPred(BasicBlock* block, |
| 1040 | BasicBlock* blockPred, |
| 1041 | flowList* oldEdge /* = nullptr */, |
| 1042 | bool initializingPreds /* = false */) |
| 1043 | { |
| 1044 | assert(block != nullptr); |
| 1045 | assert(blockPred != nullptr); |
| 1046 | |
| 1047 | block->bbRefs++; |
| 1048 | |
| 1049 | if (!fgComputePredsDone && !initializingPreds) |
| 1050 | { |
| 1051 | // Why is someone trying to update the preds list when the preds haven't been created? |
| 1052 | // Ignore them! This can happen when fgMorph is called before the preds list is created. |
| 1053 | return nullptr; |
| 1054 | } |
| 1055 | |
| 1056 | assert(!fgCheapPredsValid); |
| 1057 | |
| 1058 | flowList* flow; |
| 1059 | |
| 1060 | // Keep the predecessor list in lowest to highest bbNum order. This allows us to discover the loops in |
| 1061 | // optFindNaturalLoops from innermost to outermost. |
| 1062 | // |
| 1063 | // TODO-Throughput: Inserting an edge for a block in sorted order requires searching every existing edge. |
| 1064 | // Thus, inserting all the edges for a block is quadratic in the number of edges. We need to either |
| 1065 | // not bother sorting for debuggable code, or sort in optFindNaturalLoops, or better, make the code in |
| 1066 | // optFindNaturalLoops not depend on order. This also requires ensuring that nobody else has taken a |
| 1067 | // dependency on this order. Note also that we don't allow duplicates in the list; we maintain a flDupCount |
| 1068 | // count of duplication. This also necessitates walking the flow list for every edge we add. |
| 1069 | |
| 1070 | flowList** listp = &block->bbPreds; |
| 1071 | while ((*listp != nullptr) && ((*listp)->flBlock->bbNum < blockPred->bbNum)) |
| 1072 | { |
| 1073 | listp = &(*listp)->flNext; |
| 1074 | } |
| 1075 | |
| 1076 | if ((*listp != nullptr) && ((*listp)->flBlock == blockPred)) |
| 1077 | { |
| 1078 | // The predecessor block already exists in the flow list; simply add to its duplicate count. |
| 1079 | flow = *listp; |
| 1080 | noway_assert(flow->flDupCount > 0); |
| 1081 | flow->flDupCount++; |
| 1082 | } |
| 1083 | else |
| 1084 | { |
| 1085 | flow = new (this, CMK_FlowList) flowList(); |
| 1086 | |
| 1087 | #if MEASURE_BLOCK_SIZE |
| 1088 | genFlowNodeCnt += 1; |
| 1089 | genFlowNodeSize += sizeof(flowList); |
| 1090 | #endif // MEASURE_BLOCK_SIZE |
| 1091 | |
| 1092 | // Any changes to the flow graph invalidate the dominator sets. |
| 1093 | fgModified = true; |
| 1094 | |
| 1095 | // Insert the new edge in the list in the correct ordered location. |
| 1096 | flow->flNext = *listp; |
| 1097 | *listp = flow; |
| 1098 | |
| 1099 | flow->flBlock = blockPred; |
| 1100 | flow->flDupCount = 1; |
| 1101 | |
| 1102 | if (fgHaveValidEdgeWeights) |
| 1103 | { |
| 1104 | // We are creating an edge from blockPred to block |
| 1105 | // and we have already computed the edge weights, so |
| 1106 | // we will try to setup this new edge with valid edge weights. |
| 1107 | // |
| 1108 | if (oldEdge != nullptr) |
| 1109 | { |
| 1110 | // If our caller has given us the old edge weights |
| 1111 | // then we will use them. |
| 1112 | // |
| 1113 | flow->flEdgeWeightMin = oldEdge->flEdgeWeightMin; |
| 1114 | flow->flEdgeWeightMax = oldEdge->flEdgeWeightMax; |
| 1115 | } |
| 1116 | else |
| 1117 | { |
| 1118 | // Set the max edge weight to be the minimum of block's or blockPred's weight |
| 1119 | // |
| 1120 | flow->flEdgeWeightMax = min(block->bbWeight, blockPred->bbWeight); |
| 1121 | |
| 1122 | // If the predecessor has multiple successors, the minimum edge weight is zero; |
| 1123 | // otherwise it is the same as the edge's max weight. |
| 1124 | if (blockPred->NumSucc() > 1) |
| 1125 | { |
| 1126 | flow->flEdgeWeightMin = BB_ZERO_WEIGHT; |
| 1127 | } |
| 1128 | else |
| 1129 | { |
| 1130 | flow->flEdgeWeightMin = flow->flEdgeWeightMax; |
| 1131 | } |
| 1132 | } |
| 1133 | } |
| 1134 | else |
| 1135 | { |
| 1136 | flow->flEdgeWeightMin = BB_ZERO_WEIGHT; |
| 1137 | flow->flEdgeWeightMax = BB_MAX_WEIGHT; |
| 1138 | } |
| 1139 | } |
| 1140 | return flow; |
| 1141 | } |
| 1142 | |
| 1143 | //------------------------------------------------------------------------ |
| 1144 | // fgRemoveRefPred: Decrements the reference count of a predecessor edge from "blockPred" to "block", |
| 1145 | // removing the edge if it is no longer necessary. |
| 1146 | // |
| 1147 | // Arguments: |
| 1148 | // block -- A block to operate on. |
| 1149 | // blockPred -- The predecessor block to remove from the predecessor list. It must be a predecessor of "block". |
| 1150 | // |
| 1151 | // Return Value: |
| 1152 | // If the flow edge was removed (the predecessor has a "dup count" of 1), |
| 1153 | // returns the flow graph edge that was removed. This means "blockPred" is no longer a predecessor of "block". |
| 1154 | // Otherwise, returns nullptr. This means that "blockPred" is still a predecessor of "block" (because "blockPred" |
| 1155 | // is a switch with multiple cases jumping to "block", or a BBJ_COND with both conditional and fall-through |
| 1156 | // paths leading to "block"). |
| 1157 | // |
| 1158 | // Assumptions: |
| 1159 | // -- "blockPred" must be a predecessor block of "block". |
| 1160 | // -- This only works on the full predecessor lists, not the cheap preds lists. |
| 1161 | // |
| 1162 | // Notes: |
| 1163 | // -- block->bbRefs is decremented by one to account for the reduction in incoming edges. |
| 1164 | // -- block->bbRefs is adjusted even if preds haven't been computed. If preds haven't been computed, |
| 1165 | // the preds themselves aren't touched. |
| 1166 | // -- fgModified is set if a flow edge is removed (but not if an existing flow edge dup count is decremented), |
| 1167 | // indicating that the flow graph shape has changed. |
| 1168 | |
| 1169 | flowList* Compiler::fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred) |
| 1170 | { |
| 1171 | noway_assert(block != nullptr); |
| 1172 | noway_assert(blockPred != nullptr); |
| 1173 | |
| 1174 | noway_assert(block->countOfInEdges() > 0); |
| 1175 | block->bbRefs--; |
| 1176 | |
| 1177 | // Do nothing if we haven't calculated the predecessor list yet. |
| 1178 | // Yes, this does happen. |
| 1179 | // For example the predecessor lists haven't been created yet when we do fgMorph. |
| 1180 | // But fgMorph calls fgFoldConditional, which in turn calls fgRemoveRefPred. |
| 1181 | if (!fgComputePredsDone) |
| 1182 | { |
| 1183 | return nullptr; |
| 1184 | } |
| 1185 | |
| 1186 | assert(!fgCheapPredsValid); |
| 1187 | |
| 1188 | flowList** ptrToPred; |
| 1189 | flowList* pred = fgGetPredForBlock(block, blockPred, &ptrToPred); |
| 1190 | noway_assert(pred); |
| 1191 | noway_assert(pred->flDupCount > 0); |
| 1192 | |
| 1193 | pred->flDupCount--; |
| 1194 | |
| 1195 | if (pred->flDupCount == 0) |
| 1196 | { |
| 1197 | // Splice out the predecessor edge since it's no longer necessary. |
| 1198 | *ptrToPred = pred->flNext; |
| 1199 | |
| 1200 | // Any changes to the flow graph invalidate the dominator sets. |
| 1201 | fgModified = true; |
| 1202 | |
| 1203 | return pred; |
| 1204 | } |
| 1205 | else |
| 1206 | { |
| 1207 | return nullptr; |
| 1208 | } |
| 1209 | } |
| 1210 | |
| 1211 | //------------------------------------------------------------------------ |
| 1212 | // fgRemoveAllRefPreds: Removes a predecessor edge from one block to another, no matter what the "dup count" is. |
| 1213 | // |
| 1214 | // Arguments: |
| 1215 | // block -- A block to operate on. |
| 1216 | // blockPred -- The predecessor block to remove from the predecessor list. It must be a predecessor of "block". |
| 1217 | // |
| 1218 | // Return Value: |
| 1219 | // Returns the flow graph edge that was removed. The dup count on the edge is no longer valid. |
| 1220 | // |
| 1221 | // Assumptions: |
| 1222 | // -- "blockPred" must be a predecessor block of "block". |
| 1223 | // -- This only works on the full predecessor lists, not the cheap preds lists. |
| 1224 | // |
| 1225 | // Notes: |
| 1226 | // block->bbRefs is decremented to account for the reduction in incoming edges. |
| 1227 | |
| 1228 | flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred) |
| 1229 | { |
| 1230 | assert(block != nullptr); |
| 1231 | assert(blockPred != nullptr); |
| 1232 | assert(fgComputePredsDone); |
| 1233 | assert(!fgCheapPredsValid); |
| 1234 | assert(block->countOfInEdges() > 0); |
| 1235 | |
| 1236 | flowList** ptrToPred; |
| 1237 | flowList* pred = fgGetPredForBlock(block, blockPred, &ptrToPred); |
| 1238 | assert(pred != nullptr); |
| 1239 | assert(pred->flDupCount > 0); |
| 1240 | |
| 1241 | assert(block->bbRefs >= pred->flDupCount); |
| 1242 | block->bbRefs -= pred->flDupCount; |
| 1243 | |
| 1244 | // Now splice out the predecessor edge. |
| 1245 | *ptrToPred = pred->flNext; |
| 1246 | |
| 1247 | // Any changes to the flow graph invalidate the dominator sets. |
| 1248 | fgModified = true; |
| 1249 | |
| 1250 | return pred; |
| 1251 | } |
| 1252 | |
| 1253 | //------------------------------------------------------------------------ |
| 1254 | // fgRemoveAllRefPreds: Remove a predecessor edge, given the address of a pointer to it in the |
| 1255 | // predecessor list, no matter what the "dup count" is. |
| 1256 | // |
| 1257 | // Arguments: |
| 1258 | // block -- A block with the predecessor list to operate on. |
| 1259 | // ptrToPred -- The address of a pointer to the predecessor to remove. |
| 1260 | // |
| 1261 | // Return Value: |
| 1262 | // The removed predecessor edge. The dup count on the edge is no longer valid. |
| 1263 | // |
| 1264 | // Assumptions: |
| 1265 | // -- The predecessor edge must be in the predecessor list for "block". |
| 1266 | // -- This only works on the full predecessor lists, not the cheap preds lists. |
| 1267 | // |
| 1268 | // Notes: |
| 1269 | // block->bbRefs is decremented by the dup count of the predecessor edge, to account for the reduction in incoming |
| 1270 | // edges. |
| 1271 | |
| 1272 | flowList* Compiler::fgRemoveAllRefPreds(BasicBlock* block, flowList** ptrToPred) |
| 1273 | { |
| 1274 | assert(block != nullptr); |
| 1275 | assert(ptrToPred != nullptr); |
| 1276 | assert(fgComputePredsDone); |
| 1277 | assert(!fgCheapPredsValid); |
| 1278 | assert(block->countOfInEdges() > 0); |
| 1279 | |
| 1280 | flowList* pred = *ptrToPred; |
| 1281 | assert(pred != nullptr); |
| 1282 | assert(pred->flDupCount > 0); |
| 1283 | |
| 1284 | assert(block->bbRefs >= pred->flDupCount); |
| 1285 | block->bbRefs -= pred->flDupCount; |
| 1286 | |
| 1287 | // Now splice out the predecessor edge. |
| 1288 | *ptrToPred = pred->flNext; |
| 1289 | |
| 1290 | // Any changes to the flow graph invalidate the dominator sets. |
| 1291 | fgModified = true; |
| 1292 | |
| 1293 | return pred; |
| 1294 | } |
| 1295 | |
| 1296 | /* |
| 1297 | Removes all the appearances of block as predecessor of others |
| 1298 | */ |
| 1299 | |
| 1300 | void Compiler::fgRemoveBlockAsPred(BasicBlock* block) |
| 1301 | { |
| 1302 | assert(!fgCheapPredsValid); |
| 1303 | |
| 1304 | PREFIX_ASSUME(block != nullptr); |
| 1305 | |
| 1306 | BasicBlock* bNext; |
| 1307 | |
| 1308 | switch (block->bbJumpKind) |
| 1309 | { |
| 1310 | case BBJ_CALLFINALLY: |
| 1311 | if (!(block->bbFlags & BBF_RETLESS_CALL)) |
| 1312 | { |
| 1313 | assert(block->isBBCallAlwaysPair()); |
| 1314 | |
| 1315 | /* The block after the BBJ_CALLFINALLY block is not reachable */ |
| 1316 | bNext = block->bbNext; |
| 1317 | |
| 1318 | /* bNext is an unreachable BBJ_ALWAYS block */ |
| 1319 | noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); |
| 1320 | |
| 1321 | while (bNext->countOfInEdges() > 0) |
| 1322 | { |
| 1323 | fgRemoveRefPred(bNext, bNext->bbPreds->flBlock); |
| 1324 | } |
| 1325 | } |
| 1326 | |
| 1327 | __fallthrough; |
| 1328 | |
| 1329 | case BBJ_COND: |
| 1330 | case BBJ_ALWAYS: |
| 1331 | case BBJ_EHCATCHRET: |
| 1332 | |
| 1333 | /* Update the predecessor list for 'block->bbJumpDest' and 'block->bbNext' */ |
| 1334 | fgRemoveRefPred(block->bbJumpDest, block); |
| 1335 | |
| 1336 | if (block->bbJumpKind != BBJ_COND) |
| 1337 | { |
| 1338 | break; |
| 1339 | } |
| 1340 | |
| 1341 | /* A BBJ_COND also falls through to bbNext, so handle that edge below */ |
| 1342 | __fallthrough; |
| 1343 | |
| 1344 | case BBJ_NONE: |
| 1345 | |
| 1346 | /* Update the predecessor list for 'block->bbNext' */ |
| 1347 | fgRemoveRefPred(block->bbNext, block); |
| 1348 | break; |
| 1349 | |
| 1350 | case BBJ_EHFILTERRET: |
| 1351 | |
| 1352 | block->bbJumpDest->bbRefs++; // To compensate for the bbRefs-- inside fgRemoveRefPred |
| 1353 | fgRemoveRefPred(block->bbJumpDest, block); |
| 1354 | break; |
| 1355 | |
| 1356 | case BBJ_EHFINALLYRET: |
| 1357 | { |
| 1358 | /* Remove block as the predecessor of the bbNext of all |
| 1359 | BBJ_CALLFINALLY blocks calling this finally. No need |
| 1360 | to look for BBJ_CALLFINALLY for fault handlers. */ |
| 1361 | |
| 1362 | unsigned hndIndex = block->getHndIndex(); |
| 1363 | EHblkDsc* ehDsc = ehGetDsc(hndIndex); |
| 1364 | |
| 1365 | if (ehDsc->HasFinallyHandler()) |
| 1366 | { |
| 1367 | BasicBlock* begBlk; |
| 1368 | BasicBlock* endBlk; |
| 1369 | ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); |
| 1370 | |
| 1371 | BasicBlock* finBeg = ehDsc->ebdHndBeg; |
| 1372 | |
| 1373 | for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) |
| 1374 | { |
| 1375 | if ((bcall->bbFlags & BBF_REMOVED) || bcall->bbJumpKind != BBJ_CALLFINALLY || |
| 1376 | bcall->bbJumpDest != finBeg) |
| 1377 | { |
| 1378 | continue; |
| 1379 | } |
| 1380 | |
| 1381 | assert(bcall->isBBCallAlwaysPair()); |
| 1382 | fgRemoveRefPred(bcall->bbNext, block); |
| 1383 | } |
| 1384 | } |
| 1385 | } |
| 1386 | break; |
| 1387 | |
| 1388 | case BBJ_THROW: |
| 1389 | case BBJ_RETURN: |
| 1390 | break; |
| 1391 | |
| 1392 | case BBJ_SWITCH: |
| 1393 | { |
| 1394 | unsigned jumpCnt = block->bbJumpSwt->bbsCount; |
| 1395 | BasicBlock** jumpTab = block->bbJumpSwt->bbsDstTab; |
| 1396 | |
| 1397 | do |
| 1398 | { |
| 1399 | fgRemoveRefPred(*jumpTab, block); |
| 1400 | } while (++jumpTab, --jumpCnt); |
| 1401 | |
| 1402 | break; |
| 1403 | } |
| 1404 | |
| 1405 | default: |
| 1406 | noway_assert(!"Block doesn't have a valid bbJumpKind!!!!"); |
| 1407 | break; |
| 1408 | } |
| 1409 | } |
| 1410 | |
| 1411 | /***************************************************************************** |
| 1412 | * fgChangeSwitchBlock: |
| 1413 | * |
| 1414 | * We have a BBJ_SWITCH jump at 'oldSwitchBlock' and we want to move this |
| 1415 | * switch jump over to 'newSwitchBlock'. All of the blocks that are jumped |
| 1416 | * to from jumpTab[] need to have their predecessor lists updated by removing |
| 1417 | * the 'oldSwitchBlock' and adding 'newSwitchBlock'. |
| 1418 | */ |
| 1419 | |
| 1420 | void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock) |
| 1421 | { |
| 1422 | noway_assert(oldSwitchBlock != nullptr); |
| 1423 | noway_assert(newSwitchBlock != nullptr); |
| 1424 | noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH); |
| 1425 | |
| 1426 | unsigned jumpCnt = oldSwitchBlock->bbJumpSwt->bbsCount; |
| 1427 | BasicBlock** jumpTab = oldSwitchBlock->bbJumpSwt->bbsDstTab; |
| 1428 | |
| 1429 | unsigned i; |
| 1430 | |
| 1431 | // Walk the switch's jump table, updating the predecessor for each branch. |
| 1432 | for (i = 0; i < jumpCnt; i++) |
| 1433 | { |
| 1434 | BasicBlock* bJump = jumpTab[i]; |
| 1435 | noway_assert(bJump != nullptr); |
| 1436 | |
| 1437 | // Note that if there are duplicate branch targets in the switch jump table, |
| 1438 | // fgRemoveRefPred()/fgAddRefPred() will do the right thing: the second and |
| 1439 | // subsequent duplicates will simply subtract from and add to the duplicate |
| 1440 | // count (respectively). |
| 1441 | |
| 1442 | // |
| 1443 | // Remove the old edge [oldSwitchBlock => bJump] |
| 1444 | // |
| 1445 | fgRemoveRefPred(bJump, oldSwitchBlock); |
| 1446 | |
| 1447 | // |
| 1448 | // Create the new edge [newSwitchBlock => bJump] |
| 1449 | // |
| 1450 | fgAddRefPred(bJump, newSwitchBlock); |
| 1451 | } |
| 1452 | |
| 1453 | if (m_switchDescMap != nullptr) |
| 1454 | { |
| 1455 | SwitchUniqueSuccSet uniqueSuccSet; |
| 1456 | |
| 1457 | // If we have already computed and cached the unique successor descriptors for the old block, |
| 1458 | // transfer them to the new block. |
| 1459 | if (m_switchDescMap->Lookup(oldSwitchBlock, &uniqueSuccSet)) |
| 1460 | { |
| 1461 | m_switchDescMap->Set(newSwitchBlock, uniqueSuccSet); |
| 1462 | } |
| 1463 | else |
| 1464 | { |
| 1465 | fgInvalidateSwitchDescMapEntry(newSwitchBlock); |
| 1466 | } |
| 1467 | fgInvalidateSwitchDescMapEntry(oldSwitchBlock); |
| 1468 | } |
| 1469 | } |
| 1470 | |
| 1471 | /***************************************************************************** |
| 1472 | * fgReplaceSwitchJumpTarget: |
| 1473 | * |
| 1474 | * We have a BBJ_SWITCH at 'blockSwitch' and we want to replace all entries |
| 1475 | * in the jumpTab[] so that jumps that previously went to |
| 1476 | * 'oldTarget' now go to 'newTarget'. |
| 1477 | * We also must update the predecessor lists for 'oldTarget' and 'newTarget'. |
| 1478 | */ |
| 1479 | |
| 1480 | void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget) |
| 1481 | { |
| 1482 | noway_assert(blockSwitch != nullptr); |
| 1483 | noway_assert(newTarget != nullptr); |
| 1484 | noway_assert(oldTarget != nullptr); |
| 1485 | noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH); |
| 1486 | |
| 1487 | // Retarget every jump table entry that currently branches to 'oldTarget' so that it |
| 1488 | // branches to 'newTarget', and update the predecessor lists accordingly. |
| 1489 | // |
| 1490 | |
| 1491 | unsigned jumpCnt = blockSwitch->bbJumpSwt->bbsCount; |
| 1492 | BasicBlock** jumpTab = blockSwitch->bbJumpSwt->bbsDstTab; |
| 1493 | |
| 1494 | unsigned i = 0; |
| 1495 | |
| 1496 | // Walk the switch's jump table looking for blocks to update the preds for |
| 1497 | while (i < jumpCnt) |
| 1498 | { |
| 1499 | if (jumpTab[i] == oldTarget) // We will update when jumpTab[i] matches |
| 1500 | { |
| 1501 | // Remove the old edge [oldTarget from blockSwitch] |
| 1502 | // |
| 1503 | fgRemoveAllRefPreds(oldTarget, blockSwitch); |
| 1504 | |
| 1505 | // |
| 1506 | // Change the jumpTab entry to branch to the new location |
| 1507 | // |
| 1508 | jumpTab[i] = newTarget; |
| 1509 | |
| 1510 | // |
| 1511 | // Create the new edge [newTarget from blockSwitch] |
| 1512 | // |
| 1513 | flowList* newEdge = fgAddRefPred(newTarget, blockSwitch); |
| 1514 | |
| 1515 | // Now set the correct value of newEdge->flDupCount |
| 1516 | // and replace any other jumps in jumpTab[] that go to oldTarget. |
| 1517 | // |
| 1518 | i++; |
| 1519 | while (i < jumpCnt) |
| 1520 | { |
| 1521 | if (jumpTab[i] == oldTarget) |
| 1522 | { |
| 1523 | // |
| 1524 | // We also must update this entry in the jumpTab |
| 1525 | // |
| 1526 | jumpTab[i] = newTarget; |
| 1527 | newTarget->bbRefs++; |
| 1528 | |
| 1529 | // |
| 1530 | // Increment the flDupCount |
| 1531 | // |
| 1532 | newEdge->flDupCount++; |
| 1533 | } |
| 1534 | i++; // Check the next entry in jumpTab[] |
| 1535 | } |
| 1536 | |
| 1537 | // Maintain, if necessary, the set of unique targets of "block." |
| 1538 | UpdateSwitchTableTarget(blockSwitch, oldTarget, newTarget); |
| 1539 | |
| 1540 | // Make sure the new target has the proper bits set for being a branch target. |
| 1541 | newTarget->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET; |
| 1542 | |
| 1543 | return; // We have replaced the jumps to oldTarget with newTarget |
| 1544 | } |
| 1545 | i++; // Check the next entry in jumpTab[] for a match |
| 1546 | } |
| 1547 | noway_assert(!"Did not find oldTarget in jumpTab[]"); |
| 1548 | } |
| 1549 | |
| 1550 | //------------------------------------------------------------------------ |
| 1551 | // Compiler::fgReplaceJumpTarget: For a given block, replace the target 'oldTarget' with 'newTarget'. |
| 1552 | // |
| 1553 | // Arguments: |
| 1554 | // block - the block in which a jump target will be replaced. |
| 1555 | // newTarget - the new branch target of the block. |
| 1556 | // oldTarget - the old branch target of the block. |
| 1557 | // |
| 1558 | // Notes: |
| 1559 | // 1. Only branches are changed: BBJ_ALWAYS, the non-fallthrough path of BBJ_COND, BBJ_SWITCH, etc. |
| 1560 | // We ignore other block types. |
| 1561 | // 2. Only the first target found is updated. If there are multiple ways for a block |
| 1562 | // to reach 'oldTarget' (e.g., multiple arms of a switch), only the first one found is changed. |
| 1563 | // 3. The predecessor lists are not changed. |
| 1564 | // 4. The switch table "unique successor" cache is invalidated. |
| 1565 | // |
| 1566 | // This function is most useful early, before the full predecessor lists have been computed. |
| 1567 | // |
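| | // For illustration only (hypothetical blocks), a typical call made while rewriting a |
| | // branch during importation looks like: |
| | // |
| | //     fgReplaceJumpTarget(block, newDest, oldDest); |
| | // |
| | // Because the predecessor lists are not updated (see note 3 above), callers that run |
| | // after fgComputePreds must fix up the pred lists themselves. |
| | // |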
| 1568 | void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget) |
| 1569 | { |
| 1570 | assert(block != nullptr); |
| 1571 | |
| 1572 | switch (block->bbJumpKind) |
| 1573 | { |
| 1574 | case BBJ_CALLFINALLY: |
| 1575 | case BBJ_COND: |
| 1576 | case BBJ_ALWAYS: |
| 1577 | case BBJ_EHCATCHRET: |
| 1578 | case BBJ_EHFILTERRET: |
| 1579 | case BBJ_LEAVE: // This function will be called before import, so we still have BBJ_LEAVE |
| 1580 | |
| 1581 | if (block->bbJumpDest == oldTarget) |
| 1582 | { |
| 1583 | block->bbJumpDest = newTarget; |
| 1584 | } |
| 1585 | break; |
| 1586 | |
| 1587 | case BBJ_NONE: |
| 1588 | case BBJ_EHFINALLYRET: |
| 1589 | case BBJ_THROW: |
| 1590 | case BBJ_RETURN: |
| 1591 | break; |
| 1592 | |
| 1593 | case BBJ_SWITCH: |
| 1594 | unsigned jumpCnt; |
| 1595 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 1596 | BasicBlock** jumpTab; |
| 1597 | jumpTab = block->bbJumpSwt->bbsDstTab; |
| 1598 | |
| 1599 | for (unsigned i = 0; i < jumpCnt; i++) |
| 1600 | { |
| 1601 | if (jumpTab[i] == oldTarget) |
| 1602 | { |
| 1603 | jumpTab[i] = newTarget; |
| 1604 | break; |
| 1605 | } |
| 1606 | } |
| 1607 | break; |
| 1608 | |
| 1609 | default: |
| 1610 | assert(!"Block doesn't have a valid bbJumpKind!!!!" ); |
| 1611 | unreached(); |
| 1612 | break; |
| 1613 | } |
| 1614 | } |
| 1615 | |
| 1616 | /***************************************************************************** |
| 1617 | * Updates the predecessor list for 'block' by replacing 'oldPred' with 'newPred'. |
| 1618 | * Note that a block can only appear once in the preds list (for normal preds, not |
| 1619 | * cheap preds): if a predecessor has multiple ways to get to this block, then |
| 1620 | * flDupCount will be >1, but the block will still appear exactly once. Thus, this |
| 1621 | * function assumes that all branches from the predecessor (practically, that all |
| 1622 | * switch cases that target this block) are changed to branch from the new predecessor, |
| 1623 | * with the same dup count. |
| 1624 | * |
| 1625 | * Note that the block bbRefs is not changed, since 'block' has the same number of |
| 1626 | * references as before, just from a different predecessor block. |
| 1627 | */ |
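| | // Illustrative example (hypothetical block numbers): if B3 is a BBJ_SWITCH with three |
| | // arms that all jump to B7, then B7's pred list has a single entry for B3 with |
| | // flDupCount == 3. After fgReplacePred(B7, B3, B5), that same entry refers to B5 and |
| | // still carries flDupCount == 3; B7->bbRefs is unchanged. |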
| 1628 | |
| 1629 | void Compiler::fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred) |
| 1630 | { |
| 1631 | noway_assert(block != nullptr); |
| 1632 | noway_assert(oldPred != nullptr); |
| 1633 | noway_assert(newPred != nullptr); |
| 1634 | assert(!fgCheapPredsValid); |
| 1635 | |
| 1636 | flowList* pred; |
| 1637 | |
| 1638 | for (pred = block->bbPreds; pred != nullptr; pred = pred->flNext) |
| 1639 | { |
| 1640 | if (oldPred == pred->flBlock) |
| 1641 | { |
| 1642 | pred->flBlock = newPred; |
| 1643 | break; |
| 1644 | } |
| 1645 | } |
| 1646 | } |
| 1647 | |
| 1648 | /***************************************************************************** |
| 1649 | * |
| 1650 | * Returns true if block b1 dominates block b2. |
| 1651 | */ |
| 1652 | |
| 1653 | bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2) |
| 1654 | { |
| 1655 | noway_assert(fgDomsComputed); |
| 1656 | assert(!fgCheapPredsValid); |
| 1657 | |
| 1658 | // |
| 1659 | // If the fgModified flag is true then we have made some modifications to |
| 1660 | // the flow graph, like adding a new block or changing a conditional branch |
| 1661 | // into an unconditional branch. |
| 1662 | // |
| 1663 | // We can continue to use the dominator and reachable information to |
| 1664 | // unmark loops as long as we haven't renumbered the blocks; queries about |
| 1665 | // new blocks (with bbNum > fgDomBBcount) are handled conservatively below. |
| 1666 | // |
| 1667 | |
| 1668 | if (b2->bbNum > fgDomBBcount) |
| 1669 | { |
| 1670 | if (b1 == b2) |
| 1671 | { |
| 1672 | return true; |
| 1673 | } |
| 1674 | |
| 1675 | for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext) |
| 1676 | { |
| 1677 | if (!fgDominate(b1, pred->flBlock)) |
| 1678 | { |
| 1679 | return false; |
| 1680 | } |
| 1681 | } |
| 1682 | |
| 1683 | return b2->bbPreds != nullptr; |
| 1684 | } |
| 1685 | |
| 1686 | if (b1->bbNum > fgDomBBcount) |
| 1687 | { |
| 1688 | // if b1 is a loop preheader and Succ is its only successor, then all predecessors of |
| 1689 | // Succ either are b1 itself or are dominated by Succ. Under these conditions, b1 |
| 1690 | // dominates b2 if and only if Succ dominates b2 (or if b2 == b1, but we already tested |
| 1691 | // for this case) |
| 1692 | if (b1->bbFlags & BBF_LOOP_PREHEADER) |
| 1693 | { |
| 1694 | noway_assert(b1->bbFlags & BBF_INTERNAL); |
| 1695 | noway_assert(b1->bbJumpKind == BBJ_NONE); |
| 1696 | return fgDominate(b1->bbNext, b2); |
| 1697 | } |
| 1698 | |
| 1699 | // unknown dominators; err on the safe side and return false |
| 1700 | return false; |
| 1701 | } |
| 1702 | |
| 1703 | /* Check if b1 dominates b2 */ |
| 1704 | unsigned numA = b1->bbNum; |
| 1705 | noway_assert(numA <= fgDomBBcount); |
| 1706 | unsigned numB = b2->bbNum; |
| 1707 | noway_assert(numB <= fgDomBBcount); |
| 1708 | |
| 1709 | // What we want to ask here is basically if A is in the middle of the path from B to the root (the entry node) |
| 1710 | // in the dominator tree. Turns out that can be translated as: |
| 1711 | // |
| 1712 | // A dom B <-> preorder(A) <= preorder(B) && postorder(A) >= postorder(B) |
| 1713 | // |
| 1714 | // where the equality holds when you ask if A dominates itself. |
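| | // Illustrative example (hypothetical numbering): for the dominator tree |
| | // |
| | //        A              pre:  A=1, B=2, C=3, D=4 |
| | //       / \             post: B=1, D=2, C=3, A=4 |
| | //      B   C |
| | //           \ |
| | //            D |
| | // |
| | // A dom D holds because pre(A)=1 <= pre(D)=4 and post(A)=4 >= post(D)=2, |
| | // while B dom D fails because post(B)=1 < post(D)=2. |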
| 1715 | bool treeDom = |
| 1716 | fgDomTreePreOrder[numA] <= fgDomTreePreOrder[numB] && fgDomTreePostOrder[numA] >= fgDomTreePostOrder[numB]; |
| 1717 | |
| 1718 | return treeDom; |
| 1719 | } |
| 1720 | |
| 1721 | /***************************************************************************** |
| 1722 | * |
| 1723 | * Returns true if block b1 can reach block b2. |
| 1724 | */ |
| 1725 | |
| 1726 | bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2) |
| 1727 | { |
| 1728 | noway_assert(fgDomsComputed); |
| 1729 | assert(!fgCheapPredsValid); |
| 1730 | |
| 1731 | // |
| 1732 | // If the fgModified flag is true then we have made some modifications to |
| 1733 | // the flow graph, like adding a new block or changing a conditional branch |
| 1734 | // into an unconditional branch. |
| 1735 | // |
| 1736 | // We can continue to use the dominator and reachable information to |
| 1737 | // unmark loops as long as we haven't renumbered the blocks; queries about |
| 1738 | // new blocks (with bbNum > fgDomBBcount) are handled conservatively below. |
| 1739 | // |
| 1740 | |
| 1741 | if (b2->bbNum > fgDomBBcount) |
| 1742 | { |
| 1743 | if (b1 == b2) |
| 1744 | { |
| 1745 | return true; |
| 1746 | } |
| 1747 | |
| 1748 | for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext) |
| 1749 | { |
| 1750 | if (fgReachable(b1, pred->flBlock)) |
| 1751 | { |
| 1752 | return true; |
| 1753 | } |
| 1754 | } |
| 1755 | |
| 1756 | return false; |
| 1757 | } |
| 1758 | |
| 1759 | if (b1->bbNum > fgDomBBcount) |
| 1760 | { |
| 1761 | noway_assert(b1->bbJumpKind == BBJ_NONE || b1->bbJumpKind == BBJ_ALWAYS || b1->bbJumpKind == BBJ_COND); |
| 1762 | |
| 1763 | if (b1->bbFallsThrough() && fgReachable(b1->bbNext, b2)) |
| 1764 | { |
| 1765 | return true; |
| 1766 | } |
| 1767 | |
| 1768 | if (b1->bbJumpKind == BBJ_ALWAYS || b1->bbJumpKind == BBJ_COND) |
| 1769 | { |
| 1770 | return fgReachable(b1->bbJumpDest, b2); |
| 1771 | } |
| 1772 | |
| 1773 | return false; |
| 1774 | } |
| 1775 | |
| 1776 | /* Check if b1 can reach b2 */ |
| 1777 | assert(fgReachabilitySetsValid); |
| 1778 | assert(BasicBlockBitSetTraits::GetSize(this) == fgDomBBcount + 1); |
| 1779 | return BlockSetOps::IsMember(this, b2->bbReach, b1->bbNum); |
| 1780 | } |
| 1781 | |
| 1782 | /***************************************************************************** |
| 1783 | * Update changed flow graph information. |
| 1784 | * |
| 1785 | * If the flow graph has changed, we need to recompute various information if we want to use |
| 1786 | * it again. |
| 1787 | */ |
| 1788 | |
| 1789 | void Compiler::fgUpdateChangedFlowGraph() |
| 1790 | { |
| 1791 | // We need to clear this so we don't hit an assert calling fgRenumberBlocks(). |
| 1792 | fgDomsComputed = false; |
| 1793 | |
| 1794 | JITDUMP("\nRenumbering the basic blocks for fgUpdateChangeFlowGraph\n" ); |
| 1795 | fgRenumberBlocks(); |
| 1796 | |
| 1797 | fgComputePreds(); |
| 1798 | fgComputeEnterBlocksSet(); |
| 1799 | fgComputeReachabilitySets(); |
| 1800 | fgComputeDoms(); |
| 1801 | } |
| 1802 | |
| 1803 | /***************************************************************************** |
| 1804 | * Compute the bbReach sets. |
| 1805 | * |
| 1806 | * This can be called to recompute the bbReach sets after the flow graph changes, such as when the |
| 1807 | * number of BasicBlocks change (and thus, the BlockSet epoch changes). |
| 1808 | * |
| 1809 | * Finally, this also sets the BBF_GC_SAFE_POINT flag on blocks. |
| 1810 | * |
| 1811 | * Assumes the predecessor lists are correct. |
| 1812 | * |
| 1813 | * TODO-Throughput: This algorithm consumes O(n^2) memory because we're using dense bitsets to |
| 1814 | * represent reachability. While this yields O(1) time queries, it bloats the memory usage |
| 1815 | * for large code. We can do better if we try to approach reachability by |
| 1816 | * computing the strongly connected components of the flow graph. That way we only need |
| 1817 | * linear memory to label every block with its SCC. |
| 1818 | */ |
| 1819 | |
| 1820 | void Compiler::fgComputeReachabilitySets() |
| 1821 | { |
| 1822 | assert(fgComputePredsDone); |
| 1823 | assert(!fgCheapPredsValid); |
| 1824 | |
| 1825 | #ifdef DEBUG |
| 1826 | fgReachabilitySetsValid = false; |
| 1827 | #endif // DEBUG |
| 1828 | |
| 1829 | BasicBlock* block; |
| 1830 | |
| 1831 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 1832 | { |
| 1833 | // Initialize the per-block bbReach sets. We create a new empty set here |
| 1834 | // because the block epoch could have changed since the previous initialization, |
| 1835 | // in which case the old set would have the wrong size. |
| 1836 | block->bbReach = BlockSetOps::MakeEmpty(this); |
| 1837 | |
| 1838 | /* Mark block as reaching itself */ |
| 1839 | BlockSetOps::AddElemD(this, block->bbReach, block->bbNum); |
| 1840 | } |
| 1841 | |
| 1842 | /* Find the reachable blocks */ |
| 1843 | // Also, set BBF_GC_SAFE_POINT. |
| 1844 | |
| 1845 | bool change; |
| 1846 | BlockSet newReach(BlockSetOps::MakeEmpty(this)); |
| 1847 | do |
| 1848 | { |
| 1849 | change = false; |
| 1850 | |
| 1851 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 1852 | { |
| 1853 | BlockSetOps::Assign(this, newReach, block->bbReach); |
| 1854 | |
| 1855 | bool predGcSafe = (block->bbPreds != nullptr); // Do all of our predecessor blocks have a GC safe bit? |
| 1856 | |
| 1857 | for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext) |
| 1858 | { |
| 1859 | BasicBlock* predBlock = pred->flBlock; |
| 1860 | |
| 1861 | /* Union the predecessor's reachability set into newReach */ |
| 1862 | BlockSetOps::UnionD(this, newReach, predBlock->bbReach); |
| 1863 | |
| 1864 | if (!(predBlock->bbFlags & BBF_GC_SAFE_POINT)) |
| 1865 | { |
| 1866 | predGcSafe = false; |
| 1867 | } |
| 1868 | } |
| 1869 | |
| 1870 | if (predGcSafe) |
| 1871 | { |
| 1872 | block->bbFlags |= BBF_GC_SAFE_POINT; |
| 1873 | } |
| 1874 | |
| 1875 | if (!BlockSetOps::Equal(this, newReach, block->bbReach)) |
| 1876 | { |
| 1877 | BlockSetOps::Assign(this, block->bbReach, newReach); |
| 1878 | change = true; |
| 1879 | } |
| 1880 | } |
| 1881 | } while (change); |
| 1882 | |
| 1883 | #ifdef DEBUG |
| 1884 | if (verbose) |
| 1885 | { |
| 1886 | printf("\nAfter computing reachability sets:\n" ); |
| 1887 | fgDispReach(); |
| 1888 | } |
| 1889 | |
| 1890 | fgReachabilitySetsValid = true; |
| 1891 | #endif // DEBUG |
| 1892 | } |
| 1893 | |
| 1894 | /***************************************************************************** |
| 1895 | * Compute the entry blocks set. |
| 1896 | * |
| 1897 | * Initialize fgEnterBlks to the set of blocks for which we don't have explicit control |
| 1898 | * flow edges. These are the entry basic block and each of the EH handler blocks. |
| 1899 | * For ARM, also include the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, |
| 1900 | * to avoid creating "retless" calls, since we need the BBJ_ALWAYS for the purpose |
| 1901 | * of unwinding, even if the call doesn't return (due to an explicit throw, for example). |
| 1902 | */ |
| 1903 | |
| 1904 | void Compiler::fgComputeEnterBlocksSet() |
| 1905 | { |
| 1906 | #ifdef DEBUG |
| 1907 | fgEnterBlksSetValid = false; |
| 1908 | #endif // DEBUG |
| 1909 | |
| 1910 | fgEnterBlks = BlockSetOps::MakeEmpty(this); |
| 1911 | |
| 1912 | /* Now set the entry basic block */ |
| 1913 | BlockSetOps::AddElemD(this, fgEnterBlks, fgFirstBB->bbNum); |
| 1914 | assert(fgFirstBB->bbNum == 1); |
| 1915 | |
| 1916 | if (compHndBBtabCount > 0) |
| 1917 | { |
| 1918 | /* Also 'or' in the handler basic blocks */ |
| 1919 | EHblkDsc* HBtab; |
| 1920 | EHblkDsc* HBtabEnd; |
| 1921 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++) |
| 1922 | { |
| 1923 | if (HBtab->HasFilter()) |
| 1924 | { |
| 1925 | BlockSetOps::AddElemD(this, fgEnterBlks, HBtab->ebdFilter->bbNum); |
| 1926 | } |
| 1927 | BlockSetOps::AddElemD(this, fgEnterBlks, HBtab->ebdHndBeg->bbNum); |
| 1928 | } |
| 1929 | } |
| 1930 | |
| 1931 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 1932 | // TODO-ARM-Cleanup: The ARM code here to prevent creating retless calls by adding the BBJ_ALWAYS |
| 1933 | // to the enter blocks is a bit of a compromise, because sometimes the blocks are already reachable, |
| 1934 | // and it messes up DFS ordering to have them marked as enter block. We should prevent the |
| 1935 | // creation of retless calls some other way. |
| 1936 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 1937 | { |
| 1938 | if (block->bbJumpKind == BBJ_CALLFINALLY) |
| 1939 | { |
| 1940 | assert(block->isBBCallAlwaysPair()); |
| 1941 | |
| 1942 | // Don't remove the BBJ_ALWAYS block that is only here for the unwinder. It might be dead |
| 1943 | // if the finally is no-return, so mark it as an entry point. |
| 1944 | BlockSetOps::AddElemD(this, fgEnterBlks, block->bbNext->bbNum); |
| 1945 | } |
| 1946 | } |
| 1947 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 1948 | |
| 1949 | #ifdef DEBUG |
| 1950 | if (verbose) |
| 1951 | { |
| 1952 | printf("Enter blocks: " ); |
| 1953 | BlockSetOps::Iter iter(this, fgEnterBlks); |
| 1954 | unsigned bbNum = 0; |
| 1955 | while (iter.NextElem(&bbNum)) |
| 1956 | { |
| 1957 | printf(FMT_BB " " , bbNum); |
| 1958 | } |
| 1959 | printf("\n" ); |
| 1960 | } |
| 1961 | #endif // DEBUG |
| 1962 | |
| 1963 | #ifdef DEBUG |
| 1964 | fgEnterBlksSetValid = true; |
| 1965 | #endif // DEBUG |
| 1966 | } |
| 1967 | |
| 1968 | /***************************************************************************** |
| 1969 | * Remove unreachable blocks. |
| 1970 | * |
| 1971 | * Return true if any unreachable blocks were removed. |
| 1972 | */ |
| 1973 | |
| 1974 | bool Compiler::fgRemoveUnreachableBlocks() |
| 1975 | { |
| 1976 | assert(!fgCheapPredsValid); |
| 1977 | assert(fgReachabilitySetsValid); |
| 1978 | |
| 1979 | bool hasLoops = false; |
| 1980 | bool hasUnreachableBlocks = false; |
| 1981 | BasicBlock* block; |
| 1982 | |
| 1983 | /* Record unreachable blocks */ |
| 1984 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 1985 | { |
| 1986 | /* Internal throw blocks are also reachable */ |
| 1987 | if (fgIsThrowHlpBlk(block)) |
| 1988 | { |
| 1989 | goto SKIP_BLOCK; |
| 1990 | } |
| 1991 | else if (block == genReturnBB) |
| 1992 | { |
| 1993 | // Don't remove statements for the genReturnBB block, as we might have special hookups there. |
| 1994 | // For example (VSW 364383), the profiler hookup needs to have the |
| 1995 | // "void GT_RETURN" statement in order to properly set the |
| 1996 | // info.compProfilerCallback flag. |
| 1997 | goto SKIP_BLOCK; |
| 1998 | } |
| 1999 | else |
| 2000 | { |
| 2001 | // If any of the entry blocks can reach this block, then we skip it. |
| 2002 | if (!BlockSetOps::IsEmptyIntersection(this, fgEnterBlks, block->bbReach)) |
| 2003 | { |
| 2004 | goto SKIP_BLOCK; |
| 2005 | } |
| 2006 | } |
| 2007 | |
| 2008 | // Remove all the code for the block |
| 2009 | fgUnreachableBlock(block); |
| 2010 | |
| 2011 | // Make sure that the block was marked as removed |
| 2012 | noway_assert(block->bbFlags & BBF_REMOVED); |
| 2013 | |
| 2014 | // Some blocks mark the end of trys and catches |
| 2015 | // and can't be removed. We convert these into |
| 2016 | // empty blocks of type BBJ_THROW |
| 2017 | |
| 2018 | if (block->bbFlags & BBF_DONT_REMOVE) |
| 2019 | { |
| 2020 | bool bIsBBCallAlwaysPair = block->isBBCallAlwaysPair(); |
| 2021 | |
| 2022 | /* Unmark the block as removed, */ |
| 2023 | /* clear BBF_INTERNAL as well and set BBJ_IMPORTED */ |
| 2024 | |
| 2025 | block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL | BBF_NEEDS_GCPOLL); |
| 2026 | block->bbFlags |= BBF_IMPORTED; |
| 2027 | block->bbJumpKind = BBJ_THROW; |
| 2028 | block->bbSetRunRarely(); |
| 2029 | |
| 2030 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 2031 | // If this is a <BBJ_CALLFINALLY, BBJ_ALWAYS> pair, we have to clear BBF_FINALLY_TARGET flag on |
| 2032 | // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. |
| 2033 | if (bIsBBCallAlwaysPair) |
| 2034 | { |
| 2035 | noway_assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); |
| 2036 | fgClearFinallyTargetBit(block->bbNext->bbJumpDest); |
| 2037 | } |
| 2038 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 2039 | } |
| 2040 | else |
| 2041 | { |
| 2042 | /* We have to call fgRemoveBlock next */ |
| 2043 | hasUnreachableBlocks = true; |
| 2044 | } |
| 2045 | continue; |
| 2046 | |
| 2047 | SKIP_BLOCK:; |
| 2048 | |
| 2049 | // if (block->isRunRarely()) |
| 2050 | // continue; |
| 2051 | if (block->bbJumpKind == BBJ_RETURN) |
| 2052 | { |
| 2053 | continue; |
| 2054 | } |
| 2055 | |
| 2056 | /* Set BBF_LOOP_HEAD if we have backwards branches to this block */ |
| 2057 | |
| 2058 | unsigned blockNum = block->bbNum; |
| 2059 | for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext) |
| 2060 | { |
| 2061 | BasicBlock* predBlock = pred->flBlock; |
| 2062 | if (blockNum <= predBlock->bbNum) |
| 2063 | { |
| 2064 | if (predBlock->bbJumpKind == BBJ_CALLFINALLY) |
| 2065 | { |
| 2066 | continue; |
| 2067 | } |
| 2068 | |
| 2069 | /* If block can reach predBlock then we have a loop head */ |
| 2070 | if (BlockSetOps::IsMember(this, predBlock->bbReach, blockNum)) |
| 2071 | { |
| 2072 | hasLoops = true; |
| 2073 | |
| 2074 | /* Set the BBF_LOOP_HEAD flag */ |
| 2075 | block->bbFlags |= BBF_LOOP_HEAD; |
| 2076 | break; |
| 2077 | } |
| 2078 | } |
| 2079 | } |
| 2080 | } |
| 2081 | |
| 2082 | fgHasLoops = hasLoops; |
| 2083 | |
| 2084 | if (hasUnreachableBlocks) |
| 2085 | { |
| 2086 | // Now remove the unreachable blocks |
| 2087 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 2088 | { |
| 2089 | // If the block is marked with BBF_REMOVED then |
| 2090 | // we need to call fgRemoveBlock() on it |
| 2091 | |
| 2092 | if (block->bbFlags & BBF_REMOVED) |
| 2093 | { |
| 2094 | fgRemoveBlock(block, true); |
| 2095 | |
| 2096 | // When we have a BBJ_CALLFINALLY, BBJ_ALWAYS pair; fgRemoveBlock will remove |
| 2097 | // both blocks, so we must advance 1 extra place in the block list |
| 2098 | // |
| 2099 | if (block->isBBCallAlwaysPair()) |
| 2100 | { |
| 2101 | block = block->bbNext; |
| 2102 | } |
| 2103 | } |
| 2104 | } |
| 2105 | } |
| 2106 | |
| 2107 | return hasUnreachableBlocks; |
| 2108 | } |
| 2109 | |
| 2110 | /***************************************************************************** |
| 2111 | * |
| 2112 | * Function called to compute the dominator and reachable sets. |
| 2113 | * |
| 2114 | * Assumes the predecessor lists are computed and correct. |
| 2115 | */ |
| 2116 | |
| 2117 | void Compiler::fgComputeReachability() |
| 2118 | { |
| 2119 | #ifdef DEBUG |
| 2120 | if (verbose) |
| 2121 | { |
| 2122 | printf("*************** In fgComputeReachability\n" ); |
| 2123 | } |
| 2124 | |
| 2125 | fgVerifyHandlerTab(); |
| 2126 | |
| 2127 | // Make sure that the predecessor lists are accurate |
| 2128 | assert(fgComputePredsDone); |
| 2129 | fgDebugCheckBBlist(); |
| 2130 | #endif // DEBUG |
| 2131 | |
| 2132 | /* Create a list of all BBJ_RETURN blocks. The head of the list is 'fgReturnBlocks'. */ |
| 2133 | fgReturnBlocks = nullptr; |
| 2134 | |
| 2135 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 2136 | { |
| 2137 | // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only |
| 2138 | // used to find return blocks. |
| 2139 | if (block->bbJumpKind == BBJ_RETURN) |
| 2140 | { |
| 2141 | fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); |
| 2142 | } |
| 2143 | } |
| 2144 | |
| 2145 | // Compute reachability and then delete blocks determined to be unreachable. If we delete blocks, we |
| 2146 | // need to loop, as that might have caused more blocks to become unreachable. This can happen in the |
| 2147 | // case where a call to a finally is unreachable and deleted (maybe the call to the finally is |
| 2148 | // preceded by a throw or an infinite loop), making the blocks following the finally unreachable. |
| 2149 | // However, all EH entry blocks are considered global entry blocks, causing the blocks following the |
| 2150 | // call to the finally to stay rooted, until a second round of reachability is done. |
| 2151 | // The dominator algorithm expects that all blocks can be reached from the fgEnterBlks set. |
| 2152 | unsigned passNum = 1; |
| 2153 | bool changed; |
| 2154 | do |
| 2155 | { |
| 2156 | // Just to be paranoid, avoid infinite loops; fall back to minopts. |
| 2157 | if (passNum > 10) |
| 2158 | { |
| 2159 | noway_assert(!"Too many unreachable block removal loops" ); |
| 2160 | } |
| 2161 | |
| 2162 | /* Walk the flow graph, reassign block numbers to keep them in ascending order */ |
| 2163 | JITDUMP("\nRenumbering the basic blocks for fgComputeReachability pass #%u\n", passNum); |
| 2164 | passNum++; |
| 2165 | fgRenumberBlocks(); |
| 2166 | |
| 2167 | // |
| 2168 | // Compute fgEnterBlks |
| 2169 | // |
| 2170 | |
| 2171 | fgComputeEnterBlocksSet(); |
| 2172 | |
| 2173 | // |
| 2174 | // Compute bbReach |
| 2175 | // |
| 2176 | |
| 2177 | fgComputeReachabilitySets(); |
| 2178 | |
| 2179 | // |
| 2180 | // Use reachability information to delete unreachable blocks. |
| 2181 | // Also, determine if the flow graph has loops and set 'fgHasLoops' accordingly. |
| 2182 | // Set the BBF_LOOP_HEAD flag on the block target of backwards branches. |
| 2183 | // |
| 2184 | |
| 2185 | changed = fgRemoveUnreachableBlocks(); |
| 2186 | |
| 2187 | } while (changed); |
| 2188 | |
| 2189 | #ifdef DEBUG |
| 2190 | if (verbose) |
| 2191 | { |
| 2192 | printf("\nAfter computing reachability:\n" ); |
| 2193 | fgDispBasicBlocks(verboseTrees); |
| 2194 | printf("\n" ); |
| 2195 | } |
| 2196 | |
| 2197 | fgVerifyHandlerTab(); |
| 2198 | fgDebugCheckBBlist(true); |
| 2199 | #endif // DEBUG |
| 2200 | |
| 2201 | // |
| 2202 | // Now, compute the dominators |
| 2203 | // |
| 2204 | |
| 2205 | fgComputeDoms(); |
| 2206 | } |
| 2207 | |
| 2208 | /** In order to compute dominance, we first need a DFS reverse post order sort of the flow graph, |
| 2209 | * which the dominance algorithm requires to operate correctly. We need the DFS sort because |
| 2210 | * we build the dominance sets using the partial order induced by it. If this |
| 2211 | * precondition does not hold, the algorithm doesn't work properly. |
| 2212 | */ |
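| | // Illustrative example (hypothetical blocks): for the diamond BB01 -> {BB02, BB03} and |
| | // BB02, BB03 -> BB04, one valid postorder is BB04, BB02, BB03, BB01, so the reverse |
| | // postorder recorded in fgBBInvPostOrder is BB01, BB03, BB02, BB04: every block appears |
| | // before its successors (ignoring back edges), which is the order fgComputeDoms relies on. |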
| 2213 | void Compiler::fgDfsInvPostOrder() |
| 2214 | { |
| 2215 | // NOTE: This algorithm only pays attention to the actual blocks. It ignores the imaginary entry block. |
| 2216 | |
| 2217 | // visited : Once we run the DFS post order sort recursive algorithm, we mark the nodes we visited to avoid |
| 2218 | // backtracking. |
| 2219 | BlockSet visited(BlockSetOps::MakeEmpty(this)); |
| 2220 | |
| 2221 | // We begin by figuring out which basic blocks don't have incoming edges and mark them as |
| 2222 | // start nodes. Later on we run the recursive algorithm for each node that we |
| 2223 | // mark in this step. |
| 2224 | BlockSet_ValRet_T startNodes = fgDomFindStartNodes(); |
| 2225 | |
| 2226 | // Make sure fgEnterBlks are still there in startNodes, even if they participate in a loop (i.e., there is |
| 2227 | // an incoming edge into the block). |
| 2228 | assert(fgEnterBlksSetValid); |
| 2229 | |
| 2230 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 2231 | // |
| 2232 | // BlockSetOps::UnionD(this, startNodes, fgEnterBlks); |
| 2233 | // |
| 2234 | // This causes problems on ARM because, for BBJ_CALLFINALLY/BBJ_ALWAYS pairs, we add the BBJ_ALWAYS |
| 2235 | // to the enter blocks set to prevent flow graph optimizations from removing it and creating retless call finallies |
| 2236 | // (BBF_RETLESS_CALL). This leads to an incorrect DFS ordering in some cases, because we start the recursive walk |
| 2237 | // from the BBJ_ALWAYS, which is reachable from other blocks. A better solution would be to change ARM to avoid |
| 2238 | // creating retless calls in a different way, not by adding BBJ_ALWAYS to fgEnterBlks. |
| 2239 | // |
| 2240 | // So, let us make sure at least fgFirstBB is still there, even if it participates in a loop. |
| 2241 | BlockSetOps::AddElemD(this, startNodes, 1); |
| 2242 | assert(fgFirstBB->bbNum == 1); |
| 2243 | #else |
| 2244 | BlockSetOps::UnionD(this, startNodes, fgEnterBlks); |
| 2245 | #endif |
| 2246 | |
| 2247 | assert(BlockSetOps::IsMember(this, startNodes, fgFirstBB->bbNum)); |
| 2248 | |
| 2249 | // Call the flowgraph DFS traversal helper. |
| 2250 | unsigned postIndex = 1; |
| 2251 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 2252 | { |
| 2253 | // If the block has no predecessors, and we haven't already visited it (because it's in fgEnterBlks but also |
| 2254 | // reachable from the first block), go ahead and traverse starting from this block. |
| 2255 | if (BlockSetOps::IsMember(this, startNodes, block->bbNum) && |
| 2256 | !BlockSetOps::IsMember(this, visited, block->bbNum)) |
| 2257 | { |
| 2258 | fgDfsInvPostOrderHelper(block, visited, &postIndex); |
| 2259 | } |
| 2260 | } |
| 2261 | |
| 2262 | // After the DFS reverse postorder is completed, we must have visited all the basic blocks. |
| 2263 | noway_assert(postIndex == fgBBcount + 1); |
| 2264 | noway_assert(fgBBNumMax == fgBBcount); |
| 2265 | |
| 2266 | #ifdef DEBUG |
| 2267 | if (0 && verbose) |
| 2268 | { |
| 2269 | printf("\nAfter doing a post order traversal of the BB graph, this is the ordering:\n" ); |
| 2270 | for (unsigned i = 1; i <= fgBBNumMax; ++i) |
| 2271 | { |
| 2272 | printf("%02u -> " FMT_BB "\n" , i, fgBBInvPostOrder[i]->bbNum); |
| 2273 | } |
| 2274 | printf("\n" ); |
| 2275 | } |
| 2276 | #endif // DEBUG |
| 2277 | } |
| 2278 | |
| 2279 | BlockSet_ValRet_T Compiler::fgDomFindStartNodes() |
| 2280 | { |
| 2281 | unsigned j; |
| 2282 | BasicBlock* block; |
| 2283 | |
| 2284 | // startNodes :: A set that represents which basic blocks in the flow graph don't have incoming edges. |
| 2285 | // We begin assuming everything is a start block and remove any block that is being referenced by another in its |
| 2286 | // successor list. |
| 2287 | |
| 2288 | BlockSet startNodes(BlockSetOps::MakeFull(this)); |
| 2289 | |
| 2290 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 2291 | { |
| 2292 | unsigned cSucc = block->NumSucc(this); |
| 2293 | for (j = 0; j < cSucc; ++j) |
| 2294 | { |
| 2295 | BasicBlock* succ = block->GetSucc(j, this); |
| 2296 | BlockSetOps::RemoveElemD(this, startNodes, succ->bbNum); |
| 2297 | } |
| 2298 | } |
| 2299 | |
| 2300 | #ifdef DEBUG |
| 2301 | if (verbose) |
| 2302 | { |
| 2303 | printf("\nDominator computation start blocks (those blocks with no incoming edges):\n" ); |
| 2304 | BlockSetOps::Iter iter(this, startNodes); |
| 2305 | unsigned bbNum = 0; |
| 2306 | while (iter.NextElem(&bbNum)) |
| 2307 | { |
| 2308 | printf(FMT_BB " " , bbNum); |
| 2309 | } |
| 2310 | printf("\n" ); |
| 2311 | } |
| 2312 | #endif // DEBUG |
| 2313 | |
| 2314 | return startNodes; |
| 2315 | } |
| 2316 | |
| 2317 | //------------------------------------------------------------------------ |
| 2318 | // fgDfsInvPostOrderHelper: Helper to assign post-order numbers to blocks. |
| 2319 | // |
| 2320 | // Arguments: |
| 2321 | // block - The starting entry block |
| 2322 | // visited - The set of visited blocks |
| 2323 | // count - Pointer to the Dfs counter |
| 2324 | // |
| 2325 | // Notes: |
| 2326 | // Compute a non-recursive DFS traversal of the flow graph using an |
| 2327 | // evaluation stack to assign post-order numbers. |
| 2328 | |
| 2329 | void Compiler::fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count) |
| 2330 | { |
| 2331 | // Assume we haven't visited this node yet (callers ensure this). |
| 2332 | assert(!BlockSetOps::IsMember(this, visited, block->bbNum)); |
| 2333 | |
| 2334 | // Allocate a local stack to hold the DFS traversal actions necessary |
| 2335 | // to compute pre/post-ordering of the control flowgraph. |
| 2336 | ArrayStack<DfsBlockEntry> stack(getAllocator(CMK_ArrayStack)); |
| 2337 | |
| 2338 | // Push the first block on the stack to seed the traversal. |
| 2339 | stack.Push(DfsBlockEntry(DSS_Pre, block)); |
| 2340 | // Flag the node we just visited to avoid backtracking. |
| 2341 | BlockSetOps::AddElemD(this, visited, block->bbNum); |
| 2342 | |
| 2343 | // The search is terminated once all the actions have been processed. |
| 2344 | while (!stack.Empty()) |
| 2345 | { |
| 2346 | DfsBlockEntry current = stack.Pop(); |
| 2347 | BasicBlock* currentBlock = current.dfsBlock; |
| 2348 | |
| 2349 | if (current.dfsStackState == DSS_Pre) |
| 2350 | { |
| 2351 | // This is a pre-visit that corresponds to the first time the |
| 2352 | // node is encountered in the spanning tree and receives pre-order |
| 2353 | // numberings. By pushing the post-action on the stack here we |
| 2354 | // are guaranteed to only process it after all of its successors |
| 2355 | // pre and post actions are processed. |
| 2356 | stack.Push(DfsBlockEntry(DSS_Post, currentBlock)); |
| 2357 | |
| 2358 | unsigned cSucc = currentBlock->NumSucc(this); |
| 2359 | for (unsigned j = 0; j < cSucc; ++j) |
| 2360 | { |
| 2361 | BasicBlock* succ = currentBlock->GetSucc(j, this); |
| 2362 | |
| 2363 | // If this is a node we haven't seen before, go ahead and process |
| 2364 | if (!BlockSetOps::IsMember(this, visited, succ->bbNum)) |
| 2365 | { |
| 2366 | // Push a pre-visit action for this successor onto the stack and |
| 2367 | // mark it as visited in case this block has multiple successors |
| 2368 | // to the same node (multi-graph). |
| 2369 | stack.Push(DfsBlockEntry(DSS_Pre, succ)); |
| 2370 | BlockSetOps::AddElemD(this, visited, succ->bbNum); |
| 2371 | } |
| 2372 | } |
| 2373 | } |
| 2374 | else |
| 2375 | { |
| 2376 | // This is a post-visit that corresponds to the last time the |
| 2377 | // node is visited in the spanning tree and only happens after |
| 2378 | // all descendants in the spanning tree have had pre and post |
| 2379 | // actions applied. |
| 2380 | |
| 2381 | assert(current.dfsStackState == DSS_Post); |
| 2382 | |
| 2383 | unsigned invCount = fgBBcount - *count + 1; |
| 2384 | assert(1 <= invCount && invCount <= fgBBNumMax); |
| 2385 | fgBBInvPostOrder[invCount] = currentBlock; |
| 2386 | currentBlock->bbDfsNum = invCount; |
| 2387 | ++(*count); |
| 2388 | } |
| 2389 | } |
| 2390 | } |
| 2391 | |
| 2392 | void Compiler::fgComputeDoms() |
| 2393 | { |
| 2394 | assert(!fgCheapPredsValid); |
| 2395 | |
| 2396 | #ifdef DEBUG |
| 2397 | if (verbose) |
| 2398 | { |
| 2399 | printf("*************** In fgComputeDoms\n" ); |
| 2400 | } |
| 2401 | |
| 2402 | fgVerifyHandlerTab(); |
| 2403 | |
| 2404 | // Make sure that the predecessor lists are accurate. |
| 2405 | // Also check that the blocks are properly, densely numbered (so calling fgRenumberBlocks is not necessary). |
| 2406 | fgDebugCheckBBlist(true); |
| 2407 | |
| 2408 | // Assert things related to the BlockSet epoch. |
| 2409 | assert(fgBBcount == fgBBNumMax); |
| 2410 | assert(BasicBlockBitSetTraits::GetSize(this) == fgBBNumMax + 1); |
| 2411 | #endif // DEBUG |
| 2412 | |
| 2413 | BlockSet processedBlks(BlockSetOps::MakeEmpty(this)); |
| 2414 | |
| 2415 | fgBBInvPostOrder = new (this, CMK_DominatorMemory) BasicBlock*[fgBBNumMax + 1]; |
| 2416 | memset(fgBBInvPostOrder, 0, sizeof(BasicBlock*) * (fgBBNumMax + 1)); |
| 2417 | |
| 2418 | fgDfsInvPostOrder(); |
| 2419 | noway_assert(fgBBInvPostOrder[0] == nullptr); |
| 2420 | |
| 2421 | // flRoot and bbRoot represent an imaginary unique entry point in the flow graph. |
| 2422 | // All the orphaned EH blocks and fgFirstBB will temporarily have their predecessor lists |
| 2423 | // (with bbRoot as the only basic block in them) set to flRoot. |
| 2424 | // Later on, we clear those predecessor lists and reset them to nullptr. |
| 2425 | // Since we number basic blocks starting at one, the imaginary entry block is conveniently numbered as zero. |
| 2426 | flowList flRoot; |
| 2427 | BasicBlock bbRoot; |
| 2428 | |
| 2429 | bbRoot.bbPreds = nullptr; |
| 2430 | bbRoot.bbNum = 0; |
| 2431 | bbRoot.bbIDom = &bbRoot; |
| 2432 | bbRoot.bbDfsNum = 0; |
| 2433 | bbRoot.bbFlags = 0; |
| 2434 | flRoot.flNext = nullptr; |
| 2435 | flRoot.flBlock = &bbRoot; |
| 2436 | |
| 2437 | fgBBInvPostOrder[0] = &bbRoot; |
| 2438 | |
| 2439 | // Mark both bbRoot and fgFirstBB processed |
| 2440 | BlockSetOps::AddElemD(this, processedBlks, 0); // bbRoot == block #0 |
| 2441 | BlockSetOps::AddElemD(this, processedBlks, 1); // fgFirstBB == block #1 |
| 2442 | assert(fgFirstBB->bbNum == 1); |
| 2443 | |
| 2444 | // Special case fgFirstBB to say its IDom is bbRoot. |
| 2445 | fgFirstBB->bbIDom = &bbRoot; |
| 2446 | |
| 2447 | BasicBlock* block = nullptr; |
| 2448 | |
| 2449 | for (block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext) |
| 2450 | { |
| 2451 | // If any basic block has no predecessors then we flag it as processed and temporarily |
| 2452 | // mark its predecessor list to be flRoot. This makes the flowgraph connected, |
| 2453 | // a precondition that is needed by the dominance algorithm to operate properly. |
| 2454 | if (block->bbPreds == nullptr) |
| 2455 | { |
| 2456 | block->bbPreds = &flRoot; |
| 2457 | block->bbIDom = &bbRoot; |
| 2458 | BlockSetOps::AddElemD(this, processedBlks, block->bbNum); |
| 2459 | } |
| 2460 | else |
| 2461 | { |
| 2462 | block->bbIDom = nullptr; |
| 2463 | } |
| 2464 | } |
| 2465 | |
| 2466 | // Mark the EH blocks as entry blocks and also flag them as processed. |
| 2467 | if (compHndBBtabCount > 0) |
| 2468 | { |
| 2469 | EHblkDsc* HBtab; |
| 2470 | EHblkDsc* HBtabEnd; |
| 2471 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++) |
| 2472 | { |
| 2473 | if (HBtab->HasFilter()) |
| 2474 | { |
| 2475 | HBtab->ebdFilter->bbIDom = &bbRoot; |
| 2476 | BlockSetOps::AddElemD(this, processedBlks, HBtab->ebdFilter->bbNum); |
| 2477 | } |
| 2478 | HBtab->ebdHndBeg->bbIDom = &bbRoot; |
| 2479 | BlockSetOps::AddElemD(this, processedBlks, HBtab->ebdHndBeg->bbNum); |
| 2480 | } |
| 2481 | } |
| 2482 | |
| 2483 | // Now proceed to compute the immediate dominators for each basic block. |
| 2484 | bool changed = true; |
| 2485 | while (changed) |
| 2486 | { |
| 2487 | changed = false; |
| 2488 | for (unsigned i = 1; i <= fgBBNumMax; |
| 2489 | ++i) // Process each actual block; don't process the imaginary predecessor block. |
| 2490 | { |
| 2491 | flowList* first = nullptr; |
| 2492 | BasicBlock* newidom = nullptr; |
| 2493 | block = fgBBInvPostOrder[i]; |
| 2494 | |
| 2495 | // If a block has bbRoot as its bbIDom, |
| 2496 | // it means we already flagged it as processed and as an entry block, |
| 2497 | // so in this case we're all set. |
| 2498 | if (block->bbIDom == &bbRoot) |
| 2499 | { |
| 2500 | continue; |
| 2501 | } |
| 2502 | |
| 2503 | // Pick up the first processed predecessor of the current block. |
| 2504 | for (first = block->bbPreds; first != nullptr; first = first->flNext) |
| 2505 | { |
| 2506 | if (BlockSetOps::IsMember(this, processedBlks, first->flBlock->bbNum)) |
| 2507 | { |
| 2508 | break; |
| 2509 | } |
| 2510 | } |
| 2511 | noway_assert(first != nullptr); |
| 2512 | |
| 2513 | // We assume the first processed predecessor will be the |
| 2514 | // immediate dominator and then compute the forward flow analysis. |
| 2515 | newidom = first->flBlock; |
| 2516 | for (flowList* p = block->bbPreds; p != nullptr; p = p->flNext) |
| 2517 | { |
| 2518 | if (p->flBlock == first->flBlock) |
| 2519 | { |
| 2520 | continue; |
| 2521 | } |
| 2522 | if (p->flBlock->bbIDom != nullptr) |
| 2523 | { |
| 2524 | // fgIntersectDom is basically the set intersection between |
| 2525 | // the dominance sets of the new IDom and the current predecessor |
| 2526 | // Since the nodes are ordered in DFS inverse post order and |
| 2527 | // IDom induces a tree, fgIntersectDom actually computes |
| 2528 | // the lowest common ancestor in the dominator tree. |
| 2529 | newidom = fgIntersectDom(p->flBlock, newidom); |
| 2530 | } |
| 2531 | } |
| 2532 | |
| 2533 | // If the immediate dominator changed, assign the new one |
| 2534 | // to the current working basic block. |
| 2535 | if (block->bbIDom != newidom) |
| 2536 | { |
| 2537 | noway_assert(newidom != nullptr); |
| 2538 | block->bbIDom = newidom; |
| 2539 | changed = true; |
| 2540 | } |
| 2541 | BlockSetOps::AddElemD(this, processedBlks, block->bbNum); |
| 2542 | } |
| 2543 | } |
| 2544 | |
| 2545 | // As stated before, once we have computed immediate dominance we need to clear |
| 2546 | // all the basic blocks whose predecessor list was set to flRoot. This |
| 2547 | // reverts that and leaves the blocks the same as before. |
| 2548 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 2549 | { |
| 2550 | if (block->bbPreds == &flRoot) |
| 2551 | { |
| 2552 | block->bbPreds = nullptr; |
| 2553 | } |
| 2554 | } |
| 2555 | |
| 2556 | fgCompDominatedByExceptionalEntryBlocks(); |
| 2557 | |
| 2558 | #ifdef DEBUG |
| 2559 | if (verbose) |
| 2560 | { |
| 2561 | fgDispDoms(); |
| 2562 | } |
| 2563 | #endif |
| 2564 | |
| 2565 | fgBuildDomTree(); |
| 2566 | |
| 2567 | fgModified = false; |
| 2568 | fgDomBBcount = fgBBcount; |
| 2569 | assert(fgBBcount == fgBBNumMax); |
| 2570 | assert(BasicBlockBitSetTraits::GetSize(this) == fgDomBBcount + 1); |
| 2571 | |
| 2572 | fgDomsComputed = true; |
| 2573 | } |
| 2574 | |
| 2575 | void Compiler::fgBuildDomTree() |
| 2576 | { |
| 2577 | unsigned i; |
| 2578 | BasicBlock* block; |
| 2579 | |
| 2580 | #ifdef DEBUG |
| 2581 | if (verbose) |
| 2582 | { |
| 2583 | printf("\nInside fgBuildDomTree\n" ); |
| 2584 | } |
| 2585 | #endif // DEBUG |
| 2586 | |
| 2587 | // domTree :: The dominance tree represented using adjacency lists. We use BasicBlockList to represent edges. |
| 2588 | // Indexed by basic block number. |
| 2589 | unsigned bbArraySize = fgBBNumMax + 1; |
| 2590 | BasicBlockList** domTree = new (this, CMK_DominatorMemory) BasicBlockList*[bbArraySize]; |
| 2591 | |
| 2592 | fgDomTreePreOrder = new (this, CMK_DominatorMemory) unsigned[bbArraySize]; |
| 2593 | fgDomTreePostOrder = new (this, CMK_DominatorMemory) unsigned[bbArraySize]; |
| 2594 | |
| 2595 | // Initialize all the data structures. |
| 2596 | for (i = 0; i < bbArraySize; ++i) |
| 2597 | { |
| 2598 | domTree[i] = nullptr; |
| 2599 | fgDomTreePreOrder[i] = fgDomTreePostOrder[i] = 0; |
| 2600 | } |
| 2601 | |
| 2602 | // Build the dominance tree. |
| 2603 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 2604 | { |
| 2605 | // If the immediate dominator is not the imaginary root (bbRoot) |
| 2606 | // we proceed to append this block to the children of the dominator node. |
| 2607 | if (block->bbIDom->bbNum != 0) |
| 2608 | { |
| 2609 | int bbNum = block->bbIDom->bbNum; |
| 2610 | domTree[bbNum] = new (this, CMK_DominatorMemory) BasicBlockList(block, domTree[bbNum]); |
| 2611 | } |
| 2612 | else |
| 2613 | { |
| 2614 | // This means this block had bbRoot set as its IDom. We clear it out |
| 2615 | // and convert the tree back to a forest. |
| 2616 | block->bbIDom = nullptr; |
| 2617 | } |
| 2618 | } |
| 2619 | |
| 2620 | #ifdef DEBUG |
| 2621 | if (verbose) |
| 2622 | { |
| 2623 | printf("\nAfter computing the Dominance Tree:\n" ); |
| 2624 | fgDispDomTree(domTree); |
| 2625 | } |
| 2626 | #endif // DEBUG |
| 2627 | |
| 2628 | // Get the bitset that represents the roots of the dominance tree. |
| 2629 | // Something to note here is that the dominance tree has been converted from a forest to a tree |
| 2630 | // by using the bbRoot trick on fgComputeDoms. The reason we have a forest instead of a real tree |
| 2631 | // is because we treat the EH blocks as entry nodes so the real dominance tree is not necessarily connected. |
| 2632 | BlockSet_ValRet_T domTreeEntryNodes = fgDomTreeEntryNodes(domTree); |
| 2633 | |
| 2634 | // The preorder and postorder numbers. |
| 2635 | // We start from 1 to match the bbNum ordering. |
| 2636 | unsigned preNum = 1; |
| 2637 | unsigned postNum = 1; |
| 2638 | |
| 2639 | // There will be nodes in the dominance tree that are not reachable: |
| 2640 | // the catch blocks that return, since they don't have any predecessors. |
| 2641 | // To account for this, we keep track of how many nodes we can |
| 2642 | // reach and assert at the end that we visited all of them. |
| 2643 | unsigned domTreeReachable = fgBBcount; |
| 2644 | |
| 2645 | // Once we have the dominance tree computed, we need to traverse it |
| 2646 | // to get the preorder and postorder numbers for each node. The purpose of |
| 2647 | // this is to achieve O(1) queries of the form "does A dominate B?". |
| 2648 | for (i = 1; i <= fgBBNumMax; ++i) |
| 2649 | { |
| 2650 | if (BlockSetOps::IsMember(this, domTreeEntryNodes, i)) |
| 2651 | { |
| 2652 | if (domTree[i] == nullptr) |
| 2653 | { |
| 2654 | // If this is an entry node but there's no children on this |
| 2655 | // node, it means it's unreachable so we decrement the reachable |
| 2656 | // counter. |
| 2657 | --domTreeReachable; |
| 2658 | } |
| 2659 | else |
| 2660 | { |
| 2661 | // Otherwise, we do a DFS traversal of the dominator tree. |
| 2662 | fgTraverseDomTree(i, domTree, &preNum, &postNum); |
| 2663 | } |
| 2664 | } |
| 2665 | } |
| 2666 | |
| 2667 | noway_assert(preNum == domTreeReachable + 1); |
| 2668 | noway_assert(postNum == domTreeReachable + 1); |
| 2669 | |
| 2670 | // Once we have all the reachable nodes numbered, we proceed to |
| 2671 | // assign incrementing numbers to the non-reachable ones as well. |
| 2672 | // At the end, both counters must equal fgBBNumMax + 1. |
| 2673 | |
| 2674 | for (i = 1; i <= fgBBNumMax; ++i) |
| 2675 | { |
| 2676 | if (BlockSetOps::IsMember(this, domTreeEntryNodes, i)) |
| 2677 | { |
| 2678 | if (domTree[i] == nullptr) |
| 2679 | { |
| 2680 | fgDomTreePreOrder[i] = preNum++; |
| 2681 | fgDomTreePostOrder[i] = postNum++; |
| 2682 | } |
| 2683 | } |
| 2684 | } |
| 2685 | |
| 2686 | noway_assert(preNum == fgBBNumMax + 1); |
| 2687 | noway_assert(postNum == fgBBNumMax + 1); |
| 2688 | noway_assert(fgDomTreePreOrder[0] == 0); // Unused first element |
| 2689 | noway_assert(fgDomTreePostOrder[0] == 0); // Unused first element |
| 2690 | |
| 2691 | #ifdef DEBUG |
| 2692 | if (0 && verbose) |
| 2693 | { |
| 2694 | printf("\nAfter traversing the dominance tree:\n" ); |
| 2695 | printf("PreOrder:\n" ); |
| 2696 | for (i = 1; i <= fgBBNumMax; ++i) |
| 2697 | { |
| 2698 | printf(FMT_BB " : %02u\n" , i, fgDomTreePreOrder[i]); |
| 2699 | } |
| 2700 | printf("PostOrder:\n" ); |
| 2701 | for (i = 1; i <= fgBBNumMax; ++i) |
| 2702 | { |
| 2703 | printf(FMT_BB " : %02u\n" , i, fgDomTreePostOrder[i]); |
| 2704 | } |
| 2705 | } |
| 2706 | #endif // DEBUG |
| 2707 | } |
| 2708 | |
| 2709 | BlockSet_ValRet_T Compiler::fgDomTreeEntryNodes(BasicBlockList** domTree) |
| 2710 | { |
| 2711 | // domTreeEntryNodes :: Set that represents which basic blocks are roots of the dominator forest. |
| 2712 | |
| 2713 | BlockSet domTreeEntryNodes(BlockSetOps::MakeFull(this)); |
| 2714 | |
| 2715 | // First of all we need to find all the roots of the dominance forest. |
| 2716 | |
| 2717 | for (unsigned i = 1; i <= fgBBNumMax; ++i) |
| 2718 | { |
| 2719 | for (BasicBlockList* current = domTree[i]; current != nullptr; current = current->next) |
| 2720 | { |
| 2721 | BlockSetOps::RemoveElemD(this, domTreeEntryNodes, current->block->bbNum); |
| 2722 | } |
| 2723 | } |
| 2724 | |
| 2725 | return domTreeEntryNodes; |
| 2726 | } |
| 2727 | |
| 2728 | #ifdef DEBUG |
| 2729 | void Compiler::fgDispDomTree(BasicBlockList** domTree) |
| 2730 | { |
| 2731 | for (unsigned i = 1; i <= fgBBNumMax; ++i) |
| 2732 | { |
| 2733 | if (domTree[i] != nullptr) |
| 2734 | { |
| 2735 | printf(FMT_BB " : ", i); |
| 2736 | for (BasicBlockList* current = domTree[i]; current != nullptr; current = current->next) |
| 2737 | { |
| 2738 | assert(current->block); |
| 2739 | printf(FMT_BB " ", current->block->bbNum); |
| 2740 | } |
| 2741 | printf("\n"); |
| 2742 | } |
| 2743 | } |
| 2744 | printf("\n"); |
| 2745 | } |
| 2746 | #endif // DEBUG |
| 2747 | |
| 2748 | //------------------------------------------------------------------------ |
| 2749 | // fgTraverseDomTree: Assign pre/post-order numbers to the dominator tree. |
| 2750 | // |
| 2751 | // Arguments: |
| 2752 | // bbNum - The basic block number of the starting block |
| 2753 | // domTree - The dominator tree (as child block lists) |
| 2754 | // preNum - Pointer to the pre-number counter |
| 2755 | // postNum - Pointer to the post-number counter |
| 2756 | // |
| 2757 | // Notes: |
| 2758 | // Runs a non-recursive DFS traversal of the dominator tree using an |
| 2759 | // evaluation stack to assign pre-order and post-order numbers. |
| 2760 | // These numberings are used to provide constant time lookup for |
| 2761 | // ancestor/descendent tests between pairs of nodes in the tree. |
| 2762 | |
| 2763 | void Compiler::fgTraverseDomTree(unsigned bbNum, BasicBlockList** domTree, unsigned* preNum, unsigned* postNum) |
| 2764 | { |
| 2765 | noway_assert(bbNum <= fgBBNumMax); |
| 2766 | |
| 2767 | // If the block preorder number is not zero it means we already visited |
| 2768 | // that node, so we skip it. |
| 2769 | if (fgDomTreePreOrder[bbNum] == 0) |
| 2770 | { |
| 2771 | // If this is the first time we visit this node, both preorder and postnumber |
| 2772 | // values must be zero. |
| 2773 | noway_assert(fgDomTreePostOrder[bbNum] == 0); |
| 2774 | |
| 2775 | // Allocate a local stack to hold the Dfs traversal actions necessary |
| 2776 | // to compute pre/post-ordering of the dominator tree. |
| 2777 | ArrayStack<DfsNumEntry> stack(getAllocator(CMK_ArrayStack)); |
| 2778 | |
| 2779 | // Push the first entry number on the stack to seed the traversal. |
| 2780 | stack.Push(DfsNumEntry(DSS_Pre, bbNum)); |
| 2781 | |
| 2782 | // The search is terminated once all the actions have been processed. |
| 2783 | while (!stack.Empty()) |
| 2784 | { |
| 2785 | DfsNumEntry current = stack.Pop(); |
| 2786 | unsigned currentNum = current.dfsNum; |
| 2787 | |
| 2788 | if (current.dfsStackState == DSS_Pre) |
| 2789 | { |
| 2790 | // This pre-visit action corresponds to the first time the |
| 2791 | // node is encountered during the spanning traversal. |
| 2792 | noway_assert(fgDomTreePreOrder[currentNum] == 0); |
| 2793 | noway_assert(fgDomTreePostOrder[currentNum] == 0); |
| 2794 | |
| 2795 | // Assign the preorder number on the first visit. |
| 2796 | fgDomTreePreOrder[currentNum] = (*preNum)++; |
| 2797 | |
| 2798 | // Push this nodes post-action on the stack such that all successors |
| 2799 | // pre-order visits occur before this nodes post-action. We will assign |
| 2800 | // its post-order numbers when we pop off the stack. |
| 2801 | stack.Push(DfsNumEntry(DSS_Post, currentNum)); |
| 2802 | |
| 2803 | // For each child in the dominator tree process its pre-actions. |
| 2804 | for (BasicBlockList* child = domTree[currentNum]; child != nullptr; child = child->next) |
| 2805 | { |
| 2806 | unsigned childNum = child->block->bbNum; |
| 2807 | |
| 2808 | // This is a tree so never could have been visited |
| 2809 | assert(fgDomTreePreOrder[childNum] == 0); |
| 2810 | |
| 2811 | // Push the successor in the dominator tree for pre-actions. |
| 2812 | stack.Push(DfsNumEntry(DSS_Pre, childNum)); |
| 2813 | } |
| 2814 | } |
| 2815 | else |
| 2816 | { |
| 2817 | // This post-visit action corresponds to the last time the node |
| 2818 | // is encountered and only after all descendants in the spanning |
| 2819 | // tree have had pre and post-order numbers assigned. |
| 2820 | |
| 2821 | assert(current.dfsStackState == DSS_Post); |
| 2822 | assert(fgDomTreePreOrder[currentNum] != 0); |
| 2823 | assert(fgDomTreePostOrder[currentNum] == 0); |
| 2824 | |
| 2825 | // Now assign this nodes post-order number. |
| 2826 | fgDomTreePostOrder[currentNum] = (*postNum)++; |
| 2827 | } |
| 2828 | } |
| 2829 | } |
| 2830 | } |
| 2831 | |
| 2832 | // This code finds the lowest common ancestor in the |
| 2833 | // dominator tree between two basic blocks. The LCA in the dominator tree |
| 2834 | // represents the closest common dominator of the two basic blocks. Used to |
| 2835 | // adjust the IDom value in fgComputeDoms. |
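| | // Illustrative example (hypothetical blocks): suppose the dominator tree is |
| | // |
| | //        A (bbDfsNum 1) |
| | //       / \ |
| | //      B   C           (B=2, C=3) |
| | //      |   | |
| | //      D   E           (D=4, E=5) |
| | // |
| | // fgIntersectDom(D, E) first walks E up to C (5 > 4), then D up to B and E up to A, |
| | // then B up to A, at which point both fingers meet and A is returned as the LCA. |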
| 2836 | BasicBlock* Compiler::fgIntersectDom(BasicBlock* a, BasicBlock* b) |
| 2837 | { |
| 2838 | BasicBlock* finger1 = a; |
| 2839 | BasicBlock* finger2 = b; |
| 2840 | while (finger1 != finger2) |
| 2841 | { |
| 2842 | while (finger1->bbDfsNum > finger2->bbDfsNum) |
| 2843 | { |
| 2844 | finger1 = finger1->bbIDom; |
| 2845 | } |
| 2846 | while (finger2->bbDfsNum > finger1->bbDfsNum) |
| 2847 | { |
| 2848 | finger2 = finger2->bbIDom; |
| 2849 | } |
| 2850 | } |
| 2851 | return finger1; |
| 2852 | } |
| 2853 | |
| 2854 | // Return a BlockSet containing all the blocks that dominate 'block'. |
| 2855 | BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block) |
| 2856 | { |
| 2857 | assert(block != nullptr); |
| 2858 | |
| 2859 | BlockSet domSet(BlockSetOps::MakeEmpty(this)); |
| 2860 | |
| 2861 | do |
| 2862 | { |
| 2863 | BlockSetOps::AddElemD(this, domSet, block->bbNum); |
| 2864 | if (block == block->bbIDom) |
| 2865 | { |
| 2866 | break; // We found a cycle in the IDom list, so we're done. |
| 2867 | } |
| 2868 | block = block->bbIDom; |
| 2869 | } while (block != nullptr); |
| 2870 | |
| 2871 | return domSet; |
| 2872 | } |
| 2873 | |
| 2874 | /***************************************************************************** |
| 2875 | * |
| 2876 | * fgComputeCheapPreds: Function called to compute the BasicBlock::bbCheapPreds lists. |
| 2877 | * |
| 2878 | * No other block data is changed (e.g., bbRefs, bbFlags). |
| 2879 | * |
| 2880 | * The cheap preds lists are similar to the normal (bbPreds) predecessor lists, but are cheaper to |
| 2881 | * compute and store, as follows: |
| 2882 | * 1. A flow edge is typed BasicBlockList, which only has a block pointer and 'next' pointer. It doesn't |
| 2883 | * have weights or a dup count. |
| 2884 | * 2. The preds list for a block is not sorted by block number. |
| 2885 | * 3. The predecessors of the block following a BBJ_CALLFINALLY (the corresponding BBJ_ALWAYS, |
| 2886 | * for normal, non-retless calls to the finally) are not computed. |
| 2887 | * 4. The cheap preds lists will contain duplicates if a single switch table has multiple branches |
| 2888 | * to the same block. Thus, we don't spend the time looking for duplicates for every edge we insert. |
| 2889 | */ |
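| | // Illustrative usage sketch (not part of the original header): walking a cheap preds list |
| | // simply follows the BasicBlockList chain, and duplicates from switch tables may appear: |
| | // |
| | //     for (BasicBlockList* pred = block->bbCheapPreds; pred != nullptr; pred = pred->next) |
| | //     { |
| | //         BasicBlock* predBlock = pred->block; |
| | //         // ... predBlock may appear more than once (see point 4 above) |
| | //     } |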
| 2890 | void Compiler::fgComputeCheapPreds() |
| 2891 | { |
| 2892 | noway_assert(!fgComputePredsDone); // We can't do this if we've got the full preds. |
| 2893 | noway_assert(fgFirstBB != nullptr); |
| 2894 | |
| 2895 | BasicBlock* block; |
| 2896 | |
| 2897 | #ifdef DEBUG |
| 2898 | if (verbose) |
| 2899 | { |
| 2900 | printf("\n*************** In fgComputeCheapPreds()\n" ); |
| 2901 | fgDispBasicBlocks(); |
| 2902 | printf("\n" ); |
| 2903 | } |
| 2904 | #endif // DEBUG |
| 2905 | |
| 2906 | // Clear out the cheap preds lists. |
| 2907 | fgRemovePreds(); |
| 2908 | |
| 2909 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 2910 | { |
| 2911 | switch (block->bbJumpKind) |
| 2912 | { |
| 2913 | case BBJ_COND: |
| 2914 | fgAddCheapPred(block->bbJumpDest, block); |
| 2915 | fgAddCheapPred(block->bbNext, block); |
| 2916 | break; |
| 2917 | |
| 2918 | case BBJ_CALLFINALLY: |
| 2919 | case BBJ_LEAVE: // If fgComputeCheapPreds is called before all blocks are imported, BBJ_LEAVE blocks are |
| 2920 | // still in the BB list. |
| 2921 | case BBJ_ALWAYS: |
| 2922 | case BBJ_EHCATCHRET: |
| 2923 | fgAddCheapPred(block->bbJumpDest, block); |
| 2924 | break; |
| 2925 | |
| 2926 | case BBJ_NONE: |
| 2927 | fgAddCheapPred(block->bbNext, block); |
| 2928 | break; |
| 2929 | |
| 2930 | case BBJ_EHFILTERRET: |
| 2931 | // Connect end of filter to catch handler. |
| 2932 | // In a well-formed program, this cannot be null. Tolerate it here, so that we can call |
| 2933 | // fgComputeCheapPreds before fgImport on an ill-formed program; the problem will be detected in |
| 2934 | // fgImport. |
| 2935 | if (block->bbJumpDest != nullptr) |
| 2936 | { |
| 2937 | fgAddCheapPred(block->bbJumpDest, block); |
| 2938 | } |
| 2939 | break; |
| 2940 | |
| 2941 | case BBJ_SWITCH: |
| 2942 | unsigned jumpCnt; |
| 2943 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 2944 | BasicBlock** jumpTab; |
| 2945 | jumpTab = block->bbJumpSwt->bbsDstTab; |
| 2946 | |
| 2947 | do |
| 2948 | { |
| 2949 | fgAddCheapPred(*jumpTab, block); |
| 2950 | } while (++jumpTab, --jumpCnt); |
| 2951 | |
| 2952 | break; |
| 2953 | |
| 2954 | case BBJ_EHFINALLYRET: // It's expensive to compute the preds for this case, so we don't for the cheap |
| 2955 | // preds. |
| 2956 | case BBJ_THROW: |
| 2957 | case BBJ_RETURN: |
| 2958 | break; |
| 2959 | |
| 2960 | default: |
| 2961 | noway_assert(!"Unexpected bbJumpKind" ); |
| 2962 | break; |
| 2963 | } |
| 2964 | } |
| 2965 | |
| 2966 | fgCheapPredsValid = true; |
| 2967 | |
| 2968 | #ifdef DEBUG |
| 2969 | if (verbose) |
| 2970 | { |
| 2971 | printf("\n*************** After fgComputeCheapPreds()\n" ); |
| 2972 | fgDispBasicBlocks(); |
| 2973 | printf("\n" ); |
| 2974 | } |
| 2975 | #endif |
| 2976 | } |
| 2977 | |
| 2978 | /***************************************************************************** |
| 2979 | * Add 'blockPred' to the cheap predecessor list of 'block'. |
| 2980 | */ |
| 2981 | |
| 2982 | void Compiler::fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred) |
| 2983 | { |
| 2984 | assert(!fgComputePredsDone); |
| 2985 | assert(block != nullptr); |
| 2986 | assert(blockPred != nullptr); |
| 2987 | |
| 2988 | block->bbCheapPreds = new (this, CMK_FlowList) BasicBlockList(blockPred, block->bbCheapPreds); |
| 2989 | |
| 2990 | #if MEASURE_BLOCK_SIZE |
| 2991 | genFlowNodeCnt += 1; |
| 2992 | genFlowNodeSize += sizeof(BasicBlockList); |
| 2993 | #endif // MEASURE_BLOCK_SIZE |
| 2994 | } |
| 2995 | |
| 2996 | /***************************************************************************** |
| 2997 | * Remove 'blockPred' from the cheap predecessor list of 'block'. |
| 2998 | * If there are duplicate edges, only remove one of them. |
| 2999 | */ |
| 3000 | void Compiler::fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred) |
| 3001 | { |
| 3002 | assert(!fgComputePredsDone); |
| 3003 | assert(fgCheapPredsValid); |
| 3004 | |
| 3005 | flowList* oldEdge = nullptr; |
| 3006 | |
| 3007 | assert(block != nullptr); |
| 3008 | assert(blockPred != nullptr); |
| 3009 | assert(block->bbCheapPreds != nullptr); |
| 3010 | |
| 3011 | /* Is this the first block in the pred list? */ |
| 3012 | if (blockPred == block->bbCheapPreds->block) |
| 3013 | { |
| 3014 | block->bbCheapPreds = block->bbCheapPreds->next; |
| 3015 | } |
| 3016 | else |
| 3017 | { |
| 3018 | BasicBlockList* pred; |
| 3019 | for (pred = block->bbCheapPreds; pred->next != nullptr; pred = pred->next) |
| 3020 | { |
| 3021 | if (blockPred == pred->next->block) |
| 3022 | { |
| 3023 | break; |
| 3024 | } |
| 3025 | } |
| 3026 | noway_assert(pred->next != nullptr); // we better have found it! |
| 3027 | pred->next = pred->next->next; // splice it out |
| 3028 | } |
| 3029 | } |
| 3030 | |
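| | /***************************************************************************** |
| | * Clear the predecessor lists (bbPreds / bbCheapPreds) of every block and mark |
| | * both forms of pred information as invalid. |
| | */ |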
| 3031 | void Compiler::fgRemovePreds() |
| 3032 | { |
| 3033 | C_ASSERT(offsetof(BasicBlock, bbPreds) == |
| 3034 | offsetof(BasicBlock, bbCheapPreds)); // bbPreds and bbCheapPreds are at the same place in a union, |
| 3035 | C_ASSERT(sizeof(((BasicBlock*)nullptr)->bbPreds) == |
| 3036 | sizeof(((BasicBlock*)nullptr)->bbCheapPreds)); // and are the same size. So, this function removes both. |
| 3037 | |
| 3038 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 3039 | { |
| 3040 | block->bbPreds = nullptr; |
| 3041 | } |
| 3042 | fgComputePredsDone = false; |
| 3043 | fgCheapPredsValid = false; |
| 3044 | } |
| 3045 | |
| 3046 | /***************************************************************************** |
| 3047 | * |
| 3048 | * Function called to compute the bbPreds lists. |
| 3049 | */ |
| 3050 | void Compiler::fgComputePreds() |
| 3051 | { |
| 3052 | noway_assert(fgFirstBB); |
| 3053 | |
| 3054 | BasicBlock* block; |
| 3055 | |
| 3056 | #ifdef DEBUG |
| 3057 | if (verbose) |
| 3058 | { |
| 3059 | printf("\n*************** In fgComputePreds()\n"); |
| 3060 | fgDispBasicBlocks(); |
| 3061 | printf("\n"); |
| 3062 | } |
| 3063 | #endif // DEBUG |
| 3064 | |
| 3065 | // reset the refs count for each basic block |
| 3066 | |
| 3067 | for (block = fgFirstBB; block; block = block->bbNext) |
| 3068 | { |
| 3069 | block->bbRefs = 0; |
| 3070 | } |
| 3071 | |
| 3072 | /* the first block is always reachable! */ |
| 3073 | fgFirstBB->bbRefs = 1; |
| 3074 | |
| 3075 | /* Treat the initial block as a jump target */ |
| 3076 | fgFirstBB->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 3077 | |
| 3078 | fgRemovePreds(); |
| 3079 | |
| 3080 | for (block = fgFirstBB; block; block = block->bbNext) |
| 3081 | { |
| 3082 | switch (block->bbJumpKind) |
| 3083 | { |
| 3084 | case BBJ_CALLFINALLY: |
| 3085 | if (!(block->bbFlags & BBF_RETLESS_CALL)) |
| 3086 | { |
| 3087 | assert(block->isBBCallAlwaysPair()); |
| 3088 | |
| 3089 | /* Mark the next block as being a jump target, |
| 3090 | since the call target will return there */ |
| 3091 | PREFIX_ASSUME(block->bbNext != nullptr); |
| 3092 | block->bbNext->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL); |
| 3093 | } |
| 3094 | |
| 3095 | __fallthrough; |
| 3096 | |
| 3097 | case BBJ_LEAVE: // Sometimes fgComputePreds is called before all blocks are imported, so BBJ_LEAVE |
| 3098 | // blocks are still in the BB list. |
| 3099 | case BBJ_COND: |
| 3100 | case BBJ_ALWAYS: |
| 3101 | case BBJ_EHCATCHRET: |
| 3102 | |
| 3103 | /* Mark the jump dest block as being a jump target */ |
| 3104 | block->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 3105 | |
| 3106 | fgAddRefPred(block->bbJumpDest, block, nullptr, true); |
| 3107 | |
| 3108 | /* Is the next block reachable? */ |
| 3109 | |
| 3110 | if (block->bbJumpKind != BBJ_COND) |
| 3111 | { |
| 3112 | break; |
| 3113 | } |
| 3114 | |
| 3115 | noway_assert(block->bbNext); |
| 3116 | |
| 3117 | /* Fall through, the next block is also reachable */ |
| 3118 | __fallthrough; |
| 3119 | |
| 3120 | case BBJ_NONE: |
| 3121 | |
| 3122 | fgAddRefPred(block->bbNext, block, nullptr, true); |
| 3123 | break; |
| 3124 | |
| 3125 | case BBJ_EHFILTERRET: |
| 3126 | |
| 3127 | // Connect end of filter to catch handler. |
| 3128 | // In a well-formed program, this cannot be null. Tolerate here, so that we can call |
| 3129 | // fgComputePreds before fgImport on an ill-formed program; the problem will be detected in fgImport. |
| 3130 | if (block->bbJumpDest != nullptr) |
| 3131 | { |
| 3132 | fgAddRefPred(block->bbJumpDest, block, nullptr, true); |
| 3133 | } |
| 3134 | break; |
| 3135 | |
| 3136 | case BBJ_EHFINALLYRET: |
| 3137 | { |
| 3138 | /* Connect the end of the finally to the successor of |
| 3139 | the call to this finally */ |
| 3140 | |
| 3141 | if (!block->hasHndIndex()) |
| 3142 | { |
| 3143 | NO_WAY("endfinally outside a finally/fault block."); |
| 3144 | } |
| 3145 | |
| 3146 | unsigned hndIndex = block->getHndIndex(); |
| 3147 | EHblkDsc* ehDsc = ehGetDsc(hndIndex); |
| 3148 | |
| 3149 | if (!ehDsc->HasFinallyOrFaultHandler()) |
| 3150 | { |
| 3151 | NO_WAY("endfinally outside a finally/fault block."); |
| 3152 | } |
| 3153 | |
| 3154 | if (ehDsc->HasFinallyHandler()) |
| 3155 | { |
| 3156 | // Find all BBJ_CALLFINALLY that branched to this finally handler. |
| 3157 | BasicBlock* begBlk; |
| 3158 | BasicBlock* endBlk; |
| 3159 | ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); |
| 3160 | |
| 3161 | BasicBlock* finBeg = ehDsc->ebdHndBeg; |
| 3162 | for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) |
| 3163 | { |
| 3164 | if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) |
| 3165 | { |
| 3166 | continue; |
| 3167 | } |
| 3168 | |
| 3169 | noway_assert(bcall->isBBCallAlwaysPair()); |
| 3170 | fgAddRefPred(bcall->bbNext, block, nullptr, true); |
| 3171 | } |
| 3172 | } |
| 3173 | } |
| 3174 | break; |
| 3175 | |
| 3176 | case BBJ_THROW: |
| 3177 | case BBJ_RETURN: |
| 3178 | break; |
| 3179 | |
| 3180 | case BBJ_SWITCH: |
| 3181 | unsigned jumpCnt; |
| 3182 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 3183 | BasicBlock** jumpTab; |
| 3184 | jumpTab = block->bbJumpSwt->bbsDstTab; |
| 3185 | |
| 3186 | do |
| 3187 | { |
| 3188 | /* Mark the target block as being a jump target */ |
| 3189 | (*jumpTab)->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 3190 | |
| 3191 | fgAddRefPred(*jumpTab, block, nullptr, true); |
| 3192 | } while (++jumpTab, --jumpCnt); |
| 3193 | |
| 3194 | break; |
| 3195 | |
| 3196 | default: |
| 3197 | noway_assert(!"Unexpected bbJumpKind"); |
| 3198 | break; |
| 3199 | } |
| 3200 | } |
| 3201 | |
| 3202 | for (unsigned EHnum = 0; EHnum < compHndBBtabCount; EHnum++) |
| 3203 | { |
| 3204 | EHblkDsc* ehDsc = ehGetDsc(EHnum); |
| 3205 | |
| 3206 | if (ehDsc->HasFilter()) |
| 3207 | { |
| 3208 | ehDsc->ebdFilter->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 3209 | |
| 3210 | // The first block of a filter has an artificial extra refcount. |
| 3211 | ehDsc->ebdFilter->bbRefs++; |
| 3212 | } |
| 3213 | |
| 3214 | ehDsc->ebdHndBeg->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 3215 | |
| 3216 | // The first block of a handler has an artificial extra refcount. |
| 3217 | ehDsc->ebdHndBeg->bbRefs++; |
| 3218 | } |
| 3219 | |
| 3220 | fgModified = false; |
| 3221 | fgComputePredsDone = true; |
| 3222 | |
| 3223 | #ifdef DEBUG |
| 3224 | if (verbose) |
| 3225 | { |
| 3226 | printf("\n*************** After fgComputePreds()\n"); |
| 3227 | fgDispBasicBlocks(); |
| 3228 | printf("\n"); |
| 3229 | } |
| 3230 | #endif |
| 3231 | } |
| 3232 | |
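| | // Return the number of successors of a BBJ_EHFINALLYRET block: one for each |
| | // BBJ_CALLFINALLY/BBJ_ALWAYS pair that calls this finally (zero for a fault handler). |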
| 3233 | unsigned Compiler::fgNSuccsOfFinallyRet(BasicBlock* block) |
| 3234 | { |
| 3235 | BasicBlock* bb; |
| 3236 | unsigned res; |
| 3237 | fgSuccOfFinallyRetWork(block, ~0, &bb, &res); |
| 3238 | return res; |
| 3239 | } |
| 3240 | |
| 3241 | BasicBlock* Compiler::fgSuccOfFinallyRet(BasicBlock* block, unsigned i) |
| 3242 | { |
| 3243 | BasicBlock* bb; |
| 3244 | unsigned res; |
| 3245 | fgSuccOfFinallyRetWork(block, i, &bb, &res); |
| 3246 | return bb; |
| 3247 | } |
| 3248 | |
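| | // Worker for the two functions above. If 'i' is ~0, count the successors of 'block' and |
| | // return the count in '*nres'; otherwise return the i-th successor in '*bres' (the block |
| | // following the i-th BBJ_CALLFINALLY that targets this finally). |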
| 3249 | void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres) |
| 3250 | { |
| 3251 | assert(block->hasHndIndex()); // Otherwise, endfinally outside a finally/fault block? |
| 3252 | |
| 3253 | unsigned hndIndex = block->getHndIndex(); |
| 3254 | EHblkDsc* ehDsc = ehGetDsc(hndIndex); |
| 3255 | |
| 3256 | assert(ehDsc->HasFinallyOrFaultHandler()); // Otherwise, endfinally outside a finally/fault block. |
| 3257 | |
| 3258 | *bres = nullptr; |
| 3259 | unsigned succNum = 0; |
| 3260 | |
| 3261 | if (ehDsc->HasFinallyHandler()) |
| 3262 | { |
| 3263 | BasicBlock* begBlk; |
| 3264 | BasicBlock* endBlk; |
| 3265 | ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); |
| 3266 | |
| 3267 | BasicBlock* finBeg = ehDsc->ebdHndBeg; |
| 3268 | |
| 3269 | for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) |
| 3270 | { |
| 3271 | if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) |
| 3272 | { |
| 3273 | continue; |
| 3274 | } |
| 3275 | |
| 3276 | assert(bcall->isBBCallAlwaysPair()); |
| 3277 | |
| 3278 | if (succNum == i) |
| 3279 | { |
| 3280 | *bres = bcall->bbNext; |
| 3281 | return; |
| 3282 | } |
| 3283 | succNum++; |
| 3284 | } |
| 3285 | } |
| 3286 | assert(i == ~0u || ehDsc->HasFaultHandler()); // Should reach here only for fault blocks. |
| 3287 | if (i == ~0u) |
| 3288 | { |
| 3289 | *nres = succNum; |
| 3290 | } |
| 3291 | } |
| 3292 | |
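| | // Return the cached SwitchUniqueSuccSet for 'switchBlk', computing it on first use. |
| | // The descriptor records the switch's distinct successor blocks, i.e. the jump table |
| | // entries with duplicates removed. |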
| 3293 | Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switchBlk) |
| 3294 | { |
| 3295 | assert(switchBlk->bbJumpKind == BBJ_SWITCH); |
| 3296 | BlockToSwitchDescMap* switchMap = GetSwitchDescMap(); |
| 3297 | SwitchUniqueSuccSet res; |
| 3298 | if (switchMap->Lookup(switchBlk, &res)) |
| 3299 | { |
| 3300 | return res; |
| 3301 | } |
| 3302 | else |
| 3303 | { |
| 3304 | // We must compute the descriptor. Find which are dups, by creating a bit set with the unique successors. |
| 3305 | // We create a temporary bitset of blocks to compute the unique set of successor blocks, |
| 3306 | // since adding a block's number twice leaves just one "copy" in the bitset. Note that |
| 3307 | // we specifically don't use the BlockSet type, because doing so would require making a |
| 3308 | // call to EnsureBasicBlockEpoch() to make sure the epoch is up-to-date. However, that |
| 3309 | // can create a new epoch, thus invalidating all existing BlockSet objects, such as |
| 3310 | // reachability information stored in the blocks. To avoid that, we just use a local BitVec. |
| 3311 | |
| 3312 | BitVecTraits blockVecTraits(fgBBNumMax + 1, this); |
| 3313 | BitVec uniqueSuccBlocks(BitVecOps::MakeEmpty(&blockVecTraits)); |
| 3314 | BasicBlock** jumpTable = switchBlk->bbJumpSwt->bbsDstTab; |
| 3315 | unsigned jumpCount = switchBlk->bbJumpSwt->bbsCount; |
| 3316 | for (unsigned i = 0; i < jumpCount; i++) |
| 3317 | { |
| 3318 | BasicBlock* targ = jumpTable[i]; |
| 3319 | BitVecOps::AddElemD(&blockVecTraits, uniqueSuccBlocks, targ->bbNum); |
| 3320 | } |
| 3321 | // Now we have a set of unique successors. |
| 3322 | unsigned numNonDups = BitVecOps::Count(&blockVecTraits, uniqueSuccBlocks); |
| 3323 | |
| 3324 | BasicBlock** nonDups = new (getAllocator()) BasicBlock*[numNonDups]; |
| 3325 | |
| 3326 | unsigned nonDupInd = 0; |
| 3327 | // At this point, all unique targets are in "uniqueSuccBlocks". As we encounter each, |
| 3328 | // add to nonDups, remove from "uniqueSuccBlocks". |
| 3329 | for (unsigned i = 0; i < jumpCount; i++) |
| 3330 | { |
| 3331 | BasicBlock* targ = jumpTable[i]; |
| 3332 | if (BitVecOps::IsMember(&blockVecTraits, uniqueSuccBlocks, targ->bbNum)) |
| 3333 | { |
| 3334 | nonDups[nonDupInd] = targ; |
| 3335 | nonDupInd++; |
| 3336 | BitVecOps::RemoveElemD(&blockVecTraits, uniqueSuccBlocks, targ->bbNum); |
| 3337 | } |
| 3338 | } |
| 3339 | |
| 3340 | assert(nonDupInd == numNonDups); |
| 3341 | assert(BitVecOps::Count(&blockVecTraits, uniqueSuccBlocks) == 0); |
| 3342 | res.numDistinctSuccs = numNonDups; |
| 3343 | res.nonDuplicates = nonDups; |
| 3344 | switchMap->Set(switchBlk, res); |
| 3345 | return res; |
| 3346 | } |
| 3347 | } |
| 3348 | |
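| | // Fix up this unique-successor set after a jump table entry of 'switchBlk' has been |
| | // retargeted from 'from' to 'to'. |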
| 3349 | void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc, |
| 3350 | BasicBlock* switchBlk, |
| 3351 | BasicBlock* from, |
| 3352 | BasicBlock* to) |
| 3353 | { |
| 3354 | assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition. |
| 3355 | unsigned jmpTabCnt = switchBlk->bbJumpSwt->bbsCount; |
| 3356 | BasicBlock** jmpTab = switchBlk->bbJumpSwt->bbsDstTab; |
| 3357 | |
| 3358 | // Is "from" still in the switch table (because it had more than one entry before?) |
| 3359 | bool fromStillPresent = false; |
| 3360 | for (unsigned i = 0; i < jmpTabCnt; i++) |
| 3361 | { |
| 3362 | if (jmpTab[i] == from) |
| 3363 | { |
| 3364 | fromStillPresent = true; |
| 3365 | break; |
| 3366 | } |
| 3367 | } |
| 3368 | |
| 3369 | // Is "to" already in "this"? |
| 3370 | bool toAlreadyPresent = false; |
| 3371 | for (unsigned i = 0; i < numDistinctSuccs; i++) |
| 3372 | { |
| 3373 | if (nonDuplicates[i] == to) |
| 3374 | { |
| 3375 | toAlreadyPresent = true; |
| 3376 | break; |
| 3377 | } |
| 3378 | } |
| 3379 | |
| 3380 | // Four cases: |
| 3381 | // If "from" is still present, and "to" is already present, do nothing |
| 3382 | // If "from" is still present, and "to" is not, must reallocate to add an entry. |
| 3383 | // If "from" is not still present, and "to" is not present, write "to" where "from" was. |
| 3384 | // If "from" is not still present, but "to" is present, remove "from". |
| 3385 | if (fromStillPresent && toAlreadyPresent) |
| 3386 | { |
| 3387 | return; |
| 3388 | } |
| 3389 | else if (fromStillPresent && !toAlreadyPresent) |
| 3390 | { |
| 3391 | // reallocate to add an entry |
| 3392 | BasicBlock** newNonDups = new (alloc) BasicBlock*[numDistinctSuccs + 1]; |
| 3393 | memcpy(newNonDups, nonDuplicates, numDistinctSuccs * sizeof(BasicBlock*)); |
| 3394 | newNonDups[numDistinctSuccs] = to; |
| 3395 | numDistinctSuccs++; |
| 3396 | nonDuplicates = newNonDups; |
| 3397 | } |
| 3398 | else if (!fromStillPresent && !toAlreadyPresent) |
| 3399 | { |
| 3400 | #ifdef DEBUG |
| 3401 | // write "to" where "from" was |
| 3402 | bool foundFrom = false; |
| 3403 | #endif // DEBUG |
| 3404 | for (unsigned i = 0; i < numDistinctSuccs; i++) |
| 3405 | { |
| 3406 | if (nonDuplicates[i] == from) |
| 3407 | { |
| 3408 | nonDuplicates[i] = to; |
| 3409 | #ifdef DEBUG |
| 3410 | foundFrom = true; |
| 3411 | #endif // DEBUG |
| 3412 | break; |
| 3413 | } |
| 3414 | } |
| 3415 | assert(foundFrom); |
| 3416 | } |
| 3417 | else |
| 3418 | { |
| 3419 | assert(!fromStillPresent && toAlreadyPresent); |
| 3420 | #ifdef DEBUG |
| 3421 | // remove "from". |
| 3422 | bool foundFrom = false; |
| 3423 | #endif // DEBUG |
| 3424 | for (unsigned i = 0; i < numDistinctSuccs; i++) |
| 3425 | { |
| 3426 | if (nonDuplicates[i] == from) |
| 3427 | { |
| 3428 | nonDuplicates[i] = nonDuplicates[numDistinctSuccs - 1]; |
| 3429 | numDistinctSuccs--; |
| 3430 | #ifdef DEBUG |
| 3431 | foundFrom = true; |
| 3432 | #endif // DEBUG |
| 3433 | break; |
| 3434 | } |
| 3435 | } |
| 3436 | assert(foundFrom); |
| 3437 | } |
| 3438 | } |
| 3439 | |
| 3440 | /***************************************************************************** |
| 3441 | * |
| 3442 | * Simple utility function to remove an entry for a block in the switch desc |
| 3443 | * map, factored out so that it can be called from other phases. |
| 3444 | * |
| 3445 | */ |
| 3446 | void Compiler::fgInvalidateSwitchDescMapEntry(BasicBlock* block) |
| 3447 | { |
| 3448 | // Check if map has no entries yet. |
| 3449 | if (m_switchDescMap != nullptr) |
| 3450 | { |
| 3451 | m_switchDescMap->Remove(block); |
| 3452 | } |
| 3453 | } |
| 3454 | |
| 3455 | void Compiler::UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to) |
| 3456 | { |
| 3457 | if (m_switchDescMap == nullptr) |
| 3458 | { |
| 3459 | return; // No mappings, nothing to do. |
| 3460 | } |
| 3461 | |
| 3462 | // Otherwise... |
| 3463 | BlockToSwitchDescMap* switchMap = GetSwitchDescMap(); |
| 3464 | SwitchUniqueSuccSet* res = switchMap->LookupPointer(switchBlk); |
| 3465 | if (res != nullptr) |
| 3466 | { |
| 3467 | // A descriptor exists for this switch block, so update it. |
| 3468 | res->UpdateTarget(getAllocator(), switchBlk, from, to); |
| 3469 | } |
| 3470 | } |
| 3471 | |
| 3472 | /***************************************************************************** |
| 3473 | * For a block that is in a handler region, find the first block of the most-nested |
| 3474 | * handler containing the block. |
| 3475 | */ |
| 3476 | BasicBlock* Compiler::fgFirstBlockOfHandler(BasicBlock* block) |
| 3477 | { |
| 3478 | assert(block->hasHndIndex()); |
| 3479 | return ehGetDsc(block->getHndIndex())->ebdHndBeg; |
| 3480 | } |
| 3481 | |
| 3482 | /***************************************************************************** |
| 3483 | * |
| 3484 | * Function called to find back edges and return blocks and mark them as needing GC Polls. This marks all |
| 3485 | * blocks. |
| 3486 | */ |
| 3487 | void Compiler::fgMarkGCPollBlocks() |
| 3488 | { |
| 3489 | if (GCPOLL_NONE == opts.compGCPollType) |
| 3490 | { |
| 3491 | return; |
| 3492 | } |
| 3493 | |
| 3494 | #ifdef DEBUG |
| 3495 | /* Check that the flowgraph data (bbNum, bbRefs, bbPreds) is up-to-date */ |
| 3496 | fgDebugCheckBBlist(); |
| 3497 | #endif |
| 3498 | |
| 3499 | BasicBlock* block; |
| 3500 | |
| 3501 | // Return blocks always need GC polls. In addition, all back edges (including those from switch |
| 3502 | // statements) need GC polls. The poll is on the block with the outgoing back edge (or ret), rather than |
| 3503 | // on the destination or on the edge itself. |
| 3504 | for (block = fgFirstBB; block; block = block->bbNext) |
| 3505 | { |
| 3506 | bool blockNeedsPoll = false; |
| 3507 | switch (block->bbJumpKind) |
| 3508 | { |
| 3509 | case BBJ_COND: |
| 3510 | case BBJ_ALWAYS: |
| 3511 | blockNeedsPoll = (block->bbJumpDest->bbNum <= block->bbNum); |
| 3512 | break; |
| 3513 | |
| 3514 | case BBJ_RETURN: |
| 3515 | blockNeedsPoll = true; |
| 3516 | break; |
| 3517 | |
| 3518 | case BBJ_SWITCH: |
| 3519 | unsigned jumpCnt; |
| 3520 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 3521 | BasicBlock** jumpTab; |
| 3522 | jumpTab = block->bbJumpSwt->bbsDstTab; |
| 3523 | |
| 3524 | do |
| 3525 | { |
| 3526 | if ((*jumpTab)->bbNum <= block->bbNum) |
| 3527 | { |
| 3528 | blockNeedsPoll = true; |
| 3529 | break; |
| 3530 | } |
| 3531 | } while (++jumpTab, --jumpCnt); |
| 3532 | break; |
| 3533 | |
| 3534 | default: |
| 3535 | break; |
| 3536 | } |
| 3537 | |
| 3538 | if (blockNeedsPoll) |
| 3539 | { |
| 3540 | block->bbFlags |= BBF_NEEDS_GCPOLL; |
| 3541 | } |
| 3542 | } |
| 3543 | } |
| 3544 | |
| 3545 | void Compiler::fgInitBlockVarSets() |
| 3546 | { |
| 3547 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 3548 | { |
| 3549 | block->InitVarSets(this); |
| 3550 | } |
| 3551 | |
| 3552 | fgBBVarSetsInited = true; |
| 3553 | } |
| 3554 | |
| 3555 | /***************************************************************************** |
| 3556 | * |
| 3557 | * The following does the final pass on BBF_NEEDS_GCPOLL and then actually creates the GC Polls. |
| 3558 | */ |
| 3559 | void Compiler::fgCreateGCPolls() |
| 3560 | { |
| 3561 | if (GCPOLL_NONE == opts.compGCPollType) |
| 3562 | { |
| 3563 | return; |
| 3564 | } |
| 3565 | |
| 3566 | bool createdPollBlocks = false; |
| 3567 | |
| 3568 | #ifdef DEBUG |
| 3569 | if (verbose) |
| 3570 | { |
| 3571 | printf("*************** In fgCreateGCPolls() for %s\n", info.compFullName); |
| 3572 | } |
| 3573 | #endif // DEBUG |
| 3574 | |
| 3575 | if (opts.OptimizationEnabled()) |
| 3576 | { |
| 3577 | // Remove polls from well formed loops with a constant upper bound. |
| 3578 | for (unsigned lnum = 0; lnum < optLoopCount; ++lnum) |
| 3579 | { |
| 3580 | // Look for constant counted loops that run for a short duration. This logic is very similar to |
| 3581 | // what's in code:Compiler::optUnrollLoops, since they have similar constraints. However, this |
| 3582 | // logic is much more permissive since we're not doing a complex transformation. |
| 3583 | |
| 3584 | /* TODO-Cleanup: |
| 3585 | * I feel bad cloning so much logic from optUnrollLoops |
| 3586 | */ |
| 3587 | |
| 3588 | // Filter out loops not meeting the obvious preconditions. |
| 3589 | // |
| 3590 | if (optLoopTable[lnum].lpFlags & LPFLG_REMOVED) |
| 3591 | { |
| 3592 | continue; |
| 3593 | } |
| 3594 | |
| 3595 | if (!(optLoopTable[lnum].lpFlags & LPFLG_CONST)) |
| 3596 | { |
| 3597 | continue; |
| 3598 | } |
| 3599 | |
| 3600 | BasicBlock* head = optLoopTable[lnum].lpHead; |
| 3601 | BasicBlock* bottom = optLoopTable[lnum].lpBottom; |
| 3602 | |
| 3603 | // Loops dominated by GC_SAFE_POINT won't have this set. |
| 3604 | if (!(bottom->bbFlags & BBF_NEEDS_GCPOLL)) |
| 3605 | { |
| 3606 | continue; |
| 3607 | } |
| 3608 | |
| 3609 | /* Get the loop data: |
| 3610 | - initial constant |
| 3611 | - limit constant |
| 3612 | - iterator |
| 3613 | - iterator increment |
| 3614 | - increment operation type (i.e. ASG_ADD, ASG_SUB, etc...) |
| 3615 | - loop test type (i.e. GT_GE, GT_LT, etc...) |
| 3616 | */ |
| 3617 | |
| 3618 | int lbeg = optLoopTable[lnum].lpConstInit; |
| 3619 | int llim = optLoopTable[lnum].lpConstLimit(); |
| 3620 | genTreeOps testOper = optLoopTable[lnum].lpTestOper(); |
| 3621 | |
| 3622 | int lvar = optLoopTable[lnum].lpIterVar(); |
| 3623 | int iterInc = optLoopTable[lnum].lpIterConst(); |
| 3624 | genTreeOps iterOper = optLoopTable[lnum].lpIterOper(); |
| 3625 | |
| 3626 | var_types iterOperType = optLoopTable[lnum].lpIterOperType(); |
| 3627 | bool unsTest = (optLoopTable[lnum].lpTestTree->gtFlags & GTF_UNSIGNED) != 0; |
| 3628 | if (lvaTable[lvar].lvAddrExposed) |
| 3629 | { // Can't reason about the value of the iteration variable. |
| 3630 | continue; |
| 3631 | } |
| 3632 | |
| 3633 | unsigned totalIter; |
| 3634 | |
| 3635 | /* Find the number of iterations - the function returns false if not a constant number */ |
| 3636 | |
| 3637 | if (!optComputeLoopRep(lbeg, llim, iterInc, iterOper, iterOperType, testOper, unsTest, |
| 3638 | // The value here doesn't matter for this variation of the optimization |
| 3639 | true, &totalIter)) |
| 3640 | { |
| 3641 | #ifdef DEBUG |
| 3642 | if (verbose) |
| 3643 | { |
| 3644 | printf("Could not compute loop iterations for loop from " FMT_BB " to " FMT_BB, head->bbNum, |
| 3645 | bottom->bbNum); |
| 3646 | } |
| 3647 | #endif // DEBUG |
| 3648 | (void)head; // suppress gcc error. |
| 3649 | |
| 3650 | continue; |
| 3651 | } |
| 3652 | |
| 3653 | /* Forget it if there are too many repetitions or not a constant loop */ |
| 3654 | |
| 3655 | static const unsigned ITER_LIMIT = 256; |
| 3656 | if (totalIter > ITER_LIMIT) |
| 3657 | { |
| 3658 | continue; |
| 3659 | } |
| 3660 | |
| 3661 | // It is safe to eliminate the poll from this loop. |
| 3662 | bottom->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 3663 | |
| 3664 | #ifdef DEBUG |
| 3665 | if (verbose) |
| 3666 | { |
| 3667 | printf("Removing poll in block " FMT_BB " because it forms a bounded counted loop\n", bottom->bbNum); |
| 3668 | } |
| 3669 | #endif // DEBUG |
| 3670 | } |
| 3671 | } |
| 3672 | |
| 3673 | // Final chance to optimize the polls. Move all polls in loops from the bottom of the loop up to the |
| 3674 | // loop head. Also eliminate all epilog polls in non-leaf methods. This only works if we have dominator |
| 3675 | // information. |
| 3676 | if (fgDomsComputed) |
| 3677 | { |
| 3678 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 3679 | { |
| 3680 | if (!(block->bbFlags & BBF_NEEDS_GCPOLL)) |
| 3681 | { |
| 3682 | continue; |
| 3683 | } |
| 3684 | |
| 3685 | if (block->bbJumpKind == BBJ_COND || block->bbJumpKind == BBJ_ALWAYS) |
| 3686 | { |
| 3687 | // make sure that this is loop-like |
| 3688 | if (!fgReachable(block->bbJumpDest, block)) |
| 3689 | { |
| 3690 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 3691 | #ifdef DEBUG |
| 3692 | if (verbose) |
| 3693 | { |
| 3694 | printf("Removing poll in block " FMT_BB " because it does not form a loop\n", block->bbNum); |
| 3695 | } |
| 3696 | #endif // DEBUG |
| 3697 | continue; |
| 3698 | } |
| 3699 | } |
| 3700 | else if (!(block->bbJumpKind == BBJ_RETURN || block->bbJumpKind == BBJ_SWITCH)) |
| 3701 | { |
| 3702 | noway_assert(!"GC Poll on a block that has no control transfer."); |
| 3703 | #ifdef DEBUG |
| 3704 | if (verbose) |
| 3705 | { |
| 3706 | printf("Removing poll in block " FMT_BB " because it is not a jump\n", block->bbNum); |
| 3707 | } |
| 3708 | #endif // DEBUG |
| 3709 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 3710 | continue; |
| 3711 | } |
| 3712 | |
| 3713 | // Because of block compaction, it's possible to end up with a block that is both poll and safe. |
| 3714 | // Clean those up now. |
| 3715 | |
| 3716 | if (block->bbFlags & BBF_GC_SAFE_POINT) |
| 3717 | { |
| 3718 | #ifdef DEBUG |
| 3719 | if (verbose) |
| 3720 | { |
| 3721 | printf("Removing poll in return block " FMT_BB " because it is GC Safe\n", block->bbNum); |
| 3722 | } |
| 3723 | #endif // DEBUG |
| 3724 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 3725 | continue; |
| 3726 | } |
| 3727 | |
| 3728 | if (block->bbJumpKind == BBJ_RETURN) |
| 3729 | { |
| 3730 | if (!optReachWithoutCall(fgFirstBB, block)) |
| 3731 | { |
| 3732 | // check to see if there is a call along the path between the first block and the return |
| 3733 | // block. |
| 3734 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 3735 | #ifdef DEBUG |
| 3736 | if (verbose) |
| 3737 | { |
| 3738 | printf("Removing poll in return block " FMT_BB " because it is dominated by a call\n", |
| 3739 | block->bbNum); |
| 3740 | } |
| 3741 | #endif // DEBUG |
| 3742 | continue; |
| 3743 | } |
| 3744 | } |
| 3745 | } |
| 3746 | } |
| 3747 | |
| 3748 | noway_assert(!fgGCPollsCreated); |
| 3749 | BasicBlock* block; |
| 3750 | fgGCPollsCreated = true; |
| 3751 | |
| 3752 | // Walk through the blocks and hunt for a block that has BBF_NEEDS_GCPOLL |
| 3753 | for (block = fgFirstBB; block; block = block->bbNext) |
| 3754 | { |
| 3755 | // Because of block compaction, it's possible to end up with a block that is both poll and safe. |
| 3756 | // And if !fgDomsComputed, we won't have cleared them, so skip them now |
| 3757 | if (!(block->bbFlags & BBF_NEEDS_GCPOLL) || (block->bbFlags & BBF_GC_SAFE_POINT)) |
| 3758 | { |
| 3759 | continue; |
| 3760 | } |
| 3761 | |
| 3762 | // This block needs a poll. We either just insert a callout or we split the block and inline part of |
| 3763 | // the test. This depends on the value of opts.compGCPollType. |
| 3764 | |
| 3765 | // If we're doing GCPOLL_CALL, just insert a GT_CALL node before the last node in the block. |
| 3766 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 3767 | |
| 3768 | #ifdef DEBUG |
| 3769 | switch (block->bbJumpKind) |
| 3770 | { |
| 3771 | case BBJ_RETURN: |
| 3772 | case BBJ_ALWAYS: |
| 3773 | case BBJ_COND: |
| 3774 | case BBJ_SWITCH: |
| 3775 | break; |
| 3776 | default: |
| 3777 | noway_assert(!"Unknown block type for BBF_NEEDS_GCPOLL"); |
| 3778 | } |
| 3779 | #endif // DEBUG |
| 3780 | |
| 3781 | noway_assert(opts.compGCPollType); |
| 3782 | |
| 3783 | GCPollType pollType = opts.compGCPollType; |
| 3784 | // pollType is set to either CALL or INLINE at this point. Below is the list of places where we |
| 3785 | // can't or don't want to emit an inline check. Check all of those. If after all of that we still |
| 3786 | // have INLINE, then emit an inline check. |
| 3787 | |
| 3788 | if (opts.OptimizationDisabled()) |
| 3789 | { |
| 3790 | #ifdef DEBUG |
| 3791 | if (verbose) |
| 3792 | { |
| 3793 | printf("Selecting CALL poll in block " FMT_BB " because of debug/minopts\n", block->bbNum); |
| 3794 | } |
| 3795 | #endif // DEBUG |
| 3796 | |
| 3797 | // Don't split blocks and create inlined polls unless we're optimizing. |
| 3798 | pollType = GCPOLL_CALL; |
| 3799 | } |
| 3800 | else if (genReturnBB == block) |
| 3801 | { |
| 3802 | #ifdef DEBUG |
| 3803 | if (verbose) |
| 3804 | { |
| 3805 | printf("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum); |
| 3806 | } |
| 3807 | #endif // DEBUG |
| 3808 | |
| 3809 | // we don't want to split the single return block |
| 3810 | pollType = GCPOLL_CALL; |
| 3811 | } |
| 3812 | else if (BBJ_SWITCH == block->bbJumpKind) |
| 3813 | { |
| 3814 | #ifdef DEBUG |
| 3815 | if (verbose) |
| 3816 | { |
| 3817 | printf("Selecting CALL poll in block " FMT_BB " because it is a loop formed by a SWITCH\n", |
| 3818 | block->bbNum); |
| 3819 | } |
| 3820 | #endif // DEBUG |
| 3821 | |
| 3822 | // I don't want to deal with all the outgoing edges of a switch block. |
| 3823 | pollType = GCPOLL_CALL; |
| 3824 | } |
| 3825 | |
| 3826 | // TODO-Cleanup: potentially don't split if we're in an EH region. |
| 3827 | |
| 3828 | createdPollBlocks |= fgCreateGCPoll(pollType, block); |
| 3829 | } |
| 3830 | |
| 3831 | // If we split a block to create a GC Poll, then rerun fgReorderBlocks to push the rarely run blocks out |
| 3832 | // past the epilog. We should never split blocks unless we're optimizing. |
| 3833 | if (createdPollBlocks) |
| 3834 | { |
| 3835 | noway_assert(opts.OptimizationEnabled()); |
| 3836 | fgReorderBlocks(); |
| 3837 | } |
| 3838 | } |
| 3839 | |
| 3840 | /***************************************************************************** |
| 3841 | * |
| 3842 | * Actually create a GCPoll in the given block. Returns true if it created |
| 3843 | * a basic block. |
| 3844 | */ |
| 3845 | |
| 3846 | bool Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) |
| 3847 | { |
| 3848 | assert(!(block->bbFlags & BBF_GC_SAFE_POINT)); |
| 3849 | bool createdPollBlocks; |
| 3850 | |
| 3851 | void* addrTrap; |
| 3852 | void* pAddrOfCaptureThreadGlobal; |
| 3853 | |
| 3854 | addrTrap = info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal); |
| 3855 | |
| 3856 | #ifdef ENABLE_FAST_GCPOLL_HELPER |
| 3857 | // I never want to split blocks if we've got two indirections here. |
| 3858 | // This is a size trade-off assuming the VM has ENABLE_FAST_GCPOLL_HELPER. |
| 3859 | // So don't do it when that is off. |
| 3860 | if (pAddrOfCaptureThreadGlobal != NULL) |
| 3861 | { |
| 3862 | pollType = GCPOLL_CALL; |
| 3863 | } |
| 3864 | #endif // ENABLE_FAST_GCPOLL_HELPER |
| 3865 | |
| 3866 | if (GCPOLL_CALL == pollType) |
| 3867 | { |
| 3868 | createdPollBlocks = false; |
| 3869 | GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_POLL_GC, TYP_VOID); |
| 3870 | |
| 3871 | // for BBJ_ALWAYS I don't need to insert it before the condition. Just append it. |
| 3872 | if (block->bbJumpKind == BBJ_ALWAYS) |
| 3873 | { |
| 3874 | fgInsertStmtAtEnd(block, call); |
| 3875 | } |
| 3876 | else |
| 3877 | { |
| 3878 | GenTreeStmt* newStmt = fgInsertStmtNearEnd(block, call); |
| 3879 | // For DDB156656, we need to associate the GC Poll with the IL offset (and therefore sequence |
| 3880 | // point) of the tree before which we inserted the poll. One example of when this is a |
| 3881 | // problem: |
| 3882 | // if (...) { //1 |
| 3883 | // ... |
| 3884 | // } //2 |
| 3885 | // else { //3 |
| 3886 | // ... |
| 3887 | // } |
| 3888 | // (gcpoll) //4 |
| 3889 | // return. //5 |
| 3890 | // |
| 3891 | // If we take the if statement at 1, we encounter a jump at 2. This jumps over the else |
| 3892 | // and lands at 4. 4 is where we inserted the gcpoll. However, that is associated with |
| 3893 | // the sequence point at 3. Therefore, the debugger displays the wrong source line at the |
| 3894 | // gc poll location. |
| 3895 | // |
| 3896 | // More formally, if control flow targets an instruction, that instruction must be the |
| 3897 | // start of a new sequence point. |
| 3898 | if (newStmt->gtNext) |
| 3899 | { |
| 3900 | // Is it possible for gtNext to be NULL? |
| 3901 | noway_assert(newStmt->gtNext->gtOper == GT_STMT); |
| 3902 | newStmt->gtStmtILoffsx = newStmt->gtNextStmt->gtStmtILoffsx; |
| 3903 | } |
| 3904 | } |
| 3905 | |
| 3906 | block->bbFlags |= BBF_GC_SAFE_POINT; |
| 3907 | #ifdef DEBUG |
| 3908 | if (verbose) |
| 3909 | { |
| 3910 | printf("*** creating GC Poll in block " FMT_BB "\n", block->bbNum); |
| 3911 | gtDispTreeList(block->bbTreeList); |
| 3912 | } |
| 3913 | #endif // DEBUG |
| 3914 | } |
| 3915 | else |
| 3916 | { |
| 3917 | createdPollBlocks = true; |
| 3918 | // if we're doing GCPOLL_INLINE, then: |
| 3919 | // 1) Create two new blocks: Poll and Bottom. The original block is called Top. |
| 3920 | |
| 3921 | // I want to create: |
| 3922 | // top -> poll -> bottom (lexically) |
| 3923 | // so that we jump over poll to get to bottom. |
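| | // After the transformation below, the flow is: |
| | //   top    (BBJ_COND) jumps to bottom when g_TrapReturningThreads == 0, |
| | //          and otherwise falls through into |
| | //   poll   (BBJ_NONE)  which calls the CORINFO_HELP_POLL_GC helper and falls into |
| | //   bottom (original jump kind) which keeps top's original successors. |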
| 3924 | BasicBlock* top = block; |
| 3925 | BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); |
| 3926 | BasicBlock* bottom = fgNewBBafter(top->bbJumpKind, poll, true); |
| 3927 | BBjumpKinds oldJumpKind = top->bbJumpKind; |
| 3928 | |
| 3929 | // Update block flags |
| 3930 | const unsigned __int64 originalFlags = top->bbFlags | BBF_GC_SAFE_POINT; |
| 3931 | |
| 3932 | // Unlike Fei's inliner from puclr, I'm allowed to split loops. |
| 3933 | // And we keep a few other flags... |
| 3934 | noway_assert((originalFlags & (BBF_SPLIT_NONEXIST & ~(BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1))) == 0); |
| 3935 | top->bbFlags = originalFlags & (~BBF_SPLIT_LOST | BBF_GC_SAFE_POINT); |
| 3936 | bottom->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT); |
| 3937 | bottom->inheritWeight(top); |
| 3938 | poll->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT); |
| 3939 | |
| 3940 | // 9) Mark Poll as rarely run. |
| 3941 | poll->bbSetRunRarely(); |
| 3942 | |
| 3943 | // 5) Bottom gets all the outgoing edges and inherited flags of Original. |
| 3944 | bottom->bbJumpDest = top->bbJumpDest; |
| 3945 | |
| 3946 | // 2) Add a GC_CALL node to Poll. |
| 3947 | GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_POLL_GC, TYP_VOID); |
| 3948 | fgInsertStmtAtEnd(poll, call); |
| 3949 | |
| 3950 | // 3) Remove the last statement from Top and add it to Bottom. |
| 3951 | if (oldJumpKind != BBJ_ALWAYS) |
| 3952 | { |
| 3953 | // if I'm always jumping to the target, then this is not a condition that needs moving. |
| 3954 | GenTreeStmt* stmt = top->firstStmt(); |
| 3955 | while (stmt->gtNext) |
| 3956 | { |
| 3957 | stmt = stmt->gtNextStmt; |
| 3958 | } |
| 3959 | fgRemoveStmt(top, stmt); |
| 3960 | fgInsertStmtAtEnd(bottom, stmt); |
| 3961 | } |
| 3962 | |
| 3963 | // for BBJ_ALWAYS blocks, bottom is an empty block. |
| 3964 | |
| 3965 | // 4) Create a GT_EQ node that checks against g_TrapReturningThreads. True jumps to Bottom, |
| 3966 | // false falls through to poll. Add this to the end of Top. Top is now BBJ_COND. Bottom is |
| 3967 | // now a jump target |
| 3968 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 3969 | |
| 3970 | #ifdef ENABLE_FAST_GCPOLL_HELPER |
| 3971 | // Prefer the fast gc poll helper over the double indirection. |
| 3972 | noway_assert(pAddrOfCaptureThreadGlobal == nullptr); |
| 3973 | #endif |
| 3974 | |
| 3975 | GenTree* value; // The value of g_TrapReturningThreads |
| 3976 | if (pAddrOfCaptureThreadGlobal != nullptr) |
| 3977 | { |
| 3978 | // Use a double indirection |
| 3979 | GenTree* addr = |
| 3980 | gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pAddrOfCaptureThreadGlobal, GTF_ICON_PTR_HDL, true); |
| 3981 | |
| 3982 | value = gtNewOperNode(GT_IND, TYP_INT, addr); |
| 3983 | // This indirection won't cause an exception. |
| 3984 | value->gtFlags |= GTF_IND_NONFAULTING; |
| 3985 | } |
| 3986 | else |
| 3987 | { |
| 3988 | // Use a single indirection |
| 3989 | value = gtNewIndOfIconHandleNode(TYP_INT, (size_t)addrTrap, GTF_ICON_PTR_HDL, false); |
| 3990 | } |
| 3991 | |
| 3992 | // Treat the reading of g_TrapReturningThreads as volatile. |
| 3993 | value->gtFlags |= GTF_IND_VOLATILE; |
| 3994 | |
| 3995 | // Compare for equal to zero |
| 3996 | GenTree* trapRelop = gtNewOperNode(GT_EQ, TYP_INT, value, gtNewIconNode(0, TYP_INT)); |
| 3997 | |
| 3998 | trapRelop->gtFlags |= GTF_RELOP_JMP_USED | GTF_DONT_CSE; |
| 3999 | GenTree* trapCheck = gtNewOperNode(GT_JTRUE, TYP_VOID, trapRelop); |
| 4000 | fgInsertStmtAtEnd(top, trapCheck); |
| 4001 | top->bbJumpDest = bottom; |
| 4002 | top->bbJumpKind = BBJ_COND; |
| 4003 | bottom->bbFlags |= BBF_JMP_TARGET; |
| 4004 | |
| 4005 | // 7) Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. |
| 4006 | fgAddRefPred(bottom, poll); |
| 4007 | fgAddRefPred(bottom, top); |
| 4008 | fgAddRefPred(poll, top); |
| 4009 | |
| 4010 | // 8) Replace Top with Bottom in the predecessor list of all outgoing edges from Bottom (1 for |
| 4011 | // jumps, 2 for conditional branches, N for switches). |
| 4012 | switch (oldJumpKind) |
| 4013 | { |
| 4014 | case BBJ_RETURN: |
| 4015 | // no successors |
| 4016 | break; |
| 4017 | case BBJ_COND: |
| 4018 | // replace predecessor in the fall through block. |
| 4019 | noway_assert(bottom->bbNext); |
| 4020 | fgReplacePred(bottom->bbNext, top, bottom); |
| 4021 | |
| 4022 | // fall through for the jump target |
| 4023 | __fallthrough; |
| 4024 | |
| 4025 | case BBJ_ALWAYS: |
| 4026 | fgReplacePred(bottom->bbJumpDest, top, bottom); |
| 4027 | break; |
| 4028 | case BBJ_SWITCH: |
| 4029 | NO_WAY("SWITCH should be a call rather than an inlined poll."); |
| 4030 | break; |
| 4031 | default: |
| 4032 | NO_WAY("Unknown block type for updating predecessor lists."); |
| 4033 | } |
| 4034 | |
| 4035 | top->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 4036 | noway_assert(!(poll->bbFlags & BBF_NEEDS_GCPOLL)); |
| 4037 | noway_assert(!(bottom->bbFlags & BBF_NEEDS_GCPOLL)); |
| 4038 | |
| 4039 | if (compCurBB == top) |
| 4040 | { |
| 4041 | compCurBB = bottom; |
| 4042 | } |
| 4043 | |
| 4044 | #ifdef DEBUG |
| 4045 | if (verbose) |
| 4046 | { |
| 4047 | printf("*** creating inlined GC Poll in top block " FMT_BB "\n", top->bbNum); |
| 4048 | gtDispTreeList(top->bbTreeList); |
| 4049 | printf(" poll block is " FMT_BB "\n", poll->bbNum); |
| 4050 | gtDispTreeList(poll->bbTreeList); |
| 4051 | printf(" bottom block is " FMT_BB "\n", bottom->bbNum); |
| 4052 | gtDispTreeList(bottom->bbTreeList); |
| 4053 | } |
| 4054 | #endif // DEBUG |
| 4055 | } |
| 4056 | |
| 4057 | return createdPollBlocks; |
| 4058 | } |
| 4059 | |
| 4060 | /***************************************************************************** |
| 4061 | * |
| 4062 | * The following helps find a basic block given its PC offset. |
| 4063 | */ |
| 4064 | |
| 4065 | void Compiler::fgInitBBLookup() |
| 4066 | { |
| 4067 | BasicBlock** dscBBptr; |
| 4068 | BasicBlock* tmpBBdesc; |
| 4069 | |
| 4070 | /* Allocate the basic block table */ |
| 4071 | |
| 4072 | dscBBptr = fgBBs = new (this, CMK_BasicBlock) BasicBlock*[fgBBcount]; |
| 4073 | |
| 4074 | /* Walk all the basic blocks, filling in the table */ |
| 4075 | |
| 4076 | for (tmpBBdesc = fgFirstBB; tmpBBdesc; tmpBBdesc = tmpBBdesc->bbNext) |
| 4077 | { |
| 4078 | *dscBBptr++ = tmpBBdesc; |
| 4079 | } |
| 4080 | |
| 4081 | noway_assert(dscBBptr == fgBBs + fgBBcount); |
| 4082 | } |
| 4083 | |
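| | // Map an IL offset to the basic block that starts at that offset, by binary search of the |
| | // fgBBs table built by fgInitBBLookup (the table is expected to be in increasing bbCodeOffs |
| | // order at this point). Internal blocks, which have no IL offset, are skipped. Returns |
| | // nullptr when 'addr' is the end-of-method offset. |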
| 4084 | BasicBlock* Compiler::fgLookupBB(unsigned addr) |
| 4085 | { |
| 4086 | unsigned lo; |
| 4087 | unsigned hi; |
| 4088 | |
| 4089 | /* Do a binary search */ |
| 4090 | |
| 4091 | for (lo = 0, hi = fgBBcount - 1;;) |
| 4092 | { |
| 4093 | |
| 4094 | AGAIN:; |
| 4095 | |
| 4096 | if (lo > hi) |
| 4097 | { |
| 4098 | break; |
| 4099 | } |
| 4100 | |
| 4101 | unsigned mid = (lo + hi) / 2; |
| 4102 | BasicBlock* dsc = fgBBs[mid]; |
| 4103 | |
| 4104 | // We introduce internal blocks for BBJ_CALLFINALLY. Skip over these. |
| 4105 | |
| 4106 | while (dsc->bbFlags & BBF_INTERNAL) |
| 4107 | { |
| 4108 | dsc = dsc->bbNext; |
| 4109 | mid++; |
| 4110 | |
| 4111 | // We skipped over too many; set hi back to the original mid - 1 |
| 4112 | |
| 4113 | if (mid > hi) |
| 4114 | { |
| 4115 | mid = (lo + hi) / 2; |
| 4116 | hi = mid - 1; |
| 4117 | goto AGAIN; |
| 4118 | } |
| 4119 | } |
| 4120 | |
| 4121 | unsigned pos = dsc->bbCodeOffs; |
| 4122 | |
| 4123 | if (pos < addr) |
| 4124 | { |
| 4125 | if ((lo == hi) && (lo == (fgBBcount - 1))) |
| 4126 | { |
| 4127 | noway_assert(addr == dsc->bbCodeOffsEnd); |
| 4128 | return nullptr; // NULL means the end of method |
| 4129 | } |
| 4130 | lo = mid + 1; |
| 4131 | continue; |
| 4132 | } |
| 4133 | |
| 4134 | if (pos > addr) |
| 4135 | { |
| 4136 | hi = mid - 1; |
| 4137 | continue; |
| 4138 | } |
| 4139 | |
| 4140 | return dsc; |
| 4141 | } |
| 4142 | #ifdef DEBUG |
| 4143 | printf("ERROR: Couldn't find basic block at offset %04X\n", addr); |
| 4144 | #endif // DEBUG |
| 4145 | NO_WAY("fgLookupBB failed."); |
| 4146 | } |
| 4147 | |
| 4148 | //------------------------------------------------------------------------ |
| 4149 | // FgStack: simple stack model for the inlinee's evaluation stack. |
| 4150 | // |
| 4151 | // Model the inputs available to various operations in the inline body. |
| 4152 | // Tracks constants, arguments, array lengths. |
| 4153 | |
| 4154 | class FgStack |
| 4155 | { |
| 4156 | public: |
| 4157 | FgStack() : slot0(SLOT_INVALID), slot1(SLOT_INVALID), depth(0) |
| 4158 | { |
| 4159 | // Empty |
| 4160 | } |
| 4161 | |
| 4162 | void Clear() |
| 4163 | { |
| 4164 | depth = 0; |
| 4165 | } |
| 4166 | void PushUnknown() |
| 4167 | { |
| 4168 | Push(SLOT_UNKNOWN); |
| 4169 | } |
| 4170 | void PushConstant() |
| 4171 | { |
| 4172 | Push(SLOT_CONSTANT); |
| 4173 | } |
| 4174 | void PushArrayLen() |
| 4175 | { |
| 4176 | Push(SLOT_ARRAYLEN); |
| 4177 | } |
| 4178 | void PushArgument(unsigned arg) |
| 4179 | { |
| 4180 | Push(SLOT_ARGUMENT + arg); |
| 4181 | } |
| 4182 | unsigned GetSlot0() const |
| 4183 | { |
| 4184 | assert(depth >= 1); |
| 4185 | return slot0; |
| 4186 | } |
| 4187 | unsigned GetSlot1() const |
| 4188 | { |
| 4189 | assert(depth >= 2); |
| 4190 | return slot1; |
| 4191 | } |
| 4192 | static bool IsConstant(unsigned value) |
| 4193 | { |
| 4194 | return value == SLOT_CONSTANT; |
| 4195 | } |
| 4196 | static bool IsArrayLen(unsigned value) |
| 4197 | { |
| 4198 | return value == SLOT_ARRAYLEN; |
| 4199 | } |
| 4200 | static bool IsArgument(unsigned value) |
| 4201 | { |
| 4202 | return value >= SLOT_ARGUMENT; |
| 4203 | } |
| 4204 | static unsigned SlotTypeToArgNum(unsigned value) |
| 4205 | { |
| 4206 | assert(IsArgument(value)); |
| 4207 | return value - SLOT_ARGUMENT; |
| 4208 | } |
| 4209 | bool IsStackTwoDeep() const |
| 4210 | { |
| 4211 | return depth == 2; |
| 4212 | } |
| 4213 | bool IsStackOneDeep() const |
| 4214 | { |
| 4215 | return depth == 1; |
| 4216 | } |
| 4217 | bool IsStackAtLeastOneDeep() const |
| 4218 | { |
| 4219 | return depth >= 1; |
| 4220 | } |
| 4221 | |
| 4222 | private: |
| 4223 | enum |
| 4224 | { |
| 4225 | SLOT_INVALID = UINT_MAX, |
| 4226 | SLOT_UNKNOWN = 0, |
| 4227 | SLOT_CONSTANT = 1, |
| 4228 | SLOT_ARRAYLEN = 2, |
| 4229 | SLOT_ARGUMENT = 3 |
| 4230 | }; |
| 4231 | |
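| | // Push models pushing a value of the given kind. Only the top two stack slots are |
| | // tracked: pushing onto a model that is already two deep shifts slot0 into slot1 and |
| | // discards the older value. |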
| 4232 | void Push(int type) |
| 4233 | { |
| 4234 | switch (depth) |
| 4235 | { |
| 4236 | case 0: |
| 4237 | ++depth; |
| 4238 | slot0 = type; |
| 4239 | break; |
| 4240 | case 1: |
| 4241 | ++depth; |
| 4242 | __fallthrough; |
| 4243 | case 2: |
| 4244 | slot1 = slot0; |
| 4245 | slot0 = type; |
| 4246 | } |
| 4247 | } |
| 4248 | |
| 4249 | unsigned slot0; |
| 4250 | unsigned slot1; |
| 4251 | unsigned depth; |
| 4252 | }; |
| 4253 | |
| 4254 | //------------------------------------------------------------------------ |
| 4255 | // fgFindJumpTargets: walk the IL stream, determining jump target offsets |
| 4256 | // |
| 4257 | // Arguments: |
| 4258 | // codeAddr - base address of the IL code buffer |
| 4259 | // codeSize - number of bytes in the IL code buffer |
| 4260 | // jumpTarget - [OUT] bit vector for flagging jump targets |
| 4261 | // |
| 4262 | // Notes: |
| 4263 | // If inlining or prejitting the root, this method also makes |
| 4264 | // various observations about the method that factor into inline |
| 4265 | // decisions. |
| 4266 | // |
| 4267 | // May throw an exception if the IL is malformed. |
| 4268 | // |
| 4269 | // jumpTarget[N] is set to 1 if IL offset N is a jump target in the method. |
| 4270 | // |
| 4271 | // Also sets lvAddrExposed and lvHasILStoreOp, lvHasMultipleILStoreOp in lvaTable[]. |
| 4272 | |
| 4273 | #ifdef _PREFAST_ |
| 4274 | #pragma warning(push) |
| 4275 | #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function |
| 4276 | #endif |
| 4277 | |
| 4278 | void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) |
| 4279 | { |
| 4280 | const BYTE* codeBegp = codeAddr; |
| 4281 | const BYTE* codeEndp = codeAddr + codeSize; |
| 4282 | unsigned varNum; |
| 4283 | bool seenJump = false; |
| 4284 | var_types varType = DUMMY_INIT(TYP_UNDEF); // TYP_ type |
| 4285 | typeInfo ti; // Verifier type. |
| 4286 | bool typeIsNormed = false; |
| 4287 | FgStack pushedStack; |
| 4288 | const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0; |
| 4289 | const bool makeInlineObservations = (compInlineResult != nullptr); |
| 4290 | const bool isInlining = compIsForInlining(); |
| 4291 | unsigned retBlocks = 0; |
| 4292 | |
| 4293 | if (makeInlineObservations) |
| 4294 | { |
| 4295 | // Observe force inline state and code size. |
| 4296 | compInlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, isForceInline); |
| 4297 | compInlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); |
| 4298 | |
| 4299 | // Determine if call site is within a try. |
| 4300 | if (isInlining && impInlineInfo->iciBlock->hasTryIndex()) |
| 4301 | { |
| 4302 | compInlineResult->Note(InlineObservation::CALLSITE_IN_TRY_REGION); |
| 4303 | } |
| 4304 | |
| 4305 | // Determine if the call site is in a loop. |
| 4306 | if (isInlining && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) |
| 4307 | { |
| 4308 | compInlineResult->Note(InlineObservation::CALLSITE_IN_LOOP); |
| 4309 | } |
| 4310 | |
| 4311 | #ifdef DEBUG |
| 4312 | |
| 4313 | // If inlining, this method should still be a candidate. |
| 4314 | if (isInlining) |
| 4315 | { |
| 4316 | assert(compInlineResult->IsCandidate()); |
| 4317 | } |
| 4318 | |
| 4319 | #endif // DEBUG |
| 4320 | |
| 4321 | // note that we're starting to look at the opcodes. |
| 4322 | compInlineResult->Note(InlineObservation::CALLEE_BEGIN_OPCODE_SCAN); |
| 4323 | } |
| 4324 | |
| 4325 | while (codeAddr < codeEndp) |
| 4326 | { |
| 4327 | OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); |
| 4328 | codeAddr += sizeof(__int8); |
| 4329 | opts.instrCount++; |
| 4330 | typeIsNormed = false; |
| 4331 | |
| 4332 | DECODE_OPCODE: |
| 4333 | |
| 4334 | if ((unsigned)opcode >= CEE_COUNT) |
| 4335 | { |
| 4336 | BADCODE3("Illegal opcode", ": %02X", (int)opcode); |
| 4337 | } |
| 4338 | |
| 4339 | if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) || (opcode >= CEE_LDARG && opcode <= CEE_STLOC)) |
| 4340 | { |
| 4341 | opts.lvRefCount++; |
| 4342 | } |
| 4343 | |
| 4344 | if (makeInlineObservations && (opcode >= CEE_LDNULL) && (opcode <= CEE_LDC_R8)) |
| 4345 | { |
| 4346 | pushedStack.PushConstant(); |
| 4347 | } |
| 4348 | |
| 4349 | unsigned sz = opcodeSizes[opcode]; |
| 4350 | |
| 4351 | switch (opcode) |
| 4352 | { |
| 4353 | case CEE_PREFIX1: |
| 4354 | { |
| 4355 | if (codeAddr >= codeEndp) |
| 4356 | { |
| 4357 | goto TOO_FAR; |
| 4358 | } |
| 4359 | opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); |
| 4360 | codeAddr += sizeof(__int8); |
| 4361 | goto DECODE_OPCODE; |
| 4362 | } |
| 4363 | |
| 4364 | case CEE_PREFIX2: |
| 4365 | case CEE_PREFIX3: |
| 4366 | case CEE_PREFIX4: |
| 4367 | case CEE_PREFIX5: |
| 4368 | case CEE_PREFIX6: |
| 4369 | case CEE_PREFIX7: |
| 4370 | case CEE_PREFIXREF: |
| 4371 | { |
| 4372 | BADCODE3("Illegal opcode", ": %02X", (int)opcode); |
| 4373 | } |
| 4374 | |
| 4375 | case CEE_CALL: |
| 4376 | case CEE_CALLVIRT: |
| 4377 | { |
| 4378 | // There has to be code after the call, otherwise the inlinee is unverifiable. |
| 4379 | if (isInlining) |
| 4380 | { |
| 4381 | |
| 4382 | noway_assert(codeAddr < codeEndp - sz); |
| 4383 | } |
| 4384 | |
| 4385 | // If the method has a call followed by a ret, assume that |
| 4386 | // it is a wrapper method. |
| 4387 | if (makeInlineObservations) |
| 4388 | { |
| 4389 | if ((OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET) |
| 4390 | { |
| 4391 | compInlineResult->Note(InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER); |
| 4392 | } |
| 4393 | } |
| 4394 | } |
| 4395 | break; |
| 4396 | |
| 4397 | case CEE_LEAVE: |
| 4398 | case CEE_LEAVE_S: |
| 4399 | case CEE_BR: |
| 4400 | case CEE_BR_S: |
| 4401 | case CEE_BRFALSE: |
| 4402 | case CEE_BRFALSE_S: |
| 4403 | case CEE_BRTRUE: |
| 4404 | case CEE_BRTRUE_S: |
| 4405 | case CEE_BEQ: |
| 4406 | case CEE_BEQ_S: |
| 4407 | case CEE_BGE: |
| 4408 | case CEE_BGE_S: |
| 4409 | case CEE_BGE_UN: |
| 4410 | case CEE_BGE_UN_S: |
| 4411 | case CEE_BGT: |
| 4412 | case CEE_BGT_S: |
| 4413 | case CEE_BGT_UN: |
| 4414 | case CEE_BGT_UN_S: |
| 4415 | case CEE_BLE: |
| 4416 | case CEE_BLE_S: |
| 4417 | case CEE_BLE_UN: |
| 4418 | case CEE_BLE_UN_S: |
| 4419 | case CEE_BLT: |
| 4420 | case CEE_BLT_S: |
| 4421 | case CEE_BLT_UN: |
| 4422 | case CEE_BLT_UN_S: |
| 4423 | case CEE_BNE_UN: |
| 4424 | case CEE_BNE_UN_S: |
| 4425 | { |
| 4426 | seenJump = true; |
| 4427 | |
| 4428 | if (codeAddr > codeEndp - sz) |
| 4429 | { |
| 4430 | goto TOO_FAR; |
| 4431 | } |
| 4432 | |
| 4433 | // Compute jump target address |
| 4434 | signed jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); |
| 4435 | |
| 4436 | if (compIsForInlining() && jmpDist == 0 && |
| 4437 | (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S)) |
| 4438 | { |
| 4439 | break; /* NOP */ |
| 4440 | } |
| 4441 | |
| 4442 | unsigned jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; |
| 4443 | |
| 4444 | // Make sure target is reasonable |
| 4445 | if (jmpAddr >= codeSize) |
| 4446 | { |
| 4447 | BADCODE3("code jumps to outer space", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); |
| 4448 | } |
| 4449 | |
| 4450 | // Mark the jump target |
| 4451 | jumpTarget->bitVectSet(jmpAddr); |
| 4452 | |
| 4453 | // See if jump might be sensitive to inlining |
| 4454 | if (makeInlineObservations && (opcode != CEE_BR_S) && (opcode != CEE_BR)) |
| 4455 | { |
| 4456 | fgObserveInlineConstants(opcode, pushedStack, isInlining); |
| 4457 | } |
| 4458 | } |
| 4459 | break; |
| 4460 | |
| 4461 | case CEE_SWITCH: |
| 4462 | { |
| 4463 | seenJump = true; |
| 4464 | |
| 4465 | if (makeInlineObservations) |
| 4466 | { |
| 4467 | compInlineResult->Note(InlineObservation::CALLEE_HAS_SWITCH); |
| 4468 | |
| 4469 | // Fail fast, if we're inlining and can't handle this. |
| 4470 | if (isInlining && compInlineResult->IsFailure()) |
| 4471 | { |
| 4472 | return; |
| 4473 | } |
| 4474 | } |
| 4475 | |
| 4476 | // Make sure we don't go past the end reading the number of cases |
| 4477 | if (codeAddr > codeEndp - sizeof(DWORD)) |
| 4478 | { |
| 4479 | goto TOO_FAR; |
| 4480 | } |
| 4481 | |
| 4482 | // Read the number of cases |
| 4483 | unsigned jmpCnt = getU4LittleEndian(codeAddr); |
| 4484 | codeAddr += sizeof(DWORD); |
| 4485 | |
| 4486 | if (jmpCnt > codeSize / sizeof(DWORD)) |
| 4487 | { |
| 4488 | goto TOO_FAR; |
| 4489 | } |
| 4490 | |
| 4491 | // Find the end of the switch table |
| 4492 | unsigned jmpBase = (unsigned)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); |
| 4493 | |
| 4494 | // Make sure there is more code after the switch |
| 4495 | if (jmpBase >= codeSize) |
| 4496 | { |
| 4497 | goto TOO_FAR; |
| 4498 | } |
| 4499 | |
| 4500 | // jmpBase is also the target of the default case, so mark it |
| 4501 | jumpTarget->bitVectSet(jmpBase); |
| 4502 | |
| 4503 | // Process table entries |
| 4504 | while (jmpCnt > 0) |
| 4505 | { |
| 4506 | unsigned jmpAddr = jmpBase + getI4LittleEndian(codeAddr); |
| 4507 | codeAddr += 4; |
| 4508 | |
| 4509 | if (jmpAddr >= codeSize) |
| 4510 | { |
| 4511 | BADCODE3("jump target out of range", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); |
| 4512 | } |
| 4513 | |
| 4514 | jumpTarget->bitVectSet(jmpAddr); |
| 4515 | jmpCnt--; |
| 4516 | } |
| 4517 | |
| 4518 | // We've advanced past all the bytes in this instruction |
| 4519 | sz = 0; |
| 4520 | } |
| 4521 | break; |
| 4522 | |
| 4523 | case CEE_UNALIGNED: |
| 4524 | case CEE_CONSTRAINED: |
| 4525 | case CEE_READONLY: |
| 4526 | case CEE_VOLATILE: |
| 4527 | case CEE_TAILCALL: |
| 4528 | { |
| 4529 | if (codeAddr >= codeEndp) |
| 4530 | { |
| 4531 | goto TOO_FAR; |
| 4532 | } |
| 4533 | } |
| 4534 | break; |
| 4535 | |
| 4536 | case CEE_STARG: |
| 4537 | case CEE_STARG_S: |
| 4538 | { |
| 4539 | noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); |
| 4540 | |
| 4541 | if (codeAddr > codeEndp - sz) |
| 4542 | { |
| 4543 | goto TOO_FAR; |
| 4544 | } |
| 4545 | |
| 4546 | varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); |
| 4547 | |
| 4548 | if (isInlining) |
| 4549 | { |
| 4550 | if (varNum < impInlineInfo->argCnt) |
| 4551 | { |
| 4552 | impInlineInfo->inlArgInfo[varNum].argHasStargOp = true; |
| 4553 | } |
| 4554 | } |
| 4555 | else |
| 4556 | { |
| 4557 | // account for possible hidden param |
| 4558 | varNum = compMapILargNum(varNum); |
| 4559 | |
| 4560 | // This check is only intended to prevent an AV. Bad varNum values will later |
| 4561 | // be handled properly by the verifier. |
| 4562 | if (varNum < lvaTableCnt) |
| 4563 | { |
| 4564 | // In non-inline cases, note written-to arguments. |
| 4565 | lvaTable[varNum].lvHasILStoreOp = 1; |
| 4566 | } |
| 4567 | } |
| 4568 | } |
| 4569 | break; |
| 4570 | |
| 4571 | case CEE_STLOC_0: |
| 4572 | case CEE_STLOC_1: |
| 4573 | case CEE_STLOC_2: |
| 4574 | case CEE_STLOC_3: |
| 4575 | varNum = (opcode - CEE_STLOC_0); |
| 4576 | goto STLOC; |
| 4577 | |
| 4578 | case CEE_STLOC: |
| 4579 | case CEE_STLOC_S: |
| 4580 | { |
| 4581 | noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); |
| 4582 | |
| 4583 | if (codeAddr > codeEndp - sz) |
| 4584 | { |
| 4585 | goto TOO_FAR; |
| 4586 | } |
| 4587 | |
| 4588 | varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); |
| 4589 | |
| 4590 | STLOC: |
| 4591 | if (isInlining) |
| 4592 | { |
| 4593 | InlLclVarInfo& lclInfo = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt]; |
| 4594 | |
| 4595 | if (lclInfo.lclHasStlocOp) |
| 4596 | { |
| 4597 | lclInfo.lclHasMultipleStlocOp = 1; |
| 4598 | } |
| 4599 | else |
| 4600 | { |
| 4601 | lclInfo.lclHasStlocOp = 1; |
| 4602 | } |
| 4603 | } |
| 4604 | else |
| 4605 | { |
| 4606 | varNum += info.compArgsCount; |
| 4607 | |
| 4608 | // This check is only intended to prevent an AV. Bad varNum values will later |
| 4609 | // be handled properly by the verifier. |
| 4610 | if (varNum < lvaTableCnt) |
| 4611 | { |
| 4612 | // In non-inline cases, note written-to locals. |
| 4613 | if (lvaTable[varNum].lvHasILStoreOp) |
| 4614 | { |
| 4615 | lvaTable[varNum].lvHasMultipleILStoreOp = 1; |
| 4616 | } |
| 4617 | else |
| 4618 | { |
| 4619 | lvaTable[varNum].lvHasILStoreOp = 1; |
| 4620 | } |
| 4621 | } |
| 4622 | } |
| 4623 | } |
| 4624 | break; |
| 4625 | |
| 4626 | case CEE_LDARGA: |
| 4627 | case CEE_LDARGA_S: |
| 4628 | case CEE_LDLOCA: |
| 4629 | case CEE_LDLOCA_S: |
| 4630 | { |
| 4631 | // Handle address-taken args or locals |
| 4632 | noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); |
| 4633 | |
| 4634 | if (codeAddr > codeEndp - sz) |
| 4635 | { |
| 4636 | goto TOO_FAR; |
| 4637 | } |
| 4638 | |
| 4639 | varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); |
| 4640 | |
| 4641 | if (isInlining) |
| 4642 | { |
| 4643 | if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) |
| 4644 | { |
| 4645 | varType = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclTypeInfo; |
| 4646 | ti = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclVerTypeInfo; |
| 4647 | |
| 4648 | impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclHasLdlocaOp = true; |
| 4649 | } |
| 4650 | else |
| 4651 | { |
| 4652 | noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); |
| 4653 | |
| 4654 | varType = impInlineInfo->lclVarInfo[varNum].lclTypeInfo; |
| 4655 | ti = impInlineInfo->lclVarInfo[varNum].lclVerTypeInfo; |
| 4656 | |
| 4657 | impInlineInfo->inlArgInfo[varNum].argHasLdargaOp = true; |
| 4658 | |
| 4659 | pushedStack.PushArgument(varNum); |
| 4660 | } |
| 4661 | } |
| 4662 | else |
| 4663 | { |
| 4664 | if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) |
| 4665 | { |
| 4666 | if (varNum >= info.compMethodInfo->locals.numArgs) |
| 4667 | { |
| 4668 | BADCODE("bad local number"); |
| 4669 | } |
| 4670 | |
| 4671 | varNum += info.compArgsCount; |
| 4672 | } |
| 4673 | else |
| 4674 | { |
| 4675 | noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); |
| 4676 | |
| 4677 | if (varNum >= info.compILargsCount) |
| 4678 | { |
| 4679 | BADCODE("bad argument number" ); |
| 4680 | } |
| 4681 | |
| 4682 | varNum = compMapILargNum(varNum); // account for possible hidden param |
| 4683 | } |
| 4684 | |
| 4685 | varType = (var_types)lvaTable[varNum].lvType; |
| 4686 | ti = lvaTable[varNum].lvVerTypeInfo; |
| 4687 | |
| 4688 | // Determine if the next instruction will consume |
| 4689 | // the address. If so we won't mark this var as |
| 4690 | // address taken. |
| 4691 | // |
| 4692 | // We will put structs on the stack and changing |
| 4693 | // the addrTaken of a local requires an extra pass |
| 4694 | // in the morpher so we won't apply this |
| 4695 | // optimization to structs. |
| 4696 | // |
| 4697 | // Debug code spills for every IL instruction, and |
| 4698 | // therefore it will split statements, so we will |
| 4699 | // need the address. Note that this optimization |
| 4700 | // is based on knowing what trees we will |
| 4701 | // generate for this ldfld, and it requires that we |
| 4702 | // won't need the address of this local at all. |
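| | // As an illustrative sketch only (not from the original source; SomeType::someField |
| | // is a made-up field): a sequence like |
| | //     ldloca.s   0 |
| | //     ldfld      int32 SomeType::someField |
| | // consumes the address immediately, so the local is not marked here, whereas |
| | //     ldloca.s   0 |
| | //     stloc.1 |
| | // stores the address away and does require the lvHasLdAddrOp mark below. |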
| 4703 | noway_assert(varNum < lvaTableCnt); |
| 4704 | |
| 4705 | const bool notStruct = !varTypeIsStruct(&lvaTable[varNum]); |
| 4706 | const bool notLastInstr = (codeAddr < codeEndp - sz); |
| 4707 | const bool notDebugCode = !opts.compDbgCode; |
| 4708 | |
| 4709 | if (notStruct && notLastInstr && notDebugCode && |
| 4710 | impILConsumesAddr(codeAddr + sz, impTokenLookupContextHandle, info.compScopeHnd)) |
| 4711 | { |
| 4712 | // We can skip the addrtaken, as next IL instruction consumes |
| 4713 | // the address. |
| 4714 | } |
| 4715 | else |
| 4716 | { |
| 4717 | lvaTable[varNum].lvHasLdAddrOp = 1; |
| 4718 | if (!info.compIsStatic && (varNum == 0)) |
| 4719 | { |
| 4720 | // Taking the address of the "this" pointer is significant; |
| 4721 | // go ahead and mark it as permanently addr-exposed here. |
| 4722 | lvaSetVarAddrExposed(0); |
| 4723 | // This may be conservative, but probably not very. |
| 4724 | } |
| 4725 | } |
| 4726 | } // isInlining |
| 4727 | |
| 4728 | typeIsNormed = ti.IsValueClass() && !varTypeIsStruct(varType); |
| 4729 | } |
| 4730 | break; |
| 4731 | |
| 4732 | #if !defined(FEATURE_CORECLR) |
| 4733 | case CEE_CALLI: |
| 4734 | |
| 4735 | // CEE_CALLI should not be inlined if the call indirect target has a calling convention other than |
| 4736 | // CORINFO_CALLCONV_DEFAULT. In the case where we have a no-marshal CALLI P/Invoke we end up calling |
| 4737 | // the IL stub. We don't NGEN these stubs, so we'll have to JIT an IL stub for a trivial func. |
| 4738 | // It's almost certainly a better choice to leave out the inline candidate so we can generate an inlined |
| 4739 | // call frame. |
| 4740 | |
| 4741 | // Consider skipping this bail-out for force inlines. |
| 4742 | if (makeInlineObservations) |
| 4743 | { |
| 4744 | if (codeAddr > codeEndp - sizeof(DWORD)) |
| 4745 | { |
| 4746 | goto TOO_FAR; |
| 4747 | } |
| 4748 | |
| 4749 | CORINFO_SIG_INFO calliSig; |
| 4750 | eeGetSig(getU4LittleEndian(codeAddr), info.compScopeHnd, impTokenLookupContextHandle, &calliSig); |
| 4751 | |
| 4752 | if (calliSig.getCallConv() != CORINFO_CALLCONV_DEFAULT) |
| 4753 | { |
| 4754 | compInlineResult->Note(InlineObservation::CALLEE_UNSUPPORTED_OPCODE); |
| 4755 | |
| 4756 | // Fail fast if we're inlining |
| 4757 | if (isInlining) |
| 4758 | { |
| 4759 | assert(compInlineResult->IsFailure()); |
| 4760 | return; |
| 4761 | } |
| 4762 | } |
| 4763 | } |
| 4764 | break; |
| 4765 | #endif // FEATURE_CORECLR |
| 4766 | |
| 4767 | case CEE_JMP: |
| 4768 | retBlocks++; |
| 4769 | |
| 4770 | #if !defined(_TARGET_X86_) && !defined(_TARGET_ARM_) |
| 4771 | if (!isInlining) |
| 4772 | { |
| 4773 | // We transform this into a set of ldarg's + tail call and |
| 4774 | // thus may push more onto the stack than originally thought. |
| 4775 | // This doesn't interfere with verification because CEE_JMP |
| 4776 | // is never verifiable, and there's nothing unsafe you can |
| 4777 | // do with an IL stack overflow if the JIT is expecting it. |
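| | // As a rough illustration (SomeClass::Target is a hypothetical method, not from |
| | // the original source), a two-argument method whose body is just |
| | //     jmp void SomeClass::Target(int32, int32) |
| | // behaves as if it were |
| | //     ldarg.0 |
| | //     ldarg.1 |
| | //     tail. call void SomeClass::Target(int32, int32) |
| | //     ret |
| | // which is why compMaxStack is raised to at least compILargsCount here. |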
| 4778 | info.compMaxStack = max(info.compMaxStack, info.compILargsCount); |
| 4779 | break; |
| 4780 | } |
| 4781 | #endif // !_TARGET_X86_ && !_TARGET_ARM_ |
| 4782 | |
| 4783 | // If we are inlining, we need to fail for a CEE_JMP opcode, just like |
| 4784 | // the list of other opcodes (for all platforms). |
| 4785 | |
| 4786 | __fallthrough; |
| 4787 | case CEE_MKREFANY: |
| 4788 | case CEE_RETHROW: |
| 4789 | if (makeInlineObservations) |
| 4790 | { |
| 4791 | // Arguably this should be NoteFatal, but the legacy behavior is |
| 4792 | // to ignore this for the prejit root. |
| 4793 | compInlineResult->Note(InlineObservation::CALLEE_UNSUPPORTED_OPCODE); |
| 4794 | |
| 4795 | // Fail fast if we're inlining... |
| 4796 | if (isInlining) |
| 4797 | { |
| 4798 | assert(compInlineResult->IsFailure()); |
| 4799 | return; |
| 4800 | } |
| 4801 | } |
| 4802 | break; |
| 4803 | |
| 4804 | case CEE_LOCALLOC: |
| 4805 | |
| 4806 | // We now allow localloc callees to become candidates in some cases. |
| 4807 | if (makeInlineObservations) |
| 4808 | { |
| 4809 | compInlineResult->Note(InlineObservation::CALLEE_HAS_LOCALLOC); |
| 4810 | if (isInlining && compInlineResult->IsFailure()) |
| 4811 | { |
| 4812 | return; |
| 4813 | } |
| 4814 | } |
| 4815 | break; |
| 4816 | |
| 4817 | case CEE_LDARG_0: |
| 4818 | case CEE_LDARG_1: |
| 4819 | case CEE_LDARG_2: |
| 4820 | case CEE_LDARG_3: |
| 4821 | if (makeInlineObservations) |
| 4822 | { |
| 4823 | pushedStack.PushArgument(opcode - CEE_LDARG_0); |
| 4824 | } |
| 4825 | break; |
| 4826 | |
| 4827 | case CEE_LDARG_S: |
| 4828 | case CEE_LDARG: |
| 4829 | { |
| 4830 | if (codeAddr > codeEndp - sz) |
| 4831 | { |
| 4832 | goto TOO_FAR; |
| 4833 | } |
| 4834 | |
| 4835 | varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); |
| 4836 | |
| 4837 | if (makeInlineObservations) |
| 4838 | { |
| 4839 | pushedStack.PushArgument(varNum); |
| 4840 | } |
| 4841 | } |
| 4842 | break; |
| 4843 | |
| 4844 | case CEE_LDLEN: |
| 4845 | if (makeInlineObservations) |
| 4846 | { |
| 4847 | pushedStack.PushArrayLen(); |
| 4848 | } |
| 4849 | break; |
| 4850 | |
| 4851 | case CEE_CEQ: |
| 4852 | case CEE_CGT: |
| 4853 | case CEE_CGT_UN: |
| 4854 | case CEE_CLT: |
| 4855 | case CEE_CLT_UN: |
| 4856 | if (makeInlineObservations) |
| 4857 | { |
| 4858 | fgObserveInlineConstants(opcode, pushedStack, isInlining); |
| 4859 | } |
| 4860 | break; |
| 4861 | case CEE_RET: |
| 4862 | retBlocks++; |
| 4863 | |
| 4864 | default: |
| 4865 | break; |
| 4866 | } |
| 4867 | |
| 4868 | // Skip any remaining operands this opcode may have |
| 4869 | codeAddr += sz; |
| 4870 | |
| 4871 | // Note the opcode we just saw |
| 4872 | if (makeInlineObservations) |
| 4873 | { |
| 4874 | InlineObservation obs = |
| 4875 | typeIsNormed ? InlineObservation::CALLEE_OPCODE_NORMED : InlineObservation::CALLEE_OPCODE; |
| 4876 | compInlineResult->NoteInt(obs, opcode); |
| 4877 | } |
| 4878 | } |
| 4879 | |
| 4880 | if (codeAddr != codeEndp) |
| 4881 | { |
| 4882 | TOO_FAR: |
| 4883 | BADCODE3("Code ends in the middle of an opcode, or there is a branch past the end of the method" , |
| 4884 | " at offset %04X" , (IL_OFFSET)(codeAddr - codeBegp)); |
| 4885 | } |
| 4886 | |
| 4887 | if (makeInlineObservations) |
| 4888 | { |
| 4889 | compInlineResult->Note(InlineObservation::CALLEE_END_OPCODE_SCAN); |
| 4890 | |
| 4891 | // If there are no return blocks we know the method does not return. If there |
| 4892 | // are return blocks we can't be sure it returns, since the count may include |
| 4893 | // unreachable code. Either way, we record the CALLEE_DOES_NOT_RETURN observation. |
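| | // For example (illustrative only; SomeException is a placeholder type), a callee |
| | // whose whole body is |
| | //     newobj instance void SomeException::.ctor() |
| | //     throw |
| | // has no BBJ_RETURN blocks, so retBlocks == 0 and it is noted as non-returning. |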
| 4894 | |
| 4895 | compInlineResult->NoteBool(InlineObservation::CALLEE_DOES_NOT_RETURN, retBlocks == 0); |
| 4896 | |
| 4897 | if (retBlocks == 0 && isInlining) |
| 4898 | { |
| 4899 | // Mark the call node as "no return" as it can impact caller's code quality. |
| 4900 | impInlineInfo->iciCall->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; |
| 4901 | } |
| 4902 | |
| 4903 | // If the inline is viable and discretionary, do the |
| 4904 | // profitability screening. |
| 4905 | if (compInlineResult->IsDiscretionaryCandidate()) |
| 4906 | { |
| 4907 | // Make some callsite specific observations that will feed |
| 4908 | // into the profitability model. |
| 4909 | impMakeDiscretionaryInlineObservations(impInlineInfo, compInlineResult); |
| 4910 | |
| 4911 | // None of those observations should have changed the |
| 4912 | // inline's viability. |
| 4913 | assert(compInlineResult->IsCandidate()); |
| 4914 | |
| 4915 | if (isInlining) |
| 4916 | { |
| 4917 | // Assess profitability... |
| 4918 | CORINFO_METHOD_INFO* methodInfo = &impInlineInfo->inlineCandidateInfo->methInfo; |
| 4919 | compInlineResult->DetermineProfitability(methodInfo); |
| 4920 | |
| 4921 | if (compInlineResult->IsFailure()) |
| 4922 | { |
| 4923 | impInlineRoot()->m_inlineStrategy->NoteUnprofitable(); |
| 4924 | JITDUMP("\n\nInline expansion aborted, inline not profitable\n" ); |
| 4925 | return; |
| 4926 | } |
| 4927 | else |
| 4928 | { |
| 4929 | // The inline is still viable. |
| 4930 | assert(compInlineResult->IsCandidate()); |
| 4931 | } |
| 4932 | } |
| 4933 | else |
| 4934 | { |
| 4935 | // Prejit root case. Profitability assessment for this |
| 4936 | // is done over in compCompileHelper. |
| 4937 | } |
| 4938 | } |
| 4939 | } |
| 4940 | |
| 4941 | // None of the local vars in the inlinee should have had their address taken or have been written to. |
| 4942 | // Therefore we should NOT need to enter this "if" statement. |
| 4943 | if (!isInlining && !info.compIsStatic) |
| 4944 | { |
| 4945 | fgAdjustForAddressExposedOrWrittenThis(); |
| 4946 | } |
| 4947 | |
| 4948 | // Now that we've seen the IL, set lvSingleDef for root method |
| 4949 | // locals. |
| 4950 | // |
| 4951 | // We could also do this for root method arguments but single-def |
| 4952 | // arguments are set by the caller and so we don't know anything |
| 4953 | // about the possible values or types. |
| 4954 | // |
| 4955 | // For inlinees we do this over in impInlineFetchLocal and |
| 4956 | // impInlineFetchArg (here args are included as we sometimes get |
| 4957 | // new information about the types of inlinee args). |
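| | // Illustrative sketch (hypothetical C# snippet, not from the original source): |
| | // a root-method local like |
| | //     string name = GetName();   // stored once, address never taken |
| | // has lvHasMultipleILStoreOp == 0 and lvHasLdAddrOp == 0 and so is marked |
| | // lvSingleDef below, while a local that is re-assigned in a loop is not. |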
| 4958 | if (!isInlining) |
| 4959 | { |
| 4960 | const unsigned firstLcl = info.compArgsCount; |
| 4961 | const unsigned lastLcl = firstLcl + info.compMethodInfo->locals.numArgs; |
| 4962 | for (unsigned lclNum = firstLcl; lclNum < lastLcl; lclNum++) |
| 4963 | { |
| 4964 | LclVarDsc* lclDsc = lvaGetDesc(lclNum); |
| 4965 | assert(lclDsc->lvSingleDef == 0); |
| 4966 | // could restrict this to TYP_REF |
| 4967 | lclDsc->lvSingleDef = !lclDsc->lvHasMultipleILStoreOp && !lclDsc->lvHasLdAddrOp; |
| 4968 | |
| 4969 | if (lclDsc->lvSingleDef) |
| 4970 | { |
| 4971 | JITDUMP("Marked V%02u as a single def local\n" , lclNum); |
| 4972 | } |
| 4973 | } |
| 4974 | } |
| 4975 | } |
| 4976 | |
| 4977 | #ifdef _PREFAST_ |
| 4978 | #pragma warning(pop) |
| 4979 | #endif |
| 4980 | |
| 4981 | //------------------------------------------------------------------------ |
| 4982 | // fgAdjustForAddressExposedOrWrittenThis: update var table for cases |
| 4983 | // where the this pointer value can change. |
| 4984 | // |
| 4985 | // Notes: |
| 4986 | // Modifies lvaArg0Var to refer to a temp if the value of 'this' can |
| 4987 | // change. The original this (info.compThisArg) then remains |
| 4988 | // unmodified in the method. fgAddInternal is responsible for |
| 4989 | // adding the code to copy the initial this into the temp. |
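| | //    As a hedged illustration (not from the original source), IL such as |
| | //        starg.s 0        // writes the incoming 'this' |
| | //    or |
| | //        ldarga.s 0       // exposes the address of 'this' |
| | //    triggers this adjustment, after which the body works with the lvaArg0Var |
| | //    temp while info.compThisArg keeps the unmodified incoming value. |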
| 4990 | |
| 4991 | void Compiler::fgAdjustForAddressExposedOrWrittenThis() |
| 4992 | { |
| 4993 | // Optionally enable adjustment during stress. |
| 4994 | if (!tiVerificationNeeded && compStressCompile(STRESS_GENERIC_VARN, 15)) |
| 4995 | { |
| 4996 | lvaTable[info.compThisArg].lvHasILStoreOp = true; |
| 4997 | } |
| 4998 | |
| 4999 | // If this is exposed or written to, create a temp for the modifiable this |
| 5000 | if (lvaTable[info.compThisArg].lvAddrExposed || lvaTable[info.compThisArg].lvHasILStoreOp) |
| 5001 | { |
| 5002 | // If there is a "ldarga 0" or "starg 0", grab and use the temp. |
| 5003 | lvaArg0Var = lvaGrabTemp(false DEBUGARG("Address-exposed, or written this pointer")); |
| 5004 | noway_assert(lvaArg0Var > (unsigned)info.compThisArg); |
| 5005 | lvaTable[lvaArg0Var].lvType = lvaTable[info.compThisArg].TypeGet(); |
| 5006 | lvaTable[lvaArg0Var].lvAddrExposed = lvaTable[info.compThisArg].lvAddrExposed; |
| 5007 | lvaTable[lvaArg0Var].lvDoNotEnregister = lvaTable[info.compThisArg].lvDoNotEnregister; |
| 5008 | #ifdef DEBUG |
| 5009 | lvaTable[lvaArg0Var].lvVMNeedsStackAddr = lvaTable[info.compThisArg].lvVMNeedsStackAddr; |
| 5010 | lvaTable[lvaArg0Var].lvLiveInOutOfHndlr = lvaTable[info.compThisArg].lvLiveInOutOfHndlr; |
| 5011 | lvaTable[lvaArg0Var].lvLclFieldExpr = lvaTable[info.compThisArg].lvLclFieldExpr; |
| 5012 | lvaTable[lvaArg0Var].lvLiveAcrossUCall = lvaTable[info.compThisArg].lvLiveAcrossUCall; |
| 5013 | #endif |
| 5014 | lvaTable[lvaArg0Var].lvHasILStoreOp = lvaTable[info.compThisArg].lvHasILStoreOp; |
| 5015 | lvaTable[lvaArg0Var].lvVerTypeInfo = lvaTable[info.compThisArg].lvVerTypeInfo; |
| 5016 | |
| 5017 | // Clear the TI_FLAG_THIS_PTR in the original 'this' pointer. |
| 5018 | noway_assert(lvaTable[lvaArg0Var].lvVerTypeInfo.IsThisPtr()); |
| 5019 | lvaTable[info.compThisArg].lvVerTypeInfo.ClearThisPtr(); |
| 5020 | lvaTable[info.compThisArg].lvAddrExposed = false; |
| 5021 | lvaTable[info.compThisArg].lvHasILStoreOp = false; |
| 5022 | } |
| 5023 | } |
| 5024 | |
| 5025 | //------------------------------------------------------------------------ |
| 5026 | // fgObserveInlineConstants: look for operations that might get optimized |
| 5027 | // if this method were to be inlined, and report these to the inliner. |
| 5028 | // |
| 5029 | // Arguments: |
| 5030 | // opcode -- MSIL opcode under consideration |
| 5031 | // stack -- abstract stack model at this point in the IL |
| 5032 | // isInlining -- true if we're inlining (vs compiling a prejit root) |
| 5033 | // |
| 5034 | // Notes: |
| 5035 | // Currently only invoked on compare and branch opcodes. |
| 5036 | // |
| 5037 | // If we're inlining we also look at the argument values supplied by |
| 5038 | // the caller at this call site. |
| 5039 | // |
| 5040 | // The crude stack model may overestimate stack depth. |
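| | //    Illustrative sketch (not from the original source): a callee containing |
| | //        ldarg.0 |
| | //        ldc.i4.0 |
| | //        ceq |
| | //    is noted as an argument feeding a constant test, and a compare of an |
| | //    argument against an array length (ldarg / ldlen / clt) is noted as an |
| | //    argument feeding a range check; if the call site also passes a constant |
| | //    for that argument, CALLSITE_CONSTANT_ARG_FEEDS_TEST is recorded as well. |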
| 5041 | |
| 5042 | void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining) |
| 5043 | { |
| 5044 | // We should be able to record inline observations. |
| 5045 | assert(compInlineResult != nullptr); |
| 5046 | |
| 5047 | // The stack only has to be 1 deep for BRTRUE/FALSE |
| 5048 | bool lookForBranchCases = stack.IsStackAtLeastOneDeep(); |
| 5049 | |
| 5050 | if (lookForBranchCases) |
| 5051 | { |
| 5052 | if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S || opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) |
| 5053 | { |
| 5054 | unsigned slot0 = stack.GetSlot0(); |
| 5055 | if (FgStack::IsArgument(slot0)) |
| 5056 | { |
| 5057 | compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); |
| 5058 | |
| 5059 | if (isInlining) |
| 5060 | { |
| 5061 | // Check for the double whammy of an incoming constant argument |
| 5062 | // feeding a constant test. |
| 5063 | unsigned varNum = FgStack::SlotTypeToArgNum(slot0); |
| 5064 | if (impInlineInfo->inlArgInfo[varNum].argNode->OperIsConst()) |
| 5065 | { |
| 5066 | compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); |
| 5067 | } |
| 5068 | } |
| 5069 | } |
| 5070 | |
| 5071 | return; |
| 5072 | } |
| 5073 | } |
| 5074 | |
| 5075 | // Remaining cases require at least two things on the stack. |
| 5076 | if (!stack.IsStackTwoDeep()) |
| 5077 | { |
| 5078 | return; |
| 5079 | } |
| 5080 | |
| 5081 | unsigned slot0 = stack.GetSlot0(); |
| 5082 | unsigned slot1 = stack.GetSlot1(); |
| 5083 | |
| 5084 | // Arg feeds constant test |
| 5085 | if ((FgStack::IsConstant(slot0) && FgStack::IsArgument(slot1)) || |
| 5086 | (FgStack::IsConstant(slot1) && FgStack::IsArgument(slot0))) |
| 5087 | { |
| 5088 | compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); |
| 5089 | } |
| 5090 | |
| 5091 | // Arg feeds range check |
| 5092 | if ((FgStack::IsArrayLen(slot0) && FgStack::IsArgument(slot1)) || |
| 5093 | (FgStack::IsArrayLen(slot1) && FgStack::IsArgument(slot0))) |
| 5094 | { |
| 5095 | compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); |
| 5096 | } |
| 5097 | |
| 5098 | // Check for an incoming arg that's a constant |
| 5099 | if (isInlining) |
| 5100 | { |
| 5101 | if (FgStack::IsArgument(slot0)) |
| 5102 | { |
| 5103 | compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); |
| 5104 | |
| 5105 | unsigned varNum = FgStack::SlotTypeToArgNum(slot0); |
| 5106 | if (impInlineInfo->inlArgInfo[varNum].argNode->OperIsConst()) |
| 5107 | { |
| 5108 | compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); |
| 5109 | } |
| 5110 | } |
| 5111 | |
| 5112 | if (FgStack::IsArgument(slot1)) |
| 5113 | { |
| 5114 | compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); |
| 5115 | |
| 5116 | unsigned varNum = FgStack::SlotTypeToArgNum(slot1); |
| 5117 | if (impInlineInfo->inlArgInfo[varNum].argNode->OperIsConst()) |
| 5118 | { |
| 5119 | compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); |
| 5120 | } |
| 5121 | } |
| 5122 | } |
| 5123 | } |
| 5124 | |
| 5125 | /***************************************************************************** |
| 5126 | * |
| 5127 | * Mark all the blocks from startBlock to endBlock, inclusive, with BBF_BACKWARD_JUMP |
| 5128 | */ |
| 5129 | |
| 5130 | void Compiler::fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock) |
| 5131 | { |
| 5132 | noway_assert(startBlock->bbNum <= endBlock->bbNum); |
| 5133 | |
| 5134 | for (BasicBlock* block = startBlock; block != endBlock->bbNext; block = block->bbNext) |
| 5135 | { |
| 5136 | if ((block->bbFlags & BBF_BACKWARD_JUMP) == 0) |
| 5137 | { |
| 5138 | block->bbFlags |= BBF_BACKWARD_JUMP; |
| 5139 | } |
| 5140 | } |
| 5141 | } |
| 5142 | |
| 5143 | /***************************************************************************** |
| 5144 | * |
| 5145 | * Finally link up the bbJumpDest of the blocks together |
| 5146 | */ |
| 5147 | |
| 5148 | void Compiler::fgLinkBasicBlocks() |
| 5149 | { |
| 5150 | /* Create the basic block lookup tables */ |
| 5151 | |
| 5152 | fgInitBBLookup(); |
| 5153 | |
| 5154 | /* First block is always reachable */ |
| 5155 | |
| 5156 | fgFirstBB->bbRefs = 1; |
| 5157 | |
| 5158 | /* Walk all the basic blocks, filling in the target addresses */ |
| 5159 | |
| 5160 | for (BasicBlock* curBBdesc = fgFirstBB; curBBdesc; curBBdesc = curBBdesc->bbNext) |
| 5161 | { |
| 5162 | switch (curBBdesc->bbJumpKind) |
| 5163 | { |
| 5164 | case BBJ_COND: |
| 5165 | case BBJ_ALWAYS: |
| 5166 | case BBJ_LEAVE: |
| 5167 | curBBdesc->bbJumpDest = fgLookupBB(curBBdesc->bbJumpOffs); |
| 5168 | curBBdesc->bbJumpDest->bbRefs++; |
| 5169 | if (curBBdesc->bbJumpDest->bbNum <= curBBdesc->bbNum) |
| 5170 | { |
| 5171 | fgMarkBackwardJump(curBBdesc->bbJumpDest, curBBdesc); |
| 5172 | } |
| 5173 | |
| 5174 | /* Is the next block reachable? */ |
| 5175 | |
| 5176 | if (curBBdesc->bbJumpKind == BBJ_ALWAYS || curBBdesc->bbJumpKind == BBJ_LEAVE) |
| 5177 | { |
| 5178 | break; |
| 5179 | } |
| 5180 | |
| 5181 | if (!curBBdesc->bbNext) |
| 5182 | { |
| 5183 | BADCODE("Fall thru the end of a method" ); |
| 5184 | } |
| 5185 | |
| 5186 | // Fall through, the next block is also reachable |
| 5187 | |
| 5188 | case BBJ_NONE: |
| 5189 | curBBdesc->bbNext->bbRefs++; |
| 5190 | break; |
| 5191 | |
| 5192 | case BBJ_EHFINALLYRET: |
| 5193 | case BBJ_EHFILTERRET: |
| 5194 | case BBJ_THROW: |
| 5195 | case BBJ_RETURN: |
| 5196 | break; |
| 5197 | |
| 5198 | case BBJ_SWITCH: |
| 5199 | |
| 5200 | unsigned jumpCnt; |
| 5201 | jumpCnt = curBBdesc->bbJumpSwt->bbsCount; |
| 5202 | BasicBlock** jumpPtr; |
| 5203 | jumpPtr = curBBdesc->bbJumpSwt->bbsDstTab; |
| 5204 | |
| 5205 | do |
| 5206 | { |
| 5207 | *jumpPtr = fgLookupBB((unsigned)*(size_t*)jumpPtr); |
| 5208 | (*jumpPtr)->bbRefs++; |
| 5209 | if ((*jumpPtr)->bbNum <= curBBdesc->bbNum) |
| 5210 | { |
| 5211 | fgMarkBackwardJump(*jumpPtr, curBBdesc); |
| 5212 | } |
| 5213 | } while (++jumpPtr, --jumpCnt); |
| 5214 | |
| 5215 | /* The default case of CEE_SWITCH (the next block) is at the end of jumpTab[] */ |
| 5216 | |
| 5217 | noway_assert(*(jumpPtr - 1) == curBBdesc->bbNext); |
| 5218 | break; |
| 5219 | |
| 5220 | case BBJ_CALLFINALLY: // BBJ_CALLFINALLY and BBJ_EHCATCHRET don't appear until later |
| 5221 | case BBJ_EHCATCHRET: |
| 5222 | default: |
| 5223 | noway_assert(!"Unexpected bbJumpKind" ); |
| 5224 | break; |
| 5225 | } |
| 5226 | } |
| 5227 | } |
| 5228 | |
| 5229 | //------------------------------------------------------------------------ |
| 5230 | // fgMakeBasicBlocks: walk the IL creating basic blocks, and look for |
| 5231 | // operations that might get optimized if this method were to be inlined. |
| 5232 | // |
| 5233 | // Arguments: |
| 5234 | // codeAddr -- starting address of the method's IL stream |
| 5235 | // codeSize -- length of the IL stream |
| 5236 | // jumpTarget -- [in] bit vector of jump targets found by fgFindJumpTargets |
| 5237 | // |
| 5238 | // Returns: |
| 5239 | // number of return blocks (BBJ_RETURN) in the method (may be zero) |
| 5240 | // |
| 5241 | // Notes: |
| 5242 | // Invoked for prejited and jitted methods, and for all inlinees |
| 5243 | |
| 5244 | unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) |
| 5245 | { |
| 5246 | unsigned retBlocks = 0; |
| 5247 | const BYTE* codeBegp = codeAddr; |
| 5248 | const BYTE* codeEndp = codeAddr + codeSize; |
| 5249 | bool tailCall = false; |
| 5250 | unsigned curBBoffs = 0; |
| 5251 | BasicBlock* curBBdesc; |
| 5252 | |
| 5253 | // Keep track of where we are in the scope lists, as we will also |
| 5254 | // create blocks at scope boundaries. |
| 5255 | if (opts.compDbgCode && (info.compVarScopesCount > 0)) |
| 5256 | { |
| 5257 | compResetScopeLists(); |
| 5258 | |
| 5259 | // Ignore scopes beginning at offset 0 |
| 5260 | while (compGetNextEnterScope(0)) |
| 5261 | { /* do nothing */ |
| 5262 | } |
| 5263 | while (compGetNextExitScope(0)) |
| 5264 | { /* do nothing */ |
| 5265 | } |
| 5266 | } |
| 5267 | |
| 5268 | do |
| 5269 | { |
| 5270 | unsigned jmpAddr = DUMMY_INIT(BAD_IL_OFFSET); |
| 5271 | unsigned bbFlags = 0; |
| 5272 | BBswtDesc* swtDsc = nullptr; |
| 5273 | unsigned nxtBBoffs; |
| 5274 | OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); |
| 5275 | codeAddr += sizeof(__int8); |
| 5276 | BBjumpKinds jmpKind = BBJ_NONE; |
| 5277 | |
| 5278 | DECODE_OPCODE: |
| 5279 | |
| 5280 | /* Get the size of additional parameters */ |
| 5281 | |
| 5282 | noway_assert((unsigned)opcode < CEE_COUNT); |
| 5283 | |
| 5284 | unsigned sz = opcodeSizes[opcode]; |
| 5285 | |
| 5286 | switch (opcode) |
| 5287 | { |
| 5288 | signed jmpDist; |
| 5289 | |
| 5290 | case CEE_PREFIX1: |
| 5291 | if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) |
| 5292 | { |
| 5293 | BADCODE3("jump target between prefix 0xFE and opcode" , " at offset %04X" , |
| 5294 | (IL_OFFSET)(codeAddr - codeBegp)); |
| 5295 | } |
| 5296 | |
| 5297 | opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); |
| 5298 | codeAddr += sizeof(__int8); |
| 5299 | goto DECODE_OPCODE; |
| 5300 | |
| 5301 | /* Check to see if we have a jump/return opcode */ |
| 5302 | |
| 5303 | case CEE_BRFALSE: |
| 5304 | case CEE_BRFALSE_S: |
| 5305 | case CEE_BRTRUE: |
| 5306 | case CEE_BRTRUE_S: |
| 5307 | |
| 5308 | case CEE_BEQ: |
| 5309 | case CEE_BEQ_S: |
| 5310 | case CEE_BGE: |
| 5311 | case CEE_BGE_S: |
| 5312 | case CEE_BGE_UN: |
| 5313 | case CEE_BGE_UN_S: |
| 5314 | case CEE_BGT: |
| 5315 | case CEE_BGT_S: |
| 5316 | case CEE_BGT_UN: |
| 5317 | case CEE_BGT_UN_S: |
| 5318 | case CEE_BLE: |
| 5319 | case CEE_BLE_S: |
| 5320 | case CEE_BLE_UN: |
| 5321 | case CEE_BLE_UN_S: |
| 5322 | case CEE_BLT: |
| 5323 | case CEE_BLT_S: |
| 5324 | case CEE_BLT_UN: |
| 5325 | case CEE_BLT_UN_S: |
| 5326 | case CEE_BNE_UN: |
| 5327 | case CEE_BNE_UN_S: |
| 5328 | |
| 5329 | jmpKind = BBJ_COND; |
| 5330 | goto JMP; |
| 5331 | |
| 5332 | case CEE_LEAVE: |
| 5333 | case CEE_LEAVE_S: |
| 5334 | |
| 5335 | // We need to check if we are jumping out of a finally-protected try. |
| 5336 | jmpKind = BBJ_LEAVE; |
| 5337 | goto JMP; |
| 5338 | |
| 5339 | case CEE_BR: |
| 5340 | case CEE_BR_S: |
| 5341 | jmpKind = BBJ_ALWAYS; |
| 5342 | goto JMP; |
| 5343 | |
| 5344 | JMP: |
| 5345 | |
| 5346 | /* Compute the target address of the jump */ |
| 5347 | |
| 5348 | jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); |
| 5349 | |
| 5350 | if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S)) |
| 5351 | { |
| 5352 | continue; /* NOP */ |
| 5353 | } |
| 5354 | |
| 5355 | jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; |
| 5356 | break; |
| 5357 | |
| 5358 | case CEE_SWITCH: |
| 5359 | { |
| 5360 | unsigned jmpBase; |
| 5361 | unsigned jmpCnt; // # of switch cases (excluding default) |
| 5362 | |
| 5363 | BasicBlock** jmpTab; |
| 5364 | BasicBlock** jmpPtr; |
| 5365 | |
| 5366 | /* Allocate the switch descriptor */ |
| 5367 | |
| 5368 | swtDsc = new (this, CMK_BasicBlock) BBswtDesc; |
| 5369 | |
| 5370 | /* Read the number of entries in the table */ |
| 5371 | |
| 5372 | jmpCnt = getU4LittleEndian(codeAddr); |
| 5373 | codeAddr += 4; |
| 5374 | |
| 5375 | /* Compute the base offset for the opcode */ |
| 5376 | |
| 5377 | jmpBase = (IL_OFFSET)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); |
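| | // To illustrate with made-up numbers: for a switch with jmpCnt == 2 and signed |
| | // deltas +5 and -12, the operand stream is the DWORD count followed by the two |
| | // DWORD deltas, the targets are jmpBase + 5 and jmpBase - 12, and jmpBase itself |
| | // (the offset of the instruction after the whole switch) becomes the default |
| | // case appended at the end of the table below. |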
| 5378 | |
| 5379 | /* Allocate the jump table */ |
| 5380 | |
| 5381 | jmpPtr = jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jmpCnt + 1]; |
| 5382 | |
| 5383 | /* Fill in the jump table */ |
| 5384 | |
| 5385 | for (unsigned count = jmpCnt; count; count--) |
| 5386 | { |
| 5387 | jmpDist = getI4LittleEndian(codeAddr); |
| 5388 | codeAddr += 4; |
| 5389 | |
| 5390 | // store the offset in the pointer. We change these in fgLinkBasicBlocks(). |
| 5391 | *jmpPtr++ = (BasicBlock*)(size_t)(jmpBase + jmpDist); |
| 5392 | } |
| 5393 | |
| 5394 | /* Append the default label to the target table */ |
| 5395 | |
| 5396 | *jmpPtr++ = (BasicBlock*)(size_t)jmpBase; |
| 5397 | |
| 5398 | /* Make sure we found the right number of labels */ |
| 5399 | |
| 5400 | noway_assert(jmpPtr == jmpTab + jmpCnt + 1); |
| 5401 | |
| 5402 | /* Compute the size of the switch opcode operands */ |
| 5403 | |
| 5404 | sz = sizeof(DWORD) + jmpCnt * sizeof(DWORD); |
| 5405 | |
| 5406 | /* Fill in the remaining fields of the switch descriptor */ |
| 5407 | |
| 5408 | swtDsc->bbsCount = jmpCnt + 1; |
| 5409 | swtDsc->bbsDstTab = jmpTab; |
| 5410 | |
| 5411 | /* This is definitely a jump */ |
| 5412 | |
| 5413 | jmpKind = BBJ_SWITCH; |
| 5414 | fgHasSwitch = true; |
| 5415 | |
| 5416 | if (opts.compProcedureSplitting) |
| 5417 | { |
| 5418 | // TODO-CQ: We might need to create a switch table; we won't know for sure until much later. |
| 5419 | // However, switch tables don't work with hot/cold splitting, currently. The switch table data needs |
| 5420 | // a relocation such that if the base (the first block after the prolog) and target of the switch |
| 5421 | // branch are put in different sections, the difference stored in the table is updated. However, our |
| 5422 | // relocation implementation doesn't support three different pointers (relocation address, base, and |
| 5423 | // target). So, we need to change our switch table implementation to be more like |
| 5424 | // JIT64: put the table in the code section, in the same hot/cold section as the switch jump itself |
| 5425 | // (maybe immediately after the switch jump), and make the "base" address be also in that section, |
| 5426 | // probably the address after the switch jump. |
| 5427 | opts.compProcedureSplitting = false; |
| 5428 | JITDUMP("Turning off procedure splitting for this method, as it might need switch tables; " |
| 5429 | "implementation limitation.\n" ); |
| 5430 | } |
| 5431 | } |
| 5432 | goto GOT_ENDP; |
| 5433 | |
| 5434 | case CEE_ENDFILTER: |
| 5435 | bbFlags |= BBF_DONT_REMOVE; |
| 5436 | jmpKind = BBJ_EHFILTERRET; |
| 5437 | break; |
| 5438 | |
| 5439 | case CEE_ENDFINALLY: |
| 5440 | jmpKind = BBJ_EHFINALLYRET; |
| 5441 | break; |
| 5442 | |
| 5443 | case CEE_TAILCALL: |
| 5444 | if (compIsForInlining()) |
| 5445 | { |
| 5446 | // TODO-CQ: We can inline some callees with explicit tail calls if we can guarantee that the calls |
| 5447 | // can be dispatched as tail calls from the caller. |
| 5448 | compInlineResult->NoteFatal(InlineObservation::CALLEE_EXPLICIT_TAIL_PREFIX); |
| 5449 | retBlocks++; |
| 5450 | return retBlocks; |
| 5451 | } |
| 5452 | |
| 5453 | __fallthrough; |
| 5454 | |
| 5455 | case CEE_READONLY: |
| 5456 | case CEE_CONSTRAINED: |
| 5457 | case CEE_VOLATILE: |
| 5458 | case CEE_UNALIGNED: |
| 5459 | // fgFindJumpTargets should have ruled out this possibility |
| 5460 | // (i.e. a prefix opcode as the last instruction in a block) |
| 5461 | noway_assert(codeAddr < codeEndp); |
| 5462 | |
| 5463 | if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) |
| 5464 | { |
| 5465 | BADCODE3("jump target between prefix and an opcode" , " at offset %04X" , |
| 5466 | (IL_OFFSET)(codeAddr - codeBegp)); |
| 5467 | } |
| 5468 | break; |
| 5469 | |
| 5470 | case CEE_CALL: |
| 5471 | case CEE_CALLVIRT: |
| 5472 | case CEE_CALLI: |
| 5473 | { |
| 5474 | if (compIsForInlining() || // Ignore tail call in the inlinee. Period. |
| 5475 | (!tailCall && !compTailCallStress()) // A new BB with BBJ_RETURN would have been created |
| 5476 | |
| 5477 | // after a tailcall statement. |
| 5478 | // We need to keep this invariant if we want to stress the tailcall. |
| 5479 | // That way, the potential (tail)call statement is always the last |
| 5480 | // statement in the block. |
| 5481 | // Otherwise, we will assert at the following line in fgMorphCall() |
| 5482 | // noway_assert(fgMorphStmt->gtNext == NULL); |
| 5483 | ) |
| 5484 | { |
| 5485 | // Neither a .tailcall prefix nor tailcall stress, so move on. |
| 5486 | break; |
| 5487 | } |
| 5488 | |
| 5489 | // Make sure the code sequence is legal for the tail call. |
| 5490 | // If so, mark this BB as having a BBJ_RETURN. |
| 5491 | |
| 5492 | if (codeAddr >= codeEndp - sz) |
| 5493 | { |
| 5494 | BADCODE3("No code found after the call instruction" , " at offset %04X" , |
| 5495 | (IL_OFFSET)(codeAddr - codeBegp)); |
| 5496 | } |
| 5497 | |
| 5498 | if (tailCall) |
| 5499 | { |
| 5500 | bool isCallPopAndRet = false; |
| 5501 | |
| 5502 | // impIsTailCallILPattern uses isRecursive flag to determine whether ret in a fallthrough block is |
| 5503 | // allowed. We don't know at this point whether the call is recursive so we conservatively pass |
| 5504 | // false. This will only affect explicit tail calls when IL verification is not needed for the |
| 5505 | // method. |
| 5506 | bool isRecursive = false; |
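| | // For illustration only, the accepted shape is essentially |
| | //     tail. call <target> |
| | //     ret |
| | // and, on pre-CoreCLR AMD64 builds only, also |
| | //     tail. call <target> |
| | //     pop |
| | //     ret |
| | // anything else following the prefixed call is rejected as bad IL below. |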
| 5507 | if (!impIsTailCallILPattern(tailCall, opcode, codeAddr + sz, codeEndp, isRecursive, |
| 5508 | &isCallPopAndRet)) |
| 5509 | { |
| 5510 | #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) |
| 5511 | BADCODE3("tail call not followed by ret or pop+ret" , " at offset %04X" , |
| 5512 | (IL_OFFSET)(codeAddr - codeBegp)); |
| 5513 | #else |
| 5514 | BADCODE3("tail call not followed by ret" , " at offset %04X" , (IL_OFFSET)(codeAddr - codeBegp)); |
| 5515 | #endif // !FEATURE_CORECLR && _TARGET_AMD64_ |
| 5516 | } |
| 5517 | |
| 5518 | #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_) |
| 5519 | if (isCallPopAndRet) |
| 5520 | { |
| 5521 | // By breaking here, we let pop and ret opcodes to be |
| 5522 | // imported after tail call. If tail prefix is honored, |
| 5523 | // stmts corresponding to pop and ret will be removed |
| 5524 | // in fgMorphCall(). |
| 5525 | break; |
| 5526 | } |
| 5527 | #endif // !FEATURE_CORECLR && _TARGET_AMD64_ |
| 5528 | } |
| 5529 | else |
| 5530 | { |
| 5531 | OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr + sz); |
| 5532 | |
| 5533 | if (nextOpcode != CEE_RET) |
| 5534 | { |
| 5535 | noway_assert(compTailCallStress()); |
| 5536 | // Next OPCODE is not a CEE_RET, bail the attempt to stress the tailcall. |
| 5537 | // (I.e. We will not make a new BB after the "call" statement.) |
| 5538 | break; |
| 5539 | } |
| 5540 | } |
| 5541 | } |
| 5542 | |
| 5543 | /* For tail call, we just call CORINFO_HELP_TAILCALL, and it jumps to the |
| 5544 | target. So we don't need an epilog - just like CORINFO_HELP_THROW. |
| 5545 | Make the block BBJ_RETURN, but we will change it to BBJ_THROW |
| 5546 | if the tailness of the call is satisfied. |
| 5547 | NOTE: The next instruction is guaranteed to be a CEE_RET |
| 5548 | and it will create another BasicBlock. But there may be a |
| 5549 | jump directly to that CEE_RET. If we want to avoid creating |
| 5550 | an unnecessary block, we need to check if the CEE_RET is |
| 5551 | the target of a jump. |
| 5552 | */ |
| 5553 | |
| 5554 | // fall-through |
| 5555 | |
| 5556 | case CEE_JMP: |
| 5557 | /* These are equivalent to a return from the current method, |
| 5558 | but instead of directly returning to the caller we jump and |
| 5559 | execute something else in between */ |
| 5560 | case CEE_RET: |
| 5561 | retBlocks++; |
| 5562 | jmpKind = BBJ_RETURN; |
| 5563 | break; |
| 5564 | |
| 5565 | case CEE_THROW: |
| 5566 | case CEE_RETHROW: |
| 5567 | jmpKind = BBJ_THROW; |
| 5568 | break; |
| 5569 | |
| 5570 | #ifdef DEBUG |
| 5571 | // make certain we did not forget any flow of control instructions |
| 5572 | // by checking the 'ctrl' field in opcode.def. First filter out all |
| 5573 | // non-ctrl instructions |
| 5574 | #define BREAK(name) \ |
| 5575 | case name: \ |
| 5576 | break; |
| 5577 | #define NEXT(name) \ |
| 5578 | case name: \ |
| 5579 | break; |
| 5580 | #define CALL(name) |
| 5581 | #define THROW(name) |
| 5582 | #undef RETURN // undef contract RETURN macro |
| 5583 | #define RETURN(name) |
| 5584 | #define META(name) |
| 5585 | #define BRANCH(name) |
| 5586 | #define COND_BRANCH(name) |
| 5587 | #define PHI(name) |
| 5588 | |
| 5589 | #define OPDEF(name, string, pop, push, oprType, opcType, l, s1, s2, ctrl) ctrl(name) |
| 5590 | #include "opcode.def" |
| 5591 | #undef OPDEF |
| 5592 | |
| 5593 | #undef PHI |
| 5594 | #undef BREAK |
| 5595 | #undef CALL |
| 5596 | #undef NEXT |
| 5597 | #undef THROW |
| 5598 | #undef RETURN |
| 5599 | #undef META |
| 5600 | #undef BRANCH |
| 5601 | #undef COND_BRANCH |
| 5602 | |
| 5603 | // These ctrl-flow opcodes don't need any special handling |
| 5604 | case CEE_NEWOBJ: // CTRL_CALL |
| 5605 | break; |
| 5606 | |
| 5607 | // what's left are forgotten instructions |
| 5608 | default: |
| 5609 | BADCODE("Unrecognized control Opcode" ); |
| 5610 | break; |
| 5611 | #else // !DEBUG |
| 5612 | default: |
| 5613 | break; |
| 5614 | #endif // !DEBUG |
| 5615 | } |
| 5616 | |
| 5617 | /* Jump over the operand */ |
| 5618 | |
| 5619 | codeAddr += sz; |
| 5620 | |
| 5621 | GOT_ENDP: |
| 5622 | |
| 5623 | tailCall = (opcode == CEE_TAILCALL); |
| 5624 | |
| 5625 | /* Make sure a jump target isn't in the middle of our opcode */ |
| 5626 | |
| 5627 | if (sz) |
| 5628 | { |
| 5629 | IL_OFFSET offs = (IL_OFFSET)(codeAddr - codeBegp) - sz; // offset of the operand |
| 5630 | |
| 5631 | for (unsigned i = 0; i < sz; i++, offs++) |
| 5632 | { |
| 5633 | if (jumpTarget->bitVectTest(offs)) |
| 5634 | { |
| 5635 | BADCODE3("jump into the middle of an opcode" , " at offset %04X" , (IL_OFFSET)(codeAddr - codeBegp)); |
| 5636 | } |
| 5637 | } |
| 5638 | } |
| 5639 | |
| 5640 | /* Compute the offset of the next opcode */ |
| 5641 | |
| 5642 | nxtBBoffs = (IL_OFFSET)(codeAddr - codeBegp); |
| 5643 | |
| 5644 | bool foundScope = false; |
| 5645 | |
| 5646 | if (opts.compDbgCode && (info.compVarScopesCount > 0)) |
| 5647 | { |
| 5648 | while (compGetNextEnterScope(nxtBBoffs)) |
| 5649 | { |
| 5650 | foundScope = true; |
| 5651 | } |
| 5652 | while (compGetNextExitScope(nxtBBoffs)) |
| 5653 | { |
| 5654 | foundScope = true; |
| 5655 | } |
| 5656 | } |
| 5657 | |
| 5658 | /* Do we have a jump? */ |
| 5659 | |
| 5660 | if (jmpKind == BBJ_NONE) |
| 5661 | { |
| 5662 | /* No jump; make sure we don't fall off the end of the function */ |
| 5663 | |
| 5664 | if (codeAddr == codeEndp) |
| 5665 | { |
| 5666 | BADCODE3("missing return opcode" , " at offset %04X" , (IL_OFFSET)(codeAddr - codeBegp)); |
| 5667 | } |
| 5668 | |
| 5669 | /* If a label follows this opcode, we'll have to make a new BB */ |
| 5670 | |
| 5671 | bool makeBlock = jumpTarget->bitVectTest(nxtBBoffs); |
| 5672 | |
| 5673 | if (!makeBlock && foundScope) |
| 5674 | { |
| 5675 | makeBlock = true; |
| 5676 | #ifdef DEBUG |
| 5677 | if (verbose) |
| 5678 | { |
| 5679 | printf("Splitting at BBoffs = %04u\n" , nxtBBoffs); |
| 5680 | } |
| 5681 | #endif // DEBUG |
| 5682 | } |
| 5683 | |
| 5684 | if (!makeBlock) |
| 5685 | { |
| 5686 | continue; |
| 5687 | } |
| 5688 | } |
| 5689 | |
| 5690 | /* We need to create a new basic block */ |
| 5691 | |
| 5692 | curBBdesc = fgNewBasicBlock(jmpKind); |
| 5693 | |
| 5694 | curBBdesc->bbFlags |= bbFlags; |
| 5695 | curBBdesc->bbRefs = 0; |
| 5696 | |
| 5697 | curBBdesc->bbCodeOffs = curBBoffs; |
| 5698 | curBBdesc->bbCodeOffsEnd = nxtBBoffs; |
| 5699 | |
| 5700 | unsigned profileWeight; |
| 5701 | if (fgGetProfileWeightForBasicBlock(curBBoffs, &profileWeight)) |
| 5702 | { |
| 5703 | curBBdesc->setBBProfileWeight(profileWeight); |
| 5704 | if (profileWeight == 0) |
| 5705 | { |
| 5706 | curBBdesc->bbSetRunRarely(); |
| 5707 | } |
| 5708 | else |
| 5709 | { |
| 5710 | // Note that bbNewBasicBlock (called from fgNewBasicBlock) may have |
| 5711 | // already marked the block as rarely run. In that case (and when we know |
| 5712 | // that the block profile weight is non-zero) we want to unmark that. |
| 5713 | |
| 5714 | curBBdesc->bbFlags &= ~BBF_RUN_RARELY; |
| 5715 | } |
| 5716 | } |
| 5717 | |
| 5718 | switch (jmpKind) |
| 5719 | { |
| 5720 | case BBJ_SWITCH: |
| 5721 | curBBdesc->bbJumpSwt = swtDsc; |
| 5722 | break; |
| 5723 | |
| 5724 | case BBJ_COND: |
| 5725 | case BBJ_ALWAYS: |
| 5726 | case BBJ_LEAVE: |
| 5727 | noway_assert(jmpAddr != DUMMY_INIT(BAD_IL_OFFSET)); |
| 5728 | curBBdesc->bbJumpOffs = jmpAddr; |
| 5729 | break; |
| 5730 | |
| 5731 | default: |
| 5732 | break; |
| 5733 | } |
| 5734 | |
| 5735 | DBEXEC(verbose, curBBdesc->dspBlockHeader(this, false, false, false)); |
| 5736 | |
| 5737 | /* Remember where the next BB will start */ |
| 5738 | |
| 5739 | curBBoffs = nxtBBoffs; |
| 5740 | } while (codeAddr < codeEndp); |
| 5741 | |
| 5742 | noway_assert(codeAddr == codeEndp); |
| 5743 | |
| 5744 | /* Finally link up the bbJumpDest of the blocks together */ |
| 5745 | |
| 5746 | fgLinkBasicBlocks(); |
| 5747 | |
| 5748 | return retBlocks; |
| 5749 | } |
| 5750 | |
| 5751 | /***************************************************************************** |
| 5752 | * |
| 5753 | * Main entry point to discover the basic blocks for the current function. |
| 5754 | */ |
| 5755 | |
| 5756 | void Compiler::fgFindBasicBlocks() |
| 5757 | { |
| 5758 | #ifdef DEBUG |
| 5759 | if (verbose) |
| 5760 | { |
| 5761 | printf("*************** In fgFindBasicBlocks() for %s\n" , info.compFullName); |
| 5762 | } |
| 5763 | #endif |
| 5764 | |
| 5765 | // Allocate the 'jump target' bit vector |
| 5766 | FixedBitVect* jumpTarget = FixedBitVect::bitVectInit(info.compILCodeSize + 1, this); |
| 5767 | |
| 5768 | // Walk the instrs to find all jump targets |
| 5769 | fgFindJumpTargets(info.compCode, info.compILCodeSize, jumpTarget); |
| 5770 | if (compDonotInline()) |
| 5771 | { |
| 5772 | return; |
| 5773 | } |
| 5774 | |
| 5775 | unsigned XTnum; |
| 5776 | |
| 5777 | /* Are there any exception handlers? */ |
| 5778 | |
| 5779 | if (info.compXcptnsCount > 0) |
| 5780 | { |
| 5781 | noway_assert(!compIsForInlining()); |
| 5782 | |
| 5783 | /* Check and mark all the exception handlers */ |
| 5784 | |
| 5785 | for (XTnum = 0; XTnum < info.compXcptnsCount; XTnum++) |
| 5786 | { |
| 5787 | CORINFO_EH_CLAUSE clause; |
| 5788 | info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); |
| 5789 | noway_assert(clause.HandlerLength != (unsigned)-1); |
| 5790 | |
| 5791 | if (clause.TryLength <= 0) |
| 5792 | { |
| 5793 | BADCODE("try block length <=0" ); |
| 5794 | } |
| 5795 | |
| 5796 | /* Mark the 'try' block extent and the handler itself */ |
| 5797 | |
| 5798 | if (clause.TryOffset > info.compILCodeSize) |
| 5799 | { |
| 5800 | BADCODE("try offset is > codesize" ); |
| 5801 | } |
| 5802 | jumpTarget->bitVectSet(clause.TryOffset); |
| 5803 | |
| 5804 | if (clause.TryOffset + clause.TryLength > info.compILCodeSize) |
| 5805 | { |
| 5806 | BADCODE("try end is > codesize" ); |
| 5807 | } |
| 5808 | jumpTarget->bitVectSet(clause.TryOffset + clause.TryLength); |
| 5809 | |
| 5810 | if (clause.HandlerOffset > info.compILCodeSize) |
| 5811 | { |
| 5812 | BADCODE("handler offset > codesize" ); |
| 5813 | } |
| 5814 | jumpTarget->bitVectSet(clause.HandlerOffset); |
| 5815 | |
| 5816 | if (clause.HandlerOffset + clause.HandlerLength > info.compILCodeSize) |
| 5817 | { |
| 5818 | BADCODE("handler end > codesize" ); |
| 5819 | } |
| 5820 | jumpTarget->bitVectSet(clause.HandlerOffset + clause.HandlerLength); |
| 5821 | |
| 5822 | if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) |
| 5823 | { |
| 5824 | if (clause.FilterOffset > info.compILCodeSize) |
| 5825 | { |
| 5826 | BADCODE("filter offset > codesize" ); |
| 5827 | } |
| 5828 | jumpTarget->bitVectSet(clause.FilterOffset); |
| 5829 | } |
| 5830 | } |
| 5831 | } |
| 5832 | |
| 5833 | #ifdef DEBUG |
| 5834 | if (verbose) |
| 5835 | { |
| 5836 | bool anyJumpTargets = false; |
| 5837 | printf("Jump targets:\n" ); |
| 5838 | for (unsigned i = 0; i < info.compILCodeSize + 1; i++) |
| 5839 | { |
| 5840 | if (jumpTarget->bitVectTest(i)) |
| 5841 | { |
| 5842 | anyJumpTargets = true; |
| 5843 | printf(" IL_%04x\n" , i); |
| 5844 | } |
| 5845 | } |
| 5846 | |
| 5847 | if (!anyJumpTargets) |
| 5848 | { |
| 5849 | printf(" none\n" ); |
| 5850 | } |
| 5851 | } |
| 5852 | #endif // DEBUG |
| 5853 | |
| 5854 | /* Now create the basic blocks */ |
| 5855 | |
| 5856 | unsigned retBlocks = fgMakeBasicBlocks(info.compCode, info.compILCodeSize, jumpTarget); |
| 5857 | |
| 5858 | if (compIsForInlining()) |
| 5859 | { |
| 5860 | |
| 5861 | #ifdef DEBUG |
| 5862 | // If fgFindJumpTargets marked the call as "no return" there |
| 5863 | // really should be no BBJ_RETURN blocks in the method. |
| 5864 | bool markedNoReturn = (impInlineInfo->iciCall->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0; |
| 5865 | assert((markedNoReturn && (retBlocks == 0)) || (!markedNoReturn && (retBlocks >= 1))); |
| 5866 | #endif // DEBUG |
| 5867 | |
| 5868 | if (compInlineResult->IsFailure()) |
| 5869 | { |
| 5870 | return; |
| 5871 | } |
| 5872 | |
| 5873 | noway_assert(info.compXcptnsCount == 0); |
| 5874 | compHndBBtab = impInlineInfo->InlinerCompiler->compHndBBtab; |
| 5875 | compHndBBtabAllocCount = |
| 5876 | impInlineInfo->InlinerCompiler->compHndBBtabAllocCount; // we probably only use the table, not add to it. |
| 5877 | compHndBBtabCount = impInlineInfo->InlinerCompiler->compHndBBtabCount; |
| 5878 | info.compXcptnsCount = impInlineInfo->InlinerCompiler->info.compXcptnsCount; |
| 5879 | |
| 5880 | // Use a spill temp for the return value if there are multiple return blocks, |
| 5881 | // or if the inlinee has GC ref locals. |
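| | // Hedged illustration (hypothetical inlinee, not from the original source): |
| | //     static string Pick(bool b) { if (b) return "yes"; return "no"; } |
| | // has two BBJ_RETURN blocks, so its return value is spilled to this temp |
| | // rather than the return expression being used directly at the call site. |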
| 5882 | if ((info.compRetNativeType != TYP_VOID) && ((retBlocks > 1) || impInlineInfo->HasGcRefLocals())) |
| 5883 | { |
| 5884 | // If we've spilled the ret expr to a temp we can reuse the temp |
| 5885 | // as the inlinee return spill temp. |
| 5886 | // |
| 5887 | // Todo: see if it is even better to always use this existing temp |
| 5888 | // for return values, even if we otherwise wouldn't need a return spill temp... |
| 5889 | lvaInlineeReturnSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; |
| 5890 | |
| 5891 | if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) |
| 5892 | { |
| 5893 | // This temp should already have the type of the return value. |
| 5894 | JITDUMP("\nInliner: re-using pre-existing spill temp V%02u\n" , lvaInlineeReturnSpillTemp); |
| 5895 | |
| 5896 | if (info.compRetType == TYP_REF) |
| 5897 | { |
| 5898 | // We may have co-opted an existing temp for the return spill. |
| 5899 | // We likely assumed it was single-def at the time, but now |
| 5900 | // we can see it has multiple definitions. |
| 5901 | if ((retBlocks > 1) && (lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef == 1)) |
| 5902 | { |
| 5903 | // Make sure it is no longer marked single def. This is only safe |
| 5904 | // to do if we haven't ever updated the type. |
| 5905 | assert(!lvaTable[lvaInlineeReturnSpillTemp].lvClassInfoUpdated); |
| 5906 | JITDUMP("Marked return spill temp V%02u as NOT single def temp\n" , lvaInlineeReturnSpillTemp); |
| 5907 | lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 0; |
| 5908 | } |
| 5909 | } |
| 5910 | } |
| 5911 | else |
| 5912 | { |
| 5913 | // The lifetime of this var might span multiple BBs. So it is a long-lifetime compiler temp. |
| 5914 | lvaInlineeReturnSpillTemp = lvaGrabTemp(false DEBUGARG("Inline return value spill temp")); |
| 5915 | lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType; |
| 5916 | |
| 5917 | // If the method returns a ref class, set the class of the spill temp |
| 5918 | // to the method's return value. We may update this later if it turns |
| 5919 | // out we can prove the method returns a more specific type. |
| 5920 | if (info.compRetType == TYP_REF) |
| 5921 | { |
| 5922 | // The return spill temp is single def only if the method has a single return block. |
| 5923 | if (retBlocks == 1) |
| 5924 | { |
| 5925 | lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 1; |
| 5926 | JITDUMP("Marked return spill temp V%02u as a single def temp\n" , lvaInlineeReturnSpillTemp); |
| 5927 | } |
| 5928 | |
| 5929 | CORINFO_CLASS_HANDLE retClassHnd = impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass; |
| 5930 | if (retClassHnd != nullptr) |
| 5931 | { |
| 5932 | lvaSetClass(lvaInlineeReturnSpillTemp, retClassHnd); |
| 5933 | } |
| 5934 | } |
| 5935 | } |
| 5936 | } |
| 5937 | |
| 5938 | return; |
| 5939 | } |
| 5940 | |
| 5941 | /* Mark all blocks within 'try' blocks as such */ |
| 5942 | |
| 5943 | if (info.compXcptnsCount == 0) |
| 5944 | { |
| 5945 | return; |
| 5946 | } |
| 5947 | |
| 5948 | if (info.compXcptnsCount > MAX_XCPTN_INDEX) |
| 5949 | { |
| 5950 | IMPL_LIMITATION("too many exception clauses" ); |
| 5951 | } |
| 5952 | |
| 5953 | /* Allocate the exception handler table */ |
| 5954 | |
| 5955 | fgAllocEHTable(); |
| 5956 | |
| 5957 | /* Assume we don't need to sort the EH table (such that nested try/catch |
| 5958 | * appear before their try or handler parent). The EH verifier will notice |
| 5959 | * when we do need to sort it. |
| 5960 | */ |
| 5961 | |
| 5962 | fgNeedToSortEHTable = false; |
| 5963 | |
| 5964 | verInitEHTree(info.compXcptnsCount); |
| 5965 | EHNodeDsc* initRoot = ehnNext; // remember the original root since |
| 5966 | // it may get modified during insertion |
| 5967 | |
| 5968 | // Annotate BBs with exception handling information required for generating correct eh code |
| 5969 | // as well as checking for correct IL |
| 5970 | |
| 5971 | EHblkDsc* HBtab; |
| 5972 | |
| 5973 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 5974 | { |
| 5975 | CORINFO_EH_CLAUSE clause; |
| 5976 | info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); |
| 5977 | noway_assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED |
| 5978 | |
| 5979 | #ifdef DEBUG |
| 5980 | if (verbose) |
| 5981 | { |
| 5982 | dispIncomingEHClause(XTnum, clause); |
| 5983 | } |
| 5984 | #endif // DEBUG |
| 5985 | |
| 5986 | IL_OFFSET tryBegOff = clause.TryOffset; |
| 5987 | IL_OFFSET tryEndOff = tryBegOff + clause.TryLength; |
| 5988 | IL_OFFSET filterBegOff = 0; |
| 5989 | IL_OFFSET hndBegOff = clause.HandlerOffset; |
| 5990 | IL_OFFSET hndEndOff = hndBegOff + clause.HandlerLength; |
| 5991 | |
| 5992 | if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) |
| 5993 | { |
| 5994 | filterBegOff = clause.FilterOffset; |
| 5995 | } |
| 5996 | |
| 5997 | if (tryEndOff > info.compILCodeSize) |
| 5998 | { |
| 5999 | BADCODE3("end of try block beyond end of method for try" , " at offset %04X" , tryBegOff); |
| 6000 | } |
| 6001 | if (hndEndOff > info.compILCodeSize) |
| 6002 | { |
| 6003 | BADCODE3("end of hnd block beyond end of method for try" , " at offset %04X" , tryBegOff); |
| 6004 | } |
| 6005 | |
| 6006 | HBtab->ebdTryBegOffset = tryBegOff; |
| 6007 | HBtab->ebdTryEndOffset = tryEndOff; |
| 6008 | HBtab->ebdFilterBegOffset = filterBegOff; |
| 6009 | HBtab->ebdHndBegOffset = hndBegOff; |
| 6010 | HBtab->ebdHndEndOffset = hndEndOff; |
| 6011 | |
| 6012 | /* Convert the various addresses to basic blocks */ |
| 6013 | |
| 6014 | BasicBlock* tryBegBB = fgLookupBB(tryBegOff); |
| 6015 | BasicBlock* tryEndBB = |
| 6016 | fgLookupBB(tryEndOff); // note: this can be NULL if the try region is at the end of the function |
| 6017 | BasicBlock* hndBegBB = fgLookupBB(hndBegOff); |
| 6018 | BasicBlock* hndEndBB = nullptr; |
| 6019 | BasicBlock* filtBB = nullptr; |
| 6020 | BasicBlock* block; |
| 6021 | |
| 6022 | // |
| 6023 | // Assert that the try/hnd beginning blocks are set up correctly |
| 6024 | // |
| 6025 | if (tryBegBB == nullptr) |
| 6026 | { |
| 6027 | BADCODE("Try Clause is invalid" ); |
| 6028 | } |
| 6029 | |
| 6030 | if (hndBegBB == nullptr) |
| 6031 | { |
| 6032 | BADCODE("Handler Clause is invalid" ); |
| 6033 | } |
| 6034 | |
| 6035 | tryBegBB->bbFlags |= BBF_HAS_LABEL; |
| 6036 | hndBegBB->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET; |
| 6037 | |
| 6038 | #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION |
| 6039 | // This will change the block weight from 0 to 1 |
| 6040 | // and clear the rarely run flag |
| 6041 | hndBegBB->makeBlockHot(); |
| 6042 | #else |
| 6043 | hndBegBB->bbSetRunRarely(); // handler entry points are rarely executed |
| 6044 | #endif |
| 6045 | |
| 6046 | if (hndEndOff < info.compILCodeSize) |
| 6047 | { |
| 6048 | hndEndBB = fgLookupBB(hndEndOff); |
| 6049 | } |
| 6050 | |
| 6051 | if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) |
| 6052 | { |
| 6053 | filtBB = HBtab->ebdFilter = fgLookupBB(clause.FilterOffset); |
| 6054 | |
| 6055 | filtBB->bbCatchTyp = BBCT_FILTER; |
| 6056 | filtBB->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET; |
| 6057 | |
| 6058 | hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER; |
| 6059 | |
| 6060 | #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION |
| 6061 | // This will change the block weight from 0 to 1 |
| 6062 | // and clear the rarely run flag |
| 6063 | filtBB->makeBlockHot(); |
| 6064 | #else |
| 6065 | filtBB->bbSetRunRarely(); // filter entry points are rarely executed |
| 6066 | #endif |
| 6067 | |
| 6068 | // Mark all BBs that belong to the filter with the XTnum of the corresponding handler |
| 6069 | for (block = filtBB; /**/; block = block->bbNext) |
| 6070 | { |
| 6071 | if (block == nullptr) |
| 6072 | { |
| 6073 | BADCODE3("Missing endfilter for filter" , " at offset %04X" , filtBB->bbCodeOffs); |
| 6074 | return; |
| 6075 | } |
| 6076 | |
| 6077 | // Still inside the filter |
| 6078 | block->setHndIndex(XTnum); |
| 6079 | |
| 6080 | if (block->bbJumpKind == BBJ_EHFILTERRET) |
| 6081 | { |
| 6082 | // Mark catch handler as successor. |
| 6083 | block->bbJumpDest = hndBegBB; |
| 6084 | assert(block->bbJumpDest->bbCatchTyp == BBCT_FILTER_HANDLER); |
| 6085 | break; |
| 6086 | } |
| 6087 | } |
| 6088 | |
| 6089 | if (!block->bbNext || block->bbNext != hndBegBB) |
| 6090 | { |
| 6091 | BADCODE3("Filter does not immediately precede handler for filter" , " at offset %04X" , |
| 6092 | filtBB->bbCodeOffs); |
| 6093 | } |
| 6094 | } |
| 6095 | else |
| 6096 | { |
| 6097 | HBtab->ebdTyp = clause.ClassToken; |
| 6098 | |
| 6099 | /* Set bbCatchTyp as appropriate */ |
| 6100 | |
| 6101 | if (clause.Flags & CORINFO_EH_CLAUSE_FINALLY) |
| 6102 | { |
| 6103 | hndBegBB->bbCatchTyp = BBCT_FINALLY; |
| 6104 | } |
| 6105 | else |
| 6106 | { |
| 6107 | if (clause.Flags & CORINFO_EH_CLAUSE_FAULT) |
| 6108 | { |
| 6109 | hndBegBB->bbCatchTyp = BBCT_FAULT; |
| 6110 | } |
| 6111 | else |
| 6112 | { |
| 6113 | hndBegBB->bbCatchTyp = clause.ClassToken; |
| 6114 | |
| 6115 | // These values should be non-zero and must not |
| 6116 | // collide with real tokens for bbCatchTyp |
| 6117 | if (clause.ClassToken == 0) |
| 6118 | { |
| 6119 | BADCODE("Exception catch type is Null" ); |
| 6120 | } |
| 6121 | |
| 6122 | noway_assert(clause.ClassToken != BBCT_FAULT); |
| 6123 | noway_assert(clause.ClassToken != BBCT_FINALLY); |
| 6124 | noway_assert(clause.ClassToken != BBCT_FILTER); |
| 6125 | noway_assert(clause.ClassToken != BBCT_FILTER_HANDLER); |
| 6126 | } |
| 6127 | } |
| 6128 | } |
| 6129 | |
| 6130 | /* Mark the initial block and last blocks in the 'try' region */ |
| 6131 | |
| 6132 | tryBegBB->bbFlags |= BBF_TRY_BEG | BBF_HAS_LABEL; |
| 6133 | |
| 6134 | /* Prevent future optimizations from removing the first block */ |
| 6135 | /* of a TRY block and the first block of an exception handler */ |
| 6136 | |
| 6137 | tryBegBB->bbFlags |= BBF_DONT_REMOVE; |
| 6138 | hndBegBB->bbFlags |= BBF_DONT_REMOVE; |
| 6139 | hndBegBB->bbRefs++; // The first block of a handler gets an extra, "artificial" reference count. |
| 6140 | |
| 6141 | if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) |
| 6142 | { |
| 6143 | filtBB->bbFlags |= BBF_DONT_REMOVE; |
| 6144 | filtBB->bbRefs++; // The first block of a filter gets an extra, "artificial" reference count. |
| 6145 | } |
| 6146 | |
| 6147 | tryBegBB->bbFlags |= BBF_DONT_REMOVE; |
| 6148 | hndBegBB->bbFlags |= BBF_DONT_REMOVE; |
| 6149 | |
| 6150 | // |
| 6151 | // Store the info to the table of EH block handlers |
| 6152 | // |
| 6153 | |
| 6154 | HBtab->ebdHandlerType = ToEHHandlerType(clause.Flags); |
| 6155 | |
| 6156 | HBtab->ebdTryBeg = tryBegBB; |
| 6157 | HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->bbPrev; |
| 6158 | |
| 6159 | HBtab->ebdHndBeg = hndBegBB; |
| 6160 | HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->bbPrev; |
| 6161 | |
| 6162 | // |
| 6163 | // Assert that all of our try/hnd blocks are setup correctly. |
| 6164 | // |
| 6165 | if (HBtab->ebdTryLast == nullptr) |
| 6166 | { |
| 6167 | BADCODE("Try Clause is invalid" ); |
| 6168 | } |
| 6169 | |
| 6170 | if (HBtab->ebdHndLast == nullptr) |
| 6171 | { |
| 6172 | BADCODE("Handler Clause is invalid" ); |
| 6173 | } |
| 6174 | |
| 6175 | // |
| 6176 | // Verify that it's legal |
| 6177 | // |
| 6178 | |
| 6179 | verInsertEhNode(&clause, HBtab); |
| 6180 | |
| 6181 | } // end foreach handler table entry |
| 6182 | |
| 6183 | fgSortEHTable(); |
| 6184 | |
| 6185 | // Next, set things related to nesting that depend on the sorting being complete. |
| 6186 | |
| 6187 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 6188 | { |
| 6189 | /* Mark all blocks in the finally/fault or catch clause */ |
| 6190 | |
| 6191 | BasicBlock* tryBegBB = HBtab->ebdTryBeg; |
| 6192 | BasicBlock* hndBegBB = HBtab->ebdHndBeg; |
| 6193 | |
| 6194 | IL_OFFSET tryBegOff = HBtab->ebdTryBegOffset; |
| 6195 | IL_OFFSET tryEndOff = HBtab->ebdTryEndOffset; |
| 6196 | |
| 6197 | IL_OFFSET hndBegOff = HBtab->ebdHndBegOffset; |
| 6198 | IL_OFFSET hndEndOff = HBtab->ebdHndEndOffset; |
| 6199 | |
| 6200 | BasicBlock* block; |
| 6201 | |
| 6202 | for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->bbNext) |
| 6203 | { |
| 6204 | if (!block->hasHndIndex()) |
| 6205 | { |
| 6206 | block->setHndIndex(XTnum); |
| 6207 | } |
| 6208 | |
| 6209 | // All blocks in a catch handler or filter are rarely run, except the entry |
| 6210 | if ((block != hndBegBB) && (hndBegBB->bbCatchTyp != BBCT_FINALLY)) |
| 6211 | { |
| 6212 | block->bbSetRunRarely(); |
| 6213 | } |
| 6214 | } |
| 6215 | |
| 6216 | /* Mark all blocks within the covered range of the try */ |
| 6217 | |
| 6218 | for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->bbNext) |
| 6219 | { |
| 6220 | /* Mark this BB as belonging to a 'try' block */ |
| 6221 | |
| 6222 | if (!block->hasTryIndex()) |
| 6223 | { |
| 6224 | block->setTryIndex(XTnum); |
| 6225 | } |
| 6226 | |
| 6227 | #ifdef DEBUG |
| 6228 | /* Note: the BB can't span the 'try' block */ |
| 6229 | |
| 6230 | if (!(block->bbFlags & BBF_INTERNAL)) |
| 6231 | { |
| 6232 | noway_assert(tryBegOff <= block->bbCodeOffs); |
| 6233 | noway_assert(tryEndOff >= block->bbCodeOffsEnd || tryEndOff == tryBegOff); |
| 6234 | } |
| 6235 | #endif |
| 6236 | } |
| 6237 | |
| 6238 | /* Init ebdHandlerNestingLevel of current clause, and bump up value for all |
| 6239 | * enclosed clauses (which have to be before it in the table). |
| 6240 | * Innermost try-finally blocks must precede outermost |
| 6241 | * try-finally blocks. |
| 6242 | */ |
| 6243 | |
| 6244 | #if !FEATURE_EH_FUNCLETS |
| 6245 | HBtab->ebdHandlerNestingLevel = 0; |
| 6246 | #endif // !FEATURE_EH_FUNCLETS |
| 6247 | |
| 6248 | HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; |
| 6249 | HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; |
| 6250 | |
| 6251 | noway_assert(XTnum < compHndBBtabCount); |
| 6252 | noway_assert(XTnum == ehGetIndex(HBtab)); |
| 6253 | |
| 6254 | for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++) |
| 6255 | { |
| 6256 | #if !FEATURE_EH_FUNCLETS |
| 6257 | if (jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) |
| 6258 | { |
| 6259 | xtab->ebdHandlerNestingLevel++; |
| 6260 | } |
| 6261 | #endif // !FEATURE_EH_FUNCLETS |
| 6262 | |
| 6263 | /* If we haven't recorded an enclosing try index for xtab then see |
| 6264 | * if this EH region should be recorded. We check if the |
| 6265 | * first offset in the xtab lies within our region. If so, |
| 6266 | * the last offset also must lie within the region, due to |
| 6267 | * nesting rules. verInsertEhNode(), below, will check for proper nesting. |
| 6268 | */ |
| 6269 | if (xtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) |
| 6270 | { |
| 6271 | bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), tryBegOff, tryEndOff); |
| 6272 | if (begBetween) |
| 6273 | { |
| 6274 | // Record the enclosing scope link |
| 6275 | xtab->ebdEnclosingTryIndex = (unsigned short)XTnum; |
| 6276 | } |
| 6277 | } |
| 6278 | |
| 6279 | /* Do the same for the enclosing handler index. |
| 6280 | */ |
| 6281 | if (xtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) |
| 6282 | { |
| 6283 | bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), hndBegOff, hndEndOff); |
| 6284 | if (begBetween) |
| 6285 | { |
| 6286 | // Record the enclosing scope link |
| 6287 | xtab->ebdEnclosingHndIndex = (unsigned short)XTnum; |
| 6288 | } |
| 6289 | } |
| 6290 | } |
| 6291 | |
| 6292 | } // end foreach handler table entry |
| 6293 | |
| 6294 | #if !FEATURE_EH_FUNCLETS |
| 6295 | |
| 6296 | EHblkDsc* HBtabEnd; |
| 6297 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++) |
| 6298 | { |
| 6299 | if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) |
| 6300 | ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; |
| 6301 | } |
| 6302 | |
| 6303 | #endif // !FEATURE_EH_FUNCLETS |
| 6304 | |
| 6305 | #ifndef DEBUG |
| 6306 | if (tiVerificationNeeded) |
| 6307 | #endif |
| 6308 | { |
| 6309 | // always run these checks for a debug build |
| 6310 | verCheckNestingLevel(initRoot); |
| 6311 | } |
| 6312 | |
| 6313 | #ifndef DEBUG |
| 6314 | // fgNormalizeEH assumes that this test has been passed. And Ssa assumes that fgNormalizeEHTable |
| 6315 | // has been run. So do this unless we're in minOpts mode (and always in debug). |
| 6316 | if (tiVerificationNeeded || !opts.MinOpts()) |
| 6317 | #endif |
| 6318 | { |
| 6319 | fgCheckBasicBlockControlFlow(); |
| 6320 | } |
| 6321 | |
| 6322 | #ifdef DEBUG |
| 6323 | if (verbose) |
| 6324 | { |
JITDUMP("*************** After fgFindBasicBlocks() has created the EH table\n");
| 6326 | fgDispHandlerTab(); |
| 6327 | } |
| 6328 | |
| 6329 | // We can't verify the handler table until all the IL legality checks have been done (above), since bad IL |
| 6330 | // (such as illegal nesting of regions) will trigger asserts here. |
| 6331 | fgVerifyHandlerTab(); |
| 6332 | #endif |
| 6333 | |
| 6334 | fgNormalizeEH(); |
| 6335 | } |
| 6336 | |
| 6337 | /***************************************************************************** |
| 6338 | * Check control flow constraints for well formed IL. Bail if any of the constraints |
| 6339 | * are violated. |
| 6340 | */ |
| 6341 | |
| 6342 | void Compiler::fgCheckBasicBlockControlFlow() |
| 6343 | { |
| 6344 | assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks |
| 6345 | |
| 6346 | EHblkDsc* HBtab; |
| 6347 | |
| 6348 | for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext) |
| 6349 | { |
| 6350 | if (blk->bbFlags & BBF_INTERNAL) |
| 6351 | { |
| 6352 | continue; |
| 6353 | } |
| 6354 | |
| 6355 | switch (blk->bbJumpKind) |
| 6356 | { |
| 6357 | case BBJ_NONE: // block flows into the next one (no jump) |
| 6358 | |
| 6359 | fgControlFlowPermitted(blk, blk->bbNext); |
| 6360 | |
| 6361 | break; |
| 6362 | |
| 6363 | case BBJ_ALWAYS: // block does unconditional jump to target |
| 6364 | |
| 6365 | fgControlFlowPermitted(blk, blk->bbJumpDest); |
| 6366 | |
| 6367 | break; |
| 6368 | |
| 6369 | case BBJ_COND: // block conditionally jumps to the target |
| 6370 | |
| 6371 | fgControlFlowPermitted(blk, blk->bbNext); |
| 6372 | |
| 6373 | fgControlFlowPermitted(blk, blk->bbJumpDest); |
| 6374 | |
| 6375 | break; |
| 6376 | |
| 6377 | case BBJ_RETURN: // block ends with 'ret' |
| 6378 | |
| 6379 | if (blk->hasTryIndex() || blk->hasHndIndex()) |
| 6380 | { |
BADCODE3("Return from a protected block", ". Before offset %04X", blk->bbCodeOffsEnd);
| 6382 | } |
| 6383 | break; |
| 6384 | |
| 6385 | case BBJ_EHFINALLYRET: |
| 6386 | case BBJ_EHFILTERRET: |
| 6387 | |
| 6388 | if (!blk->hasHndIndex()) // must be part of a handler |
| 6389 | { |
BADCODE3("Missing handler", ". Before offset %04X", blk->bbCodeOffsEnd);
| 6391 | } |
| 6392 | |
| 6393 | HBtab = ehGetDsc(blk->getHndIndex()); |
| 6394 | |
| 6395 | // Endfilter allowed only in a filter block |
| 6396 | if (blk->bbJumpKind == BBJ_EHFILTERRET) |
| 6397 | { |
| 6398 | if (!HBtab->HasFilter()) |
| 6399 | { |
BADCODE("Unexpected endfilter");
| 6401 | } |
| 6402 | } |
| 6403 | // endfinally allowed only in a finally/fault block |
| 6404 | else if (!HBtab->HasFinallyOrFaultHandler()) |
| 6405 | { |
BADCODE("Unexpected endfinally");
| 6407 | } |
| 6408 | |
| 6409 | // The handler block should be the innermost block |
| 6410 | // Exception blocks are listed, innermost first. |
| 6411 | if (blk->hasTryIndex() && (blk->getTryIndex() < blk->getHndIndex())) |
| 6412 | { |
BADCODE("endfinally / endfilter in nested try block");
| 6414 | } |
| 6415 | |
| 6416 | break; |
| 6417 | |
| 6418 | case BBJ_THROW: // block ends with 'throw' |
| 6419 | /* throw is permitted from every BB, so nothing to check */ |
| 6420 | /* importer makes sure that rethrow is done from a catch */ |
| 6421 | break; |
| 6422 | |
| 6423 | case BBJ_LEAVE: // block always jumps to the target, maybe out of guarded |
| 6424 | // region. Used temporarily until importing |
| 6425 | fgControlFlowPermitted(blk, blk->bbJumpDest, TRUE); |
| 6426 | |
| 6427 | break; |
| 6428 | |
| 6429 | case BBJ_SWITCH: // block ends with a switch statement |
| 6430 | |
| 6431 | BBswtDesc* swtDesc; |
| 6432 | swtDesc = blk->bbJumpSwt; |
| 6433 | |
| 6434 | assert(swtDesc); |
| 6435 | |
| 6436 | unsigned i; |
| 6437 | for (i = 0; i < swtDesc->bbsCount; i++) |
| 6438 | { |
| 6439 | fgControlFlowPermitted(blk, swtDesc->bbsDstTab[i]); |
| 6440 | } |
| 6441 | |
| 6442 | break; |
| 6443 | |
| 6444 | case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if FEATURE_EH_FUNCLETS) |
| 6445 | case BBJ_CALLFINALLY: // block always calls the target finally |
| 6446 | default: |
noway_assert(!"Unexpected bbJumpKind"); // these blocks don't get created until importing
| 6448 | break; |
| 6449 | } |
| 6450 | } |
| 6451 | } |
| 6452 | |
| 6453 | /**************************************************************************** |
| 6454 | * Check that the leave from the block is legal. |
| 6455 | * Consider removing this check here if we can do it cheaply during importing |
| 6456 | */ |
| 6457 | |
| 6458 | void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, BOOL isLeave) |
| 6459 | { |
| 6460 | assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks |
| 6461 | |
| 6462 | unsigned srcHndBeg, destHndBeg; |
| 6463 | unsigned srcHndEnd, destHndEnd; |
| 6464 | bool srcInFilter, destInFilter; |
| 6465 | bool srcInCatch = false; |
| 6466 | |
| 6467 | EHblkDsc* srcHndTab; |
| 6468 | |
| 6469 | srcHndTab = ehInitHndRange(blkSrc, &srcHndBeg, &srcHndEnd, &srcInFilter); |
| 6470 | ehInitHndRange(blkDest, &destHndBeg, &destHndEnd, &destInFilter); |
| 6471 | |
| 6472 | /* Impose the rules for leaving or jumping from handler blocks */ |
| 6473 | |
| 6474 | if (blkSrc->hasHndIndex()) |
| 6475 | { |
| 6476 | srcInCatch = srcHndTab->HasCatchHandler() && srcHndTab->InHndRegionILRange(blkSrc); |
| 6477 | |
| 6478 | /* Are we jumping within the same handler index? */ |
| 6479 | if (BasicBlock::sameHndRegion(blkSrc, blkDest)) |
| 6480 | { |
| 6481 | /* Do we have a filter clause? */ |
| 6482 | if (srcHndTab->HasFilter()) |
| 6483 | { |
| 6484 | /* filters and catch handlers share same eh index */ |
| 6485 | /* we need to check for control flow between them. */ |
| 6486 | if (srcInFilter != destInFilter) |
| 6487 | { |
| 6488 | if (!jitIsBetween(blkDest->bbCodeOffs, srcHndBeg, srcHndEnd)) |
| 6489 | { |
BADCODE3("Illegal control flow between filter and handler", ". Before offset %04X",
| 6491 | blkSrc->bbCodeOffsEnd); |
| 6492 | } |
| 6493 | } |
| 6494 | } |
| 6495 | } |
| 6496 | else |
| 6497 | { |
| 6498 | /* The handler indexes of blkSrc and blkDest are different */ |
| 6499 | if (isLeave) |
| 6500 | { |
/* A leave instruction must not enter the dest handler from outside */
| 6502 | if (!jitIsBetween(srcHndBeg, destHndBeg, destHndEnd)) |
| 6503 | { |
BADCODE3("Illegal use of leave to enter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6505 | } |
| 6506 | } |
| 6507 | else |
| 6508 | { |
| 6509 | /* We must use a leave to exit a handler */ |
BADCODE3("Illegal control flow out of a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6511 | } |
| 6512 | |
| 6513 | /* Do we have a filter clause? */ |
| 6514 | if (srcHndTab->HasFilter()) |
| 6515 | { |
| 6516 | /* It is ok to leave from the handler block of a filter, */ |
| 6517 | /* but not from the filter block of a filter */ |
| 6518 | if (srcInFilter != destInFilter) |
| 6519 | { |
BADCODE3("Illegal to leave a filter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6521 | } |
| 6522 | } |
| 6523 | |
| 6524 | /* We should never leave a finally handler */ |
| 6525 | if (srcHndTab->HasFinallyHandler()) |
| 6526 | { |
BADCODE3("Illegal to leave a finally handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6528 | } |
| 6529 | |
| 6530 | /* We should never leave a fault handler */ |
| 6531 | if (srcHndTab->HasFaultHandler()) |
| 6532 | { |
BADCODE3("Illegal to leave a fault handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6534 | } |
| 6535 | } |
| 6536 | } |
| 6537 | else if (blkDest->hasHndIndex()) |
| 6538 | { |
| 6539 | /* blkSrc was not inside a handler, but blkDst is inside a handler */ |
BADCODE3("Illegal control flow into a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6541 | } |
| 6542 | |
| 6543 | /* Are we jumping from a catch handler into the corresponding try? */ |
| 6544 | /* VB uses this for "on error goto " */ |
| 6545 | |
| 6546 | if (isLeave && srcInCatch) |
| 6547 | { |
| 6548 | // inspect all handlers containing the jump source |
| 6549 | |
| 6550 | bool bValidJumpToTry = false; // are we jumping in a valid way from a catch to the corresponding try? |
| 6551 | bool bCatchHandlerOnly = true; // false if we are jumping out of a non-catch handler |
| 6552 | EHblkDsc* ehTableEnd; |
| 6553 | EHblkDsc* ehDsc; |
| 6554 | |
| 6555 | for (ehDsc = compHndBBtab, ehTableEnd = compHndBBtab + compHndBBtabCount; |
| 6556 | bCatchHandlerOnly && ehDsc < ehTableEnd; ehDsc++) |
| 6557 | { |
| 6558 | if (ehDsc->InHndRegionILRange(blkSrc)) |
| 6559 | { |
| 6560 | if (ehDsc->HasCatchHandler()) |
| 6561 | { |
| 6562 | if (ehDsc->InTryRegionILRange(blkDest)) |
| 6563 | { |
| 6564 | // If we already considered the jump for a different try/catch, |
| 6565 | // we would have two overlapping try regions with two overlapping catch |
| 6566 | // regions, which is illegal. |
| 6567 | noway_assert(!bValidJumpToTry); |
| 6568 | |
| 6569 | // Allowed if it is the first instruction of an inner try |
| 6570 | // (and all trys in between) |
| 6571 | // |
| 6572 | // try { |
| 6573 | // .. |
| 6574 | // _tryAgain: |
| 6575 | // .. |
| 6576 | // try { |
| 6577 | // _tryNestedInner: |
| 6578 | // .. |
| 6579 | // try { |
| 6580 | // _tryNestedIllegal: |
| 6581 | // .. |
| 6582 | // } catch { |
| 6583 | // .. |
| 6584 | // } |
| 6585 | // .. |
| 6586 | // } catch { |
| 6587 | // .. |
| 6588 | // } |
| 6589 | // .. |
| 6590 | // } catch { |
| 6591 | // .. |
| 6592 | // leave _tryAgain // Allowed |
| 6593 | // .. |
| 6594 | // leave _tryNestedInner // Allowed |
| 6595 | // .. |
| 6596 | // leave _tryNestedIllegal // Not Allowed |
| 6597 | // .. |
| 6598 | // } |
| 6599 | // |
| 6600 | // Note: The leave is allowed also from catches nested inside the catch shown above. |
| 6601 | |
| 6602 | /* The common case where leave is to the corresponding try */ |
| 6603 | if (ehDsc->ebdIsSameTry(this, blkDest->getTryIndex()) || |
| 6604 | /* Also allowed is a leave to the start of a try which starts in the handler's try */ |
| 6605 | fgFlowToFirstBlockOfInnerTry(ehDsc->ebdTryBeg, blkDest, false)) |
| 6606 | { |
| 6607 | bValidJumpToTry = true; |
| 6608 | } |
| 6609 | } |
| 6610 | } |
| 6611 | else |
| 6612 | { |
| 6613 | // We are jumping from a handler which is not a catch handler. |
| 6614 | |
| 6615 | // If it's a handler, but not a catch handler, it must be either a finally or fault |
| 6616 | if (!ehDsc->HasFinallyOrFaultHandler()) |
| 6617 | { |
BADCODE3("Handlers must be catch, finally, or fault", ". Before offset %04X",
| 6619 | blkSrc->bbCodeOffsEnd); |
| 6620 | } |
| 6621 | |
| 6622 | // Are we jumping out of this handler? |
| 6623 | if (!ehDsc->InHndRegionILRange(blkDest)) |
| 6624 | { |
| 6625 | bCatchHandlerOnly = false; |
| 6626 | } |
| 6627 | } |
| 6628 | } |
| 6629 | else if (ehDsc->InFilterRegionILRange(blkSrc)) |
| 6630 | { |
| 6631 | // Are we jumping out of a filter? |
| 6632 | if (!ehDsc->InFilterRegionILRange(blkDest)) |
| 6633 | { |
| 6634 | bCatchHandlerOnly = false; |
| 6635 | } |
| 6636 | } |
| 6637 | } |
| 6638 | |
| 6639 | if (bCatchHandlerOnly) |
| 6640 | { |
| 6641 | if (bValidJumpToTry) |
| 6642 | { |
| 6643 | return; |
| 6644 | } |
| 6645 | else |
| 6646 | { |
| 6647 | // FALL THROUGH |
| 6648 | // This is either the case of a leave to outside the try/catch, |
| 6649 | // or a leave to a try not nested in this try/catch. |
| 6650 | // The first case is allowed, the second one will be checked |
| 6651 | // later when we check the try block rules (it is illegal if we |
| 6652 | // jump to the middle of the destination try). |
| 6653 | } |
| 6654 | } |
| 6655 | else |
| 6656 | { |
BADCODE3("illegal leave to exit a finally, fault or filter", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6658 | } |
| 6659 | } |
| 6660 | |
| 6661 | /* Check all the try block rules */ |
| 6662 | |
| 6663 | IL_OFFSET srcTryBeg; |
| 6664 | IL_OFFSET srcTryEnd; |
| 6665 | IL_OFFSET destTryBeg; |
| 6666 | IL_OFFSET destTryEnd; |
| 6667 | |
| 6668 | ehInitTryRange(blkSrc, &srcTryBeg, &srcTryEnd); |
| 6669 | ehInitTryRange(blkDest, &destTryBeg, &destTryEnd); |
| 6670 | |
| 6671 | /* Are we jumping between try indexes? */ |
| 6672 | if (!BasicBlock::sameTryRegion(blkSrc, blkDest)) |
| 6673 | { |
| 6674 | // Are we exiting from an inner to outer try? |
| 6675 | if (jitIsBetween(srcTryBeg, destTryBeg, destTryEnd) && jitIsBetween(srcTryEnd - 1, destTryBeg, destTryEnd)) |
| 6676 | { |
| 6677 | if (!isLeave) |
| 6678 | { |
BADCODE3("exit from try block without a leave", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6680 | } |
| 6681 | } |
| 6682 | else if (jitIsBetween(destTryBeg, srcTryBeg, srcTryEnd)) |
| 6683 | { |
| 6684 | // check that the dest Try is first instruction of an inner try |
| 6685 | if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, false)) |
| 6686 | { |
BADCODE3("control flow into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6688 | } |
| 6689 | } |
| 6690 | else // there is no nesting relationship between src and dest |
| 6691 | { |
| 6692 | if (isLeave) |
| 6693 | { |
| 6694 | // check that the dest Try is first instruction of an inner try sibling |
| 6695 | if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, true)) |
| 6696 | { |
BADCODE3("illegal leave into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6698 | } |
| 6699 | } |
| 6700 | else |
| 6701 | { |
BADCODE3("illegal control flow in to/out of try block", ". Before offset %04X", blkSrc->bbCodeOffsEnd);
| 6703 | } |
| 6704 | } |
| 6705 | } |
| 6706 | } |
| 6707 | |
| 6708 | /***************************************************************************** |
| 6709 | * Check that blkDest is the first block of an inner try or a sibling |
* with no intervening trys
| 6711 | */ |
| 6712 | |
| 6713 | bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling) |
| 6714 | { |
| 6715 | assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks |
| 6716 | |
| 6717 | noway_assert(blkDest->hasTryIndex()); |
| 6718 | |
| 6719 | unsigned XTnum = blkDest->getTryIndex(); |
| 6720 | unsigned lastXTnum = blkSrc->hasTryIndex() ? blkSrc->getTryIndex() : compHndBBtabCount; |
| 6721 | noway_assert(XTnum < compHndBBtabCount); |
| 6722 | noway_assert(lastXTnum <= compHndBBtabCount); |
| 6723 | |
| 6724 | EHblkDsc* HBtab = ehGetDsc(XTnum); |
| 6725 | |
| 6726 | // check that we are not jumping into middle of try |
| 6727 | if (HBtab->ebdTryBeg != blkDest) |
| 6728 | { |
| 6729 | return false; |
| 6730 | } |
| 6731 | |
| 6732 | if (sibling) |
| 6733 | { |
| 6734 | noway_assert(!BasicBlock::sameTryRegion(blkSrc, blkDest)); |
| 6735 | |
| 6736 | // find the l.u.b of the two try ranges |
| 6737 | // Set lastXTnum to the l.u.b. |
| 6738 | |
| 6739 | HBtab = ehGetDsc(lastXTnum); |
| 6740 | |
| 6741 | for (lastXTnum++, HBtab++; lastXTnum < compHndBBtabCount; lastXTnum++, HBtab++) |
| 6742 | { |
| 6743 | if (jitIsBetweenInclusive(blkDest->bbNum, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum)) |
| 6744 | { |
| 6745 | break; |
| 6746 | } |
| 6747 | } |
| 6748 | } |
| 6749 | |
| 6750 | // now check there are no intervening trys between dest and l.u.b |
| 6751 | // (it is ok to have intervening trys as long as they all start at |
| 6752 | // the same code offset) |
| 6753 | |
| 6754 | HBtab = ehGetDsc(XTnum); |
| 6755 | |
| 6756 | for (XTnum++, HBtab++; XTnum < lastXTnum; XTnum++, HBtab++) |
| 6757 | { |
| 6758 | if (HBtab->ebdTryBeg->bbNum < blkDest->bbNum && blkDest->bbNum <= HBtab->ebdTryLast->bbNum) |
| 6759 | { |
| 6760 | return false; |
| 6761 | } |
| 6762 | } |
| 6763 | |
| 6764 | return true; |
| 6765 | } |
| 6766 | |
| 6767 | /***************************************************************************** |
| 6768 | * Returns the handler nesting level of the block. |
| 6769 | * *pFinallyNesting is set to the nesting level of the inner-most |
| 6770 | * finally-protected try the block is in. |
| 6771 | */ |
| 6772 | |
| 6773 | unsigned Compiler::fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting) |
| 6774 | { |
| 6775 | unsigned curNesting = 0; // How many handlers is the block in |
| 6776 | unsigned tryFin = (unsigned)-1; // curNesting when we see innermost finally-protected try |
| 6777 | unsigned XTnum; |
| 6778 | EHblkDsc* HBtab; |
| 6779 | |
| 6780 | /* We find the block's handler nesting level by walking over the |
complete exception table and finding enclosing clauses. */
| 6782 | |
| 6783 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 6784 | { |
| 6785 | noway_assert(HBtab->ebdTryBeg && HBtab->ebdHndBeg); |
| 6786 | |
| 6787 | if (HBtab->HasFinallyHandler() && (tryFin == (unsigned)-1) && bbInTryRegions(XTnum, block)) |
| 6788 | { |
| 6789 | tryFin = curNesting; |
| 6790 | } |
| 6791 | else if (bbInHandlerRegions(XTnum, block)) |
| 6792 | { |
| 6793 | curNesting++; |
| 6794 | } |
| 6795 | } |
| 6796 | |
| 6797 | if (tryFin == (unsigned)-1) |
| 6798 | { |
| 6799 | tryFin = curNesting; |
| 6800 | } |
| 6801 | |
| 6802 | if (pFinallyNesting) |
| 6803 | { |
| 6804 | *pFinallyNesting = curNesting - tryFin; |
| 6805 | } |
| 6806 | |
| 6807 | return curNesting; |
| 6808 | } |
| 6809 | |
| 6810 | /***************************************************************************** |
| 6811 | * |
| 6812 | * Import the basic blocks of the procedure. |
| 6813 | */ |
| 6814 | |
| 6815 | void Compiler::fgImport() |
| 6816 | { |
| 6817 | impImport(fgFirstBB); |
| 6818 | |
| 6819 | if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_SKIP_VERIFICATION)) |
| 6820 | { |
| 6821 | CorInfoMethodRuntimeFlags verFlag; |
| 6822 | verFlag = tiIsVerifiableCode ? CORINFO_FLG_VERIFIABLE : CORINFO_FLG_UNVERIFIABLE; |
| 6823 | info.compCompHnd->setMethodAttribs(info.compMethodHnd, verFlag); |
| 6824 | } |
| 6825 | } |
| 6826 | |
| 6827 | /***************************************************************************** |
| 6828 | * This function returns true if tree is a node with a call |
| 6829 | * that unconditionally throws an exception |
| 6830 | */ |
| 6831 | |
| 6832 | bool Compiler::fgIsThrow(GenTree* tree) |
| 6833 | { |
| 6834 | if ((tree->gtOper != GT_CALL) || (tree->gtCall.gtCallType != CT_HELPER)) |
| 6835 | { |
| 6836 | return false; |
| 6837 | } |
| 6838 | |
| 6839 | // TODO-Throughput: Replace all these calls to eeFindHelper() with a table based lookup |
| 6840 | |
| 6841 | if ((tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_OVERFLOW)) || |
| 6842 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_VERIFICATION)) || |
| 6843 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL)) || |
| 6844 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWDIVZERO)) || |
| 6845 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWNULLREF)) || |
| 6846 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW)) || |
| 6847 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_RETHROW)) || |
| 6848 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW_TYPE_NOT_SUPPORTED)) || |
| 6849 | (tree->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED))) |
| 6850 | { |
| 6851 | noway_assert(tree->gtFlags & GTF_CALL); |
| 6852 | noway_assert(tree->gtFlags & GTF_EXCEPT); |
| 6853 | return true; |
| 6854 | } |
| 6855 | |
| 6856 | // TODO-CQ: there are a bunch of managed methods in [mscorlib]System.ThrowHelper |
| 6857 | // that would be nice to recognize. |
| 6858 | |
| 6859 | return false; |
| 6860 | } |
| 6861 | |
| 6862 | /***************************************************************************** |
| 6863 | * This function returns true for blocks that are in different hot-cold regions. |
* It returns false when both blocks are in the same region
| 6865 | */ |
| 6866 | |
| 6867 | bool Compiler::fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2) |
| 6868 | { |
| 6869 | noway_assert(blk1 != nullptr); |
| 6870 | noway_assert(blk2 != nullptr); |
| 6871 | |
| 6872 | if (fgFirstColdBlock == nullptr) |
| 6873 | { |
| 6874 | return false; |
| 6875 | } |
| 6876 | |
| 6877 | // If one block is Hot and the other is Cold then we are in different regions |
| 6878 | return ((blk1->bbFlags & BBF_COLD) != (blk2->bbFlags & BBF_COLD)); |
| 6879 | } |
| 6880 | |
| 6881 | bool Compiler::fgIsBlockCold(BasicBlock* blk) |
| 6882 | { |
| 6883 | noway_assert(blk != nullptr); |
| 6884 | |
| 6885 | if (fgFirstColdBlock == nullptr) |
| 6886 | { |
| 6887 | return false; |
| 6888 | } |
| 6889 | |
| 6890 | return ((blk->bbFlags & BBF_COLD) != 0); |
| 6891 | } |
| 6892 | |
| 6893 | /***************************************************************************** |
| 6894 | * This function returns true if tree is a GT_COMMA node with a call |
| 6895 | * that unconditionally throws an exception |
| 6896 | */ |
| 6897 | |
| 6898 | bool Compiler::fgIsCommaThrow(GenTree* tree, bool forFolding /* = false */) |
| 6899 | { |
| 6900 | // Instead of always folding comma throws, |
| 6901 | // with stress enabled we only fold half the time |
| 6902 | |
| 6903 | if (forFolding && compStressCompile(STRESS_FOLD, 50)) |
| 6904 | { |
| 6905 | return false; /* Don't fold */ |
| 6906 | } |
| 6907 | |
| 6908 | /* Check for cast of a GT_COMMA with a throw overflow */ |
| 6909 | if ((tree->gtOper == GT_COMMA) && (tree->gtFlags & GTF_CALL) && (tree->gtFlags & GTF_EXCEPT)) |
| 6910 | { |
| 6911 | return (fgIsThrow(tree->gtOp.gtOp1)); |
| 6912 | } |
| 6913 | return false; |
| 6914 | } |
| 6915 | |
| 6916 | //------------------------------------------------------------------------ |
| 6917 | // fgIsIndirOfAddrOfLocal: Determine whether "tree" is an indirection of a local. |
| 6918 | // |
| 6919 | // Arguments: |
| 6920 | // tree - The tree node under consideration |
| 6921 | // |
| 6922 | // Return Value: |
// If "tree" is an indirection (GT_IND, GT_BLK, or GT_OBJ) whose arg is an ADDR,
| 6924 | // whose arg in turn is a LCL_VAR, return that LCL_VAR node, else nullptr. |
| 6925 | // |
| 6926 | // static |
| 6927 | GenTree* Compiler::fgIsIndirOfAddrOfLocal(GenTree* tree) |
| 6928 | { |
| 6929 | GenTree* res = nullptr; |
| 6930 | if (tree->OperIsIndir()) |
| 6931 | { |
| 6932 | GenTree* addr = tree->AsIndir()->Addr(); |
| 6933 | |
// Post rationalization, we can have Indir(Lea(...)) trees. Therefore to recognize
| 6935 | // Indir of addr of a local, skip over Lea in Indir(Lea(base, index, scale, offset)) |
| 6936 | // to get to base variable. |
| 6937 | if (addr->OperGet() == GT_LEA) |
| 6938 | { |
| 6939 | // We use this method in backward dataflow after liveness computation - fgInterBlockLocalVarLiveness(). |
| 6940 | // Therefore it is critical that we don't miss 'uses' of any local. It may seem this method overlooks |
| 6941 | // if the index part of the LEA has indir( someAddrOperator ( lclVar ) ) to search for a use but it's |
| 6942 | // covered by the fact we're traversing the expression in execution order and we also visit the index. |
| 6943 | GenTreeAddrMode* lea = addr->AsAddrMode(); |
| 6944 | GenTree* base = lea->Base(); |
| 6945 | |
| 6946 | if (base != nullptr) |
| 6947 | { |
| 6948 | if (base->OperGet() == GT_IND) |
| 6949 | { |
| 6950 | return fgIsIndirOfAddrOfLocal(base); |
| 6951 | } |
| 6952 | // else use base as addr |
| 6953 | addr = base; |
| 6954 | } |
| 6955 | } |
| 6956 | |
| 6957 | if (addr->OperGet() == GT_ADDR) |
| 6958 | { |
| 6959 | GenTree* lclvar = addr->gtOp.gtOp1; |
| 6960 | if (lclvar->OperGet() == GT_LCL_VAR) |
| 6961 | { |
| 6962 | res = lclvar; |
| 6963 | } |
| 6964 | } |
| 6965 | else if (addr->OperGet() == GT_LCL_VAR_ADDR) |
| 6966 | { |
| 6967 | res = addr; |
| 6968 | } |
| 6969 | } |
| 6970 | return res; |
| 6971 | } |
| 6972 | |
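// fgGetStaticsCCtorHelper: build the call to the given shared-statics helper for
// class 'cls'. Roughly (a sketch), the result has the shape
//     CALL help <helper>(moduleID [, classID])
// where the classID argument is omitted for the *_NOCTOR helpers that don't need
// it, and either argument may be loaded through an indirection when only an
// indirect cell is available at jit time.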
| 6973 | GenTreeCall* Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper) |
| 6974 | { |
| 6975 | bool bNeedClassID = true; |
| 6976 | unsigned callFlags = 0; |
| 6977 | |
| 6978 | var_types type = TYP_BYREF; |
| 6979 | |
| 6980 | // This is sort of ugly, as we have knowledge of what the helper is returning. |
| 6981 | // We need the return type. |
| 6982 | switch (helper) |
| 6983 | { |
| 6984 | case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR: |
| 6985 | bNeedClassID = false; |
| 6986 | __fallthrough; |
| 6987 | |
| 6988 | case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR: |
| 6989 | callFlags |= GTF_CALL_HOISTABLE; |
| 6990 | __fallthrough; |
| 6991 | |
| 6992 | case CORINFO_HELP_GETSHARED_GCSTATIC_BASE: |
| 6993 | case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS: |
| 6994 | case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS: |
| 6995 | case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE: |
| 6996 | case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS: |
| 6997 | // type = TYP_BYREF; |
| 6998 | break; |
| 6999 | |
| 7000 | case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR: |
| 7001 | bNeedClassID = false; |
| 7002 | __fallthrough; |
| 7003 | |
| 7004 | case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR: |
| 7005 | callFlags |= GTF_CALL_HOISTABLE; |
| 7006 | __fallthrough; |
| 7007 | |
| 7008 | case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE: |
| 7009 | case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE: |
| 7010 | case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS: |
| 7011 | case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS: |
| 7012 | type = TYP_I_IMPL; |
| 7013 | break; |
| 7014 | |
| 7015 | default: |
assert(!"unknown shared statics helper");
| 7017 | break; |
| 7018 | } |
| 7019 | |
| 7020 | GenTreeArgList* argList = nullptr; |
| 7021 | |
| 7022 | GenTree* opModuleIDArg; |
| 7023 | GenTree* opClassIDArg; |
| 7024 | |
| 7025 | // Get the class ID |
| 7026 | unsigned clsID; |
| 7027 | size_t moduleID; |
| 7028 | void* pclsID; |
| 7029 | void* pmoduleID; |
| 7030 | |
| 7031 | clsID = info.compCompHnd->getClassDomainID(cls, &pclsID); |
| 7032 | |
| 7033 | moduleID = info.compCompHnd->getClassModuleIdForStatics(cls, nullptr, &pmoduleID); |
| 7034 | |
| 7035 | if (!(callFlags & GTF_CALL_HOISTABLE)) |
| 7036 | { |
| 7037 | if (info.compCompHnd->getClassAttribs(cls) & CORINFO_FLG_BEFOREFIELDINIT) |
| 7038 | { |
| 7039 | callFlags |= GTF_CALL_HOISTABLE; |
| 7040 | } |
| 7041 | } |
| 7042 | |
| 7043 | if (pmoduleID) |
| 7044 | { |
| 7045 | opModuleIDArg = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pmoduleID, GTF_ICON_CIDMID_HDL, true); |
| 7046 | } |
| 7047 | else |
| 7048 | { |
| 7049 | opModuleIDArg = gtNewIconNode((size_t)moduleID, TYP_I_IMPL); |
| 7050 | } |
| 7051 | |
| 7052 | if (bNeedClassID) |
| 7053 | { |
| 7054 | if (pclsID) |
| 7055 | { |
| 7056 | opClassIDArg = gtNewIndOfIconHandleNode(TYP_INT, (size_t)pclsID, GTF_ICON_CIDMID_HDL, true); |
| 7057 | } |
| 7058 | else |
| 7059 | { |
| 7060 | opClassIDArg = gtNewIconNode(clsID, TYP_INT); |
| 7061 | } |
| 7062 | |
| 7063 | // call the helper to get the base |
| 7064 | argList = gtNewArgList(opModuleIDArg, opClassIDArg); |
| 7065 | } |
| 7066 | else |
| 7067 | { |
| 7068 | argList = gtNewArgList(opModuleIDArg); |
| 7069 | } |
| 7070 | |
| 7071 | GenTreeCall* result = gtNewHelperCallNode(helper, type, argList); |
| 7072 | result->gtFlags |= callFlags; |
| 7073 | |
| 7074 | // If we're importing the special EqualityComparer<T>.Default |
| 7075 | // intrinsic, flag the helper call. Later during inlining, we can |
| 7076 | // remove the helper call if the associated field lookup is unused. |
| 7077 | if ((info.compFlags & CORINFO_FLG_JIT_INTRINSIC) != 0) |
| 7078 | { |
| 7079 | NamedIntrinsic ni = lookupNamedIntrinsic(info.compMethodHnd); |
| 7080 | if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default) |
| 7081 | { |
JITDUMP("\nmarking helper call [%06u] as special dce...\n", result->gtTreeID);
| 7083 | result->gtCallMoreFlags |= GTF_CALL_M_HELPER_SPECIAL_DCE; |
| 7084 | } |
| 7085 | } |
| 7086 | |
| 7087 | return result; |
| 7088 | } |
| 7089 | |
| 7090 | GenTreeCall* Compiler::fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls) |
| 7091 | { |
| 7092 | #ifdef FEATURE_READYTORUN_COMPILER |
| 7093 | if (opts.IsReadyToRun()) |
| 7094 | { |
| 7095 | CORINFO_RESOLVED_TOKEN resolvedToken; |
| 7096 | memset(&resolvedToken, 0, sizeof(resolvedToken)); |
| 7097 | resolvedToken.hClass = cls; |
| 7098 | |
| 7099 | return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); |
| 7100 | } |
| 7101 | #endif |
| 7102 | |
// Call the shared non-GC static helper, as it's the fastest
| 7104 | return fgGetStaticsCCtorHelper(cls, info.compCompHnd->getSharedCCtorHelper(cls)); |
| 7105 | } |
| 7106 | |
| 7107 | //------------------------------------------------------------------------------ |
| 7108 | // fgAddrCouldBeNull : Check whether the address tree can represent null. |
| 7109 | // |
| 7110 | // |
| 7111 | // Arguments: |
| 7112 | // addr - Address to check |
| 7113 | // |
| 7114 | // Return Value: |
| 7115 | // True if address could be null; false otherwise |
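//
// Notes:
//    Some examples (a rough sketch):
//       CNS_INT(handle)                    -> false (a handle is never null)
//       ADDR(LCL_VAR)                      -> false (address of a local)
//       ADD(ADDR(LCL_VAR), CNS_INT small)  -> false (local address plus a small offset)
//       LCL_VAR holding a GC reference     -> true  (the reference could be null)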
| 7116 | |
| 7117 | bool Compiler::fgAddrCouldBeNull(GenTree* addr) |
| 7118 | { |
| 7119 | addr = addr->gtEffectiveVal(); |
| 7120 | if ((addr->gtOper == GT_CNS_INT) && addr->IsIconHandle()) |
| 7121 | { |
| 7122 | return false; |
| 7123 | } |
| 7124 | else if (addr->gtOper == GT_LCL_VAR) |
| 7125 | { |
| 7126 | unsigned varNum = addr->AsLclVarCommon()->GetLclNum(); |
| 7127 | |
| 7128 | if (lvaIsImplicitByRefLocal(varNum)) |
| 7129 | { |
| 7130 | return false; |
| 7131 | } |
| 7132 | |
| 7133 | LclVarDsc* varDsc = &lvaTable[varNum]; |
| 7134 | |
| 7135 | if (varDsc->lvStackByref) |
| 7136 | { |
| 7137 | return false; |
| 7138 | } |
| 7139 | } |
| 7140 | else if (addr->gtOper == GT_ADDR) |
| 7141 | { |
| 7142 | if (addr->gtOp.gtOp1->gtOper == GT_CNS_INT) |
| 7143 | { |
| 7144 | GenTree* cns1Tree = addr->gtOp.gtOp1; |
| 7145 | if (!cns1Tree->IsIconHandle()) |
| 7146 | { |
| 7147 | // Indirection of some random constant... |
| 7148 | // It is safest just to return true |
| 7149 | return true; |
| 7150 | } |
| 7151 | } |
| 7152 | |
| 7153 | return false; // we can't have a null address |
| 7154 | } |
| 7155 | else if (addr->gtOper == GT_ADD) |
| 7156 | { |
| 7157 | if (addr->gtOp.gtOp1->gtOper == GT_CNS_INT) |
| 7158 | { |
| 7159 | GenTree* cns1Tree = addr->gtOp.gtOp1; |
| 7160 | if (!cns1Tree->IsIconHandle()) |
| 7161 | { |
| 7162 | if (!fgIsBigOffset(cns1Tree->gtIntCon.gtIconVal)) |
| 7163 | { |
| 7164 | // Op1 was an ordinary small constant |
| 7165 | return fgAddrCouldBeNull(addr->gtOp.gtOp2); |
| 7166 | } |
| 7167 | } |
| 7168 | else // Op1 was a handle represented as a constant |
| 7169 | { |
| 7170 | // Is Op2 also a constant? |
| 7171 | if (addr->gtOp.gtOp2->gtOper == GT_CNS_INT) |
| 7172 | { |
| 7173 | GenTree* cns2Tree = addr->gtOp.gtOp2; |
| 7174 | // Is this an addition of a handle and constant |
| 7175 | if (!cns2Tree->IsIconHandle()) |
| 7176 | { |
| 7177 | if (!fgIsBigOffset(cns2Tree->gtIntCon.gtIconVal)) |
| 7178 | { |
| 7179 | // Op2 was an ordinary small constant |
| 7180 | return false; // we can't have a null address |
| 7181 | } |
| 7182 | } |
| 7183 | } |
| 7184 | } |
| 7185 | } |
| 7186 | else |
| 7187 | { |
| 7188 | // Op1 is not a constant |
| 7189 | // What about Op2? |
| 7190 | if (addr->gtOp.gtOp2->gtOper == GT_CNS_INT) |
| 7191 | { |
| 7192 | GenTree* cns2Tree = addr->gtOp.gtOp2; |
| 7193 | // Is this an addition of a small constant |
| 7194 | if (!cns2Tree->IsIconHandle()) |
| 7195 | { |
| 7196 | if (!fgIsBigOffset(cns2Tree->gtIntCon.gtIconVal)) |
| 7197 | { |
| 7198 | // Op2 was an ordinary small constant |
| 7199 | return fgAddrCouldBeNull(addr->gtOp.gtOp1); |
| 7200 | } |
| 7201 | } |
| 7202 | } |
| 7203 | } |
| 7204 | } |
| 7205 | return true; // default result: addr could be null |
| 7206 | } |
| 7207 | |
| 7208 | //------------------------------------------------------------------------------ |
| 7209 | // fgOptimizeDelegateConstructor: try and optimize construction of a delegate |
| 7210 | // |
| 7211 | // Arguments: |
| 7212 | // call -- call to original delegate constructor |
| 7213 | // exactContextHnd -- [out] context handle to update |
| 7214 | // ldftnToken -- [in] resolved token for the method the delegate will invoke, |
| 7215 | // if known, or nullptr if not known |
| 7216 | // |
| 7217 | // Return Value: |
| 7218 | // Original call tree if no optimization applies. |
| 7219 | // Updated call tree if optimized. |
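//
// Notes:
//    The shape recognized here comes (roughly) from IL of the form
//        dup / ldloc  <target object>
//        ldftn        <invoked method>      (or ldvirtftn, via a helper call)
//        newobj       instance void SomeDelegate::.ctor(object, native int)
//    where 'call' is the expansion of the delegate constructor and its second
//    argument is the GT_FTN_ADDR (or runtime lookup) inspected below;
//    'SomeDelegate' is just a placeholder name.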
| 7220 | |
| 7221 | GenTree* Compiler::fgOptimizeDelegateConstructor(GenTreeCall* call, |
| 7222 | CORINFO_CONTEXT_HANDLE* ExactContextHnd, |
| 7223 | CORINFO_RESOLVED_TOKEN* ldftnToken) |
| 7224 | { |
JITDUMP("\nfgOptimizeDelegateConstructor: ");
| 7226 | noway_assert(call->gtCallType == CT_USER_FUNC); |
| 7227 | CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd; |
| 7228 | CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getMethodClass(methHnd); |
| 7229 | |
| 7230 | GenTree* targetMethod = call->gtCallArgs->Rest()->Current(); |
| 7231 | noway_assert(targetMethod->TypeGet() == TYP_I_IMPL); |
| 7232 | genTreeOps oper = targetMethod->OperGet(); |
| 7233 | CORINFO_METHOD_HANDLE targetMethodHnd = nullptr; |
| 7234 | GenTree* qmarkNode = nullptr; |
| 7235 | if (oper == GT_FTN_ADDR) |
| 7236 | { |
| 7237 | targetMethodHnd = targetMethod->gtFptrVal.gtFptrMethod; |
| 7238 | } |
| 7239 | else if (oper == GT_CALL && targetMethod->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR)) |
| 7240 | { |
| 7241 | GenTree* handleNode = targetMethod->gtCall.gtCallArgs->Rest()->Rest()->Current(); |
| 7242 | |
| 7243 | if (handleNode->OperGet() == GT_CNS_INT) |
| 7244 | { |
| 7245 | // it's a ldvirtftn case, fetch the methodhandle off the helper for ldvirtftn. It's the 3rd arg |
| 7246 | targetMethodHnd = CORINFO_METHOD_HANDLE(handleNode->gtIntCon.gtCompileTimeHandle); |
| 7247 | } |
| 7248 | // Sometimes the argument to this is the result of a generic dictionary lookup, which shows |
| 7249 | // up as a GT_QMARK. |
| 7250 | else if (handleNode->OperGet() == GT_QMARK) |
| 7251 | { |
| 7252 | qmarkNode = handleNode; |
| 7253 | } |
| 7254 | } |
| 7255 | // Sometimes we don't call CORINFO_HELP_VIRTUAL_FUNC_PTR but instead just call |
| 7256 | // CORINFO_HELP_RUNTIMEHANDLE_METHOD directly. |
| 7257 | else if (oper == GT_QMARK) |
| 7258 | { |
| 7259 | qmarkNode = targetMethod; |
| 7260 | } |
| 7261 | if (qmarkNode) |
| 7262 | { |
| 7263 | noway_assert(qmarkNode->OperGet() == GT_QMARK); |
| 7264 | // The argument is actually a generic dictionary lookup. For delegate creation it looks |
| 7265 | // like: |
| 7266 | // GT_QMARK |
| 7267 | // GT_COLON |
| 7268 | // op1 -> call |
| 7269 | // Arg 1 -> token (has compile time handle) |
| 7270 | // op2 -> lclvar |
| 7271 | // |
| 7272 | // |
| 7273 | // In this case I can find the token (which is a method handle) and that is the compile time |
| 7274 | // handle. |
| 7275 | noway_assert(qmarkNode->gtOp.gtOp2->OperGet() == GT_COLON); |
| 7276 | noway_assert(qmarkNode->gtOp.gtOp2->gtOp.gtOp1->OperGet() == GT_CALL); |
| 7277 | GenTreeCall* runtimeLookupCall = qmarkNode->gtOp.gtOp2->gtOp.gtOp1->AsCall(); |
| 7278 | |
| 7279 | // This could be any of CORINFO_HELP_RUNTIMEHANDLE_(METHOD|CLASS)(_LOG?) |
| 7280 | GenTree* tokenNode = runtimeLookupCall->gtCallArgs->gtOp.gtOp2->gtOp.gtOp1; |
| 7281 | noway_assert(tokenNode->OperGet() == GT_CNS_INT); |
| 7282 | targetMethodHnd = CORINFO_METHOD_HANDLE(tokenNode->gtIntCon.gtCompileTimeHandle); |
| 7283 | } |
| 7284 | |
| 7285 | // Verify using the ldftnToken gives us all of what we used to get |
| 7286 | // via the above pattern match, and more... |
| 7287 | if (ldftnToken != nullptr) |
| 7288 | { |
| 7289 | assert(ldftnToken->hMethod != nullptr); |
| 7290 | |
| 7291 | if (targetMethodHnd != nullptr) |
| 7292 | { |
| 7293 | assert(targetMethodHnd == ldftnToken->hMethod); |
| 7294 | } |
| 7295 | |
| 7296 | targetMethodHnd = ldftnToken->hMethod; |
| 7297 | } |
| 7298 | else |
| 7299 | { |
| 7300 | assert(targetMethodHnd == nullptr); |
| 7301 | } |
| 7302 | |
| 7303 | #ifdef FEATURE_READYTORUN_COMPILER |
| 7304 | if (opts.IsReadyToRun()) |
| 7305 | { |
| 7306 | if (IsTargetAbi(CORINFO_CORERT_ABI)) |
| 7307 | { |
| 7308 | if (ldftnToken != nullptr) |
| 7309 | { |
JITDUMP("optimized\n");
| 7311 | |
| 7312 | GenTree* thisPointer = call->gtCallObjp; |
| 7313 | GenTree* targetObjPointers = call->gtCallArgs->Current(); |
| 7314 | GenTreeArgList* helperArgs = nullptr; |
| 7315 | CORINFO_LOOKUP pLookup; |
| 7316 | CORINFO_CONST_LOOKUP entryPoint; |
| 7317 | info.compCompHnd->getReadyToRunDelegateCtorHelper(ldftnToken, clsHnd, &pLookup); |
| 7318 | if (!pLookup.lookupKind.needsRuntimeLookup) |
| 7319 | { |
| 7320 | helperArgs = gtNewArgList(thisPointer, targetObjPointers); |
| 7321 | entryPoint = pLookup.constLookup; |
| 7322 | } |
| 7323 | else |
| 7324 | { |
| 7325 | assert(oper != GT_FTN_ADDR); |
| 7326 | CORINFO_CONST_LOOKUP genericLookup; |
| 7327 | info.compCompHnd->getReadyToRunHelper(ldftnToken, &pLookup.lookupKind, |
| 7328 | CORINFO_HELP_READYTORUN_GENERIC_HANDLE, &genericLookup); |
| 7329 | GenTree* ctxTree = getRuntimeContextTree(pLookup.lookupKind.runtimeLookupKind); |
| 7330 | helperArgs = gtNewArgList(thisPointer, targetObjPointers, ctxTree); |
| 7331 | entryPoint = genericLookup; |
| 7332 | } |
| 7333 | call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_DELEGATE_CTOR, TYP_VOID, helperArgs); |
| 7334 | call->setEntryPoint(entryPoint); |
| 7335 | } |
| 7336 | else |
| 7337 | { |
JITDUMP("not optimized, CORERT no ldftnToken\n");
| 7339 | } |
| 7340 | } |
// ReadyToRun has this optimization only for non-virtual function pointers for now.
| 7342 | else if (oper == GT_FTN_ADDR) |
| 7343 | { |
JITDUMP("optimized\n");
| 7345 | |
| 7346 | GenTree* thisPointer = call->gtCallObjp; |
| 7347 | GenTree* targetObjPointers = call->gtCallArgs->Current(); |
| 7348 | GenTreeArgList* helperArgs = gtNewArgList(thisPointer, targetObjPointers); |
| 7349 | |
| 7350 | call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_DELEGATE_CTOR, TYP_VOID, helperArgs); |
| 7351 | |
| 7352 | CORINFO_LOOKUP entryPoint; |
| 7353 | info.compCompHnd->getReadyToRunDelegateCtorHelper(ldftnToken, clsHnd, &entryPoint); |
| 7354 | assert(!entryPoint.lookupKind.needsRuntimeLookup); |
| 7355 | call->setEntryPoint(entryPoint.constLookup); |
| 7356 | } |
| 7357 | else |
| 7358 | { |
JITDUMP("not optimized, R2R virtual case\n");
| 7360 | } |
| 7361 | } |
| 7362 | else |
| 7363 | #endif |
| 7364 | if (targetMethodHnd != nullptr) |
| 7365 | { |
| 7366 | CORINFO_METHOD_HANDLE alternateCtor = nullptr; |
| 7367 | DelegateCtorArgs ctorData; |
| 7368 | ctorData.pMethod = info.compMethodHnd; |
| 7369 | ctorData.pArg3 = nullptr; |
| 7370 | ctorData.pArg4 = nullptr; |
| 7371 | ctorData.pArg5 = nullptr; |
| 7372 | |
| 7373 | alternateCtor = info.compCompHnd->GetDelegateCtor(methHnd, clsHnd, targetMethodHnd, &ctorData); |
| 7374 | if (alternateCtor != methHnd) |
| 7375 | { |
JITDUMP("optimized\n");
// we erase any inline info that may have been set for generics as it is not needed here,
| 7378 | // and in fact it will pass the wrong info to the inliner code |
| 7379 | *ExactContextHnd = nullptr; |
| 7380 | |
| 7381 | call->gtCallMethHnd = alternateCtor; |
| 7382 | |
| 7383 | noway_assert(call->gtCallArgs->Rest()->Rest() == nullptr); |
| 7384 | GenTreeArgList* addArgs = nullptr; |
| 7385 | if (ctorData.pArg5) |
| 7386 | { |
| 7387 | GenTree* arg5 = gtNewIconHandleNode(size_t(ctorData.pArg5), GTF_ICON_FTN_ADDR); |
| 7388 | addArgs = gtNewListNode(arg5, addArgs); |
| 7389 | } |
| 7390 | if (ctorData.pArg4) |
| 7391 | { |
| 7392 | GenTree* arg4 = gtNewIconHandleNode(size_t(ctorData.pArg4), GTF_ICON_FTN_ADDR); |
| 7393 | addArgs = gtNewListNode(arg4, addArgs); |
| 7394 | } |
| 7395 | if (ctorData.pArg3) |
| 7396 | { |
| 7397 | GenTree* arg3 = gtNewIconHandleNode(size_t(ctorData.pArg3), GTF_ICON_FTN_ADDR); |
| 7398 | addArgs = gtNewListNode(arg3, addArgs); |
| 7399 | } |
| 7400 | call->gtCallArgs->Rest()->Rest() = addArgs; |
| 7401 | } |
| 7402 | else |
| 7403 | { |
JITDUMP("not optimized, no alternate ctor\n");
| 7405 | } |
| 7406 | } |
| 7407 | else |
| 7408 | { |
JITDUMP("not optimized, no target method\n");
| 7410 | } |
| 7411 | return call; |
| 7412 | } |
| 7413 | |
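// fgCastNeeded: return true if storing the value produced by 'tree' into a
// location of type 'toType' requires an explicit cast. For example (sketch), a
// TYP_INT value stored into a TYP_SHORT location needs a cast (to truncate), while
// a TYP_SHORT value stored into a TYP_INT location does not, as it is already
// properly widened.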
| 7414 | bool Compiler::fgCastNeeded(GenTree* tree, var_types toType) |
| 7415 | { |
| 7416 | // |
// If tree is a relop and we need a 4-byte integer
| 7418 | // then we never need to insert a cast |
| 7419 | // |
| 7420 | if ((tree->OperKind() & GTK_RELOP) && (genActualType(toType) == TYP_INT)) |
| 7421 | { |
| 7422 | return false; |
| 7423 | } |
| 7424 | |
| 7425 | var_types fromType; |
| 7426 | |
| 7427 | // |
// Is the tree a GT_CAST or a GT_CALL?
| 7429 | // |
| 7430 | if (tree->OperGet() == GT_CAST) |
| 7431 | { |
| 7432 | fromType = tree->CastToType(); |
| 7433 | } |
| 7434 | else if (tree->OperGet() == GT_CALL) |
| 7435 | { |
| 7436 | fromType = (var_types)tree->gtCall.gtReturnType; |
| 7437 | } |
| 7438 | else |
| 7439 | { |
| 7440 | fromType = tree->TypeGet(); |
| 7441 | } |
| 7442 | |
| 7443 | // |
| 7444 | // If both types are the same then an additional cast is not necessary |
| 7445 | // |
| 7446 | if (toType == fromType) |
| 7447 | { |
| 7448 | return false; |
| 7449 | } |
| 7450 | // |
// If the signedness of the two types is different then a cast is necessary
| 7452 | // |
| 7453 | if (varTypeIsUnsigned(toType) != varTypeIsUnsigned(fromType)) |
| 7454 | { |
| 7455 | return true; |
| 7456 | } |
| 7457 | // |
| 7458 | // If the from type is the same size or smaller then an additional cast is not necessary |
| 7459 | // |
| 7460 | if (genTypeSize(toType) >= genTypeSize(fromType)) |
| 7461 | { |
| 7462 | return false; |
| 7463 | } |
| 7464 | |
| 7465 | // |
| 7466 | // Looks like we will need the cast |
| 7467 | // |
| 7468 | return true; |
| 7469 | } |
| 7470 | |
| 7471 | // If assigning to a local var, add a cast if the target is |
// marked as NormalizedOnStore. Returns the (possibly updated) tree.
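// For example (a rough sketch): for a normalize-on-store TYP_SHORT local 's' and an
// int-typed value 'x', the assignment
//     ASG(LCL_VAR<short> s, x)
// becomes
//     ASG(LCL_VAR<int> s, CAST<short>(x))
// so the value kept in the widened local always fits in a short.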
| 7473 | GenTree* Compiler::fgDoNormalizeOnStore(GenTree* tree) |
| 7474 | { |
| 7475 | // |
| 7476 | // Only normalize the stores in the global morph phase |
| 7477 | // |
| 7478 | if (fgGlobalMorph) |
| 7479 | { |
| 7480 | noway_assert(tree->OperGet() == GT_ASG); |
| 7481 | |
| 7482 | GenTree* op1 = tree->gtOp.gtOp1; |
| 7483 | GenTree* op2 = tree->gtOp.gtOp2; |
| 7484 | |
| 7485 | if (op1->gtOper == GT_LCL_VAR && genActualType(op1->TypeGet()) == TYP_INT) |
| 7486 | { |
| 7487 | // Small-typed arguments and aliased locals are normalized on load. |
| 7488 | // Other small-typed locals are normalized on store. |
| 7489 | // If it is an assignment to one of the latter, insert the cast on RHS |
| 7490 | unsigned varNum = op1->gtLclVarCommon.gtLclNum; |
| 7491 | LclVarDsc* varDsc = &lvaTable[varNum]; |
| 7492 | |
| 7493 | if (varDsc->lvNormalizeOnStore()) |
| 7494 | { |
| 7495 | noway_assert(op1->gtType <= TYP_INT); |
| 7496 | op1->gtType = TYP_INT; |
| 7497 | |
| 7498 | if (fgCastNeeded(op2, varDsc->TypeGet())) |
| 7499 | { |
| 7500 | op2 = gtNewCastNode(TYP_INT, op2, false, varDsc->TypeGet()); |
| 7501 | tree->gtOp.gtOp2 = op2; |
| 7502 | |
| 7503 | // Propagate GTF_COLON_COND |
| 7504 | op2->gtFlags |= (tree->gtFlags & GTF_COLON_COND); |
| 7505 | } |
| 7506 | } |
| 7507 | } |
| 7508 | } |
| 7509 | |
| 7510 | return tree; |
| 7511 | } |
| 7512 | |
| 7513 | /***************************************************************************** |
| 7514 | * |
| 7515 | * Mark whether the edge "srcBB -> dstBB" forms a loop that will always |
| 7516 | * execute a call or not. |
| 7517 | */ |
| 7518 | |
| 7519 | inline void Compiler::fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB) |
| 7520 | { |
| 7521 | /* Bail if this is not a backward edge */ |
| 7522 | |
| 7523 | if (srcBB->bbNum < dstBB->bbNum) |
| 7524 | { |
| 7525 | return; |
| 7526 | } |
| 7527 | |
| 7528 | /* Unless we already know that there is a loop without a call here ... */ |
| 7529 | |
| 7530 | if (!(dstBB->bbFlags & BBF_LOOP_CALL0)) |
| 7531 | { |
| 7532 | /* Check whether there is a loop path that doesn't call */ |
| 7533 | |
| 7534 | if (optReachWithoutCall(dstBB, srcBB)) |
| 7535 | { |
| 7536 | dstBB->bbFlags |= BBF_LOOP_CALL0; |
| 7537 | dstBB->bbFlags &= ~BBF_LOOP_CALL1; |
| 7538 | } |
| 7539 | else |
| 7540 | { |
| 7541 | dstBB->bbFlags |= BBF_LOOP_CALL1; |
| 7542 | } |
| 7543 | } |
| 7544 | // if this loop will always call, then we can omit the GC Poll |
| 7545 | if ((GCPOLL_NONE != opts.compGCPollType) && (dstBB->bbFlags & BBF_LOOP_CALL1)) |
| 7546 | { |
| 7547 | srcBB->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 7548 | } |
| 7549 | } |
| 7550 | |
| 7551 | /***************************************************************************** |
| 7552 | * |
| 7553 | * Mark which loops are guaranteed to execute a call. |
| 7554 | */ |
| 7555 | |
| 7556 | void Compiler::fgLoopCallMark() |
| 7557 | { |
| 7558 | BasicBlock* block; |
| 7559 | |
/* If we've already marked all the blocks, bail */
| 7561 | |
| 7562 | if (fgLoopCallMarked) |
| 7563 | { |
| 7564 | return; |
| 7565 | } |
| 7566 | |
| 7567 | fgLoopCallMarked = true; |
| 7568 | |
| 7569 | /* Walk the blocks, looking for backward edges */ |
| 7570 | |
| 7571 | for (block = fgFirstBB; block; block = block->bbNext) |
| 7572 | { |
| 7573 | switch (block->bbJumpKind) |
| 7574 | { |
| 7575 | case BBJ_COND: |
| 7576 | case BBJ_CALLFINALLY: |
| 7577 | case BBJ_ALWAYS: |
| 7578 | case BBJ_EHCATCHRET: |
| 7579 | fgLoopCallTest(block, block->bbJumpDest); |
| 7580 | break; |
| 7581 | |
| 7582 | case BBJ_SWITCH: |
| 7583 | |
| 7584 | unsigned jumpCnt; |
| 7585 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 7586 | BasicBlock** jumpPtr; |
| 7587 | jumpPtr = block->bbJumpSwt->bbsDstTab; |
| 7588 | |
| 7589 | do |
| 7590 | { |
| 7591 | fgLoopCallTest(block, *jumpPtr); |
| 7592 | } while (++jumpPtr, --jumpCnt); |
| 7593 | |
| 7594 | break; |
| 7595 | |
| 7596 | default: |
| 7597 | break; |
| 7598 | } |
| 7599 | } |
| 7600 | } |
| 7601 | |
| 7602 | /***************************************************************************** |
| 7603 | * |
| 7604 | * Note the fact that the given block is a loop header. |
| 7605 | */ |
| 7606 | |
| 7607 | inline void Compiler::fgMarkLoopHead(BasicBlock* block) |
| 7608 | { |
| 7609 | #ifdef DEBUG |
| 7610 | if (verbose) |
| 7611 | { |
printf("fgMarkLoopHead: Checking loop head block " FMT_BB ": ", block->bbNum);
| 7613 | } |
| 7614 | #endif |
| 7615 | |
| 7616 | /* Have we decided to generate fully interruptible code already? */ |
| 7617 | |
| 7618 | if (genInterruptible) |
| 7619 | { |
| 7620 | #ifdef DEBUG |
| 7621 | if (verbose) |
| 7622 | { |
printf("method is already fully interruptible\n");
| 7624 | } |
| 7625 | #endif |
| 7626 | return; |
| 7627 | } |
| 7628 | |
| 7629 | /* Is the loop head block known to execute a method call? */ |
| 7630 | |
| 7631 | if (block->bbFlags & BBF_GC_SAFE_POINT) |
| 7632 | { |
| 7633 | #ifdef DEBUG |
| 7634 | if (verbose) |
| 7635 | { |
printf("this block will execute a call\n");
| 7637 | } |
| 7638 | #endif |
| 7639 | // single block loops that contain GC safe points don't need polls. |
| 7640 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 7641 | return; |
| 7642 | } |
| 7643 | |
| 7644 | /* Are dominator sets available? */ |
| 7645 | |
| 7646 | if (fgDomsComputed) |
| 7647 | { |
| 7648 | /* Make sure that we know which loops will always execute calls */ |
| 7649 | |
| 7650 | if (!fgLoopCallMarked) |
| 7651 | { |
| 7652 | fgLoopCallMark(); |
| 7653 | } |
| 7654 | |
| 7655 | /* Will every trip through our loop execute a call? */ |
| 7656 | |
| 7657 | if (block->bbFlags & BBF_LOOP_CALL1) |
| 7658 | { |
| 7659 | #ifdef DEBUG |
| 7660 | if (verbose) |
| 7661 | { |
printf("this block dominates a block that will execute a call\n");
| 7663 | } |
| 7664 | #endif |
| 7665 | return; |
| 7666 | } |
| 7667 | } |
| 7668 | |
| 7669 | /* |
| 7670 | * We have to make this method fully interruptible since we can not |
| 7671 | * ensure that this loop will execute a call every time it loops. |
| 7672 | * |
| 7673 | * We'll also need to generate a full register map for this method. |
| 7674 | */ |
| 7675 | |
| 7676 | assert(!codeGen->isGCTypeFixed()); |
| 7677 | |
| 7678 | if (!compCanEncodePtrArgCntMax()) |
| 7679 | { |
| 7680 | #ifdef DEBUG |
| 7681 | if (verbose) |
| 7682 | { |
printf("a callsite with more than 1023 pushed args exists\n");
| 7684 | } |
| 7685 | #endif |
| 7686 | return; |
| 7687 | } |
| 7688 | |
| 7689 | #ifdef DEBUG |
| 7690 | if (verbose) |
| 7691 | { |
printf("no guaranteed callsite exists, marking method as fully interruptible\n");
| 7693 | } |
| 7694 | #endif |
| 7695 | |
// only enable fully interruptible code if we're hijacking.
| 7697 | if (GCPOLL_NONE == opts.compGCPollType) |
| 7698 | { |
| 7699 | genInterruptible = true; |
| 7700 | } |
| 7701 | } |
| 7702 | |
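// fgGetCritSectOfStaticMethod: build the tree that produces the object a
// synchronized static method locks on. For shared generic code, e.g. (a rough
// sketch of the CORINFO_LOOKUP_METHODPARAM case below), the result looks like
//     CALL help CORINFO_HELP_GETSYNCFROMCLASSHANDLE(
//         CALL help CORINFO_HELP_GETCLASSFROMMETHODPARAM(<generic context arg>))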
| 7703 | GenTree* Compiler::fgGetCritSectOfStaticMethod() |
| 7704 | { |
| 7705 | noway_assert(!compIsForInlining()); |
| 7706 | |
| 7707 | noway_assert(info.compIsStatic); // This method should only be called for static methods. |
| 7708 | |
| 7709 | GenTree* tree = nullptr; |
| 7710 | |
| 7711 | CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd); |
| 7712 | |
| 7713 | if (!kind.needsRuntimeLookup) |
| 7714 | { |
| 7715 | void *critSect = nullptr, **pCrit = nullptr; |
| 7716 | critSect = info.compCompHnd->getMethodSync(info.compMethodHnd, (void**)&pCrit); |
| 7717 | noway_assert((!critSect) != (!pCrit)); |
| 7718 | |
| 7719 | tree = gtNewIconEmbHndNode(critSect, pCrit, GTF_ICON_METHOD_HDL, info.compMethodHnd); |
| 7720 | } |
| 7721 | else |
| 7722 | { |
// Collectible types require that, for shared generic code, if we use the generic context parameter,
// we report it. (This is a conservative approach; we could detect some cases, particularly when the
// context parameter is 'this', where we don't need the eager reporting logic.)
| 7726 | lvaGenericsContextUseCount++; |
| 7727 | |
| 7728 | switch (kind.runtimeLookupKind) |
| 7729 | { |
| 7730 | case CORINFO_LOOKUP_THISOBJ: |
| 7731 | { |
noway_assert(!"Should never get this for static method.");
| 7733 | break; |
| 7734 | } |
| 7735 | |
| 7736 | case CORINFO_LOOKUP_CLASSPARAM: |
| 7737 | { |
| 7738 | // In this case, the hidden param is the class handle. |
| 7739 | tree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); |
| 7740 | break; |
| 7741 | } |
| 7742 | |
| 7743 | case CORINFO_LOOKUP_METHODPARAM: |
| 7744 | { |
| 7745 | // In this case, the hidden param is the method handle. |
| 7746 | tree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); |
| 7747 | // Call helper CORINFO_HELP_GETCLASSFROMMETHODPARAM to get the class handle |
| 7748 | // from the method handle. |
| 7749 | tree = gtNewHelperCallNode(CORINFO_HELP_GETCLASSFROMMETHODPARAM, TYP_I_IMPL, gtNewArgList(tree)); |
| 7750 | break; |
| 7751 | } |
| 7752 | |
| 7753 | default: |
| 7754 | { |
noway_assert(!"Unknown LOOKUP_KIND");
| 7756 | break; |
| 7757 | } |
| 7758 | } |
| 7759 | |
| 7760 | noway_assert(tree); // tree should now contain the CORINFO_CLASS_HANDLE for the exact class. |
| 7761 | |
| 7762 | // Given the class handle, get the pointer to the Monitor. |
| 7763 | tree = gtNewHelperCallNode(CORINFO_HELP_GETSYNCFROMCLASSHANDLE, TYP_I_IMPL, gtNewArgList(tree)); |
| 7764 | } |
| 7765 | |
| 7766 | noway_assert(tree); |
| 7767 | return tree; |
| 7768 | } |
| 7769 | |
| 7770 | #if FEATURE_EH_FUNCLETS |
| 7771 | |
| 7772 | /***************************************************************************** |
| 7773 | * |
| 7774 | * Add monitor enter/exit calls for synchronized methods, and a try/fault |
| 7775 | * to ensure the 'exit' is called if the 'enter' was successful. On x86, we |
| 7776 | * generate monitor enter/exit calls and tell the VM the code location of |
| 7777 | * these calls. When an exception occurs between those locations, the VM |
| 7778 | * automatically releases the lock. For non-x86 platforms, the JIT is |
| 7779 | * responsible for creating a try/finally to protect the monitor enter/exit, |
| 7780 | * and the VM doesn't need to know anything special about the method during |
| 7781 | * exception processing -- it's just a normal try/finally. |
| 7782 | * |
| 7783 | * We generate the following code: |
| 7784 | * |
| 7785 | * void Foo() |
| 7786 | * { |
| 7787 | * unsigned byte acquired = 0; |
| 7788 | * try { |
| 7789 | * JIT_MonEnterWorker(<lock object>, &acquired); |
| 7790 | * |
| 7791 | * *** all the preexisting user code goes here *** |
| 7792 | * |
| 7793 | * JIT_MonExitWorker(<lock object>, &acquired); |
| 7794 | * } fault { |
| 7795 | * JIT_MonExitWorker(<lock object>, &acquired); |
| 7796 | * } |
| 7797 | * L_return: |
| 7798 | * ret |
| 7799 | * } |
| 7800 | * |
* If the lock is actually acquired, then the 'acquired' variable is set to 1
* by the helper call. During normal exit, the monitor exit call at the end of
* the 'try' runs with 'acquired' equal to 1, and the lock is released. If an
* exception occurs before the lock is acquired, but within the 'try' (extremely
* unlikely, but possible), 'acquired' will be 0, and the monitor exit call in
* the fault handler will quickly return without attempting to release the lock.
* Otherwise, 'acquired' will be 1, and the lock will be released by the fault
* handler during exception processing.
| 7808 | * |
| 7809 | * For synchronized methods, we generate a single return block. |
| 7810 | * We can do this without creating additional "step" blocks because "ret" blocks |
| 7811 | * must occur at the top-level (of the original code), not nested within any EH |
| 7812 | * constructs. From the CLI spec, 12.4.2.8.2.3 "ret": "Shall not be enclosed in any |
| 7813 | * protected block, filter, or handler." Also, 3.57: "The ret instruction cannot be |
| 7814 | * used to transfer control out of a try, filter, catch, or finally block. From within |
| 7815 | * a try or catch, use the leave instruction with a destination of a ret instruction |
| 7816 | * that is outside all enclosing exception blocks." |
| 7817 | * |
| 7818 | * In addition, we can add a "fault" at the end of a method and be guaranteed that no |
| 7819 | * control falls through. From the CLI spec, section 12.4 "Control flow": "Control is not |
| 7820 | * permitted to simply fall through the end of a method. All paths shall terminate with one |
| 7821 | * of these instructions: ret, throw, jmp, or (tail. followed by call, calli, or callvirt)." |
| 7822 | * |
| 7823 | * We only need to worry about "ret" and "throw", as the CLI spec prevents any other |
| 7824 | * alternatives. Section 15.4.3.3 "Implementation information" states about exiting |
| 7825 | * synchronized methods: "Exiting a synchronized method using a tail. call shall be |
| 7826 | * implemented as though the tail. had not been specified." Section 3.37 "jmp" states: |
| 7827 | * "The jmp instruction cannot be used to transferred control out of a try, filter, |
| 7828 | * catch, fault or finally block; or out of a synchronized region." And, "throw" will |
| 7829 | * be handled naturally; no additional work is required. |
| 7830 | */ |
| 7831 | |
| 7832 | void Compiler::fgAddSyncMethodEnterExit() |
| 7833 | { |
| 7834 | assert((info.compFlags & CORINFO_FLG_SYNCH) != 0); |
| 7835 | |
| 7836 | // We need to do this transformation before funclets are created. |
| 7837 | assert(!fgFuncletsCreated); |
| 7838 | |
| 7839 | // Assume we don't need to update the bbPreds lists. |
| 7840 | assert(!fgComputePredsDone); |
| 7841 | |
| 7842 | #if !FEATURE_EH |
| 7843 | // If we don't support EH, we can't add the EH needed by synchronized methods. |
| 7844 | // Of course, we could simply ignore adding the EH constructs, since we don't |
| 7845 | // support exceptions being thrown in this mode, but we would still need to add |
| 7846 | // the monitor enter/exit, and that doesn't seem worth it for this minor case. |
| 7847 | // By the time EH is working, we can just enable the whole thing. |
| 7848 | NYI("No support for synchronized methods" ); |
| 7849 | #endif // !FEATURE_EH |
| 7850 | |
| 7851 | // Create a scratch first BB where we can put the new variable initialization. |
| 7852 | // Don't put the scratch BB in the protected region. |
| 7853 | |
| 7854 | fgEnsureFirstBBisScratch(); |
| 7855 | |
| 7856 | // Create a block for the start of the try region, where the monitor enter call |
| 7857 | // will go. |
| 7858 | |
| 7859 | assert(fgFirstBB->bbFallsThrough()); |
| 7860 | |
| 7861 | BasicBlock* tryBegBB = fgNewBBafter(BBJ_NONE, fgFirstBB, false); |
| 7862 | BasicBlock* tryNextBB = tryBegBB->bbNext; |
| 7863 | BasicBlock* tryLastBB = fgLastBB; |
| 7864 | |
| 7865 | // If we have profile data the new block will inherit the next block's weight |
| 7866 | if (tryNextBB->hasProfileWeight()) |
| 7867 | { |
| 7868 | tryBegBB->inheritWeight(tryNextBB); |
| 7869 | } |
| 7870 | |
| 7871 | // Create a block for the fault. |
| 7872 | |
| 7873 | assert(!tryLastBB->bbFallsThrough()); |
| 7874 | BasicBlock* faultBB = fgNewBBafter(BBJ_EHFINALLYRET, tryLastBB, false); |
| 7875 | |
| 7876 | assert(tryLastBB->bbNext == faultBB); |
| 7877 | assert(faultBB->bbNext == nullptr); |
| 7878 | assert(faultBB == fgLastBB); |
| 7879 | |
| 7880 | { // Scope the EH region creation |
| 7881 | |
| 7882 | // Add the new EH region at the end, since it is the least nested, |
| 7883 | // and thus should be last. |
| 7884 | |
| 7885 | EHblkDsc* newEntry; |
| 7886 | unsigned XTnew = compHndBBtabCount; |
| 7887 | |
| 7888 | newEntry = fgAddEHTableEntry(XTnew); |
| 7889 | |
| 7890 | // Initialize the new entry |
| 7891 | |
| 7892 | newEntry->ebdHandlerType = EH_HANDLER_FAULT; |
| 7893 | |
| 7894 | newEntry->ebdTryBeg = tryBegBB; |
| 7895 | newEntry->ebdTryLast = tryLastBB; |
| 7896 | |
| 7897 | newEntry->ebdHndBeg = faultBB; |
| 7898 | newEntry->ebdHndLast = faultBB; |
| 7899 | |
| 7900 | newEntry->ebdTyp = 0; // unused for fault |
| 7901 | |
| 7902 | newEntry->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; |
| 7903 | newEntry->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; |
| 7904 | |
| 7905 | newEntry->ebdTryBegOffset = tryBegBB->bbCodeOffs; |
| 7906 | newEntry->ebdTryEndOffset = tryLastBB->bbCodeOffsEnd; |
| 7907 | newEntry->ebdFilterBegOffset = 0; |
| 7908 | newEntry->ebdHndBegOffset = 0; // handler doesn't correspond to any IL |
| 7909 | newEntry->ebdHndEndOffset = 0; // handler doesn't correspond to any IL |
| 7910 | |
| 7911 | // Set some flags on the new region. This is the same as when we set up |
| 7912 | // EH regions in fgFindBasicBlocks(). Note that the try has no enclosing |
| 7913 | // handler, and the fault has no enclosing try. |
| 7914 | |
| 7915 | tryBegBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_TRY_BEG | BBF_IMPORTED; |
| 7916 | |
| 7917 | faultBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_IMPORTED; |
| 7918 | faultBB->bbCatchTyp = BBCT_FAULT; |
| 7919 | |
| 7920 | tryBegBB->setTryIndex(XTnew); |
| 7921 | tryBegBB->clearHndIndex(); |
| 7922 | |
| 7923 | faultBB->clearTryIndex(); |
| 7924 | faultBB->setHndIndex(XTnew); |
| 7925 | |
// Walk the user code blocks and set the try index of every block that doesn't
// already have one to point to the new try region.
| 7928 | |
| 7929 | BasicBlock* tmpBB; |
| 7930 | for (tmpBB = tryBegBB->bbNext; tmpBB != faultBB; tmpBB = tmpBB->bbNext) |
| 7931 | { |
| 7932 | if (!tmpBB->hasTryIndex()) |
| 7933 | { |
| 7934 | tmpBB->setTryIndex(XTnew); |
| 7935 | } |
| 7936 | } |
| 7937 | |
// Walk the EH table. Make every EH entry that doesn't already have an enclosing
// try index use this new entry as its enclosing try index.
| 7940 | |
| 7941 | unsigned XTnum; |
| 7942 | EHblkDsc* HBtab; |
| 7943 | |
| 7944 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < XTnew; XTnum++, HBtab++) |
| 7945 | { |
| 7946 | if (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) |
| 7947 | { |
| 7948 | HBtab->ebdEnclosingTryIndex = |
| 7949 | (unsigned short)XTnew; // This EH region wasn't previously nested, but now it is. |
| 7950 | } |
| 7951 | } |
| 7952 | |
| 7953 | #ifdef DEBUG |
| 7954 | if (verbose) |
| 7955 | { |
| 7956 | JITDUMP("Synchronized method - created additional EH descriptor EH#%u for try/fault wrapping monitor " |
| 7957 | "enter/exit\n" , |
| 7958 | XTnew); |
| 7959 | fgDispBasicBlocks(); |
| 7960 | fgDispHandlerTab(); |
| 7961 | } |
| 7962 | |
| 7963 | fgVerifyHandlerTab(); |
| 7964 | #endif // DEBUG |
| 7965 | } |
| 7966 | |
| 7967 | // Create a 'monitor acquired' boolean (actually, an unsigned byte: 1 = acquired, 0 = not acquired). |
| 7968 | |
| 7969 | var_types typeMonAcquired = TYP_UBYTE; |
| 7970 | this->lvaMonAcquired = lvaGrabTemp(true DEBUGARG("Synchronized method monitor acquired boolean" )); |
| 7971 | |
| 7972 | lvaTable[lvaMonAcquired].lvType = typeMonAcquired; |
| 7973 | |
| 7974 | { // Scope the variables of the variable initialization |
| 7975 | |
| 7976 | // Initialize the 'acquired' boolean. |
| 7977 | |
| 7978 | GenTree* zero = gtNewZeroConNode(genActualType(typeMonAcquired)); |
| 7979 | GenTree* varNode = gtNewLclvNode(lvaMonAcquired, typeMonAcquired); |
| 7980 | GenTree* initNode = gtNewAssignNode(varNode, zero); |
| 7981 | |
| 7982 | fgInsertStmtAtEnd(fgFirstBB, initNode); |
| 7983 | |
| 7984 | #ifdef DEBUG |
| 7985 | if (verbose) |
| 7986 | { |
| 7987 | printf("\nSynchronized method - Add 'acquired' initialization in first block %s\n" , |
| 7988 | fgFirstBB->dspToString()); |
| 7989 | gtDispTree(initNode); |
| 7990 | printf("\n" ); |
| 7991 | } |
| 7992 | #endif |
| 7993 | } |
| 7994 | |
| 7995 | // Make a copy of the 'this' pointer to be used in the handler so it does not inhibit enregistration |
| 7996 | // of all uses of the variable. |
| 7997 | unsigned lvaCopyThis = 0; |
| 7998 | if (!info.compIsStatic) |
| 7999 | { |
lvaCopyThis = lvaGrabTemp(true DEBUGARG("Synchronized method copy of 'this' for handler"));
| 8001 | lvaTable[lvaCopyThis].lvType = TYP_REF; |
| 8002 | |
| 8003 | GenTree* thisNode = gtNewLclvNode(info.compThisArg, TYP_REF); |
| 8004 | GenTree* copyNode = gtNewLclvNode(lvaCopyThis, TYP_REF); |
| 8005 | GenTree* initNode = gtNewAssignNode(copyNode, thisNode); |
| 8006 | |
| 8007 | fgInsertStmtAtEnd(tryBegBB, initNode); |
| 8008 | } |
| 8009 | |
| 8010 | fgCreateMonitorTree(lvaMonAcquired, info.compThisArg, tryBegBB, true /*enter*/); |
| 8011 | |
| 8012 | // exceptional case |
| 8013 | fgCreateMonitorTree(lvaMonAcquired, lvaCopyThis, faultBB, false /*exit*/); |
| 8014 | |
| 8015 | // non-exceptional cases |
| 8016 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 8017 | { |
| 8018 | if (block->bbJumpKind == BBJ_RETURN) |
| 8019 | { |
| 8020 | fgCreateMonitorTree(lvaMonAcquired, info.compThisArg, block, false /*exit*/); |
| 8021 | } |
| 8022 | } |
| 8023 | } |
| 8024 | |
//------------------------------------------------------------------------
// fgCreateMonitorTree: Create a tree to execute a monitor enter or exit operation for synchronized methods.
//
// Arguments:
// lvaMonAcquired - lclNum of the boolean variable that tracks whether the monitor has been acquired.
// lvaThisVar - lclNum of the variable being used as the 'this' pointer; it may not be the original one.
// Only used for non-static methods.
// block - block to insert the tree in. The tree is inserted at the end or, for a return block,
// immediately before the GT_RETURN.
// enter - whether to create a monitor enter or a monitor exit.
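//
// Notes:
// As a rough sketch (illustrative, not the literal tree dump format), the helper call built here is,
// for a non-static method:
//
//     call CORINFO_HELP_MON_ENTER(thisPtr, &acquired)           // or CORINFO_HELP_MON_EXIT
//
// and for a static method:
//
//     call CORINFO_HELP_MON_ENTER_STATIC(critSect, &acquired)   // or CORINFO_HELP_MON_EXIT_STATIC
//
// where 'critSect' is the tree returned by fgGetCritSectOfStaticMethod() and 'acquired' is the
// lvaMonAcquired local.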
| 8032 | |
| 8033 | GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThisVar, BasicBlock* block, bool enter) |
| 8034 | { |
| 8035 | // Insert the expression "enter/exitCrit(this, &acquired)" or "enter/exitCrit(handle, &acquired)" |
| 8036 | |
| 8037 | var_types typeMonAcquired = TYP_UBYTE; |
| 8038 | GenTree* varNode = gtNewLclvNode(lvaMonAcquired, typeMonAcquired); |
| 8039 | GenTree* varAddrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, varNode); |
| 8040 | GenTree* tree; |
| 8041 | |
| 8042 | if (info.compIsStatic) |
| 8043 | { |
| 8044 | tree = fgGetCritSectOfStaticMethod(); |
| 8045 | tree = gtNewHelperCallNode(enter ? CORINFO_HELP_MON_ENTER_STATIC : CORINFO_HELP_MON_EXIT_STATIC, TYP_VOID, |
| 8046 | gtNewArgList(tree, varAddrNode)); |
| 8047 | } |
| 8048 | else |
| 8049 | { |
| 8050 | tree = gtNewLclvNode(lvaThisVar, TYP_REF); |
| 8051 | tree = gtNewHelperCallNode(enter ? CORINFO_HELP_MON_ENTER : CORINFO_HELP_MON_EXIT, TYP_VOID, |
| 8052 | gtNewArgList(tree, varAddrNode)); |
| 8053 | } |
| 8054 | |
| 8055 | #ifdef DEBUG |
| 8056 | if (verbose) |
| 8057 | { |
| 8058 | printf("\nSynchronized method - Add monitor %s call to block %s\n" , enter ? "enter" : "exit" , |
| 8059 | block->dspToString()); |
| 8060 | gtDispTree(tree); |
| 8061 | printf("\n" ); |
| 8062 | } |
| 8063 | #endif |
| 8064 | |
| 8065 | if (block->bbJumpKind == BBJ_RETURN && block->lastStmt()->gtStmtExpr->gtOper == GT_RETURN) |
| 8066 | { |
| 8067 | GenTree* retNode = block->lastStmt()->gtStmtExpr; |
| 8068 | GenTree* retExpr = retNode->gtOp.gtOp1; |
| 8069 | |
| 8070 | if (retExpr != nullptr) |
| 8071 | { |
| 8072 | // have to insert this immediately before the GT_RETURN so we transform: |
| 8073 | // ret(...) -> |
// ret(comma(comma(tmp=..., call mon_exit), tmp))
| 8075 | // |
| 8076 | // |
// Before the morph stage, it is possible to have a case of GT_RETURN(TYP_LONG, op1) where op1's type is
// TYP_STRUCT (of 8 bytes) and op1 is a call node. See the big comment block in impReturnInstruction()
// for details of the case where info.compRetType is not the same as info.compRetNativeType. For
// this reason, pass compMethodInfo->args.retTypeClass, which is guaranteed to be a valid class handle
// if the return type is a value class. Note that fgInsertCommaFormTemp() in turn uses this class handle
// if the type of op1 is TYP_STRUCT to perform lvaSetStruct() on the new temp that is created, which
// in turn passes it to the VM to obtain the size of the value type.
| 8084 | GenTree* temp = fgInsertCommaFormTemp(&retNode->gtOp.gtOp1, info.compMethodInfo->args.retTypeClass); |
| 8085 | |
| 8086 | GenTree* lclVar = retNode->gtOp.gtOp1->gtOp.gtOp2; |
| 8087 | |
| 8088 | // The return can't handle all of the trees that could be on the right-hand-side of an assignment, |
| 8089 | // especially in the case of a struct. Therefore, we need to propagate GTF_DONT_CSE. |
// If we don't, assertion propagation may, e.g., change a return of a local to a return of
// "CNS_INT struct 0", which downstream phases can't handle.
| 8093 | lclVar->gtFlags |= (retExpr->gtFlags & GTF_DONT_CSE); |
| 8094 | retNode->gtOp.gtOp1->gtOp.gtOp2 = gtNewOperNode(GT_COMMA, retExpr->TypeGet(), tree, lclVar); |
| 8095 | } |
| 8096 | else |
| 8097 | { |
| 8098 | // Insert this immediately before the GT_RETURN |
| 8099 | fgInsertStmtNearEnd(block, tree); |
| 8100 | } |
| 8101 | } |
| 8102 | else |
| 8103 | { |
| 8104 | fgInsertStmtAtEnd(block, tree); |
| 8105 | } |
| 8106 | |
| 8107 | return tree; |
| 8108 | } |
| 8109 | |
| 8110 | // Convert a BBJ_RETURN block in a synchronized method to a BBJ_ALWAYS. |
| 8111 | // We've previously added a 'try' block around the original program code using fgAddSyncMethodEnterExit(). |
| 8112 | // Thus, we put BBJ_RETURN blocks inside a 'try'. In IL this is illegal. Instead, we would |
| 8113 | // see a 'leave' inside a 'try' that would get transformed into BBJ_CALLFINALLY/BBJ_ALWAYS blocks |
| 8114 | // during importing, and the BBJ_ALWAYS would point at an outer block with the BBJ_RETURN. |
| 8115 | // Here, we mimic some of the logic of importing a LEAVE to get the same effect for synchronized methods. |
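//
// A sketch of the transformation (block numbers are illustrative):
//
//   Before:                                   After:
//     BB10 [try #N] BBJ_RETURN                  BB10 [try #N] BBJ_ALWAYS -> genReturnBB
//     ...                                       ...
//     genReturnBB   BBJ_RETURN                  genReturnBB   BBJ_RETURN
//
// i.e. the in-try return becomes an unconditional jump to the single top-level return block.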
| 8116 | void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) |
| 8117 | { |
| 8118 | assert(!fgFuncletsCreated); |
| 8119 | assert(info.compFlags & CORINFO_FLG_SYNCH); |
| 8120 | assert(genReturnBB != nullptr); |
| 8121 | assert(genReturnBB != block); |
| 8122 | assert(fgReturnCount <= 1); // We have a single return for synchronized methods |
| 8123 | assert(block->bbJumpKind == BBJ_RETURN); |
| 8124 | assert((block->bbFlags & BBF_HAS_JMP) == 0); |
| 8125 | assert(block->hasTryIndex()); |
| 8126 | assert(!block->hasHndIndex()); |
| 8127 | assert(compHndBBtabCount >= 1); |
| 8128 | |
| 8129 | unsigned tryIndex = block->getTryIndex(); |
| 8130 | assert(tryIndex == compHndBBtabCount - 1); // The BBJ_RETURN must be at the top-level before we inserted the |
| 8131 | // try/finally, which must be the last EH region. |
| 8132 | |
| 8133 | EHblkDsc* ehDsc = ehGetDsc(tryIndex); |
| 8134 | assert(ehDsc->ebdEnclosingTryIndex == |
| 8135 | EHblkDsc::NO_ENCLOSING_INDEX); // There are no enclosing regions of the BBJ_RETURN block |
| 8136 | assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); |
| 8137 | |
| 8138 | // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. |
| 8139 | block->bbJumpKind = BBJ_ALWAYS; |
| 8140 | block->bbJumpDest = genReturnBB; |
| 8141 | block->bbJumpDest->bbRefs++; |
| 8142 | |
| 8143 | #ifdef DEBUG |
| 8144 | if (verbose) |
| 8145 | { |
| 8146 | printf("Synchronized method - convert block " FMT_BB " to BBJ_ALWAYS [targets " FMT_BB "]\n" , block->bbNum, |
| 8147 | block->bbJumpDest->bbNum); |
| 8148 | } |
| 8149 | #endif |
| 8150 | } |
| 8151 | |
| 8152 | #endif // FEATURE_EH_FUNCLETS |
| 8153 | |
| 8154 | //------------------------------------------------------------------------ |
| 8155 | // fgAddReversePInvokeEnterExit: Add enter/exit calls for reverse PInvoke methods |
| 8156 | // |
| 8157 | // Arguments: |
| 8158 | // None. |
| 8159 | // |
| 8160 | // Return Value: |
| 8161 | // None. |
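//
// Notes:
// Roughly speaking (a sketch, not the exact trees; 'frame' stands for the TYP_BLK local
// lvaReversePInvokeFrameVar of size sizeOfReversePInvokeFrame), the method is bracketed as:
//
//     CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER(&frame);   // prepended to the scratch first block
//     ... original method body ...
//     CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT(&frame);    // inserted near the end of genReturnBB
//     return;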
| 8162 | |
| 8163 | void Compiler::fgAddReversePInvokeEnterExit() |
| 8164 | { |
| 8165 | assert(opts.IsReversePInvoke()); |
| 8166 | |
| 8167 | lvaReversePInvokeFrameVar = lvaGrabTempWithImplicitUse(false DEBUGARG("Reverse Pinvoke FrameVar" )); |
| 8168 | |
| 8169 | LclVarDsc* varDsc = &lvaTable[lvaReversePInvokeFrameVar]; |
| 8170 | varDsc->lvType = TYP_BLK; |
| 8171 | varDsc->lvExactSize = eeGetEEInfo()->sizeOfReversePInvokeFrame; |
| 8172 | |
| 8173 | GenTree* tree; |
| 8174 | |
| 8175 | // Add enter pinvoke exit callout at the start of prolog |
| 8176 | |
| 8177 | tree = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaReversePInvokeFrameVar, TYP_BLK)); |
| 8178 | |
| 8179 | tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_ENTER, TYP_VOID, gtNewArgList(tree)); |
| 8180 | |
| 8181 | fgEnsureFirstBBisScratch(); |
| 8182 | |
| 8183 | fgInsertStmtAtBeg(fgFirstBB, tree); |
| 8184 | |
| 8185 | #ifdef DEBUG |
| 8186 | if (verbose) |
| 8187 | { |
| 8188 | printf("\nReverse PInvoke method - Add reverse pinvoke enter in first basic block %s\n" , |
| 8189 | fgFirstBB->dspToString()); |
| 8190 | gtDispTree(tree); |
| 8191 | printf("\n" ); |
| 8192 | } |
| 8193 | #endif |
| 8194 | |
| 8195 | // Add reverse pinvoke exit callout at the end of epilog |
| 8196 | |
| 8197 | tree = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaReversePInvokeFrameVar, TYP_BLK)); |
| 8198 | |
| 8199 | tree = gtNewHelperCallNode(CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT, TYP_VOID, gtNewArgList(tree)); |
| 8200 | |
| 8201 | assert(genReturnBB != nullptr); |
| 8202 | |
| 8203 | fgInsertStmtNearEnd(genReturnBB, tree); |
| 8204 | |
| 8205 | #ifdef DEBUG |
| 8206 | if (verbose) |
| 8207 | { |
| 8208 | printf("\nReverse PInvoke method - Add reverse pinvoke exit in return basic block %s\n" , |
| 8209 | genReturnBB->dspToString()); |
| 8210 | gtDispTree(tree); |
| 8211 | printf("\n" ); |
| 8212 | } |
| 8213 | #endif |
| 8214 | } |
| 8215 | |
| 8216 | /***************************************************************************** |
| 8217 | * |
| 8218 | * Return 'true' if there is more than one BBJ_RETURN block. |
| 8219 | */ |
| 8220 | |
| 8221 | bool Compiler::fgMoreThanOneReturnBlock() |
| 8222 | { |
| 8223 | unsigned retCnt = 0; |
| 8224 | |
| 8225 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 8226 | { |
| 8227 | if (block->bbJumpKind == BBJ_RETURN) |
| 8228 | { |
| 8229 | retCnt++; |
| 8230 | if (retCnt > 1) |
| 8231 | { |
| 8232 | return true; |
| 8233 | } |
| 8234 | } |
| 8235 | } |
| 8236 | |
| 8237 | return false; |
| 8238 | } |
| 8239 | |
| 8240 | namespace |
| 8241 | { |
| 8242 | // Define a helper class for merging return blocks (which we do when the input has |
| 8243 | // more than the limit for this configuration). |
| 8244 | // |
| 8245 | // Notes: sets fgReturnCount, genReturnBB, and genReturnLocal. |
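//
// For example (a sketch; the exact slot usage depends on the order the returns are seen): with a
// limit of 4, a method containing "return 0", "return 1", "return 0", "return x", and "return 2"
// ends up with one merged return block per distinct constant (0, 1, and 2) plus the shared
// non-constant return block `genReturnBB` that returns `genReturnLocal`. The original constant
// BBJ_RETURN blocks become BBJ_ALWAYS jumps to their matching merged block, and the non-constant
// return is later rewritten by morph to store `genReturnLocal` and jump to `genReturnBB`.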
| 8246 | class MergedReturns |
| 8247 | { |
| 8248 | public: |
| 8249 | #ifdef JIT32_GCENCODER |
| 8250 | |
| 8251 | // X86 GC encoding has a hard limit of SET_EPILOGCNT_MAX epilogs. |
| 8252 | const static unsigned ReturnCountHardLimit = SET_EPILOGCNT_MAX; |
| 8253 | #else // JIT32_GCENCODER |
| 8254 | |
| 8255 | // We currently apply a hard limit of '4' to all other targets (see |
| 8256 | // the other uses of SET_EPILOGCNT_MAX), though it would be good |
| 8257 | // to revisit that decision based on CQ analysis. |
| 8258 | const static unsigned ReturnCountHardLimit = 4; |
| 8259 | #endif // JIT32_GCENCODER |
| 8260 | |
| 8261 | private: |
| 8262 | Compiler* comp; |
| 8263 | |
// As we discover returns, we'll record them in `returnBlocks` until
// the limit is reached; after that, `returnBlocks` instead tracks the
// merged return blocks.
| 8267 | BasicBlock* returnBlocks[ReturnCountHardLimit]; |
| 8268 | |
| 8269 | // Each constant value returned gets its own merged return block that |
| 8270 | // returns that constant (up to the limit on number of returns); in |
| 8271 | // `returnConstants` we track the constant values returned by these |
| 8272 | // merged constant return blocks. |
| 8273 | INT64 returnConstants[ReturnCountHardLimit]; |
| 8274 | |
| 8275 | // Indicators of where in the lexical block list we'd like to place |
| 8276 | // each constant return block. |
| 8277 | BasicBlock* insertionPoints[ReturnCountHardLimit]; |
| 8278 | |
| 8279 | // Number of return blocks allowed |
| 8280 | PhasedVar<unsigned> maxReturns; |
| 8281 | |
| 8282 | // Flag to keep track of when we've hit the limit of returns and are |
| 8283 | // actively merging returns together. |
| 8284 | bool mergingReturns = false; |
| 8285 | |
| 8286 | public: |
| 8287 | MergedReturns(Compiler* comp) : comp(comp) |
| 8288 | { |
| 8289 | comp->fgReturnCount = 0; |
| 8290 | } |
| 8291 | |
| 8292 | void SetMaxReturns(unsigned value) |
| 8293 | { |
| 8294 | maxReturns = value; |
| 8295 | maxReturns.MarkAsReadOnly(); |
| 8296 | } |
| 8297 | |
| 8298 | //------------------------------------------------------------------------ |
| 8299 | // Record: Make note of a return block in the input program. |
| 8300 | // |
| 8301 | // Arguments: |
| 8302 | // returnBlock - Block in the input that has jump kind BBJ_RETURN |
| 8303 | // |
| 8304 | // Notes: |
| 8305 | // Updates fgReturnCount appropriately, and generates a merged return |
| 8306 | // block if necessary. If a constant merged return block is used, |
| 8307 | // `returnBlock` is rewritten to jump to it. If a non-constant return |
| 8308 | // block is used, `genReturnBB` is set to that block, and `genReturnLocal` |
| 8309 | // is set to the lclvar that it returns; morph will need to rewrite |
| 8310 | // `returnBlock` to set the local and jump to the return block in such |
| 8311 | // cases, which it will do after some key transformations like rewriting |
| 8312 | // tail calls and calls that return to hidden buffers. In either of these |
| 8313 | // cases, `fgReturnCount` and the merged return block's profile information |
| 8314 | // will be updated to reflect or anticipate the rewrite of `returnBlock`. |
| 8315 | // |
| 8316 | void Record(BasicBlock* returnBlock) |
| 8317 | { |
| 8318 | // Add this return to our tally |
| 8319 | unsigned oldReturnCount = comp->fgReturnCount++; |
| 8320 | |
| 8321 | if (!mergingReturns) |
| 8322 | { |
| 8323 | if (oldReturnCount < maxReturns) |
| 8324 | { |
| 8325 | // No need to merge just yet; simply record this return. |
| 8326 | returnBlocks[oldReturnCount] = returnBlock; |
| 8327 | return; |
| 8328 | } |
| 8329 | |
// We've reached our threshold
| 8331 | mergingReturns = true; |
| 8332 | |
| 8333 | // Merge any returns we've already identified |
| 8334 | for (unsigned i = 0, searchLimit = 0; i < oldReturnCount; ++i) |
| 8335 | { |
| 8336 | BasicBlock* mergedReturnBlock = Merge(returnBlocks[i], searchLimit); |
| 8337 | if (returnBlocks[searchLimit] == mergedReturnBlock) |
| 8338 | { |
| 8339 | // We've added a new block to the searchable set |
| 8340 | ++searchLimit; |
| 8341 | } |
| 8342 | } |
| 8343 | } |
| 8344 | |
| 8345 | // We have too many returns, so merge this one in. |
| 8346 | // Search limit is new return count minus one (to exclude this block). |
| 8347 | unsigned searchLimit = comp->fgReturnCount - 1; |
| 8348 | Merge(returnBlock, searchLimit); |
| 8349 | } |
| 8350 | |
| 8351 | //------------------------------------------------------------------------ |
| 8352 | // EagerCreate: Force creation of a non-constant merged return block `genReturnBB`. |
| 8353 | // |
| 8354 | // Return Value: |
| 8355 | // The newly-created block which returns `genReturnLocal`. |
| 8356 | // |
| 8357 | BasicBlock* EagerCreate() |
| 8358 | { |
| 8359 | mergingReturns = true; |
| 8360 | return Merge(nullptr, 0); |
| 8361 | } |
| 8362 | |
| 8363 | //------------------------------------------------------------------------ |
| 8364 | // PlaceReturns: Move any generated const return blocks to an appropriate |
| 8365 | // spot in the lexical block list. |
| 8366 | // |
| 8367 | // Notes: |
| 8368 | // The goal is to set things up favorably for a reasonable layout without |
| 8369 | // putting too much burden on fgReorderBlocks; in particular, since that |
| 8370 | // method doesn't (currently) shuffle non-profile, non-rare code to create |
| 8371 | // fall-through and reduce gotos, this method places each const return |
| 8372 | // block immediately after its last predecessor, so that the flow from |
| 8373 | // there to it can become fallthrough without requiring any motion to be |
| 8374 | // performed by fgReorderBlocks. |
| 8375 | // |
| 8376 | void PlaceReturns() |
| 8377 | { |
| 8378 | if (!mergingReturns) |
| 8379 | { |
| 8380 | // No returns generated => no returns to place. |
| 8381 | return; |
| 8382 | } |
| 8383 | |
| 8384 | for (unsigned index = 0; index < comp->fgReturnCount; ++index) |
| 8385 | { |
| 8386 | BasicBlock* returnBlock = returnBlocks[index]; |
| 8387 | BasicBlock* genReturnBlock = comp->genReturnBB; |
| 8388 | if (returnBlock == genReturnBlock) |
| 8389 | { |
| 8390 | continue; |
| 8391 | } |
| 8392 | |
| 8393 | BasicBlock* insertionPoint = insertionPoints[index]; |
| 8394 | assert(insertionPoint != nullptr); |
| 8395 | |
| 8396 | comp->fgUnlinkBlock(returnBlock); |
| 8397 | comp->fgMoveBlocksAfter(returnBlock, returnBlock, insertionPoint); |
| 8398 | // Treat the merged return block as belonging to the same EH region |
| 8399 | // as the insertion point block, to make sure we don't break up |
| 8400 | // EH regions; since returning a constant won't throw, this won't |
| 8401 | // affect program behavior. |
| 8402 | comp->fgExtendEHRegionAfter(insertionPoint); |
| 8403 | } |
| 8404 | } |
| 8405 | |
| 8406 | private: |
| 8407 | //------------------------------------------------------------------------ |
| 8408 | // CreateReturnBB: Create a basic block to serve as a merged return point, stored to |
| 8409 | // `returnBlocks` at the given index, and optionally returning the given constant. |
| 8410 | // |
| 8411 | // Arguments: |
| 8412 | // index - Index into `returnBlocks` to store the new block into. |
| 8413 | // returnConst - Constant that the new block should return; may be nullptr to |
| 8414 | // indicate that the new merged return is for the non-constant case, in which |
| 8415 | // case, if the method's return type is non-void, `comp->genReturnLocal` will |
| 8416 | // be initialized to a new local of the appropriate type, and the new block will |
| 8417 | // return it. |
| 8418 | // |
| 8419 | // Return Value: |
| 8420 | // The new merged return block. |
| 8421 | // |
| 8422 | BasicBlock* CreateReturnBB(unsigned index, GenTreeIntConCommon* returnConst = nullptr) |
| 8423 | { |
| 8424 | BasicBlock* newReturnBB = comp->fgNewBBinRegion(BBJ_RETURN); |
newReturnBB->bbRefs = 1; // bbRefs gets updated later; for now it should be 1
| 8426 | comp->fgReturnCount++; |
| 8427 | |
| 8428 | newReturnBB->bbFlags |= BBF_INTERNAL; |
| 8429 | |
| 8430 | noway_assert(newReturnBB->bbNext == nullptr); |
| 8431 | |
| 8432 | #ifdef DEBUG |
| 8433 | if (comp->verbose) |
| 8434 | { |
| 8435 | printf("\n newReturnBB [" FMT_BB "] created\n" , newReturnBB->bbNum); |
| 8436 | } |
| 8437 | #endif |
| 8438 | |
// Mark the block as having a (zero) profile weight and as run rarely,
// until we prove otherwise by merging other returns into this one.
| 8441 | newReturnBB->bbFlags |= (BBF_PROF_WEIGHT | BBF_RUN_RARELY); |
| 8442 | newReturnBB->bbWeight = 0; |
| 8443 | |
| 8444 | GenTree* returnExpr; |
| 8445 | |
| 8446 | if (returnConst != nullptr) |
| 8447 | { |
| 8448 | returnExpr = comp->gtNewOperNode(GT_RETURN, returnConst->gtType, returnConst); |
| 8449 | returnConstants[index] = returnConst->IntegralValue(); |
| 8450 | } |
| 8451 | else if (comp->compMethodHasRetVal()) |
| 8452 | { |
| 8453 | // There is a return value, so create a temp for it. Real returns will store the value in there and |
| 8454 | // it'll be reloaded by the single return. |
| 8455 | unsigned returnLocalNum = comp->lvaGrabTemp(true DEBUGARG("Single return block return value" )); |
| 8456 | comp->genReturnLocal = returnLocalNum; |
| 8457 | LclVarDsc& returnLocalDsc = comp->lvaTable[returnLocalNum]; |
| 8458 | |
| 8459 | if (comp->compMethodReturnsNativeScalarType()) |
| 8460 | { |
| 8461 | returnLocalDsc.lvType = genActualType(comp->info.compRetNativeType); |
| 8462 | } |
| 8463 | else if (comp->compMethodReturnsRetBufAddr()) |
| 8464 | { |
| 8465 | returnLocalDsc.lvType = TYP_BYREF; |
| 8466 | } |
| 8467 | else if (comp->compMethodReturnsMultiRegRetType()) |
| 8468 | { |
| 8469 | returnLocalDsc.lvType = TYP_STRUCT; |
| 8470 | comp->lvaSetStruct(returnLocalNum, comp->info.compMethodInfo->args.retTypeClass, true); |
| 8471 | returnLocalDsc.lvIsMultiRegRet = true; |
| 8472 | } |
| 8473 | else |
| 8474 | { |
| 8475 | assert(!"unreached" ); |
| 8476 | } |
| 8477 | |
| 8478 | if (varTypeIsFloating(returnLocalDsc.lvType)) |
| 8479 | { |
| 8480 | comp->compFloatingPointUsed = true; |
| 8481 | } |
| 8482 | |
| 8483 | #ifdef DEBUG |
| 8484 | // This temporary should not be converted to a double in stress mode, |
| 8485 | // because we introduce assigns to it after the stress conversion |
| 8486 | returnLocalDsc.lvKeepType = 1; |
| 8487 | #endif |
| 8488 | |
| 8489 | GenTree* retTemp = comp->gtNewLclvNode(returnLocalNum, returnLocalDsc.TypeGet()); |
| 8490 | |
| 8491 | // make sure copy prop ignores this node (make sure it always does a reload from the temp). |
| 8492 | retTemp->gtFlags |= GTF_DONT_CSE; |
| 8493 | returnExpr = comp->gtNewOperNode(GT_RETURN, retTemp->gtType, retTemp); |
| 8494 | } |
| 8495 | else |
| 8496 | { |
| 8497 | // return void |
| 8498 | noway_assert(comp->info.compRetType == TYP_VOID || varTypeIsStruct(comp->info.compRetType)); |
| 8499 | comp->genReturnLocal = BAD_VAR_NUM; |
| 8500 | |
| 8501 | returnExpr = new (comp, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); |
| 8502 | } |
| 8503 | |
| 8504 | // Add 'return' expression to the return block |
| 8505 | comp->fgInsertStmtAtEnd(newReturnBB, returnExpr); |
| 8506 | // Flag that this 'return' was generated by return merging so that subsequent |
// return block morphing will know to leave it alone.
| 8508 | returnExpr->gtFlags |= GTF_RET_MERGED; |
| 8509 | |
| 8510 | #ifdef DEBUG |
| 8511 | if (comp->verbose) |
| 8512 | { |
| 8513 | printf("\nmergeReturns statement tree " ); |
| 8514 | Compiler::printTreeID(returnExpr); |
| 8515 | printf(" added to genReturnBB %s\n" , newReturnBB->dspToString()); |
| 8516 | comp->gtDispTree(returnExpr); |
| 8517 | printf("\n" ); |
| 8518 | } |
| 8519 | #endif |
| 8520 | assert(index < maxReturns); |
| 8521 | returnBlocks[index] = newReturnBB; |
| 8522 | return newReturnBB; |
| 8523 | } |
| 8524 | |
| 8525 | //------------------------------------------------------------------------ |
| 8526 | // Merge: Find or create an appropriate merged return block for the given input block. |
| 8527 | // |
| 8528 | // Arguments: |
| 8529 | // returnBlock - Return block from the input program to find a merged return for. |
| 8530 | // May be nullptr to indicate that new block suitable for non-constant |
| 8531 | // returns should be generated but no existing block modified. |
| 8532 | // searchLimit - Blocks in `returnBlocks` up to but not including index `searchLimit` |
| 8533 | // will be checked to see if we already have an appropriate merged return |
| 8534 | // block for this case. If a new block must be created, it will be stored |
| 8535 | // to `returnBlocks` at index `searchLimit`. |
| 8536 | // |
| 8537 | // Return Value: |
| 8538 | // Merged return block suitable for handling this return value. May be newly-created |
| 8539 | // or pre-existing. |
| 8540 | // |
| 8541 | // Notes: |
| 8542 | // If a constant-valued merged return block is used, `returnBlock` will be rewritten to |
| 8543 | // jump to the merged return block and its `GT_RETURN` statement will be removed. If |
| 8544 | // a non-constant-valued merged return block is used, `genReturnBB` and `genReturnLocal` |
| 8545 | // will be set so that Morph can perform that rewrite, which it will do after some key |
| 8546 | // transformations like rewriting tail calls and calls that return to hidden buffers. |
| 8547 | // In either of these cases, `fgReturnCount` and the merged return block's profile |
| 8548 | // information will be updated to reflect or anticipate the rewrite of `returnBlock`. |
| 8549 | // |
| 8550 | BasicBlock* Merge(BasicBlock* returnBlock, unsigned searchLimit) |
| 8551 | { |
| 8552 | assert(mergingReturns); |
| 8553 | |
| 8554 | BasicBlock* mergedReturnBlock = nullptr; |
| 8555 | |
// Do not look for mergeable constant returns in debug codegen, as
| 8557 | // we may lose track of sequence points. |
| 8558 | if ((returnBlock != nullptr) && (maxReturns > 1) && !comp->opts.compDbgCode) |
| 8559 | { |
| 8560 | // Check to see if this is a constant return so that we can search |
| 8561 | // for and/or create a constant return block for it. |
| 8562 | |
| 8563 | GenTreeIntConCommon* retConst = GetReturnConst(returnBlock); |
| 8564 | if (retConst != nullptr) |
| 8565 | { |
| 8566 | // We have a constant. Now find or create a corresponding return block. |
| 8567 | |
| 8568 | unsigned index; |
| 8569 | BasicBlock* constReturnBlock = FindConstReturnBlock(retConst, searchLimit, &index); |
| 8570 | |
| 8571 | if (constReturnBlock == nullptr) |
| 8572 | { |
| 8573 | // We didn't find a const return block. See if we have space left |
| 8574 | // to make one. |
| 8575 | |
| 8576 | // We have already allocated `searchLimit` slots. |
| 8577 | unsigned slotsReserved = searchLimit; |
| 8578 | if (comp->genReturnBB == nullptr) |
| 8579 | { |
| 8580 | // We haven't made a non-const return yet, so we have to reserve |
| 8581 | // a slot for one. |
| 8582 | ++slotsReserved; |
| 8583 | } |
| 8584 | |
| 8585 | if (slotsReserved < maxReturns) |
| 8586 | { |
| 8587 | // We have enough space to allocate a slot for this constant. |
| 8588 | constReturnBlock = CreateReturnBB(searchLimit, retConst); |
| 8589 | } |
| 8590 | } |
| 8591 | |
| 8592 | if (constReturnBlock != nullptr) |
| 8593 | { |
| 8594 | // Found a constant merged return block. |
| 8595 | mergedReturnBlock = constReturnBlock; |
| 8596 | |
| 8597 | // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. |
| 8598 | assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); |
| 8599 | returnBlock->bbJumpKind = BBJ_ALWAYS; |
| 8600 | returnBlock->bbJumpDest = constReturnBlock; |
| 8601 | |
| 8602 | // Remove GT_RETURN since constReturnBlock returns the constant. |
| 8603 | assert(returnBlock->lastStmt()->gtStmtExpr->OperIs(GT_RETURN)); |
| 8604 | assert(returnBlock->lastStmt()->gtStmtExpr->gtGetOp1()->IsIntegralConst()); |
| 8605 | comp->fgRemoveStmt(returnBlock, returnBlock->lastStmt()); |
| 8606 | |
| 8607 | // Using 'returnBlock' as the insertion point for 'mergedReturnBlock' |
| 8608 | // will give it a chance to use fallthrough rather than BBJ_ALWAYS. |
| 8609 | // Resetting this after each merge ensures that any branches to the |
| 8610 | // merged return block are lexically forward. |
| 8611 | |
| 8612 | insertionPoints[index] = returnBlock; |
| 8613 | } |
| 8614 | } |
| 8615 | } |
| 8616 | |
| 8617 | if (mergedReturnBlock == nullptr) |
| 8618 | { |
| 8619 | // No constant return block for this return; use the general one. |
| 8620 | mergedReturnBlock = comp->genReturnBB; |
| 8621 | if (mergedReturnBlock == nullptr) |
| 8622 | { |
| 8623 | // No general merged return for this function yet; create one. |
| 8624 | // There had better still be room left in the array. |
| 8625 | assert(searchLimit < maxReturns); |
| 8626 | mergedReturnBlock = CreateReturnBB(searchLimit); |
| 8627 | comp->genReturnBB = mergedReturnBlock; |
| 8628 | // Downstream code expects the `genReturnBB` to always remain |
| 8629 | // once created, so that it can redirect flow edges to it. |
| 8630 | mergedReturnBlock->bbFlags |= BBF_DONT_REMOVE; |
| 8631 | } |
| 8632 | } |
| 8633 | |
| 8634 | if (returnBlock != nullptr) |
| 8635 | { |
| 8636 | // Propagate profile weight and related annotations to the merged block. |
| 8637 | // Return weight should never exceed entry weight, so cap it to avoid nonsensical |
| 8638 | // hot returns in synthetic profile settings. |
| 8639 | mergedReturnBlock->bbWeight = |
| 8640 | min(mergedReturnBlock->bbWeight + returnBlock->bbWeight, comp->fgFirstBB->bbWeight); |
| 8641 | if (!returnBlock->hasProfileWeight()) |
| 8642 | { |
| 8643 | mergedReturnBlock->bbFlags &= ~BBF_PROF_WEIGHT; |
| 8644 | } |
| 8645 | if (mergedReturnBlock->bbWeight > 0) |
| 8646 | { |
| 8647 | mergedReturnBlock->bbFlags &= ~BBF_RUN_RARELY; |
| 8648 | } |
| 8649 | |
| 8650 | // Update fgReturnCount to reflect or anticipate that `returnBlock` will no longer |
| 8651 | // be a return point. |
| 8652 | comp->fgReturnCount--; |
| 8653 | } |
| 8654 | |
| 8655 | return mergedReturnBlock; |
| 8656 | } |
| 8657 | |
| 8658 | //------------------------------------------------------------------------ |
| 8659 | // GetReturnConst: If the given block returns an integral constant, return the |
| 8660 | // GenTreeIntConCommon that represents the constant. |
| 8661 | // |
| 8662 | // Arguments: |
| 8663 | // returnBlock - Block whose return value is to be inspected. |
| 8664 | // |
| 8665 | // Return Value: |
| 8666 | // GenTreeIntCommon that is the argument of `returnBlock`'s `GT_RETURN` if |
| 8667 | // such exists; nullptr otherwise. |
| 8668 | // |
| 8669 | static GenTreeIntConCommon* GetReturnConst(BasicBlock* returnBlock) |
| 8670 | { |
| 8671 | GenTreeStmt* lastStmt = returnBlock->lastStmt(); |
| 8672 | if (lastStmt == nullptr) |
| 8673 | { |
| 8674 | return nullptr; |
| 8675 | } |
| 8676 | |
| 8677 | GenTree* lastExpr = lastStmt->gtStmtExpr; |
| 8678 | if (!lastExpr->OperIs(GT_RETURN)) |
| 8679 | { |
| 8680 | return nullptr; |
| 8681 | } |
| 8682 | |
| 8683 | GenTree* retExpr = lastExpr->gtGetOp1(); |
| 8684 | if ((retExpr == nullptr) || !retExpr->IsIntegralConst()) |
| 8685 | { |
| 8686 | return nullptr; |
| 8687 | } |
| 8688 | |
| 8689 | return retExpr->AsIntConCommon(); |
| 8690 | } |
| 8691 | |
| 8692 | //------------------------------------------------------------------------ |
| 8693 | // FindConstReturnBlock: Scan the already-created merged return blocks, up to `searchLimit`, |
| 8694 | // and return the one corresponding to the given const expression if it exists. |
| 8695 | // |
| 8696 | // Arguments: |
| 8697 | // constExpr - GenTreeIntCommon representing the constant return value we're |
| 8698 | // searching for. |
| 8699 | // searchLimit - Check `returnBlocks`/`returnConstants` up to but not including |
| 8700 | // this index. |
| 8701 | // index - [out] Index of return block in the `returnBlocks` array, if found; |
| 8702 | // searchLimit otherwise. |
| 8703 | // |
| 8704 | // Return Value: |
| 8705 | // A block that returns the same constant, if one is found; otherwise nullptr. |
| 8706 | // |
| 8707 | BasicBlock* FindConstReturnBlock(GenTreeIntConCommon* constExpr, unsigned searchLimit, unsigned* index) |
| 8708 | { |
| 8709 | INT64 constVal = constExpr->IntegralValue(); |
| 8710 | |
| 8711 | for (unsigned i = 0; i < searchLimit; ++i) |
| 8712 | { |
| 8713 | // Need to check both for matching const val and for genReturnBB |
| 8714 | // because genReturnBB is used for non-constant returns and its |
| 8715 | // corresponding entry in the returnConstants array is garbage. |
| 8716 | if (returnConstants[i] == constVal) |
| 8717 | { |
| 8718 | BasicBlock* returnBlock = returnBlocks[i]; |
| 8719 | |
| 8720 | if (returnBlock == comp->genReturnBB) |
| 8721 | { |
| 8722 | // This is the block used for non-constant returns, so |
| 8723 | // its returnConstants entry is just garbage; don't be |
| 8724 | // fooled. |
| 8725 | continue; |
| 8726 | } |
| 8727 | |
| 8728 | *index = i; |
| 8729 | return returnBlock; |
| 8730 | } |
| 8731 | } |
| 8732 | |
| 8733 | *index = searchLimit; |
| 8734 | return nullptr; |
| 8735 | } |
| 8736 | }; |
| 8737 | } |
| 8738 | |
| 8739 | /***************************************************************************** |
| 8740 | * |
| 8741 | * Add any internal blocks/trees we may need |
| 8742 | */ |
| 8743 | |
| 8744 | void Compiler::fgAddInternal() |
| 8745 | { |
| 8746 | noway_assert(!compIsForInlining()); |
| 8747 | |
| 8748 | // The backend requires a scratch BB into which it can safely insert a P/Invoke method prolog if one is |
| 8749 | // required. Create it here. |
| 8750 | if (info.compCallUnmanaged != 0) |
| 8751 | { |
| 8752 | fgEnsureFirstBBisScratch(); |
| 8753 | fgFirstBB->bbFlags |= BBF_DONT_REMOVE; |
| 8754 | } |
| 8755 | |
| 8756 | /* |
| 8757 | <BUGNUM> VSW441487 </BUGNUM> |
| 8758 | |
| 8759 | The "this" pointer is implicitly used in the following cases: |
| 8760 | 1. Locking of synchronized methods |
| 8761 | 2. Dictionary access of shared generics code |
| 8762 | 3. If a method has "catch(FooException<T>)", the EH code accesses "this" to determine T. |
| 8763 | 4. Initializing the type from generic methods which require precise cctor semantics |
| 8764 | 5. Verifier does special handling of "this" in the .ctor |
| 8765 | |
| 8766 | However, we might overwrite it with a "starg 0". |
| 8767 | In this case, we will redirect all "ldarg(a)/starg(a) 0" to a temp lvaTable[lvaArg0Var] |
| 8768 | */ |
| 8769 | |
| 8770 | if (!info.compIsStatic) |
| 8771 | { |
| 8772 | if (lvaArg0Var != info.compThisArg) |
| 8773 | { |
| 8774 | // When we're using the general encoder, we mark compThisArg address-taken to ensure that it is not |
| 8775 | // enregistered (since the decoder always reports a stack location for "this" for generics |
| 8776 | // context vars). |
| 8777 | bool lva0CopiedForGenericsCtxt; |
| 8778 | #ifndef JIT32_GCENCODER |
| 8779 | lva0CopiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); |
| 8780 | #else // JIT32_GCENCODER |
| 8781 | lva0CopiedForGenericsCtxt = false; |
| 8782 | #endif // JIT32_GCENCODER |
| 8783 | noway_assert(lva0CopiedForGenericsCtxt || !lvaTable[info.compThisArg].lvAddrExposed); |
| 8784 | noway_assert(!lvaTable[info.compThisArg].lvHasILStoreOp); |
| 8785 | noway_assert(lvaTable[lvaArg0Var].lvAddrExposed || lvaTable[lvaArg0Var].lvHasILStoreOp || |
| 8786 | lva0CopiedForGenericsCtxt); |
| 8787 | |
| 8788 | var_types thisType = lvaTable[info.compThisArg].TypeGet(); |
| 8789 | |
| 8790 | // Now assign the original input "this" to the temp |
| 8791 | |
| 8792 | GenTree* tree; |
| 8793 | |
| 8794 | tree = gtNewLclvNode(lvaArg0Var, thisType); |
| 8795 | |
| 8796 | tree = gtNewAssignNode(tree, // dst |
| 8797 | gtNewLclvNode(info.compThisArg, thisType) // src |
| 8798 | ); |
| 8799 | |
| 8800 | /* Create a new basic block and stick the assignment in it */ |
| 8801 | |
| 8802 | fgEnsureFirstBBisScratch(); |
| 8803 | |
| 8804 | fgInsertStmtAtEnd(fgFirstBB, tree); |
| 8805 | |
| 8806 | #ifdef DEBUG |
| 8807 | if (verbose) |
| 8808 | { |
| 8809 | printf("\nCopy \"this\" to lvaArg0Var in first basic block %s\n" , fgFirstBB->dspToString()); |
| 8810 | gtDispTree(tree); |
| 8811 | printf("\n" ); |
| 8812 | } |
| 8813 | #endif |
| 8814 | } |
| 8815 | } |
| 8816 | |
| 8817 | // Grab a temp for the security object. |
| 8818 | // (Note: opts.compDbgEnC currently also causes the security object to be generated. See Compiler::compCompile) |
| 8819 | if (opts.compNeedSecurityCheck) |
| 8820 | { |
| 8821 | noway_assert(lvaSecurityObject == BAD_VAR_NUM); |
| 8822 | lvaSecurityObject = lvaGrabTempWithImplicitUse(false DEBUGARG("security check" )); |
| 8823 | lvaTable[lvaSecurityObject].lvType = TYP_REF; |
| 8824 | } |
| 8825 | |
| 8826 | // Merge return points if required or beneficial |
| 8827 | MergedReturns merger(this); |
| 8828 | |
| 8829 | #if FEATURE_EH_FUNCLETS |
| 8830 | // Add the synchronized method enter/exit calls and try/finally protection. Note |
| 8831 | // that this must happen before the one BBJ_RETURN block is created below, so the |
| 8832 | // BBJ_RETURN block gets placed at the top-level, not within an EH region. (Otherwise, |
| 8833 | // we'd have to be really careful when creating the synchronized method try/finally |
| 8834 | // not to include the BBJ_RETURN block.) |
| 8835 | if ((info.compFlags & CORINFO_FLG_SYNCH) != 0) |
| 8836 | { |
| 8837 | fgAddSyncMethodEnterExit(); |
| 8838 | } |
| 8839 | #endif // FEATURE_EH_FUNCLETS |
| 8840 | |
| 8841 | // |
| 8842 | // We will generate just one epilog (return block) |
| 8843 | // when we are asked to generate enter/leave callbacks |
| 8844 | // or for methods with PInvoke |
| 8845 | // or for methods calling into unmanaged code |
| 8846 | // or for synchronized methods. |
| 8847 | // |
| 8848 | BasicBlock* lastBlockBeforeGenReturns = fgLastBB; |
| 8849 | if (compIsProfilerHookNeeded() || (info.compCallUnmanaged != 0) || opts.IsReversePInvoke() || |
| 8850 | ((info.compFlags & CORINFO_FLG_SYNCH) != 0)) |
| 8851 | { |
| 8852 | // We will generate only one return block |
| 8853 | // We will transform the BBJ_RETURN blocks |
| 8854 | // into jumps to the one return block |
| 8855 | // |
| 8856 | merger.SetMaxReturns(1); |
| 8857 | |
| 8858 | // Eagerly create the genReturnBB since the lowering of these constructs |
| 8859 | // will expect to find it. |
| 8860 | BasicBlock* mergedReturn = merger.EagerCreate(); |
| 8861 | assert(mergedReturn == genReturnBB); |
| 8862 | // Assume weight equal to entry weight for this BB. |
| 8863 | mergedReturn->bbFlags &= ~BBF_PROF_WEIGHT; |
| 8864 | mergedReturn->bbWeight = fgFirstBB->bbWeight; |
| 8865 | if (mergedReturn->bbWeight > 0) |
| 8866 | { |
| 8867 | mergedReturn->bbFlags &= ~BBF_RUN_RARELY; |
| 8868 | } |
| 8869 | } |
| 8870 | else |
| 8871 | { |
| 8872 | // |
| 8873 | // We are allowed to have multiple individual exits |
| 8874 | // However we can still decide to have a single return |
| 8875 | // |
| 8876 | if (compCodeOpt() == SMALL_CODE) |
| 8877 | { |
| 8878 | // For the Small_Code case we always generate a |
| 8879 | // single return block when we have multiple |
| 8880 | // return points |
| 8881 | // |
| 8882 | merger.SetMaxReturns(1); |
| 8883 | } |
| 8884 | else |
| 8885 | { |
| 8886 | merger.SetMaxReturns(MergedReturns::ReturnCountHardLimit); |
| 8887 | } |
| 8888 | } |
| 8889 | |
| 8890 | // Visit the BBJ_RETURN blocks and merge as necessary. |
| 8891 | |
| 8892 | for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) |
| 8893 | { |
| 8894 | if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) |
| 8895 | { |
| 8896 | merger.Record(block); |
| 8897 | } |
| 8898 | } |
| 8899 | |
| 8900 | merger.PlaceReturns(); |
| 8901 | |
| 8902 | if (info.compCallUnmanaged != 0) |
| 8903 | { |
| 8904 | // The P/Invoke helpers only require a frame variable, so only allocate the |
| 8905 | // TCB variable if we're not using them. |
| 8906 | if (!opts.ShouldUsePInvokeHelpers()) |
| 8907 | { |
| 8908 | info.compLvFrameListRoot = lvaGrabTemp(false DEBUGARG("Pinvoke FrameListRoot" )); |
| 8909 | LclVarDsc* rootVarDsc = &lvaTable[info.compLvFrameListRoot]; |
| 8910 | rootVarDsc->lvType = TYP_I_IMPL; |
| 8911 | rootVarDsc->lvImplicitlyReferenced = 1; |
| 8912 | } |
| 8913 | |
| 8914 | lvaInlinedPInvokeFrameVar = lvaGrabTempWithImplicitUse(false DEBUGARG("Pinvoke FrameVar" )); |
| 8915 | |
| 8916 | LclVarDsc* varDsc = &lvaTable[lvaInlinedPInvokeFrameVar]; |
| 8917 | varDsc->lvType = TYP_BLK; |
| 8918 | // Make room for the inlined frame. |
| 8919 | varDsc->lvExactSize = eeGetEEInfo()->inlinedCallFrameInfo.size; |
| 8920 | #if FEATURE_FIXED_OUT_ARGS |
| 8921 | // Grab and reserve space for TCB, Frame regs used in PInvoke epilog to pop the inlined frame. |
| 8922 | // See genPInvokeMethodEpilog() for use of the grabbed var. This is only necessary if we are |
| 8923 | // not using the P/Invoke helpers. |
| 8924 | if (!opts.ShouldUsePInvokeHelpers() && compJmpOpUsed) |
| 8925 | { |
| 8926 | lvaPInvokeFrameRegSaveVar = lvaGrabTempWithImplicitUse(false DEBUGARG("PInvokeFrameRegSave Var" )); |
| 8927 | varDsc = &lvaTable[lvaPInvokeFrameRegSaveVar]; |
| 8928 | varDsc->lvType = TYP_BLK; |
| 8929 | varDsc->lvExactSize = 2 * REGSIZE_BYTES; |
| 8930 | } |
| 8931 | #endif |
| 8932 | } |
| 8933 | |
| 8934 | // Do we need to insert a "JustMyCode" callback? |
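//
// Roughly speaking (a sketch of the qmark/colon built below, not the exact trees), the inserted
// code behaves like:
//
//     if (*dbgHandle != 0)                       // the "JustMyCode" flag the debugger toggles
//     {
//         CORINFO_HELP_DBG_IS_JUST_MY_CODE();    // notify the debugger
//     }
//
// where the flag is read either through the direct handle or through the indirection cell
// returned in pDbgHandle.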
| 8935 | |
| 8936 | CORINFO_JUST_MY_CODE_HANDLE* pDbgHandle = nullptr; |
| 8937 | CORINFO_JUST_MY_CODE_HANDLE dbgHandle = nullptr; |
| 8938 | if (opts.compDbgCode && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) |
| 8939 | { |
| 8940 | dbgHandle = info.compCompHnd->getJustMyCodeHandle(info.compMethodHnd, &pDbgHandle); |
| 8941 | } |
| 8942 | |
| 8943 | noway_assert(!dbgHandle || !pDbgHandle); |
| 8944 | |
| 8945 | if (dbgHandle || pDbgHandle) |
| 8946 | { |
| 8947 | GenTree* embNode = gtNewIconEmbHndNode(dbgHandle, pDbgHandle, GTF_ICON_TOKEN_HDL, info.compMethodHnd); |
| 8948 | GenTree* guardCheckVal = gtNewOperNode(GT_IND, TYP_INT, embNode); |
| 8949 | GenTree* guardCheckCond = gtNewOperNode(GT_EQ, TYP_INT, guardCheckVal, gtNewZeroConNode(TYP_INT)); |
| 8950 | |
| 8951 | // Create the callback which will yield the final answer |
| 8952 | |
| 8953 | GenTree* callback = gtNewHelperCallNode(CORINFO_HELP_DBG_IS_JUST_MY_CODE, TYP_VOID); |
| 8954 | callback = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), callback); |
| 8955 | |
| 8956 | // Stick the conditional call at the start of the method |
| 8957 | |
| 8958 | fgEnsureFirstBBisScratch(); |
| 8959 | fgInsertStmtAtEnd(fgFirstBB, gtNewQmarkNode(TYP_VOID, guardCheckCond, callback)); |
| 8960 | } |
| 8961 | |
| 8962 | /* Do we need to call out for security ? */ |
| 8963 | |
| 8964 | if (tiSecurityCalloutNeeded) |
| 8965 | { |
| 8966 | // We must have grabbed this local. |
| 8967 | noway_assert(opts.compNeedSecurityCheck); |
| 8968 | noway_assert(lvaSecurityObject != BAD_VAR_NUM); |
| 8969 | |
| 8970 | GenTree* tree; |
| 8971 | |
| 8972 | /* Insert the expression "call JIT_Security_Prolog(MethodHnd, &SecurityObject)" */ |
| 8973 | |
| 8974 | tree = gtNewIconEmbMethHndNode(info.compMethodHnd); |
| 8975 | |
| 8976 | tree = gtNewHelperCallNode(info.compCompHnd->getSecurityPrologHelper(info.compMethodHnd), TYP_VOID, |
| 8977 | gtNewArgList(tree, gtNewOperNode(GT_ADDR, TYP_BYREF, |
| 8978 | gtNewLclvNode(lvaSecurityObject, TYP_REF)))); |
| 8979 | |
| 8980 | /* Create a new basic block and stick the call in it */ |
| 8981 | |
| 8982 | fgEnsureFirstBBisScratch(); |
| 8983 | |
| 8984 | fgInsertStmtAtEnd(fgFirstBB, tree); |
| 8985 | |
| 8986 | #ifdef DEBUG |
| 8987 | if (verbose) |
| 8988 | { |
| 8989 | printf("\ntiSecurityCalloutNeeded - Add call JIT_Security_Prolog(%08p) statement " , |
| 8990 | dspPtr(info.compMethodHnd)); |
| 8991 | printTreeID(tree); |
| 8992 | printf(" in first basic block %s\n" , fgFirstBB->dspToString()); |
| 8993 | gtDispTree(tree); |
| 8994 | printf("\n" ); |
| 8995 | } |
| 8996 | #endif |
| 8997 | } |
| 8998 | |
| 8999 | #if !FEATURE_EH_FUNCLETS |
| 9000 | |
| 9001 | /* Is this a 'synchronized' method? */ |
| 9002 | |
| 9003 | if (info.compFlags & CORINFO_FLG_SYNCH) |
| 9004 | { |
GenTree* tree = nullptr;
| 9006 | |
| 9007 | /* Insert the expression "enterCrit(this)" or "enterCrit(handle)" */ |
| 9008 | |
| 9009 | if (info.compIsStatic) |
| 9010 | { |
| 9011 | tree = fgGetCritSectOfStaticMethod(); |
| 9012 | |
| 9013 | tree = gtNewHelperCallNode(CORINFO_HELP_MON_ENTER_STATIC, TYP_VOID, gtNewArgList(tree)); |
| 9014 | } |
| 9015 | else |
| 9016 | { |
| 9017 | noway_assert(lvaTable[info.compThisArg].lvType == TYP_REF); |
| 9018 | |
| 9019 | tree = gtNewLclvNode(info.compThisArg, TYP_REF); |
| 9020 | |
| 9021 | tree = gtNewHelperCallNode(CORINFO_HELP_MON_ENTER, TYP_VOID, gtNewArgList(tree)); |
| 9022 | } |
| 9023 | |
| 9024 | /* Create a new basic block and stick the call in it */ |
| 9025 | |
| 9026 | fgEnsureFirstBBisScratch(); |
| 9027 | |
| 9028 | fgInsertStmtAtEnd(fgFirstBB, tree); |
| 9029 | |
| 9030 | #ifdef DEBUG |
| 9031 | if (verbose) |
| 9032 | { |
| 9033 | printf("\nSynchronized method - Add enterCrit statement in first basic block %s\n" , |
| 9034 | fgFirstBB->dspToString()); |
| 9035 | gtDispTree(tree); |
| 9036 | printf("\n" ); |
| 9037 | } |
| 9038 | #endif |
| 9039 | |
| 9040 | /* We must be generating a single exit point for this to work */ |
| 9041 | |
| 9042 | noway_assert(genReturnBB != nullptr); |
| 9043 | |
| 9044 | /* Create the expression "exitCrit(this)" or "exitCrit(handle)" */ |
| 9045 | |
| 9046 | if (info.compIsStatic) |
| 9047 | { |
| 9048 | tree = fgGetCritSectOfStaticMethod(); |
| 9049 | |
| 9050 | tree = gtNewHelperCallNode(CORINFO_HELP_MON_EXIT_STATIC, TYP_VOID, gtNewArgList(tree)); |
| 9051 | } |
| 9052 | else |
| 9053 | { |
| 9054 | tree = gtNewLclvNode(info.compThisArg, TYP_REF); |
| 9055 | |
| 9056 | tree = gtNewHelperCallNode(CORINFO_HELP_MON_EXIT, TYP_VOID, gtNewArgList(tree)); |
| 9057 | } |
| 9058 | |
| 9059 | fgInsertStmtNearEnd(genReturnBB, tree); |
| 9060 | |
| 9061 | #ifdef DEBUG |
| 9062 | if (verbose) |
| 9063 | { |
| 9064 | printf("\nSynchronized method - Add exit expression " ); |
| 9065 | printTreeID(tree); |
| 9066 | printf("\n" ); |
| 9067 | } |
| 9068 | #endif |
| 9069 | |
| 9070 | // Reset cookies used to track start and end of the protected region in synchronized methods |
syncStartEmitCookie = nullptr;
syncEndEmitCookie = nullptr;
| 9073 | } |
| 9074 | |
| 9075 | #endif // !FEATURE_EH_FUNCLETS |
| 9076 | |
| 9077 | /* Do we need to do runtime call out to check the security? */ |
| 9078 | |
| 9079 | if (tiRuntimeCalloutNeeded) |
| 9080 | { |
| 9081 | GenTree* tree; |
| 9082 | |
| 9083 | /* Insert the expression "call verificationRuntimeCheck(MethodHnd)" */ |
| 9084 | |
| 9085 | tree = gtNewIconEmbMethHndNode(info.compMethodHnd); |
| 9086 | |
| 9087 | tree = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION_RUNTIME_CHECK, TYP_VOID, gtNewArgList(tree)); |
| 9088 | |
| 9089 | /* Create a new basic block and stick the call in it */ |
| 9090 | |
| 9091 | fgEnsureFirstBBisScratch(); |
| 9092 | |
| 9093 | fgInsertStmtAtEnd(fgFirstBB, tree); |
| 9094 | |
| 9095 | #ifdef DEBUG |
| 9096 | if (verbose) |
| 9097 | { |
| 9098 | printf("\ntiRuntimeCalloutNeeded - Call verificationRuntimeCheck(%08p) statement in first basic block %s\n" , |
| 9099 | dspPtr(info.compMethodHnd), fgFirstBB->dspToString()); |
| 9100 | gtDispTree(tree); |
| 9101 | printf("\n" ); |
| 9102 | } |
| 9103 | #endif |
| 9104 | } |
| 9105 | |
| 9106 | if (opts.IsReversePInvoke()) |
| 9107 | { |
| 9108 | fgAddReversePInvokeEnterExit(); |
| 9109 | } |
| 9110 | |
| 9111 | #ifdef DEBUG |
| 9112 | if (verbose) |
| 9113 | { |
| 9114 | printf("\n*************** After fgAddInternal()\n" ); |
| 9115 | fgDispBasicBlocks(); |
| 9116 | fgDispHandlerTab(); |
| 9117 | } |
| 9118 | #endif |
| 9119 | } |
| 9120 | |
| 9121 | /***************************************************************************** |
| 9122 | * |
| 9123 | * Create a new statement from tree and wire the links up. |
| 9124 | */ |
| 9125 | GenTreeStmt* Compiler::fgNewStmtFromTree(GenTree* tree, BasicBlock* block, IL_OFFSETX offs) |
| 9126 | { |
| 9127 | GenTreeStmt* stmt = gtNewStmt(tree, offs); |
| 9128 | |
| 9129 | if (fgStmtListThreaded) |
| 9130 | { |
| 9131 | gtSetStmtInfo(stmt); |
| 9132 | fgSetStmtSeq(stmt); |
| 9133 | } |
| 9134 | |
| 9135 | #if DEBUG |
| 9136 | if (block != nullptr) |
| 9137 | { |
| 9138 | fgDebugCheckNodeLinks(block, stmt); |
| 9139 | } |
| 9140 | #endif |
| 9141 | |
| 9142 | return stmt; |
| 9143 | } |
| 9144 | |
| 9145 | GenTreeStmt* Compiler::fgNewStmtFromTree(GenTree* tree) |
| 9146 | { |
| 9147 | return fgNewStmtFromTree(tree, nullptr, BAD_IL_OFFSET); |
| 9148 | } |
| 9149 | |
| 9150 | GenTreeStmt* Compiler::fgNewStmtFromTree(GenTree* tree, BasicBlock* block) |
| 9151 | { |
| 9152 | return fgNewStmtFromTree(tree, block, BAD_IL_OFFSET); |
| 9153 | } |
| 9154 | |
| 9155 | GenTreeStmt* Compiler::fgNewStmtFromTree(GenTree* tree, IL_OFFSETX offs) |
| 9156 | { |
| 9157 | return fgNewStmtFromTree(tree, nullptr, offs); |
| 9158 | } |
| 9159 | |
| 9160 | //------------------------------------------------------------------------ |
| 9161 | // fgFindBlockILOffset: Given a block, find the IL offset corresponding to the first statement |
| 9162 | // in the block with a legal IL offset. Skip any leading statements that have BAD_IL_OFFSET. |
| 9163 | // If no statement has an initialized statement offset (including the case where there are |
| 9164 | // no statements in the block), then return BAD_IL_OFFSET. This function is used when |
| 9165 | // blocks are split or modified, and we want to maintain the IL offset as much as possible |
| 9166 | // to preserve good debugging behavior. |
| 9167 | // |
| 9168 | // Arguments: |
| 9169 | // block - The block to check. |
| 9170 | // |
| 9171 | // Return Value: |
| 9172 | // The first good IL offset of a statement in the block, or BAD_IL_OFFSET if such an IL offset |
| 9173 | // cannot be found. |
| 9174 | // |
| 9175 | IL_OFFSET Compiler::fgFindBlockILOffset(BasicBlock* block) |
| 9176 | { |
| 9177 | // This function searches for IL offsets in statement nodes, so it can't be used in LIR. We |
| 9178 | // could have a similar function for LIR that searches for GT_IL_OFFSET nodes. |
| 9179 | assert(!block->IsLIR()); |
| 9180 | |
| 9181 | for (GenTree* stmt = block->bbTreeList; stmt != nullptr; stmt = stmt->gtNext) |
| 9182 | { |
| 9183 | assert(stmt->IsStatement()); |
| 9184 | if (stmt->gtStmt.gtStmtILoffsx != BAD_IL_OFFSET) |
| 9185 | { |
| 9186 | return jitGetILoffs(stmt->gtStmt.gtStmtILoffsx); |
| 9187 | } |
| 9188 | } |
| 9189 | |
| 9190 | return BAD_IL_OFFSET; |
| 9191 | } |
| 9192 | |
| 9193 | //------------------------------------------------------------------------------ |
| 9194 | // fgSplitBlockAtEnd - split the given block into two blocks. |
| 9195 | // All code in the block stays in the original block. |
| 9196 | // Control falls through from original to new block, and |
| 9197 | // the new block is returned. |
| 9198 | //------------------------------------------------------------------------------ |
| 9199 | BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) |
| 9200 | { |
| 9201 | // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. |
| 9202 | // (We need the successors of 'curr' to be correct when we do this.) |
| 9203 | BasicBlock* newBlock = bbNewBasicBlock(curr->bbJumpKind); |
| 9204 | |
| 9205 | // Start the new block with no refs. When we set the preds below, this will get updated correctly. |
| 9206 | newBlock->bbRefs = 0; |
| 9207 | |
| 9208 | // For each successor of the original block, set the new block as their predecessor. |
| 9209 | // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. |
| 9210 | // Without these arcs, a block 'b' may not be a member of succs(preds(b)) |
| 9211 | if (curr->bbJumpKind != BBJ_SWITCH) |
| 9212 | { |
| 9213 | unsigned numSuccs = curr->NumSucc(this); |
| 9214 | for (unsigned i = 0; i < numSuccs; i++) |
| 9215 | { |
| 9216 | BasicBlock* succ = curr->GetSucc(i, this); |
| 9217 | if (succ != newBlock) |
| 9218 | { |
| 9219 | JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n" , succ->bbNum, curr->bbNum, |
| 9220 | newBlock->bbNum); |
| 9221 | fgReplacePred(succ, curr, newBlock); |
| 9222 | } |
| 9223 | } |
| 9224 | |
| 9225 | newBlock->bbJumpDest = curr->bbJumpDest; |
| 9226 | curr->bbJumpDest = nullptr; |
| 9227 | } |
| 9228 | else |
| 9229 | { |
| 9230 | // In the case of a switch statement there's more complicated logic in order to wire up the predecessor lists |
| 9231 | // but fortunately there's an existing method that implements this functionality. |
| 9232 | newBlock->bbJumpSwt = curr->bbJumpSwt; |
| 9233 | |
| 9234 | fgChangeSwitchBlock(curr, newBlock); |
| 9235 | |
| 9236 | curr->bbJumpSwt = nullptr; |
| 9237 | } |
| 9238 | |
| 9239 | newBlock->inheritWeight(curr); |
| 9240 | |
| 9241 | // Set the new block's flags. Note that the new block isn't BBF_INTERNAL unless the old block is. |
| 9242 | newBlock->bbFlags = curr->bbFlags; |
| 9243 | |
| 9244 | // Remove flags that the new block can't have. |
| 9245 | newBlock->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_HAS_LABEL | |
| 9246 | BBF_JMP_TARGET | BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS); |
| 9247 | |
| 9248 | // Remove the GC safe bit on the new block. It seems clear that if we split 'curr' at the end, |
| 9249 | // such that all the code is left in 'curr', and 'newBlock' just gets the control flow, then |
| 9250 | // both 'curr' and 'newBlock' could accurately retain an existing GC safe bit. However, callers |
| 9251 | // use this function to split blocks in the middle, or at the beginning, and they don't seem to |
| 9252 | // be careful about updating this flag appropriately. So, removing the GC safe bit is simply |
| 9253 | // conservative: some functions might end up being fully interruptible that could be partially |
| 9254 | // interruptible if we exercised more care here. |
| 9255 | newBlock->bbFlags &= ~BBF_GC_SAFE_POINT; |
| 9256 | |
| 9257 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 9258 | newBlock->bbFlags &= ~(BBF_FINALLY_TARGET); |
| 9259 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 9260 | |
| 9261 | // The new block has no code, so we leave bbCodeOffs/bbCodeOffsEnd set to BAD_IL_OFFSET. If a caller |
| 9262 | // puts code in the block, then it needs to update these. |
| 9263 | |
| 9264 | // Insert the new block in the block list after the 'curr' block. |
| 9265 | fgInsertBBafter(curr, newBlock); |
| 9266 | fgExtendEHRegionAfter(curr); // The new block is in the same EH region as the old block. |
| 9267 | |
| 9268 | // Remove flags from the old block that are no longer possible. |
| 9269 | curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); |
| 9270 | |
| 9271 | // Default to fallthru, and add the arc for that. |
| 9272 | curr->bbJumpKind = BBJ_NONE; |
| 9273 | fgAddRefPred(newBlock, curr); |
| 9274 | |
| 9275 | return newBlock; |
| 9276 | } |
| 9277 | |
| 9278 | //------------------------------------------------------------------------------ |
| 9279 | // fgSplitBlockAfterStatement - Split the given block, with all code after |
| 9280 | // the given statement going into the second block. |
| 9281 | //------------------------------------------------------------------------------ |
| 9282 | BasicBlock* Compiler::fgSplitBlockAfterStatement(BasicBlock* curr, GenTree* stmt) |
| 9283 | { |
| 9284 | assert(!curr->IsLIR()); // No statements in LIR, so you can't use this function. |
| 9285 | |
| 9286 | BasicBlock* newBlock = fgSplitBlockAtEnd(curr); |
| 9287 | |
| 9288 | if (stmt) |
| 9289 | { |
| 9290 | newBlock->bbTreeList = stmt->gtNext; |
| 9291 | if (newBlock->bbTreeList) |
| 9292 | { |
| 9293 | newBlock->bbTreeList->gtPrev = curr->bbTreeList->gtPrev; |
| 9294 | } |
| 9295 | curr->bbTreeList->gtPrev = stmt; |
| 9296 | stmt->gtNext = nullptr; |
| 9297 | |
| 9298 | // Update the IL offsets of the blocks to match the split. |
| 9299 | |
| 9300 | assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); |
| 9301 | assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); |
| 9302 | |
| 9303 | // curr->bbCodeOffs remains the same |
| 9304 | newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; |
| 9305 | |
| 9306 | IL_OFFSET splitPointILOffset = fgFindBlockILOffset(newBlock); |
| 9307 | |
| 9308 | curr->bbCodeOffsEnd = splitPointILOffset; |
| 9309 | newBlock->bbCodeOffs = splitPointILOffset; |
| 9310 | } |
| 9311 | else |
| 9312 | { |
| 9313 | assert(curr->bbTreeList == nullptr); // if no tree was given then it better be an empty block |
| 9314 | } |
| 9315 | |
| 9316 | return newBlock; |
| 9317 | } |
| 9318 | |
| 9319 | //------------------------------------------------------------------------------ |
| 9320 | // fgSplitBlockAfterNode - Split the given block, with all code after |
| 9321 | // the given node going into the second block. |
| 9322 | // This function is only used in LIR. |
| 9323 | //------------------------------------------------------------------------------ |
| 9324 | BasicBlock* Compiler::fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node) |
| 9325 | { |
| 9326 | assert(curr->IsLIR()); |
| 9327 | |
| 9328 | BasicBlock* newBlock = fgSplitBlockAtEnd(curr); |
| 9329 | |
| 9330 | if (node != nullptr) |
| 9331 | { |
| 9332 | LIR::Range& currBBRange = LIR::AsRange(curr); |
| 9333 | |
| 9334 | if (node != currBBRange.LastNode()) |
| 9335 | { |
| 9336 | LIR::Range nodesToMove = currBBRange.Remove(node->gtNext, currBBRange.LastNode()); |
| 9337 | LIR::AsRange(newBlock).InsertAtBeginning(std::move(nodesToMove)); |
| 9338 | } |
| 9339 | |
| 9340 | // Update the IL offsets of the blocks to match the split. |
| 9341 | |
| 9342 | assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); |
| 9343 | assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); |
| 9344 | |
| 9345 | // curr->bbCodeOffs remains the same |
| 9346 | newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; |
| 9347 | |
| 9348 | // Search backwards from the end of the current block looking for the IL offset to use |
| 9349 | // for the end IL offset for the original block. |
| 9350 | IL_OFFSET splitPointILOffset = BAD_IL_OFFSET; |
| 9351 | LIR::Range::ReverseIterator riter; |
| 9352 | LIR::Range::ReverseIterator riterEnd; |
| 9353 | for (riter = currBBRange.rbegin(), riterEnd = currBBRange.rend(); riter != riterEnd; ++riter) |
| 9354 | { |
| 9355 | if ((*riter)->gtOper == GT_IL_OFFSET) |
| 9356 | { |
| 9357 | GenTreeStmt* stmt = (*riter)->AsStmt(); |
| 9358 | if (stmt->gtStmtILoffsx != BAD_IL_OFFSET) |
| 9359 | { |
| 9360 | splitPointILOffset = jitGetILoffs(stmt->gtStmtILoffsx); |
| 9361 | break; |
| 9362 | } |
| 9363 | } |
| 9364 | } |
| 9365 | |
| 9366 | curr->bbCodeOffsEnd = splitPointILOffset; |
| 9367 | |
| 9368 | // Also use this as the beginning offset of the next block. Presumably we could/should |
| 9369 | // look to see if the first node is a GT_IL_OFFSET node, and use that instead. |
| 9370 | newBlock->bbCodeOffs = splitPointILOffset; |
| 9371 | } |
| 9372 | else |
| 9373 | { |
| 9374 | assert(curr->bbTreeList == nullptr); // if no node was given then it better be an empty block |
| 9375 | } |
| 9376 | |
| 9377 | return newBlock; |
| 9378 | } |
| 9379 | |
| 9380 | //------------------------------------------------------------------------------ |
| 9381 | // fgSplitBlockAtBeginning - Split the given block into two blocks. |
| 9382 | // Control falls through from original to new block, |
| 9383 | // and the new block is returned. |
| 9384 | // All code in the original block goes into the new block |
| 9385 | //------------------------------------------------------------------------------ |
| 9386 | BasicBlock* Compiler::fgSplitBlockAtBeginning(BasicBlock* curr) |
| 9387 | { |
| 9388 | BasicBlock* newBlock = fgSplitBlockAtEnd(curr); |
| 9389 | |
| 9390 | newBlock->bbTreeList = curr->bbTreeList; |
| 9391 | curr->bbTreeList = nullptr; |
| 9392 | |
| 9393 | // The new block now has all the code, and the old block has none. Update the |
| 9394 | // IL offsets for the block to reflect this. |
| 9395 | |
| 9396 | newBlock->bbCodeOffs = curr->bbCodeOffs; |
| 9397 | newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; |
| 9398 | |
| 9399 | curr->bbCodeOffs = BAD_IL_OFFSET; |
| 9400 | curr->bbCodeOffsEnd = BAD_IL_OFFSET; |
| 9401 | |
| 9402 | return newBlock; |
| 9403 | } |
| 9404 | |
| 9405 | //------------------------------------------------------------------------ |
| 9406 | // fgSplitEdge: Splits the edge between a block 'curr' and its successor 'succ' by creating a new block |
| 9407 | // that replaces 'succ' as a successor of 'curr', and which branches unconditionally |
| 9408 | // to (or falls through to) 'succ'. Note that for a BBJ_COND block 'curr', |
| 9409 | // 'succ' might be the fall-through path or the branch path from 'curr'. |
| 9410 | // |
| 9411 | // Arguments: |
| 9412 | // curr - A block which branches conditionally to 'succ' |
| 9413 | // succ - The target block |
| 9414 | // |
| 9415 | // Return Value: |
| 9416 | // Returns a new block, that is a successor of 'curr' and which branches unconditionally to 'succ' |
| 9417 | // |
| 9418 | // Assumptions: |
| 9419 | // 'curr' must have a bbJumpKind of BBJ_COND or BBJ_SWITCH |
| 9420 | // |
| 9421 | // Notes: |
| 9422 | // The returned block is empty. |
| 9423 | |
| 9424 | BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) |
| 9425 | { |
| 9426 | assert(curr->bbJumpKind == BBJ_COND || curr->bbJumpKind == BBJ_SWITCH); |
| 9427 | assert(fgGetPredForBlock(succ, curr) != nullptr); |
| 9428 | |
| 9429 | BasicBlock* newBlock; |
| 9430 | if (succ == curr->bbNext) |
| 9431 | { |
| 9432 | // The successor is the fall-through path of a BBJ_COND, or |
| 9433 | // an immediately following block of a BBJ_SWITCH (which has |
| 9434 | // no fall-through path). For this case, simply insert a new |
| 9435 | // fall-through block after 'curr'. |
| 9436 | newBlock = fgNewBBafter(BBJ_NONE, curr, true /*extendRegion*/); |
| 9437 | } |
| 9438 | else |
| 9439 | { |
| 9440 | newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, curr->isRunRarely()); |
| 9441 | // The new block always jumps to 'succ' |
| 9442 | newBlock->bbJumpDest = succ; |
| 9443 | } |
| 9444 | newBlock->bbFlags |= (curr->bbFlags & succ->bbFlags & (BBF_BACKWARD_JUMP)); |
| 9445 | |
| 9446 | JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n" , curr->bbNum, succ->bbNum, |
| 9447 | newBlock->bbNum); |
| 9448 | |
| 9449 | if (curr->bbJumpKind == BBJ_COND) |
| 9450 | { |
| 9451 | fgReplacePred(succ, curr, newBlock); |
| 9452 | if (curr->bbJumpDest == succ) |
| 9453 | { |
| 9454 | // Now 'curr' jumps to newBlock |
| 9455 | curr->bbJumpDest = newBlock; |
| 9456 | newBlock->bbFlags |= BBF_JMP_TARGET; |
| 9457 | } |
| 9458 | fgAddRefPred(newBlock, curr); |
| 9459 | } |
| 9460 | else |
| 9461 | { |
| 9462 | assert(curr->bbJumpKind == BBJ_SWITCH); |
| 9463 | |
| 9464 | // newBlock replaces 'succ' in the switch. |
| 9465 | fgReplaceSwitchJumpTarget(curr, newBlock, succ); |
| 9466 | |
| 9467 | // And 'succ' has 'newBlock' as a new predecessor. |
| 9468 | fgAddRefPred(succ, newBlock); |
| 9469 | } |
| 9470 | |
| 9471 | // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the |
| 9472 | // branch 50% of the time. |
| 9473 | newBlock->inheritWeightPercentage(curr, 50); |
| 9474 | |
| 9475 | // The bbLiveIn and bbLiveOut are both equal to the bbLiveIn of 'succ' |
| 9476 | if (fgLocalVarLivenessDone) |
| 9477 | { |
| 9478 | VarSetOps::Assign(this, newBlock->bbLiveIn, succ->bbLiveIn); |
| 9479 | VarSetOps::Assign(this, newBlock->bbLiveOut, succ->bbLiveIn); |
| 9480 | } |
| 9481 | |
| 9482 | return newBlock; |
| 9483 | } |
| 9484 | |
| 9485 | /*****************************************************************************/ |
| 9486 | /*****************************************************************************/ |
| 9487 | |
| 9488 | void Compiler::fgFindOperOrder() |
| 9489 | { |
| 9490 | #ifdef DEBUG |
| 9491 | if (verbose) |
| 9492 | { |
| 9493 | printf("*************** In fgFindOperOrder()\n" ); |
| 9494 | } |
| 9495 | #endif |
| 9496 | |
| 9497 | BasicBlock* block; |
| 9498 | GenTreeStmt* stmt; |
| 9499 | |
| 9500 | /* Walk the basic blocks and for each statement determine |
| 9501 | * the evaluation order, cost, FP levels, etc... */ |
| 9502 | |
| 9503 | for (block = fgFirstBB; block; block = block->bbNext) |
| 9504 | { |
| 9505 | compCurBB = block; |
| 9506 | for (stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 9507 | { |
| 9508 | /* Recursively process the statement */ |
| 9509 | |
| 9510 | compCurStmt = stmt; |
| 9511 | gtSetStmtInfo(stmt); |
| 9512 | } |
| 9513 | } |
| 9514 | } |
| 9515 | |
| 9516 | //------------------------------------------------------------------------ |
| 9517 | // fgSimpleLowering: do full walk of all IR, lowering selected operations |
| 9518 | // and computing lvaOutgoingArgumentAreaSize. |
| 9519 | // |
| 9520 | // Notes: |
// Lowers GT_ARR_LENGTH, GT_ARR_BOUNDS_CHECK, GT_SIMD_CHK, and GT_HW_INTRINSIC_CHK
// (the latter two only when the corresponding features are enabled).
| 9522 | // |
| 9523 | // For target ABIs with fixed out args area, computes upper bound on |
| 9524 | // the size of this area from the calls in the IR. |
| 9525 | // |
| 9526 | // Outgoing arg area size is computed here because we want to run it |
| 9527 | // after optimization (in case calls are removed) and need to look at |
| 9528 | // all possible calls in the method. |
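//
// For example (sketch of the GT_ARR_LENGTH case handled below), an array-length node is
// rewritten in place into an explicit indirection off the array pointer:
//
//     before:   t1 = ARR_LENGTH(arr)
//     after:    t0 = CNS_INT(length-field offset, e.g. OFFSETOF__CORINFO_Array__length)
//               t2 = ADD(arr, t0)
//               t1 = IND(t2)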
| 9529 | |
| 9530 | void Compiler::fgSimpleLowering() |
| 9531 | { |
| 9532 | #if FEATURE_FIXED_OUT_ARGS |
| 9533 | unsigned outgoingArgSpaceSize = 0; |
| 9534 | #endif // FEATURE_FIXED_OUT_ARGS |
| 9535 | |
| 9536 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 9537 | { |
| 9538 | // Walk the statement trees in this basic block. |
| 9539 | compCurBB = block; // Used in fgRngChkTarget. |
| 9540 | |
| 9541 | LIR::Range& range = LIR::AsRange(block); |
| 9542 | for (GenTree* tree : range) |
| 9543 | { |
| 9544 | switch (tree->OperGet()) |
| 9545 | { |
| 9546 | case GT_ARR_LENGTH: |
| 9547 | { |
| 9548 | GenTreeArrLen* arrLen = tree->AsArrLen(); |
| 9549 | GenTree* arr = arrLen->gtArrLen.ArrRef(); |
| 9550 | GenTree* add; |
| 9551 | GenTree* con; |
| 9552 | |
| 9553 | /* Create the expression "*(array_addr + ArrLenOffs)" */ |
| 9554 | |
| 9555 | noway_assert(arr->gtNext == tree); |
| 9556 | |
| 9557 | noway_assert(arrLen->ArrLenOffset() == OFFSETOF__CORINFO_Array__length || |
| 9558 | arrLen->ArrLenOffset() == OFFSETOF__CORINFO_String__stringLen); |
| 9559 | |
| 9560 | if ((arr->gtOper == GT_CNS_INT) && (arr->gtIntCon.gtIconVal == 0)) |
| 9561 | { |
| 9562 | // If the array is NULL, then we should get a NULL reference |
| 9563 | // exception when computing its length. We need to maintain |
| 9564 | // an invariant where there is no sum of two constants node, so |
| 9565 | // let's simply return an indirection of NULL. |
| 9566 | |
| 9567 | add = arr; |
| 9568 | } |
| 9569 | else |
| 9570 | { |
| 9571 | con = gtNewIconNode(arrLen->ArrLenOffset(), TYP_I_IMPL); |
| 9572 | add = gtNewOperNode(GT_ADD, TYP_REF, arr, con); |
| 9573 | |
| 9574 | range.InsertAfter(arr, con, add); |
| 9575 | } |
| 9576 | |
| 9577 | // Change to a GT_IND. |
| 9578 | tree->ChangeOperUnchecked(GT_IND); |
| 9579 | |
| 9580 | tree->gtOp.gtOp1 = add; |
| 9581 | break; |
| 9582 | } |
| 9583 | |
| 9584 | case GT_ARR_BOUNDS_CHECK: |
| 9585 | #ifdef FEATURE_SIMD |
| 9586 | case GT_SIMD_CHK: |
| 9587 | #endif // FEATURE_SIMD |
| 9588 | #ifdef FEATURE_HW_INTRINSICS |
| 9589 | case GT_HW_INTRINSIC_CHK: |
| 9590 | #endif // FEATURE_HW_INTRINSICS |
| 9591 | { |
| 9592 | // Add in a call to an error routine. |
| 9593 | fgSetRngChkTarget(tree, false); |
| 9594 | break; |
| 9595 | } |
| 9596 | |
| 9597 | #if FEATURE_FIXED_OUT_ARGS |
| 9598 | case GT_CALL: |
| 9599 | { |
| 9600 | GenTreeCall* call = tree->AsCall(); |
| 9601 | // Fast tail calls use the caller-supplied scratch |
| 9602 | // space so have no impact on this method's outgoing arg size. |
| 9603 | if (!call->IsFastTailCall()) |
| 9604 | { |
| 9605 | // Update outgoing arg size to handle this call |
| 9606 | const unsigned thisCallOutAreaSize = call->fgArgInfo->GetOutArgSize(); |
| 9607 | assert(thisCallOutAreaSize >= MIN_ARG_AREA_FOR_CALL); |
| 9608 | |
| 9609 | if (thisCallOutAreaSize > outgoingArgSpaceSize) |
| 9610 | { |
| 9611 | outgoingArgSpaceSize = thisCallOutAreaSize; |
| 9612 | JITDUMP("Bumping outgoingArgSpaceSize to %u for call [%06d]\n" , outgoingArgSpaceSize, |
| 9613 | dspTreeID(tree)); |
| 9614 | } |
| 9615 | else |
| 9616 | { |
| 9617 | JITDUMP("outgoingArgSpaceSize %u sufficient for call [%06d], which needs %u\n" , |
| 9618 | outgoingArgSpaceSize, dspTreeID(tree), thisCallOutAreaSize); |
| 9619 | } |
| 9620 | } |
| 9621 | else |
| 9622 | { |
| 9623 | JITDUMP("outgoingArgSpaceSize not impacted by fast tail call [%06d]\n" , dspTreeID(tree)); |
| 9624 | } |
| 9625 | break; |
| 9626 | } |
| 9627 | #endif // FEATURE_FIXED_OUT_ARGS |
| 9628 | |
| 9629 | default: |
| 9630 | { |
| 9631 | // No other operators need processing. |
| 9632 | break; |
| 9633 | } |
| 9634 | } // switch on oper |
| 9635 | } // foreach tree |
| 9636 | } // foreach BB |
| 9637 | |
| 9638 | #if FEATURE_FIXED_OUT_ARGS |
| 9639 | // Finish computing the outgoing args area size |
| 9640 | // |
| 9641 | // Need to make sure the MIN_ARG_AREA_FOR_CALL space is added to the frame if: |
// 1. there are calls to THROW_HELPER methods.
| 9643 | // 2. we are generating profiling Enter/Leave/TailCall hooks. This will ensure |
| 9644 | // that even methods without any calls will have outgoing arg area space allocated. |
| 9645 | // |
// An example of these two cases is Windows Amd64, where the ABI requires 4 slots for
| 9647 | // the outgoing arg space if the method makes any calls. |
| 9648 | if (outgoingArgSpaceSize < MIN_ARG_AREA_FOR_CALL) |
| 9649 | { |
| 9650 | if (compUsesThrowHelper || compIsProfilerHookNeeded()) |
| 9651 | { |
| 9652 | outgoingArgSpaceSize = MIN_ARG_AREA_FOR_CALL; |
| 9653 | JITDUMP("Bumping outgoingArgSpaceSize to %u for throw helper or profile hook" , outgoingArgSpaceSize); |
| 9654 | } |
| 9655 | } |
| 9656 | |
| 9657 | // If a function has localloc, we will need to move the outgoing arg space when the |
| 9658 | // localloc happens. When we do this, we need to maintain stack alignment. To avoid |
| 9659 | // leaving alignment-related holes when doing this move, make sure the outgoing |
| 9660 | // argument space size is a multiple of the stack alignment by aligning up to the next |
| 9661 | // stack alignment boundary. |
| 9662 | if (compLocallocUsed) |
| 9663 | { |
| 9664 | outgoingArgSpaceSize = roundUp(outgoingArgSpaceSize, STACK_ALIGN); |
| 9665 | JITDUMP("Bumping outgoingArgSpaceSize to %u for localloc" , outgoingArgSpaceSize); |
| 9666 | } |
| 9667 | |
| 9668 | // Publish the final value and mark it as read only so any update |
| 9669 | // attempt later will cause an assert. |
| 9670 | lvaOutgoingArgSpaceSize = outgoingArgSpaceSize; |
| 9671 | lvaOutgoingArgSpaceSize.MarkAsReadOnly(); |
| 9672 | |
| 9673 | #endif // FEATURE_FIXED_OUT_ARGS |
| 9674 | |
| 9675 | #ifdef DEBUG |
| 9676 | if (verbose && fgRngChkThrowAdded) |
| 9677 | { |
| 9678 | printf("\nAfter fgSimpleLowering() added some RngChk throw blocks" ); |
| 9679 | fgDispBasicBlocks(); |
| 9680 | fgDispHandlerTab(); |
| 9681 | printf("\n" ); |
| 9682 | } |
| 9683 | #endif |
| 9684 | } |
| 9685 | |
| 9686 | VARSET_VALRET_TP Compiler::fgGetVarBits(GenTree* tree) |
| 9687 | { |
| 9688 | VARSET_TP varBits(VarSetOps::MakeEmpty(this)); |
| 9689 | |
| 9690 | assert(tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_LCL_FLD); |
| 9691 | |
| 9692 | unsigned int lclNum = tree->gtLclVarCommon.gtLclNum; |
| 9693 | LclVarDsc* varDsc = lvaTable + lclNum; |
| 9694 | if (varDsc->lvTracked) |
| 9695 | { |
| 9696 | VarSetOps::AddElemD(this, varBits, varDsc->lvVarIndex); |
| 9697 | } |
| 9698 | // We have to check type of root tree, not Local Var descriptor because |
| 9699 | // for legacy backend we promote TYP_STRUCT to TYP_INT if it is an unused or |
| 9700 | // independently promoted non-argument struct local. |
| 9701 | // For more details see Compiler::raAssignVars() method. |
| 9702 | else if (tree->gtType == TYP_STRUCT && varDsc->lvPromoted) |
| 9703 | { |
| 9704 | assert(varDsc->lvType == TYP_STRUCT); |
| 9705 | for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) |
| 9706 | { |
| 9707 | noway_assert(lvaTable[i].lvIsStructField); |
| 9708 | if (lvaTable[i].lvTracked) |
| 9709 | { |
| 9710 | unsigned varIndex = lvaTable[i].lvVarIndex; |
| 9711 | noway_assert(varIndex < lvaTrackedCount); |
| 9712 | VarSetOps::AddElemD(this, varBits, varIndex); |
| 9713 | } |
| 9714 | } |
| 9715 | } |
| 9716 | return varBits; |
| 9717 | } |
| 9718 | |
| 9719 | /***************************************************************************** |
| 9720 | * |
| 9721 | * Find and remove any basic blocks that are useless (e.g. they have not been |
| 9722 | * imported because they are not reachable, or they have been optimized away). |
| 9723 | */ |
| 9724 | |
| 9725 | void Compiler::fgRemoveEmptyBlocks() |
| 9726 | { |
| 9727 | BasicBlock* cur; |
| 9728 | BasicBlock* nxt; |
| 9729 | |
| 9730 | /* If we remove any blocks, we'll have to do additional work */ |
| 9731 | |
| 9732 | unsigned removedBlks = 0; |
| 9733 | |
| 9734 | for (cur = fgFirstBB; cur != nullptr; cur = nxt) |
| 9735 | { |
| 9736 | /* Get hold of the next block (in case we delete 'cur') */ |
| 9737 | |
| 9738 | nxt = cur->bbNext; |
| 9739 | |
| 9740 | /* Should this block be removed? */ |
| 9741 | |
| 9742 | if (!(cur->bbFlags & BBF_IMPORTED)) |
| 9743 | { |
| 9744 | noway_assert(cur->isEmpty()); |
| 9745 | |
| 9746 | if (ehCanDeleteEmptyBlock(cur)) |
| 9747 | { |
| 9748 | /* Mark the block as removed */ |
| 9749 | |
| 9750 | cur->bbFlags |= BBF_REMOVED; |
| 9751 | |
| 9752 | /* Remember that we've removed a block from the list */ |
| 9753 | |
| 9754 | removedBlks++; |
| 9755 | |
| 9756 | #ifdef DEBUG |
| 9757 | if (verbose) |
| 9758 | { |
| 9759 | printf(FMT_BB " was not imported, marked as removed (%d)\n" , cur->bbNum, removedBlks); |
| 9760 | } |
| 9761 | #endif // DEBUG |
| 9762 | |
| 9763 | /* Drop the block from the list */ |
| 9764 | |
| 9765 | fgUnlinkBlock(cur); |
| 9766 | } |
| 9767 | else |
| 9768 | { |
| 9769 | // We were prevented from deleting this block by EH normalization. Mark the block as imported. |
| 9770 | cur->bbFlags |= BBF_IMPORTED; |
| 9771 | } |
| 9772 | } |
| 9773 | } |
| 9774 | |
| 9775 | /* If no blocks were removed, we're done */ |
| 9776 | |
| 9777 | if (removedBlks == 0) |
| 9778 | { |
| 9779 | return; |
| 9780 | } |
| 9781 | |
| 9782 | /* Update all references in the exception handler table. |
| 9783 | * Mark the new blocks as non-removable. |
| 9784 | * |
| 9785 | * We may have made the entire try block unreachable. |
| 9786 | * Check for this case and remove the entry from the EH table. |
| 9787 | */ |
| 9788 | |
| 9789 | unsigned XTnum; |
| 9790 | EHblkDsc* HBtab; |
| 9791 | INDEBUG(unsigned delCnt = 0;) |
| 9792 | |
| 9793 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 9794 | { |
| 9795 | AGAIN: |
| 9796 | /* If the beginning of the try block was not imported, we |
| 9797 | * need to remove the entry from the EH table. */ |
| 9798 | |
| 9799 | if (HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) |
| 9800 | { |
| 9801 | noway_assert(!(HBtab->ebdTryBeg->bbFlags & BBF_IMPORTED)); |
| 9802 | #ifdef DEBUG |
| 9803 | if (verbose) |
| 9804 | { |
| 9805 | printf("Beginning of try block (" FMT_BB ") not imported " |
| 9806 | "- remove index #%u from the EH table\n" , |
| 9807 | HBtab->ebdTryBeg->bbNum, XTnum + delCnt); |
| 9808 | } |
| 9809 | delCnt++; |
| 9810 | #endif // DEBUG |
| 9811 | |
| 9812 | fgRemoveEHTableEntry(XTnum); |
| 9813 | |
| 9814 | if (XTnum < compHndBBtabCount) |
| 9815 | { |
| 9816 | // There are more entries left to process, so do more. Note that |
| 9817 | // HBtab now points to the next entry, that we copied down to the |
| 9818 | // current slot. XTnum also stays the same. |
| 9819 | goto AGAIN; |
| 9820 | } |
| 9821 | |
| 9822 | break; // no more entries (we deleted the last one), so exit the loop |
| 9823 | } |
| 9824 | |
| 9825 | /* At this point we know we have a valid try block */ |
| 9826 | |
| 9827 | #ifdef DEBUG |
| 9828 | assert(HBtab->ebdTryBeg->bbFlags & BBF_IMPORTED); |
| 9829 | assert(HBtab->ebdTryBeg->bbFlags & BBF_DONT_REMOVE); |
| 9830 | |
| 9831 | assert(HBtab->ebdHndBeg->bbFlags & BBF_IMPORTED); |
| 9832 | assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE); |
| 9833 | |
| 9834 | if (HBtab->HasFilter()) |
| 9835 | { |
| 9836 | assert(HBtab->ebdFilter->bbFlags & BBF_IMPORTED); |
| 9837 | assert(HBtab->ebdFilter->bbFlags & BBF_DONT_REMOVE); |
| 9838 | } |
| 9839 | #endif // DEBUG |
| 9840 | |
| 9841 | fgSkipRmvdBlocks(HBtab); |
| 9842 | } /* end of the for loop over XTnum */ |
| 9843 | |
| 9844 | // Renumber the basic blocks |
| 9845 | JITDUMP("\nRenumbering the basic blocks for fgRemoveEmptyBlocks\n" ); |
| 9846 | fgRenumberBlocks(); |
| 9847 | |
| 9848 | #ifdef DEBUG |
| 9849 | fgVerifyHandlerTab(); |
| 9850 | #endif // DEBUG |
| 9851 | } |
| 9852 | |
| 9853 | /***************************************************************************** |
| 9854 | * |
| 9855 | * Remove a useless statement from a basic block. |
| 9856 | * |
| 9857 | */ |
| 9858 | |
| 9859 | void Compiler::fgRemoveStmt(BasicBlock* block, GenTree* node) |
| 9860 | { |
| 9861 | noway_assert(node); |
| 9862 | assert(fgOrder == FGOrderTree); |
| 9863 | |
| 9864 | GenTreeStmt* tree = block->firstStmt(); |
| 9865 | GenTreeStmt* stmt = node->AsStmt(); |
| 9866 | |
| 9867 | #ifdef DEBUG |
| 9868 | if (verbose && |
| 9869 | stmt->gtStmtExpr->gtOper != GT_NOP) // Don't print if it is a GT_NOP. Too much noise from the inliner. |
| 9870 | { |
| 9871 | printf("\nRemoving statement " ); |
| 9872 | printTreeID(stmt); |
| 9873 | printf(" in " FMT_BB " as useless:\n" , block->bbNum); |
| 9874 | gtDispTree(stmt); |
| 9875 | } |
| 9876 | #endif // DEBUG |
| 9877 | |
| 9878 | if (opts.compDbgCode && stmt->gtPrev != stmt && stmt->gtStmtILoffsx != BAD_IL_OFFSET) |
| 9879 | { |
/* TODO: For debuggable code, should we remove significant
statement boundaries, or should we leave a GT_NO_OP in their place? */
| 9882 | } |
| 9883 | |
| 9884 | GenTreeStmt* firstStmt = block->firstStmt(); |
| 9885 | if (firstStmt == stmt) // Is it the first statement in the list? |
| 9886 | { |
| 9887 | if (firstStmt->gtNext == nullptr) |
| 9888 | { |
| 9889 | assert(firstStmt == block->lastStmt()); |
| 9890 | |
| 9891 | /* this is the only statement - basic block becomes empty */ |
| 9892 | block->bbTreeList = nullptr; |
| 9893 | } |
| 9894 | else |
| 9895 | { |
| 9896 | block->bbTreeList = tree->gtNext; |
| 9897 | block->bbTreeList->gtPrev = tree->gtPrev; |
| 9898 | } |
| 9899 | } |
| 9900 | else if (stmt == block->lastStmt()) // Is it the last statement in the list? |
| 9901 | { |
| 9902 | stmt->gtPrev->gtNext = nullptr; |
| 9903 | block->bbTreeList->gtPrev = stmt->gtPrev; |
| 9904 | } |
| 9905 | else // The statement is in the middle. |
| 9906 | { |
| 9907 | assert(stmt->gtPrevStmt != nullptr && stmt->gtNext != nullptr); |
| 9908 | |
| 9909 | tree = stmt->gtPrevStmt; |
| 9910 | |
| 9911 | tree->gtNext = stmt->gtNext; |
| 9912 | stmt->gtNext->gtPrev = tree; |
| 9913 | } |
| 9914 | |
| 9915 | noway_assert(!optValnumCSE_phase); |
| 9916 | |
| 9917 | fgStmtRemoved = true; |
| 9918 | |
| 9919 | #ifdef DEBUG |
| 9920 | if (verbose) |
| 9921 | { |
| 9922 | if (block->bbTreeList == nullptr) |
| 9923 | { |
| 9924 | printf("\n" FMT_BB " becomes empty" , block->bbNum); |
| 9925 | } |
| 9926 | printf("\n" ); |
| 9927 | } |
| 9928 | #endif // DEBUG |
| 9929 | } |
| 9930 | |
| 9931 | /******************************************************************************/ |
| 9932 | // Returns true if the operator is involved in control-flow |
| 9933 | // TODO-Cleanup: Move this into genTreeKinds in genTree.h |
| 9934 | |
| 9935 | inline bool OperIsControlFlow(genTreeOps oper) |
| 9936 | { |
| 9937 | switch (oper) |
| 9938 | { |
| 9939 | case GT_JTRUE: |
| 9940 | case GT_JCMP: |
| 9941 | case GT_JCC: |
| 9942 | case GT_SWITCH: |
| 9943 | case GT_LABEL: |
| 9944 | |
| 9945 | case GT_CALL: |
| 9946 | case GT_JMP: |
| 9947 | |
| 9948 | case GT_RETURN: |
| 9949 | case GT_RETFILT: |
| 9950 | #if !FEATURE_EH_FUNCLETS |
| 9951 | case GT_END_LFIN: |
| 9952 | #endif // !FEATURE_EH_FUNCLETS |
| 9953 | return true; |
| 9954 | |
| 9955 | default: |
| 9956 | return false; |
| 9957 | } |
| 9958 | } |
| 9959 | |
| 9960 | /****************************************************************************** |
| 9961 | * Tries to throw away a stmt. The statement can be anywhere in block->bbTreeList. |
| 9962 | * Returns true if it did remove the statement. |
| 9963 | */ |
| 9964 | |
| 9965 | bool Compiler::fgCheckRemoveStmt(BasicBlock* block, GenTree* node) |
| 9966 | { |
| 9967 | if (opts.compDbgCode) |
| 9968 | { |
| 9969 | return false; |
| 9970 | } |
| 9971 | |
| 9972 | GenTreeStmt* stmt = node->AsStmt(); |
| 9973 | |
| 9974 | GenTree* tree = stmt->gtStmtExpr; |
| 9975 | genTreeOps oper = tree->OperGet(); |
| 9976 | |
| 9977 | if (OperIsControlFlow(oper) || GenTree::OperIsHWIntrinsic(oper) || oper == GT_NO_OP) |
| 9978 | { |
| 9979 | return false; |
| 9980 | } |
| 9981 | |
| 9982 | // TODO: Use a recursive version of gtNodeHasSideEffects() |
| 9983 | if (tree->gtFlags & GTF_SIDE_EFFECT) |
| 9984 | { |
| 9985 | return false; |
| 9986 | } |
| 9987 | |
| 9988 | fgRemoveStmt(block, stmt); |
| 9989 | return true; |
| 9990 | } |
| 9991 | |
| 9992 | /**************************************************************************************************** |
| 9993 | * |
| 9994 | * |
| 9995 | */ |
| 9996 | bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) |
| 9997 | { |
| 9998 | if ((block == nullptr) || (bNext == nullptr)) |
| 9999 | { |
| 10000 | return false; |
| 10001 | } |
| 10002 | |
| 10003 | noway_assert(block->bbNext == bNext); |
| 10004 | |
| 10005 | if (block->bbJumpKind != BBJ_NONE) |
| 10006 | { |
| 10007 | return false; |
| 10008 | } |
| 10009 | |
| 10010 | // If the next block has multiple incoming edges, we can still compact if the first block is empty. |
| 10011 | // However, not if it is the beginning of a handler. |
| 10012 | if (bNext->countOfInEdges() != 1 && |
| 10013 | (!block->isEmpty() || (block->bbFlags & BBF_FUNCLET_BEG) || (block->bbCatchTyp != BBCT_NONE))) |
| 10014 | { |
| 10015 | return false; |
| 10016 | } |
| 10017 | |
| 10018 | if (bNext->bbFlags & BBF_DONT_REMOVE) |
| 10019 | { |
| 10020 | return false; |
| 10021 | } |
| 10022 | |
| 10023 | // Don't compact the first block if it was specially created as a scratch block. |
| 10024 | if (fgBBisScratch(block)) |
| 10025 | { |
| 10026 | return false; |
| 10027 | } |
| 10028 | |
| 10029 | #if defined(_TARGET_ARM_) |
| 10030 | // We can't compact a finally target block, as we need to generate special code for such blocks during code |
| 10031 | // generation |
| 10032 | if ((bNext->bbFlags & BBF_FINALLY_TARGET) != 0) |
| 10033 | return false; |
| 10034 | #endif |
| 10035 | |
| 10036 | // We don't want to compact blocks that are in different Hot/Cold regions |
| 10037 | // |
| 10038 | if (fgInDifferentRegions(block, bNext)) |
| 10039 | { |
| 10040 | return false; |
| 10041 | } |
| 10042 | |
| 10043 | // We cannot compact two blocks in different EH regions. |
| 10044 | // |
| 10045 | if (fgCanRelocateEHRegions) |
| 10046 | { |
| 10047 | if (!BasicBlock::sameEHRegion(block, bNext)) |
| 10048 | { |
| 10049 | return false; |
| 10050 | } |
| 10051 | } |
// If there is a switch predecessor, don't bother, because we'd have to update the switch
// unique successor sets as well (if they are valid).
| 10054 | for (flowList* pred = bNext->bbPreds; pred; pred = pred->flNext) |
| 10055 | { |
| 10056 | if (pred->flBlock->bbJumpKind == BBJ_SWITCH) |
| 10057 | { |
| 10058 | return false; |
| 10059 | } |
| 10060 | } |
| 10061 | |
| 10062 | return true; |
| 10063 | } |
| 10064 | |
| 10065 | /***************************************************************************************************** |
| 10066 | * |
| 10067 | * Function called to compact two given blocks in the flowgraph |
| 10068 | * Assumes that all necessary checks have been performed, |
| 10069 | * i.e. fgCanCompactBlocks returns true. |
| 10070 | * |
* This function is used whenever we change links, insert blocks, etc.;
* it keeps the flowgraph data (bbNum, bbRefs, bbPreds) in sync.
| 10073 | */ |
| 10074 | |
| 10075 | void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) |
| 10076 | { |
| 10077 | noway_assert(block != nullptr); |
| 10078 | noway_assert((block->bbFlags & BBF_REMOVED) == 0); |
| 10079 | noway_assert(block->bbJumpKind == BBJ_NONE); |
| 10080 | |
| 10081 | noway_assert(bNext == block->bbNext); |
| 10082 | noway_assert(bNext != nullptr); |
| 10083 | noway_assert((bNext->bbFlags & BBF_REMOVED) == 0); |
| 10084 | noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty()); |
| 10085 | noway_assert(bNext->bbPreds); |
| 10086 | |
| 10087 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10088 | noway_assert((bNext->bbFlags & BBF_FINALLY_TARGET) == 0); |
| 10089 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10090 | |
| 10091 | // Make sure the second block is not the start of a TRY block or an exception handler |
| 10092 | |
| 10093 | noway_assert(bNext->bbCatchTyp == BBCT_NONE); |
| 10094 | noway_assert((bNext->bbFlags & BBF_TRY_BEG) == 0); |
| 10095 | noway_assert((bNext->bbFlags & BBF_DONT_REMOVE) == 0); |
| 10096 | |
| 10097 | /* both or none must have an exception handler */ |
| 10098 | noway_assert(block->hasTryIndex() == bNext->hasTryIndex()); |
| 10099 | |
| 10100 | #ifdef DEBUG |
| 10101 | if (verbose) |
| 10102 | { |
| 10103 | printf("\nCompacting blocks " FMT_BB " and " FMT_BB ":\n" , block->bbNum, bNext->bbNum); |
| 10104 | } |
| 10105 | #endif |
| 10106 | |
| 10107 | if (bNext->countOfInEdges() > 1) |
| 10108 | { |
| 10109 | JITDUMP("Second block has multiple incoming edges\n" ); |
| 10110 | |
| 10111 | assert(block->isEmpty()); |
| 10112 | block->bbFlags |= BBF_JMP_TARGET; |
| 10113 | for (flowList* pred = bNext->bbPreds; pred; pred = pred->flNext) |
| 10114 | { |
| 10115 | fgReplaceJumpTarget(pred->flBlock, block, bNext); |
| 10116 | |
| 10117 | if (pred->flBlock != block) |
| 10118 | { |
| 10119 | fgAddRefPred(block, pred->flBlock); |
| 10120 | } |
| 10121 | } |
| 10122 | bNext->bbPreds = nullptr; |
| 10123 | } |
| 10124 | else |
| 10125 | { |
| 10126 | noway_assert(bNext->bbPreds->flNext == nullptr); |
| 10127 | noway_assert(bNext->bbPreds->flBlock == block); |
| 10128 | } |
| 10129 | |
| 10130 | /* Start compacting - move all the statements in the second block to the first block */ |
| 10131 | |
| 10132 | // First move any phi definitions of the second block after the phi defs of the first. |
| 10133 | // TODO-CQ: This may be the wrong thing to do. If we're compacting blocks, it's because a |
| 10134 | // control-flow choice was constant-folded away. So probably phi's need to go away, |
| 10135 | // as well, in favor of one of the incoming branches. Or at least be modified. |
| 10136 | |
| 10137 | assert(block->IsLIR() == bNext->IsLIR()); |
| 10138 | if (block->IsLIR()) |
| 10139 | { |
| 10140 | LIR::Range& blockRange = LIR::AsRange(block); |
| 10141 | LIR::Range& = LIR::AsRange(bNext); |
| 10142 | |
| 10143 | // Does the next block have any phis? |
| 10144 | GenTree* nextFirstNonPhi = nullptr; |
| 10145 | LIR::ReadOnlyRange nextPhis = nextRange.PhiNodes(); |
| 10146 | if (!nextPhis.IsEmpty()) |
| 10147 | { |
| 10148 | GenTree* blockLastPhi = blockRange.LastPhiNode(); |
| 10149 | nextFirstNonPhi = nextPhis.LastNode()->gtNext; |
| 10150 | |
| 10151 | LIR::Range phisToMove = nextRange.Remove(std::move(nextPhis)); |
| 10152 | blockRange.InsertAfter(blockLastPhi, std::move(phisToMove)); |
| 10153 | } |
| 10154 | else |
| 10155 | { |
| 10156 | nextFirstNonPhi = nextRange.FirstNode(); |
| 10157 | } |
| 10158 | |
| 10159 | // Does the block have any other code? |
| 10160 | if (nextFirstNonPhi != nullptr) |
| 10161 | { |
| 10162 | LIR::Range nextNodes = nextRange.Remove(nextFirstNonPhi, nextRange.LastNode()); |
| 10163 | blockRange.InsertAtEnd(std::move(nextNodes)); |
| 10164 | } |
| 10165 | } |
| 10166 | else |
| 10167 | { |
| 10168 | GenTree* blkNonPhi1 = block->FirstNonPhiDef(); |
| 10169 | GenTree* bNextNonPhi1 = bNext->FirstNonPhiDef(); |
| 10170 | GenTree* blkFirst = block->firstStmt(); |
| 10171 | GenTree* bNextFirst = bNext->firstStmt(); |
| 10172 | |
| 10173 | // Does the second have any phis? |
| 10174 | if (bNextFirst != nullptr && bNextFirst != bNextNonPhi1) |
| 10175 | { |
| 10176 | GenTree* bNextLast = bNextFirst->gtPrev; |
| 10177 | assert(bNextLast->gtNext == nullptr); |
| 10178 | |
| 10179 | // Does "blk" have phis? |
| 10180 | if (blkNonPhi1 != blkFirst) |
| 10181 | { |
| 10182 | // Yes, has phis. |
| 10183 | // Insert after the last phi of "block." |
| 10184 | // First, bNextPhis after last phi of block. |
| 10185 | GenTree* blkLastPhi; |
| 10186 | if (blkNonPhi1 != nullptr) |
| 10187 | { |
| 10188 | blkLastPhi = blkNonPhi1->gtPrev; |
| 10189 | } |
| 10190 | else |
| 10191 | { |
| 10192 | blkLastPhi = blkFirst->gtPrev; |
| 10193 | } |
| 10194 | |
| 10195 | blkLastPhi->gtNext = bNextFirst; |
| 10196 | bNextFirst->gtPrev = blkLastPhi; |
| 10197 | |
| 10198 | // Now, rest of "block" after last phi of "bNext". |
| 10199 | GenTree* bNextLastPhi = nullptr; |
| 10200 | if (bNextNonPhi1 != nullptr) |
| 10201 | { |
| 10202 | bNextLastPhi = bNextNonPhi1->gtPrev; |
| 10203 | } |
| 10204 | else |
| 10205 | { |
| 10206 | bNextLastPhi = bNextFirst->gtPrev; |
| 10207 | } |
| 10208 | |
| 10209 | bNextLastPhi->gtNext = blkNonPhi1; |
| 10210 | if (blkNonPhi1 != nullptr) |
| 10211 | { |
| 10212 | blkNonPhi1->gtPrev = bNextLastPhi; |
| 10213 | } |
| 10214 | else |
| 10215 | { |
| 10216 | // block has no non phis, so make the last statement be the last added phi. |
| 10217 | blkFirst->gtPrev = bNextLastPhi; |
| 10218 | } |
| 10219 | |
| 10220 | // Now update the bbTreeList of "bNext". |
| 10221 | bNext->bbTreeList = bNextNonPhi1; |
| 10222 | if (bNextNonPhi1 != nullptr) |
| 10223 | { |
| 10224 | bNextNonPhi1->gtPrev = bNextLast; |
| 10225 | } |
| 10226 | } |
| 10227 | else |
| 10228 | { |
| 10229 | if (blkFirst != nullptr) // If "block" has no statements, fusion will work fine... |
| 10230 | { |
| 10231 | // First, bNextPhis at start of block. |
| 10232 | GenTree* blkLast = blkFirst->gtPrev; |
| 10233 | block->bbTreeList = bNextFirst; |
| 10234 | // Now, rest of "block" (if it exists) after last phi of "bNext". |
| 10235 | GenTree* bNextLastPhi = nullptr; |
| 10236 | if (bNextNonPhi1 != nullptr) |
| 10237 | { |
| 10238 | // There is a first non phi, so the last phi is before it. |
| 10239 | bNextLastPhi = bNextNonPhi1->gtPrev; |
| 10240 | } |
| 10241 | else |
| 10242 | { |
| 10243 | // All the statements are phi defns, so the last one is the prev of the first. |
| 10244 | bNextLastPhi = bNextFirst->gtPrev; |
| 10245 | } |
| 10246 | bNextFirst->gtPrev = blkLast; |
| 10247 | bNextLastPhi->gtNext = blkFirst; |
| 10248 | blkFirst->gtPrev = bNextLastPhi; |
| 10249 | // Now update the bbTreeList of "bNext" |
| 10250 | bNext->bbTreeList = bNextNonPhi1; |
| 10251 | if (bNextNonPhi1 != nullptr) |
| 10252 | { |
| 10253 | bNextNonPhi1->gtPrev = bNextLast; |
| 10254 | } |
| 10255 | } |
| 10256 | } |
| 10257 | } |
| 10258 | |
| 10259 | // Now proceed with the updated bbTreeLists. |
| 10260 | GenTree* stmtList1 = block->firstStmt(); |
| 10261 | GenTree* stmtList2 = bNext->firstStmt(); |
| 10262 | |
| 10263 | /* the block may have an empty list */ |
| 10264 | |
| 10265 | if (stmtList1) |
| 10266 | { |
| 10267 | GenTree* stmtLast1 = block->lastStmt(); |
| 10268 | |
| 10269 | /* The second block may be a GOTO statement or something with an empty bbTreeList */ |
| 10270 | if (stmtList2) |
| 10271 | { |
| 10272 | GenTree* stmtLast2 = bNext->lastStmt(); |
| 10273 | |
| 10274 | /* append list2 to list 1 */ |
| 10275 | |
| 10276 | stmtLast1->gtNext = stmtList2; |
| 10277 | stmtList2->gtPrev = stmtLast1; |
| 10278 | stmtList1->gtPrev = stmtLast2; |
| 10279 | } |
| 10280 | } |
| 10281 | else |
| 10282 | { |
| 10283 | /* block was formerly empty and now has bNext's statements */ |
| 10284 | block->bbTreeList = stmtList2; |
| 10285 | } |
| 10286 | } |
| 10287 | |
| 10288 | // Note we could update the local variable weights here by |
| 10289 | // calling lvaMarkLocalVars, with the block and weight adjustment. |
| 10290 | |
| 10291 | // If either block or bNext has a profile weight |
| 10292 | // or if both block and bNext have non-zero weights |
| 10293 | // then we select the highest weight block. |
| 10294 | |
| 10295 | if (block->hasProfileWeight() || bNext->hasProfileWeight() || (block->bbWeight && bNext->bbWeight)) |
| 10296 | { |
| 10297 | // We are keeping block so update its fields |
| 10298 | // when bNext has a greater weight |
| 10299 | |
| 10300 | if (block->bbWeight < bNext->bbWeight) |
| 10301 | { |
| 10302 | block->bbWeight = bNext->bbWeight; |
| 10303 | |
| 10304 | block->bbFlags |= (bNext->bbFlags & BBF_PROF_WEIGHT); // Set the profile weight flag (if necessary) |
| 10305 | if (block->bbWeight != 0) |
| 10306 | { |
| 10307 | block->bbFlags &= ~BBF_RUN_RARELY; // Clear any RarelyRun flag |
| 10308 | } |
| 10309 | } |
| 10310 | } |
| 10311 | // otherwise if either block has a zero weight we select the zero weight |
| 10312 | else |
| 10313 | { |
| 10314 | noway_assert((block->bbWeight == BB_ZERO_WEIGHT) || (bNext->bbWeight == BB_ZERO_WEIGHT)); |
| 10315 | block->bbWeight = BB_ZERO_WEIGHT; |
| 10316 | block->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag |
| 10317 | } |
| 10318 | |
| 10319 | /* set the right links */ |
| 10320 | |
| 10321 | block->bbJumpKind = bNext->bbJumpKind; |
| 10322 | VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); |
| 10323 | |
| 10324 | // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). |
| 10325 | // Set the beginning IL offset to the minimum, and the ending offset to the maximum, of the respective blocks. |
| 10326 | // If one block has an unknown offset, we take the other block. |
| 10327 | // We are merging into 'block', so if its values are correct, just leave them alone. |
| 10328 | // TODO: we should probably base this on the statements within. |
| 10329 | |
| 10330 | if (block->bbCodeOffs == BAD_IL_OFFSET) |
| 10331 | { |
| 10332 | block->bbCodeOffs = bNext->bbCodeOffs; // If they are both BAD_IL_OFFSET, this doesn't change anything. |
| 10333 | } |
| 10334 | else if (bNext->bbCodeOffs != BAD_IL_OFFSET) |
| 10335 | { |
// They are both valid offsets; compare them.
| 10337 | if (block->bbCodeOffs > bNext->bbCodeOffs) |
| 10338 | { |
| 10339 | block->bbCodeOffs = bNext->bbCodeOffs; |
| 10340 | } |
| 10341 | } |
| 10342 | |
| 10343 | if (block->bbCodeOffsEnd == BAD_IL_OFFSET) |
| 10344 | { |
| 10345 | block->bbCodeOffsEnd = bNext->bbCodeOffsEnd; // If they are both BAD_IL_OFFSET, this doesn't change anything. |
| 10346 | } |
| 10347 | else if (bNext->bbCodeOffsEnd != BAD_IL_OFFSET) |
| 10348 | { |
// They are both valid offsets; compare them.
| 10350 | if (block->bbCodeOffsEnd < bNext->bbCodeOffsEnd) |
| 10351 | { |
| 10352 | block->bbCodeOffsEnd = bNext->bbCodeOffsEnd; |
| 10353 | } |
| 10354 | } |
| 10355 | |
| 10356 | if (((block->bbFlags & BBF_INTERNAL) != 0) && ((bNext->bbFlags & BBF_INTERNAL) == 0)) |
| 10357 | { |
| 10358 | // If 'block' is an internal block and 'bNext' isn't, then adjust the flags set on 'block'. |
| 10359 | block->bbFlags &= ~BBF_INTERNAL; // Clear the BBF_INTERNAL flag |
| 10360 | block->bbFlags |= BBF_IMPORTED; // Set the BBF_IMPORTED flag |
| 10361 | } |
| 10362 | |
| 10363 | /* Update the flags for block with those found in bNext */ |
| 10364 | |
| 10365 | block->bbFlags |= (bNext->bbFlags & BBF_COMPACT_UPD); |
| 10366 | |
| 10367 | /* mark bNext as removed */ |
| 10368 | |
| 10369 | bNext->bbFlags |= BBF_REMOVED; |
| 10370 | |
| 10371 | /* Unlink bNext and update all the marker pointers if necessary */ |
| 10372 | |
| 10373 | fgUnlinkRange(block->bbNext, bNext); |
| 10374 | |
| 10375 | // If bNext was the last block of a try or handler, update the EH table. |
| 10376 | |
| 10377 | ehUpdateForDeletedBlock(bNext); |
| 10378 | |
| 10379 | /* If we're collapsing a block created after the dominators are |
| 10380 | computed, rename the block and reuse dominator information from |
| 10381 | the other block */ |
| 10382 | if (fgDomsComputed && block->bbNum > fgDomBBcount) |
| 10383 | { |
| 10384 | BlockSetOps::Assign(this, block->bbReach, bNext->bbReach); |
| 10385 | BlockSetOps::ClearD(this, bNext->bbReach); |
| 10386 | |
| 10387 | block->bbIDom = bNext->bbIDom; |
| 10388 | bNext->bbIDom = nullptr; |
| 10389 | |
// In this case, there's no need to update the preorder and postorder numbering:
// since we take over bNext's bbNum, the basic block is already all set.
| 10392 | block->bbNum = bNext->bbNum; |
| 10393 | } |
| 10394 | |
| 10395 | /* Set the jump targets */ |
| 10396 | |
| 10397 | switch (bNext->bbJumpKind) |
| 10398 | { |
| 10399 | case BBJ_CALLFINALLY: |
| 10400 | // Propagate RETLESS property |
| 10401 | block->bbFlags |= (bNext->bbFlags & BBF_RETLESS_CALL); |
| 10402 | |
| 10403 | __fallthrough; |
| 10404 | |
| 10405 | case BBJ_COND: |
| 10406 | case BBJ_ALWAYS: |
| 10407 | case BBJ_EHCATCHRET: |
| 10408 | block->bbJumpDest = bNext->bbJumpDest; |
| 10409 | |
| 10410 | /* Update the predecessor list for 'bNext->bbJumpDest' */ |
| 10411 | fgReplacePred(bNext->bbJumpDest, bNext, block); |
| 10412 | |
| 10413 | /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ |
| 10414 | if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) |
| 10415 | { |
| 10416 | fgReplacePred(bNext->bbNext, bNext, block); |
| 10417 | } |
| 10418 | break; |
| 10419 | |
| 10420 | case BBJ_NONE: |
| 10421 | /* Update the predecessor list for 'bNext->bbNext' */ |
| 10422 | fgReplacePred(bNext->bbNext, bNext, block); |
| 10423 | break; |
| 10424 | |
| 10425 | case BBJ_EHFILTERRET: |
| 10426 | fgReplacePred(bNext->bbJumpDest, bNext, block); |
| 10427 | break; |
| 10428 | |
| 10429 | case BBJ_EHFINALLYRET: |
| 10430 | { |
| 10431 | unsigned hndIndex = block->getHndIndex(); |
| 10432 | EHblkDsc* ehDsc = ehGetDsc(hndIndex); |
| 10433 | |
| 10434 | if (ehDsc->HasFinallyHandler()) // No need to do this for fault handlers |
| 10435 | { |
| 10436 | BasicBlock* begBlk; |
| 10437 | BasicBlock* endBlk; |
| 10438 | ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); |
| 10439 | |
| 10440 | BasicBlock* finBeg = ehDsc->ebdHndBeg; |
| 10441 | |
| 10442 | for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) |
| 10443 | { |
| 10444 | if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) |
| 10445 | { |
| 10446 | continue; |
| 10447 | } |
| 10448 | |
| 10449 | noway_assert(bcall->isBBCallAlwaysPair()); |
| 10450 | fgReplacePred(bcall->bbNext, bNext, block); |
| 10451 | } |
| 10452 | } |
| 10453 | } |
| 10454 | break; |
| 10455 | |
| 10456 | case BBJ_THROW: |
| 10457 | case BBJ_RETURN: |
| 10458 | /* no jumps or fall through blocks to set here */ |
| 10459 | break; |
| 10460 | |
| 10461 | case BBJ_SWITCH: |
| 10462 | block->bbJumpSwt = bNext->bbJumpSwt; |
| 10463 | // We are moving the switch jump from bNext to block. Examine the jump targets |
| 10464 | // of the BBJ_SWITCH at bNext and replace the predecessor to 'bNext' with ones to 'block' |
| 10465 | fgChangeSwitchBlock(bNext, block); |
| 10466 | break; |
| 10467 | |
| 10468 | default: |
| 10469 | noway_assert(!"Unexpected bbJumpKind" ); |
| 10470 | break; |
| 10471 | } |
| 10472 | |
| 10473 | fgUpdateLoopsAfterCompacting(block, bNext); |
| 10474 | |
| 10475 | #if DEBUG |
| 10476 | if (verbose && 0) |
| 10477 | { |
| 10478 | printf("\nAfter compacting:\n" ); |
| 10479 | fgDispBasicBlocks(false); |
| 10480 | } |
| 10481 | #endif |
| 10482 | |
| 10483 | #if DEBUG |
| 10484 | if (JitConfig.JitSlowDebugChecksEnabled() != 0) |
| 10485 | { |
| 10486 | // Make sure that the predecessor lists are accurate |
| 10487 | fgDebugCheckBBlist(); |
| 10488 | } |
| 10489 | #endif // DEBUG |
| 10490 | } |
| 10491 | |
| 10492 | void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext) |
| 10493 | { |
/* Check if the removed block is not part of the loop table */
| 10495 | noway_assert(bNext); |
| 10496 | |
| 10497 | for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) |
| 10498 | { |
| 10499 | /* Some loops may have been already removed by |
| 10500 | * loop unrolling or conditional folding */ |
| 10501 | |
| 10502 | if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) |
| 10503 | { |
| 10504 | continue; |
| 10505 | } |
| 10506 | |
| 10507 | /* Check the loop head (i.e. the block preceding the loop) */ |
| 10508 | |
| 10509 | if (optLoopTable[loopNum].lpHead == bNext) |
| 10510 | { |
| 10511 | optLoopTable[loopNum].lpHead = block; |
| 10512 | } |
| 10513 | |
| 10514 | /* Check the loop bottom */ |
| 10515 | |
| 10516 | if (optLoopTable[loopNum].lpBottom == bNext) |
| 10517 | { |
| 10518 | optLoopTable[loopNum].lpBottom = block; |
| 10519 | } |
| 10520 | |
| 10521 | /* Check the loop exit */ |
| 10522 | |
| 10523 | if (optLoopTable[loopNum].lpExit == bNext) |
| 10524 | { |
| 10525 | noway_assert(optLoopTable[loopNum].lpExitCnt == 1); |
| 10526 | optLoopTable[loopNum].lpExit = block; |
| 10527 | } |
| 10528 | |
| 10529 | /* Check the loop entry */ |
| 10530 | |
| 10531 | if (optLoopTable[loopNum].lpEntry == bNext) |
| 10532 | { |
| 10533 | optLoopTable[loopNum].lpEntry = block; |
| 10534 | } |
| 10535 | } |
| 10536 | } |
| 10537 | |
| 10538 | /***************************************************************************************************** |
| 10539 | * |
| 10540 | * Function called to remove a block when it is unreachable. |
| 10541 | * |
| 10542 | * This function cannot remove the first block. |
| 10543 | */ |
| 10544 | |
| 10545 | void Compiler::fgUnreachableBlock(BasicBlock* block) |
| 10546 | { |
| 10547 | // genReturnBB should never be removed, as we might have special hookups there. |
| 10548 | // Therefore, we should never come here to remove the statements in the genReturnBB block. |
| 10549 | // For example, <BUGNUM> in VSW 364383, </BUGNUM> |
| 10550 | // the profiler hookup needs to have the "void GT_RETURN" statement |
| 10551 | // to properly set the info.compProfilerCallback flag. |
| 10552 | noway_assert(block != genReturnBB); |
| 10553 | |
| 10554 | if (block->bbFlags & BBF_REMOVED) |
| 10555 | { |
| 10556 | return; |
| 10557 | } |
| 10558 | |
| 10559 | /* Removing an unreachable block */ |
| 10560 | |
| 10561 | #ifdef DEBUG |
| 10562 | if (verbose) |
| 10563 | { |
| 10564 | printf("\nRemoving unreachable " FMT_BB "\n" , block->bbNum); |
| 10565 | } |
| 10566 | #endif // DEBUG |
| 10567 | |
| 10568 | noway_assert(block->bbPrev != nullptr); // Can use this function to remove the first block |
| 10569 | |
| 10570 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10571 | assert(!block->bbPrev->isBBCallAlwaysPair()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair |
| 10572 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10573 | |
| 10574 | /* First walk the statement trees in this basic block and delete each stmt */ |
| 10575 | |
| 10576 | /* Make the block publicly available */ |
| 10577 | compCurBB = block; |
| 10578 | |
| 10579 | if (block->IsLIR()) |
| 10580 | { |
| 10581 | LIR::Range& blockRange = LIR::AsRange(block); |
| 10582 | if (!blockRange.IsEmpty()) |
| 10583 | { |
| 10584 | blockRange.Delete(this, block, blockRange.FirstNode(), blockRange.LastNode()); |
| 10585 | } |
| 10586 | } |
| 10587 | else |
| 10588 | { |
| 10589 | // TODO-Cleanup: I'm not sure why this happens -- if the block is unreachable, why does it have phis? |
| 10590 | // Anyway, remove any phis. |
| 10591 | |
| 10592 | GenTree* firstNonPhi = block->FirstNonPhiDef(); |
| 10593 | if (block->bbTreeList != firstNonPhi) |
| 10594 | { |
| 10595 | if (firstNonPhi != nullptr) |
| 10596 | { |
| 10597 | firstNonPhi->gtPrev = block->lastStmt(); |
| 10598 | } |
| 10599 | block->bbTreeList = firstNonPhi; |
| 10600 | } |
| 10601 | |
| 10602 | for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 10603 | { |
| 10604 | fgRemoveStmt(block, stmt); |
| 10605 | } |
| 10606 | noway_assert(block->bbTreeList == nullptr); |
| 10607 | } |
| 10608 | |
| 10609 | /* Next update the loop table and bbWeights */ |
| 10610 | optUpdateLoopsBeforeRemoveBlock(block); |
| 10611 | |
| 10612 | /* Mark the block as removed */ |
| 10613 | block->bbFlags |= BBF_REMOVED; |
| 10614 | |
| 10615 | /* update bbRefs and bbPreds for the blocks reached by this block */ |
| 10616 | fgRemoveBlockAsPred(block); |
| 10617 | } |
| 10618 | |
| 10619 | /***************************************************************************************************** |
| 10620 | * |
 *  Function called to remove or morph a conditional jump when the block jumps to the
 *  same target whether the condition is true or false.
| 10623 | */ |
| 10624 | void Compiler::fgRemoveConditionalJump(BasicBlock* block) |
| 10625 | { |
| 10626 | noway_assert(block->bbJumpKind == BBJ_COND && block->bbJumpDest == block->bbNext); |
| 10627 | assert(compRationalIRForm == block->IsLIR()); |
| 10628 | |
| 10629 | flowList* flow = fgGetPredForBlock(block->bbNext, block); |
| 10630 | noway_assert(flow->flDupCount == 2); |
| 10631 | |
| 10632 | // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. |
| 10633 | block->bbJumpKind = BBJ_NONE; |
| 10634 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 10635 | --block->bbNext->bbRefs; |
| 10636 | --flow->flDupCount; |
| 10637 | |
| 10638 | #ifdef DEBUG |
| 10639 | block->bbJumpDest = nullptr; |
| 10640 | if (verbose) |
| 10641 | { |
| 10642 | printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB |
| 10643 | " (jump target is the same whether the condition is true or " |
| 10644 | "false)\n" , |
| 10645 | block->bbNum, block->bbNext->bbNum); |
| 10646 | } |
| 10647 | #endif |
| 10648 | |
| 10649 | /* Remove the block jump condition */ |
| 10650 | |
| 10651 | if (block->IsLIR()) |
| 10652 | { |
| 10653 | LIR::Range& blockRange = LIR::AsRange(block); |
| 10654 | |
| 10655 | GenTree* test = blockRange.LastNode(); |
| 10656 | assert(test->OperIsConditionalJump()); |
| 10657 | |
| 10658 | bool isClosed; |
| 10659 | unsigned sideEffects; |
| 10660 | LIR::ReadOnlyRange testRange = blockRange.GetTreeRange(test, &isClosed, &sideEffects); |
| 10661 | |
| 10662 | // TODO-LIR: this should really be checking GTF_ALL_EFFECT, but that produces unacceptable |
| 10663 | // diffs compared to the existing backend. |
| 10664 | if (isClosed && ((sideEffects & GTF_SIDE_EFFECT) == 0)) |
| 10665 | { |
| 10666 | // If the jump and its operands form a contiguous, side-effect-free range, |
| 10667 | // remove them. |
| 10668 | blockRange.Delete(this, block, std::move(testRange)); |
| 10669 | } |
| 10670 | else |
| 10671 | { |
| 10672 | // Otherwise, just remove the jump node itself. |
| 10673 | blockRange.Remove(test, true); |
| 10674 | } |
| 10675 | } |
| 10676 | else |
| 10677 | { |
| 10678 | GenTreeStmt* test = block->lastStmt(); |
| 10679 | GenTree* tree = test->gtStmtExpr; |
| 10680 | |
| 10681 | noway_assert(tree->gtOper == GT_JTRUE); |
| 10682 | |
| 10683 | GenTree* sideEffList = nullptr; |
| 10684 | |
| 10685 | if (tree->gtFlags & GTF_SIDE_EFFECT) |
| 10686 | { |
| 10687 | gtExtractSideEffList(tree, &sideEffList); |
| 10688 | |
| 10689 | if (sideEffList) |
| 10690 | { |
| 10691 | noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); |
| 10692 | #ifdef DEBUG |
| 10693 | if (verbose) |
| 10694 | { |
| 10695 | printf("Extracted side effects list from condition...\n" ); |
| 10696 | gtDispTree(sideEffList); |
| 10697 | printf("\n" ); |
| 10698 | } |
| 10699 | #endif |
| 10700 | } |
| 10701 | } |
| 10702 | |
| 10703 | // Delete the cond test or replace it with the side effect tree |
| 10704 | if (sideEffList == nullptr) |
| 10705 | { |
| 10706 | fgRemoveStmt(block, test); |
| 10707 | } |
| 10708 | else |
| 10709 | { |
| 10710 | test->gtStmtExpr = sideEffList; |
| 10711 | |
| 10712 | fgMorphBlockStmt(block, test DEBUGARG("fgRemoveConditionalJump" )); |
| 10713 | } |
| 10714 | } |
| 10715 | } |
| 10716 | |
| 10717 | /***************************************************************************************************** |
| 10718 | * |
| 10719 | * Function to return the last basic block in the main part of the function. With funclets, it is |
| 10720 | * the block immediately before the first funclet. |
| 10721 | * An inclusive end of the main method. |
| 10722 | */ |
| 10723 | |
| 10724 | BasicBlock* Compiler::fgLastBBInMainFunction() |
| 10725 | { |
| 10726 | #if FEATURE_EH_FUNCLETS |
| 10727 | |
| 10728 | if (fgFirstFuncletBB != nullptr) |
| 10729 | { |
| 10730 | return fgFirstFuncletBB->bbPrev; |
| 10731 | } |
| 10732 | |
| 10733 | #endif // FEATURE_EH_FUNCLETS |
| 10734 | |
| 10735 | assert(fgLastBB->bbNext == nullptr); |
| 10736 | |
| 10737 | return fgLastBB; |
| 10738 | } |
| 10739 | |
| 10740 | /***************************************************************************************************** |
| 10741 | * |
| 10742 | * Function to return the first basic block after the main part of the function. With funclets, it is |
 *  the first funclet block (fgFirstFuncletBB). If there are no funclets, it is nullptr (fgLastBB->bbNext).
| 10744 | * This is equivalent to fgLastBBInMainFunction()->bbNext |
| 10745 | * An exclusive end of the main method. |
| 10746 | */ |
| 10747 | |
| 10748 | BasicBlock* Compiler::fgEndBBAfterMainFunction() |
| 10749 | { |
| 10750 | #if FEATURE_EH_FUNCLETS |
| 10751 | |
| 10752 | if (fgFirstFuncletBB != nullptr) |
| 10753 | { |
| 10754 | return fgFirstFuncletBB; |
| 10755 | } |
| 10756 | |
| 10757 | #endif // FEATURE_EH_FUNCLETS |
| 10758 | |
| 10759 | assert(fgLastBB->bbNext == nullptr); |
| 10760 | |
| 10761 | return nullptr; |
| 10762 | } |
| 10763 | |
| 10764 | // Removes the block from the bbPrev/bbNext chain |
| 10765 | // Updates fgFirstBB and fgLastBB if necessary |
| 10766 | // Does not update fgFirstFuncletBB or fgFirstColdBlock (fgUnlinkRange does) |
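// Note: only the bbPrev/bbNext links (plus fgFirstBB, fgLastBB and fgFirstBBScratch) are adjusted
// here; bbRefs, bbPreds and the EH table are left for callers such as fgRemoveBlock to maintain.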
| 10767 | |
| 10768 | void Compiler::fgUnlinkBlock(BasicBlock* block) |
| 10769 | { |
| 10770 | if (block->bbPrev) |
| 10771 | { |
| 10772 | block->bbPrev->bbNext = block->bbNext; |
| 10773 | if (block->bbNext) |
| 10774 | { |
| 10775 | block->bbNext->bbPrev = block->bbPrev; |
| 10776 | } |
| 10777 | else |
| 10778 | { |
| 10779 | fgLastBB = block->bbPrev; |
| 10780 | } |
| 10781 | } |
| 10782 | else |
| 10783 | { |
| 10784 | assert(block == fgFirstBB); |
| 10785 | assert(block != fgLastBB); |
| 10786 | assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB)); |
| 10787 | |
| 10788 | fgFirstBB = block->bbNext; |
| 10789 | fgFirstBB->bbPrev = nullptr; |
| 10790 | |
| 10791 | if (fgFirstBBScratch != nullptr) |
| 10792 | { |
| 10793 | #ifdef DEBUG |
| 10794 | // We had created an initial scratch BB, but now we're deleting it. |
| 10795 | if (verbose) |
| 10796 | { |
| 10797 | printf("Unlinking scratch " FMT_BB "\n" , block->bbNum); |
| 10798 | } |
| 10799 | #endif // DEBUG |
| 10800 | fgFirstBBScratch = nullptr; |
| 10801 | } |
| 10802 | } |
| 10803 | } |
| 10804 | |
| 10805 | /***************************************************************************************************** |
| 10806 | * |
| 10807 | * Function called to unlink basic block range [bBeg .. bEnd] from the basic block list. |
| 10808 | * |
| 10809 | * 'bBeg' can't be the first block. |
| 10810 | */ |
| 10811 | |
| 10812 | void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) |
| 10813 | { |
| 10814 | assert(bBeg != nullptr); |
| 10815 | assert(bEnd != nullptr); |
| 10816 | |
| 10817 | BasicBlock* bPrev = bBeg->bbPrev; |
| 10818 | assert(bPrev != nullptr); // Can't unlink a range starting with the first block |
| 10819 | |
| 10820 | bPrev->setNext(bEnd->bbNext); |
| 10821 | |
| 10822 | /* If we removed the last block in the method then update fgLastBB */ |
| 10823 | if (fgLastBB == bEnd) |
| 10824 | { |
| 10825 | fgLastBB = bPrev; |
| 10826 | noway_assert(fgLastBB->bbNext == nullptr); |
| 10827 | } |
| 10828 | |
| 10829 | // If bEnd was the first Cold basic block update fgFirstColdBlock |
| 10830 | if (fgFirstColdBlock == bEnd) |
| 10831 | { |
| 10832 | fgFirstColdBlock = bPrev->bbNext; |
| 10833 | } |
| 10834 | |
| 10835 | #if FEATURE_EH_FUNCLETS |
| 10836 | #ifdef DEBUG |
| 10837 | // You can't unlink a range that includes the first funclet block. A range certainly |
| 10838 | // can't cross the non-funclet/funclet region. And you can't unlink the first block |
| 10839 | // of the first funclet with this, either. (If that's necessary, it could be allowed |
| 10840 | // by updating fgFirstFuncletBB to bEnd->bbNext.) |
| 10841 | for (BasicBlock* tempBB = bBeg; tempBB != bEnd->bbNext; tempBB = tempBB->bbNext) |
| 10842 | { |
| 10843 | assert(tempBB != fgFirstFuncletBB); |
| 10844 | } |
| 10845 | #endif // DEBUG |
| 10846 | #endif // FEATURE_EH_FUNCLETS |
| 10847 | } |
| 10848 | |
| 10849 | /***************************************************************************************************** |
| 10850 | * |
| 10851 | * Function called to remove a basic block |
| 10852 | */ |
| 10853 | |
| 10854 | void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) |
| 10855 | { |
| 10856 | BasicBlock* bPrev = block->bbPrev; |
| 10857 | |
| 10858 | /* The block has to be either unreachable or empty */ |
| 10859 | |
| 10860 | PREFIX_ASSUME(block != nullptr); |
| 10861 | |
| 10862 | JITDUMP("fgRemoveBlock " FMT_BB "\n" , block->bbNum); |
| 10863 | |
| 10864 | // If we've cached any mappings from switch blocks to SwitchDesc's (which contain only the |
| 10865 | // *unique* successors of the switch block), invalidate that cache, since an entry in one of |
| 10866 | // the SwitchDescs might be removed. |
| 10867 | InvalidateUniqueSwitchSuccMap(); |
| 10868 | |
| 10869 | noway_assert((block == fgFirstBB) || (bPrev && (bPrev->bbNext == block))); |
| 10870 | noway_assert(!(block->bbFlags & BBF_DONT_REMOVE)); |
| 10871 | |
| 10872 | // Should never remove a genReturnBB, as we might have special hookups there. |
| 10873 | noway_assert(block != genReturnBB); |
| 10874 | |
| 10875 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10876 | // Don't remove a finally target |
| 10877 | assert(!(block->bbFlags & BBF_FINALLY_TARGET)); |
| 10878 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10879 | |
| 10880 | if (unreachable) |
| 10881 | { |
| 10882 | PREFIX_ASSUME(bPrev != nullptr); |
| 10883 | |
| 10884 | fgUnreachableBlock(block); |
| 10885 | |
| 10886 | /* If this is the last basic block update fgLastBB */ |
| 10887 | if (block == fgLastBB) |
| 10888 | { |
| 10889 | fgLastBB = bPrev; |
| 10890 | } |
| 10891 | |
| 10892 | #if FEATURE_EH_FUNCLETS |
| 10893 | // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext |
| 10894 | if (block == fgFirstFuncletBB) |
| 10895 | { |
| 10896 | fgFirstFuncletBB = block->bbNext; |
| 10897 | } |
| 10898 | #endif // FEATURE_EH_FUNCLETS |
| 10899 | |
| 10900 | if (bPrev->bbJumpKind == BBJ_CALLFINALLY) |
| 10901 | { |
| 10902 | // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable |
| 10903 | bPrev->bbFlags |= BBF_RETLESS_CALL; |
| 10904 | |
| 10905 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10906 | NO_WAY("No retless call finally blocks; need unwind target instead" ); |
| 10907 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10908 | } |
| 10909 | else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && |
| 10910 | !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && |
| 10911 | (block->bbNext != fgFirstColdBlock)) |
| 10912 | { |
| 10913 | // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. |
| 10914 | // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), |
| 10915 | // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by |
| 10916 | // BBJ_ALWAYS blocks. |
| 10917 | bPrev->bbJumpKind = BBJ_NONE; |
| 10918 | bPrev->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 10919 | } |
| 10920 | |
| 10921 | // If this is the first Cold basic block update fgFirstColdBlock |
| 10922 | if (block == fgFirstColdBlock) |
| 10923 | { |
| 10924 | fgFirstColdBlock = block->bbNext; |
| 10925 | } |
| 10926 | |
| 10927 | /* Unlink this block from the bbNext chain */ |
| 10928 | fgUnlinkBlock(block); |
| 10929 | |
| 10930 | /* At this point the bbPreds and bbRefs had better be zero */ |
| 10931 | noway_assert((block->bbRefs == 0) && (block->bbPreds == nullptr)); |
| 10932 | |
| 10933 | /* A BBJ_CALLFINALLY is usually paired with a BBJ_ALWAYS. |
| 10934 | * If we delete such a BBJ_CALLFINALLY we also delete the BBJ_ALWAYS |
| 10935 | */ |
| 10936 | if (block->isBBCallAlwaysPair()) |
| 10937 | { |
| 10938 | BasicBlock* leaveBlk = block->bbNext; |
| 10939 | noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); |
| 10940 | |
| 10941 | leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; |
| 10942 | leaveBlk->bbRefs = 0; |
| 10943 | leaveBlk->bbPreds = nullptr; |
| 10944 | |
| 10945 | fgRemoveBlock(leaveBlk, true); |
| 10946 | |
| 10947 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10948 | fgClearFinallyTargetBit(leaveBlk->bbJumpDest); |
| 10949 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 10950 | } |
| 10951 | else if (block->bbJumpKind == BBJ_RETURN) |
| 10952 | { |
| 10953 | fgRemoveReturnBlock(block); |
| 10954 | } |
| 10955 | } |
| 10956 | else // block is empty |
| 10957 | { |
| 10958 | noway_assert(block->isEmpty()); |
| 10959 | |
| 10960 | /* The block cannot follow a non-retless BBJ_CALLFINALLY (because we don't know who may jump to it) */ |
| 10961 | noway_assert((bPrev == nullptr) || !bPrev->isBBCallAlwaysPair()); |
| 10962 | |
| 10963 | /* This cannot be the last basic block */ |
| 10964 | noway_assert(block != fgLastBB); |
| 10965 | |
| 10966 | #ifdef DEBUG |
| 10967 | if (verbose) |
| 10968 | { |
| 10969 | printf("Removing empty " FMT_BB "\n" , block->bbNum); |
| 10970 | } |
| 10971 | #endif // DEBUG |
| 10972 | |
| 10973 | #ifdef DEBUG |
| 10974 | /* Some extra checks for the empty case */ |
| 10975 | |
| 10976 | switch (block->bbJumpKind) |
| 10977 | { |
| 10978 | case BBJ_NONE: |
| 10979 | break; |
| 10980 | |
| 10981 | case BBJ_ALWAYS: |
| 10982 | /* Do not remove a block that jumps to itself - used for while (true){} */ |
| 10983 | noway_assert(block->bbJumpDest != block); |
| 10984 | |
| 10985 | /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ |
| 10986 | noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE); |
| 10987 | break; |
| 10988 | |
| 10989 | default: |
| 10990 | noway_assert(!"Empty block of this type cannot be removed!" ); |
| 10991 | break; |
| 10992 | } |
| 10993 | #endif // DEBUG |
| 10994 | |
| 10995 | noway_assert(block->bbJumpKind == BBJ_NONE || block->bbJumpKind == BBJ_ALWAYS); |
| 10996 | |
| 10997 | /* Who is the "real" successor of this block? */ |
| 10998 | |
| 10999 | BasicBlock* succBlock; |
| 11000 | |
| 11001 | if (block->bbJumpKind == BBJ_ALWAYS) |
| 11002 | { |
| 11003 | succBlock = block->bbJumpDest; |
| 11004 | } |
| 11005 | else |
| 11006 | { |
| 11007 | succBlock = block->bbNext; |
| 11008 | } |
| 11009 | |
| 11010 | bool skipUnmarkLoop = false; |
| 11011 | |
| 11012 | // If block is the backedge for a loop and succBlock precedes block |
| 11013 | // then the succBlock becomes the new LOOP HEAD |
| 11014 | // NOTE: there's an assumption here that the blocks are numbered in increasing bbNext order. |
| 11015 | // NOTE 2: if fgDomsComputed is false, then we can't check reachability. However, if this is |
| 11016 | // the case, then the loop structures probably are also invalid, and shouldn't be used. This |
| 11017 | // can be the case late in compilation (such as Lower), where remnants of earlier created |
| 11018 | // structures exist, but haven't been maintained. |
| 11019 | if (block->isLoopHead() && (succBlock->bbNum <= block->bbNum)) |
| 11020 | { |
| 11021 | succBlock->bbFlags |= BBF_LOOP_HEAD; |
| 11022 | if (fgDomsComputed && fgReachable(succBlock, block)) |
| 11023 | { |
| 11024 | /* Mark all the reachable blocks between 'succBlock' and 'block', excluding 'block' */ |
| 11025 | optMarkLoopBlocks(succBlock, block, true); |
| 11026 | } |
| 11027 | } |
| 11028 | else if (succBlock->isLoopHead() && bPrev && (succBlock->bbNum <= bPrev->bbNum)) |
| 11029 | { |
| 11030 | skipUnmarkLoop = true; |
| 11031 | } |
| 11032 | |
| 11033 | noway_assert(succBlock); |
| 11034 | |
| 11035 | // If this is the first Cold basic block update fgFirstColdBlock |
| 11036 | if (block == fgFirstColdBlock) |
| 11037 | { |
| 11038 | fgFirstColdBlock = block->bbNext; |
| 11039 | } |
| 11040 | |
| 11041 | #if FEATURE_EH_FUNCLETS |
| 11042 | // Update fgFirstFuncletBB if necessary |
| 11043 | if (block == fgFirstFuncletBB) |
| 11044 | { |
| 11045 | fgFirstFuncletBB = block->bbNext; |
| 11046 | } |
| 11047 | #endif // FEATURE_EH_FUNCLETS |
| 11048 | |
| 11049 | /* First update the loop table and bbWeights */ |
| 11050 | optUpdateLoopsBeforeRemoveBlock(block, skipUnmarkLoop); |
| 11051 | |
| 11052 | // Update successor block start IL offset, if empty predecessor |
| 11053 | // covers the immediately preceding range. |
| 11054 | if ((block->bbCodeOffsEnd == succBlock->bbCodeOffs) && (block->bbCodeOffs != BAD_IL_OFFSET)) |
| 11055 | { |
| 11056 | assert(block->bbCodeOffs <= succBlock->bbCodeOffs); |
| 11057 | succBlock->bbCodeOffs = block->bbCodeOffs; |
| 11058 | } |
| 11059 | |
| 11060 | /* Remove the block */ |
| 11061 | |
| 11062 | if (bPrev == nullptr) |
| 11063 | { |
| 11064 | /* special case if this is the first BB */ |
| 11065 | |
| 11066 | noway_assert(block == fgFirstBB); |
| 11067 | |
| 11068 | /* Must be a fall through to next block */ |
| 11069 | |
| 11070 | noway_assert(block->bbJumpKind == BBJ_NONE); |
| 11071 | |
| 11072 | /* old block no longer gets the extra ref count for being the first block */ |
| 11073 | block->bbRefs--; |
| 11074 | succBlock->bbRefs++; |
| 11075 | |
| 11076 | /* Set the new firstBB */ |
| 11077 | fgUnlinkBlock(block); |
| 11078 | |
| 11079 | /* Always treat the initial block as a jump target */ |
| 11080 | fgFirstBB->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 11081 | } |
| 11082 | else |
| 11083 | { |
| 11084 | fgUnlinkBlock(block); |
| 11085 | } |
| 11086 | |
| 11087 | /* mark the block as removed and set the change flag */ |
| 11088 | |
| 11089 | block->bbFlags |= BBF_REMOVED; |
| 11090 | |
| 11091 | /* Update bbRefs and bbPreds. |
| 11092 | * All blocks jumping to 'block' now jump to 'succBlock'. |
| 11093 | * First, remove 'block' from the predecessor list of succBlock. |
| 11094 | */ |
| 11095 | |
| 11096 | fgRemoveRefPred(succBlock, block); |
| 11097 | |
| 11098 | for (flowList* pred = block->bbPreds; pred; pred = pred->flNext) |
| 11099 | { |
| 11100 | BasicBlock* predBlock = pred->flBlock; |
| 11101 | |
| 11102 | /* Are we changing a loop backedge into a forward jump? */ |
| 11103 | |
| 11104 | if (block->isLoopHead() && (predBlock->bbNum >= block->bbNum) && (predBlock->bbNum <= succBlock->bbNum)) |
| 11105 | { |
| 11106 | /* First update the loop table and bbWeights */ |
| 11107 | optUpdateLoopsBeforeRemoveBlock(predBlock); |
| 11108 | } |
| 11109 | |
| 11110 | /* If predBlock is a new predecessor, then add it to succBlock's |
| 11111 | predecessor's list. */ |
| 11112 | if (predBlock->bbJumpKind != BBJ_SWITCH) |
| 11113 | { |
| 11114 | // Even if the pred is not a switch, we could have a conditional branch |
                // to the fallthrough, so there could be duplicate preds.
| 11116 | for (unsigned i = 0; i < pred->flDupCount; i++) |
| 11117 | { |
| 11118 | fgAddRefPred(succBlock, predBlock); |
| 11119 | } |
| 11120 | } |
| 11121 | |
| 11122 | /* change all jumps to the removed block */ |
| 11123 | switch (predBlock->bbJumpKind) |
| 11124 | { |
| 11125 | default: |
| 11126 | noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()" ); |
| 11127 | break; |
| 11128 | |
| 11129 | case BBJ_NONE: |
| 11130 | noway_assert(predBlock == bPrev); |
| 11131 | PREFIX_ASSUME(bPrev != nullptr); |
| 11132 | |
                    /* If the removed block was a BBJ_ALWAYS then bPrev has to become a BBJ_ALWAYS too */
| 11134 | if (block->bbJumpKind == BBJ_ALWAYS) |
| 11135 | { |
| 11136 | /* bPrev now becomes a BBJ_ALWAYS */ |
| 11137 | bPrev->bbJumpKind = BBJ_ALWAYS; |
| 11138 | bPrev->bbJumpDest = succBlock; |
| 11139 | } |
| 11140 | break; |
| 11141 | |
| 11142 | case BBJ_COND: |
| 11143 | /* The links for the direct predecessor case have already been updated above */ |
| 11144 | if (predBlock->bbJumpDest != block) |
| 11145 | { |
| 11146 | succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET; |
| 11147 | break; |
| 11148 | } |
| 11149 | |
                    /* Check if both sides of the BBJ_COND now jump to the same block */
| 11151 | if (predBlock->bbNext == succBlock) |
| 11152 | { |
| 11153 | // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest. |
| 11154 | noway_assert(predBlock->bbJumpDest == block); |
| 11155 | predBlock->bbJumpDest = succBlock; |
| 11156 | fgRemoveConditionalJump(predBlock); |
| 11157 | break; |
| 11158 | } |
| 11159 | |
| 11160 | /* Fall through for the jump case */ |
| 11161 | __fallthrough; |
| 11162 | |
| 11163 | case BBJ_CALLFINALLY: |
| 11164 | case BBJ_ALWAYS: |
| 11165 | case BBJ_EHCATCHRET: |
| 11166 | noway_assert(predBlock->bbJumpDest == block); |
| 11167 | predBlock->bbJumpDest = succBlock; |
| 11168 | succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET; |
| 11169 | break; |
| 11170 | |
| 11171 | case BBJ_SWITCH: |
| 11172 | // Change any jumps from 'predBlock' (a BBJ_SWITCH) to 'block' to jump to 'succBlock' |
| 11173 | // |
| 11174 | // For the jump targets of 'predBlock' (a BBJ_SWITCH) that jump to 'block' |
| 11175 | // remove the old predecessor at 'block' from 'predBlock' and |
| 11176 | // add the new predecessor at 'succBlock' from 'predBlock' |
| 11177 | // |
| 11178 | fgReplaceSwitchJumpTarget(predBlock, succBlock, block); |
| 11179 | break; |
| 11180 | } |
| 11181 | } |
| 11182 | } |
| 11183 | |
| 11184 | if (bPrev != nullptr) |
| 11185 | { |
| 11186 | switch (bPrev->bbJumpKind) |
| 11187 | { |
| 11188 | case BBJ_CALLFINALLY: |
| 11189 | // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS |
| 11190 | noway_assert(bPrev->bbFlags & BBF_RETLESS_CALL); |
| 11191 | break; |
| 11192 | |
| 11193 | case BBJ_ALWAYS: |
| 11194 | // Check for branch to next block. Just make sure the BBJ_ALWAYS block is not |
| 11195 | // part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. We do this here and don't rely on fgUpdateFlowGraph |
                // because we can be called by ComputeDominators, which expects us to remove this jump to
                // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS
| 11198 | // pairing. |
| 11199 | |
| 11200 | if ((bPrev->bbJumpDest == bPrev->bbNext) && |
| 11201 | !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold |
| 11202 | { |
| 11203 | if ((bPrev == fgFirstBB) || !bPrev->bbPrev->isBBCallAlwaysPair()) |
| 11204 | { |
| 11205 | // It's safe to change the jump type |
| 11206 | bPrev->bbJumpKind = BBJ_NONE; |
| 11207 | bPrev->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 11208 | } |
| 11209 | } |
| 11210 | break; |
| 11211 | |
| 11212 | case BBJ_COND: |
| 11213 | /* Check for branch to next block */ |
| 11214 | if (bPrev->bbJumpDest == bPrev->bbNext) |
| 11215 | { |
| 11216 | fgRemoveConditionalJump(bPrev); |
| 11217 | } |
| 11218 | break; |
| 11219 | |
| 11220 | default: |
| 11221 | break; |
| 11222 | } |
| 11223 | |
| 11224 | ehUpdateForDeletedBlock(block); |
| 11225 | } |
| 11226 | } |
| 11227 | |
| 11228 | /***************************************************************************** |
| 11229 | * |
 *  Function called to connect a block (bSrc) to the block (bDst) that it previously fell through to
| 11231 | */ |
| 11232 | |
| 11233 | BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) |
| 11234 | { |
| 11235 | BasicBlock* jmpBlk = nullptr; |
| 11236 | |
| 11237 | /* If bSrc is non-NULL */ |
| 11238 | |
| 11239 | if (bSrc != nullptr) |
| 11240 | { |
| 11241 | /* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */ |
| 11242 | |
| 11243 | if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) |
| 11244 | { |
| 11245 | switch (bSrc->bbJumpKind) |
| 11246 | { |
| 11247 | |
| 11248 | case BBJ_NONE: |
| 11249 | bSrc->bbJumpKind = BBJ_ALWAYS; |
| 11250 | bSrc->bbJumpDest = bDst; |
| 11251 | bSrc->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL); |
| 11252 | #ifdef DEBUG |
| 11253 | if (verbose) |
| 11254 | { |
| 11255 | printf("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB |
| 11256 | "\n" , |
| 11257 | bSrc->bbNum, bSrc->bbJumpDest->bbNum); |
| 11258 | } |
| 11259 | #endif |
| 11260 | break; |
| 11261 | |
| 11262 | case BBJ_CALLFINALLY: |
| 11263 | case BBJ_COND: |
| 11264 | |
| 11265 | // Add a new block after bSrc which jumps to 'bDst' |
| 11266 | jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); |
| 11267 | |
| 11268 | if (fgComputePredsDone) |
| 11269 | { |
| 11270 | fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc)); |
| 11271 | } |
| 11272 | |
| 11273 | // When adding a new jmpBlk we will set the bbWeight and bbFlags |
| 11274 | // |
| 11275 | if (fgHaveValidEdgeWeights) |
| 11276 | { |
| 11277 | noway_assert(fgComputePredsDone); |
| 11278 | |
| 11279 | flowList* newEdge = fgGetPredForBlock(jmpBlk, bSrc); |
| 11280 | |
| 11281 | jmpBlk->bbWeight = (newEdge->flEdgeWeightMin + newEdge->flEdgeWeightMax) / 2; |
| 11282 | if (bSrc->bbWeight == 0) |
| 11283 | { |
| 11284 | jmpBlk->bbWeight = 0; |
| 11285 | } |
| 11286 | |
| 11287 | if (jmpBlk->bbWeight == 0) |
| 11288 | { |
| 11289 | jmpBlk->bbFlags |= BBF_RUN_RARELY; |
| 11290 | } |
| 11291 | |
| 11292 | BasicBlock::weight_t weightDiff = (newEdge->flEdgeWeightMax - newEdge->flEdgeWeightMin); |
| 11293 | BasicBlock::weight_t slop = BasicBlock::GetSlopFraction(bSrc, bDst); |
| 11294 | |
| 11295 | // |
                        // If the [min/max] values for our edge weight are within the slop factor
                        // then we will set the BBF_PROF_WEIGHT flag for the block
| 11298 | // |
| 11299 | if (weightDiff <= slop) |
| 11300 | { |
| 11301 | jmpBlk->bbFlags |= BBF_PROF_WEIGHT; |
| 11302 | } |
| 11303 | } |
| 11304 | else |
| 11305 | { |
| 11306 | // We set the bbWeight to the smaller of bSrc->bbWeight or bDst->bbWeight |
| 11307 | if (bSrc->bbWeight < bDst->bbWeight) |
| 11308 | { |
| 11309 | jmpBlk->bbWeight = bSrc->bbWeight; |
| 11310 | jmpBlk->bbFlags |= (bSrc->bbFlags & BBF_RUN_RARELY); |
| 11311 | } |
| 11312 | else |
| 11313 | { |
| 11314 | jmpBlk->bbWeight = bDst->bbWeight; |
| 11315 | jmpBlk->bbFlags |= (bDst->bbFlags & BBF_RUN_RARELY); |
| 11316 | } |
| 11317 | } |
| 11318 | |
| 11319 | jmpBlk->bbJumpDest = bDst; |
| 11320 | jmpBlk->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL); |
| 11321 | |
| 11322 | if (fgComputePredsDone) |
| 11323 | { |
| 11324 | fgReplacePred(bDst, bSrc, jmpBlk); |
| 11325 | } |
| 11326 | else |
| 11327 | { |
| 11328 | jmpBlk->bbFlags |= BBF_IMPORTED; |
| 11329 | } |
| 11330 | |
| 11331 | #ifdef DEBUG |
| 11332 | if (verbose) |
| 11333 | { |
| 11334 | printf("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n" , |
| 11335 | jmpBlk->bbJumpDest->bbNum, bSrc->bbNum); |
| 11336 | } |
| 11337 | #endif // DEBUG |
| 11338 | break; |
| 11339 | |
| 11340 | default: |
| 11341 | noway_assert(!"Unexpected bbJumpKind" ); |
| 11342 | break; |
| 11343 | } |
| 11344 | } |
| 11345 | else |
| 11346 | { |
| 11347 | // If bSrc is an unconditional branch to the next block |
| 11348 | // then change it to a BBJ_NONE block |
| 11349 | // |
| 11350 | if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && |
| 11351 | (bSrc->bbJumpDest == bSrc->bbNext)) |
| 11352 | { |
| 11353 | bSrc->bbJumpKind = BBJ_NONE; |
| 11354 | bSrc->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 11355 | #ifdef DEBUG |
| 11356 | if (verbose) |
| 11357 | { |
| 11358 | printf("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB |
| 11359 | " into a BBJ_NONE block\n" , |
| 11360 | bSrc->bbNum, bSrc->bbNext->bbNum); |
| 11361 | } |
| 11362 | #endif // DEBUG |
| 11363 | } |
| 11364 | } |
| 11365 | } |
| 11366 | |
| 11367 | return jmpBlk; |
| 11368 | } |
| 11369 | |
| 11370 | /***************************************************************************** |
   Walk the flow graph and reassign block numbers to keep them in ascending order.
   Returns 'true' if any renumbering was actually done, OR if the maximum number of
   assigned basic blocks changed (this can happen if, for example, inlining creates a new,
   high-numbered block that later goes away: when we renumber, none of the blocks actually
   change number, but the maximum assigned block number shrinks, which affects the block
   set epoch).
| 11377 | */ |
| 11378 | |
| 11379 | bool Compiler::fgRenumberBlocks() |
| 11380 | { |
| 11381 | // If we renumber the blocks the dominator information will be out-of-date |
| 11382 | if (fgDomsComputed) |
| 11383 | { |
| 11384 | noway_assert(!"Can't call Compiler::fgRenumberBlocks() when fgDomsComputed==true" ); |
| 11385 | } |
| 11386 | |
| 11387 | #ifdef DEBUG |
| 11388 | if (verbose) |
| 11389 | { |
| 11390 | printf("\n*************** Before renumbering the basic blocks\n" ); |
| 11391 | fgDispBasicBlocks(); |
| 11392 | fgDispHandlerTab(); |
| 11393 | } |
| 11394 | #endif // DEBUG |
| 11395 | |
| 11396 | bool renumbered = false; |
| 11397 | bool newMaxBBNum = false; |
| 11398 | BasicBlock* block; |
| 11399 | |
| 11400 | unsigned numStart = 1 + (compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : 0); |
| 11401 | unsigned num; |
| 11402 | |
| 11403 | for (block = fgFirstBB, num = numStart; block != nullptr; block = block->bbNext, num++) |
| 11404 | { |
| 11405 | noway_assert((block->bbFlags & BBF_REMOVED) == 0); |
| 11406 | |
| 11407 | if (block->bbNum != num) |
| 11408 | { |
| 11409 | renumbered = true; |
| 11410 | #ifdef DEBUG |
| 11411 | if (verbose) |
| 11412 | { |
| 11413 | printf("Renumber " FMT_BB " to " FMT_BB "\n" , block->bbNum, num); |
| 11414 | } |
| 11415 | #endif // DEBUG |
| 11416 | block->bbNum = num; |
| 11417 | } |
| 11418 | |
| 11419 | if (block->bbNext == nullptr) |
| 11420 | { |
| 11421 | fgLastBB = block; |
| 11422 | fgBBcount = num - numStart + 1; |
| 11423 | if (compIsForInlining()) |
| 11424 | { |
| 11425 | if (impInlineInfo->InlinerCompiler->fgBBNumMax != num) |
| 11426 | { |
| 11427 | impInlineInfo->InlinerCompiler->fgBBNumMax = num; |
| 11428 | newMaxBBNum = true; |
| 11429 | } |
| 11430 | } |
| 11431 | else |
| 11432 | { |
| 11433 | if (fgBBNumMax != num) |
| 11434 | { |
| 11435 | fgBBNumMax = num; |
| 11436 | newMaxBBNum = true; |
| 11437 | } |
| 11438 | } |
| 11439 | } |
| 11440 | } |
| 11441 | |
| 11442 | #ifdef DEBUG |
| 11443 | if (verbose) |
| 11444 | { |
| 11445 | printf("\n*************** After renumbering the basic blocks\n" ); |
| 11446 | if (renumbered) |
| 11447 | { |
| 11448 | fgDispBasicBlocks(); |
| 11449 | fgDispHandlerTab(); |
| 11450 | } |
| 11451 | else |
| 11452 | { |
| 11453 | printf("=============== No blocks renumbered!\n" ); |
| 11454 | } |
| 11455 | } |
| 11456 | #endif // DEBUG |
| 11457 | |
| 11458 | // Now update the BlockSet epoch, which depends on the block numbers. |
| 11459 | // If any blocks have been renumbered then create a new BlockSet epoch. |
| 11460 | // Even if we have not renumbered any blocks, we might still need to force |
| 11461 | // a new BlockSet epoch, for one of several reasons. If there are any new |
| 11462 | // blocks with higher numbers than the former maximum numbered block, then we |
| 11463 | // need a new epoch with a new size matching the new largest numbered block. |
| 11464 | // Also, if the number of blocks is different from the last time we set the |
| 11465 | // BlockSet epoch, then we need a new epoch. This wouldn't happen if we |
| 11466 | // renumbered blocks after every block addition/deletion, but it might be |
| 11467 | // the case that we can change the number of blocks, then set the BlockSet |
| 11468 | // epoch without renumbering, then change the number of blocks again, then |
| 11469 | // renumber. |
| 11470 | if (renumbered || newMaxBBNum) |
| 11471 | { |
| 11472 | NewBasicBlockEpoch(); |
| 11473 | |
| 11474 | // The key in the unique switch successor map is dependent on the block number, so invalidate that cache. |
| 11475 | InvalidateUniqueSwitchSuccMap(); |
| 11476 | } |
| 11477 | else |
| 11478 | { |
| 11479 | EnsureBasicBlockEpoch(); |
| 11480 | } |
| 11481 | |
| 11482 | // Tell our caller if any blocks actually were renumbered. |
| 11483 | return renumbered || newMaxBBNum; |
| 11484 | } |
| 11485 | |
| 11486 | /***************************************************************************** |
| 11487 | * |
| 11488 | * Is the BasicBlock bJump a forward branch? |
| 11489 | * Optionally bSrc can be supplied to indicate that |
| 11490 | * bJump must be forward with respect to bSrc |
| 11491 | */ |
| 11492 | bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL */) |
| 11493 | { |
| 11494 | bool result = false; |
| 11495 | |
| 11496 | if ((bJump->bbJumpKind == BBJ_COND) || (bJump->bbJumpKind == BBJ_ALWAYS)) |
| 11497 | { |
| 11498 | BasicBlock* bDest = bJump->bbJumpDest; |
| 11499 | BasicBlock* bTemp = (bSrc == nullptr) ? bJump : bSrc; |
| 11500 | |
| 11501 | while (true) |
| 11502 | { |
| 11503 | bTemp = bTemp->bbNext; |
| 11504 | |
| 11505 | if (bTemp == nullptr) |
| 11506 | { |
| 11507 | break; |
| 11508 | } |
| 11509 | |
| 11510 | if (bTemp == bDest) |
| 11511 | { |
| 11512 | result = true; |
| 11513 | break; |
| 11514 | } |
| 11515 | } |
| 11516 | } |
| 11517 | |
| 11518 | return result; |
| 11519 | } |
| 11520 | |
| 11521 | /***************************************************************************** |
| 11522 | * |
| 11523 | * Function called to expand the set of rarely run blocks |
| 11524 | */ |
| 11525 | |
| 11526 | bool Compiler::fgExpandRarelyRunBlocks() |
| 11527 | { |
| 11528 | bool result = false; |
| 11529 | |
| 11530 | #ifdef DEBUG |
| 11531 | if (verbose) |
| 11532 | { |
| 11533 | printf("\n*************** In fgExpandRarelyRunBlocks()\n" ); |
| 11534 | } |
| 11535 | |
| 11536 | const char* reason = nullptr; |
| 11537 | #endif |
| 11538 | |
    // We expand the set of rarely run blocks by observing that a block which falls into
    // or jumps to a rarely run block must itself be rarely run, and that a block whose
    // conditional jump has both branches going to rarely run blocks must also be rarely run.
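    //
    // For example (block numbers are illustrative only): if BB07 is rarely run, then
    //   - a BBJ_ALWAYS block that jumps to BB07 becomes rarely run,
    //   - a BBJ_NONE block that falls into BB07 becomes rarely run,
    //   - a BBJ_COND block whose jump target and fall-through successor are both rarely run
    //     becomes rarely run,
    // unless that predecessor's weight came from profile data, in which case it is left alone.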
| 11544 | |
| 11545 | BasicBlock* block; |
| 11546 | BasicBlock* bPrev; |
| 11547 | |
| 11548 | for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) |
| 11549 | { |
| 11550 | if (bPrev->isRunRarely()) |
| 11551 | { |
| 11552 | continue; |
| 11553 | } |
| 11554 | |
| 11555 | /* bPrev is known to be a normal block here */ |
| 11556 | switch (bPrev->bbJumpKind) |
| 11557 | { |
| 11558 | case BBJ_ALWAYS: |
| 11559 | |
| 11560 | /* Is the jump target rarely run? */ |
| 11561 | if (bPrev->bbJumpDest->isRunRarely()) |
| 11562 | { |
| 11563 | INDEBUG(reason = "Unconditional jump to a rarely run block" ;) |
| 11564 | goto NEW_RARELY_RUN; |
| 11565 | } |
| 11566 | break; |
| 11567 | |
| 11568 | case BBJ_CALLFINALLY: |
| 11569 | |
| 11570 | // Check for a BBJ_CALLFINALLY followed by a rarely run paired BBJ_ALWAYS |
| 11571 | // |
| 11572 | if (bPrev->isBBCallAlwaysPair()) |
| 11573 | { |
| 11574 | /* Is the next block rarely run? */ |
| 11575 | if (block->isRunRarely()) |
| 11576 | { |
| 11577 | INDEBUG(reason = "Call of finally followed by a rarely run block" ;) |
| 11578 | goto NEW_RARELY_RUN; |
| 11579 | } |
| 11580 | } |
| 11581 | break; |
| 11582 | |
| 11583 | case BBJ_NONE: |
| 11584 | |
| 11585 | /* is fall through target rarely run? */ |
| 11586 | if (block->isRunRarely()) |
| 11587 | { |
| 11588 | INDEBUG(reason = "Falling into a rarely run block" ;) |
| 11589 | goto NEW_RARELY_RUN; |
| 11590 | } |
| 11591 | break; |
| 11592 | |
| 11593 | case BBJ_COND: |
| 11594 | |
| 11595 | if (!block->isRunRarely()) |
| 11596 | { |
| 11597 | continue; |
| 11598 | } |
| 11599 | |
| 11600 | /* If both targets of the BBJ_COND are run rarely then don't reorder */ |
| 11601 | if (bPrev->bbJumpDest->isRunRarely()) |
| 11602 | { |
| 11603 | /* bPrev should also be marked as run rarely */ |
| 11604 | if (!bPrev->isRunRarely()) |
| 11605 | { |
| 11606 | INDEBUG(reason = "Both sides of a conditional jump are rarely run" ;) |
| 11607 | |
| 11608 | NEW_RARELY_RUN: |
| 11609 | /* If the weight of the block was obtained from a profile run, |
                       then it's more accurate than our static analysis */
| 11611 | if (bPrev->hasProfileWeight()) |
| 11612 | { |
| 11613 | continue; |
| 11614 | } |
| 11615 | result = true; |
| 11616 | |
| 11617 | #ifdef DEBUG |
| 11618 | assert(reason != nullptr); |
| 11619 | if (verbose) |
| 11620 | { |
| 11621 | printf("%s, marking " FMT_BB " as rarely run\n" , reason, bPrev->bbNum); |
| 11622 | } |
| 11623 | #endif // DEBUG |
| 11624 | |
| 11625 | /* Must not have previously been marked */ |
| 11626 | noway_assert(!bPrev->isRunRarely()); |
| 11627 | |
| 11628 | /* Mark bPrev as a new rarely run block */ |
| 11629 | bPrev->bbSetRunRarely(); |
| 11630 | |
| 11631 | BasicBlock* bPrevPrev = nullptr; |
| 11632 | BasicBlock* tmpbb; |
| 11633 | |
| 11634 | if ((bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) |
| 11635 | { |
| 11636 | // If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an |
| 11637 | // additional predecessor for the BBJ_ALWAYS block |
| 11638 | tmpbb = bPrev->bbPrev; |
| 11639 | noway_assert(tmpbb != nullptr); |
| 11640 | #if FEATURE_EH_FUNCLETS |
| 11641 | noway_assert(tmpbb->isBBCallAlwaysPair()); |
| 11642 | bPrevPrev = tmpbb; |
| 11643 | #else |
| 11644 | if (tmpbb->bbJumpKind == BBJ_CALLFINALLY) |
| 11645 | { |
| 11646 | bPrevPrev = tmpbb; |
| 11647 | } |
| 11648 | #endif |
| 11649 | } |
| 11650 | |
                    /* Now go back to its earliest predecessor to see */
| 11652 | /* if it too should now be marked as rarely run */ |
| 11653 | flowList* pred = bPrev->bbPreds; |
| 11654 | |
| 11655 | if ((pred != nullptr) || (bPrevPrev != nullptr)) |
| 11656 | { |
| 11657 | // bPrevPrev will be set to the lexically |
| 11658 | // earliest predecessor of bPrev. |
| 11659 | |
| 11660 | while (pred != nullptr) |
| 11661 | { |
| 11662 | if (bPrevPrev == nullptr) |
| 11663 | { |
| 11664 | // Initially we select the first block in the bbPreds list |
| 11665 | bPrevPrev = pred->flBlock; |
| 11666 | continue; |
| 11667 | } |
| 11668 | |
| 11669 | // Walk the flow graph lexically forward from pred->flBlock |
| 11670 | // if we find (block == bPrevPrev) then |
| 11671 | // pred->flBlock is an earlier predecessor. |
| 11672 | for (tmpbb = pred->flBlock; tmpbb != nullptr; tmpbb = tmpbb->bbNext) |
| 11673 | { |
| 11674 | if (tmpbb == bPrevPrev) |
| 11675 | { |
                                /* We found an earlier predecessor */
| 11677 | bPrevPrev = pred->flBlock; |
| 11678 | break; |
| 11679 | } |
| 11680 | else if (tmpbb == bPrev) |
| 11681 | { |
| 11682 | // We have reached bPrev so stop walking |
| 11683 | // as this cannot be an earlier predecessor |
| 11684 | break; |
| 11685 | } |
| 11686 | } |
| 11687 | |
| 11688 | // Onto the next predecessor |
| 11689 | pred = pred->flNext; |
| 11690 | } |
| 11691 | |
| 11692 | // Walk the flow graph forward from bPrevPrev |
| 11693 | // if we don't find (tmpbb == bPrev) then our candidate |
| 11694 | // bPrevPrev is lexically after bPrev and we do not |
| 11695 | // want to select it as our new block |
| 11696 | |
| 11697 | for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->bbNext) |
| 11698 | { |
| 11699 | if (tmpbb == bPrev) |
| 11700 | { |
                                // Set 'block' back to the lexically
                                // earliest predecessor of bPrev
| 11703 | |
| 11704 | block = bPrevPrev; |
| 11705 | } |
| 11706 | } |
| 11707 | } |
| 11708 | } |
| 11709 | break; |
| 11710 | |
| 11711 | default: |
| 11712 | break; |
| 11713 | } |
| 11714 | } |
| 11715 | } |
| 11716 | |
| 11717 | // Now iterate over every block to see if we can prove that a block is rarely run |
| 11718 | // (i.e. when all predecessors to the block are rarely run) |
| 11719 | // |
| 11720 | for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) |
| 11721 | { |
        // If block is not run rarely, then check to make sure that it has
        // at least one predecessor that is not rarely run.
| 11724 | |
| 11725 | if (!block->isRunRarely()) |
| 11726 | { |
| 11727 | bool rare = true; |
| 11728 | |
| 11729 | /* Make sure that block has at least one normal predecessor */ |
| 11730 | for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext) |
| 11731 | { |
                /* If we find any predecessor that is not rarely run, then 'block' is not provably rare */
| 11733 | if (!pred->flBlock->isRunRarely()) |
| 11734 | { |
| 11735 | rare = false; |
| 11736 | break; |
| 11737 | } |
| 11738 | } |
| 11739 | |
| 11740 | if (rare) |
| 11741 | { |
| 11742 | // If 'block' is the start of a handler or filter then we cannot make it |
| 11743 | // rarely run because we may have an exceptional edge that |
| 11744 | // branches here. |
| 11745 | // |
| 11746 | if (bbIsHandlerBeg(block)) |
| 11747 | { |
| 11748 | rare = false; |
| 11749 | } |
| 11750 | } |
| 11751 | |
| 11752 | if (rare) |
| 11753 | { |
| 11754 | block->bbSetRunRarely(); |
| 11755 | result = true; |
| 11756 | |
| 11757 | #ifdef DEBUG |
| 11758 | if (verbose) |
| 11759 | { |
| 11760 | printf("All branches to " FMT_BB " are from rarely run blocks, marking as rarely run\n" , |
| 11761 | block->bbNum); |
| 11762 | } |
| 11763 | #endif // DEBUG |
| 11764 | |
| 11765 | // When marking a BBJ_CALLFINALLY as rarely run we also mark |
| 11766 | // the BBJ_ALWAYS that comes after it as rarely run |
| 11767 | // |
| 11768 | if (block->isBBCallAlwaysPair()) |
| 11769 | { |
| 11770 | BasicBlock* bNext = block->bbNext; |
| 11771 | PREFIX_ASSUME(bNext != nullptr); |
| 11772 | bNext->bbSetRunRarely(); |
| 11773 | #ifdef DEBUG |
| 11774 | if (verbose) |
| 11775 | { |
| 11776 | printf("Also marking the BBJ_ALWAYS at " FMT_BB " as rarely run\n" , bNext->bbNum); |
| 11777 | } |
| 11778 | #endif // DEBUG |
| 11779 | } |
| 11780 | } |
| 11781 | } |
| 11782 | |
| 11783 | /* COMPACT blocks if possible */ |
| 11784 | if (bPrev->bbJumpKind == BBJ_NONE) |
| 11785 | { |
| 11786 | if (fgCanCompactBlocks(bPrev, block)) |
| 11787 | { |
| 11788 | fgCompactBlocks(bPrev, block); |
| 11789 | |
| 11790 | block = bPrev; |
| 11791 | continue; |
| 11792 | } |
| 11793 | } |
| 11794 | // |
| 11795 | // if bPrev->bbWeight is not based upon profile data we can adjust |
| 11796 | // the weights of bPrev and block |
| 11797 | // |
        else if (bPrev->isBBCallAlwaysPair() &&          // we must have a BBJ_CALLFINALLY and BBJ_ALWAYS pair
| 11799 | (bPrev->bbWeight != block->bbWeight) && // the weights are currently different |
| 11800 | !bPrev->hasProfileWeight()) // and the BBJ_CALLFINALLY block is not using profiled |
| 11801 | // weights |
| 11802 | { |
| 11803 | if (block->isRunRarely()) |
| 11804 | { |
| 11805 | bPrev->bbWeight = |
| 11806 | block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block |
| 11807 | bPrev->bbFlags |= BBF_RUN_RARELY; // and is now rarely run |
| 11808 | #ifdef DEBUG |
| 11809 | if (verbose) |
| 11810 | { |
| 11811 | printf("Marking the BBJ_CALLFINALLY block at " FMT_BB " as rarely run because " FMT_BB |
| 11812 | " is rarely run\n" , |
| 11813 | bPrev->bbNum, block->bbNum); |
| 11814 | } |
| 11815 | #endif // DEBUG |
| 11816 | } |
| 11817 | else if (bPrev->isRunRarely()) |
| 11818 | { |
| 11819 | block->bbWeight = |
| 11820 | bPrev->bbWeight; // the BBJ_ALWAYS block now has the same weight as the BBJ_CALLFINALLY block |
| 11821 | block->bbFlags |= BBF_RUN_RARELY; // and is now rarely run |
| 11822 | #ifdef DEBUG |
| 11823 | if (verbose) |
| 11824 | { |
| 11825 | printf("Marking the BBJ_ALWAYS block at " FMT_BB " as rarely run because " FMT_BB |
| 11826 | " is rarely run\n" , |
| 11827 | block->bbNum, bPrev->bbNum); |
| 11828 | } |
| 11829 | #endif // DEBUG |
| 11830 | } |
| 11831 | else // Both blocks are hot, bPrev is known not to be using profiled weight |
| 11832 | { |
| 11833 | bPrev->bbWeight = |
| 11834 | block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block |
| 11835 | } |
| 11836 | noway_assert(block->bbWeight == bPrev->bbWeight); |
| 11837 | } |
| 11838 | } |
| 11839 | |
| 11840 | return result; |
| 11841 | } |
| 11842 | |
| 11843 | /***************************************************************************** |
| 11844 | * |
| 11845 | * Returns true if it is allowable (based upon the EH regions) |
| 11846 | * to place block bAfter immediately after bBefore. It is allowable |
| 11847 | * if the 'bBefore' and 'bAfter' blocks are in the exact same EH region. |
| 11848 | */ |
| 11849 | |
| 11850 | bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter) |
| 11851 | { |
| 11852 | return BasicBlock::sameEHRegion(bBefore, bAfter); |
| 11853 | } |
| 11854 | |
| 11855 | /***************************************************************************** |
| 11856 | * |
| 11857 | * Function called to move the range of blocks [bStart .. bEnd]. |
| 11858 | * The blocks are placed immediately after the insertAfterBlk. |
| 11859 | * fgFirstFuncletBB is not updated; that is the responsibility of the caller, if necessary. |
| 11860 | */ |
| 11861 | |
| 11862 | void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk) |
| 11863 | { |
| 11864 | /* We have decided to insert the block(s) after 'insertAfterBlk' */ |
| 11865 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 11866 | |
| 11867 | #ifdef DEBUG |
| 11868 | if (verbose) |
| 11869 | { |
| 11870 | printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n" , (bStart == bEnd) ? "" : "s" , |
| 11871 | bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum, |
| 11872 | (insertAfterBlk->bbNext == nullptr) ? " at the end of method" : "" ); |
| 11873 | } |
| 11874 | #endif // DEBUG |
| 11875 | |
| 11876 | /* relink [bStart .. bEnd] into the flow graph */ |
| 11877 | |
| 11878 | bEnd->bbNext = insertAfterBlk->bbNext; |
| 11879 | if (insertAfterBlk->bbNext) |
| 11880 | { |
| 11881 | insertAfterBlk->bbNext->bbPrev = bEnd; |
| 11882 | } |
| 11883 | insertAfterBlk->setNext(bStart); |
| 11884 | |
| 11885 | /* If insertAfterBlk was fgLastBB then update fgLastBB */ |
| 11886 | if (insertAfterBlk == fgLastBB) |
| 11887 | { |
| 11888 | fgLastBB = bEnd; |
| 11889 | noway_assert(fgLastBB->bbNext == nullptr); |
| 11890 | } |
| 11891 | } |
| 11892 | |
| 11893 | /***************************************************************************** |
| 11894 | * |
| 11895 | * Function called to relocate a single range to the end of the method. |
| 11896 | * Only an entire consecutive region can be moved and it will be kept together. |
| 11897 | * Except for the first block, the range cannot have any blocks that jump into or out of the region. |
| 11898 | * When successful we return the bLast block which is the last block that we relocated. |
| 11899 | * When unsuccessful we return NULL. |
| 11900 | |
| 11901 | ============================================================= |
| 11902 | NOTE: This function can invalidate all pointers into the EH table, as well as change the size of the EH table! |
| 11903 | ============================================================= |
| 11904 | */ |
| 11905 | |
| 11906 | BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType) |
| 11907 | { |
| 11908 | INDEBUG(const char* reason = "None" ;) |
| 11909 | |
| 11910 | // Figure out the range of blocks we're going to move |
| 11911 | |
| 11912 | unsigned XTnum; |
| 11913 | EHblkDsc* HBtab; |
| 11914 | BasicBlock* bStart = nullptr; |
| 11915 | BasicBlock* bMiddle = nullptr; |
| 11916 | BasicBlock* bLast = nullptr; |
| 11917 | BasicBlock* bPrev = nullptr; |
| 11918 | |
| 11919 | #if FEATURE_EH_FUNCLETS |
| 11920 | // We don't support moving try regions... yet? |
| 11921 | noway_assert(relocateType == FG_RELOCATE_HANDLER); |
| 11922 | #endif // FEATURE_EH_FUNCLETS |
| 11923 | |
| 11924 | HBtab = ehGetDsc(regionIndex); |
| 11925 | |
| 11926 | if (relocateType == FG_RELOCATE_TRY) |
| 11927 | { |
| 11928 | bStart = HBtab->ebdTryBeg; |
| 11929 | bLast = HBtab->ebdTryLast; |
| 11930 | } |
| 11931 | else if (relocateType == FG_RELOCATE_HANDLER) |
| 11932 | { |
| 11933 | if (HBtab->HasFilter()) |
| 11934 | { |
| 11935 | // The filter and handler funclets must be moved together, and remain contiguous. |
| 11936 | bStart = HBtab->ebdFilter; |
| 11937 | bMiddle = HBtab->ebdHndBeg; |
| 11938 | bLast = HBtab->ebdHndLast; |
| 11939 | } |
| 11940 | else |
| 11941 | { |
| 11942 | bStart = HBtab->ebdHndBeg; |
| 11943 | bLast = HBtab->ebdHndLast; |
| 11944 | } |
| 11945 | } |
| 11946 | |
| 11947 | // Our range must contain either all rarely run blocks or all non-rarely run blocks |
| 11948 | bool inTheRange = false; |
| 11949 | bool validRange = false; |
| 11950 | |
| 11951 | BasicBlock* block; |
| 11952 | |
| 11953 | noway_assert(bStart != nullptr && bLast != nullptr); |
| 11954 | if (bStart == fgFirstBB) |
| 11955 | { |
| 11956 | INDEBUG(reason = "can not relocate first block" ;) |
| 11957 | goto FAILURE; |
| 11958 | } |
| 11959 | |
| 11960 | #if !FEATURE_EH_FUNCLETS |
    // (In the funclets case we skip this early-out, because even if the region is already at the
    // end of the method we still need to set some information on the handler blocks.)
| 11962 | if (bLast->bbNext == NULL) |
| 11963 | { |
| 11964 | INDEBUG(reason = "region is already at the end of the method" ;) |
| 11965 | goto FAILURE; |
| 11966 | } |
| 11967 | #endif // !FEATURE_EH_FUNCLETS |
| 11968 | |
| 11969 | // Walk the block list for this purpose: |
| 11970 | // 1. Verify that all the blocks in the range are either all rarely run or not rarely run. |
| 11971 | // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks |
| 11972 | // in the range. |
| 11973 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 11974 | |
| 11975 | #if !FEATURE_EH_FUNCLETS |
| 11976 | bool isRare; |
| 11977 | isRare = bStart->isRunRarely(); |
| 11978 | #endif // !FEATURE_EH_FUNCLETS |
| 11979 | block = fgFirstBB; |
| 11980 | while (true) |
| 11981 | { |
| 11982 | if (block == bStart) |
| 11983 | { |
| 11984 | noway_assert(inTheRange == false); |
| 11985 | inTheRange = true; |
| 11986 | } |
| 11987 | else if (block == bLast->bbNext) |
| 11988 | { |
| 11989 | noway_assert(inTheRange == true); |
| 11990 | inTheRange = false; |
| 11991 | break; // we found the end, so we're done |
| 11992 | } |
| 11993 | |
| 11994 | if (inTheRange) |
| 11995 | { |
| 11996 | #if !FEATURE_EH_FUNCLETS |
| 11997 | // Unless all blocks are (not) run rarely we must return false. |
| 11998 | if (isRare != block->isRunRarely()) |
| 11999 | { |
| 12000 | INDEBUG(reason = "this region contains both rarely run and non-rarely run blocks" ;) |
| 12001 | goto FAILURE; |
| 12002 | } |
| 12003 | #endif // !FEATURE_EH_FUNCLETS |
| 12004 | |
| 12005 | validRange = true; |
| 12006 | } |
| 12007 | |
| 12008 | if (block == nullptr) |
| 12009 | { |
| 12010 | break; |
| 12011 | } |
| 12012 | |
| 12013 | block = block->bbNext; |
| 12014 | } |
| 12015 | // Ensure that bStart .. bLast defined a valid range |
| 12016 | noway_assert((validRange == true) && (inTheRange == false)); |
| 12017 | |
| 12018 | bPrev = bStart->bbPrev; |
| 12019 | noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function. |
| 12020 | |
| 12021 | JITDUMP("Relocating %s range " FMT_BB ".." FMT_BB " (EH#%u) to end of BBlist\n" , |
| 12022 | (relocateType == FG_RELOCATE_TRY) ? "try" : "handler" , bStart->bbNum, bLast->bbNum, regionIndex); |
| 12023 | |
| 12024 | #ifdef DEBUG |
| 12025 | if (verbose) |
| 12026 | { |
| 12027 | fgDispBasicBlocks(); |
| 12028 | fgDispHandlerTab(); |
| 12029 | } |
| 12030 | |
| 12031 | if (!FEATURE_EH_FUNCLETS) |
| 12032 | { |
| 12033 | // This is really expensive, and quickly becomes O(n^n) with funclets |
| 12034 | // so only do it once after we've created them (see fgCreateFunclets) |
| 12035 | if (expensiveDebugCheckLevel >= 2) |
| 12036 | { |
| 12037 | fgDebugCheckBBlist(); |
| 12038 | } |
| 12039 | } |
| 12040 | #endif // DEBUG |
| 12041 | |
| 12042 | #if FEATURE_EH_FUNCLETS |
| 12043 | |
| 12044 | bStart->bbFlags |= BBF_FUNCLET_BEG; // Mark the start block of the funclet |
| 12045 | |
| 12046 | if (bMiddle != nullptr) |
| 12047 | { |
| 12048 | bMiddle->bbFlags |= BBF_FUNCLET_BEG; // Also mark the start block of a filter handler as a funclet |
| 12049 | } |
| 12050 | |
| 12051 | #endif // FEATURE_EH_FUNCLETS |
| 12052 | |
| 12053 | BasicBlock* bNext; |
| 12054 | bNext = bLast->bbNext; |
| 12055 | |
| 12056 | /* Temporarily unlink [bStart .. bLast] from the flow graph */ |
| 12057 | fgUnlinkRange(bStart, bLast); |
| 12058 | |
| 12059 | BasicBlock* insertAfterBlk; |
| 12060 | insertAfterBlk = fgLastBB; |
| 12061 | |
| 12062 | #if FEATURE_EH_FUNCLETS |
| 12063 | |
| 12064 | // There are several cases we need to consider when moving an EH range. |
| 12065 | // If moving a range X, we must consider its relationship to every other EH |
| 12066 | // range A in the table. Note that each entry in the table represents both |
| 12067 | // a protected region and a handler region (possibly including a filter region |
| 12068 | // that must live before and adjacent to the handler region), so we must |
| 12069 | // consider try and handler regions independently. These are the cases: |
| 12070 | // 1. A is completely contained within X (where "completely contained" means |
| 12071 | // that the 'begin' and 'last' parts of A are strictly between the 'begin' |
| 12072 | // and 'end' parts of X, and aren't equal to either, for example, they don't |
| 12073 | // share 'last' blocks). In this case, when we move X, A moves with it, and |
| 12074 | // the EH table doesn't need to change. |
    // 2. X is completely contained within A. In this case, X gets extracted from A,
    //    and the range of A shrinks, but because X is strictly within A, the EH
    //    table doesn't need to change.
| 12078 | // 3. A and X have exactly the same range. In this case, A is moving with X and |
| 12079 | // the EH table doesn't need to change. |
    // 4. A and X share the 'last' block. There are two sub-cases:
    //    (a) A is a larger range than X (such that the beginning of A precedes the
    //        beginning of X): in this case, we are moving the tail of A. We set the
    //        'last' block of A to the block preceding the beginning block of X.
    //    (b) A is a smaller range than X. Thus, we are moving the entirety of A along
    //        with X. In this case, nothing in the EH record for A needs to change.
| 12086 | // 5. A and X share the 'beginning' block (but aren't the same range, as in #3). |
| 12087 | // This can never happen here, because we are only moving handler ranges (we don't |
| 12088 | // move try ranges), and handler regions cannot start at the beginning of a try |
| 12089 | // range or handler range and be a subset. |
| 12090 | // |
| 12091 | // Note that A and X must properly nest for the table to be well-formed. For example, |
| 12092 | // the beginning of A can't be strictly within the range of X (that is, the beginning |
| 12093 | // of A isn't shared with the beginning of X) and the end of A outside the range. |
| 12094 | |
| 12095 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 12096 | { |
| 12097 | if (XTnum != regionIndex) // we don't need to update our 'last' pointer |
| 12098 | { |
| 12099 | if (HBtab->ebdTryLast == bLast) |
| 12100 | { |
| 12101 | // If we moved a set of blocks that were at the end of |
| 12102 | // a different try region then we may need to update ebdTryLast |
| 12103 | for (block = HBtab->ebdTryBeg; block != nullptr; block = block->bbNext) |
| 12104 | { |
| 12105 | if (block == bPrev) |
| 12106 | { |
| 12107 | // We were contained within it, so shrink its region by |
| 12108 | // setting its 'last' |
| 12109 | fgSetTryEnd(HBtab, bPrev); |
| 12110 | break; |
| 12111 | } |
| 12112 | else if (block == HBtab->ebdTryLast->bbNext) |
| 12113 | { |
                        // bPrev does not come after the TryBeg, so the moved range contains
                        // this entire try region: it is moving with us and needs no update.
| 12116 | break; |
| 12117 | } |
| 12118 | } |
| 12119 | } |
| 12120 | if (HBtab->ebdHndLast == bLast) |
| 12121 | { |
| 12122 | // If we moved a set of blocks that were at the end of |
| 12123 | // a different handler region then we must update ebdHndLast |
| 12124 | for (block = HBtab->ebdHndBeg; block != nullptr; block = block->bbNext) |
| 12125 | { |
| 12126 | if (block == bPrev) |
| 12127 | { |
| 12128 | fgSetHndEnd(HBtab, bPrev); |
| 12129 | break; |
| 12130 | } |
| 12131 | else if (block == HBtab->ebdHndLast->bbNext) |
| 12132 | { |
| 12133 | // bPrev is not in this handler region, so the region moves with the relocated range; no update needed.
| 12134 | break; |
| 12135 | } |
| 12136 | } |
| 12137 | } |
| 12138 | } |
| 12139 | } // end exception table iteration |
| 12140 | |
| 12141 | // Insert the block(s) we are moving after 'insertAfterBlk' (the end of the funclet region)
| 12142 | fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); |
| 12143 | |
| 12144 | if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet |
| 12145 | { |
| 12146 | fgFirstFuncletBB = bStart; |
| 12147 | } |
| 12148 | else |
| 12149 | { |
| 12150 | assert(fgFirstFuncletBB != |
| 12151 | insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region. |
| 12152 | } |
| 12153 | |
| 12154 | // These asserts assume we aren't moving try regions (which we might need to do). Only |
| 12155 | // try regions can have fall through into or out of the region. |
| 12156 | |
| 12157 | noway_assert(!bPrev->bbFallsThrough()); // There can be no fall through into a filter or handler region |
| 12158 | noway_assert(!bLast->bbFallsThrough()); // There can be no fall through out of a handler region |
| 12159 | |
| 12160 | #ifdef DEBUG |
| 12161 | if (verbose) |
| 12162 | { |
| 12163 | printf("Create funclets: moved region\n" ); |
| 12164 | fgDispHandlerTab(); |
| 12165 | } |
| 12166 | |
| 12167 | // We have to wait to do this until we've created all the additional regions,
| 12168 | // because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex.
| 12169 | if (!FEATURE_EH_FUNCLETS) |
| 12170 | { |
| 12171 | // This is really expensive, and quickly becomes O(n^n) with funclets |
| 12172 | // so only do it once after we've created them (see fgCreateFunclets) |
| 12173 | if (expensiveDebugCheckLevel >= 2) |
| 12174 | { |
| 12175 | fgDebugCheckBBlist(); |
| 12176 | } |
| 12177 | } |
| 12178 | #endif // DEBUG |
| 12179 | |
| 12180 | #else // FEATURE_EH_FUNCLETS |
| 12181 | |
| 12182 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 12183 | { |
| 12184 | if (XTnum == regionIndex) |
| 12185 | { |
| 12186 | // Don't update our handler's Last info |
| 12187 | continue; |
| 12188 | } |
| 12189 | |
| 12190 | if (HBtab->ebdTryLast == bLast) |
| 12191 | { |
| 12192 | // If we moved a set of blocks that were at the end of |
| 12193 | // a different try region then we may need to update ebdTryLast |
| 12194 | for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext) |
| 12195 | { |
| 12196 | if (block == bPrev) |
| 12197 | { |
| 12198 | fgSetTryEnd(HBtab, bPrev); |
| 12199 | break; |
| 12200 | } |
| 12201 | else if (block == HBtab->ebdTryLast->bbNext) |
| 12202 | { |
| 12203 | // bPrev is not in this try region, so the region moves with the relocated range
| 12204 | break; |
| 12205 | } |
| 12206 | } |
| 12207 | } |
| 12208 | if (HBtab->ebdHndLast == bLast) |
| 12209 | { |
| 12210 | // If we moved a set of blocks that were at the end of |
| 12211 | // a different handler region then we must update ebdHndLast |
| 12212 | for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext) |
| 12213 | { |
| 12214 | if (block == bPrev) |
| 12215 | { |
| 12216 | fgSetHndEnd(HBtab, bPrev); |
| 12217 | break; |
| 12218 | } |
| 12219 | else if (block == HBtab->ebdHndLast->bbNext) |
| 12220 | { |
| 12221 | // bPrev is not in this handler region, so the region moves with the relocated range
| 12222 | break; |
| 12223 | } |
| 12224 | } |
| 12225 | } |
| 12226 | } // end exception table iteration |
| 12227 | |
| 12228 | // We have decided to insert the block(s) after 'insertAfterBlk'
| 12229 | fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); |
| 12230 | |
| 12231 | // If bPrev falls through, we will insert a jump to block |
| 12232 | fgConnectFallThrough(bPrev, bStart); |
| 12233 | |
| 12234 | // If bLast falls through, we will insert a jump to bNext |
| 12235 | fgConnectFallThrough(bLast, bNext); |
| 12236 | |
| 12237 | #endif // FEATURE_EH_FUNCLETS |
| 12238 | |
| 12239 | goto DONE; |
| 12240 | |
| 12241 | FAILURE: |
| 12242 | |
| 12243 | #ifdef DEBUG |
| 12244 | if (verbose) |
| 12245 | { |
| 12246 | printf("*************** Failed fgRelocateEHRange(" FMT_BB ".." FMT_BB ") because %s\n" , bStart->bbNum, |
| 12247 | bLast->bbNum, reason); |
| 12248 | } |
| 12249 | #endif // DEBUG |
| 12250 | |
| 12251 | bLast = nullptr; |
| 12252 | |
| 12253 | DONE: |
| 12254 | |
| 12255 | return bLast; |
| 12256 | } |
| 12257 | |
| 12258 | #if FEATURE_EH_FUNCLETS |
| 12259 | |
| 12260 | #if defined(_TARGET_ARM_) |
| 12261 | |
| 12262 | /***************************************************************************** |
| 12263 | * We just removed a BBJ_CALLFINALLY/BBJ_ALWAYS pair. If this was the only such pair |
| 12264 | * targeting the BBJ_ALWAYS target, then we need to clear the BBF_FINALLY_TARGET bit |
| 12265 | * so that target can also be removed. 'block' is the finally target. Since we just |
| 12266 | * removed the BBJ_ALWAYS, it better have the BBF_FINALLY_TARGET bit set. |
| 12267 | */ |
| 12268 | |
| 12269 | void Compiler::fgClearFinallyTargetBit(BasicBlock* block) |
| 12270 | { |
| 12271 | assert(fgComputePredsDone); |
| 12272 | assert((block->bbFlags & BBF_FINALLY_TARGET) != 0); |
| 12273 | |
| 12274 | for (flowList* pred = block->bbPreds; pred; pred = pred->flNext) |
| 12275 | { |
| 12276 | if (pred->flBlock->bbJumpKind == BBJ_ALWAYS && pred->flBlock->bbJumpDest == block) |
| 12277 | { |
| 12278 | BasicBlock* pPrev = pred->flBlock->bbPrev; |
| 12279 | if (pPrev != NULL) |
| 12280 | { |
| 12281 | if (pPrev->bbJumpKind == BBJ_CALLFINALLY) |
| 12282 | { |
| 12283 | // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target |
| 12284 | return; |
| 12285 | } |
| 12286 | } |
| 12287 | } |
| 12288 | } |
| 12289 | |
| 12290 | // Didn't find any BBJ_CALLFINALLY / BBJ_ALWAYS that still points here, so clear the bit |
| 12291 | |
| 12292 | block->bbFlags &= ~BBF_FINALLY_TARGET; |
| 12293 | } |
| 12294 | |
| 12295 | #endif // defined(_TARGET_ARM_) |
| 12296 | |
| 12297 | /***************************************************************************** |
| 12298 | * Is this an intra-handler control flow edge? |
| 12299 | * |
| 12300 | * 'block' is the head block of a funclet/handler region.
| 12301 | * 'predBlock' is a predecessor block of 'block' in the predecessor list. |
| 12302 | * |
| 12303 | * 'predBlock' can legally only be one of three things: |
| 12304 | * 1. in the same handler region (e.g., the source of a back-edge of a loop from |
| 12305 | * 'predBlock' to 'block'), including in nested regions within the handler, |
| 12306 | * 2. if 'block' begins a handler that is a filter-handler, 'predBlock' must be in the 'filter' region, |
| 12307 | * 3. for other handlers, 'predBlock' must be in the 'try' region corresponding to the handler (or any
| 12308 | * region nested in the 'try' region). |
| 12309 | * |
| 12310 | * Note that on AMD64/ARM64, the BBJ_CALLFINALLY block that calls a finally handler is not |
| 12311 | * within the corresponding 'try' region: it is placed in the corresponding 'try' region's |
| 12312 | * parent (which might be the main function body). This is how it is represented to the VM |
| 12313 | * (with a special "cloned finally" EH table entry). |
| 12314 | * |
| 12315 | * Return 'true' for case #1, and 'false' otherwise. |
| 12316 | */ |
| 12317 | bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) |
| 12318 | { |
| 12319 | // Some simple preconditions (as stated above) |
| 12320 | assert(!fgFuncletsCreated); |
| 12321 | assert(fgGetPredForBlock(block, predBlock) != nullptr); |
| 12322 | assert(block->hasHndIndex()); |
| 12323 | |
| 12324 | EHblkDsc* xtab = ehGetDsc(block->getHndIndex()); |
| 12325 | |
| 12326 | #if FEATURE_EH_CALLFINALLY_THUNKS |
| 12327 | if (xtab->HasFinallyHandler()) |
| 12328 | { |
| 12329 | assert((xtab->ebdHndBeg == block) || // The normal case |
| 12330 | ((xtab->ebdHndBeg->bbNext == block) && |
| 12331 | (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're |
| 12332 | // trying to decide how to split up the predecessor edges. |
| 12333 | if (predBlock->bbJumpKind == BBJ_CALLFINALLY) |
| 12334 | { |
| 12335 | assert(predBlock->bbJumpDest == block); |
| 12336 | |
| 12337 | // A BBJ_CALLFINALLY predecessor of the handler can only come from the corresponding try, |
| 12338 | // not from any EH clauses nested in this handler. However, we represent the BBJ_CALLFINALLY |
| 12339 | // as being in the 'try' region's parent EH region, which might be the main function body. |
| 12340 | |
| 12341 | unsigned tryIndex = xtab->ebdEnclosingTryIndex; |
| 12342 | if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) |
| 12343 | { |
| 12344 | assert(!predBlock->hasTryIndex()); |
| 12345 | } |
| 12346 | else |
| 12347 | { |
| 12348 | assert(predBlock->hasTryIndex()); |
| 12349 | assert(tryIndex == predBlock->getTryIndex()); |
| 12350 | assert(ehGetDsc(tryIndex)->InTryRegionBBRange(predBlock)); |
| 12351 | } |
| 12352 | return false; |
| 12353 | } |
| 12354 | } |
| 12355 | #endif // FEATURE_EH_CALLFINALLY_THUNKS |
| 12356 | |
| 12357 | assert(predBlock->hasHndIndex() || predBlock->hasTryIndex()); |
| 12358 | |
| 12359 | // We could search the try region looking for predBlock by using bbInTryRegions |
| 12360 | // but that does a lexical search for the block, and then assumes funclets |
| 12361 | // have been created and does a lexical search of all funclets that were pulled |
| 12362 | // out of the parent try region. |
| 12363 | // First, funclets haven't been created yet, and even if they had been, we shouldn't |
| 12364 | // have any funclet directly branching to another funclet (they have to return first). |
| 12365 | // So we can safely use CheckIsTryRegion instead of bbInTryRegions. |
| 12366 | // Second, I believe the depth of any EH graph will on average be smaller than the |
| 12367 | // breadth of the blocks within a try body. Thus it is faster to get our answer by |
| 12368 | // looping outward over the region graph. However, I have added asserts, as a |
| 12369 | // precaution, to ensure both algorithms agree. The asserts also check that the only |
| 12370 | // way to reach the head of a funclet is from the corresponding try body or from |
| 12371 | // within the funclet (and *not* any nested funclets). |
| 12372 | |
| 12373 | if (predBlock->hasTryIndex()) |
| 12374 | { |
| 12375 | // Because the EH clauses are listed inside-out, any nested trys will be at a |
| 12376 | // lower index than the current try and if there's no enclosing try, tryIndex |
| 12377 | // will terminate at NO_ENCLOSING_INDEX |
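| | //
| | // For example (a hypothetical EH table, innermost entry first): if predBlock is in the
| | // try at index 0, whose enclosing try is the entry at index 2, and 'block' is the head
| | // of the handler of entry 2 (so block->getHndIndex() == 2), then the walk goes 0 -> 2
| | // and stops because 2 is no longer less than block->getHndIndex(); the equality test
| | // below then classifies predBlock as coming from the corresponding try region.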
| 12378 | |
| 12379 | unsigned tryIndex = predBlock->getTryIndex(); |
| 12380 | while (tryIndex < block->getHndIndex()) |
| 12381 | { |
| 12382 | tryIndex = ehGetEnclosingTryIndex(tryIndex); |
| 12383 | } |
| 12384 | // tryIndex should enclose predBlock |
| 12385 | assert((tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) || ehGetDsc(tryIndex)->InTryRegionBBRange(predBlock)); |
| 12386 | |
| 12387 | // At this point tryIndex is either block's handler's corresponding try body |
| 12388 | // or some outer try region that contains both predBlock & block or |
| 12389 | // NO_ENCLOSING_INDEX (because there was no try body that encloses both).
| 12390 | if (tryIndex == block->getHndIndex()) |
| 12391 | { |
| 12392 | assert(xtab->InTryRegionBBRange(predBlock)); |
| 12393 | assert(!xtab->InHndRegionBBRange(predBlock)); |
| 12394 | return false; |
| 12395 | } |
| 12396 | // tryIndex should enclose block (and predBlock as previously asserted) |
| 12397 | assert((tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) || ehGetDsc(tryIndex)->InTryRegionBBRange(block)); |
| 12398 | } |
| 12399 | if (xtab->HasFilter()) |
| 12400 | { |
| 12401 | // The block is a handler. Check if the pred block is from its filter. We only need to |
| 12402 | // check the end filter flag, as there is only a single filter for any handler, and we |
| 12403 | // already know predBlock is a predecessor of block. |
| 12404 | if (predBlock->bbJumpKind == BBJ_EHFILTERRET) |
| 12405 | { |
| 12406 | assert(!xtab->InHndRegionBBRange(predBlock)); |
| 12407 | return false; |
| 12408 | } |
| 12409 | } |
| 12410 | // It is not in our try region (or filter), so it must be within this handler (or try bodies |
| 12411 | // within this handler) |
| 12412 | assert(!xtab->InTryRegionBBRange(predBlock)); |
| 12413 | assert(xtab->InHndRegionBBRange(predBlock)); |
| 12414 | return true; |
| 12415 | } |
| 12416 | |
| 12417 | /***************************************************************************** |
| 12418 | * Does this block, first block of a handler region, have any predecessor edges |
| 12419 | * that are not from its corresponding try region? |
| 12420 | */ |
| 12421 | |
| 12422 | bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) |
| 12423 | { |
| 12424 | assert(block->hasHndIndex()); |
| 12425 | assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler |
| 12426 | |
| 12427 | flowList* pred; |
| 12428 | |
| 12429 | for (pred = block->bbPreds; pred; pred = pred->flNext) |
| 12430 | { |
| 12431 | BasicBlock* predBlock = pred->flBlock; |
| 12432 | |
| 12433 | if (fgIsIntraHandlerPred(predBlock, block)) |
| 12434 | { |
| 12435 | // We have a predecessor that is not from our try region |
| 12436 | return true; |
| 12437 | } |
| 12438 | } |
| 12439 | |
| 12440 | return false; |
| 12441 | } |
| 12442 | |
| 12443 | /***************************************************************************** |
| 12444 | * Introduce a new head block of the handler for the prolog to be put in, ahead |
| 12445 | * of the current handler head 'block'. |
| 12446 | * Note that this code has some similarities to fgCreateLoopPreHeader(). |
| 12447 | */ |
| 12448 | |
| 12449 | void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) |
| 12450 | { |
| 12451 | #ifdef DEBUG |
| 12452 | if (verbose) |
| 12453 | { |
| 12454 | printf("\nCreating funclet prolog header for " FMT_BB "\n" , block->bbNum); |
| 12455 | } |
| 12456 | #endif |
| 12457 | |
| 12458 | assert(block->hasHndIndex()); |
| 12459 | assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler |
| 12460 | |
| 12461 | /* Allocate a new basic block */ |
| 12462 | |
| 12463 | BasicBlock* newHead = bbNewBasicBlock(BBJ_NONE); |
| 12464 | |
| 12465 | // In fgComputePreds() we set the BBF_JMP_TARGET and BBF_HAS_LABEL for all of the handler entry points |
| 12466 | // |
| 12467 | newHead->bbFlags |= (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL); |
| 12468 | newHead->inheritWeight(block); |
| 12469 | newHead->bbRefs = 0; |
| 12470 | |
| 12471 | fgInsertBBbefore(block, newHead); // insert the new block in the block list |
| 12472 | fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the
| 12473 | // block's EH region.
| 12474 | |
| 12475 | // Distribute the pred list between newHead and block. Incoming edges coming from outside |
| 12476 | // the handler go to the prolog. Edges coming from within the handler are back-edges, and
| 12477 | // go to the existing 'block'. |
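| | //
| | // For example (hypothetically), a BBJ_CALLFINALLY in the corresponding try region is
| | // retargeted to newHead, while a back-edge from a loop inside the handler keeps
| | // targeting 'block', so the funclet prolog placed in newHead executes only once.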
| 12478 | |
| 12479 | for (flowList* pred = block->bbPreds; pred; pred = pred->flNext) |
| 12480 | { |
| 12481 | BasicBlock* predBlock = pred->flBlock; |
| 12482 | if (!fgIsIntraHandlerPred(predBlock, block)) |
| 12483 | { |
| 12484 | // It's a jump from outside the handler; add it to the newHead preds list and remove |
| 12485 | // it from the block preds list. |
| 12486 | |
| 12487 | switch (predBlock->bbJumpKind) |
| 12488 | { |
| 12489 | case BBJ_CALLFINALLY: |
| 12490 | noway_assert(predBlock->bbJumpDest == block); |
| 12491 | predBlock->bbJumpDest = newHead; |
| 12492 | fgRemoveRefPred(block, predBlock); |
| 12493 | fgAddRefPred(newHead, predBlock); |
| 12494 | break; |
| 12495 | |
| 12496 | default: |
| 12497 | // The only way into the handler is via a BBJ_CALLFINALLY (to a finally handler), or |
| 12498 | // via exception handling. |
| 12499 | noway_assert(false); |
| 12500 | break; |
| 12501 | } |
| 12502 | } |
| 12503 | } |
| 12504 | |
| 12505 | assert(nullptr == fgGetPredForBlock(block, newHead)); |
| 12506 | fgAddRefPred(block, newHead); |
| 12507 | |
| 12508 | assert((newHead->bbFlags & (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL)) == |
| 12509 | (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL)); |
| 12510 | } |
| 12511 | |
| 12512 | /***************************************************************************** |
| 12513 | * |
| 12514 | * Every funclet will have a prolog. That prolog will be inserted as the first instructions |
| 12515 | * in the first block of the funclet. If the prolog is also the head block of a loop, we |
| 12516 | * would end up with the prolog instructions being executed more than once. |
| 12517 | * Check for this by searching the predecessor list for loops, and create a new prolog header |
| 12518 | * block when needed. We detect a loop by looking for any predecessor that isn't in the |
| 12519 | * handler's try region, since the only way to get into a handler is via that try region. |
| 12520 | */ |
| 12521 | |
| 12522 | void Compiler::fgCreateFuncletPrologBlocks() |
| 12523 | { |
| 12524 | noway_assert(fgComputePredsDone); |
| 12525 | noway_assert(!fgDomsComputed); // this function doesn't maintain the dom sets |
| 12526 | assert(!fgFuncletsCreated); |
| 12527 | |
| 12528 | bool prologBlocksCreated = false; |
| 12529 | EHblkDsc* HBtabEnd; |
| 12530 | EHblkDsc* HBtab; |
| 12531 | |
| 12532 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++) |
| 12533 | { |
| 12534 | BasicBlock* head = HBtab->ebdHndBeg; |
| 12535 | |
| 12536 | if (fgAnyIntraHandlerPreds(head)) |
| 12537 | { |
| 12538 | // We need to create a new block in which to place the prolog, and split the existing |
| 12539 | // head block predecessor edges into those that should point to the prolog, and those |
| 12540 | // that shouldn't. |
| 12541 | // |
| 12542 | // It's arguable that we should just always do this, and not only when we "need to", |
| 12543 | // so there aren't two different code paths. However, it's unlikely to be necessary |
| 12544 | // for catch handlers because they have an incoming argument (the exception object) |
| 12545 | // that needs to get stored or saved, so back-arcs won't normally go to the head. It's |
| 12546 | // possible when writing in IL to generate a legal loop (e.g., push an Exception object |
| 12547 | // on the stack before jumping back to the catch head), but C# probably won't. This will |
| 12548 | // most commonly only be needed for finallys with a do/while loop at the top of the |
| 12549 | // finally. |
| 12550 | // |
| 12551 | // Note that we don't check filters. This might be a bug, but filters always have a filter |
| 12552 | // object live on entry, so it's at least unlikely (illegal?) that a loop edge targets the |
| 12553 | // filter head. |
| 12554 | |
| 12555 | fgInsertFuncletPrologBlock(head); |
| 12556 | prologBlocksCreated = true; |
| 12557 | } |
| 12558 | } |
| 12559 | |
| 12560 | if (prologBlocksCreated) |
| 12561 | { |
| 12562 | // If we've modified the graph, reset the 'modified' flag, since the dominators haven't |
| 12563 | // been computed. |
| 12564 | fgModified = false; |
| 12565 | |
| 12566 | #if DEBUG |
| 12567 | if (verbose) |
| 12568 | { |
| 12569 | JITDUMP("\nAfter fgCreateFuncletPrologBlocks()" ); |
| 12570 | fgDispBasicBlocks(); |
| 12571 | fgDispHandlerTab(); |
| 12572 | } |
| 12573 | |
| 12574 | fgVerifyHandlerTab(); |
| 12575 | fgDebugCheckBBlist(); |
| 12576 | #endif // DEBUG |
| 12577 | } |
| 12578 | } |
| 12579 | |
| 12580 | /***************************************************************************** |
| 12581 | * |
| 12582 | * Function to create funclets out of all EH catch/finally/fault blocks. |
| 12583 | * We only move filter and handler blocks, not try blocks. |
| 12584 | */ |
| 12585 | |
| 12586 | void Compiler::fgCreateFunclets() |
| 12587 | { |
| 12588 | assert(!fgFuncletsCreated); |
| 12589 | |
| 12590 | #ifdef DEBUG |
| 12591 | if (verbose) |
| 12592 | { |
| 12593 | printf("*************** In fgCreateFunclets()\n" ); |
| 12594 | } |
| 12595 | #endif |
| 12596 | |
| 12597 | fgCreateFuncletPrologBlocks(); |
| 12598 | |
| 12599 | unsigned XTnum; |
| 12600 | EHblkDsc* HBtab; |
| 12601 | const unsigned int funcCnt = ehFuncletCount() + 1; |
| 12602 | |
| 12603 | if (!FitsIn<unsigned short>(funcCnt)) |
| 12604 | { |
| 12605 | IMPL_LIMITATION("Too many funclets" ); |
| 12606 | } |
| 12607 | |
| 12608 | FuncInfoDsc* funcInfo = new (this, CMK_BasicBlock) FuncInfoDsc[funcCnt]; |
| 12609 | |
| 12610 | unsigned short funcIdx; |
| 12611 | |
| 12612 | // Setup the root FuncInfoDsc and prepare to start associating |
| 12613 | // FuncInfoDsc's with their corresponding EH region |
| 12614 | memset((void*)funcInfo, 0, funcCnt * sizeof(FuncInfoDsc)); |
| 12615 | assert(funcInfo[0].funKind == FUNC_ROOT); |
| 12616 | funcIdx = 1; |
| 12617 | |
| 12618 | // Because we iterate from the top to the bottom of the compHndBBtab array, we are iterating |
| 12619 | // from most nested (innermost) to least nested (outermost) EH region. It would be reasonable |
| 12620 | // to iterate in the opposite order, but the order of funclets shouldn't matter. |
| 12621 | // |
| 12622 | // We move every handler region to the end of the function: each handler will become a funclet. |
| 12623 | // |
| 12624 | // Note that fgRelocateEHRange() can add new entries to the EH table. However, they will always |
| 12625 | // be added *after* the current index, so our iteration here is not invalidated. |
| 12626 | // It *can* invalidate the compHndBBtab pointer itself, though, if it gets reallocated! |
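| | //
| | // For example (hypothetically, assuming the filter clause is table entry 0): a method with
| | // one try/filter and one try/finally has ehFuncletCount() == 3 (filter, filter-handler,
| | // finally), so funcCnt == 4 and the resulting table is:
| | //   funcInfo[0]: FUNC_ROOT
| | //   funcInfo[1]: FUNC_FILTER,  funEHIndex == 0
| | //   funcInfo[2]: FUNC_HANDLER, funEHIndex == 0   (EH entry 0 gets ebdFuncIndex == 2)
| | //   funcInfo[3]: FUNC_HANDLER, funEHIndex == 1   (EH entry 1 gets ebdFuncIndex == 3)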
| 12627 | |
| 12628 | for (XTnum = 0; XTnum < compHndBBtabCount; XTnum++) |
| 12629 | { |
| 12630 | HBtab = ehGetDsc(XTnum); // must re-compute this every loop, since fgRelocateEHRange changes the table |
| 12631 | if (HBtab->HasFilter()) |
| 12632 | { |
| 12633 | assert(funcIdx < funcCnt); |
| 12634 | funcInfo[funcIdx].funKind = FUNC_FILTER; |
| 12635 | funcInfo[funcIdx].funEHIndex = (unsigned short)XTnum; |
| 12636 | funcIdx++; |
| 12637 | } |
| 12638 | assert(funcIdx < funcCnt); |
| 12639 | funcInfo[funcIdx].funKind = FUNC_HANDLER; |
| 12640 | funcInfo[funcIdx].funEHIndex = (unsigned short)XTnum; |
| 12641 | HBtab->ebdFuncIndex = funcIdx; |
| 12642 | funcIdx++; |
| 12643 | fgRelocateEHRange(XTnum, FG_RELOCATE_HANDLER); |
| 12644 | } |
| 12645 | |
| 12646 | // We better have populated all of them by now |
| 12647 | assert(funcIdx == funcCnt); |
| 12648 | |
| 12649 | // Publish |
| 12650 | compCurrFuncIdx = 0; |
| 12651 | compFuncInfos = funcInfo; |
| 12652 | compFuncInfoCount = (unsigned short)funcCnt; |
| 12653 | |
| 12654 | fgFuncletsCreated = true; |
| 12655 | |
| 12656 | #if DEBUG |
| 12657 | if (verbose) |
| 12658 | { |
| 12659 | JITDUMP("\nAfter fgCreateFunclets()" ); |
| 12660 | fgDispBasicBlocks(); |
| 12661 | fgDispHandlerTab(); |
| 12662 | } |
| 12663 | |
| 12664 | fgVerifyHandlerTab(); |
| 12665 | fgDebugCheckBBlist(); |
| 12666 | #endif // DEBUG |
| 12667 | } |
| 12668 | |
| 12669 | #else // !FEATURE_EH_FUNCLETS |
| 12670 | |
| 12671 | /***************************************************************************** |
| 12672 | * |
| 12673 | * Function called to relocate any and all EH regions. |
| 12674 | * Only entire consecutive EH regions will be moved and they will be kept together. |
| 12675 | * Except for the first block, the range cannot have any blocks that jump into or out of the region.
| 12676 | */ |
| 12677 | |
| 12678 | bool Compiler::fgRelocateEHRegions() |
| 12679 | { |
| 12680 | bool result = false; // Our return value |
| 12681 | |
| 12682 | #ifdef DEBUG |
| 12683 | if (verbose) |
| 12684 | printf("*************** In fgRelocateEHRegions()\n" ); |
| 12685 | #endif |
| 12686 | |
| 12687 | if (fgCanRelocateEHRegions) |
| 12688 | { |
| 12689 | unsigned XTnum; |
| 12690 | EHblkDsc* HBtab; |
| 12691 | |
| 12692 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 12693 | { |
| 12694 | // Nested EH regions cannot be moved. |
| 12695 | // Also we don't want to relocate an EH region that has a filter |
| 12696 | if ((HBtab->ebdHandlerNestingLevel == 0) && !HBtab->HasFilter()) |
| 12697 | { |
| 12698 | bool movedTry = false; |
| 12699 | #if DEBUG |
| 12700 | bool movedHnd = false; |
| 12701 | #endif // DEBUG |
| 12702 | |
| 12703 | // Only try to move the outermost try region |
| 12704 | if (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) |
| 12705 | { |
| 12706 | // Move the entire try region if it can be moved |
| 12707 | if (HBtab->ebdTryBeg->isRunRarely()) |
| 12708 | { |
| 12709 | BasicBlock* bTryLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_TRY); |
| 12710 | if (bTryLastBB != NULL) |
| 12711 | { |
| 12712 | result = true; |
| 12713 | movedTry = true; |
| 12714 | } |
| 12715 | } |
| 12716 | #if DEBUG |
| 12717 | if (verbose && movedTry) |
| 12718 | { |
| 12719 | printf("\nAfter relocating an EH try region" ); |
| 12720 | fgDispBasicBlocks(); |
| 12721 | fgDispHandlerTab(); |
| 12722 | |
| 12723 | // Make sure that the predecessor lists are accurate |
| 12724 | if (expensiveDebugCheckLevel >= 2) |
| 12725 | { |
| 12726 | fgDebugCheckBBlist(); |
| 12727 | } |
| 12728 | } |
| 12729 | #endif // DEBUG |
| 12730 | } |
| 12731 | |
| 12732 | // Currently it is not good to move the rarely run handler regions to the end of the method |
| 12733 | // because fgDetermineFirstColdBlock() must put the start of any handler region in the hot |
| 12734 | // section. |
| 12735 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 12736 | |
| 12737 | #if 0 |
| 12738 | // Now try to move the entire handler region if it can be moved. |
| 12739 | // Don't try to move a finally handler unless we already moved the try region. |
| 12740 | if (HBtab->ebdHndBeg->isRunRarely() && |
| 12741 | !HBtab->ebdHndBeg->hasTryIndex() && |
| 12742 | (movedTry || !HBtab->HasFinallyHandler())) |
| 12743 | { |
| 12744 | BasicBlock* bHndLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_HANDLER); |
| 12745 | if (bHndLastBB != NULL) |
| 12746 | { |
| 12747 | result = true; |
| 12748 | movedHnd = true; |
| 12749 | } |
| 12750 | } |
| 12751 | #endif // 0 |
| 12752 | |
| 12753 | #if DEBUG |
| 12754 | if (verbose && movedHnd) |
| 12755 | { |
| 12756 | printf("\nAfter relocating an EH handler region" ); |
| 12757 | fgDispBasicBlocks(); |
| 12758 | fgDispHandlerTab(); |
| 12759 | |
| 12760 | // Make sure that the predecessor lists are accurate |
| 12761 | if (expensiveDebugCheckLevel >= 2) |
| 12762 | { |
| 12763 | fgDebugCheckBBlist(); |
| 12764 | } |
| 12765 | } |
| 12766 | #endif // DEBUG |
| 12767 | } |
| 12768 | } |
| 12769 | } |
| 12770 | |
| 12771 | #if DEBUG |
| 12772 | fgVerifyHandlerTab(); |
| 12773 | |
| 12774 | if (verbose && result) |
| 12775 | { |
| 12776 | printf("\nAfter fgRelocateEHRegions()" ); |
| 12777 | fgDispBasicBlocks(); |
| 12778 | fgDispHandlerTab(); |
| 12779 | // Make sure that the predecessor lists are accurate |
| 12780 | fgDebugCheckBBlist(); |
| 12781 | } |
| 12782 | #endif // DEBUG |
| 12783 | |
| 12784 | return result; |
| 12785 | } |
| 12786 | |
| 12787 | #endif // !FEATURE_EH_FUNCLETS |
| 12788 | |
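| | // setEdgeWeightMinChecked: attempt to set this edge's lower-bound weight (flEdgeWeightMin)
| | // to 'newWeight', allowing up to 'slop' of profile inaccuracy; returns false if 'newWeight'
| | // cannot be reconciled with the current [Min..Max] range.
| | //
| | // For example (hypothetical values, current range [10..20]): newWeight == 15 simply sets
| | // Min to 15; newWeight == 8 with slop == 3 lowers the range to [8..20] and sets
| | // *wbUsedSlop; newWeight == 25 with slop == 3 fails and returns false.
| | //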
| 12789 | bool flowList::setEdgeWeightMinChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop) |
| 12790 | { |
| 12791 | bool result = false; |
| 12792 | if ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin)) |
| 12793 | { |
| 12794 | flEdgeWeightMin = newWeight; |
| 12795 | result = true; |
| 12796 | } |
| 12797 | else if (slop > 0) |
| 12798 | { |
| 12799 | // We allow for a small amount of inaccuracy in block weight counts. |
| 12800 | if (flEdgeWeightMax < newWeight) |
| 12801 | { |
| 12802 | // We have already determined that this edge's weight |
| 12803 | // is less than newWeight, so we just allow for the slop |
| 12804 | if (newWeight <= (flEdgeWeightMax + slop)) |
| 12805 | { |
| 12806 | result = true; |
| 12807 | |
| 12808 | if (flEdgeWeightMax != 0) |
| 12809 | { |
| 12810 | // We will raise flEdgeWeightMin and Max towards newWeight |
| 12811 | flEdgeWeightMin = flEdgeWeightMax; |
| 12812 | flEdgeWeightMax = newWeight; |
| 12813 | } |
| 12814 | |
| 12815 | if (wbUsedSlop != nullptr) |
| 12816 | { |
| 12817 | *wbUsedSlop = true; |
| 12818 | } |
| 12819 | } |
| 12820 | } |
| 12821 | else |
| 12822 | { |
| 12823 | assert(flEdgeWeightMin > newWeight); |
| 12824 | |
| 12825 | // We have already determined that this edge's weight |
| 12826 | // is more than newWeight, so we just allow for the slop |
| 12827 | if ((newWeight + slop) >= flEdgeWeightMin) |
| 12828 | { |
| 12829 | result = true; |
| 12830 | |
| 12831 | assert(flEdgeWeightMax != 0); |
| 12832 | |
| 12833 | // We will lower flEdgeWeightMin towards newWeight |
| 12834 | flEdgeWeightMin = newWeight; |
| 12835 | |
| 12836 | if (wbUsedSlop != nullptr) |
| 12837 | { |
| 12838 | *wbUsedSlop = true; |
| 12839 | } |
| 12840 | } |
| 12841 | } |
| 12842 | |
| 12843 | // If we are returning true then we should have adjusted the range so that |
| 12844 | // newWeight is in the new range [Min..Max] or flEdgeWeightMax is zero.
| 12845 | // Also we should have set wbUsedSlop to true. |
| 12846 | if (result == true) |
| 12847 | { |
| 12848 | assert((flEdgeWeightMax == 0) || ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin))); |
| 12849 | |
| 12850 | if (wbUsedSlop != nullptr) |
| 12851 | { |
| 12852 | assert(*wbUsedSlop == true); |
| 12853 | } |
| 12854 | } |
| 12855 | } |
| 12856 | |
| 12857 | #if DEBUG |
| 12858 | if (result == false) |
| 12859 | { |
| 12860 | result = false; // break here |
| 12861 | } |
| 12862 | #endif // DEBUG |
| 12863 | |
| 12864 | return result; |
| 12865 | } |
| 12866 | |
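| | // setEdgeWeightMaxChecked: the upper-bound counterpart of setEdgeWeightMinChecked above;
| | // it attempts to set flEdgeWeightMax to 'newWeight' under the same slop rules.
| | //
| | // For example (hypothetical values, current range [10..20]): newWeight == 15 sets Max to
| | // 15; newWeight == 7 with slop == 5 shifts the range to [7..10] and sets *wbUsedSlop;
| | // newWeight == 2 with slop == 5 fails and returns false.
| | //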
| 12867 | bool flowList::setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop) |
| 12868 | { |
| 12869 | bool result = false; |
| 12870 | if ((newWeight >= flEdgeWeightMin) && (newWeight <= flEdgeWeightMax)) |
| 12871 | { |
| 12872 | flEdgeWeightMax = newWeight; |
| 12873 | result = true; |
| 12874 | } |
| 12875 | else if (slop > 0) |
| 12876 | { |
| 12877 | // We allow for a small amount of inaccuracy in block weight counts. |
| 12878 | if (flEdgeWeightMax < newWeight) |
| 12879 | { |
| 12880 | // We have already determined that this edge's weight |
| 12881 | // is less than newWeight, so we just allow for the slop |
| 12882 | if (newWeight <= (flEdgeWeightMax + slop)) |
| 12883 | { |
| 12884 | result = true; |
| 12885 | |
| 12886 | if (flEdgeWeightMax != 0) |
| 12887 | { |
| 12888 | // We will allow this to raise flEdgeWeightMax towards newWeight |
| 12889 | flEdgeWeightMax = newWeight; |
| 12890 | } |
| 12891 | |
| 12892 | if (wbUsedSlop != nullptr) |
| 12893 | { |
| 12894 | *wbUsedSlop = true; |
| 12895 | } |
| 12896 | } |
| 12897 | } |
| 12898 | else |
| 12899 | { |
| 12900 | assert(flEdgeWeightMin > newWeight); |
| 12901 | |
| 12902 | // We have already determined that this edge's weight |
| 12903 | // is more than newWeight, so we just allow for the slop |
| 12904 | if ((newWeight + slop) >= flEdgeWeightMin) |
| 12905 | { |
| 12906 | result = true; |
| 12907 | |
| 12908 | assert(flEdgeWeightMax != 0); |
| 12909 | |
| 12910 | // We will allow this to lower flEdgeWeightMin and Max towards newWeight |
| 12911 | flEdgeWeightMax = flEdgeWeightMin; |
| 12912 | flEdgeWeightMin = newWeight; |
| 12913 | |
| 12914 | if (wbUsedSlop != nullptr) |
| 12915 | { |
| 12916 | *wbUsedSlop = true; |
| 12917 | } |
| 12918 | } |
| 12919 | } |
| 12920 | |
| 12921 | // If we are returning true then we should have adjusted the range so that |
| 12922 | // newWeight is in the new range [Min..Max] or flEdgeWeightMax is zero.
| 12923 | // Also we should have set wbUsedSlop to true, unless it is nullptr.
| 12924 | if (result == true) |
| 12925 | { |
| 12926 | assert((flEdgeWeightMax == 0) || ((newWeight <= flEdgeWeightMax) && (newWeight >= flEdgeWeightMin))); |
| 12927 | |
| 12928 | assert((wbUsedSlop == nullptr) || (*wbUsedSlop == true)); |
| 12929 | } |
| 12930 | } |
| 12931 | |
| 12932 | #if DEBUG |
| 12933 | if (result == false) |
| 12934 | { |
| 12935 | result = false; // break here |
| 12936 | } |
| 12937 | #endif // DEBUG |
| 12938 | |
| 12939 | return result; |
| 12940 | } |
| 12941 | |
| 12942 | #ifdef DEBUG |
| 12943 | void Compiler::fgPrintEdgeWeights() |
| 12944 | { |
| 12945 | BasicBlock* bSrc; |
| 12946 | BasicBlock* bDst; |
| 12947 | flowList* edge; |
| 12948 | |
| 12949 | // Print out all of the edge weights |
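| | //
| | // The output has the form (with hypothetical numbers):
| | //    Edge weights into BB05 :BB03 (12..14), BB04 (MAX)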
| 12950 | for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) |
| 12951 | { |
| 12952 | if (bDst->bbPreds != nullptr) |
| 12953 | { |
| 12954 | printf(" Edge weights into " FMT_BB " :" , bDst->bbNum); |
| 12955 | for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext) |
| 12956 | { |
| 12957 | bSrc = edge->flBlock; |
| 12958 | // This is the control flow edge (bSrc -> bDst) |
| 12959 | |
| 12960 | printf(FMT_BB " " , bSrc->bbNum); |
| 12961 | |
| 12962 | if (edge->flEdgeWeightMin < BB_MAX_WEIGHT) |
| 12963 | { |
| 12964 | printf("(%u" , edge->flEdgeWeightMin); |
| 12965 | } |
| 12966 | else |
| 12967 | { |
| 12968 | printf("(MAX" ); |
| 12969 | } |
| 12970 | if (edge->flEdgeWeightMin != edge->flEdgeWeightMax) |
| 12971 | { |
| 12972 | if (edge->flEdgeWeightMax < BB_MAX_WEIGHT) |
| 12973 | { |
| 12974 | printf("..%u" , edge->flEdgeWeightMax); |
| 12975 | } |
| 12976 | else |
| 12977 | { |
| 12978 | printf("..MAX" ); |
| 12979 | } |
| 12980 | } |
| 12981 | printf(")" ); |
| 12982 | if (edge->flNext != nullptr) |
| 12983 | { |
| 12984 | printf(", " ); |
| 12985 | } |
| 12986 | } |
| 12987 | printf("\n" ); |
| 12988 | } |
| 12989 | } |
| 12990 | } |
| 12991 | #endif // DEBUG |
| 12992 | |
| 12993 | // return true if there is a possibility that the method has a loop (a backedge is present) |
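| | // Note that this is a single pass in bbNext order that flags any successor edge targeting a
| | // block already visited (a lexically backward edge), so it over-approximates: it may report
| | // true for flow that is not actually a loop, while any genuine loop necessarily contains
| | // such a backward edge.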
| 12994 | bool Compiler::fgMightHaveLoop() |
| 12995 | { |
| 12996 | // Don't use a BlockSet for this temporary bitset of blocks: we don't want to have to call EnsureBasicBlockEpoch() |
| 12997 | // and potentially change the block epoch. |
| 12998 | |
| 12999 | BitVecTraits blockVecTraits(fgBBNumMax + 1, this); |
| 13000 | BitVec blocksSeen(BitVecOps::MakeEmpty(&blockVecTraits)); |
| 13001 | |
| 13002 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 13003 | { |
| 13004 | BitVecOps::AddElemD(&blockVecTraits, blocksSeen, block->bbNum); |
| 13005 | |
| 13006 | for (BasicBlock* succ : block->GetAllSuccs(this)) |
| 13007 | { |
| 13008 | if (BitVecOps::IsMember(&blockVecTraits, blocksSeen, succ->bbNum)) |
| 13009 | { |
| 13010 | return true; |
| 13011 | } |
| 13012 | } |
| 13013 | } |
| 13014 | return false; |
| 13015 | } |
| 13016 | |
| 13017 | //------------------------------------------------------------- |
| 13018 | // fgComputeBlockAndEdgeWeights: determine weights for blocks |
| 13019 | // and optionally for edges |
| 13020 | // |
| 13021 | void Compiler::fgComputeBlockAndEdgeWeights() |
| 13022 | { |
| 13023 | JITDUMP("*************** In fgComputeBlockAndEdgeWeights()\n" ); |
| 13024 | |
| 13025 | const bool usingProfileWeights = fgIsUsingProfileWeights(); |
| 13026 | const bool isOptimizing = opts.OptimizationEnabled(); |
| 13027 | |
| 13028 | fgHaveValidEdgeWeights = false; |
| 13029 | fgCalledCount = BB_UNITY_WEIGHT; |
| 13030 | |
| 13031 | #if DEBUG |
| 13032 | if (verbose) |
| 13033 | { |
| 13034 | fgDispBasicBlocks(); |
| 13035 | printf("\n" ); |
| 13036 | } |
| 13037 | #endif // DEBUG |
| 13038 | |
| 13039 | const BasicBlock::weight_t returnWeight = fgComputeMissingBlockWeights(); |
| 13040 | |
| 13041 | if (usingProfileWeights) |
| 13042 | { |
| 13043 | fgComputeCalledCount(returnWeight); |
| 13044 | } |
| 13045 | else |
| 13046 | { |
| 13047 | JITDUMP(" -- no profile data, so using default called count\n" ); |
| 13048 | } |
| 13049 | |
| 13050 | if (isOptimizing) |
| 13051 | { |
| 13052 | fgComputeEdgeWeights(); |
| 13053 | } |
| 13054 | else |
| 13055 | { |
| 13056 | JITDUMP(" -- not optimizing, so not computing edge weights\n" ); |
| 13057 | } |
| 13058 | } |
| 13059 | |
| 13060 | //------------------------------------------------------------- |
| 13061 | // fgComputeMissingBlockWeights: determine weights for blocks |
| 13062 | // that were not profiled and do not yet have weights. |
| 13063 | // |
| 13064 | // Returns: |
| 13065 | // sum of weights for all return and throw blocks in the method |
| 13066 | |
| 13067 | BasicBlock::weight_t Compiler::fgComputeMissingBlockWeights() |
| 13068 | { |
| 13069 | BasicBlock* bSrc; |
| 13070 | BasicBlock* bDst; |
| 13071 | unsigned iterations = 0; |
| 13072 | bool changed; |
| 13073 | bool modified = false; |
| 13074 | BasicBlock::weight_t returnWeight; |
| 13075 | |
| 13076 | // If we have any blocks that did not have profile derived weight |
| 13077 | // we will try to fix their weight up here |
| 13078 | // |
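| | // For example (hypothetical): if BB04 has no profile-derived weight, its only predecessor
| | // BB03 has profile weight 100, and BB03 flows only into BB04, then BB04's weight becomes
| | // 100; the same weight is propagated if BB04's only successor has weight 100 and BB04 is
| | // that successor's only predecessor. Repeating this until nothing changes fills in chains
| | // of such blocks.
| | //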
| 13079 | modified = false; |
| 13080 | do // while (changed) |
| 13081 | { |
| 13082 | changed = false; |
| 13083 | returnWeight = 0; |
| 13084 | iterations++; |
| 13085 | |
| 13086 | for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) |
| 13087 | { |
| 13088 | if (!bDst->hasProfileWeight() && (bDst->bbPreds != nullptr)) |
| 13089 | { |
| 13090 | BasicBlock* bOnlyNext; |
| 13091 | |
| 13092 | // This block does not have a profile derived weight |
| 13093 | // |
| 13094 | BasicBlock::weight_t newWeight = BB_MAX_WEIGHT; |
| 13095 | |
| 13096 | if (bDst->countOfInEdges() == 1) |
| 13097 | { |
| 13098 | // Only one block flows into bDst |
| 13099 | bSrc = bDst->bbPreds->flBlock; |
| 13100 | |
| 13101 | // Does this block flow into only one other block |
| 13102 | if (bSrc->bbJumpKind == BBJ_NONE) |
| 13103 | { |
| 13104 | bOnlyNext = bSrc->bbNext; |
| 13105 | } |
| 13106 | else if (bSrc->bbJumpKind == BBJ_ALWAYS) |
| 13107 | { |
| 13108 | bOnlyNext = bSrc->bbJumpDest; |
| 13109 | } |
| 13110 | else |
| 13111 | { |
| 13112 | bOnlyNext = nullptr; |
| 13113 | } |
| 13114 | |
| 13115 | if ((bOnlyNext == bDst) && bSrc->hasProfileWeight()) |
| 13116 | { |
| 13117 | // We know the exact weight of bDst |
| 13118 | newWeight = bSrc->bbWeight; |
| 13119 | } |
| 13120 | } |
| 13121 | |
| 13122 | // Does this block flow into only one other block |
| 13123 | if (bDst->bbJumpKind == BBJ_NONE) |
| 13124 | { |
| 13125 | bOnlyNext = bDst->bbNext; |
| 13126 | } |
| 13127 | else if (bDst->bbJumpKind == BBJ_ALWAYS) |
| 13128 | { |
| 13129 | bOnlyNext = bDst->bbJumpDest; |
| 13130 | } |
| 13131 | else |
| 13132 | { |
| 13133 | bOnlyNext = nullptr; |
| 13134 | } |
| 13135 | |
| 13136 | if ((bOnlyNext != nullptr) && (bOnlyNext->bbPreds != nullptr)) |
| 13137 | { |
| 13138 | // Does only one block flow into bOnlyNext |
| 13139 | if (bOnlyNext->countOfInEdges() == 1) |
| 13140 | { |
| 13141 | noway_assert(bOnlyNext->bbPreds->flBlock == bDst); |
| 13142 | |
| 13143 | // We know the exact weight of bDst |
| 13144 | newWeight = bOnlyNext->bbWeight; |
| 13145 | } |
| 13146 | } |
| 13147 | |
| 13148 | if ((newWeight != BB_MAX_WEIGHT) && (bDst->bbWeight != newWeight)) |
| 13149 | { |
| 13150 | changed = true; |
| 13151 | modified = true; |
| 13152 | bDst->bbWeight = newWeight; |
| 13153 | if (newWeight == 0) |
| 13154 | { |
| 13155 | bDst->bbFlags |= BBF_RUN_RARELY; |
| 13156 | } |
| 13157 | else |
| 13158 | { |
| 13159 | bDst->bbFlags &= ~BBF_RUN_RARELY; |
| 13160 | } |
| 13161 | } |
| 13162 | } |
| 13163 | |
| 13164 | // Sum up the weights of all of the return blocks and throw blocks |
| 13165 | // This is used when we have a back-edge into block 1 |
| 13166 | // |
| 13167 | if (bDst->hasProfileWeight() && ((bDst->bbJumpKind == BBJ_RETURN) || (bDst->bbJumpKind == BBJ_THROW))) |
| 13168 | { |
| 13169 | returnWeight += bDst->bbWeight; |
| 13170 | } |
| 13171 | } |
| 13172 | } |
| 13173 | // Generally when we synthesize profile estimates we do it in a way where this algorithm will converge |
| 13174 | // but downstream opts that remove conditional branches may create a situation where this is not the case. |
| 13175 | // For instance a loop that becomes unreachable creates a sort of 'ring oscillator' (See test b539509) |
| 13176 | while (changed && iterations < 10); |
| 13177 | |
| 13178 | #if DEBUG |
| 13179 | if (verbose && modified) |
| 13180 | { |
| 13181 | printf("fgComputeMissingBlockWeights() adjusted the weight of some blocks\n" ); |
| 13182 | fgDispBasicBlocks(); |
| 13183 | printf("\n" ); |
| 13184 | } |
| 13185 | #endif |
| 13186 | |
| 13187 | return returnWeight; |
| 13188 | } |
| 13189 | |
| 13190 | //------------------------------------------------------------- |
| 13191 | // fgComputeCalledCount: when profile information is in use, |
| 13192 | // compute fgCalledCount |
| 13193 | // |
| 13194 | // Argument: |
| 13195 | // returnWeight - sum of weights for all return and throw blocks |
| 13196 | |
| 13197 | void Compiler::fgComputeCalledCount(BasicBlock::weight_t returnWeight) |
| 13198 | { |
| 13199 | // When we are not using profile data, fgCalledCount has already been set up,
| 13200 | // so we only set it here when we are using profile data.
| 13201 | assert(fgIsUsingProfileWeights()); |
| 13202 | |
| 13203 | BasicBlock* firstILBlock = fgFirstBB; // The first block for IL code (i.e. for the IL code at offset 0) |
| 13204 | |
| 13205 | // Do we have an internal block as our first Block? |
| 13206 | if (firstILBlock->bbFlags & BBF_INTERNAL) |
| 13207 | { |
| 13208 | // Skip past any/all BBF_INTERNAL blocks that may have been added before the first real IL block. |
| 13209 | // |
| 13210 | while (firstILBlock->bbFlags & BBF_INTERNAL) |
| 13211 | { |
| 13212 | firstILBlock = firstILBlock->bbNext; |
| 13213 | } |
| 13214 | // The 'firstILBlock' is now expected to have a profile-derived weight |
| 13215 | assert(firstILBlock->hasProfileWeight()); |
| 13216 | } |
| 13217 | |
| 13218 | // If the first block has only one ref then we use its weight for fgCalledCount.
| 13219 | // Otherwise we have back-edges into the first block, so instead we use the sum
| 13220 | // of the return block weights for fgCalledCount. |
| 13221 | // |
| 13222 | // If the profile data has a 0 for the returnWeight |
| 13223 | // (i.e. the function never returns because it always throws) |
| 13224 | // then just use the first block weight rather than 0. |
| 13225 | // |
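| | // For example (hypothetical profile counts): if the first IL block has several refs
| | // because of a back-edge, and the method's BBJ_RETURN/BBJ_THROW blocks have profile
| | // weights 60 and 40, then fgCalledCount becomes 100 rather than the inflated weight of
| | // the loop header.
| | //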
| 13226 | if ((firstILBlock->countOfInEdges() == 1) || (returnWeight == 0)) |
| 13227 | { |
| 13228 | assert(firstILBlock->hasProfileWeight()); // This should always be a profile-derived weight |
| 13229 | fgCalledCount = firstILBlock->bbWeight; |
| 13230 | } |
| 13231 | else |
| 13232 | { |
| 13233 | fgCalledCount = returnWeight; |
| 13234 | } |
| 13235 | |
| 13236 | // If we allocated a scratch block as the first BB then we need |
| 13237 | // to set its profile-derived weight to be fgCalledCount |
| 13238 | if (fgFirstBBisScratch()) |
| 13239 | { |
| 13240 | fgFirstBB->setBBProfileWeight(fgCalledCount); |
| 13241 | if (fgFirstBB->bbWeight == 0) |
| 13242 | { |
| 13243 | fgFirstBB->bbFlags |= BBF_RUN_RARELY; |
| 13244 | } |
| 13245 | } |
| 13246 | |
| 13247 | #if DEBUG |
| 13248 | if (verbose) |
| 13249 | { |
| 13250 | printf("We are using the Profile Weights and fgCalledCount is %d.\n" , fgCalledCount); |
| 13251 | } |
| 13252 | #endif |
| 13253 | } |
| 13254 | |
| 13255 | //------------------------------------------------------------- |
| 13256 | // fgComputeEdgeWeights: compute edge weights from block weights |
| 13257 | |
| 13258 | void Compiler::fgComputeEdgeWeights() |
| 13259 | { |
| 13260 | BasicBlock* bSrc; |
| 13261 | BasicBlock* bDst; |
| 13262 | flowList* edge; |
| 13263 | BasicBlock::weight_t slop; |
| 13264 | unsigned goodEdgeCountCurrent = 0; |
| 13265 | unsigned goodEdgeCountPrevious = 0; |
| 13266 | bool inconsistentProfileData = false; |
| 13267 | bool hasIncompleteEdgeWeights = false; |
| 13268 | bool usedSlop = false; |
| 13269 | unsigned numEdges = 0; |
| 13270 | unsigned iterations = 0; |
| 13271 | |
| 13272 | // Now we will compute the initial flEdgeWeightMin and flEdgeWeightMax values |
| 13273 | for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) |
| 13274 | { |
| 13275 | BasicBlock::weight_t bDstWeight = bDst->bbWeight; |
| 13276 | |
| 13277 | // We subtract out the called count so that bDstWeight is |
| 13278 | // the sum of all edges that go into this block from this method. |
| 13279 | // |
| 13280 | if (bDst == fgFirstBB) |
| 13281 | { |
| 13282 | bDstWeight -= fgCalledCount; |
| 13283 | } |
| 13284 | |
| 13285 | for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext) |
| 13286 | { |
| 13287 | bool assignOK = true; |
| 13288 | |
| 13289 | bSrc = edge->flBlock; |
| 13290 | // We are processing the control flow edge (bSrc -> bDst) |
| 13291 | |
| 13292 | numEdges++; |
| 13293 | |
| 13294 | // |
| 13295 | // If the bSrc or bDst blocks do not have exact profile weights |
| 13296 | // then we must reset any values that they currently have |
| 13297 | // |
| 13298 | |
| 13299 | if (!bSrc->hasProfileWeight() || !bDst->hasProfileWeight()) |
| 13300 | { |
| 13301 | edge->flEdgeWeightMin = BB_ZERO_WEIGHT; |
| 13302 | edge->flEdgeWeightMax = BB_MAX_WEIGHT; |
| 13303 | } |
| 13304 | |
| 13305 | slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; |
| 13306 | switch (bSrc->bbJumpKind) |
| 13307 | { |
| 13308 | case BBJ_ALWAYS: |
| 13309 | case BBJ_EHCATCHRET: |
| 13310 | case BBJ_NONE: |
| 13311 | case BBJ_CALLFINALLY: |
| 13312 | // We know the exact edge weight |
| 13313 | assignOK &= edge->setEdgeWeightMinChecked(bSrc->bbWeight, slop, &usedSlop); |
| 13314 | assignOK &= edge->setEdgeWeightMaxChecked(bSrc->bbWeight, slop, &usedSlop); |
| 13315 | break; |
| 13316 | |
| 13317 | case BBJ_COND: |
| 13318 | case BBJ_SWITCH: |
| 13319 | case BBJ_EHFINALLYRET: |
| 13320 | case BBJ_EHFILTERRET: |
| 13321 | if (edge->flEdgeWeightMax > bSrc->bbWeight) |
| 13322 | { |
| 13323 | // The maximum weight of the edge into bDst can't be greater than the weight of bSrc
| 13324 | assignOK &= edge->setEdgeWeightMaxChecked(bSrc->bbWeight, slop, &usedSlop); |
| 13325 | } |
| 13326 | break; |
| 13327 | |
| 13328 | default: |
| 13329 | // We should never have an edge that starts from one of these jump kinds |
| 13330 | noway_assert(!"Unexpected bbJumpKind" ); |
| 13331 | break; |
| 13332 | } |
| 13333 | |
| 13334 | // The maximum weight of the edge into bDst can't be greater than the weight of bDst itself
| 13335 | if (edge->flEdgeWeightMax > bDstWeight) |
| 13336 | { |
| 13337 | assignOK &= edge->setEdgeWeightMaxChecked(bDstWeight, slop, &usedSlop); |
| 13338 | } |
| 13339 | |
| 13340 | if (!assignOK) |
| 13341 | { |
| 13342 | // Here we have inconsistent profile data |
| 13343 | inconsistentProfileData = true; |
| 13344 | // No point in continuing |
| 13345 | goto EARLY_EXIT; |
| 13346 | } |
| 13347 | } |
| 13348 | } |
| 13349 | |
| 13350 | fgEdgeCount = numEdges; |
| 13351 | |
| 13352 | iterations = 0; |
| 13353 | |
| 13354 | do |
| 13355 | { |
| 13356 | iterations++; |
| 13357 | goodEdgeCountPrevious = goodEdgeCountCurrent; |
| 13358 | goodEdgeCountCurrent = 0; |
| 13359 | hasIncompleteEdgeWeights = false; |
| 13360 | |
| 13361 | for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) |
| 13362 | { |
| 13363 | for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext) |
| 13364 | { |
| 13365 | bool assignOK = true; |
| 13366 | |
| 13367 | // We are processing the control flow edge (bSrc -> bDst) |
| 13368 | bSrc = edge->flBlock; |
| 13369 | |
| 13370 | slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; |
| 13371 | if (bSrc->bbJumpKind == BBJ_COND) |
| 13372 | { |
| 13373 | int diff; |
| 13374 | flowList* otherEdge; |
| 13375 | if (bSrc->bbNext == bDst) |
| 13376 | { |
| 13377 | otherEdge = fgGetPredForBlock(bSrc->bbJumpDest, bSrc); |
| 13378 | } |
| 13379 | else |
| 13380 | { |
| 13381 | otherEdge = fgGetPredForBlock(bSrc->bbNext, bSrc); |
| 13382 | } |
| 13383 | noway_assert(edge->flEdgeWeightMin <= edge->flEdgeWeightMax); |
| 13384 | noway_assert(otherEdge->flEdgeWeightMin <= otherEdge->flEdgeWeightMax); |
| 13385 | |
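| | // The two adjustments below exploit the fact that the weights of a BBJ_COND block's two
| | // outgoing edges must (within 'slop') sum to the block's weight. For example (hypothetical
| | // numbers): if bSrc->bbWeight is 100, this edge has [Min=30, Max=90] and the other edge
| | // has [Min=5, Max=80], then 100 - (30 + 80) = -10 lowers the other edge's Max to 70, and
| | // 100 - (5 + 90) = 5 raises the other edge's Min to 10.
| | //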
| 13386 | // Adjust edge->flEdgeWeightMin up or adjust otherEdge->flEdgeWeightMax down |
| 13387 | diff = ((int)bSrc->bbWeight) - ((int)edge->flEdgeWeightMin + (int)otherEdge->flEdgeWeightMax); |
| 13388 | if (diff > 0) |
| 13389 | { |
| 13390 | assignOK &= edge->setEdgeWeightMinChecked(edge->flEdgeWeightMin + diff, slop, &usedSlop); |
| 13391 | } |
| 13392 | else if (diff < 0) |
| 13393 | { |
| 13394 | assignOK &= |
| 13395 | otherEdge->setEdgeWeightMaxChecked(otherEdge->flEdgeWeightMax + diff, slop, &usedSlop); |
| 13396 | } |
| 13397 | |
| 13398 | // Adjust otherEdge->flEdgeWeightMin up or adjust edge->flEdgeWeightMax down |
| 13399 | diff = ((int)bSrc->bbWeight) - ((int)otherEdge->flEdgeWeightMin + (int)edge->flEdgeWeightMax); |
| 13400 | if (diff > 0) |
| 13401 | { |
| 13402 | assignOK &= |
| 13403 | otherEdge->setEdgeWeightMinChecked(otherEdge->flEdgeWeightMin + diff, slop, &usedSlop); |
| 13404 | } |
| 13405 | else if (diff < 0) |
| 13406 | { |
| 13407 | assignOK &= edge->setEdgeWeightMaxChecked(edge->flEdgeWeightMax + diff, slop, &usedSlop); |
| 13408 | } |
| 13409 | |
| 13410 | if (!assignOK) |
| 13411 | { |
| 13412 | // Here we have inconsistent profile data |
| 13413 | inconsistentProfileData = true; |
| 13414 | // No point in continuing |
| 13415 | goto EARLY_EXIT; |
| 13416 | } |
| 13417 | #ifdef DEBUG |
| 13418 | // Now edge->flEdgeWeightMin and otherEdge->flEdgeWeightMax should add up to bSrc->bbWeight
| 13419 | diff = ((int)bSrc->bbWeight) - ((int)edge->flEdgeWeightMin + (int)otherEdge->flEdgeWeightMax); |
| 13420 | noway_assert((-((int)slop) <= diff) && (diff <= ((int)slop))); |
| 13421 | |
| 13422 | // Now otherEdge->flEdgeWeightMin and edge->flEdgeWeightMax should add up to bSrc->bbWeight
| 13423 | diff = ((int)bSrc->bbWeight) - ((int)otherEdge->flEdgeWeightMin + (int)edge->flEdgeWeightMax); |
| 13424 | noway_assert((-((int)slop) <= diff) && (diff <= ((int)slop))); |
| 13425 | #endif // DEBUG |
| 13426 | } |
| 13427 | } |
| 13428 | } |
| 13429 | |
| 13430 | for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) |
| 13431 | { |
| 13432 | BasicBlock::weight_t bDstWeight = bDst->bbWeight; |
| 13433 | |
| 13434 | if (bDstWeight == BB_MAX_WEIGHT) |
| 13435 | { |
| 13436 | inconsistentProfileData = true; |
| 13437 | // No point in continuing |
| 13438 | goto EARLY_EXIT; |
| 13439 | } |
| 13440 | else |
| 13441 | { |
| 13442 | // We subtract out the called count so that bDstWeight is |
| 13443 | // the sum of all edges that go into this block from this method. |
| 13444 | // |
| 13445 | if (bDst == fgFirstBB) |
| 13446 | { |
| 13447 | bDstWeight -= fgCalledCount; |
| 13448 | } |
| 13449 | |
| 13450 | UINT64 minEdgeWeightSum = 0; |
| 13451 | UINT64 maxEdgeWeightSum = 0; |
| 13452 | |
| 13453 | // Calculate the sums of the minimum and maximum edge weights |
| 13454 | for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext) |
| 13455 | { |
| 13456 | // We are processing the control flow edge (bSrc -> bDst) |
| 13457 | bSrc = edge->flBlock; |
| 13458 | |
| 13459 | maxEdgeWeightSum += edge->flEdgeWeightMax; |
| 13460 | minEdgeWeightSum += edge->flEdgeWeightMin; |
| 13461 | } |
| 13462 | |
| 13463 | // maxEdgeWeightSum is the sum of all flEdgeWeightMax values into bDst |
| 13464 | // minEdgeWeightSum is the sum of all flEdgeWeightMin values into bDst |
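| | //
| | // These sums bound each individual edge. For example (hypothetical numbers): if bDst's
| | // weight is 100 and the other incoming edges' Max values sum to 30, this edge must carry
| | // at least 70, so its Min can be raised to 70; likewise, if the other edges' Min values
| | // sum to 20, this edge can carry at most 80, so its Max can be lowered to 80.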
| 13465 | |
| 13466 | for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext) |
| 13467 | { |
| 13468 | bool assignOK = true; |
| 13469 | |
| 13470 | // We are processing the control flow edge (bSrc -> bDst) |
| 13471 | bSrc = edge->flBlock; |
| 13472 | slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; |
| 13473 | |
| 13474 | // otherMaxEdgesWeightSum is the sum of all of the other edges' flEdgeWeightMax values
| 13475 | // This can be used to compute a lower bound for our minimum edge weight |
| 13476 | noway_assert(maxEdgeWeightSum >= edge->flEdgeWeightMax); |
| 13477 | UINT64 otherMaxEdgesWeightSum = maxEdgeWeightSum - edge->flEdgeWeightMax; |
| 13478 | |
| 13479 | // otherMinEdgesWeightSum is the sum of all of the other edges' flEdgeWeightMin values
| 13480 | // This can be used to compute an upper bound for our maximum edge weight |
| 13481 | noway_assert(minEdgeWeightSum >= edge->flEdgeWeightMin); |
| 13482 | UINT64 otherMinEdgesWeightSum = minEdgeWeightSum - edge->flEdgeWeightMin; |
| 13483 | |
| 13484 | if (bDstWeight >= otherMaxEdgesWeightSum) |
| 13485 | { |
| 13486 | // minWeightCalc is our minWeight when every other path to bDst takes its flEdgeWeightMax value
| 13487 | BasicBlock::weight_t minWeightCalc = |
| 13488 | (BasicBlock::weight_t)(bDstWeight - otherMaxEdgesWeightSum); |
| 13489 | if (minWeightCalc > edge->flEdgeWeightMin) |
| 13490 | { |
| 13491 | assignOK &= edge->setEdgeWeightMinChecked(minWeightCalc, slop, &usedSlop); |
| 13492 | } |
| 13493 | } |
| 13494 | |
| 13495 | if (bDstWeight >= otherMinEdgesWeightSum) |
| 13496 | { |
| 13497 | // maxWeightCalc is our maxWeight when every other path to bDst takes its flEdgeWeightMin value
| 13498 | BasicBlock::weight_t maxWeightCalc = |
| 13499 | (BasicBlock::weight_t)(bDstWeight - otherMinEdgesWeightSum); |
| 13500 | if (maxWeightCalc < edge->flEdgeWeightMax) |
| 13501 | { |
| 13502 | assignOK &= edge->setEdgeWeightMaxChecked(maxWeightCalc, slop, &usedSlop); |
| 13503 | } |
| 13504 | } |
| 13505 | |
| 13506 | if (!assignOK) |
| 13507 | { |
| 13508 | // Here we have inconsistent profile data |
| 13509 | inconsistentProfileData = true; |
| 13510 | // No point in continuing |
| 13511 | goto EARLY_EXIT; |
| 13512 | } |
| 13513 | |
| 13514 | // When flEdgeWeightMin equals flEdgeWeightMax we have a "good" edge weight |
| 13515 | if (edge->flEdgeWeightMin == edge->flEdgeWeightMax) |
| 13516 | { |
| 13517 | // Count how many "good" edge weights we have |
| 13518 | // Each time through we should have more "good" weights |
| 13519 | // We exit the while loop when we no longer find any new "good" edges
| 13520 | goodEdgeCountCurrent++; |
| 13521 | } |
| 13522 | else |
| 13523 | { |
| 13524 | // Remember that we have seen at least one "Bad" edge weight |
| 13525 | // so that we will repeat the while loop again |
| 13526 | hasIncompleteEdgeWeights = true; |
| 13527 | } |
| 13528 | } |
| 13529 | } |
| 13530 | } |
| 13531 | |
| 13532 | if (inconsistentProfileData) |
| 13533 | { |
| 13534 | hasIncompleteEdgeWeights = true; |
| 13535 | break; |
| 13536 | } |
| 13537 | |
| 13538 | if (numEdges == goodEdgeCountCurrent) |
| 13539 | { |
| 13540 | noway_assert(hasIncompleteEdgeWeights == false); |
| 13541 | break; |
| 13542 | } |
| 13543 | |
| 13544 | } while (hasIncompleteEdgeWeights && (goodEdgeCountCurrent > goodEdgeCountPrevious) && (iterations < 8)); |
| 13545 | |
| 13546 | EARLY_EXIT:; |
| 13547 | |
| 13548 | #ifdef DEBUG |
| 13549 | if (verbose) |
| 13550 | { |
| 13551 | if (inconsistentProfileData) |
| 13552 | { |
| 13553 | printf("fgComputeEdgeWeights() found inconsistent profile data, not using the edge weights\n" ); |
| 13554 | } |
| 13555 | else |
| 13556 | { |
| 13557 | if (hasIncompleteEdgeWeights) |
| 13558 | { |
| 13559 | printf("fgComputeEdgeWeights() was able to compute exact edge weights for %3d of the %3d edges, using " |
| 13560 | "%d passes.\n" , |
| 13561 | goodEdgeCountCurrent, numEdges, iterations); |
| 13562 | } |
| 13563 | else |
| 13564 | { |
| 13565 | printf("fgComputeEdgeWeights() was able to compute exact edge weights for all of the %3d edges, using " |
| 13566 | "%d passes.\n" , |
| 13567 | numEdges, iterations); |
| 13568 | } |
| 13569 | |
| 13570 | fgPrintEdgeWeights(); |
| 13571 | } |
| 13572 | } |
| 13573 | #endif // DEBUG |
| 13574 | |
| 13575 | fgSlopUsedInEdgeWeights = usedSlop; |
| 13576 | fgRangeUsedInEdgeWeights = false; |
| 13577 | |
| 13578 | // See if any edge weights are expressed in [min..max] form
| 13579 | |
| 13580 | for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext) |
| 13581 | { |
| 13582 | if (bDst->bbPreds != nullptr) |
| 13583 | { |
| 13584 | for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext) |
| 13585 | { |
| 13586 | bSrc = edge->flBlock; |
| 13587 | // This is the control flow edge (bSrc -> bDst) |
| 13588 | |
| 13589 | if (edge->flEdgeWeightMin != edge->flEdgeWeightMax) |
| 13590 | { |
| 13591 | fgRangeUsedInEdgeWeights = true; |
| 13592 | break; |
| 13593 | } |
| 13594 | } |
| 13595 | if (fgRangeUsedInEdgeWeights) |
| 13596 | { |
| 13597 | break; |
| 13598 | } |
| 13599 | } |
| 13600 | } |
| 13601 | |
| 13602 | fgHaveValidEdgeWeights = !inconsistentProfileData; |
| 13603 | fgEdgeWeightsComputed = true; |
| 13604 | } |
| 13605 | |
| 13606 | // fgOptimizeBranchToEmptyUnconditional: |
| 13607 | // optimize a jump to an empty block which ends in an unconditional branch. |
| 13608 | // Args: |
| 13609 | // block: source block |
| 13610 | // bDest: destination |
| 13611 | // Returns: true if we changed the code |
| 13612 | // |
| 13613 | bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest) |
| 13614 | { |
| 13615 | bool optimizeJump = true; |
| 13616 | |
| 13617 | assert(bDest->isEmpty()); |
| 13618 | assert(bDest->bbJumpKind == BBJ_ALWAYS); |
| 13619 | |
| 13620 | // We do not optimize jumps between two different try regions. |
| 13621 | // However jumping to a block that is not in any try region is OK |
| 13622 | // |
| 13623 | if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) |
| 13624 | { |
| 13625 | optimizeJump = false; |
| 13626 | } |
| 13627 | |
| 13628 | // Don't optimize a jump to a removed block |
| 13629 | if (bDest->bbJumpDest->bbFlags & BBF_REMOVED) |
| 13630 | { |
| 13631 | optimizeJump = false; |
| 13632 | } |
| 13633 | |
| 13634 | // Don't optimize a jump to a cloned finally |
| 13635 | if (bDest->bbFlags & BBF_CLONED_FINALLY_BEGIN) |
| 13636 | { |
| 13637 | optimizeJump = false; |
| 13638 | } |
| 13639 | |
| 13640 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 13641 | // Don't optimize a jump to a finally target. For BB1->BB2->BB3, where |
| 13642 | // BB2 is a finally target, if we changed BB1 to jump directly to BB3, |
| 13643 | // it would skip the finally target. BB1 might be a BBJ_ALWAYS block part |
| 13644 | // of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, so changing the finally target |
| 13645 | // would change the unwind behavior. |
| 13646 | if (bDest->bbFlags & BBF_FINALLY_TARGET) |
| 13647 | { |
| 13648 | optimizeJump = false; |
| 13649 | } |
| 13650 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 13651 | |
| 13652 | // Must optimize jump if bDest has been removed |
| 13653 | // |
| 13654 | if (bDest->bbFlags & BBF_REMOVED) |
| 13655 | { |
| 13656 | optimizeJump = true; |
| 13657 | } |
| 13658 | |
| 13659 | // If we are optimizing using real profile weights |
| 13660 | // then don't optimize a conditional jump to an unconditional jump |
| 13661 | // until after we have computed the edge weights |
| 13662 | // |
| 13663 | if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed) |
| 13664 | { |
| 13665 | fgNeedsUpdateFlowGraph = true; |
| 13666 | optimizeJump = false; |
| 13667 | } |
| 13668 | |
| 13669 | if (optimizeJump) |
| 13670 | { |
| 13671 | #ifdef DEBUG |
| 13672 | if (verbose) |
| 13673 | { |
| 13674 | printf("\nOptimizing a jump to an unconditional jump (" FMT_BB " -> " FMT_BB " -> " FMT_BB ")\n" , |
| 13675 | block->bbNum, bDest->bbNum, bDest->bbJumpDest->bbNum); |
| 13676 | } |
| 13677 | #endif // DEBUG |
| 13678 | |
| 13679 | // |
| 13680 | // When we optimize a branch to branch we need to update the profile weight |
| 13681 | // of bDest by subtracting out the block/edge weight of the path that is being optimized. |
| 13682 | // |
| 13683 | if (fgHaveValidEdgeWeights && bDest->hasProfileWeight()) |
| 13684 | { |
| 13685 | flowList* edge1 = fgGetPredForBlock(bDest, block); |
| 13686 | noway_assert(edge1 != nullptr); |
| 13687 | |
| 13688 | BasicBlock::weight_t edgeWeight; |
| 13689 | |
| 13690 | if (edge1->flEdgeWeightMin != edge1->flEdgeWeightMax) |
| 13691 | { |
| 13692 | // |
| 13693 | // We only have an estimate for the edge weight |
| 13694 | // |
| 13695 | edgeWeight = (edge1->flEdgeWeightMin + edge1->flEdgeWeightMax) / 2; |
| 13696 | // |
| 13697 | // Clear the profile weight flag |
| 13698 | // |
| 13699 | bDest->bbFlags &= ~BBF_PROF_WEIGHT; |
| 13700 | } |
| 13701 | else |
| 13702 | { |
| 13703 | // |
| 13704 | // We only have the exact edge weight |
| 13705 | // |
| 13706 | edgeWeight = edge1->flEdgeWeightMin; |
| 13707 | } |
| 13708 | |
| 13709 | // |
| 13710 | // Update the bDest->bbWeight |
| 13711 | // |
| 13712 | if (bDest->bbWeight > edgeWeight) |
| 13713 | { |
| 13714 | bDest->bbWeight -= edgeWeight; |
| 13715 | } |
| 13716 | else |
| 13717 | { |
| 13718 | bDest->bbWeight = BB_ZERO_WEIGHT; |
| 13719 | bDest->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag |
| 13720 | } |
| 13721 | |
| 13722 | flowList* edge2 = fgGetPredForBlock(bDest->bbJumpDest, bDest); |
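// edge2 is the flow edge from bDest to its jump target. Since the path
// (block -> bDest -> bDest->bbJumpDest) is being redirected to go straight to the
// target, shrink edge2's [min..max] range by the weight removed from bDest (edge1's minimum).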
| 13723 | |
| 13724 | if (edge2 != nullptr) |
| 13725 | { |
| 13726 | // |
| 13727 | // Update the edge2 min/max weights |
| 13728 | // |
| 13729 | if (edge2->flEdgeWeightMin > edge1->flEdgeWeightMin) |
| 13730 | { |
| 13731 | edge2->flEdgeWeightMin -= edge1->flEdgeWeightMin; |
| 13732 | } |
| 13733 | else |
| 13734 | { |
| 13735 | edge2->flEdgeWeightMin = BB_ZERO_WEIGHT; |
| 13736 | } |
| 13737 | |
| 13738 | if (edge2->flEdgeWeightMax > edge1->flEdgeWeightMin) |
| 13739 | { |
| 13740 | edge2->flEdgeWeightMax -= edge1->flEdgeWeightMin; |
| 13741 | } |
| 13742 | else |
| 13743 | { |
| 13744 | edge2->flEdgeWeightMax = BB_ZERO_WEIGHT; |
| 13745 | } |
| 13746 | } |
| 13747 | } |
| 13748 | |
| 13749 | // Optimize the JUMP to empty unconditional JUMP to go to the new target |
| 13750 | block->bbJumpDest = bDest->bbJumpDest; |
| 13751 | |
| 13752 | fgAddRefPred(bDest->bbJumpDest, block, fgRemoveRefPred(bDest, block)); |
| 13753 | |
| 13754 | return true; |
| 13755 | } |
| 13756 | return false; |
| 13757 | } |
| 13758 | |
| 13759 | // fgOptimizeEmptyBlock: |
| 13760 | // Does flow optimization of an empty block (can remove it in some cases) |
| 13761 | // |
| 13762 | // Args: |
| 13763 | // block: an empty block |
| 13764 | // Returns: true if we changed the code |
| 13765 | |
| 13766 | bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) |
| 13767 | { |
| 13768 | assert(block->isEmpty()); |
| 13769 | |
| 13770 | BasicBlock* bPrev = block->bbPrev; |
| 13771 | |
| 13772 | switch (block->bbJumpKind) |
| 13773 | { |
| 13774 | case BBJ_COND: |
| 13775 | case BBJ_SWITCH: |
| 13776 | case BBJ_THROW: |
| 13777 | |
| 13778 | /* can never happen */ |
| 13779 | noway_assert(!"Conditional, switch, or throw block with empty body!" ); |
| 13780 | break; |
| 13781 | |
| 13782 | case BBJ_CALLFINALLY: |
| 13783 | case BBJ_RETURN: |
| 13784 | case BBJ_EHCATCHRET: |
| 13785 | case BBJ_EHFINALLYRET: |
| 13786 | case BBJ_EHFILTERRET: |
| 13787 | |
| 13788 | /* leave them as is */ |
| 13789 | /* some compilers generate multiple returns and put all of them at the end - |
| 13790 | * to solve that we need the predecessor list */ |
| 13791 | |
| 13792 | break; |
| 13793 | |
| 13794 | case BBJ_ALWAYS: |
| 13795 | |
| 13796 | // A GOTO cannot be to the next block since that |
// should have been fixed by the optimization above.
// An exception is made for a jump from hot code to cold code.
| 13799 | noway_assert(block->bbJumpDest != block->bbNext || ((bPrev != nullptr) && bPrev->isBBCallAlwaysPair()) || |
| 13800 | fgInDifferentRegions(block, block->bbNext)); |
| 13801 | |
| 13802 | /* Cannot remove the first BB */ |
| 13803 | if (!bPrev) |
| 13804 | { |
| 13805 | break; |
| 13806 | } |
| 13807 | |
| 13808 | /* Do not remove a block that jumps to itself - used for while (true){} */ |
| 13809 | if (block->bbJumpDest == block) |
| 13810 | { |
| 13811 | break; |
| 13812 | } |
| 13813 | |
| 13814 | /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ |
| 13815 | if (bPrev->bbJumpKind != BBJ_NONE) |
| 13816 | { |
| 13817 | break; |
| 13818 | } |
| 13819 | |
| 13820 | // can't allow fall through into cold code |
| 13821 | if (block->bbNext == fgFirstColdBlock) |
| 13822 | { |
| 13823 | break; |
| 13824 | } |
| 13825 | |
/* Can fall through since this is similar to removing
| 13827 | * a BBJ_NONE block, only the successor is different */ |
| 13828 | |
| 13829 | __fallthrough; |
| 13830 | |
| 13831 | case BBJ_NONE: |
| 13832 | |
| 13833 | /* special case if this is the first BB */ |
| 13834 | if (!bPrev) |
| 13835 | { |
| 13836 | assert(block == fgFirstBB); |
| 13837 | } |
| 13838 | else |
| 13839 | { |
| 13840 | /* If this block follows a BBJ_CALLFINALLY do not remove it |
| 13841 | * (because we don't know who may jump to it) */ |
| 13842 | if (bPrev->bbJumpKind == BBJ_CALLFINALLY) |
| 13843 | { |
| 13844 | break; |
| 13845 | } |
| 13846 | } |
| 13847 | |
| 13848 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 13849 | /* Don't remove finally targets */ |
| 13850 | if (block->bbFlags & BBF_FINALLY_TARGET) |
| 13851 | break; |
| 13852 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 13853 | |
| 13854 | #if FEATURE_EH_FUNCLETS |
| 13855 | /* Don't remove an empty block that is in a different EH region |
| 13856 | * from its successor block, if the block is the target of a |
| 13857 | * catch return. It is required that the return address of a |
| 13858 | * catch be in the correct EH region, for re-raise of thread |
| 13859 | * abort exceptions to work. Insert a NOP in the empty block |
| 13860 | * to ensure we generate code for the block, if we keep it. |
| 13861 | */ |
| 13862 | { |
| 13863 | BasicBlock* succBlock; |
| 13864 | |
| 13865 | if (block->bbJumpKind == BBJ_ALWAYS) |
| 13866 | { |
| 13867 | succBlock = block->bbJumpDest; |
| 13868 | } |
| 13869 | else |
| 13870 | { |
| 13871 | succBlock = block->bbNext; |
| 13872 | } |
| 13873 | |
| 13874 | if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock)) |
| 13875 | { |
| 13876 | // The empty block and the block that follows it are in different |
| 13877 | // EH regions. Is this a case where they can't be merged? |
| 13878 | |
| 13879 | bool okToMerge = true; // assume it's ok |
| 13880 | for (flowList* pred = block->bbPreds; pred; pred = pred->flNext) |
| 13881 | { |
| 13882 | if (pred->flBlock->bbJumpKind == BBJ_EHCATCHRET) |
| 13883 | { |
| 13884 | assert(pred->flBlock->bbJumpDest == block); |
| 13885 | okToMerge = false; // we can't get rid of the empty block |
| 13886 | break; |
| 13887 | } |
| 13888 | } |
| 13889 | |
| 13890 | if (!okToMerge) |
| 13891 | { |
| 13892 | // Insert a NOP in the empty block to ensure we generate code |
| 13893 | // for the catchret target in the right EH region. |
| 13894 | GenTree* nop = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); |
| 13895 | |
| 13896 | if (block->IsLIR()) |
| 13897 | { |
| 13898 | LIR::AsRange(block).InsertAtEnd(nop); |
| 13899 | LIR::ReadOnlyRange range(nop, nop); |
| 13900 | m_pLowering->LowerRange(block, range); |
| 13901 | } |
| 13902 | else |
| 13903 | { |
| 13904 | GenTree* nopStmt = fgInsertStmtAtEnd(block, nop); |
| 13905 | fgSetStmtSeq(nopStmt); |
| 13906 | gtSetStmtInfo(nopStmt); |
| 13907 | } |
| 13908 | |
| 13909 | #ifdef DEBUG |
| 13910 | if (verbose) |
| 13911 | { |
| 13912 | printf("\nKeeping empty block " FMT_BB " - it is the target of a catch return\n" , |
| 13913 | block->bbNum); |
| 13914 | } |
| 13915 | #endif // DEBUG |
| 13916 | |
| 13917 | break; // go to the next block |
| 13918 | } |
| 13919 | } |
| 13920 | } |
| 13921 | #endif // FEATURE_EH_FUNCLETS |
| 13922 | |
| 13923 | if (!ehCanDeleteEmptyBlock(block)) |
| 13924 | { |
| 13925 | // We're not allowed to remove this block due to reasons related to the EH table. |
| 13926 | break; |
| 13927 | } |
| 13928 | |
| 13929 | /* special case if this is the last BB */ |
| 13930 | if (block == fgLastBB) |
| 13931 | { |
| 13932 | if (!bPrev) |
| 13933 | { |
| 13934 | break; |
| 13935 | } |
| 13936 | fgLastBB = bPrev; |
| 13937 | } |
| 13938 | |
| 13939 | // When using profile weights, fgComputeEdgeWeights expects the first non-internal block to have profile |
| 13940 | // weight. |
| 13941 | // Make sure we don't break that invariant. |
| 13942 | if (fgIsUsingProfileWeights() && block->hasProfileWeight() && (block->bbFlags & BBF_INTERNAL) == 0) |
| 13943 | { |
| 13944 | BasicBlock* bNext = block->bbNext; |
| 13945 | |
| 13946 | // Check if the next block can't maintain the invariant. |
| 13947 | if ((bNext == nullptr) || ((bNext->bbFlags & BBF_INTERNAL) != 0) || !bNext->hasProfileWeight()) |
| 13948 | { |
| 13949 | // Check if the current block is the first non-internal block. |
| 13950 | BasicBlock* curBB = bPrev; |
| 13951 | while ((curBB != nullptr) && (curBB->bbFlags & BBF_INTERNAL) != 0) |
| 13952 | { |
| 13953 | curBB = curBB->bbPrev; |
| 13954 | } |
| 13955 | if (curBB == nullptr) |
| 13956 | { |
| 13957 | // This block is the first non-internal block and it has profile weight. |
| 13958 | // Don't delete it. |
| 13959 | break; |
| 13960 | } |
| 13961 | } |
| 13962 | } |
| 13963 | |
| 13964 | /* Remove the block */ |
| 13965 | compCurBB = block; |
| 13966 | fgRemoveBlock(block, false); |
| 13967 | return true; |
| 13968 | |
| 13969 | default: |
| 13970 | noway_assert(!"Unexpected bbJumpKind" ); |
| 13971 | break; |
| 13972 | } |
| 13973 | return false; |
| 13974 | } |
| 13975 | |
| 13976 | // fgOptimizeSwitchBranches: |
| 13977 | // Does flow optimization for a switch - bypasses jumps to empty unconditional branches, |
| 13978 | // and transforms degenerate switch cases like those with 1 or 2 targets |
| 13979 | // |
| 13980 | // Args: |
| 13981 | // block: BasicBlock that contains the switch |
| 13982 | // Returns: true if we changed the code |
| 13983 | // |
| 13984 | bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) |
| 13985 | { |
| 13986 | assert(block->bbJumpKind == BBJ_SWITCH); |
| 13987 | |
| 13988 | unsigned jmpCnt = block->bbJumpSwt->bbsCount; |
| 13989 | BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; |
| 13990 | BasicBlock* bNewDest; // the new jump target for the current switch case |
| 13991 | BasicBlock* bDest; |
| 13992 | bool returnvalue = false; |
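// Walk every entry in the switch jump table; a case that jumps to an empty
// BBJ_ALWAYS block can usually be retargeted to that block's destination.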
| 13993 | |
| 13994 | do |
| 13995 | { |
| 13996 | REPEAT_SWITCH:; |
| 13997 | bDest = *jmpTab; |
| 13998 | bNewDest = bDest; |
| 13999 | |
| 14000 | // Do we have a JUMP to an empty unconditional JUMP block? |
| 14001 | if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && |
| 14002 | (bDest != bDest->bbJumpDest)) // special case for self jumps |
| 14003 | { |
| 14004 | bool optimizeJump = true; |
| 14005 | |
| 14006 | // We do not optimize jumps between two different try regions. |
| 14007 | // However jumping to a block that is not in any try region is OK |
| 14008 | // |
| 14009 | if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) |
| 14010 | { |
| 14011 | optimizeJump = false; |
| 14012 | } |
| 14013 | |
// If we are optimizing using real profile weights
| 14015 | // then don't optimize a switch jump to an unconditional jump |
| 14016 | // until after we have computed the edge weights |
| 14017 | // |
| 14018 | if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed) |
| 14019 | { |
| 14020 | fgNeedsUpdateFlowGraph = true; |
| 14021 | optimizeJump = false; |
| 14022 | } |
| 14023 | |
| 14024 | if (optimizeJump) |
| 14025 | { |
| 14026 | bNewDest = bDest->bbJumpDest; |
| 14027 | #ifdef DEBUG |
| 14028 | if (verbose) |
| 14029 | { |
| 14030 | printf("\nOptimizing a switch jump to an empty block with an unconditional jump (" FMT_BB |
| 14031 | " -> " FMT_BB " " |
| 14032 | "-> " FMT_BB ")\n" , |
| 14033 | block->bbNum, bDest->bbNum, bNewDest->bbNum); |
| 14034 | } |
| 14035 | #endif // DEBUG |
| 14036 | } |
| 14037 | } |
| 14038 | |
| 14039 | if (bNewDest != bDest) |
| 14040 | { |
| 14041 | // |
| 14042 | // When we optimize a branch to branch we need to update the profile weight |
| 14043 | // of bDest by subtracting out the block/edge weight of the path that is being optimized. |
| 14044 | // |
| 14045 | if (fgIsUsingProfileWeights() && bDest->hasProfileWeight()) |
| 14046 | { |
| 14047 | if (fgHaveValidEdgeWeights) |
| 14048 | { |
| 14049 | flowList* edge = fgGetPredForBlock(bDest, block); |
| 14050 | BasicBlock::weight_t branchThroughWeight = edge->flEdgeWeightMin; |
| 14051 | |
| 14052 | if (bDest->bbWeight > branchThroughWeight) |
| 14053 | { |
| 14054 | bDest->bbWeight -= branchThroughWeight; |
| 14055 | } |
| 14056 | else |
| 14057 | { |
| 14058 | bDest->bbWeight = BB_ZERO_WEIGHT; |
| 14059 | bDest->bbFlags |= BBF_RUN_RARELY; |
| 14060 | } |
| 14061 | } |
| 14062 | } |
| 14063 | |
| 14064 | // Update the switch jump table |
| 14065 | *jmpTab = bNewDest; |
| 14066 | |
| 14067 | // Maintain, if necessary, the set of unique targets of "block." |
| 14068 | UpdateSwitchTableTarget(block, bDest, bNewDest); |
| 14069 | |
| 14070 | fgAddRefPred(bNewDest, block, fgRemoveRefPred(bDest, block)); |
| 14071 | |
| 14072 | // we optimized a Switch label - goto REPEAT_SWITCH to follow this new jump |
| 14073 | returnvalue = true; |
| 14074 | |
| 14075 | goto REPEAT_SWITCH; |
| 14076 | } |
| 14077 | } while (++jmpTab, --jmpCnt); |
| 14078 | |
| 14079 | GenTreeStmt* switchStmt = nullptr; |
| 14080 | LIR::Range* blockRange = nullptr; |
| 14081 | |
| 14082 | GenTree* switchTree; |
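// Locate the switch node: in LIR it is the last node in the block's range (GT_SWITCH_TABLE);
// otherwise it is the expression of the block's last statement (GT_SWITCH).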
| 14083 | if (block->IsLIR()) |
| 14084 | { |
| 14085 | blockRange = &LIR::AsRange(block); |
| 14086 | switchTree = blockRange->LastNode(); |
| 14087 | |
| 14088 | assert(switchTree->OperGet() == GT_SWITCH_TABLE); |
| 14089 | } |
| 14090 | else |
| 14091 | { |
| 14092 | switchStmt = block->lastStmt(); |
| 14093 | switchTree = switchStmt->gtStmtExpr; |
| 14094 | |
| 14095 | assert(switchTree->OperGet() == GT_SWITCH); |
| 14096 | } |
| 14097 | |
| 14098 | noway_assert(switchTree->gtType == TYP_VOID); |
| 14099 | |
| 14100 | // At this point all of the case jump targets have been updated such |
// that none of them go to a block that is an empty unconditional block
| 14102 | // |
| 14103 | jmpTab = block->bbJumpSwt->bbsDstTab; |
| 14104 | jmpCnt = block->bbJumpSwt->bbsCount; |
| 14105 | // Now check for two trivial switch jumps. |
| 14106 | // |
| 14107 | if (block->NumSucc(this) == 1) |
| 14108 | { |
| 14109 | // Use BBJ_ALWAYS for a switch with only a default clause, or with only one unique successor. |
| 14110 | BasicBlock* uniqueSucc = jmpTab[0]; |
| 14111 | |
| 14112 | #ifdef DEBUG |
| 14113 | if (verbose) |
| 14114 | { |
| 14115 | printf("\nRemoving a switch jump with a single target (" FMT_BB ")\n" , block->bbNum); |
| 14116 | printf("BEFORE:\n" ); |
| 14117 | } |
| 14118 | #endif // DEBUG |
| 14119 | |
| 14120 | if (block->IsLIR()) |
| 14121 | { |
| 14122 | bool isClosed; |
| 14123 | unsigned sideEffects; |
| 14124 | LIR::ReadOnlyRange switchTreeRange = blockRange->GetTreeRange(switchTree, &isClosed, &sideEffects); |
| 14125 | |
| 14126 | // The switch tree should form a contiguous, side-effect free range by construction. See |
| 14127 | // Lowering::LowerSwitch for details. |
| 14128 | assert(isClosed); |
| 14129 | assert((sideEffects & GTF_ALL_EFFECT) == 0); |
| 14130 | |
| 14131 | blockRange->Delete(this, block, std::move(switchTreeRange)); |
| 14132 | } |
| 14133 | else |
| 14134 | { |
| 14135 | /* check for SIDE_EFFECTS */ |
| 14136 | if (switchTree->gtFlags & GTF_SIDE_EFFECT) |
| 14137 | { |
| 14138 | /* Extract the side effects from the conditional */ |
| 14139 | GenTree* sideEffList = nullptr; |
| 14140 | |
| 14141 | gtExtractSideEffList(switchTree, &sideEffList); |
| 14142 | |
| 14143 | if (sideEffList == nullptr) |
| 14144 | { |
| 14145 | goto NO_SWITCH_SIDE_EFFECT; |
| 14146 | } |
| 14147 | |
| 14148 | noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); |
| 14149 | |
| 14150 | #ifdef DEBUG |
| 14151 | if (verbose) |
| 14152 | { |
| 14153 | printf("\nSwitch expression has side effects! Extracting side effects...\n" ); |
| 14154 | gtDispTree(switchTree); |
| 14155 | printf("\n" ); |
| 14156 | gtDispTree(sideEffList); |
| 14157 | printf("\n" ); |
| 14158 | } |
| 14159 | #endif // DEBUG |
| 14160 | |
| 14161 | /* Replace the conditional statement with the list of side effects */ |
| 14162 | noway_assert(sideEffList->gtOper != GT_STMT); |
| 14163 | noway_assert(sideEffList->gtOper != GT_SWITCH); |
| 14164 | |
| 14165 | switchStmt->gtStmtExpr = sideEffList; |
| 14166 | |
| 14167 | if (fgStmtListThreaded) |
| 14168 | { |
| 14169 | compCurBB = block; |
| 14170 | |
| 14171 | /* Update ordering, costs, FP levels, etc. */ |
| 14172 | gtSetStmtInfo(switchStmt); |
| 14173 | |
| 14174 | /* Re-link the nodes for this statement */ |
| 14175 | fgSetStmtSeq(switchStmt); |
| 14176 | } |
| 14177 | } |
| 14178 | else |
| 14179 | { |
| 14180 | |
| 14181 | NO_SWITCH_SIDE_EFFECT: |
| 14182 | |
| 14183 | /* conditional has NO side effect - remove it */ |
| 14184 | fgRemoveStmt(block, switchStmt); |
| 14185 | } |
| 14186 | } |
| 14187 | |
| 14188 | // Change the switch jump into a BBJ_ALWAYS |
| 14189 | block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; |
| 14190 | block->bbJumpKind = BBJ_ALWAYS; |
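// The switch has a single unique successor; drop the duplicate pred list entries
// so that only one reference from this block remains.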
| 14191 | if (jmpCnt > 1) |
| 14192 | { |
| 14193 | for (unsigned i = 1; i < jmpCnt; ++i) |
| 14194 | { |
| 14195 | (void)fgRemoveRefPred(jmpTab[i], block); |
| 14196 | } |
| 14197 | } |
| 14198 | |
| 14199 | return true; |
| 14200 | } |
| 14201 | else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->bbNext) |
| 14202 | { |
| 14203 | /* Use a BBJ_COND(switchVal==0) for a switch with only one |
| 14204 | significant clause besides the default clause, if the |
| 14205 | default clause is bbNext */ |
| 14206 | GenTree* switchVal = switchTree->gtOp.gtOp1; |
| 14207 | noway_assert(genActualTypeIsIntOrI(switchVal->TypeGet())); |
| 14208 | |
| 14209 | // If we are in LIR, remove the jump table from the block. |
| 14210 | if (block->IsLIR()) |
| 14211 | { |
| 14212 | GenTree* jumpTable = switchTree->gtOp.gtOp2; |
| 14213 | assert(jumpTable->OperGet() == GT_JMPTABLE); |
| 14214 | blockRange->Remove(jumpTable); |
| 14215 | } |
| 14216 | |
| 14217 | // Change the GT_SWITCH(switchVal) into GT_JTRUE(GT_EQ(switchVal==0)). |
// Also mark the node as GTF_DONT_CSE, as further down the JIT is not capable of handling it.
// For example, CSE could determine that the expression rooted at GT_EQ is a CSE candidate and
| 14220 | // replace it with a COMMA node. In such a case we will end up with GT_JTRUE node pointing to |
| 14221 | // a COMMA node which results in noway asserts in fgMorphSmpOp(), optAssertionGen() and rpPredictTreeRegUse(). |
| 14222 | // For the same reason fgMorphSmpOp() marks GT_JTRUE nodes with RELOP children as GTF_DONT_CSE. |
| 14223 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 14224 | |
| 14225 | #ifdef DEBUG |
| 14226 | if (verbose) |
| 14227 | { |
| 14228 | printf("\nConverting a switch (" FMT_BB ") with only one significant clause besides a default target to a " |
| 14229 | "conditional branch\n" , |
| 14230 | block->bbNum); |
| 14231 | } |
| 14232 | #endif // DEBUG |
| 14233 | |
| 14234 | switchTree->ChangeOper(GT_JTRUE); |
| 14235 | GenTree* zeroConstNode = gtNewZeroConNode(genActualType(switchVal->TypeGet())); |
| 14236 | GenTree* condNode = gtNewOperNode(GT_EQ, TYP_INT, switchVal, zeroConstNode); |
| 14237 | switchTree->gtOp.gtOp1 = condNode; |
| 14238 | switchTree->gtOp.gtOp1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE); |
| 14239 | |
| 14240 | if (block->IsLIR()) |
| 14241 | { |
| 14242 | blockRange->InsertAfter(switchVal, zeroConstNode, condNode); |
| 14243 | LIR::ReadOnlyRange range(zeroConstNode, switchTree); |
| 14244 | m_pLowering->LowerRange(block, range); |
| 14245 | } |
| 14246 | else |
| 14247 | { |
| 14248 | // Re-link the nodes for this statement. |
| 14249 | fgSetStmtSeq(switchStmt); |
| 14250 | } |
| 14251 | |
| 14252 | block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; |
| 14253 | block->bbJumpKind = BBJ_COND; |
| 14254 | |
| 14255 | return true; |
| 14256 | } |
| 14257 | return returnvalue; |
| 14258 | } |
| 14259 | |
| 14260 | // fgBlockEndFavorsTailDuplication: |
| 14261 | // Heuristic function that returns true if this block ends in a statement that looks favorable |
| 14262 | // for tail-duplicating its successor (such as assigning a constant to a local). |
| 14263 | // Args: |
| 14264 | // block: BasicBlock we are considering duplicating the successor of |
| 14265 | // Returns: |
| 14266 | // true if it seems like a good idea |
| 14267 | // |
| 14268 | bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock* block) |
| 14269 | { |
| 14270 | if (block->isRunRarely()) |
| 14271 | { |
| 14272 | return false; |
| 14273 | } |
| 14274 | |
| 14275 | if (!block->lastStmt()) |
| 14276 | { |
| 14277 | return false; |
| 14278 | } |
| 14279 | else |
| 14280 | { |
| 14281 | // Tail duplication tends to pay off when the last statement |
| 14282 | // is an assignment of a constant, arraylength, or a relop. |
| 14283 | // This is because these statements produce information about values |
| 14284 | // that would otherwise be lost at the upcoming merge point. |
| 14285 | |
| 14286 | GenTreeStmt* lastStmt = block->lastStmt(); |
| 14287 | GenTree* tree = lastStmt->gtStmtExpr; |
| 14288 | if (tree->gtOper != GT_ASG) |
| 14289 | { |
| 14290 | return false; |
| 14291 | } |
| 14292 | |
| 14293 | if (tree->OperIsBlkOp()) |
| 14294 | { |
| 14295 | return false; |
| 14296 | } |
| 14297 | |
| 14298 | GenTree* op2 = tree->gtOp.gtOp2; |
| 14299 | if (op2->gtOper != GT_ARR_LENGTH && !op2->OperIsConst() && ((op2->OperKind() & GTK_RELOP) == 0)) |
| 14300 | { |
| 14301 | return false; |
| 14302 | } |
| 14303 | } |
| 14304 | return true; |
| 14305 | } |
| 14306 | |
| 14307 | // fgBlockIsGoodTailDuplicationCandidate: |
| 14308 | // Heuristic function that examines a block (presumably one that is a merge point) to determine |
| 14309 | // if it should be duplicated. |
| 14310 | // args: |
| 14311 | // target - the tail block (candidate for duplication) |
| 14312 | // returns: |
| 14313 | // true if this block seems like a good candidate for duplication |
| 14314 | // |
| 14315 | bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target) |
| 14316 | { |
| 14317 | GenTreeStmt* stmt = target->FirstNonPhiDef(); |
| 14318 | |
| 14319 | // Here we are looking for blocks with a single statement feeding a conditional branch. |
| 14320 | // These blocks are small, and when duplicated onto the tail of blocks that end in |
| 14321 | // assignments, there is a high probability of the branch completely going away. |
| 14322 | |
| 14323 | // This is by no means the only kind of tail that it is beneficial to duplicate, |
| 14324 | // just the only one we recognize for now. |
| 14325 | |
| 14326 | if (stmt != target->lastStmt()) |
| 14327 | { |
| 14328 | return false; |
| 14329 | } |
| 14330 | |
| 14331 | if (target->bbJumpKind != BBJ_COND) |
| 14332 | { |
| 14333 | return false; |
| 14334 | } |
| 14335 | |
| 14336 | GenTree* tree = stmt->gtStmtExpr; |
| 14337 | |
| 14338 | if (tree->gtOper != GT_JTRUE) |
| 14339 | { |
| 14340 | return false; |
| 14341 | } |
| 14342 | |
| 14343 | // must be some kind of relational operator |
| 14344 | GenTree* cond = tree->gtOp.gtOp1; |
| 14345 | if (!(cond->OperKind() & GTK_RELOP)) |
| 14346 | { |
| 14347 | return false; |
| 14348 | } |
| 14349 | |
// op1 must be some combination of casts of a local or a constant
| 14351 | GenTree* op1 = cond->gtOp.gtOp1; |
| 14352 | while (op1->gtOper == GT_CAST) |
| 14353 | { |
| 14354 | op1 = op1->gtOp.gtOp1; |
| 14355 | } |
| 14356 | if (!op1->IsLocal() && !op1->OperIsConst()) |
| 14357 | { |
| 14358 | return false; |
| 14359 | } |
| 14360 | |
// op2 must be some combination of casts of a local or a constant
| 14362 | GenTree* op2 = cond->gtOp.gtOp2; |
| 14363 | while (op2->gtOper == GT_CAST) |
| 14364 | { |
| 14365 | op2 = op2->gtOp.gtOp1; |
| 14366 | } |
| 14367 | if (!op2->IsLocal() && !op2->OperIsConst()) |
| 14368 | { |
| 14369 | return false; |
| 14370 | } |
| 14371 | |
| 14372 | return true; |
| 14373 | } |
| 14374 | |
| 14375 | // fgOptimizeUncondBranchToSimpleCond: |
| 14376 | // For a block which has an unconditional branch, look to see if its target block |
| 14377 | // is a good candidate for tail duplication, and if so do that duplication. |
| 14378 | // |
| 14379 | // Args: |
| 14380 | // block - block with uncond branch |
| 14381 | // target - block which is target of first block |
| 14382 | // |
| 14383 | // returns: true if changes were made |
| 14384 | |
| 14385 | bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target) |
| 14386 | { |
| 14387 | assert(block->bbJumpKind == BBJ_ALWAYS); |
| 14388 | assert(block->bbJumpDest == target); |
| 14389 | |
| 14390 | // TODO-Review: OK if they are in the same region? |
| 14391 | if (compHndBBtabCount > 0) |
| 14392 | { |
| 14393 | return false; |
| 14394 | } |
| 14395 | |
| 14396 | if (!fgBlockIsGoodTailDuplicationCandidate(target)) |
| 14397 | { |
| 14398 | return false; |
| 14399 | } |
| 14400 | |
| 14401 | if (!fgBlockEndFavorsTailDuplication(block)) |
| 14402 | { |
| 14403 | return false; |
| 14404 | } |
| 14405 | |
| 14406 | // NOTE: we do not currently hit this assert because this function is only called when |
| 14407 | // `fgUpdateFlowGraph` has been called with `doTailDuplication` set to true, and the |
| 14408 | // backend always calls `fgUpdateFlowGraph` with `doTailDuplication` set to false. |
| 14409 | assert(!block->IsLIR()); |
| 14410 | |
| 14411 | GenTreeStmt* stmt = target->FirstNonPhiDef(); |
| 14412 | assert(stmt == target->lastStmt()); |
| 14413 | |
| 14414 | // Duplicate the target block at the end of this block |
| 14415 | |
| 14416 | GenTree* cloned = gtCloneExpr(stmt->gtStmtExpr); |
| 14417 | noway_assert(cloned); |
| 14418 | GenTree* jmpStmt = gtNewStmt(cloned); |
| 14419 | |
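// Convert 'block' into a conditional branch that jumps where 'target' jumped,
// and remove its (now unnecessary) edge into 'target'.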
| 14420 | block->bbJumpKind = BBJ_COND; |
| 14421 | block->bbJumpDest = target->bbJumpDest; |
| 14422 | fgAddRefPred(block->bbJumpDest, block); |
| 14423 | fgRemoveRefPred(target, block); |
| 14424 | |
| 14425 | // add an unconditional block after this block to jump to the target block's fallthrough block |
| 14426 | |
| 14427 | BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true); |
| 14428 | |
| 14429 | // The new block 'next' will inherit its weight from 'block' |
| 14430 | next->inheritWeight(block); |
| 14431 | next->bbJumpDest = target->bbNext; |
| 14432 | target->bbNext->bbFlags |= BBF_JMP_TARGET; |
| 14433 | fgAddRefPred(next, block); |
| 14434 | fgAddRefPred(next->bbJumpDest, next); |
| 14435 | |
| 14436 | #ifdef DEBUG |
| 14437 | if (verbose) |
| 14438 | { |
| 14439 | printf("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), created new uncond " FMT_BB |
| 14440 | "\n" , |
| 14441 | block->bbNum, target->bbNum, next->bbNum); |
| 14442 | } |
| 14443 | #endif // DEBUG |
| 14444 | |
| 14445 | if (fgStmtListThreaded) |
| 14446 | { |
| 14447 | gtSetStmtInfo(jmpStmt); |
| 14448 | } |
| 14449 | |
| 14450 | fgInsertStmtAtEnd(block, jmpStmt); |
| 14451 | |
| 14452 | return true; |
| 14453 | } |
| 14454 | |
| 14455 | // fgOptimizeBranchToNext: |
| 14456 | // Optimize a block which has a branch to the following block |
| 14457 | // Args: |
| 14458 | // block - block with a branch |
| 14459 | // bNext - block which is both next and the target of the first block |
| 14460 | // bPrev - block which is prior to the first block |
| 14461 | // |
| 14462 | // returns: true if changes were made |
| 14463 | // |
| 14464 | bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev) |
| 14465 | { |
| 14466 | assert(block->bbJumpKind == BBJ_COND || block->bbJumpKind == BBJ_ALWAYS); |
| 14467 | assert(block->bbJumpDest == bNext); |
| 14468 | assert(block->bbNext == bNext); |
| 14469 | assert(block->bbPrev == bPrev); |
| 14470 | |
| 14471 | if (block->bbJumpKind == BBJ_ALWAYS) |
| 14472 | { |
| 14473 | // We can't remove it if it is a branch from hot => cold |
| 14474 | if (!fgInDifferentRegions(block, bNext)) |
| 14475 | { |
| 14476 | // We can't remove if it is marked as BBF_KEEP_BBJ_ALWAYS |
| 14477 | if (!(block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) |
| 14478 | { |
| 14479 | // We can't remove if the BBJ_ALWAYS is part of a BBJ_CALLFINALLY pair |
| 14480 | if ((bPrev == nullptr) || !bPrev->isBBCallAlwaysPair()) |
| 14481 | { |
| 14482 | /* the unconditional jump is to the next BB */ |
| 14483 | block->bbJumpKind = BBJ_NONE; |
| 14484 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 14485 | #ifdef DEBUG |
| 14486 | if (verbose) |
| 14487 | { |
| 14488 | printf("\nRemoving unconditional jump to next block (" FMT_BB " -> " FMT_BB |
| 14489 | ") (converted " FMT_BB " to " |
| 14490 | "fall-through)\n" , |
| 14491 | block->bbNum, bNext->bbNum, block->bbNum); |
| 14492 | } |
| 14493 | #endif // DEBUG |
| 14494 | return true; |
| 14495 | } |
| 14496 | } |
| 14497 | } |
| 14498 | } |
| 14499 | else |
| 14500 | { |
| 14501 | /* remove the conditional statement at the end of block */ |
| 14502 | noway_assert(block->bbJumpKind == BBJ_COND); |
| 14503 | noway_assert(block->bbTreeList); |
| 14504 | |
| 14505 | #ifdef DEBUG |
| 14506 | if (verbose) |
| 14507 | { |
| 14508 | printf("\nRemoving conditional jump to next block (" FMT_BB " -> " FMT_BB ")\n" , block->bbNum, |
| 14509 | bNext->bbNum); |
| 14510 | } |
| 14511 | #endif // DEBUG |
| 14512 | |
| 14513 | if (block->IsLIR()) |
| 14514 | { |
| 14515 | LIR::Range& blockRange = LIR::AsRange(block); |
| 14516 | GenTree* jmp = blockRange.LastNode(); |
| 14517 | assert(jmp->OperIsConditionalJump()); |
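// The operand of a GT_JTRUE may have been marked with GTF_SET_FLAGS for this jump;
// clear that flag since the jump is going away.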
| 14518 | if (jmp->OperGet() == GT_JTRUE) |
| 14519 | { |
| 14520 | jmp->gtOp.gtOp1->gtFlags &= ~GTF_SET_FLAGS; |
| 14521 | } |
| 14522 | |
| 14523 | bool isClosed; |
| 14524 | unsigned sideEffects; |
| 14525 | LIR::ReadOnlyRange jmpRange = blockRange.GetTreeRange(jmp, &isClosed, &sideEffects); |
| 14526 | |
| 14527 | // TODO-LIR: this should really be checking GTF_ALL_EFFECT, but that produces unacceptable |
| 14528 | // diffs compared to the existing backend. |
| 14529 | if (isClosed && ((sideEffects & GTF_SIDE_EFFECT) == 0)) |
| 14530 | { |
| 14531 | // If the jump and its operands form a contiguous, side-effect-free range, |
| 14532 | // remove them. |
| 14533 | blockRange.Delete(this, block, std::move(jmpRange)); |
| 14534 | } |
| 14535 | else |
| 14536 | { |
| 14537 | // Otherwise, just remove the jump node itself. |
| 14538 | blockRange.Remove(jmp, true); |
| 14539 | } |
| 14540 | } |
| 14541 | else |
| 14542 | { |
| 14543 | GenTreeStmt* cond = block->lastStmt(); |
| 14544 | noway_assert(cond->gtStmtExpr->gtOper == GT_JTRUE); |
| 14545 | |
| 14546 | /* check for SIDE_EFFECTS */ |
| 14547 | if (cond->gtStmtExpr->gtFlags & GTF_SIDE_EFFECT) |
| 14548 | { |
| 14549 | /* Extract the side effects from the conditional */ |
| 14550 | GenTree* sideEffList = nullptr; |
| 14551 | |
| 14552 | gtExtractSideEffList(cond->gtStmtExpr, &sideEffList); |
| 14553 | |
| 14554 | if (sideEffList == nullptr) |
| 14555 | { |
| 14556 | compCurBB = block; |
| 14557 | fgRemoveStmt(block, cond); |
| 14558 | } |
| 14559 | else |
| 14560 | { |
| 14561 | noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); |
| 14562 | #ifdef DEBUG |
| 14563 | if (verbose) |
| 14564 | { |
| 14565 | printf("\nConditional has side effects! Extracting side effects...\n" ); |
| 14566 | gtDispTree(cond); |
| 14567 | printf("\n" ); |
| 14568 | gtDispTree(sideEffList); |
| 14569 | printf("\n" ); |
| 14570 | } |
| 14571 | #endif // DEBUG |
| 14572 | |
| 14573 | /* Replace the conditional statement with the list of side effects */ |
| 14574 | noway_assert(sideEffList->gtOper != GT_STMT); |
| 14575 | noway_assert(sideEffList->gtOper != GT_JTRUE); |
| 14576 | |
| 14577 | cond->gtStmtExpr = sideEffList; |
| 14578 | |
| 14579 | if (fgStmtListThreaded) |
| 14580 | { |
| 14581 | compCurBB = block; |
| 14582 | |
| 14583 | /* Update ordering, costs, FP levels, etc. */ |
| 14584 | gtSetStmtInfo(cond); |
| 14585 | |
| 14586 | /* Re-link the nodes for this statement */ |
| 14587 | fgSetStmtSeq(cond); |
| 14588 | } |
| 14589 | } |
| 14590 | } |
| 14591 | else |
| 14592 | { |
| 14593 | compCurBB = block; |
| 14594 | /* conditional has NO side effect - remove it */ |
| 14595 | fgRemoveStmt(block, cond); |
| 14596 | } |
| 14597 | } |
| 14598 | |
| 14599 | /* Conditional is gone - simply fall into the next block */ |
| 14600 | |
| 14601 | block->bbJumpKind = BBJ_NONE; |
| 14602 | block->bbFlags &= ~BBF_NEEDS_GCPOLL; |
| 14603 | |
| 14604 | /* Update bbRefs and bbNum - Conditional predecessors to the same |
| 14605 | * block are counted twice so we have to remove one of them */ |
| 14606 | |
| 14607 | noway_assert(bNext->countOfInEdges() > 1); |
| 14608 | fgRemoveRefPred(bNext, block); |
| 14609 | |
| 14610 | return true; |
| 14611 | } |
| 14612 | return false; |
| 14613 | } |
| 14614 | |
| 14615 | /***************************************************************************** |
| 14616 | * |
| 14617 | * Function called to optimize an unconditional branch that branches |
| 14618 | * to a conditional branch. |
| 14619 | * Currently we require that the conditional branch jump back to the |
| 14620 | * block that follows the unconditional branch. |
| 14621 | * |
* We can improve the code execution and layout by concatenating a copy
* of the conditional branch block at the end of the block containing the
* unconditional branch, and reversing the sense of the branch.
| 14625 | * |
| 14626 | * This is only done when the amount of code to be copied is smaller than |
| 14627 | * our calculated threshold in maxDupCostSz. |
| 14628 | * |
| 14629 | */ |
| 14630 | |
| 14631 | bool Compiler::fgOptimizeBranch(BasicBlock* bJump) |
| 14632 | { |
| 14633 | if (opts.MinOpts()) |
| 14634 | { |
| 14635 | return false; |
| 14636 | } |
| 14637 | |
| 14638 | if (bJump->bbJumpKind != BBJ_ALWAYS) |
| 14639 | { |
| 14640 | return false; |
| 14641 | } |
| 14642 | |
| 14643 | if (bJump->bbFlags & BBF_KEEP_BBJ_ALWAYS) |
| 14644 | { |
| 14645 | return false; |
| 14646 | } |
| 14647 | |
| 14648 | // Don't hoist a conditional branch into the scratch block; we'd prefer it stay |
| 14649 | // either BBJ_NONE or BBJ_ALWAYS. |
| 14650 | if (fgBBisScratch(bJump)) |
| 14651 | { |
| 14652 | return false; |
| 14653 | } |
| 14654 | |
| 14655 | BasicBlock* bDest = bJump->bbJumpDest; |
| 14656 | |
| 14657 | if (bDest->bbJumpKind != BBJ_COND) |
| 14658 | { |
| 14659 | return false; |
| 14660 | } |
| 14661 | |
| 14662 | if (bDest->bbJumpDest != bJump->bbNext) |
| 14663 | { |
| 14664 | return false; |
| 14665 | } |
| 14666 | |
| 14667 | // 'bJump' must be in the same try region as the condition, since we're going to insert |
| 14668 | // a duplicated condition in 'bJump', and the condition might include exception throwing code. |
| 14669 | if (!BasicBlock::sameTryRegion(bJump, bDest)) |
| 14670 | { |
| 14671 | return false; |
| 14672 | } |
| 14673 | |
| 14674 | // do not jump into another try region |
| 14675 | BasicBlock* bDestNext = bDest->bbNext; |
| 14676 | if (bDestNext->hasTryIndex() && !BasicBlock::sameTryRegion(bJump, bDestNext)) |
| 14677 | { |
| 14678 | return false; |
| 14679 | } |
| 14680 | |
| 14681 | // This function is only called by fgReorderBlocks, which we do not run in the backend. |
| 14682 | // If we wanted to run block reordering in the backend, we would need to be able to |
| 14683 | // calculate cost information for LIR on a per-node basis in order for this function |
| 14684 | // to work. |
| 14685 | assert(!bJump->IsLIR()); |
| 14686 | assert(!bDest->IsLIR()); |
| 14687 | |
| 14688 | GenTreeStmt* stmt; |
| 14689 | unsigned estDupCostSz = 0; |
| 14690 | for (stmt = bDest->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 14691 | { |
| 14692 | GenTree* expr = stmt->gtStmtExpr; |
| 14693 | |
| 14694 | /* We call gtPrepareCost to measure the cost of duplicating this tree */ |
| 14695 | gtPrepareCost(expr); |
| 14696 | |
| 14697 | estDupCostSz += expr->gtCostSz; |
| 14698 | } |
| 14699 | |
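// Gather the weights of bJump, bDest and the fall-through block; when the profile
// data is trusted these refine the rarely-run classification used below.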
| 14700 | bool allProfileWeightsAreValid = false; |
| 14701 | BasicBlock::weight_t weightJump = bJump->bbWeight; |
| 14702 | BasicBlock::weight_t weightDest = bDest->bbWeight; |
| 14703 | BasicBlock::weight_t weightNext = bJump->bbNext->bbWeight; |
| 14704 | bool rareJump = bJump->isRunRarely(); |
| 14705 | bool rareDest = bDest->isRunRarely(); |
| 14706 | bool rareNext = bJump->bbNext->isRunRarely(); |
| 14707 | |
// If we have profile data then we use it to decide whether
// bJump, bDest and the fall-through block are rarely run
| 14710 | if (fgIsUsingProfileWeights()) |
| 14711 | { |
| 14712 | // Only rely upon the profile weight when all three of these blocks |
| 14713 | // have either good profile weights or are rarelyRun |
| 14714 | // |
| 14715 | if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && |
| 14716 | (bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && |
| 14717 | (bJump->bbNext->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY))) |
| 14718 | { |
| 14719 | allProfileWeightsAreValid = true; |
| 14720 | |
| 14721 | if ((weightJump * 100) < weightDest) |
| 14722 | { |
| 14723 | rareJump = true; |
| 14724 | } |
| 14725 | |
| 14726 | if ((weightNext * 100) < weightDest) |
| 14727 | { |
| 14728 | rareNext = true; |
| 14729 | } |
| 14730 | |
| 14731 | if (((weightDest * 100) < weightJump) && ((weightDest * 100) < weightNext)) |
| 14732 | { |
| 14733 | rareDest = true; |
| 14734 | } |
| 14735 | } |
| 14736 | } |
| 14737 | |
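// maxDupCostSz is the largest estimated code size (in gtCostSz units) that we are
// willing to duplicate; it is relaxed below when the branch crosses a hot/cold boundary.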
| 14738 | unsigned maxDupCostSz = 6; |
| 14739 | |
| 14740 | // |
| 14741 | // Branches between the hot and rarely run regions |
| 14742 | // should be minimized. So we allow a larger size |
| 14743 | // |
| 14744 | if (rareDest != rareJump) |
| 14745 | { |
| 14746 | maxDupCostSz += 6; |
| 14747 | } |
| 14748 | |
| 14749 | if (rareDest != rareNext) |
| 14750 | { |
| 14751 | maxDupCostSz += 6; |
| 14752 | } |
| 14753 | |
| 14754 | // |
// When we are ngen-ing:
// If the unconditional branch is a rarely run block then
| 14757 | // we are willing to have more code expansion since we |
| 14758 | // won't be running code from this page |
| 14759 | // |
| 14760 | if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 14761 | { |
| 14762 | if (rareJump) |
| 14763 | { |
| 14764 | maxDupCostSz *= 2; |
| 14765 | } |
| 14766 | } |
| 14767 | |
| 14768 | // If the compare has too high cost then we don't want to dup |
| 14769 | |
| 14770 | bool costIsTooHigh = (estDupCostSz > maxDupCostSz); |
| 14771 | |
| 14772 | #ifdef DEBUG |
| 14773 | if (verbose) |
| 14774 | { |
| 14775 | printf("\nDuplication of the conditional block " FMT_BB " (always branch from " FMT_BB |
| 14776 | ") %s, because the cost of " |
| 14777 | "duplication (%i) is %s than %i," |
| 14778 | " validProfileWeights = %s\n" , |
| 14779 | bDest->bbNum, bJump->bbNum, costIsTooHigh ? "not done" : "performed" , estDupCostSz, |
| 14780 | costIsTooHigh ? "greater" : "less or equal" , maxDupCostSz, allProfileWeightsAreValid ? "true" : "false" ); |
| 14781 | } |
| 14782 | #endif // DEBUG |
| 14783 | |
| 14784 | if (costIsTooHigh) |
| 14785 | { |
| 14786 | return false; |
| 14787 | } |
| 14788 | |
| 14789 | /* Looks good - duplicate the conditional block */ |
| 14790 | |
| 14791 | GenTree* newStmtList = nullptr; // new stmt list to be added to bJump |
| 14792 | GenTree* newStmtLast = nullptr; |
| 14793 | bool cloneExprFailed = false; |
| 14794 | |
| 14795 | /* Visit all the statements in bDest */ |
| 14796 | |
| 14797 | for (GenTree* curStmt = bDest->bbTreeList; curStmt; curStmt = curStmt->gtNext) |
| 14798 | { |
| 14799 | /* Clone/substitute the expression */ |
| 14800 | |
| 14801 | stmt = gtCloneExpr(curStmt)->AsStmt(); |
| 14802 | |
| 14803 | // cloneExpr doesn't handle everything |
| 14804 | |
| 14805 | if (stmt == nullptr) |
| 14806 | { |
| 14807 | cloneExprFailed = true; |
| 14808 | break; |
| 14809 | } |
| 14810 | |
| 14811 | /* Append the expression to our list */ |
| 14812 | |
| 14813 | if (newStmtList != nullptr) |
| 14814 | { |
| 14815 | newStmtLast->gtNext = stmt; |
| 14816 | } |
| 14817 | else |
| 14818 | { |
| 14819 | newStmtList = stmt; |
| 14820 | } |
| 14821 | |
| 14822 | stmt->gtPrev = newStmtLast; |
| 14823 | newStmtLast = stmt; |
| 14824 | } |
| 14825 | |
| 14826 | if (cloneExprFailed) |
| 14827 | { |
| 14828 | return false; |
| 14829 | } |
| 14830 | |
| 14831 | noway_assert(newStmtLast != nullptr); |
| 14832 | noway_assert(stmt != nullptr); |
| 14833 | noway_assert(stmt->gtOper == GT_STMT); |
| 14834 | |
| 14835 | if ((newStmtLast == nullptr) || (stmt == nullptr) || (stmt->gtOper != GT_STMT)) |
| 14836 | { |
| 14837 | return false; |
| 14838 | } |
| 14839 | |
| 14840 | /* Get to the condition node from the statement tree */ |
| 14841 | |
| 14842 | GenTree* condTree = stmt->gtStmtExpr; |
| 14843 | noway_assert(condTree->gtOper == GT_JTRUE); |
| 14844 | |
| 14845 | if (condTree->gtOper != GT_JTRUE) |
| 14846 | { |
| 14847 | return false; |
| 14848 | } |
| 14849 | |
| 14850 | // |
| 14851 | // Set condTree to the operand to the GT_JTRUE |
| 14852 | // |
| 14853 | condTree = condTree->gtOp.gtOp1; |
| 14854 | |
| 14855 | // |
| 14856 | // This condTree has to be a RelOp comparison |
| 14857 | // |
| 14858 | if (condTree->OperIsCompare() == false) |
| 14859 | { |
| 14860 | return false; |
| 14861 | } |
| 14862 | |
| 14863 | // |
| 14864 | // Find the last statement in the bJump block |
| 14865 | // |
| 14866 | GenTreeStmt* lastStmt = nullptr; |
| 14867 | for (stmt = bJump->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 14868 | { |
| 14869 | lastStmt = stmt; |
| 14870 | } |
| 14871 | stmt = bJump->firstStmt(); |
| 14872 | |
| 14873 | /* Join the two linked lists */ |
| 14874 | newStmtLast->gtNext = nullptr; |
| 14875 | |
| 14876 | if (lastStmt != nullptr) |
| 14877 | { |
| 14878 | stmt->gtPrev = newStmtLast; |
| 14879 | lastStmt->gtNext = newStmtList; |
| 14880 | newStmtList->gtPrev = lastStmt; |
| 14881 | } |
| 14882 | else |
| 14883 | { |
| 14884 | bJump->bbTreeList = newStmtList; |
| 14885 | newStmtList->gtPrev = newStmtLast; |
| 14886 | } |
| 14887 | |
| 14888 | // |
| 14889 | // Reverse the sense of the compare |
| 14890 | // |
| 14891 | gtReverseCond(condTree); |
| 14892 | |
| 14893 | // We need to update the following flags of the bJump block if they were set in the bDest block |
| 14894 | bJump->bbFlags |= |
| 14895 | (bDest->bbFlags & (BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY | BBF_HAS_NULLCHECK | BBF_HAS_IDX_LEN | BBF_HAS_VTABREF)); |
| 14896 | |
| 14897 | bJump->bbJumpKind = BBJ_COND; |
| 14898 | bJump->bbJumpDest = bDest->bbNext; |
| 14899 | |
| 14900 | /* Mark the jump dest block as being a jump target */ |
| 14901 | bJump->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 14902 | |
| 14903 | /* Update bbRefs and bbPreds */ |
| 14904 | |
| 14905 | // bJump now falls through into the next block |
| 14906 | // |
| 14907 | fgAddRefPred(bJump->bbNext, bJump); |
| 14908 | |
| 14909 | // bJump no longer jumps to bDest |
| 14910 | // |
| 14911 | fgRemoveRefPred(bDest, bJump); |
| 14912 | |
| 14913 | // bJump now jumps to bDest->bbNext |
| 14914 | // |
| 14915 | fgAddRefPred(bDest->bbNext, bJump); |
| 14916 | |
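// bJump no longer flows through bDest, so reduce bDest's block weight by the
// flow that used to arrive via bJump (approximately, when the profile is inexact).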
| 14917 | if (weightJump > 0) |
| 14918 | { |
| 14919 | if (allProfileWeightsAreValid) |
| 14920 | { |
| 14921 | if (weightDest > weightJump) |
| 14922 | { |
| 14923 | bDest->bbWeight = (weightDest - weightJump); |
| 14924 | } |
| 14925 | else if (!bDest->isRunRarely()) |
| 14926 | { |
| 14927 | bDest->bbWeight = BB_UNITY_WEIGHT; |
| 14928 | } |
| 14929 | } |
| 14930 | else |
| 14931 | { |
| 14932 | BasicBlock::weight_t newWeightDest = 0; |
| 14933 | BasicBlock::weight_t unloopWeightDest = 0; |
| 14934 | |
| 14935 | if (weightDest > weightJump) |
| 14936 | { |
| 14937 | newWeightDest = (weightDest - weightJump); |
| 14938 | } |
| 14939 | if (weightDest >= (BB_LOOP_WEIGHT * BB_UNITY_WEIGHT) / 2) |
| 14940 | { |
| 14941 | newWeightDest = (weightDest * 2) / (BB_LOOP_WEIGHT * BB_UNITY_WEIGHT); |
| 14942 | } |
| 14943 | if ((newWeightDest > 0) || (unloopWeightDest > 0)) |
| 14944 | { |
| 14945 | bDest->bbWeight = Max(newWeightDest, unloopWeightDest); |
| 14946 | } |
| 14947 | } |
| 14948 | } |
| 14949 | |
| 14950 | #if DEBUG |
| 14951 | if (verbose) |
| 14952 | { |
| 14953 | // Dump out the newStmtList that we created |
| 14954 | printf("\nfgOptimizeBranch added these statements(s) at the end of " FMT_BB ":\n" , bJump->bbNum); |
| 14955 | for (stmt = newStmtList->AsStmt(); stmt; stmt = stmt->gtNextStmt) |
| 14956 | { |
| 14957 | gtDispTree(stmt); |
| 14958 | } |
| 14959 | printf("\nfgOptimizeBranch changed block " FMT_BB " from BBJ_ALWAYS to BBJ_COND.\n" , bJump->bbNum); |
| 14960 | |
| 14961 | printf("\nAfter this change in fgOptimizeBranch the BB graph is:" ); |
| 14962 | fgDispBasicBlocks(verboseTrees); |
| 14963 | printf("\n" ); |
| 14964 | } |
| 14965 | #endif // DEBUG |
| 14966 | |
| 14967 | return true; |
| 14968 | } |
| 14969 | |
| 14970 | /***************************************************************************** |
| 14971 | * |
| 14972 | * Function called to optimize switch statements |
| 14973 | */ |
| 14974 | |
| 14975 | bool Compiler::fgOptimizeSwitchJumps() |
| 14976 | { |
| 14977 | bool result = false; // Our return value |
| 14978 | |
| 14979 | #if 0 |
| 14980 | // TODO-CQ: Add switch jump optimizations? |
| 14981 | if (!fgHasSwitch) |
| 14982 | return false; |
| 14983 | |
| 14984 | if (!fgHaveValidEdgeWeights) |
| 14985 | return false; |
| 14986 | |
| 14987 | for (BasicBlock* bSrc = fgFirstBB; bSrc != NULL; bSrc = bSrc->bbNext) |
| 14988 | { |
| 14989 | if (bSrc->bbJumpKind == BBJ_SWITCH) |
| 14990 | { |
| 14991 | unsigned jumpCnt; jumpCnt = bSrc->bbJumpSwt->bbsCount; |
| 14992 | BasicBlock** jumpTab; jumpTab = bSrc->bbJumpSwt->bbsDstTab; |
| 14993 | |
| 14994 | do |
| 14995 | { |
| 14996 | BasicBlock* bDst = *jumpTab; |
| 14997 | flowList* edgeToDst = fgGetPredForBlock(bDst, bSrc); |
| 14998 | double outRatio = (double) edgeToDst->flEdgeWeightMin / (double) bSrc->bbWeight; |
| 14999 | |
| 15000 | if (outRatio >= 0.60) |
| 15001 | { |
| 15002 | // straighten switch here... |
| 15003 | } |
| 15004 | } |
| 15005 | while (++jumpTab, --jumpCnt); |
| 15006 | } |
| 15007 | } |
| 15008 | #endif |
| 15009 | |
| 15010 | return result; |
| 15011 | } |
| 15012 | |
| 15013 | #ifdef _PREFAST_ |
| 15014 | #pragma warning(push) |
| 15015 | #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function |
| 15016 | #endif |
| 15017 | /***************************************************************************** |
| 15018 | * |
| 15019 | * Function called to reorder the flowgraph of BasicBlocks such that any |
| 15020 | * rarely run blocks are placed at the end of the block list. |
| 15021 | * If we have profile information we also use that information to reverse |
| 15022 | * all conditional jumps that would benefit. |
| 15023 | */ |
| 15024 | |
| 15025 | void Compiler::fgReorderBlocks() |
| 15026 | { |
| 15027 | noway_assert(opts.compDbgCode == false); |
| 15028 | |
| 15029 | #if FEATURE_EH_FUNCLETS |
| 15030 | assert(fgFuncletsCreated); |
| 15031 | #endif // FEATURE_EH_FUNCLETS |
| 15032 | |
| 15033 | // We can't relocate anything if we only have one block |
| 15034 | if (fgFirstBB->bbNext == nullptr) |
| 15035 | { |
| 15036 | return; |
| 15037 | } |
| 15038 | |
| 15039 | bool newRarelyRun = false; |
| 15040 | bool movedBlocks = false; |
| 15041 | bool optimizedSwitches = false; |
| 15042 | |
| 15043 | // First let us expand the set of run rarely blocks |
| 15044 | newRarelyRun |= fgExpandRarelyRunBlocks(); |
| 15045 | |
| 15046 | #if !FEATURE_EH_FUNCLETS |
| 15047 | movedBlocks |= fgRelocateEHRegions(); |
| 15048 | #endif // !FEATURE_EH_FUNCLETS |
| 15049 | |
| 15050 | // |
| 15051 | // If we are using profile weights we can change some |
| 15052 | // switch jumps into conditional test and jump |
| 15053 | // |
| 15054 | if (fgIsUsingProfileWeights()) |
| 15055 | { |
| 15056 | // |
| 15057 | // Note that this is currently not yet implemented |
| 15058 | // |
| 15059 | optimizedSwitches = fgOptimizeSwitchJumps(); |
| 15060 | if (optimizedSwitches) |
| 15061 | { |
| 15062 | fgUpdateFlowGraph(); |
| 15063 | } |
| 15064 | } |
| 15065 | |
| 15066 | #ifdef DEBUG |
| 15067 | if (verbose) |
| 15068 | { |
| 15069 | printf("*************** In fgReorderBlocks()\n" ); |
| 15070 | |
| 15071 | printf("\nInitial BasicBlocks" ); |
| 15072 | fgDispBasicBlocks(verboseTrees); |
| 15073 | printf("\n" ); |
| 15074 | } |
| 15075 | #endif // DEBUG |
| 15076 | |
| 15077 | BasicBlock* bNext; |
| 15078 | BasicBlock* bPrev; |
| 15079 | BasicBlock* block; |
| 15080 | unsigned XTnum; |
| 15081 | EHblkDsc* HBtab; |
| 15082 | |
| 15083 | // Iterate over every block, remembering our previous block in bPrev |
| 15084 | for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) |
| 15085 | { |
| 15086 | // |
| 15087 | // Consider relocating the rarely run blocks such that they are at the end of the method. |
// We also consider reversing conditional branches so that they become a not-taken forward branch.
| 15089 | // |
| 15090 | |
| 15091 | // If block is marked with a BBF_KEEP_BBJ_ALWAYS flag then we don't move the block |
| 15092 | if ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) |
| 15093 | { |
| 15094 | continue; |
| 15095 | } |
| 15096 | |
// Finally and handler blocks are to be kept contiguous.
| 15098 | // TODO-CQ: Allow reordering within the handler region |
| 15099 | if (block->hasHndIndex() == true) |
| 15100 | { |
| 15101 | continue; |
| 15102 | } |
| 15103 | |
| 15104 | bool reorderBlock = true; // This is set to false if we decide not to reorder 'block' |
| 15105 | bool isRare = block->isRunRarely(); |
| 15106 | BasicBlock* bDest = nullptr; |
| 15107 | bool forwardBranch = false; |
| 15108 | bool backwardBranch = false; |
| 15109 | |
| 15110 | // Setup bDest |
| 15111 | if ((bPrev->bbJumpKind == BBJ_COND) || (bPrev->bbJumpKind == BBJ_ALWAYS)) |
| 15112 | { |
| 15113 | bDest = bPrev->bbJumpDest; |
| 15114 | forwardBranch = fgIsForwardBranch(bPrev); |
| 15115 | backwardBranch = !forwardBranch; |
| 15116 | } |
| 15117 | |
| 15118 | // We will look for bPrev as a non rarely run block followed by block as a rarely run block |
| 15119 | // |
| 15120 | if (bPrev->isRunRarely()) |
| 15121 | { |
| 15122 | reorderBlock = false; |
| 15123 | } |
| 15124 | |
| 15125 | // If the weights of the bPrev, block and bDest were all obtained from a profile run |
| 15126 | // then we can use them to decide if it is useful to reverse this conditional branch |
| 15127 | |
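// profHotWeight will be set to the maximum bbWeight that a block may have for us
// to still treat it as uncommonly run relative to the hot path out of bPrev.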
| 15128 | BasicBlock::weight_t profHotWeight = -1; |
| 15129 | |
| 15130 | if (bPrev->hasProfileWeight() && block->hasProfileWeight() && ((bDest == nullptr) || bDest->hasProfileWeight())) |
| 15131 | { |
| 15132 | // |
| 15133 | // All blocks have profile information |
| 15134 | // |
| 15135 | if (forwardBranch) |
| 15136 | { |
| 15137 | if (bPrev->bbJumpKind == BBJ_ALWAYS) |
| 15138 | { |
| 15139 | // We can pull up the blocks that the unconditional jump branches to |
// if the weight of bDest is greater than or equal to the weight of block;
// also, the weight of bDest can't be zero.
| 15142 | // |
| 15143 | if ((bDest->bbWeight < block->bbWeight) || (bDest->bbWeight == 0)) |
| 15144 | { |
| 15145 | reorderBlock = false; |
| 15146 | } |
| 15147 | else |
| 15148 | { |
| 15149 | // |
| 15150 | // If this remains true then we will try to pull up bDest to succeed bPrev |
| 15151 | // |
| 15152 | bool moveDestUp = true; |
| 15153 | |
| 15154 | if (fgHaveValidEdgeWeights) |
| 15155 | { |
| 15156 | // |
| 15157 | // The edge bPrev -> bDest must have a higher minimum weight |
| 15158 | // than every other edge into bDest |
| 15159 | // |
| 15160 | flowList* edgeFromPrev = fgGetPredForBlock(bDest, bPrev); |
| 15161 | noway_assert(edgeFromPrev != nullptr); |
| 15162 | |
| 15163 | // Examine all of the other edges into bDest |
| 15164 | for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext) |
| 15165 | { |
| 15166 | if (edge != edgeFromPrev) |
| 15167 | { |
| 15168 | if (edge->flEdgeWeightMax >= edgeFromPrev->flEdgeWeightMin) |
| 15169 | { |
| 15170 | moveDestUp = false; |
| 15171 | break; |
| 15172 | } |
| 15173 | } |
| 15174 | } |
| 15175 | } |
| 15176 | else |
| 15177 | { |
| 15178 | // |
| 15179 | // The block bPrev must have a higher weight |
| 15180 | // than every other block that goes into bDest |
| 15181 | // |
| 15182 | |
| 15183 | // Examine all of the other edges into bDest |
| 15184 | for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext) |
| 15185 | { |
| 15186 | BasicBlock* bTemp = edge->flBlock; |
| 15187 | |
| 15188 | if ((bTemp != bPrev) && (bTemp->bbWeight >= bPrev->bbWeight)) |
| 15189 | { |
| 15190 | moveDestUp = false; |
| 15191 | break; |
| 15192 | } |
| 15193 | } |
| 15194 | } |
| 15195 | |
| 15196 | // Are we still good to move bDest up to bPrev? |
| 15197 | if (moveDestUp) |
| 15198 | { |
| 15199 | // |
| 15200 | // We will consider all blocks that have less weight than profHotWeight to be |
| 15201 | // uncommonly run blocks as compared with the hot path of bPrev taken-jump to bDest |
| 15202 | // |
| 15203 | profHotWeight = bDest->bbWeight - 1; |
| 15204 | } |
| 15205 | else |
| 15206 | { |
| 15207 | if (block->isRunRarely()) |
| 15208 | { |
                                // We will move any rarely run blocks
| 15210 | profHotWeight = 0; |
| 15211 | } |
| 15212 | else |
| 15213 | { |
| 15214 | // We will move all blocks that have a weight less or equal to our fall through block |
| 15215 | profHotWeight = block->bbWeight + 1; |
| 15216 | } |
| 15217 | // But we won't try to connect with bDest |
| 15218 | bDest = nullptr; |
| 15219 | } |
| 15220 | } |
| 15221 | } |
| 15222 | else // (bPrev->bbJumpKind == BBJ_COND) |
| 15223 | { |
| 15224 | noway_assert(bPrev->bbJumpKind == BBJ_COND); |
| 15225 | // |
                    // We will reverse the branch if the taken-jump to bDest ratio (i.e. 'takenRatio')
                    // is more than 51%
                    //
                    // We will set up profHotWeight to be the maximum bbWeight that a block
                    // could have for us not to want to reverse the conditional branch
| 15231 | // |
| 15232 | // We will consider all blocks that have less weight than profHotWeight to be |
| 15233 | // uncommonly run blocks as compared with the hot path of bPrev taken-jump to bDest |
| 15234 | // |
| 15235 | if (fgHaveValidEdgeWeights) |
| 15236 | { |
| 15237 | // We have valid edge weights, however even with valid edge weights |
                        // we may have a minimum and maximum range for each edge's value
| 15239 | // |
| 15240 | // We will check that the min weight of the bPrev to bDest edge |
| 15241 | // is more than twice the max weight of the bPrev to block edge. |
| 15242 | // |
| 15243 | // bPrev --> [BB04, weight 31] |
| 15244 | // | \ |
| 15245 | // edgeToBlock -------------> O \ |
| 15246 | // [min=8,max=10] V \ |
| 15247 | // block --> [BB05, weight 10] \ |
| 15248 | // \ |
| 15249 | // edgeToDest ----------------------------> O |
| 15250 | // [min=21,max=23] | |
| 15251 | // V |
| 15252 | // bDest ---------------> [BB08, weight 21] |
| 15253 | // |
| 15254 | flowList* edgeToDest = fgGetPredForBlock(bDest, bPrev); |
| 15255 | flowList* edgeToBlock = fgGetPredForBlock(block, bPrev); |
| 15256 | noway_assert(edgeToDest != nullptr); |
| 15257 | noway_assert(edgeToBlock != nullptr); |
| 15258 | // |
| 15259 | // Calculate the taken ratio |
| 15260 | // A takenRation of 0.10 means taken 10% of the time, not taken 90% of the time |
| 15261 | // A takenRation of 0.50 means taken 50% of the time, not taken 50% of the time |
| 15262 | // A takenRation of 0.90 means taken 90% of the time, not taken 10% of the time |
| 15263 | // |
| 15264 | double takenCount = |
| 15265 | ((double)edgeToDest->flEdgeWeightMin + (double)edgeToDest->flEdgeWeightMax) / 2.0; |
| 15266 | double notTakenCount = |
| 15267 | ((double)edgeToBlock->flEdgeWeightMin + (double)edgeToBlock->flEdgeWeightMax) / 2.0; |
| 15268 | double totalCount = takenCount + notTakenCount; |
| 15269 | double takenRatio = takenCount / totalCount; |
| 15270 | |
| 15271 | // If the takenRatio is greater or equal to 51% then we will reverse the branch |
| 15272 | if (takenRatio < 0.51) |
| 15273 | { |
| 15274 | reorderBlock = false; |
| 15275 | } |
| 15276 | else |
| 15277 | { |
| 15278 | // set profHotWeight |
| 15279 | profHotWeight = (edgeToBlock->flEdgeWeightMin + edgeToBlock->flEdgeWeightMax) / 2 - 1; |
| 15280 | } |
| 15281 | } |
| 15282 | else |
| 15283 | { |
                        // We don't have valid edge weights, so we will be more conservative
| 15285 | // We could have bPrev, block or bDest as part of a loop and thus have extra weight |
| 15286 | // |
| 15287 | // We will do two checks: |
| 15288 | // 1. Check that the weight of bDest is at least two times more than block |
| 15289 | // 2. Check that the weight of bPrev is at least three times more than block |
| 15290 | // |
| 15291 | // bPrev --> [BB04, weight 31] |
| 15292 | // | \ |
| 15293 | // V \ |
| 15294 | // block --> [BB05, weight 10] \ |
| 15295 | // \ |
| 15296 | // | |
| 15297 | // V |
| 15298 | // bDest ---------------> [BB08, weight 21] |
| 15299 | // |
| 15300 | // For this case weightDest is calculated as (21+1)/2 or 11 |
| 15301 | // and weightPrev is calculated as (31+2)/3 also 11 |
| 15302 | // |
| 15303 | // Generally both weightDest and weightPrev should calculate |
| 15304 | // the same value unless bPrev or bDest are part of a loop |
| 15305 | // |
| 15306 | BasicBlock::weight_t weightDest = |
| 15307 | bDest->isMaxBBWeight() ? bDest->bbWeight : (bDest->bbWeight + 1) / 2; |
| 15308 | BasicBlock::weight_t weightPrev = |
| 15309 | bPrev->isMaxBBWeight() ? bPrev->bbWeight : (bPrev->bbWeight + 2) / 3; |
| 15310 | |
| 15311 | // select the lower of weightDest and weightPrev |
| 15312 | profHotWeight = (weightDest < weightPrev) ? weightDest : weightPrev; |
| 15313 | |
| 15314 | // if the weight of block is greater (or equal) to profHotWeight then we don't reverse the cond |
| 15315 | if (block->bbWeight >= profHotWeight) |
| 15316 | { |
| 15317 | reorderBlock = false; |
| 15318 | } |
| 15319 | } |
| 15320 | } |
| 15321 | } |
| 15322 | else // not a forwardBranch |
| 15323 | { |
| 15324 | if (bPrev->bbFallsThrough()) |
| 15325 | { |
| 15326 | goto CHECK_FOR_RARE; |
| 15327 | } |
| 15328 | |
| 15329 | // Here we should pull up the highest weight block remaining |
| 15330 | // and place it here since bPrev does not fall through. |
| 15331 | |
| 15332 | BasicBlock::weight_t highestWeight = 0; |
| 15333 | BasicBlock* candidateBlock = nullptr; |
| 15334 | BasicBlock* lastNonFallThroughBlock = bPrev; |
| 15335 | BasicBlock* bTmp = bPrev->bbNext; |
| 15336 | |
| 15337 | while (bTmp != nullptr) |
| 15338 | { |
| 15339 | // Don't try to split a Call/Always pair |
| 15340 | // |
| 15341 | if (bTmp->isBBCallAlwaysPair()) |
| 15342 | { |
| 15343 | // Move bTmp forward |
| 15344 | bTmp = bTmp->bbNext; |
| 15345 | } |
| 15346 | |
| 15347 | // |
| 15348 | // Check for loop exit condition |
| 15349 | // |
| 15350 | if (bTmp == nullptr) |
| 15351 | { |
| 15352 | break; |
| 15353 | } |
| 15354 | |
| 15355 | // |
| 15356 | // if its weight is the highest one we've seen and |
| 15357 | // the EH regions allow for us to place bTmp after bPrev |
| 15358 | // |
| 15359 | if ((bTmp->bbWeight > highestWeight) && fgEhAllowsMoveBlock(bPrev, bTmp)) |
| 15360 | { |
| 15361 | // When we have a current candidateBlock that is a conditional (or unconditional) jump |
                        // to bTmp (which is a higher weighted block), then it is better to keep our current
| 15363 | // candidateBlock and have it fall into bTmp |
| 15364 | // |
| 15365 | if ((candidateBlock == nullptr) || |
| 15366 | ((candidateBlock->bbJumpKind != BBJ_COND) && (candidateBlock->bbJumpKind != BBJ_ALWAYS)) || |
| 15367 | (candidateBlock->bbJumpDest != bTmp)) |
| 15368 | { |
| 15369 | // otherwise we have a new candidateBlock |
| 15370 | // |
| 15371 | highestWeight = bTmp->bbWeight; |
| 15372 | candidateBlock = lastNonFallThroughBlock->bbNext; |
| 15373 | } |
| 15374 | } |
| 15375 | |
| 15376 | if ((bTmp->bbFallsThrough() == false) || (bTmp->bbWeight == 0)) |
| 15377 | { |
| 15378 | lastNonFallThroughBlock = bTmp; |
| 15379 | } |
| 15380 | |
| 15381 | bTmp = bTmp->bbNext; |
| 15382 | } |
| 15383 | |
| 15384 | // If we didn't find a suitable block then skip this |
| 15385 | if (highestWeight == 0) |
| 15386 | { |
| 15387 | reorderBlock = false; |
| 15388 | } |
| 15389 | else |
| 15390 | { |
| 15391 | noway_assert(candidateBlock != nullptr); |
| 15392 | |
                    // If the candidateBlock is the same as block then skip this
| 15394 | if (candidateBlock == block) |
| 15395 | { |
| 15396 | reorderBlock = false; |
| 15397 | } |
| 15398 | else |
| 15399 | { |
| 15400 | // Set bDest to the block that we want to come after bPrev |
| 15401 | bDest = candidateBlock; |
| 15402 | |
| 15403 | // set profHotWeight |
| 15404 | profHotWeight = highestWeight - 1; |
| 15405 | } |
| 15406 | } |
| 15407 | } |
| 15408 | } |
| 15409 | else // we don't have good profile info (or we are falling through) |
| 15410 | { |
| 15411 | |
| 15412 | CHECK_FOR_RARE:; |
| 15413 | |
| 15414 | /* We only want to reorder when we have a rarely run */ |
| 15415 | /* block right after a normal block, */ |
| 15416 | /* (bPrev is known to be a normal block at this point) */ |
| 15417 | if (!isRare) |
| 15418 | { |
| 15419 | if ((bDest == block->bbNext) && (block->bbJumpKind == BBJ_RETURN) && (bPrev->bbJumpKind == BBJ_ALWAYS)) |
| 15420 | { |
| 15421 | // This is a common case with expressions like "return Expr1 && Expr2" -- move the return |
| 15422 | // to establish fall-through. |
| 15423 | } |
| 15424 | else |
| 15425 | { |
| 15426 | reorderBlock = false; |
| 15427 | } |
| 15428 | } |
| 15429 | else |
| 15430 | { |
| 15431 | /* If the jump target bDest is also a rarely run block then we don't want to do the reversal */ |
| 15432 | if (bDest && bDest->isRunRarely()) |
| 15433 | { |
| 15434 | reorderBlock = false; /* Both block and bDest are rarely run */ |
| 15435 | } |
| 15436 | else |
| 15437 | { |
                    // We will move any rarely run blocks
| 15439 | profHotWeight = 0; |
| 15440 | } |
| 15441 | } |
| 15442 | } |
| 15443 | |
| 15444 | if (reorderBlock == false) |
| 15445 | { |
| 15446 | // |
| 15447 | // Check for an unconditional branch to a conditional branch |
| 15448 | // which also branches back to our next block |
| 15449 | // |
| 15450 | if (fgOptimizeBranch(bPrev)) |
| 15451 | { |
| 15452 | noway_assert(bPrev->bbJumpKind == BBJ_COND); |
| 15453 | } |
| 15454 | continue; |
| 15455 | } |
| 15456 | |
| 15457 | // Now we need to determine which blocks should be moved |
| 15458 | // |
| 15459 | // We consider one of two choices: |
| 15460 | // |
| 15461 | // 1. Moving the fall-through blocks (or rarely run blocks) down to |
| 15462 | // later in the method and hopefully connecting the jump dest block |
| 15463 | // so that it becomes the fall through block |
| 15464 | // |
        // And when bDest is not NULL, we also consider:
| 15466 | // |
| 15467 | // 2. Moving the bDest block (or blocks) up to bPrev |
| 15468 | // so that it could be used as a fall through block |
| 15469 | // |
        // We will prefer option #1 if we are able to connect the jump dest
        // block as the fall through block; otherwise we will try to use option #2
| 15472 | // |
| 15473 | |
| 15474 | // |
| 15475 | // Consider option #1: relocating blocks starting at 'block' |
| 15476 | // to later in flowgraph |
| 15477 | // |
| 15478 | // We set bStart to the first block that will be relocated |
| 15479 | // and bEnd to the last block that will be relocated |
| 15480 | |
| 15481 | BasicBlock* bStart = block; |
| 15482 | BasicBlock* bEnd = bStart; |
| 15483 | bNext = bEnd->bbNext; |
| 15484 | bool connected_bDest = false; |
| 15485 | |
| 15486 | if ((backwardBranch && !isRare) || |
| 15487 | ((block->bbFlags & BBF_DONT_REMOVE) != 0)) // Don't choose option #1 when block is the start of a try region |
| 15488 | { |
| 15489 | bStart = nullptr; |
| 15490 | bEnd = nullptr; |
| 15491 | } |
| 15492 | else |
| 15493 | { |
| 15494 | while (true) |
| 15495 | { |
| 15496 | // Don't try to split a Call/Always pair |
| 15497 | // |
| 15498 | if (bEnd->isBBCallAlwaysPair()) |
| 15499 | { |
| 15500 | // Move bEnd and bNext forward |
| 15501 | bEnd = bNext; |
| 15502 | bNext = bNext->bbNext; |
| 15503 | } |
| 15504 | |
| 15505 | // |
| 15506 | // Check for loop exit condition |
| 15507 | // |
| 15508 | if (bNext == nullptr) |
| 15509 | { |
| 15510 | break; |
| 15511 | } |
| 15512 | |
| 15513 | #if FEATURE_EH_FUNCLETS |
| 15514 | // Check if we've reached the funclets region, at the end of the function |
| 15515 | if (fgFirstFuncletBB == bEnd->bbNext) |
| 15516 | { |
| 15517 | break; |
| 15518 | } |
| 15519 | #endif // FEATURE_EH_FUNCLETS |
| 15520 | |
| 15521 | if (bNext == bDest) |
| 15522 | { |
| 15523 | connected_bDest = true; |
| 15524 | break; |
| 15525 | } |
| 15526 | |
| 15527 | // All the blocks must have the same try index |
| 15528 | // and must not have the BBF_DONT_REMOVE flag set |
| 15529 | |
| 15530 | if (!BasicBlock::sameTryRegion(bStart, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0)) |
| 15531 | { |
| 15532 | // exit the loop, bEnd is now set to the |
| 15533 | // last block that we want to relocate |
| 15534 | break; |
| 15535 | } |
| 15536 | |
| 15537 | // If we are relocating rarely run blocks.. |
| 15538 | if (isRare) |
| 15539 | { |
| 15540 | // ... then all blocks must be rarely run |
| 15541 | if (!bNext->isRunRarely()) |
| 15542 | { |
| 15543 | // exit the loop, bEnd is now set to the |
| 15544 | // last block that we want to relocate |
| 15545 | break; |
| 15546 | } |
| 15547 | } |
| 15548 | else |
| 15549 | { |
| 15550 | // If we are moving blocks that are hot then all |
                    // of the blocks moved must be less than profHotWeight
| 15552 | if (bNext->bbWeight >= profHotWeight) |
| 15553 | { |
| 15554 | // exit the loop, bEnd is now set to the |
| 15555 | // last block that we would relocate |
| 15556 | break; |
| 15557 | } |
| 15558 | } |
| 15559 | |
| 15560 | // Move bEnd and bNext forward |
| 15561 | bEnd = bNext; |
| 15562 | bNext = bNext->bbNext; |
| 15563 | } |
| 15564 | |
| 15565 | // Set connected_bDest to true if moving blocks [bStart .. bEnd] |
            // connects with the jump dest of bPrev (i.e. bDest) and
| 15567 | // thus allows bPrev fall through instead of jump. |
| 15568 | if (bNext == bDest) |
| 15569 | { |
| 15570 | connected_bDest = true; |
| 15571 | } |
| 15572 | } |
| 15573 | |
| 15574 | // Now consider option #2: Moving the jump dest block (or blocks) |
| 15575 | // up to bPrev |
| 15576 | // |
| 15577 | // The variables bStart2, bEnd2 and bPrev2 are used for option #2 |
| 15578 | // |
| 15579 | // We will setup bStart2 to the first block that will be relocated |
| 15580 | // and bEnd2 to the last block that will be relocated |
| 15581 | // and bPrev2 to be the lexical pred of bDest |
| 15582 | // |
| 15583 | // If after this calculation bStart2 is NULL we cannot use option #2, |
| 15584 | // otherwise bStart2, bEnd2 and bPrev2 are all non-NULL and we will use option #2 |
| 15585 | |
| 15586 | BasicBlock* bStart2 = nullptr; |
| 15587 | BasicBlock* bEnd2 = nullptr; |
| 15588 | BasicBlock* bPrev2 = nullptr; |
| 15589 | |
| 15590 | // If option #1 didn't connect bDest and bDest isn't NULL |
| 15591 | if ((connected_bDest == false) && (bDest != nullptr) && |
| 15592 | // The jump target cannot be moved if it has the BBF_DONT_REMOVE flag set |
| 15593 | ((bDest->bbFlags & BBF_DONT_REMOVE) == 0)) |
| 15594 | { |
| 15595 | // We will consider option #2: relocating blocks starting at 'bDest' to succeed bPrev |
| 15596 | // |
| 15597 | // setup bPrev2 to be the lexical pred of bDest |
| 15598 | |
| 15599 | bPrev2 = block; |
| 15600 | while (bPrev2 != nullptr) |
| 15601 | { |
| 15602 | if (bPrev2->bbNext == bDest) |
| 15603 | { |
| 15604 | break; |
| 15605 | } |
| 15606 | |
| 15607 | bPrev2 = bPrev2->bbNext; |
| 15608 | } |
| 15609 | |
| 15610 | if ((bPrev2 != nullptr) && fgEhAllowsMoveBlock(bPrev, bDest)) |
| 15611 | { |
| 15612 | // We have decided that relocating bDest to be after bPrev is best |
| 15613 | // Set bStart2 to the first block that will be relocated |
| 15614 | // and bEnd2 to the last block that will be relocated |
| 15615 | // |
| 15616 | // Assigning to bStart2 selects option #2 |
| 15617 | // |
| 15618 | bStart2 = bDest; |
| 15619 | bEnd2 = bStart2; |
| 15620 | bNext = bEnd2->bbNext; |
| 15621 | |
| 15622 | while (true) |
| 15623 | { |
| 15624 | // Don't try to split a Call/Always pair |
| 15625 | // |
| 15626 | if (bEnd2->isBBCallAlwaysPair()) |
| 15627 | { |
| 15628 | noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); |
| 15629 | // Move bEnd2 and bNext forward |
| 15630 | bEnd2 = bNext; |
| 15631 | bNext = bNext->bbNext; |
| 15632 | } |
| 15633 | |
| 15634 | // Check for the Loop exit conditions |
| 15635 | |
| 15636 | if (bNext == nullptr) |
| 15637 | { |
| 15638 | break; |
| 15639 | } |
| 15640 | |
| 15641 | if (bEnd2->bbFallsThrough() == false) |
| 15642 | { |
| 15643 | break; |
| 15644 | } |
| 15645 | |
                    // All the blocks must have the same try index,
                    // and must not have the BBF_DONT_REMOVE flag set
| 15649 | |
| 15650 | if (!BasicBlock::sameTryRegion(bStart2, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0)) |
| 15651 | { |
| 15652 | // exit the loop, bEnd2 is now set to the |
| 15653 | // last block that we want to relocate |
| 15654 | break; |
| 15655 | } |
| 15656 | |
| 15657 | if (isRare) |
| 15658 | { |
| 15659 | /* ... then all blocks must not be rarely run */ |
| 15660 | if (bNext->isRunRarely()) |
| 15661 | { |
| 15662 | // exit the loop, bEnd2 is now set to the |
| 15663 | // last block that we want to relocate |
| 15664 | break; |
| 15665 | } |
| 15666 | } |
| 15667 | else |
| 15668 | { |
| 15669 | // If we are relocating hot blocks |
| 15670 | // all blocks moved must be greater than profHotWeight |
| 15671 | if (bNext->bbWeight <= profHotWeight) |
| 15672 | { |
| 15673 | // exit the loop, bEnd2 is now set to the |
| 15674 | // last block that we want to relocate |
| 15675 | break; |
| 15676 | } |
| 15677 | } |
| 15678 | |
| 15679 | // Move bEnd2 and bNext forward |
| 15680 | bEnd2 = bNext; |
| 15681 | bNext = bNext->bbNext; |
| 15682 | } |
| 15683 | } |
| 15684 | } |
| 15685 | |
| 15686 | // If we are using option #1 then ... |
| 15687 | if (bStart2 == nullptr) |
| 15688 | { |
| 15689 | // Don't use option #1 for a backwards branch |
| 15690 | if (bStart == nullptr) |
| 15691 | { |
| 15692 | continue; |
| 15693 | } |
| 15694 | |
| 15695 | // .... Don't move a set of blocks that are already at the end of the main method |
| 15696 | if (bEnd == fgLastBBInMainFunction()) |
| 15697 | { |
| 15698 | continue; |
| 15699 | } |
| 15700 | } |
| 15701 | |
| 15702 | #ifdef DEBUG |
| 15703 | if (verbose) |
| 15704 | { |
| 15705 | if (bDest != nullptr) |
| 15706 | { |
| 15707 | if (bPrev->bbJumpKind == BBJ_COND) |
| 15708 | { |
| 15709 | printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " " , |
| 15710 | bPrev->bbNum, bDest->bbNum); |
| 15711 | } |
| 15712 | else if (bPrev->bbJumpKind == BBJ_ALWAYS) |
| 15713 | { |
| 15714 | printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " " , |
| 15715 | bPrev->bbNum, bDest->bbNum); |
| 15716 | } |
| 15717 | else |
| 15718 | { |
| 15719 | printf("Decided to place hot code after " FMT_BB ", placed " FMT_BB " after this block " , |
| 15720 | bPrev->bbNum, bDest->bbNum); |
| 15721 | } |
| 15722 | |
| 15723 | if (profHotWeight > 0) |
| 15724 | { |
| 15725 | printf("because of IBC profile data\n" ); |
| 15726 | } |
| 15727 | else |
| 15728 | { |
| 15729 | if (bPrev->bbFallsThrough()) |
| 15730 | { |
| 15731 | printf("since it falls into a rarely run block\n" ); |
| 15732 | } |
| 15733 | else |
| 15734 | { |
| 15735 | printf("since it is succeeded by a rarely run block\n" ); |
| 15736 | } |
| 15737 | } |
| 15738 | } |
| 15739 | else |
| 15740 | { |
| 15741 | printf("Decided to relocate block(s) after block " FMT_BB " since they are %s block(s)\n" , bPrev->bbNum, |
| 15742 | block->isRunRarely() ? "rarely run" : "uncommonly run" ); |
| 15743 | } |
| 15744 | } |
| 15745 | #endif // DEBUG |
| 15746 | |
        // We will set insertAfterBlk to the block that precedes our insertion range
| 15748 | // We will set bStartPrev to be the block that precedes the set of blocks that we are moving |
| 15749 | BasicBlock* insertAfterBlk; |
| 15750 | BasicBlock* bStartPrev; |
| 15751 | |
| 15752 | if (bStart2 != nullptr) |
| 15753 | { |
| 15754 | // Option #2: relocating blocks starting at 'bDest' to follow bPrev |
| 15755 | |
| 15756 | // Update bStart and bEnd so that we can use these two for all later operations |
| 15757 | bStart = bStart2; |
| 15758 | bEnd = bEnd2; |
| 15759 | |
| 15760 | // Set bStartPrev to be the block that comes before bStart |
| 15761 | bStartPrev = bPrev2; |
| 15762 | |
| 15763 | // We will move [bStart..bEnd] to immediately after bPrev |
| 15764 | insertAfterBlk = bPrev; |
| 15765 | } |
| 15766 | else |
| 15767 | { |
| 15768 | // option #1: Moving the fall-through blocks (or rarely run blocks) down to later in the method |
| 15769 | |
            // Set bStartPrev to be the block that comes before bStart
| 15771 | bStartPrev = bPrev; |
| 15772 | |
| 15773 | // We will move [bStart..bEnd] but we will pick the insert location later |
| 15774 | insertAfterBlk = nullptr; |
| 15775 | } |
| 15776 | |
| 15777 | // We are going to move [bStart..bEnd] so they can't be NULL |
| 15778 | noway_assert(bStart != nullptr); |
| 15779 | noway_assert(bEnd != nullptr); |
| 15780 | |
| 15781 | // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call |
| 15782 | noway_assert((bEnd->bbJumpKind != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); |
| 15783 | |
| 15784 | // bStartPrev must be set to the block that precedes bStart |
| 15785 | noway_assert(bStartPrev->bbNext == bStart); |
| 15786 | |
| 15787 | // Since we will be unlinking [bStart..bEnd], |
| 15788 | // we need to compute and remember if bStart is in each of |
| 15789 | // the try and handler regions |
| 15790 | // |
| 15791 | bool* fStartIsInTry = nullptr; |
| 15792 | bool* fStartIsInHnd = nullptr; |
| 15793 | |
| 15794 | if (compHndBBtabCount > 0) |
| 15795 | { |
| 15796 | fStartIsInTry = new (this, CMK_Unknown) bool[compHndBBtabCount]; |
| 15797 | fStartIsInHnd = new (this, CMK_Unknown) bool[compHndBBtabCount]; |
| 15798 | |
| 15799 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 15800 | { |
| 15801 | fStartIsInTry[XTnum] = HBtab->InTryRegionBBRange(bStart); |
| 15802 | fStartIsInHnd[XTnum] = HBtab->InHndRegionBBRange(bStart); |
| 15803 | } |
| 15804 | } |
| 15805 | |
| 15806 | /* Temporarily unlink [bStart..bEnd] from the flow graph */ |
| 15807 | fgUnlinkRange(bStart, bEnd); |
| 15808 | |
| 15809 | if (insertAfterBlk == nullptr) |
| 15810 | { |
| 15811 | // Find new location for the unlinked block(s) |
| 15812 | // Set insertAfterBlk to the block which will precede the insertion point |
| 15813 | |
| 15814 | if (!bStart->hasTryIndex() && isRare) |
| 15815 | { |
| 15816 | // We'll just insert the blocks at the end of the method. If the method |
| 15817 | // has funclets, we will insert at the end of the main method but before |
| 15818 | // any of the funclets. Note that we create funclets before we call |
| 15819 | // fgReorderBlocks(). |
| 15820 | |
| 15821 | insertAfterBlk = fgLastBBInMainFunction(); |
| 15822 | noway_assert(insertAfterBlk != bPrev); |
| 15823 | } |
| 15824 | else |
| 15825 | { |
| 15826 | BasicBlock* startBlk; |
| 15827 | BasicBlock* lastBlk; |
| 15828 | EHblkDsc* ehDsc = ehInitTryBlockRange(bStart, &startBlk, &lastBlk); |
| 15829 | |
| 15830 | BasicBlock* endBlk; |
| 15831 | |
| 15832 | /* Setup startBlk and endBlk as the range to search */ |
| 15833 | |
| 15834 | if (ehDsc != nullptr) |
| 15835 | { |
| 15836 | endBlk = lastBlk->bbNext; |
| 15837 | |
| 15838 | /* |
| 15839 | Multiple (nested) try regions might start from the same BB. |
| 15840 | For example, |
| 15841 | |
| 15842 | try3 try2 try1 |
| 15843 | |--- |--- |--- BB01 |
| 15844 | | | | BB02 |
| 15845 | | | |--- BB03 |
| 15846 | | | BB04 |
| 15847 | | |------------ BB05 |
| 15848 | | BB06 |
| 15849 | |------------------- BB07 |
| 15850 | |
| 15851 | Now if we want to insert in try2 region, we will start with startBlk=BB01. |
| 15852 | The following loop will allow us to start from startBlk==BB04. |
| 15853 | */ |
| 15854 | while (!BasicBlock::sameTryRegion(startBlk, bStart) && (startBlk != endBlk)) |
| 15855 | { |
| 15856 | startBlk = startBlk->bbNext; |
| 15857 | } |
| 15858 | |
| 15859 | // startBlk cannot equal endBlk as it must come before endBlk |
| 15860 | if (startBlk == endBlk) |
| 15861 | { |
| 15862 | goto CANNOT_MOVE; |
| 15863 | } |
| 15864 | |
| 15865 | // we also can't start searching the try region at bStart |
| 15866 | if (startBlk == bStart) |
| 15867 | { |
                        // if bEnd is the last block in the method,
                        // or if bEnd->bbNext is in a different try region,
| 15870 | // then we cannot move the blocks |
| 15871 | // |
| 15872 | if ((bEnd->bbNext == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->bbNext)) |
| 15873 | { |
| 15874 | goto CANNOT_MOVE; |
| 15875 | } |
| 15876 | |
| 15877 | startBlk = bEnd->bbNext; |
| 15878 | |
| 15879 | // Check that the new startBlk still comes before endBlk |
| 15880 | |
| 15881 | // startBlk cannot equal endBlk as it must come before endBlk |
| 15882 | if (startBlk == endBlk) |
| 15883 | { |
| 15884 | goto CANNOT_MOVE; |
| 15885 | } |
| 15886 | |
| 15887 | BasicBlock* tmpBlk = startBlk; |
| 15888 | while ((tmpBlk != endBlk) && (tmpBlk != nullptr)) |
| 15889 | { |
| 15890 | tmpBlk = tmpBlk->bbNext; |
| 15891 | } |
| 15892 | |
| 15893 | // when tmpBlk is NULL that means startBlk is after endBlk |
| 15894 | // so there is no way to move bStart..bEnd within the try region |
| 15895 | if (tmpBlk == nullptr) |
| 15896 | { |
| 15897 | goto CANNOT_MOVE; |
| 15898 | } |
| 15899 | } |
| 15900 | } |
| 15901 | else |
| 15902 | { |
| 15903 | noway_assert(isRare == false); |
| 15904 | |
| 15905 | /* We'll search through the entire main method */ |
| 15906 | startBlk = fgFirstBB; |
| 15907 | endBlk = fgEndBBAfterMainFunction(); |
| 15908 | } |
| 15909 | |
| 15910 | // Calculate nearBlk and jumpBlk and then call fgFindInsertPoint() |
| 15911 | // to find our insertion block |
| 15912 | // |
| 15913 | { |
| 15914 | // If the set of blocks that we are moving ends with a BBJ_ALWAYS to |
| 15915 | // another [rarely run] block that comes after bPrev (forward branch) |
| 15916 | // then we can set up nearBlk to eliminate this jump sometimes |
| 15917 | // |
| 15918 | BasicBlock* nearBlk = nullptr; |
| 15919 | BasicBlock* jumpBlk = nullptr; |
| 15920 | |
| 15921 | if ((bEnd->bbJumpKind == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && |
| 15922 | fgIsForwardBranch(bEnd, bPrev)) |
| 15923 | { |
| 15924 | // Set nearBlk to be the block in [startBlk..endBlk] |
                        // such that nearBlk->bbNext == bEnd->bbJumpDest
| 15926 | // if no such block exists then set nearBlk to NULL |
| 15927 | nearBlk = startBlk; |
| 15928 | jumpBlk = bEnd; |
| 15929 | do |
| 15930 | { |
| 15931 | // We do not want to set nearBlk to bPrev |
| 15932 | // since then we will not move [bStart..bEnd] |
| 15933 | // |
| 15934 | if (nearBlk != bPrev) |
| 15935 | { |
| 15936 | // Check if nearBlk satisfies our requirement |
| 15937 | if (nearBlk->bbNext == bEnd->bbJumpDest) |
| 15938 | { |
| 15939 | break; |
| 15940 | } |
| 15941 | } |
| 15942 | |
| 15943 | // Did we reach the endBlk? |
| 15944 | if (nearBlk == endBlk) |
| 15945 | { |
| 15946 | nearBlk = nullptr; |
| 15947 | break; |
| 15948 | } |
| 15949 | |
| 15950 | // advance nearBlk to the next block |
| 15951 | nearBlk = nearBlk->bbNext; |
| 15952 | |
| 15953 | } while (nearBlk != nullptr); |
| 15954 | } |
| 15955 | |
| 15956 | // if nearBlk is NULL then we set nearBlk to be the |
| 15957 | // first block that we want to insert after. |
| 15958 | if (nearBlk == nullptr) |
| 15959 | { |
| 15960 | if (bDest != nullptr) |
| 15961 | { |
| 15962 | // we want to insert after bDest |
| 15963 | nearBlk = bDest; |
| 15964 | } |
| 15965 | else |
| 15966 | { |
| 15967 | // we want to insert after bPrev |
| 15968 | nearBlk = bPrev; |
| 15969 | } |
| 15970 | } |
| 15971 | |
| 15972 | /* Set insertAfterBlk to the block which we will insert after. */ |
| 15973 | |
| 15974 | insertAfterBlk = |
| 15975 | fgFindInsertPoint(bStart->bbTryIndex, |
| 15976 | true, // Insert in the try region. |
| 15977 | startBlk, endBlk, nearBlk, jumpBlk, bStart->bbWeight == BB_ZERO_WEIGHT); |
| 15978 | } |
| 15979 | |
| 15980 | /* See if insertAfterBlk is the same as where we started, */ |
| 15981 | /* or if we could not find any insertion point */ |
| 15982 | |
| 15983 | if ((insertAfterBlk == bPrev) || (insertAfterBlk == nullptr)) |
| 15984 | { |
| 15985 | CANNOT_MOVE:; |
| 15986 | /* We couldn't move the blocks, so put everything back */ |
| 15987 | /* relink [bStart .. bEnd] into the flow graph */ |
| 15988 | |
| 15989 | bPrev->setNext(bStart); |
| 15990 | if (bEnd->bbNext) |
| 15991 | { |
| 15992 | bEnd->bbNext->bbPrev = bEnd; |
| 15993 | } |
| 15994 | #ifdef DEBUG |
| 15995 | if (verbose) |
| 15996 | { |
| 15997 | if (bStart != bEnd) |
| 15998 | { |
| 15999 | printf("Could not relocate blocks (" FMT_BB " .. " FMT_BB ")\n" , bStart->bbNum, |
| 16000 | bEnd->bbNum); |
| 16001 | } |
| 16002 | else |
| 16003 | { |
| 16004 | printf("Could not relocate block " FMT_BB "\n" , bStart->bbNum); |
| 16005 | } |
| 16006 | } |
| 16007 | #endif // DEBUG |
| 16008 | continue; |
| 16009 | } |
| 16010 | } |
| 16011 | } |
| 16012 | |
| 16013 | noway_assert(insertAfterBlk != nullptr); |
| 16014 | noway_assert(bStartPrev != nullptr); |
| 16015 | noway_assert(bStartPrev != insertAfterBlk); |
| 16016 | |
| 16017 | #ifdef DEBUG |
| 16018 | movedBlocks = true; |
| 16019 | |
| 16020 | if (verbose) |
| 16021 | { |
| 16022 | const char* msg; |
| 16023 | if (bStart2 != nullptr) |
| 16024 | { |
| 16025 | msg = "hot" ; |
| 16026 | } |
| 16027 | else |
| 16028 | { |
| 16029 | if (isRare) |
| 16030 | { |
| 16031 | msg = "rarely run" ; |
| 16032 | } |
| 16033 | else |
| 16034 | { |
| 16035 | msg = "uncommon" ; |
| 16036 | } |
| 16037 | } |
| 16038 | |
| 16039 | printf("Relocated %s " , msg); |
| 16040 | if (bStart != bEnd) |
| 16041 | { |
| 16042 | printf("blocks (" FMT_BB " .. " FMT_BB ")" , bStart->bbNum, bEnd->bbNum); |
| 16043 | } |
| 16044 | else |
| 16045 | { |
| 16046 | printf("block " FMT_BB, bStart->bbNum); |
| 16047 | } |
| 16048 | |
| 16049 | if (bPrev->bbJumpKind == BBJ_COND) |
| 16050 | { |
| 16051 | printf(" by reversing conditional jump at " FMT_BB "\n" , bPrev->bbNum); |
| 16052 | } |
| 16053 | else |
| 16054 | { |
| 16055 | printf("\n" , bPrev->bbNum); |
| 16056 | } |
| 16057 | } |
| 16058 | #endif // DEBUG |
| 16059 | |
| 16060 | if (bPrev->bbJumpKind == BBJ_COND) |
| 16061 | { |
| 16062 | /* Reverse the bPrev jump condition */ |
| 16063 | GenTree* condTest = bPrev->lastStmt(); |
| 16064 | |
| 16065 | condTest = condTest->gtStmt.gtStmtExpr; |
| 16066 | noway_assert(condTest->gtOper == GT_JTRUE); |
| 16067 | |
| 16068 | condTest->gtOp.gtOp1 = gtReverseCond(condTest->gtOp.gtOp1); |
| 16069 | |
| 16070 | if (bStart2 == nullptr) |
| 16071 | { |
| 16072 | /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ |
| 16073 | bPrev->bbJumpDest = bStart; |
| 16074 | bStart->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL); |
| 16075 | } |
| 16076 | else |
| 16077 | { |
| 16078 | noway_assert(insertAfterBlk == bPrev); |
| 16079 | noway_assert(insertAfterBlk->bbNext == block); |
| 16080 | |
| 16081 | /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ |
| 16082 | bPrev->bbJumpDest = block; |
| 16083 | block->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL); |
| 16084 | } |
| 16085 | } |
| 16086 | |
| 16087 | // If we are moving blocks that are at the end of a try or handler |
| 16088 | // we will need to shorten ebdTryLast or ebdHndLast |
| 16089 | // |
| 16090 | ehUpdateLastBlocks(bEnd, bStartPrev); |
| 16091 | |
| 16092 | // If we are moving blocks into the end of a try region or handler region |
| 16093 | // we will need to extend ebdTryLast or ebdHndLast so the blocks that we |
| 16094 | // are moving are part of this try or handler region. |
| 16095 | // |
| 16096 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 16097 | { |
| 16098 | // Are we moving blocks to the end of a try region? |
| 16099 | if (HBtab->ebdTryLast == insertAfterBlk) |
| 16100 | { |
| 16101 | if (fStartIsInTry[XTnum]) |
| 16102 | { |
| 16103 | // bStart..bEnd is in the try, so extend the try region |
| 16104 | fgSetTryEnd(HBtab, bEnd); |
| 16105 | } |
| 16106 | } |
| 16107 | |
| 16108 | // Are we moving blocks to the end of a handler region? |
| 16109 | if (HBtab->ebdHndLast == insertAfterBlk) |
| 16110 | { |
| 16111 | if (fStartIsInHnd[XTnum]) |
| 16112 | { |
| 16113 | // bStart..bEnd is in the handler, so extend the handler region |
| 16114 | fgSetHndEnd(HBtab, bEnd); |
| 16115 | } |
| 16116 | } |
| 16117 | } |
| 16118 | |
| 16119 | /* We have decided to insert the block(s) after 'insertAfterBlk' */ |
| 16120 | fgMoveBlocksAfter(bStart, bEnd, insertAfterBlk); |
| 16121 | |
| 16122 | if (bDest) |
| 16123 | { |
| 16124 | /* We may need to insert an unconditional branch after bPrev to bDest */ |
| 16125 | fgConnectFallThrough(bPrev, bDest); |
| 16126 | } |
| 16127 | else |
| 16128 | { |
| 16129 | /* If bPrev falls through, we must insert a jump to block */ |
| 16130 | fgConnectFallThrough(bPrev, block); |
| 16131 | } |
| 16132 | |
| 16133 | BasicBlock* bSkip = bEnd->bbNext; |
| 16134 | |
| 16135 | /* If bEnd falls through, we must insert a jump to bNext */ |
| 16136 | fgConnectFallThrough(bEnd, bNext); |
| 16137 | |
| 16138 | if (bStart2 == nullptr) |
| 16139 | { |
| 16140 | /* If insertAfterBlk falls through, we are forced to */ |
| 16141 | /* add a jump around the block(s) we just inserted */ |
| 16142 | fgConnectFallThrough(insertAfterBlk, bSkip); |
| 16143 | } |
| 16144 | else |
| 16145 | { |
| 16146 | /* We may need to insert an unconditional branch after bPrev2 to bStart */ |
| 16147 | fgConnectFallThrough(bPrev2, bStart); |
| 16148 | } |
| 16149 | |
| 16150 | #if DEBUG |
| 16151 | if (verbose) |
| 16152 | { |
| 16153 | printf("\nAfter this change in fgReorderBlocks the BB graph is:" ); |
| 16154 | fgDispBasicBlocks(verboseTrees); |
| 16155 | printf("\n" ); |
| 16156 | } |
| 16157 | fgVerifyHandlerTab(); |
| 16158 | |
| 16159 | // Make sure that the predecessor lists are accurate |
| 16160 | if (expensiveDebugCheckLevel >= 2) |
| 16161 | { |
| 16162 | fgDebugCheckBBlist(); |
| 16163 | } |
| 16164 | #endif // DEBUG |
| 16165 | |
| 16166 | // Set our iteration point 'block' to be the new bPrev->bbNext |
| 16167 | // It will be used as the next bPrev |
| 16168 | block = bPrev->bbNext; |
| 16169 | |
| 16170 | } // end of for loop(bPrev,block) |
| 16171 | |
| 16172 | bool changed = movedBlocks || newRarelyRun || optimizedSwitches; |
| 16173 | |
| 16174 | if (changed) |
| 16175 | { |
| 16176 | fgNeedsUpdateFlowGraph = true; |
| 16177 | #if DEBUG |
| 16178 | // Make sure that the predecessor lists are accurate |
| 16179 | if (expensiveDebugCheckLevel >= 2) |
| 16180 | { |
| 16181 | fgDebugCheckBBlist(); |
| 16182 | } |
| 16183 | #endif // DEBUG |
| 16184 | } |
| 16185 | } |
| 16186 | #ifdef _PREFAST_ |
| 16187 | #pragma warning(pop) |
| 16188 | #endif |
| 16189 | |
| 16190 | /*------------------------------------------------------------------------- |
| 16191 | * |
| 16192 | * Walk the basic blocks list to determine the first block to place in the |
| 16193 | * cold section. This would be the first of a series of rarely executed blocks |
| 16194 | * such that no succeeding blocks are in a try region or an exception handler |
| 16195 | * or are rarely executed. |
| 16196 | */ |
| 16197 | |
| 16198 | void Compiler::fgDetermineFirstColdBlock() |
| 16199 | { |
| 16200 | #ifdef DEBUG |
| 16201 | if (verbose) |
| 16202 | { |
| 16203 | printf("\n*************** In fgDetermineFirstColdBlock()\n" ); |
| 16204 | } |
| 16205 | #endif // DEBUG |
| 16206 | |
    // Since we may need to create a new transition block
| 16208 | // we assert that it is OK to create new blocks. |
| 16209 | // |
| 16210 | assert(fgSafeBasicBlockCreation); |
| 16211 | |
| 16212 | fgFirstColdBlock = nullptr; |
| 16213 | |
| 16214 | if (!opts.compProcedureSplitting) |
| 16215 | { |
| 16216 | JITDUMP("No procedure splitting will be done for this method\n" ); |
| 16217 | return; |
| 16218 | } |
| 16219 | |
| 16220 | #ifdef DEBUG |
| 16221 | if ((compHndBBtabCount > 0) && !opts.compProcedureSplittingEH) |
| 16222 | { |
| 16223 | JITDUMP("No procedure splitting will be done for this method with EH (by request)\n" ); |
| 16224 | return; |
| 16225 | } |
| 16226 | #endif // DEBUG |
| 16227 | |
| 16228 | #if FEATURE_EH_FUNCLETS |
| 16229 | // TODO-CQ: handle hot/cold splitting in functions with EH (including synchronized methods |
| 16230 | // that create EH in methods without explicit EH clauses). |
| 16231 | |
| 16232 | if (compHndBBtabCount > 0) |
| 16233 | { |
| 16234 | JITDUMP("No procedure splitting will be done for this method with EH (implementation limitation)\n" ); |
| 16235 | return; |
| 16236 | } |
| 16237 | #endif // FEATURE_EH_FUNCLETS |
| 16238 | |
| 16239 | BasicBlock* firstColdBlock = nullptr; |
| 16240 | BasicBlock* prevToFirstColdBlock = nullptr; |
| 16241 | BasicBlock* block; |
| 16242 | BasicBlock* lblk; |
| 16243 | |
| 16244 | for (lblk = nullptr, block = fgFirstBB; block != nullptr; lblk = block, block = block->bbNext) |
| 16245 | { |
| 16246 | bool blockMustBeInHotSection = false; |
| 16247 | |
| 16248 | #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION |
| 16249 | if (bbIsHandlerBeg(block)) |
| 16250 | { |
| 16251 | blockMustBeInHotSection = true; |
| 16252 | } |
| 16253 | #endif // HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION |
| 16254 | |
| 16255 | // Do we have a candidate for the first cold block? |
| 16256 | if (firstColdBlock != nullptr) |
| 16257 | { |
| 16258 | // We have a candidate for first cold block |
| 16259 | |
| 16260 | // Is this a hot block? |
| 16261 | if (blockMustBeInHotSection || (block->isRunRarely() == false)) |
| 16262 | { |
| 16263 | // We have to restart the search for the first cold block |
| 16264 | firstColdBlock = nullptr; |
| 16265 | prevToFirstColdBlock = nullptr; |
| 16266 | } |
| 16267 | } |
| 16268 | else // (firstColdBlock == NULL) |
| 16269 | { |
| 16270 | // We don't have a candidate for first cold block |
| 16271 | |
| 16272 | // Is this a cold block? |
| 16273 | if (!blockMustBeInHotSection && (block->isRunRarely() == true)) |
| 16274 | { |
| 16275 | // |
| 16276 | // If the last block that was hot was a BBJ_COND |
| 16277 | // then we will have to add an unconditional jump |
                // so the code size for block needs to be large
| 16279 | // enough to make it worth our while |
| 16280 | // |
| 16281 | if ((lblk == nullptr) || (lblk->bbJumpKind != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) |
| 16282 | { |
| 16283 | // This block is now a candidate for first cold block |
| 16284 | // Also remember the predecessor to this block |
| 16285 | firstColdBlock = block; |
| 16286 | prevToFirstColdBlock = lblk; |
| 16287 | } |
| 16288 | } |
| 16289 | } |
| 16290 | } |
| 16291 | |
| 16292 | if (firstColdBlock == fgFirstBB) |
| 16293 | { |
| 16294 | // If the first block is Cold then we can't move any blocks |
| 16295 | // into the cold section |
| 16296 | |
| 16297 | firstColdBlock = nullptr; |
| 16298 | } |
| 16299 | |
| 16300 | if (firstColdBlock != nullptr) |
| 16301 | { |
| 16302 | noway_assert(prevToFirstColdBlock != nullptr); |
| 16303 | |
| 16304 | if (prevToFirstColdBlock == nullptr) |
| 16305 | { |
| 16306 | return; // To keep Prefast happy |
| 16307 | } |
| 16308 | |
| 16309 | // If we only have one cold block |
| 16310 | // then it may not be worth it to move it |
| 16311 | // into the Cold section as a jump to the |
| 16312 | // Cold section is 5 bytes in size. |
| 16313 | // |
| 16314 | if (firstColdBlock->bbNext == nullptr) |
| 16315 | { |
| 16316 | // If the size of the cold block is 7 or less |
| 16317 | // then we will keep it in the Hot section. |
| 16318 | // |
| 16319 | if (fgGetCodeEstimate(firstColdBlock) < 8) |
| 16320 | { |
| 16321 | firstColdBlock = nullptr; |
| 16322 | goto EXIT; |
| 16323 | } |
| 16324 | } |
| 16325 | |
        // When the last Hot block falls through into the Cold section
| 16327 | // we may need to add a jump |
| 16328 | // |
| 16329 | if (prevToFirstColdBlock->bbFallsThrough()) |
| 16330 | { |
| 16331 | switch (prevToFirstColdBlock->bbJumpKind) |
| 16332 | { |
| 16333 | default: |
| 16334 | noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()" ); |
| 16335 | |
| 16336 | case BBJ_CALLFINALLY: |
| 16337 | // A BBJ_CALLFINALLY that falls through is always followed |
| 16338 | // by an empty BBJ_ALWAYS. |
| 16339 | // |
| 16340 | assert(prevToFirstColdBlock->isBBCallAlwaysPair()); |
| 16341 | firstColdBlock = |
| 16342 | firstColdBlock->bbNext; // Note that this assignment could make firstColdBlock == nullptr |
| 16343 | break; |
| 16344 | |
| 16345 | case BBJ_COND: |
| 16346 | // |
| 16347 | // This is a slightly more complicated case, because we will |
| 16348 | // probably need to insert a block to jump to the cold section. |
| 16349 | // |
| 16350 | if (firstColdBlock->isEmpty() && (firstColdBlock->bbJumpKind == BBJ_ALWAYS)) |
| 16351 | { |
| 16352 | // We can just use this block as the transitionBlock |
| 16353 | firstColdBlock = firstColdBlock->bbNext; |
| 16354 | // Note that this assignment could make firstColdBlock == NULL |
| 16355 | } |
| 16356 | else |
| 16357 | { |
| 16358 | BasicBlock* transitionBlock = fgNewBBafter(BBJ_ALWAYS, prevToFirstColdBlock, true); |
| 16359 | transitionBlock->bbJumpDest = firstColdBlock; |
| 16360 | transitionBlock->inheritWeight(firstColdBlock); |
| 16361 | |
| 16362 | noway_assert(fgComputePredsDone); |
| 16363 | |
| 16364 | // Update the predecessor list for firstColdBlock |
| 16365 | fgReplacePred(firstColdBlock, prevToFirstColdBlock, transitionBlock); |
| 16366 | |
| 16367 | // Add prevToFirstColdBlock as a predecessor for transitionBlock |
| 16368 | fgAddRefPred(transitionBlock, prevToFirstColdBlock); |
| 16369 | } |
| 16370 | break; |
| 16371 | |
| 16372 | case BBJ_NONE: |
| 16373 | // If the block preceding the first cold block is BBJ_NONE, |
| 16374 | // convert it to BBJ_ALWAYS to force an explicit jump. |
| 16375 | |
| 16376 | prevToFirstColdBlock->bbJumpDest = firstColdBlock; |
| 16377 | prevToFirstColdBlock->bbJumpKind = BBJ_ALWAYS; |
| 16378 | break; |
| 16379 | } |
| 16380 | } |
| 16381 | } |
| 16382 | |
| 16383 | if (firstColdBlock != nullptr) |
| 16384 | { |
| 16385 | firstColdBlock->bbFlags |= BBF_JMP_TARGET; |
| 16386 | |
| 16387 | for (block = firstColdBlock; block; block = block->bbNext) |
| 16388 | { |
| 16389 | block->bbFlags |= BBF_COLD; |
| 16390 | } |
| 16391 | } |
| 16392 | |
| 16393 | EXIT:; |
| 16394 | |
| 16395 | #ifdef DEBUG |
| 16396 | if (verbose) |
| 16397 | { |
| 16398 | if (firstColdBlock) |
| 16399 | { |
| 16400 | printf("fgFirstColdBlock is " FMT_BB ".\n" , firstColdBlock->bbNum); |
| 16401 | } |
| 16402 | else |
| 16403 | { |
| 16404 | printf("fgFirstColdBlock is NULL.\n" ); |
| 16405 | } |
| 16406 | |
| 16407 | fgDispBasicBlocks(); |
| 16408 | } |
| 16409 | |
| 16410 | fgVerifyHandlerTab(); |
| 16411 | #endif // DEBUG |
| 16412 | |
| 16413 | fgFirstColdBlock = firstColdBlock; |
| 16414 | } |
| 16415 | |
| 16416 | #ifdef _PREFAST_ |
| 16417 | #pragma warning(push) |
| 16418 | #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function |
| 16419 | #endif |
| 16420 | /***************************************************************************** |
| 16421 | * |
| 16422 | * Function called to "comb" the basic block list. |
| 16423 | * Removes any empty blocks, unreachable blocks and redundant jumps. |
| 16424 | * Most of those appear after dead store removal and folding of conditionals. |
| 16425 | * |
| 16426 | * Returns: true if the flowgraph has been modified |
| 16427 | * |
| 16428 | * It also compacts basic blocks |
| 16429 | * (consecutive basic blocks that should in fact be one). |
| 16430 | * |
| 16431 | * NOTE: |
| 16432 | * Debuggable code and Min Optimization JIT also introduces basic blocks |
| 16433 | * but we do not optimize those! |
| 16434 | */ |
| 16435 | |
| 16436 | bool Compiler::fgUpdateFlowGraph(bool doTailDuplication) |
| 16437 | { |
| 16438 | #ifdef DEBUG |
| 16439 | if (verbose) |
| 16440 | { |
| 16441 | printf("\n*************** In fgUpdateFlowGraph()" ); |
| 16442 | } |
| 16443 | #endif // DEBUG |
| 16444 | |
| 16445 | /* This should never be called for debuggable code */ |
| 16446 | |
| 16447 | noway_assert(opts.OptimizationEnabled()); |
| 16448 | |
| 16449 | #ifdef DEBUG |
| 16450 | if (verbose) |
| 16451 | { |
| 16452 | printf("\nBefore updating the flow graph:\n" ); |
| 16453 | fgDispBasicBlocks(verboseTrees); |
| 16454 | printf("\n" ); |
| 16455 | } |
| 16456 | #endif // DEBUG |
| 16457 | |
| 16458 | /* Walk all the basic blocks - look for unconditional jumps, empty blocks, blocks to compact, etc... |
| 16459 | * |
| 16460 | * OBSERVATION: |
| 16461 | * Once a block is removed the predecessors are not accurate (assuming they were at the beginning) |
| 16462 | * For now we will only use the information in bbRefs because it is easier to be updated |
| 16463 | */ |
| 16464 | |
| 16465 | bool modified = false; |
| 16466 | bool change; |
| 16467 | do |
| 16468 | { |
| 16469 | change = false; |
| 16470 | |
| 16471 | BasicBlock* block; // the current block |
| 16472 | BasicBlock* bPrev = nullptr; // the previous non-worthless block |
| 16473 | BasicBlock* bNext; // the successor of the current block |
| 16474 | BasicBlock* bDest; // the jump target of the current block |
| 16475 | |
| 16476 | for (block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 16477 | { |
| 16478 | /* Some blocks may be already marked removed by other optimizations |
| 16479 | * (e.g worthless loop removal), without being explicitly removed |
| 16480 | * from the list. |
| 16481 | */ |
| 16482 | |
| 16483 | if (block->bbFlags & BBF_REMOVED) |
| 16484 | { |
| 16485 | if (bPrev) |
| 16486 | { |
| 16487 | bPrev->setNext(block->bbNext); |
| 16488 | } |
| 16489 | else |
| 16490 | { |
| 16491 | /* WEIRD first basic block is removed - should have an assert here */ |
| 16492 | noway_assert(!"First basic block marked as BBF_REMOVED???" ); |
| 16493 | |
| 16494 | fgFirstBB = block->bbNext; |
| 16495 | } |
| 16496 | continue; |
| 16497 | } |
| 16498 | |
| 16499 | /* We jump to the REPEAT label if we performed a change involving the current block |
| 16500 | * This is in case there are other optimizations that can show up |
| 16501 | * (e.g. - compact 3 blocks in a row) |
| 16502 | * If nothing happens, we then finish the iteration and move to the next block |
| 16503 | */ |
| 16504 | |
| 16505 | REPEAT:; |
| 16506 | |
| 16507 | bNext = block->bbNext; |
| 16508 | bDest = nullptr; |
| 16509 | |
| 16510 | if (block->bbJumpKind == BBJ_ALWAYS) |
| 16511 | { |
| 16512 | bDest = block->bbJumpDest; |
| 16513 | if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) |
| 16514 | { |
| 16515 | change = true; |
| 16516 | modified = true; |
| 16517 | bDest = block->bbJumpDest; |
| 16518 | bNext = block->bbNext; |
| 16519 | } |
| 16520 | } |
| 16521 | |
| 16522 | // Remove JUMPS to the following block |
| 16523 | // and optimize any JUMPS to JUMPS |
| 16524 | |
| 16525 | if (block->bbJumpKind == BBJ_COND || block->bbJumpKind == BBJ_ALWAYS) |
| 16526 | { |
| 16527 | bDest = block->bbJumpDest; |
| 16528 | if (bDest == bNext) |
| 16529 | { |
| 16530 | if (fgOptimizeBranchToNext(block, bNext, bPrev)) |
| 16531 | { |
| 16532 | change = true; |
| 16533 | modified = true; |
| 16534 | bDest = nullptr; |
| 16535 | } |
| 16536 | } |
| 16537 | } |
| 16538 | |
| 16539 | if (bDest != nullptr) |
| 16540 | { |
| 16541 | // Do we have a JUMP to an empty unconditional JUMP block? |
| 16542 | if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && |
| 16543 | (bDest != bDest->bbJumpDest)) // special case for self jumps |
| 16544 | { |
| 16545 | if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) |
| 16546 | { |
| 16547 | change = true; |
| 16548 | modified = true; |
| 16549 | goto REPEAT; |
| 16550 | } |
| 16551 | } |
| 16552 | |
| 16553 | // Check for a conditional branch that just skips over an empty BBJ_ALWAYS block |
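                // An illustrative shape of this pattern (hypothetical block numbers):
                //
                //    block: BB01: ... BBJ_COND -> BB03           BB01: ... BBJ_COND(reversed) -> BB07
                //    bNext: BB02: (empty) BBJ_ALWAYS -> BB07 =>  (BB02 is unlinked and removed)
                //    bDest: BB03: ...                            BB03: ...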
| 16554 | |
| 16555 | if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block |
| 16556 | (bNext != nullptr) && // block is not the last block |
| 16557 | (bNext->bbRefs == 1) && // No other block jumps to bNext |
| 16558 | (bNext->bbNext == bDest) && // The block after bNext is the BBJ_COND jump dest |
| 16559 | (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block |
                    bNext->isEmpty() && // and it is an empty block
| 16561 | (bNext != bNext->bbJumpDest) && // special case for self jumps |
| 16562 | (bDest != fgFirstColdBlock)) |
| 16563 | { |
| 16564 | bool optimizeJump = true; |
| 16565 | |
| 16566 | // We do not optimize jumps between two different try regions. |
| 16567 | // However jumping to a block that is not in any try region is OK |
| 16568 | // |
| 16569 | if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) |
| 16570 | { |
| 16571 | optimizeJump = false; |
| 16572 | } |
| 16573 | |
| 16574 | // Also consider bNext's try region |
| 16575 | // |
| 16576 | if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) |
| 16577 | { |
| 16578 | optimizeJump = false; |
| 16579 | } |
| 16580 | |
| 16581 | // If we are optimizing using real profile weights |
| 16582 | // then don't optimize a conditional jump to an unconditional jump |
| 16583 | // until after we have computed the edge weights |
| 16584 | // |
| 16585 | if (fgIsUsingProfileWeights()) |
| 16586 | { |
                        // if block and bDest are in different hot/cold regions we can't do this optimization
                        // because we can't allow fall-through into the cold region.
| 16588 | // because we can't allow fall-through into the cold region. |
| 16589 | if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) |
| 16590 | { |
| 16591 | fgNeedsUpdateFlowGraph = true; |
| 16592 | optimizeJump = false; |
| 16593 | } |
| 16594 | } |
| 16595 | |
| 16596 | if (optimizeJump) |
| 16597 | { |
| 16598 | #ifdef DEBUG |
| 16599 | if (verbose) |
| 16600 | { |
| 16601 | printf("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB |
| 16602 | " -> " FMT_BB ")\n" , |
| 16603 | block->bbNum, bDest->bbNum, bNext->bbJumpDest->bbNum); |
| 16604 | } |
| 16605 | #endif // DEBUG |
| 16606 | /* Reverse the jump condition */ |
| 16607 | |
| 16608 | GenTree* test = block->lastNode(); |
| 16609 | noway_assert(test->OperIsConditionalJump()); |
| 16610 | |
| 16611 | if (test->OperGet() == GT_JTRUE) |
| 16612 | { |
| 16613 | GenTree* cond = gtReverseCond(test->gtOp.gtOp1); |
| 16614 | assert(cond == test->gtOp.gtOp1); // Ensure `gtReverseCond` did not create a new node. |
| 16615 | test->gtOp.gtOp1 = cond; |
| 16616 | } |
| 16617 | else |
| 16618 | { |
| 16619 | gtReverseCond(test); |
| 16620 | } |
| 16621 | |
| 16622 | // Optimize the Conditional JUMP to go to the new target |
| 16623 | block->bbJumpDest = bNext->bbJumpDest; |
| 16624 | |
| 16625 | fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); |
| 16626 | |
| 16627 | /* |
| 16628 | Unlink bNext from the BasicBlock list; note that we can |
| 16629 | do this even though other blocks could jump to it - the |
| 16630 | reason is that elsewhere in this function we always |
                           redirect jumps-to-jumps so that they jump to the final label,
| 16632 | so even if another block jumps to bNext it won't matter |
| 16633 | once we're done since any such jump will be redirected |
| 16634 | to the final target by the time we're done here. |
| 16635 | */ |
| 16636 | |
| 16637 | fgRemoveRefPred(bNext, block); |
| 16638 | fgUnlinkBlock(bNext); |
| 16639 | |
| 16640 | /* Mark the block as removed */ |
| 16641 | bNext->bbFlags |= BBF_REMOVED; |
| 16642 | |
| 16643 | // If this is the first Cold basic block update fgFirstColdBlock |
| 16644 | if (bNext == fgFirstColdBlock) |
| 16645 | { |
| 16646 | fgFirstColdBlock = bNext->bbNext; |
| 16647 | } |
| 16648 | |
| 16649 | // |
| 16650 | // If we removed the end of a try region or handler region |
| 16651 | // we will need to update ebdTryLast or ebdHndLast. |
| 16652 | // |
| 16653 | |
| 16654 | EHblkDsc* HBtab; |
| 16655 | EHblkDsc* HBtabEnd; |
| 16656 | |
| 16657 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; |
| 16658 | HBtab++) |
| 16659 | { |
| 16660 | if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) |
| 16661 | { |
| 16662 | fgSkipRmvdBlocks(HBtab); |
| 16663 | } |
| 16664 | } |
| 16665 | |
| 16666 | // we optimized this JUMP - goto REPEAT to catch similar cases |
| 16667 | change = true; |
| 16668 | modified = true; |
| 16669 | |
| 16670 | #ifdef DEBUG |
| 16671 | if (verbose) |
| 16672 | { |
| 16673 | printf("\nAfter reversing the jump:\n" ); |
| 16674 | fgDispBasicBlocks(verboseTrees); |
| 16675 | } |
| 16676 | #endif // DEBUG |
| 16677 | |
| 16678 | /* |
                       For a rare special case we cannot jump to REPEAT:
                       jumping to REPEAT would cause us to delete 'block',
                       because it currently appears to be unreachable, as
                       it is a self loop that has only a single bbRef (itself).
                       However, since the unlinked bNext has additional bbRefs
| 16684 | (that we will later connect to 'block'), it is not really |
| 16685 | unreachable. |
| 16686 | */ |
| 16687 | if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) |
| 16688 | { |
| 16689 | continue; |
| 16690 | } |
| 16691 | |
| 16692 | goto REPEAT; |
| 16693 | } |
| 16694 | } |
| 16695 | } |
| 16696 | |
| 16697 | // |
| 16698 | // Update the switch jump table such that it follows jumps to jumps: |
| 16699 | // |
| 16700 | if (block->bbJumpKind == BBJ_SWITCH) |
| 16701 | { |
| 16702 | if (fgOptimizeSwitchBranches(block)) |
| 16703 | { |
| 16704 | change = true; |
| 16705 | modified = true; |
| 16706 | goto REPEAT; |
| 16707 | } |
| 16708 | } |
| 16709 | |
| 16710 | noway_assert(!(block->bbFlags & BBF_REMOVED)); |
| 16711 | |
| 16712 | /* COMPACT blocks if possible */ |
| 16713 | |
| 16714 | if (fgCanCompactBlocks(block, bNext)) |
| 16715 | { |
| 16716 | fgCompactBlocks(block, bNext); |
| 16717 | |
| 16718 | /* we compacted two blocks - goto REPEAT to catch similar cases */ |
| 16719 | change = true; |
| 16720 | modified = true; |
| 16721 | goto REPEAT; |
| 16722 | } |
| 16723 | |
| 16724 | /* Remove unreachable or empty blocks - do not consider blocks marked BBF_DONT_REMOVE or genReturnBB block |
| 16725 | * These include first and last block of a TRY, exception handlers and RANGE_CHECK_FAIL THROW blocks */ |
| 16726 | |
| 16727 | if ((block->bbFlags & BBF_DONT_REMOVE) == BBF_DONT_REMOVE || block == genReturnBB) |
| 16728 | { |
| 16729 | bPrev = block; |
| 16730 | continue; |
| 16731 | } |
| 16732 | |
| 16733 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 16734 | // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. |
| 16735 | if (block->countOfInEdges() == 0 && bPrev->bbJumpKind == BBJ_CALLFINALLY) |
| 16736 | { |
| 16737 | assert(bPrev->isBBCallAlwaysPair()); |
| 16738 | noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); |
| 16739 | noway_assert(block->bbJumpKind == BBJ_ALWAYS); |
| 16740 | bPrev = block; |
| 16741 | continue; |
| 16742 | } |
| 16743 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 16744 | |
| 16745 | noway_assert(!block->bbCatchTyp); |
| 16746 | noway_assert(!(block->bbFlags & BBF_TRY_BEG)); |
| 16747 | |
| 16748 | /* Remove unreachable blocks |
| 16749 | * |
| 16750 | * We'll look for blocks that have countOfInEdges() = 0 (blocks may become |
| 16751 | * unreachable due to a BBJ_ALWAYS introduced by conditional folding for example) |
| 16752 | */ |
| 16753 | |
| 16754 | if (block->countOfInEdges() == 0) |
| 16755 | { |
| 16756 | /* no references -> unreachable - remove it */ |
| 16757 | /* For now do not update the bbNum, do it at the end */ |
| 16758 | |
| 16759 | fgRemoveBlock(block, true); |
| 16760 | |
| 16761 | change = true; |
| 16762 | modified = true; |
| 16763 | |
| 16764 | /* we removed the current block - the rest of the optimizations won't have a target |
| 16765 | * continue with the next one */ |
| 16766 | |
| 16767 | continue; |
| 16768 | } |
| 16769 | else if (block->countOfInEdges() == 1) |
| 16770 | { |
| 16771 | switch (block->bbJumpKind) |
| 16772 | { |
| 16773 | case BBJ_COND: |
| 16774 | case BBJ_ALWAYS: |
| 16775 | if (block->bbJumpDest == block) |
| 16776 | { |
| 16777 | fgRemoveBlock(block, true); |
| 16778 | |
| 16779 | change = true; |
| 16780 | modified = true; |
| 16781 | |
| 16782 | /* we removed the current block - the rest of the optimizations |
| 16783 | * won't have a target so continue with the next block */ |
| 16784 | |
| 16785 | continue; |
| 16786 | } |
| 16787 | break; |
| 16788 | |
| 16789 | default: |
| 16790 | break; |
| 16791 | } |
| 16792 | } |
| 16793 | |
| 16794 | noway_assert(!(block->bbFlags & BBF_REMOVED)); |
| 16795 | |
| 16796 | /* Remove EMPTY blocks */ |
| 16797 | |
| 16798 | if (block->isEmpty()) |
| 16799 | { |
| 16800 | assert(bPrev == block->bbPrev); |
| 16801 | if (fgOptimizeEmptyBlock(block)) |
| 16802 | { |
| 16803 | change = true; |
| 16804 | modified = true; |
| 16805 | } |
| 16806 | |
| 16807 | /* Have we removed the block? */ |
| 16808 | |
| 16809 | if (block->bbFlags & BBF_REMOVED) |
| 16810 | { |
| 16811 | /* block was removed - no change to bPrev */ |
| 16812 | continue; |
| 16813 | } |
| 16814 | } |
| 16815 | |
| 16816 | /* Set the predecessor of the last reachable block: |
| 16817 | * if we removed the current block, the predecessor remains unchanged; |
| 16818 | * otherwise, since the current block is OK, it becomes the predecessor */ |
| 16819 | |
| 16820 | noway_assert(!(block->bbFlags & BBF_REMOVED)); |
| 16821 | |
| 16822 | bPrev = block; |
| 16823 | } |
| 16824 | } while (change); |
| 16825 | |
| 16826 | fgNeedsUpdateFlowGraph = false; |
| 16827 | |
| 16828 | #ifdef DEBUG |
| 16829 | if (verbose && modified) |
| 16830 | { |
| 16831 | printf("\nAfter updating the flow graph:\n" ); |
| 16832 | fgDispBasicBlocks(verboseTrees); |
| 16833 | fgDispHandlerTab(); |
| 16834 | } |
| 16835 | |
| 16836 | if (compRationalIRForm) |
| 16837 | { |
| 16838 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 16839 | { |
| 16840 | LIR::AsRange(block).CheckLIR(this); |
| 16841 | } |
| 16842 | } |
| 16843 | |
| 16844 | fgVerifyHandlerTab(); |
| 16845 | // Make sure that the predecessor lists are accurate |
| 16846 | fgDebugCheckBBlist(); |
| 16847 | fgDebugCheckUpdate(); |
| 16848 | #endif // DEBUG |
| 16849 | |
| 16850 | return modified; |
| 16851 | } |
| 16852 | #ifdef _PREFAST_ |
| 16853 | #pragma warning(pop) |
| 16854 | #endif |
| 16855 | |
| 16856 | /***************************************************************************** |
| 16857 | * Check that the flow graph is really updated |
| 16858 | */ |
| 16859 | |
| 16860 | #ifdef DEBUG |
| 16861 | |
| 16862 | void Compiler::fgDebugCheckUpdate() |
| 16863 | { |
| 16864 | if (!compStressCompile(STRESS_CHK_FLOW_UPDATE, 30)) |
| 16865 | { |
| 16866 | return; |
| 16867 | } |
| 16868 | |
| 16869 | /* We check for these conditions: |
| 16870 | * no unreachable blocks -> no blocks have countOfInEdges() = 0 |
| 16871 | * no empty blocks -> no blocks have bbTreeList = 0 |
| 16872 | * no un-imported blocks -> no blocks have BBF_IMPORTED not set (this is |
| 16873 | * somewhat redundant with the above, but we check it to be sure) |
| 16874 | * no un-compacted blocks -> BBJ_NONE followed by block with no jumps to it (countOfInEdges() = 1) |
| 16875 | */ |
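| | // Illustrative sketch of the last condition (not from a specific test case): an "un-compacted" pair |
| | // would look like |
| | // |
| | //   BB01 [BBJ_NONE]   -- falls through into BB02 |
| | //   BB02              -- countOfInEdges() == 1, its only predecessor is BB01 |
| | // |
| | // Ignoring the other fgCanCompactBlocks() restrictions (BBF_DONT_REMOVE, EH regions, etc.), such a |
| | // pair should have been merged by the flow graph update before this check runs. |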
| 16876 | |
| 16877 | BasicBlock* prev; |
| 16878 | BasicBlock* block; |
| 16879 | for (prev = nullptr, block = fgFirstBB; block != nullptr; prev = block, block = block->bbNext) |
| 16880 | { |
| 16881 | /* no unreachable blocks */ |
| 16882 | |
| 16883 | if ((block->countOfInEdges() == 0) && !(block->bbFlags & BBF_DONT_REMOVE) |
| 16884 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 16885 | // With funclets, we never get rid of the BBJ_ALWAYS part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, |
| 16886 | // even if we can prove that the finally block never returns. |
| 16887 | && (prev == NULL || block->bbJumpKind != BBJ_ALWAYS || !prev->isBBCallAlwaysPair()) |
| 16888 | #endif // FEATURE_EH_FUNCLETS |
| 16889 | ) |
| 16890 | { |
| 16891 | noway_assert(!"Unreachable block not removed!" ); |
| 16892 | } |
| 16893 | |
| 16894 | /* no empty blocks */ |
| 16895 | |
| 16896 | if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE)) |
| 16897 | { |
| 16898 | switch (block->bbJumpKind) |
| 16899 | { |
| 16900 | case BBJ_CALLFINALLY: |
| 16901 | case BBJ_EHFINALLYRET: |
| 16902 | case BBJ_EHFILTERRET: |
| 16903 | case BBJ_RETURN: |
| 16904 | /* a BBJ_ALWAYS is probably just a GOTO, but it still has to be allowed here */ |
| 16905 | case BBJ_ALWAYS: |
| 16906 | case BBJ_EHCATCHRET: |
| 16907 | /* These jump kinds are allowed to have empty tree lists */ |
| 16908 | break; |
| 16909 | |
| 16910 | default: |
| 16911 | /* it may be the case that the block had more than one reference to it |
| 16912 | * so we couldn't remove it */ |
| 16913 | |
| 16914 | if (block->countOfInEdges() == 0) |
| 16915 | { |
| 16916 | noway_assert(!"Empty block not removed!" ); |
| 16917 | } |
| 16918 | break; |
| 16919 | } |
| 16920 | } |
| 16921 | |
| 16922 | /* no un-imported blocks */ |
| 16923 | |
| 16924 | if (!(block->bbFlags & BBF_IMPORTED)) |
| 16925 | { |
| 16926 | /* internal blocks do not count */ |
| 16927 | |
| 16928 | if (!(block->bbFlags & BBF_INTERNAL)) |
| 16929 | { |
| 16930 | noway_assert(!"Non IMPORTED block not removed!" ); |
| 16931 | } |
| 16932 | } |
| 16933 | |
| 16934 | bool prevIsCallAlwaysPair = ((prev != nullptr) && prev->isBBCallAlwaysPair()); |
| 16935 | |
| 16936 | // Check for an unnecessary jump to the next block |
| 16937 | bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we cannot assert |
| 16938 | |
| 16939 | if (block->bbJumpKind == BBJ_COND) |
| 16940 | { |
| 16941 | // A conditional branch should never jump to the next block |
| 16942 | // as it can be folded into a BBJ_NONE; |
| 16943 | doAssertOnJumpToNextBlock = true; |
| 16944 | } |
| 16945 | else if (block->bbJumpKind == BBJ_ALWAYS) |
| 16946 | { |
| 16947 | // Generally we will want to assert if a BBJ_ALWAYS branches to the next block |
| 16948 | doAssertOnJumpToNextBlock = true; |
| 16949 | |
| 16950 | // If the BBF_KEEP_BBJ_ALWAYS flag is set we allow it to jump to the next block |
| 16951 | if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS) |
| 16952 | { |
| 16953 | doAssertOnJumpToNextBlock = false; |
| 16954 | } |
| 16955 | |
| 16956 | // A call/always pair is also allowed to jump to the next block |
| 16957 | if (prevIsCallAlwaysPair) |
| 16958 | { |
| 16959 | doAssertOnJumpToNextBlock = false; |
| 16960 | } |
| 16961 | |
| 16962 | // We are allowed to have a branch from a hot 'block' to a cold 'bbNext' |
| 16963 | // |
| 16964 | if ((block->bbNext != nullptr) && fgInDifferentRegions(block, block->bbNext)) |
| 16965 | { |
| 16966 | doAssertOnJumpToNextBlock = false; |
| 16967 | } |
| 16968 | } |
| 16969 | |
| 16970 | if (doAssertOnJumpToNextBlock) |
| 16971 | { |
| 16972 | if (block->bbJumpDest == block->bbNext) |
| 16973 | { |
| 16974 | noway_assert(!"Unnecessary jump to the next block!" ); |
| 16975 | } |
| 16976 | } |
| 16977 | |
| 16978 | /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */ |
| 16979 | |
| 16980 | if ((block->bbJumpKind == BBJ_ALWAYS) && prevIsCallAlwaysPair) |
| 16981 | { |
| 16982 | noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS); |
| 16983 | } |
| 16984 | |
| 16985 | /* For a BBJ_CALLFINALLY block we make sure that it is followed by */ |
| 16986 | /* a BBJ_ALWAYS block with BBF_INTERNAL set, */ |
| 16987 | /* or that it is a BBF_RETLESS_CALL */ |
| 16988 | if (block->bbJumpKind == BBJ_CALLFINALLY) |
| 16989 | { |
| 16990 | assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); |
| 16991 | } |
| 16992 | |
| 16993 | /* no un-compacted blocks */ |
| 16994 | |
| 16995 | if (fgCanCompactBlocks(block, block->bbNext)) |
| 16996 | { |
| 16997 | noway_assert(!"Found un-compacted blocks!" ); |
| 16998 | } |
| 16999 | } |
| 17000 | } |
| 17001 | |
| 17002 | #endif // DEBUG |
| 17003 | |
| 17004 | /***************************************************************************** |
| 17005 | * We've inserted a new block before 'block' that should be part of the same EH region as 'block'. |
| 17006 | * Update the EH table to make this so. Also, set the new block to have the right EH region data |
| 17007 | * (copy the bbTryIndex, bbHndIndex, and bbCatchTyp from 'block' to the new predecessor, and clear |
| 17008 | * 'bbCatchTyp' from 'block'). |
| 17009 | */ |
| 17010 | void Compiler::fgExtendEHRegionBefore(BasicBlock* block) |
| 17011 | { |
| 17012 | assert(block->bbPrev != nullptr); |
| 17013 | |
| 17014 | BasicBlock* bPrev = block->bbPrev; |
| 17015 | |
| 17016 | bPrev->copyEHRegion(block); |
| 17017 | |
| 17018 | // The first block (and only the first block) of a handler has bbCatchTyp set |
| 17019 | bPrev->bbCatchTyp = block->bbCatchTyp; |
| 17020 | block->bbCatchTyp = BBCT_NONE; |
| 17021 | |
| 17022 | EHblkDsc* HBtab; |
| 17023 | EHblkDsc* HBtabEnd; |
| 17024 | |
| 17025 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++) |
| 17026 | { |
| 17027 | /* Multiple pointers in EHblkDsc can point to the same block. We cannot early-out after the first match. */ |
| 17028 | if (HBtab->ebdTryBeg == block) |
| 17029 | { |
| 17030 | #ifdef DEBUG |
| 17031 | if (verbose) |
| 17032 | { |
| 17033 | printf("EH#%u: New first block of try: " FMT_BB "\n" , ehGetIndex(HBtab), bPrev->bbNum); |
| 17034 | } |
| 17035 | #endif // DEBUG |
| 17036 | HBtab->ebdTryBeg = bPrev; |
| 17037 | bPrev->bbFlags |= BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_HAS_LABEL; |
| 17038 | |
| 17039 | // clear the TryBeg flag unless it begins another try region |
| 17040 | if (!bbIsTryBeg(block)) |
| 17041 | { |
| 17042 | block->bbFlags &= ~BBF_TRY_BEG; |
| 17043 | } |
| 17044 | } |
| 17045 | |
| 17046 | if (HBtab->ebdHndBeg == block) |
| 17047 | { |
| 17048 | #ifdef DEBUG |
| 17049 | if (verbose) |
| 17050 | { |
| 17051 | printf("EH#%u: New first block of handler: " FMT_BB "\n" , ehGetIndex(HBtab), bPrev->bbNum); |
| 17052 | } |
| 17053 | #endif // DEBUG |
| 17054 | |
| 17055 | // The first block of a handler has an artificial extra refcount. Transfer that to the new block. |
| 17056 | assert(block->bbRefs > 0); |
| 17057 | block->bbRefs--; |
| 17058 | |
| 17059 | HBtab->ebdHndBeg = bPrev; |
| 17060 | bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL; |
| 17061 | |
| 17062 | #if FEATURE_EH_FUNCLETS |
| 17063 | if (fgFuncletsCreated) |
| 17064 | { |
| 17065 | assert((block->bbFlags & BBF_FUNCLET_BEG) != 0); |
| 17066 | bPrev->bbFlags |= BBF_FUNCLET_BEG; |
| 17067 | block->bbFlags &= ~BBF_FUNCLET_BEG; |
| 17068 | } |
| 17069 | #endif // FEATURE_EH_FUNCLETS |
| 17070 | |
| 17071 | bPrev->bbRefs++; |
| 17072 | |
| 17073 | // If this is a handler for a filter, the last block of the filter will end with |
| 17074 | // a BBJ_EHFILTERRET block whose bbJumpDest is the first block of |
| 17075 | // its handler. So we need to update it to keep things in sync. |
| 17076 | // |
| 17077 | if (HBtab->HasFilter()) |
| 17078 | { |
| 17079 | BasicBlock* bFilterLast = HBtab->BBFilterLast(); |
| 17080 | assert(bFilterLast != nullptr); |
| 17081 | assert(bFilterLast->bbJumpKind == BBJ_EHFILTERRET); |
| 17082 | assert(bFilterLast->bbJumpDest == block); |
| 17083 | #ifdef DEBUG |
| 17084 | if (verbose) |
| 17085 | { |
| 17086 | printf("EH#%u: Updating bbJumpDest for filter ret block: " FMT_BB " => " FMT_BB "\n" , |
| 17087 | ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum); |
| 17088 | } |
| 17089 | #endif // DEBUG |
| 17090 | // Change the bbJumpDest for bFilterLast from the old first 'block' to the new first 'bPrev' |
| 17091 | bFilterLast->bbJumpDest = bPrev; |
| 17092 | } |
| 17093 | } |
| 17094 | |
| 17095 | if (HBtab->HasFilter() && (HBtab->ebdFilter == block)) |
| 17096 | { |
| 17097 | #ifdef DEBUG |
| 17098 | if (verbose) |
| 17099 | { |
| 17100 | printf("EH#%u: New first block of filter: " FMT_BB "\n" , ehGetIndex(HBtab), bPrev->bbNum); |
| 17101 | } |
| 17102 | #endif // DEBUG |
| 17103 | |
| 17104 | // The first block of a filter has an artificial extra refcount. Transfer that to the new block. |
| 17105 | assert(block->bbRefs > 0); |
| 17106 | block->bbRefs--; |
| 17107 | |
| 17108 | HBtab->ebdFilter = bPrev; |
| 17109 | bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL; |
| 17110 | |
| 17111 | #if FEATURE_EH_FUNCLETS |
| 17112 | if (fgFuncletsCreated) |
| 17113 | { |
| 17114 | assert((block->bbFlags & BBF_FUNCLET_BEG) != 0); |
| 17115 | bPrev->bbFlags |= BBF_FUNCLET_BEG; |
| 17116 | block->bbFlags &= ~BBF_FUNCLET_BEG; |
| 17117 | } |
| 17118 | #endif // FEATURE_EH_FUNCLETS |
| 17119 | |
| 17120 | bPrev->bbRefs++; |
| 17121 | } |
| 17122 | } |
| 17123 | } |
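| | |
| | // To summarize the intended net effect (a sketch, not new behavior): after this call 'bPrev' carries |
| | // the EH region info, the try-begin / handler-begin / filter-begin / funclet-begin roles, and the |
| | // artificial extra refcount that the old first block used to carry, while 'block' becomes an |
| | // ordinary interior block of the region. |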
| 17124 | |
| 17125 | /***************************************************************************** |
| 17126 | * We've inserted a new block after 'block' that should be part of the same EH region as 'block'. |
| 17127 | * Update the EH table to make this so. Also, set the new block to have the right EH region data. |
| 17128 | */ |
| 17129 | |
| 17130 | void Compiler::fgExtendEHRegionAfter(BasicBlock* block) |
| 17131 | { |
| 17132 | BasicBlock* newBlk = block->bbNext; |
| 17133 | assert(newBlk != nullptr); |
| 17134 | |
| 17135 | newBlk->copyEHRegion(block); |
| 17136 | newBlk->bbCatchTyp = |
| 17137 | BBCT_NONE; // Only the first block of a catch has this set, and 'newBlk' can't be the first block of a catch. |
| 17138 | |
| 17139 | // TODO-Throughput: if the block is not in an EH region, then we don't need to walk the EH table looking for 'last' |
| 17140 | // block pointers to update. |
| 17141 | ehUpdateLastBlocks(block, newBlk); |
| 17142 | } |
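| | |
| | // Illustrative sketch of the effect: if 'block' was the ebdTryLast/ebdHndLast block of one or more |
| | // EH regions, ehUpdateLastBlocks(block, newBlk) retargets those 'last' pointers to 'newBlk', so every |
| | // region that previously ended at 'block' now ends at (and therefore includes) the inserted block. |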
| 17143 | |
| 17144 | /***************************************************************************** |
| 17145 | * |
| 17146 | * Insert a BasicBlock before the given block. |
| 17147 | */ |
| 17148 | |
| 17149 | BasicBlock* Compiler::fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion) |
| 17150 | { |
| 17151 | // Create a new BasicBlock and chain it in |
| 17152 | |
| 17153 | BasicBlock* newBlk = bbNewBasicBlock(jumpKind); |
| 17154 | newBlk->bbFlags |= BBF_INTERNAL; |
| 17155 | |
| 17156 | fgInsertBBbefore(block, newBlk); |
| 17157 | |
| 17158 | newBlk->bbRefs = 0; |
| 17159 | |
| 17160 | if (newBlk->bbFallsThrough() && block->isRunRarely()) |
| 17161 | { |
| 17162 | newBlk->bbSetRunRarely(); |
| 17163 | } |
| 17164 | |
| 17165 | if (extendRegion) |
| 17166 | { |
| 17167 | fgExtendEHRegionBefore(block); |
| 17168 | } |
| 17169 | else |
| 17170 | { |
| 17171 | // When extendRegion is false the caller is responsible for setting these two values |
| 17172 | newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely |
| 17173 | newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely |
| 17174 | } |
| 17175 | |
| 17176 | // We assume that if the block we are inserting before is in the cold region, then this new |
| 17177 | // block will also be in the cold region. |
| 17178 | newBlk->bbFlags |= (block->bbFlags & BBF_COLD); |
| 17179 | |
| 17180 | return newBlk; |
| 17181 | } |
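| | |
| | // Usage sketch (illustrative only; 'block' and 'target' are hypothetical locals, not a real caller): |
| | // |
| | //   BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true); |
| | //   newBlk->bbJumpDest  = target;  // the caller wires up the jump target... |
| | //   fgAddRefPred(target, newBlk);  // ...and, if pred lists are being maintained, the pred edges |
| | //                                  // (note that newBlk->bbRefs starts out as 0) |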
| 17182 | |
| 17183 | /***************************************************************************** |
| 17184 | * |
| 17185 | * Insert a BasicBlock after the given block. |
| 17186 | */ |
| 17187 | |
| 17188 | BasicBlock* Compiler::fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion) |
| 17189 | { |
| 17190 | // Create a new BasicBlock and chain it in |
| 17191 | |
| 17192 | BasicBlock* newBlk = bbNewBasicBlock(jumpKind); |
| 17193 | newBlk->bbFlags |= BBF_INTERNAL; |
| 17194 | |
| 17195 | fgInsertBBafter(block, newBlk); |
| 17196 | |
| 17197 | newBlk->bbRefs = 0; |
| 17198 | |
| 17199 | if (block->bbFallsThrough() && block->isRunRarely()) |
| 17200 | { |
| 17201 | newBlk->bbSetRunRarely(); |
| 17202 | } |
| 17203 | |
| 17204 | if (extendRegion) |
| 17205 | { |
| 17206 | fgExtendEHRegionAfter(block); |
| 17207 | } |
| 17208 | else |
| 17209 | { |
| 17210 | // When extendRegion is false the caller is responsible for setting these two values |
| 17211 | newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely |
| 17212 | newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely |
| 17213 | } |
| 17214 | |
| 17215 | // If the new block is in the cold region (because the block we are inserting after |
| 17216 | // is in the cold region), mark it as such. |
| 17217 | newBlk->bbFlags |= (block->bbFlags & BBF_COLD); |
| 17218 | |
| 17219 | return newBlk; |
| 17220 | } |
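| | |
| | // Usage sketch (illustrative only; names are hypothetical): append a rarely-run throw helper after |
| | // 'block', keeping it in the same EH region: |
| | // |
| | //   BasicBlock* throwBlk = fgNewBBafter(BBJ_THROW, block, /* extendRegion */ true); |
| | //   throwBlk->bbSetRunRarely(); |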
| 17221 | |
| 17222 | /***************************************************************************** |
| 17223 | * Inserts basic block before existing basic block. |
| 17224 | * |
| 17225 | * If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region. |
| 17226 | * (If insertBeforeBlk is the first block of the funclet region, then 'newBlk' will be the |
| 17227 | * new first block of the funclet region.) |
| 17228 | */ |
| 17229 | void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) |
| 17230 | { |
| 17231 | if (insertBeforeBlk->bbPrev) |
| 17232 | { |
| 17233 | fgInsertBBafter(insertBeforeBlk->bbPrev, newBlk); |
| 17234 | } |
| 17235 | else |
| 17236 | { |
| 17237 | newBlk->setNext(fgFirstBB); |
| 17238 | |
| 17239 | fgFirstBB = newBlk; |
| 17240 | newBlk->bbPrev = nullptr; |
| 17241 | } |
| 17242 | |
| 17243 | #if FEATURE_EH_FUNCLETS |
| 17244 | |
| 17245 | /* Update fgFirstFuncletBB if insertBeforeBlk is the first block of the funclet region. */ |
| 17246 | |
| 17247 | if (fgFirstFuncletBB == insertBeforeBlk) |
| 17248 | { |
| 17249 | fgFirstFuncletBB = newBlk; |
| 17250 | } |
| 17251 | |
| 17252 | #endif // FEATURE_EH_FUNCLETS |
| 17253 | } |
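| | |
| | // Note (a restatement of the code above, for clarity): inserting before fgFirstBB makes 'newBlk' the |
| | // new method entry block, and inserting before fgFirstFuncletBB makes it the new first funclet block. |
| | // No EH info is copied here; that is handled by the callers (e.g., fgNewBBbefore() above). |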
| 17254 | |
| 17255 | /***************************************************************************** |
| 17256 | * Inserts basic block after existing basic block. |
| 17257 | * |
| 17258 | * If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region. |
| 17259 | * (It can't be used to insert a block as the first block of the funclet region). |
| 17260 | */ |
| 17261 | void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk) |
| 17262 | { |
| 17263 | newBlk->bbNext = insertAfterBlk->bbNext; |
| 17264 | |
| 17265 | if (insertAfterBlk->bbNext) |
| 17266 | { |
| 17267 | insertAfterBlk->bbNext->bbPrev = newBlk; |
| 17268 | } |
| 17269 | |
| 17270 | insertAfterBlk->bbNext = newBlk; |
| 17271 | newBlk->bbPrev = insertAfterBlk; |
| 17272 | |
| 17273 | if (fgLastBB == insertAfterBlk) |
| 17274 | { |
| 17275 | fgLastBB = newBlk; |
| 17276 | assert(fgLastBB->bbNext == nullptr); |
| 17277 | } |
| 17278 | } |
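| | |
| | // Sketch of the resulting links (illustrative): after fgInsertBBafter(A, N), where B was A->bbNext: |
| | // |
| | //   A->bbNext == N,  N->bbPrev == A,  N->bbNext == B,  B->bbPrev == N  (when B exists) |
| | // |
| | // and fgLastBB becomes N if A was previously the last block. |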
| 17279 | |
| 17280 | // We have two edges (bAlt => bCur) and (bCur => bNext). |
| 17281 | // |
| 17282 | // Returns true if the weight of (bAlt => bCur) |
| 17283 | // is greater than the weight of (bCur => bNext). |
| 17284 | // We compare the edge weights if we have valid edge weights; |
| 17285 | // otherwise we compare block weights. |
| 17286 | // |
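| | // Worked example (numbers are illustrative only): with valid edge weights, if the edge |
| | // (bAlt => bCur) has [flEdgeWeightMin, flEdgeWeightMax] = [60, 80] and the edge (bCur => bNext) |
| | // has [10, 30], then 60 > 30 and bAlt is considered the better fall through. Without valid edge |
| | // weights, a BBJ_ALWAYS bAlt wins if bAlt->bbWeight > bCur->bbWeight, while a BBJ_COND bAlt must |
| | // be more than twice as heavy, presumably because only part of its weight flows along the taken edge. |
| | // |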
| 17287 | bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) |
| 17288 | { |
| 17289 | // bCur can't be NULL and must be a fall through bbJumpKind |
| 17290 | noway_assert(bCur != nullptr); |
| 17291 | noway_assert(bCur->bbFallsThrough()); |
| 17292 | noway_assert(bAlt != nullptr); |
| 17293 | |
| 17294 | // We only handle the cases when bAlt is a BBJ_ALWAYS or a BBJ_COND |
| 17295 | if ((bAlt->bbJumpKind != BBJ_ALWAYS) && (bAlt->bbJumpKind != BBJ_COND)) |
| 17296 | { |
| 17297 | return false; |
| 17298 | } |
| 17299 | |
| 17300 | // if bAlt doesn't jump to bCur it can't be a better fall through than bCur |
| 17301 | if (bAlt->bbJumpDest != bCur) |
| 17302 | { |
| 17303 | return false; |
| 17304 | } |
| 17305 | |
| 17306 | // Currently bNext is the fall through for bCur |
| 17307 | BasicBlock* bNext = bCur->bbNext; |
| 17308 | noway_assert(bNext != nullptr); |
| 17309 | |
| 17310 | // We will set result to true if bAlt is a better fall through than bCur |
| 17311 | bool result; |
| 17312 | if (fgHaveValidEdgeWeights) |
| 17313 | { |
| 17314 | // We will compare the edge weight for our two choices |
| 17315 | flowList* edgeFromAlt = fgGetPredForBlock(bCur, bAlt); |
| 17316 | flowList* edgeFromCur = fgGetPredForBlock(bNext, bCur); |
| 17317 | noway_assert(edgeFromCur != nullptr); |
| 17318 | noway_assert(edgeFromAlt != nullptr); |
| 17319 | |
| 17320 | result = (edgeFromAlt->flEdgeWeightMin > edgeFromCur->flEdgeWeightMax); |
| 17321 | } |
| 17322 | else |
| 17323 | { |
| 17324 | if (bAlt->bbJumpKind == BBJ_ALWAYS) |
| 17325 | { |
| 17326 | // Our result is true if bAlt's weight is more than bCur's weight |
| 17327 | result = (bAlt->bbWeight > bCur->bbWeight); |
| 17328 | } |
| 17329 | else |
| 17330 | { |
| 17331 | noway_assert(bAlt->bbJumpKind == BBJ_COND); |
| 17332 | // Our result is true if bAlt's weight is more than twice bCur's weight |
| 17333 | result = (bAlt->bbWeight > (2 * bCur->bbWeight)); |
| 17334 | } |
| 17335 | } |
| 17336 | return result; |
| 17337 | } |
| 17338 | |
| 17339 | //------------------------------------------------------------------------ |
| 17340 | // fgCheckEHCanInsertAfterBlock: Determine if a block can be inserted after |
| 17341 | // 'blk' and legally be put in the EH region specified by 'regionIndex'. This |
| 17342 | // can be true if the most nested region the block is in is already 'regionIndex', |
| 17343 | // as we'll just extend the most nested region (and any region ending at the same block). |
| 17344 | // It can also be true if it is the end of (a set of) EH regions, such that |
| 17345 | // inserting the block and properly extending some EH regions (if necessary) |
| 17346 | // puts the block in the correct region. We only consider the case of extending |
| 17347 | // an EH region after 'blk' (that is, to include 'blk' and the newly inserted block); |
| 17348 | // we don't consider inserting a block as the first block of an EH region following 'blk'. |
| 17349 | // |
| 17350 | // Consider this example: |
| 17351 | // |
| 17352 | // try3 try2 try1 |
| 17353 | // |--- | | BB01 |
| 17354 | // | |--- | BB02 |
| 17355 | // | | |--- BB03 |
| 17356 | // | | | BB04 |
| 17357 | // | |--- |--- BB05 |
| 17358 | // | BB06 |
| 17359 | // |----------------- BB07 |
| 17360 | // |
| 17361 | // Passing BB05 and try1/try2/try3 as the region to insert into (as well as putInTryRegion==true) |
| 17362 | // will all return 'true'. Here are the cases: |
| 17363 | // 1. Insert into try1: the most nested EH region BB05 is in is already try1, so we can insert after |
| 17364 | // it and extend try1 (and try2). |
| 17365 | // 2. Insert into try2: we can extend try2, but leave try1 alone. |
| 17366 | // 3. Insert into try3: we can leave try1 and try2 alone, and put the new block just in try3. Note that |
| 17367 | // in this case, after we "loop outwards" in the EH nesting, we get to a place where we're in the middle |
| 17368 | // of the try3 region, not at the end of it. |
| 17369 | // In all cases, it is possible to put a block after BB05 and put it in any of these three 'try' regions legally. |
| 17370 | // |
| 17371 | // Filters are ignored; if 'blk' is in a filter, the answer will be false. |
| 17372 | // |
| 17373 | // Arguments: |
| 17374 | // blk - the BasicBlock we are checking to see if we can insert after. |
| 17375 | // regionIndex - the EH region we want to insert a block into. regionIndex is |
| 17376 | // in the range [0..compHndBBtabCount]; 0 means "main method". |
| 17377 | // putInTryRegion - 'true' if the new block should be inserted in the 'try' region of 'regionIndex'. |
| 17378 | // For regionIndex 0 (the "main method"), this should be 'true'. |
| 17379 | // |
| 17380 | // Return Value: |
| 17381 | // 'true' if a block can be inserted after 'blk' and put in EH region 'regionIndex', else 'false'. |
| 17382 | // |
| 17383 | bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion) |
| 17384 | { |
| 17385 | assert(blk != nullptr); |
| 17386 | assert(regionIndex <= compHndBBtabCount); |
| 17387 | |
| 17388 | if (regionIndex == 0) |
| 17389 | { |
| 17390 | assert(putInTryRegion); |
| 17391 | } |
| 17392 | |
| 17393 | bool inTryRegion; |
| 17394 | unsigned nestedRegionIndex = ehGetMostNestedRegionIndex(blk, &inTryRegion); |
| 17395 | |
| 17396 | bool insertOK = true; |
| 17397 | for (;;) |
| 17398 | { |
| 17399 | if (nestedRegionIndex == regionIndex) |
| 17400 | { |
| 17401 | // This block is in the region we want to be in. We can insert here if it's the right type of region. |
| 17402 | // (If we want to be in the 'try' region, but the block is in the handler region, then inserting a |
| 17403 | // new block after 'blk' can't put it in the 'try' region, and vice-versa, since we only consider |
| 17404 | // extending regions after, not prepending to regions.) |
| 17405 | // This check will be 'true' if we are trying to put something in the main function (as putInTryRegion |
| 17406 | // must be 'true' if regionIndex is zero, and inTryRegion will also be 'true' if nestedRegionIndex is zero). |
| 17407 | insertOK = (putInTryRegion == inTryRegion); |
| 17408 | break; |
| 17409 | } |
| 17410 | else if (nestedRegionIndex == 0) |
| 17411 | { |
| 17412 | // The block is in the main function, but we want to put something in a nested region. We can't do that. |
| 17413 | insertOK = false; |
| 17414 | break; |
| 17415 | } |
| 17416 | |
| 17417 | assert(nestedRegionIndex > 0); |
| 17418 | EHblkDsc* ehDsc = ehGetDsc(nestedRegionIndex - 1); // ehGetDsc uses [0..compHndBBtabCount) form. |
| 17419 | |
| 17420 | if (inTryRegion) |
| 17421 | { |
| 17422 | if (blk != ehDsc->ebdTryLast) |
| 17423 | { |
| 17424 | // Not the last block? Then it must be somewhere else within the try region, so we can't insert here. |
| 17425 | insertOK = false; |
| 17426 | break; // exit the 'for' loop |
| 17427 | } |
| 17428 | } |
| 17429 | else |
| 17430 | { |
| 17431 | // We ignore filters. |
| 17432 | if (blk != ehDsc->ebdHndLast) |
| 17433 | { |
| 17434 | // Not the last block? Then it must be somewhere else within the handler region, so we can't insert |
| 17435 | // here. |
| 17436 | insertOK = false; |
| 17437 | break; // exit the 'for' loop |
| 17438 | } |
| 17439 | } |
| 17440 | |
| 17441 | // Things look good for this region; check the enclosing regions, if any. |
| 17442 | |
| 17443 | nestedRegionIndex = |
| 17444 | ehGetEnclosingRegionIndex(nestedRegionIndex - 1, |
| 17445 | &inTryRegion); // ehGetEnclosingRegionIndex uses [0..compHndBBtabCount) form. |
| 17446 | |
| 17447 | // Convert to [0..compHndBBtabCount] form. |
| 17448 | nestedRegionIndex = (nestedRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : nestedRegionIndex + 1; |
| 17449 | } // end of for(;;) |
| 17450 | |
| 17451 | return insertOK; |
| 17452 | } |
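| | |
| | // Usage sketch (taken from the caller below): fgFindInsertPoint() applies this check to each candidate |
| | // block in its search range, roughly as |
| | // |
| | //   if (!fgCheckEHCanInsertAfterBlock(blk, regionIndex, putInTryRegion)) |
| | //   { |
| | //       continue; // inserting after 'blk' would break EH nesting, so skip it |
| | //   } |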
| 17453 | |
| 17454 | //------------------------------------------------------------------------ |
| 17455 | // Finds the block closest to endBlk in the range [startBlk..endBlk) after which a block can be |
| 17456 | // inserted easily. Note that endBlk cannot be returned; its predecessor is the last block that can |
| 17457 | // be returned. The new block will be put in an EH region described by the arguments regionIndex, |
| 17458 | // putInTryRegion, startBlk, and endBlk (explained below), so it must be legal to put the |
| 17459 | // new block after the insertion location block, give it the specified EH region index, and not break |
| 17460 | // EH nesting rules. This function is careful to choose a block in the correct EH region. However, |
| 17461 | // it assumes that the new block can ALWAYS be placed at the end (just before endBlk). That means |
| 17462 | // that the caller must ensure that is true. |
| 17463 | // |
| 17464 | // Below are the possible cases for the arguments to this method: |
| 17465 | // 1. putInTryRegion == true and regionIndex > 0: |
| 17466 | // Search in the try region indicated by regionIndex. |
| 17467 | // 2. putInTryRegion == false and regionIndex > 0: |
| 17468 | // a. If startBlk is the first block of a filter and endBlk is the block after the end of the |
| 17469 | // filter (that is, the startBlk and endBlk match a filter bounds exactly), then choose a |
| 17470 | // location within this filter region. (Note that, due to IL rules, filters do not have any |
| 17471 | // EH nested within them.) Otherwise, filters are skipped. |
| 17472 | // b. Else, search in the handler region indicated by regionIndex. |
| 17473 | // 3. regionIndex = 0: |
| 17474 | // Search in the entire main method, excluding all EH regions. In this case, putInTryRegion must be true. |
| 17475 | // |
| 17476 | // This method makes sure to find an insertion point which would not cause the inserted block to |
| 17477 | // be put inside any inner try/filter/handler regions. |
| 17478 | // |
| 17479 | // The actual insertion occurs after the returned block. Note that the returned insertion point might |
| 17480 | // be the last block of a more nested EH region, because the new block will be inserted after the insertion |
| 17481 | // point, and will not extend the more nested EH region. For example: |
| 17482 | // |
| 17483 | // try3 try2 try1 |
| 17484 | // |--- | | BB01 |
| 17485 | // | |--- | BB02 |
| 17486 | // | | |--- BB03 |
| 17487 | // | | | BB04 |
| 17488 | // | |--- |--- BB05 |
| 17489 | // | BB06 |
| 17490 | // |----------------- BB07 |
| 17491 | // |
| 17492 | // for regionIndex==try3, putInTryRegion==true, we might return BB05, even though BB05 will have a try index |
| 17493 | // for try1 (the most nested 'try' region the block is in). That's because when we insert after BB05, the new |
| 17494 | // block will be in the correct, desired EH region, since try1 and try2 regions will not be extended to include |
| 17495 | // the inserted block. Furthermore, for regionIndex==try2, putInTryRegion==true, we can also return BB05. In this |
| 17496 | // case, when the new block is inserted, the try1 region remains the same, but we need extend region 'try2' to |
| 17497 | // include the inserted block. (We also need to check all parent regions as well, just in case any parent regions |
| 17498 | // also end on the same block, in which case we would also need to extend the parent regions. This is standard |
| 17499 | // procedure when inserting a block at the end of an EH region.) |
| 17500 | // |
| 17501 | // If nearBlk is non-nullptr then we return the closest block after nearBlk that will work best. |
| 17502 | // |
| 17503 | // We try to find a block in the appropriate region that is not a fallthrough block, so we can insert after it |
| 17504 | // without the need to insert a jump around the inserted block. |
| 17505 | // |
| 17506 | // Note that regionIndex is numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is, "0" is |
| 17507 | // "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1). |
| 17508 | // |
| 17509 | // Arguments: |
| 17510 | // regionIndex - the region index where the new block will be inserted. Zero means entire method; |
| 17511 | // non-zero means either a "try" or a "handler" region, depending on what putInTryRegion says. |
| 17512 | // putInTryRegion - 'true' to put the block in the 'try' region corresponding to 'regionIndex', 'false' |
| 17513 | // to put the block in the handler region. Should be 'true' if regionIndex==0. |
| 17514 | // startBlk - start block of range to search. |
| 17515 | // endBlk - end block of range to search (don't include this block in the range). Can be nullptr to indicate |
| 17516 | // the end of the function. |
| 17517 | // nearBlk - If non-nullptr, try to find an insertion location closely after this block. If nullptr, we insert |
| 17518 | // at the best location found towards the end of the acceptable block range. |
| 17519 | // jumpBlk - When nearBlk is set, this can be set to the block which jumps to bNext->bbNext (TODO: need to review |
| 17520 | // this?) |
| 17521 | // runRarely - true if the block being inserted is expected to be rarely run. This helps determine |
| 17522 | // the best place to put the new block, by putting in a place that has the same 'rarely run' characteristic. |
| 17523 | // |
| 17524 | // Return Value: |
| 17525 | // A block with the desired characteristics, so the new block will be inserted after this one. |
| 17526 | // If there is no suitable location, return nullptr. This should basically never happen. |
| 17527 | // |
| 17528 | BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, |
| 17529 | bool putInTryRegion, |
| 17530 | BasicBlock* startBlk, |
| 17531 | BasicBlock* endBlk, |
| 17532 | BasicBlock* nearBlk, |
| 17533 | BasicBlock* jumpBlk, |
| 17534 | bool runRarely) |
| 17535 | { |
| 17536 | noway_assert(startBlk != nullptr); |
| 17537 | noway_assert(startBlk != endBlk); |
| 17538 | noway_assert((regionIndex == 0 && putInTryRegion) || // Search in the main method |
| 17539 | (putInTryRegion && regionIndex > 0 && |
| 17540 | startBlk->bbTryIndex == regionIndex) || // Search in the specified try region |
| 17541 | (!putInTryRegion && regionIndex > 0 && |
| 17542 | startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region |
| 17543 | |
| 17544 | #ifdef DEBUG |
| 17545 | // Assert that startBlk precedes endBlk in the block list. |
| 17546 | // We don't want to use bbNum to assert this condition, as we cannot depend on the block numbers being |
| 17547 | // sequential at all times. |
| 17548 | for (BasicBlock* b = startBlk; b != endBlk; b = b->bbNext) |
| 17549 | { |
| 17550 | assert(b != nullptr); // We reached the end of the block list, but never found endBlk. |
| 17551 | } |
| 17552 | #endif // DEBUG |
| 17553 | |
| 17554 | JITDUMP("fgFindInsertPoint(regionIndex=%u, putInTryRegion=%s, startBlk=" FMT_BB ", endBlk=" FMT_BB |
| 17555 | ", nearBlk=" FMT_BB ", " |
| 17556 | "jumpBlk=" FMT_BB ", runRarely=%s)\n" , |
| 17557 | regionIndex, dspBool(putInTryRegion), startBlk->bbNum, (endBlk == nullptr) ? 0 : endBlk->bbNum, |
| 17558 | (nearBlk == nullptr) ? 0 : nearBlk->bbNum, (jumpBlk == nullptr) ? 0 : jumpBlk->bbNum, dspBool(runRarely)); |
| 17559 | |
| 17560 | bool insertingIntoFilter = false; |
| 17561 | if (!putInTryRegion) |
| 17562 | { |
| 17563 | EHblkDsc* const dsc = ehGetDsc(regionIndex - 1); |
| 17564 | insertingIntoFilter = dsc->HasFilter() && (startBlk == dsc->ebdFilter) && (endBlk == dsc->ebdHndBeg); |
| 17565 | } |
| 17566 | |
| 17567 | bool reachedNear = false; // Have we reached 'nearBlk' in our search? If not, we'll keep searching. |
| 17568 | bool inFilter = false; // Are we in a filter region that we need to skip? |
| 17569 | BasicBlock* bestBlk = |
| 17570 | nullptr; // Set to the best insertion point we've found so far that meets all the EH requirements. |
| 17571 | BasicBlock* goodBlk = |
| 17572 | nullptr; // Set to an acceptable insertion point that we'll use if we don't find a 'best' option. |
| 17573 | BasicBlock* blk; |
| 17574 | |
| 17575 | if (nearBlk != nullptr) |
| 17576 | { |
| 17577 | // Does the nearBlk precede the startBlk? |
| 17578 | for (blk = nearBlk; blk != nullptr; blk = blk->bbNext) |
| 17579 | { |
| 17580 | if (blk == startBlk) |
| 17581 | { |
| 17582 | reachedNear = true; |
| 17583 | break; |
| 17584 | } |
| 17585 | else if (blk == endBlk) |
| 17586 | { |
| 17587 | break; |
| 17588 | } |
| 17589 | } |
| 17590 | } |
| 17591 | |
| 17592 | for (blk = startBlk; blk != endBlk; blk = blk->bbNext) |
| 17593 | { |
| 17594 | // The only way (blk == nullptr) could be true is if the caller passed an endBlk that preceded startBlk in the |
| 17595 | // block list, or if endBlk isn't in the block list at all. In DEBUG, we'll instead hit the similar |
| 17596 | // well-formedness assert earlier in this function. |
| 17597 | noway_assert(blk != nullptr); |
| 17598 | |
| 17599 | if (blk == nearBlk) |
| 17600 | { |
| 17601 | reachedNear = true; |
| 17602 | } |
| 17603 | |
| 17604 | if (blk->bbCatchTyp == BBCT_FILTER) |
| 17605 | { |
| 17606 | // Record the fact that we entered a filter region, so we don't insert into filters... |
| 17607 | // Unless the caller actually wanted the block inserted in this exact filter region. |
| 17608 | if (!insertingIntoFilter || (blk != startBlk)) |
| 17609 | { |
| 17610 | inFilter = true; |
| 17611 | } |
| 17612 | } |
| 17613 | else if (blk->bbCatchTyp == BBCT_FILTER_HANDLER) |
| 17614 | { |
| 17615 | // Record the fact that we exited a filter region. |
| 17616 | inFilter = false; |
| 17617 | } |
| 17618 | |
| 17619 | // Don't insert a block inside this filter region. |
| 17620 | if (inFilter) |
| 17621 | { |
| 17622 | continue; |
| 17623 | } |
| 17624 | |
| 17625 | // Note that the new block will be inserted AFTER "blk". We check to make sure that doing so |
| 17626 | // would put the block in the correct EH region. We make an assumption here that you can |
| 17627 | // ALWAYS insert the new block before "endBlk" (that is, at the end of the search range) |
| 17628 | // and be in the correct EH region. This must be guaranteed by the caller (as it is by |
| 17629 | // fgNewBBinRegion(), which passes the search range as an exact EH region block range). |
| 17630 | // Because of this assumption, we only check the EH information for blocks before the last block. |
| 17631 | if (blk->bbNext != endBlk) |
| 17632 | { |
| 17633 | // We are in the middle of the search range. We can't insert the new block in |
| 17634 | // an inner try or handler region. We can, however, set the insertion |
| 17635 | // point to the last block of an EH try/handler region, if the enclosing |
| 17636 | // region is the region we wish to insert in. (Since multiple regions can |
| 17637 | // end at the same block, we need to search outwards, checking that the |
| 17638 | // block is the last block of every EH region out to the region we want |
| 17639 | // to insert in.) This is especially useful for putting a call-to-finally |
| 17640 | // block on AMD64 immediately after its corresponding 'try' block, so in the |
| 17641 | // common case, we'll just fall through to it. For example: |
| 17642 | // |
| 17643 | // BB01 |
| 17644 | // BB02 -- first block of try |
| 17645 | // BB03 |
| 17646 | // BB04 -- last block of try |
| 17647 | // BB05 -- first block of finally |
| 17648 | // BB06 |
| 17649 | // BB07 -- last block of handler |
| 17650 | // BB08 |
| 17651 | // |
| 17652 | // Assume there is only one try/finally, so BB01 and BB08 are in the "main function". |
| 17653 | // For AMD64 call-to-finally, we'll want to insert the BBJ_CALLFINALLY in |
| 17654 | // the main function, immediately after BB04. This allows us to do that. |
| 17655 | |
| 17656 | if (!fgCheckEHCanInsertAfterBlock(blk, regionIndex, putInTryRegion)) |
| 17657 | { |
| 17658 | // Can't insert here. |
| 17659 | continue; |
| 17660 | } |
| 17661 | } |
| 17662 | |
| 17663 | // Look for an insert location: |
| 17664 | // 1. We want blocks that don't end with a fall through, |
| 17665 | // 2. Also, when blk equals nearBlk we may want to insert here. |
| 17666 | if (!blk->bbFallsThrough() || (blk == nearBlk)) |
| 17667 | { |
| 17668 | bool updateBestBlk = true; // We will probably update the bestBlk |
| 17669 | |
| 17670 | // If blk falls through then we must decide whether to use the nearBlk |
| 17671 | // hint |
| 17672 | if (blk->bbFallsThrough()) |
| 17673 | { |
| 17674 | noway_assert(blk == nearBlk); |
| 17675 | if (jumpBlk != nullptr) |
| 17676 | { |
| 17677 | updateBestBlk = fgIsBetterFallThrough(blk, jumpBlk); |
| 17678 | } |
| 17679 | else |
| 17680 | { |
| 17681 | updateBestBlk = false; |
| 17682 | } |
| 17683 | } |
| 17684 | |
| 17685 | // If we already have a best block, see if the 'runRarely' flags influences |
| 17686 | // our choice. If we want a runRarely insertion point, and the existing best |
| 17687 | // block is run rarely but the current block isn't run rarely, then don't |
| 17688 | // update the best block. |
| 17689 | // TODO-CQ: We should also handle the reverse case, where runRarely is false (we |
| 17690 | // want a non-rarely-run block), but bestBlock->isRunRarely() is true. In that |
| 17691 | // case, we should update the block, also. Probably what we want is: |
| 17692 | // (bestBlk->isRunRarely() != runRarely) && (blk->isRunRarely() == runRarely) |
| 17693 | if (updateBestBlk && (bestBlk != nullptr) && runRarely && bestBlk->isRunRarely() && !blk->isRunRarely()) |
| 17694 | { |
| 17695 | updateBestBlk = false; |
| 17696 | } |
| 17697 | |
| 17698 | if (updateBestBlk) |
| 17699 | { |
| 17700 | // We found a 'best' insertion location, so save it away. |
| 17701 | bestBlk = blk; |
| 17702 | |
| 17703 | // If we've reached nearBlk, we've satisfied all the criteria, |
| 17704 | // so we're done. |
| 17705 | if (reachedNear) |
| 17706 | { |
| 17707 | goto DONE; |
| 17708 | } |
| 17709 | |
| 17710 | // If we haven't reached nearBlk, keep looking for a 'best' location, just |
| 17711 | // in case we'll find one at or after nearBlk. If no nearBlk was specified, |
| 17712 | // we prefer inserting towards the end of the given range, so keep looking |
| 17713 | // for more acceptable insertion locations. |
| 17714 | } |
| 17715 | } |
| 17716 | |
| 17717 | // No need to update goodBlk after we have set bestBlk, but we could still find a better |
| 17718 | // bestBlk, so keep looking. |
| 17719 | if (bestBlk != nullptr) |
| 17720 | { |
| 17721 | continue; |
| 17722 | } |
| 17723 | |
| 17724 | // Set the current block as a "good enough" insertion point, if it meets certain criteria. |
| 17725 | // We'll return this block if we don't find a "best" block in the search range. The block |
| 17726 | // can't be a BBJ_CALLFINALLY of a BBJ_CALLFINALLY/BBJ_ALWAYS pair (since we don't want |
| 17727 | // to insert anything between these two blocks). Otherwise, we can use it. However, |
| 17728 | // if we'd previously chosen a BBJ_COND block, then we'd prefer the "good" block to be |
| 17729 | // something else. We keep updating it until we've reached the 'nearBlk', to push it as |
| 17730 | // close to endBlk as possible. |
| 17731 | if (!blk->isBBCallAlwaysPair()) |
| 17732 | { |
| 17733 | if (goodBlk == nullptr) |
| 17734 | { |
| 17735 | goodBlk = blk; |
| 17736 | } |
| 17737 | else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND)) |
| 17738 | { |
| 17739 | if ((blk == nearBlk) || !reachedNear) |
| 17740 | { |
| 17741 | goodBlk = blk; |
| 17742 | } |
| 17743 | } |
| 17744 | } |
| 17745 | } |
| 17746 | |
| 17747 | // If we didn't find a non-fall_through block, then insert at the last good block. |
| 17748 | |
| 17749 | if (bestBlk == nullptr) |
| 17750 | { |
| 17751 | bestBlk = goodBlk; |
| 17752 | } |
| 17753 | |
| 17754 | DONE: |
| 17755 | |
| 17756 | #if defined(JIT32_GCENCODER) |
| 17757 | // If we are inserting into a filter and the best block is the end of the filter region, we need to |
| 17758 | // insert after its predecessor instead: the JIT32 GC encoding used by the x86 CLR ABI states that the |
| 17759 | // terminal block of a filter region is its exit block. If the filter region consists of a single block, |
| 17760 | // a new block cannot be inserted without either splitting the single block before inserting a new block |
| 17761 | // or inserting the new block before the single block and updating the filter description such that the |
| 17762 | // inserted block is marked as the entry block for the filter. Because this sort of split can be complex |
| 17763 | // (especially given that it must ensure that the liveness of the exception object is properly tracked), |
| 17764 | // we avoid this situation by never generating single-block filters on x86 (see impPushCatchArgOnStack). |
| 17765 | if (insertingIntoFilter && (bestBlk == endBlk->bbPrev)) |
| 17766 | { |
| 17767 | assert(bestBlk != startBlk); |
| 17768 | bestBlk = bestBlk->bbPrev; |
| 17769 | } |
| 17770 | #endif // defined(JIT32_GCENCODER) |
| 17771 | |
| 17772 | return bestBlk; |
| 17773 | } |
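| | |
| | // Usage sketch (mirrors the caller below): fgNewBBinRegion() computes the EH search range and then does |
| | // |
| | //   afterBlk = fgFindInsertPoint(regionIndex, putInTryRegion, startBlk, endBlk, nearBlk, nullptr, runRarely); |
| | // |
| | // and inserts the new block immediately after the returned 'afterBlk' via fgNewBBinRegionWorker(). |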
| 17774 | |
| 17775 | //------------------------------------------------------------------------ |
| 17776 | // Creates a new BasicBlock and inserts it in a specific EH region, given by 'tryIndex', 'hndIndex', and 'putInFilter'. |
| 17777 | // |
| 17778 | // If 'putInFilter' is true, then the block is inserted in the filter region given by 'hndIndex'. In this case, tryIndex |
| 17779 | // must be a less nested EH region (that is, tryIndex > hndIndex). |
| 17780 | // |
| 17781 | // Otherwise, the block is inserted in either the try region or the handler region, depending on which one is the inner |
| 17782 | // region. In other words, if the try region indicated by tryIndex is nested in the handler region indicated by |
| 17783 | // hndIndex, then the new BB will be created in the try region, and vice versa. |
| 17785 | // |
| 17786 | // Note that tryIndex and hndIndex are numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is, |
| 17787 | // "0" is "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1). |
| 17788 | // |
| 17789 | // To be more specific, this function will create a new BB in one of the following 5 regions (if putInFilter is false): |
| 17790 | // 1. When tryIndex = 0 and hndIndex = 0: |
| 17791 | // The new BB will be created in the method region. |
| 17792 | // 2. When tryIndex != 0 and hndIndex = 0: |
| 17793 | // The new BB will be created in the try region indicated by tryIndex. |
| 17794 | // 3. When tryIndex == 0 and hndIndex != 0: |
| 17795 | // The new BB will be created in the handler region indicated by hndIndex. |
| 17796 | // 4. When tryIndex != 0 and hndIndex != 0 and tryIndex < hndIndex: |
| 17797 | // In this case, the try region is nested inside the handler region. Therefore, the new BB will be created |
| 17798 | // in the try region indicated by tryIndex. |
| 17799 | // 5. When tryIndex != 0 and hndIndex != 0 and tryIndex > hndIndex: |
| 17800 | // In this case, the handler region is nested inside the try region. Therefore, the new BB will be created |
| 17801 | // in the handler region indicated by hndIndex. |
| 17802 | // |
| 17803 | // Note that if tryIndex != 0 and hndIndex != 0 then tryIndex must not be equal to hndIndex (this makes sense because |
| 17804 | // if they are equal, you are asking to put the new block in both the try and handler, which is impossible). |
| 17805 | // |
| 17806 | // The BasicBlock will not be inserted inside an EH region that is more nested than the requested tryIndex/hndIndex |
| 17807 | // region (so the function is careful to skip more nested EH regions when searching for a place to put the new block). |
| 17808 | // |
| 17809 | // This function cannot be used to insert a block as the first block of any region. It always inserts a block after |
| 17810 | // an existing block in the given region. |
| 17811 | // |
| 17812 | // If nearBlk is nullptr, or the block is run rarely, then the new block is assumed to be run rarely. |
| 17813 | // |
| 17814 | // Arguments: |
| 17815 | // jumpKind - the jump kind of the new block to create. |
| 17816 | // tryIndex - the try region to insert the new block in, described above. This must be a number in the range |
| 17817 | // [0..compHndBBtabCount]. |
| 17818 | // hndIndex - the handler region to insert the new block in, described above. This must be a number in the range |
| 17819 | // [0..compHndBBtabCount]. |
| 17820 | // nearBlk - insert the new block closely after this block, if possible. If nullptr, put the new block anywhere |
| 17821 | // in the requested region. |
| 17822 | // putInFilter - put the new block in the filter region given by hndIndex, as described above. |
| 17823 | // runRarely - 'true' if the new block is run rarely. |
| 17824 | // insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only |
| 17825 | // implemented when inserting into the main function (not into any EH region). |
| 17826 | // |
| 17827 | // Return Value: |
| 17828 | // The new block. |
| 17829 | |
| 17830 | BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, |
| 17831 | unsigned tryIndex, |
| 17832 | unsigned hndIndex, |
| 17833 | BasicBlock* nearBlk, |
| 17834 | bool putInFilter /* = false */, |
| 17835 | bool runRarely /* = false */, |
| 17836 | bool insertAtEnd /* = false */) |
| 17837 | { |
| 17838 | assert(tryIndex <= compHndBBtabCount); |
| 17839 | assert(hndIndex <= compHndBBtabCount); |
| 17840 | |
| 17841 | /* afterBlk is the block which will precede the newBB */ |
| 17842 | BasicBlock* afterBlk; |
| 17843 | |
| 17844 | // start and end limit for inserting the block |
| 17845 | BasicBlock* startBlk = nullptr; |
| 17846 | BasicBlock* endBlk = nullptr; |
| 17847 | |
| 17848 | bool putInTryRegion = true; |
| 17849 | unsigned regionIndex = 0; |
| 17850 | |
| 17851 | // First, figure out which region (the "try" region or the "handler" region) to put the newBB in. |
| 17852 | if ((tryIndex == 0) && (hndIndex == 0)) |
| 17853 | { |
| 17854 | assert(!putInFilter); |
| 17855 | |
| 17856 | endBlk = fgEndBBAfterMainFunction(); // don't put new BB in funclet region |
| 17857 | |
| 17858 | if (insertAtEnd || (nearBlk == nullptr)) |
| 17859 | { |
| 17860 | /* We'll just insert the block at the end of the method, before the funclets */ |
| 17861 | |
| 17862 | afterBlk = fgLastBBInMainFunction(); |
| 17863 | goto _FoundAfterBlk; |
| 17864 | } |
| 17865 | else |
| 17866 | { |
| 17867 | // We'll search through the entire method |
| 17868 | startBlk = fgFirstBB; |
| 17869 | } |
| 17870 | |
| 17871 | noway_assert(regionIndex == 0); |
| 17872 | } |
| 17873 | else |
| 17874 | { |
| 17875 | noway_assert(tryIndex > 0 || hndIndex > 0); |
| 17876 | PREFIX_ASSUME(tryIndex <= compHndBBtabCount); |
| 17877 | PREFIX_ASSUME(hndIndex <= compHndBBtabCount); |
| 17878 | |
| 17879 | // Decide which region to put in, the "try" region or the "handler" region. |
| 17880 | if (tryIndex == 0) |
| 17881 | { |
| 17882 | noway_assert(hndIndex > 0); |
| 17883 | putInTryRegion = false; |
| 17884 | } |
| 17885 | else if (hndIndex == 0) |
| 17886 | { |
| 17887 | noway_assert(tryIndex > 0); |
| 17888 | noway_assert(putInTryRegion); |
| 17889 | assert(!putInFilter); |
| 17890 | } |
| 17891 | else |
| 17892 | { |
| 17893 | noway_assert(tryIndex > 0 && hndIndex > 0 && tryIndex != hndIndex); |
| 17894 | putInTryRegion = (tryIndex < hndIndex); |
| 17895 | } |
| 17896 | |
| 17897 | if (putInTryRegion) |
| 17898 | { |
| 17899 | // Try region is the inner region. |
| 17900 | // In other words, try region must be nested inside the handler region. |
| 17901 | noway_assert(hndIndex == 0 || bbInHandlerRegions(hndIndex - 1, ehGetDsc(tryIndex - 1)->ebdTryBeg)); |
| 17902 | assert(!putInFilter); |
| 17903 | } |
| 17904 | else |
| 17905 | { |
| 17906 | // Handler region is the inner region. |
| 17907 | // In other words, handler region must be nested inside the try region. |
| 17908 | noway_assert(tryIndex == 0 || bbInTryRegions(tryIndex - 1, ehGetDsc(hndIndex - 1)->ebdHndBeg)); |
| 17909 | } |
| 17910 | |
| 17911 | // Figure out the start and end block range to search for an insertion location. Pick the beginning and |
| 17912 | // ending blocks of the target EH region (the 'endBlk' is one past the last block of the EH region, to make |
| 17913 | // loop iteration easier). Note that, after funclets have been created (for FEATURE_EH_FUNCLETS), |
| 17914 | // this linear block range will not include blocks of handlers for try/handler clauses nested within |
| 17915 | // this EH region, as those blocks have been extracted as funclets. That is ok, though, because we don't |
| 17916 | // want to insert a block in any nested EH region. |
| 17917 | |
| 17918 | if (putInTryRegion) |
| 17919 | { |
| 17920 | // We will put the newBB in the try region. |
| 17921 | EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1); |
| 17922 | startBlk = ehDsc->ebdTryBeg; |
| 17923 | endBlk = ehDsc->ebdTryLast->bbNext; |
| 17924 | regionIndex = tryIndex; |
| 17925 | } |
| 17926 | else if (putInFilter) |
| 17927 | { |
| 17928 | // We will put the newBB in the filter region. |
| 17929 | EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1); |
| 17930 | startBlk = ehDsc->ebdFilter; |
| 17931 | endBlk = ehDsc->ebdHndBeg; |
| 17932 | regionIndex = hndIndex; |
| 17933 | } |
| 17934 | else |
| 17935 | { |
| 17936 | // We will put the newBB in the handler region. |
| 17937 | EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1); |
| 17938 | startBlk = ehDsc->ebdHndBeg; |
| 17939 | endBlk = ehDsc->ebdHndLast->bbNext; |
| 17940 | regionIndex = hndIndex; |
| 17941 | } |
| 17942 | |
| 17943 | noway_assert(regionIndex > 0); |
| 17944 | } |
| 17945 | |
| 17946 | // Now find the insertion point. |
| 17947 | afterBlk = fgFindInsertPoint(regionIndex, putInTryRegion, startBlk, endBlk, nearBlk, nullptr, runRarely); |
| 17948 | |
| 17949 | _FoundAfterBlk:; |
| 17950 | |
| 17951 | /* We have decided to insert the block after 'afterBlk'. */ |
| 17952 | noway_assert(afterBlk != nullptr); |
| 17953 | |
| 17954 | JITDUMP("fgNewBBinRegion(jumpKind=%u, tryIndex=%u, hndIndex=%u, putInFilter=%s, runRarely=%s, insertAtEnd=%s): " |
| 17955 | "inserting after " FMT_BB "\n" , |
| 17956 | jumpKind, tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd), |
| 17957 | afterBlk->bbNum); |
| 17958 | |
| 17959 | return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion); |
| 17960 | } |
| 17961 | |
| 17962 | //------------------------------------------------------------------------ |
| 17963 | // Creates a new BasicBlock and inserts it in the same EH region as 'srcBlk'. |
| 17964 | // |
| 17965 | // See the implementation of fgNewBBinRegion() used by this one for more notes. |
| 17966 | // |
| 17967 | // Arguments: |
| 17968 | // jumpKind - the jump kind of the new block to create. |
| 17969 | // srcBlk - insert the new block in the same EH region as this block, and closely after it if possible. |
| 17970 | // |
| 17971 | // Return Value: |
| 17972 | // The new block. |
| 17973 | |
| 17974 | BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, |
| 17975 | BasicBlock* srcBlk, |
| 17976 | bool runRarely /* = false */, |
| 17977 | bool insertAtEnd /* = false */) |
| 17978 | { |
| 17979 | assert(srcBlk != nullptr); |
| 17980 | |
| 17981 | const unsigned tryIndex = srcBlk->bbTryIndex; |
| 17982 | const unsigned hndIndex = srcBlk->bbHndIndex; |
| 17983 | bool putInFilter = false; |
| 17984 | |
| 17985 | // Check to see if we need to put the new block in a filter. We do if srcBlk is in a filter. |
| 17986 | // This can only be true if there is a handler index, and the handler region is more nested than the |
| 17987 | // try region (if any). This is because no EH regions can be nested within a filter. |
| 17988 | if (BasicBlock::ehIndexMaybeMoreNested(hndIndex, tryIndex)) |
| 17989 | { |
| 17990 | assert(hndIndex != 0); // If hndIndex is more nested, we must be in some handler! |
| 17991 | putInFilter = ehGetDsc(hndIndex - 1)->InFilterRegionBBRange(srcBlk); |
| 17992 | } |
| 17993 | |
| 17994 | return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, putInFilter, runRarely, insertAtEnd); |
| 17995 | } |
| 17996 | |
| 17997 | //------------------------------------------------------------------------ |
| 17998 | // Creates a new BasicBlock and inserts it at the end of the function. |
| 17999 | // |
| 18000 | // See the implementation of fgNewBBinRegion() used by this one for more notes. |
| 18001 | // |
| 18002 | // Arguments: |
| 18003 | // jumpKind - the jump kind of the new block to create. |
| 18004 | // |
| 18005 | // Return Value: |
| 18006 | // The new block. |
| 18007 | |
| 18008 | BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind) |
| 18009 | { |
| 18010 | return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, |
| 18011 | /* insertAtEnd */ true); |
| 18012 | } |
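| | |
| | // Usage sketch (hypothetical caller, for illustration only): append an internal helper block at the |
| | // end of the main method body, outside all EH regions, and protect it from removal: |
| | // |
| | //   BasicBlock* helperBlk = fgNewBBinRegion(BBJ_THROW); |
| | //   helperBlk->bbFlags |= BBF_DONT_REMOVE; |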
| 18013 | |
| 18014 | //------------------------------------------------------------------------ |
| 18015 | // Creates a new BasicBlock, and inserts it after 'afterBlk'. |
| 18016 | // |
| 18017 | // The block cannot be inserted into a more nested try/handler region than that specified by 'regionIndex'. |
| 18018 | // (It is given exactly 'regionIndex'.) Thus, the parameters must be passed to ensure proper EH nesting |
| 18019 | // rules are followed. |
| 18020 | // |
| 18021 | // Arguments: |
| 18022 | // jumpKind - the jump kind of the new block to create. |
| 18023 | // afterBlk - insert the new block after this one. |
| 18024 | // regionIndex - the block will be put in this EH region. |
| 18025 | // putInTryRegion - If true, put the new block in the 'try' region corresponding to 'regionIndex', and |
| 18026 | // set its handler index to the most nested handler region enclosing that 'try' region. |
| 18027 | // Otherwise, put the block in the handler region specified by 'regionIndex', and set its 'try' |
| 18028 | // index to the most nested 'try' region enclosing that handler region. |
| 18029 | // |
| 18030 | // Return Value: |
| 18031 | // The new block. |
| 18032 | |
| 18033 | BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, |
| 18034 | BasicBlock* afterBlk, |
| 18035 | unsigned regionIndex, |
| 18036 | bool putInTryRegion) |
| 18037 | { |
| 18038 | /* Insert the new block */ |
| 18039 | BasicBlock* afterBlkNext = afterBlk->bbNext; |
| 18040 | (void)afterBlkNext; // prevent "unused variable" error from GCC |
| 18041 | BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false); |
| 18042 | |
| 18043 | if (putInTryRegion) |
| 18044 | { |
| 18045 | noway_assert(regionIndex <= MAX_XCPTN_INDEX); |
| 18046 | newBlk->bbTryIndex = (unsigned short)regionIndex; |
| 18047 | newBlk->bbHndIndex = bbFindInnermostHandlerRegionContainingTryRegion(regionIndex); |
| 18048 | } |
| 18049 | else |
| 18050 | { |
| 18051 | newBlk->bbTryIndex = bbFindInnermostTryRegionContainingHandlerRegion(regionIndex); |
| 18052 | noway_assert(regionIndex <= MAX_XCPTN_INDEX); |
| 18053 | newBlk->bbHndIndex = (unsigned short)regionIndex; |
| 18054 | } |
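| | // For example (illustration only): if 'regionIndex' is the 1-based index of some 'try' region and |
| | // 'putInTryRegion' is true, the new block gets bbTryIndex == regionIndex and bbHndIndex set to the |
| | // innermost handler region enclosing that 'try' (presumably 0 if there is none); the 'else' path is |
| | // the symmetric handler-region case. |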
| 18055 | |
| 18056 | // We're going to compare for equal try regions (to handle the case of 'mutually protect' |
| 18057 | // regions). We need to save off the current try region, otherwise we might change it |
| 18058 | // before it gets compared later, thereby making future comparisons fail. |
| 18059 | |
| 18060 | BasicBlock* newTryBeg; |
| 18061 | BasicBlock* newTryLast; |
| 18062 | (void)ehInitTryBlockRange(newBlk, &newTryBeg, &newTryLast); |
| 18063 | |
| 18064 | unsigned XTnum; |
| 18065 | EHblkDsc* HBtab; |
| 18066 | |
| 18067 | for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 18068 | { |
| 18069 | // Is afterBlk at the end of a try region? |
| 18070 | if (HBtab->ebdTryLast == afterBlk) |
| 18071 | { |
| 18072 | noway_assert(afterBlkNext == newBlk->bbNext); |
| 18073 | |
| 18074 | bool extendTryRegion = false; |
| 18075 | if (newBlk->hasTryIndex()) |
| 18076 | { |
| 18077 | // We're adding a block after the last block of some try region. Do |
| 18078 | // we extend the try region to include the block, or not? |
| 18079 | // If the try region is exactly the same as the try region |
| 18080 | // associated with the new block (based on the block's try index, |
| 18081 | // which represents the innermost try the block is a part of), then |
| 18082 | // we extend it. |
| 18083 | // If the try region is a "parent" try region -- an enclosing try region |
| 18084 | // that has the same last block as the new block's try region -- then |
| 18085 | // we also extend. For example: |
| 18086 | // try { // 1 |
| 18087 | // ... |
| 18088 | // try { // 2 |
| 18089 | // ... |
| 18090 | // } /* 2 */ } /* 1 */ |
| 18091 | // This example is meant to indicate that both try regions 1 and 2 end at |
| 18092 | // the same block, and we're extending 2. Thus, we must also extend 1. If we |
| 18093 | // only extended 2, we would break proper nesting. (Dev11 bug 137967) |
| 18094 | |
| 18095 | extendTryRegion = HBtab->ebdIsSameTry(newTryBeg, newTryLast) || bbInTryRegions(XTnum, newBlk); |
| 18096 | } |
| 18097 | |
| 18098 | // Does newBlk extend this try region? |
| 18099 | if (extendTryRegion) |
| 18100 | { |
| 18101 | // Yes, newBlk extends this try region |
| 18102 | |
| 18103 | // newBlk is now the new try region's last block |
| 18104 | fgSetTryEnd(HBtab, newBlk); |
| 18105 | } |
| 18106 | } |
| 18107 | |
| 18108 | // Is afterBlk at the end of a handler region? |
| 18109 | if (HBtab->ebdHndLast == afterBlk) |
| 18110 | { |
| 18111 | noway_assert(afterBlkNext == newBlk->bbNext); |
| 18112 | |
| 18113 | // Does newBlk extend this handler region? |
| 18114 | bool extendHndRegion = false; |
| 18115 | if (newBlk->hasHndIndex()) |
| 18116 | { |
| 18117 | // We're adding a block after the last block of some handler region. Do |
| 18118 | // we extend the handler region to include the block, or not? |
| 18119 | // If the handler region is exactly the same as the handler region |
| 18120 | // associated with the new block (based on the block's handler index, |
| 18121 | // which represents the innermost handler the block is a part of), then |
| 18122 | // we extend it. |
| 18123 | // If the handler region is a "parent" handler region -- an enclosing |
| 18124 | // handler region that has the same last block as the new block's handler |
| 18125 | // region -- then we also extend. For example: |
| 18126 | // catch { // 1 |
| 18127 | // ... |
| 18128 | // catch { // 2 |
| 18129 | // ... |
| 18130 | // } /* 2 */ } /* 1 */ |
| 18131 | // This example is meant to indicate that both handler regions 1 and 2 end at |
| 18132 | // the same block, and we're extending 2. Thus, we must also extend 1. If we |
| 18133 | // only extended 2, we would break proper nesting. (Dev11 bug 372051) |
| 18134 | |
| 18135 | extendHndRegion = bbInHandlerRegions(XTnum, newBlk); |
| 18136 | } |
| 18137 | |
| 18138 | if (extendHndRegion) |
| 18139 | { |
| 18140 | // Yes, newBlk extends this handler region |
| 18141 | |
| 18142 | // newBlk is now the last block of the handler. |
| 18143 | fgSetHndEnd(HBtab, newBlk); |
| 18144 | } |
| 18145 | } |
| 18146 | } |
| 18147 | |
| 18148 | /* If afterBlk falls through, we insert a jump around newBlk */ |
| 18149 | fgConnectFallThrough(afterBlk, newBlk->bbNext); |
| 18150 | |
| 18151 | #ifdef DEBUG |
| 18152 | fgVerifyHandlerTab(); |
| 18153 | #endif |
| 18154 | |
| 18155 | return newBlk; |
| 18156 | } |
| 18157 | |
| 18158 | /***************************************************************************** |
| 18159 | */ |
| 18160 | |
| 18161 | /* static */ |
| 18162 | unsigned Compiler::acdHelper(SpecialCodeKind codeKind) |
| 18163 | { |
| 18164 | switch (codeKind) |
| 18165 | { |
| 18166 | case SCK_RNGCHK_FAIL: |
| 18167 | return CORINFO_HELP_RNGCHKFAIL; |
| 18168 | case SCK_ARG_EXCPN: |
| 18169 | return CORINFO_HELP_THROW_ARGUMENTEXCEPTION; |
| 18170 | case SCK_ARG_RNG_EXCPN: |
| 18171 | return CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION; |
| 18172 | case SCK_DIV_BY_ZERO: |
| 18173 | return CORINFO_HELP_THROWDIVZERO; |
| 18174 | case SCK_ARITH_EXCPN: |
| 18175 | return CORINFO_HELP_OVERFLOW; |
| 18176 | default: |
| 18177 | assert(!"Bad codeKind"); |
| 18178 | return 0; |
| 18179 | } |
| 18180 | } |
| 18181 | |
| 18182 | //------------------------------------------------------------------------ |
| 18183 | // fgAddCodeRef: Find/create an added code entry associated with the given block and with the given kind. |
| 18184 | // |
| 18185 | // Arguments: |
| 18186 | // srcBlk - the block that needs an entry; |
| 18187 | // refData - the index to use as the cache key for sharing throw blocks; |
| 18188 | // kind - the kind of exception; |
| 18189 | // |
| 18190 | // Return Value: |
| 18191 | // The target throw helper block or nullptr if throw helper blocks are disabled. |
| 18192 | // |
| 18193 | BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind) |
| 18194 | { |
| 18195 | // Record that the code will call a THROW_HELPER |
| 18196 | // so on Windows Amd64 we can allocate the 4 outgoing |
| 18197 | // arg slots on the stack frame if there are no other calls. |
| 18198 | compUsesThrowHelper = true; |
| 18199 | |
| 18200 | if (!fgUseThrowHelperBlocks()) |
| 18201 | { |
| 18202 | return nullptr; |
| 18203 | } |
| 18204 | |
| 18205 | const static BBjumpKinds jumpKinds[] = { |
| 18206 | BBJ_NONE, // SCK_NONE |
| 18207 | BBJ_THROW, // SCK_RNGCHK_FAIL |
| 18208 | BBJ_ALWAYS, // SCK_PAUSE_EXEC |
| 18209 | BBJ_THROW, // SCK_DIV_BY_ZERO |
| 18210 | BBJ_THROW, // SCK_ARITH_EXCP, SCK_OVERFLOW |
| 18211 | BBJ_THROW, // SCK_ARG_EXCPN |
| 18212 | BBJ_THROW, // SCK_ARG_RNG_EXCPN |
| 18213 | }; |
| 18214 | |
| 18215 | noway_assert(sizeof(jumpKinds) == SCK_COUNT); // sanity check |
| 18216 | |
| 18217 | /* First look for an existing entry that matches what we're looking for */ |
| 18218 | |
| 18219 | AddCodeDsc* add = fgFindExcptnTarget(kind, refData); |
| 18220 | |
| 18221 | if (add) // found it |
| 18222 | { |
| 18223 | return add->acdDstBlk; |
| 18224 | } |
| 18225 | |
| 18226 | /* We have to allocate a new entry and prepend it to the list */ |
| 18227 | |
| 18228 | add = new (this, CMK_Unknown) AddCodeDsc; |
| 18229 | add->acdData = refData; |
| 18230 | add->acdKind = kind; |
| 18231 | add->acdNext = fgAddCodeList; |
| 18232 | #if !FEATURE_FIXED_OUT_ARGS |
| 18233 | add->acdStkLvl = 0; |
| 18234 | add->acdStkLvlInit = false; |
| 18235 | #endif // !FEATURE_FIXED_OUT_ARGS |
| 18236 | |
| 18237 | fgAddCodeList = add; |
| 18238 | |
| 18239 | /* Create the target basic block */ |
| 18240 | |
| 18241 | BasicBlock* newBlk; |
| 18242 | |
| 18243 | newBlk = add->acdDstBlk = fgNewBBinRegion(jumpKinds[kind], srcBlk, /* runRarely */ true, /* insertAtEnd */ true); |
| 18244 | |
| 18245 | add->acdDstBlk->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL; |
| 18246 | |
| 18247 | #ifdef DEBUG |
| 18248 | if (verbose) |
| 18249 | { |
| 18250 | const char* msgWhere = ""; |
| 18251 | if (!srcBlk->hasTryIndex() && !srcBlk->hasHndIndex()) |
| 18252 | { |
| 18253 | msgWhere = "non-EH region"; |
| 18254 | } |
| 18255 | else if (!srcBlk->hasTryIndex()) |
| 18256 | { |
| 18257 | msgWhere = "handler"; |
| 18258 | } |
| 18259 | else if (!srcBlk->hasHndIndex()) |
| 18260 | { |
| 18261 | msgWhere = "try"; |
| 18262 | } |
| 18263 | else if (srcBlk->getTryIndex() < srcBlk->getHndIndex()) |
| 18264 | { |
| 18265 | msgWhere = "try"; |
| 18266 | } |
| 18267 | else |
| 18268 | { |
| 18269 | msgWhere = "handler"; |
| 18270 | } |
| 18271 | |
| 18272 | const char* msg; |
| 18273 | switch (kind) |
| 18274 | { |
| 18275 | case SCK_RNGCHK_FAIL: |
| 18276 | msg = " for RNGCHK_FAIL"; |
| 18277 | break; |
| 18278 | case SCK_PAUSE_EXEC: |
| 18279 | msg = " for PAUSE_EXEC"; |
| 18280 | break; |
| 18281 | case SCK_DIV_BY_ZERO: |
| 18282 | msg = " for DIV_BY_ZERO"; |
| 18283 | break; |
| 18284 | case SCK_OVERFLOW: |
| 18285 | msg = " for OVERFLOW"; |
| 18286 | break; |
| 18287 | case SCK_ARG_EXCPN: |
| 18288 | msg = " for ARG_EXCPN"; |
| 18289 | break; |
| 18290 | case SCK_ARG_RNG_EXCPN: |
| 18291 | msg = " for ARG_RNG_EXCPN"; |
| 18292 | break; |
| 18293 | default: |
| 18294 | msg = " for ??"; |
| 18295 | break; |
| 18296 | } |
| 18297 | |
| 18298 | printf("\nfgAddCodeRef - Add BB in %s%s, new block %s\n", msgWhere, msg, add->acdDstBlk->dspToString()); |
| 18299 | } |
| 18300 | #endif // DEBUG |
| 18301 | |
| 18302 | /* Mark the block as added by the compiler and not removable by future flow |
| 18303 | graph optimizations. Note that no bbJumpDest points to these blocks. */ |
| 18304 | |
| 18305 | newBlk->bbFlags |= BBF_IMPORTED; |
| 18306 | newBlk->bbFlags |= BBF_DONT_REMOVE; |
| 18307 | |
| 18308 | /* Remember that we're adding a new basic block */ |
| 18309 | |
| 18310 | fgAddCodeModf = true; |
| 18311 | fgRngChkThrowAdded = true; |
| 18312 | |
| 18313 | /* Now figure out what code to insert */ |
| 18314 | |
| 18315 | GenTreeCall* tree; |
| 18316 | int helper = CORINFO_HELP_UNDEF; |
| 18317 | |
| 18318 | switch (kind) |
| 18319 | { |
| 18320 | case SCK_RNGCHK_FAIL: |
| 18321 | helper = CORINFO_HELP_RNGCHKFAIL; |
| 18322 | break; |
| 18323 | |
| 18324 | case SCK_DIV_BY_ZERO: |
| 18325 | helper = CORINFO_HELP_THROWDIVZERO; |
| 18326 | break; |
| 18327 | |
| 18328 | case SCK_ARITH_EXCPN: |
| 18329 | helper = CORINFO_HELP_OVERFLOW; |
| 18330 | noway_assert(SCK_OVERFLOW == SCK_ARITH_EXCPN); |
| 18331 | break; |
| 18332 | |
| 18333 | case SCK_ARG_EXCPN: |
| 18334 | helper = CORINFO_HELP_THROW_ARGUMENTEXCEPTION; |
| 18335 | break; |
| 18336 | |
| 18337 | case SCK_ARG_RNG_EXCPN: |
| 18338 | helper = CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION; |
| 18339 | break; |
| 18340 | |
| 18341 | // case SCK_PAUSE_EXEC: |
| 18342 | // noway_assert(!"add code to pause exec"); |
| 18343 | |
| 18344 | default: |
| 18345 | noway_assert(!"unexpected code addition kind"); |
| 18346 | return nullptr; |
| 18347 | } |
| 18348 | |
| 18349 | noway_assert(helper != CORINFO_HELP_UNDEF); |
| 18350 | |
| 18351 | // Add the appropriate helper call. |
| 18352 | tree = gtNewHelperCallNode(helper, TYP_VOID); |
| 18353 | |
| 18354 | // There are no args here but fgMorphArgs has side effects |
| 18355 | // such as setting the outgoing arg area (which is necessary |
| 18356 | // on AMD64 if there are any calls). |
| 18357 | tree = fgMorphArgs(tree); |
| 18358 | |
| 18359 | // Store the tree in the new basic block. |
| 18360 | assert(!srcBlk->isEmpty()); |
| 18361 | if (!srcBlk->IsLIR()) |
| 18362 | { |
| 18363 | fgInsertStmtAtEnd(newBlk, fgNewStmtFromTree(tree)); |
| 18364 | } |
| 18365 | else |
| 18366 | { |
| 18367 | LIR::AsRange(newBlk).InsertAtEnd(LIR::SeqTree(this, tree)); |
| 18368 | } |
| 18369 | |
| 18370 | return add->acdDstBlk; |
| 18371 | } |
| 18372 | |
| 18373 | /***************************************************************************** |
| 18374 | * Finds the block to jump to, to throw a given kind of exception |
| 18375 | * We maintain a cache of one AddCodeDsc for each kind, to make searching fast. |
| 18376 | * Note : Each block uses the same (maybe shared) block as the jump target for |
| 18377 | * a given type of exception |
| 18378 | */ |
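| | // Note: the cache below holds at most one AddCodeDsc per kind. If the cached entry's acdData does |
| | // not match 'refData', we fall back to a linear walk of fgAddCodeList and then cache whatever that |
| | // walk finds (possibly nullptr). |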
| 18379 | |
| 18380 | Compiler::AddCodeDsc* Compiler::fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData) |
| 18381 | { |
| 18382 | assert(fgUseThrowHelperBlocks()); |
| 18383 | if (!(fgExcptnTargetCache[kind] && // Try the cached value first |
| 18384 | fgExcptnTargetCache[kind]->acdData == refData)) |
| 18385 | { |
| 18386 | // Too bad, have to search for the jump target for the exception |
| 18387 | |
| 18388 | AddCodeDsc* add = nullptr; |
| 18389 | |
| 18390 | for (add = fgAddCodeList; add != nullptr; add = add->acdNext) |
| 18391 | { |
| 18392 | if (add->acdData == refData && add->acdKind == kind) |
| 18393 | { |
| 18394 | break; |
| 18395 | } |
| 18396 | } |
| 18397 | |
| 18398 | fgExcptnTargetCache[kind] = add; // Cache it |
| 18399 | } |
| 18400 | |
| 18401 | return fgExcptnTargetCache[kind]; |
| 18402 | } |
| 18403 | |
| 18404 | /***************************************************************************** |
| 18405 | * |
| 18406 | * The given basic block contains an array range check; return the label this |
| 18407 | * range check is to jump to upon failure. |
| 18408 | */ |
| 18409 | |
| 18410 | //------------------------------------------------------------------------ |
| 18411 | // fgRngChkTarget: Create/find the appropriate "range-fail" label for the block. |
| 18412 | // |
| 18413 | // Arguments: |
| 18414 | // srcBlk - the block that needs an entry; |
| 18415 | // kind - the kind of exception; |
| 18416 | // |
| 18417 | // Return Value: |
| 18418 | // The target throw helper block this check jumps to upon failure. |
| 18419 | // |
| 18420 | BasicBlock* Compiler::fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind) |
| 18421 | { |
| 18422 | #ifdef DEBUG |
| 18423 | if (verbose) |
| 18424 | { |
| 18425 | printf("*** Computing fgRngChkTarget for block " FMT_BB "\n", block->bbNum); |
| 18426 | if (!block->IsLIR()) |
| 18427 | { |
| 18428 | gtDispTree(compCurStmt); |
| 18429 | } |
| 18430 | } |
| 18431 | #endif // DEBUG |
| 18432 | |
| 18433 | /* We attach the target label to the containing try block (if any) */ |
| 18434 | noway_assert(!compIsForInlining()); |
| 18435 | return fgAddCodeRef(block, bbThrowIndex(block), kind); |
| 18436 | } |
| 18437 | |
| 18438 | // Sequences the tree. |
| 18439 | // prevTree is what gtPrev of the first node in execution order gets set to. |
| 18440 | // Returns the first node (execution order) in the sequenced tree. |
| 18441 | GenTree* Compiler::fgSetTreeSeq(GenTree* tree, GenTree* prevTree, bool isLIR) |
| 18442 | { |
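| | // 'list' serves as a dummy head node: it lets fgSetTreeSeqHelper() thread the first real node |
| | // without special-casing an empty prefix; its link is detached again before returning. |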
| 18443 | GenTree list; |
| 18444 | |
| 18445 | if (prevTree == nullptr) |
| 18446 | { |
| 18447 | prevTree = &list; |
| 18448 | } |
| 18449 | fgTreeSeqLst = prevTree; |
| 18450 | fgTreeSeqNum = 0; |
| 18451 | fgTreeSeqBeg = nullptr; |
| 18452 | fgSetTreeSeqHelper(tree, isLIR); |
| 18453 | |
| 18454 | GenTree* result = prevTree->gtNext; |
| 18455 | if (prevTree == &list) |
| 18456 | { |
| 18457 | list.gtNext->gtPrev = nullptr; |
| 18458 | } |
| 18459 | |
| 18460 | return result; |
| 18461 | } |
| 18462 | |
| 18463 | /***************************************************************************** |
| 18464 | * |
| 18465 | * Assigns sequence numbers to the given tree and its sub-operands, and |
| 18466 | * threads all the nodes together via the 'gtNext' and 'gtPrev' fields. |
| 18467 | * Uses 'global' - fgTreeSeqLst |
| 18468 | */ |
| 18469 | |
| 18470 | void Compiler::fgSetTreeSeqHelper(GenTree* tree, bool isLIR) |
| 18471 | { |
| 18472 | genTreeOps oper; |
| 18473 | unsigned kind; |
| 18474 | |
| 18475 | noway_assert(tree); |
| 18476 | assert(!IsUninitialized(tree)); |
| 18477 | noway_assert(tree->gtOper != GT_STMT); |
| 18478 | |
| 18479 | /* Figure out what kind of a node we have */ |
| 18480 | |
| 18481 | oper = tree->OperGet(); |
| 18482 | kind = tree->OperKind(); |
| 18483 | |
| 18484 | /* Is this a leaf/constant node? */ |
| 18485 | |
| 18486 | if (kind & (GTK_CONST | GTK_LEAF)) |
| 18487 | { |
| 18488 | fgSetTreeSeqFinish(tree, isLIR); |
| 18489 | return; |
| 18490 | } |
| 18491 | |
| 18492 | // Special handling for dynamic block ops. |
| 18493 | if (tree->OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK)) |
| 18494 | { |
| 18495 | GenTreeDynBlk* dynBlk = tree->AsDynBlk(); |
| 18496 | GenTree* sizeNode = dynBlk->gtDynamicSize; |
| 18497 | GenTree* dstAddr = dynBlk->Addr(); |
| 18498 | GenTree* src = dynBlk->Data(); |
| 18499 | bool isReverse = ((dynBlk->gtFlags & GTF_REVERSE_OPS) != 0); |
| 18500 | if (dynBlk->gtEvalSizeFirst) |
| 18501 | { |
| 18502 | fgSetTreeSeqHelper(sizeNode, isLIR); |
| 18503 | } |
| 18504 | |
| 18505 | // We either have a DYN_BLK or a STORE_DYN_BLK. If the latter, we have a |
| 18506 | // src (the Data to be stored), and isReverse tells us whether to evaluate |
| 18507 | // that before dstAddr. |
| 18508 | if (isReverse && (src != nullptr)) |
| 18509 | { |
| 18510 | fgSetTreeSeqHelper(src, isLIR); |
| 18511 | } |
| 18512 | fgSetTreeSeqHelper(dstAddr, isLIR); |
| 18513 | if (!isReverse && (src != nullptr)) |
| 18514 | { |
| 18515 | fgSetTreeSeqHelper(src, isLIR); |
| 18516 | } |
| 18517 | if (!dynBlk->gtEvalSizeFirst) |
| 18518 | { |
| 18519 | fgSetTreeSeqHelper(sizeNode, isLIR); |
| 18520 | } |
| 18521 | fgSetTreeSeqFinish(dynBlk, isLIR); |
| 18522 | return; |
| 18523 | } |
| 18524 | |
| 18525 | /* Is it a 'simple' unary/binary operator? */ |
| 18526 | |
| 18527 | if (kind & GTK_SMPOP) |
| 18528 | { |
| 18529 | GenTree* op1 = tree->gtOp.gtOp1; |
| 18530 | GenTree* op2 = tree->gtGetOp2IfPresent(); |
| 18531 | |
| 18532 | // Special handling for GT_LIST |
| 18533 | if (tree->OperGet() == GT_LIST) |
| 18534 | { |
| 18535 | // First, handle the list items, which will be linked in forward order. |
| 18536 | // As we go, we will link the GT_LIST nodes in reverse order - we will number |
| 18537 | // them and update fgTreeSeqLst in a subsequent traversal. |
| 18538 | GenTree* nextList = tree; |
| 18539 | GenTree* list = nullptr; |
| 18540 | while (nextList != nullptr && nextList->OperGet() == GT_LIST) |
| 18541 | { |
| 18542 | list = nextList; |
| 18543 | GenTree* listItem = list->gtOp.gtOp1; |
| 18544 | fgSetTreeSeqHelper(listItem, isLIR); |
| 18545 | nextList = list->gtOp.gtOp2; |
| 18546 | if (nextList != nullptr) |
| 18547 | { |
| 18548 | nextList->gtNext = list; |
| 18549 | } |
| 18550 | list->gtPrev = nextList; |
| 18551 | } |
| 18552 | // Next, handle the GT_LIST nodes. |
| 18553 | // Note that fgSetTreeSeqFinish() sets the gtNext to null, so we need to capture the nextList |
| 18554 | // before we call that method. |
| 18555 | nextList = list; |
| 18556 | do |
| 18557 | { |
| 18558 | assert(list != nullptr); |
| 18559 | list = nextList; |
| 18560 | nextList = list->gtNext; |
| 18561 | fgSetTreeSeqFinish(list, isLIR); |
| 18562 | } while (list != tree); |
| 18563 | return; |
| 18564 | } |
| 18565 | |
| 18566 | /* Special handling for AddrMode */ |
| 18567 | if (tree->OperIsAddrMode()) |
| 18568 | { |
| 18569 | bool reverse = ((tree->gtFlags & GTF_REVERSE_OPS) != 0); |
| 18570 | if (reverse) |
| 18571 | { |
| 18572 | assert(op1 != nullptr && op2 != nullptr); |
| 18573 | fgSetTreeSeqHelper(op2, isLIR); |
| 18574 | } |
| 18575 | if (op1 != nullptr) |
| 18576 | { |
| 18577 | fgSetTreeSeqHelper(op1, isLIR); |
| 18578 | } |
| 18579 | if (!reverse && op2 != nullptr) |
| 18580 | { |
| 18581 | fgSetTreeSeqHelper(op2, isLIR); |
| 18582 | } |
| 18583 | |
| 18584 | fgSetTreeSeqFinish(tree, isLIR); |
| 18585 | return; |
| 18586 | } |
| 18587 | |
| 18588 | /* Check for a nilary operator */ |
| 18589 | |
| 18590 | if (op1 == nullptr) |
| 18591 | { |
| 18592 | noway_assert(op2 == nullptr); |
| 18593 | fgSetTreeSeqFinish(tree, isLIR); |
| 18594 | return; |
| 18595 | } |
| 18596 | |
| 18597 | /* Is this a unary operator? |
| 18598 | * Note that GT_IND, while nominally unary, has a special structure and is handled below */ |
| 18599 | |
| 18600 | if (oper == GT_IND) |
| 18601 | { |
| 18602 | /* Visit the indirection first - op2 may point to the |
| 18603 | * jump Label for array-index-out-of-range */ |
| 18604 | |
| 18605 | fgSetTreeSeqHelper(op1, isLIR); |
| 18606 | fgSetTreeSeqFinish(tree, isLIR); |
| 18607 | return; |
| 18608 | } |
| 18609 | |
| 18610 | /* Now this is REALLY a unary operator */ |
| 18611 | |
| 18612 | if (!op2) |
| 18613 | { |
| 18614 | /* Visit the (only) operand and we're done */ |
| 18615 | |
| 18616 | fgSetTreeSeqHelper(op1, isLIR); |
| 18617 | fgSetTreeSeqFinish(tree, isLIR); |
| 18618 | return; |
| 18619 | } |
| 18620 | |
| 18621 | /* |
| 18622 | For "real" ?: operators, we make sure the order is |
| 18623 | as follows: |
| 18624 | |
| 18625 | condition |
| 18626 | 1st operand |
| 18627 | GT_COLON |
| 18628 | 2nd operand |
| 18629 | GT_QMARK |
| 18630 | */ |
| 18631 | |
| 18632 | if (oper == GT_QMARK) |
| 18633 | { |
| 18634 | noway_assert((tree->gtFlags & GTF_REVERSE_OPS) == 0); |
| 18635 | |
| 18636 | fgSetTreeSeqHelper(op1, isLIR); |
| 18637 | // Here, for the colon, the sequence does not actually represent "order of evaluation": |
| 18638 | // one or the other of the branches is executed, not both. Still, to make debugging checks |
| 18639 | // work, we want the sequence to match the order in which we'll generate code, which means |
| 18640 | // "else" clause then "then" clause. |
| 18641 | fgSetTreeSeqHelper(op2->AsColon()->ElseNode(), isLIR); |
| 18642 | fgSetTreeSeqHelper(op2, isLIR); |
| 18643 | fgSetTreeSeqHelper(op2->AsColon()->ThenNode(), isLIR); |
| 18644 | |
| 18645 | fgSetTreeSeqFinish(tree, isLIR); |
| 18646 | return; |
| 18647 | } |
| 18648 | |
| 18649 | if (oper == GT_COLON) |
| 18650 | { |
| 18651 | fgSetTreeSeqFinish(tree, isLIR); |
| 18652 | return; |
| 18653 | } |
| 18654 | |
| 18655 | /* This is a binary operator */ |
| 18656 | |
| 18657 | if (tree->gtFlags & GTF_REVERSE_OPS) |
| 18658 | { |
| 18659 | fgSetTreeSeqHelper(op2, isLIR); |
| 18660 | fgSetTreeSeqHelper(op1, isLIR); |
| 18661 | } |
| 18662 | else |
| 18663 | { |
| 18664 | fgSetTreeSeqHelper(op1, isLIR); |
| 18665 | fgSetTreeSeqHelper(op2, isLIR); |
| 18666 | } |
| 18667 | |
| 18668 | fgSetTreeSeqFinish(tree, isLIR); |
| 18669 | return; |
| 18670 | } |
| 18671 | |
| 18672 | /* See what kind of a special operator we have here */ |
| 18673 | |
| 18674 | switch (oper) |
| 18675 | { |
| 18676 | case GT_FIELD: |
| 18677 | noway_assert(tree->gtField.gtFldObj == nullptr); |
| 18678 | break; |
| 18679 | |
| 18680 | case GT_CALL: |
| 18681 | |
| 18682 | /* We'll evaluate the 'this' argument value first */ |
| 18683 | if (tree->gtCall.gtCallObjp) |
| 18684 | { |
| 18685 | fgSetTreeSeqHelper(tree->gtCall.gtCallObjp, isLIR); |
| 18686 | } |
| 18687 | |
| 18688 | /* We'll evaluate the arguments next, left to right |
| 18689 | * NOTE: setListOrder needs cleanup - eliminate the #ifdef afterwards */ |
| 18690 | |
| 18691 | if (tree->gtCall.gtCallArgs) |
| 18692 | { |
| 18693 | fgSetTreeSeqHelper(tree->gtCall.gtCallArgs, isLIR); |
| 18694 | } |
| 18695 | |
| 18696 | /* Evaluate the temp register arguments list |
| 18697 | * This is a "hidden" list and its only purpose is to |
| 18698 | * extend the life of temps until we make the call */ |
| 18699 | |
| 18700 | if (tree->gtCall.gtCallLateArgs) |
| 18701 | { |
| 18702 | fgSetTreeSeqHelper(tree->gtCall.gtCallLateArgs, isLIR); |
| 18703 | } |
| 18704 | |
| 18705 | if ((tree->gtCall.gtCallType == CT_INDIRECT) && (tree->gtCall.gtCallCookie != nullptr)) |
| 18706 | { |
| 18707 | fgSetTreeSeqHelper(tree->gtCall.gtCallCookie, isLIR); |
| 18708 | } |
| 18709 | |
| 18710 | if (tree->gtCall.gtCallType == CT_INDIRECT) |
| 18711 | { |
| 18712 | fgSetTreeSeqHelper(tree->gtCall.gtCallAddr, isLIR); |
| 18713 | } |
| 18714 | |
| 18715 | if (tree->gtCall.gtControlExpr) |
| 18716 | { |
| 18717 | fgSetTreeSeqHelper(tree->gtCall.gtControlExpr, isLIR); |
| 18718 | } |
| 18719 | |
| 18720 | break; |
| 18721 | |
| 18722 | case GT_ARR_ELEM: |
| 18723 | |
| 18724 | fgSetTreeSeqHelper(tree->gtArrElem.gtArrObj, isLIR); |
| 18725 | |
| 18726 | unsigned dim; |
| 18727 | for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++) |
| 18728 | { |
| 18729 | fgSetTreeSeqHelper(tree->gtArrElem.gtArrInds[dim], isLIR); |
| 18730 | } |
| 18731 | |
| 18732 | break; |
| 18733 | |
| 18734 | case GT_ARR_OFFSET: |
| 18735 | fgSetTreeSeqHelper(tree->gtArrOffs.gtOffset, isLIR); |
| 18736 | fgSetTreeSeqHelper(tree->gtArrOffs.gtIndex, isLIR); |
| 18737 | fgSetTreeSeqHelper(tree->gtArrOffs.gtArrObj, isLIR); |
| 18738 | break; |
| 18739 | |
| 18740 | case GT_CMPXCHG: |
| 18741 | // Evaluate the trees left to right |
| 18742 | fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpLocation, isLIR); |
| 18743 | fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpValue, isLIR); |
| 18744 | fgSetTreeSeqHelper(tree->gtCmpXchg.gtOpComparand, isLIR); |
| 18745 | break; |
| 18746 | |
| 18747 | case GT_ARR_BOUNDS_CHECK: |
| 18748 | #ifdef FEATURE_SIMD |
| 18749 | case GT_SIMD_CHK: |
| 18750 | #endif // FEATURE_SIMD |
| 18751 | #ifdef FEATURE_HW_INTRINSICS |
| 18752 | case GT_HW_INTRINSIC_CHK: |
| 18753 | #endif // FEATURE_HW_INTRINSICS |
| 18754 | // Evaluate the trees left to right |
| 18755 | fgSetTreeSeqHelper(tree->gtBoundsChk.gtIndex, isLIR); |
| 18756 | fgSetTreeSeqHelper(tree->gtBoundsChk.gtArrLen, isLIR); |
| 18757 | break; |
| 18758 | |
| 18759 | case GT_STORE_DYN_BLK: |
| 18760 | case GT_DYN_BLK: |
| 18761 | noway_assert(!"DYN_BLK nodes should be sequenced as a special case"); |
| 18762 | break; |
| 18763 | |
| 18764 | case GT_INDEX_ADDR: |
| 18765 | // Evaluate the array first, then the index.... |
| 18766 | assert((tree->gtFlags & GTF_REVERSE_OPS) == 0); |
| 18767 | fgSetTreeSeqHelper(tree->AsIndexAddr()->Arr(), isLIR); |
| 18768 | fgSetTreeSeqHelper(tree->AsIndexAddr()->Index(), isLIR); |
| 18769 | break; |
| 18770 | |
| 18771 | default: |
| 18772 | #ifdef DEBUG |
| 18773 | gtDispTree(tree); |
| 18774 | noway_assert(!"unexpected operator"); |
| 18775 | #endif // DEBUG |
| 18776 | break; |
| 18777 | } |
| 18778 | |
| 18779 | fgSetTreeSeqFinish(tree, isLIR); |
| 18780 | } |
| 18781 | |
| 18782 | void Compiler::fgSetTreeSeqFinish(GenTree* tree, bool isLIR) |
| 18783 | { |
| 18784 | // If we are sequencing for LIR: |
| 18785 | // - Clear the reverse ops flag |
| 18786 | // - If we are processing a node that does not appear in LIR, do not add it to the list. |
| 18787 | if (isLIR) |
| 18788 | { |
| 18789 | tree->gtFlags &= ~GTF_REVERSE_OPS; |
| 18790 | |
| 18791 | if ((tree->OperGet() == GT_LIST) || (tree->OperGet() == GT_ARGPLACE) || |
| 18792 | (tree->OperGet() == GT_FIELD_LIST && !tree->AsFieldList()->IsFieldListHead())) |
| 18793 | { |
| 18794 | return; |
| 18795 | } |
| 18796 | } |
| 18797 | |
| 18798 | /* Append to the node list */ |
| 18799 | ++fgTreeSeqNum; |
| 18800 | |
| 18801 | #ifdef DEBUG |
| 18802 | tree->gtSeqNum = fgTreeSeqNum; |
| 18803 | |
| 18804 | if (verbose & 0) |
| 18805 | { |
| 18806 | printf("SetTreeOrder: "); |
| 18807 | printTreeID(fgTreeSeqLst); |
| 18808 | printf(" followed by "); |
| 18809 | printTreeID(tree); |
| 18810 | printf("\n"); |
| 18811 | } |
| 18812 | #endif // DEBUG |
| 18813 | |
| 18814 | fgTreeSeqLst->gtNext = tree; |
| 18815 | tree->gtNext = nullptr; |
| 18816 | tree->gtPrev = fgTreeSeqLst; |
| 18817 | fgTreeSeqLst = tree; |
| 18818 | |
| 18819 | /* Remember the very first node */ |
| 18820 | |
| 18821 | if (!fgTreeSeqBeg) |
| 18822 | { |
| 18823 | fgTreeSeqBeg = tree; |
| 18824 | assert(tree->gtSeqNum == 1); |
| 18825 | } |
| 18826 | } |
| 18827 | |
| 18828 | /***************************************************************************** |
| 18829 | * |
| 18830 | * Figure out the order in which operators should be evaluated, along with |
| 18831 | * other information (such as the register sets trashed by each subtree). |
| 18832 | * Also finds blocks that need GC polls and inserts them as needed. |
| 18833 | */ |
| 18834 | |
| 18835 | void Compiler::fgSetBlockOrder() |
| 18836 | { |
| 18837 | #ifdef DEBUG |
| 18838 | if (verbose) |
| 18839 | { |
| 18840 | printf("*************** In fgSetBlockOrder()\n"); |
| 18841 | } |
| 18842 | #endif // DEBUG |
| 18843 | |
| 18844 | #ifdef DEBUG |
| 18845 | BasicBlock::s_nMaxTrees = 0; |
| 18846 | #endif |
| 18847 | |
| 18848 | /* Walk the basic blocks to assign sequence numbers */ |
| 18849 | |
| 18850 | /* If we don't compute the doms, then we never mark blocks as loops. */ |
| 18851 | if (fgDomsComputed) |
| 18852 | { |
| 18853 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 18854 | { |
| 18855 | /* If this block is a loop header, mark it appropriately */ |
| 18856 | |
| 18857 | if (block->isLoopHead()) |
| 18858 | { |
| 18859 | fgMarkLoopHead(block); |
| 18860 | } |
| 18861 | } |
| 18862 | } |
| 18863 | // Only enable fully interruptible code if we're hijacking. |
| 18864 | else if (GCPOLL_NONE == opts.compGCPollType) |
| 18865 | { |
| 18866 | /* If we don't have the dominators, use an abbreviated test for fully interruptible. If there are |
| 18867 | * any back edges, check the source and destination blocks to see if they're GC Safe. If not, then |
| 18868 | * go fully interruptible. */ |
| 18869 | |
| 18870 | /* XXX Mon 1/21/2008 |
| 18871 | * Wouldn't it be nice to have a block iterator that can do this loop? |
| 18872 | */ |
| 18873 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 18874 | { |
| 18875 | // true if the edge is forward, or if it is a back edge and either the source or the destination is GC safe. |
| 18876 | #define EDGE_IS_GC_SAFE(src, dst) \ |
| 18877 | (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT)) |
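| | // For example (illustration only): a BBJ_ALWAYS edge from a block back to an earlier block |
| | // (decreasing bbNum) is only treated as GC safe if at least one endpoint has BBF_GC_SAFE_POINT set; |
| | // otherwise the code below sets genInterruptible (when the pushed-arg count can still be encoded). |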
| 18878 | |
| 18879 | bool partiallyInterruptible = true; |
| 18880 | switch (block->bbJumpKind) |
| 18881 | { |
| 18882 | case BBJ_COND: |
| 18883 | case BBJ_ALWAYS: |
| 18884 | partiallyInterruptible = EDGE_IS_GC_SAFE(block, block->bbJumpDest); |
| 18885 | break; |
| 18886 | |
| 18887 | case BBJ_SWITCH: |
| 18888 | |
| 18889 | unsigned jumpCnt; |
| 18890 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 18891 | BasicBlock** jumpPtr; |
| 18892 | jumpPtr = block->bbJumpSwt->bbsDstTab; |
| 18893 | |
| 18894 | do |
| 18895 | { |
| 18896 | partiallyInterruptible &= EDGE_IS_GC_SAFE(block, *jumpPtr); |
| 18897 | } while (++jumpPtr, --jumpCnt); |
| 18898 | |
| 18899 | break; |
| 18900 | |
| 18901 | default: |
| 18902 | break; |
| 18903 | } |
| 18904 | |
| 18905 | if (!partiallyInterruptible) |
| 18906 | { |
| 18907 | // DDB 204533: |
| 18908 | // The GC encoding for fully interruptible methods does not |
| 18909 | // support more than 1023 pushed arguments, so we can't set |
| 18910 | // genInterruptible here when we have 1024 or more pushed args |
| 18911 | // |
| 18912 | if (compCanEncodePtrArgCntMax()) |
| 18913 | { |
| 18914 | genInterruptible = true; |
| 18915 | } |
| 18916 | break; |
| 18917 | } |
| 18918 | #undef EDGE_IS_GC_SAFE |
| 18919 | } |
| 18920 | } |
| 18921 | |
| 18922 | if (!fgGCPollsCreated) |
| 18923 | { |
| 18924 | fgCreateGCPolls(); |
| 18925 | } |
| 18926 | |
| 18927 | for (BasicBlock* block = fgFirstBB; block; block = block->bbNext) |
| 18928 | { |
| 18929 | |
| 18930 | #if FEATURE_FASTTAILCALL |
| 18931 | #ifndef JIT32_GCENCODER |
| 18932 | if (block->endsWithTailCallOrJmp(this, true) && optReachWithoutCall(fgFirstBB, block)) |
| 18933 | { |
| 18934 | // We have a tail call that is reachable without making any other |
| 18935 | // 'normal' call that would have counted as a GC Poll. If we were |
| 18936 | // using polls, all return blocks meeting this criteria would have |
| 18937 | // already added polls and then marked as being GC safe |
| 18938 | // (BBF_GC_SAFE_POINT). Thus we can only reach here when *NOT* |
| 18939 | // using GC polls, but instead relying on the JIT to generate |
| 18940 | // fully-interruptible code. |
| 18941 | noway_assert(GCPOLL_NONE == opts.compGCPollType); |
| 18942 | |
| 18943 | // This tail call might combine with other tail calls to form a |
| 18944 | // loop. Thus we need to either add a poll, or make the method |
| 18945 | // fully interruptible. I chose the latter because that's what |
| 18946 | // JIT64 does. |
| 18947 | genInterruptible = true; |
| 18948 | } |
| 18949 | #endif // !JIT32_GCENCODER |
| 18950 | #endif // FEATURE_FASTTAILCALL |
| 18951 | |
| 18952 | fgSetBlockOrder(block); |
| 18953 | } |
| 18954 | |
| 18955 | /* Remember that now the tree list is threaded */ |
| 18956 | |
| 18957 | fgStmtListThreaded = true; |
| 18958 | |
| 18959 | #ifdef DEBUG |
| 18960 | if (verbose) |
| 18961 | { |
| 18962 | printf("The biggest BB has %4u tree nodes\n", BasicBlock::s_nMaxTrees); |
| 18963 | } |
| 18964 | fgDebugCheckLinks(); |
| 18965 | #endif // DEBUG |
| 18966 | } |
| 18967 | |
| 18968 | /*****************************************************************************/ |
| 18969 | |
| 18970 | void Compiler::fgSetStmtSeq(GenTree* tree) |
| 18971 | { |
| 18972 | GenTree list; // helper node that we use to start the StmtList |
| 18973 | // It's located in front of the first node in the list |
| 18974 | |
| 18975 | noway_assert(tree->gtOper == GT_STMT); |
| 18976 | |
| 18977 | /* Assign numbers and next/prev links for this tree */ |
| 18978 | |
| 18979 | fgTreeSeqNum = 0; |
| 18980 | fgTreeSeqLst = &list; |
| 18981 | fgTreeSeqBeg = nullptr; |
| 18982 | |
| 18983 | fgSetTreeSeqHelper(tree->gtStmt.gtStmtExpr, false); |
| 18984 | |
| 18985 | /* Record the address of the first node */ |
| 18986 | |
| 18987 | tree->gtStmt.gtStmtList = fgTreeSeqBeg; |
| 18988 | |
| 18989 | #ifdef DEBUG |
| 18990 | |
| 18991 | if (list.gtNext->gtPrev != &list) |
| 18992 | { |
| 18993 | printf("&list "); |
| 18994 | printTreeID(&list); |
| 18995 | printf(" != list.next->prev "); |
| 18996 | printTreeID(list.gtNext->gtPrev); |
| 18997 | printf("\n"); |
| 18998 | goto BAD_LIST; |
| 18999 | } |
| 19000 | |
| 19001 | GenTree* temp; |
| 19002 | GenTree* last; |
| 19003 | for (temp = list.gtNext, last = &list; temp; last = temp, temp = temp->gtNext) |
| 19004 | { |
| 19005 | if (temp->gtPrev != last) |
| 19006 | { |
| 19007 | printTreeID(temp); |
| 19008 | printf("->gtPrev = "); |
| 19009 | printTreeID(temp->gtPrev); |
| 19010 | printf(", but last = "); |
| 19011 | printTreeID(last); |
| 19012 | printf("\n"); |
| 19013 | |
| 19014 | BAD_LIST:; |
| 19015 | |
| 19016 | printf("\n"); |
| 19017 | gtDispTree(tree->gtStmt.gtStmtExpr); |
| 19018 | printf("\n"); |
| 19019 | |
| 19020 | for (GenTree* bad = &list; bad; bad = bad->gtNext) |
| 19021 | { |
| 19022 | printf(" entry at "); |
| 19023 | printTreeID(bad); |
| 19024 | printf(" (prev="); |
| 19025 | printTreeID(bad->gtPrev); |
| 19026 | printf(",next="); |
| 19027 | printTreeID(bad->gtNext); |
| 19028 | printf(")\n"); |
| 19029 | } |
| 19030 | |
| 19031 | printf("\n"); |
| 19032 | noway_assert(!"Badly linked tree"); |
| 19033 | break; |
| 19034 | } |
| 19035 | } |
| 19036 | #endif // DEBUG |
| 19037 | |
| 19038 | /* Fix the first node's 'prev' link */ |
| 19039 | |
| 19040 | noway_assert(list.gtNext->gtPrev == &list); |
| 19041 | list.gtNext->gtPrev = nullptr; |
| 19042 | |
| 19043 | #ifdef DEBUG |
| 19044 | /* Keep track of the highest # of tree nodes */ |
| 19045 | |
| 19046 | if (BasicBlock::s_nMaxTrees < fgTreeSeqNum) |
| 19047 | { |
| 19048 | BasicBlock::s_nMaxTrees = fgTreeSeqNum; |
| 19049 | } |
| 19050 | #endif // DEBUG |
| 19051 | } |
| 19052 | |
| 19053 | /*****************************************************************************/ |
| 19054 | |
| 19055 | void Compiler::fgSetBlockOrder(BasicBlock* block) |
| 19056 | { |
| 19057 | GenTree* tree; |
| 19058 | |
| 19059 | tree = block->bbTreeList; |
| 19060 | if (!tree) |
| 19061 | { |
| 19062 | return; |
| 19063 | } |
| 19064 | |
| 19065 | for (;;) |
| 19066 | { |
| 19067 | fgSetStmtSeq(tree); |
| 19068 | |
| 19069 | /* Are there any more trees in this basic block? */ |
| 19070 | |
| 19071 | if (tree->gtNext == nullptr) |
| 19072 | { |
| 19073 | /* last statement in the tree list */ |
| 19074 | noway_assert(block->lastStmt() == tree); |
| 19075 | break; |
| 19076 | } |
| 19077 | |
| 19078 | #ifdef DEBUG |
| 19079 | if (block->bbTreeList == tree) |
| 19080 | { |
| 19081 | /* first statement in the list */ |
| 19082 | noway_assert(tree->gtPrev->gtNext == nullptr); |
| 19083 | } |
| 19084 | else |
| 19085 | { |
| 19086 | noway_assert(tree->gtPrev->gtNext == tree); |
| 19087 | } |
| 19088 | |
| 19089 | noway_assert(tree->gtNext->gtPrev == tree); |
| 19090 | #endif // DEBUG |
| 19091 | |
| 19092 | tree = tree->gtNext; |
| 19093 | } |
| 19094 | } |
| 19095 | |
| 19096 | //------------------------------------------------------------------------ |
| 19097 | // fgGetFirstNode: Get the first node in the tree, in execution order |
| 19098 | // |
| 19099 | // Arguments: |
| 19100 | // tree - The top node of the tree of interest |
| 19101 | // |
| 19102 | // Return Value: |
| 19103 | // The first node in execution order, that belongs to tree. |
| 19104 | // |
| 19105 | // Assumptions: |
| 19106 | // 'tree' must either be a leaf, or all of its constituent nodes must be contiguous |
| 19107 | // in execution order. |
| 19108 | // TODO-Cleanup: Add a debug-only method that verifies this. |
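| | // For example (illustration only): for a simple binary ADD(op1, op2) with GTF_REVERSE_OPS clear, |
| | // this returns the leftmost leaf under op1; with GTF_REVERSE_OPS set, it returns the leftmost leaf |
| | // under op2 instead. |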
| 19109 | |
| 19110 | /* static */ |
| 19111 | GenTree* Compiler::fgGetFirstNode(GenTree* tree) |
| 19112 | { |
| 19113 | GenTree* child = tree; |
| 19114 | while (child->NumChildren() > 0) |
| 19115 | { |
| 19116 | if (child->OperIsBinary() && child->IsReverseOp()) |
| 19117 | { |
| 19118 | child = child->GetChild(1); |
| 19119 | } |
| 19120 | else |
| 19121 | { |
| 19122 | child = child->GetChild(0); |
| 19123 | } |
| 19124 | } |
| 19125 | return child; |
| 19126 | } |
| 19127 | |
| 19128 | // Examine the bbTreeList and return the estimated code size for this block |
| 19129 | unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) |
| 19130 | { |
| 19131 | unsigned costSz = 0; // estimate of blocks code size cost |
| 19132 | |
| 19133 | switch (block->bbJumpKind) |
| 19134 | { |
| 19135 | case BBJ_NONE: |
| 19136 | costSz = 0; |
| 19137 | break; |
| 19138 | case BBJ_ALWAYS: |
| 19139 | case BBJ_EHCATCHRET: |
| 19140 | case BBJ_LEAVE: |
| 19141 | case BBJ_COND: |
| 19142 | costSz = 2; |
| 19143 | break; |
| 19144 | case BBJ_CALLFINALLY: |
| 19145 | costSz = 5; |
| 19146 | break; |
| 19147 | case BBJ_SWITCH: |
| 19148 | costSz = 10; |
| 19149 | break; |
| 19150 | case BBJ_THROW: |
| 19151 | costSz = 1; // We place an int3 after the code for a throw block |
| 19152 | break; |
| 19153 | case BBJ_EHFINALLYRET: |
| 19154 | case BBJ_EHFILTERRET: |
| 19155 | costSz = 1; |
| 19156 | break; |
| 19157 | case BBJ_RETURN: // return from method |
| 19158 | costSz = 3; |
| 19159 | break; |
| 19160 | default: |
| 19161 | noway_assert(!"Bad bbJumpKind"); |
| 19162 | break; |
| 19163 | } |
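| | // For example (illustration only): a BBJ_COND block whose two statements have gtCostSz of 4 and 7 |
| | // yields an estimate of 2 + 4 + 7 = 13. |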
| 19164 | |
| 19165 | GenTree* tree = block->FirstNonPhiDef(); |
| 19166 | if (tree) |
| 19167 | { |
| 19168 | do |
| 19169 | { |
| 19170 | noway_assert(tree->gtOper == GT_STMT); |
| 19171 | |
| 19172 | if (tree->gtCostSz < MAX_COST) |
| 19173 | { |
| 19174 | costSz += tree->gtCostSz; |
| 19175 | } |
| 19176 | else |
| 19177 | { |
| 19178 | // We could walk the tree to find out the real gtCostSz, |
| 19179 | // but just using MAX_COST for this tree's code size works OK |
| 19180 | costSz += tree->gtCostSz; |
| 19181 | } |
| 19182 | |
| 19183 | tree = tree->gtNext; |
| 19184 | } while (tree); |
| 19185 | } |
| 19186 | |
| 19187 | return costSz; |
| 19188 | } |
| 19189 | |
| 19190 | #if DUMP_FLOWGRAPHS |
| 19191 | |
| 19192 | struct escapeMapping_t |
| 19193 | { |
| 19194 | char ch; |
| 19195 | const char* sub; |
| 19196 | }; |
| 19197 | |
| 19198 | // clang-format off |
| 19199 | static escapeMapping_t s_EscapeFileMapping[] = |
| 19200 | { |
| 19201 | {':', "="}, |
| 19202 | {'<', "["}, |
| 19203 | {'>', "]"}, |
| 19204 | {';', "~semi~"}, |
| 19205 | {'|', "~bar~"}, |
| 19206 | {'&', "~amp~"}, |
| 19207 | {'"', "~quot~"}, |
| 19208 | {'*', "~star~"}, |
| 19209 | {0, nullptr} |
| 19210 | }; |
| 19211 | |
| 19212 | static escapeMapping_t s_EscapeMapping[] = |
| 19213 | { |
| 19214 | {'<', "&lt;"}, |
| 19215 | {'>', "&gt;"}, |
| 19216 | {'&', "&amp;"}, |
| 19217 | {'"', "&quot;"}, |
| 19218 | {0, nullptr} |
| 19219 | }; |
| 19220 | // clang-format on |
| 19221 | |
| 19222 | const char* Compiler::fgProcessEscapes(const char* nameIn, escapeMapping_t* map) |
| 19223 | { |
| 19224 | const char* nameOut = nameIn; |
| 19225 | unsigned lengthOut; |
| 19226 | unsigned index; |
| 19227 | bool match; |
| 19228 | bool substitutionRequired; |
| 19229 | const char* pChar; |
| 19230 | |
| 19231 | lengthOut = 1; |
| 19232 | substitutionRequired = false; |
| 19233 | pChar = nameIn; |
| 19234 | while (*pChar != '\0') |
| 19235 | { |
| 19236 | match = false; |
| 19237 | index = 0; |
| 19238 | while (map[index].ch != 0) |
| 19239 | { |
| 19240 | if (*pChar == map[index].ch) |
| 19241 | { |
| 19242 | match = true; |
| 19243 | break; |
| 19244 | } |
| 19245 | index++; |
| 19246 | } |
| 19247 | if (match) |
| 19248 | { |
| 19249 | substitutionRequired = true; |
| 19250 | lengthOut += (unsigned)strlen(map[index].sub); |
| 19251 | } |
| 19252 | else |
| 19253 | { |
| 19254 | lengthOut += 1; |
| 19255 | } |
| 19256 | pChar++; |
| 19257 | } |
| 19258 | |
| 19259 | if (substitutionRequired) |
| 19260 | { |
| 19261 | char* newName = getAllocator(CMK_DebugOnly).allocate<char>(lengthOut); |
| 19262 | char* pDest; |
| 19263 | pDest = newName; |
| 19264 | pChar = nameIn; |
| 19265 | while (*pChar != '\0') |
| 19266 | { |
| 19267 | match = false; |
| 19268 | index = 0; |
| 19269 | while (map[index].ch != 0) |
| 19270 | { |
| 19271 | if (*pChar == map[index].ch) |
| 19272 | { |
| 19273 | match = true; |
| 19274 | break; |
| 19275 | } |
| 19276 | index++; |
| 19277 | } |
| 19278 | if (match) |
| 19279 | { |
| 19280 | strcpy(pDest, map[index].sub); |
| 19281 | pDest += strlen(map[index].sub); |
| 19282 | } |
| 19283 | else |
| 19284 | { |
| 19285 | *pDest++ = *pChar; |
| 19286 | } |
| 19287 | pChar++; |
| 19288 | } |
| 19289 | *pDest++ = '\0'; |
| 19290 | nameOut = (const char*)newName; |
| 19291 | } |
| 19292 | |
| 19293 | return nameOut; |
| 19294 | } |
| 19295 | |
| 19296 | static void fprintfDouble(FILE* fgxFile, double value) |
| 19297 | { |
| 19298 | assert(value >= 0.0); |
| 19299 | |
| 19300 | if ((value >= 0.010) || (value == 0.0)) |
| 19301 | { |
| 19302 | fprintf(fgxFile, "\"%7.3f\"", value); |
| 19303 | } |
| 19304 | else if (value >= 0.00010) |
| 19305 | { |
| 19306 | fprintf(fgxFile, "\"%7.5f\"", value); |
| 19307 | } |
| 19308 | else |
| 19309 | { |
| 19310 | fprintf(fgxFile, "\"%7E\"", value); |
| 19311 | } |
| 19312 | } |
| 19313 | |
| 19314 | //------------------------------------------------------------------------ |
| 19315 | // fgOpenFlowGraphFile: Open a file to dump either the xml or dot format flow graph |
| 19316 | // |
| 19317 | // Arguments: |
| 19318 | // wbDontClose - A boolean out argument, set to true when the caller should not close the file (e.g., stdout/stderr) |
| 19319 | // phase - A phase identifier to indicate which phase is associated with the dump |
| 19320 | // type - A (wide) string indicating the type of dump, "dot" or "xml" |
| 19321 | // |
| 19322 | // Return Value: |
| 19323 | // Opens a file to which a flowgraph can be dumped, whose name is based on the current |
| 19324 | // config values. |
| 19325 | |
| 19326 | FILE* Compiler::fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, LPCWSTR type) |
| 19327 | { |
| 19328 | FILE* fgxFile; |
| 19329 | LPCWSTR pattern = nullptr; |
| 19330 | LPCWSTR filename = nullptr; |
| 19331 | LPCWSTR pathname = nullptr; |
| 19332 | const char* escapedString; |
| 19333 | bool createDuplicateFgxFiles = true; |
| 19334 | |
| 19335 | #ifdef DEBUG |
| 19336 | if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) |
| 19337 | { |
| 19338 | pattern = JitConfig.NgenDumpFg(); |
| 19339 | filename = JitConfig.NgenDumpFgFile(); |
| 19340 | pathname = JitConfig.NgenDumpFgDir(); |
| 19341 | } |
| 19342 | else |
| 19343 | { |
| 19344 | pattern = JitConfig.JitDumpFg(); |
| 19345 | filename = JitConfig.JitDumpFgFile(); |
| 19346 | pathname = JitConfig.JitDumpFgDir(); |
| 19347 | } |
| 19348 | #endif // DEBUG |
| 19349 | |
| 19350 | if (fgBBcount <= 1) |
| 19351 | { |
| 19352 | return nullptr; |
| 19353 | } |
| 19354 | |
| 19355 | if (pattern == nullptr) |
| 19356 | { |
| 19357 | return nullptr; |
| 19358 | } |
| 19359 | |
| 19360 | if (wcslen(pattern) == 0) |
| 19361 | { |
| 19362 | return nullptr; |
| 19363 | } |
| 19364 | |
| 19365 | LPCWSTR phasePattern = JitConfig.JitDumpFgPhase(); |
| 19366 | LPCWSTR phaseName = PhaseShortNames[phase]; |
| 19367 | if (phasePattern == nullptr) |
| 19368 | { |
| 19369 | if (phase != PHASE_DETERMINE_FIRST_COLD_BLOCK) |
| 19370 | { |
| 19371 | return nullptr; |
| 19372 | } |
| 19373 | } |
| 19374 | else if (*phasePattern != W('*')) |
| 19375 | { |
| 19376 | if (wcsstr(phasePattern, phaseName) == nullptr) |
| 19377 | { |
| 19378 | return nullptr; |
| 19379 | } |
| 19380 | } |
| 19381 | |
| 19382 | if (*pattern != W('*')) |
| 19383 | { |
| 19384 | bool hasColon = (wcschr(pattern, W(':')) != nullptr); |
| 19385 | |
| 19386 | if (hasColon) |
| 19387 | { |
| 19388 | const char* className = info.compClassName; |
| 19389 | if (*pattern == W('*')) |
| 19390 | { |
| 19391 | pattern++; |
| 19392 | } |
| 19393 | else |
| 19394 | { |
| 19395 | while ((*pattern != W(':')) && (*pattern != W('*'))) |
| 19396 | { |
| 19397 | if (*pattern != *className) |
| 19398 | { |
| 19399 | return nullptr; |
| 19400 | } |
| 19401 | |
| 19402 | pattern++; |
| 19403 | className++; |
| 19404 | } |
| 19405 | if (*pattern == W('*')) |
| 19406 | { |
| 19407 | pattern++; |
| 19408 | } |
| 19409 | else |
| 19410 | { |
| 19411 | if (*className != 0) |
| 19412 | { |
| 19413 | return nullptr; |
| 19414 | } |
| 19415 | } |
| 19416 | } |
| 19417 | if (*pattern != W(':')) |
| 19418 | { |
| 19419 | return nullptr; |
| 19420 | } |
| 19421 | |
| 19422 | pattern++; |
| 19423 | } |
| 19424 | |
| 19425 | const char* methodName = info.compMethodName; |
| 19426 | if (*pattern == W('*')) |
| 19427 | { |
| 19428 | pattern++; |
| 19429 | } |
| 19430 | else |
| 19431 | { |
| 19432 | while ((*pattern != 0) && (*pattern != W('*'))) |
| 19433 | { |
| 19434 | if (*pattern != *methodName) |
| 19435 | { |
| 19436 | return nullptr; |
| 19437 | } |
| 19438 | |
| 19439 | pattern++; |
| 19440 | methodName++; |
| 19441 | } |
| 19442 | if (*pattern == W('*')) |
| 19443 | { |
| 19444 | pattern++; |
| 19445 | } |
| 19446 | else |
| 19447 | { |
| 19448 | if (*methodName != 0) |
| 19449 | { |
| 19450 | return nullptr; |
| 19451 | } |
| 19452 | } |
| 19453 | } |
| 19454 | if (*pattern != 0) |
| 19455 | { |
| 19456 | return nullptr; |
| 19457 | } |
| 19458 | } |
| 19459 | |
| 19460 | if (filename == nullptr) |
| 19461 | { |
| 19462 | filename = W("default" ); |
| 19463 | } |
| 19464 | |
| 19465 | if (wcscmp(filename, W("profiled" )) == 0) |
| 19466 | { |
| 19467 | if (fgFirstBB->hasProfileWeight()) |
| 19468 | { |
| 19469 | createDuplicateFgxFiles = true; |
| 19470 | goto ONE_FILE_PER_METHOD; |
| 19471 | } |
| 19472 | else |
| 19473 | { |
| 19474 | return nullptr; |
| 19475 | } |
| 19476 | } |
| 19477 | if (wcscmp(filename, W("hot" )) == 0) |
| 19478 | { |
| 19479 | if (info.compMethodInfo->regionKind == CORINFO_REGION_HOT) |
| 19480 | |
| 19481 | { |
| 19482 | createDuplicateFgxFiles = true; |
| 19483 | goto ONE_FILE_PER_METHOD; |
| 19484 | } |
| 19485 | else |
| 19486 | { |
| 19487 | return nullptr; |
| 19488 | } |
| 19489 | } |
| 19490 | else if (wcscmp(filename, W("cold" )) == 0) |
| 19491 | { |
| 19492 | if (info.compMethodInfo->regionKind == CORINFO_REGION_COLD) |
| 19493 | { |
| 19494 | createDuplicateFgxFiles = true; |
| 19495 | goto ONE_FILE_PER_METHOD; |
| 19496 | } |
| 19497 | else |
| 19498 | { |
| 19499 | return nullptr; |
| 19500 | } |
| 19501 | } |
| 19502 | else if (wcscmp(filename, W("jit" )) == 0) |
| 19503 | { |
| 19504 | if (info.compMethodInfo->regionKind == CORINFO_REGION_JIT) |
| 19505 | { |
| 19506 | createDuplicateFgxFiles = true; |
| 19507 | goto ONE_FILE_PER_METHOD; |
| 19508 | } |
| 19509 | else |
| 19510 | { |
| 19511 | return nullptr; |
| 19512 | } |
| 19513 | } |
| 19514 | else if (wcscmp(filename, W("all" )) == 0) |
| 19515 | { |
| 19516 | createDuplicateFgxFiles = true; |
| 19517 | |
| 19518 | ONE_FILE_PER_METHOD:; |
| 19519 | |
| 19520 | escapedString = fgProcessEscapes(info.compFullName, s_EscapeFileMapping); |
| 19521 | size_t wCharCount = strlen(escapedString) + wcslen(phaseName) + 1 + strlen("~999" ) + wcslen(type) + 1; |
| 19522 | if (pathname != nullptr) |
| 19523 | { |
| 19524 | wCharCount += wcslen(pathname) + 1; |
| 19525 | } |
| 19526 | filename = (LPCWSTR)alloca(wCharCount * sizeof(WCHAR)); |
| 19527 | if (pathname != nullptr) |
| 19528 | { |
| 19529 | swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S-%s.%s" ), pathname, escapedString, phaseName, type); |
| 19530 | } |
| 19531 | else |
| 19532 | { |
| 19533 | swprintf_s((LPWSTR)filename, wCharCount, W("%S.%s" ), escapedString, type); |
| 19534 | } |
| 19535 | fgxFile = _wfopen(filename, W("r" )); // Check if this file already exists |
| 19536 | if (fgxFile != nullptr) |
| 19537 | { |
| 19538 | // For Generic methods we will have both hot and cold versions |
| 19539 | if (createDuplicateFgxFiles == false) |
| 19540 | { |
| 19541 | fclose(fgxFile); |
| 19542 | return nullptr; |
| 19543 | } |
| 19544 | // Yes, this filename already exists, so create a different one by appending ~2, ~3, etc... |
| 19545 | for (int i = 2; i < 1000; i++) |
| 19546 | { |
| 19547 | fclose(fgxFile); |
| 19548 | if (pathname != nullptr) |
| 19549 | { |
| 19550 | swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%S~%d.%s" ), pathname, escapedString, i, type); |
| 19551 | } |
| 19552 | else |
| 19553 | { |
| 19554 | swprintf_s((LPWSTR)filename, wCharCount, W("%S~%d.%s" ), escapedString, i, type); |
| 19555 | } |
| 19556 | fgxFile = _wfopen(filename, W("r" )); // Check if this file exists |
| 19557 | if (fgxFile == nullptr) |
| 19558 | { |
| 19559 | break; |
| 19560 | } |
| 19561 | } |
| 19562 | // If we have already created 1000 files with this name then just fail |
| 19563 | if (fgxFile != nullptr) |
| 19564 | { |
| 19565 | fclose(fgxFile); |
| 19566 | return nullptr; |
| 19567 | } |
| 19568 | } |
| 19569 | fgxFile = _wfopen(filename, W("a+" )); |
| 19570 | *wbDontClose = false; |
| 19571 | } |
| 19572 | else if (wcscmp(filename, W("stdout" )) == 0) |
| 19573 | { |
| 19574 | fgxFile = jitstdout; |
| 19575 | *wbDontClose = true; |
| 19576 | } |
| 19577 | else if (wcscmp(filename, W("stderr" )) == 0) |
| 19578 | { |
| 19579 | fgxFile = stderr; |
| 19580 | *wbDontClose = true; |
| 19581 | } |
| 19582 | else |
| 19583 | { |
| 19584 | LPCWSTR origFilename = filename; |
| 19585 | size_t wCharCount = wcslen(origFilename) + wcslen(type) + 2; |
| 19586 | if (pathname != nullptr) |
| 19587 | { |
| 19588 | wCharCount += wcslen(pathname) + 1; |
| 19589 | } |
| 19590 | filename = (LPCWSTR)alloca(wCharCount * sizeof(WCHAR)); |
| 19591 | if (pathname != nullptr) |
| 19592 | { |
| 19593 | swprintf_s((LPWSTR)filename, wCharCount, W("%s\\%s.%s" ), pathname, origFilename, type); |
| 19594 | } |
| 19595 | else |
| 19596 | { |
| 19597 | swprintf_s((LPWSTR)filename, wCharCount, W("%s.%s" ), origFilename, type); |
| 19598 | } |
| 19599 | fgxFile = _wfopen(filename, W("a+" )); |
| 19600 | *wbDontClose = false; |
| 19601 | } |
| 19602 | |
| 19603 | return fgxFile; |
| 19604 | } |
| 19605 | |
| 19606 | //------------------------------------------------------------------------ |
| 19607 | // fgDumpFlowGraph: Dump the xml or dot format flow graph, if enabled for this phase. |
| 19608 | // |
| 19609 | // Arguments: |
| 19610 | // phase - A phase identifier to indicate which phase is associated with the dump, |
| 19611 | // i.e. which phase has just completed. |
| 19612 | // |
| 19613 | // Return Value: |
| 19614 | // True iff a flowgraph has been dumped. |
| 19615 | // |
| 19616 | // Notes: |
| 19617 | // The xml dumps are the historical mechanism for dumping the flowgraph. |
| 19618 | // The dot format can be viewed by: |
| 19619 | // - Graphviz (http://www.graphviz.org/) |
| 19620 | // - The command "C:\Program Files (x86)\Graphviz2.38\bin\dot.exe" -Tsvg -oFoo.svg -Kdot Foo.dot |
| 19621 | // will produce a Foo.svg file that can be opened with any svg-capable browser (e.g. IE). |
| 19622 | // - http://rise4fun.com/Agl/ |
| 19623 | // - Cut and paste the graph from your .dot file, replacing the digraph on the page, and then click the play |
| 19624 | // button. |
| 19625 | // - It will show a rotating '/' and then render the graph in the browser. |
| 19626 | // MSAGL has also been open-sourced to https://github.com/Microsoft/automatic-graph-layout.git. |
| 19627 | // |
| 19628 | // Here are the config values that control it: |
| 19629 | // COMPlus_JitDumpFg A string (ala the COMPlus_JitDump string) indicating what methods to dump flowgraphs |
| 19630 | // for. |
| 19631 | // COMPlus_JitDumpFgDir A path to a directory into which the flowgraphs will be dumped. |
| 19632 | // COMPlus_JitDumpFgFile The filename to use. The default is "default.[xml|dot]". |
| 19633 | // Note that the new graphs will be appended to this file if it already exists. |
| 19634 | // COMPlus_JitDumpFgPhase Phase(s) after which to dump the flowgraph. |
| 19635 | // Set to the short name of a phase to see the flowgraph after that phase. |
| 19636 | // Leave unset to dump after COLD-BLK (determine first cold block) or set to * for all |
| 19637 | // phases. |
| 19638 | // COMPlus_JitDumpFgDot Set to non-zero to emit Dot instead of Xml Flowgraph dump. (Default is xml format.) |
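| | // For example (hypothetical settings, for illustration only): |
| | //     COMPlus_JitDumpFg=MyClass:MyMethod |
| | //     COMPlus_JitDumpFgDir=c:\fgdumps |
| | //     COMPlus_JitDumpFgDot=1 |
| | // appends a dot-format flowgraph for MyClass:MyMethod to c:\fgdumps\default.dot after the default |
| | // (COLD-BLK) phase; setting COMPlus_JitDumpFgFile=all would instead write one file per method. |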
| 19639 | |
| 19640 | bool Compiler::fgDumpFlowGraph(Phases phase) |
| 19641 | { |
| 19642 | bool result = false; |
| 19643 | bool dontClose = false; |
| 19644 | bool createDotFile = false; |
| 19645 | if (JitConfig.JitDumpFgDot()) |
| 19646 | { |
| 19647 | createDotFile = true; |
| 19648 | } |
| 19649 | |
| 19650 | FILE* fgxFile = fgOpenFlowGraphFile(&dontClose, phase, createDotFile ? W("dot" ) : W("fgx" )); |
| 19651 | |
| 19652 | if (fgxFile == nullptr) |
| 19653 | { |
| 19654 | return false; |
| 19655 | } |
| 19656 | bool validWeights = fgHaveValidEdgeWeights; |
| 19657 | unsigned calledCount = max(fgCalledCount, BB_UNITY_WEIGHT) / BB_UNITY_WEIGHT; |
| 19658 | double weightDivisor = (double)(calledCount * BB_UNITY_WEIGHT); |
| 19659 | const char* escapedString; |
| 19660 | const char* regionString = "NONE" ; |
| 19661 | |
| 19662 | if (info.compMethodInfo->regionKind == CORINFO_REGION_HOT) |
| 19663 | { |
| 19664 | regionString = "HOT" ; |
| 19665 | } |
| 19666 | else if (info.compMethodInfo->regionKind == CORINFO_REGION_COLD) |
| 19667 | { |
| 19668 | regionString = "COLD" ; |
| 19669 | } |
| 19670 | else if (info.compMethodInfo->regionKind == CORINFO_REGION_JIT) |
| 19671 | { |
| 19672 | regionString = "JIT" ; |
| 19673 | } |
| 19674 | |
| 19675 | if (createDotFile) |
| 19676 | { |
| 19677 | fprintf(fgxFile, "digraph %s\n{\n" , info.compMethodName); |
| 19678 | fprintf(fgxFile, "/* Method %d, after phase %s */" , Compiler::jitTotalMethodCompiled, PhaseNames[phase]); |
| 19679 | } |
| 19680 | else |
| 19681 | { |
| 19682 | fprintf(fgxFile, "<method" ); |
| 19683 | |
| 19684 | escapedString = fgProcessEscapes(info.compFullName, s_EscapeMapping); |
| 19685 | fprintf(fgxFile, "\n name=\"%s\"" , escapedString); |
| 19686 | |
| 19687 | escapedString = fgProcessEscapes(info.compClassName, s_EscapeMapping); |
| 19688 | fprintf(fgxFile, "\n className=\"%s\"" , escapedString); |
| 19689 | |
| 19690 | escapedString = fgProcessEscapes(info.compMethodName, s_EscapeMapping); |
| 19691 | fprintf(fgxFile, "\n methodName=\"%s\"" , escapedString); |
| 19692 | fprintf(fgxFile, "\n ngenRegion=\"%s\"" , regionString); |
| 19693 | |
| 19694 | fprintf(fgxFile, "\n bytesOfIL=\"%d\"" , info.compILCodeSize); |
| 19695 | fprintf(fgxFile, "\n localVarCount=\"%d\"" , lvaCount); |
| 19696 | |
| 19697 | if (fgHaveProfileData()) |
| 19698 | { |
| 19699 | fprintf(fgxFile, "\n calledCount=\"%d\"" , calledCount); |
| 19700 | fprintf(fgxFile, "\n profileData=\"true\"" ); |
| 19701 | } |
| 19702 | if (compHndBBtabCount > 0) |
| 19703 | { |
| 19704 | fprintf(fgxFile, "\n hasEHRegions=\"true\"" ); |
| 19705 | } |
| 19706 | if (fgHasLoops) |
| 19707 | { |
| 19708 | fprintf(fgxFile, "\n hasLoops=\"true\"" ); |
| 19709 | } |
| 19710 | if (validWeights) |
| 19711 | { |
| 19712 | fprintf(fgxFile, "\n validEdgeWeights=\"true\"" ); |
| 19713 | if (!fgSlopUsedInEdgeWeights && !fgRangeUsedInEdgeWeights) |
| 19714 | { |
| 19715 | fprintf(fgxFile, "\n exactEdgeWeights=\"true\"" ); |
| 19716 | } |
| 19717 | } |
| 19718 | if (fgFirstColdBlock != nullptr) |
| 19719 | { |
| 19720 | fprintf(fgxFile, "\n firstColdBlock=\"%d\"" , fgFirstColdBlock->bbNum); |
| 19721 | } |
| 19722 | |
| 19723 | fprintf(fgxFile, ">" ); |
| 19724 | |
| 19725 | fprintf(fgxFile, "\n <blocks" ); |
| 19726 | fprintf(fgxFile, "\n blockCount=\"%d\"" , fgBBcount); |
| 19727 | fprintf(fgxFile, ">" ); |
| 19728 | } |
| 19729 | |
| 19730 | static const char* kindImage[] = {"EHFINALLYRET" , "EHFILTERRET" , "EHCATCHRET" , "THROW" , "RETURN" , "NONE" , |
| 19731 | "ALWAYS" , "LEAVE" , "CALLFINALLY" , "COND" , "SWITCH" }; |
| 19732 | |
| 19733 | BasicBlock* block; |
| 19734 | unsigned blockOrdinal; |
| 19735 | for (block = fgFirstBB, blockOrdinal = 1; block != nullptr; block = block->bbNext, blockOrdinal++) |
| 19736 | { |
| 19737 | if (createDotFile) |
| 19738 | { |
| 19739 | // Add constraint edges to try to keep nodes ordered. |
| 19740 | // It seems to work best if these edges are all created first. |
| 19741 | switch (block->bbJumpKind) |
| 19742 | { |
| 19743 | case BBJ_COND: |
| 19744 | case BBJ_NONE: |
| 19745 | assert(block->bbNext != nullptr); |
| 19746 | fprintf(fgxFile, " " FMT_BB " -> " FMT_BB "\n" , block->bbNum, block->bbNext->bbNum); |
| 19747 | break; |
| 19748 | default: |
| 19749 | // These may or may not have an edge to the next block. |
| 19750 | // Add a transparent edge to keep nodes ordered. |
| 19751 | if (block->bbNext != nullptr) |
| 19752 | { |
| 19753 | fprintf(fgxFile, " " FMT_BB " -> " FMT_BB " [arrowtail=none,color=transparent]\n" , |
| 19754 | block->bbNum, block->bbNext->bbNum); |
| 19755 | } |
| 19756 | } |
| 19757 | } |
| 19758 | else |
| 19759 | { |
| 19760 | fprintf(fgxFile, "\n <block" ); |
| 19761 | fprintf(fgxFile, "\n id=\"%d\"" , block->bbNum); |
| 19762 | fprintf(fgxFile, "\n ordinal=\"%d\"" , blockOrdinal); |
| 19763 | fprintf(fgxFile, "\n jumpKind=\"%s\"" , kindImage[block->bbJumpKind]); |
| 19764 | if (block->hasTryIndex()) |
| 19765 | { |
| 19766 | fprintf(fgxFile, "\n inTry=\"%s\"" , "true" ); |
| 19767 | } |
| 19768 | if (block->hasHndIndex()) |
| 19769 | { |
| 19770 | fprintf(fgxFile, "\n inHandler=\"%s\"" , "true" ); |
| 19771 | } |
| 19772 | if ((fgFirstBB->hasProfileWeight()) && ((block->bbFlags & BBF_COLD) == 0)) |
| 19773 | { |
| 19774 | fprintf(fgxFile, "\n hot=\"true\"" ); |
| 19775 | } |
| 19776 | if (block->bbFlags & (BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY)) |
| 19777 | { |
| 19778 | fprintf(fgxFile, "\n callsNew=\"true\"" ); |
| 19779 | } |
| 19780 | if (block->bbFlags & BBF_LOOP_HEAD) |
| 19781 | { |
| 19782 | fprintf(fgxFile, "\n loopHead=\"true\"" ); |
| 19783 | } |
| 19784 | fprintf(fgxFile, "\n weight=" ); |
| 19785 | fprintfDouble(fgxFile, ((double)block->bbWeight) / weightDivisor); |
| 19786 | fprintf(fgxFile, "\n codeEstimate=\"%d\"" , fgGetCodeEstimate(block)); |
| 19787 | fprintf(fgxFile, "\n startOffset=\"%d\"" , block->bbCodeOffs); |
| 19788 | fprintf(fgxFile, "\n endOffset=\"%d\"" , block->bbCodeOffsEnd); |
| 19789 | fprintf(fgxFile, ">" ); |
| 19790 | fprintf(fgxFile, "\n </block>" ); |
| 19791 | } |
| 19792 | } |
| 19793 | |
| 19794 | if (!createDotFile) |
| 19795 | { |
| 19796 | fprintf(fgxFile, "\n </blocks>" ); |
| 19797 | |
| 19798 | fprintf(fgxFile, "\n <edges" ); |
| 19799 | fprintf(fgxFile, "\n edgeCount=\"%d\"" , fgEdgeCount); |
| 19800 | fprintf(fgxFile, ">" ); |
| 19801 | } |
| 19802 | |
| 19803 | unsigned edgeNum = 1; |
| 19804 | BasicBlock* bTarget; |
| 19805 | for (bTarget = fgFirstBB; bTarget != nullptr; bTarget = bTarget->bbNext) |
| 19806 | { |
| 19807 | double targetWeightDivisor; |
| 19808 | if (bTarget->bbWeight == BB_ZERO_WEIGHT) |
| 19809 | { |
| 19810 | targetWeightDivisor = 1.0; |
| 19811 | } |
| 19812 | else |
| 19813 | { |
| 19814 | targetWeightDivisor = (double)bTarget->bbWeight; |
| 19815 | } |
| 19816 | |
| 19817 | flowList* edge; |
| 19818 | for (edge = bTarget->bbPreds; edge != nullptr; edge = edge->flNext, edgeNum++) |
| 19819 | { |
| 19820 | BasicBlock* bSource = edge->flBlock; |
| 19821 | double sourceWeightDivisor; |
| 19822 | if (bSource->bbWeight == BB_ZERO_WEIGHT) |
| 19823 | { |
| 19824 | sourceWeightDivisor = 1.0; |
| 19825 | } |
| 19826 | else |
| 19827 | { |
| 19828 | sourceWeightDivisor = (double)bSource->bbWeight; |
| 19829 | } |
| 19830 | if (createDotFile) |
| 19831 | { |
| 19832 | // Don't duplicate the edges we added above. |
| 19833 | if ((bSource->bbNum == (bTarget->bbNum - 1)) && |
| 19834 | ((bSource->bbJumpKind == BBJ_NONE) || (bSource->bbJumpKind == BBJ_COND))) |
| 19835 | { |
| 19836 | continue; |
| 19837 | } |
| 19838 | fprintf(fgxFile, " " FMT_BB " -> " FMT_BB, bSource->bbNum, bTarget->bbNum); |
| 19839 | if ((bSource->bbNum > bTarget->bbNum)) |
| 19840 | { |
| 19841 | fprintf(fgxFile, "[arrowhead=normal,arrowtail=none,color=green]\n" ); |
| 19842 | } |
| 19843 | else |
| 19844 | { |
| 19845 | fprintf(fgxFile, "\n" ); |
| 19846 | } |
| 19847 | } |
| 19848 | else |
| 19849 | { |
| 19850 | fprintf(fgxFile, "\n <edge" ); |
| 19851 | fprintf(fgxFile, "\n id=\"%d\"" , edgeNum); |
| 19852 | fprintf(fgxFile, "\n source=\"%d\"" , bSource->bbNum); |
| 19853 | fprintf(fgxFile, "\n target=\"%d\"" , bTarget->bbNum); |
| 19854 | if (bSource->bbJumpKind == BBJ_SWITCH) |
| 19855 | { |
| 19856 | if (edge->flDupCount >= 2) |
| 19857 | { |
| 19858 | fprintf(fgxFile, "\n switchCases=\"%d\"" , edge->flDupCount); |
| 19859 | } |
| 19860 | if (bSource->bbJumpSwt->getDefault() == bTarget) |
| 19861 | { |
| 19862 | fprintf(fgxFile, "\n switchDefault=\"true\"" ); |
| 19863 | } |
| 19864 | } |
| 19865 | if (validWeights) |
| 19866 | { |
| 19867 | unsigned edgeWeight = (edge->flEdgeWeightMin + edge->flEdgeWeightMax) / 2; |
| 19868 | fprintf(fgxFile, "\n weight=" ); |
| 19869 | fprintfDouble(fgxFile, ((double)edgeWeight) / weightDivisor); |
| 19870 | |
| 19871 | if (edge->flEdgeWeightMin != edge->flEdgeWeightMax) |
| 19872 | { |
| 19873 | fprintf(fgxFile, "\n minWeight=" ); |
| 19874 | fprintfDouble(fgxFile, ((double)edge->flEdgeWeightMin) / weightDivisor); |
| 19875 | fprintf(fgxFile, "\n maxWeight=" ); |
| 19876 | fprintfDouble(fgxFile, ((double)edge->flEdgeWeightMax) / weightDivisor); |
| 19877 | } |
| 19878 | |
| 19879 | if (edgeWeight > 0) |
| 19880 | { |
| 19881 | if (edgeWeight < bSource->bbWeight) |
| 19882 | { |
| 19883 | fprintf(fgxFile, "\n out=" ); |
| 19884 | fprintfDouble(fgxFile, ((double)edgeWeight) / sourceWeightDivisor); |
| 19885 | } |
| 19886 | if (edgeWeight < bTarget->bbWeight) |
| 19887 | { |
| 19888 | fprintf(fgxFile, "\n in=" ); |
| 19889 | fprintfDouble(fgxFile, ((double)edgeWeight) / targetWeightDivisor); |
| 19890 | } |
| 19891 | } |
| 19892 | } |
| 19893 | } |
| 19894 | if (!createDotFile) |
| 19895 | { |
| 19896 | fprintf(fgxFile, ">" ); |
| 19897 | fprintf(fgxFile, "\n </edge>" ); |
| 19898 | } |
| 19899 | } |
| 19900 | } |
| 19901 | if (createDotFile) |
| 19902 | { |
| 19903 | fprintf(fgxFile, "}\n" ); |
| 19904 | } |
| 19905 | else |
| 19906 | { |
| 19907 | fprintf(fgxFile, "\n </edges>" ); |
| 19908 | fprintf(fgxFile, "\n</method>\n" ); |
| 19909 | } |
| 19910 | |
| 19911 | if (dontClose) |
| 19912 | { |
| 19913 | // fgxFile is jitstdout or stderr |
| 19914 | fprintf(fgxFile, "\n" ); |
| 19915 | } |
| 19916 | else |
| 19917 | { |
| 19918 | fclose(fgxFile); |
| 19919 | } |
| 19920 | |
| 19921 | return result; |
| 19922 | } |
| 19923 | |
| 19924 | #endif // DUMP_FLOWGRAPHS |
| 19925 | |
| 19926 | /*****************************************************************************/ |
| 19927 | #ifdef DEBUG |
| 19928 | |
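//------------------------------------------------------------------------
// fgDispReach: Dump the computed reachability sets.
//
// For each basic block, prints the set of blocks recorded in bbReach,
// i.e. the blocks that can reach it.
//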
| 19929 | void Compiler::fgDispReach() |
| 19930 | { |
| 19931 | printf("------------------------------------------------\n" ); |
| 19932 | printf("BBnum Reachable by \n" ); |
| 19933 | printf("------------------------------------------------\n" ); |
| 19934 | |
| 19935 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 19936 | { |
| 19937 | printf(FMT_BB " : " , block->bbNum); |
| 19938 | BlockSetOps::Iter iter(this, block->bbReach); |
| 19939 | unsigned bbNum = 0; |
| 19940 | while (iter.NextElem(&bbNum)) |
| 19941 | { |
| 19942 | printf(FMT_BB " " , bbNum); |
| 19943 | } |
| 19944 | printf("\n" ); |
| 19945 | } |
| 19946 | } |
| 19947 | |
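//------------------------------------------------------------------------
// fgDispDoms: Dump the computed dominator information.
//
// For each basic block (in reverse post-order), prints the chain of blocks
// that dominate it, obtained by walking the bbIDom links up to the root.
// Skipped for methods with more than 256 basic blocks.
//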
| 19948 | void Compiler::fgDispDoms() |
| 19949 | { |
| 19950 | // Don't bother printing this when we have a large number of BasicBlocks in the method |
| 19951 | if (fgBBcount > 256) |
| 19952 | { |
| 19953 | return; |
| 19954 | } |
| 19955 | |
| 19956 | printf("------------------------------------------------\n" ); |
| 19957 | printf("BBnum Dominated by\n" ); |
| 19958 | printf("------------------------------------------------\n" ); |
| 19959 | |
| 19960 | for (unsigned i = 1; i <= fgBBNumMax; ++i) |
| 19961 | { |
| 19962 | BasicBlock* current = fgBBInvPostOrder[i]; |
| 19963 | printf(FMT_BB ": " , current->bbNum); |
| 19964 | while (current != current->bbIDom) |
| 19965 | { |
| 19966 | printf(FMT_BB " " , current->bbNum); |
| 19967 | current = current->bbIDom; |
| 19968 | } |
| 19969 | printf("\n" ); |
| 19970 | } |
| 19971 | } |
| 19972 | |
| 19973 | /*****************************************************************************/ |
| 19974 | |
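//------------------------------------------------------------------------
// fgTableDispBasicBlock: Print a single row of the basic block table dump.
//
// Arguments:
//    block       - The block to display.
//    ibcColWidth - Width of the optional IBC weight column, or 0 if there is no such column
//                  (the width includes one character for a leading space).
//
// Notes:
//    Displays the block's ref count, EH try/handler indices, predecessor list, weight,
//    optional IBC weight, IL range, jump kind and target, EH region markers, and flags.
//    Used by fgDispBasicBlocks().
//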
| 19975 | void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 */) |
| 19976 | { |
| 19977 | const unsigned __int64 flags = block->bbFlags; |
| 19978 | unsigned bbNumMax = compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : fgBBNumMax; |
| 19979 | int maxBlockNumWidth = CountDigits(bbNumMax); |
| 19980 | maxBlockNumWidth = max(maxBlockNumWidth, 2); |
| 19981 | int blockNumWidth = CountDigits(block->bbNum); |
| 19982 | blockNumWidth = max(blockNumWidth, 2); |
| 19983 | int blockNumPadding = maxBlockNumWidth - blockNumWidth; |
| 19984 | |
| 19985 | printf("%s %2u" , block->dspToString(blockNumPadding), block->bbRefs); |
| 19986 | |
| 19987 | // |
| 19988 | // Display EH 'try' region index |
| 19989 | // |
| 19990 | |
| 19991 | if (block->hasTryIndex()) |
| 19992 | { |
| 19993 | printf(" %2u" , block->getTryIndex()); |
| 19994 | } |
| 19995 | else |
| 19996 | { |
| 19997 | printf(" " ); |
| 19998 | } |
| 19999 | |
| 20000 | // |
| 20001 | // Display EH handler region index |
| 20002 | // |
| 20003 | |
| 20004 | if (block->hasHndIndex()) |
| 20005 | { |
| 20006 | printf(" %2u" , block->getHndIndex()); |
| 20007 | } |
| 20008 | else |
| 20009 | { |
| 20010 | printf(" " ); |
| 20011 | } |
| 20012 | |
| 20013 | printf(" " ); |
| 20014 | |
| 20015 | // |
| 20016 | // Display block predecessor list |
| 20017 | // |
| 20018 | |
| 20019 | unsigned charCnt; |
| 20020 | if (fgCheapPredsValid) |
| 20021 | { |
| 20022 | charCnt = block->dspCheapPreds(); |
| 20023 | } |
| 20024 | else |
| 20025 | { |
| 20026 | charCnt = block->dspPreds(); |
| 20027 | } |
| 20028 | |
| 20029 | if (charCnt < 19) |
| 20030 | { |
| 20031 | printf("%*s" , 19 - charCnt, "" ); |
| 20032 | } |
| 20033 | |
| 20034 | printf(" " ); |
| 20035 | |
| 20036 | // |
| 20037 | // Display block weight |
| 20038 | // |
| 20039 | |
| 20040 | if (block->isMaxBBWeight()) |
| 20041 | { |
| 20042 | printf(" MAX " ); |
| 20043 | } |
| 20044 | else |
| 20045 | { |
| 20046 | BasicBlock::weight_t weight = block->getBBWeight(this); |
| 20047 | |
| 20048 | if (weight > 99999) // Is it going to be more than 6 characters? |
| 20049 | { |
| 20050 | if (weight <= 99999 * BB_UNITY_WEIGHT) |
| 20051 | { |
| 20052 | // print weight in this format ddddd. |
| 20053 | printf("%5u." , (weight + (BB_UNITY_WEIGHT / 2)) / BB_UNITY_WEIGHT); |
| 20054 | } |
else // print weight in terms of k (e.g. 156k)
| 20056 | { |
| 20057 | // print weight in this format dddddk |
| 20058 | BasicBlock::weight_t weightK = weight / 1000; |
| 20059 | printf("%5uk" , (weightK + (BB_UNITY_WEIGHT / 2)) / BB_UNITY_WEIGHT); |
| 20060 | } |
| 20061 | } |
| 20062 | else // print weight in this format ddd.dd |
| 20063 | { |
| 20064 | printf("%6s" , refCntWtd2str(weight)); |
| 20065 | } |
| 20066 | } |
| 20067 | printf(" " ); |
| 20068 | |
| 20069 | // |
| 20070 | // Display optional IBC weight column. |
// Note that ibcColWidth includes one character for a leading space, if there is an IBC column.
| 20072 | // |
| 20073 | |
| 20074 | if (ibcColWidth > 0) |
| 20075 | { |
| 20076 | if (block->hasProfileWeight()) |
| 20077 | { |
| 20078 | printf("%*u" , ibcColWidth, block->bbWeight); |
| 20079 | } |
| 20080 | else |
| 20081 | { |
| 20082 | // No IBC data. Just print spaces to align the column. |
| 20083 | printf("%*s" , ibcColWidth, "" ); |
| 20084 | } |
| 20085 | } |
| 20086 | |
| 20087 | printf(" " ); |
| 20088 | |
| 20089 | // |
| 20090 | // Display block IL range |
| 20091 | // |
| 20092 | |
| 20093 | block->dspBlockILRange(); |
| 20094 | |
| 20095 | // |
| 20096 | // Display block branch target |
| 20097 | // |
| 20098 | |
| 20099 | if (flags & BBF_REMOVED) |
| 20100 | { |
| 20101 | printf("[removed] " ); |
| 20102 | } |
| 20103 | else |
| 20104 | { |
| 20105 | switch (block->bbJumpKind) |
| 20106 | { |
| 20107 | case BBJ_COND: |
| 20108 | printf("-> " FMT_BB "%*s ( cond )" , block->bbJumpDest->bbNum, |
| 20109 | maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), "" ); |
| 20110 | break; |
| 20111 | |
| 20112 | case BBJ_CALLFINALLY: |
| 20113 | printf("-> " FMT_BB "%*s (callf )" , block->bbJumpDest->bbNum, |
| 20114 | maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), "" ); |
| 20115 | break; |
| 20116 | |
| 20117 | case BBJ_ALWAYS: |
| 20118 | if (flags & BBF_KEEP_BBJ_ALWAYS) |
| 20119 | { |
| 20120 | printf("-> " FMT_BB "%*s (ALWAYS)" , block->bbJumpDest->bbNum, |
| 20121 | maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), "" ); |
| 20122 | } |
| 20123 | else |
| 20124 | { |
| 20125 | printf("-> " FMT_BB "%*s (always)" , block->bbJumpDest->bbNum, |
| 20126 | maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), "" ); |
| 20127 | } |
| 20128 | break; |
| 20129 | |
| 20130 | case BBJ_LEAVE: |
| 20131 | printf("-> " FMT_BB "%*s (leave )" , block->bbJumpDest->bbNum, |
| 20132 | maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), "" ); |
| 20133 | break; |
| 20134 | |
| 20135 | case BBJ_EHFINALLYRET: |
| 20136 | printf("%*s (finret)" , maxBlockNumWidth - 2, "" ); |
| 20137 | break; |
| 20138 | |
| 20139 | case BBJ_EHFILTERRET: |
| 20140 | printf("%*s (fltret)" , maxBlockNumWidth - 2, "" ); |
| 20141 | break; |
| 20142 | |
| 20143 | case BBJ_EHCATCHRET: |
| 20144 | printf("-> " FMT_BB "%*s ( cret )" , block->bbJumpDest->bbNum, |
| 20145 | maxBlockNumWidth - max(CountDigits(block->bbJumpDest->bbNum), 2), "" ); |
| 20146 | break; |
| 20147 | |
| 20148 | case BBJ_THROW: |
| 20149 | printf("%*s (throw )" , maxBlockNumWidth - 2, "" ); |
| 20150 | break; |
| 20151 | |
| 20152 | case BBJ_RETURN: |
| 20153 | printf("%*s (return)" , maxBlockNumWidth - 2, "" ); |
| 20154 | break; |
| 20155 | |
| 20156 | default: |
| 20157 | printf("%*s " , maxBlockNumWidth - 2, "" ); |
| 20158 | break; |
| 20159 | |
| 20160 | case BBJ_SWITCH: |
| 20161 | printf("->" ); |
| 20162 | |
| 20163 | unsigned jumpCnt; |
| 20164 | jumpCnt = block->bbJumpSwt->bbsCount; |
| 20165 | BasicBlock** jumpTab; |
| 20166 | jumpTab = block->bbJumpSwt->bbsDstTab; |
| 20167 | int switchWidth; |
| 20168 | switchWidth = 0; |
| 20169 | do |
| 20170 | { |
| 20171 | printf("%c" FMT_BB, (jumpTab == block->bbJumpSwt->bbsDstTab) ? ' ' : ',', (*jumpTab)->bbNum); |
| 20172 | switchWidth += 1 /* space/comma */ + 2 /* BB */ + max(CountDigits((*jumpTab)->bbNum), 2); |
| 20173 | } while (++jumpTab, --jumpCnt); |
| 20174 | |
| 20175 | if (switchWidth < 7) |
| 20176 | { |
| 20177 | printf("%*s" , 8 - switchWidth, "" ); |
| 20178 | } |
| 20179 | |
| 20180 | printf(" (switch)" ); |
| 20181 | break; |
| 20182 | } |
| 20183 | } |
| 20184 | |
| 20185 | printf(" " ); |
| 20186 | |
| 20187 | // |
| 20188 | // Display block EH region and type, including nesting indicator |
| 20189 | // |
| 20190 | |
| 20191 | if (block->hasTryIndex()) |
| 20192 | { |
| 20193 | printf("T%d " , block->getTryIndex()); |
| 20194 | } |
| 20195 | else |
| 20196 | { |
| 20197 | printf(" " ); |
| 20198 | } |
| 20199 | |
| 20200 | if (block->hasHndIndex()) |
| 20201 | { |
| 20202 | printf("H%d " , block->getHndIndex()); |
| 20203 | } |
| 20204 | else |
| 20205 | { |
| 20206 | printf(" " ); |
| 20207 | } |
| 20208 | |
| 20209 | if (flags & BBF_FUNCLET_BEG) |
| 20210 | { |
| 20211 | printf("F " ); |
| 20212 | } |
| 20213 | else |
| 20214 | { |
| 20215 | printf(" " ); |
| 20216 | } |
| 20217 | |
| 20218 | int cnt = 0; |
| 20219 | |
| 20220 | switch (block->bbCatchTyp) |
| 20221 | { |
| 20222 | case BBCT_NONE: |
| 20223 | break; |
| 20224 | case BBCT_FAULT: |
| 20225 | printf("fault " ); |
| 20226 | cnt += 6; |
| 20227 | break; |
| 20228 | case BBCT_FINALLY: |
| 20229 | printf("finally " ); |
| 20230 | cnt += 8; |
| 20231 | break; |
| 20232 | case BBCT_FILTER: |
| 20233 | printf("filter " ); |
| 20234 | cnt += 7; |
| 20235 | break; |
| 20236 | case BBCT_FILTER_HANDLER: |
| 20237 | printf("filtHnd " ); |
| 20238 | cnt += 8; |
| 20239 | break; |
| 20240 | default: |
| 20241 | printf("catch " ); |
| 20242 | cnt += 6; |
| 20243 | break; |
| 20244 | } |
| 20245 | |
| 20246 | if (block->bbCatchTyp != BBCT_NONE) |
| 20247 | { |
| 20248 | cnt += 2; |
| 20249 | printf("{ " ); |
| 20250 | /* brace matching editor workaround to compensate for the preceding line: } */ |
| 20251 | } |
| 20252 | |
| 20253 | if (flags & BBF_TRY_BEG) |
| 20254 | { |
| 20255 | // Output a brace for every try region that this block opens |
| 20256 | |
| 20257 | EHblkDsc* HBtab; |
| 20258 | EHblkDsc* HBtabEnd; |
| 20259 | |
| 20260 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++) |
| 20261 | { |
| 20262 | if (HBtab->ebdTryBeg == block) |
| 20263 | { |
| 20264 | cnt += 6; |
| 20265 | printf("try { " ); |
| 20266 | /* brace matching editor workaround to compensate for the preceding line: } */ |
| 20267 | } |
| 20268 | } |
| 20269 | } |
| 20270 | |
| 20271 | EHblkDsc* HBtab; |
| 20272 | EHblkDsc* HBtabEnd; |
| 20273 | |
| 20274 | for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++) |
| 20275 | { |
| 20276 | if (HBtab->ebdTryLast == block) |
| 20277 | { |
| 20278 | cnt += 2; |
| 20279 | /* brace matching editor workaround to compensate for the following line: { */ |
| 20280 | printf("} " ); |
| 20281 | } |
| 20282 | if (HBtab->ebdHndLast == block) |
| 20283 | { |
| 20284 | cnt += 2; |
| 20285 | /* brace matching editor workaround to compensate for the following line: { */ |
| 20286 | printf("} " ); |
| 20287 | } |
| 20288 | if (HBtab->HasFilter() && block->bbNext == HBtab->ebdHndBeg) |
| 20289 | { |
| 20290 | cnt += 2; |
| 20291 | /* brace matching editor workaround to compensate for the following line: { */ |
| 20292 | printf("} " ); |
| 20293 | } |
| 20294 | } |
| 20295 | |
| 20296 | while (cnt < 12) |
| 20297 | { |
| 20298 | cnt++; |
| 20299 | printf(" " ); |
| 20300 | } |
| 20301 | |
| 20302 | // |
| 20303 | // Display block flags |
| 20304 | // |
| 20305 | |
| 20306 | block->dspFlags(); |
| 20307 | |
| 20308 | printf("\n" ); |
| 20309 | } |
| 20310 | |
| 20311 | /**************************************************************************** |
| 20312 | Dump blocks from firstBlock to lastBlock. |
| 20313 | */ |
| 20314 | |
| 20315 | void Compiler::fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees) |
| 20316 | { |
| 20317 | BasicBlock* block; |
| 20318 | |
| 20319 | // If any block has IBC data, we add an "IBC weight" column just before the 'IL range' column. This column is as |
| 20320 | // wide as necessary to accommodate all the various IBC weights. It's at least 4 characters wide, to accommodate |
| 20321 | // the "IBC" title and leading space. |
| 20322 | int ibcColWidth = 0; |
| 20323 | for (block = firstBlock; block != nullptr; block = block->bbNext) |
| 20324 | { |
| 20325 | if (block->hasProfileWeight()) |
| 20326 | { |
| 20327 | int thisIbcWidth = CountDigits(block->bbWeight); |
| 20328 | ibcColWidth = max(ibcColWidth, thisIbcWidth); |
| 20329 | } |
| 20330 | |
| 20331 | if (block == lastBlock) |
| 20332 | { |
| 20333 | break; |
| 20334 | } |
| 20335 | } |
| 20336 | if (ibcColWidth > 0) |
| 20337 | { |
| 20338 | ibcColWidth = max(ibcColWidth, 3) + 1; // + 1 for the leading space |
| 20339 | } |
| 20340 | |
| 20341 | unsigned bbNumMax = compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : fgBBNumMax; |
| 20342 | int maxBlockNumWidth = CountDigits(bbNumMax); |
| 20343 | maxBlockNumWidth = max(maxBlockNumWidth, 2); |
| 20344 | int padWidth = maxBlockNumWidth - 2; // Account for functions with a large number of blocks. |
| 20345 | |
| 20346 | // clang-format off |
| 20347 | |
| 20348 | printf("\n" ); |
| 20349 | printf("------%*s-------------------------------------%*s-----------------------%*s----------------------------------------\n" , |
| 20350 | padWidth, "------------" , |
| 20351 | ibcColWidth, "------------" , |
| 20352 | maxBlockNumWidth, "----" ); |
| 20353 | printf("BBnum %*sBBid ref try hnd %s weight %*s%s [IL range] [jump]%*s [EH region] [flags]\n" , |
| 20354 | padWidth, "" , |
| 20355 | fgCheapPredsValid ? "cheap preds" : |
| 20356 | (fgComputePredsDone ? "preds " |
| 20357 | : " " ), |
| 20358 | ((ibcColWidth > 0) ? ibcColWidth - 3 : 0), "" , // Subtract 3 for the width of "IBC", printed next. |
| 20359 | ((ibcColWidth > 0) ? "IBC" |
| 20360 | : "" ), |
| 20361 | maxBlockNumWidth, "" |
| 20362 | ); |
| 20363 | printf("------%*s-------------------------------------%*s-----------------------%*s----------------------------------------\n" , |
| 20364 | padWidth, "------------" , |
| 20365 | ibcColWidth, "------------" , |
| 20366 | maxBlockNumWidth, "----" ); |
| 20367 | |
| 20368 | // clang-format on |
| 20369 | |
| 20370 | for (block = firstBlock; block; block = block->bbNext) |
| 20371 | { |
| 20372 | // First, do some checking on the bbPrev links |
| 20373 | if (block->bbPrev) |
| 20374 | { |
| 20375 | if (block->bbPrev->bbNext != block) |
| 20376 | { |
| 20377 | printf("bad prev link\n" ); |
| 20378 | } |
| 20379 | } |
| 20380 | else if (block != fgFirstBB) |
| 20381 | { |
| 20382 | printf("bad prev link!\n" ); |
| 20383 | } |
| 20384 | |
| 20385 | if (block == fgFirstColdBlock) |
| 20386 | { |
| 20387 | printf("~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~%*s~~~~~~~~~~~~~~~~~~~~~~~~" |
| 20388 | "~~~~~~~~~~~~~~~~\n" , |
| 20389 | padWidth, "~~~~~~~~~~~~" , ibcColWidth, "~~~~~~~~~~~~" , maxBlockNumWidth, "~~~~" ); |
| 20390 | } |
| 20391 | |
| 20392 | #if FEATURE_EH_FUNCLETS |
| 20393 | if (block == fgFirstFuncletBB) |
| 20394 | { |
| 20395 | printf("++++++%*s+++++++++++++++++++++++++++++++++++++%*s+++++++++++++++++++++++%*s++++++++++++++++++++++++" |
| 20396 | "++++++++++++++++ funclets follow\n" , |
| 20397 | padWidth, "++++++++++++" , ibcColWidth, "++++++++++++" , maxBlockNumWidth, "++++" ); |
| 20398 | } |
| 20399 | #endif // FEATURE_EH_FUNCLETS |
| 20400 | |
| 20401 | fgTableDispBasicBlock(block, ibcColWidth); |
| 20402 | |
| 20403 | if (block == lastBlock) |
| 20404 | { |
| 20405 | break; |
| 20406 | } |
| 20407 | } |
| 20408 | |
| 20409 | printf("------%*s-------------------------------------%*s-----------------------%*s--------------------------------" |
| 20410 | "--------\n" , |
| 20411 | padWidth, "------------" , ibcColWidth, "------------" , maxBlockNumWidth, "----" ); |
| 20412 | |
| 20413 | if (dumpTrees) |
| 20414 | { |
| 20415 | fgDumpTrees(firstBlock, lastBlock); |
| 20416 | } |
| 20417 | } |
| 20418 | |
| 20419 | /*****************************************************************************/ |
| 20420 | |
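// Dump the entire basic block list, optionally including the trees in each block.
//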
| 20421 | void Compiler::fgDispBasicBlocks(bool dumpTrees) |
| 20422 | { |
| 20423 | fgDispBasicBlocks(fgFirstBB, nullptr, dumpTrees); |
| 20424 | } |
| 20425 | |
| 20426 | /*****************************************************************************/ |
| 20427 | // Increment the stmtNum and dump the tree using gtDispTree |
| 20428 | // |
| 20429 | void Compiler::fgDumpStmtTree(GenTree* stmt, unsigned bbNum) |
| 20430 | { |
| 20431 | compCurStmtNum++; // Increment the current stmtNum |
| 20432 | |
| 20433 | printf("\n***** " FMT_BB ", stmt %d\n" , bbNum, compCurStmtNum); |
| 20434 | |
| 20435 | if (fgOrder == FGOrderLinear || opts.compDbgInfo) |
| 20436 | { |
| 20437 | gtDispTree(stmt); |
| 20438 | } |
| 20439 | else |
| 20440 | { |
| 20441 | gtDispTree(stmt->gtStmt.gtStmtExpr); |
| 20442 | } |
| 20443 | } |
| 20444 | |
| 20445 | //------------------------------------------------------------------------ |
| 20446 | // Compiler::fgDumpBlock: dumps the contents of the given block to stdout. |
| 20447 | // |
| 20448 | // Arguments: |
| 20449 | // block - The block to dump. |
| 20450 | // |
| 20451 | void Compiler::fgDumpBlock(BasicBlock* block) |
| 20452 | { |
| 20453 | printf("\n------------ " ); |
| 20454 | block->dspBlockHeader(this); |
| 20455 | |
| 20456 | if (!block->IsLIR()) |
| 20457 | { |
| 20458 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 20459 | { |
| 20460 | fgDumpStmtTree(stmt, block->bbNum); |
| 20461 | if (stmt == block->bbTreeList) |
| 20462 | { |
| 20463 | block->bbStmtNum = compCurStmtNum; // Set the block->bbStmtNum |
| 20464 | } |
| 20465 | } |
| 20466 | } |
| 20467 | else |
| 20468 | { |
| 20469 | gtDispRange(LIR::AsRange(block)); |
| 20470 | } |
| 20471 | } |
| 20472 | |
| 20473 | /*****************************************************************************/ |
// Walk the BasicBlock list from firstBlock to lastBlock, calling fgDumpBlock on each block
// (which in turn dumps each statement via fgDumpStmtTree)
| 20475 | // |
| 20476 | void Compiler::fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock) |
| 20477 | { |
| 20478 | compCurStmtNum = 0; // Reset the current stmtNum |
| 20479 | |
| 20480 | /* Walk the basic blocks */ |
| 20481 | |
| 20482 | // Note that typically we have already called fgDispBasicBlocks() |
| 20483 | // so we don't need to print the preds and succs again here |
| 20484 | // |
| 20485 | for (BasicBlock* block = firstBlock; block; block = block->bbNext) |
| 20486 | { |
| 20487 | fgDumpBlock(block); |
| 20488 | |
| 20489 | if (block == lastBlock) |
| 20490 | { |
| 20491 | break; |
| 20492 | } |
| 20493 | } |
| 20494 | printf("\n---------------------------------------------------------------------------------------------------------" |
| 20495 | "----------\n" ); |
| 20496 | } |
| 20497 | |
| 20498 | /***************************************************************************** |
| 20499 | * Try to create as many candidates for GTF_MUL_64RSLT as possible. |
| 20500 | * We convert 'intOp1*intOp2' into 'int(long(nop(intOp1))*long(intOp2))'. |
| 20501 | */ |
| 20502 | |
| 20503 | /* static */ |
| 20504 | Compiler::fgWalkResult Compiler::fgStress64RsltMulCB(GenTree** pTree, fgWalkData* data) |
| 20505 | { |
| 20506 | GenTree* tree = *pTree; |
| 20507 | Compiler* pComp = data->compiler; |
| 20508 | |
| 20509 | if (tree->gtOper != GT_MUL || tree->gtType != TYP_INT || (tree->gtOverflow())) |
| 20510 | { |
| 20511 | return WALK_CONTINUE; |
| 20512 | } |
| 20513 | |
| 20514 | #ifdef DEBUG |
| 20515 | if (pComp->verbose) |
| 20516 | { |
| 20517 | printf("STRESS_64RSLT_MUL before:\n" ); |
| 20518 | pComp->gtDispTree(tree); |
| 20519 | } |
| 20520 | #endif // DEBUG |
| 20521 | |
// Insert casts (and a NOP) so that optNarrowTree() doesn't fold the tree back to its original form.
| 20523 | tree->gtOp.gtOp1 = pComp->gtNewCastNode(TYP_LONG, tree->gtOp.gtOp1, false, TYP_LONG); |
| 20524 | tree->gtOp.gtOp1 = pComp->gtNewOperNode(GT_NOP, TYP_LONG, tree->gtOp.gtOp1); |
| 20525 | tree->gtOp.gtOp1 = pComp->gtNewCastNode(TYP_LONG, tree->gtOp.gtOp1, false, TYP_LONG); |
| 20526 | tree->gtOp.gtOp2 = pComp->gtNewCastNode(TYP_LONG, tree->gtOp.gtOp2, false, TYP_LONG); |
| 20527 | tree->gtType = TYP_LONG; |
| 20528 | *pTree = pComp->gtNewCastNode(TYP_INT, tree, false, TYP_INT); |
| 20529 | |
| 20530 | #ifdef DEBUG |
| 20531 | if (pComp->verbose) |
| 20532 | { |
| 20533 | printf("STRESS_64RSLT_MUL after:\n" ); |
| 20534 | pComp->gtDispTree(*pTree); |
| 20535 | } |
| 20536 | #endif // DEBUG |
| 20537 | |
| 20538 | return WALK_SKIP_SUBTREES; |
| 20539 | } |
| 20540 | |
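//------------------------------------------------------------------------
// fgStress64RsltMul: Under the STRESS_64RSLT_MUL stress mode, walk all trees
// and apply fgStress64RsltMulCB to create GTF_MUL_64RSLT candidates.
//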
| 20541 | void Compiler::fgStress64RsltMul() |
| 20542 | { |
| 20543 | if (!compStressCompile(STRESS_64RSLT_MUL, 20)) |
| 20544 | { |
| 20545 | return; |
| 20546 | } |
| 20547 | |
| 20548 | fgWalkAllTreesPre(fgStress64RsltMulCB, (void*)this); |
| 20549 | } |
| 20550 | |
| 20551 | // BBPredsChecker checks jumps from the block's predecessors to the block. |
| 20552 | class BBPredsChecker |
| 20553 | { |
| 20554 | public: |
| 20555 | BBPredsChecker(Compiler* compiler) : comp(compiler) |
| 20556 | { |
| 20557 | } |
| 20558 | |
| 20559 | unsigned CheckBBPreds(BasicBlock* block, unsigned curTraversalStamp); |
| 20560 | |
| 20561 | private: |
| 20562 | bool CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehTryDsc); |
| 20563 | bool CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehHndlDsc); |
| 20564 | bool CheckJump(BasicBlock* blockPred, BasicBlock* block); |
| 20565 | bool CheckEHFinalyRet(BasicBlock* blockPred, BasicBlock* block); |
| 20566 | |
| 20567 | private: |
| 20568 | Compiler* comp; |
| 20569 | }; |
| 20570 | |
| 20571 | //------------------------------------------------------------------------ |
| 20572 | // CheckBBPreds: Check basic block predecessors list. |
| 20573 | // |
| 20574 | // Notes: |
| 20575 | // This DEBUG routine checks that all predecessors have the correct traversal stamp |
| 20576 | // and have correct jumps to the block. |
// It calculates the number of incoming edges from internal blocks only,
// i.e. it does not count the global incoming edge into the first block.
| 20579 | // |
| 20580 | // Arguments: |
| 20581 | // block - the block to process; |
| 20582 | // curTraversalStamp - current traversal stamp to distinguish different iterations. |
| 20583 | // |
| 20584 | // Return value: |
| 20585 | // the number of incoming edges for the block. |
| 20586 | unsigned BBPredsChecker::CheckBBPreds(BasicBlock* block, unsigned curTraversalStamp) |
| 20587 | { |
| 20588 | if (comp->fgCheapPredsValid) |
| 20589 | { |
| 20590 | return 0; |
| 20591 | } |
| 20592 | |
| 20593 | if (!comp->fgComputePredsDone) |
| 20594 | { |
| 20595 | assert(block->bbPreds == nullptr); |
| 20596 | return 0; |
| 20597 | } |
| 20598 | |
| 20599 | unsigned blockRefs = 0; |
| 20600 | for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext) |
| 20601 | { |
| 20602 | blockRefs += pred->flDupCount; |
| 20603 | |
| 20604 | BasicBlock* blockPred = pred->flBlock; |
| 20605 | |
| 20606 | // Make sure this pred is part of the BB list. |
| 20607 | assert(blockPred->bbTraversalStamp == curTraversalStamp); |
| 20608 | |
| 20609 | EHblkDsc* ehTryDsc = comp->ehGetBlockTryDsc(block); |
| 20610 | if (ehTryDsc != nullptr) |
| 20611 | { |
| 20612 | assert(CheckEhTryDsc(block, blockPred, ehTryDsc)); |
| 20613 | } |
| 20614 | |
| 20615 | EHblkDsc* ehHndDsc = comp->ehGetBlockHndDsc(block); |
| 20616 | if (ehHndDsc != nullptr) |
| 20617 | { |
| 20618 | assert(CheckEhHndDsc(block, blockPred, ehHndDsc)); |
| 20619 | } |
| 20620 | |
| 20621 | assert(CheckJump(blockPred, block)); |
| 20622 | } |
| 20623 | return blockRefs; |
| 20624 | } |
| 20625 | |
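//------------------------------------------------------------------------
// CheckEhTryDsc: Check that the edge from blockPred into 'try' region member 'block' is legal.
//
// Arguments:
//    block     - the block inside the 'try' region;
//    blockPred - the predecessor jumping to 'block';
//    ehTryDsc  - the EH descriptor of the 'try' region containing 'block'.
//
// Return Value:
//    true if the edge is one of the allowed kinds (a jump to the start of the 'try', a jump
//    within the same 'try' region, a jump back from the corresponding catch handler, or the
//    BBJ_EHFINALLYRET -> BBJ_ALWAYS "return from finally" case); otherwise asserts and returns false.
//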
| 20626 | bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehTryDsc) |
| 20627 | { |
| 20628 | // You can jump to the start of a try |
| 20629 | if (ehTryDsc->ebdTryBeg == block) |
| 20630 | { |
| 20631 | return true; |
| 20632 | } |
| 20633 | |
| 20634 | // You can jump within the same try region |
| 20635 | if (comp->bbInTryRegions(block->getTryIndex(), blockPred)) |
| 20636 | { |
| 20637 | return true; |
| 20638 | } |
| 20639 | |
| 20640 | // The catch block can jump back into the middle of the try |
| 20641 | if (comp->bbInCatchHandlerRegions(block, blockPred)) |
| 20642 | { |
| 20643 | return true; |
| 20644 | } |
| 20645 | |
| 20646 | // The end of a finally region is a BBJ_EHFINALLYRET block (during importing, BBJ_LEAVE) which |
| 20647 | // is marked as "returning" to the BBJ_ALWAYS block following the BBJ_CALLFINALLY |
| 20648 | // block that does a local call to the finally. This BBJ_ALWAYS is within |
| 20649 | // the try region protected by the finally (for x86, ARM), but that's ok. |
| 20650 | BasicBlock* prevBlock = block->bbPrev; |
| 20651 | if (prevBlock->bbJumpKind == BBJ_CALLFINALLY && block->bbJumpKind == BBJ_ALWAYS && |
| 20652 | blockPred->bbJumpKind == BBJ_EHFINALLYRET) |
| 20653 | { |
| 20654 | return true; |
| 20655 | } |
| 20656 | |
| 20657 | printf("Jump into the middle of try region: " FMT_BB " branches to " FMT_BB "\n" , blockPred->bbNum, block->bbNum); |
| 20658 | assert(!"Jump into middle of try region" ); |
| 20659 | return false; |
| 20660 | } |
| 20661 | |
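//------------------------------------------------------------------------
// CheckEhHndDsc: Check that the edge from blockPred into handler region member 'block' is legal.
//
// Arguments:
//    block     - the block inside the handler region;
//    blockPred - the predecessor jumping to 'block';
//    ehHndlDsc - the EH descriptor of the handler region containing 'block'.
//
// Return Value:
//    true if the edge is one of the allowed kinds (a BBJ_EHFINALLYRET/BBJ_EHFILTERRET into the
//    handler, a BBJ_CALLFINALLY to our finally, a jump within the same handler region, or a
//    filter entering its filter-handler); otherwise asserts and returns false.
//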
| 20662 | bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHblkDsc* ehHndlDsc) |
| 20663 | { |
| 20664 | // You can do a BBJ_EHFINALLYRET or BBJ_EHFILTERRET into a handler region |
| 20665 | if ((blockPred->bbJumpKind == BBJ_EHFINALLYRET) || (blockPred->bbJumpKind == BBJ_EHFILTERRET)) |
| 20666 | { |
| 20667 | return true; |
| 20668 | } |
| 20669 | |
| 20670 | // Our try block can call our finally block |
| 20671 | if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->bbJumpKind == BBJ_CALLFINALLY) && |
| 20672 | comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) |
| 20673 | { |
| 20674 | return true; |
| 20675 | } |
| 20676 | |
| 20677 | // You can jump within the same handler region |
| 20678 | if (comp->bbInHandlerRegions(block->getHndIndex(), blockPred)) |
| 20679 | { |
| 20680 | return true; |
| 20681 | } |
| 20682 | |
| 20683 | // A filter can jump to the start of the filter handler |
| 20684 | if (ehHndlDsc->HasFilter()) |
| 20685 | { |
| 20686 | return true; |
| 20687 | } |
| 20688 | |
| 20689 | printf("Jump into the middle of handler region: " FMT_BB " branches to " FMT_BB "\n" , blockPred->bbNum, |
| 20690 | block->bbNum); |
| 20691 | assert(!"Jump into the middle of handler region" ); |
| 20692 | return false; |
| 20693 | } |
| 20694 | |
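//------------------------------------------------------------------------
// CheckJump: Check that blockPred's jump kind is consistent with it being a predecessor of 'block'.
//
// Arguments:
//    blockPred - the predecessor block;
//    block     - the block that lists blockPred in its bbPreds.
//
// Return Value:
//    true if blockPred can indeed transfer control to 'block'; otherwise asserts.
//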
| 20695 | bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) |
| 20696 | { |
| 20697 | switch (blockPred->bbJumpKind) |
| 20698 | { |
| 20699 | case BBJ_COND: |
| 20700 | assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); |
| 20701 | return true; |
| 20702 | |
| 20703 | case BBJ_NONE: |
| 20704 | assert(blockPred->bbNext == block); |
| 20705 | return true; |
| 20706 | |
| 20707 | case BBJ_CALLFINALLY: |
| 20708 | case BBJ_ALWAYS: |
| 20709 | case BBJ_EHCATCHRET: |
| 20710 | case BBJ_EHFILTERRET: |
| 20711 | assert(blockPred->bbJumpDest == block); |
| 20712 | return true; |
| 20713 | |
| 20714 | case BBJ_EHFINALLYRET: |
| 20715 | assert(CheckEHFinalyRet(blockPred, block)); |
| 20716 | return true; |
| 20717 | |
| 20718 | case BBJ_THROW: |
| 20719 | case BBJ_RETURN: |
assert(!"THROW and RETURN blocks cannot be in the predecessor list!");
| 20721 | break; |
| 20722 | |
| 20723 | case BBJ_SWITCH: |
| 20724 | { |
| 20725 | unsigned jumpCnt = blockPred->bbJumpSwt->bbsCount; |
| 20726 | |
| 20727 | for (unsigned i = 0; i < jumpCnt; ++i) |
| 20728 | { |
| 20729 | BasicBlock* jumpTab = blockPred->bbJumpSwt->bbsDstTab[i]; |
| 20730 | assert(jumpTab != nullptr); |
| 20731 | if (block == jumpTab) |
| 20732 | { |
| 20733 | return true; |
| 20734 | } |
| 20735 | } |
| 20736 | |
| 20737 | assert(!"SWITCH in the predecessor list with no jump label to BLOCK!" ); |
| 20738 | } |
| 20739 | break; |
| 20740 | |
| 20741 | default: |
| 20742 | assert(!"Unexpected bbJumpKind" ); |
| 20743 | break; |
| 20744 | } |
| 20745 | return false; |
| 20746 | } |
| 20747 | |
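//------------------------------------------------------------------------
// CheckEHFinalyRet: Check a BBJ_EHFINALLYRET predecessor edge.
//
// Arguments:
//    blockPred - the BBJ_EHFINALLYRET block ending a finally;
//    block     - the block that is a successor of the finally return.
//
// Return Value:
//    true if 'block' follows a BBJ_CALLFINALLY that calls this finally (searching the legal
//    call-finally range, and the funclet region when funclets have been created);
//    otherwise asserts and returns false.
//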
| 20748 | bool BBPredsChecker::CheckEHFinalyRet(BasicBlock* blockPred, BasicBlock* block) |
| 20749 | { |
| 20750 | |
| 20751 | // If the current block is a successor to a BBJ_EHFINALLYRET (return from finally), |
| 20752 | // then the lexically previous block should be a call to the same finally. |
| 20753 | // Verify all of that. |
| 20754 | |
| 20755 | unsigned hndIndex = blockPred->getHndIndex(); |
| 20756 | EHblkDsc* ehDsc = comp->ehGetDsc(hndIndex); |
| 20757 | BasicBlock* finBeg = ehDsc->ebdHndBeg; |
| 20758 | |
| 20759 | // Because there is no bbPrev, we have to search for the lexically previous |
| 20760 | // block. We can shorten the search by only looking in places where it is legal |
| 20761 | // to have a call to the finally. |
| 20762 | |
| 20763 | BasicBlock* begBlk; |
| 20764 | BasicBlock* endBlk; |
| 20765 | comp->ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); |
| 20766 | |
| 20767 | for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) |
| 20768 | { |
| 20769 | if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) |
| 20770 | { |
| 20771 | continue; |
| 20772 | } |
| 20773 | |
| 20774 | if (block == bcall->bbNext) |
| 20775 | { |
| 20776 | return true; |
| 20777 | } |
| 20778 | } |
| 20779 | |
| 20780 | #if FEATURE_EH_FUNCLETS |
| 20781 | |
| 20782 | if (comp->fgFuncletsCreated) |
| 20783 | { |
| 20784 | // There is no easy way to search just the funclets that were pulled out of |
| 20785 | // the corresponding try body, so instead we search all the funclets, and if |
| 20786 | // we find a potential 'hit' we check if the funclet we're looking at is |
| 20787 | // from the correct try region. |
| 20788 | |
| 20789 | for (BasicBlock* bcall = comp->fgFirstFuncletBB; bcall != nullptr; bcall = bcall->bbNext) |
| 20790 | { |
| 20791 | if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) |
| 20792 | { |
| 20793 | continue; |
| 20794 | } |
| 20795 | |
| 20796 | if (block != bcall->bbNext) |
| 20797 | { |
| 20798 | continue; |
| 20799 | } |
| 20800 | |
| 20801 | if (comp->ehCallFinallyInCorrectRegion(bcall, hndIndex)) |
| 20802 | { |
| 20803 | return true; |
| 20804 | } |
| 20805 | } |
| 20806 | } |
| 20807 | |
| 20808 | #endif // FEATURE_EH_FUNCLETS |
| 20809 | |
| 20810 | assert(!"BBJ_EHFINALLYRET predecessor of block that doesn't follow a BBJ_CALLFINALLY!" ); |
| 20811 | return false; |
| 20812 | } |
| 20813 | |
| 20814 | // This variable is used to generate "traversal labels": one-time constants with which |
| 20815 | // we label basic blocks that are members of the basic block list, in order to have a |
| 20816 | // fast, high-probability test for membership in that list. Type is "volatile" because |
| 20817 | // it's incremented with an atomic operation, which wants a volatile type; "long" so that |
| 20818 | // wrap-around to 0 (which I think has the highest probability of accidental collision) is |
| 20819 | // postponed a *long* time. |
| 20820 | static volatile int bbTraverseLabel = 1; |
| 20821 | |
| 20822 | /***************************************************************************** |
| 20823 | * |
| 20824 | * A DEBUG routine to check the consistency of the flowgraph, |
| 20825 | * i.e. bbNum, bbRefs, bbPreds have to be up to date. |
| 20826 | * |
| 20827 | *****************************************************************************/ |
| 20828 | |
| 20829 | void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRefs /* = true */) |
| 20830 | { |
| 20831 | #ifdef DEBUG |
| 20832 | if (verbose) |
| 20833 | { |
| 20834 | printf("*************** In fgDebugCheckBBlist\n" ); |
| 20835 | } |
| 20836 | #endif // DEBUG |
| 20837 | |
| 20838 | fgDebugCheckBlockLinks(); |
| 20839 | |
| 20840 | if (fgBBcount > 10000 && expensiveDebugCheckLevel < 1) |
| 20841 | { |
| 20842 | // The basic block checks are too expensive if there are too many blocks, |
| 20843 | // so give up unless we've been told to try hard. |
| 20844 | return; |
| 20845 | } |
| 20846 | |
| 20847 | DWORD startTickCount = GetTickCount(); |
| 20848 | |
| 20849 | #if FEATURE_EH_FUNCLETS |
| 20850 | bool reachedFirstFunclet = false; |
| 20851 | if (fgFuncletsCreated) |
| 20852 | { |
| 20853 | // |
| 20854 | // Make sure that fgFirstFuncletBB is accurate. |
| 20855 | // It should be the first basic block in a handler region. |
| 20856 | // |
| 20857 | if (fgFirstFuncletBB != nullptr) |
| 20858 | { |
| 20859 | assert(fgFirstFuncletBB->hasHndIndex() == true); |
| 20860 | assert(fgFirstFuncletBB->bbFlags & BBF_FUNCLET_BEG); |
| 20861 | } |
| 20862 | } |
| 20863 | #endif // FEATURE_EH_FUNCLETS |
| 20864 | |
| 20865 | /* Check bbNum, bbRefs and bbPreds */ |
| 20866 | // First, pick a traversal stamp, and label all the blocks with it. |
| 20867 | unsigned curTraversalStamp = unsigned(InterlockedIncrement((LONG*)&bbTraverseLabel)); |
| 20868 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 20869 | { |
| 20870 | block->bbTraversalStamp = curTraversalStamp; |
| 20871 | } |
| 20872 | |
| 20873 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 20874 | { |
| 20875 | if (checkBBNum) |
| 20876 | { |
| 20877 | // Check that bbNum is sequential |
| 20878 | assert(block->bbNext == nullptr || (block->bbNum + 1 == block->bbNext->bbNum)); |
| 20879 | } |
| 20880 | |
// If the block ends in a BBJ_COND or BBJ_SWITCH jump, make sure its last node
// is a conditional jump or a GT_SWITCH/GT_SWITCH_TABLE (the lowered form), respectively.
| 20884 | |
| 20885 | if (block->bbJumpKind == BBJ_COND) |
| 20886 | { |
| 20887 | assert(block->lastNode()->gtNext == nullptr && block->lastNode()->OperIsConditionalJump()); |
| 20888 | } |
| 20889 | else if (block->bbJumpKind == BBJ_SWITCH) |
| 20890 | { |
| 20891 | assert(block->lastNode()->gtNext == nullptr && |
| 20892 | (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); |
| 20893 | } |
| 20894 | else if (!(block->bbJumpKind == BBJ_ALWAYS || block->bbJumpKind == BBJ_RETURN)) |
| 20895 | { |
// This block kind cannot need a GC poll.
| 20897 | assert(!(block->bbFlags & BBF_NEEDS_GCPOLL)); |
| 20898 | } |
| 20899 | |
| 20900 | if (block->bbCatchTyp == BBCT_FILTER) |
| 20901 | { |
| 20902 | if (!fgCheapPredsValid) // Don't check cheap preds |
| 20903 | { |
| 20904 | // A filter has no predecessors |
| 20905 | assert(block->bbPreds == nullptr); |
| 20906 | } |
| 20907 | } |
| 20908 | |
| 20909 | #if FEATURE_EH_FUNCLETS |
| 20910 | if (fgFuncletsCreated) |
| 20911 | { |
| 20912 | // |
| 20913 | // There should be no handler blocks until |
| 20914 | // we get to the fgFirstFuncletBB block, |
| 20915 | // then every block should be a handler block |
| 20916 | // |
| 20917 | if (!reachedFirstFunclet) |
| 20918 | { |
| 20919 | if (block == fgFirstFuncletBB) |
| 20920 | { |
| 20921 | assert(block->hasHndIndex() == true); |
| 20922 | reachedFirstFunclet = true; |
| 20923 | } |
| 20924 | else |
| 20925 | { |
| 20926 | assert(block->hasHndIndex() == false); |
| 20927 | } |
| 20928 | } |
| 20929 | else // reachedFirstFunclet |
| 20930 | { |
| 20931 | assert(block->hasHndIndex() == true); |
| 20932 | } |
| 20933 | } |
| 20934 | #endif // FEATURE_EH_FUNCLETS |
| 20935 | |
| 20936 | if (checkBBRefs) |
| 20937 | { |
| 20938 | assert(fgComputePredsDone); |
| 20939 | } |
| 20940 | |
| 20941 | BBPredsChecker checker(this); |
| 20942 | unsigned blockRefs = checker.CheckBBPreds(block, curTraversalStamp); |
| 20943 | |
| 20944 | // First basic block has an additional global incoming edge. |
| 20945 | if (block == fgFirstBB) |
| 20946 | { |
| 20947 | blockRefs += 1; |
| 20948 | } |
| 20949 | |
| 20950 | /* Check the bbRefs */ |
| 20951 | if (checkBBRefs) |
| 20952 | { |
| 20953 | if (block->bbRefs != blockRefs) |
| 20954 | { |
| 20955 | // Check to see if this block is the beginning of a filter or a handler and adjust the ref count |
| 20956 | // appropriately. |
| 20957 | for (EHblkDsc *HBtab = compHndBBtab, *HBtabEnd = &compHndBBtab[compHndBBtabCount]; HBtab != HBtabEnd; |
| 20958 | HBtab++) |
| 20959 | { |
| 20960 | if (HBtab->ebdHndBeg == block) |
| 20961 | { |
| 20962 | blockRefs++; |
| 20963 | } |
| 20964 | if (HBtab->HasFilter() && (HBtab->ebdFilter == block)) |
| 20965 | { |
| 20966 | blockRefs++; |
| 20967 | } |
| 20968 | } |
| 20969 | } |
| 20970 | |
| 20971 | assert(block->bbRefs == blockRefs); |
| 20972 | } |
| 20973 | |
/* Check that blocks with a try index have a valid bbTryIndex */
| 20975 | if (block->hasTryIndex()) |
| 20976 | { |
| 20977 | assert(block->getTryIndex() < compHndBBtabCount); |
| 20978 | } |
| 20979 | |
/* Check that blocks marked BBF_RUN_RARELY have a zero bbWeight, and that other blocks have a non-zero bbWeight */
| 20981 | if (block->isRunRarely()) |
| 20982 | { |
| 20983 | assert(block->bbWeight == BB_ZERO_WEIGHT); |
| 20984 | } |
| 20985 | else |
| 20986 | { |
| 20987 | assert(block->bbWeight > BB_ZERO_WEIGHT); |
| 20988 | } |
| 20989 | } |
| 20990 | |
| 20991 | // Make sure the one return BB is not changed. |
| 20992 | if (genReturnBB != nullptr) |
| 20993 | { |
| 20994 | assert(genReturnBB->bbTreeList); |
| 20995 | assert(genReturnBB->IsLIR() || genReturnBB->bbTreeList->gtOper == GT_STMT); |
| 20996 | assert(genReturnBB->IsLIR() || genReturnBB->bbTreeList->gtType == TYP_VOID); |
| 20997 | } |
| 20998 | |
| 20999 | // The general encoder/decoder (currently) only reports "this" as a generics context as a stack location, |
| 21000 | // so we mark info.compThisArg as lvAddrTaken to ensure that it is not enregistered. Otherwise, it should |
| 21001 | // not be address-taken. This variable determines if the address-taken-ness of "thisArg" is "OK". |
| 21002 | bool copiedForGenericsCtxt; |
| 21003 | #ifndef JIT32_GCENCODER |
| 21004 | copiedForGenericsCtxt = ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0); |
| 21005 | #else // JIT32_GCENCODER |
| 21006 | copiedForGenericsCtxt = FALSE; |
| 21007 | #endif // JIT32_GCENCODER |
| 21008 | |
// This 'if' is here only in support of the noway_asserts it contains.
| 21010 | if (info.compIsStatic) |
| 21011 | { |
| 21012 | // For static method, should have never grabbed the temp. |
| 21013 | assert(lvaArg0Var == BAD_VAR_NUM); |
| 21014 | } |
| 21015 | else |
| 21016 | { |
| 21017 | // For instance method: |
| 21018 | assert(info.compThisArg != BAD_VAR_NUM); |
| 21019 | bool compThisArgAddrExposedOK = !lvaTable[info.compThisArg].lvAddrExposed; |
| 21020 | |
| 21021 | #ifndef JIT32_GCENCODER |
| 21022 | compThisArgAddrExposedOK = compThisArgAddrExposedOK || copiedForGenericsCtxt; |
| 21023 | #endif // !JIT32_GCENCODER |
| 21024 | |
| 21025 | // Should never expose the address of arg 0 or write to arg 0. |
// In addition, lvaArg0Var should remain equal to info.compThisArg if arg0 is not
// written to or address-exposed.
| 21028 | assert(compThisArgAddrExposedOK && !lvaTable[info.compThisArg].lvHasILStoreOp && |
| 21029 | (lvaArg0Var == info.compThisArg || |
| 21030 | lvaArg0Var != info.compThisArg && (lvaTable[lvaArg0Var].lvAddrExposed || |
| 21031 | lvaTable[lvaArg0Var].lvHasILStoreOp || copiedForGenericsCtxt))); |
| 21032 | } |
| 21033 | } |
| 21034 | |
| 21035 | /***************************************************************************** |
| 21036 | * |
* A DEBUG routine to check that the side-effect flags (exception, call, assignment, etc.) are correctly set.
| 21038 | * |
| 21039 | ****************************************************************************/ |
| 21040 | |
| 21041 | void Compiler::fgDebugCheckFlags(GenTree* tree) |
| 21042 | { |
| 21043 | noway_assert(tree->gtOper != GT_STMT); |
| 21044 | |
| 21045 | const genTreeOps oper = tree->OperGet(); |
| 21046 | const unsigned kind = tree->OperKind(); |
| 21047 | unsigned treeFlags = tree->gtFlags & GTF_ALL_EFFECT; |
| 21048 | unsigned chkFlags = 0; |
| 21049 | |
| 21050 | if (tree->OperMayThrow(this)) |
| 21051 | { |
| 21052 | chkFlags |= GTF_EXCEPT; |
| 21053 | } |
| 21054 | |
| 21055 | if (tree->OperRequiresCallFlag(this)) |
| 21056 | { |
| 21057 | chkFlags |= GTF_CALL; |
| 21058 | } |
| 21059 | |
| 21060 | /* Is this a leaf node? */ |
| 21061 | |
| 21062 | if (kind & GTK_LEAF) |
| 21063 | { |
| 21064 | switch (oper) |
| 21065 | { |
| 21066 | case GT_CLS_VAR: |
| 21067 | chkFlags |= GTF_GLOB_REF; |
| 21068 | break; |
| 21069 | |
| 21070 | case GT_CATCH_ARG: |
| 21071 | chkFlags |= GTF_ORDER_SIDEEFF; |
| 21072 | break; |
| 21073 | |
| 21074 | case GT_MEMORYBARRIER: |
| 21075 | chkFlags |= GTF_GLOB_REF | GTF_ASG; |
| 21076 | break; |
| 21077 | |
| 21078 | default: |
| 21079 | break; |
| 21080 | } |
| 21081 | } |
| 21082 | |
| 21083 | /* Is it a 'simple' unary/binary operator? */ |
| 21084 | |
| 21085 | else if (kind & GTK_SMPOP) |
| 21086 | { |
| 21087 | GenTree* op1 = tree->gtOp.gtOp1; |
| 21088 | GenTree* op2 = tree->gtGetOp2IfPresent(); |
| 21089 | |
| 21090 | // During GS work, we make shadow copies for params. |
| 21091 | // In gsParamsToShadows(), we create a shadow var of TYP_INT for every small type param. |
| 21092 | // Then in gsReplaceShadowParams(), we change the gtLclNum to the shadow var. |
| 21093 | // We also change the types of the local var tree and the assignment tree to TYP_INT if necessary. |
// However, since we don't morph the tree at this late stage, manually propagating
| 21095 | // TYP_INT up to the GT_ASG tree is only correct if we don't need to propagate the TYP_INT back up. |
| 21096 | // The following checks will ensure this. |
| 21097 | |
| 21098 | // Is the left child of "tree" a GT_ASG? |
| 21099 | // |
// If the parent is TYP_VOID, we don't need to propagate TYP_INT up. We are fine.
| 21101 | // (or) If GT_ASG is the left child of a GT_COMMA, the type of the GT_COMMA node will |
| 21102 | // be determined by its right child. So we don't need to propagate TYP_INT up either. We are fine. |
| 21103 | if (op1 && op1->gtOper == GT_ASG) |
| 21104 | { |
| 21105 | assert(tree->gtType == TYP_VOID || tree->gtOper == GT_COMMA); |
| 21106 | } |
| 21107 | |
| 21108 | // Is the right child of "tree" a GT_ASG? |
| 21109 | // |
// If the parent is TYP_VOID, we don't need to propagate TYP_INT up. We are fine.
| 21111 | if (op2 && op2->gtOper == GT_ASG) |
| 21112 | { |
| 21113 | assert(tree->gtType == TYP_VOID); |
| 21114 | } |
| 21115 | |
| 21116 | switch (oper) |
| 21117 | { |
| 21118 | case GT_QMARK: |
| 21119 | if (op1->OperIsCompare()) |
| 21120 | { |
| 21121 | noway_assert(op1->gtFlags & GTF_DONT_CSE); |
| 21122 | } |
| 21123 | else |
| 21124 | { |
| 21125 | noway_assert((op1->gtOper == GT_CNS_INT) && |
| 21126 | ((op1->gtIntCon.gtIconVal == 0) || (op1->gtIntCon.gtIconVal == 1))); |
| 21127 | } |
| 21128 | break; |
| 21129 | |
| 21130 | case GT_LIST: |
| 21131 | case GT_FIELD_LIST: |
| 21132 | if ((op2 != nullptr) && op2->OperIsAnyList()) |
| 21133 | { |
| 21134 | ArrayStack<GenTree*> stack(getAllocator(CMK_DebugOnly)); |
| 21135 | while ((tree->gtGetOp2() != nullptr) && tree->gtGetOp2()->OperIsAnyList()) |
| 21136 | { |
| 21137 | stack.Push(tree); |
| 21138 | tree = tree->gtGetOp2(); |
| 21139 | } |
| 21140 | |
| 21141 | fgDebugCheckFlags(tree); |
| 21142 | |
| 21143 | while (!stack.Empty()) |
| 21144 | { |
| 21145 | tree = stack.Pop(); |
| 21146 | assert((tree->gtFlags & GTF_REVERSE_OPS) == 0); |
| 21147 | fgDebugCheckFlags(tree->gtOp.gtOp1); |
| 21148 | chkFlags |= (tree->gtOp.gtOp1->gtFlags & GTF_ALL_EFFECT); |
| 21149 | chkFlags |= (tree->gtGetOp2()->gtFlags & GTF_ALL_EFFECT); |
| 21150 | fgDebugCheckFlagsHelper(tree, (tree->gtFlags & GTF_ALL_EFFECT), chkFlags); |
| 21151 | } |
| 21152 | |
| 21153 | return; |
| 21154 | } |
| 21155 | break; |
| 21156 | |
| 21157 | default: |
| 21158 | break; |
| 21159 | } |
| 21160 | |
| 21161 | /* Recursively check the subtrees */ |
| 21162 | |
| 21163 | if (op1) |
| 21164 | { |
| 21165 | fgDebugCheckFlags(op1); |
| 21166 | } |
| 21167 | if (op2) |
| 21168 | { |
| 21169 | fgDebugCheckFlags(op2); |
| 21170 | } |
| 21171 | |
| 21172 | if (op1) |
| 21173 | { |
| 21174 | chkFlags |= (op1->gtFlags & GTF_ALL_EFFECT); |
| 21175 | } |
| 21176 | if (op2) |
| 21177 | { |
| 21178 | chkFlags |= (op2->gtFlags & GTF_ALL_EFFECT); |
| 21179 | } |
| 21180 | |
| 21181 | // We reuse the value of GTF_REVERSE_OPS for a GT_IND-specific flag, |
| 21182 | // so exempt that (unary) operator. |
| 21183 | if (tree->OperGet() != GT_IND && tree->gtFlags & GTF_REVERSE_OPS) |
| 21184 | { |
| 21185 | /* Must have two operands if GTF_REVERSE is set */ |
| 21186 | noway_assert(op1 && op2); |
| 21187 | |
| 21188 | /* Make sure that the order of side effects has not been swapped. */ |
| 21189 | |
| 21190 | /* However CSE may introduce an assignment after the reverse flag |
| 21191 | was set and thus GTF_ASG cannot be considered here. */ |
| 21192 | |
| 21193 | /* For a GT_ASG(GT_IND(x), y) we are interested in the side effects of x */ |
| 21194 | GenTree* op1p; |
| 21195 | if ((oper == GT_ASG) && (op1->gtOper == GT_IND)) |
| 21196 | { |
| 21197 | op1p = op1->gtOp.gtOp1; |
| 21198 | } |
| 21199 | else |
| 21200 | { |
| 21201 | op1p = op1; |
| 21202 | } |
| 21203 | |
| 21204 | /* This isn't true any more with the sticky GTF_REVERSE */ |
| 21205 | /* |
| 21206 | // if op1p has side effects, then op2 cannot have side effects |
| 21207 | if (op1p->gtFlags & (GTF_SIDE_EFFECT & ~GTF_ASG)) |
| 21208 | { |
| 21209 | if (op2->gtFlags & (GTF_SIDE_EFFECT & ~GTF_ASG)) |
| 21210 | gtDispTree(tree); |
| 21211 | noway_assert(!(op2->gtFlags & (GTF_SIDE_EFFECT & ~GTF_ASG))); |
| 21212 | } |
| 21213 | */ |
| 21214 | } |
| 21215 | |
| 21216 | if (tree->OperRequiresAsgFlag()) |
| 21217 | { |
| 21218 | chkFlags |= GTF_ASG; |
| 21219 | } |
| 21220 | |
| 21221 | if (oper == GT_ADDR && (op1->OperIsLocal() || op1->gtOper == GT_CLS_VAR || |
| 21222 | (op1->gtOper == GT_IND && op1->gtOp.gtOp1->gtOper == GT_CLS_VAR_ADDR))) |
| 21223 | { |
/* &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar itself does.
| 21225 | Similarly for clsVar */ |
| 21226 | treeFlags |= GTF_GLOB_REF; |
| 21227 | } |
| 21228 | } |
| 21229 | |
| 21230 | /* See what kind of a special operator we have here */ |
| 21231 | |
| 21232 | else |
| 21233 | { |
| 21234 | switch (tree->OperGet()) |
| 21235 | { |
| 21236 | case GT_CALL: |
| 21237 | |
| 21238 | GenTree* args; |
| 21239 | GenTree* argx; |
| 21240 | GenTreeCall* call; |
| 21241 | |
| 21242 | call = tree->AsCall(); |
| 21243 | |
| 21244 | if (call->gtCallObjp) |
| 21245 | { |
| 21246 | fgDebugCheckFlags(call->gtCallObjp); |
| 21247 | chkFlags |= (call->gtCallObjp->gtFlags & GTF_SIDE_EFFECT); |
| 21248 | |
| 21249 | if (call->gtCallObjp->gtFlags & GTF_ASG) |
| 21250 | { |
| 21251 | treeFlags |= GTF_ASG; |
| 21252 | } |
| 21253 | } |
| 21254 | |
| 21255 | for (args = call->gtCallArgs; args; args = args->gtOp.gtOp2) |
| 21256 | { |
| 21257 | argx = args->gtOp.gtOp1; |
| 21258 | fgDebugCheckFlags(argx); |
| 21259 | |
| 21260 | chkFlags |= (argx->gtFlags & GTF_SIDE_EFFECT); |
| 21261 | |
| 21262 | if (argx->gtFlags & GTF_ASG) |
| 21263 | { |
| 21264 | treeFlags |= GTF_ASG; |
| 21265 | } |
| 21266 | } |
| 21267 | |
| 21268 | for (args = call->gtCallLateArgs; args; args = args->gtOp.gtOp2) |
| 21269 | { |
| 21270 | argx = args->gtOp.gtOp1; |
| 21271 | fgDebugCheckFlags(argx); |
| 21272 | |
| 21273 | chkFlags |= (argx->gtFlags & GTF_SIDE_EFFECT); |
| 21274 | |
| 21275 | if (argx->gtFlags & GTF_ASG) |
| 21276 | { |
| 21277 | treeFlags |= GTF_ASG; |
| 21278 | } |
| 21279 | } |
| 21280 | |
| 21281 | if ((call->gtCallType == CT_INDIRECT) && (call->gtCallCookie != nullptr)) |
| 21282 | { |
| 21283 | fgDebugCheckFlags(call->gtCallCookie); |
| 21284 | chkFlags |= (call->gtCallCookie->gtFlags & GTF_SIDE_EFFECT); |
| 21285 | } |
| 21286 | |
| 21287 | if (call->gtCallType == CT_INDIRECT) |
| 21288 | { |
| 21289 | fgDebugCheckFlags(call->gtCallAddr); |
| 21290 | chkFlags |= (call->gtCallAddr->gtFlags & GTF_SIDE_EFFECT); |
| 21291 | } |
| 21292 | |
| 21293 | if (call->IsUnmanaged() && (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)) |
| 21294 | { |
| 21295 | if (call->gtCallArgs->gtOp.gtOp1->OperGet() == GT_NOP) |
| 21296 | { |
| 21297 | noway_assert(call->gtCallLateArgs->gtOp.gtOp1->TypeGet() == TYP_I_IMPL || |
| 21298 | call->gtCallLateArgs->gtOp.gtOp1->TypeGet() == TYP_BYREF); |
| 21299 | } |
| 21300 | else |
| 21301 | { |
| 21302 | noway_assert(call->gtCallArgs->gtOp.gtOp1->TypeGet() == TYP_I_IMPL || |
| 21303 | call->gtCallArgs->gtOp.gtOp1->TypeGet() == TYP_BYREF); |
| 21304 | } |
| 21305 | } |
| 21306 | break; |
| 21307 | |
| 21308 | case GT_ARR_ELEM: |
| 21309 | |
| 21310 | GenTree* arrObj; |
| 21311 | unsigned dim; |
| 21312 | |
| 21313 | arrObj = tree->gtArrElem.gtArrObj; |
| 21314 | fgDebugCheckFlags(arrObj); |
| 21315 | chkFlags |= (arrObj->gtFlags & GTF_ALL_EFFECT); |
| 21316 | |
| 21317 | for (dim = 0; dim < tree->gtArrElem.gtArrRank; dim++) |
| 21318 | { |
| 21319 | fgDebugCheckFlags(tree->gtArrElem.gtArrInds[dim]); |
| 21320 | chkFlags |= tree->gtArrElem.gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT; |
| 21321 | } |
| 21322 | break; |
| 21323 | |
| 21324 | case GT_ARR_OFFSET: |
| 21325 | |
| 21326 | fgDebugCheckFlags(tree->gtArrOffs.gtOffset); |
| 21327 | chkFlags |= (tree->gtArrOffs.gtOffset->gtFlags & GTF_ALL_EFFECT); |
| 21328 | fgDebugCheckFlags(tree->gtArrOffs.gtIndex); |
| 21329 | chkFlags |= (tree->gtArrOffs.gtIndex->gtFlags & GTF_ALL_EFFECT); |
| 21330 | fgDebugCheckFlags(tree->gtArrOffs.gtArrObj); |
| 21331 | chkFlags |= (tree->gtArrOffs.gtArrObj->gtFlags & GTF_ALL_EFFECT); |
| 21332 | break; |
| 21333 | |
| 21334 | case GT_ARR_BOUNDS_CHECK: |
| 21335 | #ifdef FEATURE_SIMD |
| 21336 | case GT_SIMD_CHK: |
| 21337 | #endif // FEATURE_SIMD |
| 21338 | #ifdef FEATURE_HW_INTRINSICS |
| 21339 | case GT_HW_INTRINSIC_CHK: |
| 21340 | #endif // FEATURE_HW_INTRINSICS |
| 21341 | |
| 21342 | GenTreeBoundsChk* bndsChk; |
| 21343 | bndsChk = tree->AsBoundsChk(); |
| 21344 | fgDebugCheckFlags(bndsChk->gtIndex); |
| 21345 | chkFlags |= (bndsChk->gtIndex->gtFlags & GTF_ALL_EFFECT); |
| 21346 | fgDebugCheckFlags(bndsChk->gtArrLen); |
| 21347 | chkFlags |= (bndsChk->gtArrLen->gtFlags & GTF_ALL_EFFECT); |
| 21348 | break; |
| 21349 | |
| 21350 | case GT_CMPXCHG: |
| 21351 | |
| 21352 | chkFlags |= (GTF_GLOB_REF | GTF_ASG); |
| 21353 | GenTreeCmpXchg* cmpXchg; |
| 21354 | cmpXchg = tree->AsCmpXchg(); |
| 21355 | fgDebugCheckFlags(cmpXchg->gtOpLocation); |
| 21356 | chkFlags |= (cmpXchg->gtOpLocation->gtFlags & GTF_ALL_EFFECT); |
| 21357 | fgDebugCheckFlags(cmpXchg->gtOpValue); |
| 21358 | chkFlags |= (cmpXchg->gtOpValue->gtFlags & GTF_ALL_EFFECT); |
| 21359 | fgDebugCheckFlags(cmpXchg->gtOpComparand); |
| 21360 | chkFlags |= (cmpXchg->gtOpComparand->gtFlags & GTF_ALL_EFFECT); |
| 21361 | break; |
| 21362 | |
| 21363 | case GT_STORE_DYN_BLK: |
| 21364 | case GT_DYN_BLK: |
| 21365 | |
| 21366 | GenTreeDynBlk* dynBlk; |
| 21367 | dynBlk = tree->AsDynBlk(); |
| 21368 | fgDebugCheckFlags(dynBlk->gtDynamicSize); |
| 21369 | chkFlags |= (dynBlk->gtDynamicSize->gtFlags & GTF_ALL_EFFECT); |
| 21370 | fgDebugCheckFlags(dynBlk->Addr()); |
| 21371 | chkFlags |= (dynBlk->Addr()->gtFlags & GTF_ALL_EFFECT); |
| 21372 | if (tree->OperGet() == GT_STORE_DYN_BLK) |
| 21373 | { |
| 21374 | fgDebugCheckFlags(dynBlk->Data()); |
| 21375 | chkFlags |= (dynBlk->Data()->gtFlags & GTF_ALL_EFFECT); |
| 21376 | } |
| 21377 | break; |
| 21378 | |
| 21379 | default: |
| 21380 | |
| 21381 | #ifdef DEBUG |
| 21382 | gtDispTree(tree); |
| 21383 | #endif |
| 21384 | |
| 21385 | assert(!"Unknown operator for fgDebugCheckFlags" ); |
| 21386 | break; |
| 21387 | } |
| 21388 | } |
| 21389 | |
| 21390 | fgDebugCheckFlagsHelper(tree, treeFlags, chkFlags); |
| 21391 | } |
| 21392 | |
| 21393 | //------------------------------------------------------------------------------ |
// fgDebugCheckFlagsHelper: Check that all bits that are set in chkFlags are also set in treeFlags.
//
| 21397 | // Arguments: |
| 21398 | // tree - Tree whose flags are being checked |
| 21399 | // treeFlags - Actual flags on the tree |
| 21400 | // chkFlags - Expected flags |
| 21401 | // |
| 21402 | // Note: |
| 21403 | // Checking that all bits that are set in treeFlags are also set in chkFlags is currently disabled. |
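//    For example, if a child contributes GTF_EXCEPT (so it ends up in chkFlags) but the
//    parent's treeFlags does not include GTF_EXCEPT, the "missing flags" path below fires.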
| 21404 | |
| 21405 | void Compiler::fgDebugCheckFlagsHelper(GenTree* tree, unsigned treeFlags, unsigned chkFlags) |
| 21406 | { |
| 21407 | if (chkFlags & ~treeFlags) |
| 21408 | { |
| 21409 | // Print the tree so we can see it in the log. |
| 21410 | printf("Missing flags on tree [%06d]: " , dspTreeID(tree)); |
| 21411 | GenTree::gtDispFlags(chkFlags & ~treeFlags, GTF_DEBUG_NONE); |
| 21412 | printf("\n" ); |
| 21413 | gtDispTree(tree); |
| 21414 | |
| 21415 | noway_assert(!"Missing flags on tree" ); |
| 21416 | |
| 21417 | // Print the tree again so we can see it right after we hook up the debugger. |
| 21418 | printf("Missing flags on tree [%06d]: " , dspTreeID(tree)); |
| 21419 | GenTree::gtDispFlags(chkFlags & ~treeFlags, GTF_DEBUG_NONE); |
| 21420 | printf("\n" ); |
| 21421 | gtDispTree(tree); |
| 21422 | } |
| 21423 | else if (treeFlags & ~chkFlags) |
| 21424 | { |
| 21425 | // TODO: We are currently only checking extra GTF_EXCEPT, GTF_ASG, and GTF_CALL flags. |
| 21426 | if ((treeFlags & ~chkFlags & ~GTF_GLOB_REF & ~GTF_ORDER_SIDEEFF) != 0) |
| 21427 | { |
| 21428 | // Print the tree so we can see it in the log. |
| 21429 | printf("Extra flags on parent tree [%X]: " , tree); |
| 21430 | GenTree::gtDispFlags(treeFlags & ~chkFlags, GTF_DEBUG_NONE); |
| 21431 | printf("\n" ); |
| 21432 | gtDispTree(tree); |
| 21433 | |
| 21434 | noway_assert(!"Extra flags on tree" ); |
| 21435 | |
| 21436 | // Print the tree again so we can see it right after we hook up the debugger. |
| 21437 | printf("Extra flags on parent tree [%X]: " , tree); |
| 21438 | GenTree::gtDispFlags(treeFlags & ~chkFlags, GTF_DEBUG_NONE); |
| 21439 | printf("\n" ); |
| 21440 | gtDispTree(tree); |
| 21441 | } |
| 21442 | } |
| 21443 | } |
| 21444 | |
| 21445 | // DEBUG routine to check correctness of the internal gtNext, gtPrev threading of a statement. |
| 21446 | // This threading is only valid when fgStmtListThreaded is true. |
// For LIR blocks (FGOrderLinear), the checking is instead done by LIR::Range::CheckLIR.
| 21448 | void Compiler::fgDebugCheckNodeLinks(BasicBlock* block, GenTree* node) |
| 21449 | { |
| 21450 | // LIR blocks are checked using BasicBlock::CheckLIR(). |
| 21451 | if (block->IsLIR()) |
| 21452 | { |
LIR::AsRange(block).CheckLIR(this);
return;
}
| 21456 | |
| 21457 | GenTreeStmt* stmt = node->AsStmt(); |
| 21458 | |
| 21459 | assert(fgStmtListThreaded); |
| 21460 | |
| 21461 | noway_assert(stmt->gtStmtList); |
| 21462 | |
| 21463 | // The first node's gtPrev must be nullptr (the gtPrev list is not circular). |
| 21464 | // The last node's gtNext must be nullptr (the gtNext list is not circular). This is tested if the loop below |
| 21465 | // terminates. |
| 21466 | assert(stmt->gtStmtList->gtPrev == nullptr); |
| 21467 | |
| 21468 | for (GenTree* tree = stmt->gtStmtList; tree != nullptr; tree = tree->gtNext) |
| 21469 | { |
| 21470 | if (tree->gtPrev) |
| 21471 | { |
| 21472 | noway_assert(tree->gtPrev->gtNext == tree); |
| 21473 | } |
| 21474 | else |
| 21475 | { |
| 21476 | noway_assert(tree == stmt->gtStmtList); |
| 21477 | } |
| 21478 | |
| 21479 | if (tree->gtNext) |
| 21480 | { |
| 21481 | noway_assert(tree->gtNext->gtPrev == tree); |
| 21482 | } |
| 21483 | else |
| 21484 | { |
| 21485 | noway_assert(tree == stmt->gtStmtExpr); |
| 21486 | } |
| 21487 | |
| 21488 | /* Cross-check gtPrev,gtNext with gtOp for simple trees */ |
| 21489 | |
| 21490 | GenTree* expectedPrevTree = nullptr; |
| 21491 | |
| 21492 | if (tree->OperIsLeaf()) |
| 21493 | { |
| 21494 | if (tree->gtOper == GT_CATCH_ARG) |
| 21495 | { |
| 21496 | // The GT_CATCH_ARG should always have GTF_ORDER_SIDEEFF set |
| 21497 | noway_assert(tree->gtFlags & GTF_ORDER_SIDEEFF); |
| 21498 | // The GT_CATCH_ARG has to be the first thing evaluated |
| 21499 | noway_assert(stmt == block->FirstNonPhiDef()); |
| 21500 | noway_assert(stmt->gtStmtList->gtOper == GT_CATCH_ARG); |
| 21501 | // The root of the tree should have GTF_ORDER_SIDEEFF set |
| 21502 | noway_assert(stmt->gtStmtExpr->gtFlags & GTF_ORDER_SIDEEFF); |
| 21503 | } |
| 21504 | } |
| 21505 | |
| 21506 | if (tree->OperIsUnary() && tree->gtOp.gtOp1) |
| 21507 | { |
| 21508 | expectedPrevTree = tree->gtOp.gtOp1; |
| 21509 | } |
| 21510 | else if (tree->OperIsBinary() && tree->gtOp.gtOp1) |
| 21511 | { |
| 21512 | switch (tree->gtOper) |
| 21513 | { |
| 21514 | case GT_QMARK: |
| 21515 | expectedPrevTree = |
| 21516 | tree->gtOp.gtOp2->AsColon()->ThenNode(); // "then" operand of the GT_COLON (generated second). |
| 21517 | break; |
| 21518 | |
| 21519 | case GT_COLON: |
| 21520 | expectedPrevTree = tree->AsColon()->ElseNode(); // "else" branch result (generated first). |
| 21521 | break; |
| 21522 | |
| 21523 | default: |
| 21524 | if (tree->gtOp.gtOp2) |
| 21525 | { |
| 21526 | if (tree->gtFlags & GTF_REVERSE_OPS) |
| 21527 | { |
| 21528 | expectedPrevTree = tree->gtOp.gtOp1; |
| 21529 | } |
| 21530 | else |
| 21531 | { |
| 21532 | expectedPrevTree = tree->gtOp.gtOp2; |
| 21533 | } |
| 21534 | } |
| 21535 | else |
| 21536 | { |
| 21537 | expectedPrevTree = tree->gtOp.gtOp1; |
| 21538 | } |
| 21539 | break; |
| 21540 | } |
| 21541 | } |
| 21542 | |
| 21543 | noway_assert(expectedPrevTree == nullptr || // No expectations about the prev node |
| 21544 | tree->gtPrev == expectedPrevTree); // The "normal" case |
| 21545 | } |
| 21546 | } |
| 21547 | |
| 21548 | /***************************************************************************** |
| 21549 | * |
| 21550 | * A DEBUG routine to check the correctness of the links between GT_STMT nodes |
| 21551 | * and ordinary nodes within a statement. |
| 21552 | * |
| 21553 | ****************************************************************************/ |
| 21554 | |
| 21555 | void Compiler::fgDebugCheckLinks(bool morphTrees) |
| 21556 | { |
| 21557 | // This used to be only on for stress, and there was a comment stating that |
| 21558 | // it was "quite an expensive operation" but I did not find that to be true. |
| 21559 | // Set DO_SANITY_DEBUG_CHECKS to false to revert to that behavior. |
| 21560 | const bool DO_SANITY_DEBUG_CHECKS = true; |
| 21561 | |
| 21562 | if (!DO_SANITY_DEBUG_CHECKS && !compStressCompile(STRESS_CHK_FLOW_UPDATE, 30)) |
| 21563 | { |
| 21564 | return; |
| 21565 | } |
| 21566 | |
| 21567 | fgDebugCheckBlockLinks(); |
| 21568 | |
| 21569 | /* For each basic block check the bbTreeList links */ |
| 21570 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 21571 | { |
| 21572 | if (block->IsLIR()) |
| 21573 | { |
| 21574 | LIR::AsRange(block).CheckLIR(this); |
| 21575 | } |
| 21576 | else |
| 21577 | { |
| 21578 | fgDebugCheckStmtsList(block, morphTrees); |
| 21579 | } |
| 21580 | } |
| 21581 | |
| 21582 | fgDebugCheckNodesUniqueness(); |
| 21583 | } |
| 21584 | |
| 21585 | //------------------------------------------------------------------------------ |
// fgDebugCheckStmtsList : Performs the following checks:
//    - all statements in the block are linked correctly
//    - statement flags are set correctly
//    - node gtNext and gtPrev links are consistent, if the node list is threaded
//
// Arguments:
//    block - the block to check statements in
//    morphTrees - try to morph trees in the checker
//
// Note:
//    This calls fgDebugCheckFlags for each statement; there, checking that all bits set in
//    treeFlags are also set in chkFlags is currently disabled.
| 21597 | |
| 21598 | void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees) |
| 21599 | { |
| 21600 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 21601 | { |
| 21602 | /* Verify that bbTreeList is threaded correctly */ |
| 21603 | /* Note that for the GT_STMT list, the gtPrev list is circular. The gtNext list is not: gtNext of the |
| 21604 | * last GT_STMT in a block is nullptr. */ |
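// (In particular, the first statement's gtPrev points to the last statement in the block,
// whose gtNext is nullptr -- exactly what the checks below verify.)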
| 21605 | |
| 21606 | noway_assert(stmt->gtPrev); |
| 21607 | |
| 21608 | if (stmt == block->bbTreeList) |
| 21609 | { |
| 21610 | noway_assert(stmt->gtPrev->gtNext == nullptr); |
| 21611 | } |
| 21612 | else |
| 21613 | { |
| 21614 | noway_assert(stmt->gtPrev->gtNext == stmt); |
| 21615 | } |
| 21616 | |
| 21617 | if (stmt->gtNext) |
| 21618 | { |
| 21619 | noway_assert(stmt->gtNext->gtPrev == stmt); |
| 21620 | } |
| 21621 | else |
| 21622 | { |
| 21623 | noway_assert(block->lastStmt() == stmt); |
| 21624 | } |
| 21625 | |
| 21626 | /* For each statement check that the exception flags are properly set */ |
| 21627 | |
| 21628 | noway_assert(stmt->gtStmtExpr); |
| 21629 | |
| 21630 | if (verbose && 0) |
| 21631 | { |
| 21632 | gtDispTree(stmt->gtStmtExpr); |
| 21633 | } |
| 21634 | |
| 21635 | fgDebugCheckFlags(stmt->gtStmtExpr); |
| 21636 | |
| 21637 | // Not only will this stress fgMorphBlockStmt(), but we also get all the checks |
| 21638 | // done by fgMorphTree() |
| 21639 | |
| 21640 | if (morphTrees) |
| 21641 | { |
// If 'stmt' is removed from the block, restart the check for the current block
// and break out of the current loop.
if (fgMorphBlockStmt(block, stmt DEBUGARG("test morphing")))
| 21645 | { |
| 21646 | fgDebugCheckStmtsList(block, morphTrees); |
| 21647 | break; |
| 21648 | } |
| 21649 | } |
| 21650 | |
/* For each GT_STMT node check that the nodes are threaded correctly - gtStmtList */
| 21652 | |
| 21653 | if (fgStmtListThreaded) |
| 21654 | { |
| 21655 | fgDebugCheckNodeLinks(block, stmt); |
| 21656 | } |
| 21657 | } |
| 21658 | } |
| 21659 | |
| 21660 | // ensure that bbNext and bbPrev are consistent |
| 21661 | void Compiler::fgDebugCheckBlockLinks() |
| 21662 | { |
| 21663 | assert(fgFirstBB->bbPrev == nullptr); |
| 21664 | |
| 21665 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 21666 | { |
| 21667 | if (block->bbNext) |
| 21668 | { |
| 21669 | assert(block->bbNext->bbPrev == block); |
| 21670 | } |
| 21671 | else |
| 21672 | { |
| 21673 | assert(block == fgLastBB); |
| 21674 | } |
| 21675 | |
| 21676 | if (block->bbPrev) |
| 21677 | { |
| 21678 | assert(block->bbPrev->bbNext == block); |
| 21679 | } |
| 21680 | else |
| 21681 | { |
| 21682 | assert(block == fgFirstBB); |
| 21683 | } |
| 21684 | |
| 21685 | // If this is a switch, check that the tables are consistent. |
| 21686 | // Note that we don't call GetSwitchDescMap(), because it has the side-effect |
| 21687 | // of allocating it if it is not present. |
| 21688 | if (block->bbJumpKind == BBJ_SWITCH && m_switchDescMap != nullptr) |
| 21689 | { |
| 21690 | SwitchUniqueSuccSet uniqueSuccSet; |
| 21691 | if (m_switchDescMap->Lookup(block, &uniqueSuccSet)) |
| 21692 | { |
| 21693 | // Create a set with all the successors. Don't use BlockSet, so we don't need to worry |
| 21694 | // about the BlockSet epoch. |
| 21695 | BitVecTraits bitVecTraits(fgBBNumMax + 1, this); |
| 21696 | BitVec succBlocks(BitVecOps::MakeEmpty(&bitVecTraits)); |
| 21697 | BasicBlock** jumpTable = block->bbJumpSwt->bbsDstTab; |
| 21698 | unsigned jumpCount = block->bbJumpSwt->bbsCount; |
| 21699 | for (unsigned i = 0; i < jumpCount; i++) |
| 21700 | { |
| 21701 | BitVecOps::AddElemD(&bitVecTraits, succBlocks, jumpTable[i]->bbNum); |
| 21702 | } |
| 21703 | // Now we should have a set of unique successors that matches what's in the switchMap. |
| 21704 | // First, check the number of entries, then make sure all the blocks in uniqueSuccSet |
| 21705 | // are in the BlockSet. |
| 21706 | unsigned count = BitVecOps::Count(&bitVecTraits, succBlocks); |
| 21707 | assert(uniqueSuccSet.numDistinctSuccs == count); |
| 21708 | for (unsigned i = 0; i < uniqueSuccSet.numDistinctSuccs; i++) |
| 21709 | { |
| 21710 | assert(BitVecOps::IsMember(&bitVecTraits, succBlocks, uniqueSuccSet.nonDuplicates[i]->bbNum)); |
| 21711 | } |
| 21712 | } |
| 21713 | } |
| 21714 | } |
| 21715 | } |
| 21716 | |
// UniquenessCheckWalker keeps the data necessary to check
// that each tree node has its own unique gtTreeID and that no ID repeats.
| 21719 | class UniquenessCheckWalker |
| 21720 | { |
| 21721 | public: |
| 21722 | UniquenessCheckWalker(Compiler* comp) |
| 21723 | : comp(comp), nodesVecTraits(comp->compGenTreeID, comp), uniqueNodes(BitVecOps::MakeEmpty(&nodesVecTraits)) |
| 21724 | { |
| 21725 | } |
| 21726 | |
| 21727 | //------------------------------------------------------------------------ |
| 21728 | // fgMarkTreeId: Visit all subtrees in the tree and check gtTreeIDs. |
| 21729 | // |
| 21730 | // Arguments: |
| 21731 | // pTree - Pointer to the tree to walk |
| 21732 | // fgWalkPre - the UniquenessCheckWalker instance |
| 21733 | // |
| 21734 | static Compiler::fgWalkResult MarkTreeId(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) |
| 21735 | { |
| 21736 | UniquenessCheckWalker* walker = static_cast<UniquenessCheckWalker*>(fgWalkPre->pCallbackData); |
| 21737 | unsigned gtTreeID = (*pTree)->gtTreeID; |
| 21738 | walker->CheckTreeId(gtTreeID); |
| 21739 | return Compiler::WALK_CONTINUE; |
| 21740 | } |
| 21741 | |
| 21742 | //------------------------------------------------------------------------ |
// CheckTreeId: Check that this tree was not visited before and record it as visited.
//
// Arguments:
//    gtTreeID - the gtTreeID of the GenTree node.
| 21747 | // |
| 21748 | void CheckTreeId(unsigned gtTreeID) |
| 21749 | { |
| 21750 | assert(!BitVecOps::IsMember(&nodesVecTraits, uniqueNodes, gtTreeID)); |
| 21751 | BitVecOps::AddElemD(&nodesVecTraits, uniqueNodes, gtTreeID); |
| 21752 | } |
| 21753 | |
| 21754 | private: |
| 21755 | Compiler* comp; |
| 21756 | BitVecTraits nodesVecTraits; |
| 21757 | BitVec uniqueNodes; |
| 21758 | }; |
| 21759 | |
| 21760 | //------------------------------------------------------------------------------ |
| 21761 | // fgDebugCheckNodesUniqueness: Check that each tree in the method has its own unique gtTreeId. |
| 21762 | // |
| 21763 | void Compiler::fgDebugCheckNodesUniqueness() |
| 21764 | { |
| 21765 | UniquenessCheckWalker walker(this); |
| 21766 | |
| 21767 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 21768 | { |
| 21769 | if (block->IsLIR()) |
| 21770 | { |
| 21771 | for (GenTree* i : LIR::AsRange(block)) |
| 21772 | { |
| 21773 | walker.CheckTreeId(i->gtTreeID); |
| 21774 | } |
| 21775 | } |
| 21776 | else |
| 21777 | { |
| 21778 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 21779 | { |
| 21780 | GenTree* root = stmt->gtStmtExpr; |
| 21781 | fgWalkTreePre(&root, UniquenessCheckWalker::MarkTreeId, &walker); |
| 21782 | } |
| 21783 | } |
| 21784 | } |
| 21785 | } |
| 21786 | |
| 21787 | /*****************************************************************************/ |
| 21788 | #endif // DEBUG |
| 21789 | /*****************************************************************************/ |
| 21790 | |
| 21791 | //------------------------------------------------------------------------ |
| 21792 | // fgCheckForInlineDepthAndRecursion: compute depth of the candidate, and |
| 21793 | // check for recursion. |
| 21794 | // |
| 21795 | // Return Value: |
//   The depth of the inline candidate. The root method is at depth 0, top-level
//   candidates are at depth 1, etc.
| 21798 | // |
| 21799 | // Notes: |
| 21800 | // We generally disallow recursive inlines by policy. However, they are |
| 21801 | // supported by the underlying machinery. |
| 21802 | // |
| 21803 | // Likewise the depth limit is a policy consideration, and serves mostly |
| 21804 | // as a safeguard to prevent runaway inlining of small methods. |
| 21805 | // |
| 21806 | unsigned Compiler::fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo) |
| 21807 | { |
| 21808 | BYTE* candidateCode = inlineInfo->inlineCandidateInfo->methInfo.ILCode; |
| 21809 | InlineContext* inlineContext = inlineInfo->iciStmt->gtInlineContext; |
| 21810 | InlineResult* inlineResult = inlineInfo->inlineResult; |
| 21811 | |
| 21812 | // There should be a context for all candidates. |
| 21813 | assert(inlineContext != nullptr); |
| 21814 | int depth = 0; |
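// Walk up the inline context chain toward the root, counting levels. For a top-level
// candidate the statement's context is the root context, so the loop below runs once
// and the reported depth is 1.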
| 21815 | |
| 21816 | for (; inlineContext != nullptr; inlineContext = inlineContext->GetParent()) |
| 21817 | { |
| 21818 | |
| 21819 | depth++; |
| 21820 | |
| 21821 | if (inlineContext->GetCode() == candidateCode) |
| 21822 | { |
| 21823 | // This inline candidate has the same IL code buffer as an already |
| 21824 | // inlined method does. |
| 21825 | inlineResult->NoteFatal(InlineObservation::CALLSITE_IS_RECURSIVE); |
| 21826 | break; |
| 21827 | } |
| 21828 | |
| 21829 | if (depth > InlineStrategy::IMPLEMENTATION_MAX_INLINE_DEPTH) |
| 21830 | { |
| 21831 | break; |
| 21832 | } |
| 21833 | } |
| 21834 | |
| 21835 | inlineResult->NoteInt(InlineObservation::CALLSITE_DEPTH, depth); |
| 21836 | return depth; |
| 21837 | } |
| 21838 | |
| 21839 | /***************************************************************************** |
| 21840 | * |
| 21841 | * Inlining phase |
| 21842 | */ |
| 21843 | |
| 21844 | void Compiler::fgInline() |
| 21845 | { |
| 21846 | if (!opts.OptEnabled(CLFLG_INLINING)) |
| 21847 | { |
| 21848 | return; |
| 21849 | } |
| 21850 | |
| 21851 | #ifdef DEBUG |
| 21852 | if (verbose) |
| 21853 | { |
| 21854 | printf("*************** In fgInline()\n" ); |
| 21855 | } |
| 21856 | #endif // DEBUG |
| 21857 | |
| 21858 | BasicBlock* block = fgFirstBB; |
| 21859 | noway_assert(block != nullptr); |
| 21860 | |
| 21861 | // Set the root inline context on all statements |
| 21862 | InlineContext* rootContext = m_inlineStrategy->GetRootContext(); |
| 21863 | |
| 21864 | for (; block != nullptr; block = block->bbNext) |
| 21865 | { |
| 21866 | for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 21867 | { |
| 21868 | stmt->gtInlineContext = rootContext; |
| 21869 | } |
| 21870 | } |
| 21871 | |
| 21872 | // Reset block back to start for inlining |
| 21873 | block = fgFirstBB; |
| 21874 | |
| 21875 | do |
| 21876 | { |
| 21877 | // Make the current basic block address available globally |
| 21878 | compCurBB = block; |
| 21879 | |
| 21880 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 21881 | { |
| 21882 | |
| 21883 | #ifdef DEBUG |
| 21884 | // In debug builds we want the inline tree to show all failed |
| 21885 | // inlines. Some inlines may fail very early and never make it to |
| 21886 | // candidate stage. So scan the tree looking for those early failures. |
| 21887 | fgWalkTreePre(&stmt->gtStmtExpr, fgFindNonInlineCandidate, stmt); |
| 21888 | #endif |
| 21889 | |
| 21890 | GenTree* expr = stmt->gtStmtExpr; |
| 21891 | |
| 21892 | // The importer ensures that all inline candidates are |
| 21893 | // statement expressions. So see if we have a call. |
| 21894 | if (expr->IsCall()) |
| 21895 | { |
| 21896 | GenTreeCall* call = expr->AsCall(); |
| 21897 | |
| 21898 | // We do. Is it an inline candidate? |
| 21899 | // |
// Note we also process GuardedDevirtualization candidates here as we've
| 21901 | // split off GT_RET_EXPRs for them even when they are not inline candidates |
| 21902 | // as we need similar processing to ensure they get patched back to where |
| 21903 | // they belong. |
| 21904 | if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) |
| 21905 | { |
InlineResult inlineResult(this, call, stmt, "fgInline");
| 21907 | |
| 21908 | fgMorphStmt = stmt; |
| 21909 | |
| 21910 | fgMorphCallInline(call, &inlineResult); |
| 21911 | |
| 21912 | // fgMorphCallInline may have updated the |
| 21913 | // statement expression to a GT_NOP if the |
| 21914 | // call returned a value, regardless of |
| 21915 | // whether the inline succeeded or failed. |
| 21916 | // |
| 21917 | // If so, remove the GT_NOP and continue |
| 21918 | // on with the next statement. |
| 21919 | if (stmt->gtStmtExpr->IsNothingNode()) |
| 21920 | { |
| 21921 | fgRemoveStmt(block, stmt); |
| 21922 | continue; |
| 21923 | } |
| 21924 | } |
| 21925 | } |
| 21926 | |
| 21927 | // See if we need to replace some return value place holders. |
| 21928 | // Also, see if this replacement enables further devirtualization. |
| 21929 | // |
| 21930 | // Note we have both preorder and postorder callbacks here. |
| 21931 | // |
| 21932 | // The preorder callback is responsible for replacing GT_RET_EXPRs |
| 21933 | // with the appropriate expansion (call or inline result). |
| 21934 | // Replacement may introduce subtrees with GT_RET_EXPR and so |
| 21935 | // we rely on the preorder to recursively process those as well. |
| 21936 | // |
| 21937 | // On the way back up, the postorder callback then re-examines nodes for |
| 21938 | // possible further optimization, as the (now complete) GT_RET_EXPR |
| 21939 | // replacement may have enabled optimizations by providing more |
| 21940 | // specific types for trees or variables. |
| 21941 | fgWalkTree(&stmt->gtStmtExpr, fgUpdateInlineReturnExpressionPlaceHolder, fgLateDevirtualization, |
| 21942 | (void*)this); |
| 21943 | |
| 21944 | // See if stmt is of the form GT_COMMA(call, nop) |
| 21945 | // If yes, we can get rid of GT_COMMA. |
| 21946 | if (expr->OperGet() == GT_COMMA && expr->gtOp.gtOp1->OperGet() == GT_CALL && |
| 21947 | expr->gtOp.gtOp2->OperGet() == GT_NOP) |
| 21948 | { |
| 21949 | stmt->gtStmtExpr = expr->gtOp.gtOp1; |
| 21950 | } |
| 21951 | } |
| 21952 | |
| 21953 | block = block->bbNext; |
| 21954 | |
| 21955 | } while (block); |
| 21956 | |
| 21957 | #ifdef DEBUG |
| 21958 | |
| 21959 | // Check that we should not have any inline candidate or return value place holder left. |
| 21960 | |
| 21961 | block = fgFirstBB; |
| 21962 | noway_assert(block); |
| 21963 | |
| 21964 | do |
| 21965 | { |
| 21966 | GenTreeStmt* stmt; |
| 21967 | |
| 21968 | for (stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 21969 | { |
| 21970 | // Call Compiler::fgDebugCheckInlineCandidates on each node |
| 21971 | fgWalkTreePre(&stmt->gtStmtExpr, fgDebugCheckInlineCandidates); |
| 21972 | } |
| 21973 | |
| 21974 | block = block->bbNext; |
| 21975 | |
| 21976 | } while (block); |
| 21977 | |
| 21978 | fgVerifyHandlerTab(); |
| 21979 | |
| 21980 | if (verbose) |
| 21981 | { |
| 21982 | printf("*************** After fgInline()\n" ); |
| 21983 | fgDispBasicBlocks(true); |
| 21984 | fgDispHandlerTab(); |
| 21985 | } |
| 21986 | |
| 21987 | if (verbose || fgPrintInlinedMethods) |
| 21988 | { |
| 21989 | JITDUMP("**************** Inline Tree" ); |
| 21990 | printf("\n" ); |
| 21991 | m_inlineStrategy->Dump(verbose); |
| 21992 | } |
| 21993 | |
| 21994 | #endif // DEBUG |
| 21995 | } |
| 21996 | |
| 21997 | #ifdef DEBUG |
| 21998 | |
| 21999 | //------------------------------------------------------------------------ |
| 22000 | // fgFindNonInlineCandidate: tree walk helper to ensure that a tree node |
| 22001 | // that is not an inline candidate is noted as a failed inline. |
| 22002 | // |
| 22003 | // Arguments: |
| 22004 | // pTree - pointer to pointer tree node being walked |
| 22005 | // data - contextual data for the walk |
| 22006 | // |
| 22007 | // Return Value: |
| 22008 | // walk result |
| 22009 | // |
| 22010 | // Note: |
| 22011 | // Invokes fgNoteNonInlineCandidate on the nodes it finds. |
| 22012 | |
| 22013 | Compiler::fgWalkResult Compiler::fgFindNonInlineCandidate(GenTree** pTree, fgWalkData* data) |
| 22014 | { |
| 22015 | GenTree* tree = *pTree; |
| 22016 | if (tree->gtOper == GT_CALL) |
| 22017 | { |
| 22018 | Compiler* compiler = data->compiler; |
| 22019 | GenTreeStmt* stmt = (GenTreeStmt*)data->pCallbackData; |
| 22020 | GenTreeCall* call = tree->AsCall(); |
| 22021 | |
| 22022 | compiler->fgNoteNonInlineCandidate(stmt, call); |
| 22023 | } |
| 22024 | return WALK_CONTINUE; |
| 22025 | } |
| 22026 | |
| 22027 | //------------------------------------------------------------------------ |
| 22028 | // fgNoteNonInlineCandidate: account for inlining failures in calls |
| 22029 | // not marked as inline candidates. |
| 22030 | // |
| 22031 | // Arguments: |
| 22032 | // stmt - statement containing the call |
| 22033 | // call - the call itself |
| 22034 | // |
| 22035 | // Notes: |
| 22036 | // Used in debug only to try and place descriptions of inline failures |
| 22037 | // into the proper context in the inline tree. |
| 22038 | |
| 22039 | void Compiler::fgNoteNonInlineCandidate(GenTreeStmt* stmt, GenTreeCall* call) |
| 22040 | { |
| 22041 | if (call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate()) |
| 22042 | { |
| 22043 | return; |
| 22044 | } |
| 22045 | |
InlineResult inlineResult(this, call, nullptr, "fgNotInlineCandidate");
| 22047 | InlineObservation currentObservation = InlineObservation::CALLSITE_NOT_CANDIDATE; |
| 22048 | |
// Try to recover the reason left behind when the jit decided
// this call was not a candidate.
| 22051 | InlineObservation priorObservation = call->gtInlineObservation; |
| 22052 | |
| 22053 | if (InlIsValidObservation(priorObservation)) |
| 22054 | { |
| 22055 | currentObservation = priorObservation; |
| 22056 | } |
| 22057 | |
| 22058 | // Propagate the prior failure observation to this result. |
| 22059 | inlineResult.NotePriorFailure(currentObservation); |
| 22060 | inlineResult.SetReported(); |
| 22061 | |
| 22062 | if (call->gtCallType == CT_USER_FUNC) |
| 22063 | { |
| 22064 | // Create InlineContext for the failure |
| 22065 | m_inlineStrategy->NewFailure(stmt, &inlineResult); |
| 22066 | } |
| 22067 | } |
| 22068 | |
| 22069 | #endif |
| 22070 | |
| 22071 | #if FEATURE_MULTIREG_RET |
| 22072 | |
| 22073 | /********************************************************************************* |
| 22074 | * |
| 22075 | * tree - The node which needs to be converted to a struct pointer. |
| 22076 | * |
* Returns a pointer to the struct by either __replacing__ the tree node with a
* suitable pointer-typed node, __without replacing__ by just returning a subtree,
* or by __modifying__ a subtree in place.
| 22080 | */ |
| 22081 | GenTree* Compiler::fgGetStructAsStructPtr(GenTree* tree) |
| 22082 | { |
| 22083 | noway_assert((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_FIELD) || (tree->gtOper == GT_IND) || |
| 22084 | (tree->gtOper == GT_BLK) || (tree->gtOper == GT_OBJ) || tree->OperIsSIMD() || |
| 22085 | // tree->gtOper == GT_CALL || cannot get address of call. |
| 22086 | // tree->gtOper == GT_MKREFANY || inlining should've been aborted due to mkrefany opcode. |
| 22087 | // tree->gtOper == GT_RET_EXPR || cannot happen after fgUpdateInlineReturnExpressionPlaceHolder |
| 22088 | (tree->gtOper == GT_COMMA)); |
| 22089 | |
| 22090 | switch (tree->OperGet()) |
| 22091 | { |
| 22092 | case GT_BLK: |
| 22093 | case GT_OBJ: |
| 22094 | case GT_IND: |
| 22095 | return tree->gtOp.gtOp1; |
| 22096 | |
| 22097 | case GT_COMMA: |
| 22098 | tree->gtOp.gtOp2 = fgGetStructAsStructPtr(tree->gtOp.gtOp2); |
| 22099 | tree->gtType = TYP_BYREF; |
| 22100 | return tree; |
| 22101 | |
| 22102 | default: |
| 22103 | return gtNewOperNode(GT_ADDR, TYP_BYREF, tree); |
| 22104 | } |
| 22105 | } |
| 22106 | |
| 22107 | /*************************************************************************************************** |
| 22108 | * child - The inlinee of the retExpr node. |
| 22109 | * retClsHnd - The struct class handle of the type of the inlinee. |
| 22110 | * |
* Assign the inlinee to a temp: if it is a call, just assign it to a lclVar; otherwise
* we can use a copy block to do the assignment.
| 22113 | */ |
| 22114 | GenTree* Compiler::fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) |
| 22115 | { |
| 22116 | assert(child->gtOper != GT_RET_EXPR && child->gtOper != GT_MKREFANY); |
| 22117 | |
unsigned tmpNum = lvaGrabTemp(false DEBUGARG("RetBuf for struct inline return candidates."));
| 22119 | lvaSetStruct(tmpNum, retClsHnd, false); |
| 22120 | var_types structType = lvaTable[tmpNum].lvType; |
| 22121 | |
| 22122 | GenTree* dst = gtNewLclvNode(tmpNum, structType); |
| 22123 | |
| 22124 | // If we have a call, we'd like it to be: V00 = call(), but first check if |
| 22125 | // we have a ", , , call()" -- this is very defensive as we may never get |
| 22126 | // an inlinee that is made of commas. If the inlinee is not a call, then |
| 22127 | // we use a copy block to do the assignment. |
| 22128 | GenTree* src = child; |
| 22129 | GenTree* lastComma = nullptr; |
| 22130 | while (src->gtOper == GT_COMMA) |
| 22131 | { |
| 22132 | lastComma = src; |
| 22133 | src = src->gtOp.gtOp2; |
| 22134 | } |
| 22135 | |
| 22136 | GenTree* newInlinee = nullptr; |
| 22137 | if (src->gtOper == GT_CALL) |
| 22138 | { |
| 22139 | // If inlinee was just a call, new inlinee is v05 = call() |
| 22140 | newInlinee = gtNewAssignNode(dst, src); |
| 22141 | |
| 22142 | // When returning a multi-register value in a local var, make sure the variable is |
| 22143 | // marked as lvIsMultiRegRet, so it does not get promoted. |
| 22144 | if (src->AsCall()->HasMultiRegRetVal()) |
| 22145 | { |
| 22146 | lvaTable[tmpNum].lvIsMultiRegRet = true; |
| 22147 | } |
| 22148 | |
| 22149 | // If inlinee was comma, but a deeper call, new inlinee is (, , , v05 = call()) |
| 22150 | if (child->gtOper == GT_COMMA) |
| 22151 | { |
| 22152 | lastComma->gtOp.gtOp2 = newInlinee; |
| 22153 | newInlinee = child; |
| 22154 | } |
| 22155 | } |
| 22156 | else |
| 22157 | { |
| 22158 | // Inlinee is not a call, so just create a copy block to the tmp. |
| 22159 | src = child; |
| 22160 | GenTree* dstAddr = fgGetStructAsStructPtr(dst); |
| 22161 | GenTree* srcAddr = fgGetStructAsStructPtr(src); |
| 22162 | newInlinee = gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false); |
| 22163 | } |
| 22164 | |
| 22165 | GenTree* production = gtNewLclvNode(tmpNum, structType); |
| 22166 | return gtNewOperNode(GT_COMMA, structType, newInlinee, production); |
| 22167 | } |
| 22168 | |
| 22169 | /*************************************************************************************************** |
| 22170 | * tree - The tree pointer that has one of its child nodes as retExpr. |
| 22171 | * child - The inlinee child. |
| 22172 | * retClsHnd - The struct class handle of the type of the inlinee. |
| 22173 | * |
* V04 = call() assignments are okay as we can codegen them directly. Everything else needs to be
* a copy block or would need a temp. For example, a cast(ldobj) would become cast(v05 = ldobj, v05).
* But it is a very rare (or impossible) scenario for a retExpr to transform into an ldobj other than
* a lclVar/call, so it is not worthwhile to do pattern-matching optimizations like turning
* addr(ldobj(op1)) into just op1.
| 22179 | */ |
| 22180 | void Compiler::fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd) |
| 22181 | { |
| 22182 | // We are okay to have: |
| 22183 | // 1. V02 = call(); |
| 22184 | // 2. copyBlk(dstAddr, srcAddr); |
| 22185 | assert(tree->gtOper == GT_ASG); |
| 22186 | |
| 22187 | // We have an assignment, we codegen only V05 = call(). |
| 22188 | if (child->gtOper == GT_CALL && tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) |
| 22189 | { |
| 22190 | // If it is a multireg return on x64/ux, the local variable should be marked as lvIsMultiRegRet |
| 22191 | if (child->AsCall()->HasMultiRegRetVal()) |
| 22192 | { |
| 22193 | unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum; |
| 22194 | lvaTable[lclNum].lvIsMultiRegRet = true; |
| 22195 | } |
| 22196 | return; |
| 22197 | } |
| 22198 | |
| 22199 | GenTree* dstAddr = fgGetStructAsStructPtr(tree->gtOp.gtOp1); |
| 22200 | GenTree* srcAddr = fgGetStructAsStructPtr( |
| 22201 | (child->gtOper == GT_CALL) |
| 22202 | ? fgAssignStructInlineeToVar(child, retClsHnd) // Assign to a variable if it is a call. |
| 22203 | : child); // Just get the address, if not a call. |
| 22204 | |
| 22205 | tree->ReplaceWith(gtNewCpObjNode(dstAddr, srcAddr, retClsHnd, false), this); |
| 22206 | } |
| 22207 | |
| 22208 | #endif // FEATURE_MULTIREG_RET |
| 22209 | |
| 22210 | //------------------------------------------------------------------------ |
| 22211 | // fgUpdateInlineReturnExpressionPlaceHolder: callback to replace the |
| 22212 | // inline return expression placeholder. |
| 22213 | // |
| 22214 | // Arguments: |
| 22215 | // pTree -- pointer to tree to examine for updates |
| 22216 | // data -- context data for the tree walk |
| 22217 | // |
| 22218 | // Returns: |
| 22219 | // fgWalkResult indicating the walk should continue; that |
| 22220 | // is we wish to fully explore the tree. |
| 22221 | // |
| 22222 | // Notes: |
| 22223 | // Looks for GT_RET_EXPR nodes that arose from tree splitting done |
| 22224 | // during importation for inline candidates, and replaces them. |
| 22225 | // |
| 22226 | // For successful inlines, substitutes the return value expression |
| 22227 | // from the inline body for the GT_RET_EXPR. |
| 22228 | // |
| 22229 | // For failed inlines, rejoins the original call into the tree from |
| 22230 | // whence it was split during importation. |
| 22231 | // |
| 22232 | // The code doesn't actually know if the corresponding inline |
| 22233 | // succeeded or not; it relies on the fact that gtInlineCandidate |
| 22234 | // initially points back at the call and is modified in place to |
| 22235 | // the inlinee return expression if the inline is successful (see |
| 22236 | // tail end of fgInsertInlineeBlocks for the update of iciCall). |
| 22237 | // |
| 22238 | // If the return type is a struct type and we're on a platform |
| 22239 | // where structs can be returned in multiple registers, ensure the |
| 22240 | // call has a suitable parent. |
| 22241 | |
| 22242 | Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTree** pTree, fgWalkData* data) |
| 22243 | { |
| 22244 | // All the operations here and in the corresponding postorder |
| 22245 | // callback (fgLateDevirtualization) are triggered by GT_CALL or |
| 22246 | // GT_RET_EXPR trees, and these (should) have the call side |
| 22247 | // effect flag. |
| 22248 | // |
| 22249 | // So bail out for any trees that don't have this flag. |
| 22250 | GenTree* tree = *pTree; |
| 22251 | |
| 22252 | if ((tree->gtFlags & GTF_CALL) == 0) |
| 22253 | { |
| 22254 | return WALK_SKIP_SUBTREES; |
| 22255 | } |
| 22256 | |
| 22257 | Compiler* comp = data->compiler; |
| 22258 | CORINFO_CLASS_HANDLE retClsHnd = NO_CLASS_HANDLE; |
| 22259 | |
| 22260 | if (tree->OperGet() == GT_RET_EXPR) |
| 22261 | { |
| 22262 | // We are going to copy the tree from the inlinee, |
| 22263 | // so record the handle now. |
| 22264 | // |
| 22265 | if (varTypeIsStruct(tree)) |
| 22266 | { |
| 22267 | retClsHnd = tree->gtRetExpr.gtRetClsHnd; |
| 22268 | } |
| 22269 | |
| 22270 | // Skip through chains of GT_RET_EXPRs (say from nested inlines) |
| 22271 | // to the actual tree to use. |
| 22272 | GenTree* inlineCandidate = tree->gtRetExprVal(); |
| 22273 | var_types retType = tree->TypeGet(); |
| 22274 | |
| 22275 | #ifdef DEBUG |
| 22276 | if (comp->verbose) |
| 22277 | { |
| 22278 | printf("\nReplacing the return expression placeholder " ); |
| 22279 | printTreeID(tree); |
| 22280 | printf(" with " ); |
| 22281 | printTreeID(inlineCandidate); |
| 22282 | printf("\n" ); |
| 22283 | // Dump out the old return expression placeholder it will be overwritten by the ReplaceWith below |
| 22284 | comp->gtDispTree(tree); |
| 22285 | } |
| 22286 | #endif // DEBUG |
| 22287 | |
| 22288 | tree->ReplaceWith(inlineCandidate, comp); |
| 22289 | |
| 22290 | #ifdef DEBUG |
| 22291 | if (comp->verbose) |
| 22292 | { |
| 22293 | printf("\nInserting the inline return expression\n" ); |
| 22294 | comp->gtDispTree(tree); |
| 22295 | printf("\n" ); |
| 22296 | } |
| 22297 | #endif // DEBUG |
| 22298 | |
| 22299 | var_types newType = tree->TypeGet(); |
| 22300 | |
| 22301 | // If we end up swapping in an RVA static we may need to retype it here, |
| 22302 | // if we've reinterpreted it as a byref. |
| 22303 | if ((retType != newType) && (retType == TYP_BYREF) && (tree->OperGet() == GT_IND)) |
| 22304 | { |
| 22305 | assert(newType == TYP_I_IMPL); |
| 22306 | JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n" ); |
| 22307 | tree->gtType = TYP_BYREF; |
| 22308 | } |
| 22309 | } |
| 22310 | |
| 22311 | // If an inline was rejected and the call returns a struct, we may |
| 22312 | // have deferred some work when importing call for cases where the |
| 22313 | // struct is returned in register(s). |
| 22314 | // |
| 22315 | // See the bail-out clauses in impFixupCallStructReturn for inline |
| 22316 | // candidates. |
| 22317 | // |
| 22318 | // Do the deferred work now. |
| 22319 | if (retClsHnd != NO_CLASS_HANDLE) |
| 22320 | { |
| 22321 | structPassingKind howToReturnStruct; |
| 22322 | var_types returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct); |
| 22323 | GenTree* parent = data->parent; |
| 22324 | |
| 22325 | switch (howToReturnStruct) |
| 22326 | { |
| 22327 | |
| 22328 | #if FEATURE_MULTIREG_RET |
| 22329 | |
| 22330 | // Is this a type that is returned in multiple registers |
// or via a primitive type that is larger than the struct type?
// If so we need to force it into a form we accept,
| 22333 | // i.e. LclVar = call() |
| 22334 | case SPK_ByValue: |
| 22335 | case SPK_ByValueAsHfa: |
| 22336 | { |
| 22337 | // See assert below, we only look one level above for an asg parent. |
| 22338 | if (parent->gtOper == GT_ASG) |
| 22339 | { |
| 22340 | // Either lhs is a call V05 = call(); or lhs is addr, and asg becomes a copyBlk. |
| 22341 | comp->fgAttachStructInlineeToAsg(parent, tree, retClsHnd); |
| 22342 | } |
| 22343 | else |
| 22344 | { |
| 22345 | // Just assign the inlinee to a variable to keep it simple. |
| 22346 | tree->ReplaceWith(comp->fgAssignStructInlineeToVar(tree, retClsHnd), comp); |
| 22347 | } |
| 22348 | } |
| 22349 | break; |
| 22350 | |
| 22351 | #endif // FEATURE_MULTIREG_RET |
| 22352 | |
| 22353 | case SPK_EnclosingType: |
| 22354 | { |
| 22355 | // For enclosing type returns, we must return the call value to a temp since |
| 22356 | // the return type is larger than the struct type. |
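// The transformed shape is COMMA(ASG(LCL_FLD<primitive> tmp, call<primitive>), LCL_VAR<struct> tmp):
// the call is retyped to the wider primitive return type, stored into a struct-typed temp
// via a GT_LCL_FLD, and the temp is then used as the struct value of the original tree.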
| 22357 | if (!tree->IsCall()) |
| 22358 | { |
| 22359 | break; |
| 22360 | } |
| 22361 | |
| 22362 | GenTreeCall* call = tree->AsCall(); |
| 22363 | |
| 22364 | assert(call->gtReturnType == TYP_STRUCT); |
| 22365 | |
| 22366 | if (call->gtReturnType != TYP_STRUCT) |
| 22367 | { |
| 22368 | break; |
| 22369 | } |
| 22370 | |
| 22371 | JITDUMP("\nCall returns small struct via enclosing type, retyping. Before:\n" ); |
| 22372 | DISPTREE(call); |
| 22373 | |
| 22374 | // Create new struct typed temp for return value |
| 22375 | const unsigned tmpNum = |
comp->lvaGrabTemp(true DEBUGARG("small struct return temp for rejected inline"));
| 22377 | comp->lvaSetStruct(tmpNum, retClsHnd, false); |
| 22378 | GenTree* assign = comp->gtNewTempAssign(tmpNum, call); |
| 22379 | |
| 22380 | // Modify assign tree and call return types to the primitive return type |
| 22381 | call->gtReturnType = returnType; |
| 22382 | call->gtType = returnType; |
| 22383 | assign->gtType = returnType; |
| 22384 | |
| 22385 | // Modify the temp reference in the assign as a primitive reference via GT_LCL_FLD |
| 22386 | GenTree* tempAsPrimitive = assign->gtOp.gtOp1; |
| 22387 | assert(tempAsPrimitive->gtOper == GT_LCL_VAR); |
| 22388 | tempAsPrimitive->gtType = returnType; |
| 22389 | tempAsPrimitive->ChangeOper(GT_LCL_FLD); |
| 22390 | |
| 22391 | // Return temp as value of call tree via comma |
| 22392 | GenTree* tempAsStruct = comp->gtNewLclvNode(tmpNum, TYP_STRUCT); |
| 22393 | GenTree* comma = comp->gtNewOperNode(GT_COMMA, TYP_STRUCT, assign, tempAsStruct); |
| 22394 | parent->ReplaceOperand(pTree, comma); |
| 22395 | |
| 22396 | JITDUMP("\nAfter:\n" ); |
| 22397 | DISPTREE(comma); |
| 22398 | } |
| 22399 | break; |
| 22400 | |
| 22401 | case SPK_PrimitiveType: |
| 22402 | // We should have already retyped the call as a primitive type |
| 22403 | // when we first imported the call |
| 22404 | break; |
| 22405 | |
| 22406 | case SPK_ByReference: |
| 22407 | // We should have already added the return buffer |
| 22408 | // when we first imported the call |
| 22409 | break; |
| 22410 | |
| 22411 | default: |
| 22412 | noway_assert(!"Unexpected struct passing kind" ); |
| 22413 | break; |
| 22414 | } |
| 22415 | } |
| 22416 | |
| 22417 | #if FEATURE_MULTIREG_RET |
| 22418 | #if defined(DEBUG) |
| 22419 | |
| 22420 | // Make sure we don't have a tree like so: V05 = (, , , retExpr); |
| 22421 | // Since we only look one level above for the parent for '=' and |
| 22422 | // do not check if there is a series of COMMAs. See above. |
| 22423 | // Importer and FlowGraph will not generate such a tree, so just |
| 22424 | // leaving an assert in here. This can be fixed by looking ahead |
| 22425 | // when we visit GT_ASG similar to fgAttachStructInlineeToAsg. |
| 22426 | // |
| 22427 | if (tree->OperGet() == GT_ASG) |
| 22428 | { |
| 22429 | GenTree* value = tree->gtOp.gtOp2; |
| 22430 | |
| 22431 | if (value->OperGet() == GT_COMMA) |
| 22432 | { |
| 22433 | GenTree* effectiveValue = value->gtEffectiveVal(/*commaOnly*/ true); |
| 22434 | |
| 22435 | noway_assert(!varTypeIsStruct(effectiveValue) || (effectiveValue->OperGet() != GT_RET_EXPR) || |
| 22436 | !comp->IsMultiRegReturnedType(effectiveValue->gtRetExpr.gtRetClsHnd)); |
| 22437 | } |
| 22438 | } |
| 22439 | |
| 22440 | #endif // defined(DEBUG) |
| 22441 | #endif // FEATURE_MULTIREG_RET |
| 22442 | |
| 22443 | return WALK_CONTINUE; |
| 22444 | } |
| 22445 | |
| 22446 | //------------------------------------------------------------------------ |
| 22447 | // fgLateDevirtualization: re-examine calls after inlining to see if we |
| 22448 | // can do more devirtualization |
| 22449 | // |
| 22450 | // Arguments: |
| 22451 | // pTree -- pointer to tree to examine for updates |
| 22452 | // data -- context data for the tree walk |
| 22453 | // |
| 22454 | // Returns: |
| 22455 | // fgWalkResult indicating the walk should continue; that |
| 22456 | // is we wish to fully explore the tree. |
| 22457 | // |
| 22458 | // Notes: |
| 22459 | // We used to check this opportunistically in the preorder callback for |
| 22460 | // calls where the `obj` was fed by a return, but we now re-examine |
| 22461 | // all calls. |
| 22462 | // |
| 22463 | // Late devirtualization (and eventually, perhaps, other type-driven |
| 22464 | // opts like cast optimization) can happen now because inlining or other |
| 22465 | // optimizations may have provided more accurate types than we saw when |
| 22466 | // first importing the trees. |
| 22467 | // |
| 22468 | // It would be nice to screen candidate sites based on the likelihood |
| 22469 | // that something has changed. Otherwise we'll waste some time retrying |
| 22470 | // an optimization that will just fail again. |
| 22471 | |
| 22472 | Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkData* data) |
| 22473 | { |
| 22474 | GenTree* tree = *pTree; |
| 22475 | GenTree* parent = data->parent; |
| 22476 | Compiler* comp = data->compiler; |
| 22477 | |
| 22478 | // In some (rare) cases the parent node of tree will be smashed to a NOP during |
// the preorder by fgAttachStructInlineeToAsg.
| 22480 | // |
| 22481 | // jit\Methodical\VT\callconv\_il_reljumper3 for x64 linux |
| 22482 | // |
| 22483 | // If so, just bail out here. |
| 22484 | if (tree == nullptr) |
| 22485 | { |
| 22486 | assert((parent != nullptr) && parent->OperGet() == GT_NOP); |
| 22487 | return WALK_CONTINUE; |
| 22488 | } |
| 22489 | |
| 22490 | if (tree->OperGet() == GT_CALL) |
| 22491 | { |
| 22492 | GenTreeCall* call = tree->AsCall(); |
| 22493 | bool tryLateDevirt = call->IsVirtual() && (call->gtCallType == CT_USER_FUNC); |
| 22494 | |
| 22495 | #ifdef DEBUG |
| 22496 | tryLateDevirt = tryLateDevirt && (JitConfig.JitEnableLateDevirtualization() == 1); |
| 22497 | #endif // DEBUG |
| 22498 | |
| 22499 | if (tryLateDevirt) |
| 22500 | { |
| 22501 | #ifdef DEBUG |
| 22502 | if (comp->verbose) |
| 22503 | { |
| 22504 | printf("**** Late devirt opportunity\n" ); |
| 22505 | comp->gtDispTree(call); |
| 22506 | } |
| 22507 | #endif // DEBUG |
| 22508 | |
| 22509 | CORINFO_METHOD_HANDLE method = call->gtCallMethHnd; |
| 22510 | unsigned methodFlags = 0; |
| 22511 | CORINFO_CONTEXT_HANDLE context = nullptr; |
| 22512 | const bool isLateDevirtualization = true; |
| 22513 | comp->impDevirtualizeCall(call, &method, &methodFlags, &context, nullptr, isLateDevirtualization); |
| 22514 | } |
| 22515 | } |
| 22516 | else if (tree->OperGet() == GT_ASG) |
| 22517 | { |
| 22518 | // If we're assigning to a ref typed local that has one definition, |
| 22519 | // we may be able to sharpen the type for the local. |
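// For example, if the single definition assigns the result of a call that is now known to
// return a more derived class, lvaUpdateClass records the sharper (possibly exact) type,
// which can enable devirtualization of later calls made through this local.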
| 22520 | GenTree* lhs = tree->gtGetOp1()->gtEffectiveVal(); |
| 22521 | |
| 22522 | if ((lhs->OperGet() == GT_LCL_VAR) && (lhs->TypeGet() == TYP_REF)) |
| 22523 | { |
| 22524 | const unsigned lclNum = lhs->gtLclVarCommon.gtLclNum; |
| 22525 | LclVarDsc* lcl = comp->lvaGetDesc(lclNum); |
| 22526 | |
| 22527 | if (lcl->lvSingleDef) |
| 22528 | { |
| 22529 | GenTree* rhs = tree->gtGetOp2(); |
| 22530 | bool isExact = false; |
| 22531 | bool isNonNull = false; |
| 22532 | CORINFO_CLASS_HANDLE newClass = comp->gtGetClassHandle(rhs, &isExact, &isNonNull); |
| 22533 | |
| 22534 | if (newClass != NO_CLASS_HANDLE) |
| 22535 | { |
| 22536 | comp->lvaUpdateClass(lclNum, newClass, isExact); |
| 22537 | } |
| 22538 | } |
| 22539 | } |
| 22540 | } |
| 22541 | |
| 22542 | return WALK_CONTINUE; |
| 22543 | } |
| 22544 | |
| 22545 | #ifdef DEBUG |
| 22546 | |
| 22547 | /***************************************************************************** |
| 22548 | * Callback to make sure there is no more GT_RET_EXPR and GTF_CALL_INLINE_CANDIDATE nodes. |
| 22549 | */ |
| 22550 | |
| 22551 | /* static */ |
| 22552 | Compiler::fgWalkResult Compiler::fgDebugCheckInlineCandidates(GenTree** pTree, fgWalkData* data) |
| 22553 | { |
| 22554 | GenTree* tree = *pTree; |
| 22555 | if (tree->gtOper == GT_CALL) |
| 22556 | { |
| 22557 | assert((tree->gtFlags & GTF_CALL_INLINE_CANDIDATE) == 0); |
| 22558 | } |
| 22559 | else |
| 22560 | { |
| 22561 | assert(tree->gtOper != GT_RET_EXPR); |
| 22562 | } |
| 22563 | |
| 22564 | return WALK_CONTINUE; |
| 22565 | } |
| 22566 | |
| 22567 | #endif // DEBUG |
| 22568 | |
| 22569 | void Compiler::fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* inlineResult) |
| 22570 | { |
| 22571 | noway_assert(call->gtOper == GT_CALL); |
| 22572 | noway_assert((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0); |
| 22573 | noway_assert(opts.OptEnabled(CLFLG_INLINING)); |
| 22574 | |
| 22575 | // This is the InlineInfo struct representing a method to be inlined. |
| 22576 | InlineInfo inlineInfo; |
| 22577 | memset(&inlineInfo, 0, sizeof(inlineInfo)); |
| 22578 | CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd; |
| 22579 | |
| 22580 | inlineInfo.fncHandle = fncHandle; |
| 22581 | inlineInfo.iciCall = call; |
| 22582 | inlineInfo.iciStmt = fgMorphStmt; |
| 22583 | inlineInfo.iciBlock = compCurBB; |
| 22584 | inlineInfo.thisDereferencedFirst = false; |
| 22585 | inlineInfo.retExpr = nullptr; |
| 22586 | inlineInfo.retExprClassHnd = nullptr; |
| 22587 | inlineInfo.retExprClassHndIsExact = false; |
| 22588 | inlineInfo.inlineResult = inlineResult; |
| 22589 | #ifdef FEATURE_SIMD |
| 22590 | inlineInfo.hasSIMDTypeArgLocalOrReturn = false; |
| 22591 | #endif // FEATURE_SIMD |
| 22592 | |
| 22593 | InlineCandidateInfo* inlineCandidateInfo = call->gtInlineCandidateInfo; |
| 22594 | noway_assert(inlineCandidateInfo); |
| 22595 | // Store the link to inlineCandidateInfo into inlineInfo |
| 22596 | inlineInfo.inlineCandidateInfo = inlineCandidateInfo; |
| 22597 | |
| 22598 | unsigned inlineDepth = fgCheckInlineDepthAndRecursion(&inlineInfo); |
| 22599 | |
| 22600 | if (inlineResult->IsFailure()) |
| 22601 | { |
| 22602 | #ifdef DEBUG |
| 22603 | if (verbose) |
| 22604 | { |
| 22605 | printf("Recursive or deep inline recursion detected. Will not expand this INLINECANDIDATE \n" ); |
| 22606 | } |
| 22607 | #endif // DEBUG |
| 22608 | return; |
| 22609 | } |
| 22610 | |
| 22611 | // Set the trap to catch all errors (including recoverable ones from the EE) |
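// eeRunWithErrorTrap invokes a captureless callback under the EE's error trap, so
// everything the callback needs is passed explicitly through this Param struct.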
| 22612 | struct Param |
| 22613 | { |
| 22614 | Compiler* pThis; |
| 22615 | GenTree* call; |
| 22616 | CORINFO_METHOD_HANDLE fncHandle; |
| 22617 | InlineCandidateInfo* inlineCandidateInfo; |
| 22618 | InlineInfo* inlineInfo; |
| 22619 | } param; |
| 22620 | memset(¶m, 0, sizeof(param)); |
| 22621 | |
| 22622 | param.pThis = this; |
| 22623 | param.call = call; |
| 22624 | param.fncHandle = fncHandle; |
| 22625 | param.inlineCandidateInfo = inlineCandidateInfo; |
| 22626 | param.inlineInfo = &inlineInfo; |
| 22627 | bool success = eeRunWithErrorTrap<Param>( |
| 22628 | [](Param* pParam) { |
| 22629 | // Init the local var info of the inlinee |
| 22630 | pParam->pThis->impInlineInitVars(pParam->inlineInfo); |
| 22631 | |
| 22632 | if (pParam->inlineInfo->inlineResult->IsCandidate()) |
| 22633 | { |
| 22634 | /* Clear the temp table */ |
| 22635 | memset(pParam->inlineInfo->lclTmpNum, -1, sizeof(pParam->inlineInfo->lclTmpNum)); |
| 22636 | |
| 22637 | // |
| 22638 | // Prepare the call to jitNativeCode |
| 22639 | // |
| 22640 | |
| 22641 | pParam->inlineInfo->InlinerCompiler = pParam->pThis; |
| 22642 | if (pParam->pThis->impInlineInfo == nullptr) |
| 22643 | { |
| 22644 | pParam->inlineInfo->InlineRoot = pParam->pThis; |
| 22645 | } |
| 22646 | else |
| 22647 | { |
| 22648 | pParam->inlineInfo->InlineRoot = pParam->pThis->impInlineInfo->InlineRoot; |
| 22649 | } |
| 22650 | pParam->inlineInfo->argCnt = pParam->inlineCandidateInfo->methInfo.args.totalILArgs(); |
| 22651 | pParam->inlineInfo->tokenLookupContextHandle = pParam->inlineCandidateInfo->exactContextHnd; |
| 22652 | |
| 22653 | JITLOG_THIS(pParam->pThis, |
| 22654 | (LL_INFO100000, "INLINER: inlineInfo.tokenLookupContextHandle for %s set to 0x%p:\n" , |
| 22655 | pParam->pThis->eeGetMethodFullName(pParam->fncHandle), |
| 22656 | pParam->pThis->dspPtr(pParam->inlineInfo->tokenLookupContextHandle))); |
| 22657 | |
| 22658 | JitFlags compileFlagsForInlinee = *pParam->pThis->opts.jitFlags; |
| 22659 | |
| 22660 | // The following flags are lost when inlining. |
| 22661 | // (This is checked in Compiler::compInitOptions().) |
| 22662 | compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBOPT); |
| 22663 | compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_BBINSTR); |
| 22664 | compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_PROF_ENTERLEAVE); |
| 22665 | compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_EnC); |
| 22666 | compileFlagsForInlinee.Clear(JitFlags::JIT_FLAG_DEBUG_INFO); |
| 22667 | |
| 22668 | compileFlagsForInlinee.Set(JitFlags::JIT_FLAG_SKIP_VERIFICATION); |
| 22669 | |
| 22670 | #ifdef DEBUG |
| 22671 | if (pParam->pThis->verbose) |
| 22672 | { |
| 22673 | printf("\nInvoking compiler for the inlinee method %s :\n" , |
| 22674 | pParam->pThis->eeGetMethodFullName(pParam->fncHandle)); |
| 22675 | } |
| 22676 | #endif // DEBUG |
| 22677 | |
| 22678 | int result = |
| 22679 | jitNativeCode(pParam->fncHandle, pParam->inlineCandidateInfo->methInfo.scope, |
| 22680 | pParam->pThis->info.compCompHnd, &pParam->inlineCandidateInfo->methInfo, |
| 22681 | (void**)pParam->inlineInfo, nullptr, &compileFlagsForInlinee, pParam->inlineInfo); |
| 22682 | |
| 22683 | if (result != CORJIT_OK) |
| 22684 | { |
| 22685 | // If we haven't yet determined why this inline fails, use |
| 22686 | // a catch-all something bad happened observation. |
| 22687 | InlineResult* innerInlineResult = pParam->inlineInfo->inlineResult; |
| 22688 | |
| 22689 | if (!innerInlineResult->IsFailure()) |
| 22690 | { |
| 22691 | innerInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_FAILURE); |
| 22692 | } |
| 22693 | } |
| 22694 | } |
| 22695 | }, |
| 22696 | ¶m); |
| 22697 | if (!success) |
| 22698 | { |
| 22699 | #ifdef DEBUG |
| 22700 | if (verbose) |
| 22701 | { |
| 22702 | printf("\nInlining failed due to an exception during invoking the compiler for the inlinee method %s.\n" , |
| 22703 | eeGetMethodFullName(fncHandle)); |
| 22704 | } |
| 22705 | #endif // DEBUG |
| 22706 | |
| 22707 | // If we haven't yet determined why this inline fails, use |
| 22708 | // a catch-all something bad happened observation. |
| 22709 | if (!inlineResult->IsFailure()) |
| 22710 | { |
| 22711 | inlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); |
| 22712 | } |
| 22713 | } |
| 22714 | |
| 22715 | if (inlineResult->IsFailure()) |
| 22716 | { |
| 22717 | return; |
| 22718 | } |
| 22719 | |
| 22720 | #ifdef DEBUG |
| 22721 | if (0 && verbose) |
| 22722 | { |
| 22723 | printf("\nDone invoking compiler for the inlinee method %s\n" , eeGetMethodFullName(fncHandle)); |
| 22724 | } |
| 22725 | #endif // DEBUG |
| 22726 | |
    // If there is a non-void return but we haven't set pInlineInfo->retExpr,
    // that means we haven't imported any BB that contains a CEE_RET opcode.
    // (This could happen, for example, when a BBJ_THROW block falls through to a BBJ_RETURN block,
    // which causes the BBJ_RETURN block not to be imported at all.)
    // Fail the inlining attempt.
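    //
    // Illustrative example only (hypothetical C#, not taken from this codebase): an inlinee such as
    //
    //     int AlwaysThrows() { throw new InvalidOperationException(); }
    //
    // has a non-void return type, yet no reachable CEE_RET gets imported, so retExpr stays
    // null and the inline must be abandoned here.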
| 22732 | if (inlineCandidateInfo->fncRetType != TYP_VOID && inlineInfo.retExpr == nullptr) |
| 22733 | { |
| 22734 | #ifdef DEBUG |
| 22735 | if (verbose) |
| 22736 | { |
| 22737 | printf("\nInlining failed because pInlineInfo->retExpr is not set in the inlinee method %s.\n" , |
| 22738 | eeGetMethodFullName(fncHandle)); |
| 22739 | } |
| 22740 | #endif // DEBUG |
| 22741 | inlineResult->NoteFatal(InlineObservation::CALLEE_LACKS_RETURN); |
| 22742 | return; |
| 22743 | } |
| 22744 | |
| 22745 | if (inlineCandidateInfo->initClassResult & CORINFO_INITCLASS_SPECULATIVE) |
| 22746 | { |
        // We defer the call to initClass() until inlining is completed, in case it fails. If inlining
        // succeeds, we will call initClass().
| 22749 | if (!(info.compCompHnd->initClass(nullptr /* field */, fncHandle /* method */, |
| 22750 | inlineCandidateInfo->exactContextHnd /* context */) & |
| 22751 | CORINFO_INITCLASS_INITIALIZED)) |
| 22752 | { |
| 22753 | inlineResult->NoteFatal(InlineObservation::CALLEE_CLASS_INIT_FAILURE); |
| 22754 | return; |
| 22755 | } |
| 22756 | } |
| 22757 | |
| 22758 | // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! |
| 22759 | // The inlining attempt cannot be failed starting from this point. |
| 22760 | // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! |
| 22761 | |
    // We've successfully obtained the inlinee's basic block list.
    // Let's insert it into the inliner's basic block list.
| 22764 | fgInsertInlineeBlocks(&inlineInfo); |
| 22765 | |
| 22766 | #ifdef DEBUG |
| 22767 | |
| 22768 | if (verbose) |
| 22769 | { |
| 22770 | printf("Successfully inlined %s (%d IL bytes) (depth %d) [%s]\n" , eeGetMethodFullName(fncHandle), |
| 22771 | inlineCandidateInfo->methInfo.ILCodeSize, inlineDepth, inlineResult->ReasonString()); |
| 22772 | } |
| 22773 | |
| 22774 | if (verbose) |
| 22775 | { |
| 22776 | printf("--------------------------------------------------------------------------------------------\n" ); |
| 22777 | } |
| 22778 | #endif // DEBUG |
| 22779 | |
| 22780 | #if defined(DEBUG) |
| 22781 | impInlinedCodeSize += inlineCandidateInfo->methInfo.ILCodeSize; |
| 22782 | #endif |
| 22783 | |
| 22784 | // We inlined... |
| 22785 | inlineResult->NoteSuccess(); |
| 22786 | } |
| 22787 | |
| 22788 | //------------------------------------------------------------------------ |
| 22789 | // fgInsertInlineeBlocks: incorporate statements for an inline into the |
| 22790 | // root method. |
| 22791 | // |
| 22792 | // Arguments: |
| 22793 | // inlineInfo -- info for the inline |
| 22794 | // |
| 22795 | // Notes: |
| 22796 | // The inlining attempt cannot be failed once this method is called. |
| 22797 | // |
| 22798 | // Adds all inlinee statements, plus any glue statements needed |
| 22799 | // either before or after the inlined call. |
| 22800 | // |
| 22801 | // Updates flow graph and assigns weights to inlinee |
| 22802 | // blocks. Currently does not attempt to read IBC data for the |
| 22803 | // inlinee. |
| 22804 | // |
// Updates relevant root method status flags (e.g. optMethodFlags) to
| 22806 | // include information from the inlinee. |
| 22807 | // |
| 22808 | // Marks newly added statements with an appropriate inline context. |
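//
// Illustrative sketch only (not actual dump output). For a call site in the middle of a block,
//
//    topBlock:    ... statements before the call ... [inlined call]
//    bottomBlock: ... statements after the call ...
//
// the inlinee's blocks are spliced between topBlock and bottomBlock, inlinee BBJ_RETURN
// blocks are retargeted to jump to bottomBlock, and bottomBlock takes over topBlock's
// original jump kind and target.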
| 22809 | |
| 22810 | void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) |
| 22811 | { |
| 22812 | GenTreeCall* iciCall = pInlineInfo->iciCall; |
| 22813 | GenTreeStmt* iciStmt = pInlineInfo->iciStmt; |
| 22814 | BasicBlock* iciBlock = pInlineInfo->iciBlock; |
| 22815 | BasicBlock* block; |
| 22816 | |
    // We could write a better assert here. For example, we could check that
    // iciBlock contains iciStmt, which in turn contains iciCall.
| 22819 | noway_assert(iciBlock->bbTreeList != nullptr); |
| 22820 | noway_assert(iciStmt->gtStmtExpr != nullptr); |
| 22821 | noway_assert(iciCall->gtOper == GT_CALL); |
| 22822 | |
| 22823 | #ifdef DEBUG |
| 22824 | |
| 22825 | GenTree* currentDumpStmt = nullptr; |
| 22826 | |
| 22827 | if (verbose) |
| 22828 | { |
| 22829 | printf("\n\n----------- Statements (and blocks) added due to the inlining of call " ); |
| 22830 | printTreeID(iciCall); |
| 22831 | printf(" -----------\n" ); |
| 22832 | } |
| 22833 | |
| 22834 | #endif // DEBUG |
| 22835 | |
| 22836 | // Create a new inline context and mark the inlined statements with it |
| 22837 | InlineContext* calleeContext = m_inlineStrategy->NewSuccess(pInlineInfo); |
| 22838 | |
| 22839 | for (block = InlineeCompiler->fgFirstBB; block != nullptr; block = block->bbNext) |
| 22840 | { |
| 22841 | for (GenTreeStmt* stmt = block->firstStmt(); stmt; stmt = stmt->gtNextStmt) |
| 22842 | { |
| 22843 | stmt->gtInlineContext = calleeContext; |
| 22844 | } |
| 22845 | } |
| 22846 | |
| 22847 | // Prepend statements |
| 22848 | GenTree* stmtAfter = fgInlinePrependStatements(pInlineInfo); |
| 22849 | |
| 22850 | #ifdef DEBUG |
| 22851 | if (verbose) |
| 22852 | { |
| 22853 | currentDumpStmt = stmtAfter; |
| 22854 | printf("\nInlinee method body:" ); |
| 22855 | } |
| 22856 | #endif // DEBUG |
| 22857 | |
| 22858 | BasicBlock* topBlock = iciBlock; |
| 22859 | BasicBlock* bottomBlock = nullptr; |
| 22860 | |
| 22861 | if (InlineeCompiler->fgBBcount == 1) |
| 22862 | { |
        // When fgBBcount is 1 we will always have a non-NULL fgFirstBB.
| 22864 | // |
| 22865 | PREFAST_ASSUME(InlineeCompiler->fgFirstBB != nullptr); |
| 22866 | |
        // DDB 91389: Don't throw away the (only) inlinee block
        // when its jump kind is not BBJ_RETURN.
        // In other words, we need to preserve its bbJumpKind so control flow stays correct.
| 22870 | if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN) |
| 22871 | { |
            // The inlinee contains just one BB, so just insert its statement list into topBlock.
| 22873 | if (InlineeCompiler->fgFirstBB->bbTreeList) |
| 22874 | { |
| 22875 | stmtAfter = fgInsertStmtListAfter(iciBlock, stmtAfter, InlineeCompiler->fgFirstBB->bbTreeList); |
| 22876 | |
| 22877 | // Copy inlinee bbFlags to caller bbFlags. |
| 22878 | const unsigned __int64 inlineeBlockFlags = InlineeCompiler->fgFirstBB->bbFlags; |
| 22879 | noway_assert((inlineeBlockFlags & BBF_HAS_JMP) == 0); |
| 22880 | noway_assert((inlineeBlockFlags & BBF_KEEP_BBJ_ALWAYS) == 0); |
| 22881 | iciBlock->bbFlags |= inlineeBlockFlags; |
| 22882 | } |
| 22883 | |
| 22884 | #ifdef DEBUG |
| 22885 | if (verbose) |
| 22886 | { |
| 22887 | noway_assert(currentDumpStmt); |
| 22888 | |
| 22889 | if (currentDumpStmt != stmtAfter) |
| 22890 | { |
| 22891 | do |
| 22892 | { |
| 22893 | currentDumpStmt = currentDumpStmt->gtNext; |
| 22894 | |
| 22895 | printf("\n" ); |
| 22896 | |
| 22897 | noway_assert(currentDumpStmt->gtOper == GT_STMT); |
| 22898 | |
| 22899 | gtDispTree(currentDumpStmt); |
| 22900 | printf("\n" ); |
| 22901 | |
| 22902 | } while (currentDumpStmt != stmtAfter); |
| 22903 | } |
| 22904 | } |
| 22905 | #endif // DEBUG |
| 22906 | |
| 22907 | // Append statements to null out gc ref locals, if necessary. |
| 22908 | fgInlineAppendStatements(pInlineInfo, iciBlock, stmtAfter); |
| 22909 | |
| 22910 | goto _Done; |
| 22911 | } |
| 22912 | } |
| 22913 | |
| 22914 | // |
| 22915 | // ======= Inserting inlinee's basic blocks =============== |
| 22916 | // |
| 22917 | |
| 22918 | bottomBlock = fgNewBBafter(topBlock->bbJumpKind, topBlock, true); |
| 22919 | bottomBlock->bbRefs = 1; |
| 22920 | bottomBlock->bbJumpDest = topBlock->bbJumpDest; |
| 22921 | bottomBlock->inheritWeight(topBlock); |
| 22922 | |
| 22923 | topBlock->bbJumpKind = BBJ_NONE; |
| 22924 | |
| 22925 | // Update block flags |
| 22926 | { |
| 22927 | const unsigned __int64 originalFlags = topBlock->bbFlags; |
| 22928 | noway_assert((originalFlags & BBF_SPLIT_NONEXIST) == 0); |
| 22929 | topBlock->bbFlags &= ~(BBF_SPLIT_LOST); |
| 22930 | bottomBlock->bbFlags |= originalFlags & BBF_SPLIT_GAINED; |
| 22931 | } |
| 22932 | |
| 22933 | // |
| 22934 | // Split statements between topBlock and bottomBlock |
| 22935 | // |
| 22936 | GenTree* topBlock_Begin; |
| 22937 | GenTree* topBlock_End; |
| 22938 | GenTree* bottomBlock_Begin; |
| 22939 | GenTree* bottomBlock_End; |
| 22940 | |
| 22941 | topBlock_Begin = nullptr; |
| 22942 | topBlock_End = nullptr; |
| 22943 | bottomBlock_Begin = nullptr; |
| 22944 | bottomBlock_End = nullptr; |
| 22945 | |
| 22946 | // |
| 22947 | // First figure out bottomBlock_Begin |
| 22948 | // |
| 22949 | |
| 22950 | bottomBlock_Begin = stmtAfter->gtNext; |
| 22951 | |
| 22952 | if (topBlock->bbTreeList == nullptr) |
| 22953 | { |
| 22954 | // topBlock is empty before the split. |
| 22955 | // In this case, both topBlock and bottomBlock should be empty |
| 22956 | noway_assert(bottomBlock_Begin == nullptr); |
| 22957 | topBlock->bbTreeList = nullptr; |
| 22958 | bottomBlock->bbTreeList = nullptr; |
| 22959 | } |
| 22960 | else if (topBlock->bbTreeList == bottomBlock_Begin) |
| 22961 | { |
| 22962 | noway_assert(bottomBlock_Begin); |
| 22963 | |
| 22964 | // topBlock contains at least one statement before the split. |
| 22965 | // And the split is before the first statement. |
        // In this case, topBlock should be empty, and everything else should be moved to the bottomBlock.
| 22967 | bottomBlock->bbTreeList = topBlock->bbTreeList; |
| 22968 | topBlock->bbTreeList = nullptr; |
| 22969 | } |
| 22970 | else if (bottomBlock_Begin == nullptr) |
| 22971 | { |
| 22972 | noway_assert(topBlock->bbTreeList); |
| 22973 | |
| 22974 | // topBlock contains at least one statement before the split. |
| 22975 | // And the split is at the end of the topBlock. |
| 22976 | // In this case, everything should be kept in the topBlock, and the bottomBlock should be empty |
| 22977 | |
| 22978 | bottomBlock->bbTreeList = nullptr; |
| 22979 | } |
| 22980 | else |
| 22981 | { |
| 22982 | noway_assert(topBlock->bbTreeList); |
| 22983 | noway_assert(bottomBlock_Begin); |
| 22984 | |
| 22985 | // This is the normal case where both blocks should contain at least one statement. |
| 22986 | topBlock_Begin = topBlock->bbTreeList; |
| 22987 | noway_assert(topBlock_Begin); |
| 22988 | topBlock_End = bottomBlock_Begin->gtPrev; |
| 22989 | noway_assert(topBlock_End); |
| 22990 | bottomBlock_End = topBlock->lastStmt(); |
| 22991 | noway_assert(bottomBlock_End); |
| 22992 | |
| 22993 | // Break the linkage between 2 blocks. |
| 22994 | topBlock_End->gtNext = nullptr; |
| 22995 | |
| 22996 | // Fix up all the pointers. |
| 22997 | topBlock->bbTreeList = topBlock_Begin; |
| 22998 | topBlock->bbTreeList->gtPrev = topBlock_End; |
| 22999 | |
| 23000 | bottomBlock->bbTreeList = bottomBlock_Begin; |
| 23001 | bottomBlock->bbTreeList->gtPrev = bottomBlock_End; |
| 23002 | } |
| 23003 | |
| 23004 | // |
| 23005 | // Set the try and handler index and fix the jump types of inlinee's blocks. |
| 23006 | // |
| 23007 | |
| 23008 | bool inheritWeight; |
| 23009 | inheritWeight = true; // The firstBB does inherit the weight from the iciBlock |
| 23010 | |
| 23011 | for (block = InlineeCompiler->fgFirstBB; block != nullptr; block = block->bbNext) |
| 23012 | { |
| 23013 | noway_assert(!block->hasTryIndex()); |
| 23014 | noway_assert(!block->hasHndIndex()); |
| 23015 | block->copyEHRegion(iciBlock); |
| 23016 | block->bbFlags |= iciBlock->bbFlags & BBF_BACKWARD_JUMP; |
| 23017 | |
| 23018 | if (iciStmt->gtStmtILoffsx != BAD_IL_OFFSET) |
| 23019 | { |
| 23020 | block->bbCodeOffs = jitGetILoffs(iciStmt->gtStmtILoffsx); |
| 23021 | block->bbCodeOffsEnd = block->bbCodeOffs + 1; // TODO: is code size of 1 some magic number for inlining? |
| 23022 | } |
| 23023 | else |
| 23024 | { |
| 23025 | block->bbCodeOffs = 0; // TODO: why not BAD_IL_OFFSET? |
| 23026 | block->bbCodeOffsEnd = 0; |
| 23027 | block->bbFlags |= BBF_INTERNAL; |
| 23028 | } |
| 23029 | |
| 23030 | if (block->bbJumpKind == BBJ_RETURN) |
| 23031 | { |
| 23032 | inheritWeight = true; // A return block does inherit the weight from the iciBlock |
| 23033 | noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); |
| 23034 | if (block->bbNext) |
| 23035 | { |
| 23036 | block->bbJumpKind = BBJ_ALWAYS; |
| 23037 | block->bbJumpDest = bottomBlock; |
| 23038 | #ifdef DEBUG |
| 23039 | if (verbose) |
| 23040 | { |
| 23041 | printf("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n" , block->bbNum, |
| 23042 | bottomBlock->bbNum); |
| 23043 | } |
| 23044 | #endif // DEBUG |
| 23045 | } |
| 23046 | else |
| 23047 | { |
| 23048 | #ifdef DEBUG |
| 23049 | if (verbose) |
| 23050 | { |
| 23051 | printf("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n" , block->bbNum); |
| 23052 | } |
| 23053 | #endif // DEBUG |
| 23054 | block->bbJumpKind = BBJ_NONE; |
| 23055 | } |
| 23056 | } |
| 23057 | if (inheritWeight) |
| 23058 | { |
| 23059 | block->inheritWeight(iciBlock); |
| 23060 | inheritWeight = false; |
| 23061 | } |
| 23062 | else |
| 23063 | { |
| 23064 | block->modifyBBWeight(iciBlock->bbWeight / 2); |
| 23065 | } |
| 23066 | } |
| 23067 | |
| 23068 | // Insert inlinee's blocks into inliner's block list. |
| 23069 | topBlock->setNext(InlineeCompiler->fgFirstBB); |
| 23070 | InlineeCompiler->fgLastBB->setNext(bottomBlock); |
| 23071 | |
| 23072 | // |
| 23073 | // Add inlinee's block count to inliner's. |
| 23074 | // |
| 23075 | fgBBcount += InlineeCompiler->fgBBcount; |
| 23076 | |
| 23077 | // Append statements to null out gc ref locals, if necessary. |
| 23078 | fgInlineAppendStatements(pInlineInfo, bottomBlock, nullptr); |
| 23079 | |
| 23080 | #ifdef DEBUG |
| 23081 | if (verbose) |
| 23082 | { |
| 23083 | fgDispBasicBlocks(InlineeCompiler->fgFirstBB, InlineeCompiler->fgLastBB, true); |
| 23084 | } |
| 23085 | #endif // DEBUG |
| 23086 | |
| 23087 | _Done: |
| 23088 | |
| 23089 | // |
    // At this point, we have successfully inserted the inlinee's code.
| 23091 | // |
| 23092 | |
| 23093 | // |
| 23094 | // Copy out some flags |
| 23095 | // |
| 23096 | compLongUsed |= InlineeCompiler->compLongUsed; |
| 23097 | compFloatingPointUsed |= InlineeCompiler->compFloatingPointUsed; |
| 23098 | compLocallocUsed |= InlineeCompiler->compLocallocUsed; |
| 23099 | compLocallocOptimized |= InlineeCompiler->compLocallocOptimized; |
| 23100 | compQmarkUsed |= InlineeCompiler->compQmarkUsed; |
| 23101 | compUnsafeCastUsed |= InlineeCompiler->compUnsafeCastUsed; |
| 23102 | compNeedsGSSecurityCookie |= InlineeCompiler->compNeedsGSSecurityCookie; |
| 23103 | compGSReorderStackLayout |= InlineeCompiler->compGSReorderStackLayout; |
| 23104 | |
| 23105 | #ifdef FEATURE_SIMD |
| 23106 | if (InlineeCompiler->usesSIMDTypes()) |
| 23107 | { |
| 23108 | setUsesSIMDTypes(true); |
| 23109 | } |
| 23110 | #endif // FEATURE_SIMD |
| 23111 | |
| 23112 | // Update unmanaged call count |
| 23113 | info.compCallUnmanaged += InlineeCompiler->info.compCallUnmanaged; |
| 23114 | |
| 23115 | // Update optMethodFlags |
| 23116 | |
| 23117 | #ifdef DEBUG |
| 23118 | unsigned optMethodFlagsBefore = optMethodFlags; |
| 23119 | #endif |
| 23120 | |
| 23121 | optMethodFlags |= InlineeCompiler->optMethodFlags; |
| 23122 | |
| 23123 | #ifdef DEBUG |
| 23124 | if (optMethodFlags != optMethodFlagsBefore) |
| 23125 | { |
| 23126 | JITDUMP("INLINER: Updating optMethodFlags -- root:%0x callee:%0x new:%0x\n" , optMethodFlagsBefore, |
| 23127 | InlineeCompiler->optMethodFlags, optMethodFlags); |
| 23128 | } |
| 23129 | #endif |
| 23130 | |
    // If there is a non-void return value, replace the GT_CALL with its return value expression,
    // so that later it will be picked up by the GT_RET_EXPR node.
| 23133 | if ((pInlineInfo->inlineCandidateInfo->fncRetType != TYP_VOID) || (iciCall->gtReturnType == TYP_STRUCT)) |
| 23134 | { |
| 23135 | noway_assert(pInlineInfo->retExpr); |
| 23136 | #ifdef DEBUG |
| 23137 | if (verbose) |
| 23138 | { |
| 23139 | printf("\nReturn expression for call at " ); |
| 23140 | printTreeID(iciCall); |
| 23141 | printf(" is\n" ); |
| 23142 | gtDispTree(pInlineInfo->retExpr); |
| 23143 | } |
| 23144 | #endif // DEBUG |
| 23145 | // Replace the call with the return expression |
| 23146 | iciCall->ReplaceWith(pInlineInfo->retExpr, this); |
| 23147 | } |
| 23148 | |
| 23149 | // |
| 23150 | // Detach the GT_CALL node from the original statement by hanging a "nothing" node under it, |
| 23151 | // so that fgMorphStmts can remove the statement once we return from here. |
| 23152 | // |
| 23153 | iciStmt->gtStmtExpr = gtNewNothingNode(); |
| 23154 | } |
| 23155 | |
| 23156 | //------------------------------------------------------------------------ |
| 23157 | // fgInlinePrependStatements: prepend statements needed to match up |
| 23158 | // caller and inlined callee |
| 23159 | // |
| 23160 | // Arguments: |
| 23161 | // inlineInfo -- info for the inline |
| 23162 | // |
| 23163 | // Return Value: |
| 23164 | // The last statement that was added, or the original call if no |
| 23165 | // statements were added. |
| 23166 | // |
| 23167 | // Notes: |
| 23168 | // Statements prepended may include the following: |
| 23169 | // * This pointer null check |
| 23170 | // * Class initialization |
| 23171 | // * Zeroing of must-init locals in the callee |
| 23172 | // * Passing of call arguments via temps |
| 23173 | // |
// Newly added statements are placed just after the original call
// and are given the same inline context as the call; any calls
// added here will appear to have been part of the immediate caller.
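//
// Illustrative sketch only (hypothetical temp numbers) for a call "obj.M(a, b)":
//
//    tmp0 = obj;                  // argument temps, when a temp is required
//    tmp1 = a;
//    <shared cctor helper call>   // class init helper, if CORINFO_INITCLASS_USE_HELPER
//    *tmp0;                       // null check of 'this', if GTF_CALL_NULLCHECK is set
//    tmp2 = 0;                    // zero-init of must-init inlinee locals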
| 23177 | |
| 23178 | GenTree* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) |
| 23179 | { |
| 23180 | BasicBlock* block = inlineInfo->iciBlock; |
| 23181 | GenTreeStmt* callStmt = inlineInfo->iciStmt; |
| 23182 | IL_OFFSETX callILOffset = callStmt->gtStmtILoffsx; |
| 23183 | GenTreeStmt* postStmt = callStmt->gtNextStmt; |
    GenTree* afterStmt = callStmt; // afterStmt is the statement after which the new statements will be inserted.
| 23185 | GenTree* newStmt = nullptr; |
| 23186 | GenTreeCall* call = inlineInfo->iciCall->AsCall(); |
| 23187 | |
| 23188 | noway_assert(call->gtOper == GT_CALL); |
| 23189 | |
| 23190 | #ifdef DEBUG |
| 23191 | if (0 && verbose) |
| 23192 | { |
| 23193 | printf("\nfgInlinePrependStatements for iciCall= " ); |
| 23194 | printTreeID(call); |
| 23195 | printf(":\n" ); |
| 23196 | } |
| 23197 | #endif |
| 23198 | |
| 23199 | // Prepend statements for any initialization / side effects |
| 23200 | |
| 23201 | InlArgInfo* inlArgInfo = inlineInfo->inlArgInfo; |
| 23202 | InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; |
| 23203 | |
| 23204 | GenTree* tree; |
| 23205 | |
    // Create the null check statement (but do not append it to the statement list yet) for the 'this' pointer, if
    // necessary.
    // The NULL check should be done after the "argument setup statements".
    // The only reason we build it here is so that the call to "impInlineFetchArg(0,..." reserves a temp
    // for the "this" pointer.
    // Note: Here we no longer do the optimization that was done by thisDereferencedFirst in the old inliner.
    // However, the assertion propagation logic will remove any unnecessary null checks that we may have added.
| 23213 | // |
| 23214 | GenTree* nullcheck = nullptr; |
| 23215 | |
| 23216 | if (call->gtFlags & GTF_CALL_NULLCHECK && !inlineInfo->thisDereferencedFirst) |
| 23217 | { |
| 23218 | // Call impInlineFetchArg to "reserve" a temp for the "this" pointer. |
| 23219 | nullcheck = gtNewOperNode(GT_IND, TYP_INT, impInlineFetchArg(0, inlArgInfo, lclVarInfo)); |
| 23220 | nullcheck->gtFlags |= GTF_EXCEPT; |
| 23221 | |
        // The NULL-check statement will be inserted into the statement list after the statements
        // that assign arguments to temps and before the actual body of the inlinee method.
| 23224 | } |
| 23225 | |
| 23226 | /* Treat arguments that had to be assigned to temps */ |
| 23227 | if (inlineInfo->argCnt) |
| 23228 | { |
| 23229 | |
| 23230 | #ifdef DEBUG |
| 23231 | if (verbose) |
| 23232 | { |
| 23233 | printf("\nArguments setup:\n" ); |
| 23234 | } |
| 23235 | #endif // DEBUG |
| 23236 | |
| 23237 | for (unsigned argNum = 0; argNum < inlineInfo->argCnt; argNum++) |
| 23238 | { |
| 23239 | const InlArgInfo& argInfo = inlArgInfo[argNum]; |
| 23240 | const bool argIsSingleDef = !argInfo.argHasLdargaOp && !argInfo.argHasStargOp; |
| 23241 | GenTree* const argNode = inlArgInfo[argNum].argNode; |
| 23242 | |
| 23243 | if (argInfo.argHasTmp) |
| 23244 | { |
| 23245 | noway_assert(argInfo.argIsUsed); |
| 23246 | |
| 23247 | /* argBashTmpNode is non-NULL iff the argument's value was |
| 23248 | referenced exactly once by the original IL. This offers an |
| 23249 | opportunity to avoid an intermediate temp and just insert |
| 23250 | the original argument tree. |
| 23251 | |
| 23252 | However, if the temp node has been cloned somewhere while |
| 23253 | importing (e.g. when handling isinst or dup), or if the IL |
| 23254 | took the address of the argument, then argBashTmpNode will |
| 23255 | be set (because the value was only explicitly retrieved |
| 23256 | once) but the optimization cannot be applied. |
| 23257 | */ |
| 23258 | |
| 23259 | GenTree* argSingleUseNode = argInfo.argBashTmpNode; |
| 23260 | |
| 23261 | if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef) |
| 23262 | { |
| 23263 | // Change the temp in-place to the actual argument. |
| 23264 | // We currently do not support this for struct arguments, so it must not be a GT_OBJ. |
| 23265 | assert(argNode->gtOper != GT_OBJ); |
| 23266 | argSingleUseNode->ReplaceWith(argNode, this); |
| 23267 | continue; |
| 23268 | } |
| 23269 | else |
| 23270 | { |
| 23271 | // We're going to assign the argument value to the |
| 23272 | // temp we use for it in the inline body. |
| 23273 | const unsigned tmpNum = argInfo.argTmpNum; |
| 23274 | const var_types argType = lclVarInfo[argNum].lclTypeInfo; |
| 23275 | |
| 23276 | // Create the temp assignment for this argument |
| 23277 | CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE; |
| 23278 | |
| 23279 | if (varTypeIsStruct(argType)) |
| 23280 | { |
| 23281 | structHnd = gtGetStructHandleIfPresent(argNode); |
| 23282 | noway_assert(structHnd != NO_CLASS_HANDLE); |
| 23283 | } |
| 23284 | |
                    // The unsafe value cls check is not needed for
                    // argTmpNum here, since the inlinee compiler instance
                    // would have iterated over these and marked them
                    // accordingly.
| 23289 | impAssignTempGen(tmpNum, argNode, structHnd, (unsigned)CHECK_SPILL_NONE, &afterStmt, callILOffset, |
| 23290 | block); |
| 23291 | |
| 23292 | // We used to refine the temp type here based on |
| 23293 | // the actual arg, but we now do this up front, when |
| 23294 | // creating the temp, over in impInlineFetchArg. |
| 23295 | CLANG_FORMAT_COMMENT_ANCHOR; |
| 23296 | |
| 23297 | #ifdef DEBUG |
| 23298 | if (verbose) |
| 23299 | { |
| 23300 | gtDispTree(afterStmt); |
| 23301 | } |
| 23302 | #endif // DEBUG |
| 23303 | } |
| 23304 | } |
| 23305 | else if (argInfo.argIsByRefToStructLocal) |
| 23306 | { |
| 23307 | // Do nothing. Arg was directly substituted as we read |
| 23308 | // the inlinee. |
| 23309 | } |
| 23310 | else |
| 23311 | { |
| 23312 | /* The argument is either not used or a const or lcl var */ |
| 23313 | |
| 23314 | noway_assert(!argInfo.argIsUsed || argInfo.argIsInvariant || argInfo.argIsLclVar); |
| 23315 | |
                /* Make sure we didn't change argNode along the way, or else
                   subsequent uses of the arg would have worked with the bashed value */
| 23318 | if (argInfo.argIsInvariant) |
| 23319 | { |
| 23320 | assert(argNode->OperIsConst() || argNode->gtOper == GT_ADDR); |
| 23321 | } |
| 23322 | noway_assert((argInfo.argIsLclVar == 0) == |
| 23323 | (argNode->gtOper != GT_LCL_VAR || (argNode->gtFlags & GTF_GLOB_REF))); |
| 23324 | |
| 23325 | /* If the argument has side effects, append it */ |
| 23326 | |
| 23327 | if (argInfo.argHasSideEff) |
| 23328 | { |
| 23329 | noway_assert(argInfo.argIsUsed == false); |
| 23330 | newStmt = nullptr; |
| 23331 | bool append = true; |
| 23332 | |
| 23333 | if (argNode->gtOper == GT_OBJ || argNode->gtOper == GT_MKREFANY) |
| 23334 | { |
                        // Don't put a GT_OBJ node under a GT_COMMA.
                        // Codegen can't deal with it.
                        // Just hang the address here in case there are side effects.
| 23338 | newStmt = gtNewStmt(gtUnusedValNode(argNode->gtOp.gtOp1), callILOffset); |
| 23339 | } |
| 23340 | else |
| 23341 | { |
| 23342 | // In some special cases, unused args with side effects can |
| 23343 | // trigger further changes. |
| 23344 | // |
| 23345 | // (1) If the arg is a static field access and the field access |
| 23346 | // was produced by a call to EqualityComparer<T>.get_Default, the |
| 23347 | // helper call to ensure the field has a value can be suppressed. |
| 23348 | // This helper call is marked as a "Special DCE" helper during |
| 23349 | // importation, over in fgGetStaticsCCtorHelper. |
| 23350 | // |
                        // (2) NYI. If, after tunneling through GT_RET_EXPRs, we find that
                        // the actual arg expression has no side effects, we can skip
                        // appending it altogether. This will help jit throughput a bit.
| 23354 | // |
| 23355 | // Chase through any GT_RET_EXPRs to find the actual argument |
| 23356 | // expression. |
| 23357 | GenTree* actualArgNode = argNode->gtRetExprVal(); |
| 23358 | |
| 23359 | // For case (1) |
| 23360 | // |
| 23361 | // Look for the following tree shapes |
| 23362 | // prejit: (IND (ADD (CONST, CALL(special dce helper...)))) |
| 23363 | // jit : (COMMA (CALL(special dce helper...), (FIELD ...))) |
| 23364 | if (actualArgNode->gtOper == GT_COMMA) |
| 23365 | { |
| 23366 | // Look for (COMMA (CALL(special dce helper...), (FIELD ...))) |
| 23367 | GenTree* op1 = actualArgNode->gtOp.gtOp1; |
| 23368 | GenTree* op2 = actualArgNode->gtOp.gtOp2; |
| 23369 | if (op1->IsCall() && ((op1->gtCall.gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && |
| 23370 | (op2->gtOper == GT_FIELD) && ((op2->gtFlags & GTF_EXCEPT) == 0)) |
| 23371 | { |
| 23372 | JITDUMP("\nPerforming special dce on unused arg [%06u]:" |
| 23373 | " actual arg [%06u] helper call [%06u]\n" , |
| 23374 | argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); |
| 23375 | // Drop the whole tree |
| 23376 | append = false; |
| 23377 | } |
| 23378 | } |
| 23379 | else if (actualArgNode->gtOper == GT_IND) |
| 23380 | { |
| 23381 | // Look for (IND (ADD (CONST, CALL(special dce helper...)))) |
| 23382 | GenTree* addr = actualArgNode->gtOp.gtOp1; |
| 23383 | |
| 23384 | if (addr->gtOper == GT_ADD) |
| 23385 | { |
| 23386 | GenTree* op1 = addr->gtOp.gtOp1; |
| 23387 | GenTree* op2 = addr->gtOp.gtOp2; |
| 23388 | if (op1->IsCall() && |
| 23389 | ((op1->gtCall.gtCallMoreFlags & GTF_CALL_M_HELPER_SPECIAL_DCE) != 0) && |
| 23390 | op2->IsCnsIntOrI()) |
| 23391 | { |
| 23392 | // Drop the whole tree |
| 23393 | JITDUMP("\nPerforming special dce on unused arg [%06u]:" |
| 23394 | " actual arg [%06u] helper call [%06u]\n" , |
| 23395 | argNode->gtTreeID, actualArgNode->gtTreeID, op1->gtTreeID); |
| 23396 | append = false; |
| 23397 | } |
| 23398 | } |
| 23399 | } |
| 23400 | } |
| 23401 | |
| 23402 | if (!append) |
| 23403 | { |
| 23404 | assert(newStmt == nullptr); |
| 23405 | JITDUMP("Arg tree side effects were discardable, not appending anything for arg\n" ); |
| 23406 | } |
| 23407 | else |
| 23408 | { |
| 23409 | // If we don't have something custom to append, |
| 23410 | // just append the arg node as an unused value. |
| 23411 | if (newStmt == nullptr) |
| 23412 | { |
| 23413 | newStmt = gtNewStmt(gtUnusedValNode(argNode), callILOffset); |
| 23414 | } |
| 23415 | |
| 23416 | afterStmt = fgInsertStmtAfter(block, afterStmt, newStmt); |
| 23417 | #ifdef DEBUG |
| 23418 | if (verbose) |
| 23419 | { |
| 23420 | gtDispTree(afterStmt); |
| 23421 | } |
| 23422 | #endif // DEBUG |
| 23423 | } |
| 23424 | } |
| 23425 | else if (argNode->IsBoxedValue()) |
| 23426 | { |
| 23427 | // Try to clean up any unnecessary boxing side effects |
| 23428 | // since the box itself will be ignored. |
| 23429 | gtTryRemoveBoxUpstreamEffects(argNode); |
| 23430 | } |
| 23431 | } |
| 23432 | } |
| 23433 | } |
| 23434 | |
| 23435 | // Add the CCTOR check if asked for. |
    // Note: We no longer do the optimization that was done by staticAccessedFirstUsingHelper in the old inliner.
    // Therefore we might prepend a redundant call to HELPER.CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE
    // before the inlined method body, even if a static field of this type was accessed in the inlinee
    // using a helper before any other observable side-effect.
| 23440 | |
| 23441 | if (inlineInfo->inlineCandidateInfo->initClassResult & CORINFO_INITCLASS_USE_HELPER) |
| 23442 | { |
| 23443 | CORINFO_CONTEXT_HANDLE exactContext = inlineInfo->inlineCandidateInfo->exactContextHnd; |
| 23444 | CORINFO_CLASS_HANDLE exactClass; |
| 23445 | |
| 23446 | if (((SIZE_T)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS) |
| 23447 | { |
| 23448 | exactClass = CORINFO_CLASS_HANDLE((SIZE_T)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); |
| 23449 | } |
| 23450 | else |
| 23451 | { |
| 23452 | exactClass = info.compCompHnd->getMethodClass( |
| 23453 | CORINFO_METHOD_HANDLE((SIZE_T)exactContext & ~CORINFO_CONTEXTFLAGS_MASK)); |
| 23454 | } |
| 23455 | |
| 23456 | tree = fgGetSharedCCtor(exactClass); |
| 23457 | newStmt = gtNewStmt(tree, callILOffset); |
| 23458 | afterStmt = fgInsertStmtAfter(block, afterStmt, newStmt); |
| 23459 | } |
| 23460 | |
| 23461 | // Insert the nullcheck statement now. |
| 23462 | if (nullcheck) |
| 23463 | { |
| 23464 | newStmt = gtNewStmt(nullcheck, callILOffset); |
| 23465 | afterStmt = fgInsertStmtAfter(block, afterStmt, newStmt); |
| 23466 | } |
| 23467 | |
| 23468 | // |
| 23469 | // Now zero-init inlinee locals |
| 23470 | // |
| 23471 | |
| 23472 | CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; |
| 23473 | |
| 23474 | unsigned lclCnt = InlineeMethodInfo->locals.numArgs; |
| 23475 | |
    // Does the callee contain any locals that need to be zero-initialized?
| 23477 | if ((lclCnt != 0) && (InlineeMethodInfo->options & CORINFO_OPT_INIT_LOCALS) != 0) |
| 23478 | { |
| 23479 | |
| 23480 | #ifdef DEBUG |
| 23481 | if (verbose) |
| 23482 | { |
| 23483 | printf("\nZero init inlinee locals:\n" ); |
| 23484 | } |
| 23485 | #endif // DEBUG |
| 23486 | |
| 23487 | for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) |
| 23488 | { |
| 23489 | unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; |
| 23490 | |
| 23491 | // Is the local used at all? |
| 23492 | if (tmpNum != BAD_VAR_NUM) |
| 23493 | { |
| 23494 | var_types lclTyp = (var_types)lvaTable[tmpNum].lvType; |
| 23495 | noway_assert(lclTyp == lclVarInfo[lclNum + inlineInfo->argCnt].lclTypeInfo); |
| 23496 | |
| 23497 | if (!varTypeIsStruct(lclTyp)) |
| 23498 | { |
                    // The unsafe value cls check is not needed here since the inlinee compiler instance
                    // would have iterated over the locals and marked them accordingly.
| 23501 | impAssignTempGen(tmpNum, gtNewZeroConNode(genActualType(lclTyp)), NO_CLASS_HANDLE, |
| 23502 | (unsigned)CHECK_SPILL_NONE, &afterStmt, callILOffset, block); |
| 23503 | } |
| 23504 | else |
| 23505 | { |
| 23506 | CORINFO_CLASS_HANDLE structType = |
| 23507 | lclVarInfo[lclNum + inlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(); |
| 23508 | |
| 23509 | if (fgStructTempNeedsExplicitZeroInit(lvaTable + tmpNum, block)) |
| 23510 | { |
| 23511 | tree = gtNewBlkOpNode(gtNewLclvNode(tmpNum, lclTyp), // Dest |
| 23512 | gtNewIconNode(0), // Value |
| 23513 | info.compCompHnd->getClassSize(structType), // Size |
| 23514 | false, // isVolatile |
| 23515 | false); // not copyBlock |
| 23516 | |
| 23517 | newStmt = gtNewStmt(tree, callILOffset); |
| 23518 | afterStmt = fgInsertStmtAfter(block, afterStmt, newStmt); |
| 23519 | } |
| 23520 | } |
| 23521 | |
| 23522 | #ifdef DEBUG |
| 23523 | if (verbose) |
| 23524 | { |
| 23525 | gtDispTree(afterStmt); |
| 23526 | } |
| 23527 | #endif // DEBUG |
| 23528 | } |
| 23529 | } |
| 23530 | } |
| 23531 | |
| 23532 | // Update any newly added statements with the appropriate context. |
| 23533 | InlineContext* context = callStmt->gtInlineContext; |
| 23534 | assert(context != nullptr); |
| 23535 | for (GenTreeStmt* addedStmt = callStmt->gtNextStmt; addedStmt != postStmt; addedStmt = addedStmt->gtNextStmt) |
| 23536 | { |
| 23537 | assert(addedStmt->gtInlineContext == nullptr); |
| 23538 | addedStmt->gtInlineContext = context; |
| 23539 | } |
| 23540 | |
| 23541 | return afterStmt; |
| 23542 | } |
| 23543 | |
| 23544 | //------------------------------------------------------------------------ |
| 23545 | // fgInlineAppendStatements: Append statements that are needed |
| 23546 | // after the inlined call. |
| 23547 | // |
| 23548 | // Arguments: |
| 23549 | // inlineInfo - information about the inline |
| 23550 | // block - basic block for the new statements |
| 23551 | // stmtAfter - (optional) insertion point for mid-block cases |
| 23552 | // |
| 23553 | // Notes: |
// If the call we're inlining is in tail position then
// we skip nulling the locals, since appending those assignments
// after the call would interfere with the implicit tail call.
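//
// Illustrative sketch only (hypothetical temp number): for each GC-ref inlinee local that
// received a temp, a statement of the form
//
//     V07 = null;
//
// is appended after the inlined body so that no stale object reference is kept alive.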
| 23557 | |
| 23558 | void Compiler::fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, GenTree* stmtAfter) |
| 23559 | { |
| 23560 | // If this inlinee was passed a runtime lookup generic context and |
| 23561 | // ignores it, we can decrement the "generic context was used" ref |
| 23562 | // count, because we created a new lookup tree and incremented the |
| 23563 | // count when we imported the type parameter argument to pass to |
| 23564 | // the inlinee. See corresponding logic in impImportCall that |
| 23565 | // checks the sig for CORINFO_CALLCONV_PARAMTYPE. |
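    //
    // Illustrative example only (hypothetical C#): a shared generic inlinee such as
    //
    //     static int Tag<T>() => 0;
    //
    // may be passed a hidden type context the caller had to produce via runtime lookup;
    // if the inlined body never consumes it, that lookup no longer needs to keep the
    // generics context alive.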
| 23566 | // |
| 23567 | // Does this method require a context (type) parameter? |
| 23568 | if ((inlineInfo->inlineCandidateInfo->methInfo.args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0) |
| 23569 | { |
| 23570 | // Did the computation of that parameter require the |
| 23571 | // caller to perform a runtime lookup? |
| 23572 | if (inlineInfo->inlineCandidateInfo->exactContextNeedsRuntimeLookup) |
| 23573 | { |
| 23574 | // Fetch the temp for the generic context as it would |
| 23575 | // appear in the inlinee's body. |
| 23576 | const unsigned typeCtxtArg = inlineInfo->typeContextArg; |
| 23577 | const unsigned tmpNum = inlineInfo->lclTmpNum[typeCtxtArg]; |
| 23578 | |
| 23579 | // Was it used in the inline body? |
| 23580 | if (tmpNum == BAD_VAR_NUM) |
| 23581 | { |
| 23582 | // No -- so the associated runtime lookup is not needed |
| 23583 | // and also no longer provides evidence that the generic |
| 23584 | // context should be kept alive. |
| 23585 | JITDUMP("Inlinee ignores runtime lookup generics context\n" ); |
| 23586 | assert(lvaGenericsContextUseCount > 0); |
| 23587 | lvaGenericsContextUseCount--; |
| 23588 | } |
| 23589 | } |
| 23590 | } |
| 23591 | |
| 23592 | // Null out any gc ref locals |
| 23593 | if (!inlineInfo->HasGcRefLocals()) |
| 23594 | { |
| 23595 | // No ref locals, nothing to do. |
| 23596 | JITDUMP("fgInlineAppendStatements: no gc ref inline locals.\n" ); |
| 23597 | return; |
| 23598 | } |
| 23599 | |
| 23600 | if (inlineInfo->iciCall->IsImplicitTailCall()) |
| 23601 | { |
| 23602 | JITDUMP("fgInlineAppendStatements: implicit tail call; skipping nulling.\n" ); |
| 23603 | return; |
| 23604 | } |
| 23605 | |
| 23606 | JITDUMP("fgInlineAppendStatements: nulling out gc ref inlinee locals.\n" ); |
| 23607 | |
| 23608 | GenTree* callStmt = inlineInfo->iciStmt; |
| 23609 | IL_OFFSETX callILOffset = callStmt->gtStmt.gtStmtILoffsx; |
| 23610 | CORINFO_METHOD_INFO* InlineeMethodInfo = InlineeCompiler->info.compMethodInfo; |
| 23611 | const unsigned lclCnt = InlineeMethodInfo->locals.numArgs; |
| 23612 | InlLclVarInfo* lclVarInfo = inlineInfo->lclVarInfo; |
| 23613 | unsigned gcRefLclCnt = inlineInfo->numberOfGcRefLocals; |
| 23614 | const unsigned argCnt = inlineInfo->argCnt; |
| 23615 | |
| 23616 | noway_assert(callStmt->gtOper == GT_STMT); |
| 23617 | |
| 23618 | for (unsigned lclNum = 0; lclNum < lclCnt; lclNum++) |
| 23619 | { |
| 23620 | // Is the local a gc ref type? Need to look at the |
| 23621 | // inline info for this since we will not have local |
| 23622 | // temps for unused inlinee locals. |
| 23623 | const var_types lclTyp = lclVarInfo[argCnt + lclNum].lclTypeInfo; |
| 23624 | |
| 23625 | if (!varTypeIsGC(lclTyp)) |
| 23626 | { |
| 23627 | // Nope, nothing to null out. |
| 23628 | continue; |
| 23629 | } |
| 23630 | |
| 23631 | // Ensure we're examining just the right number of locals. |
| 23632 | assert(gcRefLclCnt > 0); |
| 23633 | gcRefLclCnt--; |
| 23634 | |
| 23635 | // Fetch the temp for this inline local |
| 23636 | const unsigned tmpNum = inlineInfo->lclTmpNum[lclNum]; |
| 23637 | |
| 23638 | // Is the local used at all? |
| 23639 | if (tmpNum == BAD_VAR_NUM) |
| 23640 | { |
| 23641 | // Nope, nothing to null out. |
| 23642 | continue; |
| 23643 | } |
| 23644 | |
| 23645 | // Local was used, make sure the type is consistent. |
| 23646 | assert(lvaTable[tmpNum].lvType == lclTyp); |
| 23647 | |
| 23648 | // Does the local we're about to null out appear in the return |
| 23649 | // expression? If so we somehow messed up and didn't properly |
| 23650 | // spill the return value. See impInlineFetchLocal. |
| 23651 | GenTree* retExpr = inlineInfo->retExpr; |
| 23652 | if (retExpr != nullptr) |
| 23653 | { |
| 23654 | const bool interferesWithReturn = gtHasRef(inlineInfo->retExpr, tmpNum, false); |
| 23655 | noway_assert(!interferesWithReturn); |
| 23656 | } |
| 23657 | |
| 23658 | // Assign null to the local. |
| 23659 | GenTree* nullExpr = gtNewTempAssign(tmpNum, gtNewZeroConNode(lclTyp)); |
| 23660 | GenTree* nullStmt = gtNewStmt(nullExpr, callILOffset); |
| 23661 | |
| 23662 | if (stmtAfter == nullptr) |
| 23663 | { |
| 23664 | stmtAfter = fgInsertStmtAtBeg(block, nullStmt); |
| 23665 | } |
| 23666 | else |
| 23667 | { |
| 23668 | stmtAfter = fgInsertStmtAfter(block, stmtAfter, nullStmt); |
| 23669 | } |
| 23670 | |
| 23671 | #ifdef DEBUG |
| 23672 | if (verbose) |
| 23673 | { |
| 23674 | gtDispTree(nullStmt); |
| 23675 | } |
| 23676 | #endif // DEBUG |
| 23677 | } |
| 23678 | |
| 23679 | // There should not be any GC ref locals left to null out. |
| 23680 | assert(gcRefLclCnt == 0); |
| 23681 | } |
| 23682 | |
| 23683 | /*****************************************************************************/ |
| 23684 | /*static*/ |
| 23685 | Compiler::fgWalkResult Compiler::fgChkThrowCB(GenTree** pTree, fgWalkData* data) |
| 23686 | { |
| 23687 | GenTree* tree = *pTree; |
| 23688 | |
| 23689 | // If this tree doesn't have the EXCEPT flag set, then there is no |
| 23690 | // way any of the child nodes could throw, so we can stop recursing. |
| 23691 | if (!(tree->gtFlags & GTF_EXCEPT)) |
| 23692 | { |
| 23693 | return Compiler::WALK_SKIP_SUBTREES; |
| 23694 | } |
| 23695 | |
| 23696 | switch (tree->gtOper) |
| 23697 | { |
| 23698 | case GT_MUL: |
| 23699 | case GT_ADD: |
| 23700 | case GT_SUB: |
| 23701 | case GT_CAST: |
| 23702 | if (tree->gtOverflow()) |
| 23703 | { |
| 23704 | return Compiler::WALK_ABORT; |
| 23705 | } |
| 23706 | break; |
| 23707 | |
| 23708 | case GT_INDEX: |
| 23709 | case GT_INDEX_ADDR: |
| 23710 | // These two call CORINFO_HELP_RNGCHKFAIL for Debug code |
| 23711 | if (tree->gtFlags & GTF_INX_RNGCHK) |
| 23712 | { |
| 23713 | return Compiler::WALK_ABORT; |
| 23714 | } |
| 23715 | break; |
| 23716 | |
| 23717 | case GT_ARR_BOUNDS_CHECK: |
| 23718 | return Compiler::WALK_ABORT; |
| 23719 | |
| 23720 | default: |
| 23721 | break; |
| 23722 | } |
| 23723 | |
| 23724 | return Compiler::WALK_CONTINUE; |
| 23725 | } |
| 23726 | |
| 23727 | /*****************************************************************************/ |
| 23728 | /*static*/ |
| 23729 | Compiler::fgWalkResult Compiler::fgChkLocAllocCB(GenTree** pTree, fgWalkData* data) |
| 23730 | { |
| 23731 | GenTree* tree = *pTree; |
| 23732 | |
| 23733 | if (tree->gtOper == GT_LCLHEAP) |
| 23734 | { |
| 23735 | return Compiler::WALK_ABORT; |
| 23736 | } |
| 23737 | |
| 23738 | return Compiler::WALK_CONTINUE; |
| 23739 | } |
| 23740 | |
| 23741 | /*****************************************************************************/ |
| 23742 | /*static*/ |
| 23743 | Compiler::fgWalkResult Compiler::fgChkQmarkCB(GenTree** pTree, fgWalkData* data) |
| 23744 | { |
| 23745 | GenTree* tree = *pTree; |
| 23746 | |
| 23747 | if (tree->gtOper == GT_QMARK) |
| 23748 | { |
| 23749 | return Compiler::WALK_ABORT; |
| 23750 | } |
| 23751 | |
| 23752 | return Compiler::WALK_CONTINUE; |
| 23753 | } |
| 23754 | |
| 23755 | void Compiler::fgLclFldAssign(unsigned lclNum) |
| 23756 | { |
| 23757 | assert(varTypeIsStruct(lvaTable[lclNum].lvType)); |
| 23758 | if (lvaTable[lclNum].lvPromoted && lvaTable[lclNum].lvFieldCnt > 1) |
| 23759 | { |
| 23760 | lvaSetVarDoNotEnregister(lclNum DEBUGARG(DNER_LocalField)); |
| 23761 | } |
| 23762 | } |
| 23763 | |
| 23764 | //------------------------------------------------------------------------ |
| 23765 | // fgRemoveEmptyFinally: Remove try/finallys where the finally is empty |
| 23766 | // |
| 23767 | // Notes: |
| 23768 | // Removes all try/finallys in the method with empty finallys. |
| 23769 | // These typically arise from inlining empty Dispose methods. |
| 23770 | // |
| 23771 | // Converts callfinally to a jump to the finally continuation. |
| 23772 | // Removes the finally, and reparents all blocks in the try to the |
| 23773 | // enclosing try or method region. |
| 23774 | // |
// Currently limited to trivially empty finallys: those with one basic
// block containing only a single RETFILT statement. It is possible, but
// not likely, that more complex-looking finallys will eventually become
// empty (from, say, subsequent optimization). An SPMI run with
// just the "detection" part of this phase run after optimization
// found only one example where a new empty finally was detected.
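//
// Illustrative source-level example only (hypothetical C#): after inlining an empty Dispose,
//
//     try { S } finally { }
//
// reduces to just S; each callfinally is retargeted to the continuation and the
// try-finally entry is removed from the EH table.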
| 23781 | |
| 23782 | void Compiler::fgRemoveEmptyFinally() |
| 23783 | { |
| 23784 | JITDUMP("\n*************** In fgRemoveEmptyFinally()\n" ); |
| 23785 | |
| 23786 | #if FEATURE_EH_FUNCLETS |
| 23787 | // We need to do this transformation before funclets are created. |
| 23788 | assert(!fgFuncletsCreated); |
| 23789 | #endif // FEATURE_EH_FUNCLETS |
| 23790 | |
| 23791 | // Assume we don't need to update the bbPreds lists. |
| 23792 | assert(!fgComputePredsDone); |
| 23793 | |
| 23794 | if (compHndBBtabCount == 0) |
| 23795 | { |
| 23796 | JITDUMP("No EH in this method, nothing to remove.\n" ); |
| 23797 | return; |
| 23798 | } |
| 23799 | |
| 23800 | if (opts.MinOpts()) |
| 23801 | { |
| 23802 | JITDUMP("Method compiled with minOpts, no removal.\n" ); |
| 23803 | return; |
| 23804 | } |
| 23805 | |
| 23806 | if (opts.compDbgCode) |
| 23807 | { |
| 23808 | JITDUMP("Method compiled with debug codegen, no removal.\n" ); |
| 23809 | return; |
| 23810 | } |
| 23811 | |
| 23812 | #ifdef DEBUG |
| 23813 | if (verbose) |
| 23814 | { |
| 23815 | printf("\n*************** Before fgRemoveEmptyFinally()\n" ); |
| 23816 | fgDispBasicBlocks(); |
| 23817 | fgDispHandlerTab(); |
| 23818 | printf("\n" ); |
| 23819 | } |
| 23820 | #endif // DEBUG |
| 23821 | |
| 23822 | // Look for finallys or faults that are empty. |
| 23823 | unsigned finallyCount = 0; |
| 23824 | unsigned emptyCount = 0; |
| 23825 | unsigned XTnum = 0; |
| 23826 | while (XTnum < compHndBBtabCount) |
| 23827 | { |
| 23828 | EHblkDsc* const HBtab = &compHndBBtab[XTnum]; |
| 23829 | |
| 23830 | // Check if this is a try/finally. We could also look for empty |
| 23831 | // try/fault but presumably those are rare. |
| 23832 | if (!HBtab->HasFinallyHandler()) |
| 23833 | { |
| 23834 | JITDUMP("EH#%u is not a try-finally; skipping.\n" , XTnum); |
| 23835 | XTnum++; |
| 23836 | continue; |
| 23837 | } |
| 23838 | |
| 23839 | finallyCount++; |
| 23840 | |
| 23841 | // Look at blocks involved. |
| 23842 | BasicBlock* const firstBlock = HBtab->ebdHndBeg; |
| 23843 | BasicBlock* const lastBlock = HBtab->ebdHndLast; |
| 23844 | |
| 23845 | // Limit for now to finallys that are single blocks. |
| 23846 | if (firstBlock != lastBlock) |
| 23847 | { |
| 23848 | JITDUMP("EH#%u finally has multiple basic blocks; skipping.\n" , XTnum); |
| 23849 | XTnum++; |
| 23850 | continue; |
| 23851 | } |
| 23852 | |
| 23853 | // Limit for now to finallys that contain only a GT_RETFILT. |
| 23854 | bool isEmpty = true; |
| 23855 | |
| 23856 | for (GenTreeStmt* stmt = firstBlock->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 23857 | { |
| 23858 | GenTree* stmtExpr = stmt->gtStmtExpr; |
| 23859 | |
| 23860 | if (stmtExpr->gtOper != GT_RETFILT) |
| 23861 | { |
| 23862 | isEmpty = false; |
| 23863 | break; |
| 23864 | } |
| 23865 | } |
| 23866 | |
| 23867 | if (!isEmpty) |
| 23868 | { |
| 23869 | JITDUMP("EH#%u finally is not empty; skipping.\n" , XTnum); |
| 23870 | XTnum++; |
| 23871 | continue; |
| 23872 | } |
| 23873 | |
| 23874 | JITDUMP("EH#%u has empty finally, removing the region.\n" , XTnum); |
| 23875 | |
| 23876 | // Find all the call finallys that invoke this finally, |
| 23877 | // and modify them to jump to the return point. |
| 23878 | BasicBlock* firstCallFinallyRangeBlock = nullptr; |
| 23879 | BasicBlock* endCallFinallyRangeBlock = nullptr; |
| 23880 | ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock); |
| 23881 | |
| 23882 | BasicBlock* currentBlock = firstCallFinallyRangeBlock; |
| 23883 | |
| 23884 | while (currentBlock != endCallFinallyRangeBlock) |
| 23885 | { |
| 23886 | BasicBlock* nextBlock = currentBlock->bbNext; |
| 23887 | |
| 23888 | if ((currentBlock->bbJumpKind == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) |
| 23889 | { |
| 23890 | // Retarget the call finally to jump to the return |
| 23891 | // point. |
| 23892 | // |
| 23893 | // We don't expect to see retless finallys here, since |
| 23894 | // the finally is empty. |
| 23895 | noway_assert(currentBlock->isBBCallAlwaysPair()); |
| 23896 | |
| 23897 | BasicBlock* const leaveBlock = currentBlock->bbNext; |
| 23898 | BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest; |
| 23899 | |
| 23900 | JITDUMP("Modifying callfinally " FMT_BB " leave " FMT_BB " finally " FMT_BB " continuation " FMT_BB |
| 23901 | "\n" , |
| 23902 | currentBlock->bbNum, leaveBlock->bbNum, firstBlock->bbNum, postTryFinallyBlock->bbNum); |
| 23903 | JITDUMP("so that " FMT_BB " jumps to " FMT_BB "; then remove " FMT_BB "\n" , currentBlock->bbNum, |
| 23904 | postTryFinallyBlock->bbNum, leaveBlock->bbNum); |
| 23905 | |
| 23906 | noway_assert(leaveBlock->bbJumpKind == BBJ_ALWAYS); |
| 23907 | |
| 23908 | currentBlock->bbJumpDest = postTryFinallyBlock; |
| 23909 | currentBlock->bbJumpKind = BBJ_ALWAYS; |
| 23910 | |
| 23911 | // Ref count updates. |
| 23912 | fgAddRefPred(postTryFinallyBlock, currentBlock); |
| 23913 | // fgRemoveRefPred(firstBlock, currentBlock); |
| 23914 | |
| 23915 | // Delete the leave block, which should be marked as |
| 23916 | // keep always. |
| 23917 | assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0); |
| 23918 | nextBlock = leaveBlock->bbNext; |
| 23919 | |
| 23920 | leaveBlock->bbFlags &= ~BBF_KEEP_BBJ_ALWAYS; |
| 23921 | fgRemoveBlock(leaveBlock, true); |
| 23922 | |
| 23923 | // Cleanup the postTryFinallyBlock |
| 23924 | fgCleanupContinuation(postTryFinallyBlock); |
| 23925 | |
| 23926 | // Make sure iteration isn't going off the deep end. |
| 23927 | assert(leaveBlock != endCallFinallyRangeBlock); |
| 23928 | } |
| 23929 | |
| 23930 | currentBlock = nextBlock; |
| 23931 | } |
| 23932 | |
| 23933 | JITDUMP("Remove now-unreachable handler " FMT_BB "\n" , firstBlock->bbNum); |
| 23934 | |
| 23935 | // Handler block should now be unreferenced, since the only |
| 23936 | // explicit references to it were in call finallys. |
| 23937 | firstBlock->bbRefs = 0; |
| 23938 | |
| 23939 | // Remove the handler block. |
| 23940 | const bool unreachable = true; |
| 23941 | firstBlock->bbFlags &= ~BBF_DONT_REMOVE; |
| 23942 | fgRemoveBlock(firstBlock, unreachable); |
| 23943 | |
| 23944 | // Find enclosing try region for the try, if any, and update |
| 23945 | // the try region. Note the handler region (if any) won't |
| 23946 | // change. |
| 23947 | BasicBlock* const firstTryBlock = HBtab->ebdTryBeg; |
| 23948 | BasicBlock* const lastTryBlock = HBtab->ebdTryLast; |
| 23949 | assert(firstTryBlock->getTryIndex() == XTnum); |
| 23950 | |
| 23951 | for (BasicBlock* block = firstTryBlock; block != nullptr; block = block->bbNext) |
| 23952 | { |
| 23953 | // Look for blocks directly contained in this try, and |
| 23954 | // update the try region appropriately. |
| 23955 | // |
| 23956 | // Try region for blocks transitively contained (say in a |
| 23957 | // child try) will get updated by the subsequent call to |
| 23958 | // fgRemoveEHTableEntry. |
| 23959 | if (block->getTryIndex() == XTnum) |
| 23960 | { |
| 23961 | if (firstBlock->hasTryIndex()) |
| 23962 | { |
| 23963 | block->setTryIndex(firstBlock->getTryIndex()); |
| 23964 | } |
| 23965 | else |
| 23966 | { |
| 23967 | block->clearTryIndex(); |
| 23968 | } |
| 23969 | } |
| 23970 | |
| 23971 | if (block == firstTryBlock) |
| 23972 | { |
| 23973 | assert((block->bbFlags & BBF_TRY_BEG) != 0); |
| 23974 | block->bbFlags &= ~BBF_TRY_BEG; |
| 23975 | } |
| 23976 | |
| 23977 | if (block == lastTryBlock) |
| 23978 | { |
| 23979 | break; |
| 23980 | } |
| 23981 | } |
| 23982 | |
| 23983 | // Remove the try-finally EH region. This will compact the EH table |
| 23984 | // so XTnum now points at the next entry. |
| 23985 | fgRemoveEHTableEntry(XTnum); |
| 23986 | |
| 23987 | emptyCount++; |
| 23988 | } |
| 23989 | |
| 23990 | if (emptyCount > 0) |
| 23991 | { |
| 23992 | JITDUMP("fgRemoveEmptyFinally() removed %u try-finally clauses from %u finallys\n" , emptyCount, finallyCount); |
| 23993 | fgOptimizedFinally = true; |
| 23994 | |
| 23995 | #ifdef DEBUG |
| 23996 | if (verbose) |
| 23997 | { |
| 23998 | printf("\n*************** After fgRemoveEmptyFinally()\n" ); |
| 23999 | fgDispBasicBlocks(); |
| 24000 | fgDispHandlerTab(); |
| 24001 | printf("\n" ); |
| 24002 | } |
| 24003 | |
| 24004 | fgVerifyHandlerTab(); |
| 24005 | fgDebugCheckBBlist(false, false); |
| 24006 | |
| 24007 | #endif // DEBUG |
| 24008 | } |
| 24009 | } |
| 24010 | |
| 24011 | //------------------------------------------------------------------------ |
| 24012 | // fgRemoveEmptyTry: Optimize try/finallys where the try is empty |
| 24013 | // |
| 24014 | // Notes: |
| 24015 | // In runtimes where thread abort is not possible, `try {} finally {S}` |
| 24016 | // can be optimized to simply `S`. This method looks for such |
| 24017 | // cases and removes the try-finally from the EH table, making |
| 24018 | // suitable flow, block flag, statement, and region updates. |
| 24019 | // |
| 24020 | // This optimization is not legal in runtimes that support thread |
| 24021 | // abort because those runtimes ensure that a finally is completely |
| 24022 | // executed before continuing to process the thread abort. With |
| 24023 | // this optimization, the code block `S` can lose special |
| 24024 | // within-finally status and so complete execution is no longer |
| 24025 | // guaranteed. |
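//
// Illustrative source-level example only (hypothetical C#):
//
//     try { } finally { S }
//
// reduces to just S; this shape can arise, for example, when the protected body was
// optimized away or an inlined wrapper contributed nothing to the try region.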
| 24026 | |
| 24027 | void Compiler::fgRemoveEmptyTry() |
| 24028 | { |
| 24029 | JITDUMP("\n*************** In fgRemoveEmptyTry()\n" ); |
| 24030 | |
| 24031 | #if FEATURE_EH_FUNCLETS |
| 24032 | // We need to do this transformation before funclets are created. |
| 24033 | assert(!fgFuncletsCreated); |
| 24034 | #endif // FEATURE_EH_FUNCLETS |
| 24035 | |
| 24036 | // Assume we don't need to update the bbPreds lists. |
| 24037 | assert(!fgComputePredsDone); |
| 24038 | |
| 24039 | #ifdef FEATURE_CORECLR |
| 24040 | bool enableRemoveEmptyTry = true; |
| 24041 | #else |
| 24042 | // Code in a finally gets special treatment in the presence of |
| 24043 | // thread abort. |
| 24044 | bool enableRemoveEmptyTry = false; |
| 24045 | #endif // FEATURE_CORECLR |
| 24046 | |
| 24047 | #ifdef DEBUG |
| 24048 | // Allow override to enable/disable. |
| 24049 | enableRemoveEmptyTry = (JitConfig.JitEnableRemoveEmptyTry() == 1); |
| 24050 | #endif // DEBUG |
| 24051 | |
| 24052 | if (!enableRemoveEmptyTry) |
| 24053 | { |
| 24054 | JITDUMP("Empty try removal disabled.\n" ); |
| 24055 | return; |
| 24056 | } |
| 24057 | |
| 24058 | if (compHndBBtabCount == 0) |
| 24059 | { |
| 24060 | JITDUMP("No EH in this method, nothing to remove.\n" ); |
| 24061 | return; |
| 24062 | } |
| 24063 | |
| 24064 | if (opts.MinOpts()) |
| 24065 | { |
| 24066 | JITDUMP("Method compiled with minOpts, no removal.\n" ); |
| 24067 | return; |
| 24068 | } |
| 24069 | |
| 24070 | if (opts.compDbgCode) |
| 24071 | { |
| 24072 | JITDUMP("Method compiled with debug codegen, no removal.\n" ); |
| 24073 | return; |
| 24074 | } |
| 24075 | |
| 24076 | #ifdef DEBUG |
| 24077 | if (verbose) |
| 24078 | { |
| 24079 | printf("\n*************** Before fgRemoveEmptyTry()\n" ); |
| 24080 | fgDispBasicBlocks(); |
| 24081 | fgDispHandlerTab(); |
| 24082 | printf("\n" ); |
| 24083 | } |
| 24084 | #endif // DEBUG |
| 24085 | |
| 24086 | // Look for try-finallys where the try is empty. |
| 24087 | unsigned emptyCount = 0; |
| 24088 | unsigned XTnum = 0; |
| 24089 | while (XTnum < compHndBBtabCount) |
| 24090 | { |
| 24091 | EHblkDsc* const HBtab = &compHndBBtab[XTnum]; |
| 24092 | |
| 24093 | // Check if this is a try/finally. We could also look for empty |
| 24094 | // try/fault but presumably those are rare. |
| 24095 | if (!HBtab->HasFinallyHandler()) |
| 24096 | { |
| 24097 | JITDUMP("EH#%u is not a try-finally; skipping.\n" , XTnum); |
| 24098 | XTnum++; |
| 24099 | continue; |
| 24100 | } |
| 24101 | |
| 24102 | // Examine the try region |
| 24103 | BasicBlock* const firstTryBlock = HBtab->ebdTryBeg; |
| 24104 | BasicBlock* const lastTryBlock = HBtab->ebdTryLast; |
| 24105 | BasicBlock* const firstHandlerBlock = HBtab->ebdHndBeg; |
| 24106 | BasicBlock* const lastHandlerBlock = HBtab->ebdHndLast; |
| 24107 | BasicBlock* const endHandlerBlock = lastHandlerBlock->bbNext; |
| 24108 | |
| 24109 | assert(firstTryBlock->getTryIndex() == XTnum); |
| 24110 | |
| 24111 | // Limit for now to trys that contain only a callfinally pair |
| 24112 | // or branch to same. |
| 24113 | if (!firstTryBlock->isEmpty()) |
| 24114 | { |
| 24115 | JITDUMP("EH#%u first try block " FMT_BB " not empty; skipping.\n" , XTnum, firstTryBlock->bbNum); |
| 24116 | XTnum++; |
| 24117 | continue; |
| 24118 | } |
| 24119 | |
| 24120 | #if FEATURE_EH_CALLFINALLY_THUNKS |
| 24121 | |
| 24122 | // Look for blocks that are always jumps to a call finally |
| 24123 | // pair that targets the finally |
| 24124 | if (firstTryBlock->bbJumpKind != BBJ_ALWAYS) |
| 24125 | { |
| 24126 | JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n" , XTnum, |
| 24127 | firstTryBlock->bbNum); |
| 24128 | XTnum++; |
| 24129 | continue; |
| 24130 | } |
| 24131 | |
| 24132 | BasicBlock* const callFinally = firstTryBlock->bbJumpDest; |
| 24133 | |
| 24134 | // Look for call always pair. Note this will also disqualify |
| 24135 | // empty try removal in cases where the finally doesn't |
| 24136 | // return. |
| 24137 | if (!callFinally->isBBCallAlwaysPair() || (callFinally->bbJumpDest != firstHandlerBlock)) |
| 24138 | { |
| 24139 | JITDUMP("EH#%u first try block " FMT_BB " always jumps but not to a callfinally; skipping.\n" , XTnum, |
| 24140 | firstTryBlock->bbNum); |
| 24141 | XTnum++; |
| 24142 | continue; |
| 24143 | } |
| 24144 | |
| 24145 | // Try itself must be a single block. |
| 24146 | if (firstTryBlock != lastTryBlock) |
| 24147 | { |
| 24148 | JITDUMP("EH#%u first try block " FMT_BB " not only block in try; skipping.\n" , XTnum, |
| 24149 | firstTryBlock->bbNext->bbNum); |
| 24150 | XTnum++; |
| 24151 | continue; |
| 24152 | } |
| 24153 | |
| 24154 | #else |
| 24155 | // Look for call always pair within the try itself. Note this |
| 24156 | // will also disqualify empty try removal in cases where the |
| 24157 | // finally doesn't return. |
| 24158 | if (!firstTryBlock->isBBCallAlwaysPair() || (firstTryBlock->bbJumpDest != firstHandlerBlock)) |
| 24159 | { |
| 24160 | JITDUMP("EH#%u first try block " FMT_BB " not a callfinally; skipping.\n" , XTnum, firstTryBlock->bbNum); |
| 24161 | XTnum++; |
| 24162 | continue; |
| 24163 | } |
| 24164 | |
| 24165 | BasicBlock* const callFinally = firstTryBlock; |
| 24166 | |
| 24167 | // Try must be a callalways pair of blocks. |
| 24168 | if (firstTryBlock->bbNext != lastTryBlock) |
| 24169 | { |
| 24170 | JITDUMP("EH#%u block " FMT_BB " not last block in try; skipping.\n" , XTnum, firstTryBlock->bbNext->bbNum); |
| 24171 | XTnum++; |
| 24172 | continue; |
| 24173 | } |
| 24174 | |
| 24175 | #endif // FEATURE_EH_CALLFINALLY_THUNKS |
| 24176 | |
| 24177 | JITDUMP("EH#%u has empty try, removing the try region and promoting the finally.\n" , XTnum); |
| 24178 | |
| 24179 | // There should be just one callfinally that invokes this |
| 24180 | // finally, the one we found above. Verify this. |
| 24181 | BasicBlock* firstCallFinallyRangeBlock = nullptr; |
| 24182 | BasicBlock* endCallFinallyRangeBlock = nullptr; |
| 24183 | bool verifiedSingleCallfinally = true; |
| 24184 | ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock); |
| 24185 | |
| 24186 | for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext) |
| 24187 | { |
| 24188 | if ((block->bbJumpKind == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) |
| 24189 | { |
| 24190 | assert(block->isBBCallAlwaysPair()); |
| 24191 | |
| 24192 | if (block != callFinally) |
| 24193 | { |
| 24194 | JITDUMP("EH#%u found unexpected callfinally " FMT_BB "; skipping.\n" ); |
| 24195 | verifiedSingleCallfinally = false; |
| 24196 | break; |
| 24197 | } |
| 24198 | |
| 24199 | block = block->bbNext; |
| 24200 | } |
| 24201 | } |
| 24202 | |
| 24203 | if (!verifiedSingleCallfinally) |
| 24204 | { |
| 24205 | JITDUMP("EH#%u -- unexpectedly -- has multiple callfinallys; skipping.\n" ); |
| 24206 | XTnum++; |
| 24207 | assert(verifiedSingleCallfinally); |
| 24208 | continue; |
| 24209 | } |
| 24210 | |
| 24211 | // Time to optimize. |
| 24212 | // |
| 24213 | // (1) Convert the callfinally to a normal jump to the handler |
| 24214 | callFinally->bbJumpKind = BBJ_ALWAYS; |
| 24215 | |
| 24216 | // Identify the leave block and the continuation |
| 24217 | BasicBlock* const leave = callFinally->bbNext; |
| 24218 | BasicBlock* const continuation = leave->bbJumpDest; |
| 24219 | |
| 24220 | // (2) Cleanup the leave so it can be deleted by subsequent opts |
| 24221 | assert((leave->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0); |
| 24222 | leave->bbFlags &= ~BBF_KEEP_BBJ_ALWAYS; |
| 24223 | |
| 24224 | // (3) Cleanup the continuation |
| 24225 | fgCleanupContinuation(continuation); |
| 24226 | |
| 24227 | // (4) Find enclosing try region for the try, if any, and |
| 24228 | // update the try region for the blocks in the try. Note the |
| 24229 | // handler region (if any) won't change. |
| 24230 | // |
| 24231 | // Kind of overkill to loop here, but hey. |
| 24232 | for (BasicBlock* block = firstTryBlock; block != nullptr; block = block->bbNext) |
| 24233 | { |
| 24234 | // Look for blocks directly contained in this try, and |
| 24235 | // update the try region appropriately. |
| 24236 | // |
| 24237 | // The try region for blocks transitively contained (say in a |
| 24238 | // child try) will get updated by the subsequent call to |
| 24239 | // fgRemoveEHTableEntry. |
| 24240 | if (block->getTryIndex() == XTnum) |
| 24241 | { |
| 24242 | if (firstHandlerBlock->hasTryIndex()) |
| 24243 | { |
| 24244 | block->setTryIndex(firstHandlerBlock->getTryIndex()); |
| 24245 | } |
| 24246 | else |
| 24247 | { |
| 24248 | block->clearTryIndex(); |
| 24249 | } |
| 24250 | } |
| 24251 | |
| 24252 | if (block == firstTryBlock) |
| 24253 | { |
| 24254 | assert((block->bbFlags & BBF_TRY_BEG) != 0); |
| 24255 | block->bbFlags &= ~BBF_TRY_BEG; |
| 24256 | } |
| 24257 | |
| 24258 | if (block == lastTryBlock) |
| 24259 | { |
| 24260 | break; |
| 24261 | } |
| 24262 | } |
| 24263 | |
| 24264 | // (5) Update the directly contained handler blocks' handler index. |
| 24265 | // Handler index of any nested blocks will update when we |
| 24266 | // remove the EH table entry. Change handler exits to jump to |
| 24267 | // the continuation. Clear catch type on handler entry. |
| 24268 | // Decrement nesting level of enclosed GT_END_LFINs. |
| 24269 | for (BasicBlock* block = firstHandlerBlock; block != endHandlerBlock; block = block->bbNext) |
| 24270 | { |
| 24271 | if (block == firstHandlerBlock) |
| 24272 | { |
| 24273 | block->bbCatchTyp = BBCT_NONE; |
| 24274 | } |
| 24275 | |
| 24276 | if (block->getHndIndex() == XTnum) |
| 24277 | { |
| 24278 | if (firstTryBlock->hasHndIndex()) |
| 24279 | { |
| 24280 | block->setHndIndex(firstTryBlock->getHndIndex()); |
| 24281 | } |
| 24282 | else |
| 24283 | { |
| 24284 | block->clearHndIndex(); |
| 24285 | } |
| 24286 | |
| 24287 | if (block->bbJumpKind == BBJ_EHFINALLYRET) |
| 24288 | { |
| 24289 | GenTreeStmt* finallyRet = block->lastStmt(); |
| 24290 | GenTree* finallyRetExpr = finallyRet->gtStmtExpr; |
| 24291 | assert(finallyRetExpr->gtOper == GT_RETFILT); |
| 24292 | fgRemoveStmt(block, finallyRet); |
| 24293 | block->bbJumpKind = BBJ_ALWAYS; |
| 24294 | block->bbJumpDest = continuation; |
| 24295 | fgAddRefPred(continuation, block); |
| 24296 | } |
| 24297 | } |
| 24298 | |
| 24299 | #if !FEATURE_EH_FUNCLETS |
| 24300 | // If we're in a non-funclet model, decrement the nesting |
| 24301 | // level of any GT_END_LFIN we find in the handler region, |
| 24302 | // since we're removing the enclosing handler. |
| 24303 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 24304 | { |
| 24305 | GenTree* expr = stmt->gtStmtExpr; |
| 24306 | if (expr->gtOper == GT_END_LFIN) |
| 24307 | { |
| 24308 | const unsigned nestLevel = expr->gtVal.gtVal1; |
| 24309 | assert(nestLevel > 0); |
| 24310 | expr->gtVal.gtVal1 = nestLevel - 1; |
| 24311 | } |
| 24312 | } |
| 24313 | #endif // !FEATURE_EH_FUNCLETS |
| 24314 | } |
| 24315 | |
| 24316 | // (6) Remove the try-finally EH region. This will compact the |
| 24317 | // EH table so XTnum now points at the next entry and will update |
| 24318 | // the EH region indices of any nested EH in the (former) handler. |
| 24319 | fgRemoveEHTableEntry(XTnum); |
| 24320 | |
| 24321 | // Another one bites the dust... |
| 24322 | emptyCount++; |
| 24323 | } |
| 24324 | |
| 24325 | if (emptyCount > 0) |
| 24326 | { |
| 24327 | JITDUMP("fgRemoveEmptyTry() optimized %u empty-try try-finally clauses\n" , emptyCount); |
| 24328 | fgOptimizedFinally = true; |
| 24329 | |
| 24330 | #ifdef DEBUG |
| 24331 | if (verbose) |
| 24332 | { |
| 24333 | printf("\n*************** After fgRemoveEmptyTry()\n" ); |
| 24334 | fgDispBasicBlocks(); |
| 24335 | fgDispHandlerTab(); |
| 24336 | printf("\n" ); |
| 24337 | } |
| 24338 | |
| 24339 | fgVerifyHandlerTab(); |
| 24340 | fgDebugCheckBBlist(false, false); |
| 24341 | |
| 24342 | #endif // DEBUG |
| 24343 | } |
| 24344 | } |
| 24345 | |
| 24346 | //------------------------------------------------------------------------ |
| 24347 | // fgCloneFinally: Optimize normal exit path from a try/finally |
| 24348 | // |
| 24349 | // Notes: |
| 24350 | // Handles finallys that are not enclosed by or enclosing other |
| 24351 | // handler regions. |
| 24352 | // |
| 24353 | // Converts the "normal exit" callfinally to a jump to a cloned copy |
| 24354 | // of the finally, which in turn jumps to the finally continuation. |
| 24355 | // |
| 24356 | // If all callfinallys for a given finally are converted to jump to |
| 24357 | // the clone, the try-finally is modified into a try-fault, |
// distinguishable from organic try-faults by handler type
| 24359 | // EH_HANDLER_FAULT_WAS_FINALLY vs the organic EH_HANDLER_FAULT. |
| 24360 | // |
| 24361 | // Does not yet handle thread abort. The open issues here are how |
| 24362 | // to maintain the proper description of the cloned finally blocks |
| 24363 | // as a handler (for thread abort purposes), how to prevent code |
| 24364 | // motion in or out of these blocks, and how to report this cloned |
| 24365 | // handler to the runtime. Some building blocks for thread abort |
// exist (see below) but more work is needed.
| 24367 | // |
| 24368 | // The first and last blocks of the cloned finally are marked with |
| 24369 | // BBF_CLONED_FINALLY_BEGIN and BBF_CLONED_FINALLY_END. However |
| 24370 | // these markers currently can get lost during subsequent |
| 24371 | // optimizations. |
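//
// Schematically (hypothetical block numbers, callfinally thunk model),
// the normal exit path
//
//   BB01 [try]              BBJ_ALWAYS      -> BB02
//   BB02                    BBJ_CALLFINALLY -> BB10
//   BB03                    BBJ_ALWAYS      -> BB04 (continuation)
//   BB10 .. BB1N [finally]  ...                ends in BBJ_EHFINALLYRET
//
// is rewritten to
//
//   BB01 [try]              BBJ_ALWAYS      -> BB02
//   BB02                    BBJ_ALWAYS      -> BB20
//   BB20 .. BB2N [clone]    ...                BBJ_ALWAYS -> BB04
//   BB10 .. BB1N [finally]  unchanged
//
// with BB03 (the leave half of the callfinally pair) deleted. If every
// callfinally is retargeted this way, the original finally is reached
// only on exceptional paths and the region is converted to a try-fault.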
| 24372 | |
| 24373 | void Compiler::fgCloneFinally() |
| 24374 | { |
| 24375 | JITDUMP("\n*************** In fgCloneFinally()\n" ); |
| 24376 | |
| 24377 | #if FEATURE_EH_FUNCLETS |
| 24378 | // We need to do this transformation before funclets are created. |
| 24379 | assert(!fgFuncletsCreated); |
| 24380 | #endif // FEATURE_EH_FUNCLETS |
| 24381 | |
| 24382 | // Assume we don't need to update the bbPreds lists. |
| 24383 | assert(!fgComputePredsDone); |
| 24384 | |
| 24385 | #ifdef FEATURE_CORECLR |
| 24386 | bool enableCloning = true; |
| 24387 | #else |
| 24388 | // Finally cloning currently doesn't provide sufficient protection |
| 24389 | // for the cloned code in the presence of thread abort. |
| 24390 | bool enableCloning = false; |
| 24391 | #endif // FEATURE_CORECLR |
| 24392 | |
| 24393 | #ifdef DEBUG |
| 24394 | // Allow override to enable/disable. |
| 24395 | enableCloning = (JitConfig.JitEnableFinallyCloning() == 1); |
| 24396 | #endif // DEBUG |
| 24397 | |
| 24398 | if (!enableCloning) |
| 24399 | { |
| 24400 | JITDUMP("Finally cloning disabled.\n" ); |
| 24401 | return; |
| 24402 | } |
| 24403 | |
| 24404 | if (compHndBBtabCount == 0) |
| 24405 | { |
| 24406 | JITDUMP("No EH in this method, no cloning.\n" ); |
| 24407 | return; |
| 24408 | } |
| 24409 | |
| 24410 | if (opts.MinOpts()) |
| 24411 | { |
| 24412 | JITDUMP("Method compiled with minOpts, no cloning.\n" ); |
| 24413 | return; |
| 24414 | } |
| 24415 | |
| 24416 | if (opts.compDbgCode) |
| 24417 | { |
| 24418 | JITDUMP("Method compiled with debug codegen, no cloning.\n" ); |
| 24419 | return; |
| 24420 | } |
| 24421 | |
| 24422 | #ifdef DEBUG |
| 24423 | if (verbose) |
| 24424 | { |
| 24425 | printf("\n*************** Before fgCloneFinally()\n" ); |
| 24426 | fgDispBasicBlocks(); |
| 24427 | fgDispHandlerTab(); |
| 24428 | printf("\n" ); |
| 24429 | } |
| 24430 | |
| 24431 | // Verify try-finally exits look good before we start. |
| 24432 | fgDebugCheckTryFinallyExits(); |
| 24433 | |
| 24434 | #endif // DEBUG |
| 24435 | |
| 24436 | // Look for finallys that are not contained within other handlers, |
| 24437 | // and which do not themselves contain EH. |
| 24438 | // |
| 24439 | // Note these cases potentially could be handled, but are less |
| 24440 | // obviously profitable and require modification of the handler |
| 24441 | // table. |
| 24442 | unsigned XTnum = 0; |
| 24443 | EHblkDsc* HBtab = compHndBBtab; |
| 24444 | unsigned cloneCount = 0; |
| 24445 | for (; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 24446 | { |
| 24447 | // Check if this is a try/finally |
| 24448 | if (!HBtab->HasFinallyHandler()) |
| 24449 | { |
| 24450 | JITDUMP("EH#%u is not a try-finally; skipping.\n" , XTnum); |
| 24451 | continue; |
| 24452 | } |
| 24453 | |
| 24454 | // Check if enclosed by another handler. |
| 24455 | const unsigned enclosingHandlerRegion = ehGetEnclosingHndIndex(XTnum); |
| 24456 | |
| 24457 | if (enclosingHandlerRegion != EHblkDsc::NO_ENCLOSING_INDEX) |
| 24458 | { |
| 24459 | JITDUMP("EH#%u is enclosed by handler EH#%u; skipping.\n" , XTnum, enclosingHandlerRegion); |
| 24460 | continue; |
| 24461 | } |
| 24462 | |
| 24463 | bool containsEH = false; |
| 24464 | unsigned exampleEnclosedHandlerRegion = 0; |
| 24465 | |
| 24466 | // Only need to look at lower numbered regions because the |
| 24467 | // handler table is ordered by nesting. |
| 24468 | for (unsigned i = 0; i < XTnum; i++) |
| 24469 | { |
| 24470 | if (ehGetEnclosingHndIndex(i) == XTnum) |
| 24471 | { |
| 24472 | exampleEnclosedHandlerRegion = i; |
| 24473 | containsEH = true; |
| 24474 | break; |
| 24475 | } |
| 24476 | } |
| 24477 | |
| 24478 | if (containsEH) |
| 24479 | { |
| 24480 | JITDUMP("Finally for EH#%u encloses handler EH#%u; skipping.\n" , XTnum, exampleEnclosedHandlerRegion); |
| 24481 | continue; |
| 24482 | } |
| 24483 | |
| 24484 | // Look at blocks involved. |
| 24485 | BasicBlock* const firstBlock = HBtab->ebdHndBeg; |
| 24486 | BasicBlock* const lastBlock = HBtab->ebdHndLast; |
| 24487 | assert(firstBlock != nullptr); |
| 24488 | assert(lastBlock != nullptr); |
| 24489 | BasicBlock* nextBlock = lastBlock->bbNext; |
| 24490 | unsigned regionBBCount = 0; |
| 24491 | unsigned regionStmtCount = 0; |
| 24492 | bool hasFinallyRet = false; |
| 24493 | bool isAllRare = true; |
| 24494 | bool hasSwitch = false; |
| 24495 | |
| 24496 | for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) |
| 24497 | { |
| 24498 | if (block->bbJumpKind == BBJ_SWITCH) |
| 24499 | { |
| 24500 | hasSwitch = true; |
| 24501 | break; |
| 24502 | } |
| 24503 | |
| 24504 | regionBBCount++; |
| 24505 | |
| 24506 | // Should we compute statement cost here, or is it |
| 24507 | // premature...? For now just count statements I guess. |
| 24508 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 24509 | { |
| 24510 | regionStmtCount++; |
| 24511 | } |
| 24512 | |
| 24513 | hasFinallyRet = hasFinallyRet || (block->bbJumpKind == BBJ_EHFINALLYRET); |
| 24514 | isAllRare = isAllRare && block->isRunRarely(); |
| 24515 | } |
| 24516 | |
| 24517 | // Skip cloning if the finally has a switch. |
| 24518 | if (hasSwitch) |
| 24519 | { |
| 24520 | JITDUMP("Finally in EH#%u has a switch; skipping.\n" , XTnum); |
| 24521 | continue; |
| 24522 | } |
| 24523 | |
| 24524 | // Skip cloning if the finally must throw. |
| 24525 | if (!hasFinallyRet) |
| 24526 | { |
| 24527 | JITDUMP("Finally in EH#%u does not return; skipping.\n" , XTnum); |
| 24528 | continue; |
| 24529 | } |
| 24530 | |
| 24531 | // Skip cloning if the finally is rarely run code. |
| 24532 | if (isAllRare) |
| 24533 | { |
| 24534 | JITDUMP("Finally in EH#%u is run rarely; skipping.\n" , XTnum); |
| 24535 | continue; |
| 24536 | } |
| 24537 | |
| 24538 | // Empirical studies from CoreCLR and CoreFX show that less |
// than 1% of finally regions have more than 15
| 24540 | // statements. So, to avoid potentially excessive code growth, |
| 24541 | // only clone finallys that have 15 or fewer statements. |
| 24542 | const unsigned stmtCountLimit = 15; |
| 24543 | if (regionStmtCount > stmtCountLimit) |
| 24544 | { |
| 24545 | JITDUMP("Finally in EH#%u has %u statements, limit is %u; skipping.\n" , XTnum, regionStmtCount, |
| 24546 | stmtCountLimit); |
| 24547 | continue; |
| 24548 | } |
| 24549 | |
| 24550 | JITDUMP("EH#%u is a candidate for finally cloning:" |
| 24551 | " %u blocks, %u statements\n" , |
| 24552 | XTnum, regionBBCount, regionStmtCount); |
| 24553 | |
| 24554 | // Walk the try region backwards looking for the last block |
| 24555 | // that transfers control to a callfinally. |
| 24556 | BasicBlock* const firstTryBlock = HBtab->ebdTryBeg; |
| 24557 | BasicBlock* const lastTryBlock = HBtab->ebdTryLast; |
| 24558 | assert(firstTryBlock->getTryIndex() == XTnum); |
| 24559 | assert(bbInTryRegions(XTnum, lastTryBlock)); |
| 24560 | BasicBlock* const beforeTryBlock = firstTryBlock->bbPrev; |
| 24561 | |
| 24562 | BasicBlock* normalCallFinallyBlock = nullptr; |
| 24563 | BasicBlock* normalCallFinallyReturn = nullptr; |
| 24564 | BasicBlock* cloneInsertAfter = HBtab->ebdTryLast; |
| 24565 | bool tryToRelocateCallFinally = false; |
| 24566 | |
| 24567 | for (BasicBlock* block = lastTryBlock; block != beforeTryBlock; block = block->bbPrev) |
| 24568 | { |
| 24569 | #if FEATURE_EH_CALLFINALLY_THUNKS |
| 24570 | // Blocks that transfer control to callfinallies are usually |
| 24571 | // BBJ_ALWAYS blocks, but the last block of a try may fall |
| 24572 | // through to a callfinally. |
| 24573 | BasicBlock* jumpDest = nullptr; |
| 24574 | |
| 24575 | if ((block->bbJumpKind == BBJ_NONE) && (block == lastTryBlock)) |
| 24576 | { |
| 24577 | jumpDest = block->bbNext; |
| 24578 | } |
| 24579 | else if (block->bbJumpKind == BBJ_ALWAYS) |
| 24580 | { |
| 24581 | jumpDest = block->bbJumpDest; |
| 24582 | } |
| 24583 | |
| 24584 | if (jumpDest == nullptr) |
| 24585 | { |
| 24586 | continue; |
| 24587 | } |
| 24588 | |
| 24589 | // The jumpDest must be a callfinally that in turn invokes the |
| 24590 | // finally of interest. |
| 24591 | if (!jumpDest->isBBCallAlwaysPair() || (jumpDest->bbJumpDest != firstBlock)) |
| 24592 | { |
| 24593 | continue; |
| 24594 | } |
| 24595 | #else |
| 24596 | // Look for call finally pair directly within the try |
| 24597 | if (!block->isBBCallAlwaysPair() || (block->bbJumpDest != firstBlock)) |
| 24598 | { |
| 24599 | continue; |
| 24600 | } |
| 24601 | |
| 24602 | BasicBlock* const jumpDest = block; |
| 24603 | #endif // FEATURE_EH_CALLFINALLY_THUNKS |
| 24604 | |
| 24605 | // Found our block. |
| 24606 | BasicBlock* const finallyReturnBlock = jumpDest->bbNext; |
| 24607 | BasicBlock* const postTryFinallyBlock = finallyReturnBlock->bbJumpDest; |
| 24608 | |
| 24609 | normalCallFinallyBlock = jumpDest; |
| 24610 | normalCallFinallyReturn = postTryFinallyBlock; |
| 24611 | |
| 24612 | #if FEATURE_EH_CALLFINALLY_THUNKS |
| 24613 | // When there are callfinally thunks, we don't expect to see the |
| 24614 | // callfinally within a handler region either. |
| 24615 | assert(!jumpDest->hasHndIndex()); |
| 24616 | |
| 24617 | // Update the clone insertion point to just after the |
| 24618 | // call always pair. |
| 24619 | cloneInsertAfter = finallyReturnBlock; |
| 24620 | |
| 24621 | // We will consider moving the callfinally so we can fall |
| 24622 | // through from the try into the clone. |
| 24623 | tryToRelocateCallFinally = true; |
| 24624 | |
| 24625 | JITDUMP("Chose path to clone: try block " FMT_BB " jumps to callfinally at " FMT_BB ";" |
| 24626 | " the call returns to " FMT_BB " which jumps to " FMT_BB "\n" , |
| 24627 | block->bbNum, jumpDest->bbNum, finallyReturnBlock->bbNum, postTryFinallyBlock->bbNum); |
| 24628 | #else |
| 24629 | JITDUMP("Chose path to clone: try block " FMT_BB " is a callfinally;" |
| 24630 | " the call returns to " FMT_BB " which jumps to " FMT_BB "\n" , |
| 24631 | block->bbNum, finallyReturnBlock->bbNum, postTryFinallyBlock->bbNum); |
| 24632 | #endif // FEATURE_EH_CALLFINALLY_THUNKS |
| 24633 | |
| 24634 | break; |
| 24635 | } |
| 24636 | |
| 24637 | // If there is no call to the finally, don't clone. |
| 24638 | if (normalCallFinallyBlock == nullptr) |
| 24639 | { |
| 24640 | JITDUMP("EH#%u: no calls from the try to the finally, skipping.\n" , XTnum); |
| 24641 | continue; |
| 24642 | } |
| 24643 | |
| 24644 | JITDUMP("Will update callfinally block " FMT_BB " to jump to the clone;" |
| 24645 | " clone will jump to " FMT_BB "\n" , |
| 24646 | normalCallFinallyBlock->bbNum, normalCallFinallyReturn->bbNum); |
| 24647 | |
| 24648 | // If there are multiple callfinallys and we're in the |
| 24649 | // callfinally thunk model, all the callfinallys are placed |
| 24650 | // just outside the try region. We'd like our chosen |
| 24651 | // callfinally to come first after the try, so we can fall out of the try |
| 24652 | // into the clone. |
| 24653 | BasicBlock* firstCallFinallyRangeBlock = nullptr; |
| 24654 | BasicBlock* endCallFinallyRangeBlock = nullptr; |
| 24655 | ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock); |
| 24656 | |
| 24657 | if (tryToRelocateCallFinally) |
| 24658 | { |
| 24659 | BasicBlock* firstCallFinallyBlock = nullptr; |
| 24660 | |
| 24661 | for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; |
| 24662 | block = block->bbNext) |
| 24663 | { |
| 24664 | if (block->isBBCallAlwaysPair()) |
| 24665 | { |
| 24666 | if (block->bbJumpDest == firstBlock) |
| 24667 | { |
| 24668 | firstCallFinallyBlock = block; |
| 24669 | break; |
| 24670 | } |
| 24671 | } |
| 24672 | } |
| 24673 | |
| 24674 | // We better have found at least one call finally. |
| 24675 | assert(firstCallFinallyBlock != nullptr); |
| 24676 | |
| 24677 | // If there is more than one callfinally, we'd like to move |
// the one we are going to retarget to be first in the callfinally range,
| 24679 | // but only if it's targeted by the last block in the try range. |
| 24680 | if (firstCallFinallyBlock != normalCallFinallyBlock) |
| 24681 | { |
| 24682 | BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev; |
| 24683 | |
| 24684 | if ((placeToMoveAfter->bbJumpKind == BBJ_ALWAYS) && |
| 24685 | (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) |
| 24686 | { |
| 24687 | JITDUMP("Moving callfinally " FMT_BB " to be first in line, before " FMT_BB "\n" , |
| 24688 | normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum); |
| 24689 | |
| 24690 | BasicBlock* const firstToMove = normalCallFinallyBlock; |
| 24691 | BasicBlock* const lastToMove = normalCallFinallyBlock->bbNext; |
| 24692 | |
| 24693 | fgUnlinkRange(firstToMove, lastToMove); |
| 24694 | fgMoveBlocksAfter(firstToMove, lastToMove, placeToMoveAfter); |
| 24695 | |
| 24696 | #ifdef DEBUG |
| 24697 | // Sanity checks |
| 24698 | fgDebugCheckBBlist(false, false); |
| 24699 | fgVerifyHandlerTab(); |
| 24700 | #endif // DEBUG |
| 24701 | |
| 24702 | assert(nextBlock == lastBlock->bbNext); |
| 24703 | |
| 24704 | // Update where the callfinally range begins, since we might |
| 24705 | // have altered this with callfinally rearrangement, and/or |
| 24706 | // the range begin might have been pretty loose to begin with. |
| 24707 | firstCallFinallyRangeBlock = normalCallFinallyBlock; |
| 24708 | } |
| 24709 | else |
| 24710 | { |
| 24711 | JITDUMP("Can't move callfinally " FMT_BB " to be first in line" |
| 24712 | " -- last finally block " FMT_BB " doesn't jump to it\n" , |
| 24713 | normalCallFinallyBlock->bbNum, placeToMoveAfter->bbNum); |
| 24714 | } |
| 24715 | } |
| 24716 | } |
| 24717 | |
| 24718 | // Clone the finally and retarget the normal return path and |
| 24719 | // any other path that happens to share that same return |
| 24720 | // point. For instance a construct like: |
| 24721 | // |
| 24722 | // try { } catch { } finally { } |
| 24723 | // |
| 24724 | // will have two call finally blocks, one for the normal exit |
// from the try, and the other for the exit from the
| 24726 | // catch. They'll both pass the same return point which is the |
| 24727 | // statement after the finally, so they can share the clone. |
| 24728 | // |
| 24729 | // Clone the finally body, and splice it into the flow graph |
// within the parent region of the try.
| 24731 | const unsigned finallyTryIndex = firstBlock->bbTryIndex; |
| 24732 | BasicBlock* insertAfter = nullptr; |
| 24733 | BlockToBlockMap blockMap(getAllocator()); |
| 24734 | bool clonedOk = true; |
| 24735 | unsigned cloneBBCount = 0; |
| 24736 | |
| 24737 | for (BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) |
| 24738 | { |
| 24739 | BasicBlock* newBlock; |
| 24740 | |
| 24741 | if (block == firstBlock) |
| 24742 | { |
| 24743 | // Put first cloned finally block into the appropriate |
| 24744 | // region, somewhere within or after the range of |
| 24745 | // callfinallys, depending on the EH implementation. |
| 24746 | const unsigned hndIndex = 0; |
| 24747 | BasicBlock* const nearBlk = cloneInsertAfter; |
| 24748 | newBlock = fgNewBBinRegion(block->bbJumpKind, finallyTryIndex, hndIndex, nearBlk); |
| 24749 | |
| 24750 | // If the clone ends up just after the finally, adjust |
| 24751 | // the stopping point for finally traversal. |
| 24752 | if (newBlock->bbNext == nextBlock) |
| 24753 | { |
| 24754 | assert(newBlock->bbPrev == lastBlock); |
| 24755 | nextBlock = newBlock; |
| 24756 | } |
| 24757 | } |
| 24758 | else |
| 24759 | { |
| 24760 | // Put subsequent blocks in the same region... |
| 24761 | const bool extendRegion = true; |
| 24762 | newBlock = fgNewBBafter(block->bbJumpKind, insertAfter, extendRegion); |
| 24763 | } |
| 24764 | |
| 24765 | cloneBBCount++; |
| 24766 | assert(cloneBBCount <= regionBBCount); |
| 24767 | |
| 24768 | insertAfter = newBlock; |
| 24769 | blockMap.Set(block, newBlock); |
| 24770 | |
| 24771 | clonedOk = BasicBlock::CloneBlockState(this, newBlock, block); |
| 24772 | |
| 24773 | if (!clonedOk) |
| 24774 | { |
| 24775 | break; |
| 24776 | } |
| 24777 | |
| 24778 | // Update block flags. Note a block can be both first and last. |
| 24779 | if (block == firstBlock) |
| 24780 | { |
| 24781 | // Mark the block as the start of the cloned finally. |
| 24782 | newBlock->bbFlags |= BBF_CLONED_FINALLY_BEGIN; |
| 24783 | } |
| 24784 | |
| 24785 | if (block == lastBlock) |
| 24786 | { |
| 24787 | // Mark the block as the end of the cloned finally. |
| 24788 | newBlock->bbFlags |= BBF_CLONED_FINALLY_END; |
| 24789 | } |
| 24790 | |
| 24791 | // Make sure clone block state hasn't munged the try region. |
| 24792 | assert(newBlock->bbTryIndex == finallyTryIndex); |
| 24793 | |
| 24794 | // Cloned handler block is no longer within the handler. |
| 24795 | newBlock->clearHndIndex(); |
| 24796 | |
| 24797 | // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them. |
| 24798 | assert(newBlock->bbJumpDest == nullptr); |
| 24799 | } |
| 24800 | |
| 24801 | if (!clonedOk) |
| 24802 | { |
| 24803 | // TODO: cleanup the partial clone? |
| 24804 | JITDUMP("Unable to clone the finally; skipping.\n" ); |
| 24805 | continue; |
| 24806 | } |
| 24807 | |
| 24808 | // We should have cloned all the finally region blocks. |
| 24809 | assert(cloneBBCount == regionBBCount); |
| 24810 | |
| 24811 | JITDUMP("Cloned finally blocks are: " FMT_BB " ... " FMT_BB "\n" , blockMap[firstBlock]->bbNum, |
| 24812 | blockMap[lastBlock]->bbNum); |
| 24813 | |
// Redirect any branches within the newly-cloned
| 24815 | // finally, and any finally returns to jump to the return |
| 24816 | // point. |
| 24817 | for (BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) |
| 24818 | { |
| 24819 | BasicBlock* newBlock = blockMap[block]; |
| 24820 | |
| 24821 | if (block->bbJumpKind == BBJ_EHFINALLYRET) |
| 24822 | { |
| 24823 | GenTreeStmt* finallyRet = newBlock->lastStmt(); |
| 24824 | GenTree* finallyRetExpr = finallyRet->gtStmtExpr; |
| 24825 | assert(finallyRetExpr->gtOper == GT_RETFILT); |
| 24826 | fgRemoveStmt(newBlock, finallyRet); |
| 24827 | newBlock->bbJumpKind = BBJ_ALWAYS; |
| 24828 | newBlock->bbJumpDest = normalCallFinallyReturn; |
| 24829 | |
| 24830 | fgAddRefPred(normalCallFinallyReturn, newBlock); |
| 24831 | } |
| 24832 | else |
| 24833 | { |
| 24834 | optCopyBlkDest(block, newBlock); |
| 24835 | optRedirectBlock(newBlock, &blockMap); |
| 24836 | } |
| 24837 | } |
| 24838 | |
| 24839 | // Modify the targeting call finallys to branch to the cloned |
| 24840 | // finally. Make a note if we see some calls that can't be |
| 24841 | // retargeted (since they want to return to other places). |
| 24842 | BasicBlock* const firstCloneBlock = blockMap[firstBlock]; |
| 24843 | bool retargetedAllCalls = true; |
| 24844 | BasicBlock* currentBlock = firstCallFinallyRangeBlock; |
| 24845 | |
| 24846 | while (currentBlock != endCallFinallyRangeBlock) |
| 24847 | { |
| 24848 | BasicBlock* nextBlockToScan = currentBlock->bbNext; |
| 24849 | |
| 24850 | if (currentBlock->isBBCallAlwaysPair()) |
| 24851 | { |
| 24852 | if (currentBlock->bbJumpDest == firstBlock) |
| 24853 | { |
| 24854 | BasicBlock* const leaveBlock = currentBlock->bbNext; |
| 24855 | BasicBlock* const postTryFinallyBlock = leaveBlock->bbJumpDest; |
| 24856 | |
| 24857 | // Note we must retarget all callfinallies that have this |
| 24858 | // continuation, or we can't clean up the continuation |
| 24859 | // block properly below, since it will be reachable both |
| 24860 | // by the cloned finally and by the called finally. |
| 24861 | if (postTryFinallyBlock == normalCallFinallyReturn) |
| 24862 | { |
| 24863 | // This call returns to the expected spot, so |
| 24864 | // retarget it to branch to the clone. |
| 24865 | currentBlock->bbJumpDest = firstCloneBlock; |
| 24866 | currentBlock->bbJumpKind = BBJ_ALWAYS; |
| 24867 | |
| 24868 | // Ref count updates. |
| 24869 | fgAddRefPred(firstCloneBlock, currentBlock); |
| 24870 | // fgRemoveRefPred(firstBlock, currentBlock); |
| 24871 | |
| 24872 | // Delete the leave block, which should be marked as |
| 24873 | // keep always. |
| 24874 | assert((leaveBlock->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0); |
| 24875 | nextBlock = leaveBlock->bbNext; |
| 24876 | |
| 24877 | leaveBlock->bbFlags &= ~BBF_KEEP_BBJ_ALWAYS; |
| 24878 | fgRemoveBlock(leaveBlock, true); |
| 24879 | |
| 24880 | // Make sure iteration isn't going off the deep end. |
| 24881 | assert(leaveBlock != endCallFinallyRangeBlock); |
| 24882 | } |
| 24883 | else |
| 24884 | { |
| 24885 | // We can't retarget this call since it |
| 24886 | // returns somewhere else. |
| 24887 | JITDUMP("Can't retarget callfinally in " FMT_BB " as it jumps to " FMT_BB ", not " FMT_BB "\n" , |
| 24888 | currentBlock->bbNum, postTryFinallyBlock->bbNum, normalCallFinallyReturn->bbNum); |
| 24889 | |
| 24890 | retargetedAllCalls = false; |
| 24891 | } |
| 24892 | } |
| 24893 | } |
| 24894 | |
| 24895 | currentBlock = nextBlockToScan; |
| 24896 | } |
| 24897 | |
// If we retargeted all calls, modify the EH descriptor to be
// try-fault instead of try-finally, and change the non-cloned
// finally's catch type to fault.
| 24901 | if (retargetedAllCalls) |
| 24902 | { |
| 24903 | JITDUMP("All callfinallys retargeted; changing finally to fault.\n" ); |
| 24904 | HBtab->ebdHandlerType = EH_HANDLER_FAULT_WAS_FINALLY; |
| 24905 | firstBlock->bbCatchTyp = BBCT_FAULT; |
| 24906 | } |
| 24907 | else |
| 24908 | { |
| 24909 | JITDUMP("Some callfinallys *not* retargeted, so region must remain as a finally.\n" ); |
| 24910 | } |
| 24911 | |
| 24912 | // Modify first block of cloned finally to be a "normal" block. |
| 24913 | BasicBlock* firstClonedBlock = blockMap[firstBlock]; |
| 24914 | firstClonedBlock->bbCatchTyp = BBCT_NONE; |
| 24915 | |
| 24916 | // Cleanup the continuation |
| 24917 | fgCleanupContinuation(normalCallFinallyReturn); |
| 24918 | |
| 24919 | // Todo -- mark cloned blocks as a cloned finally.... |
| 24920 | |
| 24921 | // Done! |
| 24922 | JITDUMP("\nDone with EH#%u\n\n" , XTnum); |
| 24923 | cloneCount++; |
| 24924 | } |
| 24925 | |
| 24926 | if (cloneCount > 0) |
| 24927 | { |
| 24928 | JITDUMP("fgCloneFinally() cloned %u finally handlers\n" , cloneCount); |
| 24929 | fgOptimizedFinally = true; |
| 24930 | |
| 24931 | #ifdef DEBUG |
| 24932 | if (verbose) |
| 24933 | { |
| 24934 | printf("\n*************** After fgCloneFinally()\n" ); |
| 24935 | fgDispBasicBlocks(); |
| 24936 | fgDispHandlerTab(); |
| 24937 | printf("\n" ); |
| 24938 | } |
| 24939 | |
| 24940 | fgVerifyHandlerTab(); |
| 24941 | fgDebugCheckBBlist(false, false); |
| 24942 | fgDebugCheckTryFinallyExits(); |
| 24943 | |
| 24944 | #endif // DEBUG |
| 24945 | } |
| 24946 | } |
| 24947 | |
| 24948 | #ifdef DEBUG |
| 24949 | |
| 24950 | //------------------------------------------------------------------------ |
| 24951 | // fgDebugCheckTryFinallyExits: validate normal flow from try-finally |
| 24952 | // or try-fault-was-finally. |
| 24953 | // |
| 24954 | // Notes: |
| 24955 | // |
| 24956 | // Normal control flow exiting the try block of a try-finally must |
| 24957 | // pass through the finally. This checker attempts to verify that by |
| 24958 | // looking at the control flow graph. |
| 24959 | // |
// Each path that exits the try of a try-finally (including try-finallys
// that were converted into try-faults by fgCloneFinally) should
| 24962 | // thus either execute a callfinally to the associated finally or else |
| 24963 | // jump to a block with the BBF_CLONED_FINALLY_BEGIN flag set. |
| 24964 | // |
| 24965 | // Depending on when this check is done, there may also be an empty |
| 24966 | // block along the path. |
| 24967 | // |
| 24968 | // Depending on the model for invoking finallys, the callfinallies may |
// lie in the enclosing region (callfinally thunks) or within the try
| 24970 | // region. |
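//
// For example (hypothetical block numbers, callfinally thunk model), a
// valid exit from the try looks like
//
//   BB01 [try]  BBJ_ALWAYS -> BB05, where BB05 is BBJ_CALLFINALLY to the finally    (case (a1) below)
//
// or, once the finally has been cloned,
//
//   BB01 [try]  BBJ_ALWAYS -> BB20, where BB20 has BBF_CLONED_FINALLY_BEGIN set     (case (b) below)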
| 24971 | |
| 24972 | void Compiler::fgDebugCheckTryFinallyExits() |
| 24973 | { |
| 24974 | unsigned XTnum = 0; |
| 24975 | EHblkDsc* HBtab = compHndBBtab; |
| 24976 | unsigned cloneCount = 0; |
| 24977 | bool allTryExitsValid = true; |
| 24978 | for (; XTnum < compHndBBtabCount; XTnum++, HBtab++) |
| 24979 | { |
| 24980 | const EHHandlerType handlerType = HBtab->ebdHandlerType; |
| 24981 | const bool isFinally = (handlerType == EH_HANDLER_FINALLY); |
| 24982 | const bool wasFinally = (handlerType == EH_HANDLER_FAULT_WAS_FINALLY); |
| 24983 | |
// Screen out regions that neither are nor were finallys.
| 24985 | if (!isFinally && !wasFinally) |
| 24986 | { |
| 24987 | continue; |
| 24988 | } |
| 24989 | |
| 24990 | // Walk blocks of the try, looking for normal control flow to |
| 24991 | // an ancestor region. |
| 24992 | |
| 24993 | BasicBlock* const firstTryBlock = HBtab->ebdTryBeg; |
| 24994 | BasicBlock* const lastTryBlock = HBtab->ebdTryLast; |
| 24995 | assert(firstTryBlock->getTryIndex() <= XTnum); |
| 24996 | assert(lastTryBlock->getTryIndex() <= XTnum); |
| 24997 | BasicBlock* const afterTryBlock = lastTryBlock->bbNext; |
| 24998 | BasicBlock* const finallyBlock = isFinally ? HBtab->ebdHndBeg : nullptr; |
| 24999 | |
| 25000 | for (BasicBlock* block = firstTryBlock; block != afterTryBlock; block = block->bbNext) |
| 25001 | { |
| 25002 | // Only check the directly contained blocks. |
| 25003 | assert(block->hasTryIndex()); |
| 25004 | |
| 25005 | if (block->getTryIndex() != XTnum) |
| 25006 | { |
| 25007 | continue; |
| 25008 | } |
| 25009 | |
| 25010 | // Look at each of the normal control flow possibilities. |
| 25011 | const unsigned numSuccs = block->NumSucc(); |
| 25012 | |
| 25013 | for (unsigned i = 0; i < numSuccs; i++) |
| 25014 | { |
| 25015 | BasicBlock* const succBlock = block->GetSucc(i); |
| 25016 | |
| 25017 | if (succBlock->hasTryIndex() && succBlock->getTryIndex() <= XTnum) |
| 25018 | { |
| 25019 | // Successor does not exit this try region. |
| 25020 | continue; |
| 25021 | } |
| 25022 | |
| 25023 | #if FEATURE_EH_CALLFINALLY_THUNKS |
| 25024 | |
| 25025 | // When there are callfinally thunks, callfinallies |
| 25026 | // logically "belong" to a child region and the exit |
| 25027 | // path validity will be checked when looking at the |
| 25028 | // try blocks in that region. |
| 25029 | if (block->bbJumpKind == BBJ_CALLFINALLY) |
| 25030 | { |
| 25031 | continue; |
| 25032 | } |
| 25033 | |
| 25034 | #endif // FEATURE_EH_CALLFINALLY_THUNKS |
| 25035 | |
| 25036 | // Now we know block lies directly within the try of a |
| 25037 | // try-finally, and succBlock is in an enclosing |
| 25038 | // region (possibly the method region). So this path |
| 25039 | // represents flow out of the try and should be |
| 25040 | // checked. |
| 25041 | // |
| 25042 | // There are various ways control can properly leave a |
| 25043 | // try-finally (or try-fault-was-finally): |
| 25044 | // |
| 25045 | // (a1) via a jump to a callfinally (only for finallys, only for call finally thunks) |
| 25046 | // (a2) via a callfinally (only for finallys, only for !call finally thunks) |
| 25047 | // (b) via a jump to a begin finally clone block |
| 25048 | // (c) via a jump to an empty block to (b) |
| 25049 | // (d) via a fallthrough to an empty block to (b) |
| 25050 | // (e) via the always half of a callfinally pair |
// (f) via an always jump from the end of a cloned finally
| 25052 | bool isCallToFinally = false; |
| 25053 | |
| 25054 | #if FEATURE_EH_CALLFINALLY_THUNKS |
| 25055 | if (succBlock->bbJumpKind == BBJ_CALLFINALLY) |
| 25056 | { |
| 25057 | // case (a1) |
| 25058 | isCallToFinally = isFinally && (succBlock->bbJumpDest == finallyBlock); |
| 25059 | } |
| 25060 | #else |
| 25061 | if (block->bbJumpKind == BBJ_CALLFINALLY) |
| 25062 | { |
| 25063 | // case (a2) |
| 25064 | isCallToFinally = isFinally && (block->bbJumpDest == finallyBlock); |
| 25065 | } |
| 25066 | #endif // FEATURE_EH_CALLFINALLY_THUNKS |
| 25067 | |
| 25068 | bool isJumpToClonedFinally = false; |
| 25069 | |
| 25070 | if (succBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN) |
| 25071 | { |
| 25072 | // case (b) |
| 25073 | isJumpToClonedFinally = true; |
| 25074 | } |
| 25075 | else if (succBlock->bbJumpKind == BBJ_ALWAYS) |
| 25076 | { |
| 25077 | if (succBlock->isEmpty()) |
| 25078 | { |
| 25079 | // case (c) |
| 25080 | BasicBlock* const succSuccBlock = succBlock->bbJumpDest; |
| 25081 | |
| 25082 | if (succSuccBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN) |
| 25083 | { |
| 25084 | isJumpToClonedFinally = true; |
| 25085 | } |
| 25086 | } |
| 25087 | } |
| 25088 | else if (succBlock->bbJumpKind == BBJ_NONE) |
| 25089 | { |
| 25090 | if (succBlock->isEmpty()) |
| 25091 | { |
| 25092 | BasicBlock* const succSuccBlock = succBlock->bbNext; |
| 25093 | |
| 25094 | // case (d) |
| 25095 | if (succSuccBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN) |
| 25096 | { |
| 25097 | isJumpToClonedFinally = true; |
| 25098 | } |
| 25099 | } |
| 25100 | } |
| 25101 | |
| 25102 | bool isReturnFromFinally = false; |
| 25103 | |
| 25104 | // Case (e). Ideally we'd have something stronger to |
| 25105 | // check here -- eg that we are returning from a call |
| 25106 | // to the right finally -- but there are odd cases |
| 25107 | // like orphaned second halves of callfinally pairs |
| 25108 | // that we need to tolerate. |
| 25109 | if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS) |
| 25110 | { |
| 25111 | isReturnFromFinally = true; |
| 25112 | } |
| 25113 | |
| 25114 | // Case (f) |
| 25115 | if (block->bbFlags & BBF_CLONED_FINALLY_END) |
| 25116 | { |
| 25117 | isReturnFromFinally = true; |
| 25118 | } |
| 25119 | |
| 25120 | const bool thisExitValid = isCallToFinally || isJumpToClonedFinally || isReturnFromFinally; |
| 25121 | |
| 25122 | if (!thisExitValid) |
| 25123 | { |
| 25124 | JITDUMP("fgCheckTryFinallyExitS: EH#%u exit via " FMT_BB " -> " FMT_BB " is invalid\n" , XTnum, |
| 25125 | block->bbNum, succBlock->bbNum); |
| 25126 | } |
| 25127 | |
| 25128 | allTryExitsValid = allTryExitsValid & thisExitValid; |
| 25129 | } |
| 25130 | } |
| 25131 | } |
| 25132 | |
| 25133 | if (!allTryExitsValid) |
| 25134 | { |
| 25135 | JITDUMP("fgCheckTryFinallyExits: method contains invalid try exit paths\n" ); |
| 25136 | assert(allTryExitsValid); |
| 25137 | } |
| 25138 | } |
| 25139 | |
| 25140 | #endif // DEBUG |
| 25141 | |
| 25142 | //------------------------------------------------------------------------ |
| 25143 | // fgCleanupContinuation: cleanup a finally continuation after a |
| 25144 | // finally is removed or converted to normal control flow. |
| 25145 | // |
| 25146 | // Notes: |
| 25147 | // The continuation is the block targeted by the second half of |
| 25148 | // a callfinally/always pair. |
| 25149 | // |
| 25150 | // Used by finally cloning, empty try removal, and empty |
| 25151 | // finally removal. |
| 25152 | // |
| 25153 | // BBF_FINALLY_TARGET bbFlag is left unchanged by this method |
| 25154 | // since it cannot be incrementally updated. Proper updates happen |
| 25155 | // when fgUpdateFinallyTargetFlags runs after all finally optimizations. |
| 25156 | |
| 25157 | void Compiler::fgCleanupContinuation(BasicBlock* continuation) |
| 25158 | { |
| 25159 | // The continuation may be a finalStep block. |
| 25160 | // It is now a normal block, so clear the special keep |
| 25161 | // always flag. |
| 25162 | continuation->bbFlags &= ~BBF_KEEP_BBJ_ALWAYS; |
| 25163 | |
| 25164 | #if !FEATURE_EH_FUNCLETS |
// Remove the GT_END_LFIN from the continuation.
| 25166 | // Note we only expect to see one such statement. |
| 25167 | bool foundEndLFin = false; |
| 25168 | for (GenTreeStmt* stmt = continuation->firstStmt(); stmt != nullptr; stmt = stmt->gtNextStmt) |
| 25169 | { |
| 25170 | GenTree* expr = stmt->gtStmtExpr; |
| 25171 | if (expr->gtOper == GT_END_LFIN) |
| 25172 | { |
| 25173 | assert(!foundEndLFin); |
| 25174 | fgRemoveStmt(continuation, stmt); |
| 25175 | foundEndLFin = true; |
| 25176 | } |
| 25177 | } |
| 25178 | assert(foundEndLFin); |
| 25179 | #endif // !FEATURE_EH_FUNCLETS |
| 25180 | } |
| 25181 | |
| 25182 | //------------------------------------------------------------------------ |
| 25183 | // fgUpdateFinallyTargetFlags: recompute BBF_FINALLY_TARGET bits for all blocks |
| 25184 | // after finally optimizations have run. |
| 25185 | |
| 25186 | void Compiler::fgUpdateFinallyTargetFlags() |
| 25187 | { |
| 25188 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 25189 | |
| 25190 | // Any fixup required? |
| 25191 | if (!fgOptimizedFinally) |
| 25192 | { |
| 25193 | JITDUMP("In fgUpdateFinallyTargetFlags - no finally opts, no fixup required\n" ); |
| 25194 | return; |
| 25195 | } |
| 25196 | |
| 25197 | JITDUMP("In fgUpdateFinallyTargetFlags, updating finally target flag bits\n" ); |
| 25198 | |
| 25199 | fgClearAllFinallyTargetBits(); |
| 25200 | fgAddFinallyTargetFlags(); |
| 25201 | |
| 25202 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 25203 | } |
| 25204 | |
| 25205 | //------------------------------------------------------------------------ |
| 25206 | // fgClearAllFinallyTargetBits: Clear all BBF_FINALLY_TARGET bits; these will need to be |
| 25207 | // recomputed later. |
| 25208 | // |
| 25209 | void Compiler::fgClearAllFinallyTargetBits() |
| 25210 | { |
| 25211 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 25212 | |
| 25213 | JITDUMP("*************** In fgClearAllFinallyTargetBits()\n" ); |
| 25214 | |
| 25215 | // Note that we clear the flags even if there are no EH clauses (compHndBBtabCount == 0) |
| 25216 | // in case bits are left over from EH clauses being deleted. |
| 25217 | |
| 25218 | // Walk all blocks, and reset the target bits. |
| 25219 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 25220 | { |
| 25221 | block->bbFlags &= ~BBF_FINALLY_TARGET; |
| 25222 | } |
| 25223 | |
| 25224 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 25225 | } |
| 25226 | |
| 25227 | //------------------------------------------------------------------------ |
| 25228 | // fgAddFinallyTargetFlags: Add BBF_FINALLY_TARGET bits to all finally targets. |
| 25229 | // |
| 25230 | void Compiler::fgAddFinallyTargetFlags() |
| 25231 | { |
| 25232 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 25233 | |
| 25234 | JITDUMP("*************** In fgAddFinallyTargetFlags()\n" ); |
| 25235 | |
| 25236 | if (compHndBBtabCount == 0) |
| 25237 | { |
| 25238 | JITDUMP("No EH in this method, no flags to set.\n" ); |
| 25239 | return; |
| 25240 | } |
| 25241 | |
| 25242 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 25243 | { |
| 25244 | if (block->isBBCallAlwaysPair()) |
| 25245 | { |
| 25246 | BasicBlock* const leave = block->bbNext; |
| 25247 | BasicBlock* const continuation = leave->bbJumpDest; |
| 25248 | |
| 25249 | if ((continuation->bbFlags & BBF_FINALLY_TARGET) == 0) |
| 25250 | { |
| 25251 | JITDUMP("Found callfinally " FMT_BB "; setting finally target bit on " FMT_BB "\n" , block->bbNum, |
| 25252 | continuation->bbNum); |
| 25253 | |
| 25254 | continuation->bbFlags |= BBF_FINALLY_TARGET; |
| 25255 | } |
| 25256 | } |
| 25257 | } |
| 25258 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) |
| 25259 | } |
| 25260 | |
| 25261 | //------------------------------------------------------------------------ |
| 25262 | // fgMergeFinallyChains: tail merge finally invocations |
| 25263 | // |
| 25264 | // Notes: |
| 25265 | // |
| 25266 | // Looks for common suffixes in chains of finally invocations |
| 25267 | // (callfinallys) and merges them. These typically arise from |
| 25268 | // try-finallys where there are multiple exit points in the try |
| 25269 | // that have the same target. |
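//
// For example (hypothetical block numbers): if callfinally pairs
// BB10/BB11 and BB20/BB21 both invoke the same finally and both
// return to continuation BB30, the first pair encountered (BB10/BB11)
// is registered as the canonical callfinally for BB30, and any
// BBJ_ALWAYS branch to BB20 is retargeted to BB10. The now-unreachable
// BB20/BB21 pair can then be cleaned up by later flow optimizations.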
| 25270 | |
| 25271 | void Compiler::fgMergeFinallyChains() |
| 25272 | { |
| 25273 | JITDUMP("\n*************** In fgMergeFinallyChains()\n" ); |
| 25274 | |
| 25275 | #if FEATURE_EH_FUNCLETS |
| 25276 | // We need to do this transformation before funclets are created. |
| 25277 | assert(!fgFuncletsCreated); |
| 25278 | #endif // FEATURE_EH_FUNCLETS |
| 25279 | |
| 25280 | // Assume we don't need to update the bbPreds lists. |
| 25281 | assert(!fgComputePredsDone); |
| 25282 | |
| 25283 | if (compHndBBtabCount == 0) |
| 25284 | { |
| 25285 | JITDUMP("No EH in this method, nothing to merge.\n" ); |
| 25286 | return; |
| 25287 | } |
| 25288 | |
| 25289 | if (opts.MinOpts()) |
| 25290 | { |
| 25291 | JITDUMP("Method compiled with minOpts, no merging.\n" ); |
| 25292 | return; |
| 25293 | } |
| 25294 | |
| 25295 | if (opts.compDbgCode) |
| 25296 | { |
| 25297 | JITDUMP("Method compiled with debug codegen, no merging.\n" ); |
| 25298 | return; |
| 25299 | } |
| 25300 | |
| 25301 | bool enableMergeFinallyChains = true; |
| 25302 | |
| 25303 | #if !FEATURE_EH_FUNCLETS |
| 25304 | // For non-funclet models (x86) the callfinallys may contain |
| 25305 | // statements and the continuations contain GT_END_LFINs. So no |
// merging is possible until the GT_END_LFIN blocks can be merged,
| 25307 | // and merging is not safe unless the callfinally blocks are split. |
| 25308 | JITDUMP("EH using non-funclet model; merging not yet implemented.\n" ); |
| 25309 | enableMergeFinallyChains = false; |
| 25310 | #endif // !FEATURE_EH_FUNCLETS |
| 25311 | |
| 25312 | #if !FEATURE_EH_CALLFINALLY_THUNKS |
| 25313 | // For non-thunk EH models (arm32) the callfinallys may contain |
| 25314 | // statements, and merging is not safe unless the callfinally |
| 25315 | // blocks are split. |
| 25316 | JITDUMP("EH using non-callfinally thunk model; merging not yet implemented.\n" ); |
| 25317 | enableMergeFinallyChains = false; |
| 25318 | #endif |
| 25319 | |
| 25320 | if (!enableMergeFinallyChains) |
| 25321 | { |
| 25322 | JITDUMP("fgMergeFinallyChains disabled\n" ); |
| 25323 | return; |
| 25324 | } |
| 25325 | |
| 25326 | #ifdef DEBUG |
| 25327 | if (verbose) |
| 25328 | { |
| 25329 | printf("\n*************** Before fgMergeFinallyChains()\n" ); |
| 25330 | fgDispBasicBlocks(); |
| 25331 | fgDispHandlerTab(); |
| 25332 | printf("\n" ); |
| 25333 | } |
| 25334 | #endif // DEBUG |
| 25335 | |
| 25336 | // Look for finallys. |
| 25337 | bool hasFinally = false; |
| 25338 | for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) |
| 25339 | { |
| 25340 | EHblkDsc* const HBtab = &compHndBBtab[XTnum]; |
| 25341 | |
| 25342 | // Check if this is a try/finally. |
| 25343 | if (HBtab->HasFinallyHandler()) |
| 25344 | { |
| 25345 | hasFinally = true; |
| 25346 | break; |
| 25347 | } |
| 25348 | } |
| 25349 | |
| 25350 | if (!hasFinally) |
| 25351 | { |
| 25352 | JITDUMP("Method does not have any try-finallys; no merging.\n" ); |
| 25353 | return; |
| 25354 | } |
| 25355 | |
| 25356 | // Process finallys from outside in, merging as we go. This gives |
| 25357 | // us the desired bottom-up tail merge order for callfinally |
| 25358 | // chains: outer merges may enable inner merges. |
| 25359 | bool canMerge = false; |
| 25360 | bool didMerge = false; |
| 25361 | BlockToBlockMap continuationMap(getAllocator()); |
| 25362 | |
| 25363 | // Note XTnum is signed here so we can count down. |
| 25364 | for (int XTnum = compHndBBtabCount - 1; XTnum >= 0; XTnum--) |
| 25365 | { |
| 25366 | EHblkDsc* const HBtab = &compHndBBtab[XTnum]; |
| 25367 | |
| 25368 | // Screen out non-finallys |
| 25369 | if (!HBtab->HasFinallyHandler()) |
| 25370 | { |
| 25371 | continue; |
| 25372 | } |
| 25373 | |
| 25374 | JITDUMP("Examining callfinallys for EH#%d.\n" , XTnum); |
| 25375 | |
| 25376 | // Find all the callfinallys that invoke this finally. |
| 25377 | BasicBlock* firstCallFinallyRangeBlock = nullptr; |
| 25378 | BasicBlock* endCallFinallyRangeBlock = nullptr; |
| 25379 | ehGetCallFinallyBlockRange(XTnum, &firstCallFinallyRangeBlock, &endCallFinallyRangeBlock); |
| 25380 | |
| 25381 | // Clear out any stale entries in the continuation map |
| 25382 | continuationMap.RemoveAll(); |
| 25383 | |
| 25384 | // Build a map from each continuation to the "canonical" |
| 25385 | // callfinally for that continuation. |
| 25386 | unsigned callFinallyCount = 0; |
| 25387 | BasicBlock* const beginHandlerBlock = HBtab->ebdHndBeg; |
| 25388 | |
| 25389 | for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock; |
| 25390 | currentBlock = currentBlock->bbNext) |
| 25391 | { |
| 25392 | // Ignore "retless" callfinallys (where the finally doesn't return). |
| 25393 | if (currentBlock->isBBCallAlwaysPair() && (currentBlock->bbJumpDest == beginHandlerBlock)) |
| 25394 | { |
| 25395 | // The callfinally must be empty, so that we can |
| 25396 | // safely retarget anything that branches here to |
// another callfinally with the same continuation.
| 25398 | assert(currentBlock->isEmpty()); |
| 25399 | |
| 25400 | // This callfinally invokes the finally for this try. |
| 25401 | callFinallyCount++; |
| 25402 | |
| 25403 | // Locate the continuation |
| 25404 | BasicBlock* const leaveBlock = currentBlock->bbNext; |
| 25405 | BasicBlock* const continuationBlock = leaveBlock->bbJumpDest; |
| 25406 | |
| 25407 | // If this is the first time we've seen this |
| 25408 | // continuation, register this callfinally as the |
| 25409 | // canonical one. |
| 25410 | if (!continuationMap.Lookup(continuationBlock)) |
| 25411 | { |
| 25412 | continuationMap.Set(continuationBlock, currentBlock); |
| 25413 | } |
| 25414 | } |
| 25415 | } |
| 25416 | |
| 25417 | // Now we've seen all the callfinallys and their continuations. |
| 25418 | JITDUMP("EH#%i has %u callfinallys, %u continuations\n" , XTnum, callFinallyCount, continuationMap.GetCount()); |
| 25419 | |
| 25420 | // If there are more callfinallys than continuations, some of the |
| 25421 | // callfinallys must share a continuation, and we can merge them. |
| 25422 | const bool tryMerge = callFinallyCount > continuationMap.GetCount(); |
| 25423 | |
| 25424 | if (!tryMerge) |
| 25425 | { |
| 25426 | JITDUMP("EH#%i does not have any mergeable callfinallys\n" , XTnum); |
| 25427 | continue; |
| 25428 | } |
| 25429 | |
| 25430 | canMerge = true; |
| 25431 | |
| 25432 | // Walk the callfinally region, looking for blocks that jump |
| 25433 | // to a callfinally that invokes this try's finally, and make |
| 25434 | // sure they all jump to the appropriate canonical |
| 25435 | // callfinally. |
| 25436 | for (BasicBlock* currentBlock = firstCallFinallyRangeBlock; currentBlock != endCallFinallyRangeBlock; |
| 25437 | currentBlock = currentBlock->bbNext) |
| 25438 | { |
| 25439 | bool merged = fgRetargetBranchesToCanonicalCallFinally(currentBlock, beginHandlerBlock, continuationMap); |
| 25440 | didMerge = didMerge || merged; |
| 25441 | } |
| 25442 | } |
| 25443 | |
| 25444 | if (!canMerge) |
| 25445 | { |
| 25446 | JITDUMP("Method had try-finallys, but did not have any mergeable finally chains.\n" ); |
| 25447 | } |
| 25448 | else |
| 25449 | { |
| 25450 | if (didMerge) |
| 25451 | { |
| 25452 | JITDUMP("Method had mergeable try-finallys and some callfinally merges were performed.\n" ); |
| 25453 | |
| 25454 | #if DEBUG |
| 25455 | if (verbose) |
| 25456 | { |
| 25457 | printf("\n*************** After fgMergeFinallyChains()\n" ); |
| 25458 | fgDispBasicBlocks(); |
| 25459 | fgDispHandlerTab(); |
| 25460 | printf("\n" ); |
| 25461 | } |
| 25462 | |
| 25463 | #endif // DEBUG |
| 25464 | } |
| 25465 | else |
| 25466 | { |
| 25467 | // We may not end up doing any merges, because we are only |
| 25468 | // merging continuations for callfinallys that can |
| 25469 | // actually be invoked, and the importer may leave |
| 25470 | // unreachable callfinallys around (for instance, if it |
| 25471 | // is forced to re-import a leave). |
| 25472 | JITDUMP("Method had mergeable try-finallys but no callfinally merges were performed,\n" |
| 25473 | "likely the non-canonical callfinallys were unreachable\n" ); |
| 25474 | } |
| 25475 | } |
| 25476 | } |
| 25477 | |
| 25478 | //------------------------------------------------------------------------ |
| 25479 | // fgRetargetBranchesToCanonicalCallFinally: find non-canonical callfinally |
| 25480 | // invocations and make them canonical. |
| 25481 | // |
| 25482 | // Arguments: |
| 25483 | // block -- block to examine for call finally invocation |
| 25484 | // handler -- start of the finally region for the try |
| 25485 | // continuationMap -- map giving the canonical callfinally for |
| 25486 | // each continuation |
| 25487 | // |
| 25488 | // Returns: |
| 25489 | // true iff the block's branch was retargeted. |
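//
// For example (hypothetical block numbers): if 'block' ends with
// BBJ_ALWAYS -> BB20, where BB20/BB21 is a callfinally pair invoking
// 'handler' and returning to continuation BB30, and the canonical
// callfinally for BB30 is BB10, then 'block' is redirected to jump to
// BB10 and the pred lists of BB10 and BB20 are updated accordingly.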
| 25490 | |
| 25491 | bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, |
| 25492 | BasicBlock* handler, |
| 25493 | BlockToBlockMap& continuationMap) |
| 25494 | { |
| 25495 | // We expect callfinallys to be invoked by a BBJ_ALWAYS at this |
| 25496 | // stage in compilation. |
| 25497 | if (block->bbJumpKind != BBJ_ALWAYS) |
| 25498 | { |
| 25499 | // Possible paranoia assert here -- no flow successor of |
| 25500 | // this block should be a callfinally for this try. |
| 25501 | return false; |
| 25502 | } |
| 25503 | |
| 25504 | // Screen out cases that are not callfinallys to the right |
| 25505 | // handler. |
| 25506 | BasicBlock* const callFinally = block->bbJumpDest; |
| 25507 | |
| 25508 | if (!callFinally->isBBCallAlwaysPair()) |
| 25509 | { |
| 25510 | return false; |
| 25511 | } |
| 25512 | |
| 25513 | if (callFinally->bbJumpDest != handler) |
| 25514 | { |
| 25515 | return false; |
| 25516 | } |
| 25517 | |
| 25518 | // Ok, this is a callfinally that invokes the right handler. |
| 25519 | // Get its continuation. |
| 25520 | BasicBlock* const leaveBlock = callFinally->bbNext; |
| 25521 | BasicBlock* const continuationBlock = leaveBlock->bbJumpDest; |
| 25522 | |
| 25523 | // Find the canonical callfinally for that continuation. |
| 25524 | BasicBlock* const canonicalCallFinally = continuationMap[continuationBlock]; |
| 25525 | assert(canonicalCallFinally != nullptr); |
| 25526 | |
// If the block already jumps to the canonical callfinally, no work is needed.
| 25528 | if (block->bbJumpDest == canonicalCallFinally) |
| 25529 | { |
| 25530 | JITDUMP(FMT_BB " already canonical\n" , block->bbNum); |
| 25531 | return false; |
| 25532 | } |
| 25533 | |
| 25534 | // Else, retarget it so that it does... |
| 25535 | JITDUMP("Redirecting branch in " FMT_BB " from " FMT_BB " to " FMT_BB ".\n" , block->bbNum, callFinally->bbNum, |
| 25536 | canonicalCallFinally->bbNum); |
| 25537 | |
| 25538 | block->bbJumpDest = canonicalCallFinally; |
| 25539 | fgAddRefPred(canonicalCallFinally, block); |
| 25540 | assert(callFinally->bbRefs > 0); |
| 25541 | fgRemoveRefPred(callFinally, block); |
| 25542 | |
| 25543 | return true; |
| 25544 | } |
| 25545 | |
| 25546 | //------------------------------------------------------------------------ |
| 25547 | // fgMeasureIR: count and return the number of IR nodes in the function. |
| 25548 | // |
| 25549 | unsigned Compiler::fgMeasureIR() |
| 25550 | { |
| 25551 | unsigned nodeCount = 0; |
| 25552 | |
| 25553 | for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) |
| 25554 | { |
| 25555 | if (!block->IsLIR()) |
| 25556 | { |
| 25557 | for (GenTreeStmt* stmt = block->firstStmt(); stmt != nullptr; stmt = stmt->getNextStmt()) |
| 25558 | { |
| 25559 | fgWalkTreePre(&stmt->gtStmtExpr, |
| 25560 | [](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult { |
| 25561 | (*reinterpret_cast<unsigned*>(data->pCallbackData))++; |
| 25562 | return Compiler::WALK_CONTINUE; |
| 25563 | }, |
| 25564 | &nodeCount); |
| 25565 | } |
| 25566 | } |
| 25567 | else |
| 25568 | { |
| 25569 | for (GenTree* node : LIR::AsRange(block)) |
| 25570 | { |
| 25571 | nodeCount++; |
| 25572 | } |
| 25573 | } |
| 25574 | } |
| 25575 | |
| 25576 | return nodeCount; |
| 25577 | } |
| 25578 | |
| 25579 | //------------------------------------------------------------------------ |
// fgCompDominatedByExceptionalEntryBlocks: compute which blocks are
// dominated by an exceptional (non-normal) entry block.
| 25582 | // |
| 25583 | void Compiler::fgCompDominatedByExceptionalEntryBlocks() |
| 25584 | { |
| 25585 | assert(fgEnterBlksSetValid); |
| 25586 | if (BlockSetOps::Count(this, fgEnterBlks) != 1) // There are exception entries. |
| 25587 | { |
| 25588 | for (unsigned i = 1; i <= fgBBNumMax; ++i) |
| 25589 | { |
| 25590 | BasicBlock* block = fgBBInvPostOrder[i]; |
| 25591 | if (BlockSetOps::IsMember(this, fgEnterBlks, block->bbNum)) |
| 25592 | { |
| 25593 | if (fgFirstBB != block) // skip the normal entry. |
| 25594 | { |
| 25595 | block->SetDominatedByExceptionalEntryFlag(); |
| 25596 | } |
| 25597 | } |
| 25598 | else if (block->bbIDom->IsDominatedByExceptionalEntryFlag()) |
| 25599 | { |
| 25600 | block->SetDominatedByExceptionalEntryFlag(); |
| 25601 | } |
| 25602 | } |
| 25603 | } |
| 25604 | } |
| 25605 | |
| 25606 | //------------------------------------------------------------------------ |
// fgNeedReturnSpillTemp: Answers whether the inlinee needs to spill all
// returns to a temp.
| 25609 | // |
| 25610 | // Return Value: |
| 25611 | // true if the inlinee has to spill return exprs. |
| 25612 | bool Compiler::fgNeedReturnSpillTemp() |
| 25613 | { |
| 25614 | assert(compIsForInlining()); |
| 25615 | return (lvaInlineeReturnSpillTemp != BAD_VAR_NUM); |
| 25616 | } |
| 25617 | |
| 25618 | //------------------------------------------------------------------------ |
// fgUseThrowHelperBlocks: Determine whether the compiler should use throw helper blocks.
| 25620 | // |
| 25621 | // Note: |
| 25622 | // For debuggable code, codegen will generate the 'throw' code inline. |
| 25623 | // Return Value: |
| 25624 | // true if 'throw' helper block should be created. |
| 25625 | bool Compiler::fgUseThrowHelperBlocks() |
| 25626 | { |
| 25627 | return !opts.compDbgCode; |
| 25628 | } |
| 25629 | |