// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                          BasicBlock                                       XX
XX                                                                           XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

/*****************************************************************************/
#ifndef _BLOCK_H_
#define _BLOCK_H_
/*****************************************************************************/

#include "vartype.h" // For "var_types"
#include "_typeinfo.h"
/*****************************************************************************/

// Defines VARSET_TP
#include "varset.h"

#include "blockset.h"
#include "jitstd.h"
#include "bitvec.h"
#include "jithashtable.h"

/*****************************************************************************/
typedef BitVec EXPSET_TP;
#if LARGE_EXPSET
#define EXPSET_SZ 64
#else
#define EXPSET_SZ 32
#endif

typedef BitVec          ASSERT_TP;
typedef BitVec_ValArg_T ASSERT_VALARG_TP;
typedef BitVec_ValRet_T ASSERT_VALRET_TP;

// We use the following format when printing the BasicBlock number: bbNum.
// This define is used with string concatenation to put this in printf format strings. (Note that %u means unsigned int.)
#define FMT_BB "BB%02u"
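// For illustration, a sketch of how FMT_BB composes into a format string via string
// concatenation ('block' here is an assumed in-scope BasicBlock*):
//
//     printf("block " FMT_BB " has weight %u\n", block->bbNum, block->bbWeight);
//     // prints, e.g., "block BB03 has weight 100"
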
| 47 |  | 
| 48 | /***************************************************************************** | 
| 49 |  * | 
| 50 |  *  Each basic block ends with a jump which is described as a value | 
| 51 |  *  of the following enumeration. | 
| 52 |  */ | 
| 53 |  | 
| 54 | // clang-format off | 
| 55 |  | 
| 56 | enum BBjumpKinds : BYTE | 
| 57 | { | 
| 58 |     BBJ_EHFINALLYRET,// block ends with 'endfinally' (for finally or fault) | 
| 59 |     BBJ_EHFILTERRET, // block ends with 'endfilter' | 
| 60 |     BBJ_EHCATCHRET,  // block ends with a leave out of a catch (only #if FEATURE_EH_FUNCLETS) | 
| 61 |     BBJ_THROW,       // block ends with 'throw' | 
| 62 |     BBJ_RETURN,      // block ends with 'ret' | 
| 63 |     BBJ_NONE,        // block flows into the next one (no jump) | 
| 64 |     BBJ_ALWAYS,      // block always jumps to the target | 
| 65 |     BBJ_LEAVE,       // block always jumps to the target, maybe out of guarded region. Only used until importing. | 
| 66 |     BBJ_CALLFINALLY, // block always calls the target finally | 
| 67 |     BBJ_COND,        // block conditionally jumps to the target | 
| 68 |     BBJ_SWITCH,      // block ends with a switch statement | 
| 69 |  | 
| 70 |     BBJ_COUNT | 
| 71 | }; | 
| 72 |  | 
| 73 | // clang-format on | 
| 74 |  | 
| 75 | struct GenTree; | 
| 76 | struct GenTreeStmt; | 
| 77 | struct BasicBlock; | 
| 78 | class Compiler; | 
| 79 | class typeInfo; | 
| 80 | struct BasicBlockList; | 
| 81 | struct flowList; | 
| 82 | struct EHblkDsc; | 
| 83 |  | 
| 84 | /***************************************************************************** | 
| 85 |  * | 
| 86 |  *  The following describes a switch block. | 
| 87 |  * | 
| 88 |  *  Things to know: | 
| 89 |  *  1. If bbsHasDefault is true, the default case is the last one in the array of basic block addresses | 
| 90 |  *     namely bbsDstTab[bbsCount - 1]. | 
| 91 |  *  2. bbsCount must be at least 1, for the default case. bbsCount cannot be zero. It appears that the ECMA spec | 
| 92 |  *     allows for a degenerate switch with zero cases. Normally, the optimizer will optimize degenerate | 
| 93 |  *     switches with just a default case to a BBJ_ALWAYS branch, and a switch with just two cases to a BBJ_COND. | 
| 94 |  *     However, in debuggable code, we might not do that, so bbsCount might be 1. | 
| 95 |  */ | 
| 96 | struct BBswtDesc | 
| 97 | { | 
| 98 |     unsigned     bbsCount;  // count of cases (includes 'default' if bbsHasDefault) | 
| 99 |     BasicBlock** bbsDstTab; // case label table address | 
| 100 |     bool         bbsHasDefault; | 
| 101 |  | 
| 102 |     BBswtDesc() : bbsHasDefault(true) | 
| 103 |     { | 
| 104 |     } | 
| 105 |  | 
| 106 |     void removeDefault() | 
| 107 |     { | 
| 108 |         assert(bbsHasDefault); | 
| 109 |         assert(bbsCount > 0); | 
| 110 |         bbsHasDefault = false; | 
| 111 |         bbsCount--; | 
| 112 |     } | 
| 113 |  | 
| 114 |     BasicBlock* getDefault() | 
| 115 |     { | 
| 116 |         assert(bbsHasDefault); | 
| 117 |         assert(bbsCount > 0); | 
| 118 |         return bbsDstTab[bbsCount - 1]; | 
| 119 |     } | 
| 120 | }; | 
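
// For illustration, a sketch of walking a switch descriptor's targets (assumes an
// in-scope 'block' known to be BBJ_SWITCH; the default, if present, is last):
//
//     BBswtDesc* swtDesc = block->bbJumpSwt;
//     for (unsigned i = 0; i < swtDesc->bbsCount; i++)
//     {
//         BasicBlock* target = swtDesc->bbsDstTab[i];
//         // ... process target; swtDesc->getDefault() is bbsDstTab[bbsCount - 1]
//     }
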
| 121 |  | 
| 122 | struct StackEntry | 
| 123 | { | 
| 124 |     GenTree* val; | 
| 125 |     typeInfo seTypeInfo; | 
| 126 | }; | 
| 127 | /*****************************************************************************/ | 
| 128 |  | 
| 129 | enum ThisInitState | 
| 130 | { | 
| 131 |     TIS_Bottom, // We don't know anything about the 'this' pointer. | 
| 132 |     TIS_Uninit, // The 'this' pointer for this constructor is known to be uninitialized. | 
| 133 |     TIS_Init,   // The 'this' pointer for this constructor is known to be initialized. | 
| 134 |     TIS_Top,    // This results from merging the state of two blocks one with TIS_Unint and the other with TIS_Init. | 
| 135 |                 // We use this in fault blocks to prevent us from accessing the 'this' pointer, but otherwise | 
| 136 |                 // allowing the fault block to generate code. | 
| 137 | }; | 
| 138 |  | 
| 139 | struct EntryState | 
| 140 | { | 
| 141 |     ThisInitState thisInitialized; // used to track whether the this ptr is initialized. | 
| 142 |     unsigned      esStackDepth;    // size of esStack | 
| 143 |     StackEntry*   esStack;         // ptr to  stack | 
| 144 | }; | 
| 145 |  | 
| 146 | // Enumeration of the kinds of memory whose state changes the compiler tracks | 
| 147 | enum MemoryKind | 
| 148 | { | 
| 149 |     ByrefExposed = 0, // Includes anything byrefs can read/write (everything in GcHeap, address-taken locals, | 
| 150 |                       //                                          unmanaged heap, callers' locals, etc.) | 
| 151 |     GcHeap,           // Includes actual GC heap, and also static fields | 
| 152 |     MemoryKindCount,  // Number of MemoryKinds | 
| 153 | }; | 
| 154 | #ifdef DEBUG | 
| 155 | const char* const memoryKindNames[] = {"ByrefExposed" , "GcHeap" }; | 
| 156 | #endif // DEBUG | 
| 157 |  | 
| 158 | // Bitmask describing a set of memory kinds (usable in bitfields) | 
| 159 | typedef unsigned int MemoryKindSet; | 
| 160 |  | 
| 161 | // Bitmask for a MemoryKindSet containing just the specified MemoryKind | 
| 162 | inline MemoryKindSet memoryKindSet(MemoryKind memoryKind) | 
| 163 | { | 
| 164 |     return (1U << memoryKind); | 
| 165 | } | 
| 166 |  | 
| 167 | // Bitmask for a MemoryKindSet containing the specified MemoryKinds | 
| 168 | template <typename... MemoryKinds> | 
| 169 | inline MemoryKindSet memoryKindSet(MemoryKind memoryKind, MemoryKinds... memoryKinds) | 
| 170 | { | 
| 171 |     return memoryKindSet(memoryKind) | memoryKindSet(memoryKinds...); | 
| 172 | } | 
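
// For illustration, a sketch of building and testing memory-kind sets with the
// helpers above (all names here are from this header):
//
//     MemoryKindSet writes = memoryKindSet(GcHeap, ByrefExposed); // variadic overload
//     if ((writes & memoryKindSet(GcHeap)) != 0)
//     {
//         // the set includes GcHeap
//     }
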
| 173 |  | 
| 174 | // Bitmask containing all the MemoryKinds | 
| 175 | const MemoryKindSet fullMemoryKindSet = (1 << MemoryKindCount) - 1; | 
| 176 |  | 
| 177 | // Bitmask containing no MemoryKinds | 
| 178 | const MemoryKindSet emptyMemoryKindSet = 0; | 
| 179 |  | 
| 180 | // Standard iterator class for iterating through MemoryKinds | 
| 181 | class MemoryKindIterator | 
| 182 | { | 
| 183 |     int value; | 
| 184 |  | 
| 185 | public: | 
| 186 |     explicit inline MemoryKindIterator(int val) : value(val) | 
| 187 |     { | 
| 188 |     } | 
| 189 |     inline MemoryKindIterator& operator++() | 
| 190 |     { | 
| 191 |         ++value; | 
| 192 |         return *this; | 
| 193 |     } | 
| 194 |     inline MemoryKindIterator operator++(int) | 
| 195 |     { | 
| 196 |         return MemoryKindIterator(value++); | 
| 197 |     } | 
| 198 |     inline MemoryKind operator*() | 
| 199 |     { | 
| 200 |         return static_cast<MemoryKind>(value); | 
| 201 |     } | 
| 202 |     friend bool operator==(const MemoryKindIterator& left, const MemoryKindIterator& right) | 
| 203 |     { | 
| 204 |         return left.value == right.value; | 
| 205 |     } | 
| 206 |     friend bool operator!=(const MemoryKindIterator& left, const MemoryKindIterator& right) | 
| 207 |     { | 
| 208 |         return left.value != right.value; | 
| 209 |     } | 
| 210 | }; | 
| 211 |  | 
| 212 | // Empty struct that allows enumerating memory kinds via `for(MemoryKind kind : allMemoryKinds())` | 
| 213 | struct allMemoryKinds | 
| 214 | { | 
| 215 |     inline allMemoryKinds() | 
| 216 |     { | 
| 217 |     } | 
| 218 |     inline MemoryKindIterator begin() | 
| 219 |     { | 
| 220 |         return MemoryKindIterator(0); | 
| 221 |     } | 
| 222 |     inline MemoryKindIterator end() | 
| 223 |     { | 
| 224 |         return MemoryKindIterator(MemoryKindCount); | 
| 225 |     } | 
| 226 | }; | 
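
// For illustration, a sketch of enumerating memory kinds, e.g. when dumping a
// per-kind table (memoryKindNames is only available under DEBUG):
//
//     for (MemoryKind memoryKind : allMemoryKinds())
//     {
//         printf("%s\n", memoryKindNames[memoryKind]);
//     }
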
| 227 |  | 
| 228 | // This encapsulates the "exception handling" successors of a block.  That is, | 
| 229 | // if a basic block BB1 occurs in a try block, we consider the first basic block | 
| 230 | // BB2 of the corresponding handler to be an "EH successor" of BB1.  Because we | 
| 231 | // make the conservative assumption that control flow can jump from a try block | 
| 232 | // to its handler at any time, the immediate (regular control flow) | 
| 233 | // predecessor(s) of the the first block of a try block are also considered to | 
| 234 | // have the first block of the handler as an EH successor.  This makes variables that | 
| 235 | // are "live-in" to the handler become "live-out" for these try-predecessor block, | 
| 236 | // so that they become live-in to the try -- which we require. | 
| 237 | // | 
| 238 | // This class maintains the minimum amount of state necessary to implement | 
| 239 | // successor iteration. The basic block whose successors are enumerated and | 
| 240 | // the compiler need to be provided by Advance/Current's callers. In addition | 
| 241 | // to iterators, this allows the use of other approaches that are more space | 
| 242 | // efficient. | 
| 243 | class EHSuccessorIterPosition | 
| 244 | { | 
| 245 |     // The number of "regular" (i.e., non-exceptional) successors that remain to | 
| 246 |     // be considered.  If BB1 has successor BB2, and BB2 is the first block of a | 
| 247 |     // try block, then we consider the catch block of BB2's try to be an EH | 
| 248 |     // successor of BB1.  This captures the iteration over the successors of BB1 | 
| 249 |     // for this purpose.  (In reverse order; we're done when this field is 0). | 
| 250 |     unsigned m_remainingRegSuccs; | 
| 251 |  | 
| 252 |     // The current "regular" successor of "m_block" that we're considering. | 
| 253 |     BasicBlock* m_curRegSucc; | 
| 254 |  | 
| 255 |     // The current try block.  If non-null, then the current successor "m_curRegSucc" | 
| 256 |     // is the first block of the handler of this block.  While this try block has | 
| 257 |     // enclosing try's that also start with "m_curRegSucc", the corresponding handlers will be | 
| 258 |     // further EH successors. | 
| 259 |     EHblkDsc* m_curTry; | 
| 260 |  | 
| 261 |     // Requires that "m_curTry" is NULL.  Determines whether there is, as | 
| 262 |     // discussed just above, a regular successor that's the first block of a | 
| 263 |     // try; if so, sets "m_curTry" to that try block.  (As noted above, selecting | 
| 264 |     // the try containing the current regular successor as the "current try" may cause | 
| 265 |     // multiple first-blocks of catches to be yielded as EH successors: trys enclosing | 
| 266 |     // the current try are also included if they also start with the current EH successor.) | 
| 267 |     void FindNextRegSuccTry(Compiler* comp, BasicBlock* block); | 
| 268 |  | 
| 269 | public: | 
| 270 |     // Constructs a position that "points" to the first EH successor of `block`. | 
| 271 |     EHSuccessorIterPosition(Compiler* comp, BasicBlock* block); | 
| 272 |  | 
| 273 |     // Constructs a position that "points" past the last EH successor of `block` ("end" position). | 
| 274 |     EHSuccessorIterPosition() : m_remainingRegSuccs(0), m_curTry(nullptr) | 
| 275 |     { | 
| 276 |     } | 
| 277 |  | 
| 278 |     // Go on to the next EH successor. | 
| 279 |     void Advance(Compiler* comp, BasicBlock* block); | 
| 280 |  | 
| 281 |     // Returns the current EH successor. | 
| 282 |     // Requires that "*this" is not equal to the "end" position. | 
| 283 |     BasicBlock* Current(Compiler* comp, BasicBlock* block); | 
| 284 |  | 
| 285 |     // Returns "true" iff "*this" is equal to "ehsi". | 
| 286 |     bool operator==(const EHSuccessorIterPosition& ehsi) | 
| 287 |     { | 
| 288 |         return m_curTry == ehsi.m_curTry && m_remainingRegSuccs == ehsi.m_remainingRegSuccs; | 
| 289 |     } | 
| 290 |  | 
| 291 |     bool operator!=(const EHSuccessorIterPosition& ehsi) | 
| 292 |     { | 
| 293 |         return !((*this) == ehsi); | 
| 294 |     } | 
| 295 | }; | 
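
// For illustration, a sketch of driving a position object directly, without the
// iterator wrapper ('comp' and 'block' are assumed in-scope; compare against a
// default-constructed position to detect the end):
//
//     for (EHSuccessorIterPosition pos(comp, block); pos != EHSuccessorIterPosition();
//          pos.Advance(comp, block))
//     {
//         BasicBlock* ehSucc = pos.Current(comp, block);
//         // ... process ehSucc
//     }
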
| 296 |  | 
| 297 | // Yields both normal and EH successors (in that order) in one iteration. | 
| 298 | // | 
| 299 | // This class maintains the minimum amount of state necessary to implement | 
| 300 | // successor iteration. The basic block whose successors are enumerated and | 
| 301 | // the compiler need to be provided by Advance/Current's callers. In addition | 
| 302 | // to iterators, this allows the use of other approaches that are more space | 
| 303 | // efficient. | 
| 304 | class AllSuccessorIterPosition | 
| 305 | { | 
| 306 |     // Normal successor position | 
| 307 |     unsigned m_numNormSuccs; | 
| 308 |     unsigned m_remainingNormSucc; | 
| 309 |     // EH successor position | 
| 310 |     EHSuccessorIterPosition m_ehIter; | 
| 311 |  | 
| 312 |     // True iff m_blk is a BBJ_CALLFINALLY block, and the current try block of m_ehIter, | 
| 313 |     // the first block of whose handler would be next yielded, is the jump target of m_blk. | 
| 314 |     inline bool CurTryIsBlkCallFinallyTarget(Compiler* comp, BasicBlock* block); | 
| 315 |  | 
| 316 | public: | 
| 317 |     // Constructs a position that "points" to the first successor of `block`. | 
| 318 |     inline AllSuccessorIterPosition(Compiler* comp, BasicBlock* block); | 
| 319 |  | 
| 320 |     // Constructs a position that "points" past the last successor of `block` ("end" position). | 
| 321 |     AllSuccessorIterPosition() : m_remainingNormSucc(0), m_ehIter() | 
| 322 |     { | 
| 323 |     } | 
| 324 |  | 
| 325 |     // Go on to the next successor. | 
| 326 |     inline void Advance(Compiler* comp, BasicBlock* block); | 
| 327 |  | 
| 328 |     // Returns the current successor. | 
| 329 |     // Requires that "*this" is not equal to the "end" position. | 
| 330 |     inline BasicBlock* Current(Compiler* comp, BasicBlock* block); | 
| 331 |  | 
| 332 |     bool IsCurrentEH() | 
| 333 |     { | 
| 334 |         return m_remainingNormSucc == 0; | 
| 335 |     } | 
| 336 |  | 
| 337 |     bool HasCurrent() | 
| 338 |     { | 
| 339 |         return *this != AllSuccessorIterPosition(); | 
| 340 |     } | 
| 341 |  | 
| 342 |     // Returns "true" iff "*this" is equal to "asi". | 
| 343 |     bool operator==(const AllSuccessorIterPosition& asi) | 
| 344 |     { | 
| 345 |         return (m_remainingNormSucc == asi.m_remainingNormSucc) && (m_ehIter == asi.m_ehIter); | 
| 346 |     } | 
| 347 |  | 
| 348 |     bool operator!=(const AllSuccessorIterPosition& asi) | 
| 349 |     { | 
| 350 |         return !((*this) == asi); | 
| 351 |     } | 
| 352 | }; | 
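
// For illustration, a sketch using HasCurrent()/IsCurrentEH() to distinguish the
// normal successors from the EH successors as they are yielded ('comp' and 'block'
// are assumed in-scope):
//
//     for (AllSuccessorIterPosition pos(comp, block); pos.HasCurrent(); pos.Advance(comp, block))
//     {
//         BasicBlock* succ = pos.Current(comp, block);
//         if (pos.IsCurrentEH())
//         {
//             // succ is an EH successor; all normal successors have been yielded
//         }
//     }
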
| 353 |  | 
| 354 | //------------------------------------------------------------------------ | 
| 355 | // BasicBlock: describes a basic block in the flowgraph. | 
| 356 | // | 
| 357 | // Note that this type derives from LIR::Range in order to make the LIR | 
| 358 | // utilities that are polymorphic over basic block and scratch ranges | 
| 359 | // faster and simpler. | 
| 360 | // | 
| 361 | struct BasicBlock : private LIR::Range | 
| 362 | { | 
| 363 |     friend class LIR; | 
| 364 |  | 
| 365 |     BasicBlock* bbNext; // next BB in ascending PC offset order | 
| 366 |     BasicBlock* bbPrev; | 
| 367 |  | 
| 368 |     void setNext(BasicBlock* next) | 
| 369 |     { | 
| 370 |         bbNext = next; | 
| 371 |         if (next) | 
| 372 |         { | 
| 373 |             next->bbPrev = this; | 
| 374 |         } | 
| 375 |     } | 
| 376 |  | 
| 377 |     unsigned __int64 bbFlags; // see BBF_xxxx below | 
| 378 |  | 
| 379 |     unsigned bbNum; // the block's number | 
| 380 |  | 
| 381 |     unsigned bbPostOrderNum; // the block's post order number in the graph. | 
| 382 |     unsigned bbRefs; // number of blocks that can reach here, either by fall-through or a branch. If this falls to zero, | 
| 383 |                      // the block is unreachable. | 
| 384 |  | 
| 385 | // clang-format off | 
| 386 |  | 
| 387 | #define BBF_VISITED             0x00000001 // BB visited during optimizations | 
| 388 | #define BBF_MARKED              0x00000002 // BB marked  during optimizations | 
| 389 | #define BBF_CHANGED             0x00000004 // input/output of this block has changed | 
| 390 | #define BBF_REMOVED             0x00000008 // BB has been removed from bb-list | 
| 391 |  | 
| 392 | #define BBF_DONT_REMOVE         0x00000010 // BB should not be removed during flow graph optimizations | 
| 393 | #define BBF_IMPORTED            0x00000020 // BB byte-code has been imported | 
| 394 | #define BBF_INTERNAL            0x00000040 // BB has been added by the compiler | 
| 395 | #define BBF_FAILED_VERIFICATION 0x00000080 // BB has verification exception | 
| 396 |  | 
| 397 | #define BBF_TRY_BEG             0x00000100 // BB starts a 'try' block | 
| 398 | #define BBF_FUNCLET_BEG         0x00000200 // BB is the beginning of a funclet | 
| 399 | #define BBF_HAS_NULLCHECK       0x00000400 // BB contains a null check | 
| 400 | #define BBF_NEEDS_GCPOLL        0x00000800 // This BB is the source of a back edge and needs a GC Poll | 
| 401 |  | 
| 402 | #define BBF_RUN_RARELY          0x00001000 // BB is rarely run (catch clauses, blocks with throws etc) | 
| 403 | #define BBF_LOOP_HEAD           0x00002000 // BB is the head of a loop | 
| 404 | #define BBF_LOOP_CALL0          0x00004000 // BB starts a loop that sometimes won't call | 
| 405 | #define BBF_LOOP_CALL1          0x00008000 // BB starts a loop that will always     call | 
| 406 |  | 
| 407 | #define BBF_HAS_LABEL           0x00010000 // BB needs a label | 
| 408 | #define BBF_JMP_TARGET          0x00020000 // BB is a target of an implicit/explicit jump | 
| 409 | #define BBF_HAS_JMP             0x00040000 // BB executes a JMP instruction (instead of return) | 
| 410 | #define BBF_GC_SAFE_POINT       0x00080000 // BB has a GC safe point (a call).  More abstractly, BB does not require a | 
| 411 |                                            // (further) poll -- this may be because this BB has a call, or, in some | 
| 412 |                                            // cases, because the BB occurs in a loop, and we've determined that all | 
| 413 |                                            // paths in the loop body leading to BB include a call. | 
| 414 |  | 
| 415 | #define BBF_HAS_VTABREF         0x00100000 // BB contains reference of vtable | 
| 416 | #define BBF_HAS_IDX_LEN         0x00200000 // BB contains simple index or length expressions on an array local var. | 
| 417 | #define BBF_HAS_NEWARRAY        0x00400000 // BB contains 'new' of an array | 
| 418 | #define BBF_HAS_NEWOBJ          0x00800000 // BB contains 'new' of an object type. | 
| 419 |  | 
| 420 | #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) | 
| 421 |  | 
| 422 | #define BBF_FINALLY_TARGET      0x01000000 // BB is the target of a finally return: where a finally will return during | 
| 423 |                                            // non-exceptional flow. Because the ARM calling sequence for calling a | 
| 424 |                                            // finally explicitly sets the return address to the finally target and jumps | 
| 425 |                                            // to the finally, instead of using a call instruction, ARM needs this to | 
| 426 |                                            // generate correct code at the finally target, to allow for proper stack | 
| 427 |                                            // unwind from within a non-exceptional call to a finally. | 
| 428 |  | 
| 429 | #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_) | 
| 430 |  | 
| 431 | #define BBF_BACKWARD_JUMP       0x02000000 // BB is surrounded by a backward jump/switch arc | 
| 432 | #define BBF_RETLESS_CALL        0x04000000 // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired | 
| 433 |                                            // BBJ_ALWAYS); see isBBCallAlwaysPair(). | 
| 434 | #define       0x08000000 // BB is a loop preheader block | 
| 435 |  | 
| 436 | #define BBF_COLD                0x10000000 // BB is cold | 
| 437 | #define BBF_PROF_WEIGHT         0x20000000 // BB weight is computed from profile data | 
| 438 | #define BBF_IS_LIR              0x40000000 // Set if the basic block contains LIR (as opposed to HIR) | 
| 439 | #define BBF_KEEP_BBJ_ALWAYS     0x80000000 // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind | 
| 440 |                                            // as BBJ_ALWAYS. Used for the paired BBJ_ALWAYS block following the | 
| 441 |                                            // BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a | 
| 442 |                                            // finally. | 
| 443 |  | 
| 444 | #define BBF_CLONED_FINALLY_BEGIN    0x100000000 // First block of a cloned finally region | 
| 445 | #define BBF_CLONED_FINALLY_END      0x200000000 // Last block of a cloned finally region | 
| 446 |  | 
| 447 | // clang-format on | 
| 448 |  | 
| 449 | #define BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY 0x400000000 // Block is dominated by exceptional entry. | 
| 450 |  | 
| 451 | // Flags that relate blocks to loop structure. | 
| 452 |  | 
| 453 | #define BBF_LOOP_FLAGS (BBF_LOOP_PREHEADER | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1) | 
| 454 |  | 
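    // For illustration, a sketch of the usual bbFlags idioms: flags are set and
    // cleared with bitwise ops, and tested against masks like BBF_LOOP_FLAGS
    // ('block' is an assumed in-scope BasicBlock*):
    //
    //     block->bbFlags |= BBF_LOOP_HEAD;  // mark as a loop head
    //     if ((block->bbFlags & BBF_LOOP_FLAGS) != 0)
    //     {
    //         // block participates in loop structure in some way
    //     }
    //     block->bbFlags &= ~BBF_LOOP_HEAD; // clear the flag
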
    bool isRunRarely() const
    {
        return ((bbFlags & BBF_RUN_RARELY) != 0);
    }
    bool isLoopHead() const
    {
        return ((bbFlags & BBF_LOOP_HEAD) != 0);
    }

// Flags to update when two blocks are compacted

#define BBF_COMPACT_UPD                                                                                                \
    (BBF_CHANGED | BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_NEEDS_GCPOLL | BBF_HAS_IDX_LEN | BBF_BACKWARD_JUMP |          \
     BBF_HAS_NEWARRAY | BBF_HAS_NEWOBJ)

// Flags a block should not have had before it is split.

#define BBF_SPLIT_NONEXIST                                                                                             \
    (BBF_CHANGED | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_RETLESS_CALL | BBF_LOOP_PREHEADER | BBF_COLD)

// Flags lost by the top block when a block is split.
// Note, this is a conservative guess.
// For example, the top block might or might not have BBF_GC_SAFE_POINT,
// but we assume it does not have BBF_GC_SAFE_POINT any more.

#define BBF_SPLIT_LOST (BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END)

// Flags gained by the bottom block when a block is split.
// Note, this is a conservative guess.
// For example, the bottom block might or might not have BBF_HAS_NEWARRAY,
// but we assume it has BBF_HAS_NEWARRAY.

// TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ?

#define BBF_SPLIT_GAINED                                                                                               \
    (BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY |          \
     BBF_PROF_WEIGHT | BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END)

#ifndef __GNUC__ // GCC doesn't like C_ASSERT at global scope
    static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
    static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_GAINED) == 0);
#endif

#ifdef DEBUG
    void     dspFlags();                   // Print the flags
    unsigned dspCheapPreds();              // Print the predecessors (bbCheapPreds)
    unsigned dspPreds();                   // Print the predecessors (bbPreds)
    unsigned dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH
                                           // regions are printed: see NumSucc() for details.
    void dspJumpKind();                    // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
    void dspBlockHeader(Compiler* compiler,
                        bool      showKind  = true,
                        bool      showFlags = false,
                        bool showPreds = true); // Print a simple basic block header for various output, including a
                                                // list of predecessors and successors.
    const char* dspToString(int blockNumPadding = 0);
#endif // DEBUG

    typedef unsigned weight_t; // Type used to hold block and edge weights
                               // Note that for CLR v2.0 and earlier our
                               // block weights were stored using unsigned shorts

#define BB_UNITY_WEIGHT 100 // how much a normal execute-once block weighs
#define BB_LOOP_WEIGHT 8    // how much more loops are weighted
#define BB_ZERO_WEIGHT 0
#define BB_MAX_WEIGHT ULONG_MAX // we're using an 'unsigned' for the weight
#define BB_VERY_HOT_WEIGHT 256  // how many average hits a BB has (per BBT scenario run) for this block
                                // to be considered as very hot

    weight_t bbWeight; // The dynamic execution weight of this block

    // getCalledCount -- get the value used to normalize weights for this method
    weight_t getCalledCount(Compiler* comp);

    // getBBWeight -- get the normalized weight of this block
    weight_t getBBWeight(Compiler* comp);

    // hasProfileWeight -- Returns true if this block's weight came from profile data
    bool hasProfileWeight() const
    {
        return ((this->bbFlags & BBF_PROF_WEIGHT) != 0);
    }

    // setBBWeight -- if the block weight is not derived from a profile,
    // then set the weight to the input weight, making sure to not overflow BB_MAX_WEIGHT.
    // Note: to set the weight from profile data, use setBBProfileWeight instead.
    void setBBWeight(weight_t weight)
    {
        if (!hasProfileWeight())
        {
            this->bbWeight = min(weight, BB_MAX_WEIGHT);
        }
    }

    // setBBProfileWeight -- Set the profile-derived weight for a basic block
    void setBBProfileWeight(unsigned weight)
    {
        this->bbFlags |= BBF_PROF_WEIGHT;
        this->bbWeight = weight;
    }

    // modifyBBWeight -- same as setBBWeight, but also make sure that if the block is rarely run, it stays that
    // way, and if it's not rarely run then its weight never drops below 1.
    void modifyBBWeight(weight_t weight)
    {
        if (this->bbWeight != BB_ZERO_WEIGHT)
        {
            setBBWeight(max(weight, 1));
        }
    }

    // this block will inherit the same weight and relevant bbFlags as bSrc
    void inheritWeight(BasicBlock* bSrc)
    {
        this->bbWeight = bSrc->bbWeight;

        if (bSrc->hasProfileWeight())
        {
            this->bbFlags |= BBF_PROF_WEIGHT;
        }
        else
        {
            this->bbFlags &= ~BBF_PROF_WEIGHT;
        }

        if (this->bbWeight == 0)
        {
            this->bbFlags |= BBF_RUN_RARELY;
        }
        else
        {
            this->bbFlags &= ~BBF_RUN_RARELY;
        }
    }

    // Similar to inheritWeight(), but we're splitting a block (such as creating blocks for qmark removal).
    // So, specify a percentage (0 to 99; if it's 100, just use inheritWeight()) of the weight that we're
    // going to inherit. Since the number isn't exact, clear the BBF_PROF_WEIGHT flag.
    void inheritWeightPercentage(BasicBlock* bSrc, unsigned percentage)
    {
        assert(0 <= percentage && percentage < 100);

        // Check for overflow
        if (bSrc->bbWeight * 100 <= bSrc->bbWeight)
        {
            this->bbWeight = bSrc->bbWeight;
        }
        else
        {
            this->bbWeight = bSrc->bbWeight * percentage / 100;
        }

        this->bbFlags &= ~BBF_PROF_WEIGHT;

        if (this->bbWeight == 0)
        {
            this->bbFlags |= BBF_RUN_RARELY;
        }
        else
        {
            this->bbFlags &= ~BBF_RUN_RARELY;
        }
    }
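
    // For illustration, a sketch of how these helpers combine when splitting a
    // block in two (the names 'top' and 'bottom' are hypothetical):
    //
    //     bottom->inheritWeight(top);               // same weight and profile/rarely-run flags
    //     bottom->inheritWeightPercentage(top, 50); // or: roughly half the weight, no longer
    //                                               // considered profile-derived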

    // makeBlockHot()
    //     This is used to override any profiling data
    //     and force a block to be in the hot region.
    //     We only call this method for handler entry points
    //     and only when HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION is 1.
    //     Doing this helps fgReorderBlocks() by telling
    //     it to try to move these blocks into the hot region.
    //     Note that we do this strictly as an optimization,
    //     not for correctness. fgDetermineFirstColdBlock()
    //     will find all handler entry points and ensure that,
    //     for now, we don't place them in the cold section.
    //
    void makeBlockHot()
    {
        if (this->bbWeight == BB_ZERO_WEIGHT)
        {
            this->bbFlags &= ~BBF_RUN_RARELY;  // Clear any RarelyRun flag
            this->bbFlags &= ~BBF_PROF_WEIGHT; // Clear any profile-derived flag
            this->bbWeight = 1;
        }
    }

    bool isMaxBBWeight()
    {
        return (bbWeight == BB_MAX_WEIGHT);
    }

    // Returns "true" if the block is empty. Empty here means there are no statement
    // trees *except* PHI definitions.
    bool isEmpty();

    // Returns "true" iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
    // a block corresponding to an exit from the try of a try/finally.  In the flow graph,
    // this becomes a block that calls the finally, and a second, immediately
    // following empty block (in the bbNext chain) to which the finally will return, and which
    // branches unconditionally to the next block to be executed outside the try/finally.
    // Note that code is often generated differently than this description. For example, on ARM,
    // the target of the BBJ_ALWAYS is loaded in LR (the return register), and a direct jump is
    // made to the 'finally'. The effect is that the 'finally' returns directly to the target of
    // the BBJ_ALWAYS. A "retless" BBJ_CALLFINALLY is one that has no corresponding BBJ_ALWAYS.
    // This can happen if the finally is known to not return (e.g., it contains a 'throw'). In
    // that case, the BBJ_CALLFINALLY block has BBF_RETLESS_CALL set. Note that ARM never has
    // "retless" BBJ_CALLFINALLY blocks due to a requirement to use the BBJ_ALWAYS for
    // generating code.
    bool isBBCallAlwaysPair()
    {
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
        if (this->bbJumpKind == BBJ_CALLFINALLY)
#else
        if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL))
#endif
        {
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
            // On ARM, there are no retless BBJ_CALLFINALLY.
            assert(!(this->bbFlags & BBF_RETLESS_CALL));
#endif
            // Assert that the next block is a BBJ_ALWAYS of the proper form.
            assert(this->bbNext != nullptr);
            assert(this->bbNext->bbJumpKind == BBJ_ALWAYS);
            assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS);
            assert(this->bbNext->isEmpty());

            return true;
        }
        else
        {
            return false;
        }
    }

    BBjumpKinds bbJumpKind; // jump (if any) at the end of this block

    /* The following union describes the jump target(s) of this block */
    union {
        unsigned    bbJumpOffs; // PC offset (temporary only)
        BasicBlock* bbJumpDest; // basic block
        BBswtDesc*  bbJumpSwt;  // switch descriptor
    };

    // NumSucc() gives the number of successors, and GetSucc() returns a given numbered successor.
    //
    // There are two versions of these functions: ones that take a Compiler* and ones that don't. You must
    // always use a matching set. Thus, if you call NumSucc() without a Compiler*, you must also call
    // GetSucc() without a Compiler*.
    //
    // The behavior of NumSucc()/GetSucc() is different when passed a Compiler* for blocks that end in:
    // (1) BBJ_EHFINALLYRET (a return from a finally or fault block)
    // (2) BBJ_EHFILTERRET (a return from EH filter block)
    // (3) BBJ_SWITCH
    //
    // For BBJ_EHFINALLYRET, if no Compiler* is passed, then the block is considered to have no
    // successor. If Compiler* is passed, we figure out the actual successors. Some cases will want one behavior,
    // other cases the other. For example, IL verification requires that these blocks end in an empty operand
    // stack, and since the dataflow analysis of IL verification is concerned only with the contents of the
    // operand stack, we can consider the finally block to have no successors. But a more general dataflow
    // analysis that is tracking the contents of local variables might want to consider *all* successors,
    // and would pass the current Compiler object.
    //
    // Similarly, BBJ_EHFILTERRET blocks are assumed to have no successors if Compiler* is not passed; if
    // Compiler* is passed, NumSucc/GetSucc yields the first block of the try block's handler.
    //
    // For BBJ_SWITCH, if Compiler* is not passed, then all switch successors are returned. If Compiler*
    // is passed, then only unique switch successors are returned; the duplicate successors are omitted.
    //
    // Note that for BBJ_COND, which has two successors (fall through and condition true branch target),
    // only the unique targets are returned. Thus, if both targets are the same, NumSucc() will only return 1
    // instead of 2.

    // NumSucc: Returns the number of successors of "this".
    unsigned NumSucc();
    unsigned NumSucc(Compiler* comp);

    // GetSucc: Returns the "i"th successor. Requires (0 <= i < NumSucc()).
    BasicBlock* GetSucc(unsigned i);
    BasicBlock* GetSucc(unsigned i, Compiler* comp);
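
    // For illustration, the standard successor-iteration idiom using a matched
    // pair of calls (the Compiler*-taking overloads must be used together):
    //
    //     for (unsigned i = 0; i < block->NumSucc(comp); i++)
    //     {
    //         BasicBlock* succ = block->GetSucc(i, comp);
    //         // ... process succ
    //     }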

    BasicBlock* GetUniquePred(Compiler* comp);

    BasicBlock* GetUniqueSucc();

    unsigned countOfInEdges() const
    {
        return bbRefs;
    }

    __declspec(property(get = getBBTreeList, put = setBBTreeList)) GenTree* bbTreeList; // the body of the block.

    GenTree* getBBTreeList() const
    {
        return m_firstNode;
    }

    void setBBTreeList(GenTree* tree)
    {
        m_firstNode = tree;
    }

    EntryState* bbEntryState; // verifier tracked state of all entries in stack.

#define NO_BASE_TMP UINT_MAX // base# to use when we have none
    unsigned bbStkTempsIn;   // base# for input stack temps
    unsigned bbStkTempsOut;  // base# for output stack temps

#define MAX_XCPTN_INDEX (USHRT_MAX - 1)

    // It would be nice to make bbTryIndex and bbHndIndex private, but there is still code that uses them directly,
    // especially Compiler::fgNewBBinRegion() and friends.

    // index, into the compHndBBtab table, of innermost 'try' clause containing the BB (used for raising exceptions).
    // Stored as index + 1; 0 means "no try index".
    unsigned short bbTryIndex;

    // index, into the compHndBBtab table, of innermost handler (filter, catch, fault/finally) containing the BB.
    // Stored as index + 1; 0 means "no handler index".
    unsigned short bbHndIndex;

    // Given two EH indices that are either bbTryIndex or bbHndIndex (or related), determine if index1 might be more
    // deeply nested than index2. Both index1 and index2 are in the range [0..compHndBBtabCount], where 0 means
    // "main function" and otherwise the value is an index into compHndBBtab[]. Note that "sibling" EH regions will
    // have a numeric index relationship that doesn't indicate nesting, whereas a more deeply nested region must have
    // a lower index than the region it is nested within. Note that if you compare a single block's bbTryIndex and
    // bbHndIndex, there is guaranteed to be a nesting relationship, since that block can't be simultaneously in two
    // sibling EH regions. In that case, "maybe" is actually "definitely".
    static bool ehIndexMaybeMoreNested(unsigned index1, unsigned index2)
    {
        if (index1 == 0)
        {
            // index1 is in the main method. It can't be more deeply nested than index2.
            return false;
        }
        else if (index2 == 0)
        {
            // index1 represents an EH region, whereas index2 is the main method. Thus, index1 is more deeply nested.
            assert(index1 > 0);
            return true;
        }
        else
        {
            // If index1 has a smaller index, it might be more deeply nested than index2.
            assert(index1 > 0);
            assert(index2 > 0);
            return index1 < index2;
        }
    }

    // catch type: class token of handler, or one of BBCT_*. Only set on first block of catch handler.
    unsigned bbCatchTyp;

    bool hasTryIndex() const
    {
        return bbTryIndex != 0;
    }
    bool hasHndIndex() const
    {
        return bbHndIndex != 0;
    }
    unsigned getTryIndex() const
    {
        assert(bbTryIndex != 0);
        return bbTryIndex - 1;
    }
    unsigned getHndIndex() const
    {
        assert(bbHndIndex != 0);
        return bbHndIndex - 1;
    }
    void setTryIndex(unsigned val)
    {
        bbTryIndex = (unsigned short)(val + 1);
        assert(bbTryIndex != 0);
    }
    void setHndIndex(unsigned val)
    {
        bbHndIndex = (unsigned short)(val + 1);
        assert(bbHndIndex != 0);
    }
    void clearTryIndex()
    {
        bbTryIndex = 0;
    }
    void clearHndIndex()
    {
        bbHndIndex = 0;
    }

    void copyEHRegion(const BasicBlock* from)
    {
        bbTryIndex = from->bbTryIndex;
        bbHndIndex = from->bbHndIndex;
    }

    static bool sameTryRegion(const BasicBlock* blk1, const BasicBlock* blk2)
    {
        return blk1->bbTryIndex == blk2->bbTryIndex;
    }
    static bool sameHndRegion(const BasicBlock* blk1, const BasicBlock* blk2)
    {
        return blk1->bbHndIndex == blk2->bbHndIndex;
    }
    static bool sameEHRegion(const BasicBlock* blk1, const BasicBlock* blk2)
    {
        return sameTryRegion(blk1, blk2) && sameHndRegion(blk1, blk2);
    }
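
    // For illustration, a sketch of the EH-index idioms these accessors support
    // ('block' and 'other' are assumed in-scope BasicBlock*):
    //
    //     if (block->hasTryIndex())
    //     {
    //         unsigned tryIndex = block->getTryIndex(); // zero-based index into compHndBBtab
    //     }
    //     if (BasicBlock::sameEHRegion(block, other))
    //     {
    //         // both blocks are in the same try and handler regions
    //     }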

// Some non-zero value that will not collide with real tokens for bbCatchTyp
#define BBCT_NONE 0x00000000
#define BBCT_FAULT 0xFFFFFFFC
#define BBCT_FINALLY 0xFFFFFFFD
#define BBCT_FILTER 0xFFFFFFFE
#define BBCT_FILTER_HANDLER 0xFFFFFFFF
#define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY)

    // TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead
    union {
        unsigned short bbStkDepth; // stack depth on entry
        unsigned short bbFPinVars; // number of inner enregistered FP vars
    };

    // Basic block predecessor lists. Early in compilation, some phases might need to compute "cheap" predecessor
    // lists. These are stored in bbCheapPreds, computed by fgComputeCheapPreds(). If bbCheapPreds is valid,
    // 'fgCheapPredsValid' will be 'true'. Later, the "full" predecessor lists are created by fgComputePreds(), stored
    // in 'bbPreds', and then maintained throughout compilation. 'fgComputePredsDone' will be 'true' after the
    // full predecessor lists are created. See the comment at fgComputeCheapPreds() to see how those differ from
    // the "full" variant.
    union {
        BasicBlockList* bbCheapPreds; // ptr to list of cheap predecessors (used before normal preds are computed)
        flowList*       bbPreds;      // ptr to list of predecessors
    };

    BlockSet    bbReach; // Set of all blocks that can reach this one
    BasicBlock* bbIDom;  // Represents the closest dominator to this block (called the Immediate
                         // Dominator) used to compute the dominance tree.
    unsigned bbDfsNum;   // The index of this block in DFS reverse post order
                         // relative to the flow graph.

    IL_OFFSET bbCodeOffs;    // IL offset of the beginning of the block
    IL_OFFSET bbCodeOffsEnd; // IL offset past the end of the block. Thus, the [bbCodeOffs..bbCodeOffsEnd)
                             // range is not inclusive of the end offset. The count of IL bytes in the block
                             // is bbCodeOffsEnd - bbCodeOffs, assuming neither are BAD_IL_OFFSET.

#ifdef DEBUG
    void dspBlockILRange(); // Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for
                            // BAD_IL_OFFSET.
#endif                      // DEBUG

    VARSET_TP bbVarUse; // variables used     by block (before an assignment)
    VARSET_TP bbVarDef; // variables assigned by block (before a use)

    VARSET_TP bbLiveIn;  // variables live on entry
    VARSET_TP bbLiveOut; // variables live on exit

    // Use, def, live in/out information for the implicit memory variable.
    MemoryKindSet bbMemoryUse : MemoryKindCount; // must be set for any MemoryKinds this block references
    MemoryKindSet bbMemoryDef : MemoryKindCount; // must be set for any MemoryKinds this block mutates
    MemoryKindSet bbMemoryLiveIn : MemoryKindCount;
    MemoryKindSet bbMemoryLiveOut : MemoryKindCount;
    MemoryKindSet bbMemoryHavoc : MemoryKindCount; // If true, at some point the block does an operation
                                                   // that leaves memory in an unknown state. (E.g.,
                                                   // unanalyzed call, store through unknown pointer...)

    // We want to make phi functions for the special implicit var memory.  But since this is not a real
    // lclVar, and thus has no local #, we can't use a GenTreePhiArg.  Instead, we use this struct.
    struct MemoryPhiArg
    {
        unsigned      m_ssaNum;  // SSA# for incoming value.
        MemoryPhiArg* m_nextArg; // Next arg in the list, else NULL.

        unsigned GetSsaNum()
        {
            return m_ssaNum;
        }

        MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) : m_ssaNum(ssaNum), m_nextArg(nextArg)
        {
        }

        void* operator new(size_t sz, class Compiler* comp);
    };
    static MemoryPhiArg* EmptyMemoryPhiDef; // Special value (0x1, FWIW) to represent a to-be-filled-in Phi arg list
                                            // for Heap.
    MemoryPhiArg* bbMemorySsaPhiFunc[MemoryKindCount]; // If the "in" Heap SSA var is not a phi definition, this value
                                                       // is NULL. Otherwise, it is either the special value
                                                       // EmptyMemoryPhiDef, to indicate that Heap needs a phi
                                                       // definition on entry, or else it is the linked list of the
                                                       // phi arguments.
    unsigned bbMemorySsaNumIn[MemoryKindCount];  // The SSA # of memory on entry to the block.
    unsigned bbMemorySsaNumOut[MemoryKindCount]; // The SSA # of memory on exit from the block.

    VARSET_TP bbScope; // variables in scope over the block

    void InitVarSets(class Compiler* comp);

    /* The following are the standard bit sets for dataflow analysis.
     *  We perform CSE and range-checks at the same time
     *  and assertion propagation separately,
     *  thus we can union them since the two operations are completely disjoint.
     */

    union {
        EXPSET_TP bbCseGen; // CSEs computed by block
#if ASSERTION_PROP
        ASSERT_TP bbAssertionGen; // value assignments computed by block
#endif
    };

    union {
        EXPSET_TP bbCseIn; // CSEs available on entry
#if ASSERTION_PROP
        ASSERT_TP bbAssertionIn; // value assignments available on entry
#endif
    };

    union {
        EXPSET_TP bbCseOut; // CSEs available on exit
#if ASSERTION_PROP
        ASSERT_TP bbAssertionOut; // value assignments available on exit
#endif
    };

    void* bbEmitCookie;

#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
    void* bbUnwindNopEmitCookie;
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)

#ifdef VERIFIER
    stackDesc bbStackIn;  // stack descriptor for  input
    stackDesc bbStackOut; // stack descriptor for output

    verTypeVal* bbTypesIn;  // list of variable types on  input
    verTypeVal* bbTypesOut; // list of variable types on output
#endif                      // VERIFIER

    /* The following fields are used for loop detection */

    typedef unsigned char loopNumber;
    static const unsigned NOT_IN_LOOP = UCHAR_MAX;

#ifdef DEBUG
    // This is the label a loop gets as part of the second, reachability-based
    // loop discovery mechanism.  This is apparently only used for debugging.
    // We hope we'll eventually just have one loop-discovery mechanism, and this will go away.
    loopNumber bbLoopNum; // set to 'n' for a loop #n header
#endif                    // DEBUG

    loopNumber bbNatLoopNum; // Index, in optLoopTable, of most-nested loop that contains this block,
                             // or else NOT_IN_LOOP if this block is not in a loop.

#define MAX_LOOP_NUM 16       // we're using a 'short' for the mask
#define LOOP_MASK_TP unsigned // must be big enough for a mask

//-------------------------------------------------------------------------

#if MEASURE_BLOCK_SIZE
    static size_t s_Size;
    static size_t s_Count;
#endif // MEASURE_BLOCK_SIZE

    bool bbFallsThrough();

    // Our slop fraction is 1/128 of the block weight, rounded off
    static weight_t GetSlopFraction(weight_t weightBlk)
    {
        return ((weightBlk + 64) / 128);
    }

    // Given the edge b1 -> b2, calculate the slop fraction by
    // using the higher of the two block weights
    static weight_t GetSlopFraction(BasicBlock* b1, BasicBlock* b2)
    {
        return GetSlopFraction(max(b1->bbWeight, b2->bbWeight));
    }
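
    // For illustration, a worked example: for an edge between blocks of weight
    // 300 and 1000, the two-block overload uses the higher weight, so the slop
    // is (1000 + 64) / 128 = 8 (integer division).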
| 1031 |  | 
| 1032 | #ifdef DEBUG | 
| 1033 |     unsigned        bbTgtStkDepth; // Native stack depth on entry (for throw-blocks) | 
| 1034 |     static unsigned s_nMaxTrees;   // The max # of tree nodes in any BB | 
| 1035 |  | 
| 1036 |     unsigned bbStmtNum; // The statement number of the first stmt in this block | 
| 1037 |  | 
| 1038 |     // This is used in integrity checks.  We semi-randomly pick a traversal stamp, label all blocks | 
| 1039 |     // in the BB list with that stamp (in this field); then we can tell if (e.g.) predecessors are | 
| 1040 |     // still in the BB list by whether they have the same stamp (with high probability). | 
| 1041 |     unsigned bbTraversalStamp; | 
| 1042 |     unsigned bbID; | 
| 1043 | #endif // DEBUG | 
| 1044 |  | 
| 1045 |     ThisInitState bbThisOnEntry(); | 
| 1046 |     unsigned      bbStackDepthOnEntry(); | 
| 1047 |     void bbSetStack(void* stackBuffer); | 
| 1048 |     StackEntry* bbStackOnEntry(); | 
| 1049 |     void        bbSetRunRarely(); | 
| 1050 |  | 
| 1051 |     // "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding | 
| 1052 |     // zero-based number for use as an array index. | 
| 1053 |     unsigned bbInd() | 
| 1054 |     { | 
| 1055 |         assert(bbNum > 0); | 
| 1056 |         return bbNum - 1; | 
| 1057 |     } | 
| 1058 |  | 
| 1059 |     GenTreeStmt* firstStmt() const; | 
| 1060 |     GenTreeStmt* lastStmt() const; | 
| 1061 |  | 
| 1062 |     GenTree* firstNode(); | 
| 1063 |     GenTree* lastNode(); | 
| 1064 |  | 
| 1065 |     bool endsWithJmpMethod(Compiler* comp); | 
| 1066 |  | 
| 1067 |     bool endsWithTailCall(Compiler* comp, | 
| 1068 |                           bool      fastTailCallsOnly, | 
| 1069 |                           bool      tailCallsConvertibleToLoopOnly, | 
| 1070 |                           GenTree** tailCall); | 
| 1071 |  | 
| 1072 |     bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false); | 
| 1073 |  | 
| 1074 |     bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall); | 
| 1075 |  | 
| 1076 |     // Returns the first statement in the statement list of "this" that is | 
| 1077 |     // not an SSA definition (a lcl = phi(...) assignment). | 
| 1078 |     GenTreeStmt* FirstNonPhiDef(); | 
| 1079 |     GenTree*     FirstNonPhiDefOrCatchArgAsg(); | 
| 1080 |  | 
| 1081 |     BasicBlock() : bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal()) | 
| 1082 |     { | 
| 1083 |     } | 
| 1084 |  | 
| 1085 |     // Iteratable collection of successors of a block. | 
| 1086 |     template <typename TPosition> | 
| 1087 |     class Successors | 
| 1088 |     { | 
| 1089 |         Compiler*   m_comp; | 
| 1090 |         BasicBlock* m_block; | 
| 1091 |  | 
| 1092 |     public: | 
| 1093 |         Successors(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) | 
| 1094 |         { | 
| 1095 |         } | 
| 1096 |  | 
| 1097 |         class iterator | 
| 1098 |         { | 
| 1099 |             Compiler*   m_comp; | 
| 1100 |             BasicBlock* m_block; | 
| 1101 |             TPosition   m_pos; | 
| 1102 |  | 
| 1103 |         public: | 
| 1104 |             iterator(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block), m_pos(comp, block) | 
| 1105 |             { | 
| 1106 |             } | 
| 1107 |  | 
| 1108 |             iterator() : m_pos() | 
| 1109 |             { | 
| 1110 |             } | 
| 1111 |  | 
| 1112 |             void operator++(void) | 
| 1113 |             { | 
| 1114 |                 m_pos.Advance(m_comp, m_block); | 
| 1115 |             } | 
| 1116 |  | 
| 1117 |             BasicBlock* operator*() | 
| 1118 |             { | 
| 1119 |                 return m_pos.Current(m_comp, m_block); | 
| 1120 |             } | 
| 1121 |  | 
| 1122 |             bool operator==(const iterator& other) | 
| 1123 |             { | 
| 1124 |                 return m_pos == other.m_pos; | 
| 1125 |             } | 
| 1126 |  | 
| 1127 |             bool operator!=(const iterator& other) | 
| 1128 |             { | 
| 1129 |                 return m_pos != other.m_pos; | 
| 1130 |             } | 
| 1131 |         }; | 
| 1132 |  | 
| 1133 |         iterator begin() | 
| 1134 |         { | 
| 1135 |             return iterator(m_comp, m_block); | 
| 1136 |         } | 
| 1137 |  | 
| 1138 |         iterator end() | 
| 1139 |         { | 
| 1140 |             return iterator(); | 
| 1141 |         } | 
| 1142 |     }; | 
| 1143 |  | 
| 1144 |     Successors<EHSuccessorIterPosition> GetEHSuccs(Compiler* comp) | 
| 1145 |     { | 
| 1146 |         return Successors<EHSuccessorIterPosition>(comp, this); | 
| 1147 |     } | 
| 1148 |  | 
| 1149 |     Successors<AllSuccessorIterPosition> GetAllSuccs(Compiler* comp) | 
| 1150 |     { | 
| 1151 |         return Successors<AllSuccessorIterPosition>(comp, this); | 
| 1152 |     } | 
| 1153 |  | 
    // Try to clone block state and statements from the `from` block to the `to` block (which must be new/empty),
    // optionally replacing uses of local `varNum` with IntCns `varVal`.  Return true if all statements
    // in the block are cloned successfully, false (leaving a partially-populated `to` block) if any fails.
| 1157 |     static bool CloneBlockState( | 
| 1158 |         Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum = (unsigned)-1, int varVal = 0); | 
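
    // Usage sketch (a minimal example; 'newBlk', 'origBlk', and 'lclNum' are hypothetical):
    //
    //     if (!BasicBlock::CloneBlockState(compiler, newBlk, origBlk, lclNum, 1))
    //     {
    //         // cloning failed part-way; 'newBlk' may be partially populated
    //     }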
| 1159 |  | 
| 1160 |     void MakeLIR(GenTree* firstNode, GenTree* lastNode); | 
| 1161 |     bool IsLIR(); | 
| 1162 |  | 
| 1163 |     void SetDominatedByExceptionalEntryFlag() | 
| 1164 |     { | 
| 1165 |         bbFlags |= BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY; | 
| 1166 |     } | 
| 1167 |  | 
| 1168 |     bool IsDominatedByExceptionalEntryFlag() | 
| 1169 |     { | 
| 1170 |         return (bbFlags & BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY) != 0; | 
| 1171 |     } | 
| 1172 | }; | 
| 1173 |  | 
| 1174 | template <> | 
| 1175 | struct JitPtrKeyFuncs<BasicBlock> : public JitKeyFuncsDefEquals<const BasicBlock*> | 
| 1176 | { | 
| 1177 | public: | 
    // Make sure hashing is deterministic and not based on the pointer value.
| 1179 |     static unsigned GetHashCode(const BasicBlock* ptr); | 
| 1180 | }; | 
| 1181 |  | 
| 1182 | // A set of blocks. | 
| 1183 | typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, bool> BlkSet; | 
| 1184 |  | 
| 1185 | // A vector of blocks. | 
| 1186 | typedef jitstd::vector<BasicBlock*> BlkVector; | 
| 1187 |  | 
| 1188 | // A map of block -> set of blocks, can be used as sparse block trees. | 
| 1189 | typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkSet*> BlkToBlkSetMap; | 
| 1190 |  | 
| 1191 | // A map of block -> vector of blocks, can be used as sparse block trees. | 
| 1192 | typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkVector> BlkToBlkVectorMap; | 
| 1193 |  | 
| 1194 | // Map from Block to Block.  Used for a variety of purposes. | 
| 1195 | typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BasicBlock*> BlockToBlockMap; | 
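
// Usage sketch for the map typedefs above (a minimal example; assumes the
// JitHashTable Set/Lookup API and a Compiler* 'comp' in scope):
//
//     BlockToBlockMap redirectMap(comp->getAllocator());
//     redirectMap.Set(oldBlock, newBlock);
//     BasicBlock* target;
//     if (redirectMap.Lookup(oldBlock, &target))
//     {
//         // 'target' is the block that 'oldBlock' maps to
//     }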
| 1196 |  | 
// In compiler terminology, the control flow between two BasicBlocks
// is typically referred to as an "edge".  The most well-known are the
// backward branches of loops, which are often called "back-edges".
| 1200 | // | 
| 1201 | // "struct flowList" is the type that represents our control flow edges. | 
| 1202 | // This type is a linked list of zero or more "edges". | 
| 1203 | // (The list of zero edges is represented by NULL.) | 
| 1204 | // Every BasicBlock has a field called bbPreds of this type.  This field | 
| 1205 | // represents the list of "edges" that flow into this BasicBlock. | 
// The flowList type stores only the BasicBlock* of the source of the
// control flow edge.  The destination block of the control flow edge
// is implied to be the block which contains the bbPreds field.
//
// For a switch branch target there may be multiple "edges" that have
// the same source block (and destination block).  We need to count the
// number of these edges so that during optimization we will know when
// we have zero of them.  Rather than adding extra flowList entries, we
// increment the flDupCount field.
| 1215 | // | 
// When we have profile weights for the BasicBlocks we can usually compute
// the number of times each edge was executed by examining the adjacent
// BasicBlock weights.  As we do for BasicBlocks, we call the number
// of times that a control flow edge was executed the "edge weight".
// In order to compute the edge weights we need to use a bounded range
// for every edge weight.  Two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax',
// are used to hold this bounded range.  Most often these will converge such
// that both values are the same, and that value is the exact edge weight.
// Sometimes we are left with a range of possible values between [Min..Max]
// which represents an inexact edge weight.
| 1226 | // | 
| 1227 | // The bbPreds list is initially created by Compiler::fgComputePreds() | 
| 1228 | // and is incrementally kept up to date. | 
| 1229 | // | 
// The edge weights are computed by Compiler::fgComputeEdgeWeights().
// The edge weights are then used by Compiler::fgReorderBlocks()
// to straighten conditional branches.
| 1233 | // | 
// We also have a simpler struct, BasicBlockList: a singly-linked list of
// blocks. This is used for various purposes, one of which is as a "cheap"
// predecessor list, computed by fgComputeCheapPreds() and stored on
// BasicBlock in the bbCheapPreds field.
| 1238 |  | 
| 1239 | struct BasicBlockList | 
| 1240 | { | 
    BasicBlockList* next;  // The next entry in the list, nullptr for end of list.
| 1242 |     BasicBlock*     block; // The BasicBlock of interest. | 
| 1243 |  | 
| 1244 |     BasicBlockList() : next(nullptr), block(nullptr) | 
| 1245 |     { | 
| 1246 |     } | 
| 1247 |  | 
| 1248 |     BasicBlockList(BasicBlock* blk, BasicBlockList* rest) : next(rest), block(blk) | 
| 1249 |     { | 
| 1250 |     } | 
| 1251 | }; | 
| 1252 |  | 
| 1253 | struct flowList | 
| 1254 | { | 
    flowList*   flNext;  // The next flowList entry in the list, nullptr for end of list.
| 1256 |     BasicBlock* flBlock; // The BasicBlock of interest. | 
| 1257 |  | 
| 1258 |     BasicBlock::weight_t flEdgeWeightMin; | 
| 1259 |     BasicBlock::weight_t flEdgeWeightMax; | 
| 1260 |  | 
    unsigned flDupCount; // The count of duplicate "edges" (used only for switch stmts)
| 1262 |  | 
    // These two methods are used to set new values for flEdgeWeightMin and flEdgeWeightMax;
    // they are used only during the computation of the edge weights.
    // They return false if the newWeight is not within the current [min..max] range;
    // when slop is non-zero we allow for the case where our weights might be off by 'slop'.
| 1267 |     // | 
| 1268 |     bool setEdgeWeightMinChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop); | 
| 1269 |     bool setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop); | 
| 1270 |  | 
| 1271 |     flowList() : flNext(nullptr), flBlock(nullptr), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0) | 
| 1272 |     { | 
| 1273 |     } | 
| 1274 |  | 
| 1275 |     flowList(BasicBlock* blk, flowList* rest) | 
| 1276 |         : flNext(rest), flBlock(blk), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0) | 
| 1277 |     { | 
| 1278 |     } | 
| 1279 | }; | 
| 1280 |  | 
| 1281 | // This enum represents a pre/post-visit action state to emulate a depth-first | 
| 1282 | // spanning tree traversal of a tree or graph. | 
| 1283 | enum DfsStackState | 
| 1284 | { | 
| 1285 |     DSS_Invalid, // The initialized, invalid error state | 
| 1286 |     DSS_Pre,     // The DFS pre-order (first visit) traversal state | 
| 1287 |     DSS_Post     // The DFS post-order (last visit) traversal state | 
| 1288 | }; | 
| 1289 |  | 
// These structs represent an entry in a stack used to emulate a non-recursive
// depth-first spanning tree traversal of a graph. An entry contains either a
// block pointer or a block number, depending on which is more useful.
| 1293 | struct DfsBlockEntry | 
| 1294 | { | 
| 1295 |     DfsStackState dfsStackState; // The pre/post traversal action for this entry | 
| 1296 |     BasicBlock*   dfsBlock;      // The corresponding block for the action | 
| 1297 |  | 
| 1298 |     DfsBlockEntry(DfsStackState state, BasicBlock* basicBlock) : dfsStackState(state), dfsBlock(basicBlock) | 
| 1299 |     { | 
| 1300 |     } | 
| 1301 | }; | 
| 1302 |  | 
| 1303 | struct DfsNumEntry | 
| 1304 | { | 
| 1305 |     DfsStackState dfsStackState; // The pre/post traversal action for this entry | 
| 1306 |     unsigned      dfsNum;        // The corresponding block number for the action | 
| 1307 |  | 
| 1308 |     DfsNumEntry() : dfsStackState(DSS_Invalid), dfsNum(0) | 
| 1309 |     { | 
| 1310 |     } | 
| 1311 |  | 
| 1312 |     DfsNumEntry(DfsStackState state, unsigned bbNum) : dfsStackState(state), dfsNum(bbNum) | 
| 1313 |     { | 
| 1314 |     } | 
| 1315 | }; | 
| 1316 |  | 
| 1317 | /***************************************************************************** | 
| 1318 |  * | 
 *  The following callback is supplied by the client; it is used by the code
 *  emitter to convert a basic block to its corresponding emitter cookie.
| 1321 |  */ | 
| 1322 |  | 
| 1323 | void* emitCodeGetCookie(BasicBlock* block); | 
| 1324 |  | 
| 1325 | AllSuccessorIterPosition::AllSuccessorIterPosition(Compiler* comp, BasicBlock* block) | 
| 1326 |     : m_numNormSuccs(block->NumSucc(comp)), m_remainingNormSucc(m_numNormSuccs), m_ehIter(comp, block) | 
| 1327 | { | 
| 1328 |     if (CurTryIsBlkCallFinallyTarget(comp, block)) | 
| 1329 |     { | 
| 1330 |         m_ehIter.Advance(comp, block); | 
| 1331 |     } | 
| 1332 | } | 
| 1333 |  | 
| 1334 | bool AllSuccessorIterPosition::CurTryIsBlkCallFinallyTarget(Compiler* comp, BasicBlock* block) | 
| 1335 | { | 
| 1336 |     return (block->bbJumpKind == BBJ_CALLFINALLY) && (m_ehIter != EHSuccessorIterPosition()) && | 
| 1337 |            (block->bbJumpDest == m_ehIter.Current(comp, block)); | 
| 1338 | } | 
| 1339 |  | 
| 1340 | void AllSuccessorIterPosition::Advance(Compiler* comp, BasicBlock* block) | 
| 1341 | { | 
| 1342 |     if (m_remainingNormSucc > 0) | 
| 1343 |     { | 
| 1344 |         m_remainingNormSucc--; | 
| 1345 |     } | 
| 1346 |     else | 
| 1347 |     { | 
| 1348 |         m_ehIter.Advance(comp, block); | 
| 1349 |  | 
        // If the original block whose successors we're iterating over
        // is a BBJ_CALLFINALLY, that finally clause's first block
        // will be yielded as a normal successor.  Don't also yield it
        // as an exceptional successor.
| 1354 |         if (CurTryIsBlkCallFinallyTarget(comp, block)) | 
| 1355 |         { | 
| 1356 |             m_ehIter.Advance(comp, block); | 
| 1357 |         } | 
| 1358 |     } | 
| 1359 | } | 
| 1360 |  | 
| 1361 | // Requires that "this" is not equal to the standard "end" iterator.  Returns the | 
| 1362 | // current successor. | 
| 1363 | BasicBlock* AllSuccessorIterPosition::Current(Compiler* comp, BasicBlock* block) | 
| 1364 | { | 
| 1365 |     if (m_remainingNormSucc > 0) | 
| 1366 |     { | 
| 1367 |         return block->GetSucc(m_numNormSuccs - m_remainingNormSucc, comp); | 
| 1368 |     } | 
| 1369 |     else | 
| 1370 |     { | 
| 1371 |         return m_ehIter.Current(comp, block); | 
| 1372 |     } | 
| 1373 | } | 
| 1374 |  | 
| 1375 | typedef BasicBlock::Successors<EHSuccessorIterPosition>::iterator  EHSuccessorIter; | 
| 1376 | typedef BasicBlock::Successors<AllSuccessorIterPosition>::iterator AllSuccessorIter; | 
| 1377 |  | 
// An enumerator of all of a block's successors. In some cases (e.g. SsaBuilder::TopologicalSort)
// using iterators is not efficient, in part because each one carries an unnecessary
// member: a pointer to the Compiler object.
| 1381 | class AllSuccessorEnumerator | 
| 1382 | { | 
| 1383 |     BasicBlock*              m_block; | 
| 1384 |     AllSuccessorIterPosition m_pos; | 
| 1385 |  | 
| 1386 | public: | 
| 1387 |     // Constructs an enumerator of all `block`'s successors. | 
| 1388 |     AllSuccessorEnumerator(Compiler* comp, BasicBlock* block) : m_block(block), m_pos(comp, block) | 
| 1389 |     { | 
| 1390 |     } | 
| 1391 |  | 
| 1392 |     // Gets the block whose successors are enumerated. | 
| 1393 |     BasicBlock* Block() | 
| 1394 |     { | 
| 1395 |         return m_block; | 
| 1396 |     } | 
| 1397 |  | 
| 1398 |     // Returns true if the next successor is an EH successor. | 
| 1399 |     bool IsNextEHSuccessor() | 
| 1400 |     { | 
| 1401 |         return m_pos.IsCurrentEH(); | 
| 1402 |     } | 
| 1403 |  | 
| 1404 |     // Returns the next available successor or `nullptr` if there are no more successors. | 
| 1405 |     BasicBlock* NextSuccessor(Compiler* comp) | 
| 1406 |     { | 
| 1407 |         if (!m_pos.HasCurrent()) | 
| 1408 |         { | 
| 1409 |             return nullptr; | 
| 1410 |         } | 
| 1411 |  | 
| 1412 |         BasicBlock* succ = m_pos.Current(comp, m_block); | 
| 1413 |         m_pos.Advance(comp, m_block); | 
| 1414 |         return succ; | 
| 1415 |     } | 
| 1416 | }; | 
| 1417 |  | 
| 1418 | /*****************************************************************************/ | 
| 1419 | #endif // _BLOCK_H_ | 
| 1420 | /*****************************************************************************/ | 
| 1421 |  |