// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

//

#include "jitpch.h"
#include "rangecheck.h"

// Max stack depth (path length) in walking the UD chain.
static const int MAX_SEARCH_DEPTH = 100;

// Max nodes to visit in the UD chain for the current method being compiled.
static const int MAX_VISIT_BUDGET = 8192;

// RangeCheck constructor.
RangeCheck::RangeCheck(Compiler* pCompiler)
    : m_pOverflowMap(nullptr)
    , m_pRangeMap(nullptr)
    , m_pSearchPath(nullptr)
#ifdef DEBUG
    , m_fMappedDefs(false)
    , m_pDefTable(nullptr)
#endif
    , m_pCompiler(pCompiler)
    , m_alloc(pCompiler->getAllocator(CMK_RangeCheck))
    , m_nVisitBudget(MAX_VISIT_BUDGET)
{
}

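// Returns true when the node visit budget for the current method has been exhausted.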
bool RangeCheck::IsOverBudget()
{
    return (m_nVisitBudget <= 0);
}

// Get the range map in which computed ranges are cached.
RangeCheck::RangeMap* RangeCheck::GetRangeMap()
{
    if (m_pRangeMap == nullptr)
    {
        m_pRangeMap = new (m_alloc) RangeMap(m_alloc);
    }
    return m_pRangeMap;
}

// Get the overflow map in which computed overflows are cached.
RangeCheck::OverflowMap* RangeCheck::GetOverflowMap()
{
    if (m_pOverflowMap == nullptr)
    {
        m_pOverflowMap = new (m_alloc) OverflowMap(m_alloc);
    }
    return m_pOverflowMap;
}

// Get the length of the array whose length VN is "vn", if the array was allocated
// by "new" with a known constant size; otherwise returns 0.
int RangeCheck::GetArrLength(ValueNum vn)
{
    ValueNum arrRefVN = m_pCompiler->vnStore->GetArrForLenVn(vn);
    return m_pCompiler->vnStore->GetNewArrSize(arrRefVN);
}

// Check if the computed range is within bounds.
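// Illustrative example (not from the original source): for a typical C# loop
//
//     for (int i = 0; i < a.Length; ++i) { sum += a[i]; }
//
// the index "i" has the range [0, a.Length + (-1)]. With "lower" == 0 and "upper"
// being the array length node, BetweenBounds returns true and the check can be removed.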
bool RangeCheck::BetweenBounds(Range& range, int lower, GenTree* upper)
{
#ifdef DEBUG
    if (m_pCompiler->verbose)
    {
        printf("%s BetweenBounds <%d, ", range.ToString(m_pCompiler->getAllocatorDebugOnly()), lower);
        Compiler::printTreeID(upper);
        printf(">\n");
    }
#endif // DEBUG

    ValueNumStore* vnStore = m_pCompiler->vnStore;

    // Get the VN for the upper limit.
    ValueNum uLimitVN = vnStore->VNConservativeNormalValue(upper->gtVNPair);

#ifdef DEBUG
    JITDUMP(FMT_VN " upper bound is: ", uLimitVN);
    if (m_pCompiler->verbose)
    {
        vnStore->vnDump(m_pCompiler, uLimitVN);
    }
    JITDUMP("\n");
#endif

    int arrSize = 0;

    if (vnStore->IsVNConstant(uLimitVN))
    {
        ssize_t  constVal  = -1;
        unsigned iconFlags = 0;

        if (m_pCompiler->optIsTreeKnownIntValue(true, upper, &constVal, &iconFlags))
        {
            arrSize = (int)constVal;
        }
    }
    else if (vnStore->IsVNArrLen(uLimitVN))
    {
        // Get the array reference from the length.
        ValueNum arrRefVN = vnStore->GetArrForLenVn(uLimitVN);
        // Check if the array size can be obtained.
        arrSize = vnStore->GetNewArrSize(arrRefVN);
    }
    else if (!vnStore->IsVNCheckedBound(uLimitVN))
    {
        // If the upper limit is not known to be a checked bound (e.g., an array length), bail.
        return false;
    }

    JITDUMP("Array size is: %d\n", arrSize);

    // Upper limit: len + ucns (ucns = upper limit constant).
    if (range.UpperLimit().IsBinOpArray())
    {
        if (range.UpperLimit().vn != uLimitVN)
        {
            return false;
        }

        int ucns = range.UpperLimit().GetConstant();

        // If ucns is non-negative, the upper limit is at least "len", which may be out of bounds.
        if (ucns >= 0)
        {
            return false;
        }

        // Since the upper limit is bounded by the array length, return true if the lower bound is good.
        if (range.LowerLimit().IsConstant() && range.LowerLimit().GetConstant() >= 0)
        {
            return true;
        }

        // Check if we have the array size from an allocation by "new".
        if (arrSize <= 0)
        {
            return false;
        }

        // At this point,
        // upper limit = len + ucns. ucns < 0
        // lower limit = len + lcns.
        if (range.LowerLimit().IsBinOpArray())
        {
            int lcns = range.LowerLimit().GetConstant();
            if (lcns >= 0 || -lcns > arrSize)
            {
                return false;
            }
            return (range.LowerLimit().vn == uLimitVN && lcns <= ucns);
        }
    }
    // If the upper limit is constant.
    else if (range.UpperLimit().IsConstant())
    {
        if (arrSize <= 0)
        {
            return false;
        }
        int ucns = range.UpperLimit().GetConstant();
        if (ucns >= arrSize)
        {
            return false;
        }
        if (range.LowerLimit().IsConstant())
        {
            int lcns = range.LowerLimit().GetConstant();
            // Make sure 0 <= lcns <= ucns, where ucns is already known to be less than arrSize.
            return (lcns >= 0 && lcns <= ucns);
        }
        if (range.LowerLimit().IsBinOpArray())
        {
            int lcns = range.LowerLimit().GetConstant();
            // len + lcns, make sure we don't subtract too much from len.
            if (lcns >= 0 || -lcns > arrSize)
            {
                return false;
            }
            // Make sure a.len + lcns <= ucns.
            return (range.LowerLimit().vn == uLimitVN && (arrSize + lcns) <= ucns);
        }
    }

    return false;
}

void RangeCheck::OptimizeRangeCheck(BasicBlock* block, GenTree* stmt, GenTree* treeParent)
{
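    // The IR shape we look for is (illustrative sketch for a typical access "a[i]"):
    //
    //     COMMA
    //     +--- BOUNDS_CHECK(i, a.Length)    <- "tree" (treeParent's first operand)
    //     +--- IND(...)                     <- the actual array access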
    // Bounds checks appear as the first operand of a GT_COMMA, so if the parent
    // node is not a comma, bail.
    if (treeParent->OperGet() != GT_COMMA)
    {
        return;
    }

    // If we are not looking at an array bounds check, bail.
    GenTree* tree = treeParent->gtOp.gtOp1;
    if (!tree->OperIsBoundsCheck())
    {
        return;
    }

    GenTreeBoundsChk* bndsChk = tree->AsBoundsChk();
    m_pCurBndsChk             = bndsChk;
    GenTree* treeIndex        = bndsChk->gtIndex;

    // Take care of constant index first, like a[2], for example.
    ValueNum idxVn    = m_pCompiler->vnStore->VNConservativeNormalValue(treeIndex->gtVNPair);
    ValueNum arrLenVn = m_pCompiler->vnStore->VNConservativeNormalValue(bndsChk->gtArrLen->gtVNPair);
    int      arrSize  = 0;

    if (m_pCompiler->vnStore->IsVNConstant(arrLenVn))
    {
        ssize_t  constVal  = -1;
        unsigned iconFlags = 0;

        if (m_pCompiler->optIsTreeKnownIntValue(true, bndsChk->gtArrLen, &constVal, &iconFlags))
        {
            arrSize = (int)constVal;
        }
    }
    else
#ifdef FEATURE_SIMD
        if (tree->gtOper != GT_SIMD_CHK
#ifdef FEATURE_HW_INTRINSICS
            && tree->gtOper != GT_HW_INTRINSIC_CHK
#endif // FEATURE_HW_INTRINSICS
            )
#endif // FEATURE_SIMD
    {
        arrSize = GetArrLength(arrLenVn);
    }

    JITDUMP("ArrSize for lengthVN:%03X = %d\n", arrLenVn, arrSize);
    if (m_pCompiler->vnStore->IsVNConstant(idxVn) && (arrSize > 0))
    {
        ssize_t  idxVal    = -1;
        unsigned iconFlags = 0;
        if (!m_pCompiler->optIsTreeKnownIntValue(true, treeIndex, &idxVal, &iconFlags))
        {
            return;
        }

        JITDUMP("[RangeCheck::OptimizeRangeCheck] Is index %d in <0, arrLenVn " FMT_VN " sz:%d>.\n", idxVal, arrLenVn,
                arrSize);
        if ((idxVal < arrSize) && (idxVal >= 0))
        {
            JITDUMP("Removing range check\n");
            m_pCompiler->optRemoveRangeCheck(treeParent, stmt);
            return;
        }
    }

    GetRangeMap()->RemoveAll();
    GetOverflowMap()->RemoveAll();
    m_pSearchPath = new (m_alloc) SearchPath(m_alloc);

    // Get the range for this index.
    Range range = GetRange(block, treeIndex, false DEBUGARG(0));

    // If the upper or lower limit is found to be unknown (top), whether directly or
    // because the search went over budget or too deep, then return early.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        // Note: if the stack depth was too deep in the GetRange call, we'd be
        // too deep in the DoesOverflow call as well. So return early.
        return;
    }

    if (DoesOverflow(block, treeIndex))
    {
        JITDUMP("Method determined to overflow.\n");
        return;
    }

    JITDUMP("Range value %s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
    m_pSearchPath->RemoveAll();
    Widen(block, treeIndex, &range);

    // If the upper or lower limit is unknown, then return.
    if (range.UpperLimit().IsUnknown() || range.LowerLimit().IsUnknown())
    {
        return;
    }

    // If the range is between the lower and upper bounds, the range check can be removed.
    if (BetweenBounds(range, 0, bndsChk->gtArrLen))
    {
        JITDUMP("[RangeCheck::OptimizeRangeCheck] Between bounds\n");
        m_pCompiler->optRemoveRangeCheck(treeParent, stmt);
    }
    return;
}

void RangeCheck::Widen(BasicBlock* block, GenTree* tree, Range* pRange)
{
#ifdef DEBUG
    if (m_pCompiler->verbose)
    {
        printf("[RangeCheck::Widen] " FMT_BB ", \n", block->bbNum);
        Compiler::printTreeID(tree);
        printf("\n");
    }
#endif // DEBUG

    Range& range = *pRange;

    // Try to deduce the lower bound, if it is not known already.
    if (range.LowerLimit().IsDependent() || range.LowerLimit().IsUnknown())
    {
        // To determine the lower bound, ask if the loop increases monotonically.
        bool increasing = IsMonotonicallyIncreasing(tree, false);
        JITDUMP("IsMonotonicallyIncreasing %d", increasing);
        if (increasing)
        {
            GetRangeMap()->RemoveAll();
            *pRange = GetRange(block, tree, true DEBUGARG(0));
        }
    }
}

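// Check if a GT_ADD is monotonically increasing: it must be of the form "lclVar + lclVar"
// or "lclVar + cns". Illustrative example (an assumed, typical induction update):
//
//     i = i + 1;   // GT_ADD(GT_LCL_VAR "i", GT_CNS_INT 1)
//
// op1 is the lclVar "i" and op2 is the non-negative constant 1, so the add increases
// monotonically provided "i" itself does.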
bool RangeCheck::IsBinOpMonotonicallyIncreasing(GenTreeOp* binop)
{
    assert(binop->OperIs(GT_ADD));

    GenTree* op1 = binop->gtGetOp1();
    GenTree* op2 = binop->gtGetOp2();

    JITDUMP("[RangeCheck::IsBinOpMonotonicallyIncreasing] [%06d], [%06d]\n", Compiler::dspTreeID(op1),
            Compiler::dspTreeID(op2));
    // Check if we have a var + const.
    if (op2->OperGet() == GT_LCL_VAR)
    {
        jitstd::swap(op1, op2);
    }
    if (op1->OperGet() != GT_LCL_VAR)
    {
        JITDUMP("Not monotonic because op1 is not lclVar.\n");
        return false;
    }
    switch (op2->OperGet())
    {
        case GT_LCL_VAR:
            // When adding two local variables, we also must ensure that any constant is non-negative.
            return IsMonotonicallyIncreasing(op1, true) && IsMonotonicallyIncreasing(op2, true);

        case GT_CNS_INT:
            return (op2->AsIntConCommon()->IconValue() >= 0) && IsMonotonicallyIncreasing(op1, false);

        default:
            JITDUMP("Not monotonic because expression is not recognized.\n");
            return false;
    }
}

// The parameter rejectNegativeConst is true when we are adding two local vars (see above).
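// Illustrative example (assumed SSA shape): for the loop
//
//     i0 = 0; loop: i1 = phi(i0, i2); ... i2 = i1 + 1;
//
// the phi is monotonically increasing: the constant 0 is accepted (rejectNegativeConst is
// false at the top level), and "i1 + 1" adds a non-negative constant to a lclVar that is
// already on the search path.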
bool RangeCheck::IsMonotonicallyIncreasing(GenTree* expr, bool rejectNegativeConst)
{
    JITDUMP("[RangeCheck::IsMonotonicallyIncreasing] [%06d]\n", Compiler::dspTreeID(expr));

    // Add hashtable entry for expr.
    bool alreadyPresent = m_pSearchPath->Set(expr, nullptr);
    if (alreadyPresent)
    {
        return true;
    }

    // Remove hashtable entry for expr when we exit the present scope.
    auto code = [this, expr] { m_pSearchPath->Remove(expr); };
    jitstd::utility::scoped_code<decltype(code)> finally(code);

    if (m_pSearchPath->GetCount() > MAX_SEARCH_DEPTH)
    {
        return false;
    }

    // If expr is constant, then it is not part of the dependency
    // loop which has to increase monotonically.
    ValueNum vn = expr->gtVNPair.GetConservative();
    if (m_pCompiler->vnStore->IsVNInt32Constant(vn))
    {
        if (rejectNegativeConst)
        {
            int cons = m_pCompiler->vnStore->ConstantValue<int>(vn);
            return (cons >= 0);
        }
        else
        {
            return true;
        }
    }
    // If the rhs expr is local, then try to find the def of the local.
    else if (expr->IsLocal())
    {
        BasicBlock* asgBlock;
        GenTreeOp*  asg = GetSsaDefAsg(expr->AsLclVarCommon(), &asgBlock);
        return (asg != nullptr) && IsMonotonicallyIncreasing(asg->gtGetOp2(), rejectNegativeConst);
    }
    else if (expr->OperGet() == GT_ADD)
    {
        return IsBinOpMonotonicallyIncreasing(expr->AsOp());
    }
    else if (expr->OperGet() == GT_PHI)
    {
        for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
        {
            // If the arg is already in the path, skip.
            if (m_pSearchPath->Lookup(args->Current()))
            {
                continue;
            }
            if (!IsMonotonicallyIncreasing(args->Current(), rejectNegativeConst))
            {
                JITDUMP("Phi argument not monotonic\n");
                return false;
            }
        }
        return true;
    }
    JITDUMP("Unknown tree type\n");
    return false;
}

// Given a lclvar use, try to find the lclvar's defining assignment and its containing block.
GenTreeOp* RangeCheck::GetSsaDefAsg(GenTreeLclVarCommon* lclUse, BasicBlock** asgBlock)
{
    unsigned ssaNum = lclUse->GetSsaNum();

    if (ssaNum == SsaConfig::RESERVED_SSA_NUM)
    {
        return nullptr;
    }

    LclSsaVarDsc* ssaData = m_pCompiler->lvaTable[lclUse->GetLclNum()].GetPerSsaData(ssaNum);
    GenTree*      lclDef  = ssaData->m_defLoc.m_tree;

    if (lclDef == nullptr)
    {
        return nullptr;
    }

    // We have the def node but we also need the assignment node to get its source.
    // gtGetParent could be used to find the assignment node, but it's rather expensive
    // and not strictly necessary here: there shouldn't be any other node between
    // the assignment node and its destination node.
    GenTree* asg = lclDef->gtNext;

    if (!asg->OperIs(GT_ASG) || (asg->gtGetOp1() != lclDef))
    {
        return nullptr;
    }

#ifdef DEBUG
    Location* loc = GetDef(lclUse);
    assert(loc->parent == asg);
    assert(loc->block == ssaData->m_defLoc.m_blk);
#endif

    *asgBlock = ssaData->m_defLoc.m_blk;
    return asg->AsOp();
}

#ifdef DEBUG
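// Combine a local's variable number and SSA number into a single 64-bit key.
// For example, HashCode(lclNum = 2, ssaNum = 3) yields 0x0000000200000003.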
UINT64 RangeCheck::HashCode(unsigned lclNum, unsigned ssaNum)
{
    assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
    return UINT64(lclNum) << 32 | ssaNum;
}

// Get the def location of a given variable.
RangeCheck::Location* RangeCheck::GetDef(unsigned lclNum, unsigned ssaNum)
{
    Location* loc = nullptr;
    if (ssaNum == SsaConfig::RESERVED_SSA_NUM)
    {
        return nullptr;
    }
    if (!m_fMappedDefs)
    {
        MapMethodDefs();
    }
    // No defs.
    if (m_pDefTable == nullptr)
    {
        return nullptr;
    }
    m_pDefTable->Lookup(HashCode(lclNum, ssaNum), &loc);
    return loc;
}

RangeCheck::Location* RangeCheck::GetDef(GenTreeLclVarCommon* lcl)
{
    return GetDef(lcl->GetLclNum(), lcl->GetSsaNum());
}

// Add the def location to the hash table.
void RangeCheck::SetDef(UINT64 hash, Location* loc)
{
    if (m_pDefTable == nullptr)
    {
        m_pDefTable = new (m_alloc) VarToLocMap(m_alloc);
    }
#ifdef DEBUG
    Location* loc2;
    if (m_pDefTable->Lookup(hash, &loc2))
    {
        JITDUMP("Already have " FMT_BB ", [%06d], [%06d] for hash => %0I64X", loc2->block->bbNum,
                Compiler::dspTreeID(loc2->stmt), Compiler::dspTreeID(loc2->tree), hash);
        assert(false);
    }
#endif
    m_pDefTable->Set(hash, loc);
}
#endif

// Merge assertions on the edge flowing into the block about a variable.
void RangeCheck::MergeEdgeAssertions(GenTreeLclVarCommon* lcl, ASSERT_VALARG_TP assertions, Range* pRange)
{
    if (BitVecOps::IsEmpty(m_pCompiler->apTraits, assertions))
    {
        return;
    }

    if (lcl->gtSsaNum == SsaConfig::RESERVED_SSA_NUM)
    {
        return;
    }
    // Walk through the "assertions" to check if they apply.
    BitVecOps::Iter iter(m_pCompiler->apTraits, assertions);
    unsigned        index = 0;
    while (iter.NextElem(&index))
    {
        AssertionIndex assertionIndex = GetAssertionIndex(index);

        Compiler::AssertionDsc* curAssertion = m_pCompiler->optGetAssertion(assertionIndex);

        Limit      limit(Limit::keUndef);
        genTreeOps cmpOper = GT_NONE;

        LclSsaVarDsc* ssaData     = m_pCompiler->lvaTable[lcl->gtLclNum].GetPerSsaData(lcl->gtSsaNum);
        ValueNum      normalLclVN = m_pCompiler->vnStore->VNConservativeNormalValue(ssaData->m_vnPair);

        // Current assertion is of the form (i < len - cns) != 0
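        // For example (illustrative): for the assertion "(i < len - 1) != 0", "info" comes
        // back as { cmpOp = VN(i), cmpOper = GT_LT, vnBound = VN(len), arrOper = GT_SUB,
        // arrOp = VN(1) }, and the limit below becomes keBinOpArray(VN(len), -1).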
        if (curAssertion->IsCheckedBoundArithBound())
        {
            ValueNumStore::CompareCheckedBoundArithInfo info;

            // Get i, len, cns and < as "info."
            m_pCompiler->vnStore->GetCompareCheckedBoundArithInfo(curAssertion->op1.vn, &info);

            // If we don't have the same variable we are comparing against, bail.
            if (normalLclVN != info.cmpOp)
            {
                continue;
            }

            if ((info.arrOper != GT_ADD) && (info.arrOper != GT_SUB))
            {
                continue;
            }

            // If the operand that operates on the bound is not constant, then done.
            if (!m_pCompiler->vnStore->IsVNInt32Constant(info.arrOp))
            {
                continue;
            }

            int cons = m_pCompiler->vnStore->ConstantValue<int>(info.arrOp);
            limit    = Limit(Limit::keBinOpArray, info.vnBound, info.arrOper == GT_SUB ? -cons : cons);
            cmpOper  = (genTreeOps)info.cmpOper;
        }
        // Current assertion is of the form (i < len) != 0
        else if (curAssertion->IsCheckedBoundBound())
        {
            ValueNumStore::CompareCheckedBoundArithInfo info;

            // Get the info as "i", "<" and "len"
            m_pCompiler->vnStore->GetCompareCheckedBound(curAssertion->op1.vn, &info);

            // If we don't have the same variable we are comparing against, bail.
            if (normalLclVN != info.cmpOp)
            {
                continue;
            }

            limit   = Limit(Limit::keBinOpArray, info.vnBound, 0);
            cmpOper = (genTreeOps)info.cmpOper;
        }
        // Current assertion is of the form (i < 100) != 0
        else if (curAssertion->IsConstantBound())
        {
            ValueNumStore::ConstantBoundInfo info;

            // Get the info as "i", "<" and "100"
            m_pCompiler->vnStore->GetConstantBoundInfo(curAssertion->op1.vn, &info);

            // If we don't have the same variable we are comparing against, bail.
            if (normalLclVN != info.cmpOpVN)
            {
                continue;
            }

            limit   = Limit(Limit::keConstant, info.constVal);
            cmpOper = (genTreeOps)info.cmpOper;
        }
        // Current assertion is not supported, ignore it.
        else
        {
            continue;
        }

        assert(limit.IsBinOpArray() || limit.IsConstant());

        // Make sure the assertion is of the form != 0 or == 0.
        if (curAssertion->op2.vn != m_pCompiler->vnStore->VNZeroForType(TYP_INT))
        {
            continue;
        }
#ifdef DEBUG
        if (m_pCompiler->verbose)
        {
            m_pCompiler->optPrintAssertion(curAssertion, assertionIndex);
        }
#endif

        ValueNum arrLenVN = m_pCompiler->vnStore->VNConservativeNormalValue(m_pCurBndsChk->gtArrLen->gtVNPair);

        if (m_pCompiler->vnStore->IsVNConstant(arrLenVN))
        {
            // Set arrLenVN to NoVN; this will make it match the "vn" recorded on
            // constant limits (where we explicitly track the constant and don't
            // redundantly store its VN in the "vn" field).
            arrLenVN = ValueNumStore::NoVN;
        }

        // During assertion prop we add assertions of the form:
        //
        //      (i < length) == 0
        //      (i < length) != 0
        //      (i < 100) == 0
        //      (i < 100) != 0
        //
        // At this point, we have detected that op1.vn is (i < length) or (i < length + cns) or
        // (i < 100), and that op2.vn is 0.
        //
        // Now check whether we have "== 0" (i.e., the op1 assertion is false) or "!= 0"
        // (the op1 assertion is true).
        //
        // If we have an assertion of the form "== 0" (i.e., equals false), then reverse the relop.
        // The relop has to be reversed because we have: (i < length) is false, which is the same
        // as (i >= length).
        if (curAssertion->assertionKind == Compiler::OAK_EQUAL)
        {
            cmpOper = GenTree::ReverseRelop(cmpOper);
        }

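        // For example (illustrative): "i < len" excludes len itself, so the inclusive upper
        // limit becomes len + (-1); likewise "i > 0" gives the inclusive lower limit 0 + 1.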
        // Bounds are inclusive, so add -1 for the upper bound when "<". But make sure we won't overflow.
        if (cmpOper == GT_LT && !limit.AddConstant(-1))
        {
            continue;
        }
        // Bounds are inclusive, so add +1 for the lower bound when ">". But make sure we won't overflow.
        if (cmpOper == GT_GT && !limit.AddConstant(1))
        {
            continue;
        }

        // If the current upper limit is constant and the incoming limit is not on the bounds
        // check's length, it can't tighten the current bound, so skip.
        if (pRange->uLimit.IsConstant() && limit.vn != arrLenVN)
        {
            continue;
        }

        // Check if the incoming limit from assertions tightens the existing upper limit.
        if (pRange->uLimit.IsBinOpArray() && (pRange->uLimit.vn == arrLenVN))
        {
            // We have checked that the current range's (pRange's) upper limit is of the form:
            //      length + cns
            // where length == the bndsChk candidate's arrLen.
            //
            // We want to check if the incoming limit tightens the bound, and for that
            // we need to make sure that the incoming limit is also on the same length (or
            // length + cns) and not some other length.

            if (limit.vn != arrLenVN)
            {
                JITDUMP("Array length VN did not match arrLen=" FMT_VN ", limit=" FMT_VN "\n", arrLenVN, limit.vn);
                continue;
            }

            int curCns = pRange->uLimit.cns;
            int limCns = (limit.IsBinOpArray()) ? limit.cns : 0;

            // The incoming limit doesn't tighten the existing upper limit.
            if (limCns >= curCns)
            {
                JITDUMP("Bound limit %d doesn't tighten current bound %d\n", limCns, curCns);
                continue;
            }
        }
        else
        {
            // The current range's upper bound is not "length + cns" and the
            // incoming limit is not on the same length as the bounds check candidate.
            // So we could skip this assertion. But in cases of Dependent or Unknown
            // upper limits, the incoming assertion still tightens the upper
            // bound to a saner value. So do not skip the assertion.
        }

        // cmpOp (loop index i) cmpOper len +/- cns
        switch (cmpOper)
        {
            case GT_LT:
            case GT_LE:
                pRange->uLimit = limit;
                break;

            case GT_GT:
            case GT_GE:
                pRange->lLimit = limit;
                break;

            default:
                // All other 'cmpOper' kinds leave lLimit/uLimit unchanged.
                break;
        }
        JITDUMP("The range after edge merging:");
        JITDUMP(pRange->ToString(m_pCompiler->getAllocatorDebugOnly()));
        JITDUMP("\n");
    }
}

// Merge assertions from the pred edges of the block, i.e., check for any assertions about "op's" value numbers for phi
// arguments. If it is not a phi argument, check if we have assertions about the local variable.
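// For example (illustrative): if "op" is the phi argument of "i" coming from a predecessor
// that ends in "if (i < a.Length)", the JTrue edge's assertion set contains
// "(i < a.Length) != 0", which MergeEdgeAssertions can use to tighten i's upper limit
// to a.Length + (-1).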
void RangeCheck::MergeAssertion(BasicBlock* block, GenTree* op, Range* pRange DEBUGARG(int indent))
{
    JITDUMP("Merging assertions from pred edges of " FMT_BB " for op [%06d] " FMT_VN "\n", block->bbNum,
            Compiler::dspTreeID(op), m_pCompiler->vnStore->VNConservativeNormalValue(op->gtVNPair));
    ASSERT_TP assertions = BitVecOps::UninitVal();

    // If we have a phi arg, we can get to the block from it and use its assertion out.
    if (op->gtOper == GT_PHI_ARG)
    {
        GenTreePhiArg* arg  = (GenTreePhiArg*)op;
        BasicBlock*    pred = arg->gtPredBB;
        if (pred->bbFallsThrough() && pred->bbNext == block)
        {
            assertions = pred->bbAssertionOut;
            JITDUMP("Merge assertions from pred " FMT_BB " edge: %s\n", pred->bbNum,
                    BitVecOps::ToString(m_pCompiler->apTraits, assertions));
        }
        else if ((pred->bbJumpKind == BBJ_COND || pred->bbJumpKind == BBJ_ALWAYS) && pred->bbJumpDest == block)
        {
            if (m_pCompiler->bbJtrueAssertionOut != nullptr)
            {
                assertions = m_pCompiler->bbJtrueAssertionOut[pred->bbNum];
                JITDUMP("Merge assertions from pred " FMT_BB " JTrue edge: %s\n", pred->bbNum,
                        BitVecOps::ToString(m_pCompiler->apTraits, assertions));
            }
        }
    }
    // Get assertions from bbAssertionIn.
    else if (op->IsLocal())
    {
        assertions = block->bbAssertionIn;
    }

    if (!BitVecOps::MayBeUninit(assertions))
    {
        // Perform the merge step to fine tune the range value.
        MergeEdgeAssertions(op->AsLclVarCommon(), assertions, pRange);
    }
}

// Compute the range for a binary operation.
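// For example (illustrative): with op1 range [0, len + (-1)] and op2 the constant 1,
// RangeOps::Add yields [1, len + 0]; the upper limit now equals "len" itself, which is
// what the overflow and BetweenBounds checks later have to reason about.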
Range RangeCheck::ComputeRangeForBinOp(BasicBlock* block, GenTreeOp* binop, bool monotonic DEBUGARG(int indent))
{
    assert(binop->OperIs(GT_ADD));

    GenTree* op1 = binop->gtGetOp1();
    GenTree* op2 = binop->gtGetOp2();

    Range* op1RangeCached = nullptr;
    Range  op1Range       = Limit(Limit::keUndef);
    // Check if the range value is already cached.
    if (!GetRangeMap()->Lookup(op1, &op1RangeCached))
    {
        // If we already have the op in the path, then just rely on assertions; else
        // find the range.
        if (m_pSearchPath->Lookup(op1))
        {
            op1Range = Range(Limit(Limit::keDependent));
        }
        else
        {
            op1Range = GetRange(block, op1, monotonic DEBUGARG(indent));
        }
        MergeAssertion(block, op1, &op1Range DEBUGARG(indent + 1));
    }
    else
    {
        op1Range = *op1RangeCached;
    }

    Range* op2RangeCached;
    Range  op2Range = Limit(Limit::keUndef);
    // Check if the range value is already cached.
    if (!GetRangeMap()->Lookup(op2, &op2RangeCached))
    {
        // If we already have the op in the path, then just rely on assertions; else
        // find the range.
        if (m_pSearchPath->Lookup(op2))
        {
            op2Range = Range(Limit(Limit::keDependent));
        }
        else
        {
            op2Range = GetRange(block, op2, monotonic DEBUGARG(indent));
        }
        MergeAssertion(block, op2, &op2Range DEBUGARG(indent + 1));
    }
    else
    {
        op2Range = *op2RangeCached;
    }

    Range r = RangeOps::Add(op1Range, op2Range);
    JITDUMP("BinOp add ranges %s %s = %s\n", op1Range.ToString(m_pCompiler->getAllocatorDebugOnly()),
            op2Range.ToString(m_pCompiler->getAllocatorDebugOnly()), r.ToString(m_pCompiler->getAllocatorDebugOnly()));
    return r;
}

// Compute the range for a local var definition.
Range RangeCheck::ComputeRangeForLocalDef(BasicBlock*          block,
                                          GenTreeLclVarCommon* lcl,
                                          bool                 monotonic DEBUGARG(int indent))
{
    BasicBlock* asgBlock;
    GenTreeOp*  asg = GetSsaDefAsg(lcl, &asgBlock);
    if (asg == nullptr)
    {
        return Range(Limit(Limit::keUnknown));
    }
#ifdef DEBUG
    if (m_pCompiler->verbose)
    {
        JITDUMP("----------------------------------------------------\n");
        m_pCompiler->gtDispTree(asg);
        JITDUMP("----------------------------------------------------\n");
    }
#endif
    assert(asg->OperIs(GT_ASG));
    Range range = GetRange(asgBlock, asg->gtGetOp2(), monotonic DEBUGARG(indent));
    if (!BitVecOps::MayBeUninit(block->bbAssertionIn))
    {
        JITDUMP("Merge assertions from " FMT_BB ":%s for assignment about [%06d]\n", block->bbNum,
                BitVecOps::ToString(m_pCompiler->apTraits, block->bbAssertionIn), Compiler::dspTreeID(asg->gtGetOp1()));
        MergeEdgeAssertions(asg->gtGetOp1()->AsLclVarCommon(), block->bbAssertionIn, &range);
        JITDUMP("done merging\n");
    }
    return range;
}

// https://msdn.microsoft.com/en-us/windows/apps/hh285054.aspx
// CLR throws IDS_EE_ARRAY_DIMENSIONS_EXCEEDED if array length is > INT_MAX.
// new byte[INT_MAX]; still throws OutOfMemoryException on my system with 32 GB RAM.
// I believe practical limits are still smaller than this number.
#define ARRLEN_MAX (0x7FFFFFFF)

// Get the limit's maximum possible value, treating the array length as ARRLEN_MAX.
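// For example (illustrative): for the limit "len + (-3)" on an array whose allocated
// size is unknown, the maximum is ARRLEN_MAX - 3; for the constant limit 10 it is simply 10.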
bool RangeCheck::GetLimitMax(Limit& limit, int* pMax)
{
    int& max1 = *pMax;
    switch (limit.type)
    {
        case Limit::keConstant:
            max1 = limit.GetConstant();
            break;

        case Limit::keBinOpArray:
        {
            int tmp = GetArrLength(limit.vn);
            if (tmp <= 0)
            {
                tmp = ARRLEN_MAX;
            }
            if (IntAddOverflows(tmp, limit.GetConstant()))
            {
                return false;
            }
            max1 = tmp + limit.GetConstant();
        }
        break;

        default:
            return false;
    }
    return true;
}

// Check if adding the two limits' maximum values overflows.
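// For example (illustrative): keConstant(INT_MAX) + keConstant(1) overflows, while
// keBinOpArray(len, -1) + keConstant(1) does not, since "len" is assumed to be at most ARRLEN_MAX.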
bool RangeCheck::AddOverflows(Limit& limit1, Limit& limit2)
{
    int max1;
    if (!GetLimitMax(limit1, &max1))
    {
        return true;
    }

    int max2;
    if (!GetLimitMax(limit2, &max2))
    {
        return true;
    }

    return IntAddOverflows(max1, max2);
}

// Does the binary operation overflow.
bool RangeCheck::DoesBinOpOverflow(BasicBlock* block, GenTreeOp* binop)
{
    GenTree* op1 = binop->gtGetOp1();
    GenTree* op2 = binop->gtGetOp2();

    if (!m_pSearchPath->Lookup(op1) && DoesOverflow(block, op1))
    {
        return true;
    }

    if (!m_pSearchPath->Lookup(op2) && DoesOverflow(block, op2))
    {
        return true;
    }

    // Get the cached range of op1.
    Range* op1Range = nullptr;
    if (!GetRangeMap()->Lookup(op1, &op1Range))
    {
        return true;
    }
    // Get the cached range of op2.
    Range* op2Range = nullptr;
    if (!GetRangeMap()->Lookup(op2, &op2Range))
    {
        return true;
    }

    // If dependent, check if we can use some assertions.
    if (op1Range->UpperLimit().IsDependent())
    {
        MergeAssertion(block, op1, op1Range DEBUGARG(0));
    }

    // If dependent, check if we can use some assertions.
    if (op2Range->UpperLimit().IsDependent())
    {
        MergeAssertion(block, op2, op2Range DEBUGARG(0));
    }

    JITDUMP("Checking bin op overflow %s %s\n", op1Range->ToString(m_pCompiler->getAllocatorDebugOnly()),
            op2Range->ToString(m_pCompiler->getAllocatorDebugOnly()));

    if (!AddOverflows(op1Range->UpperLimit(), op2Range->UpperLimit()))
    {
        return false;
    }
    return true;
}

// Check if the variable definition's RHS involves arithmetic that overflows.
bool RangeCheck::DoesVarDefOverflow(GenTreeLclVarCommon* lcl)
{
    BasicBlock* asgBlock;
    GenTreeOp*  asg = GetSsaDefAsg(lcl, &asgBlock);
    return (asg == nullptr) || DoesOverflow(asgBlock, asg->gtGetOp2());
}

bool RangeCheck::DoesPhiOverflow(BasicBlock* block, GenTree* expr)
{
    for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
    {
        GenTree* arg = args->Current();
        if (m_pSearchPath->Lookup(arg))
        {
            continue;
        }
        if (DoesOverflow(block, arg))
        {
            return true;
        }
    }
    return false;
}

bool RangeCheck::DoesOverflow(BasicBlock* block, GenTree* expr)
{
    bool overflows = false;
    if (!GetOverflowMap()->Lookup(expr, &overflows))
    {
        overflows = ComputeDoesOverflow(block, expr);
    }
    return overflows;
}

bool RangeCheck::ComputeDoesOverflow(BasicBlock* block, GenTree* expr)
{
    JITDUMP("Does overflow [%06d]?\n", Compiler::dspTreeID(expr));
    m_pSearchPath->Set(expr, block);

    bool overflows = true;

    if (m_pSearchPath->GetCount() > MAX_SEARCH_DEPTH)
    {
        overflows = true;
    }
    // If the definition chain resolves to a constant, it doesn't overflow.
    else if (m_pCompiler->vnStore->IsVNConstant(expr->gtVNPair.GetConservative()))
    {
        overflows = false;
    }
    // Check if the var def has an rhs involving arithmetic that overflows.
    else if (expr->IsLocal())
    {
        overflows = DoesVarDefOverflow(expr->AsLclVarCommon());
    }
    // Check if the add overflows.
    else if (expr->OperGet() == GT_ADD)
    {
        overflows = DoesBinOpOverflow(block, expr->AsOp());
    }
    // Walk through the phi arguments to check if they involve arithmetic that overflows.
    else if (expr->OperGet() == GT_PHI)
    {
        overflows = DoesPhiOverflow(block, expr);
    }
    GetOverflowMap()->Set(expr, overflows);
    m_pSearchPath->Remove(expr);
    return overflows;
}

// Compute the range recursively by asking for the range of each variable in the dependency chain,
// e.g.: c = a + b; ask for the ranges of "a" and "b" and add the results.
// The dependency chain may not terminate in a value but instead keep looping, which happens with
// phi nodes. We end the looping by calling such a value "dependent" (dep).
// If the loop is proven to be "monotonic", then make liberal decisions while merging phi nodes,
// e.g.: merge((0, dep), (dep, dep)) = (0, dep)
Range RangeCheck::ComputeRange(BasicBlock* block, GenTree* expr, bool monotonic DEBUGARG(int indent))
{
    bool  newlyAdded = !m_pSearchPath->Set(expr, block);
    Range range      = Limit(Limit::keUndef);

    ValueNum vn = m_pCompiler->vnStore->VNConservativeNormalValue(expr->gtVNPair);
    // If newly added to the current search path, then reduce the budget.
    if (newlyAdded)
    {
        // Assert that we are not re-entrant for a node which has been
        // visited and resolved before and is not currently on the search path.
        noway_assert(!GetRangeMap()->Lookup(expr));
        m_nVisitBudget--;
    }
    // Prevent quadratic behavior.
    if (IsOverBudget())
    {
        // Set to unknown, since an Unknown range resolution will stop further
        // searches. This is because anything that merges with Unknown will
        // yield Unknown. Unknown is lattice top.
        range = Range(Limit(Limit::keUnknown));
        JITDUMP("GetRange not tractable within max node visit budget.\n");
    }
    // Prevent unbounded recursion.
    else if (m_pSearchPath->GetCount() > MAX_SEARCH_DEPTH)
    {
        // Unknown is lattice top, anything that merges with Unknown will yield Unknown.
        range = Range(Limit(Limit::keUnknown));
        JITDUMP("GetRange not tractable within max stack depth.\n");
    }
    // TODO-CQ: The current implementation is reliant on integer storage types
    // for constants. It could use INT64. Still, representing ULONG constants
    // might require preserving the var_type whether it is a un/signed 64-bit.
    // JIT64 doesn't do anything for "long" either. No asm diffs.
    else if (expr->TypeGet() == TYP_LONG || expr->TypeGet() == TYP_ULONG)
    {
        range = Range(Limit(Limit::keUnknown));
        JITDUMP("GetRange long or ulong, setting to unknown value.\n");
    }
    // If the VN is a constant, return the range as that constant.
    else if (m_pCompiler->vnStore->IsVNConstant(vn))
    {
        range = (m_pCompiler->vnStore->TypeOfVN(vn) == TYP_INT)
                    ? Range(Limit(Limit::keConstant, m_pCompiler->vnStore->ConstantValue<int>(vn)))
                    : Limit(Limit::keUnknown);
    }
    // If local, find the definition from the def map and evaluate the range for the rhs.
    else if (expr->IsLocal())
    {
        range = ComputeRangeForLocalDef(block, expr->AsLclVarCommon(), monotonic DEBUGARG(indent + 1));
        MergeAssertion(block, expr, &range DEBUGARG(indent + 1));
    }
    // If add, then compute the range for the operands and add them.
    else if (expr->OperGet() == GT_ADD)
    {
        range = ComputeRangeForBinOp(block, expr->AsOp(), monotonic DEBUGARG(indent + 1));
    }
    // If phi, then compute the range for the arguments, calling the result "dependent" when looping begins.
    else if (expr->OperGet() == GT_PHI)
    {
        for (GenTreeArgList* args = expr->gtOp.gtOp1->AsArgList(); args != nullptr; args = args->Rest())
        {
            Range argRange = Range(Limit(Limit::keUndef));
            if (m_pSearchPath->Lookup(args->Current()))
            {
                JITDUMP("PhiArg [%06d] is already being computed\n", Compiler::dspTreeID(args->Current()));
                argRange = Range(Limit(Limit::keDependent));
            }
            else
            {
                argRange = GetRange(block, args->Current(), monotonic DEBUGARG(indent + 1));
            }
            assert(!argRange.LowerLimit().IsUndef());
            assert(!argRange.UpperLimit().IsUndef());
            MergeAssertion(block, args->Current(), &argRange DEBUGARG(indent + 1));
            JITDUMP("Merging ranges %s %s:", range.ToString(m_pCompiler->getAllocatorDebugOnly()),
                    argRange.ToString(m_pCompiler->getAllocatorDebugOnly()));
            range = RangeOps::Merge(range, argRange, monotonic);
            JITDUMP("%s\n", range.ToString(m_pCompiler->getAllocatorDebugOnly()));
        }
    }
    else
    {
        // The expression is not recognized, so the result is unknown.
        range = Range(Limit(Limit::keUnknown));
    }

    GetRangeMap()->Set(expr, new (m_alloc) Range(range));
    m_pSearchPath->Remove(expr);
    return range;
}

#ifdef DEBUG
void Indent(int indent)
{
    for (int i = 0; i < indent; ++i)
    {
        JITDUMP(" ");
    }
}
#endif

// Get the range: if it is already computed, use the cached range value; otherwise compute it.
Range RangeCheck::GetRange(BasicBlock* block, GenTree* expr, bool monotonic DEBUGARG(int indent))
{
#ifdef DEBUG
    if (m_pCompiler->verbose)
    {
        Indent(indent);
        JITDUMP("[RangeCheck::GetRange] " FMT_BB, block->bbNum);
        m_pCompiler->gtDispTree(expr);
        Indent(indent);
        JITDUMP("{\n");
    }
#endif

    Range* pRange = nullptr;
    Range  range =
        GetRangeMap()->Lookup(expr, &pRange) ? *pRange : ComputeRange(block, expr, monotonic DEBUGARG(indent));

#ifdef DEBUG
    if (m_pCompiler->verbose)
    {
        Indent(indent);
        JITDUMP(" %s Range [%06d] => %s\n", (pRange == nullptr) ? "Computed" : "Cached", Compiler::dspTreeID(expr),
                range.ToString(m_pCompiler->getAllocatorDebugOnly()));
        Indent(indent);
        JITDUMP("}\n");
    }
#endif
    return range;
}

#ifdef DEBUG
// If this is a local definition in a tree, add its location to the def map.
void RangeCheck::MapStmtDefs(const Location& loc)
{
    GenTreeLclVarCommon* tree = loc.tree;

    unsigned lclNum = tree->GetLclNum();
    unsigned ssaNum = tree->GetSsaNum();
    if (ssaNum == SsaConfig::RESERVED_SSA_NUM)
    {
        return;
    }

    // If the local is both used and defined (GTF_VAR_USEASG), get the correct ssaNum to add to the map.
    if (tree->gtFlags & GTF_VAR_USEASG)
    {
        unsigned ssaNum = m_pCompiler->GetSsaNumForLocalVarDef(tree);
        if (ssaNum != SsaConfig::RESERVED_SSA_NUM)
        {
            // Only track direct assignments, to avoid ind(addr) use-asgs.
            if (loc.parent->OperIs(GT_ASG))
            {
                SetDef(HashCode(lclNum, ssaNum), new (m_alloc) Location(loc));
            }
        }
    }
    // If this is a def, get the location and store it against the variable's ssaNum.
    else if (tree->gtFlags & GTF_VAR_DEF)
    {
        if (loc.parent->OperGet() == GT_ASG)
        {
            SetDef(HashCode(lclNum, ssaNum), new (m_alloc) Location(loc));
        }
    }
}

struct MapMethodDefsData
{
    RangeCheck* rc;
    BasicBlock* block;
    GenTree*    stmt;

    MapMethodDefsData(RangeCheck* rc, BasicBlock* block, GenTree* stmt) : rc(rc), block(block), stmt(stmt)
    {
    }
};

Compiler::fgWalkResult MapMethodDefsVisitor(GenTree** ptr, Compiler::fgWalkData* data)
{
    GenTree*           tree = *ptr;
    MapMethodDefsData* rcd  = ((MapMethodDefsData*)data->pCallbackData);

    if (tree->IsLocal())
    {
        rcd->rc->MapStmtDefs(RangeCheck::Location(rcd->block, rcd->stmt, tree->AsLclVarCommon(), data->parent));
    }

    return Compiler::WALK_CONTINUE;
}

void RangeCheck::MapMethodDefs()
{
    // First, gather where all definitions occur in the program and store it in a map.
    for (BasicBlock* block = m_pCompiler->fgFirstBB; block; block = block->bbNext)
    {
        for (GenTree* stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
        {
            MapMethodDefsData data(this, block, stmt);
            m_pCompiler->fgWalkTreePre(&stmt->gtStmt.gtStmtExpr, MapMethodDefsVisitor, &data, false, true);
        }
    }
    m_fMappedDefs = true;
}
#endif

// Entry point to range check optimizations.
void RangeCheck::OptimizeRangeChecks()
{
    if (m_pCompiler->fgSsaPassesCompleted == 0)
    {
        return;
    }
#ifdef DEBUG
    if (m_pCompiler->verbose)
    {
        JITDUMP("*************** In OptimizeRangeChecks()\n");
        JITDUMP("Blocks/trees before phase\n");
        m_pCompiler->fgDispBasicBlocks(true);
    }
#endif

    // Walk through the trees looking for bounds check (arrBndsChk) nodes and check if they can be optimized.
    for (BasicBlock* block = m_pCompiler->fgFirstBB; block; block = block->bbNext)
    {
        for (GenTree* stmt = block->bbTreeList; stmt; stmt = stmt->gtNext)
        {
            for (GenTree* tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
            {
                if (IsOverBudget())
                {
                    return;
                }
                OptimizeRangeCheck(block, stmt, tree);
            }
        }
    }
}