1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
6 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
7 | XX XX |
8 | XX ValueNum XX |
9 | XX XX |
10 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
11 | XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX |
12 | */ |
13 | |
14 | #include "jitpch.h" |
15 | #ifdef _MSC_VER |
16 | #pragma hdrstop |
17 | #endif |
18 | |
19 | #include "valuenum.h" |
20 | #include "ssaconfig.h" |
21 | |
22 | // Windows x86 and Windows ARM/ARM64 may not define _isnanf() but they do define _isnan(). |
23 | // We will redirect the macros to these other functions if the macro is not defined for the |
24 | // platform. This has the side effect of a possible implicit upcasting for arguments passed. |
25 | #if (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL) |
26 | |
27 | #if !defined(_isnanf) |
28 | #define _isnanf _isnan |
29 | #endif |
30 | |
31 | #endif // (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL) |
32 | |
33 | // We need to use target-specific NaN values when statically compute expressions. |
34 | // Otherwise, cross crossgen (e.g. x86_arm) would have different binary outputs |
35 | // from native crossgen (i.e. arm_arm) when the NaN got "embedded" into code. |
36 | // |
37 | // For example, when placing NaN value in r3 register |
38 | // x86_arm crossgen would emit |
39 | // movw r3, 0x00 |
40 | // movt r3, 0xfff8 |
41 | // while arm_arm crossgen (and JIT) output is |
42 | // movw r3, 0x00 |
43 | // movt r3, 0x7ff8 |
44 | |
struct FloatTraits
{
    //------------------------------------------------------------------------
    // NaN: Return the target-specific float NaN value
    //
    // Notes:
    //    The "default" NaN produced by 0.0f / 0.0f differs by target: on
    //    x86/x64 its binary representation is 0xffc00000, while on
    //    ARM32/ARM64 it is 0x7fc00000, so the constant is selected per
    //    target to keep cross-crossgen output deterministic.

    static float NaN()
    {
#if defined(_TARGET_XARCH_)
        const unsigned rawBits = 0xFFC00000u;
#elif defined(_TARGET_ARMARCH_)
        const unsigned rawBits = 0x7FC00000u;
#else
#error Unsupported or unset target architecture
#endif
        static_assert(sizeof(rawBits) == sizeof(float), "sizeof(unsigned) must equal sizeof(float)");
        float nanValue;
        // Reinterpret the bit pattern as a float without violating aliasing rules.
        memcpy(&nanValue, &rawBits, sizeof(nanValue));
        return nanValue;
    }
};
70 | |
struct DoubleTraits
{
    //------------------------------------------------------------------------
    // NaN: Return the target-specific double NaN value
    //
    // Notes:
    //    The "default" NaN produced by 0.0 / 0.0 differs by target: on
    //    x86/x64 its binary representation is 0xfff8000000000000, while on
    //    ARM32/ARM64 it is 0x7ff8000000000000, so the constant is selected
    //    per target to keep cross-crossgen output deterministic.

    static double NaN()
    {
#if defined(_TARGET_XARCH_)
        const unsigned long long rawBits = 0xFFF8000000000000ull;
#elif defined(_TARGET_ARMARCH_)
        const unsigned long long rawBits = 0x7FF8000000000000ull;
#else
#error Unsupported or unset target architecture
#endif
        static_assert(sizeof(rawBits) == sizeof(double), "sizeof(unsigned long long) must equal sizeof(double)");
        double nanValue;
        // Reinterpret the bit pattern as a double without violating aliasing rules.
        memcpy(&nanValue, &rawBits, sizeof(nanValue));
        return nanValue;
    }
};
96 | |
97 | //------------------------------------------------------------------------ |
98 | // FpAdd: Computes value1 + value2 |
99 | // |
100 | // Return Value: |
101 | // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN |
102 | // value1 + value2 - Otherwise |
103 | // |
104 | // Notes: |
105 | // See FloatTraits::NaN() and DoubleTraits::NaN() notes. |
106 | |
template <typename TFp, typename TFpTraits>
TFp FpAdd(TFp value1, TFp value2)
{
#ifdef _TARGET_ARMARCH_
    // Adding infinities of opposite signs produces NaN; return the
    // target-specific NaN encoding rather than the host's default.
    if (!_finite(value1) && !_finite(value2))
    {
        bool oppositeSigns = ((value1 < 0) && (value2 > 0)) || ((value1 > 0) && (value2 < 0));
        if (oppositeSigns)
        {
            return TFpTraits::NaN();
        }
    }
#endif // _TARGET_ARMARCH_

    return value1 + value2;
}
132 | |
133 | //------------------------------------------------------------------------ |
134 | // FpSub: Computes value1 - value2 |
135 | // |
136 | // Return Value: |
137 | // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN |
138 | // value1 - value2 - Otherwise |
139 | // |
140 | // Notes: |
141 | // See FloatTraits::NaN() and DoubleTraits::NaN() notes. |
142 | |
template <typename TFp, typename TFpTraits>
TFp FpSub(TFp value1, TFp value2)
{
#ifdef _TARGET_ARMARCH_
    // Subtracting infinities of the same sign produces NaN; return the
    // target-specific NaN encoding rather than the host's default.
    if (!_finite(value1) && !_finite(value2))
    {
        bool sameSigns = ((value1 > 0) && (value2 > 0)) || ((value1 < 0) && (value2 < 0));
        if (sameSigns)
        {
            return TFpTraits::NaN();
        }
    }
#endif // _TARGET_ARMARCH_

    return value1 - value2;
}
168 | |
169 | //------------------------------------------------------------------------ |
170 | // FpMul: Computes value1 * value2 |
171 | // |
172 | // Return Value: |
173 | // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN |
174 | // value1 * value2 - Otherwise |
175 | // |
176 | // Notes: |
177 | // See FloatTraits::NaN() and DoubleTraits::NaN() notes. |
178 | |
template <typename TFp, typename TFpTraits>
TFp FpMul(TFp value1, TFp value2)
{
#ifdef _TARGET_ARMARCH_
    // From the ECMA standard: zero times infinity (in either order) is NaN;
    // return the target-specific NaN encoding for those cases.
    bool value1IsInf = !_finite(value1) && !_isnan(value1);
    bool value2IsInf = !_finite(value2) && !_isnan(value2);

    if (((value1 == 0) && value2IsInf) || (value1IsInf && (value2 == 0)))
    {
        return TFpTraits::NaN();
    }
#endif // _TARGET_ARMARCH_

    return value1 * value2;
}
202 | |
203 | //------------------------------------------------------------------------ |
204 | // FpDiv: Computes value1 / value2 |
205 | // |
206 | // Return Value: |
207 | // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN |
208 | // value1 / value2 - Otherwise |
209 | // |
210 | // Notes: |
211 | // See FloatTraits::NaN() and DoubleTraits::NaN() notes. |
212 | |
template <typename TFp, typename TFpTraits>
TFp FpDiv(TFp dividend, TFp divisor)
{
#ifdef _TARGET_ARMARCH_
    // From the ECMA standard: 0/0 and infinity/infinity are NaN;
    // return the target-specific NaN encoding for those cases.
    bool dividendIsInf = !_finite(dividend) && !_isnan(dividend);
    bool divisorIsInf  = !_finite(divisor) && !_isnan(divisor);

    if ((dividend == 0) && (divisor == 0))
    {
        return TFpTraits::NaN();
    }
    else if (dividendIsInf && divisorIsInf)
    {
        return TFpTraits::NaN();
    }
#endif // _TARGET_ARMARCH_

    return dividend / divisor;
}
236 | |
//------------------------------------------------------------------------
// FpRem: Computes the floating point remainder of dividend / divisor
//
// Return Value:
//    TFpTraits::NaN() - If [divisor] is zero or [dividend] is not finite
//    dividend         - If [divisor] is infinite (and [dividend] is finite)
//    fmod(dividend, divisor) - Otherwise
//
// Notes:
//    See FloatTraits::NaN() and DoubleTraits::NaN() notes.
//    Unlike the other Fp* helpers this applies on every target.

template <typename TFp, typename TFpTraits>
TFp FpRem(TFp dividend, TFp divisor)
{
    // From the ECMA standard: a zero divisor or a non-finite dividend
    // yields NaN.
    if ((divisor == 0) || !_finite(dividend))
    {
        return TFpTraits::NaN();
    }

    // An infinite divisor leaves the (finite) dividend unchanged.
    bool divisorIsInf = !_finite(divisor) && !_isnan(divisor);
    if (divisorIsInf)
    {
        return dividend;
    }

    return (TFp)fmod((double)dividend, (double)divisor);
}
258 | |
259 | //-------------------------------------------------------------------------------- |
260 | // VNGetOperKind: - Given two bools: isUnsigned and overFlowCheck |
261 | // return the correct VNOperKind for them. |
262 | // |
263 | // Arguments: |
264 | // isUnsigned - The operKind returned should have the unsigned property |
265 | // overflowCheck - The operKind returned should have the overflow check property |
266 | // |
267 | // Return Value: |
268 | // - The VNOperKind to use for this pair of (isUnsigned, overflowCheck) |
269 | // |
270 | VNOperKind VNGetOperKind(bool isUnsigned, bool overflowCheck) |
271 | { |
272 | if (!isUnsigned) |
273 | { |
274 | if (!overflowCheck) |
275 | { |
276 | return VOK_Default; |
277 | } |
278 | else |
279 | { |
280 | return VOK_OverflowCheck; |
281 | } |
282 | } |
283 | else // isUnsigned |
284 | { |
285 | if (!overflowCheck) |
286 | { |
287 | return VOK_Unsigned; |
288 | } |
289 | else |
290 | { |
291 | return VOK_Unsigned_OverflowCheck; |
292 | } |
293 | } |
294 | } |
295 | |
296 | //-------------------------------------------------------------------------------- |
297 | // GetVNFuncForOper: - Given a genTreeOper this function Returns the correct |
298 | // VNFunc to use for ValueNumbering |
299 | // |
300 | // Arguments: |
301 | // oper - The gtOper value from the GenTree node |
302 | // operKind - An enum that supports Normal, Unsigned, OverflowCheck, |
303 | // and Unsigned_OverflowCheck, |
304 | // |
305 | // Return Value: |
306 | // - The VNFunc to use for this pair of (oper, operKind) |
307 | // |
308 | // Notes: - An assert will fire when the oper does not support |
//                  the operKind that is supplied.
310 | // |
311 | VNFunc GetVNFuncForOper(genTreeOps oper, VNOperKind operKind) |
312 | { |
313 | VNFunc result = VNF_COUNT; // An illegal value |
314 | bool invalid = false; |
315 | |
316 | // For most genTreeOpers we just use the VNFunc with the same enum value as the oper |
317 | // |
318 | if (operKind == VOK_Default) |
319 | { |
320 | // We can directly use the enum value of oper |
321 | result = VNFunc(oper); |
322 | } |
323 | else if ((oper == GT_EQ) || (oper == GT_NE)) |
324 | { |
325 | if (operKind == VOK_Unsigned) |
326 | { |
327 | // We will permit unsignedOper to be used with GT_EQ and GT_NE (as it is a no-op) |
328 | // |
329 | // Again we directly use the enum value of oper |
330 | result = VNFunc(oper); |
331 | } |
332 | else |
333 | { |
334 | invalid = true; |
335 | } |
336 | } |
337 | else // We will need to use a VNF_ function |
338 | { |
339 | switch (oper) |
340 | { |
341 | case GT_LT: |
342 | if (operKind == VOK_Unsigned) |
343 | { |
344 | result = VNF_LT_UN; |
345 | } |
346 | else |
347 | { |
348 | invalid = true; |
349 | } |
350 | break; |
351 | |
352 | case GT_LE: |
353 | if (operKind == VOK_Unsigned) |
354 | { |
355 | result = VNF_LE_UN; |
356 | } |
357 | else |
358 | { |
359 | invalid = true; |
360 | } |
361 | break; |
362 | |
363 | case GT_GE: |
364 | if (operKind == VOK_Unsigned) |
365 | { |
366 | result = VNF_GE_UN; |
367 | } |
368 | else |
369 | { |
370 | invalid = true; |
371 | } |
372 | break; |
373 | |
374 | case GT_GT: |
375 | if (operKind == VOK_Unsigned) |
376 | { |
377 | result = VNF_GT_UN; |
378 | } |
379 | else |
380 | { |
381 | invalid = true; |
382 | } |
383 | break; |
384 | |
385 | case GT_ADD: |
386 | if (operKind == VOK_OverflowCheck) |
387 | { |
388 | result = VNF_ADD_OVF; |
389 | } |
390 | else if (operKind == VOK_Unsigned_OverflowCheck) |
391 | { |
392 | result = VNF_ADD_UN_OVF; |
393 | } |
394 | else |
395 | { |
396 | invalid = true; |
397 | } |
398 | break; |
399 | |
400 | case GT_SUB: |
401 | if (operKind == VOK_OverflowCheck) |
402 | { |
403 | result = VNF_SUB_OVF; |
404 | } |
405 | else if (operKind == VOK_Unsigned_OverflowCheck) |
406 | { |
407 | result = VNF_SUB_UN_OVF; |
408 | } |
409 | else |
410 | { |
411 | invalid = true; |
412 | } |
413 | break; |
414 | |
415 | case GT_MUL: |
416 | if (operKind == VOK_OverflowCheck) |
417 | { |
418 | result = VNF_MUL_OVF; |
419 | } |
420 | else if (operKind == VOK_Unsigned_OverflowCheck) |
421 | { |
422 | result = VNF_MUL_UN_OVF; |
423 | } |
424 | #ifndef _TARGET_64BIT_ |
425 | else if (operKind == VOK_Unsigned) |
426 | { |
427 | // This is the special 64-bit unsigned multiply used on 32-bit targets |
428 | result = VNF_MUL64_UN; |
429 | } |
430 | #endif |
431 | else |
432 | { |
433 | invalid = true; |
434 | } |
435 | break; |
436 | |
437 | default: |
438 | // Will trigger the noway_assert below. |
439 | break; |
440 | } |
441 | } |
442 | noway_assert(!invalid && (result != VNF_COUNT)); |
443 | |
444 | return result; |
445 | } |
446 | |
447 | //-------------------------------------------------------------------------------- |
448 | // GetVNFuncForNode: - Given a GenTree node, this returns the proper |
449 | // VNFunc to use for ValueNumbering |
450 | // |
451 | // Arguments: |
452 | // node - The GenTree node that we need the VNFunc for. |
453 | // |
454 | // Return Value: |
455 | // - The VNFunc to use for this GenTree node |
456 | // |
457 | // Notes: - The gtFlags from the node are used to set operKind |
458 | // to one of Normal, Unsigned, OverflowCheck, |
459 | // or Unsigned_OverflowCheck. Also see GetVNFuncForOper() |
460 | // |
461 | VNFunc GetVNFuncForNode(GenTree* node) |
462 | { |
463 | bool isUnsignedOper = ((node->gtFlags & GTF_UNSIGNED) != 0); |
464 | bool hasOverflowCheck = node->gtOverflowEx(); |
465 | VNOperKind operKind = VNGetOperKind(isUnsignedOper, hasOverflowCheck); |
466 | VNFunc result = GetVNFuncForOper(node->gtOper, operKind); |
467 | |
468 | return result; |
469 | } |
470 | |
471 | unsigned ValueNumStore::VNFuncArity(VNFunc vnf) |
472 | { |
473 | // Read the bit field out of the table... |
474 | return (s_vnfOpAttribs[vnf] & VNFOA_ArityMask) >> VNFOA_ArityShift; |
475 | } |
476 | |
477 | template <> |
478 | bool ValueNumStore::IsOverflowIntDiv(int v0, int v1) |
479 | { |
480 | return (v1 == -1) && (v0 == INT32_MIN); |
481 | } |
482 | |
483 | template <> |
484 | bool ValueNumStore::IsOverflowIntDiv(INT64 v0, INT64 v1) |
485 | { |
486 | return (v1 == -1) && (v0 == INT64_MIN); |
487 | } |
488 | |
489 | template <typename T> |
490 | bool ValueNumStore::IsOverflowIntDiv(T v0, T v1) |
491 | { |
492 | return false; |
493 | } |
494 | |
495 | template <> |
496 | bool ValueNumStore::IsIntZero(int v) |
497 | { |
498 | return v == 0; |
499 | } |
500 | template <> |
501 | bool ValueNumStore::IsIntZero(unsigned v) |
502 | { |
503 | return v == 0; |
504 | } |
505 | template <> |
506 | bool ValueNumStore::IsIntZero(INT64 v) |
507 | { |
508 | return v == 0; |
509 | } |
510 | template <> |
511 | bool ValueNumStore::IsIntZero(UINT64 v) |
512 | { |
513 | return v == 0; |
514 | } |
515 | template <typename T> |
516 | bool ValueNumStore::IsIntZero(T v) |
517 | { |
518 | return false; |
519 | } |
520 | |
//--------------------------------------------------------------------------------
// ValueNumStore: construct the value number store used during value numbering.
//
// Arguments:
//    comp  - the compiler instance that owns this store
//    alloc - allocator used for the chunks and all of the maps below
//
ValueNumStore::ValueNumStore(Compiler* comp, CompAllocator alloc)
    : m_pComp(comp)
    , m_alloc(alloc)
    , m_nextChunkBase(0)
    , m_fixedPointMapSels(alloc, 8)
    , m_checkedBoundVNs(alloc)
    , m_chunks(alloc, 8)
    , m_intCnsMap(nullptr)
    , m_longCnsMap(nullptr)
    , m_handleMap(nullptr)
    , m_floatCnsMap(nullptr)
    , m_doubleCnsMap(nullptr)
    , m_byrefCnsMap(nullptr)
    , m_VNFunc0Map(nullptr)
    , m_VNFunc1Map(nullptr)
    , m_VNFunc2Map(nullptr)
    , m_VNFunc3Map(nullptr)
    , m_VNFunc4Map(nullptr)
#ifdef DEBUG
    , m_numMapSels(0)
#endif
{
    // We have no current allocation chunks.
    for (unsigned i = 0; i < TYP_COUNT; i++)
    {
        // One slot per (type, attribute-kind/loop-number) pair.
        for (unsigned j = CEA_None; j <= CEA_Count + MAX_LOOP_NUM; j++)
        {
            m_curAllocChunk[i][j] = NoChunk;
        }
    }

    // The small-int-constant cache starts out empty.
    for (unsigned i = 0; i < SmallIntConstNum; i++)
    {
        m_VNsForSmallIntConsts[i] = NoVN;
    }
    // We will reserve chunk 0 to hold some special constants, like the constant NULL, the "exception" value, and the
    // "zero map."
    Chunk* specialConstChunk = new (m_alloc) Chunk(m_alloc, &m_nextChunkBase, TYP_REF, CEA_Const, MAX_LOOP_NUM);
    specialConstChunk->m_numUsed +=
        SRC_NumSpecialRefConsts; // Implicitly allocate 0 ==> NULL, and 1 ==> Exception, 2 ==> ZeroMap.
    ChunkNum cn = m_chunks.Push(specialConstChunk);
    assert(cn == 0);

    m_mapSelectBudget = (int)JitConfig.JitVNMapSelBudget(); // We cast the unsigned DWORD to a signed int.

    // This value must be non-negative and non-zero, reset the value to DEFAULT_MAP_SELECT_BUDGET if it isn't.
    if (m_mapSelectBudget <= 0)
    {
        m_mapSelectBudget = DEFAULT_MAP_SELECT_BUDGET;
    }
}
572 | |
573 | // |
574 | // Unary EvalOp |
575 | // |
576 | |
577 | template <typename T> |
578 | T ValueNumStore::EvalOp(VNFunc vnf, T v0) |
579 | { |
580 | genTreeOps oper = genTreeOps(vnf); |
581 | |
582 | // Here we handle unary ops that are the same for all types. |
583 | switch (oper) |
584 | { |
585 | case GT_NEG: |
586 | // Note that GT_NEG is the only valid unary floating point operation |
587 | return -v0; |
588 | |
589 | default: |
590 | break; |
591 | } |
592 | |
593 | // Otherwise must be handled by the type specific method |
594 | return EvalOpSpecialized(vnf, v0); |
595 | } |
596 | |
template <>
double ValueNumStore::EvalOpSpecialized<double>(VNFunc vnf, double v0)
{
    // There are currently no specialized unary double ops; reaching here
    // means the VNFunc is not supported for double, so always assert.
    noway_assert(!"EvalOpSpecialized<double> - unary");
    return 0.0;
}
604 | |
template <>
float ValueNumStore::EvalOpSpecialized<float>(VNFunc vnf, float v0)
{
    // There are currently no specialized unary float ops; reaching here
    // means the VNFunc is not supported for float, so always assert.
    noway_assert(!"EvalOpSpecialized<float> - unary");
    return 0.0f;
}
612 | |
// Unary evaluation for the integral types (float and double have their own
// specializations above, which always assert).
template <typename T>
T ValueNumStore::EvalOpSpecialized(VNFunc vnf, T v0)
{
    if (vnf < VNF_Boundary)
    {
        genTreeOps oper = genTreeOps(vnf);

        switch (oper)
        {
            case GT_NEG:
                return -v0;

            case GT_NOT:
                return ~v0;

            case GT_BSWAP16:
            {
                // Swap the low two bytes; the UINT16 -> T conversion zero-extends.
                UINT16 v0_unsigned = UINT16(v0);

                v0_unsigned = ((v0_unsigned >> 8) & 0xFF) | ((v0_unsigned << 8) & 0xFF00);
                return T(v0_unsigned);
            }

            case GT_BSWAP:
                // Full byte reversal; the width is chosen by the size of T.
                if (sizeof(T) == 4)
                {
                    UINT32 v0_unsigned = UINT32(v0);

                    v0_unsigned = ((v0_unsigned >> 24) & 0xFF) | ((v0_unsigned >> 8) & 0xFF00) |
                                  ((v0_unsigned << 8) & 0xFF0000) | ((v0_unsigned << 24) & 0xFF000000);
                    return T(v0_unsigned);
                }
                else if (sizeof(T) == 8)
                {
                    UINT64 v0_unsigned = UINT64(v0);

                    v0_unsigned = ((v0_unsigned >> 56) & 0xFF) | ((v0_unsigned >> 40) & 0xFF00) |
                                  ((v0_unsigned >> 24) & 0xFF0000) | ((v0_unsigned >> 8) & 0xFF000000) |
                                  ((v0_unsigned << 8) & 0xFF00000000) | ((v0_unsigned << 24) & 0xFF0000000000) |
                                  ((v0_unsigned << 40) & 0xFF000000000000) | ((v0_unsigned << 56) & 0xFF00000000000000);
                    return T(v0_unsigned);
                }
                else
                {
                    break; // unknown primitive
                }

            default:
                break;
        }
    }

    noway_assert(!"Unhandled operation in EvalOpSpecialized<T> - unary");
    return v0;
}
668 | |
669 | // |
670 | // Binary EvalOp |
671 | // |
672 | |
// Binary evaluation entry point; forwards to the type-specific method.
template <typename T>
T ValueNumStore::EvalOp(VNFunc vnf, T v0, T v1)
{
    // Here we handle the binary ops that are the same for all types.

    // Currently there are none (due to floating point NaN representations)

    // Otherwise must be handled by the type specific method
    return EvalOpSpecialized(vnf, v0, v1);
}
683 | |
template <>
double ValueNumStore::EvalOpSpecialized<double>(VNFunc vnf, double v0, double v1)
{
    // Here we handle specialized double binary ops, routing each arithmetic
    // oper through the Fp* helpers so that any NaN result uses the
    // target-specific encoding (see DoubleTraits::NaN()).
    if (vnf < VNF_Boundary)
    {
        genTreeOps oper = genTreeOps(vnf);

        switch (oper)
        {
            case GT_ADD:
                return FpAdd<double, DoubleTraits>(v0, v1);
            case GT_SUB:
                return FpSub<double, DoubleTraits>(v0, v1);
            case GT_MUL:
                return FpMul<double, DoubleTraits>(v0, v1);
            case GT_DIV:
                return FpDiv<double, DoubleTraits>(v0, v1);
            case GT_MOD:
                return FpRem<double, DoubleTraits>(v0, v1);

            default:
                // For any other value of 'oper', we will assert below
                break;
        }
    }

    noway_assert(!"EvalOpSpecialized<double> - binary");
    return v0;
}
715 | |
716 | template <> |
717 | float ValueNumStore::EvalOpSpecialized<float>(VNFunc vnf, float v0, float v1) |
718 | { |
719 | // Here we handle specialized float binary ops. |
720 | if (vnf < VNF_Boundary) |
721 | { |
722 | genTreeOps oper = genTreeOps(vnf); |
723 | |
724 | // Here we handle |
725 | switch (oper) |
726 | { |
727 | case GT_ADD: |
728 | return FpAdd<float, FloatTraits>(v0, v1); |
729 | case GT_SUB: |
730 | return FpSub<float, FloatTraits>(v0, v1); |
731 | case GT_MUL: |
732 | return FpMul<float, FloatTraits>(v0, v1); |
733 | case GT_DIV: |
734 | return FpDiv<float, FloatTraits>(v0, v1); |
735 | case GT_MOD: |
736 | return FpRem<float, FloatTraits>(v0, v1); |
737 | |
738 | default: |
739 | // For any other value of 'oper', we will assert below |
740 | break; |
741 | } |
742 | } |
743 | assert(!"EvalOpSpecialized<float> - binary" ); |
744 | return v0; |
745 | } |
746 | |
// Binary evaluation for the integral types; float and double are handled by
// the specializations above.
template <typename T>
T ValueNumStore::EvalOpSpecialized(VNFunc vnf, T v0, T v1)
{
    typedef typename jitstd::make_unsigned<T>::type UT;

    // Only 32-bit and 64-bit integral types are folded here.
    assert((sizeof(T) == 4) || (sizeof(T) == 8));

    // Here we handle binary ops that are the same for all integer types
    if (vnf < VNF_Boundary)
    {
        genTreeOps oper = genTreeOps(vnf);

        switch (oper)
        {
            case GT_ADD:
                return v0 + v1;
            case GT_SUB:
                return v0 - v1;
            case GT_MUL:
                return v0 * v1;

            case GT_DIV:
                // Callers must already have screened out division by zero and
                // MinValue / -1 (see IsIntZero / IsOverflowIntDiv).
                assert(IsIntZero(v1) == false);
                assert(IsOverflowIntDiv(v0, v1) == false);
                return v0 / v1;

            case GT_MOD:
                assert(IsIntZero(v1) == false);
                assert(IsOverflowIntDiv(v0, v1) == false);
                return v0 % v1;

            case GT_UDIV:
                // Unsigned divide/mod are evaluated in the unsigned counterpart of T.
                assert(IsIntZero(v1) == false);
                return T(UT(v0) / UT(v1));

            case GT_UMOD:
                assert(IsIntZero(v1) == false);
                return T(UT(v0) % UT(v1));

            case GT_AND:
                return v0 & v1;
            case GT_OR:
                return v0 | v1;
            case GT_XOR:
                return v0 ^ v1;

            case GT_LSH:
                // 64-bit shifts mask the shift count to 6 bits.
                if (sizeof(T) == 8)
                {
                    return v0 << (v1 & 0x3F);
                }
                else
                {
                    return v0 << v1;
                }
            case GT_RSH:
                if (sizeof(T) == 8)
                {
                    return v0 >> (v1 & 0x3F);
                }
                else
                {
                    return v0 >> v1;
                }
            case GT_RSZ:
                // Logical (zero-filling) right shift: shift the unsigned value.
                if (sizeof(T) == 8)
                {
                    return UINT64(v0) >> (v1 & 0x3F);
                }
                else
                {
                    return UINT32(v0) >> v1;
                }
            case GT_ROL:
                // NOTE(review): a rotate count of 0 makes the complementary
                // shift below a full-width shift (undefined in C++); callers
                // presumably never fold a rotate by zero -- confirm.
                if (sizeof(T) == 8)
                {
                    return (v0 << v1) | (UINT64(v0) >> (64 - v1));
                }
                else
                {
                    return (v0 << v1) | (UINT32(v0) >> (32 - v1));
                }

            case GT_ROR:
                if (sizeof(T) == 8)
                {
                    return (v0 << (64 - v1)) | (UINT64(v0) >> v1);
                }
                else
                {
                    return (v0 << (32 - v1)) | (UINT32(v0) >> v1);
                }

            default:
                // For any other value of 'oper', we will assert below
                break;
        }
    }
    else // must be a VNF_ function
    {
        switch (vnf)
        {
            // Here we handle those that are the same for all integer types.

            default:
                // For any other value of 'vnf', we will assert below
                break;
        }
    }

    noway_assert(!"Unhandled operation in EvalOpSpecialized<T> - binary");
    return v0;
}
860 | |
861 | template <> |
862 | int ValueNumStore::EvalComparison<double>(VNFunc vnf, double v0, double v1) |
863 | { |
864 | // Here we handle specialized double comparisons. |
865 | |
866 | // We must check for a NaN argument as they they need special handling |
867 | bool hasNanArg = (_isnan(v0) || _isnan(v1)); |
868 | |
869 | if (vnf < VNF_Boundary) |
870 | { |
871 | genTreeOps oper = genTreeOps(vnf); |
872 | |
873 | if (hasNanArg) |
874 | { |
875 | // return false in all cases except for GT_NE; |
876 | return (oper == GT_NE); |
877 | } |
878 | |
879 | switch (oper) |
880 | { |
881 | case GT_EQ: |
882 | return v0 == v1; |
883 | case GT_NE: |
884 | return v0 != v1; |
885 | case GT_GT: |
886 | return v0 > v1; |
887 | case GT_GE: |
888 | return v0 >= v1; |
889 | case GT_LT: |
890 | return v0 < v1; |
891 | case GT_LE: |
892 | return v0 <= v1; |
893 | default: |
894 | // For any other value of 'oper', we will assert below |
895 | break; |
896 | } |
897 | } |
898 | noway_assert(!"Unhandled operation in EvalComparison<double>" ); |
899 | return 0; |
900 | } |
901 | |
902 | template <> |
903 | int ValueNumStore::EvalComparison<float>(VNFunc vnf, float v0, float v1) |
904 | { |
905 | // Here we handle specialized float comparisons. |
906 | |
907 | // We must check for a NaN argument as they they need special handling |
908 | bool hasNanArg = (_isnanf(v0) || _isnanf(v1)); |
909 | |
910 | if (vnf < VNF_Boundary) |
911 | { |
912 | genTreeOps oper = genTreeOps(vnf); |
913 | |
914 | if (hasNanArg) |
915 | { |
916 | // return false in all cases except for GT_NE; |
917 | return (oper == GT_NE); |
918 | } |
919 | |
920 | switch (oper) |
921 | { |
922 | case GT_EQ: |
923 | return v0 == v1; |
924 | case GT_NE: |
925 | return v0 != v1; |
926 | case GT_GT: |
927 | return v0 > v1; |
928 | case GT_GE: |
929 | return v0 >= v1; |
930 | case GT_LT: |
931 | return v0 < v1; |
932 | case GT_LE: |
933 | return v0 <= v1; |
934 | default: |
935 | // For any other value of 'oper', we will assert below |
936 | break; |
937 | } |
938 | } |
939 | else // must be a VNF_ function |
940 | { |
941 | if (hasNanArg) |
942 | { |
943 | // always returns true |
944 | return false; |
945 | } |
946 | |
947 | switch (vnf) |
948 | { |
949 | case VNF_GT_UN: |
950 | return v0 > v1; |
951 | case VNF_GE_UN: |
952 | return v0 >= v1; |
953 | case VNF_LT_UN: |
954 | return v0 < v1; |
955 | case VNF_LE_UN: |
956 | return v0 <= v1; |
957 | default: |
958 | // For any other value of 'vnf', we will assert below |
959 | break; |
960 | } |
961 | } |
962 | noway_assert(!"Unhandled operation in EvalComparison<float>" ); |
963 | return 0; |
964 | } |
965 | |
966 | template <typename T> |
967 | int ValueNumStore::EvalComparison(VNFunc vnf, T v0, T v1) |
968 | { |
969 | typedef typename jitstd::make_unsigned<T>::type UT; |
970 | |
971 | // Here we handle the compare ops that are the same for all integer types. |
972 | if (vnf < VNF_Boundary) |
973 | { |
974 | genTreeOps oper = genTreeOps(vnf); |
975 | switch (oper) |
976 | { |
977 | case GT_EQ: |
978 | return v0 == v1; |
979 | case GT_NE: |
980 | return v0 != v1; |
981 | case GT_GT: |
982 | return v0 > v1; |
983 | case GT_GE: |
984 | return v0 >= v1; |
985 | case GT_LT: |
986 | return v0 < v1; |
987 | case GT_LE: |
988 | return v0 <= v1; |
989 | default: |
990 | // For any other value of 'oper', we will assert below |
991 | break; |
992 | } |
993 | } |
994 | else // must be a VNF_ function |
995 | { |
996 | switch (vnf) |
997 | { |
998 | case VNF_GT_UN: |
999 | return T(UT(v0) > UT(v1)); |
1000 | case VNF_GE_UN: |
1001 | return T(UT(v0) >= UT(v1)); |
1002 | case VNF_LT_UN: |
1003 | return T(UT(v0) < UT(v1)); |
1004 | case VNF_LE_UN: |
1005 | return T(UT(v0) <= UT(v1)); |
1006 | default: |
1007 | // For any other value of 'vnf', we will assert below |
1008 | break; |
1009 | } |
1010 | } |
1011 | noway_assert(!"Unhandled operation in EvalComparison<T>" ); |
1012 | return 0; |
1013 | } |
1014 | |
// Create a ValueNum for an exception set containing only the single
// exception 'x'.
//
ValueNum ValueNumStore::VNExcSetSingleton(ValueNum x)
{
    // A singleton is a cons cell whose tail is the empty exception set.
    return VNForFunc(TYP_REF, VNF_ExcSetCons, x, VNForEmptyExcSet());
}
// Create a ValueNumPair for an exception set singleton for 'xp', applying
// VNExcSetSingleton to the liberal and conservative VNs independently.
//
ValueNumPair ValueNumStore::VNPExcSetSingleton(ValueNumPair xp)
{
    return ValueNumPair(VNExcSetSingleton(xp.GetLiberal()), VNExcSetSingleton(xp.GetConservative()));
}
1027 | |
1028 | //------------------------------------------------------------------------------------------- |
1029 | // VNCheckAscending: - Helper method used to verify that elements in an exception set list |
1030 | // are sorted in ascending order. This method only checks that the |
1031 | // next value in the list has a greater value number than 'item'. |
1032 | // |
1033 | // Arguments: |
1034 | // item - The previous item visited in the exception set that we are iterating |
1035 | // xs1 - The tail portion of the exception set that we are iterating. |
1036 | // |
1037 | // Return Value: |
1038 | // - Returns true when the next value is greater than 'item' |
//                  - or when we have an empty list remaining.
1040 | // |
// Note:  - Duplicate items aren't allowed in an exception set
1042 | // Used to verify that exception sets are in ascending order when processing them. |
1043 | // |
1044 | bool ValueNumStore::VNCheckAscending(ValueNum item, ValueNum xs1) |
1045 | { |
1046 | if (xs1 == VNForEmptyExcSet()) |
1047 | { |
1048 | return true; |
1049 | } |
1050 | else |
1051 | { |
1052 | VNFuncApp funcXs1; |
1053 | bool b1 = GetVNFunc(xs1, &funcXs1); |
1054 | assert(b1 && funcXs1.m_func == VNF_ExcSetCons); // Precondition: xs1 is an exception set. |
1055 | |
1056 | return (item < funcXs1.m_args[0]); |
1057 | } |
1058 | } |
1059 | |
1060 | //------------------------------------------------------------------------------------------- |
1061 | // VNExcSetUnion: - Given two exception sets, performs a set Union operation |
1062 | // and returns the value number for the combined exception set. |
1063 | // |
1064 | // Arguments: - The arguments must be applications of VNF_ExcSetCons or the empty set |
1065 | // xs0 - The value number of the first exception set |
1066 | // xs1 - The value number of the second exception set |
1067 | // |
1068 | // Return Value: - The value number of the combined exception set |
1069 | // |
1070 | // Note: - Checks and relies upon the invariant that exceptions sets |
1071 | // 1. Have no duplicate values |
1072 | // 2. all elements in an exception set are in sorted order. |
1073 | // |
ValueNum ValueNumStore::VNExcSetUnion(ValueNum xs0, ValueNum xs1)
{
    // The union with the empty set is the other set.
    if (xs0 == VNForEmptyExcSet())
    {
        return xs1;
    }
    else if (xs1 == VNForEmptyExcSet())
    {
        return xs0;
    }
    else
    {
        // Both sets are non-empty cons lists sorted by ascending VN; merge
        // them recursively, keeping a single copy of any common element.
        VNFuncApp funcXs0;
        bool      b0 = GetVNFunc(xs0, &funcXs0);
        assert(b0 && funcXs0.m_func == VNF_ExcSetCons); // Precondition: xs0 is an exception set.
        VNFuncApp funcXs1;
        bool      b1 = GetVNFunc(xs1, &funcXs1);
        assert(b1 && funcXs1.m_func == VNF_ExcSetCons); // Precondition: xs1 is an exception set.
        ValueNum res = NoVN;
        if (funcXs0.m_args[0] < funcXs1.m_args[0])
        {
            assert(VNCheckAscending(funcXs0.m_args[0], funcXs0.m_args[1]));

            // add the lower one (from xs0) to the result, advance xs0
            res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs0.m_args[0], VNExcSetUnion(funcXs0.m_args[1], xs1));
        }
        else if (funcXs0.m_args[0] == funcXs1.m_args[0])
        {
            assert(VNCheckAscending(funcXs0.m_args[0], funcXs0.m_args[1]));
            assert(VNCheckAscending(funcXs1.m_args[0], funcXs1.m_args[1]));

            // Equal elements; add one (from xs0) to the result, advance both sets
            res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs0.m_args[0],
                            VNExcSetUnion(funcXs0.m_args[1], funcXs1.m_args[1]));
        }
        else
        {
            assert(funcXs0.m_args[0] > funcXs1.m_args[0]);
            assert(VNCheckAscending(funcXs1.m_args[0], funcXs1.m_args[1]));

            // add the lower one (from xs1) to the result, advance xs1
            res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs1.m_args[0], VNExcSetUnion(xs0, funcXs1.m_args[1]));
        }

        return res;
    }
}
1121 | |
1122 | //-------------------------------------------------------------------------------- |
1123 | // VNPExcSetUnion: - Returns a Value Number Pair that represents the set union |
1124 | // for both parts. |
1125 | // (see VNExcSetUnion for more details) |
1126 | // |
1127 | // Notes: - This method is used to form a Value Number Pair when we |
1128 | // want both the Liberal and Conservative Value Numbers |
1129 | // |
1130 | ValueNumPair ValueNumStore::VNPExcSetUnion(ValueNumPair xs0vnp, ValueNumPair xs1vnp) |
1131 | { |
1132 | return ValueNumPair(VNExcSetUnion(xs0vnp.GetLiberal(), xs1vnp.GetLiberal()), |
1133 | VNExcSetUnion(xs0vnp.GetConservative(), xs1vnp.GetConservative())); |
1134 | } |
1135 | |
1136 | //------------------------------------------------------------------------------------------- |
1137 | // VNExcSetIntersection: - Given two exception sets, performs a set Intersection operation |
1138 | // and returns the value number for this exception set. |
1139 | // |
1140 | // Arguments: - The arguments must be applications of VNF_ExcSetCons or the empty set |
1141 | // xs0 - The value number of the first exception set |
1142 | // xs1 - The value number of the second exception set |
1143 | // |
1144 | // Return Value: - The value number of the new exception set. |
1145 | // if the e are no values in common then VNForEmptyExcSet() is returned. |
1146 | // |
1147 | // Note: - Checks and relies upon the invariant that exceptions sets |
1148 | // 1. Have no duplicate values |
1149 | // 2. all elements in an exception set are in sorted order. |
1150 | // |
1151 | ValueNum ValueNumStore::VNExcSetIntersection(ValueNum xs0, ValueNum xs1) |
1152 | { |
1153 | if ((xs0 == VNForEmptyExcSet()) || (xs1 == VNForEmptyExcSet())) |
1154 | { |
1155 | return VNForEmptyExcSet(); |
1156 | } |
1157 | else |
1158 | { |
1159 | VNFuncApp funcXs0; |
1160 | bool b0 = GetVNFunc(xs0, &funcXs0); |
1161 | assert(b0 && funcXs0.m_func == VNF_ExcSetCons); // Precondition: xs0 is an exception set. |
1162 | VNFuncApp funcXs1; |
1163 | bool b1 = GetVNFunc(xs1, &funcXs1); |
1164 | assert(b1 && funcXs1.m_func == VNF_ExcSetCons); // Precondition: xs1 is an exception set. |
1165 | ValueNum res = NoVN; |
1166 | |
1167 | if (funcXs0.m_args[0] < funcXs1.m_args[0]) |
1168 | { |
1169 | assert(VNCheckAscending(funcXs0.m_args[0], funcXs0.m_args[1])); |
1170 | res = VNExcSetIntersection(funcXs0.m_args[1], xs1); |
1171 | } |
1172 | else if (funcXs0.m_args[0] == funcXs1.m_args[0]) |
1173 | { |
1174 | assert(VNCheckAscending(funcXs0.m_args[0], funcXs0.m_args[1])); |
1175 | assert(VNCheckAscending(funcXs1.m_args[0], funcXs1.m_args[1])); |
1176 | |
1177 | // Equal elements; Add it to the result. |
1178 | res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs0.m_args[0], |
1179 | VNExcSetIntersection(funcXs0.m_args[1], funcXs1.m_args[1])); |
1180 | } |
1181 | else |
1182 | { |
1183 | assert(funcXs0.m_args[0] > funcXs1.m_args[0]); |
1184 | assert(VNCheckAscending(funcXs1.m_args[0], funcXs1.m_args[1])); |
1185 | res = VNExcSetIntersection(xs0, funcXs1.m_args[1]); |
1186 | } |
1187 | |
1188 | return res; |
1189 | } |
1190 | } |
1191 | |
1192 | //-------------------------------------------------------------------------------- |
1193 | // VNPExcSetIntersection: - Returns a Value Number Pair that represents the set |
1194 | // intersection for both parts. |
1195 | // (see VNExcSetIntersection for more details) |
1196 | // |
1197 | // Notes: - This method is used to form a Value Number Pair when we |
1198 | // want both the Liberal and Conservative Value Numbers |
1199 | // |
1200 | ValueNumPair ValueNumStore::VNPExcSetIntersection(ValueNumPair xs0vnp, ValueNumPair xs1vnp) |
1201 | { |
1202 | return ValueNumPair(VNExcSetIntersection(xs0vnp.GetLiberal(), xs1vnp.GetLiberal()), |
1203 | VNExcSetIntersection(xs0vnp.GetConservative(), xs1vnp.GetConservative())); |
1204 | } |
1205 | |
1206 | //---------------------------------------------------------------------------------------- |
1207 | // VNExcIsSubset - Given two exception sets, returns true when vnCandidateSet is a |
1208 | // subset of vnFullSet |
1209 | // |
1210 | // Arguments: - The arguments must be applications of VNF_ExcSetCons or the empty set |
1211 | // vnFullSet - The value number of the 'full' exception set |
1212 | // vnCandidateSet - The value number of the 'candidate' exception set |
1213 | // |
1214 | // Return Value: - Returns true if every singleton ExcSet value in the vnCandidateSet |
1215 | // is also present in the vnFullSet. |
1216 | // |
1217 | // Note: - Checks and relies upon the invariant that exceptions sets |
1218 | // 1. Have no duplicate values |
1219 | // 2. all elements in an exception set are in sorted order. |
1220 | // |
bool ValueNumStore::VNExcIsSubset(ValueNum vnFullSet, ValueNum vnCandidateSet)
{
    // The empty set is a subset of every set.
    if (vnCandidateSet == VNForEmptyExcSet())
    {
        return true;
    }
    // A non-empty candidate set can never be a subset of an empty (or unknown) full set.
    else if ((vnFullSet == VNForEmptyExcSet()) || (vnFullSet == ValueNumStore::NoVN))
    {
        return false;
    }

    VNFuncApp funcXsFull;
    bool b0 = GetVNFunc(vnFullSet, &funcXsFull);
    assert(b0 && funcXsFull.m_func == VNF_ExcSetCons); // Precondition: vnFullSet is an exception set.
    VNFuncApp funcXsCand;
    bool b1 = GetVNFunc(vnCandidateSet, &funcXsCand);
    assert(b1 && funcXsCand.m_func == VNF_ExcSetCons); // Precondition: vnCandidateSet is an exception set.

    // The most recently visited item from each list, used below (in asserts)
    // to verify that both lists are in ascending order.
    ValueNum vnFullSetPrev = VNForNull();
    ValueNum vnCandSetPrev = VNForNull();

    // The unprocessed tails of each list.
    ValueNum vnFullSetRemainder = funcXsFull.m_args[1];
    ValueNum vnCandSetRemainder = funcXsCand.m_args[1];

    // Walk both sorted lists in lock-step (merge-style); every candidate item
    // must be matched by an equal item in the full set.
    while (true)
    {
        ValueNum vnFullSetItem = funcXsFull.m_args[0];
        ValueNum vnCandSetItem = funcXsCand.m_args[0];

        // Enforce that both sets are sorted by increasing ValueNumbers
        //
        assert(vnFullSetItem > vnFullSetPrev);
        assert(vnCandSetItem >= vnCandSetPrev); // equal when we didn't advance the candidate set

        if (vnFullSetItem > vnCandSetItem)
        {
            // The full set is past the candidate item without having matched it,
            // so the full set does not contain vnCandSetItem.
            return false;
        }
        // now we must have (vnFullSetItem <= vnCandSetItem)

        // When we have a matching value we advance the candidate set
        //
        if (vnFullSetItem == vnCandSetItem)
        {
            // Have we finished matching?
            //
            if (vnCandSetRemainder == VNForEmptyExcSet())
            {
                // We matched every item in the candidate set
                //
                return true;
            }

            // Advance the candidate set
            //
            b1 = GetVNFunc(vnCandSetRemainder, &funcXsCand);
            assert(b1 && funcXsCand.m_func == VNF_ExcSetCons); // Precondition: vnCandSetRemainder is an exception set.
            vnCandSetRemainder = funcXsCand.m_args[1];
        }

        if (vnFullSetRemainder == VNForEmptyExcSet())
        {
            // No more items are left in the full exception set
            return false;
        }

        //
        // We will advance the full set
        //
        b0 = GetVNFunc(vnFullSetRemainder, &funcXsFull);
        assert(b0 && funcXsFull.m_func == VNF_ExcSetCons); // Precondition: vnFullSetRemainder is an exception set.
        vnFullSetRemainder = funcXsFull.m_args[1];

        vnFullSetPrev = vnFullSetItem;
        vnCandSetPrev = vnCandSetItem;
    }
}
1299 | |
1300 | //------------------------------------------------------------------------------------- |
// VNUnpackExc: - Given a ValueNum 'vnWx', return via write back parameters both
1302 | // the normal and the exception set components. |
1303 | // |
1304 | // Arguments: |
1305 | // vnWx - A value number, it may have an exception set |
1306 | // pvn - a write back pointer to the normal value portion of 'vnWx' |
1307 | // pvnx - a write back pointer for the exception set portion of 'vnWx' |
1308 | // |
1309 | // Return Values: - This method signature is void but returns two values using |
1310 | // the write back parameters. |
1311 | // |
// Note: When 'vnWx' does not have an exception set, the original value is the
1313 | // normal value and is written to 'pvn' and VNForEmptyExcSet() is |
1314 | // written to 'pvnx'. |
1315 | // When we have an exception set 'vnWx' will be a VN func with m_func |
1316 | // equal to VNF_ValWithExc. |
1317 | // |
1318 | void ValueNumStore::VNUnpackExc(ValueNum vnWx, ValueNum* pvn, ValueNum* pvnx) |
1319 | { |
1320 | assert(vnWx != NoVN); |
1321 | VNFuncApp funcApp; |
1322 | if (GetVNFunc(vnWx, &funcApp) && funcApp.m_func == VNF_ValWithExc) |
1323 | { |
1324 | *pvn = funcApp.m_args[0]; |
1325 | *pvnx = funcApp.m_args[1]; |
1326 | } |
1327 | else |
1328 | { |
1329 | *pvn = vnWx; |
1330 | *pvnx = VNForEmptyExcSet(); |
1331 | } |
1332 | } |
1333 | |
1334 | //------------------------------------------------------------------------------------- |
// VNPUnpackExc: - Given a ValueNumPair 'vnpWx', return via write back parameters
1336 | // both the normal and the exception set components. |
1337 | // (see VNUnpackExc for more details) |
1338 | // |
1339 | // Notes: - This method is used to form a Value Number Pair when we |
1340 | // want both the Liberal and Conservative Value Numbers |
1341 | // |
1342 | void ValueNumStore::VNPUnpackExc(ValueNumPair vnpWx, ValueNumPair* pvnp, ValueNumPair* pvnpx) |
1343 | { |
1344 | VNUnpackExc(vnpWx.GetLiberal(), pvnp->GetLiberalAddr(), pvnpx->GetLiberalAddr()); |
1345 | VNUnpackExc(vnpWx.GetConservative(), pvnp->GetConservativeAddr(), pvnpx->GetConservativeAddr()); |
1346 | } |
1347 | |
1348 | //------------------------------------------------------------------------------------- |
1349 | // VNUnionExcSet: - Given a ValueNum 'vnWx' and a current 'vnExcSet', return an |
1350 | // exception set of the Union of both exception sets. |
1351 | // |
1352 | // Arguments: |
1353 | // vnWx - A value number, it may have an exception set |
1354 | // vnExcSet - The value number for the current exception set |
1355 | // |
1356 | // Return Values: - The value number of the Union of the exception set of 'vnWx' |
1357 | // with the current 'vnExcSet'. |
1358 | // |
1359 | // Note: When 'vnWx' does not have an exception set, 'vnExcSet' is returned. |
1360 | // |
1361 | ValueNum ValueNumStore::VNUnionExcSet(ValueNum vnWx, ValueNum vnExcSet) |
1362 | { |
1363 | assert(vnWx != NoVN); |
1364 | VNFuncApp funcApp; |
1365 | if (GetVNFunc(vnWx, &funcApp) && funcApp.m_func == VNF_ValWithExc) |
1366 | { |
1367 | vnExcSet = VNExcSetUnion(funcApp.m_args[1], vnExcSet); |
1368 | } |
1369 | return vnExcSet; |
1370 | } |
1371 | |
1372 | //------------------------------------------------------------------------------------- |
1373 | // VNPUnionExcSet: - Given a ValueNum 'vnWx' and a current 'excSet', return an |
1374 | // exception set of the Union of both exception sets. |
1375 | // (see VNUnionExcSet for more details) |
1376 | // |
1377 | // Notes: - This method is used to form a Value Number Pair when we |
1378 | // want both the Liberal and Conservative Value Numbers |
1379 | // |
1380 | ValueNumPair ValueNumStore::VNPUnionExcSet(ValueNumPair vnpWx, ValueNumPair vnpExcSet) |
1381 | { |
1382 | return ValueNumPair(VNUnionExcSet(vnpWx.GetLiberal(), vnpExcSet.GetLiberal()), |
1383 | VNUnionExcSet(vnpWx.GetConservative(), vnpExcSet.GetConservative())); |
1384 | } |
1385 | |
1386 | //-------------------------------------------------------------------------------- |
1387 | // VNNormalValue: - Returns a Value Number that represents the result for the |
1388 | // normal (non-exceptional) evaluation for the expression. |
1389 | // |
1390 | // Arguments: |
1391 | // vn - The Value Number for the expression, including any excSet. |
1392 | // This excSet is an optional item and represents the set of |
1393 | // possible exceptions for the expression. |
1394 | // |
1395 | // Return Value: |
1396 | // - The Value Number for the expression without the exception set. |
1397 | // This can be the orginal 'vn', when there are no exceptions. |
1398 | // |
1399 | // Notes: - Whenever we have an exception set the Value Number will be |
1400 | // a VN func with VNF_ValWithExc. |
1401 | // This VN func has the normal value as m_args[0] |
1402 | // |
1403 | ValueNum ValueNumStore::VNNormalValue(ValueNum vn) |
1404 | { |
1405 | VNFuncApp funcApp; |
1406 | if (GetVNFunc(vn, &funcApp) && funcApp.m_func == VNF_ValWithExc) |
1407 | { |
1408 | return funcApp.m_args[0]; |
1409 | } |
1410 | else |
1411 | { |
1412 | return vn; |
1413 | } |
1414 | } |
1415 | |
1416 | //------------------------------------------------------------------------------------ |
1417 | // VNMakeNormalUnique: |
1418 | // |
1419 | // Arguments: |
//    orig - The current Value Number for the expression, including any excSet.
1421 | // This excSet is an optional item and represents the set of |
1422 | // possible exceptions for the expression. |
1423 | // |
1424 | // Return Value: |
1425 | // - The normal value is set to a new unique VN, while keeping |
1426 | // the excSet (if any) |
1427 | // |
1428 | ValueNum ValueNumStore::VNMakeNormalUnique(ValueNum orig) |
1429 | { |
1430 | // First Unpack the existing Norm,Exc for 'elem' |
1431 | ValueNum vnOrigNorm; |
1432 | ValueNum vnOrigExcSet; |
1433 | VNUnpackExc(orig, &vnOrigNorm, &vnOrigExcSet); |
1434 | |
1435 | // Replace the normal value with a unique ValueNum |
1436 | ValueNum vnUnique = VNForExpr(m_pComp->compCurBB, TypeOfVN(vnOrigNorm)); |
1437 | |
1438 | // Keep any ExcSet from 'elem' |
1439 | return VNWithExc(vnUnique, vnOrigExcSet); |
1440 | } |
1441 | |
1442 | //-------------------------------------------------------------------------------- |
1443 | // VNPMakeNormalUniquePair: |
1444 | // |
1445 | // Arguments: |
1446 | // vnp - The Value Number Pair for the expression, including any excSet. |
1447 | // |
1448 | // Return Value: |
1449 | // - The normal values are set to a new unique VNs, while keeping |
1450 | // the excSets (if any) |
1451 | // |
1452 | ValueNumPair ValueNumStore::VNPMakeNormalUniquePair(ValueNumPair vnp) |
1453 | { |
1454 | return ValueNumPair(VNMakeNormalUnique(vnp.GetLiberal()), VNMakeNormalUnique(vnp.GetConservative())); |
1455 | } |
1456 | |
1457 | //-------------------------------------------------------------------------------- |
1458 | // VNNormalValue: - Returns a Value Number that represents the result for the |
1459 | // normal (non-exceptional) evaluation for the expression. |
1460 | // |
1461 | // Arguments: |
1462 | // vnp - The Value Number Pair for the expression, including any excSet. |
1463 | // This excSet is an optional item and represents the set of |
1464 | // possible exceptions for the expression. |
1465 | // vnk - The ValueNumKind either liberal or conservative |
1466 | // |
1467 | // Return Value: |
1468 | // - The Value Number for the expression without the exception set. |
1469 | // This can be the orginal 'vn', when there are no exceptions. |
1470 | // |
1471 | // Notes: - Whenever we have an exception set the Value Number will be |
1472 | // a VN func with VNF_ValWithExc. |
1473 | // This VN func has the normal value as m_args[0] |
1474 | // |
1475 | ValueNum ValueNumStore::VNNormalValue(ValueNumPair vnp, ValueNumKind vnk) |
1476 | { |
1477 | return VNNormalValue(vnp.Get(vnk)); |
1478 | } |
1479 | |
1480 | //-------------------------------------------------------------------------------- |
1481 | // VNPNormalPair: - Returns a Value Number Pair that represents the result for the |
1482 | // normal (non-exceptional) evaluation for the expression. |
1483 | // (see VNNormalValue for more details) |
1484 | // Arguments: |
1485 | // vnp - The Value Number Pair for the expression, including any excSet. |
1486 | // |
1487 | // Notes: - This method is used to form a Value Number Pair using both |
1488 | // the Liberal and Conservative Value Numbers normal (non-exceptional) |
1489 | // |
1490 | ValueNumPair ValueNumStore::VNPNormalPair(ValueNumPair vnp) |
1491 | { |
1492 | return ValueNumPair(VNNormalValue(vnp.GetLiberal()), VNNormalValue(vnp.GetConservative())); |
1493 | } |
1494 | |
1495 | //--------------------------------------------------------------------------- |
1496 | // VNExceptionSet: - Returns a Value Number that represents the set of possible |
1497 | // exceptions that could be encountered for the expression. |
1498 | // |
1499 | // Arguments: |
1500 | // vn - The Value Number for the expression, including any excSet. |
1501 | // This excSet is an optional item and represents the set of |
1502 | // possible exceptions for the expression. |
1503 | // |
1504 | // Return Value: |
1505 | // - The Value Number for the set of exceptions of the expression. |
1506 | // If the 'vn' has no exception set then a special Value Number |
1507 | // representing the empty exception set is returned. |
1508 | // |
1509 | // Notes: - Whenever we have an exception set the Value Number will be |
1510 | // a VN func with VNF_ValWithExc. |
1511 | // This VN func has the exception set as m_args[1] |
1512 | // |
1513 | ValueNum ValueNumStore::VNExceptionSet(ValueNum vn) |
1514 | { |
1515 | VNFuncApp funcApp; |
1516 | if (GetVNFunc(vn, &funcApp) && funcApp.m_func == VNF_ValWithExc) |
1517 | { |
1518 | return funcApp.m_args[1]; |
1519 | } |
1520 | else |
1521 | { |
1522 | return VNForEmptyExcSet(); |
1523 | } |
1524 | } |
1525 | |
1526 | //-------------------------------------------------------------------------------- |
1527 | // VNPExceptionSet: - Returns a Value Number Pair that represents the set of possible |
1528 | // exceptions that could be encountered for the expression. |
1529 | // (see VNExceptionSet for more details) |
1530 | // |
1531 | // Notes: - This method is used to form a Value Number Pair when we |
1532 | // want both the Liberal and Conservative Value Numbers |
1533 | // |
1534 | ValueNumPair ValueNumStore::VNPExceptionSet(ValueNumPair vnp) |
1535 | { |
1536 | return ValueNumPair(VNExceptionSet(vnp.GetLiberal()), VNExceptionSet(vnp.GetConservative())); |
1537 | } |
1538 | |
1539 | //--------------------------------------------------------------------------- |
1540 | // VNWithExc: - Returns a Value Number that also can have both a normal value |
1541 | // as well as am exception set. |
1542 | // |
1543 | // Arguments: |
1544 | // vn - The current Value Number for the expression, it may include |
1545 | // an exception set. |
1546 | // excSet - The Value Number representing the new exception set that |
1547 | // is to be added to any exceptions already present in 'vn' |
1548 | // |
1549 | // Return Value: |
1550 | // - The new Value Number for the combination the two inputs. |
1551 | // If the 'excSet' is the special Value Number representing |
1552 | // the empty exception set then 'vn' is returned. |
1553 | // |
1554 | // Notes: - We use a Set Union operation, 'VNExcSetUnion', to add any |
1555 | // new exception items from 'excSet' to the existing set. |
1556 | // |
1557 | ValueNum ValueNumStore::VNWithExc(ValueNum vn, ValueNum excSet) |
1558 | { |
1559 | if (excSet == VNForEmptyExcSet()) |
1560 | { |
1561 | return vn; |
1562 | } |
1563 | else |
1564 | { |
1565 | ValueNum vnNorm; |
1566 | ValueNum vnX; |
1567 | VNUnpackExc(vn, &vnNorm, &vnX); |
1568 | return VNForFunc(TypeOfVN(vnNorm), VNF_ValWithExc, vnNorm, VNExcSetUnion(vnX, excSet)); |
1569 | } |
1570 | } |
1571 | |
1572 | //-------------------------------------------------------------------------------- |
1573 | // VNPWithExc: - Returns a Value Number Pair that also can have both a normal value |
1574 | // as well as am exception set. |
1575 | // (see VNWithExc for more details) |
1576 | // |
// Notes: - This method is used to form a Value Number Pair when we
1578 | // want both the Liberal and Conservative Value Numbers |
1579 | // |
1580 | ValueNumPair ValueNumStore::VNPWithExc(ValueNumPair vnp, ValueNumPair excSetVNP) |
1581 | { |
1582 | return ValueNumPair(VNWithExc(vnp.GetLiberal(), excSetVNP.GetLiberal()), |
1583 | VNWithExc(vnp.GetConservative(), excSetVNP.GetConservative())); |
1584 | } |
1585 | |
1586 | bool ValueNumStore::IsKnownNonNull(ValueNum vn) |
1587 | { |
1588 | if (vn == NoVN) |
1589 | { |
1590 | return false; |
1591 | } |
1592 | VNFuncApp funcAttr; |
1593 | return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_KnownNonNull) != 0; |
1594 | } |
1595 | |
1596 | bool ValueNumStore::IsSharedStatic(ValueNum vn) |
1597 | { |
1598 | if (vn == NoVN) |
1599 | { |
1600 | return false; |
1601 | } |
1602 | VNFuncApp funcAttr; |
1603 | return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_SharedStatic) != 0; |
1604 | } |
1605 | |
1606 | ValueNumStore::Chunk::(CompAllocator alloc, |
1607 | ValueNum* pNextBaseVN, |
1608 | var_types typ, |
1609 | ChunkExtraAttribs attribs, |
1610 | BasicBlock::loopNumber loopNum) |
1611 | : m_defs(nullptr), m_numUsed(0), m_baseVN(*pNextBaseVN), m_typ(typ), m_attribs(attribs), m_loopNum(loopNum) |
1612 | { |
1613 | // Allocate "m_defs" here, according to the typ/attribs pair. |
1614 | switch (attribs) |
1615 | { |
1616 | case CEA_None: |
1617 | case CEA_NotAField: |
1618 | break; // Nothing to do. |
1619 | case CEA_Const: |
1620 | switch (typ) |
1621 | { |
1622 | case TYP_INT: |
1623 | m_defs = new (alloc) Alloc<TYP_INT>::Type[ChunkSize]; |
1624 | break; |
1625 | case TYP_FLOAT: |
1626 | m_defs = new (alloc) Alloc<TYP_FLOAT>::Type[ChunkSize]; |
1627 | break; |
1628 | case TYP_LONG: |
1629 | m_defs = new (alloc) Alloc<TYP_LONG>::Type[ChunkSize]; |
1630 | break; |
1631 | case TYP_DOUBLE: |
1632 | m_defs = new (alloc) Alloc<TYP_DOUBLE>::Type[ChunkSize]; |
1633 | break; |
1634 | case TYP_BYREF: |
1635 | m_defs = new (alloc) Alloc<TYP_BYREF>::Type[ChunkSize]; |
1636 | break; |
1637 | case TYP_REF: |
1638 | // We allocate space for a single REF constant, NULL, so we can access these values uniformly. |
1639 | // Since this value is always the same, we represent it as a static. |
1640 | m_defs = &s_specialRefConsts[0]; |
1641 | break; // Nothing to do. |
1642 | default: |
1643 | assert(false); // Should not reach here. |
1644 | } |
1645 | break; |
1646 | |
1647 | case CEA_Handle: |
1648 | m_defs = new (alloc) VNHandle[ChunkSize]; |
1649 | break; |
1650 | |
1651 | case CEA_Func0: |
1652 | m_defs = new (alloc) VNFunc[ChunkSize]; |
1653 | break; |
1654 | |
1655 | case CEA_Func1: |
1656 | m_defs = new (alloc) VNDefFunc1Arg[ChunkSize]; |
1657 | break; |
1658 | case CEA_Func2: |
1659 | m_defs = new (alloc) VNDefFunc2Arg[ChunkSize]; |
1660 | break; |
1661 | case CEA_Func3: |
1662 | m_defs = new (alloc) VNDefFunc3Arg[ChunkSize]; |
1663 | break; |
1664 | case CEA_Func4: |
1665 | m_defs = new (alloc) VNDefFunc4Arg[ChunkSize]; |
1666 | break; |
1667 | default: |
1668 | unreached(); |
1669 | } |
1670 | *pNextBaseVN += ChunkSize; |
1671 | } |
1672 | |
1673 | ValueNumStore::Chunk* ValueNumStore::(var_types typ, |
1674 | ChunkExtraAttribs attribs, |
1675 | BasicBlock::loopNumber loopNum) |
1676 | { |
1677 | Chunk* res; |
1678 | unsigned index; |
1679 | if (loopNum == MAX_LOOP_NUM) |
1680 | { |
1681 | // Loop nest is unknown/irrelevant for this VN. |
1682 | index = attribs; |
1683 | } |
1684 | else |
1685 | { |
1686 | // Loop nest is interesting. Since we know this is only true for unique VNs, we know attribs will |
1687 | // be CEA_None and can just index based on loop number. |
1688 | noway_assert(attribs == CEA_None); |
1689 | // Map NOT_IN_LOOP -> MAX_LOOP_NUM to make the index range contiguous [0..MAX_LOOP_NUM] |
1690 | index = CEA_Count + (loopNum == BasicBlock::NOT_IN_LOOP ? MAX_LOOP_NUM : loopNum); |
1691 | } |
1692 | ChunkNum cn = m_curAllocChunk[typ][index]; |
1693 | if (cn != NoChunk) |
1694 | { |
1695 | res = m_chunks.Get(cn); |
1696 | if (res->m_numUsed < ChunkSize) |
1697 | { |
1698 | return res; |
1699 | } |
1700 | } |
1701 | // Otherwise, must allocate a new one. |
1702 | res = new (m_alloc) Chunk(m_alloc, &m_nextChunkBase, typ, attribs, loopNum); |
1703 | cn = m_chunks.Push(res); |
1704 | m_curAllocChunk[typ][index] = cn; |
1705 | return res; |
1706 | } |
1707 | |
1708 | ValueNum ValueNumStore::VNForIntCon(INT32 cnsVal) |
1709 | { |
1710 | if (IsSmallIntConst(cnsVal)) |
1711 | { |
1712 | unsigned ind = cnsVal - SmallIntConstMin; |
1713 | ValueNum vn = m_VNsForSmallIntConsts[ind]; |
1714 | if (vn != NoVN) |
1715 | { |
1716 | return vn; |
1717 | } |
1718 | vn = GetVNForIntCon(cnsVal); |
1719 | m_VNsForSmallIntConsts[ind] = vn; |
1720 | return vn; |
1721 | } |
1722 | else |
1723 | { |
1724 | return GetVNForIntCon(cnsVal); |
1725 | } |
1726 | } |
1727 | |
1728 | ValueNum ValueNumStore::VNForLongCon(INT64 cnsVal) |
1729 | { |
1730 | ValueNum res; |
1731 | if (GetLongCnsMap()->Lookup(cnsVal, &res)) |
1732 | { |
1733 | return res; |
1734 | } |
1735 | else |
1736 | { |
1737 | Chunk* c = GetAllocChunk(TYP_LONG, CEA_Const); |
1738 | unsigned offsetWithinChunk = c->AllocVN(); |
1739 | res = c->m_baseVN + offsetWithinChunk; |
1740 | reinterpret_cast<INT64*>(c->m_defs)[offsetWithinChunk] = cnsVal; |
1741 | GetLongCnsMap()->Set(cnsVal, res); |
1742 | return res; |
1743 | } |
1744 | } |
1745 | |
1746 | ValueNum ValueNumStore::VNForFloatCon(float cnsVal) |
1747 | { |
1748 | ValueNum res; |
1749 | if (GetFloatCnsMap()->Lookup(cnsVal, &res)) |
1750 | { |
1751 | return res; |
1752 | } |
1753 | else |
1754 | { |
1755 | Chunk* c = GetAllocChunk(TYP_FLOAT, CEA_Const); |
1756 | unsigned offsetWithinChunk = c->AllocVN(); |
1757 | res = c->m_baseVN + offsetWithinChunk; |
1758 | reinterpret_cast<float*>(c->m_defs)[offsetWithinChunk] = cnsVal; |
1759 | GetFloatCnsMap()->Set(cnsVal, res); |
1760 | return res; |
1761 | } |
1762 | } |
1763 | |
1764 | ValueNum ValueNumStore::VNForDoubleCon(double cnsVal) |
1765 | { |
1766 | ValueNum res; |
1767 | if (GetDoubleCnsMap()->Lookup(cnsVal, &res)) |
1768 | { |
1769 | return res; |
1770 | } |
1771 | else |
1772 | { |
1773 | Chunk* c = GetAllocChunk(TYP_DOUBLE, CEA_Const); |
1774 | unsigned offsetWithinChunk = c->AllocVN(); |
1775 | res = c->m_baseVN + offsetWithinChunk; |
1776 | reinterpret_cast<double*>(c->m_defs)[offsetWithinChunk] = cnsVal; |
1777 | GetDoubleCnsMap()->Set(cnsVal, res); |
1778 | return res; |
1779 | } |
1780 | } |
1781 | |
1782 | ValueNum ValueNumStore::VNForByrefCon(INT64 cnsVal) |
1783 | { |
1784 | ValueNum res; |
1785 | if (GetByrefCnsMap()->Lookup(cnsVal, &res)) |
1786 | { |
1787 | return res; |
1788 | } |
1789 | else |
1790 | { |
1791 | Chunk* c = GetAllocChunk(TYP_BYREF, CEA_Const); |
1792 | unsigned offsetWithinChunk = c->AllocVN(); |
1793 | res = c->m_baseVN + offsetWithinChunk; |
1794 | reinterpret_cast<INT64*>(c->m_defs)[offsetWithinChunk] = cnsVal; |
1795 | GetByrefCnsMap()->Set(cnsVal, res); |
1796 | return res; |
1797 | } |
1798 | } |
1799 | |
1800 | ValueNum ValueNumStore::VNForCastOper(var_types castToType, bool srcIsUnsigned /*=false*/) |
1801 | { |
1802 | assert(castToType != TYP_STRUCT); |
1803 | INT32 cnsVal = INT32(castToType) << INT32(VCA_BitCount); |
1804 | assert((cnsVal & INT32(VCA_ReservedBits)) == 0); |
1805 | |
1806 | if (srcIsUnsigned) |
1807 | { |
1808 | // We record the srcIsUnsigned by or-ing a 0x01 |
1809 | cnsVal |= INT32(VCA_UnsignedSrc); |
1810 | } |
1811 | ValueNum result = VNForIntCon(cnsVal); |
1812 | |
1813 | #ifdef DEBUG |
1814 | if (m_pComp->verbose) |
1815 | { |
1816 | printf(" VNForCastOper(%s%s) is " FMT_VN "\n" , varTypeName(castToType), srcIsUnsigned ? ", unsignedSrc" : "" , |
1817 | result); |
1818 | } |
1819 | #endif |
1820 | |
1821 | return result; |
1822 | } |
1823 | |
1824 | ValueNum ValueNumStore::VNForHandle(ssize_t cnsVal, unsigned handleFlags) |
1825 | { |
1826 | assert((handleFlags & ~GTF_ICON_HDL_MASK) == 0); |
1827 | |
1828 | ValueNum res; |
1829 | VNHandle handle; |
1830 | VNHandle::Initialize(&handle, cnsVal, handleFlags); |
1831 | if (GetHandleMap()->Lookup(handle, &res)) |
1832 | { |
1833 | return res; |
1834 | } |
1835 | else |
1836 | { |
1837 | Chunk* c = GetAllocChunk(TYP_I_IMPL, CEA_Handle); |
1838 | unsigned offsetWithinChunk = c->AllocVN(); |
1839 | res = c->m_baseVN + offsetWithinChunk; |
1840 | reinterpret_cast<VNHandle*>(c->m_defs)[offsetWithinChunk] = handle; |
1841 | GetHandleMap()->Set(handle, res); |
1842 | return res; |
1843 | } |
1844 | } |
1845 | |
1846 | // Returns the value number for zero of the given "typ". |
1847 | // It has an unreached() for a "typ" that has no zero value, such as TYP_VOID. |
ValueNum ValueNumStore::VNZeroForType(var_types typ)
{
    switch (typ)
    {
        // All of the small integer types share the TYP_INT zero constant.
        case TYP_BOOL:
        case TYP_BYTE:
        case TYP_UBYTE:
        case TYP_SHORT:
        case TYP_USHORT:
        case TYP_INT:
        case TYP_UINT:
            return VNForIntCon(0);
        case TYP_LONG:
        case TYP_ULONG:
            return VNForLongCon(0);
        case TYP_FLOAT:
            return VNForFloatCon(0.0f);
        case TYP_DOUBLE:
            return VNForDoubleCon(0.0);
        case TYP_REF:
            // The zero value of a reference type is the null reference.
            return VNForNull();
        case TYP_BYREF:
            return VNForByrefCon(0);
        case TYP_STRUCT:
#ifdef FEATURE_SIMD
        // TODO-CQ: Improve value numbering for SIMD types.
        case TYP_SIMD8:
        case TYP_SIMD12:
        case TYP_SIMD16:
        case TYP_SIMD32:
#endif // FEATURE_SIMD
            // Aggregate zeros are represented by the "zero map".
            return VNForZeroMap(); // Recursion!

        // These should be unreached.
        default:
            unreached(); // Should handle all types.
    }
}
1886 | |
1887 | // Returns the value number for one of the given "typ". |
1888 | // It returns NoVN for a "typ" that has no one value, such as TYP_REF. |
ValueNum ValueNumStore::VNOneForType(var_types typ)
{
    switch (typ)
    {
        // All of the small integer types share the TYP_INT one constant.
        case TYP_BOOL:
        case TYP_BYTE:
        case TYP_UBYTE:
        case TYP_SHORT:
        case TYP_USHORT:
        case TYP_INT:
        case TYP_UINT:
            return VNForIntCon(1);
        case TYP_LONG:
        case TYP_ULONG:
            return VNForLongCon(1);
        case TYP_FLOAT:
            return VNForFloatCon(1.0f);
        case TYP_DOUBLE:
            return VNForDoubleCon(1.0);

        // Types with no meaningful "one" value (e.g. TYP_REF) yield NoVN.
        default:
            return NoVN;
    }
}
1913 | |
// Static definition of the special reference constants table; all three slots start out null.
class Object* ValueNumStore::s_specialRefConsts[] = {nullptr, nullptr, nullptr};
1915 | |
1916 | //---------------------------------------------------------------------------------------- |
1917 | // VNForFunc - Returns the ValueNum associated with 'func' |
1918 | // There is a one-to-one relationship between the ValueNum and 'func' |
1919 | // |
1920 | // Arguments: |
1921 | // typ - The type of the resulting ValueNum produced by 'func' |
1922 | // func - Any nullary VNFunc |
1923 | // |
1924 | // Return Value: - Returns the ValueNum associated with 'func' |
1925 | // |
1926 | // Note: - This method only handles Nullary operators (i.e., symbolic constants). |
1927 | // |
1928 | ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func) |
1929 | { |
1930 | assert(VNFuncArity(func) == 0); |
1931 | assert(func != VNF_NotAField); |
1932 | |
1933 | ValueNum resultVN; |
1934 | |
1935 | // Have we already assigned a ValueNum for 'func' ? |
1936 | // |
1937 | if (!GetVNFunc0Map()->Lookup(func, &resultVN)) |
1938 | { |
1939 | // Allocate a new ValueNum for 'func' |
1940 | Chunk* c = GetAllocChunk(typ, CEA_Func0); |
1941 | unsigned offsetWithinChunk = c->AllocVN(); |
1942 | resultVN = c->m_baseVN + offsetWithinChunk; |
1943 | reinterpret_cast<VNFunc*>(c->m_defs)[offsetWithinChunk] = func; |
1944 | GetVNFunc0Map()->Set(func, resultVN); |
1945 | } |
1946 | return resultVN; |
1947 | } |
1948 | |
1949 | //---------------------------------------------------------------------------------------- |
1950 | // VNForFunc - Returns the ValueNum associated with 'func'('arg0VN') |
1951 | // There is a one-to-one relationship between the ValueNum |
1952 | // and 'func'('arg0VN') |
1953 | // |
1954 | // Arguments: |
1955 | // typ - The type of the resulting ValueNum produced by 'func' |
1956 | // func - Any unary VNFunc |
1957 | // arg0VN - The ValueNum of the argument to 'func' |
1958 | // |
1959 | // Return Value: - Returns the ValueNum associated with 'func'('arg0VN') |
1960 | // |
1961 | // Note: - This method only handles Unary operators |
1962 | // |
1963 | ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN) |
1964 | { |
1965 | assert(arg0VN == VNNormalValue(arg0VN)); // Arguments don't carry exceptions. |
1966 | |
1967 | // Try to perform constant-folding. |
1968 | if (CanEvalForConstantArgs(func) && IsVNConstant(arg0VN)) |
1969 | { |
1970 | return EvalFuncForConstantArgs(typ, func, arg0VN); |
1971 | } |
1972 | |
1973 | ValueNum resultVN; |
1974 | |
1975 | // Have we already assigned a ValueNum for 'func'('arg0VN') ? |
1976 | // |
1977 | VNDefFunc1Arg fstruct(func, arg0VN); |
1978 | if (!GetVNFunc1Map()->Lookup(fstruct, &resultVN)) |
1979 | { |
1980 | // Otherwise, Allocate a new ValueNum for 'func'('arg0VN') |
1981 | // |
1982 | Chunk* c = GetAllocChunk(typ, CEA_Func1); |
1983 | unsigned offsetWithinChunk = c->AllocVN(); |
1984 | resultVN = c->m_baseVN + offsetWithinChunk; |
1985 | reinterpret_cast<VNDefFunc1Arg*>(c->m_defs)[offsetWithinChunk] = fstruct; |
1986 | // Record 'resultVN' in the Func1Map |
1987 | GetVNFunc1Map()->Set(fstruct, resultVN); |
1988 | } |
1989 | return resultVN; |
1990 | } |
1991 | |
1992 | //---------------------------------------------------------------------------------------- |
1993 | // VNForFunc - Returns the ValueNum associated with 'func'('arg0VN','arg1VN') |
1994 | // There is a one-to-one relationship between the ValueNum |
1995 | // and 'func'('arg0VN','arg1VN') |
1996 | // |
1997 | // Arguments: |
1998 | // typ - The type of the resulting ValueNum produced by 'func' |
1999 | // func - Any binary VNFunc |
2000 | // arg0VN - The ValueNum of the first argument to 'func' |
2001 | // arg1VN - The ValueNum of the second argument to 'func' |
2002 | // |
2003 | // Return Value: - Returns the ValueNum associated with 'func'('arg0VN','arg1VN') |
2004 | // |
2005 | // Note: - This method only handles Binary operators |
2006 | // |
2007 | ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN) |
2008 | { |
2009 | assert(arg0VN != NoVN && arg1VN != NoVN); |
2010 | assert(arg0VN == VNNormalValue(arg0VN)); // Arguments carry no exceptions. |
2011 | assert(arg1VN == VNNormalValue(arg1VN)); // Arguments carry no exceptions. |
2012 | assert(VNFuncArity(func) == 2); |
2013 | assert(func != VNF_MapSelect); // Precondition: use the special function VNForMapSelect defined for that. |
2014 | |
2015 | ValueNum resultVN; |
2016 | |
2017 | // When both operands are constants we can usually perform constant-folding. |
2018 | // |
2019 | if (CanEvalForConstantArgs(func) && IsVNConstant(arg0VN) && IsVNConstant(arg1VN)) |
2020 | { |
2021 | bool canFold = true; // Normally we will be able to fold this 'func' |
2022 | |
2023 | // Special case for VNF_Cast of constant handles |
2024 | // Don't allow an eval/fold of a GT_CAST(non-I_IMPL, Handle) |
2025 | // |
2026 | if ((func == VNF_Cast) && (typ != TYP_I_IMPL) && IsVNHandle(arg0VN)) |
2027 | { |
2028 | canFold = false; |
2029 | } |
2030 | |
2031 | // Currently CanEvalForConstantArgs() returns false for VNF_CastOvf |
2032 | // In the future we could handle this case in folding. |
2033 | assert(func != VNF_CastOvf); |
2034 | |
2035 | // It is possible for us to have mismatched types (see Bug 750863) |
2036 | // We don't try to fold a binary operation when one of the constant operands |
2037 | // is a floating-point constant and the other is not. |
2038 | // |
2039 | var_types arg0VNtyp = TypeOfVN(arg0VN); |
2040 | bool arg0IsFloating = varTypeIsFloating(arg0VNtyp); |
2041 | |
2042 | var_types arg1VNtyp = TypeOfVN(arg1VN); |
2043 | bool arg1IsFloating = varTypeIsFloating(arg1VNtyp); |
2044 | |
2045 | if (arg0IsFloating != arg1IsFloating) |
2046 | { |
2047 | canFold = false; |
2048 | } |
2049 | |
2050 | // NaNs are unordered wrt to other floats. While an ordered |
2051 | // comparison would return false, an unordered comparison |
2052 | // will return true if any operands are a NaN. We only perform |
2053 | // ordered NaN comparison in EvalComparison. |
2054 | if ((arg0IsFloating && (((arg0VNtyp == TYP_FLOAT) && _isnanf(GetConstantSingle(arg0VN))) || |
2055 | ((arg0VNtyp == TYP_DOUBLE) && _isnan(GetConstantDouble(arg0VN))))) || |
2056 | (arg1IsFloating && (((arg1VNtyp == TYP_FLOAT) && _isnanf(GetConstantSingle(arg1VN))) || |
2057 | ((arg1VNtyp == TYP_DOUBLE) && _isnan(GetConstantDouble(arg1VN)))))) |
2058 | { |
2059 | canFold = false; |
2060 | } |
2061 | |
2062 | if (typ == TYP_BYREF) |
2063 | { |
2064 | // We don't want to fold expressions that produce TYP_BYREF |
2065 | canFold = false; |
2066 | } |
2067 | |
2068 | bool shouldFold = canFold; |
2069 | |
2070 | if (canFold) |
2071 | { |
2072 | // We can fold the expression, but we don't want to fold |
2073 | // when the expression will always throw an exception |
2074 | shouldFold = VNEvalShouldFold(typ, func, arg0VN, arg1VN); |
2075 | } |
2076 | |
2077 | if (shouldFold) |
2078 | { |
2079 | return EvalFuncForConstantArgs(typ, func, arg0VN, arg1VN); |
2080 | } |
2081 | } |
2082 | // We canonicalize commutative operations. |
2083 | // (Perhaps should eventually handle associative/commutative [AC] ops -- but that gets complicated...) |
2084 | if (VNFuncIsCommutative(func)) |
2085 | { |
2086 | // Order arg0 arg1 by numerical VN value. |
2087 | if (arg0VN > arg1VN) |
2088 | { |
2089 | jitstd::swap(arg0VN, arg1VN); |
2090 | } |
2091 | } |
2092 | |
2093 | // Have we already assigned a ValueNum for 'func'('arg0VN','arg1VN') ? |
2094 | // |
2095 | VNDefFunc2Arg fstruct(func, arg0VN, arg1VN); |
2096 | if (!GetVNFunc2Map()->Lookup(fstruct, &resultVN)) |
2097 | { |
2098 | if (func == VNF_CastClass) |
2099 | { |
2100 | // In terms of values, a castclass always returns its second argument, the object being cast. |
2101 | // The operation may also throw an exception |
2102 | ValueNum vnExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_InvalidCastExc, arg1VN, arg0VN)); |
2103 | resultVN = VNWithExc(arg1VN, vnExcSet); |
2104 | } |
2105 | else |
2106 | { |
2107 | resultVN = EvalUsingMathIdentity(typ, func, arg0VN, arg1VN); |
2108 | |
2109 | // Do we have a valid resultVN? |
2110 | if ((resultVN == NoVN) || (TypeOfVN(resultVN) != typ)) |
2111 | { |
2112 | // Otherwise, Allocate a new ValueNum for 'func'('arg0VN','arg1VN') |
2113 | // |
2114 | Chunk* c = GetAllocChunk(typ, CEA_Func2); |
2115 | unsigned offsetWithinChunk = c->AllocVN(); |
2116 | resultVN = c->m_baseVN + offsetWithinChunk; |
2117 | reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offsetWithinChunk] = fstruct; |
2118 | // Record 'resultVN' in the Func2Map |
2119 | GetVNFunc2Map()->Set(fstruct, resultVN); |
2120 | } |
2121 | } |
2122 | } |
2123 | return resultVN; |
2124 | } |
2125 | |
2126 | //---------------------------------------------------------------------------------------- |
2127 | // VNForFunc - Returns the ValueNum associated with 'func'('arg0VN','arg1VN','arg2VN') |
2128 | // There is a one-to-one relationship between the ValueNum |
2129 | // and 'func'('arg0VN','arg1VN','arg2VN') |
2130 | // |
2131 | // Arguments: |
2132 | // typ - The type of the resulting ValueNum produced by 'func' |
2133 | // func - Any binary VNFunc |
2134 | // arg0VN - The ValueNum of the first argument to 'func' |
2135 | // arg1VN - The ValueNum of the second argument to 'func' |
2136 | // arg2VN - The ValueNum of the third argument to 'func' |
2137 | // |
2138 | // Return Value: - Returns the ValueNum associated with 'func'('arg0VN','arg1VN','arg1VN) |
2139 | // |
2140 | // Note: - This method only handles Trinary operations |
2141 | // We have to special case VNF_PhiDef, as it's first two arguments are not ValueNums |
2142 | // |
2143 | ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN) |
2144 | { |
2145 | assert(arg0VN != NoVN); |
2146 | assert(arg1VN != NoVN); |
2147 | assert(arg2VN != NoVN); |
2148 | assert(VNFuncArity(func) == 3); |
2149 | |
2150 | #ifdef DEBUG |
2151 | // Function arguments carry no exceptions. |
2152 | // |
2153 | if (func != VNF_PhiDef) |
2154 | { |
2155 | // For a phi definition first and second argument are "plain" local/ssa numbers. |
2156 | // (I don't know if having such non-VN arguments to a VN function is a good idea -- if we wanted to declare |
2157 | // ValueNum to be "short" it would be a problem, for example. But we'll leave it for now, with these explicit |
2158 | // exceptions.) |
2159 | assert(arg0VN == VNNormalValue(arg0VN)); |
2160 | assert(arg1VN == VNNormalValue(arg1VN)); |
2161 | } |
2162 | assert(arg2VN == VNNormalValue(arg2VN)); |
2163 | #endif |
2164 | assert(VNFuncArity(func) == 3); |
2165 | |
2166 | ValueNum resultVN; |
2167 | |
2168 | // Have we already assigned a ValueNum for 'func'('arg0VN','arg1VN','arg2VN') ? |
2169 | // |
2170 | VNDefFunc3Arg fstruct(func, arg0VN, arg1VN, arg2VN); |
2171 | if (!GetVNFunc3Map()->Lookup(fstruct, &resultVN)) |
2172 | { |
2173 | // Otherwise, Allocate a new ValueNum for 'func'('arg0VN','arg1VN','arg2VN') |
2174 | // |
2175 | Chunk* c = GetAllocChunk(typ, CEA_Func3); |
2176 | unsigned offsetWithinChunk = c->AllocVN(); |
2177 | resultVN = c->m_baseVN + offsetWithinChunk; |
2178 | reinterpret_cast<VNDefFunc3Arg*>(c->m_defs)[offsetWithinChunk] = fstruct; |
2179 | // Record 'resultVN' in the Func3Map |
2180 | GetVNFunc3Map()->Set(fstruct, resultVN); |
2181 | } |
2182 | return resultVN; |
2183 | } |
2184 | |
2185 | // ---------------------------------------------------------------------------------------- |
2186 | // VNForFunc - Returns the ValueNum associated with 'func'('arg0VN','arg1VN','arg2VN','arg3VN') |
2187 | // There is a one-to-one relationship between the ValueNum |
2188 | // and 'func'('arg0VN','arg1VN','arg2VN','arg3VN') |
2189 | // |
2190 | // Arguments: |
2191 | // typ - The type of the resulting ValueNum produced by 'func' |
2192 | // func - Any binary VNFunc |
2193 | // arg0VN - The ValueNum of the first argument to 'func' |
2194 | // arg1VN - The ValueNum of the second argument to 'func' |
2195 | // arg2VN - The ValueNum of the third argument to 'func' |
2196 | // arg3VN - The ValueNum of the fourth argument to 'func' |
2197 | // |
2198 | // Return Value: - Returns the ValueNum associated with 'func'('arg0VN','arg1VN','arg2VN','arg3VN') |
2199 | // |
2200 | // Note: Currently the only four operand func is the VNF_PtrToArrElem operation |
2201 | // |
2202 | ValueNum ValueNumStore::VNForFunc( |
2203 | var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN, ValueNum arg3VN) |
2204 | { |
2205 | assert(arg0VN != NoVN && arg1VN != NoVN && arg2VN != NoVN && arg3VN != NoVN); |
2206 | |
2207 | // Function arguments carry no exceptions. |
2208 | assert(arg0VN == VNNormalValue(arg0VN)); |
2209 | assert(arg1VN == VNNormalValue(arg1VN)); |
2210 | assert(arg2VN == VNNormalValue(arg2VN)); |
2211 | assert(arg3VN == VNNormalValue(arg3VN)); |
2212 | assert(VNFuncArity(func) == 4); |
2213 | |
2214 | ValueNum resultVN; |
2215 | |
2216 | // Have we already assigned a ValueNum for 'func'('arg0VN','arg1VN','arg2VN','arg3VN') ? |
2217 | // |
2218 | VNDefFunc4Arg fstruct(func, arg0VN, arg1VN, arg2VN, arg3VN); |
2219 | if (!GetVNFunc4Map()->Lookup(fstruct, &resultVN)) |
2220 | { |
2221 | // Otherwise, Allocate a new ValueNum for 'func'('arg0VN','arg1VN','arg2VN','arg3VN') |
2222 | // |
2223 | Chunk* c = GetAllocChunk(typ, CEA_Func4); |
2224 | unsigned offsetWithinChunk = c->AllocVN(); |
2225 | resultVN = c->m_baseVN + offsetWithinChunk; |
2226 | reinterpret_cast<VNDefFunc4Arg*>(c->m_defs)[offsetWithinChunk] = fstruct; |
2227 | // Record 'resultVN' in the Func4Map |
2228 | GetVNFunc4Map()->Set(fstruct, resultVN); |
2229 | } |
2230 | return resultVN; |
2231 | } |
2232 | |
2233 | //------------------------------------------------------------------------------ |
2234 | // VNForMapStore : Evaluate VNF_MapStore with the given arguments. |
2235 | // |
2236 | // |
2237 | // Arguments: |
2238 | // typ - Value type |
2239 | // arg0VN - Map value number |
2240 | // arg1VN - Index value number |
2241 | // arg2VN - New value for map[index] |
2242 | // |
2243 | // Return Value: |
2244 | // Value number for the result of the evaluation. |
2245 | |
2246 | ValueNum ValueNumStore::VNForMapStore(var_types typ, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN) |
2247 | { |
2248 | ValueNum result = VNForFunc(typ, VNF_MapStore, arg0VN, arg1VN, arg2VN); |
2249 | #ifdef DEBUG |
2250 | if (m_pComp->verbose) |
2251 | { |
2252 | printf(" VNForMapStore(" FMT_VN ", " FMT_VN ", " FMT_VN "):%s returns " , arg0VN, arg1VN, arg2VN, |
2253 | varTypeName(typ)); |
2254 | m_pComp->vnPrint(result, 1); |
2255 | printf("\n" ); |
2256 | } |
2257 | #endif |
2258 | return result; |
2259 | } |
2260 | |
2261 | //------------------------------------------------------------------------------ |
2262 | // VNForMapSelect : Evaluate VNF_MapSelect with the given arguments. |
2263 | // |
2264 | // |
2265 | // Arguments: |
2266 | // vnk - Value number kind |
2267 | // typ - Value type |
2268 | // arg0VN - Map value number |
2269 | // arg1VN - Index value number |
2270 | // |
2271 | // Return Value: |
2272 | // Value number for the result of the evaluation. |
2273 | // |
2274 | // Notes: |
2275 | // This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate |
2276 | // "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number |
2277 | // (liberal/conservative) to read from the SSA def referenced in the phi argument. |
2278 | |
2279 | ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types typ, ValueNum arg0VN, ValueNum arg1VN) |
2280 | { |
2281 | int budget = m_mapSelectBudget; |
2282 | bool usedRecursiveVN = false; |
2283 | ValueNum result = VNForMapSelectWork(vnk, typ, arg0VN, arg1VN, &budget, &usedRecursiveVN); |
2284 | |
2285 | // The remaining budget should always be between [0..m_mapSelectBudget] |
2286 | assert((budget >= 0) && (budget <= m_mapSelectBudget)); |
2287 | |
2288 | #ifdef DEBUG |
2289 | if (m_pComp->verbose) |
2290 | { |
2291 | printf(" VNForMapSelect(" FMT_VN ", " FMT_VN "):%s returns " , arg0VN, arg1VN, varTypeName(typ)); |
2292 | m_pComp->vnPrint(result, 1); |
2293 | printf("\n" ); |
2294 | } |
2295 | #endif |
2296 | return result; |
2297 | } |
2298 | |
2299 | //------------------------------------------------------------------------------ |
2300 | // VNForMapSelectWork : A method that does the work for VNForMapSelect and may call itself recursively. |
2301 | // |
2302 | // |
2303 | // Arguments: |
2304 | // vnk - Value number kind |
2305 | // typ - Value type |
2306 | // arg0VN - Zeroth argument |
2307 | // arg1VN - First argument |
2308 | // pBudget - Remaining budget for the outer evaluation |
2309 | // pUsedRecursiveVN - Out-parameter that is set to true iff RecursiveVN was returned from this method |
2310 | // or from a method called during one of recursive invocations. |
2311 | // |
2312 | // Return Value: |
2313 | // Value number for the result of the evaluation. |
2314 | // |
2315 | // Notes: |
2316 | // This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate |
2317 | // "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number |
2318 | // (liberal/conservative) to read from the SSA def referenced in the phi argument. |
2319 | |
2320 | ValueNum ValueNumStore::VNForMapSelectWork( |
2321 | ValueNumKind vnk, var_types typ, ValueNum arg0VN, ValueNum arg1VN, int* pBudget, bool* pUsedRecursiveVN) |
2322 | { |
2323 | TailCall: |
2324 | // This label allows us to directly implement a tail call by setting up the arguments, and doing a goto to here. |
2325 | assert(arg0VN != NoVN && arg1VN != NoVN); |
2326 | assert(arg0VN == VNNormalValue(arg0VN)); // Arguments carry no exceptions. |
2327 | assert(arg1VN == VNNormalValue(arg1VN)); // Arguments carry no exceptions. |
2328 | |
2329 | *pUsedRecursiveVN = false; |
2330 | |
2331 | #ifdef DEBUG |
2332 | // Provide a mechanism for writing tests that ensure we don't call this ridiculously often. |
2333 | m_numMapSels++; |
2334 | #if 1 |
2335 | // This printing is sometimes useful in debugging. |
2336 | // if ((m_numMapSels % 1000) == 0) printf("%d VNF_MapSelect applications.\n", m_numMapSels); |
2337 | #endif |
2338 | unsigned selLim = JitConfig.JitVNMapSelLimit(); |
2339 | assert(selLim == 0 || m_numMapSels < selLim); |
2340 | #endif |
2341 | ValueNum res; |
2342 | |
2343 | VNDefFunc2Arg fstruct(VNF_MapSelect, arg0VN, arg1VN); |
2344 | if (GetVNFunc2Map()->Lookup(fstruct, &res)) |
2345 | { |
2346 | return res; |
2347 | } |
2348 | else |
2349 | { |
2350 | // Give up if we've run out of budget. |
2351 | if (--(*pBudget) <= 0) |
2352 | { |
2353 | // We have to use 'nullptr' for the basic block here, because subsequent expressions |
2354 | // in different blocks may find this result in the VNFunc2Map -- other expressions in |
2355 | // the IR may "evaluate" to this same VNForExpr, so it is not "unique" in the sense |
2356 | // that permits the BasicBlock attribution. |
2357 | res = VNForExpr(nullptr, typ); |
2358 | GetVNFunc2Map()->Set(fstruct, res); |
2359 | return res; |
2360 | } |
2361 | |
2362 | // If it's recursive, stop the recursion. |
2363 | if (SelectIsBeingEvaluatedRecursively(arg0VN, arg1VN)) |
2364 | { |
2365 | *pUsedRecursiveVN = true; |
2366 | return RecursiveVN; |
2367 | } |
2368 | |
2369 | if (arg0VN == VNForZeroMap()) |
2370 | { |
2371 | return VNZeroForType(typ); |
2372 | } |
2373 | else if (IsVNFunc(arg0VN)) |
2374 | { |
2375 | VNFuncApp funcApp; |
2376 | GetVNFunc(arg0VN, &funcApp); |
2377 | if (funcApp.m_func == VNF_MapStore) |
2378 | { |
2379 | // select(store(m, i, v), i) == v |
2380 | if (funcApp.m_args[1] == arg1VN) |
2381 | { |
2382 | #if FEATURE_VN_TRACE_APPLY_SELECTORS |
2383 | JITDUMP(" AX1: select([" FMT_VN "]store(" FMT_VN ", " FMT_VN ", " FMT_VN "), " FMT_VN |
2384 | ") ==> " FMT_VN ".\n" , |
2385 | funcApp.m_args[0], arg0VN, funcApp.m_args[1], funcApp.m_args[2], arg1VN, funcApp.m_args[2]); |
2386 | #endif |
2387 | return funcApp.m_args[2]; |
2388 | } |
2389 | // i # j ==> select(store(m, i, v), j) == select(m, j) |
2390 | // Currently the only source of distinctions is when both indices are constants. |
2391 | else if (IsVNConstant(arg1VN) && IsVNConstant(funcApp.m_args[1])) |
2392 | { |
2393 | assert(funcApp.m_args[1] != arg1VN); // we already checked this above. |
2394 | #if FEATURE_VN_TRACE_APPLY_SELECTORS |
2395 | JITDUMP(" AX2: " FMT_VN " != " FMT_VN " ==> select([" FMT_VN "]store(" FMT_VN ", " FMT_VN |
2396 | ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN ").\n" , |
2397 | arg1VN, funcApp.m_args[1], arg0VN, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2], |
2398 | arg1VN, funcApp.m_args[0], arg1VN); |
2399 | #endif |
2400 | // This is the equivalent of the recursive tail call: |
2401 | // return VNForMapSelect(vnk, typ, funcApp.m_args[0], arg1VN); |
2402 | // Make sure we capture any exceptions from the "i" and "v" of the store... |
2403 | arg0VN = funcApp.m_args[0]; |
2404 | goto TailCall; |
2405 | } |
2406 | } |
2407 | else if (funcApp.m_func == VNF_PhiDef || funcApp.m_func == VNF_PhiMemoryDef) |
2408 | { |
2409 | unsigned lclNum = BAD_VAR_NUM; |
2410 | bool isMemory = false; |
2411 | VNFuncApp phiFuncApp; |
2412 | bool defArgIsFunc = false; |
2413 | if (funcApp.m_func == VNF_PhiDef) |
2414 | { |
2415 | lclNum = unsigned(funcApp.m_args[0]); |
2416 | defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp); |
2417 | } |
2418 | else |
2419 | { |
2420 | assert(funcApp.m_func == VNF_PhiMemoryDef); |
2421 | isMemory = true; |
2422 | defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp); |
2423 | } |
2424 | if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi) |
2425 | { |
2426 | // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh. |
2427 | // Get the first argument of the phi. |
2428 | |
2429 | // We need to be careful about breaking infinite recursion. Record the outer select. |
2430 | m_fixedPointMapSels.Push(VNDefFunc2Arg(VNF_MapSelect, arg0VN, arg1VN)); |
2431 | |
2432 | assert(IsVNConstant(phiFuncApp.m_args[0])); |
2433 | unsigned phiArgSsaNum = ConstantValue<unsigned>(phiFuncApp.m_args[0]); |
2434 | ValueNum phiArgVN; |
2435 | if (isMemory) |
2436 | { |
2437 | phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); |
2438 | } |
2439 | else |
2440 | { |
2441 | phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); |
2442 | } |
2443 | if (phiArgVN != ValueNumStore::NoVN) |
2444 | { |
2445 | bool allSame = true; |
2446 | ValueNum argRest = phiFuncApp.m_args[1]; |
2447 | ValueNum sameSelResult = |
2448 | VNForMapSelectWork(vnk, typ, phiArgVN, arg1VN, pBudget, pUsedRecursiveVN); |
2449 | |
2450 | // It is possible that we just now exceeded our budget, if so we need to force an early exit |
2451 | // and stop calling VNForMapSelectWork |
2452 | if (*pBudget <= 0) |
2453 | { |
2454 | // We don't have any budget remaining to verify that all phiArgs are the same |
2455 | // so setup the default failure case now. |
2456 | allSame = false; |
2457 | } |
2458 | |
2459 | while (allSame && argRest != ValueNumStore::NoVN) |
2460 | { |
2461 | ValueNum cur = argRest; |
2462 | VNFuncApp phiArgFuncApp; |
2463 | if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi) |
2464 | { |
2465 | cur = phiArgFuncApp.m_args[0]; |
2466 | argRest = phiArgFuncApp.m_args[1]; |
2467 | } |
2468 | else |
2469 | { |
2470 | argRest = ValueNumStore::NoVN; // Cause the loop to terminate. |
2471 | } |
2472 | assert(IsVNConstant(cur)); |
2473 | phiArgSsaNum = ConstantValue<unsigned>(cur); |
2474 | if (isMemory) |
2475 | { |
2476 | phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); |
2477 | } |
2478 | else |
2479 | { |
2480 | phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk); |
2481 | } |
2482 | if (phiArgVN == ValueNumStore::NoVN) |
2483 | { |
2484 | allSame = false; |
2485 | } |
2486 | else |
2487 | { |
2488 | bool usedRecursiveVN = false; |
2489 | ValueNum curResult = |
2490 | VNForMapSelectWork(vnk, typ, phiArgVN, arg1VN, pBudget, &usedRecursiveVN); |
2491 | *pUsedRecursiveVN |= usedRecursiveVN; |
2492 | if (sameSelResult == ValueNumStore::RecursiveVN) |
2493 | { |
2494 | sameSelResult = curResult; |
2495 | } |
2496 | if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult) |
2497 | { |
2498 | allSame = false; |
2499 | } |
2500 | } |
2501 | } |
2502 | if (allSame && sameSelResult != ValueNumStore::RecursiveVN) |
2503 | { |
2504 | // Make sure we're popping what we pushed. |
2505 | assert(FixedPointMapSelsTopHasValue(arg0VN, arg1VN)); |
2506 | m_fixedPointMapSels.Pop(); |
2507 | |
2508 | // To avoid exponential searches, we make sure that this result is memo-ized. |
2509 | // The result is always valid for memoization if we didn't rely on RecursiveVN to get it. |
2510 | // If RecursiveVN was used, we are processing a loop and we can't memo-ize this intermediate |
2511 | // result if, e.g., this block is in a multi-entry loop. |
2512 | if (!*pUsedRecursiveVN) |
2513 | { |
2514 | GetVNFunc2Map()->Set(fstruct, sameSelResult); |
2515 | } |
2516 | |
2517 | return sameSelResult; |
2518 | } |
2519 | // Otherwise, fall through to creating the select(phi(m1, m2), x) function application. |
2520 | } |
2521 | // Make sure we're popping what we pushed. |
2522 | assert(FixedPointMapSelsTopHasValue(arg0VN, arg1VN)); |
2523 | m_fixedPointMapSels.Pop(); |
2524 | } |
2525 | } |
2526 | } |
2527 | |
2528 | // Otherwise, assign a new VN for the function application. |
2529 | Chunk* c = GetAllocChunk(typ, CEA_Func2); |
2530 | unsigned offsetWithinChunk = c->AllocVN(); |
2531 | res = c->m_baseVN + offsetWithinChunk; |
2532 | reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offsetWithinChunk] = fstruct; |
2533 | GetVNFunc2Map()->Set(fstruct, res); |
2534 | return res; |
2535 | } |
2536 | } |
2537 | |
2538 | ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, ValueNum arg0VN) |
2539 | { |
2540 | assert(CanEvalForConstantArgs(func)); |
2541 | assert(IsVNConstant(arg0VN)); |
2542 | switch (TypeOfVN(arg0VN)) |
2543 | { |
2544 | case TYP_INT: |
2545 | { |
2546 | int resVal = EvalOp<int>(func, ConstantValue<int>(arg0VN)); |
2547 | // Unary op on a handle results in a handle. |
2548 | return IsVNHandle(arg0VN) ? VNForHandle(ssize_t(resVal), GetHandleFlags(arg0VN)) : VNForIntCon(resVal); |
2549 | } |
2550 | case TYP_LONG: |
2551 | { |
2552 | INT64 resVal = EvalOp<INT64>(func, ConstantValue<INT64>(arg0VN)); |
2553 | // Unary op on a handle results in a handle. |
2554 | return IsVNHandle(arg0VN) ? VNForHandle(ssize_t(resVal), GetHandleFlags(arg0VN)) : VNForLongCon(resVal); |
2555 | } |
2556 | case TYP_FLOAT: |
2557 | { |
2558 | float resVal = EvalOp<float>(func, ConstantValue<float>(arg0VN)); |
2559 | return VNForFloatCon(resVal); |
2560 | } |
2561 | case TYP_DOUBLE: |
2562 | { |
2563 | double resVal = EvalOp<double>(func, ConstantValue<double>(arg0VN)); |
2564 | return VNForDoubleCon(resVal); |
2565 | } |
2566 | case TYP_REF: |
2567 | { |
2568 | // If arg0 has a possible exception, it wouldn't have been constant. |
2569 | assert(!VNHasExc(arg0VN)); |
2570 | // Otherwise... |
2571 | assert(arg0VN == VNForNull()); // Only other REF constant. |
2572 | assert(func == VNFunc(GT_ARR_LENGTH)); // Only function we can apply to a REF constant! |
2573 | return VNWithExc(VNForVoid(), VNExcSetSingleton(VNForFunc(TYP_REF, VNF_NullPtrExc, VNForNull()))); |
2574 | } |
2575 | default: |
2576 | // We will assert below |
2577 | break; |
2578 | } |
2579 | noway_assert(!"Unhandled operation in EvalFuncForConstantArgs" ); |
2580 | return NoVN; |
2581 | } |
2582 | |
2583 | bool ValueNumStore::SelectIsBeingEvaluatedRecursively(ValueNum map, ValueNum ind) |
2584 | { |
2585 | for (unsigned i = 0; i < m_fixedPointMapSels.Size(); i++) |
2586 | { |
2587 | VNDefFunc2Arg& elem = m_fixedPointMapSels.GetRef(i); |
2588 | assert(elem.m_func == VNF_MapSelect); |
2589 | if (elem.m_arg0 == map && elem.m_arg1 == ind) |
2590 | { |
2591 | return true; |
2592 | } |
2593 | } |
2594 | return false; |
2595 | } |
2596 | |
#ifdef DEBUG
// Returns true iff the top of the fixed-point select stack is exactly
// select(map, index).  Debug-only sanity check used around Push/Pop pairs.
bool ValueNumStore::FixedPointMapSelsTopHasValue(ValueNum map, ValueNum index)
{
    if (m_fixedPointMapSels.Size() == 0)
    {
        return false;
    }
    const VNDefFunc2Arg& top = m_fixedPointMapSels.TopRef();
    return (top.m_func == VNF_MapSelect) && (top.m_arg0 == map) && (top.m_arg1 == index);
}
#endif
2608 | |
2609 | // Given an integer constant value number return its value as an int. |
2610 | // |
2611 | int ValueNumStore::GetConstantInt32(ValueNum argVN) |
2612 | { |
2613 | assert(IsVNConstant(argVN)); |
2614 | var_types argVNtyp = TypeOfVN(argVN); |
2615 | |
2616 | int result = 0; |
2617 | |
2618 | switch (argVNtyp) |
2619 | { |
2620 | case TYP_INT: |
2621 | result = ConstantValue<int>(argVN); |
2622 | break; |
2623 | #ifndef _TARGET_64BIT_ |
2624 | case TYP_REF: |
2625 | case TYP_BYREF: |
2626 | result = (int)ConstantValue<size_t>(argVN); |
2627 | break; |
2628 | #endif |
2629 | default: |
2630 | unreached(); |
2631 | } |
2632 | return result; |
2633 | } |
2634 | |
2635 | // Given an integer constant value number return its value as an INT64. |
2636 | // |
2637 | INT64 ValueNumStore::GetConstantInt64(ValueNum argVN) |
2638 | { |
2639 | assert(IsVNConstant(argVN)); |
2640 | var_types argVNtyp = TypeOfVN(argVN); |
2641 | |
2642 | INT64 result = 0; |
2643 | |
2644 | switch (argVNtyp) |
2645 | { |
2646 | case TYP_INT: |
2647 | result = (INT64)ConstantValue<int>(argVN); |
2648 | break; |
2649 | case TYP_LONG: |
2650 | result = ConstantValue<INT64>(argVN); |
2651 | break; |
2652 | case TYP_REF: |
2653 | case TYP_BYREF: |
2654 | result = (INT64)ConstantValue<size_t>(argVN); |
2655 | break; |
2656 | default: |
2657 | unreached(); |
2658 | } |
2659 | return result; |
2660 | } |
2661 | |
2662 | // Given a double constant value number return its value as a double. |
2663 | // |
2664 | double ValueNumStore::GetConstantDouble(ValueNum argVN) |
2665 | { |
2666 | assert(IsVNConstant(argVN)); |
2667 | assert(TypeOfVN(argVN) == TYP_DOUBLE); |
2668 | |
2669 | return ConstantValue<double>(argVN); |
2670 | } |
2671 | |
2672 | // Given a float constant value number return its value as a float. |
2673 | // |
2674 | float ValueNumStore::GetConstantSingle(ValueNum argVN) |
2675 | { |
2676 | assert(IsVNConstant(argVN)); |
2677 | assert(TypeOfVN(argVN) == TYP_FLOAT); |
2678 | |
2679 | return ConstantValue<float>(argVN); |
2680 | } |
2681 | |
// Compute the proper value number when the VNFunc has all constant arguments
// This essentially performs constant folding at value numbering time
//
// Arguments:
//    typ    - The type of the resulting ValueNum produced by 'func'
//    func   - Any binary VNFunc that CanEvalForConstantArgs() accepts
//    arg0VN - The ValueNum of the first constant argument to 'func'
//    arg1VN - The ValueNum of the second constant argument to 'func'
//
// Notes:
//    VNF_Cast and the all-floating-point case are dispatched to the
//    EvalCastForConstantArgs/EvalFuncForConstantFPArgs helpers; everything
//    else (int/long/ref/byref combinations) is folded directly below.
//
ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN)
{
    assert(CanEvalForConstantArgs(func));
    assert(IsVNConstant(arg0VN) && IsVNConstant(arg1VN));
    assert(!VNHasExc(arg0VN) && !VNHasExc(arg1VN)); // Otherwise, would not be constant.

    // if our func is the VNF_Cast operation we handle it first
    if (func == VNF_Cast)
    {
        return EvalCastForConstantArgs(typ, func, arg0VN, arg1VN);
    }

    var_types arg0VNtyp = TypeOfVN(arg0VN);
    var_types arg1VNtyp = TypeOfVN(arg1VN);

    // When both arguments are floating point types
    // We defer to the EvalFuncForConstantFPArgs()
    if (varTypeIsFloating(arg0VNtyp) && varTypeIsFloating(arg1VNtyp))
    {
        return EvalFuncForConstantFPArgs(typ, func, arg0VN, arg1VN);
    }

    // after this we shouldn't have to deal with floating point types for arg0VN or arg1VN
    assert(!varTypeIsFloating(arg0VNtyp));
    assert(!varTypeIsFloating(arg1VNtyp));

    // Stack-normalize the result type.
    if (varTypeIsSmall(typ))
    {
        typ = TYP_INT;
    }

    ValueNum result; // left uninitialized, we are required to initialize it on all paths below.

    // Are both args of the same type?
    if (arg0VNtyp == arg1VNtyp)
    {
        if (arg0VNtyp == TYP_INT)
        {
            int arg0Val = ConstantValue<int>(arg0VN);
            int arg1Val = ConstantValue<int>(arg1VN);

            if (VNFuncIsComparison(func))
            {
                // Comparisons always produce a TYP_INT 0/1 result.
                assert(typ == TYP_INT);
                result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
            }
            else
            {
                assert(typ == TYP_INT);
                int resultVal = EvalOp<int>(func, arg0Val, arg1Val);
                // Bin op on a handle results in a handle.
                ValueNum handleVN = IsVNHandle(arg0VN) ? arg0VN : IsVNHandle(arg1VN) ? arg1VN : NoVN;
                if (handleVN != NoVN)
                {
                    // The folded constant keeps the handle flags of whichever argument was a handle.
                    result = VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)); // Use VN for Handle
                }
                else
                {
                    result = VNForIntCon(resultVal);
                }
            }
        }
        else if (arg0VNtyp == TYP_LONG)
        {
            INT64 arg0Val = ConstantValue<INT64>(arg0VN);
            INT64 arg1Val = ConstantValue<INT64>(arg1VN);

            if (VNFuncIsComparison(func))
            {
                assert(typ == TYP_INT);
                result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
            }
            else
            {
                assert(typ == TYP_LONG);
                INT64 resultVal = EvalOp<INT64>(func, arg0Val, arg1Val);
                // Bin op on a handle results in a handle (same rule as the TYP_INT case above).
                ValueNum handleVN = IsVNHandle(arg0VN) ? arg0VN : IsVNHandle(arg1VN) ? arg1VN : NoVN;

                if (handleVN != NoVN)
                {
                    result = VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)); // Use VN for Handle
                }
                else
                {
                    result = VNForLongCon(resultVal);
                }
            }
        }
        else // both args are TYP_REF or both args are TYP_BYREF
        {
            INT64 arg0Val = ConstantValue<size_t>(arg0VN); // We represent ref/byref constants as size_t's.
            INT64 arg1Val = ConstantValue<size_t>(arg1VN); // Also we consider null to be zero.

            if (VNFuncIsComparison(func))
            {
                assert(typ == TYP_INT);
                result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
            }
            else if (typ == TYP_INT) // We could see GT_OR of a constant ByRef and Null
            {
                int resultVal = (int)EvalOp<INT64>(func, arg0Val, arg1Val);
                result = VNForIntCon(resultVal);
            }
            else // We could see GT_OR of a constant ByRef and Null
            {
                // NOTE(review): a TYP_LONG result is also produced with VNForByrefCon
                // here — confirm this is intentional rather than VNForLongCon.
                assert((typ == TYP_BYREF) || (typ == TYP_LONG));
                INT64 resultVal = EvalOp<INT64>(func, arg0Val, arg1Val);
                result = VNForByrefCon(resultVal);
            }
        }
    }
    else // We have args of different types
    {
        // We represent ref/byref constants as size_t's.
        // Also we consider null to be zero.
        //
        INT64 arg0Val = GetConstantInt64(arg0VN);
        INT64 arg1Val = GetConstantInt64(arg1VN);

        if (VNFuncIsComparison(func))
        {
            assert(typ == TYP_INT);
            result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
        }
        else if (typ == TYP_INT) // We could see GT_OR of an int and constant ByRef or Null
        {
            int resultVal = (int)EvalOp<INT64>(func, arg0Val, arg1Val);
            result = VNForIntCon(resultVal);
        }
        else
        {
            assert(typ != TYP_INT);
            INT64 resultVal = EvalOp<INT64>(func, arg0Val, arg1Val);

            // Wrap the 64-bit result in a constant VN of the requested type.
            switch (typ)
            {
                case TYP_BYREF:
                    result = VNForByrefCon(resultVal);
                    break;
                case TYP_LONG:
                    result = VNForLongCon(resultVal);
                    break;
                case TYP_REF:
                    assert(resultVal == 0); // Only valid REF constant
                    result = VNForNull();
                    break;
                default:
                    unreached();
            }
        }
    }

    return result;
}
2840 | |
2841 | // Compute the proper value number when the VNFunc has all constant floating-point arguments |
2842 | // This essentially must perform constant folding at value numbering time |
2843 | // |
2844 | ValueNum ValueNumStore::EvalFuncForConstantFPArgs(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN) |
2845 | { |
2846 | assert(CanEvalForConstantArgs(func)); |
2847 | assert(IsVNConstant(arg0VN) && IsVNConstant(arg1VN)); |
2848 | |
2849 | // We expect both argument types to be floating-point types |
2850 | var_types arg0VNtyp = TypeOfVN(arg0VN); |
2851 | var_types arg1VNtyp = TypeOfVN(arg1VN); |
2852 | |
2853 | assert(varTypeIsFloating(arg0VNtyp)); |
2854 | assert(varTypeIsFloating(arg1VNtyp)); |
2855 | |
2856 | // We also expect both arguments to be of the same floating-point type |
2857 | assert(arg0VNtyp == arg1VNtyp); |
2858 | |
2859 | ValueNum result; // left uninitialized, we are required to initialize it on all paths below. |
2860 | |
2861 | if (VNFuncIsComparison(func)) |
2862 | { |
2863 | assert(genActualType(typ) == TYP_INT); |
2864 | |
2865 | if (arg0VNtyp == TYP_FLOAT) |
2866 | { |
2867 | result = VNForIntCon(EvalComparison<float>(func, GetConstantSingle(arg0VN), GetConstantSingle(arg1VN))); |
2868 | } |
2869 | else |
2870 | { |
2871 | assert(arg0VNtyp == TYP_DOUBLE); |
2872 | result = VNForIntCon(EvalComparison<double>(func, GetConstantDouble(arg0VN), GetConstantDouble(arg1VN))); |
2873 | } |
2874 | } |
2875 | else |
2876 | { |
2877 | // We expect the return type to be the same as the argument type |
2878 | assert(varTypeIsFloating(typ)); |
2879 | assert(arg0VNtyp == typ); |
2880 | |
2881 | if (typ == TYP_FLOAT) |
2882 | { |
2883 | float floatResultVal = EvalOp<float>(func, GetConstantSingle(arg0VN), GetConstantSingle(arg1VN)); |
2884 | result = VNForFloatCon(floatResultVal); |
2885 | } |
2886 | else |
2887 | { |
2888 | assert(typ == TYP_DOUBLE); |
2889 | |
2890 | double doubleResultVal = EvalOp<double>(func, GetConstantDouble(arg0VN), GetConstantDouble(arg1VN)); |
2891 | result = VNForDoubleCon(doubleResultVal); |
2892 | } |
2893 | } |
2894 | |
2895 | return result; |
2896 | } |
2897 | |
// Compute the proper value number for a VNF_Cast with constant arguments
// This essentially must perform constant folding at value numbering time
//
// Arguments:
//    typ    - The type of the resulting ValueNum (stack-normalized below)
//    func   - Must be VNF_Cast
//    arg0VN - The constant value being cast
//    arg1VN - A TYP_INT constant that encodes the cast-to type together with
//             the "source is unsigned" flag (encoded by vnForCastOper())
//
ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN)
{
    assert(func == VNF_Cast);
    assert(IsVNConstant(arg0VN) && IsVNConstant(arg1VN));

    // Stack-normalize the result type.
    if (varTypeIsSmall(typ))
    {
        typ = TYP_INT;
    }

    var_types arg0VNtyp = TypeOfVN(arg0VN);
    var_types arg1VNtyp = TypeOfVN(arg1VN);

    // arg1VN is really the gtCastType that we are casting to
    assert(arg1VNtyp == TYP_INT);
    int arg1Val = ConstantValue<int>(arg1VN);
    assert(arg1Val >= 0);

    if (IsVNHandle(arg0VN))
    {
        // We don't allow handles to be cast to random var_types.
        assert(typ == TYP_I_IMPL);
    }

    // We previously encoded the castToType operation using vnForCastOper()
    // Unpack the unsigned-source bit and the target type from that encoding.
    //
    bool srcIsUnsigned = ((arg1Val & INT32(VCA_UnsignedSrc)) != 0);
    var_types castToType = var_types(arg1Val >> INT32(VCA_BitCount));

    var_types castFromType = arg0VNtyp;

    // Outer switch: the type we are casting FROM.
    // Inner switches: the type we are casting TO.
    switch (castFromType) // GT_CAST source type
    {
#ifndef _TARGET_64BIT_
        case TYP_REF:
        case TYP_BYREF:
#endif
        case TYP_INT:
        {
            int arg0Val = GetConstantInt32(arg0VN);

            switch (castToType)
            {
                case TYP_BYTE:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT8(arg0Val));
                case TYP_BOOL:
                case TYP_UBYTE:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT8(arg0Val));
                case TYP_SHORT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT16(arg0Val));
                case TYP_USHORT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT16(arg0Val));
                case TYP_INT:
                case TYP_UINT:
                    // Identity cast: reuse the argument's VN.
                    assert(typ == TYP_INT);
                    return arg0VN;
                case TYP_LONG:
                case TYP_ULONG:
                    assert(!IsVNHandle(arg0VN));
#ifdef _TARGET_64BIT_
                    if (typ == TYP_LONG)
                    {
                        if (srcIsUnsigned)
                        {
                            // Zero-extend: treat the 32-bit source as unsigned.
                            return VNForLongCon(INT64(unsigned(arg0Val)));
                        }
                        else
                        {
                            return VNForLongCon(INT64(arg0Val));
                        }
                    }
                    else
                    {
                        assert(typ == TYP_BYREF);
                        if (srcIsUnsigned)
                        {
                            return VNForByrefCon(INT64(unsigned(arg0Val)));
                        }
                        else
                        {
                            return VNForByrefCon(INT64(arg0Val));
                        }
                    }
#else // TARGET_32BIT
                    if (srcIsUnsigned)
                        return VNForLongCon(INT64(unsigned(arg0Val)));
                    else
                        return VNForLongCon(INT64(arg0Val));
#endif
                case TYP_BYREF:
                    assert(typ == TYP_BYREF);
                    return VNForByrefCon((INT64)arg0Val);
                case TYP_FLOAT:
                    assert(typ == TYP_FLOAT);
                    if (srcIsUnsigned)
                    {
                        return VNForFloatCon(float(unsigned(arg0Val)));
                    }
                    else
                    {
                        return VNForFloatCon(float(arg0Val));
                    }
                case TYP_DOUBLE:
                    assert(typ == TYP_DOUBLE);
                    if (srcIsUnsigned)
                    {
                        return VNForDoubleCon(double(unsigned(arg0Val)));
                    }
                    else
                    {
                        return VNForDoubleCon(double(arg0Val));
                    }
                default:
                    unreached();
            }
            break;
        }
        // This brace opens a scope before the case labels so that the
        // 'arg0Val' declaration below is legal inside the switch.
        {
#ifdef _TARGET_64BIT_
            case TYP_REF:
            case TYP_BYREF:
#endif
            case TYP_LONG:
                INT64 arg0Val = GetConstantInt64(arg0VN);

                switch (castToType)
                {
                    case TYP_BYTE:
                        assert(typ == TYP_INT);
                        return VNForIntCon(INT8(arg0Val));
                    case TYP_BOOL:
                    case TYP_UBYTE:
                        assert(typ == TYP_INT);
                        return VNForIntCon(UINT8(arg0Val));
                    case TYP_SHORT:
                        assert(typ == TYP_INT);
                        return VNForIntCon(INT16(arg0Val));
                    case TYP_USHORT:
                        assert(typ == TYP_INT);
                        return VNForIntCon(UINT16(arg0Val));
                    case TYP_INT:
                        assert(typ == TYP_INT);
                        return VNForIntCon(INT32(arg0Val));
                    case TYP_UINT:
                        assert(typ == TYP_INT);
                        return VNForIntCon(UINT32(arg0Val));
                    case TYP_LONG:
                    case TYP_ULONG:
                        // Identity cast: reuse the argument's VN.
                        assert(typ == TYP_LONG);
                        return arg0VN;
                    case TYP_BYREF:
                        assert(typ == TYP_BYREF);
                        return VNForByrefCon((INT64)arg0Val);
                    case TYP_FLOAT:
                        assert(typ == TYP_FLOAT);
                        if (srcIsUnsigned)
                        {
                            // Helper gives correct uint64->float rounding on all hosts.
                            return VNForFloatCon(FloatingPointUtils::convertUInt64ToFloat(UINT64(arg0Val)));
                        }
                        else
                        {
                            return VNForFloatCon(float(arg0Val));
                        }
                    case TYP_DOUBLE:
                        assert(typ == TYP_DOUBLE);
                        if (srcIsUnsigned)
                        {
                            // Helper gives correct uint64->double rounding on all hosts.
                            return VNForDoubleCon(FloatingPointUtils::convertUInt64ToDouble(UINT64(arg0Val)));
                        }
                        else
                        {
                            return VNForDoubleCon(double(arg0Val));
                        }
                    default:
                        unreached();
                }
        }
        case TYP_FLOAT:
        {
            float arg0Val = GetConstantSingle(arg0VN);

            // NOTE(review): out-of-range float->integral casts below rely on
            // host-compiler conversion behavior — confirm cross-host determinism.
            switch (castToType)
            {
                case TYP_BYTE:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT8(arg0Val));
                case TYP_BOOL:
                case TYP_UBYTE:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT8(arg0Val));
                case TYP_SHORT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT16(arg0Val));
                case TYP_USHORT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT16(arg0Val));
                case TYP_INT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT32(arg0Val));
                case TYP_UINT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT32(arg0Val));
                case TYP_LONG:
                    assert(typ == TYP_LONG);
                    return VNForLongCon(INT64(arg0Val));
                case TYP_ULONG:
                    assert(typ == TYP_LONG);
                    return VNForLongCon(UINT64(arg0Val));
                case TYP_FLOAT:
                    assert(typ == TYP_FLOAT);
                    return VNForFloatCon(arg0Val);
                case TYP_DOUBLE:
                    assert(typ == TYP_DOUBLE);
                    return VNForDoubleCon(double(arg0Val));
                default:
                    unreached();
            }
        }
        case TYP_DOUBLE:
        {
            double arg0Val = GetConstantDouble(arg0VN);

            // NOTE(review): out-of-range double->integral casts below rely on
            // host-compiler conversion behavior — confirm cross-host determinism.
            switch (castToType)
            {
                case TYP_BYTE:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT8(arg0Val));
                case TYP_BOOL:
                case TYP_UBYTE:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT8(arg0Val));
                case TYP_SHORT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT16(arg0Val));
                case TYP_USHORT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT16(arg0Val));
                case TYP_INT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(INT32(arg0Val));
                case TYP_UINT:
                    assert(typ == TYP_INT);
                    return VNForIntCon(UINT32(arg0Val));
                case TYP_LONG:
                    assert(typ == TYP_LONG);
                    return VNForLongCon(INT64(arg0Val));
                case TYP_ULONG:
                    assert(typ == TYP_LONG);
                    return VNForLongCon(UINT64(arg0Val));
                case TYP_FLOAT:
                    assert(typ == TYP_FLOAT);
                    return VNForFloatCon(float(arg0Val));
                case TYP_DOUBLE:
                    assert(typ == TYP_DOUBLE);
                    return VNForDoubleCon(arg0Val);
                default:
                    unreached();
            }
        }
        default:
            unreached();
    }
}
3169 | |
3170 | //----------------------------------------------------------------------------------- |
3171 | // CanEvalForConstantArgs: - Given a VNFunc value return true when we can perform |
3172 | // compile-time constant folding for the operation. |
3173 | // |
3174 | // Arguments: |
3175 | // vnf - The VNFunc that we are inquiring about |
3176 | // |
3177 | // Return Value: |
3178 | // - Returns true if we can always compute a constant result |
3179 | // when given all constant args. |
3180 | // |
3181 | // Notes: - When this method returns true, the logic to compute the |
3182 | // compile-time result must also be added to EvalOP, |
3183 | // EvalOpspecialized or EvalComparison |
3184 | // |
3185 | bool ValueNumStore::CanEvalForConstantArgs(VNFunc vnf) |
3186 | { |
3187 | if (vnf < VNF_Boundary) |
3188 | { |
3189 | genTreeOps oper = genTreeOps(vnf); |
3190 | |
3191 | switch (oper) |
3192 | { |
3193 | // Only return true for the node kinds that have code that supports |
3194 | // them in EvalOP, EvalOpspecialized or EvalComparison |
3195 | |
3196 | // Unary Ops |
3197 | case GT_NEG: |
3198 | case GT_NOT: |
3199 | case GT_BSWAP16: |
3200 | case GT_BSWAP: |
3201 | |
3202 | // Binary Ops |
3203 | case GT_ADD: |
3204 | case GT_SUB: |
3205 | case GT_MUL: |
3206 | case GT_DIV: |
3207 | case GT_MOD: |
3208 | |
3209 | case GT_UDIV: |
3210 | case GT_UMOD: |
3211 | |
3212 | case GT_AND: |
3213 | case GT_OR: |
3214 | case GT_XOR: |
3215 | |
3216 | case GT_LSH: |
3217 | case GT_RSH: |
3218 | case GT_RSZ: |
3219 | case GT_ROL: |
3220 | case GT_ROR: |
3221 | |
3222 | // Equality Ops |
3223 | case GT_EQ: |
3224 | case GT_NE: |
3225 | case GT_GT: |
3226 | case GT_GE: |
3227 | case GT_LT: |
3228 | case GT_LE: |
3229 | |
3230 | // We can evaluate these. |
3231 | return true; |
3232 | |
3233 | default: |
3234 | // We can not evaluate these. |
3235 | return false; |
3236 | } |
3237 | } |
3238 | else |
3239 | { |
3240 | // some VNF_ that we can evaluate |
3241 | switch (vnf) |
3242 | { |
3243 | // Consider adding: |
3244 | // case VNF_GT_UN: |
3245 | // case VNF_GE_UN: |
3246 | // case VNF_LT_UN: |
3247 | // case VNF_LE_UN: |
3248 | // |
3249 | |
3250 | case VNF_Cast: |
3251 | // We can evaluate these. |
3252 | return true; |
3253 | |
3254 | default: |
3255 | // We can not evaluate these. |
3256 | return false; |
3257 | } |
3258 | } |
3259 | } |
3260 | |
3261 | //---------------------------------------------------------------------------------------- |
3262 | // VNEvalShouldFold - Returns true if we should perform the folding operation. |
3263 | // It returns false if we don't want to fold the expression, |
3264 | // because it will always throw an exception. |
3265 | // |
3266 | // Arguments: |
3267 | // typ - The type of the resulting ValueNum produced by 'func' |
3268 | // func - Any binary VNFunc |
3269 | // arg0VN - The ValueNum of the first argument to 'func' |
3270 | // arg1VN - The ValueNum of the second argument to 'func' |
3271 | // |
3272 | // Return Value: - Returns true if we should perform a folding operation. |
3273 | // |
3274 | bool ValueNumStore::VNEvalShouldFold(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN) |
3275 | { |
3276 | bool shouldFold = true; |
3277 | |
3278 | // We have some arithmetic operations that will always throw |
3279 | // an exception given particular constant argument(s). |
3280 | // (i.e. integer division by zero) |
3281 | // |
3282 | // We will avoid performing any constant folding on them |
3283 | // since they won't actually produce any result. |
3284 | // Instead they always will throw an exception. |
3285 | // |
3286 | if (func < VNF_Boundary) |
3287 | { |
3288 | genTreeOps oper = genTreeOps(func); |
3289 | |
3290 | // Floating point operations do not throw exceptions |
3291 | // |
3292 | if (!varTypeIsFloating(typ)) |
3293 | { |
3294 | // Is this an integer divide/modulo that will always throw an exception? |
3295 | // |
3296 | if ((oper == GT_DIV) || (oper == GT_UDIV) || (oper == GT_MOD) || (oper == GT_UMOD)) |
3297 | { |
3298 | if ((TypeOfVN(arg0VN) != typ) || (TypeOfVN(arg1VN) != typ)) |
3299 | { |
3300 | // Just in case we have mismatched types |
3301 | shouldFold = false; |
3302 | } |
3303 | else |
3304 | { |
3305 | bool isUnsigned = (oper == GT_UDIV) || (oper == GT_UMOD); |
3306 | if (typ == TYP_LONG) |
3307 | { |
3308 | INT64 kArg0 = ConstantValue<INT64>(arg0VN); |
3309 | INT64 kArg1 = ConstantValue<INT64>(arg1VN); |
3310 | |
3311 | if (IsIntZero(kArg1)) |
3312 | { |
3313 | // Don't fold, we have a divide by zero |
3314 | shouldFold = false; |
3315 | } |
3316 | else if (!isUnsigned || IsOverflowIntDiv(kArg0, kArg1)) |
3317 | { |
3318 | // Don't fold, we have a divide of INT64_MIN/-1 |
3319 | shouldFold = false; |
3320 | } |
3321 | } |
3322 | else if (typ == TYP_INT) |
3323 | { |
3324 | int kArg0 = ConstantValue<int>(arg0VN); |
3325 | int kArg1 = ConstantValue<int>(arg1VN); |
3326 | |
3327 | if (IsIntZero(kArg1)) |
3328 | { |
3329 | // Don't fold, we have a divide by zero |
3330 | shouldFold = false; |
3331 | } |
3332 | else if (!isUnsigned && IsOverflowIntDiv(kArg0, kArg1)) |
3333 | { |
3334 | // Don't fold, we have a divide of INT32_MIN/-1 |
3335 | shouldFold = false; |
3336 | } |
3337 | } |
3338 | else // strange value for 'typ' |
3339 | { |
3340 | assert(!"unexpected 'typ' in VNForFunc constant folding" ); |
3341 | shouldFold = false; |
3342 | } |
3343 | } |
3344 | } |
3345 | } |
3346 | } |
3347 | else // (func > VNF_Boundary) |
3348 | { |
3349 | // OK to fold, |
3350 | // Add checks in the future if we support folding of VNF_ADD_OVF, etc... |
3351 | } |
3352 | |
3353 | return shouldFold; |
3354 | } |
3355 | |
3356 | //---------------------------------------------------------------------------------------- |
3357 | // EvalUsingMathIdentity |
3358 | // - Attempts to evaluate 'func' by using mathimatical identities |
3359 | // that can be applied to 'func'. |
3360 | // |
3361 | // Arguments: |
3362 | // typ - The type of the resulting ValueNum produced by 'func' |
3363 | // func - Any binary VNFunc |
3364 | // arg0VN - The ValueNum of the first argument to 'func' |
3365 | // arg1VN - The ValueNum of the second argument to 'func' |
3366 | // |
3367 | // Return Value: - When successful a ValueNum for the expression is returned. |
3368 | // When unsuccessful NoVN is returned. |
3369 | // |
3370 | ValueNum ValueNumStore::EvalUsingMathIdentity(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN) |
3371 | { |
3372 | ValueNum resultVN = NoVN; // set default result to unsuccessful |
3373 | |
3374 | if (typ == TYP_BYREF) // We don't want/need to optimize a zero byref |
3375 | { |
3376 | return resultVN; // return the unsuccessful value |
3377 | } |
3378 | |
3379 | // We have ways of evaluating some binary functions. |
3380 | if (func < VNF_Boundary) |
3381 | { |
3382 | switch (genTreeOps(func)) |
3383 | { |
3384 | ValueNum ZeroVN; |
3385 | ValueNum OneVN; |
3386 | |
3387 | case GT_ADD: |
3388 | // (0 + x) == x |
3389 | // (x + 0) == x |
3390 | // This identity does not apply for floating point (when x == -0.0) |
3391 | // |
3392 | if (!varTypeIsFloating(typ)) |
3393 | { |
3394 | ZeroVN = VNZeroForType(typ); |
3395 | if (VNIsEqual(arg0VN, ZeroVN)) |
3396 | { |
3397 | resultVN = arg1VN; |
3398 | } |
3399 | else if (VNIsEqual(arg1VN, ZeroVN)) |
3400 | { |
3401 | resultVN = arg0VN; |
3402 | } |
3403 | } |
3404 | break; |
3405 | |
3406 | case GT_SUB: |
3407 | // (x - 0) == x |
3408 | // (x - x) == 0 |
3409 | // This identity does not apply for floating point (when x == -0.0) |
3410 | // |
3411 | if (!varTypeIsFloating(typ)) |
3412 | { |
3413 | ZeroVN = VNZeroForType(typ); |
3414 | if (VNIsEqual(arg1VN, ZeroVN)) |
3415 | { |
3416 | resultVN = arg0VN; |
3417 | } |
3418 | else if (VNIsEqual(arg0VN, arg1VN)) |
3419 | { |
3420 | resultVN = ZeroVN; |
3421 | } |
3422 | } |
3423 | break; |
3424 | |
3425 | case GT_MUL: |
3426 | // These identities do not apply for floating point |
3427 | // |
3428 | if (!varTypeIsFloating(typ)) |
3429 | { |
3430 | // (0 * x) == 0 |
3431 | // (x * 0) == 0 |
3432 | ZeroVN = VNZeroForType(typ); |
3433 | if (arg0VN == ZeroVN) |
3434 | { |
3435 | resultVN = ZeroVN; |
3436 | } |
3437 | else if (arg1VN == ZeroVN) |
3438 | { |
3439 | resultVN = ZeroVN; |
3440 | } |
3441 | |
3442 | // (x * 1) == x |
3443 | // (1 * x) == x |
3444 | OneVN = VNOneForType(typ); |
3445 | if (arg0VN == OneVN) |
3446 | { |
3447 | resultVN = arg1VN; |
3448 | } |
3449 | else if (arg1VN == OneVN) |
3450 | { |
3451 | resultVN = arg0VN; |
3452 | } |
3453 | } |
3454 | break; |
3455 | |
3456 | case GT_DIV: |
3457 | case GT_UDIV: |
3458 | // (x / 1) == x |
3459 | // This identity does not apply for floating point |
3460 | // |
3461 | if (!varTypeIsFloating(typ)) |
3462 | { |
3463 | OneVN = VNOneForType(typ); |
3464 | if (arg1VN == OneVN) |
3465 | { |
3466 | resultVN = arg0VN; |
3467 | } |
3468 | } |
3469 | break; |
3470 | |
3471 | case GT_OR: |
3472 | case GT_XOR: |
3473 | // (0 | x) == x, (0 ^ x) == x |
3474 | // (x | 0) == x, (x ^ 0) == x |
3475 | ZeroVN = VNZeroForType(typ); |
3476 | if (arg0VN == ZeroVN) |
3477 | { |
3478 | resultVN = arg1VN; |
3479 | } |
3480 | else if (arg1VN == ZeroVN) |
3481 | { |
3482 | resultVN = arg0VN; |
3483 | } |
3484 | break; |
3485 | |
3486 | case GT_AND: |
3487 | // (x & 0) == 0 |
3488 | // (0 & x) == 0 |
3489 | ZeroVN = VNZeroForType(typ); |
3490 | if (arg0VN == ZeroVN) |
3491 | { |
3492 | resultVN = ZeroVN; |
3493 | } |
3494 | else if (arg1VN == ZeroVN) |
3495 | { |
3496 | resultVN = ZeroVN; |
3497 | } |
3498 | break; |
3499 | |
3500 | case GT_LSH: |
3501 | case GT_RSH: |
3502 | case GT_RSZ: |
3503 | case GT_ROL: |
3504 | case GT_ROR: |
3505 | // (x << 0) == x |
3506 | // (x >> 0) == x |
3507 | // (x rol 0) == x |
3508 | // (x ror 0) == x |
3509 | ZeroVN = VNZeroForType(typ); |
3510 | if (arg1VN == ZeroVN) |
3511 | { |
3512 | resultVN = arg0VN; |
3513 | } |
3514 | // (0 << x) == 0 |
3515 | // (0 >> x) == 0 |
3516 | // (0 rol x) == 0 |
3517 | // (0 ror x) == 0 |
3518 | if (arg0VN == ZeroVN) |
3519 | { |
3520 | resultVN = ZeroVN; |
3521 | } |
3522 | break; |
3523 | |
3524 | case GT_EQ: |
3525 | case GT_GE: |
3526 | case GT_LE: |
3527 | // (x == x) == true, (null == non-null) == false, (non-null == null) == false |
3528 | // (x <= x) == true, (null <= non-null) == false, (non-null <= null) == false |
3529 | // (x >= x) == true, (null >= non-null) == false, (non-null >= null) == false |
3530 | // |
3531 | // This identity does not apply for floating point (when x == NaN) |
3532 | // |
3533 | if (!varTypeIsFloating(typ)) |
3534 | { |
3535 | if (VNIsEqual(arg0VN, arg1VN)) |
3536 | { |
3537 | resultVN = VNOneForType(typ); |
3538 | } |
3539 | if ((arg0VN == VNForNull()) && IsKnownNonNull(arg1VN)) |
3540 | { |
3541 | resultVN = VNZeroForType(typ); |
3542 | } |
3543 | if (IsKnownNonNull(arg0VN) && (arg1VN == VNForNull())) |
3544 | { |
3545 | resultVN = VNZeroForType(typ); |
3546 | } |
3547 | } |
3548 | break; |
3549 | |
3550 | case GT_NE: |
3551 | case GT_GT: |
3552 | case GT_LT: |
3553 | // (x != x) == false, (null != non-null) == true, (non-null != null) == true |
3554 | // (x > x) == false, (null == non-null) == true, (non-null == null) == true |
3555 | // (x < x) == false, (null == non-null) == true, (non-null == null) == true |
3556 | // |
3557 | // This identity does not apply for floating point (when x == NaN) |
3558 | // |
3559 | if (!varTypeIsFloating(typ)) |
3560 | { |
3561 | if (VNIsEqual(arg0VN, arg1VN)) |
3562 | { |
3563 | resultVN = VNZeroForType(typ); |
3564 | } |
3565 | if ((arg0VN == VNForNull()) && IsKnownNonNull(arg1VN)) |
3566 | { |
3567 | resultVN = VNOneForType(typ); |
3568 | } |
3569 | if (IsKnownNonNull(arg0VN) && (arg1VN == VNForNull())) |
3570 | { |
3571 | resultVN = VNOneForType(typ); |
3572 | } |
3573 | } |
3574 | break; |
3575 | |
3576 | default: |
3577 | break; |
3578 | } |
3579 | } |
3580 | else // must be a VNF_ function |
3581 | { |
3582 | // These identities do not apply for floating point (when x == NaN) |
3583 | // |
3584 | if (VNIsEqual(arg0VN, arg1VN)) |
3585 | { |
3586 | // x <= x == true |
3587 | // x >= x == true |
3588 | if ((func == VNF_LE_UN) || (func == VNF_GE_UN)) |
3589 | { |
3590 | resultVN = VNOneForType(typ); |
3591 | } |
3592 | // x < x == false |
3593 | // x > x == false |
3594 | else if ((func == VNF_LT_UN) || (func == VNF_GT_UN)) |
3595 | { |
3596 | resultVN = VNZeroForType(typ); |
3597 | } |
3598 | } |
3599 | } |
3600 | return resultVN; |
3601 | } |
3602 | |
3603 | //------------------------------------------------------------------------ |
3604 | // VNForExpr: Opaque value number that is equivalent to itself but unique |
3605 | // from all other value numbers. |
3606 | // |
3607 | // Arguments: |
3608 | // block - BasicBlock where the expression that produces this value occurs. |
3609 | // May be nullptr to force conservative "could be anywhere" interpretation. |
3610 | // typ - Type of the expression in the IR |
3611 | // |
3612 | // Return Value: |
3613 | // A new value number distinct from any previously generated, that compares as equal |
3614 | // to itself, but not any other value number, and is annotated with the given |
3615 | // type and block. |
3616 | |
3617 | ValueNum ValueNumStore::VNForExpr(BasicBlock* block, var_types typ) |
3618 | { |
3619 | BasicBlock::loopNumber loopNum; |
3620 | if (block == nullptr) |
3621 | { |
3622 | loopNum = MAX_LOOP_NUM; |
3623 | } |
3624 | else |
3625 | { |
3626 | loopNum = block->bbNatLoopNum; |
3627 | } |
3628 | |
3629 | // We always allocate a new, unique VN in this call. |
3630 | // The 'typ' is used to partition the allocation of VNs into different chunks. |
3631 | Chunk* c = GetAllocChunk(typ, CEA_None, loopNum); |
3632 | unsigned offsetWithinChunk = c->AllocVN(); |
3633 | ValueNum result = c->m_baseVN + offsetWithinChunk; |
3634 | return result; |
3635 | } |
3636 | |
//------------------------------------------------------------------------
// VNApplySelectors: Walk a field sequence, applying a map-select for each
//    real field handle, and return the VN of the selected element.
//
// Arguments:
//    vnk               - Value number kind (liberal/conservative) to select under
//    map               - VN of the map being selected from
//    fieldSeq          - Sequence of fields to apply; nullptr means "no selection"
//    wbFinalStructSize - [out, optional] receives the size of the final struct
//                        field (0 for non-struct fields)
//
// Return Value:
//    The value number of the element reached by applying all selectors.
//
ValueNum ValueNumStore::VNApplySelectors(ValueNumKind  vnk,
                                         ValueNum      map,
                                         FieldSeqNode* fieldSeq,
                                         size_t*       wbFinalStructSize)
{
    if (fieldSeq == nullptr)
    {
        // Nothing to select: the map itself is the result.
        return map;
    }
    else
    {
        assert(fieldSeq != FieldSeqStore::NotAField());

        // Skip any "FirstElem" pseudo-fields or any "ConstantIndex" pseudo-fields
        if (fieldSeq->IsPseudoField())
        {
            return VNApplySelectors(vnk, map, fieldSeq->m_next, wbFinalStructSize);
        }

        // Otherwise, is a real field handle.
        CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd;
        CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
        ValueNum fldHndVN = VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
        noway_assert(fldHnd != nullptr);
        CorInfoType fieldCit = m_pComp->info.compCompHnd->getFieldType(fldHnd, &structHnd);
        var_types fieldType = JITtype2varType(fieldCit);

        size_t structSize = 0;
        if (varTypeIsStruct(fieldType))
        {
            structSize = m_pComp->info.compCompHnd->getClassSize(structHnd);
            // We do not normalize the type field accesses during importation unless they
            // are used in a call, return or assignment.
            if ((fieldType == TYP_STRUCT) && (structSize <= m_pComp->largestEnregisterableStructSize()))
            {
                fieldType = m_pComp->impNormStructType(structHnd);
            }
        }
        if (wbFinalStructSize != nullptr)
        {
            *wbFinalStructSize = structSize;
        }

#ifdef DEBUG
        if (m_pComp->verbose)
        {
            printf(" VNApplySelectors:\n" );
            const char* modName;
            const char* fldName = m_pComp->eeGetFieldName(fldHnd, &modName);
            printf(" VNForHandle(%s) is " FMT_VN ", fieldType is %s" , fldName, fldHndVN, varTypeName(fieldType));
            if (varTypeIsStruct(fieldType))
            {
                printf(", size = %d" , structSize);
            }
            printf("\n" );
        }
#endif

        if (fieldSeq->m_next != nullptr)
        {
            // More selectors remain: select this field, then recurse on the rest.
            ValueNum newMap = VNForMapSelect(vnk, fieldType, map, fldHndVN);
            return VNApplySelectors(vnk, newMap, fieldSeq->m_next, wbFinalStructSize);
        }
        else // end of fieldSeq
        {
            return VNForMapSelect(vnk, fieldType, map, fldHndVN);
        }
    }
}
3706 | |
//------------------------------------------------------------------------
// VNApplySelectorsTypeCheck: Reconcile the type of a selected element with
//    the type used by an indirection reading it, inserting a cast or a new
//    unique VN when they do not match.
//
// Arguments:
//    elem           - VN of the element that was selected
//    indType        - The type used by the read (indirection)
//    elemStructSize - Size of 'elem' when it is a TYP_STRUCT
//
// Return Value:
//    'elem' unchanged, a cast of 'elem' to 'indType', or a new unique VN
//    when the read cannot be given a meaningful value.
//
ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum elem, var_types indType, size_t elemStructSize)
{
    var_types elemTyp = TypeOfVN(elem);

    // Check if the elemTyp is matching/compatible

    if (indType != elemTyp)
    {
        // We are trying to read from an 'elem' of type 'elemType' using 'indType' read

        size_t elemTypSize = (elemTyp == TYP_STRUCT) ? elemStructSize : genTypeSize(elemTyp);
        size_t indTypeSize = genTypeSize(indType);

        if ((indType == TYP_REF) && (varTypeIsStruct(elemTyp)))
        {
            // indType is TYP_REF and elemTyp is TYP_STRUCT
            //
            // We have a pointer to a static that is a Boxed Struct
            //
            return elem;
        }
        else if (indTypeSize > elemTypSize)
        {
            // Reading beyond the end of 'elem'

            // return a new unique value number
            elem = VNMakeNormalUnique(elem);

            JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (reading beyond the end)\n" );
        }
        else if (varTypeIsStruct(indType))
        {
            // return a new unique value number
            elem = VNMakeNormalUnique(elem);

            JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (indType is TYP_STRUCT)\n" );
        }
        else
        {
            // We are trying to read an 'elem' of type 'elemType' using 'indType' read

            // insert a cast of elem to 'indType'
            elem = VNForCast(elem, indType, elemTyp);
        }
    }

    return elem;
}
3755 | |
//------------------------------------------------------------------------
// VNApplySelectorsAssignTypeCoerce: Reconcile the type of a value being
//    stored ("elem") with the type of the storing indirection ("indType").
//
// Arguments:
//    elem    - Value number of the value being stored
//    indType - Type of the storing indirection
//    block   - Block where the store occurs (currently unused here)
//
// Return Value:
//    "elem" unchanged when the types are compatible; a new unique VN for a
//    struct-typed mismatch; otherwise "elem" cast to "indType".
//
ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_types indType, BasicBlock* block)
{
    var_types elemTyp = TypeOfVN(elem);

    // Check if the elemTyp is matching/compatible

    if (indType != elemTyp)
    {
        // A constant of the widened ("actual") type is acceptable as-is.
        bool isConstant = IsVNConstant(elem);
        if (isConstant && (elemTyp == genActualType(indType)))
        {
            // (i.e. We recorded a constant of TYP_INT for a TYP_BYTE field)
        }
        else
        {
            // We are trying to write an 'elem' of type 'elemType' using 'indType' store

            if (varTypeIsStruct(indType))
            {
                // A struct store of a non-struct value cannot be tracked precisely.
                // return a new unique value number
                elem = VNMakeNormalUnique(elem);

                JITDUMP("    *** Mismatched types in VNApplySelectorsAssignTypeCoerce (indType is TYP_STRUCT)\n" );
            }
            else
            {
                // We are trying to write an 'elem' of type 'elemType' using 'indType' store

                // insert a cast of elem to 'indType'
                elem = VNForCast(elem, indType, elemTyp);

                JITDUMP("    Cast to %s inserted in VNApplySelectorsAssignTypeCoerce (elemTyp is %s)\n" ,
                        varTypeName(indType), varTypeName(elemTyp));
            }
        }
    }
    return elem;
}
3794 | |
3795 | //------------------------------------------------------------------------ |
3796 | // VNApplySelectorsAssign: Compute the value number corresponding to "map" but with |
3797 | // the element at "fieldSeq" updated to have type "elem"; this is the new memory |
3798 | // value for an assignment of value "elem" into the memory at location "fieldSeq" |
3799 | // that occurs in block "block" and has type "indType" (so long as the selectors |
3800 | // into that memory occupy disjoint locations, which is true for GcHeap). |
3801 | // |
3802 | // Arguments: |
3803 | // vnk - Identifies whether to recurse to Conservative or Liberal value numbers |
3804 | // when recursing through phis |
3805 | // map - Value number for the field map before the assignment |
3806 | // elem - Value number for the value being stored (to the given field) |
3807 | // indType - Type of the indirection storing the value to the field |
3808 | // block - Block where the assignment occurs |
3809 | // |
3810 | // Return Value: |
3811 | // The value number corresponding to memory after the assignment. |
3812 | |
3813 | ValueNum ValueNumStore::VNApplySelectorsAssign( |
3814 | ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum elem, var_types indType, BasicBlock* block) |
3815 | { |
3816 | if (fieldSeq == nullptr) |
3817 | { |
3818 | return VNApplySelectorsAssignTypeCoerce(elem, indType, block); |
3819 | } |
3820 | else |
3821 | { |
3822 | assert(fieldSeq != FieldSeqStore::NotAField()); |
3823 | |
3824 | // Skip any "FirstElem" pseudo-fields or any "ConstantIndex" pseudo-fields |
3825 | // These will occur, at least, in struct static expressions, for method table offsets. |
3826 | if (fieldSeq->IsPseudoField()) |
3827 | { |
3828 | return VNApplySelectorsAssign(vnk, map, fieldSeq->m_next, elem, indType, block); |
3829 | } |
3830 | |
3831 | // Otherwise, fldHnd is a real field handle. |
3832 | CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd; |
3833 | ValueNum fldHndVN = VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL); |
3834 | noway_assert(fldHnd != nullptr); |
3835 | CorInfoType fieldCit = m_pComp->info.compCompHnd->getFieldType(fldHnd); |
3836 | var_types fieldType = JITtype2varType(fieldCit); |
3837 | |
3838 | ValueNum elemAfter; |
3839 | if (fieldSeq->m_next) |
3840 | { |
3841 | #ifdef DEBUG |
3842 | if (m_pComp->verbose) |
3843 | { |
3844 | const char* modName; |
3845 | const char* fldName = m_pComp->eeGetFieldName(fldHnd, &modName); |
3846 | printf(" VNForHandle(%s) is " FMT_VN ", fieldType is %s\n" , fldName, fldHndVN, |
3847 | varTypeName(fieldType)); |
3848 | } |
3849 | #endif |
3850 | ValueNum fseqMap = VNForMapSelect(vnk, fieldType, map, fldHndVN); |
3851 | elemAfter = VNApplySelectorsAssign(vnk, fseqMap, fieldSeq->m_next, elem, indType, block); |
3852 | } |
3853 | else |
3854 | { |
3855 | #ifdef DEBUG |
3856 | if (m_pComp->verbose) |
3857 | { |
3858 | if (fieldSeq->m_next == nullptr) |
3859 | { |
3860 | printf(" VNApplySelectorsAssign:\n" ); |
3861 | } |
3862 | const char* modName; |
3863 | const char* fldName = m_pComp->eeGetFieldName(fldHnd, &modName); |
3864 | printf(" VNForHandle(%s) is " FMT_VN ", fieldType is %s\n" , fldName, fldHndVN, |
3865 | varTypeName(fieldType)); |
3866 | } |
3867 | #endif |
3868 | elemAfter = VNApplySelectorsAssignTypeCoerce(elem, indType, block); |
3869 | } |
3870 | |
3871 | ValueNum newMap = VNForMapStore(fieldType, map, fldHndVN, elemAfter); |
3872 | return newMap; |
3873 | } |
3874 | } |
3875 | |
3876 | ValueNumPair ValueNumStore::VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType) |
3877 | { |
3878 | size_t structSize = 0; |
3879 | ValueNum liberalVN = VNApplySelectors(VNK_Liberal, map.GetLiberal(), fieldSeq, &structSize); |
3880 | liberalVN = VNApplySelectorsTypeCheck(liberalVN, indType, structSize); |
3881 | |
3882 | structSize = 0; |
3883 | ValueNum conservVN = VNApplySelectors(VNK_Conservative, map.GetConservative(), fieldSeq, &structSize); |
3884 | conservVN = VNApplySelectorsTypeCheck(conservVN, indType, structSize); |
3885 | |
3886 | return ValueNumPair(liberalVN, conservVN); |
3887 | } |
3888 | |
3889 | bool ValueNumStore::IsVNNotAField(ValueNum vn) |
3890 | { |
3891 | return m_chunks.GetNoExpand(GetChunkNum(vn))->m_attribs == CEA_NotAField; |
3892 | } |
3893 | |
//------------------------------------------------------------------------
// VNForFieldSeq: Return a value number representing the given field sequence.
//
// Arguments:
//    fieldSeq - The field sequence to encode (may be null or "NotAField")
//
// Return Value:
//    VNForNull() for a null sequence; a fresh, never-reused VN for the
//    "NotAField" sentinel (so distinct occurrences never compare equal);
//    otherwise a VNF_FieldSeq function application built recursively,
//    which memoizes equal sequences to equal VNs.
//
ValueNum ValueNumStore::VNForFieldSeq(FieldSeqNode* fieldSeq)
{
    if (fieldSeq == nullptr)
    {
        return VNForNull();
    }
    else if (fieldSeq == FieldSeqStore::NotAField())
    {
        // We always allocate a new, unique VN in this call.
        Chunk* c = GetAllocChunk(TYP_REF, CEA_NotAField);
        unsigned offsetWithinChunk = c->AllocVN();
        ValueNum result = c->m_baseVN + offsetWithinChunk;
        return result;
    }
    else
    {
        // Encode the head field handle and recursively encode the tail.
        ssize_t fieldHndVal = ssize_t(fieldSeq->m_fieldHnd);
        ValueNum fieldHndVN = VNForHandle(fieldHndVal, GTF_ICON_FIELD_HDL);
        ValueNum seqNextVN = VNForFieldSeq(fieldSeq->m_next);
        ValueNum fieldSeqVN = VNForFunc(TYP_REF, VNF_FieldSeq, fieldHndVN, seqNextVN);

#ifdef DEBUG
        if (m_pComp->verbose)
        {
            printf("    FieldSeq" );
            vnDump(m_pComp, fieldSeqVN);
            printf(" is " FMT_VN "\n" , fieldSeqVN);
        }
#endif

        return fieldSeqVN;
    }
}
3927 | |
//------------------------------------------------------------------------
// FieldSeqVNToFieldSeq: Decode the field sequence represented by "vn"
//    (the inverse of VNForFieldSeq).
//
// Arguments:
//    vn - A VN produced by VNForFieldSeq
//
// Return Value:
//    nullptr for the null-sequence VN; the NotAField sentinel for a
//    VNF_NotAField application; otherwise the reconstructed field sequence.
//
FieldSeqNode* ValueNumStore::FieldSeqVNToFieldSeq(ValueNum vn)
{
    if (vn == VNForNull())
    {
        return nullptr;
    }

    assert(IsVNFunc(vn));

    VNFuncApp funcApp;
    GetVNFunc(vn, &funcApp);
    if (funcApp.m_func == VNF_NotAField)
    {
        return FieldSeqStore::NotAField();
    }

    // Rebuild head::tail from the VNF_FieldSeq application's two arguments.
    assert(funcApp.m_func == VNF_FieldSeq);
    const ssize_t fieldHndVal = ConstantValue<ssize_t>(funcApp.m_args[0]);
    FieldSeqNode* head =
        m_pComp->GetFieldSeqStore()->CreateSingleton(reinterpret_cast<CORINFO_FIELD_HANDLE>(fieldHndVal));
    FieldSeqNode* tail = FieldSeqVNToFieldSeq(funcApp.m_args[1]);
    return m_pComp->GetFieldSeqStore()->Append(head, tail);
}
3951 | |
//------------------------------------------------------------------------
// FieldSeqVNAppend: Concatenate two field-sequence VNs.
//
// Arguments:
//    fsVN1 - VN of the first (prefix) sequence
//    fsVN2 - VN of the second (suffix) sequence
//
// Return Value:
//    The VN of the concatenated sequence. If either input is "NotAField",
//    the result is a (fresh) NotAField VN, since the combined access can no
//    longer be described precisely.
//
ValueNum ValueNumStore::FieldSeqVNAppend(ValueNum fsVN1, ValueNum fsVN2)
{
    if (fsVN1 == VNForNull())
    {
        return fsVN2;
    }

    assert(IsVNFunc(fsVN1));

    VNFuncApp funcApp1;
    GetVNFunc(fsVN1, &funcApp1);

    if ((funcApp1.m_func == VNF_NotAField) || IsVNNotAField(fsVN2))
    {
        return VNForFieldSeq(FieldSeqStore::NotAField());
    }

    // Recurse down fsVN1's tail, then rebuild the head on the appended result.
    assert(funcApp1.m_func == VNF_FieldSeq);
    ValueNum tailRes = FieldSeqVNAppend(funcApp1.m_args[1], fsVN2);
    ValueNum fieldSeqVN = VNForFunc(TYP_REF, VNF_FieldSeq, funcApp1.m_args[0], tailRes);

#ifdef DEBUG
    if (m_pComp->verbose)
    {
        printf("  fieldSeq " FMT_VN " is " , fieldSeqVN);
        vnDump(m_pComp, fieldSeqVN);
        printf("\n" );
    }
#endif

    return fieldSeqVN;
}
3984 | |
3985 | ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, GenTree* opB) |
3986 | { |
3987 | if (opB->OperGet() == GT_CNS_INT) |
3988 | { |
3989 | FieldSeqNode* fldSeq = opB->gtIntCon.gtFieldSeq; |
3990 | if (fldSeq != nullptr) |
3991 | { |
3992 | return ExtendPtrVN(opA, fldSeq); |
3993 | } |
3994 | } |
3995 | return NoVN; |
3996 | } |
3997 | |
//------------------------------------------------------------------------
// ExtendPtrVN: If "opA"'s liberal VN is a recognized pointer-producing
//    function (PtrToLoc, PtrToStatic, PtrToArrElem), build a new VN of the
//    same kind whose field-sequence argument has "fldSeq" appended.
//
// Arguments:
//    opA    - Tree whose VN describes a pointer into a location
//    fldSeq - Field sequence to append (must be non-null)
//
// Return Value:
//    The extended pointer VN (with opA's exception set re-attached), or
//    NoVN if opA's VN is not one of the recognized pointer forms.
//
ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq)
{
    assert(fldSeq != nullptr);

    ValueNum res = NoVN;

    // Work on the normal (non-exceptional) part of opA's liberal VN; the
    // exception part is re-attached to the result at the end.
    ValueNum opAvnWx = opA->gtVNPair.GetLiberal();
    assert(VNIsValid(opAvnWx));
    ValueNum opAvn;
    ValueNum opAvnx;
    VNUnpackExc(opAvnWx, &opAvn, &opAvnx);
    assert(VNIsValid(opAvn) && VNIsValid(opAvnx));

    VNFuncApp funcApp;
    if (!GetVNFunc(opAvn, &funcApp))
    {
        return res;
    }

    if (funcApp.m_func == VNF_PtrToLoc)
    {
#ifdef DEBUG
        // For PtrToLoc, lib == cons.
        VNFuncApp consFuncApp;
        assert(GetVNFunc(VNConservativeNormalValue(opA->gtVNPair), &consFuncApp) && consFuncApp.Equals(funcApp));
#endif
        ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
        res = VNForFunc(TYP_BYREF, VNF_PtrToLoc, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeqVN));
    }
    else if (funcApp.m_func == VNF_PtrToStatic)
    {
        ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
        res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, FieldSeqVNAppend(funcApp.m_args[0], fldSeqVN));
    }
    else if (funcApp.m_func == VNF_PtrToArrElem)
    {
        // Args [0..2] (elem type, array, index) are preserved; only the trailing
        // field sequence (arg [3]) is extended.
        ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
        res = VNForFunc(TYP_BYREF, VNF_PtrToArrElem, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2],
                        FieldSeqVNAppend(funcApp.m_args[3], fldSeqVN));
    }
    if (res != NoVN)
    {
        res = VNWithExc(res, opAvnx);
    }
    return res;
}
4044 | |
//------------------------------------------------------------------------
// fgValueNumberArrIndexAssign: Compute the new GcHeap memory VN resulting
//    from storing "rhsVN" into array element "arrVN[inxVN]" (possibly through
//    the field sequence "fldSeq" within a struct element).
//
// Arguments:
//    elemTypeEq - Equivalence-class handle for the array's element type
//    arrVN      - Value number of the array reference
//    inxVN      - Value number of the index
//    fldSeq     - Selectors applied within the element (null, real, or NotAField)
//    rhsVN      - Value number of the value being stored
//    indType    - Type of the storing indirection
//
// Return Value:
//    The VN for GcHeap after the store. If the access cannot be tracked
//    precisely (NotAField, or a type-mismatched store), the entire map for
//    this element type is replaced with a new opaque VN.
//
ValueNum Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
                                               ValueNum             arrVN,
                                               ValueNum             inxVN,
                                               FieldSeqNode*        fldSeq,
                                               ValueNum             rhsVN,
                                               var_types            indType)
{
    bool      invalidateArray  = false;
    ValueNum  elemTypeEqVN     = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
    var_types arrElemType      = DecodeElemType(elemTypeEq);
    // Drill down: heap -> per-element-type map -> per-array map -> element.
    ValueNum  hAtArrType       = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, fgCurMemoryVN[GcHeap], elemTypeEqVN);
    ValueNum  hAtArrTypeAtArr  = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, hAtArrType, arrVN);
    ValueNum  hAtArrTypeAtArrAtInx = vnStore->VNForMapSelect(VNK_Liberal, arrElemType, hAtArrTypeAtArr, inxVN);

    ValueNum newValAtInx     = ValueNumStore::NoVN;
    ValueNum newValAtArr     = ValueNumStore::NoVN;
    ValueNum newValAtArrType = ValueNumStore::NoVN;

    if (fldSeq == FieldSeqStore::NotAField())
    {
        // This doesn't represent a proper array access
        JITDUMP("    *** NotAField sequence encountered in fgValueNumberArrIndexAssign\n" );

        // Store a new unique value for newValAtArrType
        newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_REF);
        invalidateArray = true;
    }
    else
    {
        // Note that this does the right thing if "fldSeq" is null -- returns last "rhs" argument.
        // This is the value that should be stored at "arr[inx]".
        newValAtInx =
            vnStore->VNApplySelectorsAssign(VNK_Liberal, hAtArrTypeAtArrAtInx, fldSeq, rhsVN, indType, compCurBB);

        var_types arrElemFldType = arrElemType; // Uses arrElemType unless we has a non-null fldSeq
        if (vnStore->IsVNFunc(newValAtInx))
        {
            VNFuncApp funcApp;
            vnStore->GetVNFunc(newValAtInx, &funcApp);
            if (funcApp.m_func == VNF_MapStore)
            {
                // A field store occurred within the element; its map type
                // reflects the field's type rather than the element type.
                arrElemFldType = vnStore->TypeOfVN(newValAtInx);
            }
        }

        if (indType != arrElemFldType)
        {
            // Mismatched types: Store between different types (indType into array of arrElemFldType)
            //

            JITDUMP("    *** Mismatched types in fgValueNumberArrIndexAssign\n" );

            // Store a new unique value for newValAtArrType
            newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_REF);
            invalidateArray = true;
        }
    }

    if (!invalidateArray)
    {
        // Rebuild the map chain bottom-up with the updated element value.
        newValAtArr     = vnStore->VNForMapStore(indType, hAtArrTypeAtArr, inxVN, newValAtInx);
        newValAtArrType = vnStore->VNForMapStore(TYP_REF, hAtArrType, arrVN, newValAtArr);
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("  hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), " , hAtArrType, fgCurMemoryVN[GcHeap]);

        if (arrElemType == TYP_STRUCT)
        {
            printf("%s[]).\n" , eeGetClassName(elemTypeEq));
        }
        else
        {
            printf("%s[]).\n" , varTypeName(arrElemType));
        }
        printf("  hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ")\n" , hAtArrTypeAtArr,
               hAtArrType, arrVN);
        printf("  hAtArrTypeAtArrAtInx " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), inx=" FMT_VN "):%s\n" ,
               hAtArrTypeAtArrAtInx, hAtArrTypeAtArr, inxVN, varTypeName(arrElemType));

        if (!invalidateArray)
        {
            printf("  newValAtInd " FMT_VN " is " , newValAtInx);
            vnStore->vnDump(this, newValAtInx);
            printf("\n" );

            printf("  newValAtArr " FMT_VN " is " , newValAtArr);
            vnStore->vnDump(this, newValAtArr);
            printf("\n" );
        }

        printf("  newValAtArrType " FMT_VN " is " , newValAtArrType);
        vnStore->vnDump(this, newValAtArrType);
        printf("\n" );
    }
#endif // DEBUG

    // Install the (possibly opaque) per-element-type map back into the heap map.
    return vnStore->VNForMapStore(TYP_REF, fgCurMemoryVN[GcHeap], elemTypeEqVN, newValAtArrType);
}
4146 | |
4147 | ValueNum Compiler::fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* pFuncApp, ValueNum addrXvn) |
4148 | { |
4149 | assert(vnStore->IsVNHandle(pFuncApp->m_args[0])); |
4150 | CORINFO_CLASS_HANDLE arrElemTypeEQ = CORINFO_CLASS_HANDLE(vnStore->ConstantValue<ssize_t>(pFuncApp->m_args[0])); |
4151 | ValueNum arrVN = pFuncApp->m_args[1]; |
4152 | ValueNum inxVN = pFuncApp->m_args[2]; |
4153 | FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(pFuncApp->m_args[3]); |
4154 | return fgValueNumberArrIndexVal(tree, arrElemTypeEQ, arrVN, inxVN, addrXvn, fldSeq); |
4155 | } |
4156 | |
//------------------------------------------------------------------------
// fgValueNumberArrIndexVal: Compute the VN of a load from array element
//    "arrVN[inxVN]" (possibly through field sequence "fldSeq"), and annotate
//    "tree" (when non-null) with the result.
//
// Arguments:
//    tree       - The indirection performing the load, or null
//    elemTypeEq - Equivalence-class handle for the array's element type
//    arrVN      - Value number of the array reference (normal value)
//    inxVN      - Value number of the index (normal value)
//    excVN      - Exception set to attach to the result
//    fldSeq     - Selectors applied within the element (null, real, or NotAField)
//
// Return Value:
//    The liberal VN of the loaded value (a new opaque VN for NotAField).
//
ValueNum Compiler::fgValueNumberArrIndexVal(GenTree*             tree,
                                            CORINFO_CLASS_HANDLE elemTypeEq,
                                            ValueNum             arrVN,
                                            ValueNum             inxVN,
                                            ValueNum             excVN,
                                            FieldSeqNode*        fldSeq)
{
    assert(tree == nullptr || tree->OperIsIndir());

    // The VN inputs are required to be non-exceptional values.
    assert(arrVN == vnStore->VNNormalValue(arrVN));
    assert(inxVN == vnStore->VNNormalValue(inxVN));

    var_types elemTyp = DecodeElemType(elemTypeEq);
    var_types indType = (tree == nullptr) ? elemTyp : tree->TypeGet();
    ValueNum  selectedElem;

    if (fldSeq == FieldSeqStore::NotAField())
    {
        // This doesn't represent a proper array access
        JITDUMP("    *** NotAField sequence encountered in fgValueNumberArrIndexVal\n" );

        // a new unique value number
        selectedElem = vnStore->VNForExpr(compCurBB, elemTyp);

#ifdef DEBUG
        if (verbose)
        {
            printf("  IND of PtrToArrElem is unique VN " FMT_VN ".\n" , selectedElem);
        }
#endif // DEBUG

        if (tree != nullptr)
        {
            tree->gtVNPair.SetBoth(selectedElem);
        }
    }
    else
    {
        // Drill down: heap -> per-element-type map -> per-array map -> element.
        ValueNum elemTypeEqVN    = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
        ValueNum hAtArrType      = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, fgCurMemoryVN[GcHeap], elemTypeEqVN);
        ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, hAtArrType, arrVN);
        ValueNum wholeElem       = vnStore->VNForMapSelect(VNK_Liberal, elemTyp, hAtArrTypeAtArr, inxVN);

#ifdef DEBUG
        if (verbose)
        {
            printf("  hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), " , hAtArrType, fgCurMemoryVN[GcHeap]);
            if (elemTyp == TYP_STRUCT)
            {
                printf("%s[]).\n" , eeGetClassName(elemTypeEq));
            }
            else
            {
                printf("%s[]).\n" , varTypeName(elemTyp));
            }

            printf("  hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ").\n" ,
                   hAtArrTypeAtArr, hAtArrType, arrVN);

            printf("  wholeElem " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), ind=" FMT_VN ").\n" , wholeElem,
                   hAtArrTypeAtArr, inxVN);
        }
#endif // DEBUG

        // Apply any in-element field selectors, then type-check against the
        // indirection type and re-attach the exception set.
        selectedElem          = wholeElem;
        size_t elemStructSize = 0;
        if (fldSeq)
        {
            selectedElem = vnStore->VNApplySelectors(VNK_Liberal, wholeElem, fldSeq, &elemStructSize);
            elemTyp      = vnStore->TypeOfVN(selectedElem);
        }
        selectedElem = vnStore->VNApplySelectorsTypeCheck(selectedElem, indType, elemStructSize);
        selectedElem = vnStore->VNWithExc(selectedElem, excVN);

#ifdef DEBUG
        if (verbose && (selectedElem != wholeElem))
        {
            printf("  selectedElem is " FMT_VN " after applying selectors.\n" , selectedElem);
        }
#endif // DEBUG

        if (tree != nullptr)
        {
            tree->gtVNPair.SetLiberal(selectedElem);

            // TODO-CQ: what to do here about exceptions? We don't have the array and ind conservative
            // values, so we don't have their exceptions. Maybe we should.
            tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
        }
    }

    return selectedElem;
}
4251 | |
4252 | ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN) |
4253 | { |
4254 | ValueNum memoryVN = fgCurMemoryVN[ByrefExposed]; |
4255 | // The memoization for VNFunc applications does not factor in the result type, so |
4256 | // VNF_ByrefExposedLoad takes the loaded type as an explicit parameter. |
4257 | ValueNum typeVN = vnStore->VNForIntCon(type); |
4258 | ValueNum loadVN = |
4259 | vnStore->VNForFunc(type, VNF_ByrefExposedLoad, typeVN, vnStore->VNNormalValue(pointerVN), memoryVN); |
4260 | |
4261 | return loadVN; |
4262 | } |
4263 | |
4264 | var_types ValueNumStore::TypeOfVN(ValueNum vn) |
4265 | { |
4266 | if (vn == NoVN) |
4267 | { |
4268 | return TYP_UNDEF; |
4269 | } |
4270 | |
4271 | Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn)); |
4272 | return c->m_typ; |
4273 | } |
4274 | |
4275 | //------------------------------------------------------------------------ |
4276 | // LoopOfVN: If the given value number is an opaque one associated with a particular |
4277 | // expression in the IR, give the loop number where the expression occurs; otherwise, |
4278 | // returns MAX_LOOP_NUM. |
4279 | // |
4280 | // Arguments: |
4281 | // vn - Value number to query |
4282 | // |
4283 | // Return Value: |
//    The corresponding block's bbNatLoopNum, which may be BasicBlock::NOT_IN_LOOP.
4285 | // Returns MAX_LOOP_NUM if this VN is not an opaque value number associated with |
4286 | // a particular expression/location in the IR. |
4287 | |
4288 | BasicBlock::loopNumber ValueNumStore::LoopOfVN(ValueNum vn) |
4289 | { |
4290 | if (vn == NoVN) |
4291 | { |
4292 | return MAX_LOOP_NUM; |
4293 | } |
4294 | |
4295 | Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn)); |
4296 | return c->m_loopNum; |
4297 | } |
4298 | |
4299 | bool ValueNumStore::IsVNConstant(ValueNum vn) |
4300 | { |
4301 | if (vn == NoVN) |
4302 | { |
4303 | return false; |
4304 | } |
4305 | Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn)); |
4306 | if (c->m_attribs == CEA_Const) |
4307 | { |
4308 | return vn != VNForVoid(); // Void is not a "real" constant -- in the sense that it represents no value. |
4309 | } |
4310 | else |
4311 | { |
4312 | return c->m_attribs == CEA_Handle; |
4313 | } |
4314 | } |
4315 | |
4316 | bool ValueNumStore::IsVNInt32Constant(ValueNum vn) |
4317 | { |
4318 | if (!IsVNConstant(vn)) |
4319 | { |
4320 | return false; |
4321 | } |
4322 | |
4323 | return TypeOfVN(vn) == TYP_INT; |
4324 | } |
4325 | |
4326 | unsigned ValueNumStore::GetHandleFlags(ValueNum vn) |
4327 | { |
4328 | assert(IsVNHandle(vn)); |
4329 | Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn)); |
4330 | unsigned offset = ChunkOffset(vn); |
4331 | VNHandle* handle = &reinterpret_cast<VNHandle*>(c->m_defs)[offset]; |
4332 | return handle->m_flags; |
4333 | } |
4334 | |
4335 | bool ValueNumStore::IsVNHandle(ValueNum vn) |
4336 | { |
4337 | if (vn == NoVN) |
4338 | { |
4339 | return false; |
4340 | } |
4341 | |
4342 | Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn)); |
4343 | return c->m_attribs == CEA_Handle; |
4344 | } |
4345 | |
4346 | bool ValueNumStore::IsVNConstantBound(ValueNum vn) |
4347 | { |
4348 | // Do we have "var < 100"? |
4349 | if (vn == NoVN) |
4350 | { |
4351 | return false; |
4352 | } |
4353 | |
4354 | VNFuncApp funcAttr; |
4355 | if (!GetVNFunc(vn, &funcAttr)) |
4356 | { |
4357 | return false; |
4358 | } |
4359 | if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT && |
4360 | funcAttr.m_func != (VNFunc)GT_GT) |
4361 | { |
4362 | return false; |
4363 | } |
4364 | |
4365 | return IsVNInt32Constant(funcAttr.m_args[0]) != IsVNInt32Constant(funcAttr.m_args[1]); |
4366 | } |
4367 | |
//------------------------------------------------------------------------
// GetConstantBoundInfo: Decompose a "var relop const" VN (as recognized by
//    IsVNConstantBound) into its operator, variable operand, and constant.
//    The operator is swapped when the constant is on the left so that the
//    returned form is always "cmpOpVN cmpOper constVal".
//
// Arguments:
//    vn   - A VN for which IsVNConstantBound(vn) holds
//    info - [out] Receives the decomposed compare
//
void ValueNumStore::GetConstantBoundInfo(ValueNum vn, ConstantBoundInfo* info)
{
    assert(IsVNConstantBound(vn));
    assert(info);

    // Do we have var < 100?
    VNFuncApp funcAttr;
    GetVNFunc(vn, &funcAttr);

    bool isOp1Const = IsVNInt32Constant(funcAttr.m_args[1]);

    if (isOp1Const)
    {
        info->cmpOper  = funcAttr.m_func;
        info->cmpOpVN  = funcAttr.m_args[0];
        info->constVal = GetConstantInt32(funcAttr.m_args[1]);
    }
    else
    {
        // Constant is on the left: swap the relop so the variable stays on the left.
        info->cmpOper  = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
        info->cmpOpVN  = funcAttr.m_args[1];
        info->constVal = GetConstantInt32(funcAttr.m_args[0]);
    }
}
4392 | |
4393 | //------------------------------------------------------------------------ |
4394 | // IsVNArrLenUnsignedBound: Checks if the specified vn represents an expression |
4395 | // such as "(uint)i < (uint)len" that implies that the index is valid |
4396 | // (0 <= i && i < a.len). |
4397 | // |
4398 | // Arguments: |
4399 | // vn - Value number to query |
4400 | // info - Pointer to an UnsignedCompareCheckedBoundInfo object to return information about |
4401 | // the expression. Not populated if the vn expression isn't suitable (e.g. i <= len). |
//    This enables optCreateJTrueBoundAssertion to immediately create an OAK_NO_THROW
4403 | // assertion instead of the OAK_EQUAL/NOT_EQUAL assertions created by signed compares |
4404 | // (IsVNCompareCheckedBound, IsVNCompareCheckedBoundArith) that require further processing. |
4405 | |
bool ValueNumStore::IsVNUnsignedCompareCheckedBound(ValueNum vn, UnsignedCompareCheckedBoundInfo* info)
{
    VNFuncApp funcApp;

    if (GetVNFunc(vn, &funcApp))
    {
        if ((funcApp.m_func == VNF_LT_UN) || (funcApp.m_func == VNF_GE_UN))
        {
            // We only care about "(uint)i < (uint)len" and its negation "(uint)i >= (uint)len"
            if (IsVNCheckedBound(funcApp.m_args[1]))
            {
                info->vnIdx   = funcApp.m_args[0];
                info->cmpOper = funcApp.m_func;
                info->vnBound = funcApp.m_args[1];
                return true;
            }
        }
        else if ((funcApp.m_func == VNF_GT_UN) || (funcApp.m_func == VNF_LE_UN))
        {
            // We only care about "(uint)a.len > (uint)i" and its negation "(uint)a.len <= (uint)i"
            if (IsVNCheckedBound(funcApp.m_args[0]))
            {
                info->vnIdx = funcApp.m_args[1];
                // Let's keep a consistent operand order - it's always i < len, never len > i
                info->cmpOper = (funcApp.m_func == VNF_GT_UN) ? VNF_LT_UN : VNF_GE_UN;
                info->vnBound = funcApp.m_args[0];
                return true;
            }
        }
    }

    // Not an unsigned compare against a checked bound; "info" is untouched.
    return false;
}
4439 | |
//------------------------------------------------------------------------
// IsVNCompareCheckedBound: Return true iff "vn" is a (signed) relational
//    compare in which at least one operand is a checked bound (e.g.
//    "var < a.len").
//
bool ValueNumStore::IsVNCompareCheckedBound(ValueNum vn)
{
    // Do we have "var < len"?
    if (vn == NoVN)
    {
        return false;
    }

    VNFuncApp funcAttr;
    if (!GetVNFunc(vn, &funcAttr))
    {
        return false;
    }
    // Must be one of the four ordering relops.
    if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
        funcAttr.m_func != (VNFunc)GT_GT)
    {
        return false;
    }
    // At least one side must be a checked bound.
    if (!IsVNCheckedBound(funcAttr.m_args[0]) && !IsVNCheckedBound(funcAttr.m_args[1]))
    {
        return false;
    }

    return true;
}
4465 | |
//------------------------------------------------------------------------
// GetCompareCheckedBound: Decompose a "var relop bound" compare VN (as
//    recognized by IsVNCompareCheckedBound). The relop is swapped when the
//    bound is on the left so the result always reads "cmpOp cmpOper vnBound".
//
// Arguments:
//    vn   - A VN for which IsVNCompareCheckedBound(vn) holds
//    info - [out] Receives the decomposed compare
//
void ValueNumStore::GetCompareCheckedBound(ValueNum vn, CompareCheckedBoundArithInfo* info)
{
    assert(IsVNCompareCheckedBound(vn));

    // Do we have var < a.len?
    VNFuncApp funcAttr;
    GetVNFunc(vn, &funcAttr);

    bool isOp1CheckedBound = IsVNCheckedBound(funcAttr.m_args[1]);
    if (isOp1CheckedBound)
    {
        info->cmpOper = funcAttr.m_func;
        info->cmpOp   = funcAttr.m_args[0];
        info->vnBound = funcAttr.m_args[1];
    }
    else
    {
        // Bound is on the left: swap the relop so the bound ends up on the right.
        info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
        info->cmpOp   = funcAttr.m_args[1];
        info->vnBound = funcAttr.m_args[0];
    }
}
4488 | |
//------------------------------------------------------------------------
// IsVNCheckedBoundArith: Return true iff "vn" is an add or subtract in which
//    at least one operand is a checked bound (e.g. "a.len - var").
//
bool ValueNumStore::IsVNCheckedBoundArith(ValueNum vn)
{
    // Do we have "a.len +or- var"
    if (vn == NoVN)
    {
        return false;
    }

    VNFuncApp funcAttr;

    return GetVNFunc(vn, &funcAttr) &&                                                     // vn is a func.
           (funcAttr.m_func == (VNFunc)GT_ADD || funcAttr.m_func == (VNFunc)GT_SUB) &&     // the func is +/-
           (IsVNCheckedBound(funcAttr.m_args[0]) || IsVNCheckedBound(funcAttr.m_args[1])); // either op1 or op2 is a.len
}
4503 | |
//------------------------------------------------------------------------
// GetCheckedBoundArithInfo: Decompose a "bound +/- var" VN (as recognized by
//    IsVNCheckedBoundArith) into operator, non-bound operand, and bound.
//    Note the operator is not swapped: ADD is commutative, and for SUB the
//    consumer must account for which side the bound was on.
//
// Arguments:
//    vn   - A VN for which IsVNCheckedBoundArith(vn) holds
//    info - [out] Receives arrOper/arrOp/vnBound
//
void ValueNumStore::GetCheckedBoundArithInfo(ValueNum vn, CompareCheckedBoundArithInfo* info)
{
    // Do we have a.len +/- var?
    assert(IsVNCheckedBoundArith(vn));
    VNFuncApp funcArith;
    GetVNFunc(vn, &funcArith);

    bool isOp1CheckedBound = IsVNCheckedBound(funcArith.m_args[1]);
    if (isOp1CheckedBound)
    {
        info->arrOper = funcArith.m_func;
        info->arrOp   = funcArith.m_args[0];
        info->vnBound = funcArith.m_args[1];
    }
    else
    {
        info->arrOper = funcArith.m_func;
        info->arrOp   = funcArith.m_args[1];
        info->vnBound = funcArith.m_args[0];
    }
}
4525 | |
//------------------------------------------------------------------------
// IsVNCompareCheckedBoundArith: Return true iff "vn" is a relational compare
//    in which at least one operand is checked-bound arithmetic (e.g.
//    "var < a.len - var").
//
bool ValueNumStore::IsVNCompareCheckedBoundArith(ValueNum vn)
{
    // Do we have: "var < a.len - var"
    if (vn == NoVN)
    {
        return false;
    }

    VNFuncApp funcAttr;
    if (!GetVNFunc(vn, &funcAttr))
    {
        return false;
    }

    // Suitable comparator.
    if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
        funcAttr.m_func != (VNFunc)GT_GT)
    {
        return false;
    }

    // Either the op0 or op1 is arr len arithmetic.
    if (!IsVNCheckedBoundArith(funcAttr.m_args[0]) && !IsVNCheckedBoundArith(funcAttr.m_args[1]))
    {
        return false;
    }

    return true;
}
4555 | |
//------------------------------------------------------------------------
// GetCompareCheckedBoundArithInfo: Decompose a compare against checked-bound
//    arithmetic (as recognized by IsVNCompareCheckedBoundArith). The relop is
//    swapped when the arithmetic side is on the left, and the arithmetic
//    itself is further decomposed via GetCheckedBoundArithInfo.
//
// Arguments:
//    vn   - A VN for which IsVNCompareCheckedBoundArith(vn) holds
//    info - [out] Receives the decomposed compare and arithmetic
//
void ValueNumStore::GetCompareCheckedBoundArithInfo(ValueNum vn, CompareCheckedBoundArithInfo* info)
{
    assert(IsVNCompareCheckedBoundArith(vn));

    VNFuncApp funcAttr;
    GetVNFunc(vn, &funcAttr);

    // Check whether op0 or op1 is checked bound arithmetic.
    bool isOp1CheckedBoundArith = IsVNCheckedBoundArith(funcAttr.m_args[1]);
    if (isOp1CheckedBoundArith)
    {
        info->cmpOper = funcAttr.m_func;
        info->cmpOp   = funcAttr.m_args[0];
        GetCheckedBoundArithInfo(funcAttr.m_args[1], info);
    }
    else
    {
        // Arithmetic is on the left: swap the relop so it ends up on the right.
        info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
        info->cmpOp   = funcAttr.m_args[1];
        GetCheckedBoundArithInfo(funcAttr.m_args[0], info);
    }
}
4578 | |
4579 | ValueNum ValueNumStore::GetArrForLenVn(ValueNum vn) |
4580 | { |
4581 | if (vn == NoVN) |
4582 | { |
4583 | return NoVN; |
4584 | } |
4585 | |
4586 | VNFuncApp funcAttr; |
4587 | if (GetVNFunc(vn, &funcAttr) && funcAttr.m_func == (VNFunc)GT_ARR_LENGTH) |
4588 | { |
4589 | return funcAttr.m_args[0]; |
4590 | } |
4591 | return NoVN; |
4592 | } |
4593 | |
4594 | bool ValueNumStore::IsVNNewArr(ValueNum vn, VNFuncApp* funcApp) |
4595 | { |
4596 | if (vn == NoVN) |
4597 | { |
4598 | return false; |
4599 | } |
4600 | bool result = false; |
4601 | if (GetVNFunc(vn, funcApp)) |
4602 | { |
4603 | result = (funcApp->m_func == VNF_JitNewArr) || (funcApp->m_func == VNF_JitReadyToRunNewArr); |
4604 | } |
4605 | return result; |
4606 | } |
4607 | |
4608 | int ValueNumStore::GetNewArrSize(ValueNum vn) |
4609 | { |
4610 | VNFuncApp funcApp; |
4611 | if (IsVNNewArr(vn, &funcApp)) |
4612 | { |
4613 | ValueNum arg1VN = funcApp.m_args[1]; |
4614 | if (IsVNConstant(arg1VN) && TypeOfVN(arg1VN) == TYP_INT) |
4615 | { |
4616 | return ConstantValue<int>(arg1VN); |
4617 | } |
4618 | } |
4619 | return 0; |
4620 | } |
4621 | |
4622 | bool ValueNumStore::IsVNArrLen(ValueNum vn) |
4623 | { |
4624 | if (vn == NoVN) |
4625 | { |
4626 | return false; |
4627 | } |
4628 | VNFuncApp funcAttr; |
4629 | return (GetVNFunc(vn, &funcAttr) && funcAttr.m_func == (VNFunc)GT_ARR_LENGTH); |
4630 | } |
4631 | |
//------------------------------------------------------------------------
// IsVNCheckedBound: Return true iff "vn" is known to be a checked bound --
//    either it was explicitly flagged via SetVNIsCheckedBound, or it is an
//    array-length VN (treated as a bound to aid CSE of bounds checks).
//
bool ValueNumStore::IsVNCheckedBound(ValueNum vn)
{
    bool dummy;
    if (m_checkedBoundVNs.TryGetValue(vn, &dummy))
    {
        // This VN appeared as the conservative VN of the length argument of some
        // GT_ARR_BOUND node.
        return true;
    }
    if (IsVNArrLen(vn))
    {
        // Even if we haven't seen this VN in a bounds check, if it is an array length
        // VN then consider it a checked bound VN. This facilitates better bounds check
        // removal by ensuring that compares against array lengths get put in the
        // optCseCheckedBoundMap; such an array length might get CSEd with one that was
        // directly used in a bounds check, and having the map entry will let us update
        // the compare's VN so that OptimizeRangeChecks can recognize such compares.
        return true;
    }

    return false;
}
4654 | |
//------------------------------------------------------------------------
// SetVNIsCheckedBound: Record "vn" as the VN of a length used in a bounds check,
//    so later queries via IsVNCheckedBound will recognize it.
//
// Arguments:
//    vn - the value number to flag; must not be a constant VN.
//
void ValueNumStore::SetVNIsCheckedBound(ValueNum vn)
{
    // This is meant to flag VNs for lengths that aren't known at compile time, so we can
    // form and propagate assertions about them. Ensure that callers filter out constant
    // VNs since they're not what we're looking to flag, and assertion prop can reason
    // directly about constants.
    assert(!IsVNConstant(vn));
    m_checkedBoundVNs.AddOrUpdate(vn, true);
}
4664 | |
//------------------------------------------------------------------------
// EvalMathFuncUnary: Value-number a one-argument math intrinsic application.
//
// Arguments:
//    typ      - type of the result (TYP_DOUBLE/TYP_FLOAT, or TYP_INT for Round)
//    gtMathFN - the math intrinsic being applied
//    arg0VN   - normal VN of the single argument
//
// Return Value:
//    A folded constant VN when the argument is constant and the intrinsic is
//    implemented by target-specific instructions; otherwise a VNF_* function
//    application over arg0VN.
//
ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, CorInfoIntrinsics gtMathFN, ValueNum arg0VN)
{
    assert(arg0VN == VNNormalValue(arg0VN));

    // If the math intrinsic is not implemented by target-specific instructions, such as implemented
    // by user calls, then don't do constant folding on it. This minimizes precision loss.

    if (IsVNConstant(arg0VN) && m_pComp->IsTargetIntrinsic(gtMathFN))
    {
        assert(varTypeIsFloating(TypeOfVN(arg0VN)));

        if (typ == TYP_DOUBLE)
        {
            // Both operand and its result must be of the same floating point type.
            assert(typ == TypeOfVN(arg0VN));
            double arg0Val = GetConstantDouble(arg0VN);

            // Fold using the host's double-precision math library; only intrinsics the
            // target implements in hardware are folded here (checked above).
            double res = 0.0;
            switch (gtMathFN)
            {
                case CORINFO_INTRINSIC_Sin:
                    res = sin(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Cos:
                    res = cos(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Sqrt:
                    res = sqrt(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Abs:
                    res = fabs(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Ceiling:
                    res = ceil(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Floor:
                    res = floor(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Round:
                    res = FloatingPointUtils::round(arg0Val);
                    break;
                default:
                    unreached(); // the above are the only math intrinsics at the time of this writing.
            }

            return VNForDoubleCon(res);
        }
        else if (typ == TYP_FLOAT)
        {
            // Both operand and its result must be of the same floating point type.
            assert(typ == TypeOfVN(arg0VN));
            float arg0Val = GetConstantSingle(arg0VN);

            // Same folding as above, using the single-precision library functions.
            float res = 0.0f;
            switch (gtMathFN)
            {
                case CORINFO_INTRINSIC_Sin:
                    res = sinf(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Cos:
                    res = cosf(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Sqrt:
                    res = sqrtf(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Abs:
                    res = fabsf(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Ceiling:
                    res = ceilf(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Floor:
                    res = floorf(arg0Val);
                    break;
                case CORINFO_INTRINSIC_Round:
                    res = FloatingPointUtils::round(arg0Val);
                    break;
                default:
                    unreached(); // the above are the only math intrinsics at the time of this writing.
            }

            return VNForFloatCon(res);
        }
        else
        {
            // CORINFO_INTRINSIC_Round is currently the only intrinsic that takes floating-point arguments
            // and that returns a non floating-point result.

            assert(typ == TYP_INT);
            assert(gtMathFN == CORINFO_INTRINSIC_Round);

            int res = 0;

            // The argument may be either precision; round it and truncate to int.
            switch (TypeOfVN(arg0VN))
            {
                case TYP_DOUBLE:
                {
                    double arg0Val = GetConstantDouble(arg0VN);
                    res = int(FloatingPointUtils::round(arg0Val));
                    break;
                }
                case TYP_FLOAT:
                {
                    float arg0Val = GetConstantSingle(arg0VN);
                    res = int(FloatingPointUtils::round(arg0Val));
                    break;
                }
                default:
                    unreached();
            }

            return VNForIntCon(res);
        }
    }
    else
    {
        // Not folding: map the intrinsic to its VNF_* function and build an application.
        assert(typ == TYP_DOUBLE || typ == TYP_FLOAT || (typ == TYP_INT && gtMathFN == CORINFO_INTRINSIC_Round));

        VNFunc vnf = VNF_Boundary;
        switch (gtMathFN)
        {
            case CORINFO_INTRINSIC_Sin:
                vnf = VNF_Sin;
                break;
            case CORINFO_INTRINSIC_Cos:
                vnf = VNF_Cos;
                break;
            case CORINFO_INTRINSIC_Cbrt:
                vnf = VNF_Cbrt;
                break;
            case CORINFO_INTRINSIC_Sqrt:
                vnf = VNF_Sqrt;
                break;
            case CORINFO_INTRINSIC_Abs:
                vnf = VNF_Abs;
                break;
            case CORINFO_INTRINSIC_Round:
                // Round is distinguished by result type since it may return
                // double, float, or int.
                if (typ == TYP_DOUBLE)
                {
                    vnf = VNF_RoundDouble;
                }
                else if (typ == TYP_FLOAT)
                {
                    vnf = VNF_RoundFloat;
                }
                else if (typ == TYP_INT)
                {
                    vnf = VNF_RoundInt;
                }
                else
                {
                    noway_assert(!"Invalid INTRINSIC_Round" );
                }
                break;
            case CORINFO_INTRINSIC_Cosh:
                vnf = VNF_Cosh;
                break;
            case CORINFO_INTRINSIC_Sinh:
                vnf = VNF_Sinh;
                break;
            case CORINFO_INTRINSIC_Tan:
                vnf = VNF_Tan;
                break;
            case CORINFO_INTRINSIC_Tanh:
                vnf = VNF_Tanh;
                break;
            case CORINFO_INTRINSIC_Asin:
                vnf = VNF_Asin;
                break;
            case CORINFO_INTRINSIC_Asinh:
                vnf = VNF_Asinh;
                break;
            case CORINFO_INTRINSIC_Acos:
                vnf = VNF_Acos;
                break;
            case CORINFO_INTRINSIC_Acosh:
                vnf = VNF_Acosh;
                break;
            case CORINFO_INTRINSIC_Atan:
                vnf = VNF_Atan;
                break;
            case CORINFO_INTRINSIC_Atanh:
                vnf = VNF_Atanh;
                break;
            case CORINFO_INTRINSIC_Log10:
                vnf = VNF_Log10;
                break;
            case CORINFO_INTRINSIC_Exp:
                vnf = VNF_Exp;
                break;
            case CORINFO_INTRINSIC_Ceiling:
                vnf = VNF_Ceiling;
                break;
            case CORINFO_INTRINSIC_Floor:
                vnf = VNF_Floor;
                break;
            default:
                unreached(); // the above are the only math intrinsics at the time of this writing.
        }

        return VNForFunc(typ, vnf, arg0VN);
    }
}
4868 | |
4869 | ValueNum ValueNumStore::EvalMathFuncBinary(var_types typ, CorInfoIntrinsics gtMathFN, ValueNum arg0VN, ValueNum arg1VN) |
4870 | { |
4871 | assert(varTypeIsFloating(typ)); |
4872 | assert(arg0VN == VNNormalValue(arg0VN)); |
4873 | assert(arg1VN == VNNormalValue(arg1VN)); |
4874 | |
4875 | VNFunc vnf = VNF_Boundary; |
4876 | |
4877 | // Currently, none of the binary math intrinsic are implemented by target-specific instructions. |
4878 | // To minimize precision loss, do not do constant folding on them. |
4879 | |
4880 | switch (gtMathFN) |
4881 | { |
4882 | case CORINFO_INTRINSIC_Atan2: |
4883 | vnf = VNF_Atan2; |
4884 | break; |
4885 | |
4886 | case CORINFO_INTRINSIC_Pow: |
4887 | vnf = VNF_Pow; |
4888 | break; |
4889 | |
4890 | default: |
4891 | unreached(); // the above are the only binary math intrinsics at the time of this writing. |
4892 | } |
4893 | |
4894 | return VNForFunc(typ, vnf, arg0VN, arg1VN); |
4895 | } |
4896 | |
4897 | bool ValueNumStore::IsVNFunc(ValueNum vn) |
4898 | { |
4899 | if (vn == NoVN) |
4900 | { |
4901 | return false; |
4902 | } |
4903 | Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn)); |
4904 | switch (c->m_attribs) |
4905 | { |
4906 | case CEA_NotAField: |
4907 | case CEA_Func0: |
4908 | case CEA_Func1: |
4909 | case CEA_Func2: |
4910 | case CEA_Func3: |
4911 | case CEA_Func4: |
4912 | return true; |
4913 | default: |
4914 | return false; |
4915 | } |
4916 | } |
4917 | |
//------------------------------------------------------------------------
// GetVNFunc: If "vn" is a function application, fill in "funcApp" with the
//    function and its argument VNs.
//
// Arguments:
//    vn      - the value number to inspect
//    funcApp - [out] receives the function and argument VNs on success
//
// Return Value:
//    true iff "vn" denotes a function application; VNF_NotAField is reported
//    as a nullary function. "funcApp" is untouched when false is returned.
//
bool ValueNumStore::GetVNFunc(ValueNum vn, VNFuncApp* funcApp)
{
    if (vn == NoVN)
    {
        return false;
    }

    // The chunk attribute determines which VNDefFuncNArg layout the definition uses.
    Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
    unsigned offset = ChunkOffset(vn);
    assert(offset < c->m_numUsed);
    switch (c->m_attribs)
    {
        case CEA_Func4:
        {
            VNDefFunc4Arg* farg4 = &reinterpret_cast<VNDefFunc4Arg*>(c->m_defs)[offset];
            funcApp->m_func = farg4->m_func;
            funcApp->m_arity = 4;
            funcApp->m_args[0] = farg4->m_arg0;
            funcApp->m_args[1] = farg4->m_arg1;
            funcApp->m_args[2] = farg4->m_arg2;
            funcApp->m_args[3] = farg4->m_arg3;
            return true;
        }
        case CEA_Func3:
        {
            VNDefFunc3Arg* farg3 = &reinterpret_cast<VNDefFunc3Arg*>(c->m_defs)[offset];
            funcApp->m_func = farg3->m_func;
            funcApp->m_arity = 3;
            funcApp->m_args[0] = farg3->m_arg0;
            funcApp->m_args[1] = farg3->m_arg1;
            funcApp->m_args[2] = farg3->m_arg2;
            return true;
        }
        case CEA_Func2:
        {
            VNDefFunc2Arg* farg2 = &reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offset];
            funcApp->m_func = farg2->m_func;
            funcApp->m_arity = 2;
            funcApp->m_args[0] = farg2->m_arg0;
            funcApp->m_args[1] = farg2->m_arg1;
            return true;
        }
        case CEA_Func1:
        {
            VNDefFunc1Arg* farg1 = &reinterpret_cast<VNDefFunc1Arg*>(c->m_defs)[offset];
            funcApp->m_func = farg1->m_func;
            funcApp->m_arity = 1;
            funcApp->m_args[0] = farg1->m_arg0;
            return true;
        }
        case CEA_Func0:
        {
            VNDefFunc0Arg* farg0 = &reinterpret_cast<VNDefFunc0Arg*>(c->m_defs)[offset];
            funcApp->m_func = farg0->m_func;
            funcApp->m_arity = 0;
            return true;
        }
        case CEA_NotAField:
        {
            // NotAField chunks store no payload; synthesize the nullary marker function.
            funcApp->m_func = VNF_NotAField;
            funcApp->m_arity = 0;
            return true;
        }
        default:
            return false;
    }
}
4985 | |
4986 | ValueNum ValueNumStore::VNForRefInAddr(ValueNum vn) |
4987 | { |
4988 | var_types vnType = TypeOfVN(vn); |
4989 | if (vnType == TYP_REF) |
4990 | { |
4991 | return vn; |
4992 | } |
4993 | // Otherwise... |
4994 | assert(vnType == TYP_BYREF); |
4995 | VNFuncApp funcApp; |
4996 | if (GetVNFunc(vn, &funcApp)) |
4997 | { |
4998 | assert(funcApp.m_arity == 2 && (funcApp.m_func == VNFunc(GT_ADD) || funcApp.m_func == VNFunc(GT_SUB))); |
4999 | var_types vnArg0Type = TypeOfVN(funcApp.m_args[0]); |
5000 | if (vnArg0Type == TYP_REF || vnArg0Type == TYP_BYREF) |
5001 | { |
5002 | return VNForRefInAddr(funcApp.m_args[0]); |
5003 | } |
5004 | else |
5005 | { |
5006 | assert(funcApp.m_func == VNFunc(GT_ADD) && |
5007 | (TypeOfVN(funcApp.m_args[1]) == TYP_REF || TypeOfVN(funcApp.m_args[1]) == TYP_BYREF)); |
5008 | return VNForRefInAddr(funcApp.m_args[1]); |
5009 | } |
5010 | } |
5011 | else |
5012 | { |
5013 | assert(IsVNConstant(vn)); |
5014 | return vn; |
5015 | } |
5016 | } |
5017 | |
5018 | bool ValueNumStore::VNIsValid(ValueNum vn) |
5019 | { |
5020 | ChunkNum cn = GetChunkNum(vn); |
5021 | if (cn >= m_chunks.Size()) |
5022 | { |
5023 | return false; |
5024 | } |
5025 | // Otherwise... |
5026 | Chunk* c = m_chunks.GetNoExpand(cn); |
5027 | return ChunkOffset(vn) < c->m_numUsed; |
5028 | } |
5029 | |
5030 | #ifdef DEBUG |
5031 | |
5032 | void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr) |
5033 | { |
5034 | printf(" {" ); |
5035 | if (vn == NoVN) |
5036 | { |
5037 | printf("NoVN" ); |
5038 | } |
5039 | else if (IsVNHandle(vn)) |
5040 | { |
5041 | ssize_t val = ConstantValue<ssize_t>(vn); |
5042 | printf("Hnd const: 0x%p" , dspPtr(val)); |
5043 | } |
5044 | else if (IsVNConstant(vn)) |
5045 | { |
5046 | var_types vnt = TypeOfVN(vn); |
5047 | switch (vnt) |
5048 | { |
5049 | case TYP_BOOL: |
5050 | case TYP_BYTE: |
5051 | case TYP_UBYTE: |
5052 | case TYP_SHORT: |
5053 | case TYP_USHORT: |
5054 | case TYP_INT: |
5055 | case TYP_UINT: |
5056 | { |
5057 | int val = ConstantValue<int>(vn); |
5058 | if (isPtr) |
5059 | { |
5060 | printf("PtrCns[%p]" , dspPtr(val)); |
5061 | } |
5062 | else |
5063 | { |
5064 | printf("IntCns" ); |
5065 | if ((val > -1000) && (val < 1000)) |
5066 | { |
5067 | printf(" %ld" , val); |
5068 | } |
5069 | else |
5070 | { |
5071 | printf(" 0x%X" , val); |
5072 | } |
5073 | } |
5074 | } |
5075 | break; |
5076 | case TYP_LONG: |
5077 | case TYP_ULONG: |
5078 | { |
5079 | INT64 val = ConstantValue<INT64>(vn); |
5080 | if (isPtr) |
5081 | { |
5082 | printf("LngPtrCns: 0x%p" , dspPtr(val)); |
5083 | } |
5084 | else |
5085 | { |
5086 | printf("LngCns: " ); |
5087 | if ((val > -1000) && (val < 1000)) |
5088 | { |
5089 | printf(" %ld" , val); |
5090 | } |
5091 | else if ((val & 0xFFFFFFFF00000000LL) == 0) |
5092 | { |
5093 | printf(" 0x%X" , val); |
5094 | } |
5095 | else |
5096 | { |
5097 | printf(" 0x%llx" , val); |
5098 | } |
5099 | } |
5100 | } |
5101 | break; |
5102 | case TYP_FLOAT: |
5103 | printf("FltCns[%f]" , ConstantValue<float>(vn)); |
5104 | break; |
5105 | case TYP_DOUBLE: |
5106 | printf("DblCns[%f]" , ConstantValue<double>(vn)); |
5107 | break; |
5108 | case TYP_REF: |
5109 | if (vn == VNForNull()) |
5110 | { |
5111 | printf("null" ); |
5112 | } |
5113 | else if (vn == VNForVoid()) |
5114 | { |
5115 | printf("void" ); |
5116 | } |
5117 | else |
5118 | { |
5119 | assert(vn == VNForZeroMap()); |
5120 | printf("zeroMap" ); |
5121 | } |
5122 | break; |
5123 | case TYP_BYREF: |
5124 | printf("byrefVal" ); |
5125 | break; |
5126 | case TYP_STRUCT: |
5127 | #ifdef FEATURE_SIMD |
5128 | case TYP_SIMD8: |
5129 | case TYP_SIMD12: |
5130 | case TYP_SIMD16: |
5131 | case TYP_SIMD32: |
5132 | #endif // FEATURE_SIMD |
5133 | printf("structVal" ); |
5134 | break; |
5135 | |
5136 | // These should be unreached. |
5137 | default: |
5138 | unreached(); |
5139 | } |
5140 | } |
5141 | else if (IsVNCompareCheckedBound(vn)) |
5142 | { |
5143 | CompareCheckedBoundArithInfo info; |
5144 | GetCompareCheckedBound(vn, &info); |
5145 | info.dump(this); |
5146 | } |
5147 | else if (IsVNCompareCheckedBoundArith(vn)) |
5148 | { |
5149 | CompareCheckedBoundArithInfo info; |
5150 | GetCompareCheckedBoundArithInfo(vn, &info); |
5151 | info.dump(this); |
5152 | } |
5153 | else if (IsVNFunc(vn)) |
5154 | { |
5155 | VNFuncApp funcApp; |
5156 | GetVNFunc(vn, &funcApp); |
5157 | // A few special cases... |
5158 | switch (funcApp.m_func) |
5159 | { |
5160 | case VNF_FieldSeq: |
5161 | vnDumpFieldSeq(comp, &funcApp, true); |
5162 | break; |
5163 | case VNF_MapSelect: |
5164 | vnDumpMapSelect(comp, &funcApp); |
5165 | break; |
5166 | case VNF_MapStore: |
5167 | vnDumpMapStore(comp, &funcApp); |
5168 | break; |
5169 | case VNF_ValWithExc: |
5170 | vnDumpValWithExc(comp, &funcApp); |
5171 | break; |
5172 | default: |
5173 | printf("%s(" , VNFuncName(funcApp.m_func)); |
5174 | for (unsigned i = 0; i < funcApp.m_arity; i++) |
5175 | { |
5176 | if (i > 0) |
5177 | { |
5178 | printf(", " ); |
5179 | } |
5180 | |
5181 | printf(FMT_VN, funcApp.m_args[i]); |
5182 | |
5183 | #if FEATURE_VN_DUMP_FUNC_ARGS |
5184 | printf("=" ); |
5185 | vnDump(comp, funcApp.m_args[i]); |
5186 | #endif |
5187 | } |
5188 | printf(")" ); |
5189 | } |
5190 | } |
5191 | else |
5192 | { |
5193 | // Otherwise, just a VN with no structure; print just the VN. |
5194 | printf("%x" , vn); |
5195 | } |
5196 | printf("}" ); |
5197 | } |
5198 | |
// Requires "valWithExc" to be a value with an exception set VNFuncApp.
// Prints a representation of the normal value and its exception set on standard out.
void ValueNumStore::vnDumpValWithExc(Compiler* comp, VNFuncApp* valWithExc)
{
    assert(valWithExc->m_func == VNF_ValWithExc); // Precondition.

    ValueNum normVN = valWithExc->m_args[0]; // First arg is the VN from normal execution
    ValueNum excVN = valWithExc->m_args[1]; // Second arg is the set of possible exceptions

    // The exception set must itself have function structure (an ExcSetCons list).
    assert(IsVNFunc(excVN));
    VNFuncApp excSeq;
    GetVNFunc(excVN, &excSeq);

    printf("norm=" );
    printf(FMT_VN, normVN);
    vnDump(comp, normVN);
    printf(", exc=" );
    printf(FMT_VN, excVN);
    vnDumpExcSeq(comp, &excSeq, true);
}
5219 | |
5220 | // Requires "excSeq" to be a ExcSetCons sequence. |
5221 | // Prints a representation of the set of exceptions on standard out. |
5222 | void ValueNumStore::vnDumpExcSeq(Compiler* comp, VNFuncApp* excSeq, bool isHead) |
5223 | { |
5224 | assert(excSeq->m_func == VNF_ExcSetCons); // Precondition. |
5225 | |
5226 | ValueNum curExc = excSeq->m_args[0]; |
5227 | bool hasTail = (excSeq->m_args[1] != VNForEmptyExcSet()); |
5228 | |
5229 | if (isHead && hasTail) |
5230 | { |
5231 | printf("(" ); |
5232 | } |
5233 | |
5234 | vnDump(comp, curExc); |
5235 | |
5236 | if (hasTail) |
5237 | { |
5238 | printf(", " ); |
5239 | assert(IsVNFunc(excSeq->m_args[1])); |
5240 | VNFuncApp tail; |
5241 | GetVNFunc(excSeq->m_args[1], &tail); |
5242 | vnDumpExcSeq(comp, &tail, false); |
5243 | } |
5244 | |
5245 | if (isHead && hasTail) |
5246 | { |
5247 | printf(")" ); |
5248 | } |
5249 | } |
5250 | |
//------------------------------------------------------------------------
// vnDumpFieldSeq: Print a VNF_FieldSeq application as a comma-separated list of
//    field names; the outermost call (isHead) parenthesizes multi-element lists.
//    Pseudo-fields print as "#FirstElem" / "#ConstantIndex".
//
void ValueNumStore::vnDumpFieldSeq(Compiler* comp, VNFuncApp* fieldSeq, bool isHead)
{
    assert(fieldSeq->m_func == VNF_FieldSeq); // Precondition.
    // First arg is the field handle VN.
    assert(IsVNConstant(fieldSeq->m_args[0]) && TypeOfVN(fieldSeq->m_args[0]) == TYP_I_IMPL);
    ssize_t fieldHndVal = ConstantValue<ssize_t>(fieldSeq->m_args[0]);
    // Second arg is the rest of the sequence; VNForNull() terminates it.
    bool hasTail = (fieldSeq->m_args[1] != VNForNull());

    if (isHead && hasTail)
    {
        printf("(" );
    }

    CORINFO_FIELD_HANDLE fldHnd = CORINFO_FIELD_HANDLE(fieldHndVal);
    if (fldHnd == FieldSeqStore::FirstElemPseudoField)
    {
        printf("#FirstElem" );
    }
    else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
    {
        printf("#ConstantIndex" );
    }
    else
    {
        // NOTE(review): uses m_pComp here rather than the "comp" parameter used by the
        // sibling dump routines -- presumably the same compiler instance; confirm.
        const char* modName;
        const char* fldName = m_pComp->eeGetFieldName(fldHnd, &modName);
        printf("%s" , fldName);
    }

    if (hasTail)
    {
        printf(", " );
        assert(IsVNFunc(fieldSeq->m_args[1]));
        VNFuncApp tail;
        GetVNFunc(fieldSeq->m_args[1], &tail);
        vnDumpFieldSeq(comp, &tail, false);
    }

    if (isHead && hasTail)
    {
        printf(")" );
    }
}
5294 | |
5295 | void ValueNumStore::vnDumpMapSelect(Compiler* comp, VNFuncApp* mapSelect) |
5296 | { |
5297 | assert(mapSelect->m_func == VNF_MapSelect); // Precondition. |
5298 | |
5299 | ValueNum mapVN = mapSelect->m_args[0]; // First arg is the map id |
5300 | ValueNum indexVN = mapSelect->m_args[1]; // Second arg is the index |
5301 | |
5302 | comp->vnPrint(mapVN, 0); |
5303 | printf("[" ); |
5304 | comp->vnPrint(indexVN, 0); |
5305 | printf("]" ); |
5306 | } |
5307 | |
5308 | void ValueNumStore::vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore) |
5309 | { |
5310 | assert(mapStore->m_func == VNF_MapStore); // Precondition. |
5311 | |
5312 | ValueNum mapVN = mapStore->m_args[0]; // First arg is the map id |
5313 | ValueNum indexVN = mapStore->m_args[1]; // Second arg is the index |
5314 | ValueNum newValVN = mapStore->m_args[2]; // Third arg is the new value |
5315 | |
5316 | comp->vnPrint(mapVN, 0); |
5317 | printf("[" ); |
5318 | comp->vnPrint(indexVN, 0); |
5319 | printf(" := " ); |
5320 | comp->vnPrint(newValVN, 0); |
5321 | printf("]" ); |
5322 | } |
5323 | #endif // DEBUG |
5324 | |
// Static fields, methods.

// Attribute flags for each VNFunc (and, in the low range, each genTreeOps);
// filled in once by InitValueNumStoreStatics and exposed via s_vnfOpAttribs.
static UINT8 vnfOpAttribs[VNF_COUNT];

// genTreeOps values that must never be used directly as a VNFunc.
static genTreeOps genTreeOpsIllegalAsVNFunc[] = {GT_IND, // When we do heap memory.
                                                 GT_NULLCHECK, GT_QMARK, GT_COLON, GT_LOCKADD, GT_XADD, GT_XCHG,
                                                 GT_CMPXCHG, GT_LCLHEAP, GT_BOX,

                                                 // These need special semantics:
                                                 GT_COMMA, // == second argument (but with exception(s) from first).
                                                 GT_ADDR, GT_ARR_BOUNDS_CHECK,
                                                 GT_OBJ,      // May reference heap memory.
                                                 GT_BLK,      // May reference heap memory.
                                                 GT_INIT_VAL, // Not strictly a pass-through.

                                                 // These control-flow operations need no values.
                                                 GT_JTRUE, GT_RETURN, GT_SWITCH, GT_RETFILT, GT_CKFINITE};

UINT8* ValueNumStore::s_vnfOpAttribs = nullptr;
5342 | |
//------------------------------------------------------------------------
// InitValueNumStoreStatics: One-time initialization of the vnfOpAttribs table:
//    arity and commutativity for every genTreeOps value, then (via the
//    ValueNumFuncDef macro expansion of valuenumfuncs.h) the declared attributes
//    of every VNF_* function, and finally the illegal-as-VNFunc flags.
//
void ValueNumStore::InitValueNumStoreStatics()
{
    // Make sure we've gotten constants right...
    assert(unsigned(VNFOA_Arity) == (1 << VNFOA_ArityShift));
    assert(unsigned(VNFOA_AfterArity) == (unsigned(VNFOA_Arity) << VNFOA_ArityBits));

    s_vnfOpAttribs = &vnfOpAttribs[0];
    // First, the genTreeOps range of the table: record arity and commutativity.
    for (unsigned i = 0; i < GT_COUNT; i++)
    {
        genTreeOps gtOper = static_cast<genTreeOps>(i);
        unsigned arity = 0;
        if (GenTree::OperIsUnary(gtOper))
        {
            arity = 1;
        }
        else if (GenTree::OperIsBinary(gtOper))
        {
            arity = 2;
        }
        // Since GT_ARR_BOUNDS_CHECK is not currently GTK_BINOP
        else if (gtOper == GT_ARR_BOUNDS_CHECK)
        {
            arity = 2;
        }
        vnfOpAttribs[i] |= (arity << VNFOA_ArityShift);

        if (GenTree::OperIsCommutative(gtOper))
        {
            vnfOpAttribs[i] |= VNFOA_Commutative;
        }
    }

    // I so wish this wasn't the best way to do this...

    // Next, the VNF_* range: the include below expands one ValueNumFuncDef
    // invocation per function, in declaration order, advancing vnfNum each time.
    int vnfNum = VNF_Boundary + 1; // The macro definition below will update this after using it.

#define ValueNumFuncDef(vnf, arity, commute, knownNonNull, sharedStatic) \
if (commute) \
vnfOpAttribs[vnfNum] |= VNFOA_Commutative; \
if (knownNonNull) \
vnfOpAttribs[vnfNum] |= VNFOA_KnownNonNull; \
if (sharedStatic) \
vnfOpAttribs[vnfNum] |= VNFOA_SharedStatic; \
vnfOpAttribs[vnfNum] |= (arity << VNFOA_ArityShift); \
vnfNum++;

#include "valuenumfuncs.h"
#undef ValueNumFuncDef

    // Finally, flag the genTreeOps values that may never be used as a VNFunc.
    for (unsigned i = 0; i < _countof(genTreeOpsIllegalAsVNFunc); i++)
    {
        vnfOpAttribs[genTreeOpsIllegalAsVNFunc[i]] |= VNFOA_IllegalGenTreeOp;
    }
}
5397 | |
5398 | #ifdef DEBUG |
// Define the name array.
#define ValueNumFuncDef(vnf, arity, commute, knownNonNull, sharedStatic) #vnf,

// Table of VNF_* names in declaration order; indexed by VNFuncName().
const char* ValueNumStore::VNFuncNameArr[] = {
#include "valuenumfuncs.h"
#undef ValueNumFuncDef
};
5406 | |
5407 | // static |
5408 | const char* ValueNumStore::VNFuncName(VNFunc vnf) |
5409 | { |
5410 | if (vnf < VNF_Boundary) |
5411 | { |
5412 | return GenTree::OpName(genTreeOps(vnf)); |
5413 | } |
5414 | else |
5415 | { |
5416 | return VNFuncNameArr[vnf - (VNF_Boundary + 1)]; |
5417 | } |
5418 | } |
5419 | |
// Names for the reserved value numbers, indexed by (vn - RecursiveVN); used by
// ValueNumStore::reservedName.
static const char* s_reservedNameArr[] = {
    "$VN.Recursive" , // -2 RecursiveVN
    "$VN.No" , // -1 NoVN
    "$VN.Null" , // 0 VNForNull()
    "$VN.ZeroMap" , // 1 VNForZeroMap()
    "$VN.ReadOnlyHeap" , // 2 VNForROH()
    "$VN.Void" , // 3 VNForVoid()
    "$VN.EmptyExcSet" // 4 VNForEmptyExcSet()
};
5429 | |
5430 | // Returns the string name of "vn" when it is a reserved value number, nullptr otherwise |
5431 | // static |
5432 | const char* ValueNumStore::reservedName(ValueNum vn) |
5433 | { |
5434 | int val = vn - ValueNumStore::RecursiveVN; // Add two, making 'RecursiveVN' equal to zero |
5435 | int max = ValueNumStore::SRC_NumSpecialRefConsts - ValueNumStore::RecursiveVN; |
5436 | |
5437 | if ((val >= 0) && (val < max)) |
5438 | { |
5439 | return s_reservedNameArr[val]; |
5440 | } |
5441 | return nullptr; |
5442 | } |
5443 | |
5444 | #endif // DEBUG |
5445 | |
5446 | // Returns true if "vn" is a reserved value number |
5447 | |
5448 | // static |
5449 | bool ValueNumStore::isReservedVN(ValueNum vn) |
5450 | { |
5451 | int val = vn - ValueNumStore::RecursiveVN; // Adding two, making 'RecursiveVN' equal to zero |
5452 | int max = ValueNumStore::SRC_NumSpecialRefConsts - ValueNumStore::RecursiveVN; |
5453 | |
5454 | if ((val >= 0) && (val < max)) |
5455 | { |
5456 | return true; |
5457 | } |
5458 | return false; |
5459 | } |
5460 | |
5461 | #ifdef DEBUG |
//------------------------------------------------------------------------
// RunTests: Debug-only self-test of basic ValueNumStore invariants: constant VN
//    uniqueness and typing for int/float/double, and function-application
//    construction, identity, and constant folding for VNF_Add.
//
void ValueNumStore::RunTests(Compiler* comp)
{
    VNFunc VNF_Add = GenTreeOpToVNFunc(GT_ADD);

    ValueNumStore* vns = new (comp->getAllocatorDebugOnly()) ValueNumStore(comp, comp->getAllocatorDebugOnly());
    // NOTE(review): VNForNull() is called unqualified here (unlike the "vns->" calls
    // below) -- presumably a static member; confirm.
    ValueNum vnNull = VNForNull();
    assert(vnNull == VNForNull());

    // Identical constants must yield identical VNs, with correct type and value.
    ValueNum vnFor1 = vns->VNForIntCon(1);
    assert(vnFor1 == vns->VNForIntCon(1));
    assert(vns->TypeOfVN(vnFor1) == TYP_INT);
    assert(vns->IsVNConstant(vnFor1));
    assert(vns->ConstantValue<int>(vnFor1) == 1);

    ValueNum vnFor100 = vns->VNForIntCon(100);
    assert(vnFor100 == vns->VNForIntCon(100));
    assert(vnFor100 != vnFor1);
    assert(vns->TypeOfVN(vnFor100) == TYP_INT);
    assert(vns->IsVNConstant(vnFor100));
    assert(vns->ConstantValue<int>(vnFor100) == 100);

    // Constants of different types must be distinct even for equal numeric values.
    ValueNum vnFor1F = vns->VNForFloatCon(1.0f);
    assert(vnFor1F == vns->VNForFloatCon(1.0f));
    assert(vnFor1F != vnFor1 && vnFor1F != vnFor100);
    assert(vns->TypeOfVN(vnFor1F) == TYP_FLOAT);
    assert(vns->IsVNConstant(vnFor1F));
    assert(vns->ConstantValue<float>(vnFor1F) == 1.0f);

    ValueNum vnFor1D = vns->VNForDoubleCon(1.0);
    assert(vnFor1D == vns->VNForDoubleCon(1.0));
    assert(vnFor1D != vnFor1F && vnFor1D != vnFor1 && vnFor1D != vnFor100);
    assert(vns->TypeOfVN(vnFor1D) == TYP_DOUBLE);
    assert(vns->IsVNConstant(vnFor1D));
    assert(vns->ConstantValue<double>(vnFor1D) == 1.0);

    // Adding an opaque VN keeps the result opaque, but the application itself must hash-cons.
    ValueNum vnRandom1 = vns->VNForExpr(nullptr, TYP_INT);
    ValueNum vnForFunc2a = vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnRandom1);
    assert(vnForFunc2a == vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnRandom1));
    assert(vnForFunc2a != vnFor1D && vnForFunc2a != vnFor1F && vnForFunc2a != vnFor1 && vnForFunc2a != vnRandom1);
    assert(vns->TypeOfVN(vnForFunc2a) == TYP_INT);
    assert(!vns->IsVNConstant(vnForFunc2a));
    assert(vns->IsVNFunc(vnForFunc2a));
    VNFuncApp fa2a;
    bool b = vns->GetVNFunc(vnForFunc2a, &fa2a);
    assert(b);
    assert(fa2a.m_func == VNF_Add && fa2a.m_arity == 2 && fa2a.m_args[0] == vnFor1 && fa2a.m_args[1] == vnRandom1);

    // Adding two constants must fold to a constant VN.
    ValueNum vnForFunc2b = vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnFor100);
    assert(vnForFunc2b == vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnFor100));
    assert(vnForFunc2b != vnFor1D && vnForFunc2b != vnFor1F && vnForFunc2b != vnFor1 && vnForFunc2b != vnFor100);
    assert(vns->TypeOfVN(vnForFunc2b) == TYP_INT);
    assert(vns->IsVNConstant(vnForFunc2b));
    assert(vns->ConstantValue<int>(vnForFunc2b) == 101);

    // printf("Did ValueNumStore::RunTests.\n");
}
5518 | #endif // DEBUG |
5519 | |
5520 | typedef JitExpandArrayStack<BasicBlock*> BlockStack; |
5521 | |
5522 | // This represents the "to do" state of the value number computation. |
5523 | struct ValueNumberState |
5524 | { |
5525 | // These two stacks collectively represent the set of blocks that are candidates for |
5526 | // processing, because at least one predecessor has been processed. Blocks on "m_toDoAllPredsDone" |
5527 | // have had *all* predecessors processed, and thus are candidates for some extra optimizations. |
5528 | // Blocks on "m_toDoNotAllPredsDone" have at least one predecessor that has not been processed. |
    // Blocks are initially on "m_toDoNotAllPredsDone"; they may be moved to "m_toDoAllPredsDone" when their
    // last unprocessed predecessor is processed, thus maintaining the invariants.
5531 | BlockStack m_toDoAllPredsDone; |
5532 | BlockStack m_toDoNotAllPredsDone; |
5533 | |
5534 | Compiler* m_comp; |
5535 | |
5536 | // TBD: This should really be a bitset... |
5537 | // For now: |
5538 | // first bit indicates completed, |
5539 | // second bit indicates that it's been pushed on all-done stack, |
5540 | // third bit indicates that it's been pushed on not-all-done stack. |
5541 | BYTE* m_visited; |
5542 | |
5543 | enum BlockVisitBits |
5544 | { |
5545 | BVB_complete = 0x1, |
5546 | BVB_onAllDone = 0x2, |
5547 | BVB_onNotAllDone = 0x4, |
5548 | }; |
5549 | |
    // Returns true if visit bit "bvb" is set for block number "bbNum".
    bool GetVisitBit(unsigned bbNum, BlockVisitBits bvb)
    {
        return (m_visited[bbNum] & bvb) != 0;
    }
    // Sets visit bit "bvb" for block number "bbNum".
    void SetVisitBit(unsigned bbNum, BlockVisitBits bvb)
    {
        m_visited[bbNum] |= bvb;
    }
5558 | |
    // Initializes the two todo stacks and allocates the zero-initialized visit-bit
    // array, indexed by bbNum (hence fgBBNumMax + 1 entries).
    ValueNumberState(Compiler* comp)
        : m_toDoAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
        , m_toDoNotAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
        , m_comp(comp)
        , m_visited(new (comp, CMK_ValueNumber) BYTE[comp->fgBBNumMax + 1]())
    {
    }
5566 | |
    // Chooses the next block to value-number when no block has all predecessors done.
    // Prefers the entry of an outermost loop whose non-loop predecessors are all done
    // (numbering a loop entry breaks the cycle so the rest of the loop body can follow
    // via the all-preds-done rule); otherwise returns an arbitrary candidate, or
    // nullptr when the not-all-preds-done set is empty. Also compacts already-completed
    // blocks out of the set as a side effect.
    BasicBlock* ChooseFromNotAllPredsDone()
    {
        assert(m_toDoAllPredsDone.Size() == 0);
        // If we have no blocks with all preds done, then (ideally, if all cycles have been captured by loops)
        // we must have at least one block within a loop. We want to do the loops first. Doing a loop entry block
        // should break the cycle, making the rest of the body of the loop (unless there's a nested loop) doable by the
        // all-preds-done rule. If several loop entry blocks are available, at least one should have all non-loop preds
        // done -- we choose that.
        for (unsigned i = 0; i < m_toDoNotAllPredsDone.Size(); i++)
        {
            BasicBlock* cand = m_toDoNotAllPredsDone.Get(i);

            // Skip any already-completed blocks (a block may have all its preds finished, get added to the
            // all-preds-done todo set, and get processed there). Do this by moving the last one down, to
            // keep the array compact.
            while (GetVisitBit(cand->bbNum, BVB_complete))
            {
                if (i + 1 < m_toDoNotAllPredsDone.Size())
                {
                    cand = m_toDoNotAllPredsDone.Pop();
                    m_toDoNotAllPredsDone.Set(i, cand);
                }
                else
                {
                    // "cand" is the last element; delete it.
                    (void)m_toDoNotAllPredsDone.Pop();
                    break;
                }
            }
            // We may have run out of non-complete candidates above. If so, we're done.
            if (i == m_toDoNotAllPredsDone.Size())
            {
                break;
            }

            // See if "cand" is a loop entry.
            unsigned lnum;
            if (m_comp->optBlockIsLoopEntry(cand, &lnum))
            {
                // "lnum" is the innermost loop of which "cand" is the entry; find the outermost.
                unsigned lnumPar = m_comp->optLoopTable[lnum].lpParent;
                while (lnumPar != BasicBlock::NOT_IN_LOOP)
                {
                    if (m_comp->optLoopTable[lnumPar].lpEntry == cand)
                    {
                        lnum = lnumPar;
                    }
                    else
                    {
                        break;
                    }
                    lnumPar = m_comp->optLoopTable[lnumPar].lpParent;
                }

                // Check that every predecessor from outside loop "lnum" has already been numbered.
                bool allNonLoopPredsDone = true;
                for (flowList* pred = m_comp->BlockPredsWithEH(cand); pred != nullptr; pred = pred->flNext)
                {
                    BasicBlock* predBlock = pred->flBlock;
                    if (!m_comp->optLoopTable[lnum].lpContains(predBlock))
                    {
                        if (!GetVisitBit(predBlock->bbNum, BVB_complete))
                        {
                            allNonLoopPredsDone = false;
                        }
                    }
                }
                if (allNonLoopPredsDone)
                {
                    return cand;
                }
            }
        }

        // If we didn't find a loop entry block with all non-loop preds done above, then return a random member (if
        // there is one).
        if (m_toDoNotAllPredsDone.Size() == 0)
        {
            return nullptr;
        }
        else
        {
            return m_toDoNotAllPredsDone.Pop();
        }
    }
5651 | |
5652 | // Debugging output that is too detailed for a normal JIT dump... |
5653 | #define DEBUG_VN_VISIT 0 |
5654 | |
5655 | // Record that "blk" has been visited, and add any unvisited successors of "blk" to the appropriate todo set. |
5656 | void FinishVisit(BasicBlock* blk) |
5657 | { |
5658 | #ifdef DEBUG_VN_VISIT |
5659 | JITDUMP("finish(" FMT_BB ").\n" , blk->bbNum); |
5660 | #endif // DEBUG_VN_VISIT |
5661 | |
5662 | SetVisitBit(blk->bbNum, BVB_complete); |
5663 | |
5664 | for (BasicBlock* succ : blk->GetAllSuccs(m_comp)) |
5665 | { |
5666 | #ifdef DEBUG_VN_VISIT |
5667 | JITDUMP(" Succ(" FMT_BB ").\n" , succ->bbNum); |
5668 | #endif // DEBUG_VN_VISIT |
5669 | |
5670 | if (GetVisitBit(succ->bbNum, BVB_complete)) |
5671 | { |
5672 | continue; |
5673 | } |
5674 | #ifdef DEBUG_VN_VISIT |
5675 | JITDUMP(" Not yet completed.\n" ); |
5676 | #endif // DEBUG_VN_VISIT |
5677 | |
5678 | bool allPredsVisited = true; |
5679 | for (flowList* pred = m_comp->BlockPredsWithEH(succ); pred != nullptr; pred = pred->flNext) |
5680 | { |
5681 | BasicBlock* predBlock = pred->flBlock; |
5682 | if (!GetVisitBit(predBlock->bbNum, BVB_complete)) |
5683 | { |
5684 | allPredsVisited = false; |
5685 | break; |
5686 | } |
5687 | } |
5688 | |
5689 | if (allPredsVisited) |
5690 | { |
5691 | #ifdef DEBUG_VN_VISIT |
5692 | JITDUMP(" All preds complete, adding to allDone.\n" ); |
5693 | #endif // DEBUG_VN_VISIT |
5694 | |
5695 | assert(!GetVisitBit(succ->bbNum, BVB_onAllDone)); // Only last completion of last succ should add to |
5696 | // this. |
5697 | m_toDoAllPredsDone.Push(succ); |
5698 | SetVisitBit(succ->bbNum, BVB_onAllDone); |
5699 | } |
5700 | else |
5701 | { |
5702 | #ifdef DEBUG_VN_VISIT |
5703 | JITDUMP(" Not all preds complete Adding to notallDone, if necessary...\n" ); |
5704 | #endif // DEBUG_VN_VISIT |
5705 | |
5706 | if (!GetVisitBit(succ->bbNum, BVB_onNotAllDone)) |
5707 | { |
5708 | #ifdef DEBUG_VN_VISIT |
5709 | JITDUMP(" Was necessary.\n" ); |
5710 | #endif // DEBUG_VN_VISIT |
5711 | m_toDoNotAllPredsDone.Push(succ); |
5712 | SetVisitBit(succ->bbNum, BVB_onNotAllDone); |
5713 | } |
5714 | } |
5715 | } |
5716 | } |
5717 | |
5718 | bool ToDoExists() |
5719 | { |
5720 | return m_toDoAllPredsDone.Size() > 0 || m_toDoNotAllPredsDone.Size() > 0; |
5721 | } |
5722 | }; |
5723 | |
//------------------------------------------------------------------------
// fgValueNumber: Run value numbering over the whole method.
//
// Notes:
//    Requires SSA to have been built; returns immediately if SSA was skipped.
//    On the first VN pass the ValueNumStore is allocated; on subsequent passes
//    the value numbers left by the previous pass are cleared first.
//    Blocks are visited with a two-worklist algorithm (described below) that
//    prefers blocks whose predecessors have all been visited.
//
void Compiler::fgValueNumber()
{
#ifdef DEBUG
    // This could be a JITDUMP, but some people find it convenient to set a breakpoint on the printf.
    if (verbose)
    {
        printf("\n*************** In fgValueNumber()\n");
    }
#endif

    // If we skipped SSA, skip VN as well.
    if (fgSsaPassesCompleted == 0)
    {
        return;
    }

    // Allocate the value number store.
    assert(fgVNPassesCompleted > 0 || vnStore == nullptr);
    if (fgVNPassesCompleted == 0)
    {
        CompAllocator allocator(getAllocator(CMK_ValueNumber));
        vnStore = new (allocator) ValueNumStore(this, allocator);
    }
    else
    {
        // A prior VN pass already ran; wipe out the value numbers it assigned.
        ValueNumPair noVnp;
        // Make sure the memory SSA names have no value numbers.
        for (unsigned i = 0; i < lvMemoryPerSsaData.GetCount(); i++)
        {
            lvMemoryPerSsaData.GetSsaDefByIndex(i)->m_vnPair = noVnp;
        }
        for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
        {
            // Now iterate over the block's statements, and their trees.
            for (GenTree* stmts = blk->FirstNonPhiDef(); stmts != nullptr; stmts = stmts->gtNext)
            {
                assert(stmts->IsStatement());
                for (GenTree* tree = stmts->gtStmt.gtStmtList; tree; tree = tree->gtNext)
                {
                    tree->gtVNPair.SetBoth(ValueNumStore::NoVN);
                }
            }
        }
    }

    // Compute the side effects of loops.
    optComputeLoopSideEffects();

    // At the block level, we will use a modified worklist algorithm.  We will have two
    // "todo" sets of unvisited blocks.  Blocks (other than the entry block) are put in a
    // todo set only when some predecessor has been visited, so all blocks have at least one
    // predecessor visited.  The distinction between the two sets is whether *all* predecessors have
    // already been visited.  We visit such blocks preferentially if they exist, since phi definitions
    // in such blocks will have all arguments defined, enabling a simplification in the case that all
    // arguments to the phi have the same VN.  If no such blocks exist, we pick a block with at least
    // one unvisited predecessor.  In this case, we assign a new VN for phi definitions.

    // Start by giving incoming arguments value numbers.
    // Also give must-init vars a zero of their type.
    for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
    {
        if (!lvaInSsa(lclNum))
        {
            continue;
        }

        LclVarDsc* varDsc = &lvaTable[lclNum];
        assert(varDsc->lvTracked);

        if (varDsc->lvIsParam)
        {
            // We assume that code equivalent to this variable initialization loop
            // has been performed when doing SSA naming, so that all the variables we give
            // initial VNs to here have been given initial SSA definitions there.
            // SSA numbers always start from FIRST_SSA_NUM, and we give the value number to SSA name FIRST_SSA_NUM.
            // We use the VNF_InitVal(i) from here so we know that this value is loop-invariant
            // in all loops.
            ValueNum      initVal = vnStore->VNForFunc(varDsc->TypeGet(), VNF_InitVal, vnStore->VNForIntCon(lclNum));
            LclSsaVarDsc* ssaDef  = varDsc->GetPerSsaData(SsaConfig::FIRST_SSA_NUM);
            ssaDef->m_vnPair.SetBoth(initVal);
            ssaDef->m_defLoc.m_blk = fgFirstBB;
        }
        else if (info.compInitMem || varDsc->lvMustInit ||
                 VarSetOps::IsMember(this, fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
        {
            // The last clause covers the use-before-def variables (the ones that are live-in to the first block),
            // these are variables that are read before being initialized (at least on some control flow paths)
            // if they are not must-init, then they get VNF_InitVal(i), as with the param case.)

            bool      isZeroed = (info.compInitMem || varDsc->lvMustInit);
            ValueNum  initVal  = ValueNumStore::NoVN; // We must assign a new value to initVal
            var_types typ      = varDsc->TypeGet();

            switch (typ)
            {
                case TYP_LCLBLK: // The outgoing args area for arm and x64
                case TYP_BLK:    // A blob of memory
                    // TYP_BLK is used for the EHSlots LclVar on x86 (aka shadowSPslotsVar)
                    // and for the lvaInlinedPInvokeFrameVar on x64, arm and x86
                    // The stack associated with these LclVars are not zero initialized
                    // thus we set 'initVal' to a new, unique VN.
                    //
                    initVal = vnStore->VNForExpr(fgFirstBB);
                    break;

                case TYP_BYREF:
                    if (isZeroed)
                    {
                        // LclVars of TYP_BYREF can be zero-inited.
                        initVal = vnStore->VNForByrefCon(0);
                    }
                    else
                    {
                        // Here we have uninitialized TYP_BYREF
                        initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(lclNum));
                    }
                    break;

                default:
                    if (isZeroed)
                    {
                        // By default we will zero init these LclVars
                        initVal = vnStore->VNZeroForType(typ);
                    }
                    else
                    {
                        initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(lclNum));
                    }
                    break;
            }
#ifdef _TARGET_X86_
            // The x86 vararg cookie/base-of-args locals are not zero-initialized,
            // so they get opaque VNs regardless of the policy chosen above.
            bool isVarargParam = (lclNum == lvaVarargsBaseOfStkArgs || lclNum == lvaVarargsHandleArg);
            if (isVarargParam)
                initVal = vnStore->VNForExpr(fgFirstBB); // a new, unique VN.
#endif
            assert(initVal != ValueNumStore::NoVN);

            LclSsaVarDsc* ssaDef = varDsc->GetPerSsaData(SsaConfig::FIRST_SSA_NUM);
            ssaDef->m_vnPair.SetBoth(initVal);
            ssaDef->m_defLoc.m_blk = fgFirstBB;
        }
    }
    // Give memory an initial value number (about which we know nothing).
    ValueNum memoryInitVal = vnStore->VNForFunc(TYP_REF, VNF_InitVal, vnStore->VNForIntCon(-1)); // Use -1 for memory.
    GetMemoryPerSsaData(SsaConfig::FIRST_SSA_NUM)->m_vnPair.SetBoth(memoryInitVal);
#ifdef DEBUG
    if (verbose)
    {
        printf("Memory Initial Value in BB01 is: " FMT_VN "\n", memoryInitVal);
    }
#endif // DEBUG

    ValueNumberState vs(this);

    // Push the first block.  This has no preds.
    vs.m_toDoAllPredsDone.Push(fgFirstBB);

    while (vs.ToDoExists())
    {
        while (vs.m_toDoAllPredsDone.Size() > 0)
        {
            BasicBlock* toDo = vs.m_toDoAllPredsDone.Pop();
            fgValueNumberBlock(toDo);
            // Record that we've visited "toDo", and add successors to the right sets.
            vs.FinishVisit(toDo);
        }
        // OK, we've run out of blocks whose predecessors are done.  Pick one whose predecessors are not all done,
        // process that.  This may make more "all-done" blocks, so we'll go around the outer loop again --
        // note that this is an "if", not a "while" loop.
        if (vs.m_toDoNotAllPredsDone.Size() > 0)
        {
            BasicBlock* toDo = vs.ChooseFromNotAllPredsDone();
            if (toDo == nullptr)
            {
                continue; // We may have run out, because of completed blocks on the not-all-preds done list.
            }

            fgValueNumberBlock(toDo);
            // Record that we've visited "toDo", and add successors to the right sets.
            vs.FinishVisit(toDo);
        }
    }

#ifdef DEBUG
    JitTestCheckVN();
#endif // DEBUG

    fgVNPassesCompleted++;
}
5913 | |
5914 | void Compiler::fgValueNumberBlock(BasicBlock* blk) |
5915 | { |
5916 | compCurBB = blk; |
5917 | |
5918 | #ifdef DEBUG |
5919 | compCurStmtNum = blk->bbStmtNum - 1; // Set compCurStmtNum |
5920 | #endif |
5921 | |
5922 | unsigned outerLoopNum = BasicBlock::NOT_IN_LOOP; |
5923 | |
5924 | // First: visit phi's. If "newVNForPhis", give them new VN's. If not, |
5925 | // first check to see if all phi args have the same value. |
5926 | GenTree* firstNonPhi = blk->FirstNonPhiDef(); |
5927 | for (GenTree* phiDefs = blk->bbTreeList; phiDefs != firstNonPhi; phiDefs = phiDefs->gtNext) |
5928 | { |
5929 | // TODO-Cleanup: It has been proposed that we should have an IsPhiDef predicate. We would use it |
5930 | // in Block::FirstNonPhiDef as well. |
5931 | GenTree* phiDef = phiDefs->gtStmt.gtStmtExpr; |
5932 | assert(phiDef->OperGet() == GT_ASG); |
5933 | GenTreeLclVarCommon* newSsaVar = phiDef->gtOp.gtOp1->AsLclVarCommon(); |
5934 | |
5935 | ValueNumPair phiAppVNP; |
5936 | ValueNumPair sameVNPair; |
5937 | |
5938 | GenTree* phiFunc = phiDef->gtOp.gtOp2; |
5939 | |
5940 | // At this point a GT_PHI node should never have a nullptr for gtOp1 |
5941 | // and the gtOp1 should always be a GT_LIST node. |
5942 | GenTree* phiOp1 = phiFunc->gtOp.gtOp1; |
5943 | noway_assert(phiOp1 != nullptr); |
5944 | noway_assert(phiOp1->OperGet() == GT_LIST); |
5945 | |
5946 | GenTreeArgList* phiArgs = phiFunc->gtOp.gtOp1->AsArgList(); |
5947 | |
5948 | // A GT_PHI node should have more than one argument. |
5949 | noway_assert(phiArgs->Rest() != nullptr); |
5950 | |
5951 | GenTreeLclVarCommon* phiArg = phiArgs->Current()->AsLclVarCommon(); |
5952 | phiArgs = phiArgs->Rest(); |
5953 | |
5954 | phiAppVNP.SetBoth(vnStore->VNForIntCon(phiArg->gtSsaNum)); |
5955 | bool allSameLib = true; |
5956 | bool allSameCons = true; |
5957 | sameVNPair = lvaTable[phiArg->gtLclNum].GetPerSsaData(phiArg->gtSsaNum)->m_vnPair; |
5958 | if (!sameVNPair.BothDefined()) |
5959 | { |
5960 | allSameLib = false; |
5961 | allSameCons = false; |
5962 | } |
5963 | while (phiArgs != nullptr) |
5964 | { |
5965 | phiArg = phiArgs->Current()->AsLclVarCommon(); |
5966 | // Set the VN of the phi arg. |
5967 | phiArg->gtVNPair = lvaTable[phiArg->gtLclNum].GetPerSsaData(phiArg->gtSsaNum)->m_vnPair; |
5968 | if (phiArg->gtVNPair.BothDefined()) |
5969 | { |
5970 | if (phiArg->gtVNPair.GetLiberal() != sameVNPair.GetLiberal()) |
5971 | { |
5972 | allSameLib = false; |
5973 | } |
5974 | if (phiArg->gtVNPair.GetConservative() != sameVNPair.GetConservative()) |
5975 | { |
5976 | allSameCons = false; |
5977 | } |
5978 | } |
5979 | else |
5980 | { |
5981 | allSameLib = false; |
5982 | allSameCons = false; |
5983 | } |
5984 | ValueNumPair phiArgSsaVNP; |
5985 | phiArgSsaVNP.SetBoth(vnStore->VNForIntCon(phiArg->gtSsaNum)); |
5986 | phiAppVNP = vnStore->VNPairForFunc(newSsaVar->TypeGet(), VNF_Phi, phiArgSsaVNP, phiAppVNP); |
5987 | phiArgs = phiArgs->Rest(); |
5988 | } |
5989 | |
5990 | ValueNumPair newVNPair; |
5991 | if (allSameLib) |
5992 | { |
5993 | newVNPair.SetLiberal(sameVNPair.GetLiberal()); |
5994 | } |
5995 | else |
5996 | { |
5997 | newVNPair.SetLiberal(phiAppVNP.GetLiberal()); |
5998 | } |
5999 | if (allSameCons) |
6000 | { |
6001 | newVNPair.SetConservative(sameVNPair.GetConservative()); |
6002 | } |
6003 | else |
6004 | { |
6005 | newVNPair.SetConservative(phiAppVNP.GetConservative()); |
6006 | } |
6007 | |
6008 | LclSsaVarDsc* newSsaVarDsc = lvaTable[newSsaVar->gtLclNum].GetPerSsaData(newSsaVar->GetSsaNum()); |
6009 | // If all the args of the phi had the same value(s, liberal and conservative), then there wasn't really |
6010 | // a reason to have the phi -- just pass on that value. |
6011 | if (allSameLib && allSameCons) |
6012 | { |
6013 | newSsaVarDsc->m_vnPair = newVNPair; |
6014 | #ifdef DEBUG |
6015 | if (verbose) |
6016 | { |
6017 | printf("In SSA definition, incoming phi args all same, set VN of local %d/%d to " , |
6018 | newSsaVar->GetLclNum(), newSsaVar->GetSsaNum()); |
6019 | vnpPrint(newVNPair, 1); |
6020 | printf(".\n" ); |
6021 | } |
6022 | #endif // DEBUG |
6023 | } |
6024 | else |
6025 | { |
6026 | // They were not the same; we need to create a phi definition. |
6027 | ValueNumPair lclNumVNP; |
6028 | lclNumVNP.SetBoth(ValueNum(newSsaVar->GetLclNum())); |
6029 | ValueNumPair ssaNumVNP; |
6030 | ssaNumVNP.SetBoth(ValueNum(newSsaVar->GetSsaNum())); |
6031 | ValueNumPair vnPhiDef = |
6032 | vnStore->VNPairForFunc(newSsaVar->TypeGet(), VNF_PhiDef, lclNumVNP, ssaNumVNP, phiAppVNP); |
6033 | newSsaVarDsc->m_vnPair = vnPhiDef; |
6034 | #ifdef DEBUG |
6035 | if (verbose) |
6036 | { |
6037 | printf("SSA definition: set VN of local %d/%d to " , newSsaVar->GetLclNum(), newSsaVar->GetSsaNum()); |
6038 | vnpPrint(vnPhiDef, 1); |
6039 | printf(".\n" ); |
6040 | } |
6041 | #endif // DEBUG |
6042 | } |
6043 | } |
6044 | |
6045 | // Now do the same for each MemoryKind. |
6046 | for (MemoryKind memoryKind : allMemoryKinds()) |
6047 | { |
6048 | // Is there a phi for this block? |
6049 | if (blk->bbMemorySsaPhiFunc[memoryKind] == nullptr) |
6050 | { |
6051 | fgCurMemoryVN[memoryKind] = GetMemoryPerSsaData(blk->bbMemorySsaNumIn[memoryKind])->m_vnPair.GetLiberal(); |
6052 | assert(fgCurMemoryVN[memoryKind] != ValueNumStore::NoVN); |
6053 | } |
6054 | else |
6055 | { |
6056 | if ((memoryKind == ByrefExposed) && byrefStatesMatchGcHeapStates) |
6057 | { |
6058 | // The update for GcHeap will copy its result to ByrefExposed. |
6059 | assert(memoryKind < GcHeap); |
6060 | assert(blk->bbMemorySsaPhiFunc[memoryKind] == blk->bbMemorySsaPhiFunc[GcHeap]); |
6061 | continue; |
6062 | } |
6063 | |
6064 | unsigned loopNum; |
6065 | ValueNum newMemoryVN; |
6066 | if (optBlockIsLoopEntry(blk, &loopNum)) |
6067 | { |
6068 | newMemoryVN = fgMemoryVNForLoopSideEffects(memoryKind, blk, loopNum); |
6069 | } |
6070 | else |
6071 | { |
6072 | // Are all the VN's the same? |
6073 | BasicBlock::MemoryPhiArg* phiArgs = blk->bbMemorySsaPhiFunc[memoryKind]; |
6074 | assert(phiArgs != BasicBlock::EmptyMemoryPhiDef); |
6075 | // There should be > 1 args to a phi. |
6076 | assert(phiArgs->m_nextArg != nullptr); |
6077 | ValueNum phiAppVN = vnStore->VNForIntCon(phiArgs->GetSsaNum()); |
6078 | JITDUMP(" Building phi application: $%x = SSA# %d.\n" , phiAppVN, phiArgs->GetSsaNum()); |
6079 | bool allSame = true; |
6080 | ValueNum sameVN = GetMemoryPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal(); |
6081 | if (sameVN == ValueNumStore::NoVN) |
6082 | { |
6083 | allSame = false; |
6084 | } |
6085 | phiArgs = phiArgs->m_nextArg; |
6086 | while (phiArgs != nullptr) |
6087 | { |
6088 | ValueNum phiArgVN = GetMemoryPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal(); |
6089 | if (phiArgVN == ValueNumStore::NoVN || phiArgVN != sameVN) |
6090 | { |
6091 | allSame = false; |
6092 | } |
6093 | #ifdef DEBUG |
6094 | ValueNum oldPhiAppVN = phiAppVN; |
6095 | #endif |
6096 | unsigned phiArgSSANum = phiArgs->GetSsaNum(); |
6097 | ValueNum phiArgSSANumVN = vnStore->VNForIntCon(phiArgSSANum); |
6098 | JITDUMP(" Building phi application: $%x = SSA# %d.\n" , phiArgSSANumVN, phiArgSSANum); |
6099 | phiAppVN = vnStore->VNForFunc(TYP_REF, VNF_Phi, phiArgSSANumVN, phiAppVN); |
6100 | JITDUMP(" Building phi application: $%x = phi($%x, $%x).\n" , phiAppVN, phiArgSSANumVN, |
6101 | oldPhiAppVN); |
6102 | phiArgs = phiArgs->m_nextArg; |
6103 | } |
6104 | if (allSame) |
6105 | { |
6106 | newMemoryVN = sameVN; |
6107 | } |
6108 | else |
6109 | { |
6110 | newMemoryVN = |
6111 | vnStore->VNForFunc(TYP_REF, VNF_PhiMemoryDef, vnStore->VNForHandle(ssize_t(blk), 0), phiAppVN); |
6112 | } |
6113 | } |
6114 | GetMemoryPerSsaData(blk->bbMemorySsaNumIn[memoryKind])->m_vnPair.SetLiberal(newMemoryVN); |
6115 | fgCurMemoryVN[memoryKind] = newMemoryVN; |
6116 | if ((memoryKind == GcHeap) && byrefStatesMatchGcHeapStates) |
6117 | { |
6118 | // Keep the CurMemoryVNs in sync |
6119 | fgCurMemoryVN[ByrefExposed] = newMemoryVN; |
6120 | } |
6121 | } |
6122 | #ifdef DEBUG |
6123 | if (verbose) |
6124 | { |
6125 | printf("The SSA definition for %s (#%d) at start of " FMT_BB " is " , memoryKindNames[memoryKind], |
6126 | blk->bbMemorySsaNumIn[memoryKind], blk->bbNum); |
6127 | vnPrint(fgCurMemoryVN[memoryKind], 1); |
6128 | printf("\n" ); |
6129 | } |
6130 | #endif // DEBUG |
6131 | } |
6132 | |
6133 | // Now iterate over the remaining statements, and their trees. |
6134 | for (GenTree* stmt = firstNonPhi; stmt != nullptr; stmt = stmt->gtNext) |
6135 | { |
6136 | assert(stmt->IsStatement()); |
6137 | |
6138 | #ifdef DEBUG |
6139 | compCurStmtNum++; |
6140 | if (verbose) |
6141 | { |
6142 | printf("\n***** " FMT_BB ", stmt %d (before)\n" , blk->bbNum, compCurStmtNum); |
6143 | gtDispTree(stmt->gtStmt.gtStmtExpr); |
6144 | printf("\n" ); |
6145 | } |
6146 | #endif |
6147 | |
6148 | for (GenTree* tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext) |
6149 | { |
6150 | fgValueNumberTree(tree); |
6151 | } |
6152 | |
6153 | #ifdef DEBUG |
6154 | if (verbose) |
6155 | { |
6156 | printf("\n***** " FMT_BB ", stmt %d (after)\n" , blk->bbNum, compCurStmtNum); |
6157 | gtDispTree(stmt->gtStmt.gtStmtExpr); |
6158 | printf("\n" ); |
6159 | if (stmt->gtNext) |
6160 | { |
6161 | printf("---------\n" ); |
6162 | } |
6163 | } |
6164 | #endif |
6165 | } |
6166 | |
6167 | for (MemoryKind memoryKind : allMemoryKinds()) |
6168 | { |
6169 | if ((memoryKind == GcHeap) && byrefStatesMatchGcHeapStates) |
6170 | { |
6171 | // The update to the shared SSA data will have already happened for ByrefExposed. |
6172 | assert(memoryKind > ByrefExposed); |
6173 | assert(blk->bbMemorySsaNumOut[memoryKind] == blk->bbMemorySsaNumOut[ByrefExposed]); |
6174 | assert(GetMemoryPerSsaData(blk->bbMemorySsaNumOut[memoryKind])->m_vnPair.GetLiberal() == |
6175 | fgCurMemoryVN[memoryKind]); |
6176 | continue; |
6177 | } |
6178 | |
6179 | if (blk->bbMemorySsaNumOut[memoryKind] != blk->bbMemorySsaNumIn[memoryKind]) |
6180 | { |
6181 | GetMemoryPerSsaData(blk->bbMemorySsaNumOut[memoryKind])->m_vnPair.SetLiberal(fgCurMemoryVN[memoryKind]); |
6182 | } |
6183 | } |
6184 | |
6185 | compCurBB = nullptr; |
6186 | } |
6187 | |
//------------------------------------------------------------------------
// fgMemoryVNForLoopSideEffects: Compute the VN for a memory kind at entry to a
//    loop, given the loop's summarized side effects.
//
// Arguments:
//    memoryKind       - which memory state (GcHeap or ByrefExposed) to compute
//    entryBlock       - the loop entry block
//    innermostLoopNum - the innermost loop of which "entryBlock" is the entry
//
// Return Value:
//    The memory VN at loop entry: a fresh opaque VN when the (outermost
//    entered) loop has memory havoc or the entry has multiple non-loop
//    predecessors; otherwise, the single non-loop predecessor's memory state
//    with every field map and array element-type map the loop modifies
//    overwritten with unknown (fresh) values.
//
ValueNum Compiler::fgMemoryVNForLoopSideEffects(MemoryKind  memoryKind,
                                                BasicBlock* entryBlock,
                                                unsigned    innermostLoopNum)
{
    // "innermostLoopNum" is the innermost loop for which "entryBlock" is the entry; find the outermost one.
    assert(innermostLoopNum != BasicBlock::NOT_IN_LOOP);
    unsigned loopsInNest = innermostLoopNum;
    unsigned loopNum     = innermostLoopNum;
    while (loopsInNest != BasicBlock::NOT_IN_LOOP)
    {
        if (optLoopTable[loopsInNest].lpEntry != entryBlock)
        {
            break;
        }
        loopNum     = loopsInNest;
        loopsInNest = optLoopTable[loopsInNest].lpParent;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("Computing %s state for block " FMT_BB ", entry block for loops %d to %d:\n",
               memoryKindNames[memoryKind], entryBlock->bbNum, innermostLoopNum, loopNum);
    }
#endif // DEBUG

    // If this loop has memory havoc effects, just use a new, unique VN.
    if (optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind])
    {
        ValueNum res = vnStore->VNForExpr(entryBlock, TYP_REF);
#ifdef DEBUG
        if (verbose)
        {
            printf("  Loop %d has memory havoc effect; heap state is new unique $%x.\n", loopNum, res);
        }
#endif // DEBUG
        return res;
    }

    // Otherwise, find the predecessors of the entry block that are not in the loop.
    // If there is only one such, use its memory value as the "base."  If more than one,
    // use a new unique VN.
    BasicBlock* nonLoopPred          = nullptr;
    bool        multipleNonLoopPreds = false;
    for (flowList* pred = BlockPredsWithEH(entryBlock); pred != nullptr; pred = pred->flNext)
    {
        BasicBlock* predBlock = pred->flBlock;
        if (!optLoopTable[loopNum].lpContains(predBlock))
        {
            if (nonLoopPred == nullptr)
            {
                nonLoopPred = predBlock;
            }
            else
            {
#ifdef DEBUG
                if (verbose)
                {
                    printf("  Entry block has >1 non-loop preds: (at least) " FMT_BB " and " FMT_BB ".\n",
                           nonLoopPred->bbNum, predBlock->bbNum);
                }
#endif // DEBUG
                multipleNonLoopPreds = true;
                break;
            }
        }
    }
    if (multipleNonLoopPreds)
    {
        ValueNum res = vnStore->VNForExpr(entryBlock, TYP_REF);
#ifdef DEBUG
        if (verbose)
        {
            printf("  Therefore, memory state is new, fresh $%x.\n", res);
        }
#endif // DEBUG
        return res;
    }
    // Otherwise, there is a single non-loop pred.
    assert(nonLoopPred != nullptr);
    // What is its memory post-state?
    ValueNum newMemoryVN = GetMemoryPerSsaData(nonLoopPred->bbMemorySsaNumOut[memoryKind])->m_vnPair.GetLiberal();
    assert(newMemoryVN != ValueNumStore::NoVN); // We must have processed the single non-loop pred before reaching the
                                                // loop entry.

#ifdef DEBUG
    if (verbose)
    {
        printf("  Init %s state is $%x, with new, fresh VN at:\n", memoryKindNames[memoryKind], newMemoryVN);
    }
#endif // DEBUG
    // Modify "base" by setting all the modified fields/field maps/array maps to unknown values.
    // These annotations apply specifically to the GcHeap, where we disambiguate across such stores.
    if (memoryKind == GcHeap)
    {
        // First the fields/field maps.
        Compiler::LoopDsc::FieldHandleSet* fieldsMod = optLoopTable[loopNum].lpFieldsModified;
        if (fieldsMod != nullptr)
        {
            for (Compiler::LoopDsc::FieldHandleSet::KeyIterator ki = fieldsMod->Begin(); !ki.Equal(fieldsMod->End());
                 ++ki)
            {
                CORINFO_FIELD_HANDLE fldHnd   = ki.Get();
                ValueNum             fldHndVN = vnStore->VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);

#ifdef DEBUG
                if (verbose)
                {
                    const char* modName;
                    const char* fldName = eeGetFieldName(fldHnd, &modName);
                    printf("     VNForHandle(%s) is " FMT_VN "\n", fldName, fldHndVN);
                }
#endif // DEBUG

                // Overwrite this field's map in the memory state with a fresh unknown value.
                newMemoryVN =
                    vnStore->VNForMapStore(TYP_REF, newMemoryVN, fldHndVN, vnStore->VNForExpr(entryBlock, TYP_REF));
            }
        }
        // Now do the array maps.
        Compiler::LoopDsc::ClassHandleSet* elemTypesMod = optLoopTable[loopNum].lpArrayElemTypesModified;
        if (elemTypesMod != nullptr)
        {
            for (Compiler::LoopDsc::ClassHandleSet::KeyIterator ki = elemTypesMod->Begin();
                 !ki.Equal(elemTypesMod->End()); ++ki)
            {
                CORINFO_CLASS_HANDLE elemClsHnd = ki.Get();

#ifdef DEBUG
                if (verbose)
                {
                    var_types elemTyp = DecodeElemType(elemClsHnd);
                    // If a valid class handle is given when the ElemType is set, DecodeElemType will
                    // return TYP_STRUCT, and elemClsHnd is that handle.
                    // Otherwise, elemClsHnd is NOT a valid class handle, and is the encoded var_types value.
                    if (elemTyp == TYP_STRUCT)
                    {
                        printf("     Array map %s[]\n", eeGetClassName(elemClsHnd));
                    }
                    else
                    {
                        printf("     Array map %s[]\n", varTypeName(elemTyp));
                    }
                }
#endif // DEBUG

                // Overwrite this element type's array map with a fresh unknown value.
                ValueNum elemTypeVN = vnStore->VNForHandle(ssize_t(elemClsHnd), GTF_ICON_CLASS_HDL);
                ValueNum uniqueVN   = vnStore->VNForExpr(entryBlock, TYP_REF);
                newMemoryVN         = vnStore->VNForMapStore(TYP_REF, newMemoryVN, elemTypeVN, uniqueVN);
            }
        }
    }
    else
    {
        // If there were any fields/elements modified, this should have been recorded as havoc
        // for ByrefExposed.
        assert(memoryKind == ByrefExposed);
        assert((optLoopTable[loopNum].lpFieldsModified == nullptr) ||
               optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind]);
        assert((optLoopTable[loopNum].lpArrayElemTypesModified == nullptr) ||
               optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind]);
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("  Final %s state is $%x.\n", memoryKindNames[memoryKind], newMemoryVN);
    }
#endif // DEBUG
    return newMemoryVN;
}
6358 | |
6359 | void Compiler::fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)) |
6360 | { |
6361 | // Update the current memory VN, and if we're tracking the heap SSA # caused by this node, record it. |
6362 | recordGcHeapStore(tree, vnStore->VNForExpr(compCurBB, TYP_REF) DEBUGARG(msg)); |
6363 | } |
6364 | |
6365 | void Compiler::fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)) |
6366 | { |
6367 | // Update the current ByrefExposed VN, and if we're tracking the heap SSA # caused by this node, record it. |
6368 | recordAddressExposedLocalStore(tree, vnStore->VNForExpr(compCurBB) DEBUGARG(msg)); |
6369 | } |
6370 | |
6371 | void Compiler::recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)) |
6372 | { |
6373 | // bbMemoryDef must include GcHeap for any block that mutates the GC Heap |
6374 | // and GC Heap mutations are also ByrefExposed mutations |
6375 | assert((compCurBB->bbMemoryDef & memoryKindSet(GcHeap, ByrefExposed)) == memoryKindSet(GcHeap, ByrefExposed)); |
6376 | fgCurMemoryVN[GcHeap] = gcHeapVN; |
6377 | |
6378 | if (byrefStatesMatchGcHeapStates) |
6379 | { |
6380 | // Since GcHeap and ByrefExposed share SSA nodes, they need to share |
6381 | // value numbers too. |
6382 | fgCurMemoryVN[ByrefExposed] = gcHeapVN; |
6383 | } |
6384 | else |
6385 | { |
6386 | // GcHeap and ByrefExposed have different defnums and VNs. We conservatively |
6387 | // assume that this GcHeap store may alias any byref load/store, so don't |
6388 | // bother trying to record the map/select stuff, and instead just an opaque VN |
6389 | // for ByrefExposed |
6390 | fgCurMemoryVN[ByrefExposed] = vnStore->VNForExpr(compCurBB); |
6391 | } |
6392 | |
6393 | #ifdef DEBUG |
6394 | if (verbose) |
6395 | { |
6396 | printf(" fgCurMemoryVN[GcHeap] assigned for %s at " , msg); |
6397 | Compiler::printTreeID(curTree); |
6398 | printf(" to VN: " FMT_VN ".\n" , gcHeapVN); |
6399 | } |
6400 | #endif // DEBUG |
6401 | |
6402 | // If byrefStatesMatchGcHeapStates is true, then since GcHeap and ByrefExposed share |
6403 | // their SSA map entries, the below will effectively update both. |
6404 | fgValueNumberRecordMemorySsa(GcHeap, curTree); |
6405 | } |
6406 | |
6407 | void Compiler::recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)) |
6408 | { |
6409 | // This should only happen if GcHeap and ByrefExposed are being tracked separately; |
6410 | // otherwise we'd go through recordGcHeapStore. |
6411 | assert(!byrefStatesMatchGcHeapStates); |
6412 | |
6413 | // bbMemoryDef must include ByrefExposed for any block that mutates an address-exposed local |
6414 | assert((compCurBB->bbMemoryDef & memoryKindSet(ByrefExposed)) != 0); |
6415 | fgCurMemoryVN[ByrefExposed] = memoryVN; |
6416 | |
6417 | #ifdef DEBUG |
6418 | if (verbose) |
6419 | { |
6420 | printf(" fgCurMemoryVN[ByrefExposed] assigned for %s at " , msg); |
6421 | Compiler::printTreeID(curTree); |
6422 | printf(" to VN: " FMT_VN ".\n" , memoryVN); |
6423 | } |
6424 | #endif // DEBUG |
6425 | |
6426 | fgValueNumberRecordMemorySsa(ByrefExposed, curTree); |
6427 | } |
6428 | |
6429 | void Compiler::fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree) |
6430 | { |
6431 | unsigned ssaNum; |
6432 | if (GetMemorySsaMap(memoryKind)->Lookup(tree, &ssaNum)) |
6433 | { |
6434 | GetMemoryPerSsaData(ssaNum)->m_vnPair.SetLiberal(fgCurMemoryVN[memoryKind]); |
6435 | #ifdef DEBUG |
6436 | if (verbose) |
6437 | { |
6438 | printf("Node " ); |
6439 | Compiler::printTreeID(tree); |
6440 | printf(" sets %s SSA # %d to VN $%x: " , memoryKindNames[memoryKind], ssaNum, fgCurMemoryVN[memoryKind]); |
6441 | vnStore->vnDump(this, fgCurMemoryVN[memoryKind]); |
6442 | printf("\n" ); |
6443 | } |
6444 | #endif // DEBUG |
6445 | } |
6446 | } |
6447 | |
//------------------------------------------------------------------------
// fgValueNumberTreeConst: Value number a constant leaf node.
//
// Arguments:
//    tree - a leaf node that is a constant; its gtVNPair is set (both liberal
//           and conservative sides) to the VN of the constant value.
//
void Compiler::fgValueNumberTreeConst(GenTree* tree)
{
    genTreeOps oper = tree->OperGet();
    var_types  typ  = tree->TypeGet();
    assert(GenTree::OperIsConst(oper));

    switch (typ)
    {
        case TYP_LONG:
        case TYP_ULONG:
        case TYP_INT:
        case TYP_UINT:
        case TYP_USHORT:
        case TYP_SHORT:
        case TYP_BYTE:
        case TYP_UBYTE:
        case TYP_BOOL:
            // Handle constants get a handle VN so the handle kind is preserved.
            if (tree->IsCnsIntOrI() && tree->IsIconHandle())
            {
                tree->gtVNPair.SetBoth(
                    vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
            }
            else if ((typ == TYP_LONG) || (typ == TYP_ULONG))
            {
                tree->gtVNPair.SetBoth(vnStore->VNForLongCon(INT64(tree->gtIntConCommon.LngValue())));
            }
            else
            {
                // Small-int types are normalized to a 32-bit int constant VN.
                tree->gtVNPair.SetBoth(vnStore->VNForIntCon(int(tree->gtIntConCommon.IconValue())));
            }
            break;

        case TYP_FLOAT:
            // The payload is stored as a double; narrow it for the float-constant VN.
            tree->gtVNPair.SetBoth(vnStore->VNForFloatCon((float)tree->gtDblCon.gtDconVal));
            break;
        case TYP_DOUBLE:
            tree->gtVNPair.SetBoth(vnStore->VNForDoubleCon(tree->gtDblCon.gtDconVal));
            break;
        case TYP_REF:
            if (tree->gtIntConCommon.IconValue() == 0)
            {
                tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
            }
            else
            {
                assert(tree->gtFlags == GTF_ICON_STR_HDL); // Constant object can be only frozen string.
                tree->gtVNPair.SetBoth(
                    vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
            }
            break;

        case TYP_BYREF:
            if (tree->gtIntConCommon.IconValue() == 0)
            {
                // A zero byref is treated as null.
                tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
            }
            else
            {
                assert(tree->IsCnsIntOrI());

                if (tree->IsIconHandle())
                {
                    tree->gtVNPair.SetBoth(
                        vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
                }
                else
                {
                    tree->gtVNPair.SetBoth(vnStore->VNForByrefCon(tree->gtIntConCommon.IconValue()));
                }
            }
            break;

        default:
            unreached();
    }
}
6526 | |
6527 | //------------------------------------------------------------------------ |
6528 | // fgValueNumberBlockAssignment: Perform value numbering for block assignments. |
6529 | // |
6530 | // Arguments: |
6531 | // tree - the block assignment to be value numbered. |
6532 | // |
6533 | // Return Value: |
6534 | // None. |
6535 | // |
6536 | // Assumptions: |
6537 | // 'tree' must be a block assignment (GT_INITBLK, GT_COPYBLK, GT_COPYOBJ). |
6538 | |
6539 | void Compiler::fgValueNumberBlockAssignment(GenTree* tree) |
6540 | { |
6541 | GenTree* lhs = tree->gtGetOp1(); |
6542 | GenTree* rhs = tree->gtGetOp2(); |
6543 | |
6544 | if (tree->OperIsInitBlkOp()) |
6545 | { |
6546 | GenTreeLclVarCommon* lclVarTree; |
6547 | bool isEntire; |
6548 | |
6549 | if (tree->DefinesLocal(this, &lclVarTree, &isEntire)) |
6550 | { |
6551 | assert(lclVarTree->gtFlags & GTF_VAR_DEF); |
6552 | // Should not have been recorded as updating the GC heap. |
6553 | assert(!GetMemorySsaMap(GcHeap)->Lookup(tree)); |
6554 | |
6555 | unsigned lclNum = lclVarTree->GetLclNum(); |
6556 | |
6557 | // Ignore vars that we excluded from SSA (for example, because they're address-exposed). They don't have |
6558 | // SSA names in which to store VN's on defs. We'll yield unique VN's when we read from them. |
6559 | if (lvaInSsa(lclNum)) |
6560 | { |
6561 | // Should not have been recorded as updating ByrefExposed. |
6562 | assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree)); |
6563 | |
6564 | unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree); |
6565 | |
6566 | ValueNum initBlkVN = ValueNumStore::NoVN; |
6567 | GenTree* initConst = rhs; |
6568 | if (isEntire && initConst->OperGet() == GT_CNS_INT) |
6569 | { |
6570 | unsigned initVal = 0xFF & (unsigned)initConst->AsIntConCommon()->IconValue(); |
6571 | if (initVal == 0) |
6572 | { |
6573 | initBlkVN = vnStore->VNZeroForType(lclVarTree->TypeGet()); |
6574 | } |
6575 | } |
6576 | ValueNum lclVarVN = (initBlkVN != ValueNumStore::NoVN) |
6577 | ? initBlkVN |
6578 | : vnStore->VNForExpr(compCurBB, var_types(lvaTable[lclNum].lvType)); |
6579 | |
6580 | lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair.SetBoth(lclVarVN); |
6581 | #ifdef DEBUG |
6582 | if (verbose) |
6583 | { |
6584 | printf("N%03u " , tree->gtSeqNum); |
6585 | Compiler::printTreeID(tree); |
6586 | printf(" " ); |
6587 | gtDispNodeName(tree); |
6588 | printf(" V%02u/%d => " , lclNum, lclDefSsaNum); |
6589 | vnPrint(lclVarVN, 1); |
6590 | printf("\n" ); |
6591 | } |
6592 | #endif // DEBUG |
6593 | } |
6594 | else if (lvaVarAddrExposed(lclVarTree->gtLclNum)) |
6595 | { |
6596 | fgMutateAddressExposedLocal(tree DEBUGARG("INITBLK - address-exposed local" )); |
6597 | } |
6598 | } |
6599 | else |
6600 | { |
6601 | // For now, arbitrary side effect on GcHeap/ByrefExposed. |
6602 | // TODO-CQ: Why not be complete, and get this case right? |
6603 | fgMutateGcHeap(tree DEBUGARG("INITBLK - non local" )); |
6604 | } |
6605 | // Initblock's are of type void. Give them the void "value" -- they may occur in argument lists, which we |
6606 | // want to be able to give VN's to. |
6607 | tree->gtVNPair.SetBoth(ValueNumStore::VNForVoid()); |
6608 | } |
6609 | else |
6610 | { |
6611 | assert(tree->OperIsCopyBlkOp()); |
6612 | // TODO-Cleanup: We should factor things so that we uniformly rely on "PtrTo" VN's, and |
6613 | // the memory cases can be shared with assignments. |
6614 | GenTreeLclVarCommon* lclVarTree = nullptr; |
6615 | bool isEntire = false; |
6616 | // Note that we don't care about exceptions here, since we're only using the values |
6617 | // to perform an assignment (which happens after any exceptions are raised...) |
6618 | |
6619 | if (tree->DefinesLocal(this, &lclVarTree, &isEntire)) |
6620 | { |
6621 | // Should not have been recorded as updating the GC heap. |
6622 | assert(!GetMemorySsaMap(GcHeap)->Lookup(tree)); |
6623 | |
6624 | unsigned lhsLclNum = lclVarTree->GetLclNum(); |
6625 | FieldSeqNode* lhsFldSeq = nullptr; |
6626 | // If it's excluded from SSA, don't need to do anything. |
6627 | if (lvaInSsa(lhsLclNum)) |
6628 | { |
6629 | // Should not have been recorded as updating ByrefExposed. |
6630 | assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree)); |
6631 | |
6632 | unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree); |
6633 | |
6634 | if (lhs->IsLocalExpr(this, &lclVarTree, &lhsFldSeq)) |
6635 | { |
6636 | noway_assert(lclVarTree->gtLclNum == lhsLclNum); |
6637 | } |
6638 | else |
6639 | { |
6640 | GenTree* lhsAddr; |
6641 | if (lhs->OperIsBlk()) |
6642 | { |
6643 | lhsAddr = lhs->AsBlk()->Addr(); |
6644 | } |
6645 | else |
6646 | { |
6647 | assert(lhs->OperGet() == GT_IND); |
6648 | lhsAddr = lhs->gtOp.gtOp1; |
6649 | } |
6650 | |
6651 | // For addr-of-local expressions, lib/cons shouldn't matter. |
6652 | assert(lhsAddr->gtVNPair.BothEqual()); |
6653 | ValueNum lhsAddrVN = lhsAddr->GetVN(VNK_Liberal); |
6654 | |
6655 | // Unpack the PtrToLoc value number of the address. |
6656 | assert(vnStore->IsVNFunc(lhsAddrVN)); |
6657 | |
6658 | VNFuncApp lhsAddrFuncApp; |
6659 | vnStore->GetVNFunc(lhsAddrVN, &lhsAddrFuncApp); |
6660 | |
6661 | assert(lhsAddrFuncApp.m_func == VNF_PtrToLoc); |
6662 | assert(vnStore->IsVNConstant(lhsAddrFuncApp.m_args[0]) && |
6663 | vnStore->ConstantValue<unsigned>(lhsAddrFuncApp.m_args[0]) == lhsLclNum); |
6664 | |
6665 | lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFuncApp.m_args[1]); |
6666 | } |
6667 | |
6668 | // Now we need to get the proper RHS. |
6669 | GenTreeLclVarCommon* rhsLclVarTree = nullptr; |
6670 | LclVarDsc* rhsVarDsc = nullptr; |
6671 | FieldSeqNode* rhsFldSeq = nullptr; |
6672 | ValueNumPair rhsVNPair; |
6673 | bool isNewUniq = false; |
6674 | if (!rhs->OperIsIndir()) |
6675 | { |
6676 | if (rhs->IsLocalExpr(this, &rhsLclVarTree, &rhsFldSeq)) |
6677 | { |
6678 | unsigned rhsLclNum = rhsLclVarTree->GetLclNum(); |
6679 | rhsVarDsc = &lvaTable[rhsLclNum]; |
6680 | if (!lvaInSsa(rhsLclNum) || rhsFldSeq == FieldSeqStore::NotAField()) |
6681 | { |
6682 | rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, rhsLclVarTree->TypeGet())); |
6683 | isNewUniq = true; |
6684 | } |
6685 | else |
6686 | { |
6687 | rhsVNPair = lvaTable[rhsLclVarTree->GetLclNum()] |
6688 | .GetPerSsaData(rhsLclVarTree->GetSsaNum()) |
6689 | ->m_vnPair; |
6690 | var_types indType = rhsLclVarTree->TypeGet(); |
6691 | |
6692 | rhsVNPair = vnStore->VNPairApplySelectors(rhsVNPair, rhsFldSeq, indType); |
6693 | } |
6694 | } |
6695 | else |
6696 | { |
6697 | rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, rhs->TypeGet())); |
6698 | isNewUniq = true; |
6699 | } |
6700 | } |
6701 | else |
6702 | { |
6703 | GenTree* srcAddr = rhs->AsIndir()->Addr(); |
6704 | VNFuncApp srcAddrFuncApp; |
6705 | if (srcAddr->IsLocalAddrExpr(this, &rhsLclVarTree, &rhsFldSeq)) |
6706 | { |
6707 | unsigned rhsLclNum = rhsLclVarTree->GetLclNum(); |
6708 | rhsVarDsc = &lvaTable[rhsLclNum]; |
6709 | if (!lvaInSsa(rhsLclNum) || rhsFldSeq == FieldSeqStore::NotAField()) |
6710 | { |
6711 | isNewUniq = true; |
6712 | } |
6713 | else |
6714 | { |
6715 | rhsVNPair = lvaTable[rhsLclVarTree->GetLclNum()] |
6716 | .GetPerSsaData(rhsLclVarTree->GetSsaNum()) |
6717 | ->m_vnPair; |
6718 | var_types indType = rhsLclVarTree->TypeGet(); |
6719 | |
6720 | rhsVNPair = vnStore->VNPairApplySelectors(rhsVNPair, rhsFldSeq, indType); |
6721 | } |
6722 | } |
6723 | else if (vnStore->GetVNFunc(vnStore->VNLiberalNormalValue(srcAddr->gtVNPair), &srcAddrFuncApp)) |
6724 | { |
6725 | if (srcAddrFuncApp.m_func == VNF_PtrToStatic) |
6726 | { |
6727 | var_types indType = lclVarTree->TypeGet(); |
6728 | ValueNum fieldSeqVN = srcAddrFuncApp.m_args[0]; |
6729 | |
6730 | FieldSeqNode* zeroOffsetFldSeq = nullptr; |
6731 | if (GetZeroOffsetFieldMap()->Lookup(srcAddr, &zeroOffsetFldSeq)) |
6732 | { |
6733 | fieldSeqVN = |
6734 | vnStore->FieldSeqVNAppend(fieldSeqVN, vnStore->VNForFieldSeq(zeroOffsetFldSeq)); |
6735 | } |
6736 | |
6737 | FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN); |
6738 | |
6739 | if (fldSeqForStaticVar != FieldSeqStore::NotAField()) |
6740 | { |
6741 | // We model statics as indices into GcHeap (which is a subset of ByrefExposed). |
6742 | ValueNum selectedStaticVar; |
6743 | size_t structSize = 0; |
6744 | selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], |
6745 | fldSeqForStaticVar, &structSize); |
6746 | selectedStaticVar = |
6747 | vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize); |
6748 | |
6749 | rhsVNPair.SetLiberal(selectedStaticVar); |
6750 | rhsVNPair.SetConservative(vnStore->VNForExpr(compCurBB, indType)); |
6751 | } |
6752 | else |
6753 | { |
6754 | JITDUMP(" *** Missing field sequence info for Src/RHS of COPYBLK\n" ); |
6755 | isNewUniq = true; |
6756 | } |
6757 | } |
6758 | else if (srcAddrFuncApp.m_func == VNF_PtrToArrElem) |
6759 | { |
6760 | ValueNum elemLib = |
6761 | fgValueNumberArrIndexVal(nullptr, &srcAddrFuncApp, vnStore->VNForEmptyExcSet()); |
6762 | rhsVNPair.SetLiberal(elemLib); |
6763 | rhsVNPair.SetConservative(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); |
6764 | } |
6765 | else |
6766 | { |
6767 | isNewUniq = true; |
6768 | } |
6769 | } |
6770 | else |
6771 | { |
6772 | isNewUniq = true; |
6773 | } |
6774 | } |
6775 | |
6776 | if (lhsFldSeq == FieldSeqStore::NotAField()) |
6777 | { |
6778 | // We don't have proper field sequence information for the lhs |
6779 | // |
6780 | JITDUMP(" *** Missing field sequence info for Dst/LHS of COPYBLK\n" ); |
6781 | isNewUniq = true; |
6782 | } |
6783 | |
6784 | if (isNewUniq) |
6785 | { |
6786 | rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); |
6787 | } |
6788 | else // We will assign rhsVNPair into a map[lhsFldSeq] |
6789 | { |
6790 | if (lhsFldSeq != nullptr && isEntire) |
6791 | { |
6792 | // This can occur for structs with one field, itself of a struct type. |
6793 | // We are assigning the one field and it is also the entire enclosing struct. |
6794 | // |
6795 | // Use an unique value number for the old map, as this is an an entire assignment |
6796 | // and we won't have any other values in the map |
6797 | ValueNumPair uniqueMap; |
6798 | uniqueMap.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); |
6799 | rhsVNPair = vnStore->VNPairApplySelectorsAssign(uniqueMap, lhsFldSeq, rhsVNPair, |
6800 | lclVarTree->TypeGet(), compCurBB); |
6801 | } |
6802 | else |
6803 | { |
6804 | ValueNumPair oldLhsVNPair = |
6805 | lvaTable[lhsLclNum].GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair; |
6806 | rhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lhsFldSeq, rhsVNPair, |
6807 | lclVarTree->TypeGet(), compCurBB); |
6808 | } |
6809 | } |
6810 | |
6811 | lvaTable[lhsLclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = vnStore->VNPNormalPair(rhsVNPair); |
6812 | |
6813 | #ifdef DEBUG |
6814 | if (verbose) |
6815 | { |
6816 | printf("Tree " ); |
6817 | Compiler::printTreeID(tree); |
6818 | printf(" assigned VN to local var V%02u/%d: " , lhsLclNum, lclDefSsaNum); |
6819 | if (isNewUniq) |
6820 | { |
6821 | printf("new uniq " ); |
6822 | } |
6823 | vnpPrint(rhsVNPair, 1); |
6824 | printf("\n" ); |
6825 | } |
6826 | #endif // DEBUG |
6827 | } |
6828 | else if (lvaVarAddrExposed(lhsLclNum)) |
6829 | { |
6830 | fgMutateAddressExposedLocal(tree DEBUGARG("COPYBLK - address-exposed local" )); |
6831 | } |
6832 | } |
6833 | else |
6834 | { |
6835 | // For now, arbitrary side effect on GcHeap/ByrefExposed. |
6836 | // TODO-CQ: Why not be complete, and get this case right? |
6837 | fgMutateGcHeap(tree DEBUGARG("COPYBLK - non local" )); |
6838 | } |
6839 | // Copyblock's are of type void. Give them the void "value" -- they may occur in argument lists, which we want |
6840 | // to be able to give VN's to. |
6841 | tree->gtVNPair.SetBoth(ValueNumStore::VNForVoid()); |
6842 | } |
6843 | } |
6844 | |
6845 | void Compiler::fgValueNumberTree(GenTree* tree) |
6846 | { |
6847 | genTreeOps oper = tree->OperGet(); |
6848 | |
6849 | #ifdef FEATURE_SIMD |
6850 | // TODO-CQ: For now TYP_SIMD values are not handled by value numbering to be amenable for CSE'ing. |
6851 | if (oper == GT_SIMD) |
6852 | { |
6853 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_UNKNOWN)); |
6854 | return; |
6855 | } |
6856 | #endif |
6857 | |
6858 | #ifdef FEATURE_HW_INTRINSICS |
6859 | if (oper == GT_HWIntrinsic) |
6860 | { |
6861 | // TODO-CQ: For now hardware intrinsics are not handled by value numbering to be amenable for CSE'ing. |
6862 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_UNKNOWN)); |
6863 | |
6864 | GenTreeHWIntrinsic* hwIntrinsicNode = tree->AsHWIntrinsic(); |
6865 | assert(hwIntrinsicNode != nullptr); |
6866 | |
6867 | // For safety/correctness we must mutate the global heap valuenumber |
6868 | // for any HW intrinsic that performs a memory store operation |
6869 | if (hwIntrinsicNode->OperIsMemoryStore()) |
6870 | { |
6871 | fgMutateGcHeap(tree DEBUGARG("HWIntrinsic - MemoryStore" )); |
6872 | } |
6873 | |
6874 | return; |
6875 | } |
6876 | #endif // FEATURE_HW_INTRINSICS |
6877 | |
6878 | var_types typ = tree->TypeGet(); |
6879 | if (GenTree::OperIsConst(oper)) |
6880 | { |
6881 | // If this is a struct assignment, with a constant rhs, it is an initBlk, and it is not |
6882 | // really useful to value number the constant. |
6883 | if (!varTypeIsStruct(tree)) |
6884 | { |
6885 | fgValueNumberTreeConst(tree); |
6886 | } |
6887 | } |
6888 | else if (GenTree::OperIsLeaf(oper)) |
6889 | { |
6890 | switch (oper) |
6891 | { |
6892 | case GT_LCL_VAR: |
6893 | { |
6894 | GenTreeLclVarCommon* lcl = tree->AsLclVarCommon(); |
6895 | unsigned lclNum = lcl->gtLclNum; |
6896 | LclVarDsc* varDsc = &lvaTable[lclNum]; |
6897 | |
6898 | // Do we have a Use (read) of the LclVar? |
6899 | // |
6900 | if ((lcl->gtFlags & GTF_VAR_DEF) == 0 || |
6901 | (lcl->gtFlags & GTF_VAR_USEASG)) // If it is a "pure" def, will handled as part of the assignment. |
6902 | { |
6903 | bool generateUniqueVN = false; |
6904 | FieldSeqNode* zeroOffsetFldSeq = nullptr; |
6905 | |
6906 | // When we have a TYP_BYREF LclVar it can have a zero offset field sequence that needs to be added |
6907 | if (typ == TYP_BYREF) |
6908 | { |
6909 | GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFldSeq); |
6910 | } |
6911 | |
6912 | if (varDsc->lvPromoted && varDsc->lvFieldCnt == 1) |
6913 | { |
6914 | // If the promoted var has only one field var, treat like a use of the field var. |
6915 | lclNum = varDsc->lvFieldLclStart; |
6916 | } |
6917 | |
6918 | if (lcl->gtSsaNum == SsaConfig::RESERVED_SSA_NUM) |
6919 | { |
6920 | // Not an SSA variable. |
6921 | |
6922 | if (lvaVarAddrExposed(lclNum)) |
6923 | { |
6924 | // Address-exposed locals are part of ByrefExposed. |
6925 | ValueNum addrVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToLoc, vnStore->VNForIntCon(lclNum), |
6926 | vnStore->VNForFieldSeq(nullptr)); |
6927 | ValueNum loadVN = fgValueNumberByrefExposedLoad(typ, addrVN); |
6928 | |
6929 | lcl->gtVNPair.SetBoth(loadVN); |
6930 | } |
6931 | else |
6932 | { |
6933 | // Assign odd cases a new, unique, VN. |
6934 | generateUniqueVN = true; |
6935 | } |
6936 | } |
6937 | else |
6938 | { |
6939 | ValueNumPair wholeLclVarVNP = varDsc->GetPerSsaData(lcl->gtSsaNum)->m_vnPair; |
6940 | |
6941 | // Check for mismatched LclVar size |
6942 | // |
6943 | unsigned typSize = genTypeSize(genActualType(typ)); |
6944 | unsigned varSize = genTypeSize(genActualType(varDsc->TypeGet())); |
6945 | |
6946 | if (typSize == varSize) |
6947 | { |
6948 | lcl->gtVNPair = wholeLclVarVNP; |
6949 | } |
6950 | else // mismatched LclVar definition and LclVar use size |
6951 | { |
6952 | if (typSize < varSize) |
6953 | { |
6954 | // the indirection is reading less that the whole LclVar |
6955 | // create a new VN that represent the partial value |
6956 | // |
6957 | ValueNumPair partialLclVarVNP = |
6958 | vnStore->VNPairForCast(wholeLclVarVNP, typ, varDsc->TypeGet()); |
6959 | lcl->gtVNPair = partialLclVarVNP; |
6960 | } |
6961 | else |
6962 | { |
6963 | assert(typSize > varSize); |
6964 | // the indirection is reading beyond the end of the field |
6965 | // |
6966 | generateUniqueVN = true; |
6967 | } |
6968 | } |
6969 | } |
6970 | |
6971 | if (!generateUniqueVN) |
6972 | { |
6973 | // There are a couple of cases where we haven't assigned a valid value number to 'lcl' |
6974 | // |
6975 | if (lcl->gtVNPair.GetLiberal() == ValueNumStore::NoVN) |
6976 | { |
6977 | // So far, we know about two of these cases: |
6978 | // Case 1) We have a local var who has never been defined but it's seen as a use. |
6979 | // This is the case of storeIndir(addr(lclvar)) = expr. In this case since we only |
6980 | // take the address of the variable, this doesn't mean it's a use nor we have to |
6981 | // initialize it, so in this very rare case, we fabricate a value number. |
6982 | // Case 2) Local variables that represent structs which are assigned using CpBlk. |
6983 | // |
6984 | // Make sure we have either case 1 or case 2 |
6985 | // |
6986 | GenTree* nextNode = lcl->gtNext; |
6987 | assert((nextNode->gtOper == GT_ADDR && nextNode->gtOp.gtOp1 == lcl) || |
6988 | varTypeIsStruct(lcl->TypeGet())); |
6989 | |
6990 | // We will assign a unique value number for these |
6991 | // |
6992 | generateUniqueVN = true; |
6993 | } |
6994 | } |
6995 | |
6996 | if (!generateUniqueVN && (zeroOffsetFldSeq != nullptr)) |
6997 | { |
6998 | ValueNum addrExtended = vnStore->ExtendPtrVN(lcl, zeroOffsetFldSeq); |
6999 | if (addrExtended != ValueNumStore::NoVN) |
7000 | { |
7001 | lcl->gtVNPair.SetBoth(addrExtended); |
7002 | } |
7003 | } |
7004 | |
7005 | if (generateUniqueVN) |
7006 | { |
7007 | ValueNum uniqVN = vnStore->VNForExpr(compCurBB, lcl->TypeGet()); |
7008 | lcl->gtVNPair.SetBoth(uniqVN); |
7009 | } |
7010 | } |
7011 | else if ((lcl->gtFlags & GTF_VAR_DEF) != 0) |
7012 | { |
7013 | // We have a Def (write) of the LclVar |
7014 | |
7015 | // TODO-Review: For the short term, we have a workaround for copyblk/initblk. Those that use |
7016 | // addrSpillTemp will have a statement like "addrSpillTemp = addr(local)." If we previously decided |
7017 | // that this block operation defines the local, we will have labeled the "local" node as a DEF |
7018 | // This flag propagates to the "local" on the RHS. So we'll assume that this is correct, |
7019 | // and treat it as a def (to a new, unique VN). |
7020 | // |
7021 | if (lcl->gtSsaNum != SsaConfig::RESERVED_SSA_NUM) |
7022 | { |
7023 | ValueNum uniqVN = vnStore->VNForExpr(compCurBB, lcl->TypeGet()); |
7024 | varDsc->GetPerSsaData(lcl->gtSsaNum)->m_vnPair.SetBoth(uniqVN); |
7025 | } |
7026 | |
7027 | lcl->gtVNPair = ValueNumPair(); // Avoid confusion -- we don't set the VN of a lcl being defined. |
7028 | } |
7029 | } |
7030 | break; |
7031 | |
7032 | case GT_FTN_ADDR: |
7033 | // Use the value of the function pointer (actually, a method handle.) |
7034 | tree->gtVNPair.SetBoth( |
7035 | vnStore->VNForHandle(ssize_t(tree->gtFptrVal.gtFptrMethod), GTF_ICON_METHOD_HDL)); |
7036 | break; |
7037 | |
7038 | // This group passes through a value from a child node. |
7039 | case GT_RET_EXPR: |
7040 | tree->SetVNsFromNode(tree->gtRetExpr.gtInlineCandidate); |
7041 | break; |
7042 | |
7043 | case GT_LCL_FLD: |
7044 | { |
7045 | GenTreeLclFld* lclFld = tree->AsLclFld(); |
7046 | assert(!lvaInSsa(lclFld->GetLclNum()) || lclFld->gtFieldSeq != nullptr); |
7047 | // If this is a (full) def, then the variable will be labeled with the new SSA number, |
7048 | // which will not have a value. We skip; it will be handled by one of the assignment-like |
7049 | // forms (assignment, or initBlk or copyBlk). |
7050 | if (((lclFld->gtFlags & GTF_VAR_DEF) == 0) || (lclFld->gtFlags & GTF_VAR_USEASG)) |
7051 | { |
7052 | unsigned lclNum = lclFld->GetLclNum(); |
7053 | unsigned ssaNum = lclFld->GetSsaNum(); |
7054 | LclVarDsc* varDsc = &lvaTable[lclNum]; |
7055 | |
7056 | var_types indType = tree->TypeGet(); |
7057 | if (lclFld->gtFieldSeq == FieldSeqStore::NotAField() || !lvaInSsa(lclFld->GetLclNum())) |
7058 | { |
7059 | // This doesn't represent a proper field access or it's a struct |
7060 | // with overlapping fields that is hard to reason about; return a new unique VN. |
7061 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); |
7062 | } |
7063 | else |
7064 | { |
7065 | ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; |
7066 | tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, lclFld->gtFieldSeq, indType); |
7067 | } |
7068 | } |
7069 | } |
7070 | break; |
7071 | |
7072 | // The ones below here all get a new unique VN -- but for various reasons, explained after each. |
7073 | case GT_CATCH_ARG: |
7074 | // We know nothing about the value of a caught expression. |
7075 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
7076 | break; |
7077 | |
7078 | case GT_CLS_VAR: |
7079 | // Skip GT_CLS_VAR nodes that are the LHS of an assignment. (We labeled these earlier.) |
7080 | // We will "evaluate" this as part of the assignment. |
7081 | // |
7082 | if ((tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0) |
7083 | { |
7084 | bool isVolatile = (tree->gtFlags & GTF_FLD_VOLATILE) != 0; |
7085 | |
7086 | if (isVolatile) |
7087 | { |
7088 | // For Volatile indirection, first mutate GcHeap/ByrefExposed |
7089 | fgMutateGcHeap(tree DEBUGARG("GTF_FLD_VOLATILE - read" )); |
7090 | } |
7091 | |
7092 | // We just mutate GcHeap/ByrefExposed if isVolatile is true, and then do the read as normal. |
7093 | // |
7094 | // This allows: |
7095 | // 1: read s; |
7096 | // 2: volatile read s; |
7097 | // 3: read s; |
7098 | // |
7099 | // We should never assume that the values read by 1 and 2 are the same (because the heap was mutated |
7100 | // in between them)... but we *should* be able to prove that the values read in 2 and 3 are the |
7101 | // same. |
7102 | // |
7103 | |
7104 | ValueNumPair clsVarVNPair; |
7105 | |
7106 | // If the static field handle is for a struct type field, then the value of the static |
7107 | // is a "ref" to the boxed struct -- treat it as the address of the static (we assume that a |
7108 | // first element offset will be added to get to the actual struct...) |
7109 | GenTreeClsVar* clsVar = tree->AsClsVar(); |
7110 | FieldSeqNode* fldSeq = clsVar->gtFieldSeq; |
7111 | assert(fldSeq != nullptr); // We need to have one. |
7112 | ValueNum selectedStaticVar = ValueNumStore::NoVN; |
7113 | if (gtIsStaticFieldPtrToBoxedStruct(clsVar->TypeGet(), fldSeq->m_fieldHnd)) |
7114 | { |
7115 | clsVarVNPair.SetBoth( |
7116 | vnStore->VNForFunc(TYP_BYREF, VNF_PtrToStatic, vnStore->VNForFieldSeq(fldSeq))); |
7117 | } |
7118 | else |
7119 | { |
7120 | // This is a reference to heap memory. |
7121 | // We model statics as indices into GcHeap (which is a subset of ByrefExposed). |
7122 | |
7123 | FieldSeqNode* fldSeqForStaticVar = |
7124 | GetFieldSeqStore()->CreateSingleton(tree->gtClsVar.gtClsVarHnd); |
7125 | size_t structSize = 0; |
7126 | selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], |
7127 | fldSeqForStaticVar, &structSize); |
7128 | selectedStaticVar = |
7129 | vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, tree->TypeGet(), structSize); |
7130 | |
7131 | clsVarVNPair.SetLiberal(selectedStaticVar); |
7132 | // The conservative interpretation always gets a new, unique VN. |
7133 | clsVarVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
7134 | } |
7135 | |
7136 | // The ValueNum returned must represent the full-sized IL-Stack value |
7137 | // If we need to widen this value then we need to introduce a VNF_Cast here to represent |
7138 | // the widened value. This is necessary since the CSE package can replace all occurances |
7139 | // of a given ValueNum with a LclVar that is a full-sized IL-Stack value |
7140 | // |
7141 | if (varTypeIsSmall(tree->TypeGet())) |
7142 | { |
7143 | var_types castToType = tree->TypeGet(); |
7144 | clsVarVNPair = vnStore->VNPairForCast(clsVarVNPair, castToType, castToType); |
7145 | } |
7146 | tree->gtVNPair = clsVarVNPair; |
7147 | } |
7148 | break; |
7149 | |
7150 | case GT_MEMORYBARRIER: // Leaf |
7151 | // For MEMORYBARRIER add an arbitrary side effect on GcHeap/ByrefExposed. |
7152 | fgMutateGcHeap(tree DEBUGARG("MEMORYBARRIER" )); |
7153 | break; |
7154 | |
7155 | // These do not represent values. |
7156 | case GT_NO_OP: |
7157 | case GT_JMP: // Control flow |
7158 | case GT_LABEL: // Control flow |
7159 | #if !FEATURE_EH_FUNCLETS |
7160 | case GT_END_LFIN: // Control flow |
7161 | #endif |
7162 | case GT_ARGPLACE: |
7163 | // This node is a standin for an argument whose value will be computed later. (Perhaps it's |
7164 | // a register argument, and we don't want to preclude use of the register in arg evaluation yet.) |
7165 | // We give this a "fake" value number now; if the call in which it occurs cares about the |
7166 | // value (e.g., it's a helper call whose result is a function of argument values) we'll reset |
7167 | // this later, when the later args have been assigned VNs. |
7168 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
7169 | break; |
7170 | |
7171 | case GT_PHI_ARG: |
7172 | // This one is special because we should never process it in this method: it should |
7173 | // always be taken care of, when needed, during pre-processing of a blocks phi definitions. |
7174 | assert(false); |
7175 | break; |
7176 | |
7177 | default: |
7178 | unreached(); |
7179 | } |
7180 | } |
7181 | else if (GenTree::OperIsSimple(oper)) |
7182 | { |
7183 | #ifdef DEBUG |
7184 | // Sometimes we query the memory ssa map in an assertion, and need a dummy location for the ignored result. |
7185 | unsigned memorySsaNum; |
7186 | #endif |
7187 | |
7188 | if ((oper == GT_ASG) && !varTypeIsStruct(tree)) |
7189 | { |
7190 | GenTree* lhs = tree->gtOp.gtOp1; |
7191 | GenTree* rhs = tree->gtOp.gtOp2; |
7192 | |
7193 | ValueNumPair rhsVNPair = rhs->gtVNPair; |
7194 | |
7195 | // Is the type being stored different from the type computed by the rhs? |
7196 | if (rhs->TypeGet() != lhs->TypeGet()) |
7197 | { |
7198 | // This means that there is an implicit cast on the rhs value |
7199 | // |
7200 | // We will add a cast function to reflect the possible narrowing of the rhs value |
7201 | // |
7202 | var_types castToType = lhs->TypeGet(); |
7203 | var_types castFromType = rhs->TypeGet(); |
7204 | bool isUnsigned = varTypeIsUnsigned(castFromType); |
7205 | |
7206 | rhsVNPair = vnStore->VNPairForCast(rhsVNPair, castToType, castFromType, isUnsigned); |
7207 | } |
7208 | |
7209 | if (tree->TypeGet() != TYP_VOID) |
7210 | { |
7211 | // Assignment operators, as expressions, return the value of the RHS. |
7212 | tree->gtVNPair = rhsVNPair; |
7213 | } |
7214 | |
7215 | // Now that we've labeled the assignment as a whole, we don't care about exceptions. |
7216 | rhsVNPair = vnStore->VNPNormalPair(rhsVNPair); |
7217 | |
7218 | // Record the exeception set for this 'tree' in vnExcSet. |
7219 | // First we'll record the exeception set for the rhs and |
7220 | // later we will union in the exeception set for the lhs |
7221 | // |
7222 | ValueNum vnExcSet; |
7223 | |
7224 | // Unpack, Norm,Exc for 'rhsVNPair' |
7225 | ValueNum vnRhsLibNorm; |
7226 | vnStore->VNUnpackExc(rhsVNPair.GetLiberal(), &vnRhsLibNorm, &vnExcSet); |
7227 | |
7228 | // Now that we've saved the rhs exeception set, we we will use the normal values. |
7229 | rhsVNPair = ValueNumPair(vnRhsLibNorm, vnStore->VNNormalValue(rhsVNPair.GetConservative())); |
7230 | |
7231 | // If the types of the rhs and lhs are different then we |
7232 | // may want to change the ValueNumber assigned to the lhs. |
7233 | // |
7234 | if (rhs->TypeGet() != lhs->TypeGet()) |
7235 | { |
7236 | if (rhs->TypeGet() == TYP_REF) |
7237 | { |
7238 | // If we have an unsafe IL assignment of a TYP_REF to a non-ref (typically a TYP_BYREF) |
7239 | // then don't propagate this ValueNumber to the lhs, instead create a new unique VN |
7240 | // |
7241 | rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhs->TypeGet())); |
7242 | } |
7243 | } |
7244 | |
7245 | // We have to handle the case where the LHS is a comma. In that case, we don't evaluate the comma, |
7246 | // so we give it VNForVoid, and we're really interested in the effective value. |
7247 | GenTree* lhsCommaIter = lhs; |
7248 | while (lhsCommaIter->OperGet() == GT_COMMA) |
7249 | { |
7250 | lhsCommaIter->gtVNPair.SetBoth(vnStore->VNForVoid()); |
7251 | lhsCommaIter = lhsCommaIter->gtOp.gtOp2; |
7252 | } |
7253 | lhs = lhs->gtEffectiveVal(); |
7254 | |
7255 | // Now, record the new VN for an assignment (performing the indicated "state update"). |
7256 | // It's safe to use gtEffectiveVal here, because the non-last elements of a comma list on the |
7257 | // LHS will come before the assignment in evaluation order. |
7258 | switch (lhs->OperGet()) |
7259 | { |
7260 | case GT_LCL_VAR: |
7261 | { |
7262 | GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon(); |
7263 | unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lcl); |
7264 | |
7265 | // Should not have been recorded as updating the GC heap. |
7266 | assert(!GetMemorySsaMap(GcHeap)->Lookup(tree, &memorySsaNum)); |
7267 | |
7268 | if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) |
7269 | { |
7270 | // Should not have been recorded as updating ByrefExposed mem. |
7271 | assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree, &memorySsaNum)); |
7272 | |
7273 | assert(rhsVNPair.GetLiberal() != ValueNumStore::NoVN); |
7274 | |
7275 | lhs->gtVNPair = rhsVNPair; |
7276 | lvaTable[lcl->gtLclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = rhsVNPair; |
7277 | |
7278 | #ifdef DEBUG |
7279 | if (verbose) |
7280 | { |
7281 | printf("N%03u " , lhs->gtSeqNum); |
7282 | Compiler::printTreeID(lhs); |
7283 | printf(" " ); |
7284 | gtDispNodeName(lhs); |
7285 | gtDispLeaf(lhs, nullptr); |
7286 | printf(" => " ); |
7287 | vnpPrint(lhs->gtVNPair, 1); |
7288 | printf("\n" ); |
7289 | } |
7290 | #endif // DEBUG |
7291 | } |
7292 | else if (lvaVarAddrExposed(lcl->gtLclNum)) |
7293 | { |
7294 | // We could use MapStore here and MapSelect on reads of address-exposed locals |
7295 | // (using the local nums as selectors) to get e.g. propagation of values |
7296 | // through address-taken locals in regions of code with no calls or byref |
7297 | // writes. |
7298 | // For now, just use a new opaque VN. |
7299 | ValueNum heapVN = vnStore->VNForExpr(compCurBB); |
7300 | recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local assign" )); |
7301 | } |
7302 | #ifdef DEBUG |
7303 | else |
7304 | { |
7305 | if (verbose) |
7306 | { |
7307 | JITDUMP("Tree " ); |
7308 | Compiler::printTreeID(tree); |
7309 | printf(" assigns to non-address-taken local var V%02u; excluded from SSA, so value not " |
7310 | "tracked.\n" , |
7311 | lcl->GetLclNum()); |
7312 | } |
7313 | } |
7314 | #endif // DEBUG |
7315 | } |
7316 | break; |
7317 | case GT_LCL_FLD: |
7318 | { |
7319 | GenTreeLclFld* lclFld = lhs->AsLclFld(); |
7320 | unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclFld); |
7321 | |
7322 | // Should not have been recorded as updating the GC heap. |
7323 | assert(!GetMemorySsaMap(GcHeap)->Lookup(tree, &memorySsaNum)); |
7324 | |
7325 | if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM) |
7326 | { |
7327 | ValueNumPair newLhsVNPair; |
7328 | // Is this a full definition? |
7329 | if ((lclFld->gtFlags & GTF_VAR_USEASG) == 0) |
7330 | { |
7331 | assert(!lclFld->IsPartialLclFld(this)); |
7332 | assert(rhsVNPair.GetLiberal() != ValueNumStore::NoVN); |
7333 | newLhsVNPair = rhsVNPair; |
7334 | } |
7335 | else |
7336 | { |
7337 | // We should never have a null field sequence here. |
7338 | assert(lclFld->gtFieldSeq != nullptr); |
7339 | if (lclFld->gtFieldSeq == FieldSeqStore::NotAField()) |
7340 | { |
7341 | // We don't know what field this represents. Assign a new VN to the whole variable |
7342 | // (since we may be writing to an unknown portion of it.) |
7343 | newLhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lvaGetActualType(lclFld->gtLclNum))); |
7344 | } |
7345 | else |
7346 | { |
7347 | // We do know the field sequence. |
7348 | // The "lclFld" node will be labeled with the SSA number of its "use" identity |
7349 | // (we looked in a side table above for its "def" identity). Look up that value. |
7350 | ValueNumPair oldLhsVNPair = |
7351 | lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclFld->GetSsaNum())->m_vnPair; |
7352 | newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lclFld->gtFieldSeq, |
7353 | rhsVNPair, // Pre-value. |
7354 | lclFld->TypeGet(), compCurBB); |
7355 | } |
7356 | } |
7357 | lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair; |
7358 | lhs->gtVNPair = newLhsVNPair; |
7359 | #ifdef DEBUG |
7360 | if (verbose) |
7361 | { |
7362 | if (lhs->gtVNPair.GetLiberal() != ValueNumStore::NoVN) |
7363 | { |
7364 | printf("N%03u " , lhs->gtSeqNum); |
7365 | Compiler::printTreeID(lhs); |
7366 | printf(" " ); |
7367 | gtDispNodeName(lhs); |
7368 | gtDispLeaf(lhs, nullptr); |
7369 | printf(" => " ); |
7370 | vnpPrint(lhs->gtVNPair, 1); |
7371 | printf("\n" ); |
7372 | } |
7373 | } |
7374 | #endif // DEBUG |
7375 | } |
7376 | else if (lvaVarAddrExposed(lclFld->gtLclNum)) |
7377 | { |
7378 | // This side-effects ByrefExposed. Just use a new opaque VN. |
7379 | // As with GT_LCL_VAR, we could probably use MapStore here and MapSelect at corresponding |
7380 | // loads, but to do so would have to identify the subset of address-exposed locals |
7381 | // whose fields can be disambiguated. |
7382 | ValueNum heapVN = vnStore->VNForExpr(compCurBB); |
7383 | recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local field assign" )); |
7384 | } |
7385 | } |
7386 | break; |
7387 | |
7388 | case GT_PHI_ARG: |
7389 | noway_assert(!"Phi arg cannot be LHS." ); |
7390 | break; |
7391 | |
7392 | case GT_BLK: |
7393 | case GT_OBJ: |
7394 | noway_assert(!"GT_BLK/GT_OBJ can not be LHS when !varTypeIsStruct(tree) is true!" ); |
7395 | break; |
7396 | |
7397 | case GT_IND: |
7398 | { |
7399 | bool isVolatile = (lhs->gtFlags & GTF_IND_VOLATILE) != 0; |
7400 | |
7401 | if (isVolatile) |
7402 | { |
7403 | // For Volatile store indirection, first mutate GcHeap/ByrefExposed |
7404 | fgMutateGcHeap(lhs DEBUGARG("GTF_IND_VOLATILE - store" )); |
7405 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhs->TypeGet())); |
7406 | } |
7407 | |
7408 | GenTree* arg = lhs->gtOp.gtOp1; |
7409 | |
7410 | // Indicates whether the argument of the IND is the address of a local. |
7411 | bool wasLocal = false; |
7412 | |
7413 | lhs->gtVNPair = rhsVNPair; |
7414 | |
7415 | VNFuncApp funcApp; |
7416 | ValueNum argVN = arg->gtVNPair.GetLiberal(); |
7417 | |
7418 | bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormalValue(argVN), &funcApp); |
7419 | |
7420 | // Is this an assignment to a (field of, perhaps) a local? |
7421 | // If it is a PtrToLoc, lib and cons VNs will be the same. |
7422 | if (argIsVNFunc) |
7423 | { |
7424 | if (funcApp.m_func == VNF_PtrToLoc) |
7425 | { |
7426 | assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ. |
7427 | assert(vnStore->IsVNConstant(funcApp.m_args[0])); |
7428 | unsigned lclNum = vnStore->ConstantValue<unsigned>(funcApp.m_args[0]); |
7429 | |
7430 | wasLocal = true; |
7431 | |
7432 | if (lvaInSsa(lclNum)) |
7433 | { |
7434 | FieldSeqNode* fieldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]); |
7435 | |
7436 | // Either "arg" is the address of (part of) a local itself, or else we have |
7437 | // a "rogue" PtrToLoc, one that should have made the local in question |
7438 | // address-exposed. Assert on that. |
7439 | GenTreeLclVarCommon* lclVarTree = nullptr; |
7440 | bool isEntire = false; |
7441 | unsigned lclDefSsaNum = SsaConfig::RESERVED_SSA_NUM; |
7442 | ValueNumPair newLhsVNPair; |
7443 | |
7444 | if (arg->DefinesLocalAddr(this, genTypeSize(lhs->TypeGet()), &lclVarTree, &isEntire)) |
7445 | { |
7446 | // The local #'s should agree. |
7447 | assert(lclNum == lclVarTree->GetLclNum()); |
7448 | |
7449 | if (fieldSeq == FieldSeqStore::NotAField()) |
7450 | { |
7451 | // We don't know where we're storing, so give the local a new, unique VN. |
7452 | // Do this by considering it an "entire" assignment, with an unknown RHS. |
7453 | isEntire = true; |
7454 | rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet())); |
7455 | } |
7456 | |
7457 | if (isEntire) |
7458 | { |
7459 | newLhsVNPair = rhsVNPair; |
7460 | lclDefSsaNum = lclVarTree->GetSsaNum(); |
7461 | } |
7462 | else |
7463 | { |
7464 | // Don't use the lclVarTree's VN: if it's a local field, it will |
                                        // already be dereferenced by its field sequence.
7466 | ValueNumPair oldLhsVNPair = lvaTable[lclVarTree->GetLclNum()] |
7467 | .GetPerSsaData(lclVarTree->GetSsaNum()) |
7468 | ->m_vnPair; |
7469 | lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree); |
7470 | newLhsVNPair = |
7471 | vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, fieldSeq, rhsVNPair, |
7472 | lhs->TypeGet(), compCurBB); |
7473 | } |
7474 | lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair; |
7475 | } |
7476 | else |
7477 | { |
7478 | unreached(); // "Rogue" PtrToLoc, as discussed above. |
7479 | } |
7480 | #ifdef DEBUG |
7481 | if (verbose) |
7482 | { |
7483 | printf("Tree " ); |
7484 | Compiler::printTreeID(tree); |
7485 | printf(" assigned VN to local var V%02u/%d: VN " , lclNum, lclDefSsaNum); |
7486 | vnpPrint(newLhsVNPair, 1); |
7487 | printf("\n" ); |
7488 | } |
7489 | #endif // DEBUG |
7490 | } |
7491 | else if (lvaVarAddrExposed(lclNum)) |
7492 | { |
7493 | // Need to record the effect on ByrefExposed. |
7494 | // We could use MapStore here and MapSelect on reads of address-exposed locals |
7495 | // (using the local nums as selectors) to get e.g. propagation of values |
7496 | // through address-taken locals in regions of code with no calls or byref |
7497 | // writes. |
7498 | // For now, just use a new opaque VN. |
7499 | ValueNum heapVN = vnStore->VNForExpr(compCurBB); |
7500 | recordAddressExposedLocalStore(tree, heapVN DEBUGARG("PtrToLoc indir" )); |
7501 | } |
7502 | } |
7503 | } |
7504 | |
7505 | // Was the argument of the GT_IND the address of a local, handled above? |
7506 | if (!wasLocal) |
7507 | { |
7508 | GenTree* obj = nullptr; |
7509 | GenTree* staticOffset = nullptr; |
7510 | FieldSeqNode* fldSeq = nullptr; |
7511 | |
7512 | // Is the LHS an array index expression? |
7513 | if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem) |
7514 | { |
7515 | CORINFO_CLASS_HANDLE elemTypeEq = |
7516 | CORINFO_CLASS_HANDLE(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0])); |
7517 | ValueNum arrVN = funcApp.m_args[1]; |
7518 | ValueNum inxVN = funcApp.m_args[2]; |
7519 | FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[3]); |
7520 | |
7521 | if (arg->gtOper != GT_LCL_VAR) |
7522 | { |
7523 | // Does the child of the GT_IND 'arg' have an associated zero-offset field sequence? |
7524 | FieldSeqNode* addrFieldSeq = nullptr; |
7525 | if (GetZeroOffsetFieldMap()->Lookup(arg, &addrFieldSeq)) |
7526 | { |
7527 | fldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fldSeq); |
7528 | } |
7529 | } |
7530 | |
7531 | #ifdef DEBUG |
7532 | if (verbose) |
7533 | { |
7534 | printf("Tree " ); |
7535 | Compiler::printTreeID(tree); |
7536 | printf(" assigns to an array element:\n" ); |
7537 | } |
7538 | #endif // DEBUG |
7539 | |
7540 | ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, |
7541 | rhsVNPair.GetLiberal(), lhs->TypeGet()); |
7542 | recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign (case 1)" )); |
7543 | } |
7544 | // It may be that we haven't parsed it yet. Try. |
7545 | else if (lhs->gtFlags & GTF_IND_ARR_INDEX) |
7546 | { |
7547 | ArrayInfo arrInfo; |
7548 | bool b = GetArrayInfoMap()->Lookup(lhs, &arrInfo); |
7549 | assert(b); |
7550 | ValueNum arrVN = ValueNumStore::NoVN; |
7551 | ValueNum inxVN = ValueNumStore::NoVN; |
7552 | FieldSeqNode* fldSeq = nullptr; |
7553 | |
7554 | // Try to parse it. |
7555 | GenTree* arr = nullptr; |
7556 | arg->ParseArrayAddress(this, &arrInfo, &arr, &inxVN, &fldSeq); |
7557 | if (arr == nullptr) |
7558 | { |
7559 | fgMutateGcHeap(tree DEBUGARG("assignment to unparseable array expression" )); |
7560 | return; |
7561 | } |
7562 | // Otherwise, parsing succeeded. |
7563 | |
7564 | // Need to form H[arrType][arr][ind][fldSeq] = rhsVNPair.GetLiberal() |
7565 | |
7566 | // Get the element type equivalence class representative. |
7567 | CORINFO_CLASS_HANDLE elemTypeEq = |
7568 | EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType); |
7569 | arrVN = arr->gtVNPair.GetLiberal(); |
7570 | |
7571 | FieldSeqNode* zeroOffsetFldSeq = nullptr; |
7572 | if (GetZeroOffsetFieldMap()->Lookup(arg, &zeroOffsetFldSeq)) |
7573 | { |
7574 | fldSeq = GetFieldSeqStore()->Append(fldSeq, zeroOffsetFldSeq); |
7575 | } |
7576 | |
7577 | ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq, |
7578 | rhsVNPair.GetLiberal(), lhs->TypeGet()); |
7579 | recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign (case 2)" )); |
7580 | } |
7581 | else if (arg->IsFieldAddr(this, &obj, &staticOffset, &fldSeq)) |
7582 | { |
7583 | if (fldSeq == FieldSeqStore::NotAField()) |
7584 | { |
7585 | fgMutateGcHeap(tree DEBUGARG("NotAField" )); |
7586 | } |
7587 | else |
7588 | { |
7589 | assert(fldSeq != nullptr); |
7590 | #ifdef DEBUG |
7591 | CORINFO_CLASS_HANDLE fldCls = info.compCompHnd->getFieldClass(fldSeq->m_fieldHnd); |
7592 | if (obj != nullptr) |
7593 | { |
7594 | // Make sure that the class containing it is not a value class (as we are expecting |
7595 | // an instance field) |
7596 | assert((info.compCompHnd->getClassAttribs(fldCls) & CORINFO_FLG_VALUECLASS) == 0); |
7597 | assert(staticOffset == nullptr); |
7598 | } |
7599 | #endif // DEBUG |
7600 | |
7601 | // Get the first (instance or static) field from field seq. GcHeap[field] will yield |
7602 | // the "field map". |
7603 | if (fldSeq->IsFirstElemFieldSeq()) |
7604 | { |
7605 | fldSeq = fldSeq->m_next; |
7606 | assert(fldSeq != nullptr); |
7607 | } |
7608 | |
7609 | // Get a field sequence for just the first field in the sequence |
7610 | // |
7611 | FieldSeqNode* firstFieldOnly = GetFieldSeqStore()->CreateSingleton(fldSeq->m_fieldHnd); |
7612 | |
7613 | // The final field in the sequence will need to match the 'indType' |
7614 | var_types indType = lhs->TypeGet(); |
7615 | ValueNum fldMapVN = |
7616 | vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], firstFieldOnly); |
7617 | |
7618 | // The type of the field is "struct" if there are more fields in the sequence, |
7619 | // otherwise it is the type returned from VNApplySelectors above. |
7620 | var_types firstFieldType = vnStore->TypeOfVN(fldMapVN); |
7621 | |
7622 | // The value number from the rhs of the assignment |
7623 | ValueNum storeVal = rhsVNPair.GetLiberal(); |
7624 | ValueNum newFldMapVN = ValueNumStore::NoVN; |
7625 | |
7626 | // when (obj != nullptr) we have an instance field, otherwise a static field |
                                // when (staticOffset != nullptr) it represents an offset into a static or the call to
7628 | // Shared Static Base |
7629 | if ((obj != nullptr) || (staticOffset != nullptr)) |
7630 | { |
7631 | ValueNum valAtAddr = fldMapVN; |
7632 | ValueNum normVal = ValueNumStore::NoVN; |
7633 | |
7634 | if (obj != nullptr) |
7635 | { |
7636 | // Unpack, Norm,Exc for 'obj' |
7637 | ValueNum vnObjExcSet; |
7638 | vnStore->VNUnpackExc(obj->gtVNPair.GetLiberal(), &normVal, &vnObjExcSet); |
7639 | vnExcSet = vnStore->VNExcSetUnion(vnExcSet, vnObjExcSet); |
7640 | |
7641 | // construct the ValueNumber for 'fldMap at obj' |
7642 | valAtAddr = |
7643 | vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal); |
7644 | } |
7645 | else // (staticOffset != nullptr) |
7646 | { |
7647 | // construct the ValueNumber for 'fldMap at staticOffset' |
7648 | normVal = vnStore->VNLiberalNormalValue(staticOffset->gtVNPair); |
7649 | valAtAddr = |
7650 | vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal); |
7651 | } |
7652 | // Now get rid of any remaining struct field dereferences. (if they exist) |
7653 | if (fldSeq->m_next) |
7654 | { |
7655 | storeVal = |
7656 | vnStore->VNApplySelectorsAssign(VNK_Liberal, valAtAddr, fldSeq->m_next, |
7657 | storeVal, indType, compCurBB); |
7658 | } |
7659 | |
7660 | // From which we can construct the new ValueNumber for 'fldMap at normVal' |
7661 | newFldMapVN = vnStore->VNForMapStore(vnStore->TypeOfVN(fldMapVN), fldMapVN, normVal, |
7662 | storeVal); |
7663 | } |
7664 | else |
7665 | { |
7666 | // plain static field |
7667 | |
7668 | // Now get rid of any remaining struct field dereferences. (if they exist) |
7669 | if (fldSeq->m_next) |
7670 | { |
7671 | storeVal = |
7672 | vnStore->VNApplySelectorsAssign(VNK_Liberal, fldMapVN, fldSeq->m_next, |
7673 | storeVal, indType, compCurBB); |
7674 | } |
7675 | |
7676 | newFldMapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], |
7677 | fldSeq, storeVal, indType, compCurBB); |
7678 | } |
7679 | |
7680 | // It is not strictly necessary to set the lhs value number, |
7681 | // but the dumps read better with it set to the 'storeVal' that we just computed |
7682 | lhs->gtVNPair.SetBoth(storeVal); |
7683 | |
7684 | // Update the field map for firstField in GcHeap to this new value. |
7685 | ValueNum heapVN = |
7686 | vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], firstFieldOnly, |
7687 | newFldMapVN, indType, compCurBB); |
7688 | |
7689 | recordGcHeapStore(tree, heapVN DEBUGARG("StoreField" )); |
7690 | } |
7691 | } |
7692 | else |
7693 | { |
7694 | GenTreeLclVarCommon* lclVarTree = nullptr; |
7695 | bool isLocal = tree->DefinesLocal(this, &lclVarTree); |
7696 | |
7697 | if (isLocal && lvaVarAddrExposed(lclVarTree->gtLclNum)) |
7698 | { |
7699 | // Store to address-exposed local; need to record the effect on ByrefExposed. |
7700 | // We could use MapStore here and MapSelect on reads of address-exposed locals |
7701 | // (using the local nums as selectors) to get e.g. propagation of values |
7702 | // through address-taken locals in regions of code with no calls or byref |
7703 | // writes. |
7704 | // For now, just use a new opaque VN. |
7705 | ValueNum memoryVN = vnStore->VNForExpr(compCurBB); |
7706 | recordAddressExposedLocalStore(tree, memoryVN DEBUGARG("PtrToLoc indir" )); |
7707 | } |
7708 | else if (!isLocal) |
7709 | { |
7710 | // If it doesn't define a local, then it might update GcHeap/ByrefExposed. |
7711 | // For the new ByrefExposed VN, we could use an operator here like |
7712 | // VNF_ByrefExposedStore that carries the VNs of the pointer and RHS, then |
7713 | // at byref loads if the current ByrefExposed VN happens to be |
7714 | // VNF_ByrefExposedStore with the same pointer VN, we could propagate the |
7715 | // VN from the RHS to the VN for the load. This would e.g. allow tracking |
7716 | // values through assignments to out params. For now, just model this |
7717 | // as an opaque GcHeap/ByrefExposed mutation. |
7718 | fgMutateGcHeap(tree DEBUGARG("assign-of-IND" )); |
7719 | } |
7720 | } |
7721 | } |
7722 | |
7723 | // We don't actually evaluate an IND on the LHS, so give it the Void value. |
7724 | tree->gtVNPair.SetBoth(vnStore->VNForVoid()); |
7725 | } |
7726 | break; |
7727 | |
7728 | case GT_CLS_VAR: |
7729 | { |
7730 | bool isVolatile = (lhs->gtFlags & GTF_FLD_VOLATILE) != 0; |
7731 | |
7732 | if (isVolatile) |
7733 | { |
7734 | // For Volatile store indirection, first mutate GcHeap/ByrefExposed |
7735 | fgMutateGcHeap(lhs DEBUGARG("GTF_CLS_VAR - store" )); // always change fgCurMemoryVN |
7736 | } |
7737 | |
7738 | // We model statics as indices into GcHeap (which is a subset of ByrefExposed). |
7739 | FieldSeqNode* fldSeqForStaticVar = GetFieldSeqStore()->CreateSingleton(lhs->gtClsVar.gtClsVarHnd); |
7740 | assert(fldSeqForStaticVar != FieldSeqStore::NotAField()); |
7741 | |
7742 | ValueNum storeVal = rhsVNPair.GetLiberal(); // The value number from the rhs of the assignment |
7743 | storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeqForStaticVar, |
7744 | storeVal, lhs->TypeGet(), compCurBB); |
7745 | |
7746 | // It is not strictly necessary to set the lhs value number, |
7747 | // but the dumps read better with it set to the 'storeVal' that we just computed |
7748 | lhs->gtVNPair.SetBoth(storeVal); |
7749 | |
7750 | // bbMemoryDef must include GcHeap for any block that mutates the GC heap |
7751 | assert((compCurBB->bbMemoryDef & memoryKindSet(GcHeap)) != 0); |
7752 | |
7753 | // Update the field map for the fgCurMemoryVN and SSA for the tree |
7754 | recordGcHeapStore(tree, storeVal DEBUGARG("Static Field store" )); |
7755 | } |
7756 | break; |
7757 | |
7758 | default: |
7759 | assert(!"Unknown node for lhs of assignment!" ); |
7760 | |
7761 | // For Unknown stores, mutate GcHeap/ByrefExposed |
7762 | fgMutateGcHeap(lhs DEBUGARG("Unkwown Assignment - store" )); // always change fgCurMemoryVN |
7763 | break; |
7764 | } |
7765 | } |
7766 | // Other kinds of assignment: initblk and copyblk. |
7767 | else if (oper == GT_ASG && varTypeIsStruct(tree)) |
7768 | { |
7769 | fgValueNumberBlockAssignment(tree); |
7770 | } |
7771 | else if (oper == GT_ADDR) |
7772 | { |
7773 | // We have special representations for byrefs to lvalues. |
7774 | GenTree* arg = tree->gtOp.gtOp1; |
7775 | if (arg->OperIsLocal()) |
7776 | { |
7777 | FieldSeqNode* fieldSeq = nullptr; |
7778 | ValueNum newVN = ValueNumStore::NoVN; |
7779 | if (!lvaInSsa(arg->gtLclVarCommon.GetLclNum())) |
7780 | { |
7781 | newVN = vnStore->VNForExpr(compCurBB, TYP_BYREF); |
7782 | } |
7783 | else if (arg->OperGet() == GT_LCL_FLD) |
7784 | { |
7785 | fieldSeq = arg->AsLclFld()->gtFieldSeq; |
7786 | if (fieldSeq == nullptr) |
7787 | { |
7788 | // Local field with unknown field seq -- not a precise pointer. |
7789 | newVN = vnStore->VNForExpr(compCurBB, TYP_BYREF); |
7790 | } |
7791 | } |
7792 | if (newVN == ValueNumStore::NoVN) |
7793 | { |
7794 | assert(arg->gtLclVarCommon.GetSsaNum() != ValueNumStore::NoVN); |
7795 | newVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToLoc, |
7796 | vnStore->VNForIntCon(arg->gtLclVarCommon.GetLclNum()), |
7797 | vnStore->VNForFieldSeq(fieldSeq)); |
7798 | } |
7799 | tree->gtVNPair.SetBoth(newVN); |
7800 | } |
7801 | else if ((arg->gtOper == GT_IND) || arg->OperIsBlk()) |
7802 | { |
7803 | // Usually the ADDR and IND just cancel out... |
7804 | // except when this GT_ADDR has a valid zero-offset field sequence |
7805 | // |
7806 | FieldSeqNode* zeroOffsetFieldSeq = nullptr; |
7807 | if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFieldSeq) && |
7808 | (zeroOffsetFieldSeq != FieldSeqStore::NotAField())) |
7809 | { |
7810 | ValueNum addrExtended = vnStore->ExtendPtrVN(arg->gtOp.gtOp1, zeroOffsetFieldSeq); |
7811 | if (addrExtended != ValueNumStore::NoVN) |
7812 | { |
7813 | tree->gtVNPair.SetBoth(addrExtended); // We don't care about lib/cons differences for addresses. |
7814 | } |
7815 | else |
7816 | { |
7817 | // ExtendPtrVN returned a failure result |
7818 | // So give this address a new unique value |
7819 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_BYREF)); |
7820 | } |
7821 | } |
7822 | else |
7823 | { |
7824 | // They just cancel, so fetch the ValueNumber from the op1 of the GT_IND node. |
7825 | // |
7826 | GenTree* addr = arg->AsIndir()->Addr(); |
7827 | tree->gtVNPair = addr->gtVNPair; |
7828 | |
7829 | // For the CSE phase mark the address as GTF_DONT_CSE |
7830 | // because it will end up with the same value number as tree (the GT_ADDR). |
7831 | addr->gtFlags |= GTF_DONT_CSE; |
7832 | } |
7833 | } |
7834 | else |
7835 | { |
7836 | // May be more cases to do here! But we'll punt for now. |
7837 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_BYREF)); |
7838 | } |
7839 | } |
7840 | else if ((oper == GT_IND) || GenTree::OperIsBlk(oper)) |
7841 | { |
7842 | // So far, we handle cases in which the address is a ptr-to-local, or if it's |
7843 | // a pointer to an object field or array element. Other cases become uses of |
7844 | // the current ByrefExposed value and the pointer value, so that at least we |
7845 | // can recognize redundant loads with no stores between them. |
7846 | GenTree* addr = tree->AsIndir()->Addr(); |
7847 | GenTreeLclVarCommon* lclVarTree = nullptr; |
7848 | FieldSeqNode* fldSeq1 = nullptr; |
7849 | FieldSeqNode* fldSeq2 = nullptr; |
7850 | GenTree* obj = nullptr; |
7851 | GenTree* staticOffset = nullptr; |
7852 | bool isVolatile = (tree->gtFlags & GTF_IND_VOLATILE) != 0; |
7853 | |
7854 | // See if the addr has any exceptional part. |
7855 | ValueNumPair addrNvnp; |
7856 | ValueNumPair addrXvnp; |
7857 | vnStore->VNPUnpackExc(addr->gtVNPair, &addrNvnp, &addrXvnp); |
7858 | |
7859 | // Is the dereference immutable? If so, model it as referencing the read-only heap. |
7860 | if (tree->gtFlags & GTF_IND_INVARIANT) |
7861 | { |
7862 | assert(!isVolatile); // We don't expect both volatile and invariant |
7863 | tree->gtVNPair = |
7864 | ValueNumPair(vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, ValueNumStore::VNForROH(), |
7865 | addrNvnp.GetLiberal()), |
7866 | vnStore->VNForMapSelect(VNK_Conservative, TYP_REF, ValueNumStore::VNForROH(), |
7867 | addrNvnp.GetConservative())); |
7868 | tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); |
7869 | } |
7870 | else if (isVolatile) |
7871 | { |
7872 | // For Volatile indirection, mutate GcHeap/ByrefExposed |
7873 | fgMutateGcHeap(tree DEBUGARG("GTF_IND_VOLATILE - read" )); |
7874 | |
7875 | // The value read by the GT_IND can immediately change |
7876 | ValueNum newUniq = vnStore->VNForExpr(compCurBB, tree->TypeGet()); |
7877 | tree->gtVNPair = vnStore->VNPWithExc(ValueNumPair(newUniq, newUniq), addrXvnp); |
7878 | } |
7879 | // We always want to evaluate the LHS when the GT_IND node is marked with GTF_IND_ARR_INDEX |
7880 | // as this will relabel the GT_IND child correctly using the VNF_PtrToArrElem |
7881 | else if ((tree->gtFlags & GTF_IND_ARR_INDEX) != 0) |
7882 | { |
7883 | ArrayInfo arrInfo; |
7884 | bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo); |
7885 | assert(b); |
7886 | |
7887 | ValueNum inxVN = ValueNumStore::NoVN; |
7888 | FieldSeqNode* fldSeq = nullptr; |
7889 | |
7890 | // GenTree* addr = tree->gtOp.gtOp1; |
7891 | ValueNum addrVN = addrNvnp.GetLiberal(); |
7892 | |
7893 | // Try to parse it. |
7894 | GenTree* arr = nullptr; |
7895 | addr->ParseArrayAddress(this, &arrInfo, &arr, &inxVN, &fldSeq); |
7896 | if (arr == nullptr) |
7897 | { |
7898 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
7899 | return; |
7900 | } |
7901 | assert(fldSeq != FieldSeqStore::NotAField()); |
7902 | |
7903 | // Otherwise... |
7904 | // Need to form H[arrType][arr][ind][fldSeq] |
7905 | // Get the array element type equivalence class rep. |
7906 | CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType); |
7907 | ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL); |
7908 | JITDUMP(" VNForHandle(arrElemType: %s) is " FMT_VN "\n" , |
7909 | (arrInfo.m_elemType == TYP_STRUCT) ? eeGetClassName(arrInfo.m_elemStructType) |
7910 | : varTypeName(arrInfo.m_elemType), |
7911 | elemTypeEqVN) |
7912 | |
7913 | // We take the "VNNormalValue"s here, because if either has exceptional outcomes, they will be captured |
7914 | // as part of the value of the composite "addr" operation... |
7915 | ValueNum arrVN = vnStore->VNLiberalNormalValue(arr->gtVNPair); |
7916 | inxVN = vnStore->VNNormalValue(inxVN); |
7917 | |
7918 | // Additionally, relabel the address with a PtrToArrElem value number. |
7919 | ValueNum fldSeqVN = vnStore->VNForFieldSeq(fldSeq); |
7920 | ValueNum elemAddr = |
7921 | vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, fldSeqVN); |
7922 | |
7923 | // The aggregate "addr" VN should have had all the exceptions bubble up... |
7924 | elemAddr = vnStore->VNWithExc(elemAddr, addrXvnp.GetLiberal()); |
7925 | addr->gtVNPair.SetBoth(elemAddr); |
7926 | #ifdef DEBUG |
7927 | if (verbose) |
7928 | { |
7929 | printf(" Relabeled IND_ARR_INDEX address node " ); |
7930 | Compiler::printTreeID(addr); |
7931 | printf(" with l:" FMT_VN ": " , elemAddr); |
7932 | vnStore->vnDump(this, elemAddr); |
7933 | printf("\n" ); |
7934 | if (vnStore->VNNormalValue(elemAddr) != elemAddr) |
7935 | { |
7936 | printf(" [" FMT_VN " is: " , vnStore->VNNormalValue(elemAddr)); |
7937 | vnStore->vnDump(this, vnStore->VNNormalValue(elemAddr)); |
7938 | printf("]\n" ); |
7939 | } |
7940 | } |
7941 | #endif // DEBUG |
7942 | |
7943 | // We now need to retrieve the value number for the array element value |
7944 | // and give this value number to the GT_IND node 'tree' |
7945 | // We do this whenever we have an rvalue, but we don't do it for a |
7946 | // normal LHS assignment into an array element. |
7947 | // |
7948 | if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0) |
7949 | { |
7950 | fgValueNumberArrIndexVal(tree, elemTypeEq, arrVN, inxVN, addrXvnp.GetLiberal(), fldSeq); |
7951 | } |
7952 | } |
        // In general we skip GT_IND nodes that are the LHS of an assignment. (We labeled these earlier.)
7954 | // We will "evaluate" this as part of the assignment. |
7955 | else if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0) |
7956 | { |
7957 | FieldSeqNode* localFldSeq = nullptr; |
7958 | VNFuncApp funcApp; |
7959 | |
7960 | // Is it a local or a heap address? |
7961 | if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq) && lvaInSsa(lclVarTree->GetLclNum())) |
7962 | { |
7963 | unsigned lclNum = lclVarTree->GetLclNum(); |
7964 | unsigned ssaNum = lclVarTree->GetSsaNum(); |
7965 | LclVarDsc* varDsc = &lvaTable[lclNum]; |
7966 | |
7967 | if ((localFldSeq == FieldSeqStore::NotAField()) || (localFldSeq == nullptr)) |
7968 | { |
7969 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
7970 | } |
7971 | else |
7972 | { |
7973 | var_types indType = tree->TypeGet(); |
7974 | ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair; |
7975 | tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, localFldSeq, indType); |
7976 | ; |
7977 | } |
7978 | tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); |
7979 | } |
7980 | else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && funcApp.m_func == VNF_PtrToStatic) |
7981 | { |
7982 | var_types indType = tree->TypeGet(); |
7983 | ValueNum fieldSeqVN = funcApp.m_args[0]; |
7984 | |
7985 | FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN); |
7986 | |
7987 | if (fldSeqForStaticVar != FieldSeqStore::NotAField()) |
7988 | { |
7989 | ValueNum selectedStaticVar; |
7990 | // We model statics as indices into the GcHeap (which is a subset of ByrefExposed). |
7991 | size_t structSize = 0; |
7992 | selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], |
7993 | fldSeqForStaticVar, &structSize); |
7994 | selectedStaticVar = vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize); |
7995 | |
7996 | tree->gtVNPair.SetLiberal(selectedStaticVar); |
7997 | tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, indType)); |
7998 | } |
7999 | else |
8000 | { |
8001 | JITDUMP(" *** Missing field sequence info for VNF_PtrToStatic value GT_IND\n" ); |
8002 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); // a new unique value number |
8003 | } |
8004 | tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); |
8005 | } |
8006 | else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && (funcApp.m_func == VNF_PtrToArrElem)) |
8007 | { |
8008 | fgValueNumberArrIndexVal(tree, &funcApp, addrXvnp.GetLiberal()); |
8009 | } |
8010 | else if (addr->IsFieldAddr(this, &obj, &staticOffset, &fldSeq2)) |
8011 | { |
8012 | if (fldSeq2 == FieldSeqStore::NotAField()) |
8013 | { |
8014 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8015 | } |
8016 | else if (fldSeq2 != nullptr) |
8017 | { |
8018 | // Get the first (instance or static) field from field seq. GcHeap[field] will yield the "field |
8019 | // map". |
8020 | CLANG_FORMAT_COMMENT_ANCHOR; |
8021 | |
8022 | #ifdef DEBUG |
8023 | CORINFO_CLASS_HANDLE fldCls = info.compCompHnd->getFieldClass(fldSeq2->m_fieldHnd); |
8024 | if (obj != nullptr) |
8025 | { |
8026 | // Make sure that the class containing it is not a value class (as we are expecting an |
8027 | // instance field) |
8028 | assert((info.compCompHnd->getClassAttribs(fldCls) & CORINFO_FLG_VALUECLASS) == 0); |
8029 | assert(staticOffset == nullptr); |
8030 | } |
8031 | #endif // DEBUG |
8032 | |
8033 | // Get a field sequence for just the first field in the sequence |
8034 | // |
8035 | FieldSeqNode* firstFieldOnly = GetFieldSeqStore()->CreateSingleton(fldSeq2->m_fieldHnd); |
8036 | size_t structSize = 0; |
8037 | ValueNum fldMapVN = |
8038 | vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], firstFieldOnly, &structSize); |
8039 | |
8040 | // The final field in the sequence will need to match the 'indType' |
8041 | var_types indType = tree->TypeGet(); |
8042 | |
8043 | // The type of the field is "struct" if there are more fields in the sequence, |
8044 | // otherwise it is the type returned from VNApplySelectors above. |
8045 | var_types firstFieldType = vnStore->TypeOfVN(fldMapVN); |
8046 | |
8047 | ValueNum valAtAddr = fldMapVN; |
8048 | if (obj != nullptr) |
8049 | { |
8050 | // construct the ValueNumber for 'fldMap at obj' |
8051 | ValueNum objNormVal = vnStore->VNLiberalNormalValue(obj->gtVNPair); |
8052 | valAtAddr = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, objNormVal); |
8053 | } |
8054 | else if (staticOffset != nullptr) |
8055 | { |
8056 | // construct the ValueNumber for 'fldMap at staticOffset' |
8057 | ValueNum offsetNormVal = vnStore->VNLiberalNormalValue(staticOffset->gtVNPair); |
8058 | valAtAddr = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, offsetNormVal); |
8059 | } |
8060 | |
8061 | // Now get rid of any remaining struct field dereferences. |
8062 | if (fldSeq2->m_next) |
8063 | { |
8064 | valAtAddr = vnStore->VNApplySelectors(VNK_Liberal, valAtAddr, fldSeq2->m_next, &structSize); |
8065 | } |
8066 | valAtAddr = vnStore->VNApplySelectorsTypeCheck(valAtAddr, indType, structSize); |
8067 | |
8068 | tree->gtVNPair.SetLiberal(valAtAddr); |
8069 | |
8070 | // The conservative value is a new, unique VN. |
8071 | tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8072 | tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); |
8073 | } |
8074 | else |
8075 | { |
8076 | // Occasionally we do an explicit null test on a REF, so we just dereference it with no |
8077 | // field sequence. The result is probably unused. |
8078 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8079 | tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); |
8080 | } |
8081 | } |
8082 | else // We don't know where the address points, so it is an ByrefExposed load. |
8083 | { |
8084 | ValueNum addrVN = addr->gtVNPair.GetLiberal(); |
8085 | ValueNum loadVN = fgValueNumberByrefExposedLoad(typ, addrVN); |
8086 | tree->gtVNPair.SetLiberal(loadVN); |
8087 | tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8088 | tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp); |
8089 | } |
8090 | } |
8091 | } |
8092 | else if (tree->OperGet() == GT_CAST) |
8093 | { |
8094 | fgValueNumberCastTree(tree); |
8095 | } |
8096 | else if (tree->OperGet() == GT_INTRINSIC) |
8097 | { |
8098 | fgValueNumberIntrinsic(tree); |
8099 | } |
8100 | else // Look up the VNFunc for the node |
8101 | { |
8102 | VNFunc vnf = GetVNFuncForNode(tree); |
8103 | |
8104 | if (ValueNumStore::VNFuncIsLegal(vnf)) |
8105 | { |
8106 | if (GenTree::OperIsUnary(oper)) |
8107 | { |
8108 | if (tree->gtOp.gtOp1 != nullptr) |
8109 | { |
8110 | if (tree->OperGet() == GT_NOP) |
8111 | { |
8112 | // Pass through arg vn. |
8113 | tree->gtVNPair = tree->gtOp.gtOp1->gtVNPair; |
8114 | } |
8115 | else |
8116 | { |
8117 | ValueNumPair op1VNP; |
8118 | ValueNumPair op1VNPx; |
8119 | vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1VNP, &op1VNPx); |
8120 | |
8121 | // If we are fetching the array length for an array ref that came from global memory |
8122 | // then for CSE safety we must use the conservative value number for both |
8123 | // |
8124 | if ((tree->OperGet() == GT_ARR_LENGTH) && ((tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) != 0)) |
8125 | { |
8126 | // use the conservative value number for both when computing the VN for the ARR_LENGTH |
8127 | op1VNP.SetBoth(op1VNP.GetConservative()); |
8128 | } |
8129 | |
8130 | tree->gtVNPair = |
8131 | vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(), vnf, op1VNP), op1VNPx); |
8132 | } |
8133 | } |
8134 | else // Is actually nullary. |
8135 | { |
8136 | // Mostly we'll leave these without a value number, assuming we'll detect these as VN failures |
8137 | // if they actually need to have values. With the exception of NOPs, which can sometimes have |
8138 | // meaning. |
8139 | if (tree->OperGet() == GT_NOP) |
8140 | { |
8141 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8142 | } |
8143 | } |
8144 | } |
8145 | else // we have a binary oper |
8146 | { |
8147 | assert(oper != GT_ASG); // We handled assignments earlier. |
8148 | assert(GenTree::OperIsBinary(oper)); |
8149 | // Standard binary operator. |
8150 | ValueNumPair op2VNPair; |
8151 | if (tree->gtOp.gtOp2 == nullptr) |
8152 | { |
8153 | // Handle any GT_LIST nodes as they can have a nullptr for op2. |
8154 | op2VNPair.SetBoth(ValueNumStore::VNForNull()); |
8155 | } |
8156 | else |
8157 | { |
8158 | op2VNPair = tree->gtOp.gtOp2->gtVNPair; |
8159 | } |
8160 | |
8161 | // Handle a few special cases: if we add a field offset constant to a PtrToXXX, we will get back a |
8162 | // new |
8163 | // PtrToXXX. |
8164 | |
8165 | ValueNumPair op1vnp; |
8166 | ValueNumPair op1Xvnp; |
8167 | vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1vnp, &op1Xvnp); |
8168 | |
8169 | ValueNumPair op2vnp; |
8170 | ValueNumPair op2Xvnp; |
8171 | vnStore->VNPUnpackExc(op2VNPair, &op2vnp, &op2Xvnp); |
8172 | ValueNumPair excSet = vnStore->VNPExcSetUnion(op1Xvnp, op2Xvnp); |
8173 | |
8174 | ValueNum newVN = ValueNumStore::NoVN; |
8175 | |
8176 | // Check for the addition of a field offset constant |
8177 | // |
8178 | if ((oper == GT_ADD) && (!tree->gtOverflowEx())) |
8179 | { |
8180 | newVN = vnStore->ExtendPtrVN(tree->gtOp.gtOp1, tree->gtOp.gtOp2); |
8181 | } |
8182 | |
8183 | if (newVN != ValueNumStore::NoVN) |
8184 | { |
8185 | // We don't care about differences between liberal and conservative for pointer values. |
8186 | newVN = vnStore->VNWithExc(newVN, excSet.GetLiberal()); |
8187 | tree->gtVNPair.SetBoth(newVN); |
8188 | } |
8189 | else |
8190 | { |
8191 | VNFunc vnf = GetVNFuncForNode(tree); |
8192 | ValueNumPair normalPair = vnStore->VNPairForFunc(tree->TypeGet(), vnf, op1vnp, op2vnp); |
8193 | tree->gtVNPair = vnStore->VNPWithExc(normalPair, excSet); |
8194 | // For overflow checking operations the VNF_OverflowExc will be added below |
8195 | // by fgValueNumberAddExceptionSet |
8196 | } |
8197 | } |
8198 | } |
8199 | else // ValueNumStore::VNFuncIsLegal returns false |
8200 | { |
8201 | // Some of the genTreeOps that aren't legal VNFuncs so they get special handling. |
8202 | switch (oper) |
8203 | { |
8204 | case GT_COMMA: |
8205 | { |
8206 | ValueNumPair op1vnp; |
8207 | ValueNumPair op1Xvnp; |
8208 | vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1vnp, &op1Xvnp); |
8209 | ValueNumPair op2vnp; |
8210 | ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet(); |
8211 | GenTree* op2 = tree->gtGetOp2(); |
8212 | |
8213 | if (op2->OperIsIndir() && ((op2->gtFlags & GTF_IND_ASG_LHS) != 0)) |
8214 | { |
8215 | // If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs |
8216 | op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()); |
8217 | } |
8218 | else if ((op2->OperGet() == GT_CLS_VAR) && (op2->gtFlags & GTF_CLS_VAR_ASG_LHS)) |
8219 | { |
8220 | // If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs |
8221 | op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()); |
8222 | } |
8223 | else |
8224 | { |
8225 | vnStore->VNPUnpackExc(op2->gtVNPair, &op2vnp, &op2Xvnp); |
8226 | } |
8227 | tree->gtVNPair = vnStore->VNPWithExc(op2vnp, vnStore->VNPExcSetUnion(op1Xvnp, op2Xvnp)); |
8228 | } |
8229 | break; |
8230 | |
8231 | case GT_NULLCHECK: |
8232 | { |
8233 | // An Explicit null check, produces no value |
8234 | // But we do persist any execeptions produced by op1 |
8235 | // |
8236 | tree->gtVNPair = vnStore->VNPWithExc(vnStore->VNPForVoid(), |
8237 | vnStore->VNPExceptionSet(tree->gtOp.gtOp1->gtVNPair)); |
8238 | // The exception set with VNF_NullPtrExc will be added below |
8239 | // by fgValueNumberAddExceptionSet |
8240 | } |
8241 | break; |
8242 | |
8243 | case GT_LOCKADD: // Binop |
8244 | noway_assert("LOCKADD should not appear before lowering" ); |
8245 | break; |
8246 | |
8247 | case GT_XADD: // Binop |
8248 | case GT_XCHG: // Binop |
8249 | { |
8250 | // For XADD and XCHG other intrinsics add an arbitrary side effect on GcHeap/ByrefExposed. |
8251 | fgMutateGcHeap(tree DEBUGARG("Interlocked intrinsic" )); |
8252 | |
8253 | assert(tree->OperIsImplicitIndir()); // special node with an implicit indirections |
8254 | |
8255 | GenTree* addr = tree->gtOp.gtOp1; // op1 |
8256 | GenTree* data = tree->gtOp.gtOp2; // op2 |
8257 | |
8258 | ValueNumPair vnpExcSet = ValueNumStore::VNPForEmptyExcSet(); |
8259 | |
8260 | vnpExcSet = vnStore->VNPUnionExcSet(data->gtVNPair, vnpExcSet); |
8261 | vnpExcSet = vnStore->VNPUnionExcSet(addr->gtVNPair, vnpExcSet); |
8262 | |
8263 | // The normal value is a new unique VN. |
8264 | ValueNumPair normalPair; |
8265 | normalPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8266 | |
8267 | // Attach the combined exception set |
8268 | tree->gtVNPair = vnStore->VNPWithExc(normalPair, vnpExcSet); |
8269 | |
8270 | // add the null check exception for 'addr' to the tree's value number |
8271 | fgValueNumberAddExceptionSetForIndirection(tree, addr); |
8272 | break; |
8273 | } |
8274 | |
8275 | case GT_JTRUE: |
8276 | case GT_LIST: |
8277 | // These nodes never need to have a ValueNumber |
8278 | tree->gtVNPair.SetBoth(ValueNumStore::NoVN); |
8279 | break; |
8280 | |
8281 | case GT_BOX: |
8282 | // BOX doesn't do anything at this point, the actual object allocation |
8283 | // and initialization happens separately (and not numbering BOX correctly |
8284 | // prevents seeing allocation related assertions through it) |
8285 | tree->gtVNPair = tree->gtGetOp1()->gtVNPair; |
8286 | break; |
8287 | |
8288 | default: |
8289 | // The default action is to give the node a new, unique VN. |
8290 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8291 | break; |
8292 | } |
8293 | } |
8294 | } |
8295 | |
8296 | // next we add any exception sets for the current tree node |
8297 | fgValueNumberAddExceptionSet(tree); |
8298 | } |
8299 | else |
8300 | { |
8301 | assert(GenTree::OperIsSpecial(oper)); |
8302 | |
8303 | // TBD: We must handle these individually. For now: |
8304 | switch (oper) |
8305 | { |
8306 | case GT_CALL: |
8307 | fgValueNumberCall(tree->AsCall()); |
8308 | break; |
8309 | |
8310 | case GT_ARR_BOUNDS_CHECK: |
8311 | #ifdef FEATURE_SIMD |
8312 | case GT_SIMD_CHK: |
8313 | #endif // FEATURE_SIMD |
8314 | #ifdef FEATURE_HW_INTRINSICS |
8315 | case GT_HW_INTRINSIC_CHK: |
8316 | #endif // FEATURE_HW_INTRINSICS |
8317 | { |
8318 | ValueNumPair vnpIndex = tree->AsBoundsChk()->gtIndex->gtVNPair; |
8319 | ValueNumPair vnpArrLen = tree->AsBoundsChk()->gtArrLen->gtVNPair; |
8320 | |
8321 | // Construct the exception set for bounds check |
8322 | ValueNumPair vnpExcSet = vnStore->VNPExcSetSingleton( |
8323 | vnStore->VNPairForFunc(TYP_REF, VNF_IndexOutOfRangeExc, vnStore->VNPNormalPair(vnpIndex), |
8324 | vnStore->VNPNormalPair(vnpArrLen))); |
8325 | |
8326 | // And collect the exceptions from Index and ArrLen |
8327 | vnpExcSet = vnStore->VNPUnionExcSet(vnpIndex, vnpExcSet); |
8328 | vnpExcSet = vnStore->VNPUnionExcSet(vnpArrLen, vnpExcSet); |
8329 | |
8330 | // A bounds check node has no value, but may throw exceptions. |
8331 | tree->gtVNPair = vnStore->VNPWithExc(vnStore->VNPForVoid(), vnpExcSet); |
8332 | |
8333 | // Record non-constant value numbers that are used as the length argument to bounds checks, so that |
8334 | // assertion prop will know that comparisons against them are worth analyzing. |
8335 | ValueNum lengthVN = tree->AsBoundsChk()->gtArrLen->gtVNPair.GetConservative(); |
8336 | if ((lengthVN != ValueNumStore::NoVN) && !vnStore->IsVNConstant(lengthVN)) |
8337 | { |
8338 | vnStore->SetVNIsCheckedBound(lengthVN); |
8339 | } |
8340 | } |
8341 | break; |
8342 | |
8343 | case GT_CMPXCHG: // Specialop |
8344 | { |
8345 | // For CMPXCHG and other intrinsics add an arbitrary side effect on GcHeap/ByrefExposed. |
8346 | fgMutateGcHeap(tree DEBUGARG("Interlocked intrinsic" )); |
8347 | |
8348 | GenTreeCmpXchg* const cmpXchg = tree->AsCmpXchg(); |
8349 | |
8350 | assert(tree->OperIsImplicitIndir()); // special node with an implicit indirections |
8351 | |
8352 | GenTree* location = cmpXchg->gtOpLocation; // arg1 |
8353 | GenTree* value = cmpXchg->gtOpValue; // arg2 |
8354 | GenTree* comparand = cmpXchg->gtOpComparand; // arg3 |
8355 | |
8356 | ValueNumPair vnpExcSet = ValueNumStore::VNPForEmptyExcSet(); |
8357 | |
8358 | // Collect the exception sets from our operands |
8359 | vnpExcSet = vnStore->VNPUnionExcSet(location->gtVNPair, vnpExcSet); |
8360 | vnpExcSet = vnStore->VNPUnionExcSet(value->gtVNPair, vnpExcSet); |
8361 | vnpExcSet = vnStore->VNPUnionExcSet(comparand->gtVNPair, vnpExcSet); |
8362 | |
8363 | // The normal value is a new unique VN. |
8364 | ValueNumPair normalPair; |
8365 | normalPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8366 | |
8367 | // Attach the combined exception set |
8368 | tree->gtVNPair = vnStore->VNPWithExc(normalPair, vnpExcSet); |
8369 | |
8370 | // add the null check exception for 'location' to the tree's value number |
8371 | fgValueNumberAddExceptionSetForIndirection(tree, location); |
8372 | // add the null check exception for 'comparand' to the tree's value number |
8373 | fgValueNumberAddExceptionSetForIndirection(tree, comparand); |
8374 | break; |
8375 | } |
8376 | |
8377 | default: |
8378 | tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet())); |
8379 | } |
8380 | } |
8381 | #ifdef DEBUG |
8382 | if (verbose) |
8383 | { |
8384 | if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN) |
8385 | { |
8386 | printf("N%03u " , tree->gtSeqNum); |
8387 | printTreeID(tree); |
8388 | printf(" " ); |
8389 | gtDispNodeName(tree); |
8390 | if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves |
8391 | { |
8392 | gtDispLeaf(tree, nullptr); |
8393 | } |
8394 | printf(" => " ); |
8395 | vnpPrint(tree->gtVNPair, 1); |
8396 | printf("\n" ); |
8397 | } |
8398 | } |
8399 | #endif // DEBUG |
8400 | } |
8401 | |
8402 | void Compiler::fgValueNumberIntrinsic(GenTree* tree) |
8403 | { |
8404 | assert(tree->OperGet() == GT_INTRINSIC); |
8405 | GenTreeIntrinsic* intrinsic = tree->AsIntrinsic(); |
8406 | ValueNumPair arg0VNP, arg1VNP; |
8407 | ValueNumPair arg0VNPx = ValueNumStore::VNPForEmptyExcSet(); |
8408 | ValueNumPair arg1VNPx = ValueNumStore::VNPForEmptyExcSet(); |
8409 | |
8410 | vnStore->VNPUnpackExc(intrinsic->gtOp.gtOp1->gtVNPair, &arg0VNP, &arg0VNPx); |
8411 | |
8412 | if (intrinsic->gtOp.gtOp2 != nullptr) |
8413 | { |
8414 | vnStore->VNPUnpackExc(intrinsic->gtOp.gtOp2->gtVNPair, &arg1VNP, &arg1VNPx); |
8415 | } |
8416 | |
8417 | if (IsMathIntrinsic(intrinsic->gtIntrinsicId)) |
8418 | { |
8419 | // GT_INTRINSIC is a currently a subtype of binary operators. But most of |
8420 | // the math intrinsics are actually unary operations. |
8421 | |
8422 | if (intrinsic->gtOp.gtOp2 == nullptr) |
8423 | { |
8424 | intrinsic->gtVNPair = |
8425 | vnStore->VNPWithExc(vnStore->EvalMathFuncUnary(tree->TypeGet(), intrinsic->gtIntrinsicId, arg0VNP), |
8426 | arg0VNPx); |
8427 | } |
8428 | else |
8429 | { |
8430 | ValueNumPair newVNP = |
8431 | vnStore->EvalMathFuncBinary(tree->TypeGet(), intrinsic->gtIntrinsicId, arg0VNP, arg1VNP); |
8432 | ValueNumPair excSet = vnStore->VNPExcSetUnion(arg0VNPx, arg1VNPx); |
8433 | intrinsic->gtVNPair = vnStore->VNPWithExc(newVNP, excSet); |
8434 | } |
8435 | } |
8436 | else |
8437 | { |
8438 | switch (intrinsic->gtIntrinsicId) |
8439 | { |
8440 | case CORINFO_INTRINSIC_Object_GetType: |
8441 | intrinsic->gtVNPair = |
8442 | vnStore->VNPWithExc(vnStore->VNPairForFunc(intrinsic->TypeGet(), VNF_ObjGetType, arg0VNP), |
8443 | arg0VNPx); |
8444 | break; |
8445 | |
8446 | default: |
8447 | unreached(); |
8448 | } |
8449 | } |
8450 | } |
8451 | |
8452 | void Compiler::fgValueNumberCastTree(GenTree* tree) |
8453 | { |
8454 | assert(tree->OperGet() == GT_CAST); |
8455 | |
8456 | ValueNumPair srcVNPair = tree->gtOp.gtOp1->gtVNPair; |
8457 | var_types castToType = tree->CastToType(); |
8458 | var_types castFromType = tree->CastFromType(); |
8459 | bool srcIsUnsigned = ((tree->gtFlags & GTF_UNSIGNED) != 0); |
8460 | bool hasOverflowCheck = tree->gtOverflowEx(); |
8461 | |
8462 | assert(genActualType(castToType) == genActualType(tree->TypeGet())); // Insure that the resultType is correct |
8463 | |
8464 | tree->gtVNPair = vnStore->VNPairForCast(srcVNPair, castToType, castFromType, srcIsUnsigned, hasOverflowCheck); |
8465 | } |
8466 | |
8467 | // Compute the normal ValueNumber for a cast operation with no exceptions |
8468 | ValueNum ValueNumStore::VNForCast(ValueNum srcVN, |
8469 | var_types castToType, |
8470 | var_types castFromType, |
8471 | bool srcIsUnsigned /* = false */) |
8472 | { |
8473 | // The resulting type after performingthe cast is always widened to a supported IL stack size |
8474 | var_types resultType = genActualType(castToType); |
8475 | |
8476 | // When we're considering actual value returned by a non-checking cast whether or not the source is |
8477 | // unsigned does *not* matter for non-widening casts. That is, if we cast an int or a uint to short, |
8478 | // we just extract the first two bytes from the source bit pattern, not worrying about the interpretation. |
8479 | // The same is true in casting between signed/unsigned types of the same width. Only when we're doing |
8480 | // a widening cast do we care about whether the source was unsigned,so we know whether to sign or zero extend it. |
8481 | // |
8482 | bool srcIsUnsignedNorm = srcIsUnsigned; |
8483 | if (genTypeSize(castToType) <= genTypeSize(castFromType)) |
8484 | { |
8485 | srcIsUnsignedNorm = false; |
8486 | } |
8487 | |
8488 | ValueNum castTypeVN = VNForCastOper(castToType, srcIsUnsigned); |
8489 | ValueNum resultVN = VNForFunc(resultType, VNF_Cast, srcVN, castTypeVN); |
8490 | |
8491 | #ifdef DEBUG |
8492 | if (m_pComp->verbose) |
8493 | { |
8494 | printf(" VNForCast(" FMT_VN ", " FMT_VN ") returns " , srcVN, castTypeVN); |
8495 | m_pComp->vnPrint(resultVN, 1); |
8496 | printf("\n" ); |
8497 | } |
8498 | #endif |
8499 | |
8500 | return resultVN; |
8501 | } |
8502 | |
8503 | // Compute the ValueNumberPair for a cast operation |
8504 | ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair, |
8505 | var_types castToType, |
8506 | var_types castFromType, |
8507 | bool srcIsUnsigned, /* = false */ |
8508 | bool hasOverflowCheck) /* = false */ |
8509 | { |
8510 | // The resulting type after performingthe cast is always widened to a supported IL stack size |
8511 | var_types resultType = genActualType(castToType); |
8512 | |
8513 | ValueNumPair castArgVNP; |
8514 | ValueNumPair castArgxVNP; |
8515 | VNPUnpackExc(srcVNPair, &castArgVNP, &castArgxVNP); |
8516 | |
8517 | // When we're considering actual value returned by a non-checking cast, (hasOverflowCheck is false) |
8518 | // whether or not the source is unsigned does *not* matter for non-widening casts. |
8519 | // That is, if we cast an int or a uint to short, we just extract the first two bytes from the source |
8520 | // bit pattern, not worrying about the interpretation. The same is true in casting between signed/unsigned |
8521 | // types of the same width. Only when we're doing a widening cast do we care about whether the source |
8522 | // was unsigned, so we know whether to sign or zero extend it. |
8523 | // |
8524 | // Important: Casts to floating point cannot be optimized in this fashion. (bug 946768) |
8525 | // |
8526 | bool srcIsUnsignedNorm = srcIsUnsigned; |
8527 | if (!hasOverflowCheck && !varTypeIsFloating(castToType) && (genTypeSize(castToType) <= genTypeSize(castFromType))) |
8528 | { |
8529 | srcIsUnsignedNorm = false; |
8530 | } |
8531 | |
8532 | VNFunc vnFunc = hasOverflowCheck ? VNF_CastOvf : VNF_Cast; |
8533 | ValueNum castTypeVN = VNForCastOper(castToType, srcIsUnsignedNorm); |
8534 | ValueNumPair castTypeVNPair(castTypeVN, castTypeVN); |
8535 | ValueNumPair castNormRes = VNPairForFunc(resultType, vnFunc, castArgVNP, castTypeVNPair); |
8536 | |
8537 | ValueNumPair resultVNP = VNPWithExc(castNormRes, castArgxVNP); |
8538 | |
8539 | // If we have a check for overflow, add the exception information. |
8540 | if (hasOverflowCheck) |
8541 | { |
8542 | ValueNumPair ovfChk = VNPairForFunc(TYP_REF, VNF_ConvOverflowExc, castArgVNP, castTypeVNPair); |
8543 | ValueNumPair excSet = VNPExcSetSingleton(ovfChk); |
8544 | excSet = VNPExcSetUnion(excSet, castArgxVNP); |
8545 | resultVNP = VNPWithExc(castNormRes, excSet); |
8546 | } |
8547 | |
8548 | return resultVNP; |
8549 | } |
8550 | |
//------------------------------------------------------------------------
// fgValueNumberHelperCallFunc: Value number a helper call whose semantics
//    have already been summarized as the given VNFunc.
//
// Arguments:
//    call   - the helper call node to value number
//    vnf    - the value-number function representing the helper
//    vnpExc - exception set accumulated for the call so far; some cases
//             below replace it (allocators) or add to it (array allocators),
//             and the operands' exception sets are unioned in as they are
//             consumed
//
// Notes:
//    At most three IR arguments are consumed (asserted below).  Allocating
//    helpers append a unique VN as the VNFunc's last operand so that two
//    allocations never share a value number.  For some ready-to-run helpers
//    the call's entry point address stands in for argument 0.
//
void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc)
{
    unsigned nArgs = ValueNumStore::VNFuncArity(vnf);
    assert(vnf != VNF_Boundary);
    GenTreeArgList* args = call->gtCallArgs;
    bool generateUniqueVN = false;
    bool useEntryPointAddrAsArg0 = false;

    switch (vnf)
    {
        case VNF_JitNew:
        {
            generateUniqueVN = true;
            // A plain allocation raises no exception we track here.
            vnpExc = ValueNumStore::VNPForEmptyExcSet();
        }
        break;

        case VNF_JitNewArr:
        {
            generateUniqueVN = true;
            // The array length is the second IR argument.
            ValueNumPair vnp1 = vnStore->VNPNormalPair(args->Rest()->Current()->gtVNPair);

            // The New Array helper may throw an overflow exception
            vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_NewArrOverflowExc, vnp1));
        }
        break;

        case VNF_Box:
        case VNF_BoxNullable:
        {
            // Generate unique VN so, VNForFunc generates a uniq value number for box nullable.
            // Alternatively instead of using vnpUniq below in VNPairForFunc(...),
            // we could use the value number of what the byref arg0 points to.
            //
            // But retrieving the value number of what the byref arg0 points to is quite a bit more work
            // and doing so only very rarely allows for an additional optimization.
            generateUniqueVN = true;
        }
        break;

        case VNF_JitReadyToRunNew:
        {
            generateUniqueVN = true;
            vnpExc = ValueNumStore::VNPForEmptyExcSet();
            useEntryPointAddrAsArg0 = true;
        }
        break;

        case VNF_JitReadyToRunNewArr:
        {
            generateUniqueVN = true;
            // For the R2R variant the array length is the first IR argument.
            ValueNumPair vnp1 = vnStore->VNPNormalPair(args->Current()->gtVNPair);

            // The New Array helper may throw an overflow exception
            vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_NewArrOverflowExc, vnp1));
            useEntryPointAddrAsArg0 = true;
        }
        break;

        case VNF_ReadyToRunStaticBase:
        case VNF_ReadyToRunGenericStaticBase:
        case VNF_ReadyToRunIsInstanceOf:
        case VNF_ReadyToRunCastClass:
        {
            useEntryPointAddrAsArg0 = true;
        }
        break;

        default:
        {
            // Anything else reaching here must be a pure helper.
            assert(s_helperCallProperties.IsPure(eeGetHelperNum(call->gtCallMethHnd)));
        }
        break;
    }

    // The unique VN occupies the VNFunc's last operand slot, so one fewer
    // IR argument is consumed below.
    if (generateUniqueVN)
    {
        nArgs--;
    }

    ValueNumPair vnpUniq;
    if (generateUniqueVN)
    {
        // Generate unique VN so, VNForFunc generates a unique value number.
        vnpUniq.SetBoth(vnStore->VNForExpr(compCurBB, call->TypeGet()));
    }

#if defined(FEATURE_READYTORUN_COMPILER) && defined(_TARGET_ARMARCH_)
    if (call->IsR2RRelativeIndir())
    {
#ifdef DEBUG
        assert(args->Current()->OperGet() == GT_ARGPLACE);

        // Find the corresponding late arg.
        GenTree* indirectCellAddress = call->fgArgInfo->GetArgNode(0);
        assert(indirectCellAddress->IsCnsIntOrI() && indirectCellAddress->gtRegNum == REG_R2R_INDIRECT_PARAM);
#endif // DEBUG

        // For ARM indirectCellAddress is consumed by the call itself, so it should have added as an implicit argument
        // in morph. So we do not need to use EntryPointAddrAsArg0, because arg0 is already an entry point addr.
        useEntryPointAddrAsArg0 = false;
    }
#endif // FEATURE_READYTORUN_COMPILER && _TARGET_ARMARCH_

    if (nArgs == 0)
    {
        if (generateUniqueVN)
        {
            call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnpUniq);
        }
        else
        {
            call->gtVNPair.SetBoth(vnStore->VNForFunc(call->TypeGet(), vnf));
        }
    }
    else
    {
        // Returns the VN-relevant node for the requested argument, looking
        // through late-arg setup nodes to the actual late argument.  Note the
        // by-reference capture of 'args': the lambda always reads the list
        // node that the enclosing code has currently advanced to.
        auto getCurrentArg = [call, &args, useEntryPointAddrAsArg0](int currentIndex) {
            GenTree* arg = args->Current();
            if ((arg->gtFlags & GTF_LATE_ARG) != 0)
            {
                // This arg is a setup node that moves the arg into position.
                // Value-numbering will have visited the separate late arg that
                // holds the actual value, and propagated/computed the value number
                // for this arg there.
                if (useEntryPointAddrAsArg0)
                {
                    // The args in the fgArgInfo don't include the entry point, so
                    // index into them using one less than the requested index.
                    --currentIndex;
                }
                return call->fgArgInfo->GetArgNode(currentIndex);
            }
            return arg;
        };
        // Has at least one argument.
        ValueNumPair vnp0;
        ValueNumPair vnp0x = ValueNumStore::VNPForEmptyExcSet();
#ifdef FEATURE_READYTORUN_COMPILER
        if (useEntryPointAddrAsArg0)
        {
            // Use the helper's entry point address as operand 0, in place of an
            // IR argument (e.g. a class handle that is not available at jit time).
            ssize_t addrValue = (ssize_t)call->gtEntryPoint.addr;
            ValueNum callAddrVN = vnStore->VNForHandle(addrValue, GTF_ICON_FTN_ADDR);
            vnp0 = ValueNumPair(callAddrVN, callAddrVN);
        }
        else
#endif // FEATURE_READYTORUN_COMPILER
        {
            assert(!useEntryPointAddrAsArg0);
            ValueNumPair vnp0wx = getCurrentArg(0)->gtVNPair;
            vnStore->VNPUnpackExc(vnp0wx, &vnp0, &vnp0x);

            // Also include in the argument exception sets
            vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp0x);

            args = args->Rest();
        }
        if (nArgs == 1)
        {
            if (generateUniqueVN)
            {
                call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnpUniq);
            }
            else
            {
                call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0);
            }
        }
        else
        {
            // Has at least two arguments.
            ValueNumPair vnp1wx = getCurrentArg(1)->gtVNPair;
            ValueNumPair vnp1;
            ValueNumPair vnp1x;
            vnStore->VNPUnpackExc(vnp1wx, &vnp1, &vnp1x);
            vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp1x);

            args = args->Rest();
            if (nArgs == 2)
            {
                if (generateUniqueVN)
                {
                    call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1, vnpUniq);
                }
                else
                {
                    call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1);
                }
            }
            else
            {
                ValueNumPair vnp2wx = getCurrentArg(2)->gtVNPair;
                ValueNumPair vnp2;
                ValueNumPair vnp2x;
                vnStore->VNPUnpackExc(vnp2wx, &vnp2, &vnp2x);
                vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp2x);

                args = args->Rest();
                assert(nArgs == 3); // Our current maximum.
                assert(args == nullptr);
                if (generateUniqueVN)
                {
                    call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1, vnp2, vnpUniq);
                }
                else
                {
                    call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1, vnp2);
                }
            }
        }
        // Add the accumulated exceptions.
        call->gtVNPair = vnStore->VNPWithExc(call->gtVNPair, vnpExc);
    }
    assert(args == nullptr || generateUniqueVN); // All arguments should be processed or we generate unique VN and do
                                                 // not care.
}
8767 | |
8768 | void Compiler::fgValueNumberCall(GenTreeCall* call) |
8769 | { |
8770 | // First: do value numbering of any argument placeholder nodes in the argument list |
8771 | // (by transferring from the VN of the late arg that they are standing in for...) |
8772 | unsigned i = 0; |
8773 | GenTreeArgList* args = call->gtCallArgs; |
8774 | bool updatedArgPlace = false; |
8775 | while (args != nullptr) |
8776 | { |
8777 | GenTree* arg = args->Current(); |
8778 | if (arg->OperGet() == GT_ARGPLACE) |
8779 | { |
8780 | // Find the corresponding late arg. |
8781 | GenTree* lateArg = call->fgArgInfo->GetArgNode(i); |
8782 | assert(lateArg->gtVNPair.BothDefined()); |
8783 | arg->gtVNPair = lateArg->gtVNPair; |
8784 | updatedArgPlace = true; |
8785 | #ifdef DEBUG |
8786 | if (verbose) |
8787 | { |
8788 | printf("VN of ARGPLACE tree " ); |
8789 | Compiler::printTreeID(arg); |
8790 | printf(" updated to " ); |
8791 | vnpPrint(arg->gtVNPair, 1); |
8792 | printf("\n" ); |
8793 | } |
8794 | #endif |
8795 | } |
8796 | i++; |
8797 | args = args->Rest(); |
8798 | } |
8799 | if (updatedArgPlace) |
8800 | { |
8801 | // Now we have to update the VN's of the argument list nodes, since that will be used in determining |
8802 | // loop-invariance. |
8803 | fgUpdateArgListVNs(call->gtCallArgs); |
8804 | } |
8805 | |
8806 | if (call->gtCallType == CT_HELPER) |
8807 | { |
8808 | bool modHeap = fgValueNumberHelperCall(call); |
8809 | |
8810 | if (modHeap) |
8811 | { |
8812 | // For now, arbitrary side effect on GcHeap/ByrefExposed. |
8813 | fgMutateGcHeap(call DEBUGARG("HELPER - modifies heap" )); |
8814 | } |
8815 | } |
8816 | else |
8817 | { |
8818 | if (call->TypeGet() == TYP_VOID) |
8819 | { |
8820 | call->gtVNPair.SetBoth(ValueNumStore::VNForVoid()); |
8821 | } |
8822 | else |
8823 | { |
8824 | call->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, call->TypeGet())); |
8825 | } |
8826 | |
8827 | // For now, arbitrary side effect on GcHeap/ByrefExposed. |
8828 | fgMutateGcHeap(call DEBUGARG("CALL" )); |
8829 | } |
8830 | } |
8831 | |
8832 | void Compiler::fgUpdateArgListVNs(GenTreeArgList* args) |
8833 | { |
8834 | if (args == nullptr) |
8835 | { |
8836 | return; |
8837 | } |
8838 | // Otherwise... |
8839 | fgUpdateArgListVNs(args->Rest()); |
8840 | fgValueNumberTree(args); |
8841 | } |
8842 | |
8843 | VNFunc Compiler::fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc) |
8844 | { |
8845 | assert(s_helperCallProperties.IsPure(helpFunc) || s_helperCallProperties.IsAllocator(helpFunc)); |
8846 | |
8847 | VNFunc vnf = VNF_Boundary; // An illegal value... |
8848 | switch (helpFunc) |
8849 | { |
8850 | // These translate to other function symbols: |
8851 | case CORINFO_HELP_DIV: |
8852 | vnf = VNFunc(GT_DIV); |
8853 | break; |
8854 | case CORINFO_HELP_MOD: |
8855 | vnf = VNFunc(GT_MOD); |
8856 | break; |
8857 | case CORINFO_HELP_UDIV: |
8858 | vnf = VNFunc(GT_UDIV); |
8859 | break; |
8860 | case CORINFO_HELP_UMOD: |
8861 | vnf = VNFunc(GT_UMOD); |
8862 | break; |
8863 | case CORINFO_HELP_LLSH: |
8864 | vnf = VNFunc(GT_LSH); |
8865 | break; |
8866 | case CORINFO_HELP_LRSH: |
8867 | vnf = VNFunc(GT_RSH); |
8868 | break; |
8869 | case CORINFO_HELP_LRSZ: |
8870 | vnf = VNFunc(GT_RSZ); |
8871 | break; |
8872 | case CORINFO_HELP_LMUL: |
8873 | case CORINFO_HELP_LMUL_OVF: |
8874 | vnf = VNFunc(GT_MUL); |
8875 | break; |
8876 | case CORINFO_HELP_ULMUL_OVF: |
8877 | vnf = VNFunc(GT_MUL); |
8878 | break; // Is this the right thing? |
8879 | case CORINFO_HELP_LDIV: |
8880 | vnf = VNFunc(GT_DIV); |
8881 | break; |
8882 | case CORINFO_HELP_LMOD: |
8883 | vnf = VNFunc(GT_MOD); |
8884 | break; |
8885 | case CORINFO_HELP_ULDIV: |
8886 | vnf = VNFunc(GT_UDIV); |
8887 | break; |
8888 | case CORINFO_HELP_ULMOD: |
8889 | vnf = VNFunc(GT_UMOD); |
8890 | break; |
8891 | |
8892 | case CORINFO_HELP_LNG2DBL: |
8893 | vnf = VNF_Lng2Dbl; |
8894 | break; |
8895 | case CORINFO_HELP_ULNG2DBL: |
8896 | vnf = VNF_ULng2Dbl; |
8897 | break; |
8898 | case CORINFO_HELP_DBL2INT: |
8899 | vnf = VNF_Dbl2Int; |
8900 | break; |
8901 | case CORINFO_HELP_DBL2INT_OVF: |
8902 | vnf = VNF_Dbl2Int; |
8903 | break; |
8904 | case CORINFO_HELP_DBL2LNG: |
8905 | vnf = VNF_Dbl2Lng; |
8906 | break; |
8907 | case CORINFO_HELP_DBL2LNG_OVF: |
8908 | vnf = VNF_Dbl2Lng; |
8909 | break; |
8910 | case CORINFO_HELP_DBL2UINT: |
8911 | vnf = VNF_Dbl2UInt; |
8912 | break; |
8913 | case CORINFO_HELP_DBL2UINT_OVF: |
8914 | vnf = VNF_Dbl2UInt; |
8915 | break; |
8916 | case CORINFO_HELP_DBL2ULNG: |
8917 | vnf = VNF_Dbl2ULng; |
8918 | break; |
8919 | case CORINFO_HELP_DBL2ULNG_OVF: |
8920 | vnf = VNF_Dbl2ULng; |
8921 | break; |
8922 | case CORINFO_HELP_FLTREM: |
8923 | vnf = VNFunc(GT_MOD); |
8924 | break; |
8925 | case CORINFO_HELP_DBLREM: |
8926 | vnf = VNFunc(GT_MOD); |
8927 | break; |
8928 | case CORINFO_HELP_FLTROUND: |
8929 | vnf = VNF_FltRound; |
8930 | break; // Is this the right thing? |
8931 | case CORINFO_HELP_DBLROUND: |
8932 | vnf = VNF_DblRound; |
8933 | break; // Is this the right thing? |
8934 | |
8935 | // These allocation operations probably require some augmentation -- perhaps allocSiteId, |
8936 | // something about array length... |
8937 | case CORINFO_HELP_NEW_CROSSCONTEXT: |
8938 | case CORINFO_HELP_NEWFAST: |
8939 | case CORINFO_HELP_NEWSFAST: |
8940 | case CORINFO_HELP_NEWSFAST_FINALIZE: |
8941 | case CORINFO_HELP_NEWSFAST_ALIGN8: |
8942 | case CORINFO_HELP_NEWSFAST_ALIGN8_VC: |
8943 | case CORINFO_HELP_NEWSFAST_ALIGN8_FINALIZE: |
8944 | vnf = VNF_JitNew; |
8945 | break; |
8946 | |
8947 | case CORINFO_HELP_READYTORUN_NEW: |
8948 | vnf = VNF_JitReadyToRunNew; |
8949 | break; |
8950 | |
8951 | case CORINFO_HELP_NEWARR_1_DIRECT: |
8952 | case CORINFO_HELP_NEWARR_1_OBJ: |
8953 | case CORINFO_HELP_NEWARR_1_VC: |
8954 | case CORINFO_HELP_NEWARR_1_ALIGN8: |
8955 | vnf = VNF_JitNewArr; |
8956 | break; |
8957 | |
8958 | case CORINFO_HELP_NEWARR_1_R2R_DIRECT: |
8959 | case CORINFO_HELP_READYTORUN_NEWARR_1: |
8960 | vnf = VNF_JitReadyToRunNewArr; |
8961 | break; |
8962 | |
8963 | case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: |
8964 | vnf = VNF_GetgenericsGcstaticBase; |
8965 | break; |
8966 | case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: |
8967 | vnf = VNF_GetgenericsNongcstaticBase; |
8968 | break; |
8969 | case CORINFO_HELP_GETSHARED_GCSTATIC_BASE: |
8970 | vnf = VNF_GetsharedGcstaticBase; |
8971 | break; |
8972 | case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE: |
8973 | vnf = VNF_GetsharedNongcstaticBase; |
8974 | break; |
8975 | case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR: |
8976 | vnf = VNF_GetsharedGcstaticBaseNoctor; |
8977 | break; |
8978 | case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR: |
8979 | vnf = VNF_GetsharedNongcstaticBaseNoctor; |
8980 | break; |
8981 | case CORINFO_HELP_READYTORUN_STATIC_BASE: |
8982 | vnf = VNF_ReadyToRunStaticBase; |
8983 | break; |
8984 | case CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE: |
8985 | vnf = VNF_ReadyToRunGenericStaticBase; |
8986 | break; |
8987 | case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS: |
8988 | vnf = VNF_GetsharedGcstaticBaseDynamicclass; |
8989 | break; |
8990 | case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS: |
8991 | vnf = VNF_GetsharedNongcstaticBaseDynamicclass; |
8992 | break; |
8993 | case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS: |
8994 | vnf = VNF_ClassinitSharedDynamicclass; |
8995 | break; |
8996 | case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: |
8997 | vnf = VNF_GetgenericsGcthreadstaticBase; |
8998 | break; |
8999 | case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: |
9000 | vnf = VNF_GetgenericsNongcthreadstaticBase; |
9001 | break; |
9002 | case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE: |
9003 | vnf = VNF_GetsharedGcthreadstaticBase; |
9004 | break; |
9005 | case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE: |
9006 | vnf = VNF_GetsharedNongcthreadstaticBase; |
9007 | break; |
9008 | case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR: |
9009 | vnf = VNF_GetsharedGcthreadstaticBaseNoctor; |
9010 | break; |
9011 | case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR: |
9012 | vnf = VNF_GetsharedNongcthreadstaticBaseNoctor; |
9013 | break; |
9014 | case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS: |
9015 | vnf = VNF_GetsharedGcthreadstaticBaseDynamicclass; |
9016 | break; |
9017 | case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS: |
9018 | vnf = VNF_GetsharedNongcthreadstaticBaseDynamicclass; |
9019 | break; |
9020 | case CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT: |
9021 | vnf = VNF_GetStaticAddrContext; |
9022 | break; |
9023 | case CORINFO_HELP_GETSTATICFIELDADDR_TLS: |
9024 | vnf = VNF_GetStaticAddrTLS; |
9025 | break; |
9026 | |
9027 | case CORINFO_HELP_RUNTIMEHANDLE_METHOD: |
9028 | case CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG: |
9029 | vnf = VNF_RuntimeHandleMethod; |
9030 | break; |
9031 | |
9032 | case CORINFO_HELP_RUNTIMEHANDLE_CLASS: |
9033 | case CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG: |
9034 | vnf = VNF_RuntimeHandleClass; |
9035 | break; |
9036 | |
9037 | case CORINFO_HELP_STRCNS: |
9038 | vnf = VNF_StrCns; |
9039 | break; |
9040 | |
9041 | case CORINFO_HELP_CHKCASTCLASS: |
9042 | case CORINFO_HELP_CHKCASTCLASS_SPECIAL: |
9043 | case CORINFO_HELP_CHKCASTARRAY: |
9044 | case CORINFO_HELP_CHKCASTINTERFACE: |
9045 | case CORINFO_HELP_CHKCASTANY: |
9046 | vnf = VNF_CastClass; |
9047 | break; |
9048 | |
9049 | case CORINFO_HELP_READYTORUN_CHKCAST: |
9050 | vnf = VNF_ReadyToRunCastClass; |
9051 | break; |
9052 | |
9053 | case CORINFO_HELP_ISINSTANCEOFCLASS: |
9054 | case CORINFO_HELP_ISINSTANCEOFINTERFACE: |
9055 | case CORINFO_HELP_ISINSTANCEOFARRAY: |
9056 | case CORINFO_HELP_ISINSTANCEOFANY: |
9057 | vnf = VNF_IsInstanceOf; |
9058 | break; |
9059 | |
9060 | case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE: |
9061 | vnf = VNF_TypeHandleToRuntimeType; |
9062 | break; |
9063 | |
9064 | case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE: |
9065 | vnf = VNF_TypeHandleToRuntimeTypeHandle; |
9066 | break; |
9067 | |
9068 | case CORINFO_HELP_ARE_TYPES_EQUIVALENT: |
9069 | vnf = VNF_AreTypesEquivalent; |
9070 | break; |
9071 | |
9072 | case CORINFO_HELP_READYTORUN_ISINSTANCEOF: |
9073 | vnf = VNF_ReadyToRunIsInstanceOf; |
9074 | break; |
9075 | |
9076 | case CORINFO_HELP_LDELEMA_REF: |
9077 | vnf = VNF_LdElemA; |
9078 | break; |
9079 | |
9080 | case CORINFO_HELP_UNBOX: |
9081 | vnf = VNF_Unbox; |
9082 | break; |
9083 | |
9084 | // A constant within any method. |
9085 | case CORINFO_HELP_GETCURRENTMANAGEDTHREADID: |
9086 | vnf = VNF_ManagedThreadId; |
9087 | break; |
9088 | |
9089 | case CORINFO_HELP_GETREFANY: |
9090 | // TODO-CQ: This should really be interpreted as just a struct field reference, in terms of values. |
9091 | vnf = VNF_GetRefanyVal; |
9092 | break; |
9093 | |
9094 | case CORINFO_HELP_GETCLASSFROMMETHODPARAM: |
9095 | vnf = VNF_GetClassFromMethodParam; |
9096 | break; |
9097 | |
9098 | case CORINFO_HELP_GETSYNCFROMCLASSHANDLE: |
9099 | vnf = VNF_GetSyncFromClassHandle; |
9100 | break; |
9101 | |
9102 | case CORINFO_HELP_LOOP_CLONE_CHOICE_ADDR: |
9103 | vnf = VNF_LoopCloneChoiceAddr; |
9104 | break; |
9105 | |
9106 | case CORINFO_HELP_BOX: |
9107 | vnf = VNF_Box; |
9108 | break; |
9109 | |
9110 | case CORINFO_HELP_BOX_NULLABLE: |
9111 | vnf = VNF_BoxNullable; |
9112 | break; |
9113 | |
9114 | default: |
9115 | unreached(); |
9116 | } |
9117 | |
9118 | assert(vnf != VNF_Boundary); |
9119 | return vnf; |
9120 | } |
9121 | |
9122 | bool Compiler::fgValueNumberHelperCall(GenTreeCall* call) |
9123 | { |
9124 | CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd); |
9125 | bool pure = s_helperCallProperties.IsPure(helpFunc); |
9126 | bool isAlloc = s_helperCallProperties.IsAllocator(helpFunc); |
9127 | bool modHeap = s_helperCallProperties.MutatesHeap(helpFunc); |
9128 | bool mayRunCctor = s_helperCallProperties.MayRunCctor(helpFunc); |
9129 | bool noThrow = s_helperCallProperties.NoThrow(helpFunc); |
9130 | |
9131 | ValueNumPair vnpExc = ValueNumStore::VNPForEmptyExcSet(); |
9132 | |
9133 | // If the JIT helper can throw an exception make sure that we fill in |
9134 | // vnpExc with a Value Number that represents the exception(s) that can be thrown. |
9135 | if (!noThrow) |
9136 | { |
9137 | // If the helper is known to only throw only one particular exception |
9138 | // we can set vnpExc to that exception, otherwise we conservatively |
9139 | // model the JIT helper as possibly throwing multiple different exceptions |
9140 | // |
9141 | switch (helpFunc) |
9142 | { |
9143 | case CORINFO_HELP_OVERFLOW: |
9144 | // This helper always throws the VNF_OverflowExc exception |
9145 | vnpExc = vnStore->VNPExcSetSingleton( |
9146 | vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc, vnStore->VNPForVoid())); |
9147 | break; |
9148 | |
9149 | default: |
9150 | // Setup vnpExc with the information that multiple different exceptions |
9151 | // could be generated by this helper |
9152 | vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_HelperMultipleExc)); |
9153 | } |
9154 | } |
9155 | |
9156 | ValueNumPair vnpNorm; |
9157 | |
9158 | if (call->TypeGet() == TYP_VOID) |
9159 | { |
9160 | vnpNorm = ValueNumStore::VNPForVoid(); |
9161 | } |
9162 | else |
9163 | { |
9164 | // TODO-CQ: this is a list of helpers we're going to treat as non-pure, |
9165 | // because they raise complications. Eventually, we need to handle those complications... |
9166 | bool needsFurtherWork = false; |
9167 | switch (helpFunc) |
9168 | { |
9169 | case CORINFO_HELP_NEW_MDARR: |
9170 | // This is a varargs helper. We need to represent the array shape in the VN world somehow. |
9171 | needsFurtherWork = true; |
9172 | break; |
9173 | default: |
9174 | break; |
9175 | } |
9176 | |
9177 | if (!needsFurtherWork && (pure || isAlloc)) |
9178 | { |
9179 | VNFunc vnf = fgValueNumberJitHelperMethodVNFunc(helpFunc); |
9180 | |
9181 | if (mayRunCctor) |
9182 | { |
9183 | if ((call->gtFlags & GTF_CALL_HOISTABLE) == 0) |
9184 | { |
9185 | modHeap = true; |
9186 | } |
9187 | } |
9188 | |
9189 | fgValueNumberHelperCallFunc(call, vnf, vnpExc); |
9190 | return modHeap; |
9191 | } |
9192 | else |
9193 | { |
9194 | vnpNorm.SetBoth(vnStore->VNForExpr(compCurBB, call->TypeGet())); |
9195 | } |
9196 | } |
9197 | |
9198 | call->gtVNPair = vnStore->VNPWithExc(vnpNorm, vnpExc); |
9199 | return modHeap; |
9200 | } |
9201 | |
9202 | //-------------------------------------------------------------------------------- |
9203 | // fgValueNumberAddExceptionSetForIndirection |
9204 | // - Adds the exception sets for the current tree node |
9205 | // which is performing a memory indirection operation |
9206 | // |
9207 | // Arguments: |
9208 | // tree - The current GenTree node, |
9209 | // It must be some kind of an indirection node |
9210 | // or have an implicit indirection |
9211 | // baseAddr - The address that we are indirecting |
9212 | // |
9213 | // Return Value: |
9214 | // - The tree's gtVNPair is updated to include the VNF_nullPtrExc |
9215 | // exception set. We calculate a base address to use as the |
9216 | // argument to the VNF_nullPtrExc function. |
9217 | // |
9218 | // Notes: - The calculation of the base address removes any constant |
9219 | // offsets, so that obj.x and obj.y will both have obj as |
9220 | // their base address. |
9221 | // For arrays the base address currently includes the |
9222 | // index calculations. |
9223 | // |
9224 | void Compiler::fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr) |
9225 | { |
9226 | // We should have tree that a unary indirection or a tree node with an implicit indirection |
9227 | assert(tree->OperIsUnary() || tree->OperIsImplicitIndir()); |
9228 | |
9229 | // We evaluate the baseAddr ValueNumber further in order |
9230 | // to obtain a better value to use for the null check exeception. |
9231 | // |
9232 | ValueNumPair baseVNP = baseAddr->gtVNPair; |
9233 | ValueNum baseLVN = baseVNP.GetLiberal(); |
9234 | ValueNum baseCVN = baseVNP.GetConservative(); |
9235 | ssize_t offsetL = 0; |
9236 | ssize_t offsetC = 0; |
9237 | VNFuncApp funcAttr; |
9238 | |
9239 | while (vnStore->GetVNFunc(baseLVN, &funcAttr) && (funcAttr.m_func == (VNFunc)GT_ADD) && |
9240 | (vnStore->TypeOfVN(baseLVN) == TYP_BYREF)) |
9241 | { |
9242 | if (fgIsBigOffset(offsetL)) |
9243 | { |
9244 | // Failure: Exit this loop if we have a "big" offset |
9245 | |
9246 | // reset baseLVN back to the full address expression |
9247 | baseLVN = baseVNP.GetLiberal(); |
9248 | break; |
9249 | } |
9250 | |
9251 | // The arguments in value numbering functions are sorted in increasing order |
9252 | // Thus either arg could be the constant. |
9253 | if (vnStore->IsVNConstant(funcAttr.m_args[0]) && varTypeIsIntegral(vnStore->TypeOfVN(funcAttr.m_args[0]))) |
9254 | { |
9255 | offsetL += vnStore->CoercedConstantValue<ssize_t>(funcAttr.m_args[0]); |
9256 | baseLVN = funcAttr.m_args[1]; |
9257 | } |
9258 | else if (vnStore->IsVNConstant(funcAttr.m_args[1]) && varTypeIsIntegral(vnStore->TypeOfVN(funcAttr.m_args[1]))) |
9259 | { |
9260 | offsetL += vnStore->CoercedConstantValue<ssize_t>(funcAttr.m_args[1]); |
9261 | baseLVN = funcAttr.m_args[0]; |
9262 | } |
9263 | else // neither argument is a constant |
9264 | { |
9265 | break; |
9266 | } |
9267 | } |
9268 | |
9269 | while (vnStore->GetVNFunc(baseCVN, &funcAttr) && (funcAttr.m_func == (VNFunc)GT_ADD) && |
9270 | (vnStore->TypeOfVN(baseCVN) == TYP_BYREF)) |
9271 | { |
9272 | if (fgIsBigOffset(offsetC)) |
9273 | { |
9274 | // Failure: Exit this loop if we have a "big" offset |
9275 | |
9276 | // reset baseCVN back to the full address expression |
9277 | baseCVN = baseVNP.GetConservative(); |
9278 | break; |
9279 | } |
9280 | |
9281 | // The arguments in value numbering functions are sorted in increasing order |
9282 | // Thus either arg could be the constant. |
9283 | if (vnStore->IsVNConstant(funcAttr.m_args[0]) && varTypeIsIntegral(vnStore->TypeOfVN(funcAttr.m_args[0]))) |
9284 | { |
9285 | offsetL += vnStore->CoercedConstantValue<ssize_t>(funcAttr.m_args[0]); |
9286 | baseCVN = funcAttr.m_args[1]; |
9287 | } |
9288 | else if (vnStore->IsVNConstant(funcAttr.m_args[1]) && varTypeIsIntegral(vnStore->TypeOfVN(funcAttr.m_args[1]))) |
9289 | { |
9290 | offsetC += vnStore->CoercedConstantValue<ssize_t>(funcAttr.m_args[1]); |
9291 | baseCVN = funcAttr.m_args[0]; |
9292 | } |
9293 | else // neither argument is a constant |
9294 | { |
9295 | break; |
9296 | } |
9297 | } |
9298 | |
9299 | // Create baseVNP, from the values we just computed, |
9300 | baseVNP = ValueNumPair(baseLVN, baseCVN); |
9301 | |
9302 | // Unpack, Norm,Exc for the tree's op1 VN |
9303 | ValueNumPair vnpBaseNorm; |
9304 | ValueNumPair vnpBaseExc; |
9305 | vnStore->VNPUnpackExc(baseVNP, &vnpBaseNorm, &vnpBaseExc); |
9306 | |
9307 | // The Norm VN for op1 is used to create the NullPtrExc |
9308 | ValueNumPair excChkSet = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_NullPtrExc, vnpBaseNorm)); |
9309 | |
9310 | // Combine the excChkSet with exception set of op1 |
9311 | ValueNumPair excSetBoth = vnStore->VNPExcSetUnion(excChkSet, vnpBaseExc); |
9312 | |
9313 | // Retrieve the Normal VN for tree, note that it may be NoVN, so we handle that case |
9314 | ValueNumPair vnpNorm = vnStore->VNPNormalPair(tree->gtVNPair); |
9315 | |
9316 | // For as GT_IND on the lhs of an assignment we will get a NoVN value |
9317 | if (vnpNorm.GetLiberal() == ValueNumStore::NoVN) |
9318 | { |
9319 | // Use the special Void VN value instead. |
9320 | vnpNorm = vnStore->VNPForVoid(); |
9321 | } |
9322 | tree->gtVNPair = vnStore->VNPWithExc(vnpNorm, excSetBoth); |
9323 | } |
9324 | |
9325 | //-------------------------------------------------------------------------------- |
// fgValueNumberAddExceptionSetForDivision
9327 | // - Adds the exception sets for the current tree node |
9328 | // which is performing an integer division operation |
9329 | // |
9330 | // Arguments: |
9331 | // tree - The current GenTree node, |
9332 | // It must be a node that performs an integer division |
9333 | // |
9334 | // Return Value: |
9335 | // - The tree's gtVNPair is updated to include |
9336 | // VNF_DivideByZeroExc and VNF_ArithmeticExc, |
9337 | // We will omit one or both of them when the operation |
//        has constant arguments that preclude the exception.
9339 | // |
9340 | void Compiler::fgValueNumberAddExceptionSetForDivision(GenTree* tree) |
9341 | { |
9342 | genTreeOps oper = tree->OperGet(); |
9343 | |
9344 | // A Divide By Zero exception may be possible. |
9345 | // The divisor is held in tree->gtOp.gtOp2 |
9346 | // |
9347 | bool isUnsignedOper = (oper == GT_UDIV) || (oper == GT_UMOD); |
9348 | bool needDivideByZeroExcLib = true; |
9349 | bool needDivideByZeroExcCon = true; |
9350 | bool needArithmeticExcLib = !isUnsignedOper; // Overflow isn't possible for unsigned divide |
9351 | bool needArithmeticExcCon = !isUnsignedOper; |
9352 | |
9353 | // Determine if we have a 32-bit or 64-bit divide operation |
9354 | var_types typ = genActualType(tree->TypeGet()); |
9355 | assert((typ == TYP_INT) || (typ == TYP_LONG)); |
9356 | |
9357 | // Retrieve the Norm VN for op2 to use it for the DivideByZeroExc |
9358 | ValueNumPair vnpOp2Norm = vnStore->VNPNormalPair(tree->gtOp.gtOp2->gtVNPair); |
9359 | ValueNum vnOp2NormLib = vnpOp2Norm.GetLiberal(); |
9360 | ValueNum vnOp2NormCon = vnpOp2Norm.GetConservative(); |
9361 | |
9362 | if (typ == TYP_INT) |
9363 | { |
9364 | if (vnStore->IsVNConstant(vnOp2NormLib)) |
9365 | { |
9366 | INT32 kVal = vnStore->ConstantValue<INT32>(vnOp2NormLib); |
9367 | if (kVal != 0) |
9368 | { |
9369 | needDivideByZeroExcLib = false; |
9370 | } |
9371 | if (!isUnsignedOper && (kVal != -1)) |
9372 | { |
9373 | needArithmeticExcLib = false; |
9374 | } |
9375 | } |
9376 | if (vnStore->IsVNConstant(vnOp2NormCon)) |
9377 | { |
9378 | INT32 kVal = vnStore->ConstantValue<INT32>(vnOp2NormCon); |
9379 | if (kVal != 0) |
9380 | { |
9381 | needDivideByZeroExcCon = false; |
9382 | } |
9383 | if (!isUnsignedOper && (kVal != -1)) |
9384 | { |
9385 | needArithmeticExcCon = false; |
9386 | } |
9387 | } |
9388 | } |
9389 | else // (typ == TYP_LONG) |
9390 | { |
9391 | if (vnStore->IsVNConstant(vnOp2NormLib)) |
9392 | { |
9393 | INT64 kVal = vnStore->ConstantValue<INT64>(vnOp2NormLib); |
9394 | if (kVal != 0) |
9395 | { |
9396 | needDivideByZeroExcLib = false; |
9397 | } |
9398 | if (!isUnsignedOper && (kVal != -1)) |
9399 | { |
9400 | needArithmeticExcLib = false; |
9401 | } |
9402 | } |
9403 | if (vnStore->IsVNConstant(vnOp2NormCon)) |
9404 | { |
9405 | INT64 kVal = vnStore->ConstantValue<INT64>(vnOp2NormCon); |
9406 | if (kVal != 0) |
9407 | { |
9408 | needDivideByZeroExcCon = false; |
9409 | } |
9410 | if (!isUnsignedOper && (kVal != -1)) |
9411 | { |
9412 | needArithmeticExcCon = false; |
9413 | } |
9414 | } |
9415 | } |
9416 | |
9417 | // Retrieve the Norm VN for op1 to use it for the ArithmeticExc |
9418 | ValueNumPair vnpOp1Norm = vnStore->VNPNormalPair(tree->gtOp.gtOp1->gtVNPair); |
9419 | ValueNum vnOp1NormLib = vnpOp1Norm.GetLiberal(); |
9420 | ValueNum vnOp1NormCon = vnpOp1Norm.GetConservative(); |
9421 | |
9422 | if (needArithmeticExcLib || needArithmeticExcCon) |
9423 | { |
9424 | if (typ == TYP_INT) |
9425 | { |
9426 | if (vnStore->IsVNConstant(vnOp1NormLib)) |
9427 | { |
9428 | INT32 kVal = vnStore->ConstantValue<INT32>(vnOp1NormLib); |
9429 | |
9430 | if (!isUnsignedOper && (kVal != INT32_MIN)) |
9431 | { |
9432 | needArithmeticExcLib = false; |
9433 | } |
9434 | } |
9435 | if (vnStore->IsVNConstant(vnOp1NormCon)) |
9436 | { |
9437 | INT32 kVal = vnStore->ConstantValue<INT32>(vnOp1NormCon); |
9438 | |
9439 | if (!isUnsignedOper && (kVal != INT32_MIN)) |
9440 | { |
9441 | needArithmeticExcCon = false; |
9442 | } |
9443 | } |
9444 | } |
9445 | else // (typ == TYP_LONG) |
9446 | { |
9447 | if (vnStore->IsVNConstant(vnOp1NormLib)) |
9448 | { |
9449 | INT64 kVal = vnStore->ConstantValue<INT64>(vnOp1NormLib); |
9450 | |
9451 | if (!isUnsignedOper && (kVal != INT64_MIN)) |
9452 | { |
9453 | needArithmeticExcLib = false; |
9454 | } |
9455 | } |
9456 | if (vnStore->IsVNConstant(vnOp1NormCon)) |
9457 | { |
9458 | INT64 kVal = vnStore->ConstantValue<INT64>(vnOp1NormCon); |
9459 | |
9460 | if (!isUnsignedOper && (kVal != INT64_MIN)) |
9461 | { |
9462 | needArithmeticExcCon = false; |
9463 | } |
9464 | } |
9465 | } |
9466 | } |
9467 | |
9468 | // Unpack, Norm,Exc for the tree's VN |
9469 | ValueNumPair vnpTreeNorm; |
9470 | ValueNumPair vnpTreeExc; |
9471 | ValueNumPair vnpDivZeroExc = ValueNumStore::VNPForEmptyExcSet(); |
9472 | ValueNumPair vnpArithmExc = ValueNumStore::VNPForEmptyExcSet(); |
9473 | |
9474 | vnStore->VNPUnpackExc(tree->gtVNPair, &vnpTreeNorm, &vnpTreeExc); |
9475 | |
9476 | if (needDivideByZeroExcLib) |
9477 | { |
9478 | vnpDivZeroExc.SetLiberal( |
9479 | vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_DivideByZeroExc, vnOp2NormLib))); |
9480 | } |
9481 | if (needDivideByZeroExcCon) |
9482 | { |
9483 | vnpDivZeroExc.SetConservative( |
9484 | vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_DivideByZeroExc, vnOp2NormCon))); |
9485 | } |
9486 | if (needArithmeticExcLib) |
9487 | { |
9488 | vnpArithmExc.SetLiberal( |
9489 | vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_ArithmeticExc, vnOp1NormLib, vnOp2NormLib))); |
9490 | } |
9491 | if (needArithmeticExcCon) |
9492 | { |
9493 | vnpArithmExc.SetConservative( |
9494 | vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_ArithmeticExc, vnOp1NormLib, vnOp2NormCon))); |
9495 | } |
9496 | |
9497 | // Combine vnpDivZeroExc with the exception set of tree |
9498 | ValueNumPair newExcSet = vnStore->VNPExcSetUnion(vnpTreeExc, vnpDivZeroExc); |
9499 | // Combine vnpArithmExc with the newExcSet |
9500 | newExcSet = vnStore->VNPExcSetUnion(newExcSet, vnpArithmExc); |
9501 | |
9502 | // Updated VN for tree, it now includes DivideByZeroExc and/or ArithmeticExc |
9503 | tree->gtVNPair = vnStore->VNPWithExc(vnpTreeNorm, newExcSet); |
9504 | } |
9505 | |
9506 | //-------------------------------------------------------------------------------- |
9507 | // fgValueNumberAddExceptionSetForOverflow |
9508 | // - Adds the exception set for the current tree node |
9509 | // which is performing an overflow checking math operation |
9510 | // |
9511 | // Arguments: |
9512 | // tree - The current GenTree node, |
9513 | // It must be a node that performs an overflow |
9514 | // checking math operation |
9515 | // |
9516 | // Return Value: |
9517 | // - The tree's gtVNPair is updated to include the VNF_OverflowExc |
9518 | // exception set. |
9519 | // |
9520 | void Compiler::fgValueNumberAddExceptionSetForOverflow(GenTree* tree) |
9521 | { |
9522 | assert(tree->gtOverflowEx()); |
9523 | |
9524 | // We should only be dealing with an Overflow checking ALU operation. |
9525 | VNFunc vnf = GetVNFuncForNode(tree); |
9526 | assert((vnf >= VNF_ADD_OVF) && (vnf <= VNF_MUL_UN_OVF)); |
9527 | |
9528 | // Unpack, Norm,Exc for the tree's VN |
9529 | // |
9530 | ValueNumPair vnpTreeNorm; |
9531 | ValueNumPair vnpTreeExc; |
9532 | |
9533 | vnStore->VNPUnpackExc(tree->gtVNPair, &vnpTreeNorm, &vnpTreeExc); |
9534 | |
9535 | #ifdef DEBUG |
9536 | // The normal value number function should be the same overflow checking ALU operation as 'vnf' |
9537 | VNFuncApp treeNormFuncApp; |
9538 | assert(vnStore->GetVNFunc(vnpTreeNorm.GetLiberal(), &treeNormFuncApp) && (treeNormFuncApp.m_func == vnf)); |
9539 | #endif // DEBUG |
9540 | |
9541 | // Overflow-checking operations add an overflow exception |
9542 | // The normal result is used as the input argument for the OverflowExc |
9543 | ValueNumPair overflowExcSet = |
9544 | vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc, vnpTreeNorm)); |
9545 | |
9546 | // Combine the new Overflow exception with the original exception set of tree |
9547 | ValueNumPair newExcSet = vnStore->VNPExcSetUnion(vnpTreeExc, overflowExcSet); |
9548 | |
9549 | // Updated VN for tree, it now includes Overflow exception |
9550 | tree->gtVNPair = vnStore->VNPWithExc(vnpTreeNorm, newExcSet); |
9551 | } |
9552 | |
9553 | //-------------------------------------------------------------------------------- |
9554 | // fgValueNumberAddExceptionSetForCkFinite |
9555 | // - Adds the exception set for the current tree node |
9556 | // which is a CkFinite operation |
9557 | // |
9558 | // Arguments: |
9559 | // tree - The current GenTree node, |
9560 | // It must be a CkFinite node |
9561 | // |
9562 | // Return Value: |
9563 | // - The tree's gtVNPair is updated to include the VNF_ArithmeticExc |
9564 | // exception set. |
9565 | // |
9566 | void Compiler::fgValueNumberAddExceptionSetForCkFinite(GenTree* tree) |
9567 | { |
9568 | // We should only be dealing with an check finite operation. |
9569 | assert(tree->OperGet() == GT_CKFINITE); |
9570 | |
9571 | // Unpack, Norm,Exc for the tree's VN |
9572 | // |
9573 | ValueNumPair vnpTreeNorm; |
9574 | ValueNumPair vnpTreeExc; |
9575 | ValueNumPair newExcSet; |
9576 | |
9577 | vnStore->VNPUnpackExc(tree->gtVNPair, &vnpTreeNorm, &vnpTreeExc); |
9578 | |
9579 | // ckfinite adds an Arithmetic exception |
9580 | // The normal result is used as the input argument for the ArithmeticExc |
9581 | ValueNumPair arithmeticExcSet = |
9582 | vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_ArithmeticExc, vnpTreeNorm)); |
9583 | |
9584 | // Combine the new Arithmetic exception with the original exception set of tree |
9585 | newExcSet = vnStore->VNPExcSetUnion(vnpTreeExc, arithmeticExcSet); |
9586 | |
9587 | // Updated VN for tree, it now includes Arithmetic exception |
9588 | tree->gtVNPair = vnStore->VNPWithExc(vnpTreeNorm, newExcSet); |
9589 | } |
9590 | |
9591 | //-------------------------------------------------------------------------------- |
9592 | // fgValueNumberAddExceptionSet |
9593 | // - Adds any exception sets needed for the current tree node |
9594 | // |
9595 | // Arguments: |
9596 | // tree - The current GenTree node, |
9597 | // |
9598 | // Return Value: |
9599 | // - The tree's gtVNPair is updated to include the exception sets. |
9600 | // |
// Notes: - This method relies upon OperMayThrow to determine if we need
//        to add an exception set. If OperMayThrow returns false no
9603 | // exception set will be added. |
9604 | // |
// Dispatches to the oper-specific exception-set helper for any node that
// OperMayThrow reports as possibly throwing; nodes that cannot throw are
// left untouched.
void Compiler::fgValueNumberAddExceptionSet(GenTree* tree)
{
    if (tree->OperMayThrow(this))
    {
        switch (tree->OperGet())
        {
            case GT_CAST: // A cast with an overflow check
                break;    // Already handled by VNPairForCast()

            case GT_ADD: // An Overflow checking ALU operation
            case GT_SUB:
            case GT_MUL:
                fgValueNumberAddExceptionSetForOverflow(tree);
                break;

            case GT_LCLHEAP:
                // It is not necessary to model the StackOverflow exception for GT_LCLHEAP
                break;

            case GT_INTRINSIC:
                // ToDo: model the exceptions for Intrinsics
                break;

            case GT_IND: // Implicit null check.
                if ((tree->gtFlags & GTF_IND_ASG_LHS) != 0)
                {
                    // Don't add exception set on LHS of assignment
                    break;
                }
                // Otherwise fall through and treat it like the other indirections.
                __fallthrough;

            case GT_BLK:
            case GT_OBJ:
            case GT_DYN_BLK:
            case GT_NULLCHECK:
                // Each of these indirects through its address operand.
                fgValueNumberAddExceptionSetForIndirection(tree, tree->AsIndir()->Addr());
                break;

            case GT_ARR_LENGTH:
                // The array length node implicitly null-checks the array reference.
                fgValueNumberAddExceptionSetForIndirection(tree, tree->AsArrLen()->ArrRef());
                break;

            case GT_ARR_ELEM:
                fgValueNumberAddExceptionSetForIndirection(tree, tree->gtArrElem.gtArrObj);
                break;

            case GT_ARR_INDEX:
                fgValueNumberAddExceptionSetForIndirection(tree, tree->gtArrIndex.ArrObj());
                break;

            case GT_ARR_OFFSET:
                fgValueNumberAddExceptionSetForIndirection(tree, tree->gtArrOffs.gtArrObj);
                break;

            case GT_DIV:
            case GT_UDIV:
            case GT_MOD:
            case GT_UMOD:
                // May add DivideByZeroExc and/or (for signed opers) ArithmeticExc.
                fgValueNumberAddExceptionSetForDivision(tree);
                break;

            case GT_CKFINITE:
                fgValueNumberAddExceptionSetForCkFinite(tree);
                break;

            default:
                // Any new throwing oper must be given a case above.
                assert(!"Handle this oper in fgValueNumberAddExceptionSet" );
                break;
        }
    }
}
9676 | |
9677 | #ifdef DEBUG |
9678 | // This method asserts that SSA name constraints specified are satisfied. |
9679 | // Until we figure out otherwise, all VN's are assumed to be liberal. |
9680 | // TODO-Cleanup: new JitTestLabels for lib vs cons vs both VN classes? |
// Validates the JitTest VN-class labels attached to tree nodes: all nodes
// labeled with the same class number must share one (liberal) value number,
// and each value number may belong to only one class. Any violation is
// reported via printf and then asserts.
void Compiler::JitTestCheckVN()
{
    typedef JitHashTable<ssize_t, JitSmallPrimitiveKeyFuncs<ssize_t>, ValueNum> LabelToVNMap;
    typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ssize_t> VNToLabelMap;

    // If we have no test data, early out.
    if (m_nodeTestData == nullptr)
    {
        return;
    }

    NodeToTestDataMap* testData = GetNodeTestData();

    // First we have to know which nodes in the tree are reachable.
    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
    NodeToIntMap* reachable = FindReachableNodesInNodeTestData();

    // The two maps enforce the one-to-one correspondence between labels and VNs.
    LabelToVNMap* labelToVN = new (getAllocatorDebugOnly()) LabelToVNMap(getAllocatorDebugOnly());
    VNToLabelMap* vnToLabel = new (getAllocatorDebugOnly()) VNToLabelMap(getAllocatorDebugOnly());

    if (verbose)
    {
        printf("\nJit Testing: Value numbering.\n" );
    }
    for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki)
    {
        TestLabelAndNum tlAndN;
        GenTree* node = ki.Get();
        // Until we figure out otherwise, all VN's are assumed to be liberal.
        ValueNum nodeVN = node->GetVN(VNK_Liberal);

        bool b = testData->Lookup(node, &tlAndN);
        assert(b);
        if (tlAndN.m_tl == TL_VN || tlAndN.m_tl == TL_VNNorm)
        {
            int dummy;
            // A labeled node that optimization removed can no longer be checked.
            if (!reachable->Lookup(node, &dummy))
            {
                printf("Node " );
                Compiler::printTreeID(node);
                printf(" had a test constraint declared, but has become unreachable at the time the constraint is "
                       "tested.\n"
                       "(This is probably as a result of some optimization -- \n"
                       "you may need to modify the test case to defeat this opt.)\n" );
                assert(false);
            }

            if (verbose)
            {
                printf("  Node " );
                Compiler::printTreeID(node);
                printf(" -- VN class %d.\n" , tlAndN.m_num);
            }

            // TL_VNNorm compares the exception-stripped (normal) value number.
            if (tlAndN.m_tl == TL_VNNorm)
            {
                nodeVN = vnStore->VNNormalValue(nodeVN);
            }

            ValueNum vn;
            if (labelToVN->Lookup(tlAndN.m_num, &vn))
            {
                if (verbose)
                {
                    printf("      Already in hash tables.\n" );
                }
                // The mapping(s) must be one-to-one: if the label has a mapping, then the ssaNm must, as well.
                ssize_t num2;
                bool b = vnToLabel->Lookup(vn, &num2);
                // And the mappings must be the same.
                if (tlAndN.m_num != num2)
                {
                    printf("Node: " );
                    Compiler::printTreeID(node);
                    printf(", with value number " FMT_VN ", was declared in VN class %d,\n" , nodeVN, tlAndN.m_num);
                    printf("but this value number " FMT_VN
                           " has already been associated with a different SSA name class: %d.\n" ,
                           vn, num2);
                    assert(false);
                }
                // And the current node must be of the specified SSA family.
                if (nodeVN != vn)
                {
                    printf("Node: " );
                    Compiler::printTreeID(node);
                    printf(", " FMT_VN " was declared in SSA name class %d,\n" , nodeVN, tlAndN.m_num);
                    printf("but that name class was previously bound to a different value number: " FMT_VN ".\n" , vn);
                    assert(false);
                }
            }
            else
            {
                ssize_t num;
                // The mapping(s) must be one-to-one: if the label has no mapping, then the ssaNm may not, either.
                if (vnToLabel->Lookup(nodeVN, &num))
                {
                    printf("Node: " );
                    Compiler::printTreeID(node);
                    printf(", " FMT_VN " was declared in value number class %d,\n" , nodeVN, tlAndN.m_num);
                    printf(
                        "but this value number has already been associated with a different value number class: %d.\n" ,
                        num);
                    assert(false);
                }
                // Add to both mappings.
                labelToVN->Set(tlAndN.m_num, nodeVN);
                vnToLabel->Set(nodeVN, tlAndN.m_num);
                if (verbose)
                {
                    printf("      added to hash tables.\n" );
                }
            }
        }
    }
}
9795 | |
9796 | void Compiler::vnpPrint(ValueNumPair vnp, unsigned level) |
9797 | { |
9798 | if (vnp.BothEqual()) |
9799 | { |
9800 | vnPrint(vnp.GetLiberal(), level); |
9801 | } |
9802 | else |
9803 | { |
9804 | printf("<l:" ); |
9805 | vnPrint(vnp.GetLiberal(), level); |
9806 | printf(", c:" ); |
9807 | vnPrint(vnp.GetConservative(), level); |
9808 | printf(">" ); |
9809 | } |
9810 | } |
9811 | |
9812 | void Compiler::vnPrint(ValueNum vn, unsigned level) |
9813 | { |
9814 | |
9815 | if (ValueNumStore::isReservedVN(vn)) |
9816 | { |
9817 | printf(ValueNumStore::reservedName(vn)); |
9818 | } |
9819 | else |
9820 | { |
9821 | printf(FMT_VN, vn); |
9822 | if (level > 0) |
9823 | { |
9824 | vnStore->vnDump(this, vn); |
9825 | } |
9826 | } |
9827 | } |
9828 | |
9829 | #endif // DEBUG |
9830 | |
9831 | // Methods of ValueNumPair. |
// Default-constructs a pair with both the liberal and the conservative
// value numbers left undefined (ValueNumStore::NoVN).
ValueNumPair::ValueNumPair() : m_liberal(ValueNumStore::NoVN), m_conservative(ValueNumStore::NoVN)
{
}
9835 | |
9836 | bool ValueNumPair::BothDefined() const |
9837 | { |
9838 | return (m_liberal != ValueNumStore::NoVN) && (m_conservative != ValueNumStore::NoVN); |
9839 | } |
9840 | |