// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/backend/evaluator.h"

namespace dart {

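// Evaluates a binary integer operation on two Integer handles at compile
// time. Returns a new old-space Integer on success and Integer::null() for
// the cases left to runtime code: kTRUNCDIV/kMOD with a zero divisor (which
// must throw), shifts by a negative amount, and kDIV, which is not folded
// here.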
static IntegerPtr BinaryIntegerEvaluateRaw(const Integer& left,
                                           const Integer& right,
                                           Token::Kind token_kind) {
  switch (token_kind) {
    case Token::kTRUNCDIV:
      FALL_THROUGH;
    case Token::kMOD:
      // Check right value for zero.
      if (right.AsInt64Value() == 0) {
        break;  // Will throw.
      }
      FALL_THROUGH;
    case Token::kADD:
      FALL_THROUGH;
    case Token::kSUB:
      FALL_THROUGH;
    case Token::kMUL:
      return left.ArithmeticOp(token_kind, right, Heap::kOld);
    case Token::kSHL:
      FALL_THROUGH;
    case Token::kSHR:
      if (right.AsInt64Value() >= 0) {
        return left.ShiftOp(token_kind, right, Heap::kOld);
      }
      break;
    case Token::kBIT_AND:
      FALL_THROUGH;
    case Token::kBIT_OR:
      FALL_THROUGH;
    case Token::kBIT_XOR:
      return left.BitOp(token_kind, right, Heap::kOld);
    case Token::kDIV:
      break;
    default:
      UNREACHABLE();
  }

  return Integer::null();
}

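// Evaluates a unary integer operation at compile time. kNEGATE is folded by
// multiplying by -1 via Integer::ArithmeticOp; kBIT_NOT is folded only for
// Smi and Mint values. Anything that cannot be folded yields Integer::null().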
static IntegerPtr UnaryIntegerEvaluateRaw(const Integer& value,
                                          Token::Kind token_kind,
                                          Zone* zone) {
  switch (token_kind) {
    case Token::kNEGATE:
      return value.ArithmeticOp(Token::kMUL, Smi::Handle(zone, Smi::New(-1)),
                                Heap::kOld);
    case Token::kBIT_NOT:
      if (value.IsSmi()) {
        return Integer::New(~Smi::Cast(value).Value(), Heap::kOld);
      } else if (value.IsMint()) {
        return Integer::New(~Mint::Cast(value).value(), Heap::kOld);
      }
      break;
    default:
      UNREACHABLE();
  }
  return Integer::null();
}

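// Truncates |v| to the value range of |r|. Illustrative results (the kTagged
// numbers assume kSmiTagShift == 1):
//   TruncateTo(0x100000005, kUnboxedUint32) == 0x5
//   TruncateTo(0xFFFFFFFF, kUnboxedInt32)   == -1  (sign-extended)
//   TruncateTo(v, kUnboxedInt64)            == v   (no truncation)
//   kTagged keeps 63 significant bits on a 64-bit target and 31 on a 32-bit
//   target, sign-extending from the topmost kept bit.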
int64_t Evaluator::TruncateTo(int64_t v, Representation r) {
  switch (r) {
    case kTagged: {
      // Smi occupies word minus kSmiTagShift bits.
      const intptr_t kTruncateBits =
          (kBitsPerInt64 - kBitsPerWord) + kSmiTagShift;
      return Utils::ShiftLeftWithTruncation(v, kTruncateBits) >> kTruncateBits;
    }
    case kUnboxedInt32:
      return Utils::ShiftLeftWithTruncation(v, kBitsPerInt32) >> kBitsPerInt32;
    case kUnboxedUint32:
      return v & kMaxUint32;
    case kUnboxedInt64:
      return v;
    default:
      UNREACHABLE();
  }
}

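// Public entry point for constant-folding a binary integer operation.
// Returns Integer::null() if either operand is not an Integer or if a
// non-truncating operation would overflow |representation| (mirroring the
// runtime behavior, where such an instruction deoptimizes instead of
// wrapping). Truncating operations wrap the raw result via TruncateTo.
// Successful results are canonicalized before being returned.
//
// A minimal usage sketch (hypothetical caller; |left_obj|, |right_obj| and
// |thread| are assumed to be in scope):
//
//   const Integer& folded = Integer::Handle(
//       thread->zone(),
//       Evaluator::BinaryIntegerEvaluate(left_obj, right_obj, Token::kADD,
//                                        /*is_truncating=*/false,
//                                        kUnboxedInt64, thread));
//   if (!folded.IsNull()) {
//     // The operation was folded; use |folded| as the constant result.
//   }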
IntegerPtr Evaluator::BinaryIntegerEvaluate(const Object& left,
                                            const Object& right,
                                            Token::Kind token_kind,
                                            bool is_truncating,
                                            Representation representation,
                                            Thread* thread) {
  if (!left.IsInteger() || !right.IsInteger()) {
    return Integer::null();
  }
  Zone* zone = thread->zone();
  const Integer& left_int = Integer::Cast(left);
  const Integer& right_int = Integer::Cast(right);
  Integer& result = Integer::Handle(
      zone, BinaryIntegerEvaluateRaw(left_int, right_int, token_kind));

  if (!result.IsNull()) {
    if (is_truncating) {
      const int64_t truncated =
          TruncateTo(result.AsTruncatedInt64Value(), representation);
      result = Integer::New(truncated, Heap::kOld);
      ASSERT(FlowGraph::IsConstantRepresentable(
          result, representation, /*tagged_value_must_be_smi=*/true));
    } else if (!FlowGraph::IsConstantRepresentable(
                   result, representation,
                   /*tagged_value_must_be_smi=*/true)) {
      // If this operation is not truncating it would deoptimize on overflow.
      // Check that we match this behavior and don't produce a value that is
      // larger than something this operation can produce. We could have
      // specialized instructions that use this value under this assumption.
      return Integer::null();
    }
    const char* error_str = NULL;
    result ^= result.CheckAndCanonicalize(thread, &error_str);
    if (error_str != NULL) {
      FATAL1("Failed to canonicalize: %s", error_str);
    }
  }

  return result.raw();
}

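// Public entry point for constant-folding a unary integer operation. As with
// BinaryIntegerEvaluate, a result that is not representable in
// |representation| (with tagged results required to be Smis) is rejected by
// returning Integer::null(); otherwise the result is canonicalized.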
IntegerPtr Evaluator::UnaryIntegerEvaluate(const Object& value,
                                           Token::Kind token_kind,
                                           Representation representation,
                                           Thread* thread) {
  if (!value.IsInteger()) {
    return Integer::null();
  }
  Zone* zone = thread->zone();
  const Integer& value_int = Integer::Cast(value);
  Integer& result = Integer::Handle(
      zone, UnaryIntegerEvaluateRaw(value_int, token_kind, zone));

  if (!result.IsNull()) {
    if (!FlowGraph::IsConstantRepresentable(
            result, representation,
            /*tagged_value_must_be_smi=*/true)) {
      // If this operation is not truncating it would deoptimize on overflow.
      // Check that we match this behavior and don't produce a value that is
      // larger than something this operation can produce. We could have
      // specialized instructions that use this value under this assumption.
      return Integer::null();
    }

    const char* error_str = NULL;
    result ^= result.CheckAndCanonicalize(thread, &error_str);
    if (error_str != NULL) {
      FATAL1("Failed to canonicalize: %s", error_str);
    }
  }

  return result.raw();
}

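// Constant-folds a binary double operation using the host's IEEE-754
// arithmetic, so e.g. EvaluateDoubleOp(1.0, 0.0, Token::kDIV) yields
// +infinity rather than trapping.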
double Evaluator::EvaluateDoubleOp(const double left,
                                   const double right,
                                   Token::Kind token_kind) {
  switch (token_kind) {
    case Token::kADD:
      return left + right;
    case Token::kSUB:
      return left - right;
    case Token::kMUL:
      return left * right;
    case Token::kDIV:
      return left / right;
    default:
      UNREACHABLE();
  }
}

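// Attempts to extract an integer constant from |value| into *result,
// returning true on success. Unbox instructions for the unboxed double,
// int64, and uint32 representations are looked through (with uint32 results
// truncated), Smi and Mint constants are returned directly, and Double
// constants are accepted only if they round-trip through int64 without loss.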
bool Evaluator::ToIntegerConstant(Value* value, int64_t* result) {
  if (!value->BindsToConstant()) {
    UnboxInstr* unbox = value->definition()->AsUnbox();
    if (unbox != nullptr) {
      switch (unbox->representation()) {
        case kUnboxedDouble:
        case kUnboxedInt64:
          return ToIntegerConstant(unbox->value(), result);
        case kUnboxedUint32:
          if (ToIntegerConstant(unbox->value(), result)) {
            *result = Evaluator::TruncateTo(*result, kUnboxedUint32);
            return true;
          }
          break;
        // No need to handle Unbox<Int32>(Constant(C)) because it gets
        // canonicalized to UnboxedConstant<Int32>(C).
        case kUnboxedInt32:
        default:
          break;
      }
    }
    return false;
  }
  const Object& constant = value->BoundConstant();
  if (constant.IsDouble()) {
    const Double& double_constant = Double::Cast(constant);
    *result = Utils::SafeDoubleToInt<int64_t>(double_constant.value());
    return (static_cast<double>(*result) == double_constant.value());
  } else if (constant.IsSmi()) {
    *result = Smi::Cast(constant).Value();
    return true;
  } else if (constant.IsMint()) {
    *result = Mint::Cast(constant).value();
    return true;
  }
  return false;
}

}  // namespace dart