| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/simd-scalar-lowering.h" | 5 #include "src/compiler/simd-scalar-lowering.h" |
| 6 #include "src/compiler/diamond.h" | 6 #include "src/compiler/diamond.h" |
| 7 #include "src/compiler/linkage.h" | 7 #include "src/compiler/linkage.h" |
| 8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
| 9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
| 10 | 10 |
| (...skipping 57 matching lines...) |
| 68 state_.Set(input, State::kOnStack); | 68 state_.Set(input, State::kOnStack); |
| 69 } | 69 } |
| 70 } | 70 } |
| 71 } | 71 } |
| 72 } | 72 } |
| 73 | 73 |
| 74 #define FOREACH_INT32X4_OPCODE(V) \ | 74 #define FOREACH_INT32X4_OPCODE(V) \ |
| 75 V(Int32x4Splat) \ | 75 V(Int32x4Splat) \ |
| 76 V(Int32x4ExtractLane) \ | 76 V(Int32x4ExtractLane) \ |
| 77 V(Int32x4ReplaceLane) \ | 77 V(Int32x4ReplaceLane) \ |
| 78 V(Int32x4Neg) \ |
| 79 V(Simd128Not) \ |
| 78 V(Int32x4Add) \ | 80 V(Int32x4Add) \ |
| 79 V(Int32x4Sub) \ | 81 V(Int32x4Sub) \ |
| 80 V(Int32x4Mul) \ | 82 V(Int32x4Mul) \ |
| 81 V(Simd128And) \ | 83 V(Simd128And) \ |
| 82 V(Simd128Or) \ | 84 V(Simd128Or) \ |
| 83 V(Simd128Xor) | 85 V(Simd128Xor) |
| 84 | 86 |
| 85 #define FOREACH_FLOAT32X4_OPCODE(V) \ | 87 #define FOREACH_FLOAT32X4_OPCODE(V) \ |
| 86 V(Float32x4Splat) \ | 88 V(Float32x4Splat) \ |
| 87 V(Float32x4ExtractLane) \ | 89 V(Float32x4ExtractLane) \ |
| 88 V(Float32x4ReplaceLane) \ | 90 V(Float32x4ReplaceLane) \ |
| 91 V(Float32x4Abs) \ |
| 92 V(Float32x4Neg) \ |
| 89 V(Float32x4Add) \ | 93 V(Float32x4Add) \ |
| 90 V(Float32x4Sub) \ | 94 V(Float32x4Sub) \ |
| 91 V(Float32x4Mul) \ | 95 V(Float32x4Mul) \ |
| 92 V(Float32x4Div) \ | 96 V(Float32x4Div) \ |
| 93 V(Float32x4Min) \ | 97 V(Float32x4Min) \ |
| 94 V(Float32x4Max) | 98 V(Float32x4Max) \ |
| 99 V(Float32x4FromInt32x4) \ |
| 100 V(Float32x4FromUint32x4) |
| 95 | 101 |
| 96 void SimdScalarLowering::SetLoweredType(Node* node, Node* output) { | 102 void SimdScalarLowering::SetLoweredType(Node* node, Node* output) { |
| 97 switch (node->opcode()) { | 103 switch (node->opcode()) { |
| 98 #define CASE_STMT(name) case IrOpcode::k##name: | 104 #define CASE_STMT(name) case IrOpcode::k##name: |
| 99 FOREACH_INT32X4_OPCODE(CASE_STMT) | 105 FOREACH_INT32X4_OPCODE(CASE_STMT) |
| 100 case IrOpcode::kReturn: | 106 case IrOpcode::kReturn: |
| 101 case IrOpcode::kParameter: | 107 case IrOpcode::kParameter: |
| 102 case IrOpcode::kCall: { | 108 case IrOpcode::kCall: { |
| 103 replacements_[node->id()].type = SimdType::kInt32; | 109 replacements_[node->id()].type = SimdType::kInt32; |
| 104 break; | 110 break; |
| 105 } | 111 } |
| 106 FOREACH_FLOAT32X4_OPCODE(CASE_STMT) { | 112 FOREACH_FLOAT32X4_OPCODE(CASE_STMT) { |
| 107 replacements_[node->id()].type = SimdType::kFloat32; | 113 replacements_[node->id()].type = SimdType::kFloat32; |
| 108 break; | 114 break; |
| 109 } | 115 } |
| 110 #undef CASE_STMT | 116 #undef CASE_STMT |
| 111 default: | 117 default: { |
| 112 replacements_[node->id()].type = replacements_[output->id()].type; | 118 if (output->opcode() == IrOpcode::kFloat32x4FromInt32x4 || |
| 119 output->opcode() == IrOpcode::kFloat32x4FromUint32x4) { |
| 120 replacements_[node->id()].type = SimdType::kInt32; |
| 121 } else { |
| 122 replacements_[node->id()].type = replacements_[output->id()].type; |
| 123 } |
| 124 } |
| 113 } | 125 } |
| 114 } | 126 } |
| 115 | 127 |
| 116 static int GetParameterIndexAfterLowering( | 128 static int GetParameterIndexAfterLowering( |
| 117 Signature<MachineRepresentation>* signature, int old_index) { | 129 Signature<MachineRepresentation>* signature, int old_index) { |
| 118 // In function calls, the simd128 types are passed as 4 Int32 types. The | 130 // In function calls, the simd128 types are passed as 4 Int32 types. The |
| 119 // parameters are typecast to the types as needed for various operations. | 131 // parameters are typecast to the types as needed for various operations. |
| 120 int result = old_index; | 132 int result = old_index; |
| 121 for (int i = 0; i < old_index; ++i) { | 133 for (int i = 0; i < old_index; ++i) { |
| 122 if (signature->GetParam(i) == MachineRepresentation::kSimd128) { | 134 if (signature->GetParam(i) == MachineRepresentation::kSimd128) { |
| (...skipping 99 matching lines...) |
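Note: the comment in GetParameterIndexAfterLowering above says simd128 values are passed to calls as four Int32 values, so a parameter's lowered index shifts by 3 for every simd128 parameter that precedes it. A minimal standalone sketch of that remapping (illustration only, not part of this CL; it assumes the folded loop body adds 3 per preceding simd128 parameter, and the helper name is hypothetical):

#include <cstdio>
#include <vector>

enum class Rep { kWord32, kSimd128 };

// Hypothetical helper mirroring GetParameterIndexAfterLowering for a plain
// vector of representations instead of a compiler Signature.
int ParamIndexAfterLowering(const std::vector<Rep>& params, int old_index) {
  int result = old_index;
  for (int i = 0; i < old_index; ++i) {
    // One lane reuses the original slot; the other three lanes are new slots.
    if (params[i] == Rep::kSimd128) result += 3;
  }
  return result;
}

int main() {
  // Signature (int32, simd128, int32): parameter 2 lands at index 5 after lowering.
  std::vector<Rep> params = {Rep::kWord32, Rep::kSimd128, Rep::kWord32};
  std::printf("%d\n", ParamIndexAfterLowering(params, 2));  // prints 5
  return 0;
}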
| 222 graph()->NewNode(store_op, base, indices[i], rep_inputs[i]); | 234 graph()->NewNode(store_op, base, indices[i], rep_inputs[i]); |
| 223 } | 235 } |
| 224 } | 236 } |
| 225 | 237 |
| 226 ReplaceNode(node, rep_nodes); | 238 ReplaceNode(node, rep_nodes); |
| 227 } else { | 239 } else { |
| 228 DefaultLowering(node); | 240 DefaultLowering(node); |
| 229 } | 241 } |
| 230 } | 242 } |
| 231 | 243 |
| 232 void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType rep_type, | 244 void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type, |
| 233 const Operator* op) { | 245 const Operator* op) { |
| 234 DCHECK(node->InputCount() == 2); | 246 DCHECK(node->InputCount() == 2); |
| 235 Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type); | 247 Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type); |
| 236 Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type); | 248 Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type); |
| 237 Node* rep_node[kMaxLanes]; | 249 Node* rep_node[kMaxLanes]; |
| 238 for (int i = 0; i < kMaxLanes; ++i) { | 250 for (int i = 0; i < kMaxLanes; ++i) { |
| 239 rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]); | 251 rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]); |
| 240 } | 252 } |
| 241 ReplaceNode(node, rep_node); | 253 ReplaceNode(node, rep_node); |
| 242 } | 254 } |
| 243 | 255 |
| 256 void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type, |
| 257 const Operator* op) { |
| 258 DCHECK(node->InputCount() == 1); |
| 259 Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type); |
| 260 Node* rep_node[kMaxLanes]; |
| 261 for (int i = 0; i < kMaxLanes; ++i) { |
| 262 rep_node[i] = graph()->NewNode(op, rep[i]); |
| 263 } |
| 264 ReplaceNode(node, rep_node); |
| 265 } |
| 266 |
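Note: LowerBinaryOp and the new LowerUnaryOp follow the same pattern: the single 128-bit node is replaced by kMaxLanes (four) scalar nodes, and the scalar machine operator is applied once per lane. A standalone sketch of that lane-wise idea, using plain arrays in place of graph nodes (illustration only; the names are hypothetical):

#include <array>
#include <cstdio>

constexpr int kMaxLanes = 4;
using F32x4 = std::array<float, kMaxLanes>;  // a simd128 value modeled as 4 lanes

template <typename UnaryOp>
F32x4 LowerUnary(const F32x4& a, UnaryOp op) {
  F32x4 result;
  for (int i = 0; i < kMaxLanes; ++i) result[i] = op(a[i]);  // one scalar op per lane
  return result;
}

template <typename BinaryOp>
F32x4 LowerBinary(const F32x4& a, const F32x4& b, BinaryOp op) {
  F32x4 result;
  for (int i = 0; i < kMaxLanes; ++i) result[i] = op(a[i], b[i]);
  return result;
}

int main() {
  F32x4 a = {1.0f, -2.0f, 3.0f, -4.0f};
  F32x4 b = {0.5f, 0.5f, 0.5f, 0.5f};
  F32x4 neg = LowerUnary(a, [](float x) { return -x; });                  // like Float32x4Neg
  F32x4 sum = LowerBinary(a, b, [](float x, float y) { return x + y; });  // like Float32x4Add
  std::printf("%f %f\n", neg[0], sum[0]);  // -1.000000 1.500000
  return 0;
}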
| 244 void SimdScalarLowering::LowerNode(Node* node) { | 267 void SimdScalarLowering::LowerNode(Node* node) { |
| 245 SimdType rep_type = ReplacementType(node); | 268 SimdType rep_type = ReplacementType(node); |
| 246 switch (node->opcode()) { | 269 switch (node->opcode()) { |
| 247 case IrOpcode::kStart: { | 270 case IrOpcode::kStart: { |
| 248 int parameter_count = GetParameterCountAfterLowering(); | 271 int parameter_count = GetParameterCountAfterLowering(); |
| 249 // Only exchange the node if the parameter count actually changed. | 272 // Only exchange the node if the parameter count actually changed. |
| 250 if (parameter_count != static_cast<int>(signature()->parameter_count())) { | 273 if (parameter_count != static_cast<int>(signature()->parameter_count())) { |
| 251 int delta = | 274 int delta = |
| 252 parameter_count - static_cast<int>(signature()->parameter_count()); | 275 parameter_count - static_cast<int>(signature()->parameter_count()); |
| 253 int new_output_count = node->op()->ValueOutputCount() + delta; | 276 int new_output_count = node->op()->ValueOutputCount() + delta; |
| (...skipping 138 matching lines...) |
| 392 LowerBinaryOp(node, rep_type, machine()->instruction()); \ | 415 LowerBinaryOp(node, rep_type, machine()->instruction()); \ |
| 393 break; \ | 416 break; \ |
| 394 } | 417 } |
| 395 I32X4_BINOP_CASE(kInt32x4Add, Int32Add) | 418 I32X4_BINOP_CASE(kInt32x4Add, Int32Add) |
| 396 I32X4_BINOP_CASE(kInt32x4Sub, Int32Sub) | 419 I32X4_BINOP_CASE(kInt32x4Sub, Int32Sub) |
| 397 I32X4_BINOP_CASE(kInt32x4Mul, Int32Mul) | 420 I32X4_BINOP_CASE(kInt32x4Mul, Int32Mul) |
| 398 I32X4_BINOP_CASE(kSimd128And, Word32And) | 421 I32X4_BINOP_CASE(kSimd128And, Word32And) |
| 399 I32X4_BINOP_CASE(kSimd128Or, Word32Or) | 422 I32X4_BINOP_CASE(kSimd128Or, Word32Or) |
| 400 I32X4_BINOP_CASE(kSimd128Xor, Word32Xor) | 423 I32X4_BINOP_CASE(kSimd128Xor, Word32Xor) |
| 401 #undef I32X4_BINOP_CASE | 424 #undef I32X4_BINOP_CASE |
| 425 case IrOpcode::kInt32x4Neg: { |
| 426 DCHECK(node->InputCount() == 1); |
| 427 Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type); |
| 428 Node* rep_node[kMaxLanes]; |
| 429 Node* zero = graph()->NewNode(common()->Int32Constant(0)); |
| 430 for (int i = 0; i < kMaxLanes; ++i) { |
| 431 rep_node[i] = graph()->NewNode(machine()->Int32Sub(), zero, rep[i]); |
| 432 } |
| 433 ReplaceNode(node, rep_node); |
| 434 break; |
| 435 } |
| 436 case IrOpcode::kSimd128Not: { |
| 437 DCHECK(node->InputCount() == 1); |
| 438 Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type); |
| 439 Node* rep_node[kMaxLanes]; |
| 440 Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff)); |
| 441 for (int i = 0; i < kMaxLanes; ++i) { |
| 442 rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask); |
| 443 } |
| 444 ReplaceNode(node, rep_node); |
| 445 break; |
| 446 } |
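Note: the two cases above lower per lane with plain integer identities: Int32x4Neg as 0 - x (Int32Sub) and Simd128Not as x ^ 0xffffffff (Word32Xor). A small self-contained check of those identities on 32-bit lanes (illustration only; it relies on the usual two's-complement wraparound that the 32-bit machine ops provide):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t lanes[4] = {0u, 1u, 0x80000000u, 0xdeadbeefu};
  for (uint32_t x : lanes) {
    // Int32x4Neg lane: negation written as a subtraction from zero.
    assert(0u - x == static_cast<uint32_t>(-static_cast<int64_t>(x)));
    // Simd128Not lane: bitwise NOT written as XOR with an all-ones mask.
    assert((x ^ 0xffffffffu) == ~x);
  }
  return 0;
}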
| 402 #define F32X4_BINOP_CASE(name) \ | 447 #define F32X4_BINOP_CASE(name) \ |
| 403 case IrOpcode::kFloat32x4##name: { \ | 448 case IrOpcode::kFloat32x4##name: { \ |
| 404 LowerBinaryOp(node, rep_type, machine()->Float32##name()); \ | 449 LowerBinaryOp(node, rep_type, machine()->Float32##name()); \ |
| 405 break; \ | 450 break; \ |
| 406 } | 451 } |
| 407 F32X4_BINOP_CASE(Add) | 452 F32X4_BINOP_CASE(Add) |
| 408 F32X4_BINOP_CASE(Sub) | 453 F32X4_BINOP_CASE(Sub) |
| 409 F32X4_BINOP_CASE(Mul) | 454 F32X4_BINOP_CASE(Mul) |
| 410 F32X4_BINOP_CASE(Div) | 455 F32X4_BINOP_CASE(Div) |
| 411 F32X4_BINOP_CASE(Min) | 456 F32X4_BINOP_CASE(Min) |
| 412 F32X4_BINOP_CASE(Max) | 457 F32X4_BINOP_CASE(Max) |
| 413 #undef F32X4_BINOP_CASE | 458 #undef F32X4_BINOP_CASE |
| 459 #define F32X4_UNOP_CASE(name) \ |
| 460 case IrOpcode::kFloat32x4##name: { \ |
| 461 LowerUnaryOp(node, rep_type, machine()->Float32##name()); \ |
| 462 break; \ |
| 463 } |
| 464 F32X4_UNOP_CASE(Abs) |
| 465 F32X4_UNOP_CASE(Neg) |
| 466 F32X4_UNOP_CASE(Sqrt) |
| 467 #undef F32X4_UNOP_CASE |
| 468 case IrOpcode::kFloat32x4FromInt32x4: { |
| 469 LowerUnaryOp(node, SimdType::kInt32, machine()->RoundInt32ToFloat32()); |
| 470 break; |
| 471 } |
| 472 case IrOpcode::kFloat32x4FromUint32x4: { |
| 473 LowerUnaryOp(node, SimdType::kInt32, machine()->RoundUint32ToFloat32()); |
| 474 break; |
| 475 } |
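Note: for the two conversion cases above, the input replacements are forced to the kInt32 type (see the new default case in SetLoweredType), and each lane is then converted with RoundInt32ToFloat32 or RoundUint32ToFloat32. A tiny sketch of why the signed and unsigned paths need distinct ops even though the lane bits are identical (illustration only):

#include <cstdint>
#include <cstdio>

int main() {
  // The same lane bit pattern 0xffffffff:
  int32_t as_signed = -1;
  uint32_t as_unsigned = 0xffffffffu;
  std::printf("%f\n", static_cast<float>(as_signed));    // -1.000000
  std::printf("%f\n", static_cast<float>(as_unsigned));  // 4294967296.000000 (nearest float)
  return 0;
}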
| 414 case IrOpcode::kInt32x4Splat: | 476 case IrOpcode::kInt32x4Splat: |
| 415 case IrOpcode::kFloat32x4Splat: { | 477 case IrOpcode::kFloat32x4Splat: { |
| 416 Node* rep_node[kMaxLanes]; | 478 Node* rep_node[kMaxLanes]; |
| 417 for (int i = 0; i < kMaxLanes; ++i) { | 479 for (int i = 0; i < kMaxLanes; ++i) { |
| 418 if (HasReplacement(0, node->InputAt(0))) { | 480 if (HasReplacement(0, node->InputAt(0))) { |
| 419 rep_node[i] = GetReplacements(node->InputAt(0))[0]; | 481 rep_node[i] = GetReplacements(node->InputAt(0))[0]; |
| 420 } else { | 482 } else { |
| 421 rep_node[i] = node->InputAt(0); | 483 rep_node[i] = node->InputAt(0); |
| 422 } | 484 } |
| 423 } | 485 } |
| (...skipping 130 matching lines...) |
| 554 } else { | 616 } else { |
| 555 UNREACHABLE(); | 617 UNREACHABLE(); |
| 556 } | 618 } |
| 557 } | 619 } |
| 558 ReplaceNode(phi, rep_nodes); | 620 ReplaceNode(phi, rep_nodes); |
| 559 } | 621 } |
| 560 } | 622 } |
| 561 } // namespace compiler | 623 } // namespace compiler |
| 562 } // namespace internal | 624 } // namespace internal |
| 563 } // namespace v8 | 625 } // namespace v8 |