OLD | NEW |
(Empty) | |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "src/interpreter/bytecode-register-optimizer.h" |
| 6 |
| 7 namespace v8 { |
| 8 namespace internal { |
| 9 namespace interpreter { |
| 10 |
// A class for tracking the state of a register. This class tracks
// which equivalence set a register is a member of and also whether a
// register is materialized in the bytecode stream.
class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
 public:
  RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
      : register_(reg),
        equivalence_id_(equivalence_id),
        materialized_(materialized),
        next_(this),
        prev_(this) {}

  // Splice this register out of its current equivalence set and into
  // the set containing |info|. The register becomes unmaterialized.
  void AddToEquivalenceSetOf(RegisterInfo* info);
  // Splice this register out of its current equivalence set and make
  // it the sole member of a fresh set with the given id and
  // materialization state.
  void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
  bool IsOnlyMemberOfEquivalenceSet() const;
  bool IsOnlyMaterializedMemberOfEquivalenceSet() const;
  bool IsInSameEquivalenceSet(RegisterInfo* info) const;

  // Get a member of this register's equivalence set that is
  // materialized. The materialized equivalent will be this register
  // if it is materialized. Returns nullptr if no materialized
  // equivalent exists.
  RegisterInfo* GetMaterializedEquivalent();

  // Get a member of this register's equivalence set that is
  // materialized and not register |reg|. The materialized equivalent
  // will be this register if it is materialized. Returns nullptr if
  // no materialized equivalent exists.
  RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg);

  // Get a member of this register's equivalence set that is intended
  // to be materialized in place of this register (which is currently
  // materialized). The best candidate is deemed to be the register
  // with the lowest index as this permits temporary registers to be
  // removed from the bytecode stream. Returns nullptr if no candidate
  // exists.
  RegisterInfo* GetEquivalentToMaterialize();

  Register register_value() const { return register_; }
  bool materialized() const { return materialized_; }
  void set_materialized(bool materialized) { materialized_ = materialized; }
  void set_equivalence_id(uint32_t equivalence_id) {
    equivalence_id_ = equivalence_id;
  }
  uint32_t equivalence_id() const { return equivalence_id_; }

 private:
  Register register_;
  uint32_t equivalence_id_;
  bool materialized_;

  // Equivalence set pointers. Members of a set form a circular
  // doubly-linked list; a singleton set links back to itself.
  RegisterInfo* next_;
  RegisterInfo* prev_;

  DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
};
| 68 |
void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
    RegisterInfo* info) {
  // Fix old list: unlink this node from its current circular list.
  next_->prev_ = prev_;
  prev_->next_ = next_;
  // Add to new list: insert this node immediately after |info|.
  next_ = info->next_;
  prev_ = info;
  prev_->next_ = this;
  next_->prev_ = this;
  set_equivalence_id(info->equivalence_id());
  // Joining an existing set means the value lives in another member,
  // so this register is no longer materialized.
  set_materialized(false);
}
| 82 |
| 83 void BytecodeRegisterOptimizer::RegisterInfo::MoveToNewEquivalenceSet( |
| 84 uint32_t equivalence_id, bool materialized) { |
| 85 next_->prev_ = prev_; |
| 86 prev_->next_ = next_; |
| 87 next_ = prev_ = this; |
| 88 equivalence_id_ = equivalence_id; |
| 89 materialized_ = materialized; |
| 90 } |
| 91 |
| 92 bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMemberOfEquivalenceSet() |
| 93 const { |
| 94 return this->next_ == this; |
| 95 } |
| 96 |
| 97 bool BytecodeRegisterOptimizer::RegisterInfo:: |
| 98 IsOnlyMaterializedMemberOfEquivalenceSet() const { |
| 99 DCHECK(materialized()); |
| 100 |
| 101 const RegisterInfo* visitor = this->next_; |
| 102 while (visitor != this) { |
| 103 if (visitor->materialized()) { |
| 104 return false; |
| 105 } |
| 106 visitor = visitor->next_; |
| 107 } |
| 108 return true; |
| 109 } |
| 110 |
| 111 bool BytecodeRegisterOptimizer::RegisterInfo::IsInSameEquivalenceSet( |
| 112 RegisterInfo* info) const { |
| 113 return equivalence_id() == info->equivalence_id(); |
| 114 } |
| 115 |
| 116 BytecodeRegisterOptimizer::RegisterInfo* |
| 117 BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() { |
| 118 RegisterInfo* visitor = this; |
| 119 do { |
| 120 if (visitor->materialized()) { |
| 121 return visitor; |
| 122 } |
| 123 visitor = visitor->next_; |
| 124 } while (visitor != this); |
| 125 |
| 126 return nullptr; |
| 127 } |
| 128 |
| 129 BytecodeRegisterOptimizer::RegisterInfo* |
| 130 BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalentOtherThan( |
| 131 Register reg) { |
| 132 RegisterInfo* visitor = this; |
| 133 do { |
| 134 if (visitor->materialized() && visitor->register_value() != reg) { |
| 135 return visitor; |
| 136 } |
| 137 visitor = visitor->next_; |
| 138 } while (visitor != this); |
| 139 |
| 140 return nullptr; |
| 141 } |
| 142 |
| 143 BytecodeRegisterOptimizer::RegisterInfo* |
| 144 BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() { |
| 145 DCHECK(this->materialized()); |
| 146 RegisterInfo* visitor = this->next_; |
| 147 RegisterInfo* best_info = nullptr; |
| 148 while (visitor != this) { |
| 149 if (visitor->materialized()) { |
| 150 return nullptr; |
| 151 } |
| 152 if (best_info == nullptr || |
| 153 visitor->register_value() < best_info->register_value()) { |
| 154 best_info = visitor; |
| 155 } |
| 156 visitor = visitor->next_; |
| 157 } |
| 158 return best_info; |
| 159 } |
| 160 |
BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
    Zone* zone, TemporaryRegisterAllocator* register_allocator,
    int parameter_count, BytecodePipelineStage* next_stage)
    : accumulator_(Register::virtual_accumulator()),
      temporary_base_(register_allocator->allocation_base()),
      register_info_table_(zone),
      equivalence_id_(0),
      next_stage_(next_stage),
      flushed_(false),
      zone_(zone) {
  // Observe temporary register free events so equivalence state for
  // temporaries can be retired (see TemporaryRegisterFreeEvent).
  register_allocator->set_observer(this);

  // Calculate offset so register index values can be mapped into
  // a vector of register metadata.
  if (parameter_count != 0) {
    register_info_table_offset_ =
        -Register::FromParameterIndex(0, parameter_count).index();
  } else {
    // TODO(oth): This path shouldn't be necessary in bytecode generated
    // from Javascript, but a set of tests do not include the JS receiver.
    register_info_table_offset_ = -accumulator_.index();
  }

  // Initialize register map for parameters, locals, and the
  // accumulator. Each register starts as the sole, materialized member
  // of its own equivalence set.
  register_info_table_.resize(register_info_table_offset_ +
                              static_cast<size_t>(temporary_base_.index()));
  for (size_t i = 0; i < register_info_table_.size(); ++i) {
    register_info_table_[i] = new (zone) RegisterInfo(
        RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
    DCHECK_EQ(register_info_table_[i]->register_value().index(),
              RegisterFromRegisterInfoTableIndex(i).index());
  }
  accumulator_info_ = GetRegisterInfo(accumulator_);
  DCHECK(accumulator_info_->register_value() == accumulator_);
}
| 197 |
void BytecodeRegisterOptimizer::FlushState() {
  if (flushed_) {
    return;
  }

  // Materialize all live registers, i.e. registers whose value
  // currently lives only in another member of their equivalence set.
  size_t count = register_info_table_.size();
  for (size_t i = 0; i < count; ++i) {
    RegisterInfo* reg_info = register_info_table_[i];
    if (!reg_info->IsOnlyMemberOfEquivalenceSet() &&
        !reg_info->materialized()) {
      // Only temporaries and the accumulator can be unmaterialized;
      // observable registers are materialized eagerly on write.
      DCHECK(RegisterIsTemporary(reg_info->register_value()) ||
             reg_info->register_value() == accumulator_);
      Materialize(reg_info);
    }
  }

  // Break all existing equivalences: every register becomes the sole,
  // materialized member of a fresh equivalence set.
  for (size_t i = 0; i < count; ++i) {
    RegisterInfo* reg_info = register_info_table_[i];
    if (!reg_info->IsOnlyMemberOfEquivalenceSet()) {
      reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
    }
  }

  flushed_ = true;
}
| 225 |
// override
void BytecodeRegisterOptimizer::FlushBasicBlock() {
  // Equivalences cannot be carried across a basic block boundary.
  FlushState();
  next_stage_->FlushBasicBlock();
}
| 231 |
// override
size_t BytecodeRegisterOptimizer::FlushForOffset() {
  // The reported offset is only meaningful once all pending register
  // state has been emitted to the stream.
  FlushState();
  return next_stage_->FlushForOffset();
}
| 237 |
// override
void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
  // Any bytecode written invalidates the previously flushed state.
  flushed_ = false;

  //
  // Transfers with observable registers as the destination will be
  // immediately materialized so the source position information will
  // be ordered correctly.
  //
  // Transfers without observable destination registers will initially
  // be emitted as Nop's with the source position. They may, or may
  // not, be materialized by the optimizer. However, the source
  // position is not lost and being attached to a Nop is fine as the
  // destination register is not observable in the debugger.
  //
  // Pure register transfers are tracked rather than emitted directly.
  switch (node->bytecode()) {
    case Bytecode::kLdar: {
      DoLdar(node);
      return;
    }
    case Bytecode::kStar: {
      DoStar(node);
      return;
    }
    case Bytecode::kMov: {
      DoMov(node);
      return;
    }
    default:
      break;
  }

  if (Bytecodes::IsJump(node->bytecode()) ||
      node->bytecode() == Bytecode::kDebugger) {
    // The debugger can manipulate locals and parameters, flush
    // everything before handing over to it. Similarly, all state must
    // be flushed before emitting a jump due to how bytecode offsets
    // for jumps are evaluated.
    FlushState();
  }

  // Substitute/materialize operands as needed, then forward the node.
  PrepareOperands(node);
  WriteToNextStage(node);
}
| 282 |
// Forwards |node| to the next pipeline stage unchanged.
void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
  next_stage_->Write(node);
}
| 286 |
// Attaches |source_info| to |node| before forwarding it to the next
// pipeline stage.
void BytecodeRegisterOptimizer::WriteToNextStage(
    BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
  node->source_info().Update(source_info);
  next_stage_->Write(node);
}
| 292 |
// Emits a bytecode that copies |input_info|'s register into
// |output_info|'s register and marks the output as materialized.
void BytecodeRegisterOptimizer::OutputRegisterTransfer(
    RegisterInfo* input_info, RegisterInfo* output_info,
    const BytecodeSourceInfo& source_info) {
  Register input = input_info->register_value();
  Register output = output_info->register_value();
  DCHECK_NE(input.index(), output.index());

  // Choose the narrowest transfer bytecode: Star when reading the
  // accumulator, Ldar when writing it, otherwise a register Mov.
  if (input == accumulator_) {
    uint32_t operand = static_cast<uint32_t>(output.ToOperand());
    OperandScale scale = Bytecodes::OperandSizesToScale(output.SizeOfOperand());
    BytecodeNode node(Bytecode::kStar, operand, scale);
    WriteToNextStage(&node, source_info);
  } else if (output == accumulator_) {
    uint32_t operand = static_cast<uint32_t>(input.ToOperand());
    OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand());
    BytecodeNode node(Bytecode::kLdar, operand, scale);
    WriteToNextStage(&node, source_info);
  } else {
    uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
    uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
    OperandScale scale = Bytecodes::OperandSizesToScale(input.SizeOfOperand(),
                                                        output.SizeOfOperand());
    BytecodeNode node(Bytecode::kMov, operand0, operand1, scale);
    WriteToNextStage(&node, source_info);
  }
  // The output register now holds the value in the bytecode stream.
  output_info->set_materialized(true);
}
| 320 |
| 321 void BytecodeRegisterOptimizer::CreateMaterializedEquivalent( |
| 322 RegisterInfo* info) { |
| 323 DCHECK(info->materialized()); |
| 324 RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize(); |
| 325 if (unmaterialized) { |
| 326 OutputRegisterTransfer(info, unmaterialized); |
| 327 } |
| 328 } |
| 329 |
| 330 BytecodeRegisterOptimizer::RegisterInfo* |
| 331 BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) { |
| 332 return info->materialized() ? info : info->GetMaterializedEquivalent(); |
| 333 } |
| 334 |
| 335 BytecodeRegisterOptimizer::RegisterInfo* |
| 336 BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator( |
| 337 RegisterInfo* info) { |
| 338 if (info->materialized()) { |
| 339 return info; |
| 340 } |
| 341 |
| 342 RegisterInfo* result = info->GetMaterializedEquivalentOtherThan(accumulator_); |
| 343 if (result == nullptr) { |
| 344 Materialize(info); |
| 345 result = info; |
| 346 } |
| 347 DCHECK(result->register_value() != accumulator_); |
| 348 return result; |
| 349 } |
| 350 |
| 351 void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) { |
| 352 if (!info->materialized()) { |
| 353 RegisterInfo* materialized = info->GetMaterializedEquivalent(); |
| 354 OutputRegisterTransfer(materialized, info); |
| 355 } |
| 356 } |
| 357 |
// Records a transfer from |input_info|'s register to |output_info|'s
// register, emitting bytecode only when the destination is observable
// (or a Nop to preserve a valid source position).
void BytecodeRegisterOptimizer::RegisterTransfer(
    RegisterInfo* input_info, RegisterInfo* output_info,
    const BytecodeSourceInfo& source_info) {
  // Materialize an alternate in the equivalence set that
  // |output_info| is leaving, so the outgoing value is not lost.
  if (output_info->materialized()) {
    CreateMaterializedEquivalent(output_info);
  }

  // Add |output_info| to new equivalence set.
  if (!output_info->IsInSameEquivalenceSet(input_info)) {
    output_info->AddToEquivalenceSetOf(input_info);
  }

  bool output_is_observable =
      RegisterIsObservable(output_info->register_value());
  if (output_is_observable) {
    // Force store to be emitted when register is observable.
    output_info->set_materialized(false);
    RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
    OutputRegisterTransfer(materialized_info, output_info, source_info);
  } else if (source_info.is_valid()) {
    // Emit a placeholder nop to maintain source position info.
    EmitNopForSourceInfo(source_info);
  }
}
| 384 |
| 385 void BytecodeRegisterOptimizer::EmitNopForSourceInfo( |
| 386 const BytecodeSourceInfo& source_info) const { |
| 387 BytecodeNode nop(Bytecode::kNop); |
| 388 nop.source_info().Update(source_info); |
| 389 WriteToNextStage(&nop); |
| 390 } |
| 391 |
| 392 void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) { |
| 393 Register input = GetRegisterInputOperand( |
| 394 0, node->bytecode(), node->operands(), node->operand_count()); |
| 395 RegisterInfo* input_info = GetRegisterInfo(input); |
| 396 RegisterTransfer(input_info, accumulator_info_, node->source_info()); |
| 397 } |
| 398 |
| 399 void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) { |
| 400 Register input = GetRegisterInputOperand( |
| 401 0, node->bytecode(), node->operands(), node->operand_count()); |
| 402 RegisterInfo* input_info = GetRegisterInfo(input); |
| 403 Register output = GetRegisterOutputOperand( |
| 404 1, node->bytecode(), node->operands(), node->operand_count()); |
| 405 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); |
| 406 RegisterTransfer(input_info, output_info, node->source_info()); |
| 407 } |
| 408 |
| 409 void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) { |
| 410 Register output = GetRegisterOutputOperand( |
| 411 0, node->bytecode(), node->operands(), node->operand_count()); |
| 412 RegisterInfo* output_info = GetOrCreateRegisterInfo(output); |
| 413 RegisterTransfer(accumulator_info_, output_info, node->source_info()); |
| 414 } |
| 415 |
// Prepares a register that is about to be clobbered: preserves its old
// value for the rest of its equivalence set, then moves it into a
// fresh, materialized singleton set.
void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
    RegisterInfo* reg_info) {
  if (reg_info->materialized()) {
    CreateMaterializedEquivalent(reg_info);
  }
  reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
| 423 |
| 424 void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand( |
| 425 Register start, int count) { |
| 426 for (int i = 0; i < count; ++i) { |
| 427 Register reg(start.index() + i); |
| 428 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); |
| 429 PrepareRegisterOutputOperand(reg_info); |
| 430 } |
| 431 } |
| 432 |
| 433 Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand( |
| 434 Register reg) { |
| 435 // For a temporary register, RegInfo state may need be created. For |
| 436 // locals and parameters, the RegInfo state is created in the |
| 437 // BytecodeRegisterOptimizer constructor. |
| 438 RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg); |
| 439 if (reg_info->materialized()) { |
| 440 return reg; |
| 441 } else { |
| 442 RegisterInfo* equivalent_info = |
| 443 GetMaterializedEquivalentNotAccumulator(reg_info); |
| 444 return equivalent_info->register_value(); |
| 445 } |
| 446 } |
| 447 |
| 448 void BytecodeRegisterOptimizer::PrepareRegisterInputOperand( |
| 449 BytecodeNode* const node, Register reg, int operand_index) { |
| 450 Register equivalent = GetEquivalentRegisterForInputOperand(reg); |
| 451 node->operands()[operand_index] = |
| 452 static_cast<uint32_t>(equivalent.ToOperand()); |
| 453 // Update operand scale as equivalent may be different. |
| 454 OperandScale operand_scale = |
| 455 Bytecodes::OperandSizesToScale(equivalent.SizeOfOperand()); |
| 456 if (operand_scale > node->operand_scale()) { |
| 457 node->set_operand_scale(operand_scale); |
| 458 } |
| 459 } |
| 460 |
| 461 void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start, |
| 462 int count) { |
| 463 for (int i = 0; i < count; ++i) { |
| 464 Register current(start.index() + i); |
| 465 RegisterInfo* input_info = GetRegisterInfo(current); |
| 466 Materialize(input_info); |
| 467 } |
| 468 } |
| 469 |
void BytecodeRegisterOptimizer::PrepareRegisterOperands(
    BytecodeNode* const node) {
  //
  // For each input operand, get a materialized equivalent if it is
  // just a single register, otherwise materialize register range.
  // Update operand_scale if necessary.
  //
  // For each output register about to be clobbered, materialize an
  // equivalent if it exists. Put each register in it's own equivalence set.
  //
  int register_operand_bitmap =
      Bytecodes::GetRegisterOperandBitmap(node->bytecode());
  const OperandType* operand_types =
      Bytecodes::GetOperandTypes(node->bytecode());
  uint32_t* operands = node->operands();
  // Walk the operands; the bitmap has bit i set iff operand i is a
  // register operand.
  for (int i = 0; register_operand_bitmap != 0;
       ++i, register_operand_bitmap >>= 1) {
    if ((register_operand_bitmap & 1) == 0) {
      continue;
    }
    OperandType operand_type = operand_types[i];
    int count = 0;
    // A register operand followed by kRegCount is a variable-length
    // range whose length is stored in the next operand slot.
    if (operand_types[i + 1] == OperandType::kRegCount) {
      count = static_cast<int>(operands[i + 1]);
      if (count == 0) {
        continue;
      }
    } else {
      count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
    }

    Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
    if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
      if (count == 1) {
        // Single input register: may be substituted by an equivalent.
        PrepareRegisterInputOperand(node, reg, i);
      } else if (count > 1) {
        // Ranges must be materialized in place.
        PrepareRegisterRangeInputOperand(reg, count);
      }
    } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
      PrepareRegisterRangeOutputOperand(reg, count);
    }
  }
}
| 513 |
| 514 void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) { |
| 515 // Materialize the accumulator if it is read by the bytecode. The |
| 516 // accumulator is special and no other register can be materialized |
| 517 // in it's place. |
| 518 if (Bytecodes::ReadsAccumulator(node->bytecode()) && |
| 519 !accumulator_info_->materialized()) { |
| 520 Materialize(accumulator_info_); |
| 521 } |
| 522 |
| 523 // Materialize an equivalent to the accumulator if it will be |
| 524 // clobbered when the bytecode is dispatched. |
| 525 if (Bytecodes::WritesAccumulator(node->bytecode())) { |
| 526 PrepareRegisterOutputOperand(accumulator_info_); |
| 527 } |
| 528 } |
| 529 |
// Prepares |node| for emission. The accumulator is handled first as it
// cannot be substituted by an equivalent register.
void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
  PrepareAccumulator(node);
  PrepareRegisterOperands(node);
}
| 534 |
// static
// Decodes operand |index| of |bytecode| as a register, asserting it is
// declared as a register input.
Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
  DCHECK_LT(index, operand_count);
  DCHECK(Bytecodes::IsRegisterInputOperandType(
      Bytecodes::GetOperandType(bytecode, index)));
  return OperandToRegister(operands[index]);
}
| 543 |
// static
// Decodes operand |index| of |bytecode| as a register, asserting it is
// declared as a register output.
Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
    int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
  DCHECK_LT(index, operand_count);
  DCHECK(Bytecodes::IsRegisterOutputOperandType(
      Bytecodes::GetOperandType(bytecode, index)));
  return OperandToRegister(operands[index]);
}
| 552 |
| 553 BytecodeRegisterOptimizer::RegisterInfo* |
| 554 BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) { |
| 555 size_t index = GetRegisterInfoTableIndex(reg); |
| 556 return (index < register_info_table_.size()) ? register_info_table_[index] |
| 557 : nullptr; |
| 558 } |
| 559 |
| 560 BytecodeRegisterOptimizer::RegisterInfo* |
| 561 BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) { |
| 562 size_t index = GetRegisterInfoTableIndex(reg); |
| 563 return index < register_info_table_.size() ? register_info_table_[index] |
| 564 : NewRegisterInfo(reg); |
| 565 } |
| 566 |
// Grows the metadata table so it covers |reg| and returns the new
// entry. Must only be called for registers outside the current table.
BytecodeRegisterOptimizer::RegisterInfo*
BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
  size_t index = GetRegisterInfoTableIndex(reg);
  DCHECK_GE(index, register_info_table_.size());
  GrowRegisterMap(reg);
  return register_info_table_[index];
}
| 574 |
| 575 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) { |
| 576 DCHECK(RegisterIsTemporary(reg)); |
| 577 size_t index = GetRegisterInfoTableIndex(reg); |
| 578 DCHECK_GE(index, register_info_table_.size()); |
| 579 size_t new_size = index + 1; |
| 580 size_t old_size = register_info_table_.size(); |
| 581 register_info_table_.resize(new_size); |
| 582 for (size_t i = old_size; i < new_size; ++i) { |
| 583 register_info_table_[i] = new (zone()) RegisterInfo( |
| 584 RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false); |
| 585 } |
| 586 } |
| 587 |
| 588 void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) { |
| 589 RegisterInfo* info = GetRegisterInfo(reg); |
| 590 if (info != nullptr) { |
| 591 // If register is materialized and part of equivalence set, make |
| 592 // sure another member of the set holds the value before the |
| 593 // temporary register is removed. |
| 594 if (info->materialized()) { |
| 595 CreateMaterializedEquivalent(info); |
| 596 } |
| 597 info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false); |
| 598 } |
| 599 } |
| 600 |
| 601 } // namespace interpreter |
| 602 } // namespace internal |
| 603 } // namespace v8 |
OLD | NEW |