Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(160)

Side by Side Diff: runtime/vm/flow_graph_compiler.cc

Issue 10559035: Implement a simple register allocator that tries to keep instruction results in registers. (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 8 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX.
6 6
7 #include "vm/flow_graph_compiler.h" 7 #include "vm/flow_graph_compiler.h"
8 8
9 #include "vm/debugger.h" 9 #include "vm/debugger.h"
10 #include "vm/il_printer.h" 10 #include "vm/il_printer.h"
11 #include "vm/intrinsifier.h" 11 #include "vm/intrinsifier.h"
12 #include "vm/locations.h"
12 #include "vm/longjump.h" 13 #include "vm/longjump.h"
13 #include "vm/object_store.h" 14 #include "vm/object_store.h"
14 #include "vm/parser.h" 15 #include "vm/parser.h"
15 #include "vm/stub_code.h" 16 #include "vm/stub_code.h"
16 17
17 namespace dart { 18 namespace dart {
18 19
19 DECLARE_FLAG(bool, code_comments); 20 DECLARE_FLAG(bool, code_comments);
20 DECLARE_FLAG(bool, enable_type_checks); 21 DECLARE_FLAG(bool, enable_type_checks);
21 DECLARE_FLAG(bool, intrinsify); 22 DECLARE_FLAG(bool, intrinsify);
(...skipping 13 matching lines...) Expand all
35 current_block_(NULL), 36 current_block_(NULL),
36 exception_handlers_list_(NULL), 37 exception_handlers_list_(NULL),
37 pc_descriptors_list_(NULL), 38 pc_descriptors_list_(NULL),
38 stackmap_builder_(NULL), 39 stackmap_builder_(NULL),
39 block_info_(block_order.length()), 40 block_info_(block_order.length()),
40 deopt_stubs_(), 41 deopt_stubs_(),
41 is_optimizing_(is_optimizing), 42 is_optimizing_(is_optimizing),
42 bool_true_(Bool::ZoneHandle(Bool::True())), 43 bool_true_(Bool::ZoneHandle(Bool::True())),
43 bool_false_(Bool::ZoneHandle(Bool::False())), 44 bool_false_(Bool::ZoneHandle(Bool::False())),
44 double_class_(Class::ZoneHandle( 45 double_class_(Class::ZoneHandle(
45 Isolate::Current()->object_store()->double_class())) { 46 Isolate::Current()->object_store()->double_class())),
47 frame_register_allocator_(this, is_optimizing) {
46 ASSERT(assembler != NULL); 48 ASSERT(assembler != NULL);
47 } 49 }
48 50
49 51
50 FlowGraphCompiler::~FlowGraphCompiler() { 52 FlowGraphCompiler::~FlowGraphCompiler() {
51 // BlockInfos are zone-allocated, so their destructors are not called. 53 // BlockInfos are zone-allocated, so their destructors are not called.
52 // Verify the labels explicitly here. 54 // Verify the labels explicitly here.
53 for (int i = 0; i < block_info_.length(); ++i) { 55 for (int i = 0; i < block_info_.length(); ++i) {
54 ASSERT(!block_info_[i]->label.IsLinked()); 56 ASSERT(!block_info_[i]->label.IsLinked());
55 ASSERT(!block_info_[i]->label.HasNear()); 57 ASSERT(!block_info_[i]->label.HasNear());
56 } 58 }
57 } 59 }
58 60
59 61
60 void FlowGraphCompiler::InitCompiler() { 62 void FlowGraphCompiler::InitCompiler() {
61 pc_descriptors_list_ = new DescriptorList(); 63 pc_descriptors_list_ = new DescriptorList();
62 exception_handlers_list_ = new ExceptionHandlerList(); 64 exception_handlers_list_ = new ExceptionHandlerList();
63 block_info_.Clear(); 65 block_info_.Clear();
64 for (int i = 0; i < block_order_.length(); ++i) { 66 for (int i = 0; i < block_order_.length(); ++i) {
65 block_info_.Add(new BlockInfo()); 67 block_info_.Add(new BlockInfo());
66 } 68 }
67 } 69 }
68 70
69 71
// Emits native code for every block in block_order(), in order.
void FlowGraphCompiler::VisitBlocks() {
  for (intptr_t i = 0; i < block_order().length(); ++i) {
    // On block entry no values may be cached in registers: every predecessor
    // spills before jumping here (see the Spill() call below), so the
    // allocator's simulated stack must be empty.
    ASSERT(frame_register_allocator()->IsSpilled());
    assembler()->Comment("B%d", i);
    // Compile the block entry.
    set_current_block(block_order()[i]);
    current_block()->PrepareEntry(this);
    Instruction* instr = current_block()->StraightLineSuccessor();
    // Compile all successors until an exit, branch, or a block entry.
    while ((instr != NULL) && !instr->IsBlockEntry()) {
      if (FLAG_code_comments) EmitComment(instr);
      ASSERT(instr->locs() != NULL);
      // Prologue lets the allocator assign registers for this instruction
      // before its native code is emitted.
      EmitInstructionPrologue(instr);
      instr->EmitNativeCode(this);
      instr = instr->StraightLineSuccessor();
    }
    BlockEntryInstr* successor =
        (instr == NULL) ? NULL : instr->AsBlockEntry();
    if (successor != NULL) {
      // Flush register-cached values to the real stack so the successor sees
      // a canonical frame regardless of which predecessor reached it.
      frame_register_allocator()->Spill();
      // Block ended with a "goto". We can fall through if it is the
      // next block in the list. Otherwise, we need a jump.
      if ((i == block_order().length() - 1) ||
          (block_order()[i + 1] != successor)) {
        assembler()->jmp(GetBlockLabel(successor));
      }
    }
  }
}
97 101
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
139 intptr_t pc_offset) { 143 intptr_t pc_offset) {
140 exception_handlers_list_->AddHandler(try_index, pc_offset); 144 exception_handlers_list_->AddHandler(try_index, pc_offset);
141 } 145 }
142 146
143 147
// Uses current pc position and try-index.
// Records a PcDescriptor of the given kind at the current assembler offset.
void FlowGraphCompiler::AddCurrentDescriptor(PcDescriptors::Kind kind,
                                             intptr_t cid,
                                             intptr_t token_index,
                                             intptr_t try_index) {
  // A deoptimization point must describe a frame with no values cached in
  // registers, so the allocator is required to have spilled everything first.
  ASSERT((kind != PcDescriptors::kDeopt) ||
         frame_register_allocator()->IsSpilled());
  pc_descriptors_list()->AddDescriptor(kind,
                                       assembler()->CodeSize(),
                                       cid,
                                       token_index,
                                       try_index);
}
155 161
156 162
// Creates (and registers) a deoptimization stub and returns its entry label
// for use as a jump target.  reg1..reg3 are optional registers whose values
// the stub must push before deoptimizing; pass kNoRegister to skip.
Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
                                       intptr_t deopt_token_index,
                                       intptr_t try_index,
                                       DeoptReasonId reason,
                                       Register reg1,
                                       Register reg2,
                                       Register reg3) {
  DeoptimizationStub* stub =
      new DeoptimizationStub(deopt_id, deopt_token_index, try_index, reason);
  // Values the allocator still keeps in registers are recorded in the stub
  // (pushed before reg1..reg3) so the deopt frame sees the full stack.
  frame_register_allocator()->SpillInDeoptStub(stub);
  if (reg1 != kNoRegister) stub->Push(reg1);
  if (reg2 != kNoRegister) stub->Push(reg2);
  if (reg3 != kNoRegister) stub->Push(reg3);
  deopt_stubs_.Add(stub);
  return stub->entry_label();
}
172 179
173 180
174 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) { 181 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) {
175 ASSERT(exception_handlers_list_ != NULL); 182 ASSERT(exception_handlers_list_ != NULL);
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
261 268
262 269
263 void FlowGraphCompiler::GenerateInstanceCall( 270 void FlowGraphCompiler::GenerateInstanceCall(
264 intptr_t cid, 271 intptr_t cid,
265 intptr_t token_index, 272 intptr_t token_index,
266 intptr_t try_index, 273 intptr_t try_index,
267 const String& function_name, 274 const String& function_name,
268 intptr_t argument_count, 275 intptr_t argument_count,
269 const Array& argument_names, 276 const Array& argument_names,
270 intptr_t checked_argument_count) { 277 intptr_t checked_argument_count) {
278 ASSERT(frame_register_allocator()->IsSpilled());
271 ICData& ic_data = 279 ICData& ic_data =
272 ICData::ZoneHandle(ICData::New(parsed_function().function(), 280 ICData::ZoneHandle(ICData::New(parsed_function().function(),
273 function_name, 281 function_name,
274 cid, 282 cid,
275 checked_argument_count)); 283 checked_argument_count));
276 const Array& arguments_descriptor = 284 const Array& arguments_descriptor =
277 CodeGenerator::ArgumentsDescriptor(argument_count, argument_names); 285 CodeGenerator::ArgumentsDescriptor(argument_count, argument_names);
278 uword label_address = 0; 286 uword label_address = 0;
279 switch (checked_argument_count) { 287 switch (checked_argument_count) {
280 case 1: 288 case 1:
(...skipping 18 matching lines...) Expand all
299 try_index); 307 try_index);
300 } 308 }
301 309
302 310
303 void FlowGraphCompiler::GenerateStaticCall(intptr_t cid, 311 void FlowGraphCompiler::GenerateStaticCall(intptr_t cid,
304 intptr_t token_index, 312 intptr_t token_index,
305 intptr_t try_index, 313 intptr_t try_index,
306 const Function& function, 314 const Function& function,
307 intptr_t argument_count, 315 intptr_t argument_count,
308 const Array& argument_names) { 316 const Array& argument_names) {
317 ASSERT(frame_register_allocator()->IsSpilled());
318
309 const Array& arguments_descriptor = 319 const Array& arguments_descriptor =
310 CodeGenerator::ArgumentsDescriptor(argument_count, argument_names); 320 CodeGenerator::ArgumentsDescriptor(argument_count, argument_names);
311 const intptr_t descr_offset = EmitStaticCall(function, 321 const intptr_t descr_offset = EmitStaticCall(function,
312 arguments_descriptor, 322 arguments_descriptor,
313 argument_count); 323 argument_count);
314 pc_descriptors_list()->AddDescriptor(PcDescriptors::kFuncCall, 324 pc_descriptors_list()->AddDescriptor(PcDescriptors::kFuncCall,
315 descr_offset, 325 descr_offset,
316 cid, 326 cid,
317 token_index, 327 token_index,
318 try_index); 328 try_index);
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
386 GenerateInstanceCall(comp->cid(), 396 GenerateInstanceCall(comp->cid(),
387 comp->token_index(), 397 comp->token_index(),
388 comp->try_index(), 398 comp->try_index(),
389 function_name, 399 function_name,
390 kNumArguments, 400 kNumArguments,
391 Array::ZoneHandle(), // No optional arguments. 401 Array::ZoneHandle(), // No optional arguments.
392 kNumArgsChecked); 402 kNumArgsChecked);
393 } 403 }
394 404
395 405
406 Register FrameRegisterAllocator::AllocateFreeRegister(bool* blocked_registers) {
407 for (intptr_t regno = 0; regno < kNumberOfCpuRegisters; regno++) {
408 if (!blocked_registers[regno] && (registers_[regno] == NULL)) {
409 blocked_registers[regno] = true;
410 return static_cast<Register>(regno);
411 }
412 }
413 return SpillFirst();
414 }
415
416
417 Register FrameRegisterAllocator::SpillFirst() {
418 ASSERT(stack_.length() > 0);
419 Register reg = stack_[0];
420 stack_.RemoveFirst();
421 compiler()->assembler()->PushRegister(reg);
422 registers_[reg] = NULL;
423 return reg;
424 }
425
426
427 void FrameRegisterAllocator::SpillRegister(Register reg) {
428 while (registers_[reg] != NULL) SpillFirst();
429 }
430
431
// Assigns concrete registers to all unallocated input, temp, and output
// locations of |instr|, spilling cached values as needed.  Phases: block
// fixed registers, satisfy inputs (consuming the simulated stack), spill
// around calls/branches, then temps, then the result.
void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) {
  LocationSummary* locs = instr->locs();

  bool blocked_registers[kNumberOfCpuRegisters];
  bool blocked_temp_registers[kNumberOfCpuRegisters];

  bool spill = false;

  // Mark all available registers free.
  for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
    blocked_registers[i] = false;
    blocked_temp_registers[i] = false;
  }

  // Mark all fixed input, temp and output registers as used.
  for (intptr_t i = 0; i < locs->input_count(); i++) {
    Location loc = locs->in(i);
    if (loc.kind() == Location::kRegister) {
      ASSERT(!blocked_registers[loc.reg()]);
      blocked_registers[loc.reg()] = true;
      if (registers_[loc.reg()] != NULL) {
        // A fixed input register currently caches some value.  It is only
        // reusable in place if it sits at exactly the simulated-stack slot
        // this input will consume; otherwise we must spill everything.
        intptr_t stack_index = stack_.length() - (locs->input_count() - i);
        if ((stack_index < 0) || (stack_[stack_index] != loc.reg())) {
          spill = true;
        }
      }
    }
  }

  if (spill) Spill();

  for (intptr_t i = 0; i < locs->temp_count(); i++) {
    Location loc = locs->temp(i);
    if (loc.kind() == Location::kRegister) {
      ASSERT(!blocked_registers[loc.reg()]);
      blocked_registers[loc.reg()] = true;
      // Temps must not alias input values, so remember them separately.
      blocked_temp_registers[loc.reg()] = true;
    }
  }

  if (locs->out().kind() == Location::kRegister) {
    // Fixed output registers are allowed to overlap with
    // temps and inputs.
    blocked_registers[locs->out().reg()] = true;
  }

  // Do not allocate known registers.
  blocked_registers[CTX] = true;
  blocked_registers[SPREG] = true;
  blocked_registers[FPREG] = true;
  if (TMP != kNoRegister) {
    blocked_registers[TMP] = true;
  }

  // Allocate all unallocated input locations.
  // Walk inputs last-to-first because the last input is on top of the
  // simulated stack.
  for (intptr_t i = locs->input_count() - 1; i >= 0; i--) {
    Location loc = locs->in(i);
    Register reg = kNoRegister;
    if (loc.kind() == Location::kRegister) {
      reg = loc.reg();
    } else if (loc.kind() == Location::kUnallocated) {
      ASSERT(loc.policy() == Location::kRequiresRegister);
      // Prefer the register already holding the top-of-stack value (a free
      // move), unless a fixed temp claims it.
      if ((stack_.length() > 0) && !blocked_temp_registers[stack_.Last()]) {
        reg = stack_.Last();
        blocked_registers[reg] = true;
      } else {
        reg = AllocateFreeRegister(blocked_registers);
      }
      locs->set_in(i, Location::RegisterLocation(reg));
    }

    // Materialize the input value into |reg| (from a register or the stack).
    Pop(reg, instr->InputAt(i));
  }

  // If this instruction is call spill everything that was not consumed by
  // input locations.
  if (locs->is_call() || locs->is_branch()) {
    Spill();
  }

  // Allocate all unallocated temp locations.
  for (intptr_t i = 0; i < locs->temp_count(); i++) {
    Location loc = locs->temp(i);
    if (loc.kind() == Location::kUnallocated) {
      ASSERT(loc.policy() == Location::kRequiresRegister);
      loc = Location::RegisterLocation(
        AllocateFreeRegister(blocked_registers));
      locs->set_temp(i, loc);
    }
    // A temp register will be clobbered: evict any value cached in it.
    SpillRegister(loc.reg());
  }

  Location result_location = locs->out();
  if (result_location.kind() == Location::kUnallocated) {
    switch (result_location.policy()) {
      case Location::kRequiresRegister:
        result_location = Location::RegisterLocation(
            AllocateFreeRegister(blocked_registers));
        break;
      case Location::kSameAsFirstInput:
        result_location = locs->in(0);
        break;
    }
    locs->set_out(result_location);
  }

  if (result_location.kind() == Location::kRegister) {
    // The result will be written here: evict any value cached in it.
    SpillRegister(result_location.reg());
  }
}
542
543
// Moves the value on top of the (simulated or real) expression stack into
// |dst|.  |val| is the use being satisfied, for consistency checking.
void FrameRegisterAllocator::Pop(Register dst, Value* val) {
  if (stack_.length() > 0) {
    // Values are only tracked on the simulated stack when the allocator is
    // keeping them in registers (presumably optimizing mode — the flag is
    // wired from is_optimizing in the compiler constructor).
    ASSERT(keep_values_in_registers_);
    Register src = stack_.Last();
    // The tracked register must hold exactly the definition this use refers to.
    ASSERT(val->AsUse()->definition() == registers_[src]);
    stack_.RemoveLast();
    registers_[src] = NULL;
    compiler()->assembler()->MoveRegister(dst, src);
  } else {
    // Nothing cached in registers: the value lives on the real machine stack.
    compiler()->assembler()->PopRegister(dst);
  }
}
556
557
558 void FrameRegisterAllocator::Push(Register reg, Instruction* val) {
559 ASSERT(registers_[reg] == NULL);
560 if (keep_values_in_registers_) {
561 registers_[reg] = val;
562 stack_.Add(reg);
563 } else {
564 compiler()->assembler()->PushRegister(reg);
565 }
566 }
567
568
569 void FrameRegisterAllocator::Spill() {
570 for (int i = 0; i < stack_.length(); i++) {
571 Register r = stack_[i];
572 registers_[r] = NULL;
573 compiler()->assembler()->PushRegister(r);
574 }
575 stack_.Clear();
576 }
577
578
579 void FrameRegisterAllocator::SpillInDeoptStub(DeoptimizationStub* stub) {
580 for (int i = 0; i < stack_.length(); i++) {
581 stub->Push(stack_[i]);
582 }
583 }
396 584
397 585
398 } // namespace dart 586 } // namespace dart
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698