OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_SLOT_SET_H | 5 #ifndef V8_SLOT_SET_H |
6 #define V8_SLOT_SET_H | 6 #define V8_SLOT_SET_H |
7 | 7 |
| 8 #include <map> |
8 #include <stack> | 9 #include <stack> |
9 | 10 |
10 #include "src/allocation.h" | 11 #include "src/allocation.h" |
11 #include "src/base/atomic-utils.h" | 12 #include "src/base/atomic-utils.h" |
12 #include "src/base/bits.h" | 13 #include "src/base/bits.h" |
13 #include "src/utils.h" | 14 #include "src/utils.h" |
14 | 15 |
15 namespace v8 { | 16 namespace v8 { |
16 namespace internal { | 17 namespace internal { |
17 | 18 |
(...skipping 435 matching lines...) |
453 | 454 |
454 void FreeToBeFreedChunks() { | 455 void FreeToBeFreedChunks() { |
455 base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_); | 456 base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_); |
456 while (!to_be_freed_chunks_.empty()) { | 457 while (!to_be_freed_chunks_.empty()) { |
457 Chunk* top = to_be_freed_chunks_.top(); | 458 Chunk* top = to_be_freed_chunks_.top(); |
458 to_be_freed_chunks_.pop(); | 459 to_be_freed_chunks_.pop(); |
459 delete top; | 460 delete top; |
460 } | 461 } |
461 } | 462 } |
462 | 463 |
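FreeToBeFreedChunks drains the deferred-free stack under to_be_freed_chunks_mutex_ and deletes every chunk. A minimal sketch of the enqueue side of this pattern, assuming a hypothetical EnqueueToBeFreedChunk helper that is not part of this patch:

  // Hypothetical counterpart (not in this patch): defer freeing a chunk by
  // pushing it onto the stack under the same mutex; FreeToBeFreedChunks()
  // later drains the stack and deletes the entries.
  void EnqueueToBeFreedChunk(Chunk* chunk) {
    base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
    to_be_freed_chunks_.push(chunk);
  }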
| 464 void RemoveInvalidSlots(std::map<uint32_t, uint32_t>& invalid_ranges) { |
| 465 Chunk* chunk = chunk_.Value(); |
| 466 while (chunk != nullptr) { |
| 467 TypedSlot* buffer = chunk->buffer.Value(); |
| 468 int count = chunk->count.Value(); |
| 469 for (int i = 0; i < count; i++) { |
| 470 uint32_t host_offset = buffer[i].host_offset(); |
| 471 std::map<uint32_t, uint32_t>::iterator upper_bound = |
| 472 invalid_ranges.upper_bound(host_offset); |
| 473 if (upper_bound == invalid_ranges.begin()) continue; |
| 474 // upper_bound points to the invalid range after the given slot. Hence, |
| 475 // we have to go to the previous element. |
| 476 upper_bound--; |
| 477 DCHECK_LE(upper_bound->first, host_offset); |
| 478 if (upper_bound->second > host_offset) { |
| 479 buffer[i].Clear(); |
| 480 } |
| 481 } |
| 482 chunk = chunk->next.Value(); |
| 483 } |
| 484 } |
| 485 |
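A hedged usage sketch for the method above (the names below are illustrative, not from this patch): the map is keyed by the start offset of each invalid range inside the host object, with the end offset as the value, so upper_bound(host_offset) minus one is the only range that could contain a given slot.

  // Illustrative only: mark two host-offset ranges as invalid and clear any
  // typed slots recorded inside them. typed_slots is an assumed TypedSlotSet*.
  std::map<uint32_t, uint32_t> invalid_ranges;
  invalid_ranges[16] = 48;    // slots with host offsets in [16, 48) are stale
  invalid_ranges[128] = 160;  // slots with host offsets in [128, 160) are stale
  typed_slots->RemoveInvalidSlots(invalid_ranges);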
463 private: | 486 private: |
464 static const int kInitialBufferSize = 100; | 487 static const int kInitialBufferSize = 100; |
465 static const int kMaxBufferSize = 16 * KB; | 488 static const int kMaxBufferSize = 16 * KB; |
466 | 489 |
467 static int NextCapacity(int capacity) { | 490 static int NextCapacity(int capacity) { |
468 return Min(kMaxBufferSize, capacity * 2); | 491 return Min(kMaxBufferSize, capacity * 2); |
469 } | 492 } |
470 | 493 |
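For reference, a short illustrative loop (not part of the patch) showing the capacity sequence this doubling-with-cap produces, assuming KB == 1024 as defined elsewhere in the codebase:

  // Capacities grow 100 -> 200 -> 400 -> ... -> 12800 -> 16384 and then stay
  // capped at kMaxBufferSize, since Min(16384, 25600) == 16384.
  for (int capacity = kInitialBufferSize; capacity < kMaxBufferSize;
       capacity = NextCapacity(capacity)) {
    // Each successive chunk buffer holds NextCapacity(capacity) entries.
  }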
471 class OffsetField : public BitField<int, 0, 29> {}; | 494 class OffsetField : public BitField<int, 0, 29> {}; |
472 class TypeField : public BitField<SlotType, 29, 3> {}; | 495 class TypeField : public BitField<SlotType, 29, 3> {}; |
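A hedged sketch of the packing these two bit fields define: a 29-bit offset in bits 0..28 and a 3-bit SlotType in bits 29..31 of one 32-bit word, using the standard v8 BitField encode/decode interface (CODE_TARGET_SLOT is assumed to be one of the SlotType enumerators defined earlier in this file):

  // Illustrative only: pack and unpack a typed slot's type and offset.
  uint32_t type_and_offset =
      TypeField::encode(CODE_TARGET_SLOT) | OffsetField::encode(0x123);
  SlotType type = TypeField::decode(type_and_offset);  // CODE_TARGET_SLOT
  int offset = OffsetField::decode(type_and_offset);   // 0x123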
(...skipping 26 matching lines...) |
499 Address page_start_; | 522 Address page_start_; |
500 base::AtomicValue<Chunk*> chunk_; | 523 base::AtomicValue<Chunk*> chunk_; |
501 base::Mutex to_be_freed_chunks_mutex_; | 524 base::Mutex to_be_freed_chunks_mutex_; |
502 std::stack<Chunk*> to_be_freed_chunks_; | 525 std::stack<Chunk*> to_be_freed_chunks_; |
503 }; | 526 }; |
504 | 527 |
505 } // namespace internal | 528 } // namespace internal |
506 } // namespace v8 | 529 } // namespace v8 |
507 | 530 |
508 #endif // V8_SLOT_SET_H | 531 #endif // V8_SLOT_SET_H |