Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1466 matching lines...) | |
| 1477 } | 1477 } |
| 1478 | 1478 |
| 1479 // Take another spin if there are now unswept objects in new space | 1479 // Take another spin if there are now unswept objects in new space |
| 1480 // (there are currently no more unswept promoted objects). | 1480 // (there are currently no more unswept promoted objects). |
| 1481 } while (new_space_front != new_space_.top()); | 1481 } while (new_space_front != new_space_.top()); |
| 1482 | 1482 |
| 1483 return new_space_front; | 1483 return new_space_front; |
| 1484 } | 1484 } |
| 1485 | 1485 |
| 1486 | 1486 |
| 1487 #ifndef V8_HOST_ARCH_64_BIT | |
| 1488 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, | |
| 1489 HeapObject* object, | |
| 1490 int size)) { | |
> **Erik Corry** (2012/04/16 14:35:19): This assumes that there is an even number of heade…
> **Vyacheslav Egorov (Chromium)** (2012/04/30 14:39:11): Done.
| 1491 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) { | |
| 1492 heap->CreateFillerObjectAt(object->address(), kPointerSize); | |
| 1493 return HeapObject::FromAddress(object->address() + kPointerSize); | |
| 1494 } else { | |
| 1495 heap->CreateFillerObjectAt(object->address() + size - kPointerSize, | |
| 1496 kPointerSize); | |
| 1497 return object; | |
| 1498 } | |
| 1499 } | |
| 1500 #endif | |
| 1501 | |
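The new `EnsureDoubleAligned` helper leans on the allocator already returning pointer-aligned (4-byte) addresses, so on a 32-bit host a start address is either 0 or 4 mod 8 and a single spare word is always enough padding: if the start is misaligned, a one-word filler goes in front and the payload shifts up; otherwise the filler goes at the end. Erik Corry's note above points at the hidden precondition about header words keeping the double fields aligned once the start address is. A minimal standalone sketch of the arithmetic, assuming 32-bit constants (`kPointerSize` = 4, `kDoubleAlignmentMask` = 7); an illustration, not the V8 implementation:

```cpp
#include <cassert>
#include <cstdint>

// Assumed 32-bit layout constants, for illustration only.
const uintptr_t kPointerSize = 4;
const uintptr_t kDoubleAlignmentMask = 8 - 1;

// 'raw' points at (size + kPointerSize) bytes of pointer-aligned storage.
// Returns the 8-aligned payload start; the spare word becomes the filler
// slot, either before the payload (misaligned start) or after it.
uintptr_t AlignPayload(uintptr_t raw) {
  if ((raw & kDoubleAlignmentMask) != 0) {
    return raw + kPointerSize;  // filler occupies the first word
  }
  return raw;                   // filler occupies the trailing word
}

int main() {
  assert(AlignPayload(0x1000) == 0x1000);  // already 8-aligned
  assert(AlignPayload(0x1004) == 0x1008);  // bumped past a one-word filler
  return 0;
}
```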
| 1502 | |
| 1487 enum LoggingAndProfiling { | 1503 enum LoggingAndProfiling { |
| 1488 LOGGING_AND_PROFILING_ENABLED, | 1504 LOGGING_AND_PROFILING_ENABLED, |
| 1489 LOGGING_AND_PROFILING_DISABLED | 1505 LOGGING_AND_PROFILING_DISABLED |
| 1490 }; | 1506 }; |
| 1491 | 1507 |
| 1492 | 1508 |
| 1493 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS }; | 1509 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS }; |
| 1494 | 1510 |
| 1495 | 1511 |
| 1496 template<MarksHandling marks_handling, | 1512 template<MarksHandling marks_handling, |
| (...skipping 53 matching lines...) | |
| 1550 kVisitStructGeneric>(); | 1566 kVisitStructGeneric>(); |
| 1551 } | 1567 } |
| 1552 | 1568 |
| 1553 static VisitorDispatchTable<ScavengingCallback>* GetTable() { | 1569 static VisitorDispatchTable<ScavengingCallback>* GetTable() { |
| 1554 return &table_; | 1570 return &table_; |
| 1555 } | 1571 } |
| 1556 | 1572 |
| 1557 private: | 1573 private: |
| 1558 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; | 1574 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; |
| 1559 enum SizeRestriction { SMALL, UNKNOWN_SIZE }; | 1575 enum SizeRestriction { SMALL, UNKNOWN_SIZE }; |
| 1576 enum AlignmentRequirement { POINTER_ALIGNED, DOUBLE_ALIGNED }; | |
> **Erik Corry** (2012/04/16 14:35:19): It seems to me that if you use the actual alignmen…
| 1577 | |
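Erik Corry's truncated remark about "the actual alignmen[t]" reads like a suggestion to encode the required byte alignment as the enum's value, so padding can be computed generically instead of special-casing `DOUBLE_ALIGNED`. A hedged sketch of that idea; the numeric values and the helper are assumptions, not what this patch does:

```cpp
// Hypothetical variant: the enum carries the real byte alignment.
enum AlignmentRequirement {
  POINTER_ALIGNED = 4,  // assumed kPointerSize on a 32-bit host
  DOUBLE_ALIGNED = 8    // assumed kDoubleSize
};

// Extra bytes to reserve so any requested alignment can be satisfied
// starting from a pointer-aligned address.
static inline int PaddingFor(AlignmentRequirement alignment) {
  return (alignment > POINTER_ALIGNED) ? alignment - POINTER_ALIGNED : 0;
}
```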
| 1560 | 1578 |
| 1561 static void RecordCopiedObject(Heap* heap, HeapObject* obj) { | 1579 static void RecordCopiedObject(Heap* heap, HeapObject* obj) { |
| 1562 bool should_record = false; | 1580 bool should_record = false; |
| 1563 #ifdef DEBUG | 1581 #ifdef DEBUG |
| 1564 should_record = FLAG_heap_stats; | 1582 should_record = FLAG_heap_stats; |
| 1565 #endif | 1583 #endif |
| 1566 should_record = should_record || FLAG_log_gc; | 1584 should_record = should_record || FLAG_log_gc; |
| 1567 if (should_record) { | 1585 if (should_record) { |
| 1568 if (heap->new_space()->Contains(obj)) { | 1586 if (heap->new_space()->Contains(obj)) { |
| 1569 heap->new_space()->RecordAllocation(obj); | 1587 heap->new_space()->RecordAllocation(obj); |
| (...skipping 30 matching lines...) | |
| 1600 } | 1618 } |
| 1601 } | 1619 } |
| 1602 | 1620 |
| 1603 if (marks_handling == TRANSFER_MARKS) { | 1621 if (marks_handling == TRANSFER_MARKS) { |
| 1604 if (Marking::TransferColor(source, target)) { | 1622 if (Marking::TransferColor(source, target)) { |
| 1605 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); | 1623 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); |
| 1606 } | 1624 } |
| 1607 } | 1625 } |
| 1608 } | 1626 } |
| 1609 | 1627 |
| 1610 template<ObjectContents object_contents, SizeRestriction size_restriction> | 1628 |
| 1629 template<ObjectContents object_contents, | |
| 1630 SizeRestriction size_restriction, | |
| 1631 AlignmentRequirement alignment> | |
| 1611 static inline void EvacuateObject(Map* map, | 1632 static inline void EvacuateObject(Map* map, |
| 1612 HeapObject** slot, | 1633 HeapObject** slot, |
| 1613 HeapObject* object, | 1634 HeapObject* object, |
| 1614 int object_size) { | 1635 int object_size) { |
| 1615 SLOW_ASSERT((size_restriction != SMALL) || | 1636 SLOW_ASSERT((size_restriction != SMALL) || |
| 1616 (object_size <= Page::kMaxNonCodeHeapObjectSize)); | 1637 (object_size <= Page::kMaxNonCodeHeapObjectSize)); |
| 1617 SLOW_ASSERT(object->Size() == object_size); | 1638 SLOW_ASSERT(object->Size() == object_size); |
| 1618 | 1639 |
| 1640 #ifndef V8_HOST_ARCH_64_BIT | |
| 1641 int allocation_size = object_size; | |
| 1642 if (alignment == DOUBLE_ALIGNED) { | |
> **Erik Corry** (2012/04/16 14:35:19): You can replace this with `if (kPointerSize != alig`…
> **Vyacheslav Egorov (Chromium)** (2012/04/30 14:39:11): Done.
| 1643 allocation_size += kPointerSize; | |
| 1644 } | |
| 1645 #endif | |
| 1646 | |
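On 32-bit hosts this hunk grows the request by one word whenever double alignment is demanded; the reviewer's truncated suggestion (`if (kPointerSize != alig`…) would key the check off the alignment value itself rather than the enum tag. Condensed, the evacuation path then becomes: over-allocate, realign, copy only the real payload. A simplified sketch under those assumptions, with `Allocate` standing in for the space-specific `AllocateRaw` calls above:

```cpp
#include <cstdint>
#include <cstdlib>
#include <cstring>

const int kWordSize = 4;  // assumed kPointerSize on a 32-bit host

// Toy stand-in for the space-specific AllocateRaw calls (assumption).
static char* Allocate(int size) { return static_cast<char*>(std::malloc(size)); }

// Simplified shape of EvacuateObject for a DOUBLE_ALIGNED object.
char* EvacuateDoubleAligned(const char* object, int object_size) {
  int allocation_size = object_size + kWordSize;  // reserve the pad word
  char* target = Allocate(allocation_size);       // pointer-aligned result
  if ((reinterpret_cast<uintptr_t>(target) & 7) != 0) {
    target += kWordSize;                          // skip a leading filler word
  }
  std::memcpy(target, object, object_size);       // MigrateObject copies payload only
  return target;
}
```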
| 1619 Heap* heap = map->GetHeap(); | 1647 Heap* heap = map->GetHeap(); |
| 1620 if (heap->ShouldBePromoted(object->address(), object_size)) { | 1648 if (heap->ShouldBePromoted(object->address(), object_size)) { |
| 1621 MaybeObject* maybe_result; | 1649 MaybeObject* maybe_result; |
| 1622 | 1650 |
| 1623 if ((size_restriction != SMALL) && | 1651 if ((size_restriction != SMALL) && |
| 1624 (object_size > Page::kMaxNonCodeHeapObjectSize)) { | 1652 (allocation_size > Page::kMaxNonCodeHeapObjectSize)) { |
| 1625 maybe_result = heap->lo_space()->AllocateRaw(object_size, | 1653 maybe_result = heap->lo_space()->AllocateRaw(allocation_size, |
| 1626 NOT_EXECUTABLE); | 1654 NOT_EXECUTABLE); |
| 1627 } else { | 1655 } else { |
| 1628 if (object_contents == DATA_OBJECT) { | 1656 if (object_contents == DATA_OBJECT) { |
| 1629 maybe_result = heap->old_data_space()->AllocateRaw(object_size); | 1657 maybe_result = heap->old_data_space()->AllocateRaw(allocation_size); |
| 1630 } else { | 1658 } else { |
| 1631 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size); | 1659 maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size); |
| 1632 } | 1660 } |
| 1633 } | 1661 } |
| 1634 | 1662 |
| 1635 Object* result = NULL; // Initialization to please compiler. | 1663 Object* result = NULL; // Initialization to please compiler. |
| 1636 if (maybe_result->ToObject(&result)) { | 1664 if (maybe_result->ToObject(&result)) { |
| 1637 HeapObject* target = HeapObject::cast(result); | 1665 HeapObject* target = HeapObject::cast(result); |
| 1638 | 1666 |
| 1667 #ifndef V8_HOST_ARCH_64_BIT | |
| 1668 if (alignment == DOUBLE_ALIGNED) { | |
| 1669 target = EnsureDoubleAligned(heap, target, allocation_size); | |
| 1670 } | |
| 1671 #endif | |
| 1672 | |
| 1639 // Order is important: slot might be inside of the target if target | 1673 // Order is important: slot might be inside of the target if target |
| 1640 // was allocated over a dead object and slot comes from the store | 1674 // was allocated over a dead object and slot comes from the store |
| 1641 // buffer. | 1675 // buffer. |
| 1642 *slot = target; | 1676 *slot = target; |
| 1643 MigrateObject(heap, object, target, object_size); | 1677 MigrateObject(heap, object, target, object_size); |
| 1644 | 1678 |
| 1645 if (object_contents == POINTER_OBJECT) { | 1679 if (object_contents == POINTER_OBJECT) { |
| 1646 heap->promotion_queue()->insert(target, object_size); | 1680 heap->promotion_queue()->insert(target, object_size); |
| 1647 } | 1681 } |
| 1648 | 1682 |
| 1649 heap->tracer()->increment_promoted_objects_size(object_size); | 1683 heap->tracer()->increment_promoted_objects_size(object_size); |
| 1650 return; | 1684 return; |
| 1651 } | 1685 } |
| 1652 } | 1686 } |
| 1653 MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size); | 1687 MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size); |
| 1654 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); | 1688 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); |
| 1655 Object* result = allocation->ToObjectUnchecked(); | 1689 Object* result = allocation->ToObjectUnchecked(); |
| 1656 HeapObject* target = HeapObject::cast(result); | 1690 HeapObject* target = HeapObject::cast(result); |
| 1657 | 1691 |
| 1692 #ifndef V8_HOST_ARCH_64_BIT | |
| 1693 if (alignment == DOUBLE_ALIGNED) { | |
| 1694 target = EnsureDoubleAligned(heap, target, allocation_size); | |
| 1695 } | |
| 1696 #endif | |
| 1697 | |
| 1658 // Order is important: slot might be inside of the target if target | 1698 // Order is important: slot might be inside of the target if target |
| 1659 // was allocated over a dead object and slot comes from the store | 1699 // was allocated over a dead object and slot comes from the store |
| 1660 // buffer. | 1700 // buffer. |
| 1661 *slot = target; | 1701 *slot = target; |
| 1662 MigrateObject(heap, object, target, object_size); | 1702 MigrateObject(heap, object, target, object_size); |
| 1663 return; | 1703 return; |
| 1664 } | 1704 } |
| 1665 | 1705 |
| 1666 | 1706 |
| 1667 static inline void EvacuateJSFunction(Map* map, | 1707 static inline void EvacuateJSFunction(Map* map, |
| (...skipping 15 matching lines...) | |
| 1683 map->GetHeap()->mark_compact_collector()-> | 1723 map->GetHeap()->mark_compact_collector()-> |
| 1684 RecordCodeEntrySlot(code_entry_slot, code); | 1724 RecordCodeEntrySlot(code_entry_slot, code); |
| 1685 } | 1725 } |
| 1686 } | 1726 } |
| 1687 | 1727 |
| 1688 | 1728 |
| 1689 static inline void EvacuateFixedArray(Map* map, | 1729 static inline void EvacuateFixedArray(Map* map, |
| 1690 HeapObject** slot, | 1730 HeapObject** slot, |
| 1691 HeapObject* object) { | 1731 HeapObject* object) { |
| 1692 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); | 1732 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); |
| 1693 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, | 1733 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, POINTER_ALIGNED>(map, |
| 1694 slot, | 1734 slot, |
> **Erik Corry** (2012/04/16 14:35:19): Ironically enough, the alignment is wrong here.
| 1695 object, | 1735 object, |
| 1696 object_size); | 1736 object_size); |
| 1697 } | 1737 } |
| 1698 | 1738 |
| 1699 | 1739 |
| 1700 static inline void EvacuateFixedDoubleArray(Map* map, | 1740 static inline void EvacuateFixedDoubleArray(Map* map, |
| 1701 HeapObject** slot, | 1741 HeapObject** slot, |
| 1702 HeapObject* object) { | 1742 HeapObject* object) { |
| 1703 int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); | 1743 int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); |
| 1704 int object_size = FixedDoubleArray::SizeFor(length); | 1744 int object_size = FixedDoubleArray::SizeFor(length); |
| 1705 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, | 1745 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, DOUBLE_ALIGNED>( |
| 1706 slot, | 1746 map, |
| 1707 object, | 1747 slot, |
| 1708 object_size); | 1748 object, |
| 1749 object_size); | |
| 1709 } | 1750 } |
| 1710 | 1751 |
| 1711 | 1752 |
| 1712 static inline void EvacuateByteArray(Map* map, | 1753 static inline void EvacuateByteArray(Map* map, |
| 1713 HeapObject** slot, | 1754 HeapObject** slot, |
| 1714 HeapObject* object) { | 1755 HeapObject* object) { |
| 1715 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize(); | 1756 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize(); |
| 1716 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); | 1757 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, POINTER_ALIGNED>(map, slot, object, object_size); |
| 1717 } | 1758 } |
| 1718 | 1759 |
| 1719 | 1760 |
| 1720 static inline void EvacuateSeqAsciiString(Map* map, | 1761 static inline void EvacuateSeqAsciiString(Map* map, |
| 1721 HeapObject** slot, | 1762 HeapObject** slot, |
| 1722 HeapObject* object) { | 1763 HeapObject* object) { |
| 1723 int object_size = SeqAsciiString::cast(object)-> | 1764 int object_size = SeqAsciiString::cast(object)-> |
| 1724 SeqAsciiStringSize(map->instance_type()); | 1765 SeqAsciiStringSize(map->instance_type()); |
| 1725 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); | 1766 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, POINTER_ALIGNED>(map, slot, object, object_size); |
| 1726 } | 1767 } |
| 1727 | 1768 |
| 1728 | 1769 |
| 1729 static inline void EvacuateSeqTwoByteString(Map* map, | 1770 static inline void EvacuateSeqTwoByteString(Map* map, |
| 1730 HeapObject** slot, | 1771 HeapObject** slot, |
| 1731 HeapObject* object) { | 1772 HeapObject* object) { |
| 1732 int object_size = SeqTwoByteString::cast(object)-> | 1773 int object_size = SeqTwoByteString::cast(object)-> |
| 1733 SeqTwoByteStringSize(map->instance_type()); | 1774 SeqTwoByteStringSize(map->instance_type()); |
| 1734 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); | 1775 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, POINTER_ALIGNED>(map, slot, object, object_size); |
| 1735 } | 1776 } |
| 1736 | 1777 |
| 1737 | 1778 |
| 1738 static inline bool IsShortcutCandidate(int type) { | 1779 static inline bool IsShortcutCandidate(int type) { |
| 1739 return ((type & kShortcutTypeMask) == kShortcutTypeTag); | 1780 return ((type & kShortcutTypeMask) == kShortcutTypeTag); |
| 1740 } | 1781 } |
| 1741 | 1782 |
| 1742 static inline void EvacuateShortcutCandidate(Map* map, | 1783 static inline void EvacuateShortcutCandidate(Map* map, |
| 1743 HeapObject** slot, | 1784 HeapObject** slot, |
| 1744 HeapObject* object) { | 1785 HeapObject* object) { |
| (...skipping 22 matching lines...) | |
| 1767 object->set_map_word(MapWord::FromForwardingAddress(target)); | 1808 object->set_map_word(MapWord::FromForwardingAddress(target)); |
| 1768 return; | 1809 return; |
| 1769 } | 1810 } |
| 1770 | 1811 |
| 1771 heap->DoScavengeObject(first->map(), slot, first); | 1812 heap->DoScavengeObject(first->map(), slot, first); |
| 1772 object->set_map_word(MapWord::FromForwardingAddress(*slot)); | 1813 object->set_map_word(MapWord::FromForwardingAddress(*slot)); |
| 1773 return; | 1814 return; |
| 1774 } | 1815 } |
| 1775 | 1816 |
| 1776 int object_size = ConsString::kSize; | 1817 int object_size = ConsString::kSize; |
| 1777 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size); | 1818 EvacuateObject<POINTER_OBJECT, SMALL, POINTER_ALIGNED>(map, slot, object, object_size); |
| 1778 } | 1819 } |
| 1779 | 1820 |
| 1780 template<ObjectContents object_contents> | 1821 template<ObjectContents object_contents> |
| 1781 class ObjectEvacuationStrategy { | 1822 class ObjectEvacuationStrategy { |
| 1782 public: | 1823 public: |
| 1783 template<int object_size> | 1824 template<int object_size> |
| 1784 static inline void VisitSpecialized(Map* map, | 1825 static inline void VisitSpecialized(Map* map, |
| 1785 HeapObject** slot, | 1826 HeapObject** slot, |
| 1786 HeapObject* object) { | 1827 HeapObject* object) { |
| 1787 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); | 1828 EvacuateObject<object_contents, SMALL, POINTER_ALIGNED>(map, slot, object, object_size); |
| 1788 } | 1829 } |
| 1789 | 1830 |
| 1790 static inline void Visit(Map* map, | 1831 static inline void Visit(Map* map, |
| 1791 HeapObject** slot, | 1832 HeapObject** slot, |
| 1792 HeapObject* object) { | 1833 HeapObject* object) { |
| 1793 int object_size = map->instance_size(); | 1834 int object_size = map->instance_size(); |
| 1794 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); | 1835 EvacuateObject<object_contents, SMALL, POINTER_ALIGNED>(map, slot, object, object_size); |
| 1795 } | 1836 } |
| 1796 }; | 1837 }; |
| 1797 | 1838 |
| 1798 static VisitorDispatchTable<ScavengingCallback> table_; | 1839 static VisitorDispatchTable<ScavengingCallback> table_; |
| 1799 }; | 1840 }; |
| 1800 | 1841 |
| 1801 | 1842 |
| 1802 template<MarksHandling marks_handling, | 1843 template<MarksHandling marks_handling, |
| 1803 LoggingAndProfiling logging_and_profiling_mode> | 1844 LoggingAndProfiling logging_and_profiling_mode> |
| 1804 VisitorDispatchTable<ScavengingCallback> | 1845 VisitorDispatchTable<ScavengingCallback> |
| (...skipping 2844 matching lines...) | |
| 4649 | 4690 |
| 4650 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length, | 4691 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length, |
| 4651 PretenureFlag pretenure) { | 4692 PretenureFlag pretenure) { |
| 4652 if (length < 0 || length > FixedDoubleArray::kMaxLength) { | 4693 if (length < 0 || length > FixedDoubleArray::kMaxLength) { |
| 4653 return Failure::OutOfMemoryException(); | 4694 return Failure::OutOfMemoryException(); |
| 4654 } | 4695 } |
| 4655 | 4696 |
| 4656 AllocationSpace space = | 4697 AllocationSpace space = |
| 4657 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; | 4698 (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; |
| 4658 int size = FixedDoubleArray::SizeFor(length); | 4699 int size = FixedDoubleArray::SizeFor(length); |
| 4700 | |
| 4701 #ifndef V8_HOST_ARCH_64_BIT | |
| 4702 size += kPointerSize; | |
| 4703 #endif | |
| 4704 | |
| 4659 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { | 4705 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { |
| 4660 // Too big for new space. | 4706 // Too big for new space. |
| 4661 space = LO_SPACE; | 4707 space = LO_SPACE; |
| 4662 } else if (space == OLD_DATA_SPACE && | 4708 } else if (space == OLD_DATA_SPACE && |
| 4663 size > Page::kMaxNonCodeHeapObjectSize) { | 4709 size > Page::kMaxNonCodeHeapObjectSize) { |
| 4664 // Too big for old data space. | 4710 // Too big for old data space. |
| 4665 space = LO_SPACE; | 4711 space = LO_SPACE; |
| 4666 } | 4712 } |
| 4667 | 4713 |
| 4668 AllocationSpace retry_space = | 4714 AllocationSpace retry_space = |
| 4669 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE; | 4715 (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE; |
| 4670 | 4716 |
| 4671 return AllocateRaw(size, space, retry_space); | 4717 HeapObject* object; |
| 4718 { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space); | |
| 4719 if (!maybe_object->To<HeapObject>(&object)) return maybe_object; | |
| 4720 } | |
| 4721 | |
| 4722 #ifndef V8_HOST_ARCH_64_BIT | |
| 4723 return EnsureDoubleAligned(this, object, size); | |
| 4724 #else | |
| 4725 return object; | |
| 4726 #endif | |
| 4672 } | 4727 } |
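With this change, `AllocateRawFixedDoubleArray` over-allocates by one word on 32-bit hosts and realigns the result, so the double payload always starts on an 8-byte boundary. The size math, worked for a small array under assumed layout constants (the two-word header is an assumption for illustration; the real value lives in `FixedDoubleArray::SizeFor`):

```cpp
#include <cstdio>

// Assumed 32-bit layout constants, for illustration only.
const int kPointerSize = 4;
const int kDoubleSize = 8;
const int kAssumedHeaderSize = 2 * kPointerSize;  // map word + length word

int SizeFor(int length) { return kAssumedHeaderSize + length * kDoubleSize; }

int main() {
  int size = SizeFor(2);                // 8 + 2 * 8 = 24-byte payload
  int requested = size + kPointerSize;  // 28 bytes handed to AllocateRaw
  std::printf("payload %d bytes, requested %d bytes\n", size, requested);
  // If AllocateRaw returns an address that is 4 mod 8, EnsureDoubleAligned
  // places a one-word filler first and the doubles land 8-aligned.
  return 0;
}
```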
| 4673 | 4728 |
| 4674 | 4729 |
| 4675 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { | 4730 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { |
| 4676 Object* result; | 4731 Object* result; |
| 4677 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure); | 4732 { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure); |
| 4678 if (!maybe_result->ToObject(&result)) return maybe_result; | 4733 if (!maybe_result->ToObject(&result)) return maybe_result; |
| 4679 } | 4734 } |
| 4680 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier( | 4735 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier( |
| 4681 hash_table_map()); | 4736 hash_table_map()); |
| (...skipping 2298 matching lines...) | |
| 6980 } else { | 7035 } else { |
| 6981 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died. | 7036 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died. |
| 6982 } | 7037 } |
| 6983 remembered_unmapped_pages_[remembered_unmapped_pages_index_] = | 7038 remembered_unmapped_pages_[remembered_unmapped_pages_index_] = |
| 6984 reinterpret_cast<Address>(p); | 7039 reinterpret_cast<Address>(p); |
| 6985 remembered_unmapped_pages_index_++; | 7040 remembered_unmapped_pages_index_++; |
| 6986 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages; | 7041 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages; |
| 6987 } | 7042 } |
| 6988 | 7043 |
| 6989 } } // namespace v8::internal | 7044 } } // namespace v8::internal |