| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 581 matching lines...) |
| 592 | 592 |
| 593 | 593 |
| 594 bool Serializer::serialization_enabled_ = false; | 594 bool Serializer::serialization_enabled_ = false; |
| 595 bool Serializer::too_late_to_enable_now_ = false; | 595 bool Serializer::too_late_to_enable_now_ = false; |
| 596 | 596 |
| 597 | 597 |
| 598 Deserializer::Deserializer(SnapshotByteSource* source) | 598 Deserializer::Deserializer(SnapshotByteSource* source) |
| 599 : isolate_(NULL), | 599 : isolate_(NULL), |
| 600 source_(source), | 600 source_(source), |
| 601 external_reference_decoder_(NULL) { | 601 external_reference_decoder_(NULL) { |
| 602 } | 602 for (int i = 0; i < LAST_SPACE + 1; i++) { |
| 603 | 603 reservations_[i] = kUninitializedReservation; |
| 604 | |
| 605 // This routine both allocates a new object, and also keeps | |
| 606 // track of where objects have been allocated so that we can | |
| 607 // fix back references when deserializing. | |
| 608 Address Deserializer::Allocate(int space_index, Space* space, int size) { | |
| 609 Address address; | |
| 610 if (!SpaceIsLarge(space_index)) { | |
| 611 ASSERT(!SpaceIsPaged(space_index) || | |
| 612 size <= Page::kPageSize - Page::kObjectStartOffset); | |
| 613 MaybeObject* maybe_new_allocation; | |
| 614 if (space_index == NEW_SPACE) { | |
| 615 maybe_new_allocation = | |
| 616 reinterpret_cast<NewSpace*>(space)->AllocateRaw(size); | |
| 617 } else { | |
| 618 maybe_new_allocation = | |
| 619 reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size); | |
| 620 } | |
| 621 ASSERT(!maybe_new_allocation->IsFailure()); | |
| 622 Object* new_allocation = maybe_new_allocation->ToObjectUnchecked(); | |
| 623 HeapObject* new_object = HeapObject::cast(new_allocation); | |
| 624 address = new_object->address(); | |
| 625 high_water_[space_index] = address + size; | |
| 626 } else { | |
| 627 ASSERT(SpaceIsLarge(space_index)); | |
| 628 LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space); | |
| 629 Object* new_allocation; | |
| 630 if (space_index == kLargeData || space_index == kLargeFixedArray) { | |
| 631 new_allocation = | |
| 632 lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked(); | |
| 633 } else { | |
| 634 ASSERT_EQ(kLargeCode, space_index); | |
| 635 new_allocation = | |
| 636 lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked(); | |
| 637 } | |
| 638 HeapObject* new_object = HeapObject::cast(new_allocation); | |
| 639 // Record all large objects in the same space. | |
| 640 address = new_object->address(); | |
| 641 pages_[LO_SPACE].Add(address); | |
| 642 } | 604 } |
| 643 last_object_address_ = address; | |
| 644 return address; | |
| 645 } | |
| 646 | |
| 647 | |
| 648 // This returns the address of an object that has been described in the | |
| 649 // snapshot as being offset bytes back in a particular space. | |
| 650 HeapObject* Deserializer::GetAddressFromEnd(int space) { | |
| 651 int offset = source_->GetInt(); | |
| 652 ASSERT(!SpaceIsLarge(space)); | |
| 653 offset <<= kObjectAlignmentBits; | |
| 654 return HeapObject::FromAddress(high_water_[space] - offset); | |
| 655 } | |
| 656 | |
| 657 | |
| 658 // This returns the address of an object that has been described in the | |
| 659 // snapshot as being offset bytes into a particular space. | |
| 660 HeapObject* Deserializer::GetAddressFromStart(int space) { | |
| 661 int offset = source_->GetInt(); | |
| 662 if (SpaceIsLarge(space)) { | |
| 663 // Large spaces have one object per 'page'. | |
| 664 return HeapObject::FromAddress(pages_[LO_SPACE][offset]); | |
| 665 } | |
| 666 offset <<= kObjectAlignmentBits; | |
| 667 if (space == NEW_SPACE) { | |
| 668 // New space has only one space - numbered 0. | |
| 669 return HeapObject::FromAddress(pages_[space][0] + offset); | |
| 670 } | |
| 671 ASSERT(SpaceIsPaged(space)); | |
| 672 int page_of_pointee = offset >> kPageSizeBits; | |
| 673 Address object_address = pages_[space][page_of_pointee] + | |
| 674 (offset & Page::kPageAlignmentMask); | |
| 675 return HeapObject::FromAddress(object_address); | |
| 676 } | 605 } |
| 677 | 606 |
| 678 | 607 |
| 679 void Deserializer::Deserialize() { | 608 void Deserializer::Deserialize() { |
| 680 isolate_ = Isolate::Current(); | 609 isolate_ = Isolate::Current(); |
| 681 ASSERT(isolate_ != NULL); | 610 ASSERT(isolate_ != NULL); |
| 682 { | 611 isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]); |
| 683 // Don't GC while deserializing - just expand the heap. | 612 // No active threads. |
| 684 AlwaysAllocateScope always_allocate; | 613 ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); |
| 685 // Don't use the free lists while deserializing. | 614 // No active handles. |
| 686 LinearAllocationScope allocate_linearly; | 615 ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); |
| 687 // No active threads. | 616 ASSERT_EQ(NULL, external_reference_decoder_); |
| 688 ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); | 617 external_reference_decoder_ = new ExternalReferenceDecoder(); |
| 689 // No active handles. | 618 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); |
| 690 ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); | 619 isolate_->heap()->RepairFreeListsAfterBoot(); |
| 691 ASSERT_EQ(NULL, external_reference_decoder_); | 620 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); |
| 692 external_reference_decoder_ = new ExternalReferenceDecoder(); | |
| 693 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); | |
| 694 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); | |
| 695 | 621 |
| 696 isolate_->heap()->set_native_contexts_list( | 622 isolate_->heap()->set_native_contexts_list( |
| 697 isolate_->heap()->undefined_value()); | 623 isolate_->heap()->undefined_value()); |
| 698 | 624 |
| 699 // Update data pointers to the external strings containing natives sources. | 625 // Update data pointers to the external strings containing natives sources. |
| 700 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { | 626 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { |
| 701 Object* source = isolate_->heap()->natives_source_cache()->get(i); | 627 Object* source = isolate_->heap()->natives_source_cache()->get(i); |
| 702 if (!source->IsUndefined()) { | 628 if (!source->IsUndefined()) { |
| 703 ExternalAsciiString::cast(source)->update_data_cache(); | 629 ExternalAsciiString::cast(source)->update_data_cache(); |
| 704 } | |
| 705 } | 630 } |
| 706 } | 631 } |
| 707 | 632 |
| 708 // Issue code events for newly deserialized code objects. | 633 // Issue code events for newly deserialized code objects. |
| 709 LOG_CODE_EVENT(isolate_, LogCodeObjects()); | 634 LOG_CODE_EVENT(isolate_, LogCodeObjects()); |
| 710 LOG_CODE_EVENT(isolate_, LogCompiledFunctions()); | 635 LOG_CODE_EVENT(isolate_, LogCompiledFunctions()); |
| 711 } | 636 } |
| 712 | 637 |
| 713 | 638 |
| 714 void Deserializer::DeserializePartial(Object** root) { | 639 void Deserializer::DeserializePartial(Object** root) { |
| 715 isolate_ = Isolate::Current(); | 640 isolate_ = Isolate::Current(); |
| 716 // Don't GC while deserializing - just expand the heap. | 641 for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) { |
| 717 AlwaysAllocateScope always_allocate; | 642 ASSERT(reservations_[i] != kUninitializedReservation); |
| 718 // Don't use the free lists while deserializing. | 643 } |
| 719 LinearAllocationScope allocate_linearly; | 644 isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]); |
| 720 if (external_reference_decoder_ == NULL) { | 645 if (external_reference_decoder_ == NULL) { |
| 721 external_reference_decoder_ = new ExternalReferenceDecoder(); | 646 external_reference_decoder_ = new ExternalReferenceDecoder(); |
| 722 } | 647 } |
| 723 | 648 |
| 724 // Keep track of the code space start and end pointers in case new | 649 // Keep track of the code space start and end pointers in case new |
| 725 // code objects were deserialized | 650 // code objects were deserialized |
| 726 OldSpace* code_space = isolate_->heap()->code_space(); | 651 OldSpace* code_space = isolate_->heap()->code_space(); |
| 727 Address start_address = code_space->top(); | 652 Address start_address = code_space->top(); |
| 728 VisitPointer(root); | 653 VisitPointer(root); |
| 729 | 654 |
| (...skipping 21 matching lines...) |
| 751 ReadChunk(start, end, NEW_SPACE, NULL); | 676 ReadChunk(start, end, NEW_SPACE, NULL); |
| 752 } | 677 } |
| 753 | 678 |
| 754 | 679 |
| 755 // This routine writes the new object into the pointer provided and then | 680 // This routine writes the new object into the pointer provided and then |
| 756 // returns true if the new object was in young space and false otherwise. | 681 // returns true if the new object was in young space and false otherwise. |
| 757 // The reason for this strange interface is that otherwise the object is | 682 // The reason for this strange interface is that otherwise the object is |
| 758 // written very late, which means the FreeSpace map is not set up by the | 683 // written very late, which means the FreeSpace map is not set up by the |
| 759 // time we need to use it to mark the space at the end of a page free. | 684 // time we need to use it to mark the space at the end of a page free. |
| 760 void Deserializer::ReadObject(int space_number, | 685 void Deserializer::ReadObject(int space_number, |
| 761 Space* space, | |
| 762 Object** write_back) { | 686 Object** write_back) { |
| 763 int size = source_->GetInt() << kObjectAlignmentBits; | 687 int size = source_->GetInt() << kObjectAlignmentBits; |
| 764 Address address = Allocate(space_number, space, size); | 688 Address address = Allocate(space_number, size); |
| 765 *write_back = HeapObject::FromAddress(address); | 689 *write_back = HeapObject::FromAddress(address); |
| 766 Object** current = reinterpret_cast<Object**>(address); | 690 Object** current = reinterpret_cast<Object**>(address); |
| 767 Object** limit = current + (size >> kPointerSizeLog2); | 691 Object** limit = current + (size >> kPointerSizeLog2); |
| 768 if (FLAG_log_snapshot_positions) { | 692 if (FLAG_log_snapshot_positions) { |
| 769 LOG(isolate_, SnapshotPositionEvent(address, source_->position())); | 693 LOG(isolate_, SnapshotPositionEvent(address, source_->position())); |
| 770 } | 694 } |
| 771 ReadChunk(current, limit, space_number, address); | 695 ReadChunk(current, limit, space_number, address); |
| 772 #ifdef DEBUG | 696 #ifdef DEBUG |
| 773 bool is_codespace = (space == HEAP->code_space()) || | 697 bool is_codespace = (space_number == CODE_SPACE); |
| 774 ((space == HEAP->lo_space()) && (space_number == kLargeCode)); | |
| 775 ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace); | 698 ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace); |
| 776 #endif | 699 #endif |
| 777 } | 700 } |
| 778 | 701 |
| 779 | |
| 780 // This macro is always used with a constant argument so it should all fold | |
| 781 // away to almost nothing in the generated code. It might be nicer to do this | |
| 782 // with the ternary operator but there are type issues with that. | |
| 783 #define ASSIGN_DEST_SPACE(space_number) \ | |
| 784 Space* dest_space; \ | |
| 785 if (space_number == NEW_SPACE) { \ | |
| 786 dest_space = isolate->heap()->new_space(); \ | |
| 787 } else if (space_number == OLD_POINTER_SPACE) { \ | |
| 788 dest_space = isolate->heap()->old_pointer_space(); \ | |
| 789 } else if (space_number == OLD_DATA_SPACE) { \ | |
| 790 dest_space = isolate->heap()->old_data_space(); \ | |
| 791 } else if (space_number == CODE_SPACE) { \ | |
| 792 dest_space = isolate->heap()->code_space(); \ | |
| 793 } else if (space_number == MAP_SPACE) { \ | |
| 794 dest_space = isolate->heap()->map_space(); \ | |
| 795 } else if (space_number == CELL_SPACE) { \ | |
| 796 dest_space = isolate->heap()->cell_space(); \ | |
| 797 } else { \ | |
| 798 ASSERT(space_number >= LO_SPACE); \ | |
| 799 dest_space = isolate->heap()->lo_space(); \ | |
| 800 } | |
| 801 | |
| 802 | |
| 803 static const int kUnknownOffsetFromStart = -1; | |
| 804 | |
| 805 | |
| 806 void Deserializer::ReadChunk(Object** current, | 702 void Deserializer::ReadChunk(Object** current, |
| 807 Object** limit, | 703 Object** limit, |
| 808 int source_space, | 704 int source_space, |
| 809 Address current_object_address) { | 705 Address current_object_address) { |
| 810 Isolate* const isolate = isolate_; | 706 Isolate* const isolate = isolate_; |
| 707 // Write barrier support costs around 1% in startup time. In fact there |
| 708 // are no new space objects in current boot snapshots, so it's not needed, |
| 709 // but that may change. |
| 811 bool write_barrier_needed = (current_object_address != NULL && | 710 bool write_barrier_needed = (current_object_address != NULL && |
| 812 source_space != NEW_SPACE && | 711 source_space != NEW_SPACE && |
| 813 source_space != CELL_SPACE && | 712 source_space != CELL_SPACE && |
| 814 source_space != CODE_SPACE && | 713 source_space != CODE_SPACE && |
| 815 source_space != OLD_DATA_SPACE); | 714 source_space != OLD_DATA_SPACE); |
| 816 while (current < limit) { | 715 while (current < limit) { |
| 817 int data = source_->Get(); | 716 int data = source_->Get(); |
| 818 switch (data) { | 717 switch (data) { |
| 819 #define CASE_STATEMENT(where, how, within, space_number) \ | 718 #define CASE_STATEMENT(where, how, within, space_number) \ |
| 820 case where + how + within + space_number: \ | 719 case where + how + within + space_number: \ |
| 821 ASSERT((where & ~kPointedToMask) == 0); \ | 720 ASSERT((where & ~kPointedToMask) == 0); \ |
| 822 ASSERT((how & ~kHowToCodeMask) == 0); \ | 721 ASSERT((how & ~kHowToCodeMask) == 0); \ |
| 823 ASSERT((within & ~kWhereToPointMask) == 0); \ | 722 ASSERT((within & ~kWhereToPointMask) == 0); \ |
| 824 ASSERT((space_number & ~kSpaceMask) == 0); | 723 ASSERT((space_number & ~kSpaceMask) == 0); |
| 825 | 724 |
| 826 #define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \ | 725 #define CASE_BODY(where, how, within, space_number_if_any) \ |
| 827 { \ | 726 { \ |
| 828 bool emit_write_barrier = false; \ | 727 bool emit_write_barrier = false; \ |
| 829 bool current_was_incremented = false; \ | 728 bool current_was_incremented = false; \ |
| 830 int space_number = space_number_if_any == kAnyOldSpace ? \ | 729 int space_number = space_number_if_any == kAnyOldSpace ? \ |
| 831 (data & kSpaceMask) : space_number_if_any; \ | 730 (data & kSpaceMask) : space_number_if_any; \ |
| 832 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ | 731 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ |
| 833 ASSIGN_DEST_SPACE(space_number) \ | 732 ReadObject(space_number, current); \ |
| 834 ReadObject(space_number, dest_space, current); \ | |
| 835 emit_write_barrier = (space_number == NEW_SPACE); \ | 733 emit_write_barrier = (space_number == NEW_SPACE); \ |
| 836 } else { \ | 734 } else { \ |
| 837 Object* new_object = NULL; /* May not be a real Object pointer. */ \ | 735 Object* new_object = NULL; /* May not be a real Object pointer. */ \ |
| 838 if (where == kNewObject) { \ | 736 if (where == kNewObject) { \ |
| 839 ASSIGN_DEST_SPACE(space_number) \ | 737 ReadObject(space_number, &new_object); \ |
| 840 ReadObject(space_number, dest_space, &new_object); \ | |
| 841 } else if (where == kRootArray) { \ | 738 } else if (where == kRootArray) { \ |
| 842 int root_id = source_->GetInt(); \ | 739 int root_id = source_->GetInt(); \ |
| 843 new_object = isolate->heap()->roots_array_start()[root_id]; \ | 740 new_object = isolate->heap()->roots_array_start()[root_id]; \ |
| 844 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | 741 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
| 845 } else if (where == kPartialSnapshotCache) { \ | 742 } else if (where == kPartialSnapshotCache) { \ |
| 846 int cache_index = source_->GetInt(); \ | 743 int cache_index = source_->GetInt(); \ |
| 847 new_object = isolate->serialize_partial_snapshot_cache() \ | 744 new_object = isolate->serialize_partial_snapshot_cache() \ |
| 848 [cache_index]; \ | 745 [cache_index]; \ |
| 849 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | 746 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
| 850 } else if (where == kExternalReference) { \ | 747 } else if (where == kExternalReference) { \ |
| 748 int skip = source_->GetInt(); \ |
| 749 current = reinterpret_cast<Object**>(reinterpret_cast<Address>( \ |
| 750 current) + skip); \ |
| 851 int reference_id = source_->GetInt(); \ | 751 int reference_id = source_->GetInt(); \ |
| 852 Address address = external_reference_decoder_-> \ | 752 Address address = external_reference_decoder_-> \ |
| 853 Decode(reference_id); \ | 753 Decode(reference_id); \ |
| 854 new_object = reinterpret_cast<Object*>(address); \ | 754 new_object = reinterpret_cast<Object*>(address); \ |
| 855 } else if (where == kBackref) { \ | 755 } else if (where == kBackref) { \ |
| 856 emit_write_barrier = (space_number == NEW_SPACE); \ | 756 emit_write_barrier = (space_number == NEW_SPACE); \ |
| 857 new_object = GetAddressFromEnd(data & kSpaceMask); \ | 757 new_object = GetAddressFromEnd(data & kSpaceMask); \ |
| 858 } else { \ | 758 } else { \ |
| 859 ASSERT(where == kFromStart); \ | 759 ASSERT(where == kBackrefWithSkip); \ |
| 860 if (offset_from_start == kUnknownOffsetFromStart) { \ | 760 int skip = source_->GetInt(); \ |
| 861 emit_write_barrier = (space_number == NEW_SPACE); \ | 761 current = reinterpret_cast<Object**>( \ |
| 862 new_object = GetAddressFromStart(data & kSpaceMask); \ | 762 reinterpret_cast<Address>(current) + skip); \ |
| 863 } else { \ | 763 emit_write_barrier = (space_number == NEW_SPACE); \ |
| 864 Address object_address = pages_[space_number][0] + \ | 764 new_object = GetAddressFromEnd(data & kSpaceMask); \ |
| 865 (offset_from_start << kObjectAlignmentBits); \ | |
| 866 new_object = HeapObject::FromAddress(object_address); \ | |
| 867 } \ | |
| 868 } \ | 765 } \ |
| 869 if (within == kInnerPointer) { \ | 766 if (within == kInnerPointer) { \ |
| 870 if (space_number != CODE_SPACE || new_object->IsCode()) { \ | 767 if (space_number != CODE_SPACE || new_object->IsCode()) { \ |
| 871 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ | 768 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ |
| 872 new_object = reinterpret_cast<Object*>( \ | 769 new_object = reinterpret_cast<Object*>( \ |
| 873 new_code_object->instruction_start()); \ | 770 new_code_object->instruction_start()); \ |
| 874 } else { \ | 771 } else { \ |
| 875 ASSERT(space_number == CODE_SPACE || space_number == kLargeCode);\ | 772 ASSERT(space_number == CODE_SPACE); \ |
| 876 JSGlobalPropertyCell* cell = \ | 773 JSGlobalPropertyCell* cell = \ |
| 877 JSGlobalPropertyCell::cast(new_object); \ | 774 JSGlobalPropertyCell::cast(new_object); \ |
| 878 new_object = reinterpret_cast<Object*>( \ | 775 new_object = reinterpret_cast<Object*>( \ |
| 879 cell->ValueAddress()); \ | 776 cell->ValueAddress()); \ |
| 880 } \ | 777 } \ |
| 881 } \ | 778 } \ |
| 882 if (how == kFromCode) { \ | 779 if (how == kFromCode) { \ |
| 883 Address location_of_branch_data = \ | 780 Address location_of_branch_data = \ |
| 884 reinterpret_cast<Address>(current); \ | 781 reinterpret_cast<Address>(current); \ |
| 885 Assembler::deserialization_set_special_target_at( \ | 782 Assembler::deserialization_set_special_target_at( \ |
| (...skipping 11 matching lines...) Expand all Loading... |
| 897 isolate->heap()->RecordWrite( \ | 794 isolate->heap()->RecordWrite( \ |
| 898 current_object_address, \ | 795 current_object_address, \ |
| 899 static_cast<int>(current_address - current_object_address)); \ | 796 static_cast<int>(current_address - current_object_address)); \ |
| 900 } \ | 797 } \ |
| 901 if (!current_was_incremented) { \ | 798 if (!current_was_incremented) { \ |
| 902 current++; \ | 799 current++; \ |
| 903 } \ | 800 } \ |
| 904 break; \ | 801 break; \ |
| 905 } \ | 802 } \ |
| 906 | 803 |
| 907 // This generates a case and a body for each space. The large object spaces are | |
| 908 // very rare in snapshots so they are grouped in one body. | |
| 909 #define ONE_PER_SPACE(where, how, within) \ | |
| 910 CASE_STATEMENT(where, how, within, NEW_SPACE) \ | |
| 911 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ | |
| 912 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ | |
| 913 CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \ | |
| 914 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ | |
| 915 CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \ | |
| 916 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | |
| 917 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ | |
| 918 CASE_STATEMENT(where, how, within, CELL_SPACE) \ | |
| 919 CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \ | |
| 920 CASE_STATEMENT(where, how, within, MAP_SPACE) \ | |
| 921 CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \ | |
| 922 CASE_STATEMENT(where, how, within, kLargeData) \ | |
| 923 CASE_STATEMENT(where, how, within, kLargeCode) \ | |
| 924 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ | |
| 925 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) | |
| 926 | |
| 927 // This generates a case and a body for the new space (which has to do extra | 804 // This generates a case and a body for the new space (which has to do extra |
| 928 // write barrier handling) and handles the other spaces with 8 fall-through | 805 // write barrier handling) and handles the other spaces with 8 fall-through |
| 929 // cases and one body. | 806 // cases and one body. |
| 930 #define ALL_SPACES(where, how, within) \ | 807 #define ALL_SPACES(where, how, within) \ |
| 931 CASE_STATEMENT(where, how, within, NEW_SPACE) \ | 808 CASE_STATEMENT(where, how, within, NEW_SPACE) \ |
| 932 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ | 809 CASE_BODY(where, how, within, NEW_SPACE) \ |
| 933 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ | 810 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ |
| 934 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ | 811 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ |
| 935 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | 812 CASE_STATEMENT(where, how, within, CODE_SPACE) \ |
| 936 CASE_STATEMENT(where, how, within, CELL_SPACE) \ | 813 CASE_STATEMENT(where, how, within, CELL_SPACE) \ |
| 937 CASE_STATEMENT(where, how, within, MAP_SPACE) \ | 814 CASE_STATEMENT(where, how, within, MAP_SPACE) \ |
| 938 CASE_STATEMENT(where, how, within, kLargeData) \ | 815 CASE_BODY(where, how, within, kAnyOldSpace) |
| 939 CASE_STATEMENT(where, how, within, kLargeCode) \ | |
| 940 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ | |
| 941 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) | |
| 942 | |
| 943 #define ONE_PER_CODE_SPACE(where, how, within) \ | |
| 944 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | |
| 945 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ | |
| 946 CASE_STATEMENT(where, how, within, kLargeCode) \ | |
| 947 CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart) | |
| 948 | 816 |
| 949 #define FOUR_CASES(byte_code) \ | 817 #define FOUR_CASES(byte_code) \ |
| 950 case byte_code: \ | 818 case byte_code: \ |
| 951 case byte_code + 1: \ | 819 case byte_code + 1: \ |
| 952 case byte_code + 2: \ | 820 case byte_code + 2: \ |
| 953 case byte_code + 3: | 821 case byte_code + 3: |
| 954 | 822 |
| 955 #define SIXTEEN_CASES(byte_code) \ | 823 #define SIXTEEN_CASES(byte_code) \ |
| 956 FOUR_CASES(byte_code) \ | 824 FOUR_CASES(byte_code) \ |
| 957 FOUR_CASES(byte_code + 4) \ | 825 FOUR_CASES(byte_code + 4) \ |
| 958 FOUR_CASES(byte_code + 8) \ | 826 FOUR_CASES(byte_code + 8) \ |
| 959 FOUR_CASES(byte_code + 12) | 827 FOUR_CASES(byte_code + 12) |
| 960 | 828 |
| 829 #define COMMON_RAW_LENGTHS(f) \ |
| 830 f(1) \ |
| 831 f(2) \ |
| 832 f(3) \ |
| 833 f(4) \ |
| 834 f(5) \ |
| 835 f(6) \ |
| 836 f(7) \ |
| 837 f(8) \ |
| 838 f(9) \ |
| 839 f(10) \ |
| 840 f(11) \ |
| 841 f(12) \ |
| 842 f(13) \ |
| 843 f(14) \ |
| 844 f(15) \ |
| 845 f(16) \ |
| 846 f(17) \ |
| 847 f(18) \ |
| 848 f(19) \ |
| 849 f(20) \ |
| 850 f(21) \ |
| 851 f(22) \ |
| 852 f(23) \ |
| 853 f(24) \ |
| 854 f(25) \ |
| 855 f(26) \ |
| 856 f(27) \ |
| 857 f(28) \ |
| 858 f(29) \ |
| 859 f(30) \ |
| 860 f(31) |
| 861 |
| 961 // We generate 15 cases and bodies that process special tags that combine | 862 // We generate 15 cases and bodies that process special tags that combine |
| 962 // the raw data tag and the length into one byte. | 863 // the raw data tag and the length into one byte. |
| 963 #define RAW_CASE(index, size) \ | 864 #define RAW_CASE(index) \ |
| 964 case kRawData + index: { \ | 865 case kRawData + index: { \ |
| 965 byte* raw_data_out = reinterpret_cast<byte*>(current); \ | 866 byte* raw_data_out = reinterpret_cast<byte*>(current); \ |
| 966 source_->CopyRaw(raw_data_out, size); \ | 867 source_->CopyRaw(raw_data_out, index * kPointerSize); \ |
| 967 current = reinterpret_cast<Object**>(raw_data_out + size); \ | 868 current = \ |
| 968 break; \ | 869 reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \ |
| 870 break; \ |
| 969 } | 871 } |
| 970 COMMON_RAW_LENGTHS(RAW_CASE) | 872 COMMON_RAW_LENGTHS(RAW_CASE) |
| 971 #undef RAW_CASE | 873 #undef RAW_CASE |
| 972 | 874 |
| 973 // Deserialize a chunk of raw data that doesn't have one of the popular | 875 // Deserialize a chunk of raw data that doesn't have one of the popular |
| 974 // lengths. | 876 // lengths. |
| 975 case kRawData: { | 877 case kRawData: { |
| 976 int size = source_->GetInt(); | 878 int size = source_->GetInt(); |
| 977 byte* raw_data_out = reinterpret_cast<byte*>(current); | 879 byte* raw_data_out = reinterpret_cast<byte*>(current); |
| 978 source_->CopyRaw(raw_data_out, size); | 880 source_->CopyRaw(raw_data_out, size); |
| 979 current = reinterpret_cast<Object**>(raw_data_out + size); | |
| 980 break; | 881 break; |
| 981 } | 882 } |
| 982 | 883 |
| 983 SIXTEEN_CASES(kRootArrayLowConstants) | 884 SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance) |
| 984 SIXTEEN_CASES(kRootArrayHighConstants) { | 885 SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) { |
| 985 int root_id = RootArrayConstantFromByteCode(data); | 886 int root_id = RootArrayConstantFromByteCode(data); |
| 986 Object* object = isolate->heap()->roots_array_start()[root_id]; | 887 Object* object = isolate->heap()->roots_array_start()[root_id]; |
| 987 ASSERT(!isolate->heap()->InNewSpace(object)); | 888 ASSERT(!isolate->heap()->InNewSpace(object)); |
| 988 *current++ = object; | 889 *current++ = object; |
| 989 break; | 890 break; |
| 990 } | 891 } |
| 991 | 892 |
| 893 SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance) |
| 894 SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) { |
| 895 int root_id = RootArrayConstantFromByteCode(data); |
| 896 int skip = source_->GetInt(); |
| 897 current = reinterpret_cast<Object**>( |
| 898 reinterpret_cast<intptr_t>(current) + skip); |
| 899 Object* object = isolate->heap()->roots_array_start()[root_id]; |
| 900 ASSERT(!isolate->heap()->InNewSpace(object)); |
| 901 *current++ = object; |
| 902 break; |
| 903 } |
| 904 |
| 992 case kRepeat: { | 905 case kRepeat: { |
| 993 int repeats = source_->GetInt(); | 906 int repeats = source_->GetInt(); |
| 994 Object* object = current[-1]; | 907 Object* object = current[-1]; |
| 995 ASSERT(!isolate->heap()->InNewSpace(object)); | 908 ASSERT(!isolate->heap()->InNewSpace(object)); |
| 996 for (int i = 0; i < repeats; i++) current[i] = object; | 909 for (int i = 0; i < repeats; i++) current[i] = object; |
| 997 current += repeats; | 910 current += repeats; |
| 998 break; | 911 break; |
| 999 } | 912 } |
| 1000 | 913 |
| 1001 STATIC_ASSERT(kRootArrayNumberOfConstantEncodings == | 914 STATIC_ASSERT(kRootArrayNumberOfConstantEncodings == |
| 1002 Heap::kOldSpaceRoots); | 915 Heap::kOldSpaceRoots); |
| 1003 STATIC_ASSERT(kMaxRepeats == 12); | 916 STATIC_ASSERT(kMaxRepeats == 13); |
| 1004 FOUR_CASES(kConstantRepeat) | 917 case kConstantRepeat: |
| 1005 FOUR_CASES(kConstantRepeat + 4) | 918 FOUR_CASES(kConstantRepeat + 1) |
| 1006 FOUR_CASES(kConstantRepeat + 8) { | 919 FOUR_CASES(kConstantRepeat + 5) |
| 920 FOUR_CASES(kConstantRepeat + 9) { |
| 1007 int repeats = RepeatsForCode(data); | 921 int repeats = RepeatsForCode(data); |
| 1008 Object* object = current[-1]; | 922 Object* object = current[-1]; |
| 1009 ASSERT(!isolate->heap()->InNewSpace(object)); | 923 ASSERT(!isolate->heap()->InNewSpace(object)); |
| 1010 for (int i = 0; i < repeats; i++) current[i] = object; | 924 for (int i = 0; i < repeats; i++) current[i] = object; |
| 1011 current += repeats; | 925 current += repeats; |
| 1012 break; | 926 break; |
| 1013 } | 927 } |
| 1014 | 928 |
| 1015 // Deserialize a new object and write a pointer to it to the current | 929 // Deserialize a new object and write a pointer to it to the current |
| 1016 // object. | 930 // object. |
| 1017 ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject) | 931 ALL_SPACES(kNewObject, kPlain, kStartOfObject) |
| 1018 // Support for direct instruction pointers in functions. It's an inner | 932 // Support for direct instruction pointers in functions. It's an inner |
| 1019 // pointer because it points at the entry point, not at the start of the | 933 // pointer because it points at the entry point, not at the start of the |
| 1020 // code object. | 934 // code object. |
| 1021 ONE_PER_CODE_SPACE(kNewObject, kPlain, kInnerPointer) | 935 CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE) |
| 936 CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE) |
| 1022 // Deserialize a new code object and write a pointer to its first | 937 // Deserialize a new code object and write a pointer to its first |
| 1023 // instruction to the current code object. | 938 // instruction to the current code object. |
| 1024 ONE_PER_SPACE(kNewObject, kFromCode, kInnerPointer) | 939 ALL_SPACES(kNewObject, kFromCode, kInnerPointer) |
| 1025 // Find a recently deserialized object using its offset from the current | 940 // Find a recently deserialized object using its offset from the current |
| 1026 // allocation point and write a pointer to it to the current object. | 941 // allocation point and write a pointer to it to the current object. |
| 1027 ALL_SPACES(kBackref, kPlain, kStartOfObject) | 942 ALL_SPACES(kBackref, kPlain, kStartOfObject) |
| 943 ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject) |
| 1028 #if V8_TARGET_ARCH_MIPS | 944 #if V8_TARGET_ARCH_MIPS |
| 1029 // Deserialize a new object from pointer found in code and write | 945 // Deserialize a new object from pointer found in code and write |
| 1030 // a pointer to it to the current object. Required only for MIPS, and | 946 // a pointer to it to the current object. Required only for MIPS, and |
| 1031 // omitted on the other architectures because it is fully unrolled and | 947 // omitted on the other architectures because it is fully unrolled and |
| 1032 // would cause bloat. | 948 // would cause bloat. |
| 1033 ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject) | 949 ALL_SPACES(kNewObject, kFromCode, kStartOfObject) |
| 1034 // Find a recently deserialized code object using its offset from the | 950 // Find a recently deserialized code object using its offset from the |
| 1035 // current allocation point and write a pointer to it to the current | 951 // current allocation point and write a pointer to it to the current |
| 1036 // object. Required only for MIPS. | 952 // object. Required only for MIPS. |
| 1037 ALL_SPACES(kBackref, kFromCode, kStartOfObject) | 953 ALL_SPACES(kBackref, kFromCode, kStartOfObject) |
| 1038 // Find an already deserialized code object using its offset from | 954 ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject) |
| 1039 // the start and write a pointer to it to the current object. | |
| 1040 // Required only for MIPS. | |
| 1041 ALL_SPACES(kFromStart, kFromCode, kStartOfObject) | |
| 1042 #endif | 955 #endif |
| 1043 // Find a recently deserialized code object using its offset from the | 956 // Find a recently deserialized code object using its offset from the |
| 1044 // current allocation point and write a pointer to its first instruction | 957 // current allocation point and write a pointer to its first instruction |
| 1045 // to the current code object or the instruction pointer in a function | 958 // to the current code object or the instruction pointer in a function |
| 1046 // object. | 959 // object. |
| 1047 ALL_SPACES(kBackref, kFromCode, kInnerPointer) | 960 ALL_SPACES(kBackref, kFromCode, kInnerPointer) |
| 961 ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer) |
| 1048 ALL_SPACES(kBackref, kPlain, kInnerPointer) | 962 ALL_SPACES(kBackref, kPlain, kInnerPointer) |
| 1049 // Find an already deserialized object using its offset from the start | 963 ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer) |
| 1050 // and write a pointer to it to the current object. | |
| 1051 ALL_SPACES(kFromStart, kPlain, kStartOfObject) | |
| 1052 ALL_SPACES(kFromStart, kPlain, kInnerPointer) | |
| 1053 // Find an already deserialized code object using its offset from the | |
| 1054 // start and write a pointer to its first instruction to the current code | |
| 1055 // object. | |
| 1056 ALL_SPACES(kFromStart, kFromCode, kInnerPointer) | |
| 1057 // Find an object in the roots array and write a pointer to it to the | 964 // Find an object in the roots array and write a pointer to it to the |
| 1058 // current object. | 965 // current object. |
| 1059 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) | 966 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) |
| 1060 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart) | 967 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0) |
| 1061 // Find an object in the partial snapshots cache and write a pointer to it | 968 // Find an object in the partial snapshots cache and write a pointer to it |
| 1062 // to the current object. | 969 // to the current object. |
| 1063 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) | 970 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) |
| 1064 CASE_BODY(kPartialSnapshotCache, | 971 CASE_BODY(kPartialSnapshotCache, |
| 1065 kPlain, | 972 kPlain, |
| 1066 kStartOfObject, | 973 kStartOfObject, |
| 1067 0, | 974 0) |
| 1068 kUnknownOffsetFromStart) | |
| 1069 // Find a code entry in the partial snapshots cache and | 975 // Find a code entry in the partial snapshots cache and |
| 1070 // write a pointer to it to the current object. | 976 // write a pointer to it to the current object. |
| 1071 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0) | 977 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0) |
| 1072 CASE_BODY(kPartialSnapshotCache, | 978 CASE_BODY(kPartialSnapshotCache, |
| 1073 kPlain, | 979 kPlain, |
| 1074 kInnerPointer, | 980 kInnerPointer, |
| 1075 0, | 981 0) |
| 1076 kUnknownOffsetFromStart) | |
| 1077 // Find an external reference and write a pointer to it to the current | 982 // Find an external reference and write a pointer to it to the current |
| 1078 // object. | 983 // object. |
| 1079 CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0) | 984 CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0) |
| 1080 CASE_BODY(kExternalReference, | 985 CASE_BODY(kExternalReference, |
| 1081 kPlain, | 986 kPlain, |
| 1082 kStartOfObject, | 987 kStartOfObject, |
| 1083 0, | 988 0) |
| 1084 kUnknownOffsetFromStart) | |
| 1085 // Find an external reference and write a pointer to it in the current | 989 // Find an external reference and write a pointer to it in the current |
| 1086 // code object. | 990 // code object. |
| 1087 CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0) | 991 CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0) |
| 1088 CASE_BODY(kExternalReference, | 992 CASE_BODY(kExternalReference, |
| 1089 kFromCode, | 993 kFromCode, |
| 1090 kStartOfObject, | 994 kStartOfObject, |
| 1091 0, | 995 0) |
| 1092 kUnknownOffsetFromStart) | |
| 1093 | 996 |
| 1094 #undef CASE_STATEMENT | 997 #undef CASE_STATEMENT |
| 1095 #undef CASE_BODY | 998 #undef CASE_BODY |
| 1096 #undef ONE_PER_SPACE | |
| 1097 #undef ALL_SPACES | 999 #undef ALL_SPACES |
| 1098 #undef ASSIGN_DEST_SPACE | |
| 1099 | 1000 |
| 1100 case kNewPage: { | 1001 case kSkip: { |
| 1101 int space = source_->Get(); | 1002 int size = source_->GetInt(); |
| 1102 pages_[space].Add(last_object_address_); | 1003 current = reinterpret_cast<Object**>( |
| 1103 if (space == CODE_SPACE) { | 1004 reinterpret_cast<intptr_t>(current) + size); |
| 1104 CPU::FlushICache(last_object_address_, Page::kPageSize); | |
| 1105 } | |
| 1106 break; | 1005 break; |
| 1107 } | 1006 } |
| 1108 | 1007 |
| 1109 case kSkip: { | |
| 1110 current++; | |
| 1111 break; | |
| 1112 } | |
| 1113 | |
| 1114 case kNativesStringResource: { | 1008 case kNativesStringResource: { |
| 1115 int index = source_->Get(); | 1009 int index = source_->Get(); |
| 1116 Vector<const char> source_vector = Natives::GetRawScriptSource(index); | 1010 Vector<const char> source_vector = Natives::GetRawScriptSource(index); |
| 1117 NativesExternalStringResource* resource = | 1011 NativesExternalStringResource* resource = |
| 1118 new NativesExternalStringResource(isolate->bootstrapper(), | 1012 new NativesExternalStringResource(isolate->bootstrapper(), |
| 1119 source_vector.start(), | 1013 source_vector.start(), |
| 1120 source_vector.length()); | 1014 source_vector.length()); |
| 1121 *current++ = reinterpret_cast<Object*>(resource); | 1015 *current++ = reinterpret_cast<Object*>(resource); |
| 1122 break; | 1016 break; |
| 1123 } | 1017 } |
| 1124 | 1018 |
| 1125 case kSynchronize: { | 1019 case kSynchronize: { |
| 1126 // If we get here then that indicates that you have a mismatch between | 1020 // If we get here then that indicates that you have a mismatch between |
| 1127 // the number of GC roots when serializing and deserializing. | 1021 // the number of GC roots when serializing and deserializing. |
| 1128 UNREACHABLE(); | 1022 UNREACHABLE(); |
| 1129 } | 1023 } |
| 1130 | 1024 |
| 1131 default: | 1025 default: |
| 1132 UNREACHABLE(); | 1026 UNREACHABLE(); |
| 1133 } | 1027 } |
| 1134 } | 1028 } |
| 1135 ASSERT_EQ(current, limit); | 1029 ASSERT_EQ(limit, current); |
| 1136 } | 1030 } |
| 1137 | 1031 |
| 1138 | 1032 |
| 1139 void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { | 1033 void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { |
| 1140 const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7; | 1034 ASSERT(integer < 1 << 22); |
| 1141 for (int shift = max_shift; shift > 0; shift -= 7) { | 1035 integer <<= 2; |
| 1142 if (integer >= static_cast<uintptr_t>(1u) << shift) { | 1036 int bytes = 1; |
| 1143 Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart"); | 1037 if (integer > 0xff) bytes = 2; |
| 1144 } | 1038 if (integer > 0xffff) bytes = 3; |
| 1145 } | 1039 integer |= bytes; |
| 1146 PutSection(static_cast<int>(integer & 0x7f), "IntLastPart"); | 1040 Put(static_cast<int>(integer & 0xff), "IntPart1"); |
| 1041 if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2"); |
| 1042 if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3"); |
| 1147 } | 1043 } |
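The new PutInt above packs a small integer into one to three bytes: the value is shifted left two bits and the low two bits record the byte count, so the reader knows the length from the first byte instead of scanning for a terminator as the old 7-bits-per-byte scheme did. Below is a minimal standalone sketch of that encoding plus a matching decoder derived from it; the real reader (SnapshotByteSource::GetInt) is not part of this diff, so the decode side is an assumption.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Encode as in the new PutInt: value << 2, low two bits hold the byte count.
void PutIntSketch(std::vector<uint8_t>* sink, uint32_t value) {
  assert(value < (1u << 22));              // same limit as the ASSERT above
  uint32_t packed = value << 2;
  int bytes = 1;
  if (packed > 0xff) bytes = 2;
  if (packed > 0xffff) bytes = 3;
  packed |= bytes;
  sink->push_back(packed & 0xff);                         // IntPart1
  if (bytes > 1) sink->push_back((packed >> 8) & 0xff);   // IntPart2
  if (bytes > 2) sink->push_back((packed >> 16) & 0xff);  // IntPart3
}

// Matching decoder, derived from the encoding above (hypothetical helper).
uint32_t GetIntSketch(const uint8_t* data, int* bytes_consumed) {
  uint32_t packed = data[0];
  int bytes = packed & 3;                  // low two bits = byte count
  if (bytes > 1) packed |= static_cast<uint32_t>(data[1]) << 8;
  if (bytes > 2) packed |= static_cast<uint32_t>(data[2]) << 16;
  *bytes_consumed = bytes;
  return packed >> 2;                      // drop the length bits
}
```

For example, 300 becomes the two bytes 0xB2 0x04 (300 << 2 = 0x4B0, plus length 2 in the low bits), and the decoder recovers both the value and how far to advance from the first byte alone.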
| 1148 | 1044 |
| 1149 | 1045 |
| 1150 Serializer::Serializer(SnapshotByteSink* sink) | 1046 Serializer::Serializer(SnapshotByteSink* sink) |
| 1151 : sink_(sink), | 1047 : sink_(sink), |
| 1152 current_root_index_(0), | 1048 current_root_index_(0), |
| 1153 external_reference_encoder_(new ExternalReferenceEncoder), | 1049 external_reference_encoder_(new ExternalReferenceEncoder), |
| 1154 large_object_total_(0), | |
| 1155 root_index_wave_front_(0) { | 1050 root_index_wave_front_(0) { |
| 1156 isolate_ = Isolate::Current(); | 1051 isolate_ = Isolate::Current(); |
| 1157 // The serializer is meant to be used only to generate initial heap images | 1052 // The serializer is meant to be used only to generate initial heap images |
| 1158 // from a context in which there is only one isolate. | 1053 // from a context in which there is only one isolate. |
| 1159 ASSERT(isolate_->IsDefaultIsolate()); | 1054 ASSERT(isolate_->IsDefaultIsolate()); |
| 1160 for (int i = 0; i <= LAST_SPACE; i++) { | 1055 for (int i = 0; i <= LAST_SPACE; i++) { |
| 1161 fullness_[i] = 0; | 1056 fullness_[i] = 0; |
| 1162 } | 1057 } |
| 1163 } | 1058 } |
| 1164 | 1059 |
| (...skipping 12 matching lines...) |
| 1177 CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); | 1072 CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); |
| 1178 // We don't support serializing installed extensions. | 1073 // We don't support serializing installed extensions. |
| 1179 CHECK(!isolate->has_installed_extensions()); | 1074 CHECK(!isolate->has_installed_extensions()); |
| 1180 | 1075 |
| 1181 HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG); | 1076 HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG); |
| 1182 } | 1077 } |
| 1183 | 1078 |
| 1184 | 1079 |
| 1185 void PartialSerializer::Serialize(Object** object) { | 1080 void PartialSerializer::Serialize(Object** object) { |
| 1186 this->VisitPointer(object); | 1081 this->VisitPointer(object); |
| 1082 Pad(); |
| 1187 } | 1083 } |
| 1188 | 1084 |
| 1189 | 1085 |
| 1190 void Serializer::VisitPointers(Object** start, Object** end) { | 1086 void Serializer::VisitPointers(Object** start, Object** end) { |
| 1191 Isolate* isolate = Isolate::Current(); | 1087 Isolate* isolate = Isolate::Current(); |
| 1192 | 1088 |
| 1193 for (Object** current = start; current < end; current++) { | 1089 for (Object** current = start; current < end; current++) { |
| 1194 if (start == isolate->heap()->roots_array_start()) { | 1090 if (start == isolate->heap()->roots_array_start()) { |
| 1195 root_index_wave_front_ = | 1091 root_index_wave_front_ = |
| 1196 Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); | 1092 Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); |
| 1197 } | 1093 } |
| 1198 if (reinterpret_cast<Address>(current) == | 1094 if (reinterpret_cast<Address>(current) == |
| 1199 isolate->heap()->store_buffer()->TopAddress()) { | 1095 isolate->heap()->store_buffer()->TopAddress()) { |
| 1200 sink_->Put(kSkip, "Skip"); | 1096 sink_->Put(kSkip, "Skip"); |
| 1097 sink_->PutInt(kPointerSize, "SkipOneWord"); |
| 1201 } else if ((*current)->IsSmi()) { | 1098 } else if ((*current)->IsSmi()) { |
| 1202 sink_->Put(kRawData, "RawData"); | 1099 sink_->Put(kRawData + 1, "Smi"); |
| 1203 sink_->PutInt(kPointerSize, "length"); | |
| 1204 for (int i = 0; i < kPointerSize; i++) { | 1100 for (int i = 0; i < kPointerSize; i++) { |
| 1205 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); | 1101 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); |
| 1206 } | 1102 } |
| 1207 } else { | 1103 } else { |
| 1208 SerializeObject(*current, kPlain, kStartOfObject); | 1104 SerializeObject(*current, kPlain, kStartOfObject, 0); |
| 1209 } | 1105 } |
| 1210 } | 1106 } |
| 1211 } | 1107 } |
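VisitPointers above shows the new use of kSkip: instead of the old one-word skip opcode, the serializer now writes an explicit byte distance (here kPointerSize, to step over the store buffer top word), and the deserializer's kSkip case simply advances its write cursor by that many bytes. The following sketch illustrates the idea with a toy slot stream; the tags and layout are illustrative stand-ins, not the real byte codes from serialize.h.

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative tags; the real byte codes are defined elsewhere in serialize.h.
enum ToyTag : uint8_t { kToySkip = 0xf0, kToyWord = 0xf1 };

// Serializer side: either record a word or record "leave this slot alone".
void EmitSlots(const intptr_t* slots, const bool* skip, int count,
               std::vector<uint8_t>* sink) {
  for (int i = 0; i < count; i++) {
    if (skip[i]) {
      sink->push_back(kToySkip);
      sink->push_back(sizeof(intptr_t));   // skip distance in bytes
    } else {
      sink->push_back(kToyWord);
      const uint8_t* raw = reinterpret_cast<const uint8_t*>(&slots[i]);
      sink->insert(sink->end(), raw, raw + sizeof(intptr_t));
    }
  }
}

// Deserializer side: a skip only advances the cursor, so whatever value is
// already in that slot survives deserialization -- the same effect the kSkip
// case in ReadChunk achieves for the store buffer top word.
void ReadSlots(const std::vector<uint8_t>& stream, intptr_t* out) {
  size_t pos = 0;
  uint8_t* cursor = reinterpret_cast<uint8_t*>(out);
  while (pos < stream.size()) {
    uint8_t tag = stream[pos++];
    if (tag == kToySkip) {
      cursor += stream[pos++];
    } else {
      std::memcpy(cursor, &stream[pos], sizeof(intptr_t));
      pos += sizeof(intptr_t);
      cursor += sizeof(intptr_t);
    }
  }
}
```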
| 1212 | 1108 |
| 1213 | 1109 |
| 1214 // This ensures that the partial snapshot cache keeps things alive during GC and | 1110 // This ensures that the partial snapshot cache keeps things alive during GC and |
| 1215 // tracks their movement. When it is called during serialization of the startup | 1111 // tracks their movement. When it is called during serialization of the startup |
| 1216 // snapshot nothing happens. When the partial (context) snapshot is created, | 1112 // snapshot nothing happens. When the partial (context) snapshot is created, |
| 1217 // this array is populated with the pointers that the partial snapshot will | 1113 // this array is populated with the pointers that the partial snapshot will |
| 1218 // need. As that happens we emit serialized objects to the startup snapshot | 1114 // need. As that happens we emit serialized objects to the startup snapshot |
| (...skipping 66 matching lines...) |
| 1285 | 1181 |
| 1286 | 1182 |
| 1287 // Encode the location of an already deserialized object in order to write its | 1183 // Encode the location of an already deserialized object in order to write its |
| 1288 // location into a later object. We can encode the location as an offset from | 1184 // location into a later object. We can encode the location as an offset from |
| 1289 // the start of the deserialized objects or as an offset backwards from the | 1185 // the start of the deserialized objects or as an offset backwards from the |
| 1290 // current allocation pointer. | 1186 // current allocation pointer. |
| 1291 void Serializer::SerializeReferenceToPreviousObject( | 1187 void Serializer::SerializeReferenceToPreviousObject( |
| 1292 int space, | 1188 int space, |
| 1293 int address, | 1189 int address, |
| 1294 HowToCode how_to_code, | 1190 HowToCode how_to_code, |
| 1295 WhereToPoint where_to_point) { | 1191 WhereToPoint where_to_point, |
| 1192 int skip) { |
| 1296 int offset = CurrentAllocationAddress(space) - address; | 1193 int offset = CurrentAllocationAddress(space) - address; |
| 1297 bool from_start = true; | 1194 // Shift out the bits that are always 0. |
| 1298 if (SpaceIsPaged(space)) { | 1195 offset >>= kObjectAlignmentBits; |
| 1299 // For paged space it is simple to encode back from current allocation if | 1196 if (skip == 0) { |
| 1300 // the object is on the same page as the current allocation pointer. | 1197 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer"); |
| 1301 if ((CurrentAllocationAddress(space) >> kPageSizeBits) == | 1198 } else { |
| 1302 (address >> kPageSizeBits)) { | 1199 sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space, |
| 1303 from_start = false; | 1200 "BackRefSerWithSkip"); |
| 1304 address = offset; | 1201 sink_->PutInt(skip, "BackRefSkipDistance"); |
| 1305 } | |
| 1306 } else if (space == NEW_SPACE) { | |
| 1307 // For new space it is always simple to encode back from current allocation. | |
| 1308 if (offset < address) { | |
| 1309 from_start = false; | |
| 1310 address = offset; | |
| 1311 } | |
| 1312 } | 1202 } |
| 1313 // If we are actually dealing with real offsets (and not a numbering of | 1203 sink_->PutInt(offset, "offset"); |
| 1314 // all objects) then we should shift out the bits that are always 0. | |
| 1315 if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits; | |
| 1316 if (from_start) { | |
| 1317 sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer"); | |
| 1318 sink_->PutInt(address, "address"); | |
| 1319 } else { | |
| 1320 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer"); | |
| 1321 sink_->PutInt(address, "address"); | |
| 1322 } | |
| 1323 } | 1204 } |
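With the kFromStart encoding gone, every back reference is now the distance, in words, from the space's current allocation point back to the target object; the deserializer reverses the arithmetic against its own high-water mark (GetAddressFromEnd). A small round-trip sketch under that reading, with made-up addresses and an assumed pointer-size alignment:

```cpp
#include <cassert>
#include <cstdint>

// Assumption: objects are pointer-aligned, so the low bits of any offset are
// always zero and can be shifted out (kObjectAlignmentBits in the real code).
const int kAlignBits = 3;  // 8-byte words, i.e. a 64-bit build

// Serializer side: distance back from the space's allocation point, in words.
uint32_t EncodeBackref(uintptr_t allocation_top, uintptr_t object_address) {
  uintptr_t offset = allocation_top - object_address;
  assert((offset & ((1u << kAlignBits) - 1)) == 0);
  return static_cast<uint32_t>(offset >> kAlignBits);
}

// Deserializer side: the same arithmetic against its own high-water mark,
// mirroring what GetAddressFromEnd does for the kBackref cases.
uintptr_t DecodeBackref(uintptr_t high_water, uint32_t encoded) {
  return high_water - (static_cast<uintptr_t>(encoded) << kAlignBits);
}

// Example: an object 6 words behind the allocation point on the serializer
// side resolves to the address 6 words behind the deserializer's high-water
// mark, because both sides have laid out exactly the same objects so far.
//   EncodeBackref(0x1000, 0x0fd0) == 6
//   DecodeBackref(0x2000, 6)      == 0x1fd0
```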
| 1324 | 1205 |
| 1325 | 1206 |
| 1326 void StartupSerializer::SerializeObject( | 1207 void StartupSerializer::SerializeObject( |
| 1327 Object* o, | 1208 Object* o, |
| 1328 HowToCode how_to_code, | 1209 HowToCode how_to_code, |
| 1329 WhereToPoint where_to_point) { | 1210 WhereToPoint where_to_point, |
| 1211 int skip) { |
| 1330 CHECK(o->IsHeapObject()); | 1212 CHECK(o->IsHeapObject()); |
| 1331 HeapObject* heap_object = HeapObject::cast(o); | 1213 HeapObject* heap_object = HeapObject::cast(o); |
| 1332 | 1214 |
| 1333 int root_index; | 1215 int root_index; |
| 1334 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { | 1216 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { |
| 1335 PutRoot(root_index, heap_object, how_to_code, where_to_point); | 1217 PutRoot(root_index, heap_object, how_to_code, where_to_point, skip); |
| 1336 return; | 1218 return; |
| 1337 } | 1219 } |
| 1338 | 1220 |
| 1339 if (address_mapper_.IsMapped(heap_object)) { | 1221 if (address_mapper_.IsMapped(heap_object)) { |
| 1340 int space = SpaceOfAlreadySerializedObject(heap_object); | 1222 int space = SpaceOfObject(heap_object); |
| 1341 int address = address_mapper_.MappedTo(heap_object); | 1223 int address = address_mapper_.MappedTo(heap_object); |
| 1342 SerializeReferenceToPreviousObject(space, | 1224 SerializeReferenceToPreviousObject(space, |
| 1343 address, | 1225 address, |
| 1344 how_to_code, | 1226 how_to_code, |
| 1345 where_to_point); | 1227 where_to_point, |
| 1228 skip); |
| 1346 } else { | 1229 } else { |
| 1230 if (skip != 0) { |
| 1231 sink_->Put(kSkip, "FlushPendingSkip"); |
| 1232 sink_->PutInt(skip, "SkipDistance"); |
| 1233 } |
| 1234 |
| 1347 // Object has not yet been serialized. Serialize it here. | 1235 // Object has not yet been serialized. Serialize it here. |
| 1348 ObjectSerializer object_serializer(this, | 1236 ObjectSerializer object_serializer(this, |
| 1349 heap_object, | 1237 heap_object, |
| 1350 sink_, | 1238 sink_, |
| 1351 how_to_code, | 1239 how_to_code, |
| 1352 where_to_point); | 1240 where_to_point); |
| 1353 object_serializer.Serialize(); | 1241 object_serializer.Serialize(); |
| 1354 } | 1242 } |
| 1355 } | 1243 } |
| 1356 | 1244 |
| 1357 | 1245 |
| 1358 void StartupSerializer::SerializeWeakReferences() { | 1246 void StartupSerializer::SerializeWeakReferences() { |
| 1359 // This phase comes right after the partial serialization (of the snapshot). | 1247 // This phase comes right after the partial serialization (of the snapshot). |
| 1360 // After we have done the partial serialization the partial snapshot cache | 1248 // After we have done the partial serialization the partial snapshot cache |
| 1361 // will contain some references needed to decode the partial snapshot. We | 1249 // will contain some references needed to decode the partial snapshot. We |
| 1362 // add one entry with 'undefined' which is the sentinel that the deserializer | 1250 // add one entry with 'undefined' which is the sentinel that the deserializer |
| 1363 // uses to know it is done deserializing the array. | 1251 // uses to know it is done deserializing the array. |
| 1364 Isolate* isolate = Isolate::Current(); | 1252 Isolate* isolate = Isolate::Current(); |
| 1365 Object* undefined = isolate->heap()->undefined_value(); | 1253 Object* undefined = isolate->heap()->undefined_value(); |
| 1366 VisitPointer(&undefined); | 1254 VisitPointer(&undefined); |
| 1367 HEAP->IterateWeakRoots(this, VISIT_ALL); | 1255 HEAP->IterateWeakRoots(this, VISIT_ALL); |
| 1256 Pad(); |
| 1368 } | 1257 } |
| 1369 | 1258 |
| 1370 | 1259 |
| 1371 void Serializer::PutRoot(int root_index, | 1260 void Serializer::PutRoot(int root_index, |
| 1372 HeapObject* object, | 1261 HeapObject* object, |
| 1373 SerializerDeserializer::HowToCode how_to_code, | 1262 SerializerDeserializer::HowToCode how_to_code, |
| 1374 SerializerDeserializer::WhereToPoint where_to_point) { | 1263 SerializerDeserializer::WhereToPoint where_to_point, |
| 1264 int skip) { |
| 1375 if (how_to_code == kPlain && | 1265 if (how_to_code == kPlain && |
| 1376 where_to_point == kStartOfObject && | 1266 where_to_point == kStartOfObject && |
| 1377 root_index < kRootArrayNumberOfConstantEncodings && | 1267 root_index < kRootArrayNumberOfConstantEncodings && |
| 1378 !HEAP->InNewSpace(object)) { | 1268 !HEAP->InNewSpace(object)) { |
| 1379 if (root_index < kRootArrayNumberOfLowConstantEncodings) { | 1269 if (skip == 0) { |
| 1380 sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant"); | 1270 sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index, |
| 1271 "RootConstant"); |
| 1381 } else { | 1272 } else { |
| 1382 sink_->Put(kRootArrayHighConstants + root_index - | 1273 sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index, |
| 1383 kRootArrayNumberOfLowConstantEncodings, | 1274 "RootConstant"); |
| 1384 "RootHiConstant"); | 1275 sink_->PutInt(skip, "SkipInPutRoot"); |
| 1385 } | 1276 } |
| 1386 } else { | 1277 } else { |
| 1278 if (skip != 0) { |
| 1279 sink_->Put(kSkip, "SkipFromPutRoot"); |
| 1280 sink_->PutInt(skip, "SkipFromPutRootDistance"); |
| 1281 } |
| 1387 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); | 1282 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); |
| 1388 sink_->PutInt(root_index, "root_index"); | 1283 sink_->PutInt(root_index, "root_index"); |
| 1389 } | 1284 } |
| 1390 } | 1285 } |
| 1391 | 1286 |
| 1392 | 1287 |
| 1393 void PartialSerializer::SerializeObject( | 1288 void PartialSerializer::SerializeObject( |
| 1394 Object* o, | 1289 Object* o, |
| 1395 HowToCode how_to_code, | 1290 HowToCode how_to_code, |
| 1396 WhereToPoint where_to_point) { | 1291 WhereToPoint where_to_point, |
| 1292 int skip) { |
| 1397 CHECK(o->IsHeapObject()); | 1293 CHECK(o->IsHeapObject()); |
| 1398 HeapObject* heap_object = HeapObject::cast(o); | 1294 HeapObject* heap_object = HeapObject::cast(o); |
| 1399 | 1295 |
| 1400 if (heap_object->IsMap()) { | 1296 if (heap_object->IsMap()) { |
| 1401 // The code-caches link to context-specific code objects, which | 1297 // The code-caches link to context-specific code objects, which |
| 1402 // the startup and context serializes cannot currently handle. | 1298 // the startup and context serializes cannot currently handle. |
| 1403 ASSERT(Map::cast(heap_object)->code_cache() == | 1299 ASSERT(Map::cast(heap_object)->code_cache() == |
| 1404 heap_object->GetHeap()->raw_unchecked_empty_fixed_array()); | 1300 heap_object->GetHeap()->raw_unchecked_empty_fixed_array()); |
| 1405 } | 1301 } |
| 1406 | 1302 |
| 1407 int root_index; | 1303 int root_index; |
| 1408 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { | 1304 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { |
| 1409 PutRoot(root_index, heap_object, how_to_code, where_to_point); | 1305 PutRoot(root_index, heap_object, how_to_code, where_to_point, skip); |
| 1410 return; | 1306 return; |
| 1411 } | 1307 } |
| 1412 | 1308 |
| 1413 if (ShouldBeInThePartialSnapshotCache(heap_object)) { | 1309 if (ShouldBeInThePartialSnapshotCache(heap_object)) { |
| 1310 if (skip != 0) { |
| 1311 sink_->Put(kSkip, "SkipFromSerializeObject"); |
| 1312 sink_->PutInt(skip, "SkipDistanceFromSerializeObject"); |
| 1313 } |
| 1314 |
| 1414 int cache_index = PartialSnapshotCacheIndex(heap_object); | 1315 int cache_index = PartialSnapshotCacheIndex(heap_object); |
| 1415 sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point, | 1316 sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point, |
| 1416 "PartialSnapshotCache"); | 1317 "PartialSnapshotCache"); |
| 1417 sink_->PutInt(cache_index, "partial_snapshot_cache_index"); | 1318 sink_->PutInt(cache_index, "partial_snapshot_cache_index"); |
| 1418 return; | 1319 return; |
| 1419 } | 1320 } |
| 1420 | 1321 |
| 1421 // Pointers from the partial snapshot to the objects in the startup snapshot | 1322 // Pointers from the partial snapshot to the objects in the startup snapshot |
| 1422 // should go through the root array or through the partial snapshot cache. | 1323 // should go through the root array or through the partial snapshot cache. |
| 1423 // If this is not the case you may have to add something to the root array. | 1324 // If this is not the case you may have to add something to the root array. |
| 1424 ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object)); | 1325 ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object)); |
| 1425 // All the symbols that the partial snapshot needs should be either in the | 1326 // All the symbols that the partial snapshot needs should be either in the |
| 1426 // root table or in the partial snapshot cache. | 1327 // root table or in the partial snapshot cache. |
| 1427 ASSERT(!heap_object->IsSymbol()); | 1328 ASSERT(!heap_object->IsSymbol()); |
| 1428 | 1329 |
| 1429 if (address_mapper_.IsMapped(heap_object)) { | 1330 if (address_mapper_.IsMapped(heap_object)) { |
| 1430 int space = SpaceOfAlreadySerializedObject(heap_object); | 1331 int space = SpaceOfObject(heap_object); |
| 1431 int address = address_mapper_.MappedTo(heap_object); | 1332 int address = address_mapper_.MappedTo(heap_object); |
| 1432 SerializeReferenceToPreviousObject(space, | 1333 SerializeReferenceToPreviousObject(space, |
| 1433 address, | 1334 address, |
| 1434 how_to_code, | 1335 how_to_code, |
| 1435 where_to_point); | 1336 where_to_point, |
| 1337 skip); |
| 1436 } else { | 1338 } else { |
| 1339 if (skip != 0) { |
| 1340 sink_->Put(kSkip, "SkipFromSerializeObject"); |
| 1341 sink_->PutInt(skip, "SkipDistanceFromSerializeObject"); |
| 1342 } |
| 1437 // Object has not yet been serialized. Serialize it here. | 1343 // Object has not yet been serialized. Serialize it here. |
| 1438 ObjectSerializer serializer(this, | 1344 ObjectSerializer serializer(this, |
| 1439 heap_object, | 1345 heap_object, |
| 1440 sink_, | 1346 sink_, |
| 1441 how_to_code, | 1347 how_to_code, |
| 1442 where_to_point); | 1348 where_to_point); |
| 1443 serializer.Serialize(); | 1349 serializer.Serialize(); |
| 1444 } | 1350 } |
| 1445 } | 1351 } |
| 1446 | 1352 |
| 1447 | 1353 |
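Whichever branch SerializeObject takes, an object body is written at most once; the address_mapper_ records where it landed so that later references become back references to that (space, offset) pair. Roughly, the bookkeeping amounts to a pointer-keyed map like the following sketch (AddressMapperSketch, BackReference and HeapObjectStub are invented names for illustration, not V8's types):

    #include <unordered_map>

    struct HeapObjectStub {};               // stand-in for a heap object

    struct BackReference { int space; int offset; };

    class AddressMapperSketch {
     public:
      bool IsMapped(const HeapObjectStub* obj) const {
        return map_.count(obj) != 0;
      }
      void AddMapping(const HeapObjectStub* obj, BackReference ref) {
        map_[obj] = ref;
      }
      BackReference MappedTo(const HeapObjectStub* obj) const {
        return map_.at(obj);
      }
     private:
      std::unordered_map<const HeapObjectStub*, BackReference> map_;
    };

The first time an object is seen it is serialized in full and the offset returned by Allocate() is recorded; every later reference becomes a cheap "previously serialized object at (space, offset)" instruction.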
| 1448 void Serializer::ObjectSerializer::Serialize() { | 1354 void Serializer::ObjectSerializer::Serialize() { |
| 1449 int space = Serializer::SpaceOfObject(object_); | 1355 int space = Serializer::SpaceOfObject(object_); |
| 1450 int size = object_->Size(); | 1356 int size = object_->Size(); |
| 1451 | 1357 |
| 1452 sink_->Put(kNewObject + reference_representation_ + space, | 1358 sink_->Put(kNewObject + reference_representation_ + space, |
| 1453 "ObjectSerialization"); | 1359 "ObjectSerialization"); |
| 1454 sink_->PutInt(size >> kObjectAlignmentBits, "Size in words"); | 1360 sink_->PutInt(size >> kObjectAlignmentBits, "Size in words"); |
| 1455 | 1361 |
| 1456 LOG(i::Isolate::Current(), | 1362 LOG(i::Isolate::Current(), |
| 1457 SnapshotPositionEvent(object_->address(), sink_->Position())); | 1363 SnapshotPositionEvent(object_->address(), sink_->Position())); |
| 1458 | 1364 |
| 1459 // Mark this object as already serialized. | 1365 // Mark this object as already serialized. |
| 1460 bool start_new_page; | 1366 int offset = serializer_->Allocate(space, size); |
| 1461 int offset = serializer_->Allocate(space, size, &start_new_page); | |
| 1462 serializer_->address_mapper()->AddMapping(object_, offset); | 1367 serializer_->address_mapper()->AddMapping(object_, offset); |
| 1463 if (start_new_page) { | |
| 1464 sink_->Put(kNewPage, "NewPage"); | |
| 1465 sink_->PutSection(space, "NewPageSpace"); | |
| 1466 } | |
| 1467 | 1368 |
| 1468 // Serialize the map (first word of the object). | 1369 // Serialize the map (first word of the object). |
| 1469 serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject); | 1370 serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0); |
| 1470 | 1371 |
| 1471 // Serialize the rest of the object. | 1372 // Serialize the rest of the object. |
| 1472 CHECK_EQ(0, bytes_processed_so_far_); | 1373 CHECK_EQ(0, bytes_processed_so_far_); |
| 1473 bytes_processed_so_far_ = kPointerSize; | 1374 bytes_processed_so_far_ = kPointerSize; |
| 1474 object_->IterateBody(object_->map()->instance_type(), size, this); | 1375 object_->IterateBody(object_->map()->instance_type(), size, this); |
| 1475 OutputRawData(object_->address() + size); | 1376 OutputRawData(object_->address() + size); |
| 1476 } | 1377 } |
| 1477 | 1378 |
| 1478 | 1379 |
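Serialize itself emits a compact header: the target space is folded into the kNewObject opcode and the size goes out in words, since heap objects are pointer-aligned and the low bits of the byte size are always zero. A minimal sketch of that header encoding (the opcode value and alignment constant below are assumptions for the example):

    #include <cstdint>
    #include <vector>

    struct Sink {
      std::vector<uint8_t> data;
      void Put(uint8_t b) { data.push_back(b); }
      void PutInt(uint32_t v) {
        do { uint8_t b = v & 0x7f; v >>= 7; Put(b | (v ? 0x80 : 0)); } while (v);
      }
    };

    const uint8_t kNewObject = 0x00;      // illustrative opcode base
    const int kObjectAlignmentBits = 3;   // 8-byte alignment assumed here

    void PutNewObjectHeader(Sink* sink, int space, int size_in_bytes) {
      sink->Put(kNewObject + space);                        // space in the opcode
      sink->PutInt(size_in_bytes >> kObjectAlignmentBits);  // size in words
    }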
| 1479 void Serializer::ObjectSerializer::VisitPointers(Object** start, | 1380 void Serializer::ObjectSerializer::VisitPointers(Object** start, |
| (...skipping 20 matching lines...) |
| 1500 } | 1401 } |
| 1501 current += repeat_count; | 1402 current += repeat_count; |
| 1502 bytes_processed_so_far_ += repeat_count * kPointerSize; | 1403 bytes_processed_so_far_ += repeat_count * kPointerSize; |
| 1503 if (repeat_count > kMaxRepeats) { | 1404 if (repeat_count > kMaxRepeats) { |
| 1504 sink_->Put(kRepeat, "SerializeRepeats"); | 1405 sink_->Put(kRepeat, "SerializeRepeats"); |
| 1505 sink_->PutInt(repeat_count, "SerializeRepeats"); | 1406 sink_->PutInt(repeat_count, "SerializeRepeats"); |
| 1506 } else { | 1407 } else { |
| 1507 sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats"); | 1408 sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats"); |
| 1508 } | 1409 } |
| 1509 } else { | 1410 } else { |
| 1510 serializer_->SerializeObject(current_contents, kPlain, kStartOfObject); | 1411 serializer_->SerializeObject( |
| 1412 current_contents, kPlain, kStartOfObject, 0); |
| 1511 bytes_processed_so_far_ += kPointerSize; | 1413 bytes_processed_so_far_ += kPointerSize; |
| 1512 current++; | 1414 current++; |
| 1513 } | 1415 } |
| 1514 } | 1416 } |
| 1515 } | 1417 } |
| 1516 } | 1418 } |
| 1517 | 1419 |
| 1518 | 1420 |
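VisitPointers run-length-encodes consecutive identical pointers (long runs of the same filler value are common in FixedArrays): short runs get a single dedicated byte via CodeForRepeats, longer runs use kRepeat plus an explicit count. A toy version of that choice, with illustrative opcode values and an assumed kMaxRepeats:

    #include <cstdint>
    #include <vector>

    struct Sink {
      std::vector<uint8_t> data;
      void Put(uint8_t b) { data.push_back(b); }
      void PutInt(uint32_t v) {
        do { uint8_t b = v & 0x7f; v >>= 7; Put(b | (v ? 0x80 : 0)); } while (v);
      }
    };

    const uint8_t kRepeat = 0x30;       // generic form: opcode + varint count
    const uint8_t kFixedRepeat = 0x31;  // compact forms: kFixedRepeat + (count - 1)
    const int kMaxRepeats = 12;         // counts 1..kMaxRepeats fit in one byte

    void SerializeRepeats(Sink* sink, int repeat_count) {
      if (repeat_count > kMaxRepeats) {
        sink->Put(kRepeat);
        sink->PutInt(repeat_count);
      } else {
        sink->Put(kFixedRepeat + repeat_count - 1);  // CodeForRepeats equivalent
      }
    }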
| 1519 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { | 1421 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { |
| 1520 Object** current = rinfo->target_object_address(); | 1422 Object** current = rinfo->target_object_address(); |
| 1521 | 1423 |
| 1522 OutputRawData(rinfo->target_address_address()); | 1424 int skip = OutputRawData(rinfo->target_address_address(), |
| 1425 kCanReturnSkipInsteadOfSkipping); |
| 1523 HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain; | 1426 HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain; |
| 1524 serializer_->SerializeObject(*current, representation, kStartOfObject); | 1427 serializer_->SerializeObject(*current, representation, kStartOfObject, skip); |
| 1525 bytes_processed_so_far_ += rinfo->target_address_size(); | 1428 bytes_processed_so_far_ += rinfo->target_address_size(); |
| 1526 } | 1429 } |
| 1527 | 1430 |
| 1528 | 1431 |
| 1529 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, | 1432 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, |
| 1530 Address* end) { | 1433 Address* end) { |
| 1531 Address references_start = reinterpret_cast<Address>(start); | 1434 Address references_start = reinterpret_cast<Address>(start); |
| 1532 OutputRawData(references_start); | 1435 int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping); |
| 1533 | 1436 |
| 1534 for (Address* current = start; current < end; current++) { | 1437 for (Address* current = start; current < end; current++) { |
| 1535 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); | 1438 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); |
| 1439 sink_->PutInt(skip, "SkipB4ExternalRef"); |
| 1440 skip = 0; |
| 1536 int reference_id = serializer_->EncodeExternalReference(*current); | 1441 int reference_id = serializer_->EncodeExternalReference(*current); |
| 1537 sink_->PutInt(reference_id, "reference id"); | 1442 sink_->PutInt(reference_id, "reference id"); |
| 1538 } | 1443 } |
| 1539 bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); | 1444 bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); |
| 1540 } | 1445 } |
| 1541 | 1446 |
| 1542 | 1447 |
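Note how the skip returned by OutputRawData is consumed exactly once: the first ExternalRef in the run carries the distance in its SkipB4ExternalRef operand, and every later one writes a zero. A compressed sketch of that pattern (the sink, opcode and pre-encoded reference ids are placeholders):

    #include <cstdint>
    #include <vector>

    struct Sink {
      std::vector<uint8_t> data;
      void Put(uint8_t b) { data.push_back(b); }
      void PutInt(uint32_t v) {
        do { uint8_t b = v & 0x7f; v >>= 7; Put(b | (v ? 0x80 : 0)); } while (v);
      }
    };

    const uint8_t kExternalReference = 0x50;  // illustrative opcode only

    void PutExternalReferences(Sink* sink, const std::vector<int>& ids, int skip) {
      for (int id : ids) {
        sink->Put(kExternalReference);
        sink->PutInt(skip);   // only the first reference carries a real distance
        skip = 0;
        sink->PutInt(id);
      }
    }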
| 1543 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) { | 1448 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) { |
| 1544 Address references_start = rinfo->target_address_address(); | 1449 Address references_start = rinfo->target_address_address(); |
| 1545 OutputRawData(references_start); | 1450 int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping); |
| 1546 | 1451 |
| 1547 Address* current = rinfo->target_reference_address(); | 1452 Address* current = rinfo->target_reference_address(); |
| 1548 int representation = rinfo->IsCodedSpecially() ? | 1453 int representation = rinfo->IsCodedSpecially() ? |
| 1549 kFromCode + kStartOfObject : kPlain + kStartOfObject; | 1454 kFromCode + kStartOfObject : kPlain + kStartOfObject; |
| 1550 sink_->Put(kExternalReference + representation, "ExternalRef"); | 1455 sink_->Put(kExternalReference + representation, "ExternalRef"); |
| 1456 sink_->PutInt(skip, "SkipB4ExternalRef"); |
| 1551 int reference_id = serializer_->EncodeExternalReference(*current); | 1457 int reference_id = serializer_->EncodeExternalReference(*current); |
| 1552 sink_->PutInt(reference_id, "reference id"); | 1458 sink_->PutInt(reference_id, "reference id"); |
| 1553 bytes_processed_so_far_ += rinfo->target_address_size(); | 1459 bytes_processed_so_far_ += rinfo->target_address_size(); |
| 1554 } | 1460 } |
| 1555 | 1461 |
| 1556 | 1462 |
| 1557 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { | 1463 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { |
| 1558 Address target_start = rinfo->target_address_address(); | 1464 Address target_start = rinfo->target_address_address(); |
| 1559 OutputRawData(target_start); | 1465 int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping); |
| 1560 Address target = rinfo->target_address(); | 1466 Address target = rinfo->target_address(); |
| 1561 uint32_t encoding = serializer_->EncodeExternalReference(target); | 1467 uint32_t encoding = serializer_->EncodeExternalReference(target); |
| 1562 CHECK(target == NULL ? encoding == 0 : encoding != 0); | 1468 CHECK(target == NULL ? encoding == 0 : encoding != 0); |
| 1563 int representation; | 1469 int representation; |
| 1564 // Can't use a ternary operator because of gcc. | 1470 // Can't use a ternary operator because of gcc. |
| 1565 if (rinfo->IsCodedSpecially()) { | 1471 if (rinfo->IsCodedSpecially()) { |
| 1566 representation = kStartOfObject + kFromCode; | 1472 representation = kStartOfObject + kFromCode; |
| 1567 } else { | 1473 } else { |
| 1568 representation = kStartOfObject + kPlain; | 1474 representation = kStartOfObject + kPlain; |
| 1569 } | 1475 } |
| 1570 sink_->Put(kExternalReference + representation, "ExternalReference"); | 1476 sink_->Put(kExternalReference + representation, "ExternalReference"); |
| 1477 sink_->PutInt(skip, "SkipB4ExternalRef"); |
| 1571 sink_->PutInt(encoding, "reference id"); | 1478 sink_->PutInt(encoding, "reference id"); |
| 1572 bytes_processed_so_far_ += rinfo->target_address_size(); | 1479 bytes_processed_so_far_ += rinfo->target_address_size(); |
| 1573 } | 1480 } |
| 1574 | 1481 |
| 1575 | 1482 |
| 1576 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { | 1483 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { |
| 1577 CHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); | 1484 CHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); |
| 1578 Address target_start = rinfo->target_address_address(); | 1485 Address target_start = rinfo->target_address_address(); |
| 1579 OutputRawData(target_start); | 1486 int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping); |
| 1580 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); | 1487 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
| 1581 serializer_->SerializeObject(target, kFromCode, kInnerPointer); | 1488 serializer_->SerializeObject(target, kFromCode, kInnerPointer, skip); |
| 1582 bytes_processed_so_far_ += rinfo->target_address_size(); | 1489 bytes_processed_so_far_ += rinfo->target_address_size(); |
| 1583 } | 1490 } |
| 1584 | 1491 |
| 1585 | 1492 |
| 1586 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) { | 1493 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) { |
| 1587 Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); | 1494 Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); |
| 1588 OutputRawData(entry_address); | 1495 int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping); |
| 1589 serializer_->SerializeObject(target, kPlain, kInnerPointer); | 1496 serializer_->SerializeObject(target, kPlain, kInnerPointer, skip); |
| 1590 bytes_processed_so_far_ += kPointerSize; | 1497 bytes_processed_so_far_ += kPointerSize; |
| 1591 } | 1498 } |
| 1592 | 1499 |
| 1593 | 1500 |
| 1594 void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) { | 1501 void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) { |
| 1595 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); | 1502 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); |
| 1596 JSGlobalPropertyCell* cell = | 1503 JSGlobalPropertyCell* cell = |
| 1597 JSGlobalPropertyCell::cast(rinfo->target_cell()); | 1504 JSGlobalPropertyCell::cast(rinfo->target_cell()); |
| 1598 OutputRawData(rinfo->pc()); | 1505 int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping); |
| 1599 serializer_->SerializeObject(cell, kPlain, kInnerPointer); | 1506 serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip); |
| 1600 } | 1507 } |
| 1601 | 1508 |
| 1602 | 1509 |
| 1603 void Serializer::ObjectSerializer::VisitExternalAsciiString( | 1510 void Serializer::ObjectSerializer::VisitExternalAsciiString( |
| 1604 v8::String::ExternalAsciiStringResource** resource_pointer) { | 1511 v8::String::ExternalAsciiStringResource** resource_pointer) { |
| 1605 Address references_start = reinterpret_cast<Address>(resource_pointer); | 1512 Address references_start = reinterpret_cast<Address>(resource_pointer); |
| 1606 OutputRawData(references_start); | 1513 OutputRawData(references_start); |
| 1607 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { | 1514 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { |
| 1608 Object* source = HEAP->natives_source_cache()->get(i); | 1515 Object* source = HEAP->natives_source_cache()->get(i); |
| 1609 if (!source->IsUndefined()) { | 1516 if (!source->IsUndefined()) { |
| 1610 ExternalAsciiString* string = ExternalAsciiString::cast(source); | 1517 ExternalAsciiString* string = ExternalAsciiString::cast(source); |
| 1611 typedef v8::String::ExternalAsciiStringResource Resource; | 1518 typedef v8::String::ExternalAsciiStringResource Resource; |
| 1612 const Resource* resource = string->resource(); | 1519 const Resource* resource = string->resource(); |
| 1613 if (resource == *resource_pointer) { | 1520 if (resource == *resource_pointer) { |
| 1614 sink_->Put(kNativesStringResource, "NativesStringResource"); | 1521 sink_->Put(kNativesStringResource, "NativesStringResource"); |
| 1615 sink_->PutSection(i, "NativesStringResourceEnd"); | 1522 sink_->PutSection(i, "NativesStringResourceEnd"); |
| 1616 bytes_processed_so_far_ += sizeof(resource); | 1523 bytes_processed_so_far_ += sizeof(resource); |
| 1617 return; | 1524 return; |
| 1618 } | 1525 } |
| 1619 } | 1526 } |
| 1620 } | 1527 } |
| 1621 // One of the strings in the natives cache should match the resource. We | 1528 // One of the strings in the natives cache should match the resource. We |
| 1622 // can't serialize any other kinds of external strings. | 1529 // can't serialize any other kinds of external strings. |
| 1623 UNREACHABLE(); | 1530 UNREACHABLE(); |
| 1624 } | 1531 } |
| 1625 | 1532 |
| 1626 | 1533 |
| 1627 void Serializer::ObjectSerializer::OutputRawData(Address up_to) { | 1534 int Serializer::ObjectSerializer::OutputRawData( |
| 1535 Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) { |
| 1628 Address object_start = object_->address(); | 1536 Address object_start = object_->address(); |
| 1537 Address base = object_start + bytes_processed_so_far_; |
| 1629 int up_to_offset = static_cast<int>(up_to - object_start); | 1538 int up_to_offset = static_cast<int>(up_to - object_start); |
| 1630 int skipped = up_to_offset - bytes_processed_so_far_; | 1539 int to_skip = up_to_offset - bytes_processed_so_far_; |
| 1540 int bytes_to_output = to_skip; |
| 1541 bytes_processed_so_far_ += to_skip; |
| 1631 // This assert will fail if the reloc info gives us the target_address_address | 1542 // This assert will fail if the reloc info gives us the target_address_address |
| 1632 // locations in a non-ascending order. Luckily that doesn't happen. | 1543 // locations in a non-ascending order. Luckily that doesn't happen. |
| 1633 ASSERT(skipped >= 0); | 1544 ASSERT(to_skip >= 0); |
| 1634 if (skipped != 0) { | 1545 bool outputting_code = false; |
| 1635 Address base = object_start + bytes_processed_so_far_; | 1546 if (to_skip != 0 && code_object_ && !code_has_been_output_) { |
| 1636 #define RAW_CASE(index, length) \ | 1547 // Output the code all at once and fix later. |
| 1637 if (skipped == length) { \ | 1548 bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_; |
| 1549 outputting_code = true; |
| 1550 code_has_been_output_ = true; |
| 1551 } |
| 1552 if (bytes_to_output != 0 && |
| 1553 (!code_object_ || outputting_code)) { |
| 1554 #define RAW_CASE(index) \ |
| 1555 if (!outputting_code && bytes_to_output == index * kPointerSize && \ |
| 1556 index * kPointerSize == to_skip) { \ |
| 1638 sink_->PutSection(kRawData + index, "RawDataFixed"); \ | 1557 sink_->PutSection(kRawData + index, "RawDataFixed"); \ |
| 1558 to_skip = 0; /* This insn already skips. */ \ |
| 1639 } else /* NOLINT */ | 1559 } else /* NOLINT */ |
| 1640 COMMON_RAW_LENGTHS(RAW_CASE) | 1560 COMMON_RAW_LENGTHS(RAW_CASE) |
| 1641 #undef RAW_CASE | 1561 #undef RAW_CASE |
| 1642 { /* NOLINT */ | 1562 { /* NOLINT */ |
| 1563 // We always end up here if we are outputting the code of a code object. |
| 1643 sink_->Put(kRawData, "RawData"); | 1564 sink_->Put(kRawData, "RawData"); |
| 1644 sink_->PutInt(skipped, "length"); | 1565 sink_->PutInt(bytes_to_output, "length"); |
| 1645 } | 1566 } |
| 1646 for (int i = 0; i < skipped; i++) { | 1567 for (int i = 0; i < bytes_to_output; i++) { |
| 1647 unsigned int data = base[i]; | 1568 unsigned int data = base[i]; |
| 1648 sink_->PutSection(data, "Byte"); | 1569 sink_->PutSection(data, "Byte"); |
| 1649 } | 1570 } |
| 1650 bytes_processed_so_far_ += skipped; | |
| 1651 } | 1571 } |
| 1572 if (to_skip != 0 && return_skip == kIgnoringReturn) { |
| 1573 sink_->Put(kSkip, "Skip"); |
| 1574 sink_->PutInt(to_skip, "SkipDistance"); |
| 1575 to_skip = 0; |
| 1576 } |
| 1577 return to_skip; |
| 1652 } | 1578 } |
| 1653 | 1579 |
| 1654 | 1580 |
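The new return value is the interesting part of OutputRawData: after any raw bytes are written, a leftover distance is either emitted as a standalone kSkip (kIgnoringReturn) or handed back so the caller can attach it to its next instruction (kCanReturnSkipInsteadOfSkipping). A reduced sketch of just that tail decision, independent of the code-object special case (sink and opcode value are placeholders):

    #include <cstdint>
    #include <vector>

    struct Sink {
      std::vector<uint8_t> data;
      void Put(uint8_t b) { data.push_back(b); }
      void PutInt(uint32_t v) {
        do { uint8_t b = v & 0x7f; v >>= 7; Put(b | (v ? 0x80 : 0)); } while (v);
      }
    };

    const uint8_t kSkip = 0x10;  // illustrative opcode
    enum ReturnSkip { kIgnoringReturn, kCanReturnSkipInsteadOfSkipping };

    // If the caller cannot absorb the pending distance, emit an explicit kSkip
    // now; otherwise hand the distance back so the caller can fold it into its
    // next instruction (e.g. as a SkipB4ExternalRef operand).
    int EmitOrReturnSkip(Sink* sink, int to_skip, ReturnSkip return_skip) {
      if (to_skip != 0 && return_skip == kIgnoringReturn) {
        sink->Put(kSkip);
        sink->PutInt(to_skip);
        to_skip = 0;
      }
      return to_skip;
    }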
| 1655 int Serializer::SpaceOfObject(HeapObject* object) { | 1581 int Serializer::SpaceOfObject(HeapObject* object) { |
| 1656 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { | 1582 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { |
| 1657 AllocationSpace s = static_cast<AllocationSpace>(i); | 1583 AllocationSpace s = static_cast<AllocationSpace>(i); |
| 1658 if (HEAP->InSpace(object, s)) { | 1584 if (HEAP->InSpace(object, s)) { |
| 1659 if (i == LO_SPACE) { | 1585 ASSERT(i < kNumberOfSpaces); |
| 1660 if (object->IsCode()) { | |
| 1661 return kLargeCode; | |
| 1662 } else if (object->IsFixedArray()) { | |
| 1663 return kLargeFixedArray; | |
| 1664 } else { | |
| 1665 return kLargeData; | |
| 1666 } | |
| 1667 } | |
| 1668 return i; | 1586 return i; |
| 1669 } | 1587 } |
| 1670 } | 1588 } |
| 1671 UNREACHABLE(); | 1589 UNREACHABLE(); |
| 1672 return 0; | 1590 return 0; |
| 1673 } | 1591 } |
| 1674 | 1592 |
| 1675 | 1593 |
| 1676 int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) { | 1594 int Serializer::Allocate(int space, int size) { |
| 1677 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { | |
| 1678 AllocationSpace s = static_cast<AllocationSpace>(i); | |
| 1679 if (HEAP->InSpace(object, s)) { | |
| 1680 return i; | |
| 1681 } | |
| 1682 } | |
| 1683 UNREACHABLE(); | |
| 1684 return 0; | |
| 1685 } | |
| 1686 | |
| 1687 | |
| 1688 int Serializer::Allocate(int space, int size, bool* new_page) { | |
| 1689 CHECK(space >= 0 && space < kNumberOfSpaces); | 1595 CHECK(space >= 0 && space < kNumberOfSpaces); |
| 1690 if (SpaceIsLarge(space)) { | |
| 1691 // In large object space we merely number the objects instead of trying to | |
| 1692 // determine some sort of address. | |
| 1693 *new_page = true; | |
| 1694 large_object_total_ += size; | |
| 1695 return fullness_[LO_SPACE]++; | |
| 1696 } | |
| 1697 *new_page = false; | |
| 1698 if (fullness_[space] == 0) { | |
| 1699 *new_page = true; | |
| 1700 } | |
| 1701 if (SpaceIsPaged(space)) { | |
| 1702 // Paged spaces are a little special. We encode their addresses as if the | |
| 1703 // pages were all contiguous and each page were filled up in the range | |
| 1704 // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous | |
| 1705 // and allocation does not start at offset 0 in the page, but this scheme | |
| 1706 // means the deserializer can get the page number quickly by shifting the | |
| 1707 // serialized address. | |
| 1708 CHECK(IsPowerOf2(Page::kPageSize)); | |
| 1709 int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1)); | |
| 1710 CHECK(size <= SpaceAreaSize(space)); | |
| 1711 if (used_in_this_page + size > SpaceAreaSize(space)) { | |
| 1712 *new_page = true; | |
| 1713 fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); | |
| 1714 } | |
| 1715 } | |
| 1716 int allocation_address = fullness_[space]; | 1596 int allocation_address = fullness_[space]; |
| 1717 fullness_[space] = allocation_address + size; | 1597 fullness_[space] = allocation_address + size; |
| 1718 return allocation_address; | 1598 return allocation_address; |
| 1719 } | 1599 } |
| 1720 | 1600 |
| 1721 | 1601 |
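With the page-aware encoding gone, Allocate is now a plain per-space bump counter: offsets grow monotonically, and the deserializer can turn them back into real addresses by allocating each space's total in one go and adding the offset. A self-contained equivalent (the space count is a placeholder, not V8's enumeration):

    #include <array>

    const int kNumberOfSpaces = 6;  // illustrative; V8 enumerates its real spaces

    class AllocatorSketch {
     public:
      AllocatorSketch() { fullness_.fill(0); }

      // Returns the offset at which |size| bytes are "allocated" in |space|.
      // The serializer only needs a deterministic offset; nothing is actually
      // allocated here.
      int Allocate(int space, int size) {
        int allocation_address = fullness_[space];
        fullness_[space] = allocation_address + size;
        return allocation_address;
      }

      int TotalFor(int space) const { return fullness_[space]; }

     private:
      std::array<int, kNumberOfSpaces> fullness_;
    };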
| 1722 int Serializer::SpaceAreaSize(int space) { | 1602 int Serializer::SpaceAreaSize(int space) { |
| 1723 if (space == CODE_SPACE) { | 1603 if (space == CODE_SPACE) { |
| 1724 return isolate_->memory_allocator()->CodePageAreaSize(); | 1604 return isolate_->memory_allocator()->CodePageAreaSize(); |
| 1725 } else { | 1605 } else { |
| 1726 return Page::kPageSize - Page::kObjectStartOffset; | 1606 return Page::kPageSize - Page::kObjectStartOffset; |
| 1727 } | 1607 } |
| 1728 } | 1608 } |
| 1729 | 1609 |
| 1730 | 1610 |
| 1611 void Serializer::Pad() { |
| 1612 // The non-branching GetInt will read up to 3 bytes too far, so we need |
| 1613 // to pad the snapshot to make sure we don't read over the end. |
| 1614 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) { |
| 1615 sink_->Put(kNop, "Padding"); |
| 1616 } |
| 1617 } |
| 1618 |
| 1619 |
| 1620 bool SnapshotByteSource::AtEOF() { |
| 1621 if (0u + length_ - position_ > sizeof(uint32_t)) return false; |
| 1622 for (int x = position_; x < length_; x++) { |
| 1623 if (data_[x] != SerializerDeserializer::nop()) return false; |
| 1624 } |
| 1625 return true; |
| 1626 } |
| 1627 |
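The padding and the EOF check are two halves of one contract: the deserializer's branch-free GetInt may read up to three bytes past the last meaningful byte, so Pad appends sizeof(int32_t) - 1 nops, and AtEOF treats a tail consisting of nothing but nops as end-of-stream. A standalone sketch of the pair (the kNop value is arbitrary here):

    #include <cstdint>
    #include <vector>

    const uint8_t kNop = 0xee;  // placeholder; V8 uses a dedicated opcode

    void Pad(std::vector<uint8_t>* stream) {
      // GetInt may read up to 3 bytes beyond the last meaningful byte, so make
      // sure those reads land on harmless padding instead of past the buffer.
      for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) stream->push_back(kNop);
    }

    bool AtEOF(const std::vector<uint8_t>& data, int position) {
      int length = static_cast<int>(data.size());
      if (length - position > static_cast<int>(sizeof(uint32_t))) return false;
      for (int x = position; x < length; x++) {
        if (data[x] != kNop) return false;   // real bytes remain
      }
      return true;                           // only padding left
    }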
| 1731 } } // namespace v8::internal | 1628 } } // namespace v8::internal |