OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
163 SnapshotByteSource(const byte* array, int length) | 163 SnapshotByteSource(const byte* array, int length) |
164 : data_(array), length_(length), position_(0) { } | 164 : data_(array), length_(length), position_(0) { } |
165 | 165 |
166 bool HasMore() { return position_ < length_; } | 166 bool HasMore() { return position_ < length_; } |
167 | 167 |
168 int Get() { | 168 int Get() { |
169 ASSERT(position_ < length_); | 169 ASSERT(position_ < length_); |
170 return data_[position_++]; | 170 return data_[position_++]; |
171 } | 171 } |
172 | 172 |
| 173 int32_t GetUnalignedInt() { |
| 174 #if defined(V8_HOST_CAN_READ_UNALIGNED) && __BYTE_ORDER == __LITTLE_ENDIAN |
| 175 int32_t answer; |
| 176 ASSERT(position_ + sizeof(answer) <= length_ + 0u); |
| 177 answer = *reinterpret_cast<const int32_t*>(data_ + position_); |
| 178 #else |
| 179 int32_t answer = data_[position_]; |
| 180 answer |= data_[position_ + 1] << 8; |
| 181 answer |= data_[position_ + 2] << 16; |
| 182 answer |= data_[position_ + 3] << 24; |
| 183 #endif |
| 184 return answer; |
| 185 } |
| 186 |
| 187 void Advance(int by) { position_ += by; } |
| 188 |
173 inline void CopyRaw(byte* to, int number_of_bytes); | 189 inline void CopyRaw(byte* to, int number_of_bytes); |
174 | 190 |
175 inline int GetInt(); | 191 inline int GetInt(); |
176 | 192 |
177 bool AtEOF() { | 193 bool AtEOF(); |
178 return position_ == length_; | |
179 } | |
180 | 194 |
181 int position() { return position_; } | 195 int position() { return position_; } |
182 | 196 |
183 private: | 197 private: |
184 const byte* data_; | 198 const byte* data_; |
185 int length_; | 199 int length_; |
186 int position_; | 200 int position_; |
187 }; | 201 }; |
188 | 202 |
189 | 203 |
190 #define COMMON_RAW_LENGTHS(f) \ | |
191 f(1, 1) \ | |
192 f(2, 2) \ | |
193 f(3, 3) \ | |
194 f(4, 4) \ | |
195 f(5, 5) \ | |
196 f(6, 6) \ | |
197 f(7, 7) \ | |
198 f(8, 8) \ | |
199 f(9, 12) \ | |
200 f(10, 16) \ | |
201 f(11, 20) \ | |
202 f(12, 24) \ | |
203 f(13, 28) \ | |
204 f(14, 32) \ | |
205 f(15, 36) | |
206 | |
207 // The Serializer/Deserializer class is a common superclass for Serializer and | 204 // The Serializer/Deserializer class is a common superclass for Serializer and |
208 // Deserializer which is used to store common constants and methods used by | 205 // Deserializer which is used to store common constants and methods used by |
209 // both. | 206 // both. |
210 class SerializerDeserializer: public ObjectVisitor { | 207 class SerializerDeserializer: public ObjectVisitor { |
211 public: | 208 public: |
212 static void Iterate(ObjectVisitor* visitor); | 209 static void Iterate(ObjectVisitor* visitor); |
213 | 210 |
| 211 static int nop() { return kNop; } |
| 212 |
214 protected: | 213 protected: |
215 // Where the pointed-to object can be found: | 214 // Where the pointed-to object can be found: |
216 enum Where { | 215 enum Where { |
217 kNewObject = 0, // Object is next in snapshot. | 216 kNewObject = 0, // Object is next in snapshot. |
218 // 1-8 One per space. | 217 // 1-6 One per space. |
219 kRootArray = 0x9, // Object is found in root array. | 218 kRootArray = 0x9, // Object is found in root array. |
220 kPartialSnapshotCache = 0xa, // Object is in the cache. | 219 kPartialSnapshotCache = 0xa, // Object is in the cache. |
221 kExternalReference = 0xb, // Pointer to an external reference. | 220 kExternalReference = 0xb, // Pointer to an external reference. |
222 kSkip = 0xc, // Skip a pointer sized cell. | 221 kSkip = 0xc, // Skip n bytes. |
223 // 0xd-0xf Free. | 222 kNop = 0xd, // Does nothing, used to pad. |
224 kBackref = 0x10, // Object is described relative to end. | 223 // 0xe-0xf Free. |
225 // 0x11-0x18 One per space. | 224 kBackref = 0x10, // Object is described relative to end. |
226 // 0x19-0x1f Free. | 225 // 0x11-0x16 One per space. |
227 kFromStart = 0x20, // Object is described relative to start. | 226 kBackrefWithSkip = 0x18, // Object is described relative to end. |
228 // 0x21-0x28 One per space. | 227 // 0x19-0x1e One per space. |
229 // 0x29-0x2f Free. | 228 // 0x20-0x3f Used by misc. tags below. |
230 // 0x30-0x3f Used by misc. tags below. | |
231 kPointedToMask = 0x3f | 229 kPointedToMask = 0x3f |
232 }; | 230 }; |
233 | 231 |
234 // How to code the pointer to the object. | 232 // How to code the pointer to the object. |
235 enum HowToCode { | 233 enum HowToCode { |
236 kPlain = 0, // Straight pointer. | 234 kPlain = 0, // Straight pointer. |
237 // What this means depends on the architecture: | 235 // What this means depends on the architecture: |
238 kFromCode = 0x40, // A pointer inlined in code. | 236 kFromCode = 0x40, // A pointer inlined in code. |
239 kHowToCodeMask = 0x40 | 237 kHowToCodeMask = 0x40 |
240 }; | 238 }; |
241 | 239 |
| 240 // For kRootArrayConstants |
| 241 enum WithSkip { |
| 242 kNoSkipDistance = 0, |
| 243 kHasSkipDistance = 0x40, |
| 244 kWithSkipMask = 0x40 |
| 245 }; |
| 246 |
242 // Where to point within the object. | 247 // Where to point within the object. |
243 enum WhereToPoint { | 248 enum WhereToPoint { |
244 kStartOfObject = 0, | 249 kStartOfObject = 0, |
245 kInnerPointer = 0x80, // First insn in code object or payload of cell. | 250 kInnerPointer = 0x80, // First insn in code object or payload of cell. |
246 kWhereToPointMask = 0x80 | 251 kWhereToPointMask = 0x80 |
247 }; | 252 }; |
248 | 253 |
249 // Misc. | 254 // Misc. |
250 // Raw data to be copied from the snapshot. | 255 // Raw data to be copied from the snapshot. This byte code does not advance |
251 static const int kRawData = 0x30; | 256 // the current pointer, which is used for code objects, where we write the |
252 // Some common raw lengths: 0x31-0x3f | 257 // entire code in one memcpy, then fix up stuff with kSkip and other byte |
| 258 // codes that overwrite data. |
| 259 static const int kRawData = 0x20; |
| 260 // Some common raw lengths: 0x21-0x3f. These automatically advance the current pointer. |
253 // A tag emitted at strategic points in the snapshot to delineate sections. | 261 // A tag emitted at strategic points in the snapshot to delineate sections. |
254 // If the deserializer does not find these at the expected moments then it | 262 // If the deserializer does not find these at the expected moments then it |
255 // is an indication that the snapshot and the VM do not fit together. | 263 // is an indication that the snapshot and the VM do not fit together. |
256 // Examine the build process for architecture, version or configuration | 264 // Examine the build process for architecture, version or configuration |
257 // mismatches. | 265 // mismatches. |
258 static const int kSynchronize = 0x70; | 266 static const int kSynchronize = 0x70; |
259 // Used for the source code of the natives, which is in the executable, but | 267 // Used for the source code of the natives, which is in the executable, but |
260 // is referred to from external strings in the snapshot. | 268 // is referred to from external strings in the snapshot. |
261 static const int kNativesStringResource = 0x71; | 269 static const int kNativesStringResource = 0x71; |
262 static const int kNewPage = 0x72; | 270 static const int kRepeat = 0x72; |
263 static const int kRepeat = 0x73; | 271 static const int kConstantRepeat = 0x73; |
264 static const int kConstantRepeat = 0x74; | 272 // 0x73-0x7f Repeat last word (subtract 0x72 to get the count). |
265 // 0x74-0x7f Repeat last word (subtract 0x73 to get the count). | 273 static const int kMaxRepeats = 0x7f - 0x72; |
266 static const int kMaxRepeats = 0x7f - 0x73; | |
267 static int CodeForRepeats(int repeats) { | 274 static int CodeForRepeats(int repeats) { |
268 ASSERT(repeats >= 1 && repeats <= kMaxRepeats); | 275 ASSERT(repeats >= 1 && repeats <= kMaxRepeats); |
269 return 0x73 + repeats; | 276 return 0x72 + repeats; |
270 } | 277 } |
271 static int RepeatsForCode(int byte_code) { | 278 static int RepeatsForCode(int byte_code) { |
272 ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f); | 279 ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f); |
273 return byte_code - 0x73; | 280 return byte_code - 0x72; |
274 } | 281 } |
275 static const int kRootArrayLowConstants = 0xb0; | 282 static const int kRootArrayConstants = 0xa0; |
276 // 0xb0-0xbf Things from the first 16 elements of the root array. | 283 // 0xa0-0xbf Things from the first 32 elements of the root array. |
277 static const int kRootArrayHighConstants = 0xf0; | |
278 // 0xf0-0xff Things from the next 16 elements of the root array. | |
279 static const int kRootArrayNumberOfConstantEncodings = 0x20; | 284 static const int kRootArrayNumberOfConstantEncodings = 0x20; |
280 static const int kRootArrayNumberOfLowConstantEncodings = 0x10; | |
281 static int RootArrayConstantFromByteCode(int byte_code) { | 285 static int RootArrayConstantFromByteCode(int byte_code) { |
282 int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2); | 286 return byte_code & 0x1f; |
283 ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings); | |
284 return constant; | |
285 } | 287 } |
286 | 288 |
287 | 289 static const int kNumberOfSpaces = LO_SPACE; |
288 static const int kLargeData = LAST_SPACE; | |
289 static const int kLargeCode = kLargeData + 1; | |
290 static const int kLargeFixedArray = kLargeCode + 1; | |
291 static const int kNumberOfSpaces = kLargeFixedArray + 1; | |
292 static const int kAnyOldSpace = -1; | 290 static const int kAnyOldSpace = -1; |
293 | 291 |
294 // A bitmask for getting the space out of an instruction. | 292 // A bitmask for getting the space out of an instruction. |
295 static const int kSpaceMask = 15; | 293 static const int kSpaceMask = 7; |
296 | |
297 static inline bool SpaceIsLarge(int space) { return space >= kLargeData; } | |
298 static inline bool SpaceIsPaged(int space) { | |
299 return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE; | |
300 } | |
301 }; | 294 }; |
302 | 295 |
303 | 296 |
304 int SnapshotByteSource::GetInt() { | 297 int SnapshotByteSource::GetInt() { |
305 // A little unwind to catch the really small ints. | 298 // This way of variable-length encoding integers does not suffer from branch |
306 int snapshot_byte = Get(); | 299 // mispredictions. |
307 if ((snapshot_byte & 0x80) == 0) { | 300 uint32_t answer = GetUnalignedInt(); |
308 return snapshot_byte; | 301 int bytes = answer & 3; |
309 } | 302 Advance(bytes); |
310 int accumulator = (snapshot_byte & 0x7f) << 7; | 303 uint32_t mask = 0xffffffffu; |
311 while (true) { | 304 mask >>= 32 - (bytes << 3); |
312 snapshot_byte = Get(); | 305 answer &= mask; |
313 if ((snapshot_byte & 0x80) == 0) { | 306 answer >>= 2; |
314 return accumulator | snapshot_byte; | 307 return answer; |
315 } | |
316 accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7; | |
317 } | |
318 UNREACHABLE(); | |
319 return accumulator; | |
320 } | 308 } |
321 | 309 |
322 | 310 |
323 void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) { | 311 void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) { |
324 memcpy(to, data_ + position_, number_of_bytes); | 312 memcpy(to, data_ + position_, number_of_bytes); |
325 position_ += number_of_bytes; | 313 position_ += number_of_bytes; |
326 } | 314 } |
327 | 315 |
328 | 316 |
329 // A Deserializer reads a snapshot and reconstructs the Object graph it defines. | 317 // A Deserializer reads a snapshot and reconstructs the Object graph it defines. |
330 class Deserializer: public SerializerDeserializer { | 318 class Deserializer: public SerializerDeserializer { |
331 public: | 319 public: |
332 // Create a deserializer from a snapshot byte source. | 320 // Create a deserializer from a snapshot byte source. |
333 explicit Deserializer(SnapshotByteSource* source); | 321 explicit Deserializer(SnapshotByteSource* source); |
334 | 322 |
335 virtual ~Deserializer(); | 323 virtual ~Deserializer(); |
336 | 324 |
337 // Deserialize the snapshot into an empty heap. | 325 // Deserialize the snapshot into an empty heap. |
338 void Deserialize(); | 326 void Deserialize(); |
339 | 327 |
340 // Deserialize a single object and the objects reachable from it. | 328 // Deserialize a single object and the objects reachable from it. |
341 void DeserializePartial(Object** root); | 329 void DeserializePartial(Object** root); |
342 | 330 |
| 331 void set_reservation(int space_number, uintptr_t reservation) { |
| 332 ASSERT(space_number >= 0); |
| 333 ASSERT(space_number <= LAST_SPACE); |
| 334 reservations_[space_number] = reservation; |
| 335 } |
| 336 |
343 private: | 337 private: |
344 virtual void VisitPointers(Object** start, Object** end); | 338 virtual void VisitPointers(Object** start, Object** end); |
345 | 339 |
346 virtual void VisitExternalReferences(Address* start, Address* end) { | 340 virtual void VisitExternalReferences(Address* start, Address* end) { |
347 UNREACHABLE(); | 341 UNREACHABLE(); |
348 } | 342 } |
349 | 343 |
350 virtual void VisitRuntimeEntry(RelocInfo* rinfo) { | 344 virtual void VisitRuntimeEntry(RelocInfo* rinfo) { |
351 UNREACHABLE(); | 345 UNREACHABLE(); |
352 } | 346 } |
353 | 347 |
354 // Fills in some heap data in an area from start to end (non-inclusive). The | 348 // Fills in some heap data in an area from start to end (non-inclusive). The |
355 // space id is used for the write barrier. The object_address is the address | 349 // space id is used for the write barrier. The object_address is the address |
356 // of the object we are writing into, or NULL if we are not writing into an | 350 // of the object we are writing into, or NULL if we are not writing into an |
357 // object, i.e. if we are writing a series of tagged values that are not on | 351 // object, i.e. if we are writing a series of tagged values that are not on |
358 // the heap. | 352 // the heap. |
359 void ReadChunk( | 353 void ReadChunk( |
360 Object** start, Object** end, int space, Address object_address); | 354 Object** start, Object** end, int space, Address object_address); |
361 HeapObject* GetAddressFromStart(int space); | 355 void ReadObject(int space_number, Object** write_back); |
362 inline HeapObject* GetAddressFromEnd(int space); | 356 |
363 Address Allocate(int space_number, Space* space, int size); | 357 // This routine both allocates a new object, and also keeps |
364 void ReadObject(int space_number, Space* space, Object** write_back); | 358 // track of where objects have been allocated so that we can |
| 359 // fix back references when deserializing. |
| 360 Address Allocate(int space_index, int size) { |
| 361 Address address = high_water_[space_index]; |
| 362 high_water_[space_index] = address + size; |
| 363 return address; |
| 364 } |
| 365 |
| 366 // This returns the address of an object that has been described in the |
| 367 // snapshot as being offset bytes back in a particular space. |
| 368 HeapObject* GetAddressFromEnd(int space) { |
| 369 int offset = source_->GetInt(); |
| 370 offset <<= kObjectAlignmentBits; |
| 371 return HeapObject::FromAddress(high_water_[space] - offset); |
| 372 } |
| 373 |
365 | 374 |
366 // Cached current isolate. | 375 // Cached current isolate. |
367 Isolate* isolate_; | 376 Isolate* isolate_; |
368 | 377 |
369 // Keep track of the pages in the paged spaces. | |
370 // (In large object space we are keeping track of individual objects | |
371 // rather than pages.) In new space we just need the address of the | |
372 // first object and the others will flow from that. | |
373 List<Address> pages_[SerializerDeserializer::kNumberOfSpaces]; | |
374 | |
375 SnapshotByteSource* source_; | 378 SnapshotByteSource* source_; |
376 // This is the address of the next object that will be allocated in each | 379 // This is the address of the next object that will be allocated in each |
377 // space. It is used to calculate the addresses of back-references. | 380 // space. It is used to calculate the addresses of back-references. |
378 Address high_water_[LAST_SPACE + 1]; | 381 Address high_water_[LAST_SPACE + 1]; |
379 // This is the address of the most recent object that was allocated. It | 382 |
380 // is used to set the location of the new page when we encounter a | 383 intptr_t reservations_[LAST_SPACE + 1]; |
381 // START_NEW_PAGE_SERIALIZATION tag. | 384 static const intptr_t kUninitializedReservation = -1; |
382 Address last_object_address_; | |
383 | 385 |
384 ExternalReferenceDecoder* external_reference_decoder_; | 386 ExternalReferenceDecoder* external_reference_decoder_; |
385 | 387 |
386 DISALLOW_COPY_AND_ASSIGN(Deserializer); | 388 DISALLOW_COPY_AND_ASSIGN(Deserializer); |
387 }; | 389 }; |
388 | 390 |
389 | 391 |
390 class SnapshotByteSink { | 392 class SnapshotByteSink { |
391 public: | 393 public: |
392 virtual ~SnapshotByteSink() { } | 394 virtual ~SnapshotByteSink() { } |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
454 | 456 |
455 // There can be only one serializer per V8 process. | 457 // There can be only one serializer per V8 process. |
456 class Serializer : public SerializerDeserializer { | 458 class Serializer : public SerializerDeserializer { |
457 public: | 459 public: |
458 explicit Serializer(SnapshotByteSink* sink); | 460 explicit Serializer(SnapshotByteSink* sink); |
459 ~Serializer(); | 461 ~Serializer(); |
460 void VisitPointers(Object** start, Object** end); | 462 void VisitPointers(Object** start, Object** end); |
461 // You can call this after serialization to find out how much space was used | 463 // You can call this after serialization to find out how much space was used |
462 // in each space. | 464 // in each space. |
463 int CurrentAllocationAddress(int space) { | 465 int CurrentAllocationAddress(int space) { |
464 if (SpaceIsLarge(space)) return large_object_total_; | 466 ASSERT(space < kNumberOfSpaces); |
465 return fullness_[space]; | 467 return fullness_[space]; |
466 } | 468 } |
467 | 469 |
468 static void Enable() { | 470 static void Enable() { |
469 if (!serialization_enabled_) { | 471 if (!serialization_enabled_) { |
470 ASSERT(!too_late_to_enable_now_); | 472 ASSERT(!too_late_to_enable_now_); |
471 } | 473 } |
472 serialization_enabled_ = true; | 474 serialization_enabled_ = true; |
473 } | 475 } |
474 | 476 |
475 static void Disable() { serialization_enabled_ = false; } | 477 static void Disable() { serialization_enabled_ = false; } |
476 // Call this when you have made use of the fact that there is no serialization | 478 // Call this when you have made use of the fact that there is no serialization |
477 // going on. | 479 // going on. |
478 static void TooLateToEnableNow() { too_late_to_enable_now_ = true; } | 480 static void TooLateToEnableNow() { too_late_to_enable_now_ = true; } |
479 static bool enabled() { return serialization_enabled_; } | 481 static bool enabled() { return serialization_enabled_; } |
480 SerializationAddressMapper* address_mapper() { return &address_mapper_; } | 482 SerializationAddressMapper* address_mapper() { return &address_mapper_; } |
481 void PutRoot( | 483 void PutRoot(int index, |
482 int index, HeapObject* object, HowToCode how, WhereToPoint where); | 484 HeapObject* object, |
| 485 HowToCode how, |
| 486 WhereToPoint where, |
| 487 int skip); |
483 | 488 |
484 protected: | 489 protected: |
485 static const int kInvalidRootIndex = -1; | 490 static const int kInvalidRootIndex = -1; |
486 | 491 |
487 int RootIndex(HeapObject* heap_object, HowToCode from); | 492 int RootIndex(HeapObject* heap_object, HowToCode from); |
488 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0; | 493 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0; |
489 intptr_t root_index_wave_front() { return root_index_wave_front_; } | 494 intptr_t root_index_wave_front() { return root_index_wave_front_; } |
490 void set_root_index_wave_front(intptr_t value) { | 495 void set_root_index_wave_front(intptr_t value) { |
491 ASSERT(value >= root_index_wave_front_); | 496 ASSERT(value >= root_index_wave_front_); |
492 root_index_wave_front_ = value; | 497 root_index_wave_front_ = value; |
493 } | 498 } |
494 | 499 |
495 class ObjectSerializer : public ObjectVisitor { | 500 class ObjectSerializer : public ObjectVisitor { |
496 public: | 501 public: |
497 ObjectSerializer(Serializer* serializer, | 502 ObjectSerializer(Serializer* serializer, |
498 Object* o, | 503 Object* o, |
499 SnapshotByteSink* sink, | 504 SnapshotByteSink* sink, |
500 HowToCode how_to_code, | 505 HowToCode how_to_code, |
501 WhereToPoint where_to_point) | 506 WhereToPoint where_to_point) |
502 : serializer_(serializer), | 507 : serializer_(serializer), |
503 object_(HeapObject::cast(o)), | 508 object_(HeapObject::cast(o)), |
504 sink_(sink), | 509 sink_(sink), |
505 reference_representation_(how_to_code + where_to_point), | 510 reference_representation_(how_to_code + where_to_point), |
506 bytes_processed_so_far_(0) { } | 511 bytes_processed_so_far_(0), |
| 512 code_object_(o->IsCode()), |
| 513 code_has_been_output_(false) { } |
507 void Serialize(); | 514 void Serialize(); |
508 void VisitPointers(Object** start, Object** end); | 515 void VisitPointers(Object** start, Object** end); |
509 void VisitEmbeddedPointer(RelocInfo* target); | 516 void VisitEmbeddedPointer(RelocInfo* target); |
510 void VisitExternalReferences(Address* start, Address* end); | 517 void VisitExternalReferences(Address* start, Address* end); |
511 void VisitExternalReference(RelocInfo* rinfo); | 518 void VisitExternalReference(RelocInfo* rinfo); |
512 void VisitCodeTarget(RelocInfo* target); | 519 void VisitCodeTarget(RelocInfo* target); |
513 void VisitCodeEntry(Address entry_address); | 520 void VisitCodeEntry(Address entry_address); |
514 void VisitGlobalPropertyCell(RelocInfo* rinfo); | 521 void VisitGlobalPropertyCell(RelocInfo* rinfo); |
515 void VisitRuntimeEntry(RelocInfo* reloc); | 522 void VisitRuntimeEntry(RelocInfo* reloc); |
516 // Used for serializing the external strings that hold the natives source. | 523 // Used for serializing the external strings that hold the natives source. |
517 void VisitExternalAsciiString( | 524 void VisitExternalAsciiString( |
518 v8::String::ExternalAsciiStringResource** resource); | 525 v8::String::ExternalAsciiStringResource** resource); |
519 // We can't serialize a heap with external two byte strings. | 526 // We can't serialize a heap with external two byte strings. |
520 void VisitExternalTwoByteString( | 527 void VisitExternalTwoByteString( |
521 v8::String::ExternalStringResource** resource) { | 528 v8::String::ExternalStringResource** resource) { |
522 UNREACHABLE(); | 529 UNREACHABLE(); |
523 } | 530 } |
524 | 531 |
525 private: | 532 private: |
526 void OutputRawData(Address up_to); | 533 enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn }; |
| 534 // This function outputs or skips the raw data between the last pointer and |
| 535 // up to the current position. It optionally can just return the number of |
| 536 // bytes to skip instead of performing a skip instruction, in case the skip |
| 537 // can be merged into the next instruction. |
| 538 int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn); |
527 | 539 |
528 Serializer* serializer_; | 540 Serializer* serializer_; |
529 HeapObject* object_; | 541 HeapObject* object_; |
530 SnapshotByteSink* sink_; | 542 SnapshotByteSink* sink_; |
531 int reference_representation_; | 543 int reference_representation_; |
532 int bytes_processed_so_far_; | 544 int bytes_processed_so_far_; |
| 545 bool code_object_; |
| 546 bool code_has_been_output_; |
533 }; | 547 }; |
534 | 548 |
535 virtual void SerializeObject(Object* o, | 549 virtual void SerializeObject(Object* o, |
536 HowToCode how_to_code, | 550 HowToCode how_to_code, |
537 WhereToPoint where_to_point) = 0; | 551 WhereToPoint where_to_point, |
| 552 int skip) = 0; |
538 void SerializeReferenceToPreviousObject( | 553 void SerializeReferenceToPreviousObject( |
539 int space, | 554 int space, |
540 int address, | 555 int address, |
541 HowToCode how_to_code, | 556 HowToCode how_to_code, |
542 WhereToPoint where_to_point); | 557 WhereToPoint where_to_point, |
| 558 int skip); |
543 void InitializeAllocators(); | 559 void InitializeAllocators(); |
544 // This will return the space for an object. If the object is in large | 560 // This will return the space for an object. |
545 // object space it may return kLargeCode or kLargeFixedArray in order | |
546 // to indicate to the deserializer what kind of large object allocation | |
547 // to make. | |
548 static int SpaceOfObject(HeapObject* object); | 561 static int SpaceOfObject(HeapObject* object); |
549 // This just returns the space of the object. It will return LO_SPACE | 562 int Allocate(int space, int size); |
550 // for all large objects since you can't check the type of the object | |
551 // once the map has been used for the serialization address. | |
552 static int SpaceOfAlreadySerializedObject(HeapObject* object); | |
553 int Allocate(int space, int size, bool* new_page_started); | |
554 int EncodeExternalReference(Address addr) { | 563 int EncodeExternalReference(Address addr) { |
555 return external_reference_encoder_->Encode(addr); | 564 return external_reference_encoder_->Encode(addr); |
556 } | 565 } |
557 | 566 |
558 int SpaceAreaSize(int space); | 567 int SpaceAreaSize(int space); |
559 | 568 |
560 Isolate* isolate_; | 569 Isolate* isolate_; |
561 // Keep track of the fullness of each space in order to generate | 570 // Keep track of the fullness of each space in order to generate |
562 // relative addresses for back references. Large objects are | 571 // relative addresses for back references. |
563 // just numbered sequentially since relative addresses make no | |
564 // sense in large object space. | |
565 int fullness_[LAST_SPACE + 1]; | 572 int fullness_[LAST_SPACE + 1]; |
566 SnapshotByteSink* sink_; | 573 SnapshotByteSink* sink_; |
567 int current_root_index_; | 574 int current_root_index_; |
568 ExternalReferenceEncoder* external_reference_encoder_; | 575 ExternalReferenceEncoder* external_reference_encoder_; |
569 static bool serialization_enabled_; | 576 static bool serialization_enabled_; |
570 // Did we already make use of the fact that serialization was not enabled? | 577 // Did we already make use of the fact that serialization was not enabled? |
571 static bool too_late_to_enable_now_; | 578 static bool too_late_to_enable_now_; |
572 int large_object_total_; | |
573 SerializationAddressMapper address_mapper_; | 579 SerializationAddressMapper address_mapper_; |
574 intptr_t root_index_wave_front_; | 580 intptr_t root_index_wave_front_; |
| 581 void Pad(); |
575 | 582 |
576 friend class ObjectSerializer; | 583 friend class ObjectSerializer; |
577 friend class Deserializer; | 584 friend class Deserializer; |
578 | 585 |
579 private: | 586 private: |
580 DISALLOW_COPY_AND_ASSIGN(Serializer); | 587 DISALLOW_COPY_AND_ASSIGN(Serializer); |
581 }; | 588 }; |
582 | 589 |
583 | 590 |
584 class PartialSerializer : public Serializer { | 591 class PartialSerializer : public Serializer { |
585 public: | 592 public: |
586 PartialSerializer(Serializer* startup_snapshot_serializer, | 593 PartialSerializer(Serializer* startup_snapshot_serializer, |
587 SnapshotByteSink* sink) | 594 SnapshotByteSink* sink) |
588 : Serializer(sink), | 595 : Serializer(sink), |
589 startup_serializer_(startup_snapshot_serializer) { | 596 startup_serializer_(startup_snapshot_serializer) { |
590 set_root_index_wave_front(Heap::kStrongRootListLength); | 597 set_root_index_wave_front(Heap::kStrongRootListLength); |
591 } | 598 } |
592 | 599 |
593 // Serialize the objects reachable from a single object pointer. | 600 // Serialize the objects reachable from a single object pointer. |
594 virtual void Serialize(Object** o); | 601 virtual void Serialize(Object** o); |
595 virtual void SerializeObject(Object* o, | 602 virtual void SerializeObject(Object* o, |
596 HowToCode how_to_code, | 603 HowToCode how_to_code, |
597 WhereToPoint where_to_point); | 604 WhereToPoint where_to_point, |
| 605 int skip); |
598 | 606 |
599 protected: | 607 protected: |
600 virtual int PartialSnapshotCacheIndex(HeapObject* o); | 608 virtual int PartialSnapshotCacheIndex(HeapObject* o); |
601 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { | 609 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { |
602 // Scripts should be referred only through shared function infos. We can't | 610 // Scripts should be referred only through shared function infos. We can't |
603 // allow them to be part of the partial snapshot because they contain a | 611 // allow them to be part of the partial snapshot because they contain a |
604 // unique ID, and deserializing several partial snapshots containing script | 612 // unique ID, and deserializing several partial snapshots containing script |
605 // would cause dupes. | 613 // would cause dupes. |
606 ASSERT(!o->IsScript()); | 614 ASSERT(!o->IsScript()); |
607 return o->IsString() || o->IsSharedFunctionInfo() || | 615 return o->IsString() || o->IsSharedFunctionInfo() || |
(...skipping 17 matching lines...) Expand all Loading... |
625 // snapshot. | 633 // snapshot. |
626 Isolate::Current()->set_serialize_partial_snapshot_cache_length(0); | 634 Isolate::Current()->set_serialize_partial_snapshot_cache_length(0); |
627 } | 635 } |
628 // Serialize the current state of the heap. The order is: | 636 // Serialize the current state of the heap. The order is: |
629 // 1) Strong references. | 637 // 1) Strong references. |
630 // 2) Partial snapshot cache. | 638 // 2) Partial snapshot cache. |
631 // 3) Weak references (e.g. the symbol table). | 639 // 3) Weak references (e.g. the symbol table). |
632 virtual void SerializeStrongReferences(); | 640 virtual void SerializeStrongReferences(); |
633 virtual void SerializeObject(Object* o, | 641 virtual void SerializeObject(Object* o, |
634 HowToCode how_to_code, | 642 HowToCode how_to_code, |
635 WhereToPoint where_to_point); | 643 WhereToPoint where_to_point, |
| 644 int skip); |
636 void SerializeWeakReferences(); | 645 void SerializeWeakReferences(); |
637 void Serialize() { | 646 void Serialize() { |
638 SerializeStrongReferences(); | 647 SerializeStrongReferences(); |
639 SerializeWeakReferences(); | 648 SerializeWeakReferences(); |
| 649 Pad(); |
640 } | 650 } |
641 | 651 |
642 private: | 652 private: |
643 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { | 653 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { |
644 return false; | 654 return false; |
645 } | 655 } |
646 }; | 656 }; |
647 | 657 |
648 | 658 |
649 } } // namespace v8::internal | 659 } } // namespace v8::internal |
650 | 660 |
651 #endif // V8_SERIALIZE_H_ | 661 #endif // V8_SERIALIZE_H_ |
OLD | NEW |