Chromium Code Reviews

Diff: src/spaces-inl.h

Issue 71163006: Merge bleeding_edge r17376:17693. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Fix all.gyp | Created 7 years, 1 month ago
--- src/spaces-inl.h (old)
+++ src/spaces-inl.h (new)
@@ -1,10 +1,10 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(... 246 matching lines skipped ...)
@@ -257,126 +257,109 @@
   ASSERT(page->owner() == owner());
   set_prev_chunk(page);
 }


 // Try linear allocation in the page of alloc_info's allocation top. Does
 // not contain slow case logic (e.g. move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
-  Address current_top = allocation_info_.top;
+  Address current_top = allocation_info_.top();
   Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit) return NULL;
+  if (new_top > allocation_info_.limit()) return NULL;

-  allocation_info_.top = new_top;
+  allocation_info_.set_top(new_top);
   return HeapObject::FromAddress(current_top);
 }


 // Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
-                                     AllocationType event) {
-  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
-
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
-    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
-      profiler->NewObjectEvent(object->address(), size_in_bytes);
-    }
     return object;
   }

   ASSERT(!heap()->linear_allocation() ||
          (anchor_.next_chunk() == &anchor_ &&
           anchor_.prev_chunk() == &anchor_));

   object = free_list_.Allocate(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
-    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
-      profiler->NewObjectEvent(object->address(), size_in_bytes);
-    }
     return object;
   }

   object = SlowAllocateRaw(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
     }
-    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
-      profiler->NewObjectEvent(object->address(), size_in_bytes);
-    }
     return object;
   }

   return Failure::RetryAfterGC(identity());
 }


 // -----------------------------------------------------------------------------
 // NewSpace


 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
+  Address old_top = allocation_info_.top();
 #ifdef DEBUG
   // If we are stressing compaction we waste some memory in new space
   // in order to get more frequent GCs.
   if (FLAG_stress_compaction && !heap()->linear_allocation()) {
-    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
       int filler_size = size_in_bytes * 4;
       for (int i = 0; i < filler_size; i += kPointerSize) {
         *(reinterpret_cast<Object**>(old_top + i)) =
             heap()->one_pointer_filler_map();
       }
       old_top += filler_size;
-      allocation_info_.top += filler_size;
+      allocation_info_.set_top(allocation_info_.top() + filler_size);
     }
   }
 #endif

-  if (allocation_info_.limit - old_top < size_in_bytes) {
+  if (allocation_info_.limit() - old_top < size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }

   HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.top += size_in_bytes;
+  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

-  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
-  if (profiler != NULL && profiler->is_tracking_allocations()) {
-    profiler->NewObjectEvent(obj->address(), size_in_bytes);
-  }
-
   return obj;
 }


 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
 }


 intptr_t LargeObjectSpace::Available() {
   return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }


 bool FreeListNode::IsFreeListNode(HeapObject* object) {
   Map* map = object->map();
   Heap* heap = object->GetHeap();
   return map == heap->raw_unchecked_free_space_map()
       || map == heap->raw_unchecked_one_pointer_filler_map()
       || map == heap->raw_unchecked_two_pointer_filler_map();
 }

 } }  // namespace v8::internal

 #endif  // V8_SPACES_INL_H_
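Aside from dropping the per-allocation heap-profiler hooks, the mechanical part of this merge is that allocation_info_ is no longer accessed through raw top/limit fields but through top(), set_top() and limit() accessors. Below is a minimal sketch of what such an accessor-style allocation-info class could look like; the real AllocationInfo is defined in src/spaces.h, so the member layout, the Address typedef, and the set_limit name here are assumptions for illustration only.

#include <stddef.h>
#include <stdint.h>

typedef uint8_t* Address;  // stand-in for V8's byte-pointer Address typedef (assumption)

// Sketch only -- the authoritative AllocationInfo lives in src/spaces.h.
class AllocationInfoSketch {
 public:
  AllocationInfoSketch() : top_(NULL), limit_(NULL) {}

  Address top() const { return top_; }                // current bump pointer
  void set_top(Address top) { top_ = top; }           // advance or reset it
  Address limit() const { return limit_; }            // end of the linear area
  void set_limit(Address limit) { limit_ = limit; }   // assumed setter

 private:
  Address top_;    // next free address in the current linear allocation area
  Address limit_;  // first address past that area
};

Routing every access through accessors like these gives one place to add verification or to adjust the limit later, which is the usual reason for making such fields private.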
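For callers, the visible change is that PagedSpace::AllocateRaw now takes only the requested size. A hypothetical call-site fragment follows; the space variable and the error handling are illustrative and not part of this patch, and the MaybeObject::ToObject pattern is the usual one for V8 of this era rather than something this CL introduces.

// Hypothetical caller of the new single-argument AllocateRaw.
MaybeObject* maybe = space->AllocateRaw(size_in_bytes);
Object* result;
if (!maybe->ToObject(&result)) {
  // Allocation failed: AllocateRaw returned Failure::RetryAfterGC(identity()),
  // so the caller is expected to trigger a GC and retry.
  return maybe;
}
HeapObject* object = HeapObject::cast(result);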
