Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(7)

Side by Side Diff: src/spaces-inl.h

Issue 23039014: Hook-allocation on bleeding edge (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/spaces.h ('k') | src/x64/code-stubs-x64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 11 matching lines...) Expand all
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #ifndef V8_SPACES_INL_H_ 28 #ifndef V8_SPACES_INL_H_
29 #define V8_SPACES_INL_H_ 29 #define V8_SPACES_INL_H_
30 30
31 #include "isolate.h" 31 #include "isolate.h"
32 #include "heap-profiler.h"
32 #include "spaces.h" 33 #include "spaces.h"
33 #include "v8memory.h" 34 #include "v8memory.h"
34 35
35 namespace v8 { 36 namespace v8 {
36 namespace internal { 37 namespace internal {
37 38
38 39
39 // ----------------------------------------------------------------------------- 40 // -----------------------------------------------------------------------------
40 // Bitmap 41 // Bitmap
41 42
(...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after
266 Address current_top = allocation_info_.top; 267 Address current_top = allocation_info_.top;
267 Address new_top = current_top + size_in_bytes; 268 Address new_top = current_top + size_in_bytes;
268 if (new_top > allocation_info_.limit) return NULL; 269 if (new_top > allocation_info_.limit) return NULL;
269 270
270 allocation_info_.top = new_top; 271 allocation_info_.top = new_top;
271 return HeapObject::FromAddress(current_top); 272 return HeapObject::FromAddress(current_top);
272 } 273 }
273 274
274 275
275 // Raw allocation. 276 // Raw allocation.
276 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { 277 HeapObject* PagedSpace::AllocateRawHelper(int size_in_bytes) {
277 HeapObject* object = AllocateLinearly(size_in_bytes); 278 HeapObject* object = AllocateLinearly(size_in_bytes);
278 if (object != NULL) { 279 if (object != NULL) {
279 if (identity() == CODE_SPACE) { 280 if (identity() == CODE_SPACE) {
280 SkipList::Update(object->address(), size_in_bytes); 281 SkipList::Update(object->address(), size_in_bytes);
281 } 282 }
282 return object; 283 return object;
283 } 284 }
284 285
285 ASSERT(!heap()->linear_allocation() || 286 ASSERT(!heap()->linear_allocation() ||
286 (anchor_.next_chunk() == &anchor_ && 287 (anchor_.next_chunk() == &anchor_ &&
287 anchor_.prev_chunk() == &anchor_)); 288 anchor_.prev_chunk() == &anchor_));
288 289
289 object = free_list_.Allocate(size_in_bytes); 290 object = free_list_.Allocate(size_in_bytes);
290 if (object != NULL) { 291 if (object != NULL) {
291 if (identity() == CODE_SPACE) { 292 if (identity() == CODE_SPACE) {
292 SkipList::Update(object->address(), size_in_bytes); 293 SkipList::Update(object->address(), size_in_bytes);
293 } 294 }
294 return object; 295 return object;
295 } 296 }
296 297
297 object = SlowAllocateRaw(size_in_bytes); 298 object = SlowAllocateRaw(size_in_bytes);
298 if (object != NULL) { 299 if (object != NULL) {
299 if (identity() == CODE_SPACE) { 300 if (identity() == CODE_SPACE) {
300 SkipList::Update(object->address(), size_in_bytes); 301 SkipList::Update(object->address(), size_in_bytes);
301 } 302 }
302 return object; 303 return object;
303 } 304 }
304 305
306 return NULL;
307 }
308
309
310 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
311 HeapObject* object = AllocateRawHelper(size_in_bytes);
312 if (object != NULL) {
313 heap()->isolate()->heap_profiler()->RecordObjectAllocation(
314 object->address(), size_in_bytes);
315 return object;
316 }
305 return Failure::RetryAfterGC(identity()); 317 return Failure::RetryAfterGC(identity());
306 } 318 }
307 319
320
321 MaybeObject* PagedSpace::AllocateRawSilently(int size_in_bytes) {
322 HeapObject* object = AllocateRawHelper(size_in_bytes);
323 if (object != NULL) {
324 return object;
325 }
326 return Failure::RetryAfterGC(identity());
327 }
328
308 329
309 // ----------------------------------------------------------------------------- 330 // -----------------------------------------------------------------------------
310 // NewSpace 331 // NewSpace
311 332
312 333
313 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) { 334 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
314 Address old_top = allocation_info_.top; 335 Address old_top = allocation_info_.top;
315 #ifdef DEBUG 336 #ifdef DEBUG
316 // If we are stressing compaction we waste some memory in new space 337 // If we are stressing compaction we waste some memory in new space
317 // in order to get more frequent GCs. 338 // in order to get more frequent GCs.
318 if (FLAG_stress_compaction && !HEAP->linear_allocation()) { 339 if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
319 if (allocation_info_.limit - old_top >= size_in_bytes * 4) { 340 if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
320 int filler_size = size_in_bytes * 4; 341 int filler_size = size_in_bytes * 4;
321 for (int i = 0; i < filler_size; i += kPointerSize) { 342 for (int i = 0; i < filler_size; i += kPointerSize) {
322 *(reinterpret_cast<Object**>(old_top + i)) = 343 *(reinterpret_cast<Object**>(old_top + i)) =
323 HEAP->one_pointer_filler_map(); 344 HEAP->one_pointer_filler_map();
324 } 345 }
325 old_top += filler_size; 346 old_top += filler_size;
326 allocation_info_.top += filler_size; 347 allocation_info_.top += filler_size;
327 } 348 }
328 } 349 }
329 #endif 350 #endif
330 351
331 if (allocation_info_.limit - old_top < size_in_bytes) { 352 if (allocation_info_.limit - old_top < size_in_bytes) {
332 return SlowAllocateRaw(size_in_bytes); 353 return SlowAllocateRaw(size_in_bytes);
333 } 354 }
334 355
335 Object* obj = HeapObject::FromAddress(old_top); 356 HeapObject* obj = HeapObject::FromAddress(old_top);
336 allocation_info_.top += size_in_bytes; 357 allocation_info_.top += size_in_bytes;
337 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 358 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
338 359
360 heap()->isolate()->heap_profiler()->RecordObjectAllocation(
361 obj->address(), size_in_bytes);
362
339 return obj; 363 return obj;
340 } 364 }
341 365
342 366
343 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) { 367 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
344 heap->incremental_marking()->SetOldSpacePageFlags(chunk); 368 heap->incremental_marking()->SetOldSpacePageFlags(chunk);
345 return static_cast<LargePage*>(chunk); 369 return static_cast<LargePage*>(chunk);
346 } 370 }
347 371
348 372
349 intptr_t LargeObjectSpace::Available() { 373 intptr_t LargeObjectSpace::Available() {
350 return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available()); 374 return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
351 } 375 }
352 376
353 377
354 bool FreeListNode::IsFreeListNode(HeapObject* object) { 378 bool FreeListNode::IsFreeListNode(HeapObject* object) {
355 Map* map = object->map(); 379 Map* map = object->map();
356 Heap* heap = object->GetHeap(); 380 Heap* heap = object->GetHeap();
357 return map == heap->raw_unchecked_free_space_map() 381 return map == heap->raw_unchecked_free_space_map()
358 || map == heap->raw_unchecked_one_pointer_filler_map() 382 || map == heap->raw_unchecked_one_pointer_filler_map()
359 || map == heap->raw_unchecked_two_pointer_filler_map(); 383 || map == heap->raw_unchecked_two_pointer_filler_map();
360 } 384 }
361 385
362 } } // namespace v8::internal 386 } } // namespace v8::internal
363 387
364 #endif // V8_SPACES_INL_H_ 388 #endif // V8_SPACES_INL_H_
OLD | NEW
« no previous file with comments | « src/spaces.h ('k') | src/x64/code-stubs-x64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698