OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 863 matching lines...)
874 | 874 |
875 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; | 875 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; |
876 | 876 |
877 // The marking speed is driven either by the allocation rate or by the rate | 877 // The marking speed is driven either by the allocation rate or by the rate |
878 // at which we are having to check the color of objects in the write barrier. | 878 // at which we are having to check the color of objects in the write barrier. |
879 // It is possible for a tight non-allocating loop to run a lot of write | 879 // It is possible for a tight non-allocating loop to run a lot of write |
880 // barriers before we get here and check them (marking can only take place on | 880 // barriers before we get here and check them (marking can only take place on |
881 // allocation), so to reduce the lumpiness we don't use the write barriers | 881 // allocation), so to reduce the lumpiness we don't use the write barriers |
882 // invoked since last step directly to determine the amount of work to do. | 882 // invoked since last step directly to determine the amount of work to do. |
883 intptr_t bytes_to_process = | 883 intptr_t bytes_to_process = |
884 marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold); | 884 marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_); |
885 allocated_ = 0; | 885 allocated_ = 0; |
886 write_barriers_invoked_since_last_step_ = 0; | 886 write_barriers_invoked_since_last_step_ = 0; |
887 | 887 |
888 bytes_scanned_ += bytes_to_process; | 888 bytes_scanned_ += bytes_to_process; |
889 | 889 |
890 double start = 0; | 890 double start = 0; |
891 | 891 |
892 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 892 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
893 start = OS::TimeCurrentMillis(); | 893 start = OS::TimeCurrentMillis(); |
894 } | 894 } |
(...skipping 99 matching lines...)
994 bytes_scanned_ = 0; | 994 bytes_scanned_ = 0; |
995 write_barriers_invoked_since_last_step_ = 0; | 995 write_barriers_invoked_since_last_step_ = 0; |
996 } | 996 } |
997 | 997 |
998 | 998 |
999 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 999 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
1000 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); | 1000 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); |
1001 } | 1001 } |
1002 | 1002 |
1003 } } // namespace v8::internal | 1003 } } // namespace v8::internal |
OLD | NEW |