OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/gpu_memory_manager.h" | 5 #include "content/common/gpu/gpu_memory_manager.h" |
6 | 6 |
7 #if defined(ENABLE_GPU) | 7 #if defined(ENABLE_GPU) |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 | 10 |
11 #include "base/bind.h" | 11 #include "base/bind.h" |
| 12 #include "base/debug/trace_event.h" |
12 #include "base/message_loop.h" | 13 #include "base/message_loop.h" |
13 #include "content/common/gpu/gpu_command_buffer_stub.h" | 14 #include "content/common/gpu/gpu_command_buffer_stub.h" |
14 #include "content/common/gpu/gpu_memory_allocation.h" | 15 #include "content/common/gpu/gpu_memory_allocation.h" |
15 | 16 |
16 namespace { | 17 namespace { |
17 | 18 |
18 const int kDelayedScheduleManageTimeoutMs = 67; | 19 const int kDelayedScheduleManageTimeoutMs = 67; |
19 | 20 |
20 bool IsInSameContextShareGroupAsAnyOf( | 21 bool IsInSameContextShareGroupAsAnyOf( |
21 const GpuCommandBufferStubBase* stub, | 22 const GpuCommandBufferStubBase* stub, |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
63 } | 64 } |
64 | 65 |
65 } | 66 } |
66 | 67 |
67 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client, | 68 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client, |
68 size_t max_surfaces_with_frontbuffer_soft_limit) | 69 size_t max_surfaces_with_frontbuffer_soft_limit) |
69 : client_(client), | 70 : client_(client), |
70 manage_immediate_scheduled_(false), | 71 manage_immediate_scheduled_(false), |
71 max_surfaces_with_frontbuffer_soft_limit_( | 72 max_surfaces_with_frontbuffer_soft_limit_( |
72 max_surfaces_with_frontbuffer_soft_limit), | 73 max_surfaces_with_frontbuffer_soft_limit), |
73 peak_assigned_allocation_sum_(0) { | 74 bytes_allocated_current_(0), |
| 75 bytes_allocated_historical_max_(0) { |
74 } | 76 } |
75 | 77 |
76 GpuMemoryManager::~GpuMemoryManager() { | 78 GpuMemoryManager::~GpuMemoryManager() { |
77 } | 79 } |
78 | 80 |
79 bool GpuMemoryManager::StubWithSurfaceComparator::operator()( | 81 bool GpuMemoryManager::StubWithSurfaceComparator::operator()( |
80 GpuCommandBufferStubBase* lhs, | 82 GpuCommandBufferStubBase* lhs, |
81 GpuCommandBufferStubBase* rhs) { | 83 GpuCommandBufferStubBase* rhs) { |
82 DCHECK(lhs->has_surface_state() && rhs->has_surface_state()); | 84 DCHECK(lhs->has_surface_state() && rhs->has_surface_state()); |
83 const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state(); | 85 const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state(); |
(...skipping 24 matching lines...) Expand all Loading... |
108 delayed_manage_callback_.callback(), | 110 delayed_manage_callback_.callback(), |
109 base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs)); | 111 base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs)); |
110 } | 112 } |
111 } | 113 } |
112 | 114 |
// Returns the total GPU memory budget, in bytes, that the manager may
// distribute among clients. Currently a hard-coded cap rather than a value
// derived from the system's actual GPU memory (see TODO below).
size_t GpuMemoryManager::GetAvailableGpuMemory() const {
  // TODO(mmocny): Implement this with real system figures.
  return kMaximumAllocationForTabs;
}
117 | 119 |
| 120 void GpuMemoryManager::TrackMemoryAllocatedChange(size_t old_size, |
| 121 size_t new_size) |
| 122 { |
| 123 if (new_size < old_size) { |
| 124 size_t delta = old_size - new_size; |
| 125 DCHECK(bytes_allocated_current_ >= delta); |
| 126 bytes_allocated_current_ -= delta; |
| 127 } |
| 128 else { |
| 129 size_t delta = new_size - old_size; |
| 130 bytes_allocated_current_ += delta; |
| 131 if (bytes_allocated_current_ > bytes_allocated_historical_max_) { |
| 132 bytes_allocated_historical_max_ = bytes_allocated_current_; |
| 133 } |
| 134 } |
| 135 if (new_size != old_size) { |
| 136 TRACE_COUNTER_ID1("GpuMemoryManager", |
| 137 "GpuMemoryUsage", |
| 138 this, |
| 139 bytes_allocated_current_); |
| 140 } |
| 141 } |
| 142 |
118 // The current Manage algorithm simply classifies contexts (stubs) into | 143 // The current Manage algorithm simply classifies contexts (stubs) into |
119 // "foreground", "background", or "hibernated" categories. | 144 // "foreground", "background", or "hibernated" categories. |
120 // For each of these three categories, there are predefined memory allocation | 145 // For each of these three categories, there are predefined memory allocation |
121 // limits and front/backbuffer states. | 146 // limits and front/backbuffer states. |
122 // | 147 // |
123 // Stubs may or may not have a surfaces, and the rules are different for each. | 148 // Stubs may or may not have a surfaces, and the rules are different for each. |
124 // | 149 // |
125 // The rules for categorizing contexts with a surface are: | 150 // The rules for categorizing contexts with a surface are: |
126 // 1. Foreground: All visible surfaces. | 151 // 1. Foreground: All visible surfaces. |
127 // * Must have both front and back buffer. | 152 // * Must have both front and back buffer. |
(...skipping 143 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
271 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers), | 296 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers), |
272 false); | 297 false); |
273 | 298 |
274 size_t assigned_allocation_sum = 0; | 299 size_t assigned_allocation_sum = 0; |
275 for (StubMemoryStatMap::iterator it = | 300 for (StubMemoryStatMap::iterator it = |
276 stub_memory_stats_for_last_manage_.begin(); | 301 stub_memory_stats_for_last_manage_.begin(); |
277 it != stub_memory_stats_for_last_manage_.end(); | 302 it != stub_memory_stats_for_last_manage_.end(); |
278 ++it) { | 303 ++it) { |
279 assigned_allocation_sum += it->second.allocation.gpu_resource_size_in_bytes; | 304 assigned_allocation_sum += it->second.allocation.gpu_resource_size_in_bytes; |
280 } | 305 } |
281 | |
282 if (assigned_allocation_sum > peak_assigned_allocation_sum_) | |
283 peak_assigned_allocation_sum_ = assigned_allocation_sum; | |
284 } | 306 } |
285 | 307 |
286 #endif | 308 #endif |
OLD | NEW |