OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/gpu_memory_manager.h" | 5 #include "content/common/gpu/gpu_memory_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
(...skipping 39 matching lines...)
50 size_t max_surfaces_with_frontbuffer_soft_limit) | 50 size_t max_surfaces_with_frontbuffer_soft_limit) |
51 : channel_manager_(channel_manager), | 51 : channel_manager_(channel_manager), |
52 use_nonuniform_memory_policy_(false), | 52 use_nonuniform_memory_policy_(false), |
53 manage_immediate_scheduled_(false), | 53 manage_immediate_scheduled_(false), |
54 max_surfaces_with_frontbuffer_soft_limit_( | 54 max_surfaces_with_frontbuffer_soft_limit_( |
55 max_surfaces_with_frontbuffer_soft_limit), | 55 max_surfaces_with_frontbuffer_soft_limit), |
56 bytes_available_gpu_memory_(0), | 56 bytes_available_gpu_memory_(0), |
57 bytes_available_gpu_memory_overridden_(false), | 57 bytes_available_gpu_memory_overridden_(false), |
58 bytes_minimum_per_client_(0), | 58 bytes_minimum_per_client_(0), |
59 bytes_minimum_per_client_overridden_(false), | 59 bytes_minimum_per_client_overridden_(false), |
60 bytes_backgrounded_available_gpu_memory_(0), | 60 bytes_nonvisible_available_gpu_memory_(0), |
61 bytes_allocated_managed_current_(0), | 61 bytes_allocated_managed_current_(0), |
62 bytes_allocated_managed_visible_(0), | 62 bytes_allocated_managed_visible_(0), |
63 bytes_allocated_managed_backgrounded_(0), | 63 bytes_allocated_managed_nonvisible_(0), |
64 bytes_allocated_unmanaged_current_(0), | 64 bytes_allocated_unmanaged_current_(0), |
65 bytes_allocated_historical_max_(0), | 65 bytes_allocated_historical_max_(0), |
66 bytes_allocated_unmanaged_high_(0), | 66 bytes_allocated_unmanaged_high_(0), |
67 bytes_allocated_unmanaged_low_(0), | 67 bytes_allocated_unmanaged_low_(0), |
68 bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep), | 68 bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep), |
69 window_count_has_been_received_(false), | 69 window_count_has_been_received_(false), |
70 window_count_(0), | 70 window_count_(0), |
71 disable_schedule_manage_(false) | 71 disable_schedule_manage_(false) |
72 { | 72 { |
73 CommandLine* command_line = CommandLine::ForCurrentProcess(); | 73 CommandLine* command_line = CommandLine::ForCurrentProcess(); |
74 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { | 74 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { |
75 base::StringToSizeT( | 75 base::StringToSizeT( |
76 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), | 76 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), |
77 &bytes_available_gpu_memory_); | 77 &bytes_available_gpu_memory_); |
78 bytes_available_gpu_memory_ *= 1024 * 1024; | 78 bytes_available_gpu_memory_ *= 1024 * 1024; |
79 bytes_available_gpu_memory_overridden_ = true; | 79 bytes_available_gpu_memory_overridden_ = true; |
80 } else | 80 } else |
81 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); | 81 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); |
82 UpdateBackgroundedAvailableGpuMemory(); | 82 UpdateNonvisibleAvailableGpuMemory(); |
83 } | 83 } |
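A hedged usage note: assuming switches::kForceGpuMemAvailableMb maps to the command-line string "force-gpu-mem-available-mb" (the switch definition is not shown in this diff), launching the GPU process with --force-gpu-mem-available-mb=64 would set bytes_available_gpu_memory_ to 64 * 1024 * 1024 bytes and mark it as overridden; without the switch, the platform default from GetDefaultAvailableGpuMemory() is used.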
84 | 84 |
85 GpuMemoryManager::~GpuMemoryManager() { | 85 GpuMemoryManager::~GpuMemoryManager() { |
86 DCHECK(tracking_groups_.empty()); | 86 DCHECK(tracking_groups_.empty()); |
87 DCHECK(clients_visible_mru_.empty()); | 87 DCHECK(clients_visible_mru_.empty()); |
88 DCHECK(clients_nonvisible_mru_.empty()); | 88 DCHECK(clients_nonvisible_mru_.empty()); |
89 DCHECK(clients_nonsurface_.empty()); | 89 DCHECK(clients_nonsurface_.empty()); |
90 DCHECK(!bytes_allocated_managed_current_); | 90 DCHECK(!bytes_allocated_managed_current_); |
91 DCHECK(!bytes_allocated_unmanaged_current_); | 91 DCHECK(!bytes_allocated_unmanaged_current_); |
92 DCHECK(!bytes_allocated_managed_visible_); | 92 DCHECK(!bytes_allocated_managed_visible_); |
93 DCHECK(!bytes_allocated_managed_backgrounded_); | 93 DCHECK(!bytes_allocated_managed_nonvisible_); |
94 } | 94 } |
95 | 95 |
96 size_t GpuMemoryManager::GetAvailableGpuMemory() const { | 96 size_t GpuMemoryManager::GetAvailableGpuMemory() const { |
97 // Allow unmanaged allocations to over-subscribe by at most (high_ - low_) | 97 // Allow unmanaged allocations to over-subscribe by at most (high_ - low_) |
98 // before restricting managed (compositor) memory based on unmanaged usage. | 98 // before restricting managed (compositor) memory based on unmanaged usage. |
99 if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_) | 99 if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_) |
100 return 0; | 100 return 0; |
101 return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_; | 101 return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_; |
102 } | 102 } |
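A minimal sketch of the over-subscription rule above, with made-up numbers; the standalone helper name is illustrative and not part of this change.

// Sketch: unmanaged allocations only shrink the managed (compositor) budget
// by their rounded-down "low" watermark, and can never drive it below zero.
#include <cstddef>

size_t AvailableForManaged(size_t bytes_available, size_t unmanaged_low) {
  if (unmanaged_low > bytes_available)
    return 0;  // Unmanaged usage alone already exceeds the budget.
  return bytes_available - unmanaged_low;
}

// Example: a 256 MiB budget with a 96 MiB unmanaged low watermark leaves
// 160 MiB for managed allocations: AvailableForManaged(256u << 20, 96u << 20).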
103 | 103 |
104 size_t GpuMemoryManager::GetCurrentBackgroundedAvailableGpuMemory() const { | 104 size_t GpuMemoryManager::GetCurrentNonvisibleAvailableGpuMemory() const { |
105 if (bytes_allocated_managed_visible_ < GetAvailableGpuMemory()) { | 105 if (bytes_allocated_managed_visible_ < GetAvailableGpuMemory()) { |
106 return std::min(bytes_backgrounded_available_gpu_memory_, | 106 return std::min(bytes_nonvisible_available_gpu_memory_, |
107 GetAvailableGpuMemory() - bytes_allocated_managed_visible_); | 107 GetAvailableGpuMemory() - bytes_allocated_managed_visible_); |
108 } | 108 } |
109 return 0; | 109 return 0; |
110 } | 110 } |
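Illustrative arithmetic for the clamp above; the helper name and the numbers are hypothetical, not from this change.

// Sketch: nonvisible clients get at most their dedicated pool, and never more
// than whatever the visible clients have not already claimed.
#include <algorithm>
#include <cstddef>

size_t NonvisibleAvailable(size_t available, size_t nonvisible_pool,
                           size_t allocated_visible) {
  if (allocated_visible < available)
    return std::min(nonvisible_pool, available - allocated_visible);
  return 0;  // Visible clients already consume the whole budget.
}

// Example: 256 MiB available, a 64 MiB nonvisible pool, and 200 MiB promised
// to visible clients -> std::min(64 MiB, 56 MiB) = 56 MiB for nonvisible use.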
111 | 111 |
112 size_t GpuMemoryManager::GetDefaultAvailableGpuMemory() const { | 112 size_t GpuMemoryManager::GetDefaultAvailableGpuMemory() const { |
113 #if defined(OS_ANDROID) | 113 #if defined(OS_ANDROID) |
114 return 32 * 1024 * 1024; | 114 return 32 * 1024 * 1024; |
115 #elif defined(OS_CHROMEOS) | 115 #elif defined(OS_CHROMEOS) |
116 return 1024 * 1024 * 1024; | 116 return 1024 * 1024 * 1024; |
117 #else | 117 #else |
118 return 256 * 1024 * 1024; | 118 return 256 * 1024 * 1024; |
119 #endif | 119 #endif |
120 } | 120 } |
121 | 121 |
122 size_t GpuMemoryManager::GetMaximumTotalGpuMemory() const { | 122 size_t GpuMemoryManager::GetMaximumTotalGpuMemory() const { |
123 #if defined(OS_ANDROID) | 123 #if defined(OS_ANDROID) |
124 return 256 * 1024 * 1024; | 124 return 256 * 1024 * 1024; |
125 #else | 125 #else |
126 return 1024 * 1024 * 1024; | 126 return 1024 * 1024 * 1024; |
127 #endif | 127 #endif |
128 } | 128 } |
129 | 129 |
130 size_t GpuMemoryManager::GetMaximumTabAllocation() const { | 130 size_t GpuMemoryManager::GetMaximumClientAllocation() const { |
131 #if defined(OS_ANDROID) || defined(OS_CHROMEOS) | 131 #if defined(OS_ANDROID) || defined(OS_CHROMEOS) |
132 return bytes_available_gpu_memory_; | 132 return bytes_available_gpu_memory_; |
133 #else | 133 #else |
134 // This is to avoid allowing a single page to use a full 256MB of memory | 134 // This is to avoid allowing a single page to use a full 256MB of memory |
135 // (the current total limit). Long-scroll pages will hit this limit, | 135 // (the current total limit). Long-scroll pages will hit this limit, |
136 // resulting in instability on some platforms (e.g., issue 141377). | 136 // resulting in instability on some platforms (e.g., issue 141377). |
137 return bytes_available_gpu_memory_ / 2; | 137 return bytes_available_gpu_memory_ / 2; |
138 #endif | 138 #endif |
139 } | 139 } |
140 | 140 |
141 size_t GpuMemoryManager::GetMinimumTabAllocation() const { | 141 size_t GpuMemoryManager::GetMinimumClientAllocation() const { |
142 if (bytes_minimum_per_client_overridden_) | 142 if (bytes_minimum_per_client_overridden_) |
143 return bytes_minimum_per_client_; | 143 return bytes_minimum_per_client_; |
144 #if defined(OS_ANDROID) | 144 #if defined(OS_ANDROID) |
145 return 32 * 1024 * 1024; | 145 return 32 * 1024 * 1024; |
146 #elif defined(OS_CHROMEOS) | 146 #elif defined(OS_CHROMEOS) |
147 return 64 * 1024 * 1024; | 147 return 64 * 1024 * 1024; |
148 #else | 148 #else |
149 return 64 * 1024 * 1024; | 149 return 64 * 1024 * 1024; |
150 #endif | 150 #endif |
151 } | 151 } |
(...skipping 88 matching lines...)
240 // of the intervals rounded down and up to the nearest step_, to avoid | 240 // of the intervals rounded down and up to the nearest step_, to avoid |
241 // thrashing the interval. | 241 // thrashing the interval. |
242 bytes_allocated_unmanaged_high_ = RoundUp( | 242 bytes_allocated_unmanaged_high_ = RoundUp( |
243 bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4, | 243 bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4, |
244 bytes_unmanaged_limit_step_); | 244 bytes_unmanaged_limit_step_); |
245 bytes_allocated_unmanaged_low_ = RoundDown( | 245 bytes_allocated_unmanaged_low_ = RoundDown( |
246 bytes_allocated_unmanaged_current_, | 246 bytes_allocated_unmanaged_current_, |
247 bytes_unmanaged_limit_step_); | 247 bytes_unmanaged_limit_step_); |
248 } | 248 } |
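RoundUp and RoundDown are defined in the elided part of this file; a plausible sketch of integer rounding to a step multiple is shown below under that assumption (the real helpers may differ).

// Hypothetical rounding helpers: snap a byte count to a multiple of the limit
// step so the [low, high] interval only moves when usage crosses a boundary.
#include <cstddef>

size_t RoundUp(size_t bytes, size_t multiple) {
  if (!multiple)
    return bytes;
  return ((bytes + multiple - 1) / multiple) * multiple;
}

size_t RoundDown(size_t bytes, size_t multiple) {
  if (!multiple)
    return bytes;
  return (bytes / multiple) * multiple;
}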
249 | 249 |
250 void GpuMemoryManager::UpdateBackgroundedAvailableGpuMemory() { | 250 void GpuMemoryManager::UpdateNonvisibleAvailableGpuMemory() { |
251 // Be conservative and disable saving backgrounded tabs' textures on Android | 251 // Be conservative and disable saving nonvisible clients' textures on Android |
252 // for the moment. | 252 // for the moment. |
253 #if defined(OS_ANDROID) | 253 #if defined(OS_ANDROID) |
254 bytes_backgrounded_available_gpu_memory_ = 0; | 254 bytes_nonvisible_available_gpu_memory_ = 0; |
255 #else | 255 #else |
256 bytes_backgrounded_available_gpu_memory_ = GetAvailableGpuMemory() / 4; | 256 bytes_nonvisible_available_gpu_memory_ = GetAvailableGpuMemory() / 4; |
257 #endif | 257 #endif |
258 } | 258 } |
259 | 259 |
260 void GpuMemoryManager::ScheduleManage( | 260 void GpuMemoryManager::ScheduleManage( |
261 ScheduleManageTime schedule_manage_time) { | 261 ScheduleManageTime schedule_manage_time) { |
262 if (disable_schedule_manage_) | 262 if (disable_schedule_manage_) |
263 return; | 263 return; |
264 if (manage_immediate_scheduled_) | 264 if (manage_immediate_scheduled_) |
265 return; | 265 return; |
266 if (schedule_manage_time == kScheduleManageNow) { | 266 if (schedule_manage_time == kScheduleManageNow) { |
(...skipping 62 matching lines...)
329 TrackingGroupMap::iterator tracking_group_it = | 329 TrackingGroupMap::iterator tracking_group_it = |
330 tracking_groups_.find(client->GetMemoryTracker()); | 330 tracking_groups_.find(client->GetMemoryTracker()); |
331 DCHECK(tracking_group_it != tracking_groups_.end()); | 331 DCHECK(tracking_group_it != tracking_groups_.end()); |
332 GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second; | 332 GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second; |
333 | 333 |
334 GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState( | 334 GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState( |
335 this, client, tracking_group, has_surface, visible); | 335 this, client, tracking_group, has_surface, visible); |
336 TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated, | 336 TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated, |
337 client_state->visible_ ? | 337 client_state->visible_ ? |
338 &bytes_allocated_managed_visible_ : | 338 &bytes_allocated_managed_visible_ : |
339 &bytes_allocated_managed_backgrounded_); | 339 &bytes_allocated_managed_nonvisible_); |
340 AddClientToList(client_state); | 340 AddClientToList(client_state); |
341 ScheduleManage(kScheduleManageNow); | 341 ScheduleManage(kScheduleManageNow); |
342 return client_state; | 342 return client_state; |
343 } | 343 } |
344 | 344 |
345 void GpuMemoryManager::OnDestroyClientState( | 345 void GpuMemoryManager::OnDestroyClientState( |
346 GpuMemoryManagerClientState* client_state) { | 346 GpuMemoryManagerClientState* client_state) { |
347 RemoveClientFromList(client_state); | 347 RemoveClientFromList(client_state); |
348 TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0, | 348 TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0, |
349 client_state->visible_ ? | 349 client_state->visible_ ? |
350 &bytes_allocated_managed_visible_ : | 350 &bytes_allocated_managed_visible_ : |
351 &bytes_allocated_managed_backgrounded_); | 351 &bytes_allocated_managed_nonvisible_); |
352 ScheduleManage(kScheduleManageLater); | 352 ScheduleManage(kScheduleManageLater); |
353 } | 353 } |
354 | 354 |
355 void GpuMemoryManager::SetClientStateVisible( | 355 void GpuMemoryManager::SetClientStateVisible( |
356 GpuMemoryManagerClientState* client_state, bool visible) { | 356 GpuMemoryManagerClientState* client_state, bool visible) { |
357 DCHECK(client_state->has_surface_); | 357 DCHECK(client_state->has_surface_); |
358 if (client_state->visible_ == visible) | 358 if (client_state->visible_ == visible) |
359 return; | 359 return; |
360 | 360 |
361 RemoveClientFromList(client_state); | 361 RemoveClientFromList(client_state); |
362 client_state->visible_ = visible; | 362 client_state->visible_ = visible; |
363 AddClientToList(client_state); | 363 AddClientToList(client_state); |
364 | 364 |
365 TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0, | 365 TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0, |
366 client_state->visible_ ? | 366 client_state->visible_ ? |
367 &bytes_allocated_managed_backgrounded_ : | 367 &bytes_allocated_managed_nonvisible_ : |
368 &bytes_allocated_managed_visible_); | 368 &bytes_allocated_managed_visible_); |
369 TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated, | 369 TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated, |
370 client_state->visible_ ? | 370 client_state->visible_ ? |
371 &bytes_allocated_managed_visible_ : | 371 &bytes_allocated_managed_visible_ : |
372 &bytes_allocated_managed_backgrounded_); | 372 &bytes_allocated_managed_nonvisible_); |
373 ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater); | 373 ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater); |
374 } | 374 } |
375 | 375 |
376 void GpuMemoryManager::SetClientStateManagedMemoryStats( | 376 void GpuMemoryManager::SetClientStateManagedMemoryStats( |
377 GpuMemoryManagerClientState* client_state, | 377 GpuMemoryManagerClientState* client_state, |
378 const GpuManagedMemoryStats& stats) | 378 const GpuManagedMemoryStats& stats) |
379 { | 379 { |
380 TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, | 380 TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, |
381 stats.bytes_allocated, | 381 stats.bytes_allocated, |
382 client_state->visible_ ? | 382 client_state->visible_ ? |
383 &bytes_allocated_managed_visible_ : | 383 &bytes_allocated_managed_visible_ : |
384 &bytes_allocated_managed_backgrounded_); | 384 &bytes_allocated_managed_nonvisible_); |
385 client_state->managed_memory_stats_ = stats; | 385 client_state->managed_memory_stats_ = stats; |
386 | 386 |
387 if (use_nonuniform_memory_policy_) { | 387 if (use_nonuniform_memory_policy_) { |
388 // If these statistics sit outside of the range that we used in our | 388 // If these statistics sit outside of the range that we used in our |
389 // computation of memory budgets then recompute the budgets. | 389 // computation of memory allocations then recompute the allocations. |
390 if (client_state->managed_memory_stats_.bytes_nice_to_have > | 390 if (client_state->managed_memory_stats_.bytes_nice_to_have > |
391 client_state->bytes_nice_to_have_limit_high_) { | 391 client_state->bytes_nicetohave_limit_high_) { |
392 ScheduleManage(kScheduleManageNow); | 392 ScheduleManage(kScheduleManageNow); |
393 } else if (client_state->managed_memory_stats_.bytes_nice_to_have < | 393 } else if (client_state->managed_memory_stats_.bytes_nice_to_have < |
394 client_state->bytes_nice_to_have_limit_low_) { | 394 client_state->bytes_nicetohave_limit_low_) { |
395 ScheduleManage(kScheduleManageLater); | 395 ScheduleManage(kScheduleManageLater); |
396 } | 396 } |
397 } else { | 397 } else { |
398 // If this allocation pushed our usage of backgrounded tabs memory over the | 398 // If this allocation pushed our usage of nonvisible clients' memory over |
399 // limit, then schedule a drop of backgrounded memory. | 399 // the limit, then schedule a drop of nonvisible memory. |
400 if (bytes_allocated_managed_backgrounded_ > | 400 if (bytes_allocated_managed_nonvisible_ > |
401 GetCurrentBackgroundedAvailableGpuMemory()) | 401 GetCurrentNonvisibleAvailableGpuMemory()) |
402 ScheduleManage(kScheduleManageLater); | 402 ScheduleManage(kScheduleManageLater); |
403 } | 403 } |
404 } | 404 } |
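A small sketch of the hysteresis check above, with illustrative numbers; the enum and helper names are hypothetical.

// Sketch: a new Manage() pass is scheduled only when the reported
// nice-to-have value leaves the band captured at the last allocation.
#include <cstddef>

enum ManageAction { MANAGE_NONE, MANAGE_NOW, MANAGE_LATER };

ManageAction CheckNiceToHave(size_t reported,
                             size_t limit_low,
                             size_t limit_high) {
  if (reported > limit_high)
    return MANAGE_NOW;    // Client grew; re-budget promptly.
  if (reported < limit_low)
    return MANAGE_LATER;  // Client shrank; re-budget lazily.
  return MANAGE_NONE;     // Inside the band; nothing to do.
}

// Example: limits computed at 80 MiB nice-to-have give a band of
// [60 MiB, 100 MiB]; a later report of 110 MiB triggers an immediate Manage().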
405 | 405 |
406 GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup( | 406 GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup( |
407 base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) { | 407 base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) { |
408 GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup( | 408 GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup( |
409 pid, memory_tracker, this); | 409 pid, memory_tracker, this); |
410 DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker())); | 410 DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker())); |
411 tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(), | 411 tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(), |
(...skipping 70 matching lines...)
482 void GpuMemoryManager::Manage() { | 482 void GpuMemoryManager::Manage() { |
483 manage_immediate_scheduled_ = false; | 483 manage_immediate_scheduled_ = false; |
484 delayed_manage_callback_.Cancel(); | 484 delayed_manage_callback_.Cancel(); |
485 | 485 |
486 // Update the amount of GPU memory available on the system. | 486 // Update the amount of GPU memory available on the system. |
487 UpdateAvailableGpuMemory(); | 487 UpdateAvailableGpuMemory(); |
488 | 488 |
489 // Update the limit on unmanaged memory. | 489 // Update the limit on unmanaged memory. |
490 UpdateUnmanagedMemoryLimits(); | 490 UpdateUnmanagedMemoryLimits(); |
491 | 491 |
492 // Update the backgrounded available gpu memory because it depends on | 492 // Update the nonvisible available gpu memory because it depends on |
493 // the available GPU memory. | 493 // the available GPU memory. |
494 UpdateBackgroundedAvailableGpuMemory(); | 494 UpdateNonvisibleAvailableGpuMemory(); |
495 | 495 |
496 // Determine which clients are "hibernated" (which determines the | 496 // Determine which clients are "hibernated" (which determines the |
497 // distribution of frontbuffers and memory among clients that don't have | 497 // distribution of frontbuffers and memory among clients that don't have |
498 // surfaces). | 498 // surfaces). |
499 SetClientsHibernatedState(); | 499 SetClientsHibernatedState(); |
500 | 500 |
501 // Assign memory allocations to clients that have surfaces. | 501 // Assign memory allocations to clients that have surfaces. |
502 if (use_nonuniform_memory_policy_) | 502 if (use_nonuniform_memory_policy_) |
503 AssignSurfacesAllocationsNonuniform(); | 503 AssignSurfacesAllocationsNonuniform(); |
504 else | 504 else |
505 AssignSurfacesAllocationsUniform(); | 505 AssignSurfacesAllocationsUniform(); |
506 | 506 |
507 // Assign memory allocations to clients that don't have surfaces. | 507 // Assign memory allocations to clients that don't have surfaces. |
508 AssignNonSurfacesAllocations(); | 508 AssignNonSurfacesAllocations(); |
509 | 509 |
510 SendUmaStatsToBrowser(); | 510 SendUmaStatsToBrowser(); |
511 } | 511 } |
512 | 512 |
513 void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() { | 513 void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() { |
514 size_t bytes_available_total = GetAvailableGpuMemory(); | 514 size_t bytes_available_total = GetAvailableGpuMemory(); |
515 size_t bytes_allocated_visible = 0; | 515 size_t bytes_allocated_visible = 0; |
516 ClientStateList clients = clients_visible_mru_; | 516 ClientStateList clients = clients_visible_mru_; |
517 clients.insert(clients.end(), | 517 clients.insert(clients.end(), |
518 clients_nonvisible_mru_.begin(), | 518 clients_nonvisible_mru_.begin(), |
519 clients_nonvisible_mru_.end()); | 519 clients_nonvisible_mru_.end()); |
520 | 520 |
521 // Compute budget when visible for all clients | 521 // Compute allocation when visible for all clients |
522 for (ClientStateList::const_iterator it = clients.begin(); | 522 for (ClientStateList::const_iterator it = clients.begin(); |
523 it != clients.end(); | 523 it != clients.end(); |
524 ++it) { | 524 ++it) { |
525 GpuMemoryManagerClientState* client_state = *it; | 525 GpuMemoryManagerClientState* client_state = *it; |
526 GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_; | 526 GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_; |
527 | 527 |
528 // Give the client 4/3 of what it needs to draw everything that is in | 528 // Give the client 4/3 of what it needs to draw everything that is in |
529 // the "nice to have" bucket, so that it has some room to grow. | 529 // the "nice to have" bucket, so that it has some room to grow. |
530 client_state->bytes_budget_when_visible_ = | 530 client_state->bytes_allocation_when_visible_ = |
531 4 * stats->bytes_nice_to_have / 3; | 531 4 * stats->bytes_nice_to_have / 3; |
532 | 532 |
533 // Re-assign memory limits to this client when its "nice to have" bucket | 533 // Re-assign memory limits to this client when its "nice to have" bucket |
534 // grows or shrinks by 1/4. | 534 // grows or shrinks by 1/4. |
535 client_state->bytes_nice_to_have_limit_high_ = | 535 client_state->bytes_nicetohave_limit_high_ = |
536 5 * stats->bytes_nice_to_have / 4; | 536 5 * stats->bytes_nice_to_have / 4; |
537 client_state->bytes_nice_to_have_limit_low_ = | 537 client_state->bytes_nicetohave_limit_low_ = |
538 3 * stats->bytes_nice_to_have / 4; | 538 3 * stats->bytes_nice_to_have / 4; |
539 | 539 |
540 // Clamp to the acceptable range. | 540 // Clamp to the acceptable range. |
541 client_state->bytes_budget_when_visible_ = std::min( | 541 client_state->bytes_allocation_when_visible_ = std::min( |
542 client_state->bytes_budget_when_visible_, | 542 client_state->bytes_allocation_when_visible_, |
543 GetMaximumTabAllocation()); | 543 GetMaximumClientAllocation()); |
544 client_state->bytes_budget_when_visible_ = std::max( | 544 client_state->bytes_allocation_when_visible_ = std::max( |
545 client_state->bytes_budget_when_visible_, | 545 client_state->bytes_allocation_when_visible_, |
546 GetMinimumTabAllocation()); | 546 GetMinimumClientAllocation()); |
547 | 547 |
548 // Compute how much space is used by visible clients. | 548 // Compute how much space is used by visible clients. |
549 if (client_state->visible_) | 549 if (client_state->visible_) |
550 bytes_allocated_visible += client_state->bytes_budget_when_visible_; | 550 bytes_allocated_visible += client_state->bytes_allocation_when_visible_; |
551 } | 551 } |
552 | 552 |
553 // TODO(ccameron): If bytes_allocated_visible exceeds bytes_available_total, | 553 // TODO(ccameron): If bytes_allocated_visible exceeds bytes_available_total, |
554 // then cut down the amount of memory given out. This has to be done | 554 // then cut down the amount of memory given out. This has to be done |
555 // carefully -- we don't want a single heavy tab to cause other light tabs | 555 // carefully -- we don't want a single heavy client to cause other light |
556 // to not display correctly. | 556 // clients to not display correctly. |
557 | 557 |
558 // Allow up to 1/4 of the memory that was available for visible tabs to | 558 // Allow up to 1/4 of the memory that was available for visible clients to |
559 // go to backgrounded tabs. | 559 // go to nonvisible clients. |
560 size_t bytes_available_backgrounded = 0; | 560 size_t bytes_available_nonvisible = 0; |
561 size_t bytes_allocated_backgrounded = 0; | 561 size_t bytes_allocated_nonvisible = 0; |
562 if (bytes_available_total > bytes_allocated_visible) { | 562 if (bytes_available_total > bytes_allocated_visible) { |
563 bytes_available_backgrounded = std::min( | 563 bytes_available_nonvisible = std::min( |
564 bytes_available_total / 4, | 564 bytes_available_total / 4, |
565 bytes_available_total - bytes_allocated_visible); | 565 bytes_available_total - bytes_allocated_visible); |
566 } | 566 } |
567 for (ClientStateList::const_iterator it = clients.begin(); | 567 for (ClientStateList::const_iterator it = clients.begin(); |
568 it != clients.end(); | 568 it != clients.end(); |
569 ++it) { | 569 ++it) { |
570 GpuMemoryManagerClientState* client_state = *it; | 570 GpuMemoryManagerClientState* client_state = *it; |
571 GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_; | 571 GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_; |
572 | 572 |
573 // Compute the amount of space we have for this renderer when it is | 573 // Compute the amount of space we have for this renderer when it is |
574 // backgrounded. | 574 // nonvisible. |
575 size_t bytes_available_backgrounded_adjusted = 0; | 575 size_t bytes_available_nonvisible_adjusted = 0; |
576 if (client_state->visible_) { | 576 if (client_state->visible_) { |
577 // If this is a visible tab, don't count this tab's budget while visible | 577 // If this is a visible client, don't count this client's allocation |
578 // against the backgrounded tabs' budget total. | 578 // while visible against the nonvisible clients' allocation total. |
579 bytes_available_backgrounded_adjusted = std::min( | 579 bytes_available_nonvisible_adjusted = std::min( |
580 bytes_available_backgrounded + | 580 bytes_available_nonvisible + |
581 client_state->bytes_budget_when_visible_ / 4, | 581 client_state->bytes_allocation_when_visible_ / 4, |
582 bytes_available_total / 4); | 582 bytes_available_total / 4); |
583 } else if (bytes_available_backgrounded > bytes_allocated_backgrounded) { | 583 } else if (bytes_available_nonvisible > bytes_allocated_nonvisible) { |
584 // If this is a backgrounded tab, take into account all more recently | 584 // If this is a nonvisible client, take into account all more recently |
585 // used backgrounded tabs. | 585 // used nonvisible clients. |
586 bytes_available_backgrounded_adjusted = | 586 bytes_available_nonvisible_adjusted = |
587 bytes_available_backgrounded - bytes_allocated_backgrounded; | 587 bytes_available_nonvisible - bytes_allocated_nonvisible; |
588 } | 588 } |
589 | 589 |
590 // Give a budget of 9/8ths of the required memory when backgrounded, if it | 590 // Give an allocation of 9/8ths of the required memory when nonvisible, if it |
591 // fits within the limit we just calculated. | 591 // fits within the limit we just calculated. |
592 client_state->bytes_budget_when_backgrounded_ = | 592 client_state->bytes_allocation_when_nonvisible_ = |
593 9 * stats->bytes_required / 8; | 593 9 * stats->bytes_required / 8; |
594 if (client_state->bytes_budget_when_backgrounded_ > | 594 if (client_state->bytes_allocation_when_nonvisible_ > |
595 bytes_available_backgrounded_adjusted) | 595 bytes_available_nonvisible_adjusted) |
596 client_state->bytes_budget_when_backgrounded_ = 0; | 596 client_state->bytes_allocation_when_nonvisible_ = 0; |
597 | 597 |
598 // Update the amount of memory given out to backgrounded tabs. | 598 // Update the amount of memory given out to nonvisible clients. |
599 if (!client_state->visible_) | 599 if (!client_state->visible_) |
600 bytes_allocated_backgrounded += | 600 bytes_allocated_nonvisible += |
601 client_state->bytes_budget_when_backgrounded_; | 601 client_state->bytes_allocation_when_nonvisible_; |
602 } | 602 } |
603 | 603 |
604 // Assign budgets to clients. | 604 // Assign budgets to clients. |
605 for (ClientStateList::const_iterator it = clients.begin(); | 605 for (ClientStateList::const_iterator it = clients.begin(); |
606 it != clients.end(); | 606 it != clients.end(); |
607 ++it) { | 607 ++it) { |
608 GpuMemoryManagerClientState* client_state = *it; | 608 GpuMemoryManagerClientState* client_state = *it; |
609 GpuMemoryAllocation allocation; | 609 GpuMemoryAllocation allocation; |
610 | 610 |
611 allocation.browser_allocation.suggest_have_frontbuffer = | 611 allocation.browser_allocation.suggest_have_frontbuffer = |
612 !client_state->hibernated_; | 612 !client_state->hibernated_; |
613 | 613 |
614 allocation.renderer_allocation.bytes_limit_when_visible = | 614 allocation.renderer_allocation.bytes_limit_when_visible = |
615 client_state->bytes_budget_when_visible_; | 615 client_state->bytes_allocation_when_visible_; |
616 allocation.renderer_allocation.priority_cutoff_when_visible = | 616 allocation.renderer_allocation.priority_cutoff_when_visible = |
617 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything; | 617 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything; |
618 | 618 |
619 allocation.renderer_allocation.bytes_limit_when_not_visible = | 619 allocation.renderer_allocation.bytes_limit_when_not_visible = |
620 client_state->bytes_budget_when_backgrounded_; | 620 client_state->bytes_allocation_when_nonvisible_; |
621 allocation.renderer_allocation.priority_cutoff_when_not_visible = | 621 allocation.renderer_allocation.priority_cutoff_when_not_visible = |
622 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; | 622 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; |
623 | 623 |
624 client_state->client_->SetMemoryAllocation(allocation); | 624 client_state->client_->SetMemoryAllocation(allocation); |
625 } | 625 } |
626 } | 626 } |
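The per-client arithmetic used in the loops above, restated as standalone helpers with hypothetical names; a sketch for exposition, not part of this change.

// Sketch: a visible client is offered 4/3 of its nice-to-have bytes, clamped
// to the per-client minimum and maximum; a nonvisible client is offered 9/8
// of its required bytes, or nothing if that does not fit in its adjusted
// nonvisible pool.
#include <algorithm>
#include <cstddef>

size_t VisibleAllocation(size_t nice_to_have,
                         size_t min_allocation,
                         size_t max_allocation) {
  size_t bytes = 4 * nice_to_have / 3;
  bytes = std::min(bytes, max_allocation);
  return std::max(bytes, min_allocation);
}

size_t NonvisibleAllocation(size_t required, size_t nonvisible_available) {
  size_t bytes = 9 * required / 8;
  return bytes <= nonvisible_available ? bytes : 0;
}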
627 | 627 |
628 void GpuMemoryManager::AssignSurfacesAllocationsUniform() { | 628 void GpuMemoryManager::AssignSurfacesAllocationsUniform() { |
629 // Determine how much memory to assign to give to visible and backgrounded | 629 // Determine how much memory to assign to give to visible and nonvisible |
630 // clients. | 630 // clients. |
631 size_t bytes_limit_when_visible = GetVisibleClientAllocation(); | 631 size_t bytes_limit_when_visible = GetVisibleClientAllocation(); |
632 | 632 |
633 // Experiment to determine if aggressively discarding tiles on OS X | 633 // Experiment to determine if aggressively discarding tiles on OS X |
634 // results in greater stability. | 634 // results in greater stability. |
635 #if defined(OS_MACOSX) | 635 #if defined(OS_MACOSX) |
636 GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible = | 636 GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible = |
637 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNiceToHave; | 637 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNiceToHave; |
638 #else | 638 #else |
639 GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible = | 639 GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible = |
640 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything; | 640 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything; |
641 #endif | 641 #endif |
642 | 642 |
643 // Assign memory allocations to visible clients. | 643 // Assign memory allocations to visible clients. |
644 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | 644 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
645 it != clients_visible_mru_.end(); | 645 it != clients_visible_mru_.end(); |
646 ++it) { | 646 ++it) { |
647 GpuMemoryManagerClientState* client_state = *it; | 647 GpuMemoryManagerClientState* client_state = *it; |
648 GpuMemoryAllocation allocation; | 648 GpuMemoryAllocation allocation; |
649 | 649 |
650 allocation.browser_allocation.suggest_have_frontbuffer = true; | 650 allocation.browser_allocation.suggest_have_frontbuffer = true; |
651 allocation.renderer_allocation.bytes_limit_when_visible = | 651 allocation.renderer_allocation.bytes_limit_when_visible = |
652 bytes_limit_when_visible; | 652 bytes_limit_when_visible; |
653 allocation.renderer_allocation.priority_cutoff_when_visible = | 653 allocation.renderer_allocation.priority_cutoff_when_visible = |
654 priority_cutoff_when_visible; | 654 priority_cutoff_when_visible; |
655 | 655 |
656 // Allow this client to keep its textures when backgrounded if they | 656 // Allow this client to keep its textures when nonvisible if they |
657 // aren't so expensive that they won't fit. | 657 // aren't so expensive that they won't fit. |
658 if (client_state->managed_memory_stats_.bytes_required <= | 658 if (client_state->managed_memory_stats_.bytes_required <= |
659 bytes_backgrounded_available_gpu_memory_) { | 659 bytes_nonvisible_available_gpu_memory_) { |
660 allocation.renderer_allocation.bytes_limit_when_not_visible = | 660 allocation.renderer_allocation.bytes_limit_when_not_visible = |
661 GetCurrentBackgroundedAvailableGpuMemory(); | 661 GetCurrentNonvisibleAvailableGpuMemory(); |
662 allocation.renderer_allocation.priority_cutoff_when_not_visible = | 662 allocation.renderer_allocation.priority_cutoff_when_not_visible = |
663 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; | 663 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; |
664 } else { | 664 } else { |
665 allocation.renderer_allocation.bytes_limit_when_not_visible = 0; | 665 allocation.renderer_allocation.bytes_limit_when_not_visible = 0; |
666 allocation.renderer_allocation.priority_cutoff_when_not_visible = | 666 allocation.renderer_allocation.priority_cutoff_when_not_visible = |
667 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing; | 667 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing; |
668 } | 668 } |
669 | 669 |
670 client_state->client_->SetMemoryAllocation(allocation); | 670 client_state->client_->SetMemoryAllocation(allocation); |
671 } | 671 } |
672 | 672 |
673 // Assign memory allocations to backgrounded clients. | 673 // Assign memory allocations to nonvisible clients. |
674 size_t bytes_allocated_backgrounded = 0; | 674 size_t bytes_allocated_nonvisible = 0; |
675 for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin(); | 675 for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin(); |
676 it != clients_nonvisible_mru_.end(); | 676 it != clients_nonvisible_mru_.end(); |
677 ++it) { | 677 ++it) { |
678 GpuMemoryManagerClientState* client_state = *it; | 678 GpuMemoryManagerClientState* client_state = *it; |
679 GpuMemoryAllocation allocation; | 679 GpuMemoryAllocation allocation; |
680 | 680 |
681 allocation.browser_allocation.suggest_have_frontbuffer = | 681 allocation.browser_allocation.suggest_have_frontbuffer = |
682 !client_state->hibernated_; | 682 !client_state->hibernated_; |
683 allocation.renderer_allocation.bytes_limit_when_visible = | 683 allocation.renderer_allocation.bytes_limit_when_visible = |
684 bytes_limit_when_visible; | 684 bytes_limit_when_visible; |
685 allocation.renderer_allocation.priority_cutoff_when_visible = | 685 allocation.renderer_allocation.priority_cutoff_when_visible = |
686 priority_cutoff_when_visible; | 686 priority_cutoff_when_visible; |
687 | 687 |
688 if (client_state->managed_memory_stats_.bytes_required + | 688 if (client_state->managed_memory_stats_.bytes_required + |
689 bytes_allocated_backgrounded <= | 689 bytes_allocated_nonvisible <= |
690 GetCurrentBackgroundedAvailableGpuMemory()) { | 690 GetCurrentNonvisibleAvailableGpuMemory()) { |
691 bytes_allocated_backgrounded += | 691 bytes_allocated_nonvisible += |
692 client_state->managed_memory_stats_.bytes_required; | 692 client_state->managed_memory_stats_.bytes_required; |
693 allocation.renderer_allocation.bytes_limit_when_not_visible = | 693 allocation.renderer_allocation.bytes_limit_when_not_visible = |
694 GetCurrentBackgroundedAvailableGpuMemory(); | 694 GetCurrentNonvisibleAvailableGpuMemory(); |
695 allocation.renderer_allocation.priority_cutoff_when_not_visible = | 695 allocation.renderer_allocation.priority_cutoff_when_not_visible = |
696 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; | 696 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; |
697 } else { | 697 } else { |
698 allocation.renderer_allocation.bytes_limit_when_not_visible = 0; | 698 allocation.renderer_allocation.bytes_limit_when_not_visible = 0; |
699 allocation.renderer_allocation.priority_cutoff_when_not_visible = | 699 allocation.renderer_allocation.priority_cutoff_when_not_visible = |
700 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing; | 700 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing; |
701 } | 701 } |
702 | 702 |
703 client_state->client_->SetMemoryAllocation(allocation); | 703 client_state->client_->SetMemoryAllocation(allocation); |
704 } | 704 } |
705 } | 705 } |
706 | 706 |
707 void GpuMemoryManager::AssignNonSurfacesAllocations() { | 707 void GpuMemoryManager::AssignNonSurfacesAllocations() { |
708 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); | 708 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); |
709 it != clients_nonsurface_.end(); | 709 it != clients_nonsurface_.end(); |
710 ++it) { | 710 ++it) { |
711 GpuMemoryManagerClientState* client_state = *it; | 711 GpuMemoryManagerClientState* client_state = *it; |
712 GpuMemoryAllocation allocation; | 712 GpuMemoryAllocation allocation; |
713 | 713 |
714 if (!client_state->hibernated_) { | 714 if (!client_state->hibernated_) { |
715 allocation.renderer_allocation.bytes_limit_when_visible = | 715 allocation.renderer_allocation.bytes_limit_when_visible = |
716 GetMinimumTabAllocation(); | 716 GetMinimumClientAllocation(); |
717 allocation.renderer_allocation.priority_cutoff_when_visible = | 717 allocation.renderer_allocation.priority_cutoff_when_visible = |
718 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything; | 718 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything; |
719 } | 719 } |
720 | 720 |
721 client_state->client_->SetMemoryAllocation(allocation); | 721 client_state->client_->SetMemoryAllocation(allocation); |
722 } | 722 } |
723 } | 723 } |
724 | 724 |
725 void GpuMemoryManager::SetClientsHibernatedState() const { | 725 void GpuMemoryManager::SetClientsHibernatedState() const { |
726 // Re-set all tracking groups as being hibernated. | 726 // Re-set all tracking groups as being hibernated. |
(...skipping 46 matching lines...)
773 ++it) { | 773 ++it) { |
774 GpuMemoryManagerClientState* client_state = *it; | 774 GpuMemoryManagerClientState* client_state = *it; |
775 if (!client_state->hibernated_) | 775 if (!client_state->hibernated_) |
776 clients_without_surface_not_hibernated_count++; | 776 clients_without_surface_not_hibernated_count++; |
777 } | 777 } |
778 | 778 |
779 // Calculate bonus allocation by splitting remainder of global limit equally | 779 // Calculate bonus allocation by splitting remainder of global limit equally |
780 // after giving out the minimum to those that need it. | 780 // after giving out the minimum to those that need it. |
781 size_t num_clients_need_mem = clients_with_surface_visible_count + | 781 size_t num_clients_need_mem = clients_with_surface_visible_count + |
782 clients_without_surface_not_hibernated_count; | 782 clients_without_surface_not_hibernated_count; |
783 size_t base_allocation_size = GetMinimumTabAllocation() * | 783 size_t base_allocation_size = GetMinimumClientAllocation() * |
784 num_clients_need_mem; | 784 num_clients_need_mem; |
785 size_t bonus_allocation = 0; | 785 size_t bonus_allocation = 0; |
786 if (base_allocation_size < GetAvailableGpuMemory() && | 786 if (base_allocation_size < GetAvailableGpuMemory() && |
787 clients_with_surface_visible_count) | 787 clients_with_surface_visible_count) |
788 bonus_allocation = (GetAvailableGpuMemory() - base_allocation_size) / | 788 bonus_allocation = (GetAvailableGpuMemory() - base_allocation_size) / |
789 clients_with_surface_visible_count; | 789 clients_with_surface_visible_count; |
790 size_t clients_allocation_when_visible = GetMinimumTabAllocation() + | 790 size_t clients_allocation_when_visible = GetMinimumClientAllocation() + |
791 bonus_allocation; | 791 bonus_allocation; |
792 | 792 |
793 // If we have received a window count message, then override the client-based | 793 // If we have received a window count message, then override the client-based |
794 // scheme with a per-window scheme | 794 // scheme with a per-window scheme |
795 if (window_count_has_been_received_) { | 795 if (window_count_has_been_received_) { |
796 clients_allocation_when_visible = std::max( | 796 clients_allocation_when_visible = std::max( |
797 clients_allocation_when_visible, | 797 clients_allocation_when_visible, |
798 GetAvailableGpuMemory() / std::max(window_count_, 1u)); | 798 GetAvailableGpuMemory() / std::max(window_count_, 1u)); |
799 } | 799 } |
800 | 800 |
801 // Limit the memory per client to its maximum allowed level. | 801 // Limit the memory per client to its maximum allowed level. |
802 if (clients_allocation_when_visible >= GetMaximumTabAllocation()) | 802 if (clients_allocation_when_visible >= GetMaximumClientAllocation()) |
803 clients_allocation_when_visible = GetMaximumTabAllocation(); | 803 clients_allocation_when_visible = GetMaximumClientAllocation(); |
804 | 804 |
805 return clients_allocation_when_visible; | 805 return clients_allocation_when_visible; |
806 } | 806 } |
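Worked numbers for the uniform visible allocation above (the per-window override is omitted); the helper name and values are illustrative only.

// Sketch: every client that needs memory gets the minimum, and visible
// surface clients split whatever remains evenly, capped at the per-client
// maximum.
#include <algorithm>
#include <cstddef>

size_t UniformVisibleAllocation(size_t available,
                                size_t min_allocation,
                                size_t max_allocation,
                                size_t visible_surface_clients,
                                size_t nonsurface_clients) {
  size_t base = min_allocation * (visible_surface_clients + nonsurface_clients);
  size_t bonus = 0;
  if (base < available && visible_surface_clients)
    bonus = (available - base) / visible_surface_clients;
  return std::min(min_allocation + bonus, max_allocation);
}

// Example: 256 MiB available, a 64 MiB minimum, two visible surface clients
// and one non-hibernated nonsurface client -> base 192 MiB, bonus 32 MiB, so
// each visible client is offered 96 MiB (under the 128 MiB desktop maximum).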
807 | 807 |
808 void GpuMemoryManager::SendUmaStatsToBrowser() { | 808 void GpuMemoryManager::SendUmaStatsToBrowser() { |
809 if (!channel_manager_) | 809 if (!channel_manager_) |
810 return; | 810 return; |
811 GPUMemoryUmaStats params; | 811 GPUMemoryUmaStats params; |
812 params.bytes_allocated_current = GetCurrentUsage(); | 812 params.bytes_allocated_current = GetCurrentUsage(); |
813 params.bytes_allocated_max = bytes_allocated_historical_max_; | 813 params.bytes_allocated_max = bytes_allocated_historical_max_; |
(...skipping 24 matching lines...)
838 | 838 |
839 void GpuMemoryManager::RemoveClientFromList( | 839 void GpuMemoryManager::RemoveClientFromList( |
840 GpuMemoryManagerClientState* client_state) { | 840 GpuMemoryManagerClientState* client_state) { |
841 DCHECK(client_state->list_iterator_valid_); | 841 DCHECK(client_state->list_iterator_valid_); |
842 ClientStateList* client_list = GetClientList(client_state); | 842 ClientStateList* client_list = GetClientList(client_state); |
843 client_list->erase(client_state->list_iterator_); | 843 client_list->erase(client_state->list_iterator_); |
844 client_state->list_iterator_valid_ = false; | 844 client_state->list_iterator_valid_ = false; |
845 } | 845 } |
846 | 846 |
847 } // namespace content | 847 } // namespace content |