Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(210)

Side by Side Diff: content/common/gpu/gpu_memory_manager.cc

Issue 12475002: Delete memory manager dead code. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Resolve against CrOS changes Created 7 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/gpu_memory_manager.h" 5 #include "content/common/gpu/gpu_memory_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/command_line.h" 10 #include "base/command_line.h"
(...skipping 30 matching lines...) Expand all
41 T RoundDown(T n, T mul) { 41 T RoundDown(T n, T mul) {
42 return (n / mul) * mul; 42 return (n / mul) * mul;
43 } 43 }
44 44
45 } 45 }
46 46
47 GpuMemoryManager::GpuMemoryManager( 47 GpuMemoryManager::GpuMemoryManager(
48 GpuChannelManager* channel_manager, 48 GpuChannelManager* channel_manager,
49 uint64 max_surfaces_with_frontbuffer_soft_limit) 49 uint64 max_surfaces_with_frontbuffer_soft_limit)
50 : channel_manager_(channel_manager), 50 : channel_manager_(channel_manager),
51 use_nonuniform_memory_policy_(true),
52 manage_immediate_scheduled_(false), 51 manage_immediate_scheduled_(false),
53 max_surfaces_with_frontbuffer_soft_limit_( 52 max_surfaces_with_frontbuffer_soft_limit_(
54 max_surfaces_with_frontbuffer_soft_limit), 53 max_surfaces_with_frontbuffer_soft_limit),
55 bytes_available_gpu_memory_(0), 54 bytes_available_gpu_memory_(0),
56 bytes_available_gpu_memory_overridden_(false), 55 bytes_available_gpu_memory_overridden_(false),
57 bytes_minimum_per_client_(0), 56 bytes_minimum_per_client_(0),
58 bytes_default_per_client_(0), 57 bytes_default_per_client_(0),
59 bytes_nonvisible_available_gpu_memory_(0),
60 bytes_allocated_managed_current_(0), 58 bytes_allocated_managed_current_(0),
61 bytes_allocated_managed_visible_(0), 59 bytes_allocated_managed_visible_(0),
62 bytes_allocated_managed_nonvisible_(0), 60 bytes_allocated_managed_nonvisible_(0),
63 bytes_allocated_unmanaged_current_(0), 61 bytes_allocated_unmanaged_current_(0),
64 bytes_allocated_historical_max_(0), 62 bytes_allocated_historical_max_(0),
65 bytes_allocated_unmanaged_high_(0), 63 bytes_allocated_unmanaged_high_(0),
66 bytes_allocated_unmanaged_low_(0), 64 bytes_allocated_unmanaged_low_(0),
67 bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep), 65 bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep),
68 window_count_has_been_received_(false),
69 window_count_(0),
70 disable_schedule_manage_(false) 66 disable_schedule_manage_(false)
71 { 67 {
72 CommandLine* command_line = CommandLine::ForCurrentProcess(); 68 CommandLine* command_line = CommandLine::ForCurrentProcess();
73 69
74 #if defined(OS_ANDROID) 70 #if defined(OS_ANDROID)
75 bytes_default_per_client_ = 32 * 1024 * 1024; 71 bytes_default_per_client_ = 32 * 1024 * 1024;
76 bytes_minimum_per_client_ = 32 * 1024 * 1024; 72 bytes_minimum_per_client_ = 32 * 1024 * 1024;
77 #else 73 #else
78 bytes_default_per_client_ = 64 * 1024 * 1024; 74 bytes_default_per_client_ = 64 * 1024 * 1024;
79 bytes_minimum_per_client_ = 64 * 1024 * 1024; 75 bytes_minimum_per_client_ = 64 * 1024 * 1024;
80 #endif 76 #endif
81 77
82 if (command_line->HasSwitch(switches::kDisableNonuniformGpuMemPolicy))
83 use_nonuniform_memory_policy_ = false;
84
85 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { 78 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) {
86 base::StringToUint64( 79 base::StringToUint64(
87 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), 80 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb),
88 &bytes_available_gpu_memory_); 81 &bytes_available_gpu_memory_);
89 bytes_available_gpu_memory_ *= 1024 * 1024; 82 bytes_available_gpu_memory_ *= 1024 * 1024;
90 bytes_available_gpu_memory_overridden_ = true; 83 bytes_available_gpu_memory_overridden_ = true;
91 } else 84 } else
92 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); 85 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory();
93
94 UpdateNonvisibleAvailableGpuMemory();
95 } 86 }
96 87
97 GpuMemoryManager::~GpuMemoryManager() { 88 GpuMemoryManager::~GpuMemoryManager() {
98 DCHECK(tracking_groups_.empty()); 89 DCHECK(tracking_groups_.empty());
99 DCHECK(clients_visible_mru_.empty()); 90 DCHECK(clients_visible_mru_.empty());
100 DCHECK(clients_nonvisible_mru_.empty()); 91 DCHECK(clients_nonvisible_mru_.empty());
101 DCHECK(clients_nonsurface_.empty()); 92 DCHECK(clients_nonsurface_.empty());
102 DCHECK(!bytes_allocated_managed_current_); 93 DCHECK(!bytes_allocated_managed_current_);
103 DCHECK(!bytes_allocated_unmanaged_current_); 94 DCHECK(!bytes_allocated_unmanaged_current_);
104 DCHECK(!bytes_allocated_managed_visible_); 95 DCHECK(!bytes_allocated_managed_visible_);
105 DCHECK(!bytes_allocated_managed_nonvisible_); 96 DCHECK(!bytes_allocated_managed_nonvisible_);
106 } 97 }
107 98
108 uint64 GpuMemoryManager::GetAvailableGpuMemory() const { 99 uint64 GpuMemoryManager::GetAvailableGpuMemory() const {
109 // Allow unmanaged allocations to over-subscribe by at most (high_ - low_) 100 // Allow unmanaged allocations to over-subscribe by at most (high_ - low_)
110 // before restricting managed (compositor) memory based on unmanaged usage. 101 // before restricting managed (compositor) memory based on unmanaged usage.
111 if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_) 102 if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_)
112 return 0; 103 return 0;
113 return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_; 104 return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_;
114 } 105 }
115 106
116 uint64 GpuMemoryManager::GetCurrentNonvisibleAvailableGpuMemory() const {
117 if (bytes_allocated_managed_visible_ < GetAvailableGpuMemory()) {
118 return std::min(bytes_nonvisible_available_gpu_memory_,
119 GetAvailableGpuMemory() - bytes_allocated_managed_visible_);
120 }
121 return 0;
122 }
123
124 uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const { 107 uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const {
125 #if defined(OS_ANDROID) 108 #if defined(OS_ANDROID)
126 return 32 * 1024 * 1024; 109 return 32 * 1024 * 1024;
127 #elif defined(OS_CHROMEOS) 110 #elif defined(OS_CHROMEOS)
128 return 1024 * 1024 * 1024; 111 return 1024 * 1024 * 1024;
129 #else 112 #else
130 return 256 * 1024 * 1024; 113 return 256 * 1024 * 1024;
131 #endif 114 #endif
132 } 115 }
133 116
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after
210 // of the intervals rounded down and up to the nearest step_, to avoid 193 // of the intervals rounded down and up to the nearest step_, to avoid
211 // thrashing the interval. 194 // thrashing the interval.
212 bytes_allocated_unmanaged_high_ = RoundUp( 195 bytes_allocated_unmanaged_high_ = RoundUp(
213 bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4, 196 bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4,
214 bytes_unmanaged_limit_step_); 197 bytes_unmanaged_limit_step_);
215 bytes_allocated_unmanaged_low_ = RoundDown( 198 bytes_allocated_unmanaged_low_ = RoundDown(
216 bytes_allocated_unmanaged_current_, 199 bytes_allocated_unmanaged_current_,
217 bytes_unmanaged_limit_step_); 200 bytes_unmanaged_limit_step_);
218 } 201 }
219 202
220 void GpuMemoryManager::UpdateNonvisibleAvailableGpuMemory() {
221 // Be conservative and disable saving nonvisible clients' textures on Android
222 // for the moment
223 #if defined(OS_ANDROID)
224 bytes_nonvisible_available_gpu_memory_ = 0;
225 #else
226 bytes_nonvisible_available_gpu_memory_ = GetAvailableGpuMemory() / 4;
227 #endif
228 }
229
230 void GpuMemoryManager::ScheduleManage( 203 void GpuMemoryManager::ScheduleManage(
231 ScheduleManageTime schedule_manage_time) { 204 ScheduleManageTime schedule_manage_time) {
232 if (disable_schedule_manage_) 205 if (disable_schedule_manage_)
233 return; 206 return;
234 if (manage_immediate_scheduled_) 207 if (manage_immediate_scheduled_)
235 return; 208 return;
236 if (schedule_manage_time == kScheduleManageNow) { 209 if (schedule_manage_time == kScheduleManageNow) {
237 MessageLoop::current()->PostTask( 210 MessageLoop::current()->PostTask(
238 FROM_HERE, 211 FROM_HERE,
239 base::Bind(&GpuMemoryManager::Manage, AsWeakPtr())); 212 base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
360 client_state->managed_memory_stats_ = stats; 333 client_state->managed_memory_stats_ = stats;
361 334
362 // If this is the first time that stats have been received for this 335 // If this is the first time that stats have been received for this
363 // client, use them immediately. 336 // client, use them immediately.
364 if (!client_state->managed_memory_stats_received_) { 337 if (!client_state->managed_memory_stats_received_) {
365 client_state->managed_memory_stats_received_ = true; 338 client_state->managed_memory_stats_received_ = true;
366 ScheduleManage(kScheduleManageNow); 339 ScheduleManage(kScheduleManageNow);
367 return; 340 return;
368 } 341 }
369 342
370 if (use_nonuniform_memory_policy_) { 343 // If these statistics sit outside of the range that we used in our
371 // If these statistics sit outside of the range that we used in our 344 // computation of memory allocations then recompute the allocations.
372 // computation of memory allocations then recompute the allocations. 345 if (client_state->managed_memory_stats_.bytes_nice_to_have >
373 if (client_state->managed_memory_stats_.bytes_nice_to_have > 346 client_state->bytes_nicetohave_limit_high_) {
374 client_state->bytes_nicetohave_limit_high_) { 347 ScheduleManage(kScheduleManageNow);
375 ScheduleManage(kScheduleManageNow); 348 } else if (client_state->managed_memory_stats_.bytes_nice_to_have <
376 } else if (client_state->managed_memory_stats_.bytes_nice_to_have < 349 client_state->bytes_nicetohave_limit_low_) {
377 client_state->bytes_nicetohave_limit_low_) { 350 ScheduleManage(kScheduleManageLater);
378 ScheduleManage(kScheduleManageLater);
379 }
380 } else {
381 // If this allocation pushed our usage of nonvisible clients' memory over
382 // the limit, then schedule a drop of nonvisible memory.
383 if (bytes_allocated_managed_nonvisible_ >
384 GetCurrentNonvisibleAvailableGpuMemory())
385 ScheduleManage(kScheduleManageLater);
386 } 351 }
387 } 352 }
388 353
389 GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup( 354 GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup(
390 base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) { 355 base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) {
391 GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup( 356 GpuMemoryTrackingGroup* tracking_group = new GpuMemoryTrackingGroup(
392 pid, memory_tracker, this); 357 pid, memory_tracker, this);
393 DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker())); 358 DCHECK(!tracking_groups_.count(tracking_group->GetMemoryTracker()));
394 tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(), 359 tracking_groups_.insert(std::make_pair(tracking_group->GetMemoryTracker(),
395 tracking_group)); 360 tracking_group));
(...skipping 21 matching lines...) Expand all
417 video_memory_usage_stats->process_map[ 382 video_memory_usage_stats->process_map[
418 base::GetCurrentProcId()].video_memory = GetCurrentUsage(); 383 base::GetCurrentProcId()].video_memory = GetCurrentUsage();
419 video_memory_usage_stats->process_map[ 384 video_memory_usage_stats->process_map[
420 base::GetCurrentProcId()].has_duplicates = true; 385 base::GetCurrentProcId()].has_duplicates = true;
421 386
422 video_memory_usage_stats->bytes_allocated = GetCurrentUsage(); 387 video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
423 video_memory_usage_stats->bytes_allocated_historical_max = 388 video_memory_usage_stats->bytes_allocated_historical_max =
424 bytes_allocated_historical_max_; 389 bytes_allocated_historical_max_;
425 } 390 }
426 391
427 void GpuMemoryManager::SetWindowCount(uint32 window_count) {
428 bool should_schedule_manage = !window_count_has_been_received_ ||
429 (window_count != window_count_);
430 window_count_has_been_received_ = true;
431 window_count_ = window_count;
432 if (should_schedule_manage)
433 ScheduleManage(kScheduleManageNow);
434 }
435
436 // The current Manage algorithm simply classifies contexts (clients) into
437 // "foreground", "background", or "hibernated" categories.
438 // For each of these three categories, there are predefined memory allocation
439 // limits and front/backbuffer states.
440 //
441 // Users may or may not have surfaces, and the rules are different for each.
442 //
443 // The rules for categorizing contexts with a surface are:
444 // 1. Foreground: All visible surfaces.
445 // * Must have both front and back buffer.
446 //
447 // 2. Background: Non visible surfaces, which have not surpassed the
448 // max_surfaces_with_frontbuffer_soft_limit_ limit.
449 // * Will have only a frontbuffer.
450 //
451 // 3. Hibernated: Non visible surfaces, which have surpassed the
452 // max_surfaces_with_frontbuffer_soft_limit_ limit.
453 // * Will not have either buffer.
454 //
455 // The considerations for categorizing contexts without a surface are:
456 // 1. These contexts do not track {visibility,last_used_time}, so cannot
457 // sort them directly.
458 // 2. These contexts may be used by, and thus affect, other contexts, and so
459 // cannot be less visible than any affected context.
460 // 3. Contexts belong to share groups within which resources can be shared.
461 //
462 // As such, the rule for categorizing contexts without a surface is:
463 // 1. Find the most visible context-with-a-surface within each
464 // context-without-a-surface's share group, and inherit its visibility.
465 void GpuMemoryManager::Manage() { 392 void GpuMemoryManager::Manage() {
466 manage_immediate_scheduled_ = false; 393 manage_immediate_scheduled_ = false;
467 delayed_manage_callback_.Cancel(); 394 delayed_manage_callback_.Cancel();
468 395
469 // Update the amount of GPU memory available on the system. 396 // Update the amount of GPU memory available on the system.
470 UpdateAvailableGpuMemory(); 397 UpdateAvailableGpuMemory();
471 398
472 // Update the limit on unmanaged memory. 399 // Update the limit on unmanaged memory.
473 UpdateUnmanagedMemoryLimits(); 400 UpdateUnmanagedMemoryLimits();
474 401
475 // Update the nonvisible available gpu memory because it depends on
476 // the available GPU memory.
477 UpdateNonvisibleAvailableGpuMemory();
478
479 // Determine which clients are "hibernated" (which determines the 402 // Determine which clients are "hibernated" (which determines the
480 // distribution of frontbuffers and memory among clients that don't have 403 // distribution of frontbuffers and memory among clients that don't have
481 // surfaces). 404 // surfaces).
482 SetClientsHibernatedState(); 405 SetClientsHibernatedState();
483 406
484 // Assign memory allocations to clients that have surfaces. 407 // Assign memory allocations to clients that have surfaces.
485 if (use_nonuniform_memory_policy_) 408 AssignSurfacesAllocations();
486 AssignSurfacesAllocationsNonuniform();
487 else
488 AssignSurfacesAllocationsUniform();
489 409
490 // Assign memory allocations to clients that don't have surfaces. 410 // Assign memory allocations to clients that don't have surfaces.
491 AssignNonSurfacesAllocations(); 411 AssignNonSurfacesAllocations();
492 412
493 SendUmaStatsToBrowser(); 413 SendUmaStatsToBrowser();
494 } 414 }
495 415
496 // static 416 // static
497 uint64 GpuMemoryManager::ComputeCap( 417 uint64 GpuMemoryManager::ComputeCap(
498 std::vector<uint64> bytes, uint64 bytes_sum_limit) 418 std::vector<uint64> bytes, uint64 bytes_sum_limit)
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
566 486
567 uint64 GpuMemoryManager::ComputeClientAllocationWhenNonvisible( 487 uint64 GpuMemoryManager::ComputeClientAllocationWhenNonvisible(
568 GpuMemoryManagerClientState* client_state) { 488 GpuMemoryManagerClientState* client_state) {
569 489
570 if (!client_state->managed_memory_stats_received_) 490 if (!client_state->managed_memory_stats_received_)
571 return 0; 491 return 0;
572 492
573 return 9 * client_state->managed_memory_stats_.bytes_required / 8; 493 return 9 * client_state->managed_memory_stats_.bytes_required / 8;
574 } 494 }
575 495
576 void GpuMemoryManager::ComputeVisibleSurfacesAllocationsNonuniform() { 496 void GpuMemoryManager::ComputeVisibleSurfacesAllocations() {
577 uint64 bytes_available_total = GetAvailableGpuMemory(); 497 uint64 bytes_available_total = GetAvailableGpuMemory();
578 uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max(); 498 uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max();
579 uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max(); 499 uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
580 uint64 bytes_overall_cap_visible = GetMaximumClientAllocation(); 500 uint64 bytes_overall_cap_visible = GetMaximumClientAllocation();
581 501
582 // Compute memory usage at three levels 502 // Compute memory usage at three levels
583 // - painting everything that is nicetohave for visible clients 503 // - painting everything that is nicetohave for visible clients
584 // - painting only what that is visible 504 // - painting only what that is visible
585 // - giving every client the minimum allocation 505 // - giving every client the minimum allocation
586 uint64 bytes_nicetohave_visible = 0; 506 uint64 bytes_nicetohave_visible = 0;
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after
687 GpuMemoryManagerClientState* client_state = *it; 607 GpuMemoryManagerClientState* client_state = *it;
688 client_state->bytes_allocation_when_visible_ = 608 client_state->bytes_allocation_when_visible_ =
689 ComputeClientAllocationWhenVisible( 609 ComputeClientAllocationWhenVisible(
690 client_state, 610 client_state,
691 bytes_above_required_cap, 611 bytes_above_required_cap,
692 bytes_above_minimum_cap, 612 bytes_above_minimum_cap,
693 bytes_overall_cap_nonvisible); 613 bytes_overall_cap_nonvisible);
694 } 614 }
695 } 615 }
696 616
697 void GpuMemoryManager::ComputeNonvisibleSurfacesAllocationsNonuniform() { 617 void GpuMemoryManager::ComputeNonvisibleSurfacesAllocations() {
698 uint64 bytes_allocated_visible = 0; 618 uint64 bytes_allocated_visible = 0;
699 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); 619 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
700 it != clients_visible_mru_.end(); 620 it != clients_visible_mru_.end();
701 ++it) { 621 ++it) {
702 GpuMemoryManagerClientState* client_state = *it; 622 GpuMemoryManagerClientState* client_state = *it;
703 bytes_allocated_visible += client_state->bytes_allocation_when_visible_; 623 bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
704 } 624 }
705 625
706 // Allow up to 1/4 of the memory that was available for visible clients to 626 // Allow up to 1/4 of the memory that was available for visible clients to
707 // go to nonvisible clients. 627 // go to nonvisible clients.
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after
805 it != clients_visible_mru_.end(); 725 it != clients_visible_mru_.end();
806 ++it) { 726 ++it) {
807 GpuMemoryManagerClientState* client_state = *it; 727 GpuMemoryManagerClientState* client_state = *it;
808 uint64 bytes_extra = GetMaximumClientAllocation() - 728 uint64 bytes_extra = GetMaximumClientAllocation() -
809 client_state->bytes_allocation_when_visible_; 729 client_state->bytes_allocation_when_visible_;
810 client_state->bytes_allocation_when_visible_ += std::min( 730 client_state->bytes_allocation_when_visible_ += std::min(
811 bytes_extra, bytes_extra_cap); 731 bytes_extra, bytes_extra_cap);
812 } 732 }
813 } 733 }
814 734
815 void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() { 735 void GpuMemoryManager::AssignSurfacesAllocations() {
816 // Compute allocations for all clients. 736 // Compute allocations for all clients.
817 ComputeVisibleSurfacesAllocationsNonuniform(); 737 ComputeVisibleSurfacesAllocations();
818 ComputeNonvisibleSurfacesAllocationsNonuniform(); 738 ComputeNonvisibleSurfacesAllocations();
819 739
820 // Distribute the remaining memory to visible clients. 740 // Distribute the remaining memory to visible clients.
821 DistributeRemainingMemoryToVisibleSurfaces(); 741 DistributeRemainingMemoryToVisibleSurfaces();
822 742
823 // Send that allocation to the clients. 743 // Send that allocation to the clients.
824 ClientStateList clients = clients_visible_mru_; 744 ClientStateList clients = clients_visible_mru_;
825 clients.insert(clients.end(), 745 clients.insert(clients.end(),
826 clients_nonvisible_mru_.begin(), 746 clients_nonvisible_mru_.begin(),
827 clients_nonvisible_mru_.end()); 747 clients_nonvisible_mru_.end());
828 for (ClientStateList::const_iterator it = clients.begin(); 748 for (ClientStateList::const_iterator it = clients.begin();
(...skipping 25 matching lines...) Expand all
854 774
855 allocation.renderer_allocation.bytes_limit_when_not_visible = 775 allocation.renderer_allocation.bytes_limit_when_not_visible =
856 client_state->bytes_allocation_when_nonvisible_; 776 client_state->bytes_allocation_when_nonvisible_;
857 allocation.renderer_allocation.priority_cutoff_when_not_visible = 777 allocation.renderer_allocation.priority_cutoff_when_not_visible =
858 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; 778 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;
859 779
860 client_state->client_->SetMemoryAllocation(allocation); 780 client_state->client_->SetMemoryAllocation(allocation);
861 } 781 }
862 } 782 }
863 783
864 void GpuMemoryManager::AssignSurfacesAllocationsUniform() {
865 // Determine how much memory to assign to give to visible and nonvisible
866 // clients.
867 uint64 bytes_limit_when_visible = GetVisibleClientAllocation();
868
869 // Experiment to determine if aggressively discarding tiles on OS X
870 // results in greater stability.
871 #if defined(OS_MACOSX)
872 GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible =
873 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNiceToHave;
874 #else
875 GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible =
876 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
877 #endif
878
879 // Assign memory allocations to visible clients.
880 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
881 it != clients_visible_mru_.end();
882 ++it) {
883 GpuMemoryManagerClientState* client_state = *it;
884 GpuMemoryAllocation allocation;
885
886 allocation.browser_allocation.suggest_have_frontbuffer = true;
887 allocation.renderer_allocation.bytes_limit_when_visible =
888 bytes_limit_when_visible;
889 allocation.renderer_allocation.priority_cutoff_when_visible =
890 priority_cutoff_when_visible;
891
892 // Allow this client to keep its textures when nonvisible if they
893 // aren't so expensive that they won't fit.
894 if (client_state->managed_memory_stats_.bytes_required <=
895 bytes_nonvisible_available_gpu_memory_) {
896 allocation.renderer_allocation.bytes_limit_when_not_visible =
897 GetCurrentNonvisibleAvailableGpuMemory();
898 allocation.renderer_allocation.priority_cutoff_when_not_visible =
899 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;
900 } else {
901 allocation.renderer_allocation.bytes_limit_when_not_visible = 0;
902 allocation.renderer_allocation.priority_cutoff_when_not_visible =
903 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing;
904 }
905
906 client_state->client_->SetMemoryAllocation(allocation);
907 }
908
909 // Assign memory allocations to nonvisible clients.
910 uint64 bytes_allocated_nonvisible = 0;
911 for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
912 it != clients_nonvisible_mru_.end();
913 ++it) {
914 GpuMemoryManagerClientState* client_state = *it;
915 GpuMemoryAllocation allocation;
916
917 allocation.browser_allocation.suggest_have_frontbuffer =
918 !client_state->hibernated_;
919 allocation.renderer_allocation.bytes_limit_when_visible =
920 bytes_limit_when_visible;
921 allocation.renderer_allocation.priority_cutoff_when_visible =
922 priority_cutoff_when_visible;
923
924 if (client_state->managed_memory_stats_.bytes_required +
925 bytes_allocated_nonvisible <=
926 GetCurrentNonvisibleAvailableGpuMemory()) {
927 bytes_allocated_nonvisible +=
928 client_state->managed_memory_stats_.bytes_required;
929 allocation.renderer_allocation.bytes_limit_when_not_visible =
930 GetCurrentNonvisibleAvailableGpuMemory();
931 allocation.renderer_allocation.priority_cutoff_when_not_visible =
932 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;
933 } else {
934 allocation.renderer_allocation.bytes_limit_when_not_visible = 0;
935 allocation.renderer_allocation.priority_cutoff_when_not_visible =
936 GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing;
937 }
938
939 client_state->client_->SetMemoryAllocation(allocation);
940 }
941 }
942
943 void GpuMemoryManager::AssignNonSurfacesAllocations() { 784 void GpuMemoryManager::AssignNonSurfacesAllocations() {
944 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); 785 for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
945 it != clients_nonsurface_.end(); 786 it != clients_nonsurface_.end();
946 ++it) { 787 ++it) {
947 GpuMemoryManagerClientState* client_state = *it; 788 GpuMemoryManagerClientState* client_state = *it;
948 GpuMemoryAllocation allocation; 789 GpuMemoryAllocation allocation;
949 790
950 if (!client_state->hibernated_) { 791 if (!client_state->hibernated_) {
951 allocation.renderer_allocation.bytes_limit_when_visible = 792 allocation.renderer_allocation.bytes_limit_when_visible =
952 GetMinimumClientAllocation(); 793 GetMinimumClientAllocation();
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
993 // Clients that don't have surfaces are non-hibernated if they are 834 // Clients that don't have surfaces are non-hibernated if they are
994 // in a GL share group with a non-hibernated surface. 835 // in a GL share group with a non-hibernated surface.
995 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); 836 for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
996 it != clients_nonsurface_.end(); 837 it != clients_nonsurface_.end();
997 ++it) { 838 ++it) {
998 GpuMemoryManagerClientState* client_state = *it; 839 GpuMemoryManagerClientState* client_state = *it;
999 client_state->hibernated_ = client_state->tracking_group_->hibernated_; 840 client_state->hibernated_ = client_state->tracking_group_->hibernated_;
1000 } 841 }
1001 } 842 }
1002 843
1003 uint64 GpuMemoryManager::GetVisibleClientAllocation() const {
1004 // Count how many clients will get allocations.
1005 size_t clients_with_surface_visible_count = clients_visible_mru_.size();
1006 size_t clients_without_surface_not_hibernated_count = 0;
1007 for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
1008 it != clients_nonsurface_.end();
1009 ++it) {
1010 GpuMemoryManagerClientState* client_state = *it;
1011 if (!client_state->hibernated_)
1012 clients_without_surface_not_hibernated_count++;
1013 }
1014
1015 // Calculate bonus allocation by splitting remainder of global limit equally
1016 // after giving out the minimum to those that need it.
1017 size_t num_clients_need_mem = clients_with_surface_visible_count +
1018 clients_without_surface_not_hibernated_count;
1019 uint64 base_allocation_size = GetMinimumClientAllocation() *
1020 num_clients_need_mem;
1021 uint64 bonus_allocation = 0;
1022 if (base_allocation_size < GetAvailableGpuMemory() &&
1023 clients_with_surface_visible_count)
1024 bonus_allocation = (GetAvailableGpuMemory() - base_allocation_size) /
1025 clients_with_surface_visible_count;
1026 uint64 clients_allocation_when_visible = GetMinimumClientAllocation() +
1027 bonus_allocation;
1028
1029 // If we have received a window count message, then override the client-based
1030 // scheme with a per-window scheme
1031 if (window_count_has_been_received_) {
1032 clients_allocation_when_visible = std::max(
1033 clients_allocation_when_visible,
1034 GetAvailableGpuMemory() / std::max(window_count_, 1u));
1035 }
1036
1037 // Limit the memory per client to its maximum allowed level.
1038 if (clients_allocation_when_visible >= GetMaximumClientAllocation())
1039 clients_allocation_when_visible = GetMaximumClientAllocation();
1040
1041 return clients_allocation_when_visible;
1042 }
1043
1044 void GpuMemoryManager::SendUmaStatsToBrowser() { 844 void GpuMemoryManager::SendUmaStatsToBrowser() {
1045 if (!channel_manager_) 845 if (!channel_manager_)
1046 return; 846 return;
1047 GPUMemoryUmaStats params; 847 GPUMemoryUmaStats params;
1048 params.bytes_allocated_current = GetCurrentUsage(); 848 params.bytes_allocated_current = GetCurrentUsage();
1049 params.bytes_allocated_max = bytes_allocated_historical_max_; 849 params.bytes_allocated_max = bytes_allocated_historical_max_;
1050 params.bytes_limit = bytes_available_gpu_memory_; 850 params.bytes_limit = bytes_available_gpu_memory_;
1051 params.client_count = clients_visible_mru_.size() + 851 params.client_count = clients_visible_mru_.size() +
1052 clients_nonvisible_mru_.size() + 852 clients_nonvisible_mru_.size() +
1053 clients_nonsurface_.size(); 853 clients_nonsurface_.size();
1054 params.context_group_count = tracking_groups_.size(); 854 params.context_group_count = tracking_groups_.size();
1055 params.window_count = window_count_;
1056 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params)); 855 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
1057 } 856 }
1058 857
1059 GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList( 858 GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
1060 GpuMemoryManagerClientState* client_state) { 859 GpuMemoryManagerClientState* client_state) {
1061 if (client_state->has_surface_) { 860 if (client_state->has_surface_) {
1062 if (client_state->visible_) 861 if (client_state->visible_)
1063 return &clients_visible_mru_; 862 return &clients_visible_mru_;
1064 else 863 else
1065 return &clients_nonvisible_mru_; 864 return &clients_nonvisible_mru_;
(...skipping 12 matching lines...) Expand all
1078 877
1079 void GpuMemoryManager::RemoveClientFromList( 878 void GpuMemoryManager::RemoveClientFromList(
1080 GpuMemoryManagerClientState* client_state) { 879 GpuMemoryManagerClientState* client_state) {
1081 DCHECK(client_state->list_iterator_valid_); 880 DCHECK(client_state->list_iterator_valid_);
1082 ClientStateList* client_list = GetClientList(client_state); 881 ClientStateList* client_list = GetClientList(client_state);
1083 client_list->erase(client_state->list_iterator_); 882 client_list->erase(client_state->list_iterator_);
1084 client_state->list_iterator_valid_ = false; 883 client_state->list_iterator_valid_ = false;
1085 } 884 }
1086 885
1087 } // namespace content 886 } // namespace content
OLDNEW
« no previous file with comments | « content/common/gpu/gpu_memory_manager.h ('k') | content/common/gpu/gpu_memory_manager_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698