Chromium Code Reviews

content/common/gpu/gpu_memory_manager.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#if defined(ENABLE_GPU)

#include <algorithm>
#include <set>

#include "base/bind.h"
#include "base/message_loop.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_allocation.h"

namespace {

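// Per-context GPU resource budgets, in bytes, for the three tab states
// managed below: a visible (foreground) tab, a recently hidden (background)
// tab that keeps only its frontbuffer, and a hibernated tab that keeps no
// buffers at all. The specific values are heuristics.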
static const int kResourceSizeForegroundTab = 100 * 1024 * 1024;

nduca (2012/02/01 00:01:17): you don't need static keywords inside an anon namespace

nduca (2012/02/01 00:01:17): Put a comment block explaining what these magic values mean
static const int kResourceSizeBackgroundTab = 50 * 1024 * 1024;
static const int kResourceSizeHibernatedTab = 0;
static const size_t kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8;

nduca (2012/02/01 00:01:17): It seems to me we should set kResourceSizeNonHib…

// Set up three allocation values for the three possible stub states.
static const GpuMemoryAllocation all_buffers_allocation(
    kResourceSizeForegroundTab, true, true);
static const GpuMemoryAllocation front_buffers_allocation(
    kResourceSizeBackgroundTab, true, false);
static const GpuMemoryAllocation no_buffers_allocation(
    kResourceSizeHibernatedTab, false, false);

}  // namespace
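
The three constructor arguments above line up with the constant names as (size, frontbuffer, backbuffer): front_buffers_allocation passes (size, true, false) and keeps only a frontbuffer. As a minimal sketch of the shape GpuMemoryAllocation presumably has; the field and type names here are guesses for illustration, not the real gpu_memory_allocation.h API:

#include <cstddef>  // std::size_t

struct GpuMemoryAllocationSketch {  // illustrative only, not Chromium code
  std::size_t gpu_resource_size_in_bytes;  // budget the context may consume
  bool suggest_have_frontbuffer;           // keep a displayable frontbuffer
  bool suggest_have_backbuffer;            // keep a renderable backbuffer

  GpuMemoryAllocationSketch(std::size_t size, bool frontbuffer, bool backbuffer)
      : gpu_resource_size_in_bytes(size),
        suggest_have_frontbuffer(frontbuffer),
        suggest_have_backbuffer(backbuffer) {}
};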

GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client)
    : client_(client),
      manage_scheduled_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          kDefaultMaxSurfacesWithFrontbufferSoftLimit),
      weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
}

GpuMemoryManager::~GpuMemoryManager() {
}

bool GpuMemoryManager::StubWithSurfaceComparator::operator()(
    GpuCommandBufferStubBase* lhs,
    GpuCommandBufferStubBase* rhs) {
  GpuCommandBufferStubBase::SurfaceState* lhs_ss = lhs->surface_state();
  GpuCommandBufferStubBase::SurfaceState* rhs_ss = rhs->surface_state();
  DCHECK(lhs_ss && rhs_ss);
  if (lhs_ss->visible)
    return !rhs_ss->visible ||
        (lhs_ss->last_used_time > rhs_ss->last_used_time);
  else
    return !rhs_ss->visible &&
        (lhs_ss->last_used_time > rhs_ss->last_used_time);
}
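
As a worked example of the ordering this comparator produces (values invented for illustration): given stubs A (visible, last_used_time 5), B (not visible, 9), and C (visible, 2), std::sort yields A, C, B. Visible stubs always precede non-visible ones, and within each visibility group the more recently used stub comes first.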

void GpuMemoryManager::ScheduleManage() {
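  // Coalesce calls: if a Manage() pass is already pending on the current
  // message loop, don't post another; the pending pass will see the latest
  // state when it runs.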
  if (manage_scheduled_)
    return;
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuMemoryManager::Manage, weak_factory_.GetWeakPtr()));
  manage_scheduled_ = true;
}

// The current Manage algorithm simply classifies contexts (stubs) into
// "foreground", "background", or "hibernated" categories.
// For each of these three categories, there are predefined memory allocation
// limits and front/backbuffer states.
//
// Stubs may or may not have a surface, and the rules are different for each.
//
// The rules for categorizing contexts with a surface are:
//  1. Foreground: all visible surfaces.
//     * Must have both front and back buffers.
//
//  2. Background: non-visible surfaces that have not surpassed the
//     max_surfaces_with_frontbuffer_soft_limit_ limit.
//     * Will have only a frontbuffer.
//
//  3. Hibernated: non-visible surfaces that have surpassed the
//     max_surfaces_with_frontbuffer_soft_limit_ limit.
//     * Will have neither buffer.
//
// The rule for categorizing contexts without a surface is:
//  * Stubs without a surface instead carry an affected_surface_ids list;
//    such a stub's state must match that of the most visible surface it
//    affects.
void GpuMemoryManager::Manage() {
  manage_scheduled_ = false;

  // Create stub lists by separating out the two types received from the
  // client.
  std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
  std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
  {
    std::vector<GpuCommandBufferStubBase*> stubs;
    client_->AppendAllCommandBufferStubs(stubs);

    for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
         it != stubs.end(); ++it) {
      GpuCommandBufferStubBase* stub = *it;
      if (stub->surface_state())
        stubs_with_surface.push_back(stub);
      else
        stubs_without_surface.push_back(stub);
    }
  }

  // Sort stubs with surfaces into {visibility, last_used_time} order using
  // the custom comparator.
  std::sort(stubs_with_surface.begin(), stubs_with_surface.end(),
            StubWithSurfaceComparator());

  // Separate the stubs with surfaces into three sets and send each stub its
  // memory allocation.
  std::set<int32> all_buffers, front_buffers, no_buffers;

  for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
    GpuCommandBufferStubBase* stub = stubs_with_surface[i];
    if (stub->surface_state()->visible) {
      all_buffers.insert(stub->surface_state()->surface_id);
      stub->SendMemoryAllocationToProxy(all_buffers_allocation);
    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
      front_buffers.insert(stub->surface_state()->surface_id);
      stub->SendMemoryAllocationToProxy(front_buffers_allocation);
    } else {
      no_buffers.insert(stub->surface_state()->surface_id);
      stub->SendMemoryAllocationToProxy(no_buffers_allocation);
    }
  }

  // Now go through the stubs without surfaces and send memory allocations
  // based on the buckets just computed. Because a stub may affect multiple
  // surfaces, use the state of the most "important" affected surface.
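  // For example (illustrative): a stub affecting one surface in all_buffers
  // and another in no_buffers gets all_buffers_allocation, because the chain
  // below checks the buckets in decreasing order of importance.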
  for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
      stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
    GpuCommandBufferStubBase* stub = *it;
    if (stub->affected_surface_ids().empty())
      continue;
    if (std::find_first_of(all_buffers.begin(),
                           all_buffers.end(),
                           stub->affected_surface_ids().begin(),
                           stub->affected_surface_ids().end()) !=
        all_buffers.end()) {
      stub->SendMemoryAllocationToProxy(all_buffers_allocation);
    } else if (std::find_first_of(front_buffers.begin(),
                                  front_buffers.end(),
                                  stub->affected_surface_ids().begin(),
                                  stub->affected_surface_ids().end()) !=
               front_buffers.end()) {
      stub->SendMemoryAllocationToProxy(front_buffers_allocation);
    } else if (std::find_first_of(no_buffers.begin(),
                                  no_buffers.end(),
                                  stub->affected_surface_ids().begin(),
                                  stub->affected_surface_ids().end()) !=
               no_buffers.end()) {
      stub->SendMemoryAllocationToProxy(no_buffers_allocation);
    } else {
      DLOG(ERROR) << "GpuCommandBufferStub::affected_surface_ids are not "
                     "valid.";
    }
  }
}

void GpuMemoryManager::SetMaxSurfacesWithFrontbufferSoftLimit(size_t limit) {
  max_surfaces_with_frontbuffer_soft_limit_ = limit;
}

#endif  // defined(ENABLE_GPU)
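
To see the whole policy end to end, here is a small self-contained toy (not Chromium code; every name in it is invented) that sorts a handful of surfaces the way StubWithSurfaceComparator does and then buckets them with the same visibility and soft-limit rules as Manage(). Note one subtlety it reproduces: the index i counts visible stubs too, so visible surfaces consume slots under the frontbuffer soft limit.

// toy_bucketing.cc: standalone illustration only, not part of the CL.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct ToySurface {
  int id;
  bool visible;
  int last_used_time;
};

int main() {
  const std::size_t kSoftLimit = 3;  // stands in for the soft limit (default 8)
  std::vector<ToySurface> surfaces = {
      {1, true, 50}, {2, false, 90}, {3, false, 20},
      {4, true, 10}, {5, false, 70},
  };

  // Same ordering as StubWithSurfaceComparator: visible first, then most
  // recently used first within each visibility group.
  std::sort(surfaces.begin(), surfaces.end(),
            [](const ToySurface& a, const ToySurface& b) {
              if (a.visible != b.visible)
                return a.visible;
              return a.last_used_time > b.last_used_time;
            });

  // Same bucketing as the loop over stubs_with_surface in Manage().
  for (std::size_t i = 0; i < surfaces.size(); ++i) {
    const char* bucket;
    if (surfaces[i].visible)
      bucket = "foreground (front and back buffers)";
    else if (i < kSoftLimit)
      bucket = "background (frontbuffer only)";
    else
      bucket = "hibernated (no buffers)";
    std::printf("surface %d -> %s\n", surfaces[i].id, bucket);
  }
  // Output: 1 and 4 foreground, 2 background, 5 and 3 hibernated.
  return 0;
}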