Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(34)

Side by Side Diff: net/disk_cache/mapped_file_avoid_mmap_posix.cc

Issue 18834003: Revert "Updating the mapped_file_avoid_mmap_posix implementation." (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « net/disk_cache/mapped_file.h ('k') | net/disk_cache/mapped_file_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/mapped_file.h" 5 #include "net/disk_cache/mapped_file.h"
6 6
7 #include <stdlib.h> 7 #include <stdlib.h>
8 #include <sys/mman.h>
9
10 #include <map>
11 8
12 #include "base/files/file_path.h" 9 #include "base/files/file_path.h"
13 #include "base/lazy_instance.h"
14 #include "base/logging.h" 10 #include "base/logging.h"
15 #include "base/memory/scoped_ptr.h"
16 #include "base/memory/scoped_vector.h"
17 #include "base/threading/thread_local.h"
18
19 // This implementation of MappedFile doesn't use a shared RW mmap for
20 // performance reason. Instead it will use a private RO mmap and install a SEGV
21 // signal handler. When the memory is modified, the handler will register the
22 // modified address and change the protection of the page to RW. When a flush is
23 // then executed, it has access to the exact pages that have been modified and
24 // will only write those to disk. The handler will only track half of the dirty
25 // pages. If more than half the pages are modified, the flush will instead write
26 // the full buffer to disk.
27
28 namespace {
29
30 // Choose 4k as a reasonable page size. As this file is used mainly on Android,
31 // this is the real Android page size.
// NOTE(review): the 4096-byte page size is hard-coded, not read from
// sysconf(_SC_PAGESIZE) — confirm this holds on every target platform.
32 const size_t kPageSize = 4096;
33
// Variable-capacity array of page pointers, optimized for a capacity of 1.
// Most mapped files map exactly 2 pages; tracking a single page is then
// optimal because if both pages are modified, writing the full view is the
// best behavior anyway.
class SmallArray {
 public:
  SmallArray() : capacity_(0), array_(NULL) {}
  ~SmallArray() { SetCapacity(0); }

  // Number of slots available through array().
  size_t capacity() { return capacity_; }
  // Backing storage; valid indices are [0, capacity()).
  char** array() { return array_; }
  // Resizes the storage. Previously stored pointers are discarded: callers
  // must not rely on contents surviving a SetCapacity() call.
  void SetCapacity(size_t capacity) {
    if (capacity_ > 1)
      delete[] array_;
    capacity_ = capacity;
    if (capacity > 1)
      array_ = new char*[capacity];
    else
      array_ = small_array_;  // Inline storage avoids a heap allocation.
  }

 private:
  size_t capacity_;
  char** array_;
  // Inline storage used when capacity() <= 1.
  char* small_array_[1];

  // Copying would double-delete |array_| (or alias |small_array_|);
  // disallow it. Deliberately declared but not implemented (pre-C++11
  // noncopyable idiom, matching the rest of this file).
  SmallArray(const SmallArray&);
  void operator=(const SmallArray&);
};
59
60 // Information about the memory mapped part of a file.
61 struct MappedFileInfo {
62 // Start address of the memory map.
63 char* start_address;
64 // Size of the memory map.
65 size_t size;
66 // Number of dirty pages. A page is dirty if the memory content is different
67 // from the file content. May exceed dirty_pages.capacity(): the SEGV
// handler keeps counting past the tracking limit so that Flush() knows to
// write the whole view instead of individual pages.
68 size_t num_dirty_pages;
69 // The dirty pages.
70 SmallArray dirty_pages;
71 };
72
73 // The maximum number of dirty pages that can be tracked. Limit the memory
74 // overhead to 2kb per file.
// NOTE(review): this subtracts sizeof(MappedFileInfo) (a byte count) from
// kPageSize / sizeof(char*) / 2 (a pointer count) — presumably intended as a
// rough safety margin rather than an exact budget; confirm the units.
75 const size_t kMaxDirtyPagesCacheSize =
76 kPageSize / sizeof(char*) / 2 - sizeof(MappedFileInfo);
77
// Per-thread registry of the MappedFileInfo for every MappedFile created on
// this thread. The raw |infos_ptr_|/|infos_size_| mirror exists so the SEGV
// handler can walk the registry without calling any C++ method that might
// allocate memory.
78 class ThreadLocalMappedFileInfo {
79 public:
80 ThreadLocalMappedFileInfo() {}
81 ~ThreadLocalMappedFileInfo() {}
82
// Creates and registers tracking info for |mapped_file|, whose view is
// |size| bytes starting at mapped_file->buffer().
83 void RegisterMappedFile(disk_cache::MappedFile* mapped_file, size_t size) {
84 scoped_ptr<MappedFileInfo> new_info(new MappedFileInfo);
85 new_info->start_address = static_cast<char*>(mapped_file->buffer());
86 new_info->size = size;
87 new_info->num_dirty_pages = 0;
88 // Track half of the dirty pages, after this, just overwrite the full
89 // content.
90 size_t capacity = (size + kPageSize - 1) / kPageSize / 2;
91 if (capacity > kMaxDirtyPagesCacheSize)
92 capacity = kMaxDirtyPagesCacheSize;
93 new_info->dirty_pages.SetCapacity(capacity);
94 info_per_map_file_[mapped_file] = new_info.get();
95 infos_.push_back(new_info.release());
// push_back may have reallocated the vector's storage; refresh the raw
// pointer/size mirror used by the signal handler.
96 Update();
97 }
98
// Removes (and destroys) the info for |mapped_file|.
// NOTE(review): relies on ScopedVector::erase deleting the owned element —
// confirm against the base/memory/scoped_vector.h in this tree.
99 void UnregisterMappedFile(disk_cache::MappedFile* mapped_file) {
100 MappedFileInfo* info = InfoForMappedFile(mapped_file);
101 DCHECK(info);
102 info_per_map_file_.erase(mapped_file);
// NOTE(review): std::find needs <algorithm>, which this file does not
// include directly — presumably pulled in transitively; verify.
103 infos_.erase(std::find(infos_.begin(), infos_.end(), info));
104 Update();
105 }
106
// Returns the info for |mapped_file|, or NULL if it was never registered.
// NOTE(review): map::operator[] default-inserts a NULL entry for an unknown
// key as a side effect; callers only pass registered files today.
107 MappedFileInfo* InfoForMappedFile(disk_cache::MappedFile* mapped_file) {
108 return info_per_map_file_[mapped_file];
109 }
110
// Raw accessors safe to call from the SEGV handler (no allocation).
111 MappedFileInfo** infos_ptr() { return infos_ptr_; }
112 size_t infos_size() { return infos_size_; }
113
114 private:
115 // Update |infos_ptr_| and |infos_size_| when |infos_| changes.
116 void Update() {
117 infos_ptr_ = &infos_[0];
118 infos_size_ = infos_.size();
119 }
120
121 // Link to the MappedFileInfo for a given MappedFile.
122 std::map<disk_cache::MappedFile*, MappedFileInfo*> info_per_map_file_;
123 // Vector of information about all current MappedFile belonging to the current
124 // thread.
125 ScopedVector<MappedFileInfo> infos_;
126 // Pointer to the storage part of |infos_|. This is kept as a variable to
127 // prevent the signal handler from calling any C++ method that might allocate
128 // memory.
129 MappedFileInfo** infos_ptr_;
130 // Size of |infos_|.
131 size_t infos_size_;
132 };
133
// Process-wide owner of the SIGSEGV handler and of the per-thread mapped-file
// registries. Instantiated lazily via |g_segv_handler| below.
// NOTE(review): the destructor does not restore |old_sigaction_|; as a
// LazyInstance this lives for the process lifetime, so that appears
// intentional — confirm.
134 class SegvHandler {
135 public:
136 // Register the signal handler.
137 SegvHandler();
138 ~SegvHandler() {}
139
140 // SEGV signal handler. This handler will check that the address that
141 // generated the fault is one associated with a mapped file. If that's the
142 // case, it will register the address and change the protection to RW then
143 // return. This will cause the instruction that generated the fault to be
144 // re-executed. If not, it will just reinstall the old handler and return,
145 // which will generate the fault again and let the initial handler get called.
146 static void SigSegvHandler(int sig, siginfo_t* si, void* unused);
147
// Accessor for the per-thread registry slot.
148 base::ThreadLocalPointer<ThreadLocalMappedFileInfo>& thread_local_infos() {
149 return thread_local_infos_;
150 }
151
152 private:
153 // Install the SEGV handler, storing the current sigaction in |old_sigaction|
154 // if it is not NULL.
155 static void InstallSigHandler(struct sigaction* old_sigaction);
156
157 base::ThreadLocalPointer<ThreadLocalMappedFileInfo> thread_local_infos_;
// The handler that was installed before ours; chained to for faults that do
// not belong to any mapped file.
158 struct sigaction old_sigaction_;
159 };
160
// Lazily-constructed singleton; construction installs the signal handler.
161 static base::LazyInstance<SegvHandler> g_segv_handler =
162 LAZY_INSTANCE_INITIALIZER;
163
164 // Constructor: installs the SEGV handler process-wide and saves the previous
// handler so unrelated faults can be forwarded to it.
165 SegvHandler::SegvHandler() {
166 // Setup the SEGV signal handler.
167 InstallSigHandler(&old_sigaction_);
168 }
169
170 // static
171 void SegvHandler::SigSegvHandler(int sig, siginfo_t* si, void* unused) {
172 // First, check if the current sighandler has the SA_SIGINFO flag. If it
173 // doesn't it means an external library temporarily installed a signal handler
174 // using signal, and so incorrectly restored the current one. The parameters
175 // are then useless.
176 struct sigaction current_action;
177 sigaction(SIGSEGV, NULL, &current_action);
178 if (!(current_action.sa_flags & SA_SIGINFO)) {
// NOTE(review): LOG(WARNING) is not async-signal-safe (it may allocate and
// lock); acceptable only because this path is a rare recovery case — verify.
179 LOG(WARNING) << "Signal handler have been re-installed incorrectly.";
180 InstallSigHandler(NULL);
181 // Returning will re-run the signal with the correct parameters.
182 return;
183 }
184 ThreadLocalMappedFileInfo* thread_local_info =
185 g_segv_handler.Pointer()->thread_local_infos().Get();
186 if (thread_local_info) {
187 char* addr = reinterpret_cast<char*>(si->si_addr);
// Walk the raw mirror of the per-thread registry; no C++ calls that could
// allocate are made on this path.
188 for (size_t i = 0; i < thread_local_info->infos_size(); ++i) {
189 MappedFileInfo* info = thread_local_info->infos_ptr()[i];
190 if (info->start_address <= addr &&
191 addr < info->start_address + info->size) {
192 // Only track new dirty pages if the array has still some capacity.
193 // Otherwise, the full buffer will be written to disk and it is not
194 // necessary to track changes until the next flush.
195 if (info->num_dirty_pages < info->dirty_pages.capacity()) {
// Round the faulting address down to its page boundary before unprotecting.
196 char* aligned_address = reinterpret_cast<char*>(
197 reinterpret_cast<size_t>(addr) & ~(kPageSize - 1));
// NOTE(review): the mprotect return values are ignored here and below; a
// failure would re-raise the fault in an infinite loop — confirm acceptable.
198 mprotect(aligned_address, kPageSize, PROT_READ | PROT_WRITE);
199 info->dirty_pages.array()[info->num_dirty_pages] = aligned_address;
200 } else {
// Tracking is full: unprotect the whole view so no further faults occur
// until the next Flush().
201 mprotect(info->start_address, info->size, PROT_READ | PROT_WRITE);
202 }
// Incremented even when not tracked, so Flush() can detect the overflow
// (num_dirty_pages > capacity) and write the full buffer.
203 info->num_dirty_pages++;
204 return;
205 }
206 }
207 }
208 // The address is not handled by any mapped file. Let the default handler get
209 // called.
210 sigaction(SIGSEGV, &g_segv_handler.Pointer()->old_sigaction_, NULL);
211 }
212
213 // static
214 void SegvHandler::InstallSigHandler(struct sigaction* old_sigaction) {
215 struct sigaction action;
216 action.sa_sigaction = SigSegvHandler;
217 sigemptyset(&action.sa_mask);
218 action.sa_flags = SA_SIGINFO | SA_RESTART;
219 sigaction(SIGSEGV, &action, old_sigaction);
220 }
221
222 } // namespace
223 11
224 namespace disk_cache { 12 namespace disk_cache {
225 13
// MappedFile::Init — loads the file content into memory and returns the
// buffer (NULL on failure). Both columns of this revert hunk are shown: the
// left column is the mmap(PROT_READ, MAP_PRIVATE) + SEGV-dirty-tracking
// implementation being removed; the right column restores the malloc()ed
// |buffer_| plus a |snapshot_| copy that Flush() later diffs against.
// In the restored (right) version, any allocation or Read() failure frees
// both buffers and zeroes them, so the caller sees a NULL return.
226 void* MappedFile::Init(const base::FilePath& name, size_t size) { 14 void* MappedFile::Init(const base::FilePath& name, size_t size) {
227 DCHECK(!init_); 15 DCHECK(!init_);
228 if (init_ || !File::Init(name)) 16 if (init_ || !File::Init(name))
229 return NULL; 17 return NULL;
230 18
// A size of 0 means "map the whole file".
231 if (!size) 19 if (!size)
232 size = GetLength(); 20 size = GetLength();
233 21
234 buffer_ = mmap(NULL, size, PROT_READ, MAP_PRIVATE, platform_file(), 0); 22 buffer_ = malloc(size);
235 if (reinterpret_cast<ptrdiff_t>(buffer_) == -1) { 23 snapshot_ = malloc(size);
236 NOTREACHED(); 24 if (buffer_ && snapshot_ && Read(buffer_, size, 0)) {
237 buffer_ = 0; 25 memcpy(snapshot_, buffer_, size);
238 } 26 } else {
239 27 free(buffer_);
// Left column only: register the mapping with the per-thread registry so the
// SEGV handler can track dirty pages, creating the registry on first use.
240 if (buffer_) { 28 free(snapshot_);
241 ThreadLocalMappedFileInfo* thread_local_info = 29 buffer_ = snapshot_ = 0;
242 g_segv_handler.Pointer()->thread_local_infos().Get();
243 if (!thread_local_info) {
244 thread_local_info = new ThreadLocalMappedFileInfo();
245 g_segv_handler.Pointer()->thread_local_infos().Set(thread_local_info);
246 }
247 DCHECK(size);
248 thread_local_info->RegisterMappedFile(this, size);
249 } 30 }
250 31
251 init_ = true; 32 init_ = true;
252 view_size_ = size; 33 view_size_ = size;
253 return buffer_; 34 return buffer_;
254 } 35 }
255 36
// MappedFile::Load — reads a block that lies past the in-memory view
// (offsets are relative to the end of the view). Identical on both sides of
// the revert.
256 bool MappedFile::Load(const FileBlock* block) { 37 bool MappedFile::Load(const FileBlock* block) {
257 size_t offset = block->offset() + view_size_; 38 size_t offset = block->offset() + view_size_;
258 return Read(block->buffer(), block->size(), offset); 39 return Read(block->buffer(), block->size(), offset);
259 } 40 }
260 41
// MappedFile::Store — writes a block that lies past the in-memory view
// (offsets are relative to the end of the view). Identical on both sides of
// the revert.
261 bool MappedFile::Store(const FileBlock* block) { 42 bool MappedFile::Store(const FileBlock* block) {
262 size_t offset = block->offset() + view_size_; 43 size_t offset = block->offset() + view_size_;
263 return Write(block->buffer(), block->size(), offset); 44 return Write(block->buffer(), block->size(), offset);
264 } 45 }
265 46
// MappedFile::Flush — writes modified memory back to disk.
// Left column (removed): consults the SEGV-handler dirty-page list; if more
// pages were dirtied than could be tracked, writes the whole view, otherwise
// writes only the tracked pages, then resets the count and re-protects the
// buffer read-only so new writes fault and get tracked again.
// Right column (restored): memcmp()s the buffer against |snapshot_| in
// 4096-byte blocks and writes only blocks that changed, updating the
// snapshot as it goes.
266 void MappedFile::Flush() { 47 void MappedFile::Flush() {
267 DCHECK(buffer_); 48 DCHECK(buffer_);
268 MappedFileInfo* info = g_segv_handler.Pointer()->thread_local_infos().Get()-> 49 DCHECK(snapshot_);
269 InfoForMappedFile(this); 50 const char* buffer_ptr = static_cast<const char*>(buffer_);
270 DCHECK(info); 51 char* snapshot_ptr = static_cast<char*>(snapshot_);
271 if (info->num_dirty_pages > info->dirty_pages.capacity()) { 52 const size_t block_size = 4096;
272 Write(buffer_, view_size_, 0); 53 for (size_t offset = 0; offset < view_size_; offset += block_size) {
273 } else { 54 size_t size = std::min(view_size_ - offset, block_size);
274 const char* buffer_ptr = static_cast<const char*>(buffer_); 55 if (memcmp(snapshot_ptr + offset, buffer_ptr + offset, size)) {
275 for (size_t i = 0; i < info->num_dirty_pages; ++i) { 56 memcpy(snapshot_ptr + offset, buffer_ptr + offset, size);
276 const char* ptr = info->dirty_pages.array()[i]; 57 Write(snapshot_ptr + offset, size, offset);
277 size_t size_to_write = kPageSize;
278 // The view_size is not a full number of pages. Only write the fraction of
279 // the page that is in the view.
280 if (ptr - buffer_ptr + kPageSize > view_size_)
281 size_to_write = view_size_ - (ptr - buffer_ptr);
282 Write(ptr, size_to_write, ptr - buffer_ptr);
283 } 58 }
284 } 59 }
285 info->num_dirty_pages = 0;
286 mprotect(buffer_, view_size_, PROT_READ);
287 } 60 }
288 61
// MappedFile::~MappedFile — flushes pending changes and releases the memory.
// Left column (removed): also unregisters from the per-thread registry.
// NOTE(review): the removed munmap(buffer_, 0) passes a zero length, which
// POSIX/Linux rejects with EINVAL, so the mapping would never actually be
// unmapped — one more reason for this revert; verify against munmap(2).
// Right column (restored): frees both the buffer and the snapshot.
289 MappedFile::~MappedFile() { 62 MappedFile::~MappedFile() {
290 if (!init_) 63 if (!init_)
291 return; 64 return;
292 65
293 if (buffer_) { 66 if (buffer_ && snapshot_) {
294 Flush(); 67 Flush();
295 ThreadLocalMappedFileInfo* thread_local_info =
296 g_segv_handler.Pointer()->thread_local_infos().Get();
297 DCHECK(thread_local_info);
298 thread_local_info->UnregisterMappedFile(this);
299 munmap(buffer_, 0);
300 } 68 }
// free(NULL) is a no-op, so the failure path of Init() is safe here.
315 69 free(buffer_);
316 70 free(snapshot_);
301 } 71 }
302 72
303 } // namespace disk_cache 73 } // namespace disk_cache
OLDNEW
« no previous file with comments | « net/disk_cache/mapped_file.h ('k') | net/disk_cache/mapped_file_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698