Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(237)

Side by Side Diff: net/disk_cache/simple/simple_entry_impl.cc

Issue 14130015: Support overlapping operations on the SimpleEntryImpl. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Egor comments Created 7 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « net/disk_cache/simple/simple_entry_impl.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/simple/simple_entry_impl.h" 5 #include "net/disk_cache/simple/simple_entry_impl.h"
6 6
7 #include "base/bind.h" 7 #include "base/bind.h"
8 #include "base/bind_helpers.h" 8 #include "base/bind_helpers.h"
9 #include "base/callback.h" 9 #include "base/callback.h"
10 #include "base/location.h" 10 #include "base/location.h"
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after
85 WorkerPool::PostTask(FROM_HERE, 85 WorkerPool::PostTask(FROM_HERE,
86 base::Bind(&SimpleSynchronousEntry::DoomEntry, path, key, 86 base::Bind(&SimpleSynchronousEntry::DoomEntry, path, key,
87 MessageLoopProxy::current(), callback), 87 MessageLoopProxy::current(), callback),
88 true); 88 true);
89 return net::ERR_IO_PENDING; 89 return net::ERR_IO_PENDING;
90 } 90 }
91 91
92 void SimpleEntryImpl::Doom() { 92 void SimpleEntryImpl::Doom() {
93 DCHECK(io_thread_checker_.CalledOnValidThread()); 93 DCHECK(io_thread_checker_.CalledOnValidThread());
94 DCHECK(synchronous_entry_); 94 DCHECK(synchronous_entry_);
95 DCHECK(!operation_running_);
gavinp 2013/04/17 16:31:37 I think this is bad; it's always valid to doom an entry.
felipeg 2013/04/17 16:54:59 Done.
95 #if defined(OS_POSIX) 96 #if defined(OS_POSIX)
96 // This call to static SimpleEntryImpl::DoomEntry() will just erase the 97 // This call to static SimpleEntryImpl::DoomEntry() will just erase the
97 // underlying files. On POSIX, this is fine; the files are still open on the 98 // underlying files. On POSIX, this is fine; the files are still open on the
98 // SimpleSynchronousEntry, and operations can even happen on them. The files 99 // SimpleSynchronousEntry, and operations can even happen on them. The files
99 // will be removed from the filesystem when they are closed. 100 // will be removed from the filesystem when they are closed.
100 DoomEntry(index_, path_, key_, CompletionCallback()); 101 DoomEntry(index_, path_, key_, CompletionCallback());
101 #else 102 #else
102 NOTIMPLEMENTED(); 103 NOTIMPLEMENTED();
103 #endif 104 #endif
104 } 105 }
105 106
106 void SimpleEntryImpl::Close() { 107 void SimpleEntryImpl::Close() {
107 DCHECK(io_thread_checker_.CalledOnValidThread()); 108 DCHECK(io_thread_checker_.CalledOnValidThread());
108 Release(); // Balanced in CreationOperationCompleted(). 109 if (operation_running_) {
gavinp 2013/04/17 16:31:37 Why is the pattern here so different from ReadData
felipeg 2013/04/17 16:54:59 Done.
110 // Postpone close operation.
111 // Push the close operation to the end of the line. This way we run all
112 // operations before we are able to close.
113 pending_operations_.push(base::Bind(&SimpleEntryImpl::Close, this));
114 return;
115 } else {
116 DCHECK(pending_operations_.size() == 0);
117 DCHECK(!operation_running_);
118 DCHECK(synchronous_entry_);
119 WorkerPool::PostTask(FROM_HERE,
120 base::Bind(&SimpleSynchronousEntry::Close,
121 base::Unretained(synchronous_entry_)),
122 true);
123 // Entry::Close() is expected to delete this entry. See disk_cache.h for
124 // details.
125 Release(); // Balanced in CreationOperationCompleted().
126 }
109 } 127 }
110 128
111 std::string SimpleEntryImpl::GetKey() const { 129 std::string SimpleEntryImpl::GetKey() const {
112 DCHECK(io_thread_checker_.CalledOnValidThread()); 130 DCHECK(io_thread_checker_.CalledOnValidThread());
113 return key_; 131 return key_;
114 } 132 }
115 133
116 Time SimpleEntryImpl::GetLastUsed() const { 134 Time SimpleEntryImpl::GetLastUsed() const {
117 DCHECK(io_thread_checker_.CalledOnValidThread()); 135 DCHECK(io_thread_checker_.CalledOnValidThread());
118 return last_used_; 136 return last_used_;
119 } 137 }
120 138
121 Time SimpleEntryImpl::GetLastModified() const { 139 Time SimpleEntryImpl::GetLastModified() const {
122 DCHECK(io_thread_checker_.CalledOnValidThread()); 140 DCHECK(io_thread_checker_.CalledOnValidThread());
123 return last_modified_; 141 return last_modified_;
124 } 142 }
125 143
126 int32 SimpleEntryImpl::GetDataSize(int index) const { 144 int32 SimpleEntryImpl::GetDataSize(int index) const {
127 DCHECK(io_thread_checker_.CalledOnValidThread()); 145 DCHECK(io_thread_checker_.CalledOnValidThread());
128 return data_size_[index]; 146 return data_size_[index];
129 } 147 }
130 148
131 int SimpleEntryImpl::ReadData(int index, 149 int SimpleEntryImpl::ReadData(int index,
132 int offset, 150 int offset,
133 net::IOBuffer* buf, 151 net::IOBuffer* buf,
134 int buf_len, 152 int buf_len,
135 const CompletionCallback& callback) { 153 const CompletionCallback& callback) {
136 DCHECK(io_thread_checker_.CalledOnValidThread()); 154 DCHECK(io_thread_checker_.CalledOnValidThread());
137 // TODO(gavinp): Add support for overlapping reads. The net::HttpCache does 155 if (index < 0 || index >= kSimpleEntryFileCount || buf_len < 0)
138 // make overlapping read requests when multiple transactions access the same 156 return net::ERR_INVALID_ARGUMENT;
139 // entry as read only. This might make calling SimpleSynchronousEntry::Close() 157 if (offset >= data_size_[index] || offset < 0 || !buf_len)
140 // correctly more tricky (see SimpleEntryImpl::EntryOperationComplete). 158 return 0;
141 if (synchronous_entry_in_use_by_worker_) { 159 // TODO(felipeg): Optimization: Add support for truly parallel read
142 NOTIMPLEMENTED(); 160 // operations.
143 CHECK(false); 161 pending_operations_.push(
144 } 162 base::Bind(&SimpleEntryImpl::ReadDataInternal,
145 synchronous_entry_in_use_by_worker_ = true; 163 this,
146 index_->UseIfExists(key_); 164 index,
147 SynchronousOperationCallback sync_operation_callback = 165 offset,
148 base::Bind(&SimpleEntryImpl::EntryOperationComplete, 166 make_scoped_refptr(buf),
149 this, callback); 167 buf_len,
150 WorkerPool::PostTask(FROM_HERE, 168 callback));
151 base::Bind(&SimpleSynchronousEntry::ReadData, 169 RunNextOperationIfNeeded();
152 base::Unretained(synchronous_entry_),
153 index, offset, make_scoped_refptr(buf),
154 buf_len, sync_operation_callback),
155 true);
156 return net::ERR_IO_PENDING; 170 return net::ERR_IO_PENDING;
157 } 171 }
158 172
159 int SimpleEntryImpl::WriteData(int index, 173 int SimpleEntryImpl::WriteData(int index,
160 int offset, 174 int offset,
161 net::IOBuffer* buf, 175 net::IOBuffer* buf,
162 int buf_len, 176 int buf_len,
163 const CompletionCallback& callback, 177 const CompletionCallback& callback,
164 bool truncate) { 178 bool truncate) {
165 DCHECK(io_thread_checker_.CalledOnValidThread()); 179 DCHECK(io_thread_checker_.CalledOnValidThread());
166 if (synchronous_entry_in_use_by_worker_) { 180 if (index < 0 || index >= kSimpleEntryFileCount || offset < 0 || buf_len < 0)
167 NOTIMPLEMENTED(); 181 return net::ERR_INVALID_ARGUMENT;
168 CHECK(false); 182 pending_operations_.push(
169 } 183 base::Bind(&SimpleEntryImpl::WriteDataInternal,
170 synchronous_entry_in_use_by_worker_ = true; 184 this,
171 index_->UseIfExists(key_); 185 index,
172 SynchronousOperationCallback sync_operation_callback = 186 offset,
173 base::Bind(&SimpleEntryImpl::EntryOperationComplete, 187 make_scoped_refptr(buf),
174 this, callback); 188 buf_len,
175 WorkerPool::PostTask(FROM_HERE, 189 callback,
176 base::Bind(&SimpleSynchronousEntry::WriteData, 190 truncate));
177 base::Unretained(synchronous_entry_), 191 RunNextOperationIfNeeded();
178 index, offset, make_scoped_refptr(buf), 192 // TODO(felipeg): Optimization: Add support for optimistic writes, quickly
179 buf_len, sync_operation_callback, truncate), 193 // returning net::OK here.
180 true);
181 return net::ERR_IO_PENDING; 194 return net::ERR_IO_PENDING;
182 } 195 }
183 196
184 int SimpleEntryImpl::ReadSparseData(int64 offset, 197 int SimpleEntryImpl::ReadSparseData(int64 offset,
185 net::IOBuffer* buf, 198 net::IOBuffer* buf,
186 int buf_len, 199 int buf_len,
187 const CompletionCallback& callback) { 200 const CompletionCallback& callback) {
188 DCHECK(io_thread_checker_.CalledOnValidThread()); 201 DCHECK(io_thread_checker_.CalledOnValidThread());
189 // TODO(gavinp): Determine if the simple backend should support sparse data. 202 // TODO(gavinp): Determine if the simple backend should support sparse data.
190 NOTIMPLEMENTED(); 203 NOTIMPLEMENTED();
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
226 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { 239 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
227 DCHECK(io_thread_checker_.CalledOnValidThread()); 240 DCHECK(io_thread_checker_.CalledOnValidThread());
228 // TODO(gavinp): Determine if the simple backend should support sparse data. 241 // TODO(gavinp): Determine if the simple backend should support sparse data.
229 NOTIMPLEMENTED(); 242 NOTIMPLEMENTED();
230 return net::ERR_FAILED; 243 return net::ERR_FAILED;
231 } 244 }
232 245
233 SimpleEntryImpl::SimpleEntryImpl(const scoped_refptr<SimpleIndex>& index, 246 SimpleEntryImpl::SimpleEntryImpl(const scoped_refptr<SimpleIndex>& index,
234 const base::FilePath& path, 247 const base::FilePath& path,
235 const std::string& key) 248 const std::string& key)
236 : constructor_thread_(base::MessageLoopProxy::current()), 249 : index_(index),
237 index_(index),
238 path_(path), 250 path_(path),
239 key_(key), 251 key_(key),
240 synchronous_entry_(NULL), 252 synchronous_entry_(NULL),
241 synchronous_entry_in_use_by_worker_(false) { 253 operation_running_(false) {
254 DCHECK(index_.get());
242 } 255 }
243 256
244 SimpleEntryImpl::~SimpleEntryImpl() { 257 SimpleEntryImpl::~SimpleEntryImpl() {
245 if (synchronous_entry_) { 258 DCHECK(pending_operations_.size() == 0);
gavinp 2013/04/17 16:31:37 DCHECK_NE(0, pending_operations_.size());
felipeg 2013/04/17 16:54:59 You mean: DCHECK_EQ(0U, pending_operations_.size());
246 base::Closure close_sync_entry = 259 DCHECK(!operation_running_);
gavinp 2013/04/17 16:31:37 + DCHECK(!synchronous_entry);
felipeg 2013/04/17 16:54:59 you mean: DCHECK(synchronous_entry);
gavinp 2013/04/18 05:39:42 Hrrrm. I think I meant DCHECK(!synchronous_entry),
247 base::Bind(&SimpleSynchronousEntry::Close, 260 }
248 base::Unretained(synchronous_entry_)); 261
249 // We aren't guaranteed to be able to run IO on our constructor thread, but 262 bool SimpleEntryImpl::RunNextOperationIfNeeded() {
250 // we are also not guaranteed to be allowed to run WorkerPool::PostTask on 263 DCHECK(io_thread_checker_.CalledOnValidThread());
251 // our other threads. 264 if (pending_operations_.size() <= 0 || operation_running_)
252 if (constructor_thread_->BelongsToCurrentThread()) 265 return false;
253 WorkerPool::PostTask(FROM_HERE, close_sync_entry, true); 266 base::Closure operation = pending_operations_.front();
254 else 267 pending_operations_.pop();
255 close_sync_entry.Run(); 268 operation.Run();
256 } 269 return true;
270 }
271
272 void SimpleEntryImpl::ReadDataInternal(int index,
273 int offset,
274 scoped_refptr<net::IOBuffer> buf,
275 int buf_len,
276 const CompletionCallback& callback) {
277 DCHECK(io_thread_checker_.CalledOnValidThread());
278 DCHECK(!operation_running_);
279 operation_running_ = true;
280 index_->UseIfExists(key_);
281 SynchronousOperationCallback sync_operation_callback =
282 base::Bind(&SimpleEntryImpl::EntryOperationComplete,
283 this, callback);
284 WorkerPool::PostTask(FROM_HERE,
285 base::Bind(&SimpleSynchronousEntry::ReadData,
286 base::Unretained(synchronous_entry_),
287 index, offset, buf,
288 buf_len, sync_operation_callback),
289 true);
290 }
291
292 void SimpleEntryImpl::WriteDataInternal(int index,
293 int offset,
294 scoped_refptr<net::IOBuffer> buf,
295 int buf_len,
296 const CompletionCallback& callback,
297 bool truncate) {
298 DCHECK(io_thread_checker_.CalledOnValidThread());
299 DCHECK(!operation_running_);
300 operation_running_ = true;
301 index_->UseIfExists(key_);
302
303 last_used_ = base::Time::Now();
304 last_modified_ = base::Time::Now();
305 data_size_[index] = buf_len;
gavinp 2013/04/17 16:31:37 This isn't right; I think this is code leftover fr
felipeg 2013/04/17 16:54:59 Regardless of whether this is a leftover from fast writes
gavinp 2013/04/18 05:39:42 Yes, definitely. But data_size_[index] = buf_len w
306
307 SynchronousOperationCallback sync_operation_callback =
308 base::Bind(&SimpleEntryImpl::EntryOperationComplete,
309 this, callback);
310 WorkerPool::PostTask(FROM_HERE,
311 base::Bind(&SimpleSynchronousEntry::WriteData,
312 base::Unretained(synchronous_entry_),
313 index, offset, buf,
314 buf_len, sync_operation_callback, truncate),
315 true);
257 } 316 }
258 317
259 void SimpleEntryImpl::CreationOperationComplete( 318 void SimpleEntryImpl::CreationOperationComplete(
260 Entry** out_entry, 319 Entry** out_entry,
261 const CompletionCallback& completion_callback, 320 const CompletionCallback& completion_callback,
262 SimpleSynchronousEntry* sync_entry) { 321 SimpleSynchronousEntry* sync_entry) {
263 DCHECK(io_thread_checker_.CalledOnValidThread()); 322 DCHECK(io_thread_checker_.CalledOnValidThread());
264 if (!sync_entry) { 323 if (!sync_entry) {
265 completion_callback.Run(net::ERR_FAILED); 324 completion_callback.Run(net::ERR_FAILED);
266 // If OpenEntry failed, we must remove it from our index. 325 // If OpenEntry failed, we must remove it from our index.
(...skipping 10 matching lines...) Expand all
277 index_->Insert(key_); 336 index_->Insert(key_);
278 *out_entry = this; 337 *out_entry = this;
279 completion_callback.Run(net::OK); 338 completion_callback.Run(net::OK);
280 } 339 }
281 340
282 void SimpleEntryImpl::EntryOperationComplete( 341 void SimpleEntryImpl::EntryOperationComplete(
283 const CompletionCallback& completion_callback, 342 const CompletionCallback& completion_callback,
284 int result) { 343 int result) {
285 DCHECK(io_thread_checker_.CalledOnValidThread()); 344 DCHECK(io_thread_checker_.CalledOnValidThread());
286 DCHECK(synchronous_entry_); 345 DCHECK(synchronous_entry_);
287 DCHECK(synchronous_entry_in_use_by_worker_); 346 DCHECK(operation_running_);
288 synchronous_entry_in_use_by_worker_ = false; 347
348 operation_running_ = false;
289 SetSynchronousData(); 349 SetSynchronousData();
290 if (result >= 0) { 350 if (result >= 0) {
291 index_->UpdateEntrySize(synchronous_entry_->key(), 351 index_->UpdateEntrySize(synchronous_entry_->key(),
292 synchronous_entry_->GetFileSize()); 352 synchronous_entry_->GetFileSize());
293 } else { 353 } else {
294 index_->Remove(synchronous_entry_->key()); 354 index_->Remove(synchronous_entry_->key());
295 } 355 }
296 completion_callback.Run(result); 356 completion_callback.Run(result);
357 RunNextOperationIfNeeded();
297 } 358 }
298 359
299 void SimpleEntryImpl::SetSynchronousData() { 360 void SimpleEntryImpl::SetSynchronousData() {
300 DCHECK(io_thread_checker_.CalledOnValidThread()); 361 DCHECK(io_thread_checker_.CalledOnValidThread());
301 DCHECK(!synchronous_entry_in_use_by_worker_); 362 DCHECK(!operation_running_);
302 // TODO(felipeg): These copies to avoid data races are not optimal. While 363 // TODO(felipeg): These copies to avoid data races are not optimal. While
303 // adding an IO thread index (for fast misses etc...), we can store this data 364 // adding an IO thread index (for fast misses etc...), we can store this data
304 // in that structure. This also solves problems with last_used() on ext4 365 // in that structure. This also solves problems with last_used() on ext4
305 // filesystems not being accurate. 366 // filesystems not being accurate.
306 last_used_ = synchronous_entry_->last_used(); 367 last_used_ = synchronous_entry_->last_used();
307 last_modified_ = synchronous_entry_->last_modified(); 368 last_modified_ = synchronous_entry_->last_modified();
308 for (int i = 0; i < kSimpleEntryFileCount; ++i) 369 for (int i = 0; i < kSimpleEntryFileCount; ++i)
309 data_size_[i] = synchronous_entry_->data_size(i); 370 data_size_[i] = synchronous_entry_->data_size(i);
310 } 371 }
311 372
312 } // namespace disk_cache 373 } // namespace disk_cache
OLDNEW
« no previous file with comments | « net/disk_cache/simple/simple_entry_impl.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698