Index: net/disk_cache/simple/simple_entry_impl.cc
diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc
index 1917a88715091ac1a37be9dae3b8fd104b6f4edf..580caf5d4c9a2c35a815c4e51de59f7b8481915b 100644
--- a/net/disk_cache/simple/simple_entry_impl.cc
+++ b/net/disk_cache/simple/simple_entry_impl.cc
@@ -88,6 +88,7 @@ int SimpleEntryImpl::DoomEntry(WeakPtr<SimpleIndex> index,
 void SimpleEntryImpl::Doom() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK(!operation_running_);
 #if defined(OS_POSIX)
   // This call to static SimpleEntryImpl::DoomEntry() will just erase the
   // underlying files. On POSIX, this is fine; the files are still open on the
@@ -101,12 +102,22 @@ void SimpleEntryImpl::Doom() {
 void SimpleEntryImpl::Close() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-  if (!synchronous_entry_in_use_by_worker_) {
-    WorkerPool::PostTask(FROM_HERE,
-                         base::Bind(&SimpleSynchronousEntry::Close,
-                                    base::Unretained(synchronous_entry_)),
-                         true);
+  if (operation_running_) {
+    // Postpone the close operation.
+    // Push it to the end of the queue, so that every pending operation runs
+    // before the entry is actually closed.
+    pending_operations_.push(
+        base::Bind(&SimpleEntryImpl::Close,
+                   weak_ptr_factory_.GetWeakPtr()));
+    return;

pasko-google - do not use, 2013/04/17 15:14:46:
  since it is not a trivial if->return case, would b…
felipeg, 2013/04/17 15:53:57:
  Done.

   }
+  DCHECK(pending_operations_.size() == 0);
+  DCHECK(!operation_running_);
+
+  WorkerPool::PostTask(FROM_HERE,
+                       base::Bind(&SimpleSynchronousEntry::Close,
+                                  base::Unretained(synchronous_entry_)),
+                       true);
   // Entry::Close() is expected to release this entry. See disk_cache.h for
   // details.
   delete this;
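
The queueing state used above is declared in simple_entry_impl.h, which this file-only diff does not show. A rough, hypothetical sketch of what those declarations presumably look like, inferred from the code in this .cc file:

    // Hypothetical sketch only; the real declarations live in
    // simple_entry_impl.h and may differ.
    // FIFO of deferred work; each element is a bound call to Close(),
    // ReadDataInternal() or WriteDataInternal().
    std::queue<base::Closure> pending_operations_;
    // True while a SimpleSynchronousEntry call is in flight on the worker
    // pool; cleared again in EntryOperationComplete().
    bool operation_running_;
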
@@ -138,27 +149,21 @@ int SimpleEntryImpl::ReadData(int index,
                               int buf_len,
                               const CompletionCallback& callback) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-  // TODO(gavinp): Add support for overlapping reads. The net::HttpCache does
-  // make overlapping read requests when multiple transactions access the same
-  // entry as read only. This might make calling SimpleSynchronousEntry::Close()
-  // correctly more tricky (see SimpleEntryImpl::EntryOperationComplete).
-  if (synchronous_entry_in_use_by_worker_) {
-    NOTIMPLEMENTED();
-    CHECK(false);
-  }
-  synchronous_entry_in_use_by_worker_ = true;
-  if (index_)
-    index_->UseIfExists(key_);
-  SynchronousOperationCallback sync_operation_callback =
-      base::Bind(&SimpleEntryImpl::EntryOperationComplete,
-                 index_, callback, weak_ptr_factory_.GetWeakPtr(),
-                 synchronous_entry_);
-  WorkerPool::PostTask(FROM_HERE,
-                       base::Bind(&SimpleSynchronousEntry::ReadData,
-                                  base::Unretained(synchronous_entry_),
-                                  index, offset, make_scoped_refptr(buf),
-                                  buf_len, sync_operation_callback),
-                       true);
+  if (index < 0 || index >= kSimpleEntryFileCount || buf_len < 0)

pasko-google - do not use, 2013/04/17 15:14:46:
  was this requested by tests? Otherwise seems like…
felipeg, 2013/04/17 15:53:57:
  Done.

+    return net::ERR_INVALID_ARGUMENT;
+  if (offset >= data_size_[index] || offset < 0 || !buf_len)
+    return 0;
+  // TODO(felipeg): Optimization: Add support for truly parallel read
+  // operations.
+  pending_operations_.push(
+      base::Bind(&SimpleEntryImpl::ReadDataInternal,
+                 weak_ptr_factory_.GetWeakPtr(),
+                 index,
+                 offset,
+                 make_scoped_refptr(buf),
+                 buf_len,
+                 callback));
+  RunNextOperationIfNeeded();
   return net::ERR_IO_PENDING;
 }
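
After this change ReadData() has three outcomes: an invalid stream index or negative length fails immediately with net::ERR_INVALID_ARGUMENT, an out-of-range offset or zero-length read returns 0 synchronously, and any other request is queued and completes through the callback. A hypothetical caller might handle it like this (the entry pointer, kReadSize and OnReadComplete are made up for illustration):

    void OnReadComplete(int bytes_read);  // invoked later for ERR_IO_PENDING

    scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kReadSize));
    int rv = entry->ReadData(0 /* stream index */, 0 /* offset */,
                             buffer.get(), kReadSize,
                             base::Bind(&OnReadComplete));
    if (rv == net::ERR_IO_PENDING) {
      // Queued behind any in-flight operation; OnReadComplete() runs later
      // on this (IO) thread with the number of bytes read.
    } else if (rv == 0) {
      // Nothing to do: offset past the end of the stream, or buf_len == 0.
    } else {
      // rv == net::ERR_INVALID_ARGUMENT: bad stream index or negative length.
    }
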
@@ -169,23 +174,22 @@ int SimpleEntryImpl::WriteData(int index,
                                const CompletionCallback& callback,
                                bool truncate) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-  if (synchronous_entry_in_use_by_worker_) {
-    NOTIMPLEMENTED();
-    CHECK(false);
-  }
-  synchronous_entry_in_use_by_worker_ = true;
-  if (index_)
-    index_->UseIfExists(key_);
-  SynchronousOperationCallback sync_operation_callback =
-      base::Bind(&SimpleEntryImpl::EntryOperationComplete,
-                 index_, callback, weak_ptr_factory_.GetWeakPtr(),
-                 synchronous_entry_);
-  WorkerPool::PostTask(FROM_HERE,
-                       base::Bind(&SimpleSynchronousEntry::WriteData,
-                                  base::Unretained(synchronous_entry_),
-                                  index, offset, make_scoped_refptr(buf),
-                                  buf_len, sync_operation_callback, truncate),
-                       true);
+  if (index < 0 || index >= kSimpleEntryFileCount || offset < 0 || buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  pending_operations_.push(
+      base::Bind(&SimpleEntryImpl::WriteDataInternal,
+                 weak_ptr_factory_.GetWeakPtr(),
+                 index,
+                 offset,
+                 make_scoped_refptr(buf),
+                 buf_len,
+                 callback,
+                 truncate));
+  RunNextOperationIfNeeded();
+
+  // TODO(felipeg): Optimization: Add support for optimistic writes, quickly
+  // returning net::OK here.
   return net::ERR_IO_PENDING;
 }
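
The optimistic-write TODO means acknowledging the write before the worker-pool task actually runs, instead of returning net::ERR_IO_PENDING. A speculative sketch of that variant, not what this patch does (EntryOperationComplete() would also have to tolerate the null callback for this to be safe):

    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal,
                   weak_ptr_factory_.GetWeakPtr(),
                   index, offset, make_scoped_refptr(buf), buf_len,
                   CompletionCallback(),  // caller is never called back
                   truncate));
    RunNextOperationIfNeeded();
    return net::OK;  // claim success before the disk write actually happens
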
@@ -245,7 +249,7 @@ SimpleEntryImpl::SimpleEntryImpl(
       path_(synchronous_entry->path()),
       key_(synchronous_entry->key()),
       synchronous_entry_(synchronous_entry),
-      synchronous_entry_in_use_by_worker_(false),
+      operation_running_(false),
       index_(index) {
   DCHECK(synchronous_entry);
   SetSynchronousData();
@@ -255,6 +259,66 @@ SimpleEntryImpl::~SimpleEntryImpl() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
 }
+bool SimpleEntryImpl::RunNextOperationIfNeeded() {
+  DCHECK(io_thread_checker_.CalledOnValidThread());
+  if (pending_operations_.size() <= 0 || operation_running_)
+    return false;
+  base::Closure operation = pending_operations_.front();
+  pending_operations_.pop();
+  operation.Run();
+  return true;
+}
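
Together with the change to EntryOperationComplete() further down, this gives strictly serialized dispatch: an operation is dequeued only when nothing is in flight, and completing the running operation triggers the next dequeue. A self-contained model of that rule in plain C++ (the class and names below are invented for illustration, not Chromium code):

    #include <functional>
    #include <iostream>
    #include <queue>

    class SerializedQueue {
     public:
      void Post(std::function<void()> op) {
        pending_.push(std::move(op));
        RunNextIfNeeded();
      }
      // Mirrors EntryOperationComplete(): clear the flag, dispatch the next op.
      void OperationComplete() {
        running_ = false;
        RunNextIfNeeded();
      }

     private:
      bool RunNextIfNeeded() {
        if (pending_.empty() || running_)
          return false;
        std::function<void()> op = std::move(pending_.front());
        pending_.pop();
        running_ = true;  // SimpleEntryImpl sets this inside *Internal() instead
        op();
        return true;
      }

      std::queue<std::function<void()>> pending_;
      bool running_ = false;
    };

    int main() {
      SerializedQueue q;
      q.Post([] { std::cout << "read\n"; });   // nothing running: starts now
      q.Post([] { std::cout << "write\n"; });  // read still "running": queued
      q.OperationComplete();                   // read finished: write starts
      q.OperationComplete();                   // write finished: queue empty
      return 0;
    }
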
+
+void SimpleEntryImpl::ReadDataInternal(int index,
+                                       int offset,
+                                       scoped_refptr<net::IOBuffer> buf,
+                                       int buf_len,
+                                       const CompletionCallback& callback) {
+  DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK(!operation_running_);
+  operation_running_ = true;
+  if (index_)
+    index_->UseIfExists(key_);
+  SynchronousOperationCallback sync_operation_callback =
+      base::Bind(&SimpleEntryImpl::EntryOperationComplete,
+                 index_, callback, weak_ptr_factory_.GetWeakPtr(),
+                 synchronous_entry_);
+  WorkerPool::PostTask(FROM_HERE,
+                       base::Bind(&SimpleSynchronousEntry::ReadData,
+                                  base::Unretained(synchronous_entry_),
+                                  index, offset, buf,
+                                  buf_len, sync_operation_callback),
+                       true);
+}
+
+void SimpleEntryImpl::WriteDataInternal(int index,
+                                        int offset,
+                                        scoped_refptr<net::IOBuffer> buf,
+                                        int buf_len,
+                                        const CompletionCallback& callback,
+                                        bool truncate) {
+  DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK(!operation_running_);
+  operation_running_ = true;
+  if (index_)
+    index_->UseIfExists(key_);
+
+  last_used_ = base::Time::Now();
+  last_modified_ = base::Time::Now();
+  data_size_[index] = buf_len;
+
+  SynchronousOperationCallback sync_operation_callback =
+      base::Bind(&SimpleEntryImpl::EntryOperationComplete,
+                 index_, callback, weak_ptr_factory_.GetWeakPtr(),
+                 synchronous_entry_);
+  WorkerPool::PostTask(FROM_HERE,
+                       base::Bind(&SimpleSynchronousEntry::WriteData,
+                                  base::Unretained(synchronous_entry_),
+                                  index, offset, buf,
+                                  buf_len, sync_operation_callback, truncate),
+                       true);
+}
+
 // static
 void SimpleEntryImpl::CreationOperationComplete(
     WeakPtr<SimpleIndex> index,
@@ -291,9 +355,10 @@ void SimpleEntryImpl::EntryOperationComplete(
   }
   if (entry) {
-    DCHECK(entry->synchronous_entry_in_use_by_worker_);
-    entry->synchronous_entry_in_use_by_worker_ = false;
+    DCHECK(entry->operation_running_);
+    entry->operation_running_ = false;
     entry->SetSynchronousData();
+    entry->RunNextOperationIfNeeded();
   } else {
     // |entry| must have had Close() called while this operation was in flight.
     // Since the simple cache now only supports one pending entry operation in
@@ -308,7 +373,7 @@ void SimpleEntryImpl::EntryOperationComplete(
 void SimpleEntryImpl::SetSynchronousData() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-  DCHECK(!synchronous_entry_in_use_by_worker_);
+  DCHECK(!operation_running_);
   // TODO(felipeg): These copies to avoid data races are not optimal. While
   // adding an IO thread index (for fast misses etc...), we can store this data
   // in that structure. This also solves problems with last_used() on ext4