Index: net/disk_cache/v3/entry_impl_v3.cc
===================================================================
--- net/disk_cache/v3/entry_impl_v3.cc (revision 0)
+++ net/disk_cache/v3/entry_impl_v3.cc (working copy)
@@ -23,60 +23,6 @@
namespace {
-// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
-const int kKeyFileIndex = 3;
-
-// This class implements FileIOCallback to buffer the callback from a file IO
-// operation from the actual net class.
-class SyncCallback: public disk_cache::FileIOCallback {
- public:
- // |end_event_type| is the event type to log on completion. Logs nothing on
- // discard, or when the NetLog is not set to log all events.
- SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
- const net::CompletionCallback& callback,
- net::NetLog::EventType end_event_type)
- : entry_(entry), callback_(callback), buf_(buffer),
- start_(TimeTicks::Now()), end_event_type_(end_event_type) {
- entry->AddRef();
- entry->IncrementIoCount();
- }
- virtual ~SyncCallback() {}
-
- virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
- void Discard();
-
- private:
- disk_cache::EntryImpl* entry_;
- net::CompletionCallback callback_;
- scoped_refptr<net::IOBuffer> buf_;
- TimeTicks start_;
- const net::NetLog::EventType end_event_type_;
-
- DISALLOW_COPY_AND_ASSIGN(SyncCallback);
-};
-
-void SyncCallback::OnFileIOComplete(int bytes_copied) {
- entry_->DecrementIoCount();
- if (!callback_.is_null()) {
- if (entry_->net_log().IsLoggingAllEvents()) {
- entry_->net_log().EndEvent(
- end_event_type_,
- disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
- }
- entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
- buf_ = NULL; // Release the buffer before invoking the callback.
- callback_.Run(bytes_copied);
- }
- entry_->Release();
- delete this;
-}
-
-void SyncCallback::Discard() {
- callback_.Reset();
- buf_ = NULL;
- OnFileIOComplete(0);
-}
-
const int kMaxBufferSize = 1024 * 1024; // 1 MB.
} // namespace
@@ -95,7 +41,7 @@
buffer_.reserve(kMaxBlockSize);
}
~UserBuffer() {
- if (backend_.get())
+ if (backend_)
backend_->BufferDeleted(capacity() - kMaxBlockSize);
}
@@ -252,7 +198,7 @@
void EntryImpl::UserBuffer::Reset() {
if (!grow_allowed_) {
- if (backend_.get())
+ if (backend_)
backend_->BufferDeleted(capacity() - kMaxBlockSize);
grow_allowed_ = true;
std::vector<char> tmp;
@@ -272,7 +218,7 @@
if (required > limit)
return false;
- if (!backend_.get())
+ if (!backend_)
return false;
int to_add = std::max(required - current_size, kMaxBlockSize * 4);
@@ -301,104 +247,6 @@
}
}
-void EntryImpl::DoomImpl() {
- if (doomed_ || !backend_.get())
- return;
-
- SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
- backend_->InternalDoomEntry(this);
-}
-
-int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback) {
- if (net_log_.IsLoggingAllEvents()) {
- net_log_.BeginEvent(
- net::NetLog::TYPE_ENTRY_READ_DATA,
- CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
- }
-
- int result = InternalReadData(index, offset, buf, buf_len, callback);
-
- if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
- net_log_.EndEvent(
- net::NetLog::TYPE_ENTRY_READ_DATA,
- CreateNetLogReadWriteCompleteCallback(result));
- }
- return result;
-}
-
-int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback,
- bool truncate) {
- if (net_log_.IsLoggingAllEvents()) {
- net_log_.BeginEvent(
- net::NetLog::TYPE_ENTRY_WRITE_DATA,
- CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
- }
-
- int result = InternalWriteData(index, offset, buf, buf_len, callback,
- truncate);
-
- if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
- net_log_.EndEvent(
- net::NetLog::TYPE_ENTRY_WRITE_DATA,
- CreateNetLogReadWriteCompleteCallback(result));
- }
- return result;
-}
-
-int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback) {
- DCHECK(node_.Data()->dirty || read_only_);
- int result = InitSparseData();
- if (net::OK != result)
- return result;
-
- TimeTicks start = TimeTicks::Now();
- result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
- callback);
- ReportIOTime(kSparseRead, start);
- return result;
-}
-
-int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
- const CompletionCallback& callback) {
- DCHECK(node_.Data()->dirty || read_only_);
- int result = InitSparseData();
- if (net::OK != result)
- return result;
-
- TimeTicks start = TimeTicks::Now();
- result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
- buf_len, callback);
- ReportIOTime(kSparseWrite, start);
- return result;
-}
-
-int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
- int result = InitSparseData();
- if (net::OK != result)
- return result;
-
- return sparse_->GetAvailableRange(offset, len, start);
-}
-
-void EntryImpl::CancelSparseIOImpl() {
- if (!sparse_.get())
- return;
-
- sparse_->CancelIO();
-}
-
-int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
- DCHECK(sparse_.get());
- return sparse_->ReadyToUse(callback);
-}
-
-uint32 EntryImpl::GetHash() {
- return entry_.Data()->hash;
-}
-
bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
uint32 hash) {
Trace("Create entry In");
@@ -446,6 +294,10 @@
return true;
}
+uint32 EntryImpl::GetHash() {
+ return entry_.Data()->hash;
+}
+
bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
if (entry_.Data()->hash != hash ||
static_cast<size_t>(entry_.Data()->key_len) != key.size())
@@ -464,103 +316,6 @@
doomed_ = true;
}
-void EntryImpl::DeleteEntryData(bool everything) {
- DCHECK(doomed_ || !everything);
-
- if (GetEntryFlags() & PARENT_ENTRY) {
- // We have some child entries that must go away.
- SparseControl::DeleteChildren(this);
- }
-
- if (GetDataSize(0))
- CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
- if (GetDataSize(1))
- CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
- for (int index = 0; index < kNumStreams; index++) {
- Addr address(entry_.Data()->data_addr[index]);
- if (address.is_initialized()) {
- backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
- unreported_size_[index], 0);
- entry_.Data()->data_addr[index] = 0;
- entry_.Data()->data_size[index] = 0;
- entry_.Store();
- DeleteData(address, index);
- }
- }
-
- if (!everything)
- return;
-
- // Remove all traces of this entry.
- backend_->RemoveEntry(this);
-
- // Note that at this point node_ and entry_ are just two blocks of data, and
- // even if they reference each other, nobody should be referencing them.
-
- Addr address(entry_.Data()->long_key);
- DeleteData(address, kKeyFileIndex);
- backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
-
- backend_->DeleteBlock(entry_.address(), true);
- entry_.Discard();
-
- if (!LeaveRankingsBehind()) {
- backend_->DeleteBlock(node_.address(), true);
- node_.Discard();
- }
-}
-
-CacheAddr EntryImpl::GetNextAddress() {
- return entry_.Data()->next;
-}
-
-void EntryImpl::SetNextAddress(Addr address) {
- DCHECK_NE(address.value(), entry_.address().value());
- entry_.Data()->next = address.value();
- bool success = entry_.Store();
- DCHECK(success);
-}
-
-bool EntryImpl::LoadNodeAddress() {
- Addr address(entry_.Data()->rankings_node);
- if (!node_.LazyInit(backend_->File(address), address))
- return false;
- return node_.Load();
-}
-
-bool EntryImpl::Update() {
- DCHECK(node_.HasData());
-
- if (read_only_)
- return true;
-
- RankingsNode* rankings = node_.Data();
- if (!rankings->dirty) {
- rankings->dirty = backend_->GetCurrentEntryId();
- if (!node_.Store())
- return false;
- }
- return true;
-}
-
-void EntryImpl::SetDirtyFlag(int32 current_id) {
- DCHECK(node_.HasData());
- if (node_.Data()->dirty && current_id != node_.Data()->dirty)
- dirty_ = true;
-
- if (!current_id)
- dirty_ = true;
-}
-
-void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
- node_.Data()->dirty = new_id;
- node_.Store();
-}
-
-bool EntryImpl::LeaveRankingsBehind() {
- return !node_.Data()->contents;
-}
-
// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
@@ -667,57 +422,12 @@
entry_.Store();
}
-void EntryImpl::IncrementIoCount() {
- backend_->IncrementIoCount();
-}
-
-void EntryImpl::DecrementIoCount() {
- if (backend_.get())
- backend_->DecrementIoCount();
-}
-
-void EntryImpl::OnEntryCreated(BackendImpl* backend) {
- // Just grab a reference to the backround queue.
- background_queue_ = backend->GetBackgroundQueue();
-}
-
void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
node_.Data()->last_used = last_used.ToInternalValue();
node_.Data()->last_modified = last_modified.ToInternalValue();
node_.set_modified();
}
-void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
- if (!backend_.get())
- return;
-
- switch (op) {
- case kRead:
- CACHE_UMA(AGE_MS, "ReadTime", 0, start);
- break;
- case kWrite:
- CACHE_UMA(AGE_MS, "WriteTime", 0, start);
- break;
- case kSparseRead:
- CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
- break;
- case kSparseWrite:
- CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
- break;
- case kAsyncIO:
- CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
- break;
- case kReadAsync1:
- CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
- break;
- case kWriteAsync1:
- CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
- break;
- default:
- NOTREACHED();
- }
-}
-
void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
DCHECK(!net_log_.net_log());
net_log_ = net::BoundNetLog::Make(
@@ -731,27 +441,23 @@
return net_log_;
}
-// static
-int EntryImpl::NumBlocksForEntry(int key_size) {
- // The longest key that can be stored using one block.
- int key1_len =
- static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));
-
- if (key_size < key1_len || key_size > kMaxInternalKeyLength)
- return 1;
-
- return ((key_size - key1_len) / 256 + 2);
-}
-
// ------------------------------------------------------------------------
void EntryImpl::Doom() {
- if (background_queue_.get())
+ if (background_queue_)
background_queue_->DoomEntryImpl(this);
}
+void EntryImpl::DoomImpl() {
+ if (doomed_ || !backend_)
+ return;
+
+ SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+ backend_->InternalDoomEntry(this);
+}
+
void EntryImpl::Close() {
- if (background_queue_.get())
+ if (background_queue_)
background_queue_->CloseEntryImpl(this);
}
@@ -821,13 +527,31 @@
if (buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!background_queue_.get())
+ if (!background_queue_)
return net::ERR_UNEXPECTED;
background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
return net::ERR_IO_PENDING;
}
+int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
+ }
+
+ int result = InternalReadData(index, offset, buf, buf_len, callback);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_READ_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
const CompletionCallback& callback, bool truncate) {
if (callback.is_null())
@@ -840,7 +564,7 @@
if (offset < 0 || buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!background_queue_.get())
+ if (!background_queue_)
return net::ERR_UNEXPECTED;
background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
@@ -848,39 +572,95 @@
return net::ERR_IO_PENDING;
}
+int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback,
+ bool truncate) {
+ if (net_log_.IsLoggingAllEvents()) {
+ net_log_.BeginEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
+ }
+
+ int result = InternalWriteData(index, offset, buf, buf_len, callback,
+ truncate);
+
+ if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+ net_log_.EndEvent(
+ net::NetLog::TYPE_ENTRY_WRITE_DATA,
+ CreateNetLogReadWriteCompleteCallback(result));
+ }
+ return result;
+}
+
int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
const CompletionCallback& callback) {
if (callback.is_null())
return ReadSparseDataImpl(offset, buf, buf_len, callback);
- if (!background_queue_.get())
+ if (!background_queue_)
return net::ERR_UNEXPECTED;
background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
return net::ERR_IO_PENDING;
}
+int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ TimeTicks start = TimeTicks::Now();
+ result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
+ callback);
+ ReportIOTime(kSparseRead, start);
+ return result;
+}
+
int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
const CompletionCallback& callback) {
if (callback.is_null())
return WriteSparseDataImpl(offset, buf, buf_len, callback);
- if (!background_queue_.get())
+ if (!background_queue_)
return net::ERR_UNEXPECTED;
background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
return net::ERR_IO_PENDING;
}
+int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+ const CompletionCallback& callback) {
+ DCHECK(node_.Data()->dirty || read_only_);
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ TimeTicks start = TimeTicks::Now();
+ result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
+ buf_len, callback);
+ ReportIOTime(kSparseWrite, start);
+ return result;
+}
+
int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
const CompletionCallback& callback) {
- if (!background_queue_.get())
+ if (!background_queue_)
return net::ERR_UNEXPECTED;
background_queue_->GetAvailableRange(this, offset, len, start, callback);
return net::ERR_IO_PENDING;
}
+int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
+ int result = InitSparseData();
+ if (net::OK != result)
+ return result;
+
+ return sparse_->GetAvailableRange(offset, len, start);
+}
+
bool EntryImpl::CouldBeSparse() const {
if (sparse_.get())
return true;
@@ -891,21 +671,35 @@
}
void EntryImpl::CancelSparseIO() {
- if (background_queue_.get())
+ if (background_queue_)
background_queue_->CancelSparseIO(this);
}
+void EntryImpl::CancelSparseIOImpl() {
+ if (!sparse_.get())
+ return;
+
+ sparse_->CancelIO();
+}
+
int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
if (!sparse_.get())
return net::OK;
- if (!background_queue_.get())
+ if (!background_queue_)
return net::ERR_UNEXPECTED;
background_queue_->ReadyForSparseIO(this, callback);
return net::ERR_IO_PENDING;
}
+int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
+ DCHECK(sparse_.get());
+ return sparse_->ReadyToUse(callback);
+}
+
+// ------------------------------------------------------------------------
+
// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
@@ -913,7 +707,7 @@
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
- if (!backend_.get()) {
+ if (!backend_) {
entry_.clear_modified();
node_.clear_modified();
return;
@@ -964,8 +758,6 @@
backend_->OnEntryDestroyEnd();
}
-// ------------------------------------------------------------------------
-
int EntryImpl::InternalReadData(int index, int offset,
IOBuffer* buf, int buf_len,
const CompletionCallback& callback) {
@@ -981,7 +773,7 @@
if (buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!backend_.get())
+ if (!backend_)
return net::ERR_UNEXPECTED;
TimeTicks start = TimeTicks::Now();
@@ -1063,7 +855,7 @@
if (offset < 0 || buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!backend_.get())
+ if (!backend_)
return net::ERR_UNEXPECTED;
int max_file_size = backend_->MaxFileSize();
@@ -1171,7 +963,7 @@
bool EntryImpl::CreateBlock(int size, Addr* address) {
DCHECK(!address->is_initialized());
- if (!backend_.get())
+ if (!backend_)
return false;
FileType file_type = Addr::RequiredFileType(size);
@@ -1196,7 +988,7 @@
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
- DCHECK(backend_.get());
+ DCHECK(backend_);
if (!address.is_initialized())
return;
if (address.is_separate_file()) {
@@ -1206,7 +998,7 @@
LOG(ERROR) << "Failed to delete " <<
backend_->GetFileName(address).value() << " from the cache.";
}
- if (files_[index].get())
+ if (files_[index])
files_[index] = NULL; // Releases the object.
} else {
backend_->DeleteBlock(address, true);
@@ -1214,7 +1006,7 @@
}
void EntryImpl::UpdateRank(bool modified) {
- if (!backend_.get())
+ if (!backend_)
return;
if (!doomed_) {
@@ -1230,27 +1022,50 @@
node_.Data()->last_modified = current.ToInternalValue();
}
-File* EntryImpl::GetBackingFile(Addr address, int index) {
- if (!backend_.get())
- return NULL;
+void EntryImpl::DeleteEntryData(bool everything) {
+ DCHECK(doomed_ || !everything);
- File* file;
- if (address.is_separate_file())
- file = GetExternalFile(address, index);
- else
- file = backend_->File(address);
- return file;
-}
+ if (GetEntryFlags() & PARENT_ENTRY) {
+ // We have some child entries that must go away.
+ SparseControl::DeleteChildren(this);
+ }
-File* EntryImpl::GetExternalFile(Addr address, int index) {
- DCHECK(index >= 0 && index <= kKeyFileIndex);
- if (!files_[index].get()) {
- // For a key file, use mixed mode IO.
- scoped_refptr<File> file(new File(kKeyFileIndex == index));
- if (file->Init(backend_->GetFileName(address)))
- files_[index].swap(file);
+ if (GetDataSize(0))
+ CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
+ if (GetDataSize(1))
+ CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
+ for (int index = 0; index < kNumStreams; index++) {
+ Addr address(entry_.Data()->data_addr[index]);
+ if (address.is_initialized()) {
+ backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+ unreported_size_[index], 0);
+ entry_.Data()->data_addr[index] = 0;
+ entry_.Data()->data_size[index] = 0;
+ entry_.Store();
+ DeleteData(address, index);
+ }
+ }
- return files_[index].get();
+
+ if (!everything)
+ return;
+
+ // Remove all traces of this entry.
+ backend_->RemoveEntry(this);
+
+ // Note that at this point node_ and entry_ are just two blocks of data, and
+ // even if they reference each other, nobody should be referencing them.
+
+ Addr address(entry_.Data()->long_key);
+ DeleteData(address, kKeyFileIndex);
+ backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+ backend_->DeleteBlock(entry_.address(), true);
+ entry_.Discard();
+
+ if (!LeaveRankingsBehind()) {
+ backend_->DeleteBlock(node_.address(), true);
+ node_.Discard();
+ }
}
// We keep a memory buffer for everything that ends up stored on a block file
@@ -1505,7 +1320,7 @@
}
void EntryImpl::GetData(int index, char** buffer, Addr* address) {
- DCHECK(backend_.get());
+ DCHECK(backend_);
if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
!user_buffers_[index]->Start()) {
// The data is already in memory, just copy it and we're done.
@@ -1531,6 +1346,37 @@
}
}
+void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
+ if (!backend_)
+ return;
+
+ switch (op) {
+ case kRead:
+ CACHE_UMA(AGE_MS, "ReadTime", 0, start);
+ break;
+ case kWrite:
+ CACHE_UMA(AGE_MS, "WriteTime", 0, start);
+ break;
+ case kSparseRead:
+ CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
+ break;
+ case kSparseWrite:
+ CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
+ break;
+ case kAsyncIO:
+ CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
+ break;
+ case kReadAsync1:
+ CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
+ break;
+ case kWriteAsync1:
+ CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
void EntryImpl::Log(const char* msg) {
int dirty = 0;
if (node_.HasData()) {