Chromium Code Reviews

Unified Diff: net/disk_cache/v3/entry_impl_v3.cc

Issue 14991008: Disk cache: Add base files for implementation of file format version 3. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Rebase attempt 2 Created 7 years, 7 months ago
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/hash.h"
#include "base/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/sparse_control.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

-// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
-const int kKeyFileIndex = 3;
-
-// This class implements FileIOCallback to buffer the callback from a file IO
-// operation from the actual net class.
-class SyncCallback: public disk_cache::FileIOCallback {
- public:
-  // |end_event_type| is the event type to log on completion. Logs nothing on
-  // discard, or when the NetLog is not set to log all events.
-  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
-               const net::CompletionCallback& callback,
-               net::NetLog::EventType end_event_type)
-      : entry_(entry), callback_(callback), buf_(buffer),
-        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
-    entry->AddRef();
-    entry->IncrementIoCount();
-  }
-  virtual ~SyncCallback() {}
-
-  virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
-  void Discard();
-
- private:
-  disk_cache::EntryImpl* entry_;
-  net::CompletionCallback callback_;
-  scoped_refptr<net::IOBuffer> buf_;
-  TimeTicks start_;
-  const net::NetLog::EventType end_event_type_;
-
-  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
-};
-
-void SyncCallback::OnFileIOComplete(int bytes_copied) {
-  entry_->DecrementIoCount();
-  if (!callback_.is_null()) {
-    if (entry_->net_log().IsLoggingAllEvents()) {
-      entry_->net_log().EndEvent(
-          end_event_type_,
-          disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
-    }
-    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
-    buf_ = NULL;  // Release the buffer before invoking the callback.
-    callback_.Run(bytes_copied);
-  }
-  entry_->Release();
-  delete this;
-}
-
-void SyncCallback::Discard() {
-  callback_.Reset();
-  buf_ = NULL;
-  OnFileIOComplete(0);
-}
-
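// A minimal usage sketch (an aid for reviewers, not part of the patch): a
// SyncCallback is assumed to be heap-allocated per asynchronous operation,
// deleting itself from OnFileIOComplete(); Discard() covers operations that
// finish synchronously or fail to start. Assuming the signature
// File::Read(buffer, len, offset, callback, completed):
//
//   SyncCallback* io_callback = new SyncCallback(
//       this, buf, callback, net::NetLog::TYPE_ENTRY_READ_DATA);
//   bool completed = false;
//   if (!file->Read(buf->data(), buf_len, file_offset, io_callback,
//                   &completed)) {
//     io_callback->Discard();
//     return net::ERR_CACHE_READ_FAILURE;
//   }
//   if (completed)
//     io_callback->Discard();  // The data is already in |buf|.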
const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
(...skipping 204 matching lines...)
EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

-void EntryImpl::DoomImpl() {
-  if (doomed_ || !backend_)
-    return;
-
-  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
-  backend_->InternalDoomEntry(this);
-}
-
-int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
-                            const CompletionCallback& callback) {
-  if (net_log_.IsLoggingAllEvents()) {
-    net_log_.BeginEvent(
-        net::NetLog::TYPE_ENTRY_READ_DATA,
-        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
-  }
-
-  int result = InternalReadData(index, offset, buf, buf_len, callback);
-
-  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
-    net_log_.EndEvent(
-        net::NetLog::TYPE_ENTRY_READ_DATA,
-        CreateNetLogReadWriteCompleteCallback(result));
-  }
-  return result;
-}
-
-int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
-                             const CompletionCallback& callback,
-                             bool truncate) {
-  if (net_log_.IsLoggingAllEvents()) {
-    net_log_.BeginEvent(
-        net::NetLog::TYPE_ENTRY_WRITE_DATA,
-        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
-  }
-
-  int result = InternalWriteData(index, offset, buf, buf_len, callback,
-                                 truncate);
-
-  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
-    net_log_.EndEvent(
-        net::NetLog::TYPE_ENTRY_WRITE_DATA,
-        CreateNetLogReadWriteCompleteCallback(result));
-  }
-  return result;
-}
-
-int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
-                                  const CompletionCallback& callback) {
-  DCHECK(node_.Data()->dirty || read_only_);
-  int result = InitSparseData();
-  if (net::OK != result)
-    return result;
-
-  TimeTicks start = TimeTicks::Now();
-  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
-                            callback);
-  ReportIOTime(kSparseRead, start);
-  return result;
-}
-
-int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
-                                   const CompletionCallback& callback) {
-  DCHECK(node_.Data()->dirty || read_only_);
-  int result = InitSparseData();
-  if (net::OK != result)
-    return result;
-
-  TimeTicks start = TimeTicks::Now();
-  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
-                            buf_len, callback);
-  ReportIOTime(kSparseWrite, start);
-  return result;
-}
-
-int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
-  int result = InitSparseData();
-  if (net::OK != result)
-    return result;
-
-  return sparse_->GetAvailableRange(offset, len, start);
-}
-
-void EntryImpl::CancelSparseIOImpl() {
-  if (!sparse_.get())
-    return;
-
-  sparse_->CancelIO();
-}
-
-int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
-  DCHECK(sparse_.get());
-  return sparse_->ReadyToUse(callback);
-}
-
-uint32 EntryImpl::GetHash() {
-  return entry_.Data()->hash;
-}
-
bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

(...skipping 27 matching lines...)

    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

+uint32 EntryImpl::GetHash() {
+  return entry_.Data()->hash;
+}
+
bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

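// A note on the bookkeeping above (hedged; the id scheme is implied rather
// than shown in this diff): node_.Data()->dirty stores the id returned by
// backend_->GetCurrentEntryId(), which is assumed to change between cache
// sessions. A nonzero stored id that differs from the current one (compare
// SetDirtyFlag() below) therefore marks an entry left open by an earlier,
// possibly crashed, session; InternalDoom() reuses the same field to tag the
// entry as doomed.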
-void EntryImpl::DeleteEntryData(bool everything) {
-  DCHECK(doomed_ || !everything);
-
-  if (GetEntryFlags() & PARENT_ENTRY) {
-    // We have some child entries that must go away.
-    SparseControl::DeleteChildren(this);
-  }
-
-  if (GetDataSize(0))
-    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
-  if (GetDataSize(1))
-    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
-  for (int index = 0; index < kNumStreams; index++) {
-    Addr address(entry_.Data()->data_addr[index]);
-    if (address.is_initialized()) {
-      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
-                                  unreported_size_[index], 0);
-      entry_.Data()->data_addr[index] = 0;
-      entry_.Data()->data_size[index] = 0;
-      entry_.Store();
-      DeleteData(address, index);
-    }
-  }
-
-  if (!everything)
-    return;
-
-  // Remove all traces of this entry.
-  backend_->RemoveEntry(this);
-
-  // Note that at this point node_ and entry_ are just two blocks of data, and
-  // even if they reference each other, nobody should be referencing them.
-
-  Addr address(entry_.Data()->long_key);
-  DeleteData(address, kKeyFileIndex);
-  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
-
-  backend_->DeleteBlock(entry_.address(), true);
-  entry_.Discard();
-
-  if (!LeaveRankingsBehind()) {
-    backend_->DeleteBlock(node_.address(), true);
-    node_.Discard();
-  }
-}
-
-CacheAddr EntryImpl::GetNextAddress() {
-  return entry_.Data()->next;
-}
-
-void EntryImpl::SetNextAddress(Addr address) {
-  DCHECK_NE(address.value(), entry_.address().value());
-  entry_.Data()->next = address.value();
-  bool success = entry_.Store();
-  DCHECK(success);
-}
-
-bool EntryImpl::LoadNodeAddress() {
-  Addr address(entry_.Data()->rankings_node);
-  if (!node_.LazyInit(backend_->File(address), address))
-    return false;
-  return node_.Load();
-}
-
-bool EntryImpl::Update() {
-  DCHECK(node_.HasData());
-
-  if (read_only_)
-    return true;
-
-  RankingsNode* rankings = node_.Data();
-  if (!rankings->dirty) {
-    rankings->dirty = backend_->GetCurrentEntryId();
-    if (!node_.Store())
-      return false;
-  }
-  return true;
-}
-
-void EntryImpl::SetDirtyFlag(int32 current_id) {
-  DCHECK(node_.HasData());
-  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
-    dirty_ = true;
-
-  if (!current_id)
-    dirty_ = true;
-}
-
-void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
-  node_.Data()->dirty = new_id;
-  node_.Store();
-}
-
-bool EntryImpl::LeaveRankingsBehind() {
-  return !node_.Data()->contents;
-}
-
// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should be set from the entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
(...skipping 86 matching lines...)
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

-void EntryImpl::IncrementIoCount() {
-  backend_->IncrementIoCount();
-}
-
-void EntryImpl::DecrementIoCount() {
-  if (backend_)
-    backend_->DecrementIoCount();
-}
-
-void EntryImpl::OnEntryCreated(BackendImpl* backend) {
-  // Just grab a reference to the background queue.
-  background_queue_ = backend->GetBackgroundQueue();
-}
-
void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

-void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
-  if (!backend_)
-    return;
-
-  switch (op) {
-    case kRead:
-      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
-      break;
-    case kWrite:
-      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
-      break;
-    case kSparseRead:
-      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
-      break;
-    case kSparseWrite:
-      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
-      break;
-    case kAsyncIO:
-      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
-      break;
-    case kReadAsync1:
-      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
-      break;
-    case kWriteAsync1:
-      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
-      break;
-    default:
-      NOTREACHED();
-  }
-}
-
void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

-// static
-int EntryImpl::NumBlocksForEntry(int key_size) {
-  // The longest key that can be stored using one block.
-  int key1_len =
-      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));
-
-  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
-    return 1;
-
-  return ((key_size - key1_len) / 256 + 2);
-}
-
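// Worked example for NumBlocksForEntry(), with illustrative numbers: assume
// sizeof(EntryStore) == 256 and offsetof(EntryStore, key) == 96, so
// key1_len == 160 and each extra 256-byte block adds 256 bytes of key space:
//   NumBlocksForEntry(100) -> 1  (the key fits in the first block)
//   NumBlocksForEntry(300) -> (300 - 160) / 256 + 2 = 2
//   NumBlocksForEntry(500) -> (500 - 160) / 256 + 2 = 3
// Keys longer than kMaxInternalKeyLength go to a separate key file, so the
// entry record itself is back to a single block.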
// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_)
    background_queue_->DoomEntryImpl(this);
}

+void EntryImpl::DoomImpl() {
+  if (doomed_ || !backend_)
+    return;
+
+  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
+  backend_->InternalDoomEntry(this);
+}
+
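// Doom()/DoomImpl() above shows the split used for every operation in this
// file: the public method posts the work to background_queue_ and the *Impl
// twin runs on the cache thread. A hedged sketch of the queue side (class
// and method names are assumptions; the queue is not part of this diff):
//
//   void InFlightBackendIO::DoomEntryImpl(EntryImpl* entry) {
//     scoped_refptr<BackendIO> operation(new BackendIO(this, backend_));
//     operation->DoomEntryImpl(entry);
//     PostOperation(operation);  // Hop to the cache thread.
//   }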
void EntryImpl::Close() {
  if (background_queue_)
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

(...skipping 58 matching lines...)
  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

+int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+                            const CompletionCallback& callback) {
+  if (net_log_.IsLoggingAllEvents()) {
+    net_log_.BeginEvent(
+        net::NetLog::TYPE_ENTRY_READ_DATA,
+        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
+  }
+
+  int result = InternalReadData(index, offset, buf, buf_len, callback);
+
+  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+    net_log_.EndEvent(
+        net::NetLog::TYPE_ENTRY_READ_DATA,
+        CreateNetLogReadWriteCompleteCallback(result));
+  }
+  return result;
+}
+
int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

+int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
+                             const CompletionCallback& callback,
+                             bool truncate) {
+  if (net_log_.IsLoggingAllEvents()) {
+    net_log_.BeginEvent(
+        net::NetLog::TYPE_ENTRY_WRITE_DATA,
+        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
+  }
+
+  int result = InternalWriteData(index, offset, buf, buf_len, callback,
+                                 truncate);
+
+  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
+    net_log_.EndEvent(
+        net::NetLog::TYPE_ENTRY_WRITE_DATA,
+        CreateNetLogReadWriteCompleteCallback(result));
+  }
+  return result;
+}
+
int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

+int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+                                  const CompletionCallback& callback) {
+  DCHECK(node_.Data()->dirty || read_only_);
+  int result = InitSparseData();
+  if (net::OK != result)
+    return result;
+
+  TimeTicks start = TimeTicks::Now();
+  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
+                            callback);
+  ReportIOTime(kSparseRead, start);
+  return result;
+}
+
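// InitSparseData() itself is outside this diff; a plausible sketch of the
// lazy initialization it performs (assumed shape, not the verbatim source):
//
//   int EntryImpl::InitSparseData() {
//     if (sparse_.get())
//       return net::OK;
//     // Use a local scoper so sparse_ stays unset if Init() fails.
//     scoped_ptr<SparseControl> sparse(new SparseControl(this));
//     int result = sparse->Init();
//     if (net::OK == result)
//       sparse_.swap(sparse);
//     return result;
//   }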
int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

+int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
+                                   const CompletionCallback& callback) {
+  DCHECK(node_.Data()->dirty || read_only_);
+  int result = InitSparseData();
+  if (net::OK != result)
+    return result;
+
+  TimeTicks start = TimeTicks::Now();
+  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
+                            buf_len, callback);
+  ReportIOTime(kSparseWrite, start);
+  return result;
+}
+
int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

+int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
+  int result = InitSparseData();
+  if (net::OK != result)
+    return result;
+
+  return sparse_->GetAvailableRange(offset, len, start);
+}
+
bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_)
    background_queue_->CancelSparseIO(this);
}

+void EntryImpl::CancelSparseIOImpl() {
+  if (!sparse_.get())
+    return;
+
+  sparse_->CancelIO();
+}
+
int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

+int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
+  DCHECK(sparse_.get());
+  return sparse_->ReadyToUse(callback);
+}
+
+// ------------------------------------------------------------------------
+
// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of write /
// read partial information from an entry (don't have to worry about returning
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
  if (!backend_) {
    entry_.clear_modified();
    node_.clear_modified();

(...skipping 38 matching lines...)
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

-// ------------------------------------------------------------------------
-
int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)

(...skipping 244 matching lines...)
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

-File* EntryImpl::GetBackingFile(Addr address, int index) {
-  if (!backend_)
-    return NULL;
-
-  File* file;
-  if (address.is_separate_file())
-    file = GetExternalFile(address, index);
-  else
-    file = backend_->File(address);
-  return file;
-}
-
-File* EntryImpl::GetExternalFile(Addr address, int index) {
-  DCHECK(index >= 0 && index <= kKeyFileIndex);
-  if (!files_[index].get()) {
-    // For a key file, use mixed mode IO.
-    scoped_refptr<File> file(new File(kKeyFileIndex == index));
-    if (file->Init(backend_->GetFileName(address)))
-      files_[index].swap(file);
-  }
-  return files_[index].get();
+void EntryImpl::DeleteEntryData(bool everything) {
+  DCHECK(doomed_ || !everything);
+
+  if (GetEntryFlags() & PARENT_ENTRY) {
+    // We have some child entries that must go away.
+    SparseControl::DeleteChildren(this);
+  }
+
+  if (GetDataSize(0))
+    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
+  if (GetDataSize(1))
+    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
+  for (int index = 0; index < kNumStreams; index++) {
+    Addr address(entry_.Data()->data_addr[index]);
+    if (address.is_initialized()) {
+      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
+                                  unreported_size_[index], 0);
+      entry_.Data()->data_addr[index] = 0;
+      entry_.Data()->data_size[index] = 0;
+      entry_.Store();
+      DeleteData(address, index);
+    }
+  }
+
+  if (!everything)
+    return;
+
+  // Remove all traces of this entry.
+  backend_->RemoveEntry(this);
+
+  // Note that at this point node_ and entry_ are just two blocks of data, and
+  // even if they reference each other, nobody should be referencing them.
+
+  Addr address(entry_.Data()->long_key);
+  DeleteData(address, kKeyFileIndex);
+  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);
+
+  backend_->DeleteBlock(entry_.address(), true);
+  entry_.Discard();
+
+  if (!LeaveRankingsBehind()) {
+    backend_->DeleteBlock(node_.address(), true);
+    node_.Discard();
+  }
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to

(...skipping 260 matching lines...)
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

+void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
+  if (!backend_)
+    return;
+
+  switch (op) {
+    case kRead:
+      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
+      break;
+    case kWrite:
+      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
+      break;
+    case kSparseRead:
+      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
+      break;
+    case kSparseWrite:
+      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
+      break;
+    case kAsyncIO:
+      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
+      break;
+    case kReadAsync1:
+      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
+      break;
+    case kWriteAsync1:
+      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
+      break;
+    default:
+      NOTREACHED();
+  }
+}
+
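// Usage note (see ReadSparseDataImpl above for a live caller): callers
// capture a TimeTicks right before the synchronous part of an operation and
// pass it here. The AGE_MS variant of CACHE_UMA is assumed to record
// TimeTicks::Now() - start, in milliseconds, into a per-experiment-group
// "DiskCache.<group>.<name>" histogram.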
void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache