| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/entry_impl.h" | 5 #include "net/disk_cache/entry_impl.h" |
| 6 | 6 |
| 7 #include "base/message_loop.h" | 7 #include "base/message_loop.h" |
| 8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
| 9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
| 10 #include "net/base/io_buffer.h" | 10 #include "net/base/io_buffer.h" |
| (...skipping 274 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 285 | 285 |
| 286 DVLOG(3) << "Buffer grow to " << required; | 286 DVLOG(3) << "Buffer grow to " << required; |
| 287 | 287 |
| 288 buffer_.reserve(required); | 288 buffer_.reserve(required); |
| 289 return true; | 289 return true; |
| 290 } | 290 } |
| 291 | 291 |
| 292 // ------------------------------------------------------------------------ | 292 // ------------------------------------------------------------------------ |
| 293 | 293 |
| 294 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) | 294 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) |
| 295 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), backend_(backend), | 295 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), |
| 296 doomed_(false), read_only_(read_only), dirty_(false) { | 296 backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only), |
| 297 dirty_(false) { |
| 297 entry_.LazyInit(backend->File(address), address); | 298 entry_.LazyInit(backend->File(address), address); |
| 298 for (int i = 0; i < kNumStreams; i++) { | 299 for (int i = 0; i < kNumStreams; i++) { |
| 299 unreported_size_[i] = 0; | 300 unreported_size_[i] = 0; |
| 300 } | 301 } |
| 301 } | 302 } |
| 302 | 303 |
| 303 void EntryImpl::DoomImpl() { | 304 void EntryImpl::DoomImpl() { |
| 304 if (doomed_) | 305 if (doomed_ || !backend_) |
| 305 return; | 306 return; |
| 306 | 307 |
| 307 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); | 308 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); |
| 308 backend_->InternalDoomEntry(this); | 309 backend_->InternalDoomEntry(this); |
| 309 } | 310 } |
| 310 | 311 |
| 311 int EntryImpl::ReadDataImpl( | 312 int EntryImpl::ReadDataImpl( |
| 312 int index, int offset, net::IOBuffer* buf, int buf_len, | 313 int index, int offset, net::IOBuffer* buf, int buf_len, |
| 313 const net::CompletionCallback& callback) { | 314 const net::CompletionCallback& callback) { |
| 314 if (net_log_.IsLoggingAllEvents()) { | 315 if (net_log_.IsLoggingAllEvents()) { |
| (...skipping 356 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 671 stored->data_size[i] = 0; | 672 stored->data_size[i] = 0; |
| 672 } | 673 } |
| 673 entry_.Store(); | 674 entry_.Store(); |
| 674 } | 675 } |
| 675 | 676 |
| 676 void EntryImpl::IncrementIoCount() { | 677 void EntryImpl::IncrementIoCount() { |
| 677 backend_->IncrementIoCount(); | 678 backend_->IncrementIoCount(); |
| 678 } | 679 } |
| 679 | 680 |
| 680 void EntryImpl::DecrementIoCount() { | 681 void EntryImpl::DecrementIoCount() { |
| 681 backend_->DecrementIoCount(); | 682 if (backend_) |
| 683 backend_->DecrementIoCount(); |
| 684 } |
| 685 |
| 686 void EntryImpl::OnEntryCreated(BackendImpl* backend) { |
| 687 // Just grab a reference to the background queue. |
| 688 background_queue_ = backend->GetBackgroundQueue(); |
| 682 } | 689 } |
| 683 | 690 |
| 684 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { | 691 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { |
| 685 node_.Data()->last_used = last_used.ToInternalValue(); | 692 node_.Data()->last_used = last_used.ToInternalValue(); |
| 686 node_.Data()->last_modified = last_modified.ToInternalValue(); | 693 node_.Data()->last_modified = last_modified.ToInternalValue(); |
| 687 node_.set_modified(); | 694 node_.set_modified(); |
| 688 } | 695 } |
| 689 | 696 |
| 690 void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) { | 697 void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) { |
| 698 if (!backend_) |
| 699 return; |
| 700 |
| 691 int group = backend_->GetSizeGroup(); | 701 int group = backend_->GetSizeGroup(); |
| 692 switch (op) { | 702 switch (op) { |
| 693 case kRead: | 703 case kRead: |
| 694 CACHE_UMA(AGE_MS, "ReadTime", group, start); | 704 CACHE_UMA(AGE_MS, "ReadTime", group, start); |
| 695 break; | 705 break; |
| 696 case kWrite: | 706 case kWrite: |
| 697 CACHE_UMA(AGE_MS, "WriteTime", group, start); | 707 CACHE_UMA(AGE_MS, "WriteTime", group, start); |
| 698 break; | 708 break; |
| 699 case kSparseRead: | 709 case kSparseRead: |
| 700 CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); | 710 CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 737 | 747 |
| 738 if (key_size < key1_len || key_size > kMaxInternalKeyLength) | 748 if (key_size < key1_len || key_size > kMaxInternalKeyLength) |
| 739 return 1; | 749 return 1; |
| 740 | 750 |
| 741 return ((key_size - key1_len) / 256 + 2); | 751 return ((key_size - key1_len) / 256 + 2); |
| 742 } | 752 } |
| 743 | 753 |
| 744 // ------------------------------------------------------------------------ | 754 // ------------------------------------------------------------------------ |
| 745 | 755 |
| 746 void EntryImpl::Doom() { | 756 void EntryImpl::Doom() { |
| 747 backend_->background_queue()->DoomEntryImpl(this); | 757 if (background_queue_) |
| 758 background_queue_->DoomEntryImpl(this); |
| 748 } | 759 } |
| 749 | 760 |
| 750 void EntryImpl::Close() { | 761 void EntryImpl::Close() { |
| 751 backend_->background_queue()->CloseEntryImpl(this); | 762 if (background_queue_) |
| 763 background_queue_->CloseEntryImpl(this); |
| 752 } | 764 } |
| 753 | 765 |
| 754 std::string EntryImpl::GetKey() const { | 766 std::string EntryImpl::GetKey() const { |
| 755 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 767 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
| 756 int key_len = entry->Data()->key_len; | 768 int key_len = entry->Data()->key_len; |
| 757 if (key_len <= kMaxInternalKeyLength) | 769 if (key_len <= kMaxInternalKeyLength) |
| 758 return std::string(entry->Data()->key); | 770 return std::string(entry->Data()->key); |
| 759 | 771 |
| 760 // We keep a copy of the key so that we can always return it, even if the | 772 // We keep a copy of the key so that we can always return it, even if the |
| 761 // backend is disabled. | 773 // backend is disabled. |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 810 if (index < 0 || index >= kNumStreams) | 822 if (index < 0 || index >= kNumStreams) |
| 811 return net::ERR_INVALID_ARGUMENT; | 823 return net::ERR_INVALID_ARGUMENT; |
| 812 | 824 |
| 813 int entry_size = entry_.Data()->data_size[index]; | 825 int entry_size = entry_.Data()->data_size[index]; |
| 814 if (offset >= entry_size || offset < 0 || !buf_len) | 826 if (offset >= entry_size || offset < 0 || !buf_len) |
| 815 return 0; | 827 return 0; |
| 816 | 828 |
| 817 if (buf_len < 0) | 829 if (buf_len < 0) |
| 818 return net::ERR_INVALID_ARGUMENT; | 830 return net::ERR_INVALID_ARGUMENT; |
| 819 | 831 |
| 820 backend_->background_queue()->ReadData(this, index, offset, buf, buf_len, | 832 if (!background_queue_) |
| 821 callback); | 833 return net::ERR_UNEXPECTED; |
| 834 |
| 835 background_queue_->ReadData(this, index, offset, buf, buf_len, callback); |
| 822 return net::ERR_IO_PENDING; | 836 return net::ERR_IO_PENDING; |
| 823 } | 837 } |
| 824 | 838 |
| 825 int EntryImpl::WriteData( | 839 int EntryImpl::WriteData( |
| 826 int index, int offset, net::IOBuffer* buf, int buf_len, | 840 int index, int offset, net::IOBuffer* buf, int buf_len, |
| 827 const net::CompletionCallback& callback, bool truncate) { | 841 const net::CompletionCallback& callback, bool truncate) { |
| 828 if (callback.is_null()) | 842 if (callback.is_null()) |
| 829 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); | 843 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); |
| 830 | 844 |
| 831 DCHECK(node_.Data()->dirty || read_only_); | 845 DCHECK(node_.Data()->dirty || read_only_); |
| 832 if (index < 0 || index >= kNumStreams) | 846 if (index < 0 || index >= kNumStreams) |
| 833 return net::ERR_INVALID_ARGUMENT; | 847 return net::ERR_INVALID_ARGUMENT; |
| 834 | 848 |
| 835 if (offset < 0 || buf_len < 0) | 849 if (offset < 0 || buf_len < 0) |
| 836 return net::ERR_INVALID_ARGUMENT; | 850 return net::ERR_INVALID_ARGUMENT; |
| 837 | 851 |
| 838 backend_->background_queue()->WriteData(this, index, offset, buf, buf_len, | 852 if (!background_queue_) |
| 839 truncate, callback); | 853 return net::ERR_UNEXPECTED; |
| 854 |
| 855 background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, |
| 856 callback); |
| 840 return net::ERR_IO_PENDING; | 857 return net::ERR_IO_PENDING; |
| 841 } | 858 } |
| 842 | 859 |
| 843 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 860 int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 844 const net::CompletionCallback& callback) { | 861 const net::CompletionCallback& callback) { |
| 845 if (callback.is_null()) | 862 if (callback.is_null()) |
| 846 return ReadSparseDataImpl(offset, buf, buf_len, callback); | 863 return ReadSparseDataImpl(offset, buf, buf_len, callback); |
| 847 | 864 |
| 848 backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len, | 865 if (!background_queue_) |
| 849 callback); | 866 return net::ERR_UNEXPECTED; |
| 867 |
| 868 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); |
| 850 return net::ERR_IO_PENDING; | 869 return net::ERR_IO_PENDING; |
| 851 } | 870 } |
| 852 | 871 |
| 853 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, | 872 int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len, |
| 854 const net::CompletionCallback& callback) { | 873 const net::CompletionCallback& callback) { |
| 855 if (callback.is_null()) | 874 if (callback.is_null()) |
| 856 return WriteSparseDataImpl(offset, buf, buf_len, callback); | 875 return WriteSparseDataImpl(offset, buf, buf_len, callback); |
| 857 | 876 |
| 858 backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len, | 877 if (!background_queue_) |
| 859 callback); | 878 return net::ERR_UNEXPECTED; |
| 879 |
| 880 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); |
| 860 return net::ERR_IO_PENDING; | 881 return net::ERR_IO_PENDING; |
| 861 } | 882 } |
| 862 | 883 |
| 863 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, | 884 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, |
| 864 const net::CompletionCallback& callback) { | 885 const net::CompletionCallback& callback) { |
| 865 backend_->background_queue()->GetAvailableRange(this, offset, len, start, | 886 if (!background_queue_) |
| 866 callback); | 887 return net::ERR_UNEXPECTED; |
| 888 |
| 889 background_queue_->GetAvailableRange(this, offset, len, start, callback); |
| 867 return net::ERR_IO_PENDING; | 890 return net::ERR_IO_PENDING; |
| 868 } | 891 } |
| 869 | 892 |
| 870 bool EntryImpl::CouldBeSparse() const { | 893 bool EntryImpl::CouldBeSparse() const { |
| 871 if (sparse_.get()) | 894 if (sparse_.get()) |
| 872 return true; | 895 return true; |
| 873 | 896 |
| 874 scoped_ptr<SparseControl> sparse; | 897 scoped_ptr<SparseControl> sparse; |
| 875 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); | 898 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); |
| 876 return sparse->CouldBeSparse(); | 899 return sparse->CouldBeSparse(); |
| 877 } | 900 } |
| 878 | 901 |
| 879 void EntryImpl::CancelSparseIO() { | 902 void EntryImpl::CancelSparseIO() { |
| 880 backend_->background_queue()->CancelSparseIO(this); | 903 if (background_queue_) |
| 904 background_queue_->CancelSparseIO(this); |
| 881 } | 905 } |
| 882 | 906 |
| 883 int EntryImpl::ReadyForSparseIO(const net::CompletionCallback& callback) { | 907 int EntryImpl::ReadyForSparseIO(const net::CompletionCallback& callback) { |
| 884 if (!sparse_.get()) | 908 if (!sparse_.get()) |
| 885 return net::OK; | 909 return net::OK; |
| 886 | 910 |
| 887 backend_->background_queue()->ReadyForSparseIO(this, callback); | 911 if (!background_queue_) |
| 912 return net::ERR_UNEXPECTED; |
| 913 |
| 914 background_queue_->ReadyForSparseIO(this, callback); |
| 888 return net::ERR_IO_PENDING; | 915 return net::ERR_IO_PENDING; |
| 889 } | 916 } |
| 890 | 917 |
| 891 // When an entry is deleted from the cache, we clean up all the data associated | 918 // When an entry is deleted from the cache, we clean up all the data associated |
| 892 // with it for two reasons: to simplify the reuse of the block (we know that any | 919 // with it for two reasons: to simplify the reuse of the block (we know that any |
| 893 // unused block is filled with zeros), and to simplify the handling of write / | 920 // unused block is filled with zeros), and to simplify the handling of write / |
| 894 // read partial information from an entry (don't have to worry about returning | 921 // read partial information from an entry (don't have to worry about returning |
| 895 // data related to a previous cache entry because the range was not fully | 922 // data related to a previous cache entry because the range was not fully |
| 896 // written before). | 923 // written before). |
| 897 EntryImpl::~EntryImpl() { | 924 EntryImpl::~EntryImpl() { |
| 925 if (!backend_) { |
| 926 entry_.clear_modified(); |
| 927 node_.clear_modified(); |
| 928 return; |
| 929 } |
| 898 Log("~EntryImpl in"); | 930 Log("~EntryImpl in"); |
| 899 | 931 |
| 900 // Save the sparse info to disk. This will generate IO for this entry and | 932 // Save the sparse info to disk. This will generate IO for this entry and |
| 901 // maybe for a child entry, so it is important to do it before deleting this | 933 // maybe for a child entry, so it is important to do it before deleting this |
| 902 // entry. | 934 // entry. |
| 903 sparse_.reset(); | 935 sparse_.reset(); |
| 904 | 936 |
| 905 // Remove this entry from the list of open entries. | 937 // Remove this entry from the list of open entries. |
| 906 backend_->OnEntryDestroyBegin(entry_.address()); | 938 backend_->OnEntryDestroyBegin(entry_.address()); |
| 907 | 939 |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 951 if (index < 0 || index >= kNumStreams) | 983 if (index < 0 || index >= kNumStreams) |
| 952 return net::ERR_INVALID_ARGUMENT; | 984 return net::ERR_INVALID_ARGUMENT; |
| 953 | 985 |
| 954 int entry_size = entry_.Data()->data_size[index]; | 986 int entry_size = entry_.Data()->data_size[index]; |
| 955 if (offset >= entry_size || offset < 0 || !buf_len) | 987 if (offset >= entry_size || offset < 0 || !buf_len) |
| 956 return 0; | 988 return 0; |
| 957 | 989 |
| 958 if (buf_len < 0) | 990 if (buf_len < 0) |
| 959 return net::ERR_INVALID_ARGUMENT; | 991 return net::ERR_INVALID_ARGUMENT; |
| 960 | 992 |
| 993 if (!backend_) |
| 994 return net::ERR_UNEXPECTED; |
| 995 |
| 961 TimeTicks start = TimeTicks::Now(); | 996 TimeTicks start = TimeTicks::Now(); |
| 962 | 997 |
| 963 if (offset + buf_len > entry_size) | 998 if (offset + buf_len > entry_size) |
| 964 buf_len = entry_size - offset; | 999 buf_len = entry_size - offset; |
| 965 | 1000 |
| 966 UpdateRank(false); | 1001 UpdateRank(false); |
| 967 | 1002 |
| 968 backend_->OnEvent(Stats::READ_DATA); | 1003 backend_->OnEvent(Stats::READ_DATA); |
| 969 backend_->OnRead(buf_len); | 1004 backend_->OnRead(buf_len); |
| 970 | 1005 |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1028 int index, int offset, net::IOBuffer* buf, int buf_len, | 1063 int index, int offset, net::IOBuffer* buf, int buf_len, |
| 1029 const net::CompletionCallback& callback, bool truncate) { | 1064 const net::CompletionCallback& callback, bool truncate) { |
| 1030 DCHECK(node_.Data()->dirty || read_only_); | 1065 DCHECK(node_.Data()->dirty || read_only_); |
| 1031 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; | 1066 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; |
| 1032 if (index < 0 || index >= kNumStreams) | 1067 if (index < 0 || index >= kNumStreams) |
| 1033 return net::ERR_INVALID_ARGUMENT; | 1068 return net::ERR_INVALID_ARGUMENT; |
| 1034 | 1069 |
| 1035 if (offset < 0 || buf_len < 0) | 1070 if (offset < 0 || buf_len < 0) |
| 1036 return net::ERR_INVALID_ARGUMENT; | 1071 return net::ERR_INVALID_ARGUMENT; |
| 1037 | 1072 |
| 1073 if (!backend_) |
| 1074 return net::ERR_UNEXPECTED; |
| 1075 |
| 1038 int max_file_size = backend_->MaxFileSize(); | 1076 int max_file_size = backend_->MaxFileSize(); |
| 1039 | 1077 |
| 1040 // offset or buf_len could be negative numbers. | 1078 // offset or buf_len could be negative numbers. |
| 1041 if (offset > max_file_size || buf_len > max_file_size || | 1079 if (offset > max_file_size || buf_len > max_file_size || |
| 1042 offset + buf_len > max_file_size) { | 1080 offset + buf_len > max_file_size) { |
| 1043 int size = offset + buf_len; | 1081 int size = offset + buf_len; |
| 1044 if (size <= max_file_size) | 1082 if (size <= max_file_size) |
| 1045 size = kint32max; | 1083 size = kint32max; |
| 1046 backend_->TooMuchStorageRequested(size); | 1084 backend_->TooMuchStorageRequested(size); |
| 1047 return net::ERR_FAILED; | 1085 return net::ERR_FAILED; |
| (...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1133 if (!CreateBlock(size, &address)) | 1171 if (!CreateBlock(size, &address)) |
| 1134 return false; | 1172 return false; |
| 1135 | 1173 |
| 1136 entry_.Data()->data_addr[index] = address.value(); | 1174 entry_.Data()->data_addr[index] = address.value(); |
| 1137 entry_.Store(); | 1175 entry_.Store(); |
| 1138 return true; | 1176 return true; |
| 1139 } | 1177 } |
| 1140 | 1178 |
| 1141 bool EntryImpl::CreateBlock(int size, Addr* address) { | 1179 bool EntryImpl::CreateBlock(int size, Addr* address) { |
| 1142 DCHECK(!address->is_initialized()); | 1180 DCHECK(!address->is_initialized()); |
| 1181 if (!backend_) |
| 1182 return false; |
| 1143 | 1183 |
| 1144 FileType file_type = Addr::RequiredFileType(size); | 1184 FileType file_type = Addr::RequiredFileType(size); |
| 1145 if (EXTERNAL == file_type) { | 1185 if (EXTERNAL == file_type) { |
| 1146 if (size > backend_->MaxFileSize()) | 1186 if (size > backend_->MaxFileSize()) |
| 1147 return false; | 1187 return false; |
| 1148 if (!backend_->CreateExternalFile(address)) | 1188 if (!backend_->CreateExternalFile(address)) |
| 1149 return false; | 1189 return false; |
| 1150 } else { | 1190 } else { |
| 1151 int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) / | 1191 int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) / |
| 1152 Addr::BlockSizeForFileType(file_type); | 1192 Addr::BlockSizeForFileType(file_type); |
| 1153 | 1193 |
| 1154 if (!backend_->CreateBlock(file_type, num_blocks, address)) | 1194 if (!backend_->CreateBlock(file_type, num_blocks, address)) |
| 1155 return false; | 1195 return false; |
| 1156 } | 1196 } |
| 1157 return true; | 1197 return true; |
| 1158 } | 1198 } |
| 1159 | 1199 |
| 1160 // Note that this method may end up modifying a block file so upon return the | 1200 // Note that this method may end up modifying a block file so upon return the |
| 1161 // involved block will be free, and could be reused for something else. If there | 1201 // involved block will be free, and could be reused for something else. If there |
| 1162 // is a crash after that point (and maybe before returning to the caller), the | 1202 // is a crash after that point (and maybe before returning to the caller), the |
| 1163 // entry will be left dirty... and at some point it will be discarded; it is | 1203 // entry will be left dirty... and at some point it will be discarded; it is |
| 1164 // important that the entry doesn't keep a reference to this address, or we'll | 1204 // important that the entry doesn't keep a reference to this address, or we'll |
| 1165 // end up deleting the contents of |address| once again. | 1205 // end up deleting the contents of |address| once again. |
| 1166 void EntryImpl::DeleteData(Addr address, int index) { | 1206 void EntryImpl::DeleteData(Addr address, int index) { |
| 1207 DCHECK(backend_); |
| 1167 if (!address.is_initialized()) | 1208 if (!address.is_initialized()) |
| 1168 return; | 1209 return; |
| 1169 if (address.is_separate_file()) { | 1210 if (address.is_separate_file()) { |
| 1170 int failure = !DeleteCacheFile(backend_->GetFileName(address)); | 1211 int failure = !DeleteCacheFile(backend_->GetFileName(address)); |
| 1171 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); | 1212 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); |
| 1172 if (failure) { | 1213 if (failure) { |
| 1173 LOG(ERROR) << "Failed to delete " << | 1214 LOG(ERROR) << "Failed to delete " << |
| 1174 backend_->GetFileName(address).value() << " from the cache."; | 1215 backend_->GetFileName(address).value() << " from the cache."; |
| 1175 } | 1216 } |
| 1176 if (files_[index]) | 1217 if (files_[index]) |
| 1177 files_[index] = NULL; // Releases the object. | 1218 files_[index] = NULL; // Releases the object. |
| 1178 } else { | 1219 } else { |
| 1179 backend_->DeleteBlock(address, true); | 1220 backend_->DeleteBlock(address, true); |
| 1180 } | 1221 } |
| 1181 } | 1222 } |
| 1182 | 1223 |
| 1183 void EntryImpl::UpdateRank(bool modified) { | 1224 void EntryImpl::UpdateRank(bool modified) { |
| 1225 if (!backend_) |
| 1226 return; |
| 1227 |
| 1184 if (!doomed_) { | 1228 if (!doomed_) { |
| 1185 // Everything is handled by the backend. | 1229 // Everything is handled by the backend. |
| 1186 backend_->UpdateRank(this, modified); | 1230 backend_->UpdateRank(this, modified); |
| 1187 return; | 1231 return; |
| 1188 } | 1232 } |
| 1189 | 1233 |
| 1190 Time current = Time::Now(); | 1234 Time current = Time::Now(); |
| 1191 node_.Data()->last_used = current.ToInternalValue(); | 1235 node_.Data()->last_used = current.ToInternalValue(); |
| 1192 | 1236 |
| 1193 if (modified) | 1237 if (modified) |
| 1194 node_.Data()->last_modified = current.ToInternalValue(); | 1238 node_.Data()->last_modified = current.ToInternalValue(); |
| 1195 } | 1239 } |
| 1196 | 1240 |
| 1197 File* EntryImpl::GetBackingFile(Addr address, int index) { | 1241 File* EntryImpl::GetBackingFile(Addr address, int index) { |
| 1242 if (!backend_) |
| 1243 return NULL; |
| 1244 |
| 1198 File* file; | 1245 File* file; |
| 1199 if (address.is_separate_file()) | 1246 if (address.is_separate_file()) |
| 1200 file = GetExternalFile(address, index); | 1247 file = GetExternalFile(address, index); |
| 1201 else | 1248 else |
| 1202 file = backend_->File(address); | 1249 file = backend_->File(address); |
| 1203 return file; | 1250 return file; |
| 1204 } | 1251 } |
| 1205 | 1252 |
| 1206 File* EntryImpl::GetExternalFile(Addr address, int index) { | 1253 File* EntryImpl::GetExternalFile(Addr address, int index) { |
| 1207 DCHECK(index >= 0 && index <= kKeyFileIndex); | 1254 DCHECK(index >= 0 && index <= kKeyFileIndex); |
| (...skipping 251 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1459 void EntryImpl::SetEntryFlags(uint32 flags) { | 1506 void EntryImpl::SetEntryFlags(uint32 flags) { |
| 1460 entry_.Data()->flags |= flags; | 1507 entry_.Data()->flags |= flags; |
| 1461 entry_.set_modified(); | 1508 entry_.set_modified(); |
| 1462 } | 1509 } |
| 1463 | 1510 |
| 1464 uint32 EntryImpl::GetEntryFlags() { | 1511 uint32 EntryImpl::GetEntryFlags() { |
| 1465 return entry_.Data()->flags; | 1512 return entry_.Data()->flags; |
| 1466 } | 1513 } |
| 1467 | 1514 |
| 1468 void EntryImpl::GetData(int index, char** buffer, Addr* address) { | 1515 void EntryImpl::GetData(int index, char** buffer, Addr* address) { |
| 1516 DCHECK(backend_); |
| 1469 if (user_buffers_[index].get() && user_buffers_[index]->Size() && | 1517 if (user_buffers_[index].get() && user_buffers_[index]->Size() && |
| 1470 !user_buffers_[index]->Start()) { | 1518 !user_buffers_[index]->Start()) { |
| 1471 // The data is already in memory, just copy it and we're done. | 1519 // The data is already in memory, just copy it and we're done. |
| 1472 int data_len = entry_.Data()->data_size[index]; | 1520 int data_len = entry_.Data()->data_size[index]; |
| 1473 if (data_len <= user_buffers_[index]->Size()) { | 1521 if (data_len <= user_buffers_[index]->Size()) { |
| 1474 DCHECK(!user_buffers_[index]->Start()); | 1522 DCHECK(!user_buffers_[index]->Start()); |
| 1475 *buffer = new char[data_len]; | 1523 *buffer = new char[data_len]; |
| 1476 memcpy(*buffer, user_buffers_[index]->Data(), data_len); | 1524 memcpy(*buffer, user_buffers_[index]->Data(), data_len); |
| 1477 return; | 1525 return; |
| 1478 } | 1526 } |
| (...skipping 21 matching lines...) Expand all Loading... |
| 1500 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 1548 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
| 1501 entry_.address().value(), node_.address().value()); | 1549 entry_.address().value(), node_.address().value()); |
| 1502 | 1550 |
| 1503 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 1551 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
| 1504 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 1552 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
| 1505 | 1553 |
| 1506 Trace(" doomed: %d 0x%x", doomed_, dirty); | 1554 Trace(" doomed: %d 0x%x", doomed_, dirty); |
| 1507 } | 1555 } |
| 1508 | 1556 |
| 1509 } // namespace disk_cache | 1557 } // namespace disk_cache |
| OLD | NEW |