OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/disk_cache/entry_impl.h" | 5 #include "net/disk_cache/entry_impl.h" |
6 | 6 |
7 #include "base/hash.h" | 7 #include "base/hash.h" |
8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
10 #include "base/string_util.h" | 10 #include "base/string_util.h" |
11 #include "net/base/io_buffer.h" | 11 #include "net/base/io_buffer.h" |
12 #include "net/base/net_errors.h" | 12 #include "net/base/net_errors.h" |
13 #include "net/disk_cache/backend_impl.h" | 13 #include "net/disk_cache/backend_impl.h" |
14 #include "net/disk_cache/bitmap.h" | 14 #include "net/disk_cache/bitmap.h" |
15 #include "net/disk_cache/cache_util.h" | 15 #include "net/disk_cache/cache_util.h" |
16 #include "net/disk_cache/histogram_macros.h" | 16 #include "net/disk_cache/histogram_macros.h" |
17 #include "net/disk_cache/net_log_parameters.h" | 17 #include "net/disk_cache/net_log_parameters.h" |
18 #include "net/disk_cache/sparse_control.h" | 18 #include "net/disk_cache/sparse_control.h" |
19 | 19 |
20 using base::Time; | 20 using base::Time; |
21 using base::TimeDelta; | 21 using base::TimeDelta; |
22 using base::TimeTicks; | 22 using base::TimeTicks; |
23 | 23 |
24 namespace { | 24 namespace { |
25 | 25 |
26 // Index for the file used to store the key, if any (files_[kKeyFileIndex]). | |
27 const int kKeyFileIndex = 3; | |
28 | |
29 // This class implements FileIOCallback to buffer the callback from a file IO | |
30 // operation before it reaches the actual net class. |
31 class SyncCallback: public disk_cache::FileIOCallback { | |
32 public: | |
33 // |end_event_type| is the event type to log on completion. Logs nothing on | |
34 // discard, or when the NetLog is not set to log all events. | |
35 SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer, | |
36 const net::CompletionCallback& callback, | |
37 net::NetLog::EventType end_event_type) | |
38 : entry_(entry), callback_(callback), buf_(buffer), | |
39 start_(TimeTicks::Now()), end_event_type_(end_event_type) { | |
40 entry->AddRef(); | |
41 entry->IncrementIoCount(); | |
42 } | |
43 virtual ~SyncCallback() {} | |
44 | |
45 virtual void OnFileIOComplete(int bytes_copied) OVERRIDE; | |
46 void Discard(); | |
47 | |
48 private: | |
49 disk_cache::EntryImpl* entry_; | |
50 net::CompletionCallback callback_; | |
51 scoped_refptr<net::IOBuffer> buf_; | |
52 TimeTicks start_; | |
53 const net::NetLog::EventType end_event_type_; | |
54 | |
55 DISALLOW_COPY_AND_ASSIGN(SyncCallback); | |
56 }; | |
57 | |
58 void SyncCallback::OnFileIOComplete(int bytes_copied) { | |
59 entry_->DecrementIoCount(); | |
60 if (!callback_.is_null()) { | |
61 if (entry_->net_log().IsLoggingAllEvents()) { | |
62 entry_->net_log().EndEvent( | |
63 end_event_type_, | |
64 disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied)); | |
65 } | |
66 entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_); | |
67 buf_ = NULL; // Release the buffer before invoking the callback. | |
68 callback_.Run(bytes_copied); | |
69 } | |
70 entry_->Release(); | |
71 delete this; | |
72 } | |
73 | |
74 void SyncCallback::Discard() { | |
75 callback_.Reset(); | |
76 buf_ = NULL; | |
77 OnFileIOComplete(0); | |
78 } | |
79 | |
80 const int kMaxBufferSize = 1024 * 1024; // 1 MB. | 26 const int kMaxBufferSize = 1024 * 1024; // 1 MB. |
81 | 27 |
82 } // namespace | 28 } // namespace |
83 | 29 |
84 namespace disk_cache { | 30 namespace disk_cache { |
85 | 31 |
86 // This class handles individual memory buffers that store data before it is | 32 // This class handles individual memory buffers that store data before it is |
87 // sent to disk. The buffer can start at any offset, but if we try to write to | 33 // sent to disk. The buffer can start at any offset, but if we try to write to |
88 // anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to | 34 // anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to |
89 // zero. The buffer grows up to a size determined by the backend, to keep the | 35 // zero. The buffer grows up to a size determined by the backend, to keep the |
90 // total memory used under control. | 36 // total memory used under control. |
91 class EntryImpl::UserBuffer { | 37 class EntryImpl::UserBuffer { |
92 public: | 38 public: |
93 explicit UserBuffer(BackendImpl* backend) | 39 explicit UserBuffer(BackendImpl* backend) |
94 : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) { | 40 : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) { |
95 buffer_.reserve(kMaxBlockSize); | 41 buffer_.reserve(kMaxBlockSize); |
96 } | 42 } |
97 ~UserBuffer() { | 43 ~UserBuffer() { |
98 if (backend_.get()) | 44 if (backend_) |
99 backend_->BufferDeleted(capacity() - kMaxBlockSize); | 45 backend_->BufferDeleted(capacity() - kMaxBlockSize); |
100 } | 46 } |
101 | 47 |
102 // Returns true if we can handle writing |len| bytes to |offset|. | 48 // Returns true if we can handle writing |len| bytes to |offset|. |
103 bool PreWrite(int offset, int len); | 49 bool PreWrite(int offset, int len); |
104 | 50 |
105 // Truncates the buffer to |offset| bytes. | 51 // Truncates the buffer to |offset| bytes. |
106 void Truncate(int offset); | 52 void Truncate(int offset); |
107 | 53 |
108 // Writes |len| bytes from |buf| at the given |offset|. | 54 // Writes |len| bytes from |buf| at the given |offset|. |
(...skipping 136 matching lines...) |
245 int available = Size() - start; | 191 int available = Size() - start; |
246 DCHECK_GE(start, 0); | 192 DCHECK_GE(start, 0); |
247 DCHECK_GE(available, 0); | 193 DCHECK_GE(available, 0); |
248 len = std::min(len, available); | 194 len = std::min(len, available); |
249 memcpy(buf->data() + clean_bytes, &buffer_[start], len); | 195 memcpy(buf->data() + clean_bytes, &buffer_[start], len); |
250 return len + clean_bytes; | 196 return len + clean_bytes; |
251 } | 197 } |
252 | 198 |
253 void EntryImpl::UserBuffer::Reset() { | 199 void EntryImpl::UserBuffer::Reset() { |
254 if (!grow_allowed_) { | 200 if (!grow_allowed_) { |
255 if (backend_.get()) | 201 if (backend_) |
256 backend_->BufferDeleted(capacity() - kMaxBlockSize); | 202 backend_->BufferDeleted(capacity() - kMaxBlockSize); |
257 grow_allowed_ = true; | 203 grow_allowed_ = true; |
258 std::vector<char> tmp; | 204 std::vector<char> tmp; |
259 buffer_.swap(tmp); | 205 buffer_.swap(tmp); |
260 buffer_.reserve(kMaxBlockSize); | 206 buffer_.reserve(kMaxBlockSize); |
261 } | 207 } |
262 offset_ = 0; | 208 offset_ = 0; |
263 buffer_.clear(); | 209 buffer_.clear(); |
264 } | 210 } |
265 | 211 |
266 bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) { | 212 bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) { |
267 DCHECK_GE(required, 0); | 213 DCHECK_GE(required, 0); |
268 int current_size = capacity(); | 214 int current_size = capacity(); |
269 if (required <= current_size) | 215 if (required <= current_size) |
270 return true; | 216 return true; |
271 | 217 |
272 if (required > limit) | 218 if (required > limit) |
273 return false; | 219 return false; |
274 | 220 |
275 if (!backend_.get()) | 221 if (!backend_) |
276 return false; | 222 return false; |
277 | 223 |
278 int to_add = std::max(required - current_size, kMaxBlockSize * 4); | 224 int to_add = std::max(required - current_size, kMaxBlockSize * 4); |
279 to_add = std::max(current_size, to_add); | 225 to_add = std::max(current_size, to_add); |
280 required = std::min(current_size + to_add, limit); | 226 required = std::min(current_size + to_add, limit); |
281 | 227 |
282 grow_allowed_ = backend_->IsAllocAllowed(current_size, required); | 228 grow_allowed_ = backend_->IsAllocAllowed(current_size, required); |
283 if (!grow_allowed_) | 229 if (!grow_allowed_) |
284 return false; | 230 return false; |
285 | 231 |
286 DVLOG(3) << "Buffer grow to " << required; | 232 DVLOG(3) << "Buffer grow to " << required; |
287 | 233 |
288 buffer_.reserve(required); | 234 buffer_.reserve(required); |
289 return true; | 235 return true; |
290 } | 236 } |
291 | 237 |
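GrowBuffer, just above, grows the buffer by at least four blocks and at least doubles the current capacity, capped by |limit| and by the backend's allocation budget. A minimal standalone sketch of the same sizing policy (assuming the 16 KB kMaxBlockSize used in this file; the IsAllocAllowed check against the backend is omitted):

#include <algorithm>

// Returns the capacity GrowBuffer would reserve for |required| bytes,
// or -1 when |required| exceeds |limit|.
int NextCapacity(int current, int required, int limit) {
  const int kMaxBlockSize = 16 * 1024;  // Assumed, as in this file.
  if (required <= current)
    return current;  // Already big enough.
  if (required > limit)
    return -1;  // The request can never be satisfied.
  // Grow by at least four blocks and at least double the buffer, so a
  // long run of small writes triggers O(log n) reallocations.
  int to_add = std::max(required - current, kMaxBlockSize * 4);
  to_add = std::max(current, to_add);
  return std::min(current + to_add, limit);
}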
292 // ------------------------------------------------------------------------ | 238 // ------------------------------------------------------------------------ |
293 | 239 |
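A recurring change in the new column is dropping .get() from tests such as if (backend_.get()): both spellings work because base::WeakPtr converts to a pointer in boolean contexts and yields NULL once the pointee is gone. A small sketch of the guard pattern, with hypothetical names (Owner, DoWork):

#include "base/memory/weak_ptr.h"

// Hypothetical stand-in for BackendImpl; SupportsWeakPtr provides
// AsWeakPtr(), mirroring BackendImpl::GetWeakPtr().
class Owner : public base::SupportsWeakPtr<Owner> {
 public:
  void DoWork() {}
};

void UseGuard(const base::WeakPtr<Owner>& owner) {
  if (!owner)  // True once the Owner has been destroyed.
    return;
  owner->DoWork();  // Safe: the pointee is still alive.
}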
294 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) | 240 EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only) |
295 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), | 241 : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), |
296 backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only), | 242 backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only), |
297 dirty_(false) { | 243 dirty_(false) { |
298 entry_.LazyInit(backend->File(address), address); | 244 entry_.LazyInit(backend->File(address), address); |
299 for (int i = 0; i < kNumStreams; i++) { | 245 for (int i = 0; i < kNumStreams; i++) { |
300 unreported_size_[i] = 0; | 246 unreported_size_[i] = 0; |
301 } | 247 } |
302 } | 248 } |
303 | 249 |
304 void EntryImpl::DoomImpl() { | |
305 if (doomed_ || !backend_.get()) | |
306 return; | |
307 | |
308 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); | |
309 backend_->InternalDoomEntry(this); | |
310 } | |
311 | |
312 int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, | |
313 const CompletionCallback& callback) { | |
314 if (net_log_.IsLoggingAllEvents()) { | |
315 net_log_.BeginEvent( | |
316 net::NetLog::TYPE_ENTRY_READ_DATA, | |
317 CreateNetLogReadWriteDataCallback(index, offset, buf_len, false)); | |
318 } | |
319 | |
320 int result = InternalReadData(index, offset, buf, buf_len, callback); | |
321 | |
322 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { | |
323 net_log_.EndEvent( | |
324 net::NetLog::TYPE_ENTRY_READ_DATA, | |
325 CreateNetLogReadWriteCompleteCallback(result)); | |
326 } | |
327 return result; | |
328 } | |
329 | |
330 int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, | |
331 const CompletionCallback& callback, | |
332 bool truncate) { | |
333 if (net_log_.IsLoggingAllEvents()) { | |
334 net_log_.BeginEvent( | |
335 net::NetLog::TYPE_ENTRY_WRITE_DATA, | |
336 CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate)); | |
337 } | |
338 | |
339 int result = InternalWriteData(index, offset, buf, buf_len, callback, | |
340 truncate); | |
341 | |
342 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { | |
343 net_log_.EndEvent( | |
344 net::NetLog::TYPE_ENTRY_WRITE_DATA, | |
345 CreateNetLogReadWriteCompleteCallback(result)); | |
346 } | |
347 return result; | |
348 } | |
349 | |
350 int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, | |
351 const CompletionCallback& callback) { | |
352 DCHECK(node_.Data()->dirty || read_only_); | |
353 int result = InitSparseData(); | |
354 if (net::OK != result) | |
355 return result; | |
356 | |
357 TimeTicks start = TimeTicks::Now(); | |
358 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, | |
359 callback); | |
360 ReportIOTime(kSparseRead, start); | |
361 return result; | |
362 } | |
363 | |
364 int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, | |
365 const CompletionCallback& callback) { | |
366 DCHECK(node_.Data()->dirty || read_only_); | |
367 int result = InitSparseData(); | |
368 if (net::OK != result) | |
369 return result; | |
370 | |
371 TimeTicks start = TimeTicks::Now(); | |
372 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, | |
373 buf_len, callback); | |
374 ReportIOTime(kSparseWrite, start); | |
375 return result; | |
376 } | |
377 | |
378 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { | |
379 int result = InitSparseData(); | |
380 if (net::OK != result) | |
381 return result; | |
382 | |
383 return sparse_->GetAvailableRange(offset, len, start); | |
384 } | |
385 | |
386 void EntryImpl::CancelSparseIOImpl() { | |
387 if (!sparse_.get()) | |
388 return; | |
389 | |
390 sparse_->CancelIO(); | |
391 } | |
392 | |
393 int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) { | |
394 DCHECK(sparse_.get()); | |
395 return sparse_->ReadyToUse(callback); | |
396 } | |
397 | |
398 uint32 EntryImpl::GetHash() { | |
399 return entry_.Data()->hash; | |
400 } | |
401 | |
402 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, | 250 bool EntryImpl::CreateEntry(Addr node_address, const std::string& key, |
403 uint32 hash) { | 251 uint32 hash) { |
404 Trace("Create entry In"); | 252 Trace("Create entry In"); |
405 EntryStore* entry_store = entry_.Data(); | 253 EntryStore* entry_store = entry_.Data(); |
406 RankingsNode* node = node_.Data(); | 254 RankingsNode* node = node_.Data(); |
407 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); | 255 memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks()); |
408 memset(node, 0, sizeof(RankingsNode)); | 256 memset(node, 0, sizeof(RankingsNode)); |
409 if (!node_.LazyInit(backend_->File(node_address), node_address)) | 257 if (!node_.LazyInit(backend_->File(node_address), node_address)) |
410 return false; | 258 return false; |
411 | 259 |
(...skipping 27 matching lines...) |
439 memcpy(entry_store->key, key.data(), key.size()); | 287 memcpy(entry_store->key, key.data(), key.size()); |
440 entry_store->key[key.size()] = '\0'; | 288 entry_store->key[key.size()] = '\0'; |
441 } | 289 } |
442 backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); | 290 backend_->ModifyStorageSize(0, static_cast<int32>(key.size())); |
443 CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size())); | 291 CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size())); |
444 node->dirty = backend_->GetCurrentEntryId(); | 292 node->dirty = backend_->GetCurrentEntryId(); |
445 Log("Create Entry "); | 293 Log("Create Entry "); |
446 return true; | 294 return true; |
447 } | 295 } |
448 | 296 |
| 297 uint32 EntryImpl::GetHash() { |
| 298 return entry_.Data()->hash; |
| 299 } |
| 300 |
449 bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) { | 301 bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) { |
450 if (entry_.Data()->hash != hash || | 302 if (entry_.Data()->hash != hash || |
451 static_cast<size_t>(entry_.Data()->key_len) != key.size()) | 303 static_cast<size_t>(entry_.Data()->key_len) != key.size()) |
452 return false; | 304 return false; |
453 | 305 |
454 return (key.compare(GetKey()) == 0); | 306 return (key.compare(GetKey()) == 0); |
455 } | 307 } |
456 | 308 |
457 void EntryImpl::InternalDoom() { | 309 void EntryImpl::InternalDoom() { |
458 net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM); | 310 net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM); |
459 DCHECK(node_.HasData()); | 311 DCHECK(node_.HasData()); |
460 if (!node_.Data()->dirty) { | 312 if (!node_.Data()->dirty) { |
461 node_.Data()->dirty = backend_->GetCurrentEntryId(); | 313 node_.Data()->dirty = backend_->GetCurrentEntryId(); |
462 node_.Store(); | 314 node_.Store(); |
463 } | 315 } |
464 doomed_ = true; | 316 doomed_ = true; |
465 } | 317 } |
466 | 318 |
467 void EntryImpl::DeleteEntryData(bool everything) { | |
468 DCHECK(doomed_ || !everything); | |
469 | |
470 if (GetEntryFlags() & PARENT_ENTRY) { | |
471 // We have some child entries that must go away. | |
472 SparseControl::DeleteChildren(this); | |
473 } | |
474 | |
475 if (GetDataSize(0)) | |
476 CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0)); | |
477 if (GetDataSize(1)) | |
478 CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1)); | |
479 for (int index = 0; index < kNumStreams; index++) { | |
480 Addr address(entry_.Data()->data_addr[index]); | |
481 if (address.is_initialized()) { | |
482 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - | |
483 unreported_size_[index], 0); | |
484 entry_.Data()->data_addr[index] = 0; | |
485 entry_.Data()->data_size[index] = 0; | |
486 entry_.Store(); | |
487 DeleteData(address, index); | |
488 } | |
489 } | |
490 | |
491 if (!everything) | |
492 return; | |
493 | |
494 // Remove all traces of this entry. | |
495 backend_->RemoveEntry(this); | |
496 | |
497 // Note that at this point node_ and entry_ are just two blocks of data, and | |
498 // even if they reference each other, nobody should be referencing them. | |
499 | |
500 Addr address(entry_.Data()->long_key); | |
501 DeleteData(address, kKeyFileIndex); | |
502 backend_->ModifyStorageSize(entry_.Data()->key_len, 0); | |
503 | |
504 backend_->DeleteBlock(entry_.address(), true); | |
505 entry_.Discard(); | |
506 | |
507 if (!LeaveRankingsBehind()) { | |
508 backend_->DeleteBlock(node_.address(), true); | |
509 node_.Discard(); | |
510 } | |
511 } | |
512 | |
513 CacheAddr EntryImpl::GetNextAddress() { | |
514 return entry_.Data()->next; | |
515 } | |
516 | |
517 void EntryImpl::SetNextAddress(Addr address) { | |
518 DCHECK_NE(address.value(), entry_.address().value()); | |
519 entry_.Data()->next = address.value(); | |
520 bool success = entry_.Store(); | |
521 DCHECK(success); | |
522 } | |
523 | |
524 bool EntryImpl::LoadNodeAddress() { | |
525 Addr address(entry_.Data()->rankings_node); | |
526 if (!node_.LazyInit(backend_->File(address), address)) | |
527 return false; | |
528 return node_.Load(); | |
529 } | |
530 | |
531 bool EntryImpl::Update() { | |
532 DCHECK(node_.HasData()); | |
533 | |
534 if (read_only_) | |
535 return true; | |
536 | |
537 RankingsNode* rankings = node_.Data(); | |
538 if (!rankings->dirty) { | |
539 rankings->dirty = backend_->GetCurrentEntryId(); | |
540 if (!node_.Store()) | |
541 return false; | |
542 } | |
543 return true; | |
544 } | |
545 | |
546 void EntryImpl::SetDirtyFlag(int32 current_id) { | |
547 DCHECK(node_.HasData()); | |
548 if (node_.Data()->dirty && current_id != node_.Data()->dirty) | |
549 dirty_ = true; | |
550 | |
551 if (!current_id) | |
552 dirty_ = true; | |
553 } | |
554 | |
555 void EntryImpl::SetPointerForInvalidEntry(int32 new_id) { | |
556 node_.Data()->dirty = new_id; | |
557 node_.Store(); | |
558 } | |
559 | |
560 bool EntryImpl::LeaveRankingsBehind() { | |
561 return !node_.Data()->contents; | |
562 } | |
563 | |
564 // This only includes checks that relate to the first block of the entry (the | 319 // This only includes checks that relate to the first block of the entry (the |
565 // first 256 bytes), and values that should be set from the entry creation. | 320 // first 256 bytes), and values that should be set from the entry creation. |
566 // Basically, even if there is something wrong with this entry, we want to see | 321 // Basically, even if there is something wrong with this entry, we want to see |
567 // if it is possible to load the rankings node and delete them together. | 322 // if it is possible to load the rankings node and delete them together. |
568 bool EntryImpl::SanityCheck() { | 323 bool EntryImpl::SanityCheck() { |
569 if (!entry_.VerifyHash()) | 324 if (!entry_.VerifyHash()) |
570 return false; | 325 return false; |
571 | 326 |
572 EntryStore* stored = entry_.Data(); | 327 EntryStore* stored = entry_.Data(); |
573 if (!stored->rankings_node || stored->key_len <= 0) | 328 if (!stored->rankings_node || stored->key_len <= 0) |
(...skipping 86 matching lines...) |
660 // In general, trust the stored size as it should be in sync with the | 415 // In general, trust the stored size as it should be in sync with the |
661 // total size tracked by the backend. | 416 // total size tracked by the backend. |
662 } | 417 } |
663 } | 418 } |
664 if (data_size < 0) | 419 if (data_size < 0) |
665 stored->data_size[i] = 0; | 420 stored->data_size[i] = 0; |
666 } | 421 } |
667 entry_.Store(); | 422 entry_.Store(); |
668 } | 423 } |
669 | 424 |
670 void EntryImpl::IncrementIoCount() { | |
671 backend_->IncrementIoCount(); | |
672 } | |
673 | |
674 void EntryImpl::DecrementIoCount() { | |
675 if (backend_.get()) | |
676 backend_->DecrementIoCount(); | |
677 } | |
678 | |
679 void EntryImpl::OnEntryCreated(BackendImpl* backend) { | |
680 // Just grab a reference to the background queue. |
681 background_queue_ = backend->GetBackgroundQueue(); | |
682 } | |
683 | |
684 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { | 425 void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) { |
685 node_.Data()->last_used = last_used.ToInternalValue(); | 426 node_.Data()->last_used = last_used.ToInternalValue(); |
686 node_.Data()->last_modified = last_modified.ToInternalValue(); | 427 node_.Data()->last_modified = last_modified.ToInternalValue(); |
687 node_.set_modified(); | 428 node_.set_modified(); |
688 } | 429 } |
689 | 430 |
690 void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) { | |
691 if (!backend_.get()) | |
692 return; | |
693 | |
694 switch (op) { | |
695 case kRead: | |
696 CACHE_UMA(AGE_MS, "ReadTime", 0, start); | |
697 break; | |
698 case kWrite: | |
699 CACHE_UMA(AGE_MS, "WriteTime", 0, start); | |
700 break; | |
701 case kSparseRead: | |
702 CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); | |
703 break; | |
704 case kSparseWrite: | |
705 CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start); | |
706 break; | |
707 case kAsyncIO: | |
708 CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start); | |
709 break; | |
710 case kReadAsync1: | |
711 CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start); | |
712 break; | |
713 case kWriteAsync1: | |
714 CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start); | |
715 break; | |
716 default: | |
717 NOTREACHED(); | |
718 } | |
719 } | |
720 | |
721 void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) { | 431 void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) { |
722 DCHECK(!net_log_.net_log()); | 432 DCHECK(!net_log_.net_log()); |
723 net_log_ = net::BoundNetLog::Make( | 433 net_log_ = net::BoundNetLog::Make( |
724 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY); | 434 net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY); |
725 net_log_.BeginEvent( | 435 net_log_.BeginEvent( |
726 net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL, | 436 net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL, |
727 CreateNetLogEntryCreationCallback(this, created)); | 437 CreateNetLogEntryCreationCallback(this, created)); |
728 } | 438 } |
729 | 439 |
730 const net::BoundNetLog& EntryImpl::net_log() const { | 440 const net::BoundNetLog& EntryImpl::net_log() const { |
731 return net_log_; | 441 return net_log_; |
732 } | 442 } |
733 | 443 |
734 // static | |
735 int EntryImpl::NumBlocksForEntry(int key_size) { | |
736 // The longest key that can be stored using one block. | |
737 int key1_len = | |
738 static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key)); | |
739 | |
740 if (key_size < key1_len || key_size > kMaxInternalKeyLength) | |
741 return 1; | |
742 | |
743 return ((key_size - key1_len) / 256 + 2); | |
744 } | |
745 | |
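A quick check of the arithmetic in NumBlocksForEntry, with illustrative numbers (the real key1_len depends on the EntryStore layout, so assume key1_len == 160 and 256-byte blocks):

// key_size == 100 -> shorter than key1_len, fits inline      -> 1 block
// key_size == 160 -> (160 - 160) / 256 + 2                   -> 2 blocks
// key_size == 500 -> (500 - 160) / 256 + 2 == 1 + 2          -> 3 blocks
// key_size > kMaxInternalKeyLength -> the key is stored in its own
// file, so the entry record itself is back to a single block (the
// function returns 1).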
746 // ------------------------------------------------------------------------ | 444 // ------------------------------------------------------------------------ |
747 | 445 |
748 void EntryImpl::Doom() { | 446 void EntryImpl::Doom() { |
749 if (background_queue_.get()) | 447 if (background_queue_) |
750 background_queue_->DoomEntryImpl(this); | 448 background_queue_->DoomEntryImpl(this); |
751 } | 449 } |
752 | 450 |
| 451 void EntryImpl::DoomImpl() { |
| 452 if (doomed_ || !backend_) |
| 453 return; |
| 454 |
| 455 SetPointerForInvalidEntry(backend_->GetCurrentEntryId()); |
| 456 backend_->InternalDoomEntry(this); |
| 457 } |
| 458 |
753 void EntryImpl::Close() { | 459 void EntryImpl::Close() { |
754 if (background_queue_.get()) | 460 if (background_queue_) |
755 background_queue_->CloseEntryImpl(this); | 461 background_queue_->CloseEntryImpl(this); |
756 } | 462 } |
757 | 463 |
758 std::string EntryImpl::GetKey() const { | 464 std::string EntryImpl::GetKey() const { |
759 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); | 465 CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_); |
760 int key_len = entry->Data()->key_len; | 466 int key_len = entry->Data()->key_len; |
761 if (key_len <= kMaxInternalKeyLength) | 467 if (key_len <= kMaxInternalKeyLength) |
762 return std::string(entry->Data()->key); | 468 return std::string(entry->Data()->key); |
763 | 469 |
764 // We keep a copy of the key so that we can always return it, even if the | 470 // We keep a copy of the key so that we can always return it, even if the |
(...skipping 49 matching lines...) |
814 if (index < 0 || index >= kNumStreams) | 520 if (index < 0 || index >= kNumStreams) |
815 return net::ERR_INVALID_ARGUMENT; | 521 return net::ERR_INVALID_ARGUMENT; |
816 | 522 |
817 int entry_size = entry_.Data()->data_size[index]; | 523 int entry_size = entry_.Data()->data_size[index]; |
818 if (offset >= entry_size || offset < 0 || !buf_len) | 524 if (offset >= entry_size || offset < 0 || !buf_len) |
819 return 0; | 525 return 0; |
820 | 526 |
821 if (buf_len < 0) | 527 if (buf_len < 0) |
822 return net::ERR_INVALID_ARGUMENT; | 528 return net::ERR_INVALID_ARGUMENT; |
823 | 529 |
824 if (!background_queue_.get()) | 530 if (!background_queue_) |
825 return net::ERR_UNEXPECTED; | 531 return net::ERR_UNEXPECTED; |
826 | 532 |
827 background_queue_->ReadData(this, index, offset, buf, buf_len, callback); | 533 background_queue_->ReadData(this, index, offset, buf, buf_len, callback); |
828 return net::ERR_IO_PENDING; | 534 return net::ERR_IO_PENDING; |
829 } | 535 } |
830 | 536 |
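ReadData, just above, shows the calling convention used by every public I/O method in this file: a null callback means the work runs synchronously on the calling thread, otherwise the operation is posted to the background queue and net::ERR_IO_PENDING is returned. A caller-side sketch (OnReadDone and ReadHeader are hypothetical names; the Entry API and base::Bind are the interfaces of this era):

#include "base/bind.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/disk_cache.h"

void OnReadDone(int result) {
  // |result| is the byte count on success or a net error code.
}

void ReadHeader(disk_cache::Entry* entry) {
  const int kSize = 256;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  int rv = entry->ReadData(0, 0, buf, kSize, base::Bind(&OnReadDone));
  if (rv != net::ERR_IO_PENDING)
    OnReadDone(rv);  // Completed synchronously; the callback will not run.
}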
| 537 int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len, |
| 538 const CompletionCallback& callback) { |
| 539 if (net_log_.IsLoggingAllEvents()) { |
| 540 net_log_.BeginEvent( |
| 541 net::NetLog::TYPE_ENTRY_READ_DATA, |
| 542 CreateNetLogReadWriteDataCallback(index, offset, buf_len, false)); |
| 543 } |
| 544 |
| 545 int result = InternalReadData(index, offset, buf, buf_len, callback); |
| 546 |
| 547 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { |
| 548 net_log_.EndEvent( |
| 549 net::NetLog::TYPE_ENTRY_READ_DATA, |
| 550 CreateNetLogReadWriteCompleteCallback(result)); |
| 551 } |
| 552 return result; |
| 553 } |
| 554 |
831 int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len, | 555 int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len, |
832 const CompletionCallback& callback, bool truncate) { | 556 const CompletionCallback& callback, bool truncate) { |
833 if (callback.is_null()) | 557 if (callback.is_null()) |
834 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); | 558 return WriteDataImpl(index, offset, buf, buf_len, callback, truncate); |
835 | 559 |
836 DCHECK(node_.Data()->dirty || read_only_); | 560 DCHECK(node_.Data()->dirty || read_only_); |
837 if (index < 0 || index >= kNumStreams) | 561 if (index < 0 || index >= kNumStreams) |
838 return net::ERR_INVALID_ARGUMENT; | 562 return net::ERR_INVALID_ARGUMENT; |
839 | 563 |
840 if (offset < 0 || buf_len < 0) | 564 if (offset < 0 || buf_len < 0) |
841 return net::ERR_INVALID_ARGUMENT; | 565 return net::ERR_INVALID_ARGUMENT; |
842 | 566 |
843 if (!background_queue_.get()) | 567 if (!background_queue_) |
844 return net::ERR_UNEXPECTED; | 568 return net::ERR_UNEXPECTED; |
845 | 569 |
846 background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, | 570 background_queue_->WriteData(this, index, offset, buf, buf_len, truncate, |
847 callback); | 571 callback); |
848 return net::ERR_IO_PENDING; | 572 return net::ERR_IO_PENDING; |
849 } | 573 } |
850 | 574 |
| 575 int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len, |
| 576 const CompletionCallback& callback, |
| 577 bool truncate) { |
| 578 if (net_log_.IsLoggingAllEvents()) { |
| 579 net_log_.BeginEvent( |
| 580 net::NetLog::TYPE_ENTRY_WRITE_DATA, |
| 581 CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate)); |
| 582 } |
| 583 |
| 584 int result = InternalWriteData(index, offset, buf, buf_len, callback, |
| 585 truncate); |
| 586 |
| 587 if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) { |
| 588 net_log_.EndEvent( |
| 589 net::NetLog::TYPE_ENTRY_WRITE_DATA, |
| 590 CreateNetLogReadWriteCompleteCallback(result)); |
| 591 } |
| 592 return result; |
| 593 } |
| 594 |
851 int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len, | 595 int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len, |
852 const CompletionCallback& callback) { | 596 const CompletionCallback& callback) { |
853 if (callback.is_null()) | 597 if (callback.is_null()) |
854 return ReadSparseDataImpl(offset, buf, buf_len, callback); | 598 return ReadSparseDataImpl(offset, buf, buf_len, callback); |
855 | 599 |
856 if (!background_queue_.get()) | 600 if (!background_queue_) |
857 return net::ERR_UNEXPECTED; | 601 return net::ERR_UNEXPECTED; |
858 | 602 |
859 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); | 603 background_queue_->ReadSparseData(this, offset, buf, buf_len, callback); |
860 return net::ERR_IO_PENDING; | 604 return net::ERR_IO_PENDING; |
861 } | 605 } |
862 | 606 |
| 607 int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, |
| 608 const CompletionCallback& callback) { |
| 609 DCHECK(node_.Data()->dirty || read_only_); |
| 610 int result = InitSparseData(); |
| 611 if (net::OK != result) |
| 612 return result; |
| 613 |
| 614 TimeTicks start = TimeTicks::Now(); |
| 615 result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len, |
| 616 callback); |
| 617 ReportIOTime(kSparseRead, start); |
| 618 return result; |
| 619 } |
| 620 |
863 int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len, | 621 int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len, |
864 const CompletionCallback& callback) { | 622 const CompletionCallback& callback) { |
865 if (callback.is_null()) | 623 if (callback.is_null()) |
866 return WriteSparseDataImpl(offset, buf, buf_len, callback); | 624 return WriteSparseDataImpl(offset, buf, buf_len, callback); |
867 | 625 |
868 if (!background_queue_.get()) | 626 if (!background_queue_) |
869 return net::ERR_UNEXPECTED; | 627 return net::ERR_UNEXPECTED; |
870 | 628 |
871 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); | 629 background_queue_->WriteSparseData(this, offset, buf, buf_len, callback); |
872 return net::ERR_IO_PENDING; | 630 return net::ERR_IO_PENDING; |
873 } | 631 } |
874 | 632 |
| 633 int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len, |
| 634 const CompletionCallback& callback) { |
| 635 DCHECK(node_.Data()->dirty || read_only_); |
| 636 int result = InitSparseData(); |
| 637 if (net::OK != result) |
| 638 return result; |
| 639 |
| 640 TimeTicks start = TimeTicks::Now(); |
| 641 result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf, |
| 642 buf_len, callback); |
| 643 ReportIOTime(kSparseWrite, start); |
| 644 return result; |
| 645 } |
| 646 |
875 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, | 647 int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start, |
876 const CompletionCallback& callback) { | 648 const CompletionCallback& callback) { |
877 if (!background_queue_.get()) | 649 if (!background_queue_) |
878 return net::ERR_UNEXPECTED; | 650 return net::ERR_UNEXPECTED; |
879 | 651 |
880 background_queue_->GetAvailableRange(this, offset, len, start, callback); | 652 background_queue_->GetAvailableRange(this, offset, len, start, callback); |
881 return net::ERR_IO_PENDING; | 653 return net::ERR_IO_PENDING; |
882 } | 654 } |
883 | 655 |
| 656 int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) { |
| 657 int result = InitSparseData(); |
| 658 if (net::OK != result) |
| 659 return result; |
| 660 |
| 661 return sparse_->GetAvailableRange(offset, len, start); |
| 662 } |
| 663 |
884 bool EntryImpl::CouldBeSparse() const { | 664 bool EntryImpl::CouldBeSparse() const { |
885 if (sparse_.get()) | 665 if (sparse_.get()) |
886 return true; | 666 return true; |
887 | 667 |
888 scoped_ptr<SparseControl> sparse; | 668 scoped_ptr<SparseControl> sparse; |
889 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); | 669 sparse.reset(new SparseControl(const_cast<EntryImpl*>(this))); |
890 return sparse->CouldBeSparse(); | 670 return sparse->CouldBeSparse(); |
891 } | 671 } |
892 | 672 |
893 void EntryImpl::CancelSparseIO() { | 673 void EntryImpl::CancelSparseIO() { |
894 if (background_queue_.get()) | 674 if (background_queue_) |
895 background_queue_->CancelSparseIO(this); | 675 background_queue_->CancelSparseIO(this); |
896 } | 676 } |
897 | 677 |
| 678 void EntryImpl::CancelSparseIOImpl() { |
| 679 if (!sparse_.get()) |
| 680 return; |
| 681 |
| 682 sparse_->CancelIO(); |
| 683 } |
| 684 |
898 int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { | 685 int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { |
899 if (!sparse_.get()) | 686 if (!sparse_.get()) |
900 return net::OK; | 687 return net::OK; |
901 | 688 |
902 if (!background_queue_.get()) | 689 if (!background_queue_) |
903 return net::ERR_UNEXPECTED; | 690 return net::ERR_UNEXPECTED; |
904 | 691 |
905 background_queue_->ReadyForSparseIO(this, callback); | 692 background_queue_->ReadyForSparseIO(this, callback); |
906 return net::ERR_IO_PENDING; | 693 return net::ERR_IO_PENDING; |
907 } | 694 } |
908 | 695 |
| 696 int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) { |
| 697 DCHECK(sparse_.get()); |
| 698 return sparse_->ReadyToUse(callback); |
| 699 } |
| 700 |
| 701 // ------------------------------------------------------------------------ |
| 702 |
909 // When an entry is deleted from the cache, we clean up all the data associated | 703 // When an entry is deleted from the cache, we clean up all the data associated |
910 // with it for two reasons: to simplify the reuse of the block (we know that any | 704 // with it for two reasons: to simplify the reuse of the block (we know that any |
911 // unused block is filled with zeros), and to simplify the handling of write / | 705 // unused block is filled with zeros), and to simplify the handling of write / |
912 // read partial information from an entry (don't have to worry about returning | 706 // read partial information from an entry (don't have to worry about returning |
913 // data related to a previous cache entry because the range was not fully | 707 // data related to a previous cache entry because the range was not fully |
914 // written before). | 708 // written before). |
915 EntryImpl::~EntryImpl() { | 709 EntryImpl::~EntryImpl() { |
916 if (!backend_.get()) { | 710 if (!backend_) { |
917 entry_.clear_modified(); | 711 entry_.clear_modified(); |
918 node_.clear_modified(); | 712 node_.clear_modified(); |
919 return; | 713 return; |
920 } | 714 } |
921 Log("~EntryImpl in"); | 715 Log("~EntryImpl in"); |
922 | 716 |
923 // Save the sparse info to disk. This will generate IO for this entry and | 717 // Save the sparse info to disk. This will generate IO for this entry and |
924 // maybe for a child entry, so it is important to do it before deleting this | 718 // maybe for a child entry, so it is important to do it before deleting this |
925 // entry. | 719 // entry. |
926 sparse_.reset(); | 720 sparse_.reset(); |
(...skipping 30 matching lines...) |
957 node_.Data()->dirty = 0; | 751 node_.Data()->dirty = 0; |
958 node_.Store(); | 752 node_.Store(); |
959 } | 753 } |
960 } | 754 } |
961 | 755 |
962 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this)); | 756 Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this)); |
963 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL); | 757 net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL); |
964 backend_->OnEntryDestroyEnd(); | 758 backend_->OnEntryDestroyEnd(); |
965 } | 759 } |
966 | 760 |
967 // ------------------------------------------------------------------------ | |
968 | |
969 int EntryImpl::InternalReadData(int index, int offset, | 761 int EntryImpl::InternalReadData(int index, int offset, |
970 IOBuffer* buf, int buf_len, | 762 IOBuffer* buf, int buf_len, |
971 const CompletionCallback& callback) { | 763 const CompletionCallback& callback) { |
972 DCHECK(node_.Data()->dirty || read_only_); | 764 DCHECK(node_.Data()->dirty || read_only_); |
973 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len; | 765 DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len; |
974 if (index < 0 || index >= kNumStreams) | 766 if (index < 0 || index >= kNumStreams) |
975 return net::ERR_INVALID_ARGUMENT; | 767 return net::ERR_INVALID_ARGUMENT; |
976 | 768 |
977 int entry_size = entry_.Data()->data_size[index]; | 769 int entry_size = entry_.Data()->data_size[index]; |
978 if (offset >= entry_size || offset < 0 || !buf_len) | 770 if (offset >= entry_size || offset < 0 || !buf_len) |
979 return 0; | 771 return 0; |
980 | 772 |
981 if (buf_len < 0) | 773 if (buf_len < 0) |
982 return net::ERR_INVALID_ARGUMENT; | 774 return net::ERR_INVALID_ARGUMENT; |
983 | 775 |
984 if (!backend_.get()) | 776 if (!backend_) |
985 return net::ERR_UNEXPECTED; | 777 return net::ERR_UNEXPECTED; |
986 | 778 |
987 TimeTicks start = TimeTicks::Now(); | 779 TimeTicks start = TimeTicks::Now(); |
988 | 780 |
989 if (offset + buf_len > entry_size) | 781 if (offset + buf_len > entry_size) |
990 buf_len = entry_size - offset; | 782 buf_len = entry_size - offset; |
991 | 783 |
992 UpdateRank(false); | 784 UpdateRank(false); |
993 | 785 |
994 backend_->OnEvent(Stats::READ_DATA); | 786 backend_->OnEvent(Stats::READ_DATA); |
(...skipping 61 matching lines...) |
1056 const CompletionCallback& callback, | 848 const CompletionCallback& callback, |
1057 bool truncate) { | 849 bool truncate) { |
1058 DCHECK(node_.Data()->dirty || read_only_); | 850 DCHECK(node_.Data()->dirty || read_only_); |
1059 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; | 851 DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len; |
1060 if (index < 0 || index >= kNumStreams) | 852 if (index < 0 || index >= kNumStreams) |
1061 return net::ERR_INVALID_ARGUMENT; | 853 return net::ERR_INVALID_ARGUMENT; |
1062 | 854 |
1063 if (offset < 0 || buf_len < 0) | 855 if (offset < 0 || buf_len < 0) |
1064 return net::ERR_INVALID_ARGUMENT; | 856 return net::ERR_INVALID_ARGUMENT; |
1065 | 857 |
1066 if (!backend_.get()) | 858 if (!backend_) |
1067 return net::ERR_UNEXPECTED; | 859 return net::ERR_UNEXPECTED; |
1068 | 860 |
1069 int max_file_size = backend_->MaxFileSize(); | 861 int max_file_size = backend_->MaxFileSize(); |
1070 | 862 |
1071 // offset or buf_len could be negative numbers. | 863 // offset or buf_len could be negative numbers. |
1072 if (offset > max_file_size || buf_len > max_file_size || | 864 if (offset > max_file_size || buf_len > max_file_size || |
1073 offset + buf_len > max_file_size) { | 865 offset + buf_len > max_file_size) { |
1074 int size = offset + buf_len; | 866 int size = offset + buf_len; |
1075 if (size <= max_file_size) | 867 if (size <= max_file_size) |
1076 size = kint32max; | 868 size = kint32max; |
(...skipping 87 matching lines...) |
1164 if (!CreateBlock(size, &address)) | 956 if (!CreateBlock(size, &address)) |
1165 return false; | 957 return false; |
1166 | 958 |
1167 entry_.Data()->data_addr[index] = address.value(); | 959 entry_.Data()->data_addr[index] = address.value(); |
1168 entry_.Store(); | 960 entry_.Store(); |
1169 return true; | 961 return true; |
1170 } | 962 } |
1171 | 963 |
1172 bool EntryImpl::CreateBlock(int size, Addr* address) { | 964 bool EntryImpl::CreateBlock(int size, Addr* address) { |
1173 DCHECK(!address->is_initialized()); | 965 DCHECK(!address->is_initialized()); |
1174 if (!backend_.get()) | 966 if (!backend_) |
1175 return false; | 967 return false; |
1176 | 968 |
1177 FileType file_type = Addr::RequiredFileType(size); | 969 FileType file_type = Addr::RequiredFileType(size); |
1178 if (EXTERNAL == file_type) { | 970 if (EXTERNAL == file_type) { |
1179 if (size > backend_->MaxFileSize()) | 971 if (size > backend_->MaxFileSize()) |
1180 return false; | 972 return false; |
1181 if (!backend_->CreateExternalFile(address)) | 973 if (!backend_->CreateExternalFile(address)) |
1182 return false; | 974 return false; |
1183 } else { | 975 } else { |
1184 int num_blocks = Addr::RequiredBlocks(size, file_type); | 976 int num_blocks = Addr::RequiredBlocks(size, file_type); |
1185 | 977 |
1186 if (!backend_->CreateBlock(file_type, num_blocks, address)) | 978 if (!backend_->CreateBlock(file_type, num_blocks, address)) |
1187 return false; | 979 return false; |
1188 } | 980 } |
1189 return true; | 981 return true; |
1190 } | 982 } |
1191 | 983 |
1192 // Note that this method may end up modifying a block file so upon return the | 984 // Note that this method may end up modifying a block file so upon return the |
1193 // involved block will be free, and could be reused for something else. If there | 985 // involved block will be free, and could be reused for something else. If there |
1194 // is a crash after that point (and maybe before returning to the caller), the | 986 // is a crash after that point (and maybe before returning to the caller), the |
1195 // entry will be left dirty... and at some point it will be discarded; it is | 987 // entry will be left dirty... and at some point it will be discarded; it is |
1196 // important that the entry doesn't keep a reference to this address, or we'll | 988 // important that the entry doesn't keep a reference to this address, or we'll |
1197 // end up deleting the contents of |address| once again. | 989 // end up deleting the contents of |address| once again. |
1198 void EntryImpl::DeleteData(Addr address, int index) { | 990 void EntryImpl::DeleteData(Addr address, int index) { |
1199 DCHECK(backend_.get()); | 991 DCHECK(backend_); |
1200 if (!address.is_initialized()) | 992 if (!address.is_initialized()) |
1201 return; | 993 return; |
1202 if (address.is_separate_file()) { | 994 if (address.is_separate_file()) { |
1203 int failure = !DeleteCacheFile(backend_->GetFileName(address)); | 995 int failure = !DeleteCacheFile(backend_->GetFileName(address)); |
1204 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); | 996 CACHE_UMA(COUNTS, "DeleteFailed", 0, failure); |
1205 if (failure) { | 997 if (failure) { |
1206 LOG(ERROR) << "Failed to delete " << | 998 LOG(ERROR) << "Failed to delete " << |
1207 backend_->GetFileName(address).value() << " from the cache."; | 999 backend_->GetFileName(address).value() << " from the cache."; |
1208 } | 1000 } |
1209 if (files_[index].get()) | 1001 if (files_[index]) |
1210 files_[index] = NULL; // Releases the object. | 1002 files_[index] = NULL; // Releases the object. |
1211 } else { | 1003 } else { |
1212 backend_->DeleteBlock(address, true); | 1004 backend_->DeleteBlock(address, true); |
1213 } | 1005 } |
1214 } | 1006 } |
1215 | 1007 |
1216 void EntryImpl::UpdateRank(bool modified) { | 1008 void EntryImpl::UpdateRank(bool modified) { |
1217 if (!backend_.get()) | 1009 if (!backend_) |
1218 return; | 1010 return; |
1219 | 1011 |
1220 if (!doomed_) { | 1012 if (!doomed_) { |
1221 // Everything is handled by the backend. | 1013 // Everything is handled by the backend. |
1222 backend_->UpdateRank(this, modified); | 1014 backend_->UpdateRank(this, modified); |
1223 return; | 1015 return; |
1224 } | 1016 } |
1225 | 1017 |
1226 Time current = Time::Now(); | 1018 Time current = Time::Now(); |
1227 node_.Data()->last_used = current.ToInternalValue(); | 1019 node_.Data()->last_used = current.ToInternalValue(); |
1228 | 1020 |
1229 if (modified) | 1021 if (modified) |
1230 node_.Data()->last_modified = current.ToInternalValue(); | 1022 node_.Data()->last_modified = current.ToInternalValue(); |
1231 } | 1023 } |
1232 | 1024 |
1233 File* EntryImpl::GetBackingFile(Addr address, int index) { | 1025 void EntryImpl::DeleteEntryData(bool everything) { |
1234 if (!backend_.get()) | 1026 DCHECK(doomed_ || !everything); |
1235 return NULL; | |
1236 | 1027 |
1237 File* file; | 1028 if (GetEntryFlags() & PARENT_ENTRY) { |
1238 if (address.is_separate_file()) | 1029 // We have some child entries that must go away. |
1239 file = GetExternalFile(address, index); | 1030 SparseControl::DeleteChildren(this); |
1240 else | 1031 } |
1241 file = backend_->File(address); | |
1242 return file; | |
1243 } | |
1244 | 1032 |
1245 File* EntryImpl::GetExternalFile(Addr address, int index) { | 1033 if (GetDataSize(0)) |
1246 DCHECK(index >= 0 && index <= kKeyFileIndex); | 1034 CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0)); |
1247 if (!files_[index].get()) { | 1035 if (GetDataSize(1)) |
1248 // For a key file, use mixed mode IO. | 1036 CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1)); |
1249 scoped_refptr<File> file(new File(kKeyFileIndex == index)); | 1037 for (int index = 0; index < kNumStreams; index++) { |
1250 if (file->Init(backend_->GetFileName(address))) | 1038 Addr address(entry_.Data()->data_addr[index]); |
1251 files_[index].swap(file); | 1039 if (address.is_initialized()) { |
| 1040 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - |
| 1041 unreported_size_[index], 0); |
| 1042 entry_.Data()->data_addr[index] = 0; |
| 1043 entry_.Data()->data_size[index] = 0; |
| 1044 entry_.Store(); |
| 1045 DeleteData(address, index); |
| 1046 } |
1252 } | 1047 } |
1253 return files_[index].get(); | 1048 |
| 1049 if (!everything) |
| 1050 return; |
| 1051 |
| 1052 // Remove all traces of this entry. |
| 1053 backend_->RemoveEntry(this); |
| 1054 |
| 1055 // Note that at this point node_ and entry_ are just two blocks of data, and |
| 1056 // even if they reference each other, nobody should be referencing them. |
| 1057 |
| 1058 Addr address(entry_.Data()->long_key); |
| 1059 DeleteData(address, kKeyFileIndex); |
| 1060 backend_->ModifyStorageSize(entry_.Data()->key_len, 0); |
| 1061 |
| 1062 backend_->DeleteBlock(entry_.address(), true); |
| 1063 entry_.Discard(); |
| 1064 |
| 1065 if (!LeaveRankingsBehind()) { |
| 1066 backend_->DeleteBlock(node_.address(), true); |
| 1067 node_.Discard(); |
| 1068 } |
1254 } | 1069 } |
1255 | 1070 |
1256 // We keep a memory buffer for everything that ends up stored on a block file | 1071 // We keep a memory buffer for everything that ends up stored on a block file |
1257 // (because we don't know yet the final data size), and for some of the data | 1072 // (because we don't know yet the final data size), and for some of the data |
1258 // that end up on external files. This function will initialize that memory | 1073 // that end up on external files. This function will initialize that memory |
1259 // buffer and / or the files needed to store the data. | 1074 // buffer and / or the files needed to store the data. |
1260 // | 1075 // |
1261 // In general, a buffer may overlap data already stored on disk, and in that | 1076 // In general, a buffer may overlap data already stored on disk, and in that |
1262 // case, the contents of the buffer are the most accurate. It may also extend | 1077 // case, the contents of the buffer are the most accurate. It may also extend |
1263 // the file, but we don't want to read from disk just to keep the buffer up to | 1078 // the file, but we don't want to read from disk just to keep the buffer up to |
(...skipping 234 matching lines...) |
1498 void EntryImpl::SetEntryFlags(uint32 flags) { | 1313 void EntryImpl::SetEntryFlags(uint32 flags) { |
1499 entry_.Data()->flags |= flags; | 1314 entry_.Data()->flags |= flags; |
1500 entry_.set_modified(); | 1315 entry_.set_modified(); |
1501 } | 1316 } |
1502 | 1317 |
1503 uint32 EntryImpl::GetEntryFlags() { | 1318 uint32 EntryImpl::GetEntryFlags() { |
1504 return entry_.Data()->flags; | 1319 return entry_.Data()->flags; |
1505 } | 1320 } |
1506 | 1321 |
1507 void EntryImpl::GetData(int index, char** buffer, Addr* address) { | 1322 void EntryImpl::GetData(int index, char** buffer, Addr* address) { |
1508 DCHECK(backend_.get()); | 1323 DCHECK(backend_); |
1509 if (user_buffers_[index].get() && user_buffers_[index]->Size() && | 1324 if (user_buffers_[index].get() && user_buffers_[index]->Size() && |
1510 !user_buffers_[index]->Start()) { | 1325 !user_buffers_[index]->Start()) { |
1511 // The data is already in memory, just copy it and we're done. | 1326 // The data is already in memory, just copy it and we're done. |
1512 int data_len = entry_.Data()->data_size[index]; | 1327 int data_len = entry_.Data()->data_size[index]; |
1513 if (data_len <= user_buffers_[index]->Size()) { | 1328 if (data_len <= user_buffers_[index]->Size()) { |
1514 DCHECK(!user_buffers_[index]->Start()); | 1329 DCHECK(!user_buffers_[index]->Start()); |
1515 *buffer = new char[data_len]; | 1330 *buffer = new char[data_len]; |
1516 memcpy(*buffer, user_buffers_[index]->Data(), data_len); | 1331 memcpy(*buffer, user_buffers_[index]->Data(), data_len); |
1517 return; | 1332 return; |
1518 } | 1333 } |
1519 } | 1334 } |
1520 | 1335 |
1521 // Bad news: we'd have to read the info from disk so instead we'll just tell | 1336 // Bad news: we'd have to read the info from disk so instead we'll just tell |
1522 // the caller where to read from. | 1337 // the caller where to read from. |
1523 *buffer = NULL; | 1338 *buffer = NULL; |
1524 address->set_value(entry_.Data()->data_addr[index]); | 1339 address->set_value(entry_.Data()->data_addr[index]); |
1525 if (address->is_initialized()) { | 1340 if (address->is_initialized()) { |
1526 // Prevent us from deleting the block from the backing store. | 1341 // Prevent us from deleting the block from the backing store. |
1527 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - | 1342 backend_->ModifyStorageSize(entry_.Data()->data_size[index] - |
1528 unreported_size_[index], 0); | 1343 unreported_size_[index], 0); |
1529 entry_.Data()->data_addr[index] = 0; | 1344 entry_.Data()->data_addr[index] = 0; |
1530 entry_.Data()->data_size[index] = 0; | 1345 entry_.Data()->data_size[index] = 0; |
1531 } | 1346 } |
1532 } | 1347 } |
1533 | 1348 |
| 1349 void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) { |
| 1350 if (!backend_) |
| 1351 return; |
| 1352 |
| 1353 switch (op) { |
| 1354 case kRead: |
| 1355 CACHE_UMA(AGE_MS, "ReadTime", 0, start); |
| 1356 break; |
| 1357 case kWrite: |
| 1358 CACHE_UMA(AGE_MS, "WriteTime", 0, start); |
| 1359 break; |
| 1360 case kSparseRead: |
| 1361 CACHE_UMA(AGE_MS, "SparseReadTime", 0, start); |
| 1362 break; |
| 1363 case kSparseWrite: |
| 1364 CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start); |
| 1365 break; |
| 1366 case kAsyncIO: |
| 1367 CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start); |
| 1368 break; |
| 1369 case kReadAsync1: |
| 1370 CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start); |
| 1371 break; |
| 1372 case kWriteAsync1: |
| 1373 CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start); |
| 1374 break; |
| 1375 default: |
| 1376 NOTREACHED(); |
| 1377 } |
| 1378 } |
| 1379 |
1534 void EntryImpl::Log(const char* msg) { | 1380 void EntryImpl::Log(const char* msg) { |
1535 int dirty = 0; | 1381 int dirty = 0; |
1536 if (node_.HasData()) { | 1382 if (node_.HasData()) { |
1537 dirty = node_.Data()->dirty; | 1383 dirty = node_.Data()->dirty; |
1538 } | 1384 } |
1539 | 1385 |
1540 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), | 1386 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this), |
1541 entry_.address().value(), node_.address().value()); | 1387 entry_.address().value(), node_.address().value()); |
1542 | 1388 |
1543 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], | 1389 Trace(" data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0], |
1544 entry_.Data()->data_addr[1], entry_.Data()->long_key); | 1390 entry_.Data()->data_addr[1], entry_.Data()->long_key); |
1545 | 1391 |
1546 Trace(" doomed: %d 0x%x", doomed_, dirty); | 1392 Trace(" doomed: %d 0x%x", doomed_, dirty); |
1547 } | 1393 } |
1548 | 1394 |
1549 } // namespace disk_cache | 1395 } // namespace disk_cache |