Chromium Code Reviews

Side by Side Diff: net/disk_cache/simple/simple_entry_impl.cc

Issue 23983005: SimpleCache: merge the first and second stream in one file (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Removed functions from simple_util
Created 7 years, 3 months ago
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/disk_cache/simple/simple_entry_impl.h" 5 #include "net/disk_cache/simple/simple_entry_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <cstring> 8 #include <cstring>
9 #include <vector> 9 #include <vector>
10 10
(...skipping 319 matching lines...)
330 int buf_len, 330 int buf_len,
331 const CompletionCallback& callback) { 331 const CompletionCallback& callback) {
332 DCHECK(io_thread_checker_.CalledOnValidThread()); 332 DCHECK(io_thread_checker_.CalledOnValidThread());
333 333
334 if (net_log_.IsLoggingAllEvents()) { 334 if (net_log_.IsLoggingAllEvents()) {
335 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL, 335 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
336 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, 336 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
337 false)); 337 false));
338 } 338 }
339 339
340 if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || 340 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
341 buf_len < 0) { 341 buf_len < 0) {
342 if (net_log_.IsLoggingAllEvents()) { 342 if (net_log_.IsLoggingAllEvents()) {
343 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 343 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
344 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); 344 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
345 } 345 }
346 346
347 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT); 347 RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
348 return net::ERR_INVALID_ARGUMENT; 348 return net::ERR_INVALID_ARGUMENT;
349 } 349 }
350 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) || 350 if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
(...skipping 25 matching lines...)
376 bool truncate) { 376 bool truncate) {
377 DCHECK(io_thread_checker_.CalledOnValidThread()); 377 DCHECK(io_thread_checker_.CalledOnValidThread());
378 378
379 if (net_log_.IsLoggingAllEvents()) { 379 if (net_log_.IsLoggingAllEvents()) {
380 net_log_.AddEvent( 380 net_log_.AddEvent(
381 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL, 381 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
382 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len, 382 CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
383 truncate)); 383 truncate));
384 } 384 }
385 385
386 if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 || 386 if (stream_index < 0 || stream_index >= kSimpleEntryStreamCount ||
387 buf_len < 0) { 387 offset < 0 || buf_len < 0) {
388 if (net_log_.IsLoggingAllEvents()) { 388 if (net_log_.IsLoggingAllEvents()) {
389 net_log_.AddEvent( 389 net_log_.AddEvent(
390 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, 390 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
391 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT)); 391 CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
392 } 392 }
393 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT); 393 RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
394 return net::ERR_INVALID_ARGUMENT; 394 return net::ERR_INVALID_ARGUMENT;
395 } 395 }
396 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) { 396 if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
397 if (net_log_.IsLoggingAllEvents()) { 397 if (net_log_.IsLoggingAllEvents()) {
398 net_log_.AddEvent( 398 net_log_.AddEvent(
399 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END, 399 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
400 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED)); 400 CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
401 } 401 }
402 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE); 402 RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
403 return net::ERR_FAILED; 403 return net::ERR_FAILED;
404 } 404 }
405 ScopedOperationRunner operation_runner(this); 405 ScopedOperationRunner operation_runner(this);
406 406
407 // Currently, Simple Cache is only used for HTTP, which stores the headers in
408 // stream 0 and always writes them with a single, truncating write. Detect
409 // these writes and record the size and size changes of the headers. Also,
410 // note writes to stream 0 that violate those assumptions.
411 if (stream_index == 0) {
412 if (offset == 0 && truncate)
413 RecordHeaderSizeChange(cache_type_, data_size_[0], buf_len);
414 else
415 RecordUnexpectedStream0Write(cache_type_);
416 }
417
418 // We can only do optimistic Write if there is no pending operations, so 407 // We can only do optimistic Write if there is no pending operations, so
419 // that we are sure that the next call to RunNextOperationIfNeeded will 408 // that we are sure that the next call to RunNextOperationIfNeeded will
420 // actually run the write operation that sets the stream size. It also 409 // actually run the write operation that sets the stream size. It also
421 // prevents from previous possibly-conflicting writes that could be stacked 410 // prevents from previous possibly-conflicting writes that could be stacked
422 // in the |pending_operations_|. We could optimize this for when we have 411 // in the |pending_operations_|. We could optimize this for when we have
423 // only read operations enqueued. 412 // only read operations enqueued.
424 const bool optimistic = 413 const bool optimistic =
425 (use_optimistic_operations_ && state_ == STATE_READY && 414 (use_optimistic_operations_ && state_ == STATE_READY &&
426 pending_operations_.size() == 0); 415 pending_operations_.size() == 0);
427 CompletionCallback op_callback; 416 CompletionCallback op_callback;
(...skipping 259 matching lines...)
687 DCHECK_EQ(STATE_UNINITIALIZED, state_); 676 DCHECK_EQ(STATE_UNINITIALIZED, state_);
688 DCHECK(!synchronous_entry_); 677 DCHECK(!synchronous_entry_);
689 678
690 state_ = STATE_IO_PENDING; 679 state_ = STATE_IO_PENDING;
691 680
692 // Since we don't know the correct values for |last_used_| and 681 // Since we don't know the correct values for |last_used_| and
693 // |last_modified_| yet, we make this approximation. 682 // |last_modified_| yet, we make this approximation.
694 last_used_ = last_modified_ = base::Time::Now(); 683 last_used_ = last_modified_ = base::Time::Now();
695 684
696 // If creation succeeds, we should mark all streams to be saved on close. 685 // If creation succeeds, we should mark all streams to be saved on close.
697 for (int i = 0; i < kSimpleEntryFileCount; ++i) 686 for (int i = 0; i < kSimpleEntryStreamCount; ++i)
698 have_written_[i] = true; 687 have_written_[i] = true;
699 688
700 const base::TimeTicks start_time = base::TimeTicks::Now(); 689 const base::TimeTicks start_time = base::TimeTicks::Now();
701 scoped_ptr<SimpleEntryCreationResults> results( 690 scoped_ptr<SimpleEntryCreationResults> results(
702 new SimpleEntryCreationResults( 691 new SimpleEntryCreationResults(
703 SimpleEntryStat(last_used_, last_modified_, data_size_))); 692 SimpleEntryStat(last_used_, last_modified_, data_size_)));
704 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, 693 Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
705 cache_type_, 694 cache_type_,
706 path_, 695 path_,
707 key_, 696 key_,
(...skipping 14 matching lines...)
722 DCHECK(io_thread_checker_.CalledOnValidThread()); 711 DCHECK(io_thread_checker_.CalledOnValidThread());
723 typedef SimpleSynchronousEntry::CRCRecord CRCRecord; 712 typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
724 scoped_ptr<std::vector<CRCRecord> > 713 scoped_ptr<std::vector<CRCRecord> >
725 crc32s_to_write(new std::vector<CRCRecord>()); 714 crc32s_to_write(new std::vector<CRCRecord>());
726 715
727 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN); 716 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);
728 717
729 if (state_ == STATE_READY) { 718 if (state_ == STATE_READY) {
730 DCHECK(synchronous_entry_); 719 DCHECK(synchronous_entry_);
731 state_ = STATE_IO_PENDING; 720 state_ = STATE_IO_PENDING;
732 for (int i = 0; i < kSimpleEntryFileCount; ++i) { 721 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
733 if (have_written_[i]) { 722 if (have_written_[i]) {
734 if (GetDataSize(i) == crc32s_end_offset_[i]) { 723 if (GetDataSize(i) == crc32s_end_offset_[i]) {
735 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i]; 724 int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
736 crc32s_to_write->push_back(CRCRecord(i, true, crc)); 725 crc32s_to_write->push_back(CRCRecord(i, true, crc));
737 } else { 726 } else {
738 crc32s_to_write->push_back(CRCRecord(i, false, 0)); 727 crc32s_to_write->push_back(CRCRecord(i, false, 0));
739 } 728 }
740 } 729 }
741 } 730 }
742 } else { 731 } else {
743 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_); 732 DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
744 } 733 }
745 734
746 if (synchronous_entry_) { 735 if (synchronous_entry_) {
747 Closure task = 736 Closure task =
748 base::Bind(&SimpleSynchronousEntry::Close, 737 base::Bind(&SimpleSynchronousEntry::Close,
749 base::Unretained(synchronous_entry_), 738 base::Unretained(synchronous_entry_),
750 SimpleEntryStat(last_used_, last_modified_, data_size_), 739 SimpleEntryStat(last_used_, last_modified_, data_size_),
751 base::Passed(&crc32s_to_write)); 740 base::Passed(&crc32s_to_write),
741 stream_0_data_);
752 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this); 742 Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
753 synchronous_entry_ = NULL; 743 synchronous_entry_ = NULL;
754 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 744 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
755 745
756 for (int i = 0; i < kSimpleEntryFileCount; ++i) { 746 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
757 if (!have_written_[i]) { 747 if (!have_written_[i]) {
758 SIMPLE_CACHE_UMA(ENUMERATION, 748 SIMPLE_CACHE_UMA(ENUMERATION,
759 "CheckCRCResult", cache_type_, 749 "CheckCRCResult", cache_type_,
760 crc_check_state_[i], CRC_CHECK_MAX); 750 crc_check_state_[i], CRC_CHECK_MAX);
761 } 751 }
762 } 752 }
763 } else { 753 } else {
764 CloseOperationComplete(); 754 CloseOperationComplete();
765 } 755 }
766 } 756 }
(...skipping 32 matching lines...)
799 DCHECK_EQ(STATE_READY, state_); 789 DCHECK_EQ(STATE_READY, state_);
800 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) { 790 if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
801 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN); 791 RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
802 // If there is nothing to read, we bail out before setting state_ to 792 // If there is nothing to read, we bail out before setting state_ to
803 // STATE_IO_PENDING. 793 // STATE_IO_PENDING.
804 if (!callback.is_null()) 794 if (!callback.is_null())
805 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0)); 795 MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback, 0));
806 return; 796 return;
807 } 797 }
808 798
799 // Since stream 0 data is kept in memory, it is read immediately.
gavinp 2013/09/17 15:07:43 Why not catch this case in ReadData, so we can ret
clamy 2013/09/18 16:17:15 Returning with net::OK in ReadData causes some uni
gavinp 2013/09/18 17:20:35 It makes me sad that we're slowing down our code j
pasko 2013/09/18 17:35:27 I'm sorry, I just did not like to have an optimiza
clamy 2013/09/18 18:29:38 Done.
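The thread above debates short-circuiting stream 0 reads one layer higher, in ReadData(), so the result can be returned synchronously instead of being queued and posted back through a callback. A minimal sketch of that alternative, written against this file's names; the guard conditions and the exact shape of the fast path are assumptions, not the code that eventually landed:

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // Hypothetical fast path: stream 0 lives in |stream_0_data_|, so when the
  // entry is idle the copy can complete inline and the byte count is
  // returned directly, skipping the operation queue and the posted callback.
  if (stream_index == 0 && state_ == STATE_READY &&
      pending_operations_.empty() && offset >= 0 && buf_len >= 0) {
    return ReadStream0Data(buf, offset, buf_len);
  }
  // In the real method, the existing validation, NetLog, and queueing code
  // would run here; a queued read reports completion through |callback|.
  return net::ERR_IO_PENDING;
}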
800 if (stream_index == 0) {
801 int ret_value = ReadStream0Data(buf, offset, buf_len);
802 if (!callback.is_null()) {
803 MessageLoopProxy::current()->PostTask(FROM_HERE,
804 base::Bind(callback, ret_value));
805 }
806 return;
807 }
808
809 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset); 809 buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
810 810
811 state_ = STATE_IO_PENDING; 811 state_ = STATE_IO_PENDING;
812 if (!doomed_ && backend_.get()) 812 if (!doomed_ && backend_.get())
813 backend_->index()->UseIfExists(entry_hash_); 813 backend_->index()->UseIfExists(entry_hash_);
814 814
815 scoped_ptr<uint32> read_crc32(new uint32()); 815 scoped_ptr<uint32> read_crc32(new uint32());
816 scoped_ptr<int> result(new int()); 816 scoped_ptr<int> result(new int());
817 scoped_ptr<base::Time> last_used(new base::Time()); 817 scoped_ptr<SimpleEntryStat> entry_stat(
818 new SimpleEntryStat(last_used_, last_modified_, data_size_));
818 Closure task = base::Bind( 819 Closure task = base::Bind(
819 &SimpleSynchronousEntry::ReadData, 820 &SimpleSynchronousEntry::ReadData,
820 base::Unretained(synchronous_entry_), 821 base::Unretained(synchronous_entry_),
821 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len), 822 SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
822 make_scoped_refptr(buf), 823 make_scoped_refptr(buf),
823 read_crc32.get(), 824 read_crc32.get(),
824 last_used.get(), 825 entry_stat.get(),
825 result.get()); 826 result.get());
826 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, 827 Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
827 this, 828 this,
828 stream_index, 829 stream_index,
829 offset, 830 offset,
830 callback, 831 callback,
831 base::Passed(&read_crc32), 832 base::Passed(&read_crc32),
832 base::Passed(&last_used), 833 base::Passed(&entry_stat),
833 base::Passed(&result)); 834 base::Passed(&result));
834 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 835 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
835 } 836 }
836 837
837 void SimpleEntryImpl::WriteDataInternal(int stream_index, 838 void SimpleEntryImpl::WriteDataInternal(int stream_index,
838 int offset, 839 int offset,
839 net::IOBuffer* buf, 840 net::IOBuffer* buf,
840 int buf_len, 841 int buf_len,
841 const CompletionCallback& callback, 842 const CompletionCallback& callback,
842 bool truncate) { 843 bool truncate) {
(...skipping 16 matching lines...)
859 } 860 }
860 if (!callback.is_null()) { 861 if (!callback.is_null()) {
861 MessageLoopProxy::current()->PostTask( 862 MessageLoopProxy::current()->PostTask(
862 FROM_HERE, base::Bind(callback, net::ERR_FAILED)); 863 FROM_HERE, base::Bind(callback, net::ERR_FAILED));
863 } 864 }
864 // |this| may be destroyed after return here. 865 // |this| may be destroyed after return here.
865 return; 866 return;
866 } 867 }
867 868
868 DCHECK_EQ(STATE_READY, state_); 869 DCHECK_EQ(STATE_READY, state_);
870 // Since stream 0 data is kept in memory, it will be written immediatly.
pasko 2013/09/17 16:40:52 add extra empty line above please, I just like whe
clamy 2013/09/18 16:17:15 Done.
871 if (stream_index == 0) {
872 int ret_value = CopyStream0Data(buf, offset, buf_len, truncate);
873 if (!callback.is_null()) {
874 // PostTask prevents creating a loop when calling the callback directly.
875 MessageLoopProxy::current()->PostTask(FROM_HERE,
876 base::Bind(callback, ret_value));
877 }
878 return;
879 }
880
869 state_ = STATE_IO_PENDING; 881 state_ = STATE_IO_PENDING;
870 if (!doomed_ && backend_.get()) 882 if (!doomed_ && backend_.get())
871 backend_->index()->UseIfExists(entry_hash_); 883 backend_->index()->UseIfExists(entry_hash_);
872 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|) 884 // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
873 // if |offset == 0| or we have already computed the CRC for [0 .. offset). 885 // if |offset == 0| or we have already computed the CRC for [0 .. offset).
874 // We rely on most write operations being sequential, start to end to compute 886 // We rely on most write operations being sequential, start to end to compute
875 // the crc of the data. When we write to an entry and close without having 887 // the crc of the data. When we write to an entry and close without having
876 // done a sequential write, we don't check the CRC on read. 888 // done a sequential write, we don't check the CRC on read.
877 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) { 889 if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
878 uint32 initial_crc = (offset != 0) ? crc32s_[stream_index] 890 uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
(...skipping 14 matching lines...)
893 } else { 905 } else {
894 data_size_[stream_index] = std::max(offset + buf_len, 906 data_size_[stream_index] = std::max(offset + buf_len,
895 GetDataSize(stream_index)); 907 GetDataSize(stream_index));
896 } 908 }
897 909
898 // Since we don't know the correct values for |last_used_| and 910 // Since we don't know the correct values for |last_used_| and
899 // |last_modified_| yet, we make this approximation. 911 // |last_modified_| yet, we make this approximation.
900 last_used_ = last_modified_ = base::Time::Now(); 912 last_used_ = last_modified_ = base::Time::Now();
901 913
902 have_written_[stream_index] = true; 914 have_written_[stream_index] = true;
915 // Writing on stream 1 affects the placement of stream 0 in the file.
916 if (stream_index == 1)
917 have_written_[0] = true;
903 918
904 scoped_ptr<int> result(new int()); 919 scoped_ptr<int> result(new int());
905 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData, 920 Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
906 base::Unretained(synchronous_entry_), 921 base::Unretained(synchronous_entry_),
907 SimpleSynchronousEntry::EntryOperationData( 922 SimpleSynchronousEntry::EntryOperationData(
908 stream_index, offset, buf_len, truncate), 923 stream_index, offset, buf_len, truncate),
909 make_scoped_refptr(buf), 924 make_scoped_refptr(buf),
910 entry_stat.get(), 925 entry_stat.get(),
911 result.get()); 926 result.get());
912 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, 927 Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
(...skipping 36 matching lines...)
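The comment in WriteDataInternal above leans on the fact that zlib's crc32() can be extended chunk by chunk, which is why only sequential writes keep the running checksum usable and a non-sequential write makes the entry skip the CRC check at close. A small standalone illustration of that property (not part of the patch; build with -lz):

#include <cassert>
#include <cstddef>
#include <zlib.h>

int main() {
  const char data[] = "GET /index.html HTTP/1.1";
  const size_t total = sizeof(data) - 1;
  const size_t split = 10;  // Pretend the data arrived as two sequential writes.

  // CRC of the whole buffer in one call.
  uLong whole = crc32(0L, Z_NULL, 0);
  whole = crc32(whole, reinterpret_cast<const Bytef*>(data), total);

  // Same CRC computed incrementally, the way sequential WriteData calls would.
  uLong incremental = crc32(0L, Z_NULL, 0);
  incremental = crc32(incremental, reinterpret_cast<const Bytef*>(data), split);
  incremental = crc32(incremental,
                      reinterpret_cast<const Bytef*>(data) + split,
                      total - split);

  assert(whole == incremental);
  return 0;
}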
949 MakeUninitialized(); 964 MakeUninitialized();
950 return; 965 return;
951 } 966 }
952 // If out_entry is NULL, it means we already called ReturnEntryToCaller from 967 // If out_entry is NULL, it means we already called ReturnEntryToCaller from
953 // the optimistic Create case. 968 // the optimistic Create case.
954 if (out_entry) 969 if (out_entry)
955 ReturnEntryToCaller(out_entry); 970 ReturnEntryToCaller(out_entry);
956 971
957 state_ = STATE_READY; 972 state_ = STATE_READY;
958 synchronous_entry_ = in_results->sync_entry; 973 synchronous_entry_ = in_results->sync_entry;
974 stream_0_data_ = in_results->stream_0_data;
975 // The crc was read in SimpleSynchronousEntry.
976 crc_check_state_[0] = CRC_CHECK_DONE;
959 if (key_.empty()) { 977 if (key_.empty()) {
960 SetKey(synchronous_entry_->key()); 978 SetKey(synchronous_entry_->key());
961 } else { 979 } else {
962 // This should only be triggered when creating an entry. The key check in 980 // This should only be triggered when creating an entry. The key check in
963 // the open case is handled in SimpleBackendImpl. 981 // the open case is handled in SimpleBackendImpl.
964 DCHECK_EQ(key_, synchronous_entry_->key()); 982 DCHECK_EQ(key_, synchronous_entry_->key());
965 } 983 }
966 UpdateDataFromEntryStat(in_results->entry_stat); 984 UpdateDataFromEntryStat(in_results->entry_stat);
967 SIMPLE_CACHE_UMA(TIMES, 985 SIMPLE_CACHE_UMA(TIMES,
968 "EntryCreationTime", cache_type_, 986 "EntryCreationTime", cache_type_,
(...skipping 27 matching lines...)
996 completion_callback, *result)); 1014 completion_callback, *result));
997 } 1015 }
998 RunNextOperationIfNeeded(); 1016 RunNextOperationIfNeeded();
999 } 1017 }
1000 1018
1001 void SimpleEntryImpl::ReadOperationComplete( 1019 void SimpleEntryImpl::ReadOperationComplete(
1002 int stream_index, 1020 int stream_index,
1003 int offset, 1021 int offset,
1004 const CompletionCallback& completion_callback, 1022 const CompletionCallback& completion_callback,
1005 scoped_ptr<uint32> read_crc32, 1023 scoped_ptr<uint32> read_crc32,
1006 scoped_ptr<base::Time> last_used, 1024 scoped_ptr<SimpleEntryStat> entry_stat,
1007 scoped_ptr<int> result) { 1025 scoped_ptr<int> result) {
1008 DCHECK(io_thread_checker_.CalledOnValidThread()); 1026 DCHECK(io_thread_checker_.CalledOnValidThread());
1009 DCHECK(synchronous_entry_); 1027 DCHECK(synchronous_entry_);
1010 DCHECK_EQ(STATE_IO_PENDING, state_); 1028 DCHECK_EQ(STATE_IO_PENDING, state_);
1011 DCHECK(read_crc32); 1029 DCHECK(read_crc32);
1012 DCHECK(result); 1030 DCHECK(result);
1013 1031
1014 if (*result > 0 && 1032 if (*result > 0 &&
1015 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) { 1033 crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
1016 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END; 1034 crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
(...skipping 15 matching lines...)
1032 // entry, one reader can be behind the other. In this case we compute 1050 // entry, one reader can be behind the other. In this case we compute
1033 // the crc as the most advanced reader progresses, and check it for 1051 // the crc as the most advanced reader progresses, and check it for
1034 // both readers as they read the last byte. 1052 // both readers as they read the last byte.
1035 1053
1036 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN); 1054 net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);
1037 1055
1038 scoped_ptr<int> new_result(new int()); 1056 scoped_ptr<int> new_result(new int());
1039 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord, 1057 Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
1040 base::Unretained(synchronous_entry_), 1058 base::Unretained(synchronous_entry_),
1041 stream_index, 1059 stream_index,
1042 data_size_[stream_index], 1060 *entry_stat,
1043 crc32s_[stream_index], 1061 crc32s_[stream_index],
1044 new_result.get()); 1062 new_result.get());
1045 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete, 1063 Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
1046 this, *result, stream_index, 1064 this, *result, stream_index,
1047 completion_callback, 1065 completion_callback,
1048 base::Passed(&new_result)); 1066 base::Passed(&new_result));
1049 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply); 1067 worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
1050 crc_check_state_[stream_index] = CRC_CHECK_DONE; 1068 crc_check_state_[stream_index] = CRC_CHECK_DONE;
1051 return; 1069 return;
1052 } 1070 }
1053 } 1071 }
1054 1072
1055 if (*result < 0) { 1073 if (*result < 0) {
1056 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE); 1074 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1057 } else { 1075 } else {
1058 RecordReadResult(cache_type_, READ_RESULT_SUCCESS); 1076 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1059 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END && 1077 if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
1060 offset + *result == GetDataSize(stream_index)) { 1078 offset + *result == GetDataSize(stream_index)) {
1061 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE; 1079 crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
1062 } 1080 }
1063 } 1081 }
1064 if (net_log_.IsLoggingAllEvents()) { 1082 if (net_log_.IsLoggingAllEvents()) {
1065 net_log_.AddEvent( 1083 net_log_.AddEvent(
1066 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END, 1084 net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
1067 CreateNetLogReadWriteCompleteCallback(*result)); 1085 CreateNetLogReadWriteCompleteCallback(*result));
1068 } 1086 }
1069 1087
1070 EntryOperationComplete( 1088 EntryOperationComplete(
1071 stream_index, 1089 stream_index, completion_callback, *entry_stat, result.Pass());
1072 completion_callback,
1073 SimpleEntryStat(*last_used, last_modified_, data_size_),
1074 result.Pass());
1075 } 1090 }
1076 1091
1077 void SimpleEntryImpl::WriteOperationComplete( 1092 void SimpleEntryImpl::WriteOperationComplete(
1078 int stream_index, 1093 int stream_index,
1079 const CompletionCallback& completion_callback, 1094 const CompletionCallback& completion_callback,
1080 scoped_ptr<SimpleEntryStat> entry_stat, 1095 scoped_ptr<SimpleEntryStat> entry_stat,
1081 scoped_ptr<int> result) { 1096 scoped_ptr<int> result) {
1082 if (*result >= 0) 1097 if (*result >= 0)
1083 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS); 1098 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1084 else 1099 else
(...skipping 68 matching lines...)
1153 } 1168 }
1154 1169
1155 void SimpleEntryImpl::UpdateDataFromEntryStat( 1170 void SimpleEntryImpl::UpdateDataFromEntryStat(
1156 const SimpleEntryStat& entry_stat) { 1171 const SimpleEntryStat& entry_stat) {
1157 DCHECK(io_thread_checker_.CalledOnValidThread()); 1172 DCHECK(io_thread_checker_.CalledOnValidThread());
1158 DCHECK(synchronous_entry_); 1173 DCHECK(synchronous_entry_);
1159 DCHECK_EQ(STATE_READY, state_); 1174 DCHECK_EQ(STATE_READY, state_);
1160 1175
1161 last_used_ = entry_stat.last_used; 1176 last_used_ = entry_stat.last_used;
1162 last_modified_ = entry_stat.last_modified; 1177 last_modified_ = entry_stat.last_modified;
1163 for (int i = 0; i < kSimpleEntryFileCount; ++i) { 1178 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
1164 data_size_[i] = entry_stat.data_size[i]; 1179 data_size_[i] = entry_stat.data_size[i];
1165 } 1180 }
1166 if (!doomed_ && backend_.get()) 1181 if (!doomed_ && backend_.get())
1167 backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage()); 1182 backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
1168 } 1183 }
1169 1184
1170 int64 SimpleEntryImpl::GetDiskUsage() const { 1185 int64 SimpleEntryImpl::GetDiskUsage() const {
1171 int64 file_size = 0; 1186 int64 file_size = 0;
1172 for (int i = 0; i < kSimpleEntryFileCount; ++i) { 1187 for (int i = 0; i < kSimpleEntryStreamCount; ++i) {
1173 file_size += 1188 file_size +=
1174 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]); 1189 simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
1175 } 1190 }
1176 return file_size; 1191 return file_size;
1177 } 1192 }
1178 1193
1179 void SimpleEntryImpl::RecordReadIsParallelizable( 1194 void SimpleEntryImpl::RecordReadIsParallelizable(
1180 const SimpleEntryOperation& operation) const { 1195 const SimpleEntryOperation& operation) const {
1181 if (!executing_operation_) 1196 if (!executing_operation_)
1182 return; 1197 return;
(...skipping 57 matching lines...)
1240 } else { 1255 } else {
1241 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE 1256 type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
1242 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE; 1257 : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
1243 } 1258 }
1244 } 1259 }
1245 SIMPLE_CACHE_UMA(ENUMERATION, 1260 SIMPLE_CACHE_UMA(ENUMERATION,
1246 "WriteDependencyType", cache_type_, 1261 "WriteDependencyType", cache_type_,
1247 type, WRITE_DEPENDENCY_TYPE_MAX); 1262 type, WRITE_DEPENDENCY_TYPE_MAX);
1248 } 1263 }
1249 1264
1265 int SimpleEntryImpl::ReadStream0Data(net::IOBuffer* buf,
1266 int offset,
1267 int buf_len) {
1268 int read_size = std::min(data_size_[0] - offset, buf_len);
1269 if (read_size < 0) {
1270 RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
1271 return 0;
1272 }
1273 memcpy(buf->data(), stream_0_data_->data() + offset, read_size);
pasko 2013/09/17 16:40:52 Seems like a null deref with stream_0_data_ if a R
clamy 2013/09/18 16:17:15 This code is called from ReadInternal, which is ca
1274 UpdateDataFromEntryStat(
1275 SimpleEntryStat(base::Time::Now(), last_modified_, data_size_));
1276 RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
1277 return read_size;
1278 }
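pasko's question above is about a read arriving before |stream_0_data_| has ever been allocated; clamy's reply is that the call path rules it out. A hedged sketch of the defensive guard being discussed, if one wanted it at the top of ReadStream0Data() anyway (it is not in this patch set):

  // Hypothetical guard: treat an entry whose stream 0 buffer was never
  // allocated as an empty stream instead of dereferencing a NULL
  // |stream_0_data_|.
  if (!stream_0_data_) {
    RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
    return 0;
  }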
1279
1280 int SimpleEntryImpl::CopyStream0Data(net::IOBuffer* buf,
pasko 2013/09/17 16:40:52 hm, I dunno, it seems it would be a tiny bit less
gavinp 2013/09/17 17:09:04 I'd be OK with these being "ReadDataFromBuffer" an
pasko 2013/09/18 08:14:10 I don't think it is a good idea since the method u
clamy 2013/09/18 16:17:15 I went with SetStream0Data, and as Egor pointed ou
1281 int offset,
1282 int buf_len,
1283 bool truncate) {
1284 // Currently, stream 0 is only used for HTTP headers, and always writes them
1285 // with a single, truncating write. Detect these writes and record the size
1286 // and size changes of the headers. Also, supports writes to stream 0 that
1287 // violate those assumptions. All other clients of the Simple Cache are
1288 // encouraged to use stream 1.
1289 if (!stream_0_data_)
1290 stream_0_data_ = new net::GrowableIOBuffer();
1291 have_written_[0] = true;
1292 int data_size = data_size_[0];
1293 if (offset == 0 && truncate) {
1294 RecordHeaderSizeChange(cache_type_, data_size, buf_len);
1295 stream_0_data_->SetCapacity(buf_len);
1296 memcpy(stream_0_data_->data(), buf->data(), buf_len);
1297 data_size_[0] = buf_len;
1298 } else {
1299 RecordUnexpectedStream0Write(cache_type_);
1300 const int buffer_size =
1301 truncate ? offset + buf_len : std::max(offset + buf_len, data_size);
1302 stream_0_data_->SetCapacity(buffer_size);
1303 // If |stream_0_data_| was extended. the extension until offset need to be
pasko 2013/09/17 16:40:52 s/./,/ s/need/needs/ s/zeroed/zero-filled/ In oth
clamy 2013/09/18 16:17:15 Done.
1304 // zeroed.
1305 const int fill_size = offset <= data_size ? 0 : offset - data_size;
1306 if (fill_size > 0)
1307 memset(stream_0_data_->data() + data_size, 0, fill_size);
1308 if (buf)
pasko 2013/09/17 16:40:52 why this check? does it happen in practice? I don'
clamy 2013/09/18 16:17:15 Some unit tests like 0-length truncating writes wi
1309 memcpy(stream_0_data_->data() + offset, buf->data(), buf_len);
1310 data_size_[0] = buffer_size;
1311 }
1312 base::Time modification_time = base::Time::Now();
1313 UpdateDataFromEntryStat(
1314 SimpleEntryStat(modification_time, modification_time, data_size_));
1315 if (stream_0_data_) {
1316 crc32s_[0] = crc32(crc32(0L, Z_NULL, 0),
1317 reinterpret_cast<const Bytef*>(stream_0_data_->data()),
1318 data_size_[0]);
pasko 2013/09/17 16:40:52 this needs updating crc32s_end_offset_, I think it
clamy 2013/09/18 16:17:15 Done.
1319 } else {
1320 crc32s_[0] = crc32(0L, Z_NULL, 0);
1321 }
1322 RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
1323 return buf_len;
1324 }
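pasko's last comment above asks for crc32s_end_offset_ to be kept in sync when stream 0 is rewritten in memory, and clamy marks it done in a later patch set. A sketch of what that bookkeeping looks like, assuming the whole-buffer recomputation stays as written; the exact landed code is not shown here:

  // After recomputing the checksum over the full in-memory buffer, record
  // that it now covers [0, data_size_[0]), so the invariant behind
  // |crc32s_end_offset_| ("crc32s_[i] is the CRC of the first
  // crc32s_end_offset_[i] bytes of stream i") keeps holding.
  crc32s_[0] = crc32(crc32(0L, Z_NULL, 0),
                     reinterpret_cast<const Bytef*>(stream_0_data_->data()),
                     data_size_[0]);
  crc32s_end_offset_[0] = data_size_[0];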
1325
1250 } // namespace disk_cache 1326 } // namespace disk_cache