Chromium Code Reviews

Unified Diff: net/disk_cache/simple/simple_entry_impl.cc

Issue 13907009: Support optimistic Create and Write operations on the SimpleCache. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: "make linker happy", created 7 years, 7 months ago
 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "net/disk_cache/simple/simple_entry_impl.h"
 
 #include <algorithm>
 #include <cstring>
 #include <vector>
 
(...skipping 53 matching lines...)
 SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend,
                                  const FilePath& path,
                                  const std::string& key,
                                  const uint64 entry_hash)
     : backend_(backend->AsWeakPtr()),
       path_(path),
       key_(key),
       entry_hash_(entry_hash),
       open_count_(0),
       state_(STATE_UNINITIALIZED),
-      synchronous_entry_(NULL) {
+      synchronous_entry_(NULL),
+      optimistic_(true) {
   DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
   COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                  arrays_should_be_same_size);
   COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                  arrays_should_be_same_size2);
   COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                  arrays_should_be_same_size3);
-
   MakeUninitialized();
 }
 
 int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                                const CompletionCallback& callback) {
   DCHECK(backend_);
+  // This enumeration is used in histograms; add entries only at the end.
+  enum OpenEntryIndexEnum {
+    INDEX_NOEXIST = 0,
+    INDEX_MISS = 1,
+    INDEX_HIT = 2,
+    INDEX_MAX = 3,
+  };
+  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
+  if (backend_) {
+    if (backend_->index()->Has(key_))
+      open_entry_index_enum = INDEX_HIT;
+    else
+      open_entry_index_enum = INDEX_MISS;
+  }
+  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
+                            open_entry_index_enum, INDEX_MAX);
+
+  // If entry is not known to the index, initiate fast failover to the network.
+  if (open_entry_index_enum == INDEX_MISS)
+    return net::ERR_FAILED;
 
   pending_operations_.push(base::Bind(&SimpleEntryImpl::OpenEntryInternal,
-                                      this, out_entry, callback));
+                                      this, callback, out_entry));
   RunNextOperationIfNeeded();
   return net::ERR_IO_PENDING;
 }
 
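The index check above is a synchronous set-membership test performed before any disk I/O is queued: a confirmed miss fails over to the network immediately instead of paying for a worker-pool round trip. A minimal sketch of the pattern, in generic C++ (KeyIndex and the error constants are illustrative stand-ins, not Chromium's types):

    #include <string>
    #include <unordered_set>

    namespace {
    const int kErrFailed = -2;     // stands in for net::ERR_FAILED
    const int kErrIoPending = -1;  // stands in for net::ERR_IO_PENDING
    }  // namespace

    class KeyIndex {
     public:
      bool Has(const std::string& key) const { return keys_.count(key) > 0; }
      void Insert(const std::string& key) { keys_.insert(key); }

     private:
      std::unordered_set<std::string> keys_;
    };

    // Fail fast on an index miss; only a hit pays for the asynchronous open.
    int OpenEntry(const KeyIndex& index, const std::string& key) {
      if (!index.Has(key))
        return kErrFailed;  // caller fails over to the network immediately
      // ... queue the real open on the worker pool here ...
      return kErrIoPending;
    }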
 int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                  const CompletionCallback& callback) {
   DCHECK(backend_);
-  pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
-                                      this, out_entry, callback));
+  int ret_value = net::ERR_FAILED;
+  if (state_ == STATE_UNINITIALIZED &&
+      pending_operations_.size() == 0 &&
+      optimistic_) {
+    // We can do an optimistic Create.
+    ReturnEntryToCaller(out_entry);
+    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
+                                        this,
+                                        CompletionCallback(),
+                                        static_cast<Entry**>(NULL)));
+    ret_value = net::OK;
+  } else {
+    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
+                                        this,
+                                        callback,
+                                        out_entry));
+    ret_value = net::ERR_IO_PENDING;
+  }
+
+  // We insert the entry into the index before creating the entry files in
+  // SimpleSynchronousEntry, because the worst-case scenario is then an entry
+  // present in the index without created files; that way we never leak files.
+  // CreationOperationComplete will remove the entry from the index if the
+  // creation fails.
+  if (backend_)
+    backend_->index()->Insert(key_);
+
+  // Since we don't know the correct values for |last_used_| and
+  // |last_modified_| yet, we make this approximation.
+  last_used_ = last_modified_ = base::Time::Now();
+
   RunNextOperationIfNeeded();
-  return net::ERR_IO_PENDING;
+  return ret_value;
 }
 
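The optimistic branch above changes the contract of Create: when the entry is uninitialized and nothing is queued, the caller receives a usable entry and net::OK synchronously, while the actual file creation is queued with a null callback, so the caller's callback never fires. A minimal sketch of that return-path logic under those assumptions (generic C++, illustrative names, not the CL's API):

    #include <functional>
    #include <queue>

    using Callback = std::function<void(int)>;
    const int kOk = 0;          // stands in for net::OK
    const int kIoPending = -1;  // stands in for net::ERR_IO_PENDING

    class OptimisticEntry {
     public:
      // Mirrors the branch above: with no pending work, hand the entry back
      // and report success synchronously; the queued creation gets a null
      // callback.
      int Create(OptimisticEntry** out_entry, Callback callback) {
        if (!initialized_ && pending_.empty()) {
          *out_entry = this;  // usable immediately, like ReturnEntryToCaller()
          pending_.push([this] { DoCreate(Callback()); });
          return kOk;  // optimistic: |callback| will never be invoked
        }
        pending_.push([this, out_entry, callback] {
          *out_entry = this;
          DoCreate(callback);
        });
        return kIoPending;
      }

      void RunNextOperationIfNeeded() {
        if (pending_.empty())
          return;
        std::function<void()> op = std::move(pending_.front());
        pending_.pop();
        op();
      }

     private:
      void DoCreate(Callback callback) {
        initialized_ = true;  // stand-in for the worker-pool file creation
        if (callback)
          callback(kOk);
      }

      bool initialized_ = false;
      std::queue<std::function<void()>> pending_;
    };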
 int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
   MarkAsDoomed();
-
   scoped_ptr<int> result(new int());
   Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                             entry_hash_, result.get());
   Closure reply = base::Bind(&CallCompletionCallback,
                              callback, base::Passed(&result));
   WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
   return net::ERR_IO_PENDING;
 }
 
 
(...skipping 27 matching lines...)
   return last_used_;
 }
 
 Time SimpleEntryImpl::GetLastModified() const {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   return last_modified_;
 }
 
 int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK(data_size_[stream_index] >= 0);
gavinp 2013/05/01 13:11:22 DCHECK_LE please.
felipeg 2013/05/02 09:49:27 Done.
   return data_size_[stream_index];
 }
 
 int SimpleEntryImpl::ReadData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || buf_len < 0)
     return net::ERR_INVALID_ARGUMENT;
   if (offset >= data_size_[stream_index] || offset < 0 || !buf_len)
     return 0;
-  buf_len = std::min(buf_len, data_size_[stream_index] - offset);
+
   // TODO(felipeg): Optimization: Add support for truly parallel read
   // operations.
   pending_operations_.push(
       base::Bind(&SimpleEntryImpl::ReadDataInternal,
                  this,
                  stream_index,
                  offset,
                  make_scoped_refptr(buf),
                  buf_len,
                  callback));
   RunNextOperationIfNeeded();
   return net::ERR_IO_PENDING;
 }
 
 int SimpleEntryImpl::WriteData(int stream_index,
                                int offset,
                                net::IOBuffer* buf,
                                int buf_len,
                                const CompletionCallback& callback,
                                bool truncate) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
       buf_len < 0) {
     return net::ERR_INVALID_ARGUMENT;
   }
-  pending_operations_.push(
-      base::Bind(&SimpleEntryImpl::WriteDataInternal,
-                 this,
-                 stream_index,
-                 offset,
-                 make_scoped_refptr(buf),
-                 buf_len,
-                 callback,
-                 truncate));
+
+  int ret_value = net::ERR_FAILED;
+  if (state_ == STATE_READY && pending_operations_.size() == 0 && optimistic_) {
+    // We can only do an optimistic Write if there are no pending operations,
+    // so that we are sure the next call to RunNextOperationIfNeeded will
+    // actually run the write operation that sets the stream size. It also
+    // protects against earlier, possibly conflicting writes stacked in
+    // |pending_operations_|. We could optimize this for the case where only
+    // read operations are enqueued.
+    pending_operations_.push(
+        base::Bind(&SimpleEntryImpl::WriteDataInternal,
+                   this,
+                   stream_index,
+                   offset,
+                   make_scoped_refptr(buf),
+                   buf_len,
+                   CompletionCallback(),
+                   truncate));
+    ret_value = buf_len;
+  } else {
+    pending_operations_.push(
+        base::Bind(&SimpleEntryImpl::WriteDataInternal,
+                   this,
+                   stream_index,
+                   offset,
+                   make_scoped_refptr(buf),
+                   buf_len,
+                   callback,
+                   truncate));
+    ret_value = net::ERR_IO_PENDING;
+  }
+
+  if (truncate)
+    data_size_[stream_index] = offset + buf_len;
+  else
+    data_size_[stream_index] = std::max(offset + buf_len,
+                                        data_size_[stream_index]);
+
+  // Since we don't know the correct values for |last_used_| and
+  // |last_modified_| yet, we make this approximation.
+  last_used_ = last_modified_ = base::Time::Now();
+
   RunNextOperationIfNeeded();
-  // TODO(felipeg): Optimization: Add support for optimistic writes, quickly
-  // returning net::OK here.
-  return net::ERR_IO_PENDING;
+  return ret_value;
 }
 
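Because an optimistic write can report buf_len before any disk I/O has happened, the entry must account for the new stream size eagerly, as the code above does; otherwise a later GetDataSize or a clamped read would observe a stale size. The same bookkeeping in isolation (StreamInfo is an illustrative stand-in):

    #include <algorithm>
    #include <cstdint>

    struct StreamInfo {
      int32_t data_size = 0;
    };

    // Mirrors the |truncate| logic above: a truncating write pins the size to
    // exactly offset + buf_len; a normal write can only grow it.
    void UpdateSizeForOptimisticWrite(StreamInfo* stream, int32_t offset,
                                      int32_t buf_len, bool truncate) {
      if (truncate)
        stream->data_size = offset + buf_len;
      else
        stream->data_size = std::max(offset + buf_len, stream->data_size);
    }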
 int SimpleEntryImpl::ReadSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   // TODO(gavinp): Determine if the simple backend should support sparse data.
   NOTIMPLEMENTED();
   return net::ERR_FAILED;
(...skipping 34 matching lines...)
 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   // TODO(gavinp): Determine if the simple backend should support sparse data.
   NOTIMPLEMENTED();
   return net::ERR_FAILED;
 }
 
 SimpleEntryImpl::~SimpleEntryImpl() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(0U, pending_operations_.size());
-  DCHECK_EQ(STATE_UNINITIALIZED, state_);
+  DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
   DCHECK(!synchronous_entry_);
   RemoveSelfFromBackend();
 }
 
 void SimpleEntryImpl::MakeUninitialized() {
+
   state_ = STATE_UNINITIALIZED;
   std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
   std::memset(crc32s_, 0, sizeof(crc32s_));
   std::memset(have_written_, 0, sizeof(have_written_));
+  std::memset(data_size_, 0, sizeof(data_size_));
 }
 
 void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
+  if (!out_entry)
gavinp 2013/05/01 13:11:22 This seems like a belt and suspenders; why not mak
felipeg 2013/05/02 09:49:27 Done.
+    return;
+
   ++open_count_;
   AddRef();  // Balanced in Close()
   *out_entry = this;
 }
 
 void SimpleEntryImpl::RemoveSelfFromBackend() {
   if (!backend_)
     return;
   backend_->OnDeactivated(this);
   backend_.reset();
 }
 
 void SimpleEntryImpl::MarkAsDoomed() {
   if (!backend_)
     return;
   backend_->index()->Remove(key_);
   RemoveSelfFromBackend();
 }
 
 void SimpleEntryImpl::RunNextOperationIfNeeded() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-
   UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                               pending_operations_.size(), 0, 100, 20);
-
   if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
     base::Closure operation = pending_operations_.front();
     pending_operations_.pop();
     operation.Run();
     // |this| may have been deleted.
   }
 }
 
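RunNextOperationIfNeeded is the heart of the per-entry serialization: at most one operation is in flight, and each completion re-enters the scheduler to start the next one. A stripped-down sketch of that queue discipline, in generic C++ rather than Chromium's Closure and state machinery:

    #include <functional>
    #include <queue>
    #include <utility>

    class SerializedRunner {
     public:
      // Queue an operation and try to start it right away.
      void Post(std::function<void()> op) {
        pending_.push(std::move(op));
        RunNextIfNeeded();
      }

      // An operation that went asynchronous calls this first...
      void MarkIoPending() { io_pending_ = true; }

      // ...and this from its completion, which unblocks the next operation.
      void OnOperationComplete() {
        io_pending_ = false;
        RunNextIfNeeded();
      }

     private:
      void RunNextIfNeeded() {
        if (pending_.empty() || io_pending_)
          return;
        std::function<void()> op = std::move(pending_.front());
        pending_.pop();
        op();
      }

      bool io_pending_ = false;
      std::queue<std::function<void()>> pending_;
    };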
-void SimpleEntryImpl::OpenEntryInternal(Entry** out_entry,
-                                        const CompletionCallback& callback) {
+void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback,
+                                        Entry** out_entry) {
gavinp 2013/05/01 13:11:22 I'm confused by the value of optimistic open. Can
felipeg 2013/05/02 09:49:27 This CL doesn't have optimistic open. Only Create a
   ScopedOperationRunner operation_runner(this);
-
   if (state_ == STATE_READY) {
     ReturnEntryToCaller(out_entry);
-    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
-                                                                net::OK));
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
+                                                                  net::OK));
+    return;
+  } else if (state_ == STATE_FAILURE) {
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
     return;
   }
   DCHECK_EQ(STATE_UNINITIALIZED, state_);
-
-  // This enumeration is used in histograms, add entries only at end.
-  enum OpenEntryIndexEnum {
-    INDEX_NOEXIST = 0,
-    INDEX_MISS = 1,
-    INDEX_HIT = 2,
-    INDEX_MAX = 3,
-  };
-  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
-  if (backend_) {
-    if (backend_->index()->Has(key_))
-      open_entry_index_enum = INDEX_HIT;
-    else
-      open_entry_index_enum = INDEX_MISS;
-  }
-  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
-                            open_entry_index_enum, INDEX_MAX);
-  // If entry is not known to the index, initiate fast failover to the network.
-  if (open_entry_index_enum == INDEX_MISS) {
-    MessageLoopProxy::current()->PostTask(FROM_HERE,
-                                          base::Bind(callback,
-                                                     net::ERR_FAILED));
-    return;
-  }
   state_ = STATE_IO_PENDING;
-
   const base::TimeTicks start_time = base::TimeTicks::Now();
   typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
   scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
       new PointerToSimpleSynchronousEntry());
   Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, path_, key_,
                             entry_hash_, sync_entry.get());
   Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                              callback, start_time, base::Passed(&sync_entry),
                              out_entry);
   WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
 }
 
-void SimpleEntryImpl::CreateEntryInternal(Entry** out_entry,
-                                          const CompletionCallback& callback) {
+void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
+                                          Entry** out_entry) {
   ScopedOperationRunner operation_runner(this);
-
+  if (state_ != STATE_UNINITIALIZED) {
gavinp 2013/05/01 13:11:22 This seems wrong. What if we're in STATE_IO_PENDIN
felipeg 2013/05/02 09:49:27 Because CreateInternal is pushed in the operations
-  if (state_ == STATE_READY) {
     // There is already an active normal entry.
-    MessageLoopProxy::current()->PostTask(FROM_HERE,
-                                          base::Bind(callback,
-                                                     net::ERR_FAILED));
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
     return;
   }
   DCHECK_EQ(STATE_UNINITIALIZED, state_);
 
   state_ = STATE_IO_PENDING;
 
   // If creation succeeds, we should mark all streams to be saved on close.
   for (int i = 0; i < kSimpleEntryFileCount; ++i)
     have_written_[i] = true;
 
-  // We insert the entry in the index before creating the entry files in the
-  // SimpleSynchronousEntry, because this way the worst scenario is when we
-  // have the entry in the index but we don't have the created files yet, this
-  // way we never leak files. CreationOperationComplete will remove the entry
-  // from the index if the creation fails.
-  if (backend_)
-    backend_->index()->Insert(key_);
   const base::TimeTicks start_time = base::TimeTicks::Now();
   typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
   scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
       new PointerToSimpleSynchronousEntry());
   Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, path_, key_,
                             entry_hash_, sync_entry.get());
   Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                              callback, start_time, base::Passed(&sync_entry),
                              out_entry);
   WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
 }
 
 void SimpleEntryImpl::CloseInternal() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
-  DCHECK_EQ(0U, pending_operations_.size());
-  DCHECK_EQ(STATE_READY, state_);
-  DCHECK(synchronous_entry_);
-
-  state_ = STATE_IO_PENDING;
-
   typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
-
   scoped_ptr<std::vector<CRCRecord> >
       crc32s_to_write(new std::vector<CRCRecord>());
-  for (int i = 0; i < kSimpleEntryFileCount; ++i) {
-    if (have_written_[i]) {
-      if (data_size_[i] == crc32s_end_offset_[i]) {
-        int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
-        crc32s_to_write->push_back(CRCRecord(i, true, crc));
-      } else {
-        crc32s_to_write->push_back(CRCRecord(i, false, 0));
-      }
-    }
-  }
-  Closure task = base::Bind(&SimpleSynchronousEntry::Close,
-                            base::Unretained(synchronous_entry_),
-                            base::Passed(&crc32s_to_write));
-  Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
-  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
-  synchronous_entry_ = NULL;
+
+  if (state_ == STATE_READY) {
+    DCHECK(synchronous_entry_);
+    state_ = STATE_IO_PENDING;
+    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
+      if (have_written_[i]) {
+        if (data_size_[i] == crc32s_end_offset_[i]) {
+          int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
+          crc32s_to_write->push_back(CRCRecord(i, true, crc));
+        } else {
+          crc32s_to_write->push_back(CRCRecord(i, false, 0));
+        }
+      }
+    }
+  } else {
+    DCHECK_EQ(STATE_FAILURE, state_);
+  }
+
+  if (synchronous_entry_) {
+    Closure task = base::Bind(&SimpleSynchronousEntry::Close,
+                              base::Unretained(synchronous_entry_),
+                              base::Passed(&crc32s_to_write));
+    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
+    synchronous_entry_ = NULL;
+    WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
+  } else {
+    synchronous_entry_ = NULL;
+    CloseOperationComplete();
+  }
 }
 
 void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  ScopedOperationRunner operation_runner(this);
+
+  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    return;
+  }
   DCHECK_EQ(STATE_READY, state_);
+  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
+  if (offset < 0 || buf_len <= 0) {
+    // If there is nothing to read, we bail out before setting state_ to
+    // STATE_IO_PENDING.
+    if (!callback.is_null())
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, 0));
+    return;
+  }
+
   state_ = STATE_IO_PENDING;
   if (backend_)
     backend_->index()->UseIfExists(key_);
 
   scoped_ptr<uint32> read_crc32(new uint32());
   scoped_ptr<int> result(new int());
   Closure task = base::Bind(&SimpleSynchronousEntry::ReadData,
                             base::Unretained(synchronous_entry_),
                             stream_index, offset, make_scoped_refptr(buf),
                             buf_len, read_crc32.get(), result.get());
   Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, this,
                              stream_index, offset, callback,
                              base::Passed(&read_crc32), base::Passed(&result));
   WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
 }
 
 void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                         int offset,
                                         net::IOBuffer* buf,
                                         int buf_len,
                                         const CompletionCallback& callback,
                                         bool truncate) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  ScopedOperationRunner operation_runner(this);
+  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
+    if (!callback.is_null()) {
+      // We need to post a task so that we don't loop by invoking the
+      // callback directly.
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          callback, net::ERR_FAILED));
+    }
+    // |this| may be destroyed after return here.
+    return;
+  }
   DCHECK_EQ(STATE_READY, state_);
   state_ = STATE_IO_PENDING;
   if (backend_)
     backend_->index()->UseIfExists(key_);
   // It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
   // if |offset == 0| or we have already computed the CRC for [0 .. offset).
   // We rely on most write operations being sequential, start to end, to
   // compute the CRC of the data. When we write to an entry and close without
   // having done a sequential write, we don't check the CRC on read.
   if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
(...skipping 20 matching lines...)
 }
 
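The comment above describes extending a running CRC only while writes remain sequential. A self-contained sketch of that idea using zlib's crc32() directly; StreamCrc is an illustrative stand-in for the crc32s_, crc32s_end_offset_, and validity state the entry keeps per stream:

    #include <zlib.h>

    #include <cstdint>

    struct StreamCrc {
      uint32_t crc = crc32(0L, Z_NULL, 0);  // CRC of bytes [0, end_offset)
      int64_t end_offset = 0;
      bool sequential = true;  // false once a write breaks the [0, end) prefix
    };

    void OnWrite(StreamCrc* s, int64_t offset, const unsigned char* buf,
                 uInt len) {
      if (!s->sequential)
        return;
      if (offset == 0) {
        // Restart from the beginning: recompute from scratch.
        s->crc = static_cast<uint32_t>(crc32(crc32(0L, Z_NULL, 0), buf, len));
        s->end_offset = len;
      } else if (offset == s->end_offset) {
        // Sequential append: extend the running CRC.
        s->crc = static_cast<uint32_t>(crc32(s->crc, buf, len));
        s->end_offset += len;
      } else {
        // Out-of-order write: stop maintaining the CRC, matching the "don't
        // check the CRC on read" behaviour the comment above describes.
        s->sequential = false;
      }
    }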
 void SimpleEntryImpl::CreationOperationComplete(
     const CompletionCallback& completion_callback,
     const base::TimeTicks& start_time,
     scoped_ptr<SimpleSynchronousEntry*> in_sync_entry,
     Entry** out_entry) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(state_, STATE_IO_PENDING);
   DCHECK(in_sync_entry);
-
   ScopedOperationRunner operation_runner(this);
 
   bool creation_failed = !*in_sync_entry;
   UMA_HISTOGRAM_BOOLEAN("SimpleCache.EntryCreationResult", creation_failed);
   if (creation_failed) {
-    completion_callback.Run(net::ERR_FAILED);
+    if (!completion_callback.is_null())
gavinp 2013/05/01 13:11:22 completion_callback.is_null() is another, redundan
felipeg 2013/05/02 09:49:27 It is not, it is just a side-effect, we should not
gavinp 2013/05/02 12:47:39 Hrm, confusing.
felipeg 2013/05/02 13:55:58 I see. But I still don't see a problem with checki
gavinp 2013/05/02 14:05:45 Ugh, I was unclear again. I meant to say: OK, yeah
+      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+          completion_callback, net::ERR_FAILED));
+
-    MarkAsDoomed();
+    MarkAsDoomed();  // Removes entry from the index.
     MakeUninitialized();
+    state_ = STATE_FAILURE;
+    // |this| may now be deleted.
gavinp 2013/05/01 13:11:22 !!! if this comment is correct, then we are in gra
felipeg 2013/05/02 09:49:27 Done.
     return;
   }
+  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
+  // the optimistic Create case.
+  if (out_entry)
+    ReturnEntryToCaller(out_entry);
+
   state_ = STATE_READY;
   synchronous_entry_ = *in_sync_entry;
   SetSynchronousData();
-  ReturnEntryToCaller(out_entry);
   UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                       (base::TimeTicks::Now() - start_time));
-  completion_callback.Run(net::OK);
+
+  if (!completion_callback.is_null())
+    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+        completion_callback, net::OK));
 }
 
 void SimpleEntryImpl::EntryOperationComplete(
     int stream_index,
     const CompletionCallback& completion_callback,
     scoped_ptr<int> result) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
   DCHECK(synchronous_entry_);
   DCHECK_EQ(STATE_IO_PENDING, state_);
   DCHECK(result);
-
   state_ = STATE_READY;
-
   if (*result < 0) {
     MarkAsDoomed();
+    state_ = STATE_FAILURE;
     crc32s_end_offset_[stream_index] = 0;
+  } else {
+    SetSynchronousData();
   }
-  SetSynchronousData();
-  completion_callback.Run(*result);
+
+  if (!completion_callback.is_null())
+    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
+        completion_callback, *result));
   RunNextOperationIfNeeded();
 }
 
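A pattern repeated throughout this patch set is posting completion callbacks through MessageLoopProxy instead of invoking them inline: an inline Run() executes while the entry is still mid-completion, so a callback that immediately issues another operation could reenter the entry or touch a freed |this|. The same idea in miniature, with a generic task queue standing in for Chromium's message loop:

    #include <deque>
    #include <functional>
    #include <utility>

    class MiniLoop {
     public:
      void PostTask(std::function<void()> task) {
        tasks_.push_back(std::move(task));
      }

      void Run() {
        while (!tasks_.empty()) {
          std::function<void()> task = std::move(tasks_.front());
          tasks_.pop_front();
          task();  // runs on a fresh stack, after the posting code has unwound
        }
      }

     private:
      std::deque<std::function<void()>> tasks_;
    };

    // Instead of calling |callback(result)| inline (which could reenter the
    // caller), completion code does the equivalent of:
    //   loop->PostTask([callback, result] { callback(result); });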
 void SimpleEntryImpl::ReadOperationComplete(
     int stream_index,
     int offset,
     const CompletionCallback& completion_callback,
     scoped_ptr<uint32> read_crc32,
     scoped_ptr<int> result) {
   DCHECK(io_thread_checker_.CalledOnValidThread());
(...skipping 45 matching lines...)
   DCHECK_EQ(STATE_IO_PENDING, state_);
   DCHECK(result);
   if (*result == net::OK)
     *result = orig_result;
   EntryOperationComplete(stream_index, completion_callback, result.Pass());
 }
 
 void SimpleEntryImpl::CloseOperationComplete() {
   DCHECK(!synchronous_entry_);
   DCHECK_EQ(0, open_count_);
-  DCHECK_EQ(STATE_IO_PENDING, state_);
-
+  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_);
   MakeUninitialized();
   RunNextOperationIfNeeded();
 }
 
 void SimpleEntryImpl::SetSynchronousData() {
   DCHECK(io_thread_checker_.CalledOnValidThread());
+  DCHECK(synchronous_entry_);
   DCHECK_EQ(STATE_READY, state_);
   // TODO(felipeg): These copies to avoid data races are not optimal. While
   // adding an IO thread index (for fast misses etc...), we can store this data
   // in that structure. This also solves problems with last_used() on ext4
   // filesystems not being accurate.
   last_used_ = synchronous_entry_->last_used();
   last_modified_ = synchronous_entry_->last_modified();
   for (int i = 0; i < kSimpleEntryFileCount; ++i)
     data_size_[i] = synchronous_entry_->data_size(i);
   if (backend_)
     backend_->index()->UpdateEntrySize(key_, synchronous_entry_->GetFileSize());
 }
 
 }  // namespace disk_cache