OLD | NEW |
1 // Copyright 2012 The Chromium Authors. All rights reserved. | 1 // Copyright 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sync/syncable/directory.h" | 5 #include "sync/syncable/directory.h" |
6 | 6 |
7 #include "base/debug/trace_event.h" | 7 #include "base/debug/trace_event.h" |
8 #include "base/perftimer.h" | 8 #include "base/perftimer.h" |
9 #include "base/stl_util.h" | 9 #include "base/stl_util.h" |
10 #include "base/string_number_conversions.h" | 10 #include "base/string_number_conversions.h" |
11 #include "sync/internal_api/public/base/node_ordinal.h" | 11 #include "sync/internal_api/public/base/node_ordinal.h" |
12 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" | 12 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" |
| 13 #include "sync/syncable/delete_journal.h" |
13 #include "sync/syncable/entry.h" | 14 #include "sync/syncable/entry.h" |
14 #include "sync/syncable/entry_kernel.h" | 15 #include "sync/syncable/entry_kernel.h" |
15 #include "sync/syncable/in_memory_directory_backing_store.h" | 16 #include "sync/syncable/in_memory_directory_backing_store.h" |
16 #include "sync/syncable/on_disk_directory_backing_store.h" | 17 #include "sync/syncable/on_disk_directory_backing_store.h" |
17 #include "sync/syncable/scoped_index_updater.h" | 18 #include "sync/syncable/scoped_index_updater.h" |
18 #include "sync/syncable/syncable-inl.h" | 19 #include "sync/syncable/syncable-inl.h" |
19 #include "sync/syncable/syncable_base_transaction.h" | 20 #include "sync/syncable/syncable_base_transaction.h" |
20 #include "sync/syncable/syncable_changes_version.h" | 21 #include "sync/syncable/syncable_changes_version.h" |
21 #include "sync/syncable/syncable_read_transaction.h" | 22 #include "sync/syncable/syncable_read_transaction.h" |
22 #include "sync/syncable/syncable_util.h" | 23 #include "sync/syncable/syncable_util.h" |
(...skipping 71 matching lines...)
94 download_progress[model_type].set_data_type_id( | 95 download_progress[model_type].set_data_type_id( |
95 GetSpecificsFieldNumberFromModelType(model_type)); | 96 GetSpecificsFieldNumberFromModelType(model_type)); |
96 // An empty-string token indicates no prior knowledge. | 97 // An empty-string token indicates no prior knowledge. |
97 download_progress[model_type].set_token(std::string()); | 98 download_progress[model_type].set_token(std::string()); |
98 } | 99 } |
99 | 100 |
100 Directory::SaveChangesSnapshot::SaveChangesSnapshot() | 101 Directory::SaveChangesSnapshot::SaveChangesSnapshot() |
101 : kernel_info_status(KERNEL_SHARE_INFO_INVALID) { | 102 : kernel_info_status(KERNEL_SHARE_INFO_INVALID) { |
102 } | 103 } |
103 | 104 |
104 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {} | 105 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() { |
| 106 STLDeleteElements(&dirty_metas); |
| 107 STLDeleteElements(&delete_journals); |
| 108 } |
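In this patch dirty_metas and delete_journals hold heap-allocated EntryKernel copies rather than values, so the snapshot destructor above must free them with STLDeleteElements. A minimal sketch of that ownership pattern, using a hypothetical Item/Snapshot pair that is not part of the patch:

    // Container of owned raw pointers, freed explicitly in the destructor.
    #include <set>
    #include "base/stl_util.h"

    struct Item { int value; };

    class Snapshot {
     public:
      ~Snapshot() {
        STLDeleteElements(&items_);  // deletes every Item* and clears the set
      }
      void Add(const Item& item) {
        items_.insert(new Item(item));  // deep copy; the set owns the pointer
      }
     private:
      std::set<Item*> items_;
    };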
105 | 109 |
106 Directory::Kernel::Kernel( | 110 Directory::Kernel::Kernel( |
107 const std::string& name, | 111 const std::string& name, |
108 const KernelLoadInfo& info, DirectoryChangeDelegate* delegate, | 112 const KernelLoadInfo& info, DirectoryChangeDelegate* delegate, |
109 const WeakHandle<TransactionObserver>& transaction_observer) | 113 const WeakHandle<TransactionObserver>& transaction_observer) |
110 : next_write_transaction_id(0), | 114 : next_write_transaction_id(0), |
111 name(name), | 115 name(name), |
112 metahandles_index(new Directory::MetahandlesIndex), | 116 metahandles_index(new Directory::MetahandlesIndex), |
113 ids_index(new Directory::IdsIndex), | 117 ids_index(new Directory::IdsIndex), |
114 parent_id_child_index(new Directory::ParentIdChildIndex), | 118 parent_id_child_index(new Directory::ParentIdChildIndex), |
(...skipping 74 matching lines...)
189 } | 193 } |
190 DCHECK(!entry->is_dirty()); | 194 DCHECK(!entry->is_dirty()); |
191 } | 195 } |
192 } | 196 } |
193 | 197 |
194 DirOpenResult Directory::OpenImpl( | 198 DirOpenResult Directory::OpenImpl( |
195 const string& name, | 199 const string& name, |
196 DirectoryChangeDelegate* delegate, | 200 DirectoryChangeDelegate* delegate, |
197 const WeakHandle<TransactionObserver>& | 201 const WeakHandle<TransactionObserver>& |
198 transaction_observer) { | 202 transaction_observer) { |
199 | |
200 KernelLoadInfo info; | 203 KernelLoadInfo info; |
201 // Temporary indices before kernel_ initialized in case Load fails. We O(1) | 204 // Temporary indices before kernel_ initialized in case Load fails. We O(1) |
202 // swap these later. | 205 // swap these later. |
203 MetahandlesIndex metas_bucket; | 206 MetahandlesIndex metas_bucket; |
204 DirOpenResult result = store_->Load(&metas_bucket, &info); | 207 JournalIndex delete_journals; |
| 208 |
| 209 DirOpenResult result = store_->Load(&metas_bucket, &delete_journals, &info); |
205 if (OPENED != result) | 210 if (OPENED != result) |
206 return result; | 211 return result; |
207 | 212 |
208 kernel_ = new Kernel(name, info, delegate, transaction_observer); | 213 kernel_ = new Kernel(name, info, delegate, transaction_observer); |
209 kernel_->metahandles_index->swap(metas_bucket); | 214 kernel_->metahandles_index->swap(metas_bucket); |
| 215 delete_journal_.reset(new DeleteJournal(&delete_journals)); |
210 InitializeIndices(); | 216 InitializeIndices(); |
211 | 217 |
212 // Write back the share info to reserve some space in 'next_id'. This will | 218 // Write back the share info to reserve some space in 'next_id'. This will |
213 // prevent local ID reuse in the case of an early crash. See the comments in | 219 // prevent local ID reuse in the case of an early crash. See the comments in |
214 // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information. | 220 // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information. |
215 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 221 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
216 if (!SaveChanges()) | 222 if (!SaveChanges()) |
217 return FAILED_INITIAL_WRITE; | 223 return FAILED_INITIAL_WRITE; |
218 | 224 |
219 return OPENED; | 225 return OPENED; |
220 } | 226 } |
221 | 227 |
| 228 DeleteJournal* Directory::delete_journal() { |
| 229 DCHECK(delete_journal_.get()); |
| 230 return delete_journal_.get(); |
| 231 } |
| 232 |
222 void Directory::Close() { | 233 void Directory::Close() { |
223 store_.reset(); | 234 store_.reset(); |
224 if (kernel_) { | 235 if (kernel_) { |
225 delete kernel_; | 236 delete kernel_; |
226 kernel_ = NULL; | 237 kernel_ = NULL; |
227 } | 238 } |
228 } | 239 } |
229 | 240 |
230 void Directory::OnUnrecoverableError(const BaseTransaction* trans, | 241 void Directory::OnUnrecoverableError(const BaseTransaction* trans, |
231 const tracked_objects::Location& location, | 242 const tracked_objects::Location& location, |
232 const std::string & message) { | 243 const std::string & message) { |
233 DCHECK(trans != NULL); | 244 DCHECK(trans != NULL); |
234 unrecoverable_error_set_ = true; | 245 unrecoverable_error_set_ = true; |
235 unrecoverable_error_handler_->OnUnrecoverableError(location, | 246 unrecoverable_error_handler_->OnUnrecoverableError(location, |
236 message); | 247 message); |
237 } | 248 } |
238 | 249 |
239 | |
240 EntryKernel* Directory::GetEntryById(const Id& id) { | 250 EntryKernel* Directory::GetEntryById(const Id& id) { |
241 ScopedKernelLock lock(this); | 251 ScopedKernelLock lock(this); |
242 return GetEntryById(id, &lock); | 252 return GetEntryById(id, &lock); |
243 } | 253 } |
244 | 254 |
245 EntryKernel* Directory::GetEntryById(const Id& id, | 255 EntryKernel* Directory::GetEntryById(const Id& id, |
246 ScopedKernelLock* const lock) { | 256 ScopedKernelLock* const lock) { |
247 DCHECK(kernel_); | 257 DCHECK(kernel_); |
248 // Find it in the in memory ID index. | 258 // Find it in the in memory ID index. |
249 kernel_->needle.put(ID, id); | 259 kernel_->needle.put(ID, id); |
(...skipping 209 matching lines...)
459 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and | 469 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and |
460 // clear dirty flags. | 470 // clear dirty flags. |
461 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin(); | 471 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin(); |
462 i != kernel_->dirty_metahandles->end(); ++i) { | 472 i != kernel_->dirty_metahandles->end(); ++i) { |
463 EntryKernel* entry = GetEntryByHandle(*i, &lock); | 473 EntryKernel* entry = GetEntryByHandle(*i, &lock); |
464 if (!entry) | 474 if (!entry) |
465 continue; | 475 continue; |
466 // Skip over false positives; it happens relatively infrequently. | 476 // Skip over false positives; it happens relatively infrequently. |
467 if (!entry->is_dirty()) | 477 if (!entry->is_dirty()) |
468 continue; | 478 continue; |
469 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry); | 479 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), |
| 480 new EntryKernel(*entry)); |
470 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i)); | 481 DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i)); |
471 // We don't bother removing from the index here as we blow the entire thing | 482 // We don't bother removing from the index here as we blow the entire thing |
472 // in a moment, and it unnecessarily complicates iteration. | 483 // in a moment, and it unnecessarily complicates iteration. |
473 entry->clear_dirty(NULL); | 484 entry->clear_dirty(NULL); |
474 } | 485 } |
475 ClearDirtyMetahandles(); | 486 ClearDirtyMetahandles(); |
476 | 487 |
477 // Set purged handles. | 488 // Set purged handles. |
478 DCHECK(snapshot->metahandles_to_purge.empty()); | 489 DCHECK(snapshot->metahandles_to_purge.empty()); |
479 snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge)); | 490 snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge)); |
480 | 491 |
481 // Fill kernel_info_status and kernel_info. | 492 // Fill kernel_info_status and kernel_info. |
482 snapshot->kernel_info = kernel_->persisted_info; | 493 snapshot->kernel_info = kernel_->persisted_info; |
483 // To avoid duplicates when the process crashes, we record the next_id to be | 494 // To avoid duplicates when the process crashes, we record the next_id to be |
484 // greater magnitude than could possibly be reached before the next save | 495 // greater magnitude than could possibly be reached before the next save |
485 // changes. In other words, it's effectively impossible for the user to | 496 // changes. In other words, it's effectively impossible for the user to |
486 // generate 65536 new bookmarks in 3 seconds. | 497 // generate 65536 new bookmarks in 3 seconds. |
487 snapshot->kernel_info.next_id -= 65536; | 498 snapshot->kernel_info.next_id -= 65536; |
488 snapshot->kernel_info_status = kernel_->info_status; | 499 snapshot->kernel_info_status = kernel_->info_status; |
489 // This one we reset on failure. | 500 // This one we reset on failure. |
490 kernel_->info_status = KERNEL_SHARE_INFO_VALID; | 501 kernel_->info_status = KERNEL_SHARE_INFO_VALID; |
| 502 |
| 503 delete_journal_->TakeSnapshotAndClear( |
| 504 &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge); |
491 } | 505 } |
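The next_id adjustment above is a crash-safety reservation: the value written to disk is 65536 further along than the one the running process keeps handing out, so IDs assigned between saves cannot collide with IDs minted after a crash and reload. A toy illustration of that arithmetic (the function name is hypothetical; local IDs count downward, so subtracting increases their magnitude):

    #include <stdint.h>

    // Persist a next_id that is 65536 beyond the in-memory value, mirroring
    // "snapshot->kernel_info.next_id -= 65536" above.
    int64_t ReserveIdsForSnapshot(int64_t in_memory_next_id) {
      return in_memory_next_id - 65536;
    }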
492 | 506 |
493 bool Directory::SaveChanges() { | 507 bool Directory::SaveChanges() { |
494 bool success = false; | 508 bool success = false; |
495 | 509 |
496 base::AutoLock scoped_lock(kernel_->save_changes_mutex); | 510 base::AutoLock scoped_lock(kernel_->save_changes_mutex); |
497 | 511 |
498 // Snapshot and save. | 512 // Snapshot and save. |
499 SaveChangesSnapshot snapshot; | 513 SaveChangesSnapshot snapshot; |
500 TakeSnapshotForSaveChanges(&snapshot); | 514 TakeSnapshotForSaveChanges(&snapshot); |
(...skipping 10 matching lines...)
511 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) { | 525 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) { |
512 if (snapshot.dirty_metas.empty()) | 526 if (snapshot.dirty_metas.empty()) |
513 return true; | 527 return true; |
514 | 528 |
515 // Need a write transaction as we are about to permanently purge entries. | 529 // Need a write transaction as we are about to permanently purge entries. |
516 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this); | 530 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this); |
517 ScopedKernelLock lock(this); | 531 ScopedKernelLock lock(this); |
518 // Now drop everything we can out of memory. | 532 // Now drop everything we can out of memory. |
519 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); | 533 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); |
520 i != snapshot.dirty_metas.end(); ++i) { | 534 i != snapshot.dirty_metas.end(); ++i) { |
521 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); | 535 kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE)); |
522 MetahandlesIndex::iterator found = | 536 MetahandlesIndex::iterator found = |
523 kernel_->metahandles_index->find(&kernel_->needle); | 537 kernel_->metahandles_index->find(&kernel_->needle); |
524 EntryKernel* entry = (found == kernel_->metahandles_index->end() ? | 538 EntryKernel* entry = (found == kernel_->metahandles_index->end() ? |
525 NULL : *found); | 539 NULL : *found); |
526 if (entry && SafeToPurgeFromMemory(&trans, entry)) { | 540 if (entry && SafeToPurgeFromMemory(&trans, entry)) { |
527 // We now drop deleted metahandles that are up to date on both the client | 541 // We now drop deleted metahandles that are up to date on both the client |
528 // and the server. | 542 // and the server. |
529 size_t num_erased = 0; | 543 size_t num_erased = 0; |
530 num_erased = kernel_->ids_index->erase(entry); | 544 num_erased = kernel_->ids_index->erase(entry); |
531 DCHECK_EQ(1u, num_erased); | 545 DCHECK_EQ(1u, num_erased); |
(...skipping 66 matching lines...)
598 it.Good(); it.Inc()) { | 612 it.Good(); it.Inc()) { |
599 kernel_->persisted_info.reset_download_progress(it.Get()); | 613 kernel_->persisted_info.reset_download_progress(it.Get()); |
600 kernel_->persisted_info.transaction_version[it.Get()] = 0; | 614 kernel_->persisted_info.transaction_version[it.Get()] = 0; |
601 } | 615 } |
602 } | 616 } |
603 } | 617 } |
604 return true; | 618 return true; |
605 } | 619 } |
606 | 620 |
607 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) { | 621 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) { |
| 622 WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this); |
608 ScopedKernelLock lock(this); | 623 ScopedKernelLock lock(this); |
609 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | 624 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; |
610 | 625 |
611 // Because we optimistically cleared the dirty bit on the real entries when | 626 // Because we optimistically cleared the dirty bit on the real entries when |
612 // taking the snapshot, we must restore it on failure. Not doing this could | 627 // taking the snapshot, we must restore it on failure. Not doing this could |
613 // cause lost data, if no other changes are made to the in-memory entries | 628 // cause lost data, if no other changes are made to the in-memory entries |
614 // that would cause the dirty bit to get set again. Setting the bit ensures | 629 // that would cause the dirty bit to get set again. Setting the bit ensures |
615 // that SaveChanges will at least try again later. | 630 // that SaveChanges will at least try again later. |
616 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); | 631 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); |
617 i != snapshot.dirty_metas.end(); ++i) { | 632 i != snapshot.dirty_metas.end(); ++i) { |
618 kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE)); | 633 kernel_->needle.put(META_HANDLE, (*i)->ref(META_HANDLE)); |
619 MetahandlesIndex::iterator found = | 634 MetahandlesIndex::iterator found = |
620 kernel_->metahandles_index->find(&kernel_->needle); | 635 kernel_->metahandles_index->find(&kernel_->needle); |
621 if (found != kernel_->metahandles_index->end()) { | 636 if (found != kernel_->metahandles_index->end()) { |
622 (*found)->mark_dirty(kernel_->dirty_metahandles); | 637 (*found)->mark_dirty(kernel_->dirty_metahandles); |
623 } | 638 } |
624 } | 639 } |
625 | 640 |
626 kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(), | 641 kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(), |
627 snapshot.metahandles_to_purge.end()); | 642 snapshot.metahandles_to_purge.end()); |
| 643 |
| 644 // Restore delete journals. |
| 645 delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals); |
| 646 delete_journal_->PurgeDeleteJournals(&trans, |
| 647 snapshot.delete_journals_to_purge); |
628 } | 648 } |
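HandleSaveChangesFailure is the undo half of the optimistic scheme in TakeSnapshotForSaveChanges: dirty bits are cleared when the snapshot is taken and only put back (together with the snapshotted delete journals) if the database write fails, so the next SaveChanges call retries exactly the entries that were lost. A self-contained sketch of that clear-then-restore pattern, with illustrative names that are not part of the Directory API:

    #include <stdint.h>
    #include <set>

    typedef std::set<int64_t> HandleSet;

    bool SaveWithRestoreOnFailure(HandleSet* live_dirty,
                                  bool (*persist)(const HandleSet&)) {
      HandleSet snapshot;
      snapshot.swap(*live_dirty);           // optimistically clear the live set
      if (persist(snapshot))
        return true;                        // success: snapshot is discarded
      live_dirty->insert(snapshot.begin(),  // failure: restore for a later retry
                         snapshot.end());
      return false;
    }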
629 | 649 |
630 void Directory::GetDownloadProgress( | 650 void Directory::GetDownloadProgress( |
631 ModelType model_type, | 651 ModelType model_type, |
632 sync_pb::DataTypeProgressMarker* value_out) const { | 652 sync_pb::DataTypeProgressMarker* value_out) const { |
633 ScopedKernelLock lock(this); | 653 ScopedKernelLock lock(this); |
634 return value_out->CopyFrom( | 654 return value_out->CopyFrom( |
635 kernel_->persisted_info.download_progress[model_type]); | 655 kernel_->persisted_info.download_progress[model_type]); |
636 } | 656 } |
637 | 657 |
(...skipping 627 matching lines...)
1265 // There were no children in the linked list. | 1285 // There were no children in the linked list. |
1266 return NULL; | 1286 return NULL; |
1267 } | 1287 } |
1268 | 1288 |
1269 ScopedKernelLock::ScopedKernelLock(const Directory* dir) | 1289 ScopedKernelLock::ScopedKernelLock(const Directory* dir) |
1270 : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) { | 1290 : scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) { |
1271 } | 1291 } |
1272 | 1292 |
1273 } // namespace syncable | 1293 } // namespace syncable |
1274 } // namespace syncer | 1294 } // namespace syncer |