// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <iterator>
#include <utility>

#include "base/base64.h"
#include "base/guid.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "sync/internal_api/public/base/attachment_id_proto.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/model_neutral_mutable_entry.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo() {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    ResetDownloadProgress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::ResetDownloadProgress(
    ModelType model_type) {
  // Clear everything except the data type id field.
  download_progress[model_type].Clear();
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));

  // Explicitly set an empty token field to denote no progress.
  download_progress[model_type].set_token("");
}

bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
    ModelType model_type) {
  const sync_pb::DataTypeProgressMarker& progress_marker =
      download_progress[model_type];
  return progress_marker.token().empty();
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
  return !dirty_metas.empty() || !metahandles_to_purge.empty() ||
         !delete_journals.empty() || !delete_journals_to_purge.empty();
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    const WeakHandle<UnrecoverableErrorHandler>& unrecoverable_error_handler,
    const base::Closure& report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES),
      weak_ptr_factory_(this) {}

Directory::~Directory() {
  Close();
}

DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result = OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

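// Rebuilds the in-memory indices (parent-child, unsynced, unapplied-update,
// server/client tag, ID, and attachment indices) from a freshly loaded
// metahandles map.  Takes ownership of the map's contents via swap.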
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  ScopedKernelLock lock(this);
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64_t metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
    AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
  }
}

DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used before kernel_ is initialized in case Load fails.
  // We swap them into place later; the swap is O(1).
  Directory::MetahandlesMap tmp_handles_map;

  // Avoids mem leaks on failure.  Harmlessly deletes the empty hash map after
  // the swap in the success case.
  STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);

  JournalIndex delete_journals;
  MetahandleSet metahandles_to_purge;

  DirOpenResult result = store_->Load(&tmp_handles_map, &delete_journals,
                                      &metahandles_to_purge, &info);
  if (OPENED != result)
    return result;

  DCHECK(!kernel_);
  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  kernel_->metahandles_to_purge.swap(metahandles_to_purge);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Save changes back in case there are any metahandles to purge.
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  // Now that we've successfully opened the store, install an error handler to
  // deal with catastrophic errors that may occur later on. Use a weak pointer
  // because we cannot guarantee that this Directory will outlive the Closure.
  store_->SetCatastrophicErrorHandler(base::Bind(
      &Directory::OnCatastrophicError, weak_ptr_factory_.GetWeakPtr()));

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_.Call(
      FROM_HERE, &UnrecoverableErrorHandler::OnUnrecoverableError, location,
      message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(lock, id);
}

EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
                                     const Id& id) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64_t metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(lock, metahandle);
}

EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
                                         int64_t metahandle) {
  // Look up in memory.
  MetahandlesMap::iterator found = kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory.  Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

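// Counts |kernel| and all of its descendants by walking the parent-child
// index breadth-first.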
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

int Directory::GetPositionIndex(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetSiblings(kernel);

  OrderedChildSet::const_iterator it = siblings->find(kernel);
  return std::distance(siblings->begin(), it);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(lock, trans, entry);
}

bool Directory::InsertEntry(const ScopedKernelLock& lock,
                            BaseWriteTransaction* trans,
                            EntryKernel* entry) {
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  AddToAttachmentIndex(
      lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(BaseWriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(lock, new_id))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater_b(lock, entry,
                                            &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(BaseWriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
                                                &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

void Directory::RemoveFromAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64_t metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter != kernel_->index_by_attachment_id.end()) {
      iter->second.erase(metahandle);
      if (iter->second.empty()) {
        kernel_->index_by_attachment_id.erase(iter);
      }
    }
  }
}

void Directory::AddToAttachmentIndex(
    const ScopedKernelLock& lock,
    const int64_t metahandle,
    const sync_pb::AttachmentMetadata& attachment_metadata) {
  for (int i = 0; i < attachment_metadata.record_size(); ++i) {
    AttachmentIdUniqueId unique_id =
        attachment_metadata.record(i).id().unique_id();
    IndexByAttachmentId::iterator iter =
        kernel_->index_by_attachment_id.find(unique_id);
    if (iter == kernel_->index_by_attachment_id.end()) {
      iter = kernel_->index_by_attachment_id.insert(std::make_pair(
          unique_id,
          MetahandleSet())).first;
    }
    iter->second.insert(metahandle);
  }
}

void Directory::UpdateAttachmentIndex(
    const int64_t metahandle,
    const sync_pb::AttachmentMetadata& old_metadata,
    const sync_pb::AttachmentMetadata& new_metadata) {
  ScopedKernelLock lock(this);
  RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
  AddToAttachmentIndex(lock, metahandle, new_metadata);
}

void Directory::GetMetahandlesByAttachmentId(
    BaseTransaction* trans,
    const sync_pb::AttachmentIdProto& attachment_id_proto,
    Metahandles* result) {
  DCHECK(result);
  result->clear();
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator index_iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (index_iter == kernel_->index_by_attachment_id.end())
    return;
  const MetahandleSet& metahandle_set = index_iter->second;
  std::copy(
      metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

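// An entry is safe to drop from memory only once it is deleted, clean, and
// not referenced by any sync bookkeeping (not syncing, unsynced, or an
// unapplied update).  The asserts below verify that such an entry has also
// disappeared from the corresponding metahandle sets.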
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
              !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
              !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64_t handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

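// Copies all state that must be persisted into |snapshot| and optimistically
// clears the in-memory dirty bits.  HandleSaveChangesFailure restores them if
// the write later fails.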
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_map into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(lock, *i);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles(lock);

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

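// Flushes dirty state to the backing store.  On success, entries that are
// safe to purge are dropped from memory; on failure, dirty bits and delete
// journals are restored so the data is retried on a later save.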
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

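// After a successful save, drops from memory every snapshotted entry that
// SafeToPurgeFromMemory approves, erasing it from all indices as it goes.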
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      RemoveFromAttachmentIndex(
          lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

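// Turns an applied entry back into an unapplied server update (or a locally
// deleted item if it has no server data) so that the next sync cycle
// overwrites all of its local data.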
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64_t handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (entry->ref(PARENT_ID).IsRoot()) {
    ModelType root_type = server_type;
    // Support both server created and client created type root folders.
    if (!IsRealDataType(root_type)) {
      root_type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    }
    if (IsRealDataType(root_type) &&
        ModelTypeToRootTag(root_type) == entry->ref(UNIQUE_SERVER_TAG)) {
      return;
    }
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted.  No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

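// Removes |entry| from every in-memory index and schedules its metahandle for
// purging from the store.  The kernel itself is either handed off to
// |entries_to_journal| or deleted outright.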
void Directory::DeleteEntry(const ScopedKernelLock& lock,
                            bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64_t handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

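// Purges all entries of the disabled types: entries of |types_to_unapply| are
// kept but reverted to unapplied server updates, entries of
// |types_to_journal| are recorded in the delete journal (when it is enabled
// for the type), and everything else is deleted outright.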
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      bool found_progress = false;
      for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
           iter.Inc()) {
        if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
          found_progress = true;
      }

      // If none of the disabled types have progress markers, there's nothing
      // to purge.
      if (!found_progress)
        return true;

      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end();) {
        EntryKernel* entry = it->second;
        const sync_pb::EntitySpecifics& local_specifics = entry->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            entry->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        // Increment the iterator before (potentially) calling DeleteEntry,
        // otherwise our iterator may be invalidated.
        ++it;

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          if (types_to_unapply.Has(local_type) ||
              types_to_unapply.Has(server_type)) {
            UnapplyEntry(entry);
          } else {
            bool save_to_journal =
                (types_to_journal.Has(local_type) ||
                 types_to_journal.Has(server_type)) &&
                (delete_journal_->IsDeleteJournalEnabled(local_type) ||
                 delete_journal_->IsDeleteJournalEnabled(server_type));
            DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
          }
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers or context for unapplied types.
        if (!types_to_unapply.Has(it.Get())) {
          kernel_->persisted_info.ResetDownloadProgress(it.Get());
          kernel_->persisted_info.datatype_context[it.Get()].Clear();
        }
      }

      kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
    }
  }
  return true;
}

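// Resets the base and server versions of all of |type|'s entries to 1 so that
// any incoming update will look newer than the local copies.  Unsynced and
// unapplied flags are left alone so no in-transit data is lost.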
bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
                                     ModelType type) {
  if (!ProtocolTypes().Has(type))
    return false;
  DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";

  EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
  if (!type_root)
    return false;

  ScopedKernelLock lock(this);
  const Id& type_root_id = type_root->ref(ID);
  Directory::Metahandles children;
  AppendChildHandles(lock, type_root_id, &children);

  for (Metahandles::iterator it = children.begin(); it != children.end();
       ++it) {
    EntryKernel* entry = GetEntryByHandle(lock, *it);
    if (!entry)
      continue;
    if (entry->ref(BASE_VERSION) > 1)
      entry->put(BASE_VERSION, 1);
    if (entry->ref(SERVER_VERSION) > 1)
      entry->put(SERVER_VERSION, 1);

    // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
    // to ensure no in-transit data is lost.

    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  return true;
}

bool Directory::IsAttachmentLinked(
    const sync_pb::AttachmentIdProto& attachment_id_proto) const {
  ScopedKernelLock lock(this);
  IndexByAttachmentId::const_iterator iter =
      kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
  if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
    return true;
  }
  return false;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again.  Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

bool Directory::HasEmptyDownloadProgress(ModelType type) const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.HasEmptyDownloadProgress(type);
}

int64_t Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

void Directory::GetDataTypeContext(BaseTransaction* trans,
                                   ModelType type,
                                   sync_pb::DataTypeContext* context) const {
  ScopedKernelLock lock(this);
  context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
}

void Directory::SetDataTypeContext(
    BaseWriteTransaction* trans,
    ModelType type,
    const sync_pb::DataTypeContext& context) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.datatype_context[type].CopyFrom(context);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

// TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been created and changes
  // for the type have been applied at least once.
  Entry root(trans, GET_TYPE_ROOT, type);
  return root.good() && root.GetBaseVersion() != CHANGES_VERSION;
}

void Directory::MarkInitialSyncEndedForType(BaseWriteTransaction* trans,
                                            ModelType type) {
  // If the root folder is downloaded from the server, the root's base version
  // gets updated automatically at the end of the update cycle when the update
  // gets applied.  However, if this is a type with a client generated root,
  // the root node gets created locally and never goes through the update
  // cycle.  In that case its base version has to be explicitly changed from
  // CHANGES_VERSION at the end of the initial update cycle to mark the type
  // as downloaded.  See Directory::InitialSyncEndedForType.
  DCHECK(IsTypeWithClientGeneratedRoot(type));
  ModelNeutralMutableEntry root(trans, GET_TYPE_ROOT, type);

  // Some tests don't bother creating the type root.  Need to check if the
  // root exists before clearing its base version.
  if (root.good()) {
    DCHECK(!root.GetIsDel());
    if (root.GetBaseVersion() == CHANGES_VERSION)
      root.PutBaseVersion(0);
  }
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::ReportUnrecoverableError() {
  if (!report_unrecoverable_error_function_.is_null()) {
    report_unrecoverable_error_function_.Run();
  }
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64_t Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

bool Directory::TypeHasUnappliedUpdates(ModelType type) {
  ScopedKernelLock lock(this);
  return !kernel_->unapplied_update_metahandles[type].empty();
}

void Directory::GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
                                              FullModelTypeSet server_types,
                                              std::vector<int64_t>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64_t>* result) {
  ScopedKernelLock lock(this);
  GetMetaHandlesOfType(lock, trans, type, result);
}

void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
                                     BaseTransaction* trans,
                                     ModelType type,
                                     std::vector<int64_t>* result) {
  result->clear();
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType entry_type =
        GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    if (entry_type == type)
      result->push_back(it->first);
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

std::unique_ptr<base::ListValue> Directory::GetNodeDetailsForType(
    BaseTransaction* trans,
    ModelType type) {
  std::unique_ptr<base::ListValue> nodes(new base::ListValue());

  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
      continue;
    }

    EntryKernel* kernel = it->second;
    std::unique_ptr<base::DictionaryValue> node(
        kernel->ToValue(GetCryptographer(trans)));

    // Add the position index if appropriate.  This must be done here (and not
    // in EntryKernel) because the EntryKernel does not have access to its
    // siblings.
    if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
      node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
    }

    nodes->Append(std::move(node));
  }

  return nodes;
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const MetahandleSet& modified_handles) {
  // NOTE: The trans may be in the process of being destructed.  Be careful if
  // you wish to call any of its virtual methods.
  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION: {
      MetahandleSet all_handles;
      GetAllMetaHandles(trans, &all_handles);
      return CheckTreeInvariants(trans, all_handles);
    }
    case VERIFY_CHANGES: {
      return CheckTreeInvariants(trans, modified_handles);
    }
    case OFF: {
      return true;
    }
  }
  NOTREACHED();
  return false;
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

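// Validates structural invariants for every entry in |handles|: the shape of
// the root node, parent chains that reach the root through valid directories,
// and version/ID combinations consistent with each entry's sync state.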
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64_t metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.GetId();
    syncable::Id parentid = e.GetParentId();

    if (id.IsRoot()) {
      if (!SyncAssert(e.GetIsDir(), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.GetIsDel()) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
                      "Non unique name should not be empty.",
                      trans))
        return false;

      if (!parentid.IsNull()) {
        int safety_count = handles.size() + 1;
        while (!parentid.IsRoot()) {
          Entry parent(trans, GET_BY_ID, parentid);
          if (!SyncAssert(parent.good(), FROM_HERE,
                          "Parent entry is not valid.", trans))
            return false;
          if (handles.end() == handles.find(parent.GetMetahandle()))
            break;  // Skip further checking if parent was unmodified.
          if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
                          "Parent should be a directory", trans))
            return false;
          if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
                          "Parent should not have been marked for deletion.",
                          trans))
            return false;
          if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
                          FROM_HERE, "Parent should be in the index.", trans))
            return false;
          parentid = parent.GetParentId();
          if (!SyncAssert(--safety_count > 0, FROM_HERE,
                          "Count should be greater than zero.", trans))
            return false;
        }
      }
    }
    int64_t base_version = e.GetBaseVersion();
    int64_t server_version = e.GetServerVersion();
    bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      ModelType model_type = e.GetModelType();
      bool is_client_creatable_type_root_folder =
          parentid.IsRoot() &&
          IsTypeWithClientGeneratedRoot(model_type) &&
          e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
      if (e.GetIsUnappliedUpdate()) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely, or a type root folder
        // that was created both locally and remotely.
        if (!(using_unique_client_tag ||
              is_client_creatable_type_root_folder)) {
          if (!SyncAssert(e.GetIsDel(), FROM_HERE,
                          "The entry should have been deleted.", trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.GetIsDir()) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        if (is_client_creatable_type_root_folder) {
          // This must be a locally created type root folder.
          if (!SyncAssert(
                  !e.GetIsUnsynced(), FROM_HERE,
                  "Locally created type root folders should not be unsynced.",
                  trans))
            return false;

          if (!SyncAssert(
                  !e.GetIsDel(), FROM_HERE,
                  "Locally created type root folders should not be deleted.",
                  trans))
            return false;
        } else {
          // Should be an uncommitted item, or a successfully deleted one.
          if (!e.GetIsDel()) {
            if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
                            "The item should be unsynced.", trans))
              return false;
          }
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID.  Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }

    // Previously we would assert that locally deleted items that have never
    // been synced must not be sent to the server (IS_UNSYNCED must be false).
    // This is not always true in the case that an item is deleted while the
    // initial commit is in flight.  See crbug.com/426865.
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64_t Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64_t metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Generates the next client ID based on a randomly generated GUID.
Id Directory::NextId() {
  return Id::CreateFromClientString(base::GenerateGUID());
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  if (i == siblings->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* siblings = kernel_->parent_child_index.GetSiblings(e);
  OrderedChildSet::const_iterator i = siblings->find(e);
  DCHECK(i != siblings->end());

  i++;
  if (i == siblings->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
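// Assigns |e|'s UNIQUE_POSITION so that it sorts immediately after
// |predecessor| among its siblings, or first among them when |predecessor| is
// NULL.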
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor == NULL);
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor == NULL) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  // Using a release CHECK here because the following UniquePosition::Between
  // call crashes anyway when the position string is empty (see crbug/332371).
  CHECK(successor->ref(UNIQUE_POSITION).IsValid()) << *successor;

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    result->push_back((*i)->ref(META_HANDLE));
  }
}

void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
  CHECK(trans);
  entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
}

void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
                                         ModelType type,
                                         AttachmentIdList* ids) {
  // TODO(maniscalco): Maintain an index by ModelType and rewrite this method
  // to use it.  The approach below is likely very expensive because it
  // iterates all entries (bug 415199).
  DCHECK(trans);
  DCHECK(ids);
  ids->clear();
  AttachmentIdSet on_server_id_set;
  AttachmentIdSet not_on_server_id_set;
  std::vector<int64_t> metahandles;
  {
    ScopedKernelLock lock(this);
    GetMetaHandlesOfType(lock, trans, type, &metahandles);
    std::vector<int64_t>::const_iterator iter = metahandles.begin();
    const std::vector<int64_t>::const_iterator end = metahandles.end();
    // For all of this type's entries...
    for (; iter != end; ++iter) {
      EntryKernel* entry = GetEntryByHandle(lock, *iter);
      DCHECK(entry);
      const sync_pb::AttachmentMetadata metadata =
          entry->ref(ATTACHMENT_METADATA);
      // ...and for each of those entries' attachments...
      for (int i = 0; i < metadata.record_size(); ++i) {
        AttachmentId id =
            AttachmentId::CreateFromProto(metadata.record(i).id());
        // ...if the attachment is known to be on the server, remember it for
        // later; otherwise, add it to not_on_server_id_set.
        if (metadata.record(i).is_on_server()) {
          on_server_id_set.insert(id);
        } else {
          not_on_server_id_set.insert(id);
        }
      }
    }
  }
  // Why did we bother keeping a set of ids known to be on the server?  The
  // is_on_server flag is stored denormalized, so we can end up with two
  // entries with the same attachment id where one says the attachment is on
  // the server and the other says it's not.  When this happens, we trust the
  // one that says it's on the server.  To avoid re-uploading the same
  // attachment multiple times, we remove any ids known to be on the server
  // from the set of ids we are about to return.
  //
  // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
  std::set_difference(not_on_server_id_set.begin(), not_on_server_id_set.end(),
                      on_server_id_set.begin(), on_server_id_set.end(),
                      std::back_inserter(*ids));
}

void Directory::OnCatastrophicError() {
  UMA_HISTOGRAM_BOOLEAN("Sync.DirectoryCatastrophicError", true);
  ReadTransaction trans(FROM_HERE, this);
  OnUnrecoverableError(&trans, FROM_HERE,
                       "Catastrophic error detected, Sync DB is unrecoverable");
}

Directory::Kernel* Directory::kernel() {
  return kernel_;
}

const Directory::Kernel* Directory::kernel() const {
  return kernel_;
}

}  // namespace syncable
}  // namespace syncer