Chromium Code Reviews

Side by Side Diff: components/sync/syncable/directory.cc

Issue 2130453004: [Sync] Move //sync to //components/sync. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase. Created 4 years, 4 months ago
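
Note for orientation (not part of the review): the change to this file is mechanical. Every include under the old //sync tree is rewritten to its new //components/sync location (the internal_api/public headers collapse into components/sync/base), and the surrounding lines pick up formatting-only reflow as part of the move. A representative before/after pair of includes taken from the diff below:

    // Old location (//sync tree):
    #include "sync/syncable/directory.h"
    #include "sync/internal_api/public/base/unique_position.h"

    // New location (//components/sync tree):
    #include "components/sync/syncable/directory.h"
    #include "components/sync/base/unique_position.h"

The full side-by-side diff follows (old line numbers and content on the left, new on the right).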
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "sync/syncable/directory.h" 5 #include "components/sync/syncable/directory.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <algorithm> 10 #include <algorithm>
11 #include <iterator> 11 #include <iterator>
12 #include <utility> 12 #include <utility>
13 13
14 #include "base/base64.h" 14 #include "base/base64.h"
15 #include "base/guid.h" 15 #include "base/guid.h"
16 #include "base/metrics/histogram.h" 16 #include "base/metrics/histogram.h"
17 #include "base/stl_util.h" 17 #include "base/stl_util.h"
18 #include "base/strings/string_number_conversions.h" 18 #include "base/strings/string_number_conversions.h"
19 #include "base/trace_event/trace_event.h" 19 #include "base/trace_event/trace_event.h"
20 #include "sync/internal_api/public/base/attachment_id_proto.h" 20 #include "components/sync/base/attachment_id_proto.h"
21 #include "sync/internal_api/public/base/unique_position.h" 21 #include "components/sync/base/unique_position.h"
22 #include "sync/internal_api/public/util/unrecoverable_error_handler.h" 22 #include "components/sync/base/unrecoverable_error_handler.h"
23 #include "sync/syncable/entry.h" 23 #include "components/sync/syncable/entry.h"
24 #include "sync/syncable/entry_kernel.h" 24 #include "components/sync/syncable/entry_kernel.h"
25 #include "sync/syncable/in_memory_directory_backing_store.h" 25 #include "components/sync/syncable/in_memory_directory_backing_store.h"
26 #include "sync/syncable/model_neutral_mutable_entry.h" 26 #include "components/sync/syncable/model_neutral_mutable_entry.h"
27 #include "sync/syncable/on_disk_directory_backing_store.h" 27 #include "components/sync/syncable/on_disk_directory_backing_store.h"
28 #include "sync/syncable/scoped_kernel_lock.h" 28 #include "components/sync/syncable/scoped_kernel_lock.h"
29 #include "sync/syncable/scoped_parent_child_index_updater.h" 29 #include "components/sync/syncable/scoped_parent_child_index_updater.h"
30 #include "sync/syncable/syncable-inl.h" 30 #include "components/sync/syncable/syncable-inl.h"
31 #include "sync/syncable/syncable_base_transaction.h" 31 #include "components/sync/syncable/syncable_base_transaction.h"
32 #include "sync/syncable/syncable_changes_version.h" 32 #include "components/sync/syncable/syncable_changes_version.h"
33 #include "sync/syncable/syncable_read_transaction.h" 33 #include "components/sync/syncable/syncable_read_transaction.h"
34 #include "sync/syncable/syncable_util.h" 34 #include "components/sync/syncable/syncable_util.h"
35 #include "sync/syncable/syncable_write_transaction.h" 35 #include "components/sync/syncable/syncable_write_transaction.h"
36 36
37 using std::string; 37 using std::string;
38 38
39 namespace syncer { 39 namespace syncer {
40 namespace syncable { 40 namespace syncable {
41 41
42 // static 42 // static
43 const base::FilePath::CharType Directory::kSyncDatabaseFilename[] = 43 const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
44 FILE_PATH_LITERAL("SyncData.sqlite3"); 44 FILE_PATH_LITERAL("SyncData.sqlite3");
45 45
(...skipping 20 matching lines...)
66 } 66 }
67 67
68 bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress( 68 bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
69 ModelType model_type) { 69 ModelType model_type) {
70 const sync_pb::DataTypeProgressMarker& progress_marker = 70 const sync_pb::DataTypeProgressMarker& progress_marker =
71 download_progress[model_type]; 71 download_progress[model_type];
72 return progress_marker.token().empty(); 72 return progress_marker.token().empty();
73 } 73 }
74 74
75 Directory::SaveChangesSnapshot::SaveChangesSnapshot() 75 Directory::SaveChangesSnapshot::SaveChangesSnapshot()
76 : kernel_info_status(KERNEL_SHARE_INFO_INVALID) { 76 : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {}
77 }
78 77
79 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() { 78 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
80 STLDeleteElements(&dirty_metas); 79 STLDeleteElements(&dirty_metas);
81 STLDeleteElements(&delete_journals); 80 STLDeleteElements(&delete_journals);
82 } 81 }
83 82
84 bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const { 83 bool Directory::SaveChangesSnapshot::HasUnsavedMetahandleChanges() const {
85 return !dirty_metas.empty() || !metahandles_to_purge.empty() || 84 return !dirty_metas.empty() || !metahandles_to_purge.empty() ||
86 !delete_journals.empty() || !delete_journals_to_purge.empty(); 85 !delete_journals.empty() || !delete_journals_to_purge.empty();
87 } 86 }
(...skipping 39 matching lines...)
127 Directory::~Directory() { 126 Directory::~Directory() {
128 Close(); 127 Close();
129 } 128 }
130 129
131 DirOpenResult Directory::Open( 130 DirOpenResult Directory::Open(
132 const string& name, 131 const string& name,
133 DirectoryChangeDelegate* delegate, 132 DirectoryChangeDelegate* delegate,
134 const WeakHandle<TransactionObserver>& transaction_observer) { 133 const WeakHandle<TransactionObserver>& transaction_observer) {
135 TRACE_EVENT0("sync", "SyncDatabaseOpen"); 134 TRACE_EVENT0("sync", "SyncDatabaseOpen");
136 135
137 const DirOpenResult result = 136 const DirOpenResult result = OpenImpl(name, delegate, transaction_observer);
138 OpenImpl(name, delegate, transaction_observer);
139 137
140 if (OPENED != result) 138 if (OPENED != result)
141 Close(); 139 Close();
142 return result; 140 return result;
143 } 141 }
144 142
145 void Directory::InitializeIndices(MetahandlesMap* handles_map) { 143 void Directory::InitializeIndices(MetahandlesMap* handles_map) {
146 ScopedKernelLock lock(this); 144 ScopedKernelLock lock(this);
147 kernel_->metahandles_map.swap(*handles_map); 145 kernel_->metahandles_map.swap(*handles_map);
148 for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin(); 146 for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
(...skipping 14 matching lines...)
163 << "Unexpected duplicate use of client tag"; 161 << "Unexpected duplicate use of client tag";
164 kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry; 162 kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
165 } 163 }
166 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { 164 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
167 DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) == 165 DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
168 kernel_->server_tags_map.end()) 166 kernel_->server_tags_map.end())
169 << "Unexpected duplicate use of server tag"; 167 << "Unexpected duplicate use of server tag";
170 kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry; 168 kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
171 } 169 }
172 DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) == 170 DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
173 kernel_->ids_map.end()) << "Unexpected duplicate use of ID"; 171 kernel_->ids_map.end())
172 << "Unexpected duplicate use of ID";
174 kernel_->ids_map[entry->ref(ID).value()] = entry; 173 kernel_->ids_map[entry->ref(ID).value()] = entry;
175 DCHECK(!entry->is_dirty()); 174 DCHECK(!entry->is_dirty());
176 AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA)); 175 AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
177 } 176 }
178 } 177 }
179 178
180 DirOpenResult Directory::OpenImpl( 179 DirOpenResult Directory::OpenImpl(
181 const string& name, 180 const string& name,
182 DirectoryChangeDelegate* delegate, 181 DirectoryChangeDelegate* delegate,
183 const WeakHandle<TransactionObserver>& 182 const WeakHandle<TransactionObserver>& transaction_observer) {
184 transaction_observer) {
185 KernelLoadInfo info; 183 KernelLoadInfo info;
186 // Temporary indices before kernel_ initialized in case Load fails. We O(1) 184 // Temporary indices before kernel_ initialized in case Load fails. We O(1)
187 // swap these later. 185 // swap these later.
188 Directory::MetahandlesMap tmp_handles_map; 186 Directory::MetahandlesMap tmp_handles_map;
189 187
190 // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after 188 // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
191 // the swap in the success case. 189 // the swap in the success case.
192 STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map); 190 STLValueDeleter<MetahandlesMap> deleter(&tmp_handles_map);
193 191
194 JournalIndex delete_journals; 192 JournalIndex delete_journals;
(...skipping 31 matching lines...)
226 void Directory::Close() { 224 void Directory::Close() {
227 store_.reset(); 225 store_.reset();
228 if (kernel_) { 226 if (kernel_) {
229 delete kernel_; 227 delete kernel_;
230 kernel_ = NULL; 228 kernel_ = NULL;
231 } 229 }
232 } 230 }
233 231
234 void Directory::OnUnrecoverableError(const BaseTransaction* trans, 232 void Directory::OnUnrecoverableError(const BaseTransaction* trans,
235 const tracked_objects::Location& location, 233 const tracked_objects::Location& location,
236 const std::string & message) { 234 const std::string& message) {
237 DCHECK(trans != NULL); 235 DCHECK(trans != NULL);
238 unrecoverable_error_set_ = true; 236 unrecoverable_error_set_ = true;
239 unrecoverable_error_handler_.Call( 237 unrecoverable_error_handler_.Call(
240 FROM_HERE, &UnrecoverableErrorHandler::OnUnrecoverableError, location, 238 FROM_HERE, &UnrecoverableErrorHandler::OnUnrecoverableError, location,
241 message); 239 message);
242 } 240 }
243 241
244 EntryKernel* Directory::GetEntryById(const Id& id) { 242 EntryKernel* Directory::GetEntryById(const Id& id) {
245 ScopedKernelLock lock(this); 243 ScopedKernelLock lock(this);
246 return GetEntryById(lock, id); 244 return GetEntryById(lock, id);
(...skipping 32 matching lines...)
279 } 277 }
280 278
281 EntryKernel* Directory::GetEntryByHandle(int64_t metahandle) { 279 EntryKernel* Directory::GetEntryByHandle(int64_t metahandle) {
282 ScopedKernelLock lock(this); 280 ScopedKernelLock lock(this);
283 return GetEntryByHandle(lock, metahandle); 281 return GetEntryByHandle(lock, metahandle);
284 } 282 }
285 283
286 EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock, 284 EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
287 int64_t metahandle) { 285 int64_t metahandle) {
288 // Look up in memory 286 // Look up in memory
289 MetahandlesMap::iterator found = 287 MetahandlesMap::iterator found = kernel_->metahandles_map.find(metahandle);
290 kernel_->metahandles_map.find(metahandle);
291 if (found != kernel_->metahandles_map.end()) { 288 if (found != kernel_->metahandles_map.end()) {
292 // Found it in memory. Easy. 289 // Found it in memory. Easy.
293 return found->second; 290 return found->second;
294 } 291 }
295 return NULL; 292 return NULL;
296 } 293 }
297 294
298 bool Directory::GetChildHandlesById( 295 bool Directory::GetChildHandlesById(BaseTransaction* trans,
299 BaseTransaction* trans, const Id& parent_id, 296 const Id& parent_id,
300 Directory::Metahandles* result) { 297 Directory::Metahandles* result) {
301 if (!SyncAssert(this == trans->directory(), FROM_HERE, 298 if (!SyncAssert(this == trans->directory(), FROM_HERE,
302 "Directories don't match", trans)) 299 "Directories don't match", trans))
303 return false; 300 return false;
304 result->clear(); 301 result->clear();
305 302
306 ScopedKernelLock lock(this); 303 ScopedKernelLock lock(this);
307 AppendChildHandles(lock, parent_id, result); 304 AppendChildHandles(lock, parent_id, result);
308 return true; 305 return true;
309 } 306 }
310 307
311 int Directory::GetTotalNodeCount( 308 int Directory::GetTotalNodeCount(BaseTransaction* trans,
312 BaseTransaction* trans, 309 EntryKernel* kernel) const {
313 EntryKernel* kernel) const {
314 if (!SyncAssert(this == trans->directory(), FROM_HERE, 310 if (!SyncAssert(this == trans->directory(), FROM_HERE,
315 "Directories don't match", trans)) 311 "Directories don't match", trans))
316 return false; 312 return false;
317 313
318 int count = 1; 314 int count = 1;
319 std::deque<const OrderedChildSet*> child_sets; 315 std::deque<const OrderedChildSet*> child_sets;
320 316
321 GetChildSetForKernel(trans, kernel, &child_sets); 317 GetChildSetForKernel(trans, kernel, &child_sets);
322 while (!child_sets.empty()) { 318 while (!child_sets.empty()) {
323 const OrderedChildSet* set = child_sets.front(); 319 const OrderedChildSet* set = child_sets.front();
324 child_sets.pop_front(); 320 child_sets.pop_front();
325 for (OrderedChildSet::const_iterator it = set->begin(); 321 for (OrderedChildSet::const_iterator it = set->begin(); it != set->end();
326 it != set->end(); ++it) { 322 ++it) {
327 count++; 323 count++;
328 GetChildSetForKernel(trans, *it, &child_sets); 324 GetChildSetForKernel(trans, *it, &child_sets);
329 } 325 }
330 } 326 }
331 327
332 return count; 328 return count;
333 } 329 }
334 330
335 void Directory::GetChildSetForKernel( 331 void Directory::GetChildSetForKernel(
336 BaseTransaction* trans, 332 BaseTransaction* trans,
337 EntryKernel* kernel, 333 EntryKernel* kernel,
338 std::deque<const OrderedChildSet*>* child_sets) const { 334 std::deque<const OrderedChildSet*>* child_sets) const {
339 if (!kernel->ref(IS_DIR)) 335 if (!kernel->ref(IS_DIR))
340 return; // Not a directory => no children. 336 return; // Not a directory => no children.
341 337
342 const OrderedChildSet* descendants = 338 const OrderedChildSet* descendants =
343 kernel_->parent_child_index.GetChildren(kernel->ref(ID)); 339 kernel_->parent_child_index.GetChildren(kernel->ref(ID));
344 if (!descendants) 340 if (!descendants)
345 return; // This directory has no children. 341 return; // This directory has no children.
346 342
347 // Add our children to the list of items to be traversed. 343 // Add our children to the list of items to be traversed.
348 child_sets->push_back(descendants); 344 child_sets->push_back(descendants);
349 } 345 }
350 346
351 int Directory::GetPositionIndex( 347 int Directory::GetPositionIndex(BaseTransaction* trans,
352 BaseTransaction* trans, 348 EntryKernel* kernel) const {
353 EntryKernel* kernel) const {
354 const OrderedChildSet* siblings = 349 const OrderedChildSet* siblings =
355 kernel_->parent_child_index.GetSiblings(kernel); 350 kernel_->parent_child_index.GetSiblings(kernel);
356 351
357 OrderedChildSet::const_iterator it = siblings->find(kernel); 352 OrderedChildSet::const_iterator it = siblings->find(kernel);
358 return std::distance(siblings->begin(), it); 353 return std::distance(siblings->begin(), it);
359 } 354 }
360 355
361 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) { 356 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
362 ScopedKernelLock lock(this); 357 ScopedKernelLock lock(this);
363 return InsertEntry(lock, trans, entry); 358 return InsertEntry(lock, trans, entry);
364 } 359 }
365 360
366 bool Directory::InsertEntry(const ScopedKernelLock& lock, 361 bool Directory::InsertEntry(const ScopedKernelLock& lock,
367 BaseWriteTransaction* trans, 362 BaseWriteTransaction* trans,
368 EntryKernel* entry) { 363 EntryKernel* entry) {
369 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans)) 364 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
370 return false; 365 return false;
371 366
372 static const char error[] = "Entry already in memory index."; 367 static const char error[] = "Entry already in memory index.";
373 368
374 if (!SyncAssert( 369 if (!SyncAssert(kernel_->metahandles_map
375 kernel_->metahandles_map.insert( 370 .insert(std::make_pair(entry->ref(META_HANDLE), entry))
376 std::make_pair(entry->ref(META_HANDLE), entry)).second, 371 .second,
377 FROM_HERE, 372 FROM_HERE, error, trans)) {
378 error,
379 trans)) {
380 return false; 373 return false;
381 } 374 }
382 if (!SyncAssert( 375 if (!SyncAssert(
383 kernel_->ids_map.insert( 376 kernel_->ids_map.insert(std::make_pair(entry->ref(ID).value(), entry))
384 std::make_pair(entry->ref(ID).value(), entry)).second, 377 .second,
385 FROM_HERE, 378 FROM_HERE, error, trans)) {
386 error,
387 trans)) {
388 return false; 379 return false;
389 } 380 }
390 if (ParentChildIndex::ShouldInclude(entry)) { 381 if (ParentChildIndex::ShouldInclude(entry)) {
391 if (!SyncAssert(kernel_->parent_child_index.Insert(entry), 382 if (!SyncAssert(kernel_->parent_child_index.Insert(entry), FROM_HERE, error,
392 FROM_HERE,
393 error,
394 trans)) { 383 trans)) {
395 return false; 384 return false;
396 } 385 }
397 } 386 }
398 AddToAttachmentIndex( 387 AddToAttachmentIndex(lock, entry->ref(META_HANDLE),
399 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA)); 388 entry->ref(ATTACHMENT_METADATA));
400 389
401 // Should NEVER be created with a client tag or server tag. 390 // Should NEVER be created with a client tag or server tag.
402 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE, 391 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
403 "Server tag should be empty", trans)) { 392 "Server tag should be empty", trans)) {
404 return false; 393 return false;
405 } 394 }
406 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE, 395 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
407 "Client tag should be empty", trans)) 396 "Client tag should be empty", trans))
408 return false; 397 return false;
409 398
410 return true; 399 return true;
411 } 400 }
412 401
413 bool Directory::ReindexId(BaseWriteTransaction* trans, 402 bool Directory::ReindexId(BaseWriteTransaction* trans,
414 EntryKernel* const entry, 403 EntryKernel* const entry,
415 const Id& new_id) { 404 const Id& new_id) {
416 ScopedKernelLock lock(this); 405 ScopedKernelLock lock(this);
417 if (NULL != GetEntryById(lock, new_id)) 406 if (NULL != GetEntryById(lock, new_id))
418 return false; 407 return false;
419 408
420 { 409 {
421 // Update the indices that depend on the ID field. 410 // Update the indices that depend on the ID field.
422 ScopedParentChildIndexUpdater updater_b(lock, entry, 411 ScopedParentChildIndexUpdater updater_b(lock, entry,
423 &kernel_->parent_child_index); 412 &kernel_->parent_child_index);
424 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); 413 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
425 DCHECK_EQ(1U, num_erased); 414 DCHECK_EQ(1U, num_erased);
426 entry->put(ID, new_id); 415 entry->put(ID, new_id);
427 kernel_->ids_map[entry->ref(ID).value()] = entry; 416 kernel_->ids_map[entry->ref(ID).value()] = entry;
428 } 417 }
429 return true; 418 return true;
430 } 419 }
431 420
432 bool Directory::ReindexParentId(BaseWriteTransaction* trans, 421 bool Directory::ReindexParentId(BaseWriteTransaction* trans,
433 EntryKernel* const entry, 422 EntryKernel* const entry,
434 const Id& new_parent_id) { 423 const Id& new_parent_id) {
435 ScopedKernelLock lock(this); 424 ScopedKernelLock lock(this);
436 425
437 { 426 {
438 // Update the indices that depend on the PARENT_ID field. 427 // Update the indices that depend on the PARENT_ID field.
439 ScopedParentChildIndexUpdater index_updater(lock, entry, 428 ScopedParentChildIndexUpdater index_updater(lock, entry,
440 &kernel_->parent_child_index); 429 &kernel_->parent_child_index);
441 entry->put(PARENT_ID, new_parent_id); 430 entry->put(PARENT_ID, new_parent_id);
442 } 431 }
443 return true; 432 return true;
444 } 433 }
445 434
446 void Directory::RemoveFromAttachmentIndex( 435 void Directory::RemoveFromAttachmentIndex(
447 const ScopedKernelLock& lock, 436 const ScopedKernelLock& lock,
448 const int64_t metahandle, 437 const int64_t metahandle,
449 const sync_pb::AttachmentMetadata& attachment_metadata) { 438 const sync_pb::AttachmentMetadata& attachment_metadata) {
450 for (int i = 0; i < attachment_metadata.record_size(); ++i) { 439 for (int i = 0; i < attachment_metadata.record_size(); ++i) {
(...skipping 13 matching lines...)
464 void Directory::AddToAttachmentIndex( 453 void Directory::AddToAttachmentIndex(
465 const ScopedKernelLock& lock, 454 const ScopedKernelLock& lock,
466 const int64_t metahandle, 455 const int64_t metahandle,
467 const sync_pb::AttachmentMetadata& attachment_metadata) { 456 const sync_pb::AttachmentMetadata& attachment_metadata) {
468 for (int i = 0; i < attachment_metadata.record_size(); ++i) { 457 for (int i = 0; i < attachment_metadata.record_size(); ++i) {
469 AttachmentIdUniqueId unique_id = 458 AttachmentIdUniqueId unique_id =
470 attachment_metadata.record(i).id().unique_id(); 459 attachment_metadata.record(i).id().unique_id();
471 IndexByAttachmentId::iterator iter = 460 IndexByAttachmentId::iterator iter =
472 kernel_->index_by_attachment_id.find(unique_id); 461 kernel_->index_by_attachment_id.find(unique_id);
473 if (iter == kernel_->index_by_attachment_id.end()) { 462 if (iter == kernel_->index_by_attachment_id.end()) {
474 iter = kernel_->index_by_attachment_id.insert(std::make_pair( 463 iter = kernel_->index_by_attachment_id
475 unique_id, 464 .insert(std::make_pair(unique_id, MetahandleSet()))
476 MetahandleSet())).first; 465 .first;
477 } 466 }
478 iter->second.insert(metahandle); 467 iter->second.insert(metahandle);
479 } 468 }
480 } 469 }
481 470
482 void Directory::UpdateAttachmentIndex( 471 void Directory::UpdateAttachmentIndex(
483 const int64_t metahandle, 472 const int64_t metahandle,
484 const sync_pb::AttachmentMetadata& old_metadata, 473 const sync_pb::AttachmentMetadata& old_metadata,
485 const sync_pb::AttachmentMetadata& new_metadata) { 474 const sync_pb::AttachmentMetadata& new_metadata) {
486 ScopedKernelLock lock(this); 475 ScopedKernelLock lock(this);
487 RemoveFromAttachmentIndex(lock, metahandle, old_metadata); 476 RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
488 AddToAttachmentIndex(lock, metahandle, new_metadata); 477 AddToAttachmentIndex(lock, metahandle, new_metadata);
489 } 478 }
490 479
491 void Directory::GetMetahandlesByAttachmentId( 480 void Directory::GetMetahandlesByAttachmentId(
492 BaseTransaction* trans, 481 BaseTransaction* trans,
493 const sync_pb::AttachmentIdProto& attachment_id_proto, 482 const sync_pb::AttachmentIdProto& attachment_id_proto,
494 Metahandles* result) { 483 Metahandles* result) {
495 DCHECK(result); 484 DCHECK(result);
496 result->clear(); 485 result->clear();
497 ScopedKernelLock lock(this); 486 ScopedKernelLock lock(this);
498 IndexByAttachmentId::const_iterator index_iter = 487 IndexByAttachmentId::const_iterator index_iter =
499 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id()); 488 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
500 if (index_iter == kernel_->index_by_attachment_id.end()) 489 if (index_iter == kernel_->index_by_attachment_id.end())
501 return; 490 return;
502 const MetahandleSet& metahandle_set = index_iter->second; 491 const MetahandleSet& metahandle_set = index_iter->second;
503 std::copy( 492 std::copy(metahandle_set.begin(), metahandle_set.end(),
504 metahandle_set.begin(), metahandle_set.end(), back_inserter(*result)); 493 back_inserter(*result));
505 } 494 }
506 495
507 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const { 496 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
508 DCHECK(trans != NULL); 497 DCHECK(trans != NULL);
509 return unrecoverable_error_set_; 498 return unrecoverable_error_set_;
510 } 499 }
511 500
512 void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) { 501 void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
513 kernel_->transaction_mutex.AssertAcquired(); 502 kernel_->transaction_mutex.AssertAcquired();
514 kernel_->dirty_metahandles.clear(); 503 kernel_->dirty_metahandles.clear();
515 } 504 }
516 505
517 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans, 506 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
518 const EntryKernel* const entry) const { 507 const EntryKernel* const entry) const {
519 bool safe = entry->ref(IS_DEL) && !entry->is_dirty() && 508 bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
520 !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) && 509 !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
521 !entry->ref(IS_UNSYNCED); 510 !entry->ref(IS_UNSYNCED);
522 511
523 if (safe) { 512 if (safe) {
524 int64_t handle = entry->ref(META_HANDLE); 513 int64_t handle = entry->ref(META_HANDLE);
525 const ModelType type = entry->GetServerModelType(); 514 const ModelType type = entry->GetServerModelType();
526 if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U, 515 if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U, FROM_HERE,
527 FROM_HERE,
528 "Dirty metahandles should be empty", trans)) 516 "Dirty metahandles should be empty", trans))
529 return false; 517 return false;
530 // TODO(tim): Bug 49278. 518 // TODO(tim): Bug 49278.
531 if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle), 519 if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle), FROM_HERE,
532 FROM_HERE, 520 "Unsynced handles should be empty", trans))
533 "Unsynced handles should be empty",
534 trans))
535 return false; 521 return false;
536 if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle), 522 if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
537 FROM_HERE, 523 FROM_HERE, "Unapplied metahandles should be empty", trans))
538 "Unapplied metahandles should be empty",
539 trans))
540 return false; 524 return false;
541 } 525 }
542 526
543 return safe; 527 return safe;
544 } 528 }
545 529
546 void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) { 530 void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
547 ReadTransaction trans(FROM_HERE, this); 531 ReadTransaction trans(FROM_HERE, this);
548 ScopedKernelLock lock(this); 532 ScopedKernelLock lock(this);
549 533
(...skipping 23 matching lines...)
573 // Set purged handles. 557 // Set purged handles.
574 DCHECK(snapshot->metahandles_to_purge.empty()); 558 DCHECK(snapshot->metahandles_to_purge.empty());
575 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge); 559 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);
576 560
577 // Fill kernel_info_status and kernel_info. 561 // Fill kernel_info_status and kernel_info.
578 snapshot->kernel_info = kernel_->persisted_info; 562 snapshot->kernel_info = kernel_->persisted_info;
579 snapshot->kernel_info_status = kernel_->info_status; 563 snapshot->kernel_info_status = kernel_->info_status;
580 // This one we reset on failure. 564 // This one we reset on failure.
581 kernel_->info_status = KERNEL_SHARE_INFO_VALID; 565 kernel_->info_status = KERNEL_SHARE_INFO_VALID;
582 566
583 delete_journal_->TakeSnapshotAndClear( 567 delete_journal_->TakeSnapshotAndClear(&trans, &snapshot->delete_journals,
584 &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge); 568 &snapshot->delete_journals_to_purge);
585 } 569 }
586 570
587 bool Directory::SaveChanges() { 571 bool Directory::SaveChanges() {
588 bool success = false; 572 bool success = false;
589 573
590 base::AutoLock scoped_lock(kernel_->save_changes_mutex); 574 base::AutoLock scoped_lock(kernel_->save_changes_mutex);
591 575
592 // Snapshot and save. 576 // Snapshot and save.
593 SaveChangesSnapshot snapshot; 577 SaveChangesSnapshot snapshot;
594 TakeSnapshotForSaveChanges(&snapshot); 578 TakeSnapshotForSaveChanges(&snapshot);
(...skipping 12 matching lines...)
607 return true; 591 return true;
608 592
609 // Need a write transaction as we are about to permanently purge entries. 593 // Need a write transaction as we are about to permanently purge entries.
610 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this); 594 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
611 ScopedKernelLock lock(this); 595 ScopedKernelLock lock(this);
612 // Now drop everything we can out of memory. 596 // Now drop everything we can out of memory.
613 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin(); 597 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
614 i != snapshot.dirty_metas.end(); ++i) { 598 i != snapshot.dirty_metas.end(); ++i) {
615 MetahandlesMap::iterator found = 599 MetahandlesMap::iterator found =
616 kernel_->metahandles_map.find((*i)->ref(META_HANDLE)); 600 kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
617 EntryKernel* entry = (found == kernel_->metahandles_map.end() ? 601 EntryKernel* entry =
618 NULL : found->second); 602 (found == kernel_->metahandles_map.end() ? NULL : found->second);
619 if (entry && SafeToPurgeFromMemory(&trans, entry)) { 603 if (entry && SafeToPurgeFromMemory(&trans, entry)) {
620 // We now drop deleted metahandles that are up to date on both the client 604 // We now drop deleted metahandles that are up to date on both the client
621 // and the server. 605 // and the server.
622 size_t num_erased = 0; 606 size_t num_erased = 0;
623 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE)); 607 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
624 DCHECK_EQ(1u, num_erased); 608 DCHECK_EQ(1u, num_erased);
625 num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); 609 num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
626 DCHECK_EQ(1u, num_erased); 610 DCHECK_EQ(1u, num_erased);
627 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { 611 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
628 num_erased = 612 num_erased =
629 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG)); 613 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
630 DCHECK_EQ(1u, num_erased); 614 DCHECK_EQ(1u, num_erased);
631 } 615 }
632 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { 616 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
633 num_erased = 617 num_erased =
634 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG)); 618 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
635 DCHECK_EQ(1u, num_erased); 619 DCHECK_EQ(1u, num_erased);
636 } 620 }
637 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), 621 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry), FROM_HERE,
638 FROM_HERE, 622 "Deleted entry still present", (&trans)))
639 "Deleted entry still present",
640 (&trans)))
641 return false; 623 return false;
642 RemoveFromAttachmentIndex( 624 RemoveFromAttachmentIndex(lock, entry->ref(META_HANDLE),
643 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA)); 625 entry->ref(ATTACHMENT_METADATA));
644 626
645 delete entry; 627 delete entry;
646 } 628 }
647 if (trans.unrecoverable_error_set()) 629 if (trans.unrecoverable_error_set())
648 return false; 630 return false;
649 } 631 }
650 return true; 632 return true;
651 } 633 }
652 634
653 void Directory::UnapplyEntry(EntryKernel* entry) { 635 void Directory::UnapplyEntry(EntryKernel* entry) {
654 int64_t handle = entry->ref(META_HANDLE); 636 int64_t handle = entry->ref(META_HANDLE);
655 ModelType server_type = GetModelTypeFromSpecifics( 637 ModelType server_type =
656 entry->ref(SERVER_SPECIFICS)); 638 GetModelTypeFromSpecifics(entry->ref(SERVER_SPECIFICS));
657 639
658 // Clear enough so that on the next sync cycle all local data will 640 // Clear enough so that on the next sync cycle all local data will
659 // be overwritten. 641 // be overwritten.
660 // Note: do not modify the root node in order to preserve the 642 // Note: do not modify the root node in order to preserve the
661 // initial sync ended bit for this type (else on the next restart 643 // initial sync ended bit for this type (else on the next restart
662 // this type will be treated as disabled and therefore fully purged). 644 // this type will be treated as disabled and therefore fully purged).
663 if (entry->ref(PARENT_ID).IsRoot()) { 645 if (entry->ref(PARENT_ID).IsRoot()) {
664 ModelType root_type = server_type; 646 ModelType root_type = server_type;
665 // Support both server created and client created type root folders. 647 // Support both server created and client created type root folders.
666 if (!IsRealDataType(root_type)) { 648 if (!IsRealDataType(root_type)) {
(...skipping 37 matching lines...)
704 // deleted items, and purged on the next snapshot. All other items will match 686 // deleted items, and purged on the next snapshot. All other items will match
705 // the state they would have had if they were just created via a server 687 // the state they would have had if they were just created via a server
706 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..). 688 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
707 } 689 }
708 690
709 void Directory::DeleteEntry(const ScopedKernelLock& lock, 691 void Directory::DeleteEntry(const ScopedKernelLock& lock,
710 bool save_to_journal, 692 bool save_to_journal,
711 EntryKernel* entry, 693 EntryKernel* entry,
712 EntryKernelSet* entries_to_journal) { 694 EntryKernelSet* entries_to_journal) {
713 int64_t handle = entry->ref(META_HANDLE); 695 int64_t handle = entry->ref(META_HANDLE);
714 ModelType server_type = GetModelTypeFromSpecifics( 696 ModelType server_type =
715 entry->ref(SERVER_SPECIFICS)); 697 GetModelTypeFromSpecifics(entry->ref(SERVER_SPECIFICS));
716 698
717 kernel_->metahandles_to_purge.insert(handle); 699 kernel_->metahandles_to_purge.insert(handle);
718 700
719 size_t num_erased = 0; 701 size_t num_erased = 0;
720 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE)); 702 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
721 DCHECK_EQ(1u, num_erased); 703 DCHECK_EQ(1u, num_erased);
722 num_erased = kernel_->ids_map.erase(entry->ref(ID).value()); 704 num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
723 DCHECK_EQ(1u, num_erased); 705 DCHECK_EQ(1u, num_erased);
724 num_erased = kernel_->unsynced_metahandles.erase(handle); 706 num_erased = kernel_->unsynced_metahandles.erase(handle);
725 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0); 707 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
726 num_erased = 708 num_erased = kernel_->unapplied_update_metahandles[server_type].erase(handle);
727 kernel_->unapplied_update_metahandles[server_type].erase(handle);
728 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0); 709 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
729 if (kernel_->parent_child_index.Contains(entry)) 710 if (kernel_->parent_child_index.Contains(entry))
730 kernel_->parent_child_index.Remove(entry); 711 kernel_->parent_child_index.Remove(entry);
731 712
732 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) { 713 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
733 num_erased = 714 num_erased = kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
734 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
735 DCHECK_EQ(1u, num_erased); 715 DCHECK_EQ(1u, num_erased);
736 } 716 }
737 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) { 717 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
738 num_erased = 718 num_erased = kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
739 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
740 DCHECK_EQ(1u, num_erased); 719 DCHECK_EQ(1u, num_erased);
741 } 720 }
742 RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA)); 721 RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));
743 722
744 if (save_to_journal) { 723 if (save_to_journal) {
745 entries_to_journal->insert(entry); 724 entries_to_journal->insert(entry);
746 } else { 725 } else {
747 delete entry; 726 delete entry;
748 } 727 }
749 } 728 }
(...skipping 52 matching lines...)
802 (delete_journal_->IsDeleteJournalEnabled(local_type) || 781 (delete_journal_->IsDeleteJournalEnabled(local_type) ||
803 delete_journal_->IsDeleteJournalEnabled(server_type)); 782 delete_journal_->IsDeleteJournalEnabled(server_type));
804 DeleteEntry(lock, save_to_journal, entry, &entries_to_journal); 783 DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
805 } 784 }
806 } 785 }
807 } 786 }
808 787
809 delete_journal_->AddJournalBatch(&trans, entries_to_journal); 788 delete_journal_->AddJournalBatch(&trans, entries_to_journal);
810 789
811 // Ensure meta tracking for these data types reflects the purged state. 790 // Ensure meta tracking for these data types reflects the purged state.
812 for (ModelTypeSet::Iterator it = disabled_types.First(); 791 for (ModelTypeSet::Iterator it = disabled_types.First(); it.Good();
813 it.Good(); it.Inc()) { 792 it.Inc()) {
814 kernel_->persisted_info.transaction_version[it.Get()] = 0; 793 kernel_->persisted_info.transaction_version[it.Get()] = 0;
815 794
816 // Don't discard progress markers or context for unapplied types. 795 // Don't discard progress markers or context for unapplied types.
817 if (!types_to_unapply.Has(it.Get())) { 796 if (!types_to_unapply.Has(it.Get())) {
818 kernel_->persisted_info.ResetDownloadProgress(it.Get()); 797 kernel_->persisted_info.ResetDownloadProgress(it.Get());
819 kernel_->persisted_info.datatype_context[it.Get()].Clear(); 798 kernel_->persisted_info.datatype_context[it.Get()].Clear();
820 } 799 }
821 } 800 }
822 801
823 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; 802 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
(...skipping 76 matching lines...)
900 } 879 }
901 880
902 void Directory::GetDownloadProgress( 881 void Directory::GetDownloadProgress(
903 ModelType model_type, 882 ModelType model_type,
904 sync_pb::DataTypeProgressMarker* value_out) const { 883 sync_pb::DataTypeProgressMarker* value_out) const {
905 ScopedKernelLock lock(this); 884 ScopedKernelLock lock(this);
906 return value_out->CopyFrom( 885 return value_out->CopyFrom(
907 kernel_->persisted_info.download_progress[model_type]); 886 kernel_->persisted_info.download_progress[model_type]);
908 } 887 }
909 888
910 void Directory::GetDownloadProgressAsString( 889 void Directory::GetDownloadProgressAsString(ModelType model_type,
911 ModelType model_type, 890 std::string* value_out) const {
912 std::string* value_out) const {
913 ScopedKernelLock lock(this); 891 ScopedKernelLock lock(this);
914 kernel_->persisted_info.download_progress[model_type].SerializeToString( 892 kernel_->persisted_info.download_progress[model_type].SerializeToString(
915 value_out); 893 value_out);
916 } 894 }
917 895
918 size_t Directory::GetEntriesCount() const { 896 size_t Directory::GetEntriesCount() const {
919 ScopedKernelLock lock(this); 897 ScopedKernelLock lock(this);
920 return kernel_->metahandles_map.size(); 898 return kernel_->metahandles_map.size();
921 } 899 }
922 900
(...skipping 21 matching lines...)
944 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; 922 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
945 } 923 }
946 924
947 void Directory::GetDataTypeContext(BaseTransaction* trans, 925 void Directory::GetDataTypeContext(BaseTransaction* trans,
948 ModelType type, 926 ModelType type,
949 sync_pb::DataTypeContext* context) const { 927 sync_pb::DataTypeContext* context) const {
950 ScopedKernelLock lock(this); 928 ScopedKernelLock lock(this);
951 context->CopyFrom(kernel_->persisted_info.datatype_context[type]); 929 context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
952 } 930 }
953 931
954 void Directory::SetDataTypeContext( 932 void Directory::SetDataTypeContext(BaseWriteTransaction* trans,
955 BaseWriteTransaction* trans, 933 ModelType type,
956 ModelType type, 934 const sync_pb::DataTypeContext& context) {
957 const sync_pb::DataTypeContext& context) {
958 ScopedKernelLock lock(this); 935 ScopedKernelLock lock(this);
959 kernel_->persisted_info.datatype_context[type].CopyFrom(context); 936 kernel_->persisted_info.datatype_context[type].CopyFrom(context);
960 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; 937 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
961 } 938 }
962 939
963 // TODO(stanisc): crbug.com/438313: change these to not rely on the folders. 940 // TODO(stanisc): crbug.com/438313: change these to not rely on the folders.
964 ModelTypeSet Directory::InitialSyncEndedTypes() { 941 ModelTypeSet Directory::InitialSyncEndedTypes() {
965 syncable::ReadTransaction trans(FROM_HERE, this); 942 syncable::ReadTransaction trans(FROM_HERE, this);
966 ModelTypeSet protocol_types = ProtocolTypes(); 943 ModelTypeSet protocol_types = ProtocolTypes();
967 ModelTypeSet initial_sync_ended_types; 944 ModelTypeSet initial_sync_ended_types;
968 for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) { 945 for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
969 if (InitialSyncEndedForType(&trans, i.Get())) { 946 if (InitialSyncEndedForType(&trans, i.Get())) {
970 initial_sync_ended_types.Put(i.Get()); 947 initial_sync_ended_types.Put(i.Get());
971 } 948 }
972 } 949 }
973 return initial_sync_ended_types; 950 return initial_sync_ended_types;
974 } 951 }
975 952
976 bool Directory::InitialSyncEndedForType(ModelType type) { 953 bool Directory::InitialSyncEndedForType(ModelType type) {
977 syncable::ReadTransaction trans(FROM_HERE, this); 954 syncable::ReadTransaction trans(FROM_HERE, this);
978 return InitialSyncEndedForType(&trans, type); 955 return InitialSyncEndedForType(&trans, type);
979 } 956 }
980 957
981 bool Directory::InitialSyncEndedForType( 958 bool Directory::InitialSyncEndedForType(BaseTransaction* trans,
982 BaseTransaction* trans, ModelType type) { 959 ModelType type) {
983 // True iff the type's root node has been created and changes 960 // True iff the type's root node has been created and changes
984 // for the type have been applied at least once. 961 // for the type have been applied at least once.
985 Entry root(trans, GET_TYPE_ROOT, type); 962 Entry root(trans, GET_TYPE_ROOT, type);
986 return root.good() && root.GetBaseVersion() != CHANGES_VERSION; 963 return root.good() && root.GetBaseVersion() != CHANGES_VERSION;
987 } 964 }
988 965
989 void Directory::MarkInitialSyncEndedForType(BaseWriteTransaction* trans, 966 void Directory::MarkInitialSyncEndedForType(BaseWriteTransaction* trans,
990 ModelType type) { 967 ModelType type) {
 991 // If the root folder is downloaded from the server, the root's base version 968 // If the root folder is downloaded from the server, the root's base version
 992 // gets updated automatically at the end of the update cycle when the update gets 969 // gets updated automatically at the end of the update cycle when the update gets
(...skipping 33 matching lines...)
1026 } 1003 }
1027 1004
1028 void Directory::set_bag_of_chips(const string& bag_of_chips) { 1005 void Directory::set_bag_of_chips(const string& bag_of_chips) {
1029 ScopedKernelLock lock(this); 1006 ScopedKernelLock lock(this);
1030 if (kernel_->persisted_info.bag_of_chips == bag_of_chips) 1007 if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
1031 return; 1008 return;
1032 kernel_->persisted_info.bag_of_chips = bag_of_chips; 1009 kernel_->persisted_info.bag_of_chips = bag_of_chips;
1033 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; 1010 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
1034 } 1011 }
1035 1012
1036
1037 string Directory::cache_guid() const { 1013 string Directory::cache_guid() const {
1038 // No need to lock since nothing ever writes to it after load. 1014 // No need to lock since nothing ever writes to it after load.
1039 return kernel_->cache_guid; 1015 return kernel_->cache_guid;
1040 } 1016 }
1041 1017
1042 NigoriHandler* Directory::GetNigoriHandler() { 1018 NigoriHandler* Directory::GetNigoriHandler() {
1043 return nigori_handler_; 1019 return nigori_handler_;
1044 } 1020 }
1045 1021
1046 Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) { 1022 Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
(...skipping 141 matching lines...)
1188 1164
1189 bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) { 1165 bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
1190 MetahandleSet handles; 1166 MetahandleSet handles;
1191 GetAllMetaHandles(trans, &handles); 1167 GetAllMetaHandles(trans, &handles);
1192 return CheckTreeInvariants(trans, handles); 1168 return CheckTreeInvariants(trans, handles);
1193 } 1169 }
1194 1170
1195 bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans, 1171 bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
1196 const MetahandleSet& handles) { 1172 const MetahandleSet& handles) {
1197 MetahandleSet::const_iterator i; 1173 MetahandleSet::const_iterator i;
1198 for (i = handles.begin() ; i != handles.end() ; ++i) { 1174 for (i = handles.begin(); i != handles.end(); ++i) {
1199 int64_t metahandle = *i; 1175 int64_t metahandle = *i;
1200 Entry e(trans, GET_BY_HANDLE, metahandle); 1176 Entry e(trans, GET_BY_HANDLE, metahandle);
1201 if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans)) 1177 if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
1202 return false; 1178 return false;
1203 syncable::Id id = e.GetId(); 1179 syncable::Id id = e.GetId();
1204 syncable::Id parentid = e.GetParentId(); 1180 syncable::Id parentid = e.GetParentId();
1205 1181
1206 if (id.IsRoot()) { 1182 if (id.IsRoot()) {
1207 if (!SyncAssert(e.GetIsDir(), FROM_HERE, 1183 if (!SyncAssert(e.GetIsDir(), FROM_HERE, "Entry should be a directory",
1208 "Entry should be a directory",
1209 trans)) 1184 trans))
1210 return false; 1185 return false;
1211 if (!SyncAssert(parentid.IsRoot(), FROM_HERE, 1186 if (!SyncAssert(parentid.IsRoot(), FROM_HERE, "Entry should be root",
1212 "Entry should be root",
1213 trans)) 1187 trans))
1214 return false; 1188 return false;
1215 if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced", 1189 if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE, "Entry should be synced",
1216 trans)) 1190 trans))
1217 return false; 1191 return false;
1218 continue; 1192 continue;
1219 } 1193 }
1220 1194
1221 if (!e.GetIsDel()) { 1195 if (!e.GetIsDel()) {
1222 if (!SyncAssert(id != parentid, FROM_HERE, 1196 if (!SyncAssert(id != parentid, FROM_HERE,
1223 "Id should be different from parent id.", 1197 "Id should be different from parent id.", trans))
1224 trans)) 1198 return false;
1225 return false;
1226 if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE, 1199 if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
1227 "Non unique name should not be empty.", 1200 "Non unique name should not be empty.", trans))
1228 trans))
1229 return false; 1201 return false;
1230 1202
1231 if (!parentid.IsNull()) { 1203 if (!parentid.IsNull()) {
1232 int safety_count = handles.size() + 1; 1204 int safety_count = handles.size() + 1;
1233 while (!parentid.IsRoot()) { 1205 while (!parentid.IsRoot()) {
1234 Entry parent(trans, GET_BY_ID, parentid); 1206 Entry parent(trans, GET_BY_ID, parentid);
1235 if (!SyncAssert(parent.good(), FROM_HERE, 1207 if (!SyncAssert(parent.good(), FROM_HERE,
1236 "Parent entry is not valid.", trans)) 1208 "Parent entry is not valid.", trans))
1237 return false; 1209 return false;
1238 if (handles.end() == handles.find(parent.GetMetahandle())) 1210 if (handles.end() == handles.find(parent.GetMetahandle()))
(...skipping 14 matching lines...) Expand all
1253 return false; 1225 return false;
1254 } 1226 }
1255 } 1227 }
1256 } 1228 }
1257 int64_t base_version = e.GetBaseVersion(); 1229 int64_t base_version = e.GetBaseVersion();
1258 int64_t server_version = e.GetServerVersion(); 1230 int64_t server_version = e.GetServerVersion();
1259 bool using_unique_client_tag = !e.GetUniqueClientTag().empty(); 1231 bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
1260 if (CHANGES_VERSION == base_version || 0 == base_version) { 1232 if (CHANGES_VERSION == base_version || 0 == base_version) {
1261 ModelType model_type = e.GetModelType(); 1233 ModelType model_type = e.GetModelType();
1262 bool is_client_creatable_type_root_folder = 1234 bool is_client_creatable_type_root_folder =
1263 parentid.IsRoot() && 1235 parentid.IsRoot() && IsTypeWithClientGeneratedRoot(model_type) &&
1264 IsTypeWithClientGeneratedRoot(model_type) &&
1265 e.GetUniqueServerTag() == ModelTypeToRootTag(model_type); 1236 e.GetUniqueServerTag() == ModelTypeToRootTag(model_type);
1266 if (e.GetIsUnappliedUpdate()) { 1237 if (e.GetIsUnappliedUpdate()) {
1267 // Must be a new item, or a de-duplicated unique client tag 1238 // Must be a new item, or a de-duplicated unique client tag
1268 // that was created both locally and remotely, or a type root folder 1239 // that was created both locally and remotely, or a type root folder
1269 // that was created both locally and remotely. 1240 // that was created both locally and remotely.
1270 if (!(using_unique_client_tag || 1241 if (!(using_unique_client_tag ||
1271 is_client_creatable_type_root_folder)) { 1242 is_client_creatable_type_root_folder)) {
1272 if (!SyncAssert(e.GetIsDel(), FROM_HERE, 1243 if (!SyncAssert(e.GetIsDel(), FROM_HERE,
1273 "The entry should have been deleted.", trans)) 1244 "The entry should have been deleted.", trans))
1274 return false; 1245 return false;
1275 } 1246 }
1276 // It came from the server, so it must have a server ID. 1247 // It came from the server, so it must have a server ID.
1277 if (!SyncAssert(id.ServerKnows(), FROM_HERE, 1248 if (!SyncAssert(id.ServerKnows(), FROM_HERE,
1278 "The id should be from a server.", 1249 "The id should be from a server.", trans))
1279 trans))
1280 return false; 1250 return false;
1281 } else { 1251 } else {
1282 if (e.GetIsDir()) { 1252 if (e.GetIsDir()) {
1283 // TODO(chron): Implement this mode if clients ever need it. 1253 // TODO(chron): Implement this mode if clients ever need it.
1284 // For now, you can't combine a client tag and a directory. 1254 // For now, you can't combine a client tag and a directory.
1285 if (!SyncAssert(!using_unique_client_tag, FROM_HERE, 1255 if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
1286 "Directory cannot have a client tag.", 1256 "Directory cannot have a client tag.", trans))
1287 trans))
1288 return false; 1257 return false;
1289 } 1258 }
1290 if (is_client_creatable_type_root_folder) { 1259 if (is_client_creatable_type_root_folder) {
1291 // This must be a locally created type root folder. 1260 // This must be a locally created type root folder.
1292 if (!SyncAssert( 1261 if (!SyncAssert(
1293 !e.GetIsUnsynced(), FROM_HERE, 1262 !e.GetIsUnsynced(), FROM_HERE,
1294 "Locally created type root folders should not be unsynced.", 1263 "Locally created type root folders should not be unsynced.",
1295 trans)) 1264 trans))
1296 return false; 1265 return false;
1297 1266
1298 if (!SyncAssert( 1267 if (!SyncAssert(
1299 !e.GetIsDel(), FROM_HERE, 1268 !e.GetIsDel(), FROM_HERE,
1300 "Locally created type root folders should not be deleted.", 1269 "Locally created type root folders should not be deleted.",
1301 trans)) 1270 trans))
1302 return false; 1271 return false;
1303 } else { 1272 } else {
 1304 // Should be an uncommitted item, or a successfully deleted one. 1273 // Should be an uncommitted item, or a successfully deleted one.
1305 if (!e.GetIsDel()) { 1274 if (!e.GetIsDel()) {
1306 if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE, 1275 if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
1307 "The item should be unsynced.", trans)) 1276 "The item should be unsynced.", trans))
1308 return false; 1277 return false;
1309 } 1278 }
1310 } 1279 }
1311 // If the next check failed, it would imply that an item exists 1280 // If the next check failed, it would imply that an item exists
1312 // on the server, isn't waiting for application locally, but either 1281 // on the server, isn't waiting for application locally, but either
1313 // is an unsynced create or a successful delete in the local copy. 1282 // is an unsynced create or a successful delete in the local copy.
1314 // Either way, that's a mismatch. 1283 // Either way, that's a mismatch.
1315 if (!SyncAssert(0 == server_version, FROM_HERE, 1284 if (!SyncAssert(0 == server_version, FROM_HERE,
1316 "Server version should be zero.", 1285 "Server version should be zero.", trans))
1317 trans))
1318 return false; 1286 return false;
1319 // Items that aren't using the unique client tag should have a zero 1287 // Items that aren't using the unique client tag should have a zero
1320 // base version only if they have a local ID. Items with unique client 1288 // base version only if they have a local ID. Items with unique client
1321 // tags are allowed to use the zero base version for undeletion and 1289 // tags are allowed to use the zero base version for undeletion and
1322 // de-duplication; the unique client tag trumps the server ID. 1290 // de-duplication; the unique client tag trumps the server ID.
1323 if (!using_unique_client_tag) { 1291 if (!using_unique_client_tag) {
1324 if (!SyncAssert(!id.ServerKnows(), FROM_HERE, 1292 if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
1325 "Should be a client only id.", 1293 "Should be a client only id.", trans))
1326 trans))
1327 return false; 1294 return false;
1328 } 1295 }
1329 } 1296 }
1330 } else { 1297 } else {
1331 if (!SyncAssert(id.ServerKnows(), 1298 if (!SyncAssert(id.ServerKnows(), FROM_HERE, "Should be a server id.",
1332 FROM_HERE,
1333 "Should be a server id.",
1334 trans)) 1299 trans))
1335 return false; 1300 return false;
1336 } 1301 }
1337 1302
1338 // Previously we would assert that locally deleted items that have never 1303 // Previously we would assert that locally deleted items that have never
1339 // been synced must not be sent to the server (IS_UNSYNCED must be false). 1304 // been synced must not be sent to the server (IS_UNSYNCED must be false).
1340 // This is not always true in the case that an item is deleted while the 1305 // This is not always true in the case that an item is deleted while the
1341 // initial commit is in flight. See crbug.com/426865. 1306 // initial commit is in flight. See crbug.com/426865.
1342 } 1307 }
1343 return true; 1308 return true;
(...skipping 101 matching lines...)
1445 UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION); 1410 UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
1446 1411
1447 UniquePosition pos; 1412 UniquePosition pos;
1448 if (!successor_pos.IsValid()) { 1413 if (!successor_pos.IsValid()) {
1449 // If all our successors are of non-positionable types, just create an 1414 // If all our successors are of non-positionable types, just create an
1450 // initial position. We arbitrarily choose to sort invalid positions to 1415 // initial position. We arbitrarily choose to sort invalid positions to
1451 // the right of the valid positions. 1416 // the right of the valid positions.
1452 // 1417 //
1453 // We really shouldn't need to support this. See TODO above. 1418 // We really shouldn't need to support this. See TODO above.
1454 pos = UniquePosition::InitialPosition(suffix); 1419 pos = UniquePosition::InitialPosition(suffix);
1455 } else { 1420 } else {
1456 DCHECK(!siblings->empty()); 1421 DCHECK(!siblings->empty());
1457 pos = UniquePosition::Before(successor_pos, suffix); 1422 pos = UniquePosition::Before(successor_pos, suffix);
1458 } 1423 }
1459 1424
1460 e->put(UNIQUE_POSITION, pos); 1425 e->put(UNIQUE_POSITION, pos);
1461 return; 1426 return;
1462 } 1427 }
1463 1428
1464 // We can't support placing an item after an invalid position. Fortunately, 1429 // We can't support placing an item after an invalid position. Fortunately,
1465 // the tests don't exercise this particular case. We should not support 1430 // the tests don't exercise this particular case. We should not support
1466 // siblings with invalid positions at all. See TODO above. 1431 // siblings with invalid positions at all. See TODO above.
1467 DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid()); 1432 DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());
1468 1433
1469 OrderedChildSet::const_iterator neighbour = siblings->find(predecessor); 1434 OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
1470 DCHECK(neighbour != siblings->end()); 1435 DCHECK(neighbour != siblings->end());
1471 1436
1472 ++neighbour; 1437 ++neighbour;
1473 if (neighbour == siblings->end()) { 1438 if (neighbour == siblings->end()) {
1474 // Inserting at the end of the list. 1439 // Inserting at the end of the list.
1475 UniquePosition pos = UniquePosition::After( 1440 UniquePosition pos =
1476 predecessor->ref(UNIQUE_POSITION), 1441 UniquePosition::After(predecessor->ref(UNIQUE_POSITION), suffix);
1477 suffix);
1478 e->put(UNIQUE_POSITION, pos); 1442 e->put(UNIQUE_POSITION, pos);
1479 return; 1443 return;
1480 } 1444 }
1481 1445
1482 EntryKernel* successor = *neighbour; 1446 EntryKernel* successor = *neighbour;
1483 1447
1484 // Another mixed valid and invalid position case. This one could be supported 1448 // Another mixed valid and invalid position case. This one could be supported
1485 // in theory, but we're trying to deprecate support for siblings with and 1449 // in theory, but we're trying to deprecate support for siblings with and
1486 // without valid positions. See TODO above. 1450 // without valid positions. See TODO above.
1487 // Using a release CHECK here because the following UniquePosition::Between 1451 // Using a release CHECK here because the following UniquePosition::Between
1488 // call crashes anyway when the position string is empty (see crbug/332371). 1452 // call crashes anyway when the position string is empty (see crbug/332371).
1489 CHECK(successor->ref(UNIQUE_POSITION).IsValid()) << *successor; 1453 CHECK(successor->ref(UNIQUE_POSITION).IsValid()) << *successor;
1490 1454
1491 // Finally, the normal case: inserting between two elements. 1455 // Finally, the normal case: inserting between two elements.
1492 UniquePosition pos = UniquePosition::Between( 1456 UniquePosition pos =
1493 predecessor->ref(UNIQUE_POSITION), 1457 UniquePosition::Between(predecessor->ref(UNIQUE_POSITION),
1494 successor->ref(UNIQUE_POSITION), 1458 successor->ref(UNIQUE_POSITION), suffix);
1495 suffix);
1496 e->put(UNIQUE_POSITION, pos); 1459 e->put(UNIQUE_POSITION, pos);
1497 return; 1460 return;
1498 } 1461 }
1499 1462
1500 // TODO(rlarocque): Avoid this indirection. Just return the set. 1463 // TODO(rlarocque): Avoid this indirection. Just return the set.
1501 void Directory::AppendChildHandles(const ScopedKernelLock& lock, 1464 void Directory::AppendChildHandles(const ScopedKernelLock& lock,
1502 const Id& parent_id, 1465 const Id& parent_id,
1503 Directory::Metahandles* result) { 1466 Directory::Metahandles* result) {
1504 const OrderedChildSet* children = 1467 const OrderedChildSet* children =
1505 kernel_->parent_child_index.GetChildren(parent_id); 1468 kernel_->parent_child_index.GetChildren(parent_id);
(...skipping 73 matching lines...)
1579 Directory::Kernel* Directory::kernel() { 1542 Directory::Kernel* Directory::kernel() {
1580 return kernel_; 1543 return kernel_;
1581 } 1544 }
1582 1545
1583 const Directory::Kernel* Directory::kernel() const { 1546 const Directory::Kernel* Directory::kernel() const {
1584 return kernel_; 1547 return kernel_;
1585 } 1548 }
1586 1549
1587 } // namespace syncable 1550 } // namespace syncable
1588 } // namespace syncer 1551 } // namespace syncer
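
For readers new to this file, a minimal usage sketch (not part of the patch) of the open/save entry points shown above. The helper name, its arguments, and the caller-supplied delegate, observer, and Directory instance are assumptions for illustration; the comments only restate behavior visible in this diff.

    // Illustrative sketch only -- not part of this CL. Uses only Directory
    // calls whose definitions appear in the diff above.
    #include <string>

    #include "components/sync/syncable/directory.h"

    namespace syncer {
    namespace syncable {

    bool OpenAndPersist(Directory* dir,
                        const std::string& name,
                        DirectoryChangeDelegate* delegate,
                        const WeakHandle<TransactionObserver>& observer) {
      // Open() runs OpenImpl() and already calls Close() on any failure, so
      // the caller only needs to check for OPENED.
      if (dir->Open(name, delegate, observer) != OPENED)
        return false;

      // SaveChanges() serializes on save_changes_mutex, snapshots dirty
      // entries and purged metahandles, and afterwards drops deleted entries
      // that are safe to purge from the in-memory indices.
      return dir->SaveChanges();
    }

    }  // namespace syncable
    }  // namespace syncer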