OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "chrome/browser/sync/syncable/syncable.h" | |
6 | |
7 #include <algorithm> | |
8 #include <cstring> | |
9 #include <functional> | |
10 #include <iomanip> | |
11 #include <iterator> | |
12 #include <limits> | |
13 #include <set> | |
14 #include <string> | |
15 | |
16 #include "base/basictypes.h" | |
17 #include "base/debug/trace_event.h" | |
18 #include "base/compiler_specific.h" | |
19 #include "base/debug/trace_event.h" | |
20 #include "base/file_util.h" | |
21 #include "base/hash_tables.h" | |
22 #include "base/location.h" | |
23 #include "base/logging.h" | |
24 #include "base/memory/scoped_ptr.h" | |
25 #include "base/perftimer.h" | |
26 #include "base/stl_util.h" | |
27 #include "base/string_number_conversions.h" | |
28 #include "base/string_util.h" | |
29 #include "base/time.h" | |
30 #include "base/utf_string_conversions.h" | |
31 #include "base/values.h" | |
32 #include "chrome/browser/sync/protocol/proto_value_conversions.h" | |
33 #include "chrome/browser/sync/protocol/service_constants.h" | |
34 #include "chrome/browser/sync/syncable/directory_backing_store.h" | |
35 #include "chrome/browser/sync/syncable/directory_change_delegate.h" | |
36 #include "chrome/browser/sync/syncable/in_memory_directory_backing_store.h" | |
37 #include "chrome/browser/sync/syncable/model_type.h" | |
38 #include "chrome/browser/sync/syncable/on_disk_directory_backing_store.h" | |
39 #include "chrome/browser/sync/syncable/syncable-inl.h" | |
40 #include "chrome/browser/sync/syncable/syncable_changes_version.h" | |
41 #include "chrome/browser/sync/syncable/syncable_columns.h" | |
42 #include "chrome/browser/sync/syncable/syncable_enum_conversions.h" | |
43 #include "chrome/browser/sync/syncable/transaction_observer.h" | |
44 #include "chrome/browser/sync/util/logging.h" | |
45 #include "chrome/browser/sync/util/cryptographer.h" | |
46 #include "net/base/escape.h" | |
47 | |
48 namespace { | |
49 | |
50 enum InvariantCheckLevel { | |
51 OFF = 0, | |
52 VERIFY_IN_MEMORY = 1, | |
53 FULL_DB_VERIFICATION = 2 | |
54 }; | |
55 | |
56 const InvariantCheckLevel kInvariantCheckLevel = VERIFY_IN_MEMORY; | |
57 | |
58 // Max number of milliseconds to spend checking syncable entry invariants | |
59 const int kInvariantCheckMaxMs = 50; | |
60 | |
61 // This function checks to see if the given list of Metahandles has any nodes | |
62 // whose PREV_ID, PARENT_ID or NEXT_ID values refer to ID values that do not | |
63 // actually exist. Returns true on success. | |
64 // | |
65 // This function is "Unsafe" because it does not attempt to acquire any locks | |
66 // that may be protecting this list that gets passed in. The caller is | |
67 // responsible for ensuring that no one modifies this list while the function is | |
68 // running. | |
69 bool VerifyReferenceIntegrityUnsafe(const syncable::MetahandlesIndex &index) { | |
70 TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck"); | |
71 using namespace syncable; | |
72 typedef base::hash_set<std::string> IdsSet; | |
73 | |
74 IdsSet ids_set; | |
75 bool is_ok = true; | |
76 | |
77 for (MetahandlesIndex::const_iterator it = index.begin(); | |
78 it != index.end(); ++it) { | |
79 EntryKernel* entry = *it; | |
80 bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second); | |
81 is_ok = is_ok && !is_duplicate_id; | |
82 } | |
83 | |
84 IdsSet::iterator end = ids_set.end(); | |
85 for (MetahandlesIndex::const_iterator it = index.begin(); | |
86 it != index.end(); ++it) { | |
87 EntryKernel* entry = *it; | |
88 bool prev_exists = (ids_set.find(entry->ref(PREV_ID).value()) != end); | |
89 bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end); | |
90 bool next_exists = (ids_set.find(entry->ref(NEXT_ID).value()) != end); | |
91 is_ok = is_ok && prev_exists && parent_exists && next_exists; | |
92 } | |
93 return is_ok; | |
94 } | |
95 | |
96 } // namespace | |
97 | |
98 using std::string; | |
99 using browser_sync::Encryptor; | |
100 using browser_sync::ReportUnrecoverableErrorFunction; | |
101 using browser_sync::UnrecoverableErrorHandler; | |
102 | |
103 namespace syncable { | |
104 | |
105 namespace { | |
106 | |
107 // Function to handle runtime failures on syncable code. Rather than crashing, | |
108 // if the |condition| is false the following will happen: | |
109 // 1. Sets unrecoverable error on transaction. | |
110 // 2. Returns false. | |
111 bool SyncAssert(bool condition, | |
112 const tracked_objects::Location& location, | |
113 const char* msg, | |
114 BaseTransaction* trans) { | |
115 if (!condition) { | |
116 trans->OnUnrecoverableError(location, msg); | |
117 return false; | |
118 } | |
119 return true; | |
120 } | |
121 | |
122 } // namespace | |
123 | |
124 #define ENUM_CASE(x) case x: return #x; break | |
125 | |
126 std::string WriterTagToString(WriterTag writer_tag) { | |
127 switch (writer_tag) { | |
128 ENUM_CASE(INVALID); | |
129 ENUM_CASE(SYNCER); | |
130 ENUM_CASE(AUTHWATCHER); | |
131 ENUM_CASE(UNITTEST); | |
132 ENUM_CASE(VACUUM_AFTER_SAVE); | |
133 ENUM_CASE(PURGE_ENTRIES); | |
134 ENUM_CASE(SYNCAPI); | |
135 }; | |
136 NOTREACHED(); | |
137 return ""; | |
138 } | |
139 | |
140 #undef ENUM_CASE | |
141 | |
// Captures the identifying details of one write transaction (id, call
// site, writer, and the entry mutations it performed) for debug output.
WriteTransactionInfo::WriteTransactionInfo(
    int64 id,
    tracked_objects::Location location,
    WriterTag writer,
    ImmutableEntryKernelMutationMap mutations)
    : id(id),
      location_string(location.ToString()),
      writer(writer),
      mutations(mutations) {}

// Default: sentinel id -1 and INVALID writer mark an uninitialized record.
WriteTransactionInfo::WriteTransactionInfo()
    : id(-1), writer(INVALID) {}

WriteTransactionInfo::~WriteTransactionInfo() {}
156 | |
157 base::DictionaryValue* WriteTransactionInfo::ToValue( | |
158 size_t max_mutations_size) const { | |
159 DictionaryValue* dict = new DictionaryValue(); | |
160 dict->SetString("id", base::Int64ToString(id)); | |
161 dict->SetString("location", location_string); | |
162 dict->SetString("writer", WriterTagToString(writer)); | |
163 Value* mutations_value = NULL; | |
164 const size_t mutations_size = mutations.Get().size(); | |
165 if (mutations_size <= max_mutations_size) { | |
166 mutations_value = EntryKernelMutationMapToValue(mutations.Get()); | |
167 } else { | |
168 mutations_value = | |
169 Value::CreateStringValue( | |
170 base::Uint64ToString(static_cast<uint64>(mutations_size)) + | |
171 " mutations"); | |
172 } | |
173 dict->Set("mutations", mutations_value); | |
174 return dict; | |
175 } | |
176 | |
177 DictionaryValue* EntryKernelMutationToValue( | |
178 const EntryKernelMutation& mutation) { | |
179 DictionaryValue* dict = new DictionaryValue(); | |
180 dict->Set("original", mutation.original.ToValue()); | |
181 dict->Set("mutated", mutation.mutated.ToValue()); | |
182 return dict; | |
183 } | |
184 | |
185 ListValue* EntryKernelMutationMapToValue( | |
186 const EntryKernelMutationMap& mutations) { | |
187 ListValue* list = new ListValue(); | |
188 for (EntryKernelMutationMap::const_iterator it = mutations.begin(); | |
189 it != mutations.end(); ++it) { | |
190 list->Append(EntryKernelMutationToValue(it->second)); | |
191 } | |
192 return list; | |
193 } | |
194 | |
195 namespace { | |
196 | |
197 // A ScopedIndexUpdater temporarily removes an entry from an index, | |
198 // and restores it to the index when the scope exits. This simplifies | |
199 // the common pattern where items need to be removed from an index | |
200 // before updating the field. | |
201 // | |
202 // This class is parameterized on the Indexer traits type, which | |
203 // must define a Comparator and a static bool ShouldInclude | |
204 // function for testing whether the item ought to be included | |
205 // in the index. | |
template<typename Indexer>
class ScopedIndexUpdater {
 public:
  // |proof_of_lock| is unused at runtime; requiring it proves the caller
  // holds the kernel lock for the duration of the update.
  ScopedIndexUpdater(const ScopedKernelLock& proof_of_lock,
                     EntryKernel* entry,
                     typename Index<Indexer>::Set* index)
      : entry_(entry),
        index_(index) {
    // First call to ShouldInclude happens before the field is updated.
    if (Indexer::ShouldInclude(entry_)) {
      // TODO(lipalani): Replace this CHECK with |SyncAssert| by refactorting
      // this class into a function.
      CHECK(index_->erase(entry_));
    }
  }

  ~ScopedIndexUpdater() {
    // Second call to ShouldInclude happens after the field is updated.
    if (Indexer::ShouldInclude(entry_)) {
      // TODO(lipalani): Replace this CHECK with |SyncAssert| by refactorting
      // this class into a function.
      CHECK(index_->insert(entry_).second);
    }
  }
 private:
  // The entry that was temporarily removed from the index.
  EntryKernel* entry_;
  // The index which we are updating.
  typename Index<Indexer>::Set* const index_;
};
236 | |
237 // Helper function to add an item to the index, if it ought to be added. | |
238 template<typename Indexer> | |
239 void InitializeIndexEntry(EntryKernel* entry, | |
240 typename Index<Indexer>::Set* index) { | |
241 if (Indexer::ShouldInclude(entry)) { | |
242 index->insert(entry); | |
243 } | |
244 } | |
245 | |
246 } // namespace | |
247 | |
248 /////////////////////////////////////////////////////////////////////////// | |
249 // Comparator and filter functions for the indices. | |
250 | |
251 // static | |
252 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) { | |
253 return !a->ref(UNIQUE_CLIENT_TAG).empty(); | |
254 } | |
255 | |
256 bool ParentIdAndHandleIndexer::Comparator::operator() ( | |
257 const syncable::EntryKernel* a, | |
258 const syncable::EntryKernel* b) const { | |
259 int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID)); | |
260 if (cmp != 0) | |
261 return cmp < 0; | |
262 | |
263 int64 a_position = a->ref(SERVER_POSITION_IN_PARENT); | |
264 int64 b_position = b->ref(SERVER_POSITION_IN_PARENT); | |
265 if (a_position != b_position) | |
266 return a_position < b_position; | |
267 | |
268 cmp = a->ref(ID).compare(b->ref(ID)); | |
269 return cmp < 0; | |
270 } | |
271 | |
272 // static | |
273 bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) { | |
274 // This index excludes deleted items and the root item. The root | |
275 // item is excluded so that it doesn't show up as a child of itself. | |
276 return !a->ref(IS_DEL) && !a->ref(ID).IsRoot(); | |
277 } | |
278 | |
279 /////////////////////////////////////////////////////////////////////////// | |
280 // EntryKernel | |
281 | |
282 EntryKernel::EntryKernel() : dirty_(false) { | |
283 // Everything else should already be default-initialized. | |
284 for (int i = INT64_FIELDS_BEGIN; i < INT64_FIELDS_END; ++i) { | |
285 int64_fields[i] = 0; | |
286 } | |
287 } | |
288 | |
289 EntryKernel::~EntryKernel() {} | |
290 | |
291 syncable::ModelType EntryKernel::GetServerModelType() const { | |
292 ModelType specifics_type = GetModelTypeFromSpecifics(ref(SERVER_SPECIFICS)); | |
293 if (specifics_type != UNSPECIFIED) | |
294 return specifics_type; | |
295 if (ref(ID).IsRoot()) | |
296 return TOP_LEVEL_FOLDER; | |
297 // Loose check for server-created top-level folders that aren't | |
298 // bound to a particular model type. | |
299 if (!ref(UNIQUE_SERVER_TAG).empty() && ref(SERVER_IS_DIR)) | |
300 return TOP_LEVEL_FOLDER; | |
301 | |
302 return UNSPECIFIED; | |
303 } | |
304 | |
305 bool EntryKernel::ContainsString(const std::string& lowercase_query) const { | |
306 // TODO(lipalani) - figure out what to do if the node is encrypted. | |
307 const sync_pb::EntitySpecifics& specifics = ref(SPECIFICS); | |
308 std::string temp; | |
309 // The protobuf serialized string contains the original strings. So | |
310 // we will just serialize it and search it. | |
311 specifics.SerializeToString(&temp); | |
312 | |
313 // Now convert to lower case. | |
314 StringToLowerASCII(&temp); | |
315 | |
316 if (temp.find(lowercase_query) != std::string::npos) | |
317 return true; | |
318 | |
319 // Now go through all the string fields to see if the value is there. | |
320 for (int i = STRING_FIELDS_BEGIN; i < STRING_FIELDS_END; ++i) { | |
321 if (StringToLowerASCII(ref(static_cast<StringField>(i))).find( | |
322 lowercase_query) != std::string::npos) | |
323 return true; | |
324 } | |
325 | |
326 for (int i = ID_FIELDS_BEGIN; i < ID_FIELDS_END; ++i) { | |
327 const Id& id = ref(static_cast<IdField>(i)); | |
328 if (id.ContainsStringCaseInsensitive(lowercase_query)) { | |
329 return true; | |
330 } | |
331 } | |
332 return false; | |
333 } | |
334 | |
335 namespace { | |
336 | |
337 // Utility function to loop through a set of enum values and add the | |
338 // field keys/values in the kernel to the given dictionary. | |
339 // | |
340 // V should be convertible to Value. | |
// T is the field-enum type, U the type kernel.ref(field) yields, and V a
// Value subclass. Iterates the inclusive range
// [field_key_min, field_key_max] and stores one key/value pair per field
// into |dictionary_value| (which takes ownership of each created Value).
template <class T, class U, class V>
void SetFieldValues(const EntryKernel& kernel,
                    DictionaryValue* dictionary_value,
                    const char* (*enum_key_fn)(T),
                    V* (*enum_value_fn)(U),
                    int field_key_min, int field_key_max) {
  DCHECK_LE(field_key_min, field_key_max);
  for (int i = field_key_min; i <= field_key_max; ++i) {
    T field = static_cast<T>(i);
    // Each field enum provides its own string key and Value representation.
    const std::string& key = enum_key_fn(field);
    V* value = enum_value_fn(kernel.ref(field));
    dictionary_value->Set(key, value);
  }
}
355 | |
356 // Helper functions for SetFieldValues(). | |
357 | |
// Adapts base::Int64ToString to the SetFieldValues() value-fn signature.
StringValue* Int64ToValue(int64 i) {
  return Value::CreateStringValue(base::Int64ToString(i));
}

// Renders a base::Time as a human-readable debug-string Value.
StringValue* TimeToValue(const base::Time& t) {
  return Value::CreateStringValue(browser_sync::GetTimeDebugString(t));
}

// Thin wrapper so Id serialization matches the value-fn signature.
StringValue* IdToValue(const Id& id) {
  return id.ToValue();
}
369 | |
370 } // namespace | |
371 | |
// Serializes every field group of this kernel into a new DictionaryValue
// for debugging output. Ownership of the returned dictionary (and all
// nested Values) passes to the caller.
DictionaryValue* EntryKernel::ToValue() const {
  DictionaryValue* kernel_info = new DictionaryValue();
  kernel_info->SetBoolean("isDirty", is_dirty());
  kernel_info->Set("serverModelType", ModelTypeToValue(GetServerModelType()));

  // Int64 fields. The range is split in three because META_HANDLE and
  // BASE_VERSION use dedicated key functions.
  SetFieldValues(*this, kernel_info,
                 &GetMetahandleFieldString, &Int64ToValue,
                 INT64_FIELDS_BEGIN, META_HANDLE);
  SetFieldValues(*this, kernel_info,
                 &GetBaseVersionString, &Int64ToValue,
                 META_HANDLE + 1, BASE_VERSION);
  SetFieldValues(*this, kernel_info,
                 &GetInt64FieldString, &Int64ToValue,
                 BASE_VERSION + 1, INT64_FIELDS_END - 1);

  // Time fields.
  SetFieldValues(*this, kernel_info,
                 &GetTimeFieldString, &TimeToValue,
                 TIME_FIELDS_BEGIN, TIME_FIELDS_END - 1);

  // ID fields.
  SetFieldValues(*this, kernel_info,
                 &GetIdFieldString, &IdToValue,
                 ID_FIELDS_BEGIN, ID_FIELDS_END - 1);

  // Bit fields, split around IS_DEL which has its own key function.
  SetFieldValues(*this, kernel_info,
                 &GetIndexedBitFieldString, &Value::CreateBooleanValue,
                 BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
  SetFieldValues(*this, kernel_info,
                 &GetIsDelFieldString, &Value::CreateBooleanValue,
                 INDEXED_BIT_FIELDS_END, IS_DEL);
  SetFieldValues(*this, kernel_info,
                 &GetBitFieldString, &Value::CreateBooleanValue,
                 IS_DEL + 1, BIT_FIELDS_END - 1);

  // String fields.
  {
    // Pick out the function overload we want.
    StringValue* (*string_to_value)(const std::string&) =
        &Value::CreateStringValue;
    SetFieldValues(*this, kernel_info,
                   &GetStringFieldString, string_to_value,
                   STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
  }

  // Proto fields.
  SetFieldValues(*this, kernel_info,
                 &GetProtoFieldString, &browser_sync::EntitySpecificsToValue,
                 PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);

  // Bit temps.
  SetFieldValues(*this, kernel_info,
                 &GetBitTempString, &Value::CreateBooleanValue,
                 BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);

  return kernel_info;
}
431 | |
432 /////////////////////////////////////////////////////////////////////////// | |
433 // Directory | |
434 | |
// static
// Name of the on-disk sqlite3 file that holds the sync metadata database.
const FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");
438 | |
// Test-only initialization: installs a fresh Kernel without opening any
// backing store. The directory must not already have a kernel.
void Directory::InitKernelForTest(
    const std::string& name,
    DirectoryChangeDelegate* delegate,
    const browser_sync::WeakHandle<TransactionObserver>&
        transaction_observer) {
  DCHECK(!kernel_);
  kernel_ = new Kernel(name, KernelLoadInfo(), delegate, transaction_observer);
}
447 | |
// Starts with no allocated IDs and empty download progress for every
// real (non-control) model type.
Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
    reset_download_progress(ModelTypeFromInt(i));
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
456 | |
// Resets the download progress marker for |model_type| to "no prior
// knowledge": the correct data-type id plus an empty token.
void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}
464 | |
// A snapshot starts out invalid; TakeSnapshotForSaveChanges() fills it in
// and marks the kernel info status accordingly.
Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {}
470 | |
// Constructs the shared kernel state for a Directory. The refcount starts
// at 1 on behalf of the owning Directory (see AddRef()/Release()). All
// index containers are heap-allocated here and freed in ~Kernel().
Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const browser_sync::WeakHandle<TransactionObserver>&
        transaction_observer)
    : refcount(1),
      next_write_transaction_id(0),
      name(name),
      metahandles_index(new Directory::MetahandlesIndex),
      ids_index(new Directory::IdsIndex),
      parent_id_child_index(new Directory::ParentIdChildIndex),
      client_tag_index(new Directory::ClientTagIndex),
      unsynced_metahandles(new MetahandleSet),
      dirty_metahandles(new MetahandleSet),
      metahandles_to_purge(new MetahandleSet),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),  // Continue past loaded max.
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}
495 | |
// Takes an additional reference on the kernel.
void Directory::Kernel::AddRef() {
  base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
}

// Drops a reference; whoever releases the final reference (decrement
// reaches zero) deletes the kernel.
void Directory::Kernel::Release() {
  if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1))
    delete this;
}
504 | |
Directory::Kernel::~Kernel() {
  CHECK_EQ(0, refcount);
  delete unsynced_metahandles;
  delete dirty_metahandles;
  delete metahandles_to_purge;
  delete parent_id_child_index;
  delete client_tag_index;
  delete ids_index;
  // metahandles_index owns the EntryKernel objects themselves; the other
  // indices above hold only aliasing pointers into it.
  STLDeleteElements(metahandles_index);
  delete metahandles_index;
}
516 | |
Directory::Directory(
    Encryptor* encryptor,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function)
    : cryptographer_(encryptor),
      kernel_(NULL),  // Created by Open*/InitKernelForTest.
      store_(NULL),   // Created by Open*; owned by this object.
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false) {
}

Directory::~Directory() {
  Close();
}
533 | |
// Opens the on-disk sync database at |file_path|. On any failure the
// partially-opened state is torn down via Close() before returning.
DirOpenResult Directory::Open(
    const FilePath& file_path, const string& name,
    DirectoryChangeDelegate* delegate,
    const browser_sync::WeakHandle<TransactionObserver>&
        transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  FilePath db_path(file_path);
  file_util::AbsolutePath(&db_path);
  // Ownership of |store| passes to OpenImpl(), which stores it in |store_|.
  DirectoryBackingStore* store = new OnDiskDirectoryBackingStore(name, db_path);

  const DirOpenResult result =
      OpenImpl(store, name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}
552 | |
// Test-only variant of Open() backed by an in-memory store instead of a
// file on disk.
DirOpenResult Directory::OpenInMemoryForTest(
    const string& name, DirectoryChangeDelegate* delegate,
    const browser_sync::WeakHandle<TransactionObserver>&
        transaction_observer) {

  // Ownership of |store| passes to OpenImpl(), which stores it in |store_|.
  DirectoryBackingStore* store = new InMemoryDirectoryBackingStore(name);

  const DirOpenResult result =
      OpenImpl(store, name, delegate, transaction_observer);
  if (OPENED != result)
    Close();
  return result;
}
566 | |
// Populates the secondary indices (parent/id/client-tag) and the
// unsynced/unapplied metahandle sets from the already-loaded
// metahandles_index. Called once from OpenImpl() after a successful load.
void Directory::InitializeIndices() {
  MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
  for (; it != kernel_->metahandles_index->end(); ++it) {
    EntryKernel* entry = *it;
    InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
                                                   kernel_->parent_id_child_index);
    InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
    InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles->insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    // Entries fresh from the backing store must start clean.
    DCHECK(!entry->is_dirty());
  }
}
585 | |
// Shared open path: loads entries from |store| (taking ownership of it),
// verifies referential integrity, then builds the kernel and indices.
DirOpenResult Directory::OpenImpl(
    DirectoryBackingStore* store,
    const string& name,
    DirectoryChangeDelegate* delegate,
    const browser_sync::WeakHandle<TransactionObserver>&
        transaction_observer) {
  DCHECK_EQ(static_cast<DirectoryBackingStore*>(NULL), store_);
  store_ = store;  // Take ownership; released in Close().

  KernelLoadInfo info;
  // Temporary indices before kernel_ initialized in case Load fails. We O(1)
  // swap these later.
  MetahandlesIndex metas_bucket;
  DirOpenResult result = store_->Load(&metas_bucket, &info);
  if (OPENED != result)
    return result;

  // Refuse to use a database whose entries reference IDs that don't exist.
  if (!VerifyReferenceIntegrityUnsafe(metas_bucket))
    return FAILED_LOGICAL_CORRUPTION;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  kernel_->metahandles_index->swap(metas_bucket);
  InitializeIndices();
  return OPENED;
}
611 | |
612 void Directory::Close() { | |
613 if (store_) | |
614 delete store_; | |
615 store_ = NULL; | |
616 if (kernel_) { | |
617 bool del = !base::subtle::NoBarrier_AtomicIncrement(&kernel_->refcount, -1); | |
618 DCHECK(del) << "Kernel should only have a single ref"; | |
619 if (del) | |
620 delete kernel_; | |
621 kernel_ = NULL; | |
622 } | |
623 } | |
624 | |
// Flags the directory as unrecoverably broken and forwards the report to
// the registered handler. |trans| serves only as evidence that a
// transaction is active when the error is raised.
void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string & message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}
633 | |
634 | |
// Convenience overload that acquires the kernel lock itself.
EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}
639 | |
640 EntryKernel* Directory::GetEntryById(const Id& id, | |
641 ScopedKernelLock* const lock) { | |
642 DCHECK(kernel_); | |
643 // Find it in the in memory ID index. | |
644 kernel_->needle.put(ID, id); | |
645 IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle); | |
646 if (id_found != kernel_->ids_index->end()) { | |
647 return *id_found; | |
648 } | |
649 return NULL; | |
650 } | |
651 | |
652 EntryKernel* Directory::GetEntryByClientTag(const string& tag) { | |
653 ScopedKernelLock lock(this); | |
654 DCHECK(kernel_); | |
655 // Find it in the ClientTagIndex. | |
656 kernel_->needle.put(UNIQUE_CLIENT_TAG, tag); | |
657 ClientTagIndex::iterator found = kernel_->client_tag_index->find( | |
658 &kernel_->needle); | |
659 if (found != kernel_->client_tag_index->end()) { | |
660 return *found; | |
661 } | |
662 return NULL; | |
663 } | |
664 | |
665 EntryKernel* Directory::GetEntryByServerTag(const string& tag) { | |
666 ScopedKernelLock lock(this); | |
667 DCHECK(kernel_); | |
668 // We don't currently keep a separate index for the tags. Since tags | |
669 // only exist for server created items that are the first items | |
670 // to be created in a store, they should have small metahandles. | |
671 // So, we just iterate over the items in sorted metahandle order, | |
672 // looking for a match. | |
673 MetahandlesIndex& set = *kernel_->metahandles_index; | |
674 for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) { | |
675 if ((*i)->ref(UNIQUE_SERVER_TAG) == tag) { | |
676 return *i; | |
677 } | |
678 } | |
679 return NULL; | |
680 } | |
681 | |
// Convenience overload that acquires the kernel lock itself.
EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}
686 | |
687 EntryKernel* Directory::GetEntryByHandle(int64 metahandle, | |
688 ScopedKernelLock* lock) { | |
689 // Look up in memory | |
690 kernel_->needle.put(META_HANDLE, metahandle); | |
691 MetahandlesIndex::iterator found = | |
692 kernel_->metahandles_index->find(&kernel_->needle); | |
693 if (found != kernel_->metahandles_index->end()) { | |
694 // Found it in memory. Easy. | |
695 return *found; | |
696 } | |
697 return NULL; | |
698 } | |
699 | |
// Fills |result| with the metahandles of |parent_id|'s children. Returns
// false only if |trans| belongs to a different directory (which sets an
// unrecoverable error).
bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::ChildHandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}
712 | |
// Like GetChildHandlesById() but takes the parent's metahandle. An
// unknown |handle| yields success with an empty |result|.
bool Directory::GetChildHandlesByHandle(
    BaseTransaction* trans, int64 handle,
    Directory::ChildHandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  result->clear();

  ScopedKernelLock lock(this);
  EntryKernel* kernel = GetEntryByHandle(handle, &lock);
  if (!kernel)
    return true;

  AppendChildHandles(lock, kernel->ref(ID), result);
  return true;
}
730 | |
// The root entry is the one with the default (null) ID.
EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}
734 | |
// Convenience overload that acquires the kernel lock itself.
bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}
739 | |
// Registers |entry| in every in-memory index. Returns false (after
// setting an unrecoverable error on |trans|) if the entry is NULL,
// already indexed, or carries a client tag — tags are only attached
// after creation.
bool Directory::InsertEntry(WriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";
  if (!SyncAssert(kernel_->metahandles_index->insert(entry).second,
                  FROM_HERE,
                  error,
                  trans))
    return false;

  // Deleted entries are excluded from the parent/position index.
  if (!entry->ref(IS_DEL)) {
    if (!SyncAssert(kernel_->parent_id_child_index->insert(entry).second,
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }
  if (!SyncAssert(kernel_->ids_index->insert(entry).second,
                  FROM_HERE,
                  error,
                  trans))
    return false;

  // Should NEVER be created with a client tag.
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client should be empty", trans))
    return false;

  return true;
}
775 | |
// Changes |entry|'s ID to |new_id|, keeping the ID-dependent indices
// consistent. Returns false if |new_id| is already in use.
bool Directory::ReindexId(WriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field. Each updater removes
    // |entry| now and re-inserts it under the new key when it leaves scope.
    ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
    ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
        kernel_->parent_id_child_index);
    entry->put(ID, new_id);
  }
  return true;
}
792 | |
// Moves |entry| under |new_parent_id|, keeping the parent/position index
// consistent. Always succeeds.
bool Directory::ReindexParentId(WriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field. The updater
    // removes |entry| now and re-inserts it under the new parent on exit.
    ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry,
        kernel_->parent_id_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}
806 | |
// Reports whether an unrecoverable error has been flagged. |trans| serves
// only as evidence that a transaction is open while reading the flag.
bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

// Empties the dirty-metahandle set. Caller must hold the transaction
// mutex (asserted below).
void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles->clear();
}
816 | |
// Returns true if |entry| may be evicted from the in-memory indices: it
// must be deleted, clean, fully synced, and not mid-sync. When those bits
// say it is safe, also cross-checks that no bookkeeping set still
// references the handle (raising an unrecoverable error if one does).
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles->count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles->count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}
845 | |
// Copies all dirty entries and pending purges into |snapshot| and clears
// the in-memory dirty state, so SaveChanges() can write to disk without
// holding the kernel lock during I/O.
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin();
       i != kernel_->dirty_metahandles->end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
    DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record the next_id to be
  // greater magnitude than could possibly be reached before the next save
  // changes. In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;
}
887 | |
888 bool Directory::SaveChanges() { | |
889 bool success = false; | |
890 DCHECK(store_); | |
891 | |
892 base::AutoLock scoped_lock(kernel_->save_changes_mutex); | |
893 | |
894 // Snapshot and save. | |
895 SaveChangesSnapshot snapshot; | |
896 TakeSnapshotForSaveChanges(&snapshot); | |
897 success = store_->SaveChanges(snapshot); | |
898 | |
899 // Handle success or failure. | |
900 if (success) | |
901 success = VacuumAfterSaveChanges(snapshot); | |
902 else | |
903 HandleSaveChangesFailure(snapshot); | |
904 return success; | |
905 } | |
906 | |
// Drops from memory any snapshotted entries that are deleted and fully
// synced on both client and server (see SafeToPurgeFromMemory).  Each purged
// entry must be removed from every index that references it before it is
// deleted.  Returns false if an invariant check fails or an unrecoverable
// error is set on the transaction.
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    // |needle| is a scratch kernel used only as a lookup key by handle.
    kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
    MetahandlesIndex::iterator found =
        kernel_->metahandles_index->find(&kernel_->needle);
    EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
                          NULL : *found);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->ids_index->erase(entry);
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->metahandles_index->erase(entry);
      DCHECK_EQ(1u, num_erased);

      // Might not be in it
      num_erased = kernel_->client_tag_index->erase(entry);
      DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
      if (!SyncAssert(!kernel_->parent_id_child_index->count(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}
946 | |
// Removes from memory every entry whose local or server model type is in
// |types|, recording its metahandle in |metahandles_to_purge| so the backing
// store deletes it on the next SaveChanges().  Also resets download progress
// and the initial-sync-ended bit for each purged type.
void Directory::PurgeEntriesWithTypeIn(ModelTypeSet types) {
  if (types.Empty())
    return;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);
    {
      ScopedKernelLock lock(this);
      MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
      while (it != kernel_->metahandles_index->end()) {
        const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        // Note the dance around incrementing |it|, since we sometimes erase().
        if ((IsRealDataType(local_type) && types.Has(local_type)) ||
            (IsRealDataType(server_type) && types.Has(server_type))) {
          if (!UnlinkEntryFromOrder(*it, NULL, &lock))
            return;

          int64 handle = (*it)->ref(META_HANDLE);
          kernel_->metahandles_to_purge->insert(handle);

          // Remove the entry from every index that may reference it; each
          // DCHECK cross-checks the erase count against the entry's flags.
          size_t num_erased = 0;
          EntryKernel* entry = *it;
          num_erased = kernel_->ids_index->erase(entry);
          DCHECK_EQ(1u, num_erased);
          num_erased = kernel_->client_tag_index->erase(entry);
          DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
          num_erased = kernel_->unsynced_metahandles->erase(handle);
          DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
          num_erased =
              kernel_->unapplied_update_metahandles[server_type].erase(handle);
          DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
          num_erased = kernel_->parent_id_child_index->erase(entry);
          DCHECK_EQ(entry->ref(IS_DEL), !num_erased);
          kernel_->metahandles_index->erase(it++);
          delete entry;
        } else {
          ++it;
        }
      }

      // Ensure meta tracking for these data types reflects the deleted state.
      for (syncable::ModelTypeSet::Iterator it = types.First();
           it.Good(); it.Inc()) {
        set_initial_sync_ended_for_type_unsafe(it.Get(), false);
        kernel_->persisted_info.reset_download_progress(it.Get());
      }
    }
  }
}
1001 | |
// Called when the backing store failed to persist |snapshot|.  Re-marks the
// snapshotted entries dirty and restores the purge set, so the next
// SaveChanges() will retry the same work.
void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure. Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again. Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    // Look up the live entry by metahandle via the scratch |needle| key.
    kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
    MetahandlesIndex::iterator found =
        kernel_->metahandles_index->find(&kernel_->needle);
    if (found != kernel_->metahandles_index->end()) {
      (*found)->mark_dirty(kernel_->dirty_metahandles);
    }
  }

  // Restore the handles that TakeSnapshotForSaveChanges() swapped out.
  kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
                                        snapshot.metahandles_to_purge.end());
}
1024 | |
1025 void Directory::GetDownloadProgress( | |
1026 ModelType model_type, | |
1027 sync_pb::DataTypeProgressMarker* value_out) const { | |
1028 ScopedKernelLock lock(this); | |
1029 return value_out->CopyFrom( | |
1030 kernel_->persisted_info.download_progress[model_type]); | |
1031 } | |
1032 | |
1033 void Directory::GetDownloadProgressAsString( | |
1034 ModelType model_type, | |
1035 std::string* value_out) const { | |
1036 ScopedKernelLock lock(this); | |
1037 kernel_->persisted_info.download_progress[model_type].SerializeToString( | |
1038 value_out); | |
1039 } | |
1040 | |
1041 size_t Directory::GetEntriesCount() const { | |
1042 ScopedKernelLock lock(this); | |
1043 return kernel_->metahandles_index ? kernel_->metahandles_index->size() : 0; | |
1044 } | |
1045 | |
1046 void Directory::SetDownloadProgress( | |
1047 ModelType model_type, | |
1048 const sync_pb::DataTypeProgressMarker& new_progress) { | |
1049 ScopedKernelLock lock(this); | |
1050 kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress); | |
1051 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | |
1052 } | |
1053 | |
1054 bool Directory::initial_sync_ended_for_type(ModelType type) const { | |
1055 ScopedKernelLock lock(this); | |
1056 return kernel_->persisted_info.initial_sync_ended.Has(type); | |
1057 } | |
1058 | |
1059 template <class T> void Directory::TestAndSet( | |
1060 T* kernel_data, const T* data_to_set) { | |
1061 if (*kernel_data != *data_to_set) { | |
1062 *kernel_data = *data_to_set; | |
1063 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | |
1064 } | |
1065 } | |
1066 | |
// Thread-safe wrapper around set_initial_sync_ended_for_type_unsafe().
void Directory::set_initial_sync_ended_for_type(ModelType type, bool x) {
  ScopedKernelLock lock(this);
  set_initial_sync_ended_for_type_unsafe(type, x);
}
1071 | |
1072 void Directory::set_initial_sync_ended_for_type_unsafe(ModelType type, | |
1073 bool x) { | |
1074 if (kernel_->persisted_info.initial_sync_ended.Has(type) == x) | |
1075 return; | |
1076 if (x) { | |
1077 kernel_->persisted_info.initial_sync_ended.Put(type); | |
1078 } else { | |
1079 kernel_->persisted_info.initial_sync_ended.Remove(type); | |
1080 } | |
1081 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | |
1082 } | |
1083 | |
1084 void Directory::SetNotificationStateUnsafe( | |
1085 const std::string& notification_state) { | |
1086 if (notification_state == kernel_->persisted_info.notification_state) | |
1087 return; | |
1088 kernel_->persisted_info.notification_state = notification_state; | |
1089 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | |
1090 } | |
1091 | |
// Returns a copy of the server-assigned store birthday.
string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}
1096 | |
1097 void Directory::set_store_birthday(const string& store_birthday) { | |
1098 ScopedKernelLock lock(this); | |
1099 if (kernel_->persisted_info.store_birthday == store_birthday) | |
1100 return; | |
1101 kernel_->persisted_info.store_birthday = store_birthday; | |
1102 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | |
1103 } | |
1104 | |
1105 std::string Directory::GetNotificationState() const { | |
1106 ScopedKernelLock lock(this); | |
1107 std::string notification_state = kernel_->persisted_info.notification_state; | |
1108 return notification_state; | |
1109 } | |
1110 | |
// Thread-safe wrapper around SetNotificationStateUnsafe().
void Directory::SetNotificationState(const std::string& notification_state) {
  ScopedKernelLock lock(this);
  SetNotificationStateUnsafe(notification_state);
}
1115 | |
// Returns this client's cache GUID.
string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}
1120 | |
// Returns the directory's cryptographer.  |trans| is only used to assert the
// caller holds a transaction on this directory.
browser_sync::Cryptographer* Directory::GetCryptographer(
    const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return &cryptographer_;
}
1126 | |
1127 void Directory::GetAllMetaHandles(BaseTransaction* trans, | |
1128 MetahandleSet* result) { | |
1129 result->clear(); | |
1130 ScopedKernelLock lock(this); | |
1131 MetahandlesIndex::iterator i; | |
1132 for (i = kernel_->metahandles_index->begin(); | |
1133 i != kernel_->metahandles_index->end(); | |
1134 ++i) { | |
1135 result->insert((*i)->ref(META_HANDLE)); | |
1136 } | |
1137 } | |
1138 | |
1139 void Directory::GetAllEntryKernels(BaseTransaction* trans, | |
1140 std::vector<const EntryKernel*>* result) { | |
1141 result->clear(); | |
1142 ScopedKernelLock lock(this); | |
1143 result->insert(result->end(), | |
1144 kernel_->metahandles_index->begin(), | |
1145 kernel_->metahandles_index->end()); | |
1146 } | |
1147 | |
1148 void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans, | |
1149 UnsyncedMetaHandles* result) { | |
1150 result->clear(); | |
1151 ScopedKernelLock lock(this); | |
1152 copy(kernel_->unsynced_metahandles->begin(), | |
1153 kernel_->unsynced_metahandles->end(), back_inserter(*result)); | |
1154 } | |
1155 | |
// Returns the number of entries with local changes awaiting commit.
int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles->size();
}
1160 | |
1161 FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates( | |
1162 BaseTransaction* trans) const { | |
1163 syncable::FullModelTypeSet server_types; | |
1164 ScopedKernelLock lock(this); | |
1165 for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) { | |
1166 const ModelType type = ModelTypeFromInt(i); | |
1167 if (!kernel_->unapplied_update_metahandles[type].empty()) { | |
1168 server_types.Put(type); | |
1169 } | |
1170 } | |
1171 return server_types; | |
1172 } | |
1173 | |
1174 void Directory::GetUnappliedUpdateMetaHandles( | |
1175 BaseTransaction* trans, | |
1176 FullModelTypeSet server_types, | |
1177 UnappliedUpdateMetaHandles* result) { | |
1178 result->clear(); | |
1179 ScopedKernelLock lock(this); | |
1180 for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) { | |
1181 const ModelType type = ModelTypeFromInt(i); | |
1182 if (server_types.Has(type)) { | |
1183 std::copy(kernel_->unapplied_update_metahandles[type].begin(), | |
1184 kernel_->unapplied_update_metahandles[type].end(), | |
1185 back_inserter(*result)); | |
1186 } | |
1187 } | |
1188 } | |
1189 | |
1190 | |
// Interface used by CheckTreeInvariants to decide which parent ids are worth
// following while walking up an entry's ancestor chain.
class IdFilter {
 public:
  virtual ~IdFilter() { }
  // Returns true if |id| should be examined by the invariant check.
  virtual bool ShouldConsider(const Id& id) const = 0;
};
1196 | |
1197 | |
// IdFilter that accepts every id; used for full-database invariant checks.
class FullScanFilter : public IdFilter {
 public:
  virtual bool ShouldConsider(const Id& id) const {
    return true;
  }
};
1204 | |
// IdFilter that accepts only a fixed set of ids.  Callers must sort |ids_|
// before use, since lookup relies on std::binary_search.
class SomeIdsFilter : public IdFilter {
 public:
  virtual bool ShouldConsider(const Id& id) const {
    return std::binary_search(ids_.begin(), ids_.end(), id);
  }
  // Sorted list of ids to consider.
  std::vector<Id> ids_;
};
1212 | |
1213 bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans, | |
1214 const EntryKernelMutationMap& mutations) { | |
1215 MetahandleSet handles; | |
1216 SomeIdsFilter filter; | |
1217 filter.ids_.reserve(mutations.size()); | |
1218 for (EntryKernelMutationMap::const_iterator it = mutations.begin(), | |
1219 end = mutations.end(); it != end; ++it) { | |
1220 filter.ids_.push_back(it->second.mutated.ref(ID)); | |
1221 handles.insert(it->first); | |
1222 } | |
1223 std::sort(filter.ids_.begin(), filter.ids_.end()); | |
1224 if (!CheckTreeInvariants(trans, handles, filter)) | |
1225 return false; | |
1226 return true; | |
1227 } | |
1228 | |
1229 bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans, | |
1230 bool full_scan) { | |
1231 // TODO(timsteele): This is called every time a WriteTransaction finishes. | |
1232 // The performance hit is substantial given that we now examine every single | |
1233 // syncable entry. Need to redesign this. | |
1234 MetahandleSet handles; | |
1235 GetAllMetaHandles(trans, &handles); | |
1236 if (full_scan) { | |
1237 FullScanFilter fullfilter; | |
1238 if (!CheckTreeInvariants(trans, handles, fullfilter)) | |
1239 return false; | |
1240 } else { | |
1241 SomeIdsFilter filter; | |
1242 MetahandleSet::iterator i; | |
1243 for (i = handles.begin() ; i != handles.end() ; ++i) { | |
1244 Entry e(trans, GET_BY_HANDLE, *i); | |
1245 if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans)) | |
1246 return false; | |
1247 filter.ids_.push_back(e.Get(ID)); | |
1248 } | |
1249 std::sort(filter.ids_.begin(), filter.ids_.end()); | |
1250 if (!CheckTreeInvariants(trans, handles, filter)) | |
1251 return false; | |
1252 } | |
1253 return true; | |
1254 } | |
1255 | |
1256 bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans, | |
1257 const MetahandleSet& handles, | |
1258 const IdFilter& idfilter) { | |
1259 const int64 max_ms = kInvariantCheckMaxMs; | |
1260 PerfTimer check_timer; | |
1261 MetahandleSet::const_iterator i; | |
1262 int entries_done = 0; | |
1263 for (i = handles.begin() ; i != handles.end() ; ++i) { | |
1264 int64 metahandle = *i; | |
1265 Entry e(trans, GET_BY_HANDLE, metahandle); | |
1266 if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans)) | |
1267 return false; | |
1268 syncable::Id id = e.Get(ID); | |
1269 syncable::Id parentid = e.Get(PARENT_ID); | |
1270 | |
1271 if (id.IsRoot()) { | |
1272 if (!SyncAssert(e.Get(IS_DIR), FROM_HERE, | |
1273 "Entry should be a directory", | |
1274 trans)) | |
1275 return false; | |
1276 if (!SyncAssert(parentid.IsRoot(), FROM_HERE, | |
1277 "Entry should be root", | |
1278 trans)) | |
1279 return false; | |
1280 if (!SyncAssert(!e.Get(IS_UNSYNCED), FROM_HERE, | |
1281 "Entry should be sycned", | |
1282 trans)) | |
1283 return false; | |
1284 ++entries_done; | |
1285 continue; | |
1286 } | |
1287 | |
1288 if (!e.Get(IS_DEL)) { | |
1289 if (!SyncAssert(id != parentid, FROM_HERE, | |
1290 "Id should be different from parent id.", | |
1291 trans)) | |
1292 return false; | |
1293 if (!SyncAssert(!e.Get(NON_UNIQUE_NAME).empty(), FROM_HERE, | |
1294 "Non unique name should not be empty.", | |
1295 trans)) | |
1296 return false; | |
1297 int safety_count = handles.size() + 1; | |
1298 while (!parentid.IsRoot()) { | |
1299 if (!idfilter.ShouldConsider(parentid)) | |
1300 break; | |
1301 Entry parent(trans, GET_BY_ID, parentid); | |
1302 if (!SyncAssert(parent.good(), FROM_HERE, | |
1303 "Parent entry is not valid.", | |
1304 trans)) | |
1305 return false; | |
1306 if (!SyncAssert(parent.Get(IS_DIR), FROM_HERE, | |
1307 "Parent should be a directory", | |
1308 trans)) | |
1309 return false; | |
1310 if (!SyncAssert(!parent.Get(IS_DEL), FROM_HERE, | |
1311 "Parent should not have been marked for deletion.", | |
1312 trans)) | |
1313 return false; | |
1314 if (!SyncAssert(handles.end() != handles.find(parent.Get(META_HANDLE)), | |
1315 FROM_HERE, | |
1316 "Parent should be in the index.", | |
1317 trans)) | |
1318 return false; | |
1319 parentid = parent.Get(PARENT_ID); | |
1320 if (!SyncAssert(--safety_count > 0, FROM_HERE, | |
1321 "Count should be greater than zero.", | |
1322 trans)) | |
1323 return false; | |
1324 } | |
1325 } | |
1326 int64 base_version = e.Get(BASE_VERSION); | |
1327 int64 server_version = e.Get(SERVER_VERSION); | |
1328 bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty(); | |
1329 if (CHANGES_VERSION == base_version || 0 == base_version) { | |
1330 if (e.Get(IS_UNAPPLIED_UPDATE)) { | |
1331 // Must be a new item, or a de-duplicated unique client tag | |
1332 // that was created both locally and remotely. | |
1333 if (!using_unique_client_tag) { | |
1334 if (!SyncAssert(e.Get(IS_DEL), FROM_HERE, | |
1335 "The entry should not have been deleted.", | |
1336 trans)) | |
1337 return false; | |
1338 } | |
1339 // It came from the server, so it must have a server ID. | |
1340 if (!SyncAssert(id.ServerKnows(), FROM_HERE, | |
1341 "The id should be from a server.", | |
1342 trans)) | |
1343 return false; | |
1344 } else { | |
1345 if (e.Get(IS_DIR)) { | |
1346 // TODO(chron): Implement this mode if clients ever need it. | |
1347 // For now, you can't combine a client tag and a directory. | |
1348 if (!SyncAssert(!using_unique_client_tag, FROM_HERE, | |
1349 "Directory cannot have a client tag.", | |
1350 trans)) | |
1351 return false; | |
1352 } | |
1353 // Should be an uncomitted item, or a successfully deleted one. | |
1354 if (!e.Get(IS_DEL)) { | |
1355 if (!SyncAssert(e.Get(IS_UNSYNCED), FROM_HERE, | |
1356 "The item should be unsynced.", | |
1357 trans)) | |
1358 return false; | |
1359 } | |
1360 // If the next check failed, it would imply that an item exists | |
1361 // on the server, isn't waiting for application locally, but either | |
1362 // is an unsynced create or a sucessful delete in the local copy. | |
1363 // Either way, that's a mismatch. | |
1364 if (!SyncAssert(0 == server_version, FROM_HERE, | |
1365 "Server version should be zero.", | |
1366 trans)) | |
1367 return false; | |
1368 // Items that aren't using the unique client tag should have a zero | |
1369 // base version only if they have a local ID. Items with unique client | |
1370 // tags are allowed to use the zero base version for undeletion and | |
1371 // de-duplication; the unique client tag trumps the server ID. | |
1372 if (!using_unique_client_tag) { | |
1373 if (!SyncAssert(!id.ServerKnows(), FROM_HERE, | |
1374 "Should be a client only id.", | |
1375 trans)) | |
1376 return false; | |
1377 } | |
1378 } | |
1379 } else { | |
1380 if (!SyncAssert(id.ServerKnows(), | |
1381 FROM_HERE, | |
1382 "Should be a server id.", | |
1383 trans)) | |
1384 return false; | |
1385 } | |
1386 ++entries_done; | |
1387 int64 elapsed_ms = check_timer.Elapsed().InMilliseconds(); | |
1388 if (elapsed_ms > max_ms) { | |
1389 DVLOG(1) << "Cutting Invariant check short after " << elapsed_ms | |
1390 << "ms. Processed " << entries_done << "/" << handles.size() | |
1391 << " entries"; | |
1392 return true; | |
1393 } | |
1394 | |
1395 } | |
1396 return true; | |
1397 } | |
1398 | |
1399 /////////////////////////////////////////////////////////////////////////////// | |
1400 // ScopedKernelLock | |
1401 | |
// RAII lock over the directory kernel's mutex; held for the lifetime of this
// object.  The const_cast mirrors the fact that locking mutates no logical
// directory state.
ScopedKernelLock::ScopedKernelLock(const Directory* dir)
    :  scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
}
1405 | |
1406 /////////////////////////////////////////////////////////////////////////// | |
1407 // Transactions | |
1408 | |
// Acquires the directory-wide transaction mutex, emitting a trace event so
// lock contention shows up in performance traces.
void BaseTransaction::Lock() {
  TRACE_EVENT2("sync_lock_contention", "AcquireLock",
               "src_file", from_here_.file_name(),
               "src_func", from_here_.function_name());

  dirkernel_->transaction_mutex.Acquire();
}
1416 | |
// Releases the directory-wide transaction mutex.
void BaseTransaction::Unlock() {
  dirkernel_->transaction_mutex.Release();
}
1420 | |
// Records an unrecoverable error on this transaction.  The error is reported
// to the directory immediately, but full handling is deferred to the
// transaction destructor via HandleUnrecoverableErrorIfSet().
void BaseTransaction::OnUnrecoverableError(
    const tracked_objects::Location& location,
    const std::string& message) {
  unrecoverable_error_set_ = true;
  unrecoverable_error_location_ = location;
  unrecoverable_error_msg_ = message;

  // Note: We don't call the Directory's OnUnrecoverableError method right
  // away. Instead we wait to unwind the stack and in the destructor of the
  // transaction we would call the OnUnrecoverableError method.

  directory()->ReportUnrecoverableError();
}
1434 | |
// Returns whether an unrecoverable error was recorded on this transaction.
bool BaseTransaction::unrecoverable_error_set() const {
  return unrecoverable_error_set_;
}
1438 | |
1439 void BaseTransaction::HandleUnrecoverableErrorIfSet() { | |
1440 if (unrecoverable_error_set_) { | |
1441 directory()->OnUnrecoverableError(this, | |
1442 unrecoverable_error_location_, | |
1443 unrecoverable_error_msg_); | |
1444 } | |
1445 } | |
1446 | |
// Constructs a transaction bound to |directory|.  Subclass constructors are
// responsible for calling Lock(); this only records metadata and opens a
// trace event that the destructor closes.
BaseTransaction::BaseTransaction(const tracked_objects::Location& from_here,
                                 const char* name,
                                 WriterTag writer,
                                 Directory* directory)
    : from_here_(from_here), name_(name), writer_(writer),
      directory_(directory), dirkernel_(directory->kernel_),
      unrecoverable_error_set_(false) {
  // TODO(lipalani): Don't issue a good transaction if the directory has
  // unrecoverable error set. And the callers have to check trans.good before
  // proceeding.
  TRACE_EVENT_BEGIN2("sync", name_,
                     "src_file", from_here_.file_name(),
                     "src_func", from_here_.function_name());
}
1461 | |
// Closes the trace event opened by the constructor.
BaseTransaction::~BaseTransaction() {
  TRACE_EVENT_END0("sync", name_);
}
1465 | |
// Acquires the transaction mutex for the duration of this read transaction.
ReadTransaction::ReadTransaction(const tracked_objects::Location& location,
                                 Directory* directory)
    : BaseTransaction(location, "ReadTransaction", INVALID, directory) {
  Lock();
}
1471 | |
// Reports any deferred unrecoverable error, then releases the mutex.
ReadTransaction::~ReadTransaction() {
  HandleUnrecoverableErrorIfSet();
  Unlock();
}
1476 | |
// Acquires the transaction mutex; the destructor releases it after recording
// mutations and notifying observers.
WriteTransaction::WriteTransaction(const tracked_objects::Location& location,
                                   WriterTag writer, Directory* directory)
    : BaseTransaction(location, "WriteTransaction", writer, directory) {
  Lock();
}
1482 | |
1483 void WriteTransaction::SaveOriginal(const EntryKernel* entry) { | |
1484 if (!entry) { | |
1485 return; | |
1486 } | |
1487 // Insert only if it's not already there. | |
1488 const int64 handle = entry->ref(META_HANDLE); | |
1489 EntryKernelMutationMap::iterator it = mutations_.lower_bound(handle); | |
1490 if (it == mutations_.end() || it->first != handle) { | |
1491 EntryKernelMutation mutation; | |
1492 mutation.original = *entry; | |
1493 ignore_result(mutations_.insert(it, std::make_pair(handle, mutation))); | |
1494 } | |
1495 } | |
1496 | |
1497 ImmutableEntryKernelMutationMap WriteTransaction::RecordMutations() { | |
1498 dirkernel_->transaction_mutex.AssertAcquired(); | |
1499 for (syncable::EntryKernelMutationMap::iterator it = mutations_.begin(); | |
1500 it != mutations_.end();) { | |
1501 EntryKernel* kernel = directory()->GetEntryByHandle(it->first); | |
1502 if (!kernel) { | |
1503 NOTREACHED(); | |
1504 continue; | |
1505 } | |
1506 if (kernel->is_dirty()) { | |
1507 it->second.mutated = *kernel; | |
1508 ++it; | |
1509 } else { | |
1510 DCHECK(!it->second.original.is_dirty()); | |
1511 // Not actually mutated, so erase from |mutations_|. | |
1512 mutations_.erase(it++); | |
1513 } | |
1514 } | |
1515 return ImmutableEntryKernelMutationMap(&mutations_); | |
1516 } | |
1517 | |
1518 void WriteTransaction::UnlockAndNotify( | |
1519 const ImmutableEntryKernelMutationMap& mutations) { | |
1520 // Work while transaction mutex is held. | |
1521 ModelTypeSet models_with_changes; | |
1522 bool has_mutations = !mutations.Get().empty(); | |
1523 if (has_mutations) { | |
1524 models_with_changes = NotifyTransactionChangingAndEnding(mutations); | |
1525 } | |
1526 Unlock(); | |
1527 | |
1528 // Work after mutex is relased. | |
1529 if (has_mutations) { | |
1530 NotifyTransactionComplete(models_with_changes); | |
1531 } | |
1532 } | |
1533 | |
// Dispatches change-calculation and transaction-ending events to the
// directory's change delegate and transaction observers.  Must be called
// with the transaction mutex held and a non-empty mutation map.  Returns the
// set of model types the delegate says were changed.
ModelTypeSet WriteTransaction::NotifyTransactionChangingAndEnding(
    const ImmutableEntryKernelMutationMap& mutations) {
  dirkernel_->transaction_mutex.AssertAcquired();
  DCHECK(!mutations.Get().empty());

  WriteTransactionInfo write_transaction_info(
      dirkernel_->next_write_transaction_id, from_here_, writer_, mutations);
  ++dirkernel_->next_write_transaction_id;

  ImmutableWriteTransactionInfo immutable_write_transaction_info(
      &write_transaction_info);
  DirectoryChangeDelegate* const delegate = dirkernel_->delegate;
  // The delegate distinguishes changes originating from the sync API (local
  // model changes) from those made by the syncer itself.
  if (writer_ == syncable::SYNCAPI) {
    delegate->HandleCalculateChangesChangeEventFromSyncApi(
        immutable_write_transaction_info, this);
  } else {
    delegate->HandleCalculateChangesChangeEventFromSyncer(
        immutable_write_transaction_info, this);
  }

  ModelTypeSet models_with_changes =
      delegate->HandleTransactionEndingChangeEvent(
          immutable_write_transaction_info, this);

  dirkernel_->transaction_observer.Call(FROM_HERE,
      &TransactionObserver::OnTransactionWrite,
      immutable_write_transaction_info, models_with_changes);

  return models_with_changes;
}
1564 | |
// Fires the transaction-complete event; called after the transaction mutex
// has been released.
void WriteTransaction::NotifyTransactionComplete(
    ModelTypeSet models_with_changes) {
  dirkernel_->delegate->HandleTransactionCompleteChangeEvent(
      models_with_changes);
}
1570 | |
// Records the transaction's mutations, optionally runs the tree invariant
// check, then unlocks and notifies observers.  If an unrecoverable error was
// set (before or during the invariant check), observers are skipped and the
// error is handled before releasing the mutex.
WriteTransaction::~WriteTransaction() {
  const ImmutableEntryKernelMutationMap& mutations = RecordMutations();

  if (!unrecoverable_error_set_) {
    if (OFF != kInvariantCheckLevel) {
      const bool full_scan = (FULL_DB_VERIFICATION == kInvariantCheckLevel);
      if (full_scan)
        directory()->CheckTreeInvariants(this, full_scan);
      else
        directory()->CheckTreeInvariants(this, mutations.Get());
    }
  }

  // |CheckTreeInvariants| could have thrown an unrecoverable error.
  if (unrecoverable_error_set_) {
    HandleUnrecoverableErrorIfSet();
    Unlock();
    return;
  }

  UnlockAndNotify(mutations);
}
1593 | |
1594 /////////////////////////////////////////////////////////////////////////// | |
1595 // Entry | |
1596 | |
// Looks up an entry by sync id; kernel_ is NULL (and good() false) if absent.
Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
    : basetrans_(trans) {
  kernel_ = trans->directory()->GetEntryById(id);
}
1601 | |
// Looks up an entry by unique client tag; good() is false if absent.
Entry::Entry(BaseTransaction* trans, GetByClientTag, const string& tag)
    : basetrans_(trans) {
  kernel_ = trans->directory()->GetEntryByClientTag(tag);
}
1606 | |
// Looks up an entry by unique server tag; good() is false if absent.
Entry::Entry(BaseTransaction* trans, GetByServerTag, const string& tag)
    : basetrans_(trans) {
  kernel_ = trans->directory()->GetEntryByServerTag(tag);
}
1611 | |
// Looks up an entry by metahandle; good() is false if absent.
Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
    : basetrans_(trans) {
  kernel_ = trans->directory()->GetEntryByHandle(metahandle);
}
1616 | |
// Returns the directory this entry's transaction is bound to.
Directory* Entry::dir() const {
  return basetrans_->directory();
}
1620 | |
// Delegates to the directory to compute this entry's predecessor id under
// |parent_id| based on server ordering.
Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const {
  return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id);
}
1624 | |
1625 DictionaryValue* Entry::ToValue() const { | |
1626 DictionaryValue* entry_info = new DictionaryValue(); | |
1627 entry_info->SetBoolean("good", good()); | |
1628 if (good()) { | |
1629 entry_info->Set("kernel", kernel_->ToValue()); | |
1630 entry_info->Set("modelType", | |
1631 ModelTypeToValue(GetModelType())); | |
1632 entry_info->SetBoolean("existsOnClientBecauseNameIsNonEmpty", | |
1633 ExistsOnClientBecauseNameIsNonEmpty()); | |
1634 entry_info->SetBoolean("isRoot", IsRoot()); | |
1635 } | |
1636 return entry_info; | |
1637 } | |
1638 | |
// Returns a reference to the string field |field|; entry must be good().
const string& Entry::Get(StringField field) const {
  DCHECK(kernel_);
  return kernel_->ref(field);
}
1643 | |
// Returns the model type implied by the server-side specifics, or
// UNSPECIFIED for an uncommitted locally created item (the DCHECKs verify
// that is the only state in which the server type can be missing).
syncable::ModelType Entry::GetServerModelType() const {
  ModelType specifics_type = kernel_->GetServerModelType();
  if (specifics_type != UNSPECIFIED)
    return specifics_type;

  // Otherwise, we don't have a server type yet.  That should only happen
  // if the item is an uncommitted locally created item.
  // It's possible we'll need to relax these checks in the future; they're
  // just here for now as a safety measure.
  DCHECK(Get(IS_UNSYNCED));
  DCHECK_EQ(Get(SERVER_VERSION), 0);
  DCHECK(Get(SERVER_IS_DEL));
  // Note: can't enforce !Get(ID).ServerKnows() here because that could
  // actually happen if we hit AttemptReuniteLostCommitResponses.
  return UNSPECIFIED;
}
1660 | |
1661 syncable::ModelType Entry::GetModelType() const { | |
1662 ModelType specifics_type = GetModelTypeFromSpecifics(Get(SPECIFICS)); | |
1663 if (specifics_type != UNSPECIFIED) | |
1664 return specifics_type; | |
1665 if (IsRoot()) | |
1666 return TOP_LEVEL_FOLDER; | |
1667 // Loose check for server-created top-level folders that aren't | |
1668 // bound to a particular model type. | |
1669 if (!Get(UNIQUE_SERVER_TAG).empty() && Get(IS_DIR)) | |
1670 return TOP_LEVEL_FOLDER; | |
1671 | |
1672 return UNSPECIFIED; | |
1673 } | |
1674 | |
1675 /////////////////////////////////////////////////////////////////////////// | |
1676 // MutableEntry | |
1677 | |
// Creates a brand-new entry named |name| under |parent_id|; good() is false
// if insertion into the directory failed (see Init).
MutableEntry::MutableEntry(WriteTransaction* trans, Create,
                           const Id& parent_id, const string& name)
    : Entry(trans),
      write_transaction_(trans) {
  Init(trans, parent_id, name);
}
1684 | |
1685 | |
// Builds and registers a fresh kernel for a newly created entry.  On
// insertion failure, kernel_ stays NULL and the entry is not good().  The
// temporary IS_DEL=true flip lets SaveOriginal() record the entry as
// initially deleted, so change processing sees the creation as a transition
// from deleted to live.
void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
                        const string& name) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel);
  kernel_ = NULL;

  kernel->put(ID, trans->directory_->NextId());
  kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
  kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
  kernel->put(PARENT_ID, parent_id);
  kernel->put(NON_UNIQUE_NAME, name);
  const base::Time& now = base::Time::Now();
  kernel->put(CTIME, now);
  kernel->put(MTIME, now);
  // We match the database defaults here
  kernel->put(BASE_VERSION, CHANGES_VERSION);
  if (!trans->directory()->InsertEntry(trans, kernel.get())) {
    return;  // We failed inserting, nothing more to do.
  }
  // Because this entry is new, it was originally deleted.
  kernel->put(IS_DEL, true);
  trans->SaveOriginal(kernel.get());
  kernel->put(IS_DEL, false);

  // Now swap the pointers.
  kernel_ = kernel.release();
}
1712 | |
// Creates a placeholder entry for a server update with the given
// (server-assigned) |id|.  The entry starts deleted, mirroring the
// database defaults.  Fails (kernel_ stays NULL) if an entry with this
// ID already exists or the directory insert fails.
MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
                           const Id& id)
    : Entry(trans), write_transaction_(trans) {
  Entry same_id(trans, GET_BY_ID, id);
  kernel_ = NULL;
  if (same_id.good()) {
    return;  // already have an item with this ID.
  }
  scoped_ptr<EntryKernel> kernel(new EntryKernel());

  kernel->put(ID, id);
  kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
  kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
  kernel->put(IS_DEL, true);
  // We match the database defaults here
  kernel->put(BASE_VERSION, CHANGES_VERSION);
  if (!trans->directory()->InsertEntry(trans, kernel.get())) {
    return;  // Failed inserting.
  }
  trans->SaveOriginal(kernel.get());

  kernel_ = kernel.release();
}
1736 | |
// Looks up an existing entry by ID for mutation; snapshots its original
// state so the transaction can report/rollback changes.
MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
    : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
  trans->SaveOriginal(kernel_);
}
1741 | |
// Looks up an existing entry by metahandle for mutation; snapshots its
// original state for the transaction.
MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
                           int64 metahandle)
    : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
  trans->SaveOriginal(kernel_);
}
1747 | |
// Looks up an existing entry by its unique client tag for mutation;
// snapshots its original state for the transaction.
MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
                           const std::string& tag)
    : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
  trans->SaveOriginal(kernel_);
}
1753 | |
// Looks up an existing entry by its unique server tag for mutation;
// snapshots its original state for the transaction.
MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
                           const string& tag)
    : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) {
  trans->SaveOriginal(kernel_);
}
1759 | |
1760 bool MutableEntry::PutIsDel(bool is_del) { | |
1761 DCHECK(kernel_); | |
1762 if (is_del == kernel_->ref(IS_DEL)) { | |
1763 return true; | |
1764 } | |
1765 if (is_del) { | |
1766 if (!UnlinkFromOrder()) { | |
1767 return false; | |
1768 } | |
1769 } | |
1770 | |
1771 { | |
1772 ScopedKernelLock lock(dir()); | |
1773 // Some indices don't include deleted items and must be updated | |
1774 // upon a value change. | |
1775 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_, | |
1776 dir()->kernel_->parent_id_child_index); | |
1777 | |
1778 kernel_->put(IS_DEL, is_del); | |
1779 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | |
1780 } | |
1781 | |
1782 if (!is_del) | |
1783 // Restores position to the 0th index. | |
1784 if (!PutPredecessor(Id())) { | |
1785 // TODO(lipalani) : Propagate the error to caller. crbug.com/100444. | |
1786 NOTREACHED(); | |
1787 } | |
1788 | |
1789 return true; | |
1790 } | |
1791 | |
1792 bool MutableEntry::Put(Int64Field field, const int64& value) { | |
1793 DCHECK(kernel_); | |
1794 if (kernel_->ref(field) != value) { | |
1795 ScopedKernelLock lock(dir()); | |
1796 if (SERVER_POSITION_IN_PARENT == field) { | |
1797 ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_, | |
1798 dir()->kernel_->parent_id_child_index); | |
1799 kernel_->put(field, value); | |
1800 } else { | |
1801 kernel_->put(field, value); | |
1802 } | |
1803 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | |
1804 } | |
1805 return true; | |
1806 } | |
1807 | |
1808 bool MutableEntry::Put(TimeField field, const base::Time& value) { | |
1809 DCHECK(kernel_); | |
1810 if (kernel_->ref(field) != value) { | |
1811 kernel_->put(field, value); | |
1812 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | |
1813 } | |
1814 return true; | |
1815 } | |
1816 | |
// Sets an ID field.  ID and PARENT_ID are indexed and take special
// paths: ID goes through the directory's ID reindex (which can fail),
// and PARENT_ID is updated in two steps — reparent first, then repair
// the sibling order by moving the entry to position 0.
bool MutableEntry::Put(IdField field, const Id& value) {
  DCHECK(kernel_);
  if (kernel_->ref(field) != value) {
    if (ID == field) {
      if (!dir()->ReindexId(write_transaction(), kernel_, value))
        return false;
    } else if (PARENT_ID == field) {
      PutParentIdPropertyOnly(value);  // Makes sibling order inconsistent.
      // Fixes up the sibling order inconsistency.
      if (!PutPredecessor(Id())) {
        // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
        NOTREACHED();
      }
    } else {
      kernel_->put(field, value);
    }
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}
1837 | |
// Updates PARENT_ID (and its index) without touching the sibling
// PREV/NEXT chain; callers must fix up sibling order afterwards (see
// Put(IdField) above).
void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
  dir()->ReindexParentId(write_transaction(), kernel_, parent_id);
  kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
}
1842 | |
1843 bool MutableEntry::Put(BaseVersion field, int64 value) { | |
1844 DCHECK(kernel_); | |
1845 if (kernel_->ref(field) != value) { | |
1846 kernel_->put(field, value); | |
1847 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | |
1848 } | |
1849 return true; | |
1850 } | |
1851 | |
// Thin forwarder; PutImpl handles the UNIQUE_CLIENT_TAG special case.
bool MutableEntry::Put(StringField field, const string& value) {
  return PutImpl(field, value);
}
1855 | |
// Sets a specifics proto field.  Changing SERVER_SPECIFICS can change
// the entry's server model type, so when the entry is an unapplied
// update its metahandle is moved between the per-type
// unapplied_update_metahandles buckets around the write.
bool MutableEntry::Put(ProtoField field,
                       const sync_pb::EntitySpecifics& value) {
  DCHECK(kernel_);
  // TODO(ncarter): This is unfortunately heavyweight.  Can we do
  // better?
  if (kernel_->ref(field).SerializeAsString() != value.SerializeAsString()) {
    const bool update_unapplied_updates_index =
        (field == SERVER_SPECIFICS) && kernel_->ref(IS_UNAPPLIED_UPDATE);
    if (update_unapplied_updates_index) {
      // Remove ourselves from unapplied_update_metahandles with our
      // old server type.
      const syncable::ModelType old_server_type =
          kernel_->GetServerModelType();
      const int64 metahandle = kernel_->ref(META_HANDLE);
      size_t erase_count =
          dir()->kernel_->unapplied_update_metahandles[old_server_type]
          .erase(metahandle);
      DCHECK_EQ(erase_count, 1u);
    }

    kernel_->put(field, value);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);

    if (update_unapplied_updates_index) {
      // Add ourselves back into unapplied_update_metahandles with our
      // new server type.
      const syncable::ModelType new_server_type =
          kernel_->GetServerModelType();
      const int64 metahandle = kernel_->ref(META_HANDLE);
      dir()->kernel_->unapplied_update_metahandles[new_server_type]
          .insert(metahandle);
    }
  }
  return true;
}
1891 | |
1892 bool MutableEntry::Put(BitField field, bool value) { | |
1893 DCHECK(kernel_); | |
1894 if (kernel_->ref(field) != value) { | |
1895 kernel_->put(field, value); | |
1896 kernel_->mark_dirty(GetDirtyIndexHelper()); | |
1897 } | |
1898 return true; | |
1899 } | |
1900 | |
// Returns the dirty-metahandle set used by Put(BitField); virtual-style
// hook point so subclasses can substitute a different index.
MetahandleSet* MutableEntry::GetDirtyIndexHelper() {
  return dir()->kernel_->dirty_metahandles;
}
1904 | |
// Sets UNIQUE_CLIENT_TAG, enforcing uniqueness against the directory's
// client-tag index.  Returns false if another entry already has
// |new_tag|; a no-op change returns true.
bool MutableEntry::PutUniqueClientTag(const string& new_tag) {
  // There is no SERVER_UNIQUE_CLIENT_TAG. This field is similar to ID.
  string old_tag = kernel_->ref(UNIQUE_CLIENT_TAG);
  if (old_tag == new_tag) {
    return true;
  }

  ScopedKernelLock lock(dir());
  if (!new_tag.empty()) {
    // Make sure your new value is not in there already.
    // A copy of the kernel with the candidate tag is used as the index
    // lookup key.
    EntryKernel lookup_kernel_ = *kernel_;
    lookup_kernel_.put(UNIQUE_CLIENT_TAG, new_tag);
    bool new_tag_conflicts =
        (dir()->kernel_->client_tag_index->count(&lookup_kernel_) > 0);
    if (new_tag_conflicts) {
      return false;
    }
  }

  {
    // Re-key this entry in the client-tag index around the mutation.
    ScopedIndexUpdater<ClientTagIndexer> index_updater(lock, kernel_,
        dir()->kernel_->client_tag_index);
    kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}
1932 | |
1933 bool MutableEntry::PutImpl(StringField field, const string& value) { | |
1934 DCHECK(kernel_); | |
1935 if (field == UNIQUE_CLIENT_TAG) { | |
1936 return PutUniqueClientTag(value); | |
1937 } | |
1938 | |
1939 if (kernel_->ref(field) != value) { | |
1940 kernel_->put(field, value); | |
1941 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | |
1942 } | |
1943 return true; | |
1944 } | |
1945 | |
1946 bool MutableEntry::Put(IndexedBitField field, bool value) { | |
1947 DCHECK(kernel_); | |
1948 if (kernel_->ref(field) != value) { | |
1949 MetahandleSet* index; | |
1950 if (IS_UNSYNCED == field) { | |
1951 index = dir()->kernel_->unsynced_metahandles; | |
1952 } else { | |
1953 // Use kernel_->GetServerModelType() instead of | |
1954 // GetServerModelType() as we may trigger some DCHECKs in the | |
1955 // latter. | |
1956 index = | |
1957 &dir()->kernel_->unapplied_update_metahandles[ | |
1958 kernel_->GetServerModelType()]; | |
1959 } | |
1960 | |
1961 ScopedKernelLock lock(dir()); | |
1962 if (value) { | |
1963 if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second, | |
1964 FROM_HERE, | |
1965 "Could not insert", | |
1966 write_transaction())) { | |
1967 return false; | |
1968 } | |
1969 } else { | |
1970 if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)), | |
1971 FROM_HERE, | |
1972 "Entry Not succesfully erased", | |
1973 write_transaction())) { | |
1974 return false; | |
1975 } | |
1976 } | |
1977 kernel_->put(field, value); | |
1978 kernel_->mark_dirty(dir()->kernel_->dirty_metahandles); | |
1979 } | |
1980 return true; | |
1981 } | |
1982 | |
// Removes this entry from its siblings' PREV_ID/NEXT_ID chain, leaving
// it self-looped.  Takes the kernel lock and delegates to the directory.
bool MutableEntry::UnlinkFromOrder() {
  ScopedKernelLock lock(dir());
  return dir()->UnlinkEntryFromOrder(kernel_, write_transaction(), &lock);
}
1987 | |
1988 bool Directory::UnlinkEntryFromOrder(EntryKernel* entry, | |
1989 WriteTransaction* trans, | |
1990 ScopedKernelLock* lock) { | |
1991 if (!SyncAssert(!trans || this == trans->directory(), | |
1992 FROM_HERE, | |
1993 "Transaction not pointing to the right directory", | |
1994 trans)) | |
1995 return false; | |
1996 Id old_previous = entry->ref(PREV_ID); | |
1997 Id old_next = entry->ref(NEXT_ID); | |
1998 | |
1999 entry->put(NEXT_ID, entry->ref(ID)); | |
2000 entry->put(PREV_ID, entry->ref(ID)); | |
2001 entry->mark_dirty(kernel_->dirty_metahandles); | |
2002 | |
2003 if (!old_previous.IsRoot()) { | |
2004 if (old_previous == old_next) { | |
2005 // Note previous == next doesn't imply previous == next == Get(ID). We | |
2006 // could have prev==next=="c-XX" and Get(ID)=="sX..." if an item was added | |
2007 // and deleted before receiving the server ID in the commit response. | |
2008 if (!SyncAssert( | |
2009 (old_next == entry->ref(ID)) || !old_next.ServerKnows(), | |
2010 FROM_HERE, | |
2011 "Encounteered inconsistent entry while deleting", | |
2012 trans)) { | |
2013 return false; | |
2014 } | |
2015 return true; // Done if we were already self-looped (hence unlinked). | |
2016 } | |
2017 EntryKernel* previous_entry = GetEntryById(old_previous, lock); | |
2018 ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS)); | |
2019 // TODO(tim): Multiple asserts here for bug 101039 investigation. | |
2020 if (type == AUTOFILL) { | |
2021 if (!SyncAssert(previous_entry != NULL, | |
2022 FROM_HERE, | |
2023 "Could not find previous autofill entry", | |
2024 trans)) { | |
2025 return false; | |
2026 } | |
2027 } else { | |
2028 if (!SyncAssert(previous_entry != NULL, | |
2029 FROM_HERE, | |
2030 "Could not find previous entry", | |
2031 trans)) { | |
2032 return false; | |
2033 } | |
2034 } | |
2035 if (trans) | |
2036 trans->SaveOriginal(previous_entry); | |
2037 previous_entry->put(NEXT_ID, old_next); | |
2038 previous_entry->mark_dirty(kernel_->dirty_metahandles); | |
2039 } | |
2040 | |
2041 if (!old_next.IsRoot()) { | |
2042 EntryKernel* next_entry = GetEntryById(old_next, lock); | |
2043 if (!SyncAssert(next_entry != NULL, | |
2044 FROM_HERE, | |
2045 "Could not find next entry", | |
2046 trans)) { | |
2047 return false; | |
2048 } | |
2049 if (trans) | |
2050 trans->SaveOriginal(next_entry); | |
2051 next_entry->put(PREV_ID, old_previous); | |
2052 next_entry->mark_dirty(kernel_->dirty_metahandles); | |
2053 } | |
2054 return true; | |
2055 } | |
2056 | |
// Places this entry immediately after |predecessor_id| in its parent's
// sibling order (a root/null predecessor means "first child").  The
// entry is first unlinked, then spliced back into the doubly-linked
// list.  For a deleted entry only the unlink is performed.
bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
  if (!UnlinkFromOrder())
    return false;

  if (Get(IS_DEL)) {
    DCHECK(predecessor_id.IsNull());
    return true;
  }

  // TODO(ncarter): It should be possible to not maintain position for
  // non-bookmark items.  However, we'd need to robustly handle all possible
  // permutations of setting IS_DEL and the SPECIFICS to identify the
  // object type; or else, we'd need to add a ModelType to the
  // MutableEntry's Create ctor.
  //   if (!ShouldMaintainPosition()) {
  //     return false;
  //   }

  // This is classic insert-into-doubly-linked-list from CS 101 and your last
  // job interview.  An "IsRoot" Id signifies the head or tail.
  Id successor_id;
  if (!predecessor_id.IsRoot()) {
    MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
    if (!predecessor.good()) {
      LOG(ERROR) << "Predecessor is not good : "
                 << predecessor_id.GetServerId();
      return false;
    }
    // Cross-parent moves are not allowed through this method.
    if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
      return false;
    successor_id = predecessor.Get(NEXT_ID);
    predecessor.Put(NEXT_ID, Get(ID));
  } else {
    // Inserting at the head: the current first child becomes our
    // successor.
    syncable::Directory* dir = trans()->directory();
    if (!dir->GetFirstChildId(trans(), Get(PARENT_ID), &successor_id)) {
      return false;
    }
  }
  if (!successor_id.IsRoot()) {
    MutableEntry successor(write_transaction(), GET_BY_ID, successor_id);
    if (!successor.good()) {
      LOG(ERROR) << "Successor is not good: "
                 << successor_id.GetServerId();
      return false;
    }
    if (successor.Get(PARENT_ID) != Get(PARENT_ID))
      return false;
    successor.Put(PREV_ID, Get(ID));
  }
  DCHECK(predecessor_id != Get(ID));
  DCHECK(successor_id != Get(ID));
  Put(PREV_ID, predecessor_id);
  Put(NEXT_ID, successor_id);
  return true;
}
2112 | |
// Sets a temporary bit.  Note: unlike the other Put overloads, this does
// not mark the entry dirty — BitTemps appear to be in-memory scratch
// flags that are never persisted; confirm against EntryKernel before
// relying on that.
bool MutableEntry::Put(BitTemp field, bool value) {
  DCHECK(kernel_);
  kernel_->put(field, value);
  return true;
}
2118 | |
2119 /////////////////////////////////////////////////////////////////////////// | |
2120 // High-level functions | |
2121 | |
2122 int64 Directory::NextMetahandle() { | |
2123 ScopedKernelLock lock(this); | |
2124 int64 metahandle = (kernel_->next_metahandle)++; | |
2125 return metahandle; | |
2126 } | |
2127 | |
2128 // Always returns a client ID that is the string representation of a negative | |
2129 // number. | |
2130 Id Directory::NextId() { | |
2131 int64 result; | |
2132 { | |
2133 ScopedKernelLock lock(this); | |
2134 result = (kernel_->persisted_info.next_id)--; | |
2135 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY; | |
2136 } | |
2137 DCHECK_LT(result, 0); | |
2138 return Id::CreateFromClientString(base::Int64ToString(result)); | |
2139 } | |
2140 | |
2141 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) { | |
2142 ScopedKernelLock lock(this); | |
2143 return (GetPossibleFirstChild(lock, id) != NULL); | |
2144 } | |
2145 | |
// Finds the first child of |parent_id| in linked-list order.  Sets
// *first_child_id to the null Id (and returns true) when there are no
// children; returns false only if the sibling chain is broken.
bool Directory::GetFirstChildId(BaseTransaction* trans,
                                const Id& parent_id,
                                Id* first_child_id) {
  ScopedKernelLock lock(this);
  EntryKernel* entry = GetPossibleFirstChild(lock, parent_id);
  if (!entry) {
    *first_child_id = Id();
    return true;
  }

  // Walk to the front of the list; the server position ordering
  // is commonly identical to the linked-list ordering, but pending
  // unsynced or unapplied items may diverge.
  while (!entry->ref(PREV_ID).IsRoot()) {
    entry = GetEntryById(entry->ref(PREV_ID), &lock);
    if (!entry) {
      *first_child_id = Id();
      return false;
    }
  }
  *first_child_id = entry->ref(ID);
  return true;
}
2169 | |
// Test-only mirror of GetFirstChildId: finds the last child of
// |parent_id| in linked-list order.  Null Id + true when there are no
// children; false if the chain is broken.
bool Directory::GetLastChildIdForTest(
    BaseTransaction* trans, const Id& parent_id, Id* last_child_id) {
  ScopedKernelLock lock(this);
  EntryKernel* entry = GetPossibleLastChildForTest(lock, parent_id);
  if (!entry) {
    *last_child_id = Id();
    return true;
  }

  // Walk to the back of the list; the server position ordering
  // is commonly identical to the linked-list ordering, but pending
  // unsynced or unapplied items may diverge.
  while (!entry->ref(NEXT_ID).IsRoot()) {
    entry = GetEntryById(entry->ref(NEXT_ID), &lock);
    if (!entry) {
      *last_child_id = Id();
      return false;
    }
  }

  *last_child_id = entry->ref(ID);
  return true;
}
2193 | |
// Computes the ID of the sibling that should precede |entry| under
// |parent_id|, based on SERVER_POSITION_IN_PARENT.  Returns the null Id
// when the entry belongs first.
Id Directory::ComputePrevIdFromServerPosition(
    const EntryKernel* entry,
    const syncable::Id& parent_id) {
  ScopedKernelLock lock(this);

  // Find the natural insertion point in the parent_id_child_index, and
  // work back from there, filtering out ineligible candidates.
  ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(lock,
      parent_id, entry->ref(SERVER_POSITION_IN_PARENT), entry->ref(ID));
  ParentIdChildIndex::iterator first_sibling =
      GetParentChildIndexLowerBound(lock, parent_id);

  while (sibling != first_sibling) {
    --sibling;
    EntryKernel* candidate = *sibling;

    // The item itself should never be in the range under consideration.
    DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));

    // Ignore unapplied updates -- they might not even be server-siblings.
    if (candidate->ref(IS_UNAPPLIED_UPDATE))
      continue;

    // We can't trust the SERVER_ fields of unsynced items, but they are
    // potentially legitimate local predecessors.  In the case where
    // |update_item| and an unsynced item wind up in the same insertion
    // position, we need to choose how to order them.  The following check puts
    // the unapplied update first; removing it would put the unsynced item(s)
    // first.
    if (candidate->ref(IS_UNSYNCED))
      continue;

    // Skip over self-looped items, which are not valid predecessors.  This
    // shouldn't happen in practice, but is worth defending against.
    if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
        !candidate->ref(PREV_ID).IsRoot()) {
      NOTREACHED();
      continue;
    }
    return candidate->ref(ID);
  }
  // This item will be the first in the sibling order.
  return Id();
}
2238 | |
2239 bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id, | |
2240 const Id& new_parent_id) { | |
2241 if (entry_id.IsRoot()) | |
2242 return false; | |
2243 // we have to ensure that the entry is not an ancestor of the new parent. | |
2244 Id ancestor_id = new_parent_id; | |
2245 while (!ancestor_id.IsRoot()) { | |
2246 if (entry_id == ancestor_id) | |
2247 return false; | |
2248 Entry new_parent(trans, GET_BY_ID, ancestor_id); | |
2249 if (!SyncAssert(new_parent.good(), | |
2250 FROM_HERE, | |
2251 "Invalid new parent", | |
2252 trans)) | |
2253 return false; | |
2254 ancestor_id = new_parent.Get(PARENT_ID); | |
2255 } | |
2256 return true; | |
2257 } | |
2258 | |
// This function sets only the flags needed to get this entry to sync:
// raises IS_UNSYNCED and clears the in-flight SYNCING flag.  Returns
// false if the IS_UNSYNCED write fails.
bool MarkForSyncing(syncable::MutableEntry* e) {
  DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
  DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
  if (!(e->Put(IS_UNSYNCED, true)))
    return false;
  e->Put(SYNCING, false);
  return true;
}
2268 | |
// Debug-prints every field of an entry.  The single index |i| walks the
// flat field enum straight through its contiguous per-type ranges
// (int64s, times, ids, bits, strings, protos, temp bits), so the loops
// deliberately continue from where the previous one stopped.
std::ostream& operator<<(std::ostream& os, const Entry& entry) {
  int i;
  EntryKernel* const kernel = entry.kernel_;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    os << g_metas_columns[i].name << ": "
       << kernel->ref(static_cast<Int64Field>(i)) << ", ";
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    os << g_metas_columns[i].name << ": "
       << browser_sync::GetTimeDebugString(
           kernel->ref(static_cast<TimeField>(i))) << ", ";
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    os << g_metas_columns[i].name << ": "
       << kernel->ref(static_cast<IdField>(i)) << ", ";
  }
  os << "Flags: ";
  for ( ; i < BIT_FIELDS_END; ++i) {
    // Only set bits are printed.
    if (kernel->ref(static_cast<BitField>(i)))
      os << g_metas_columns[i].name << ", ";
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    const string& field = kernel->ref(static_cast<StringField>(i));
    os << g_metas_columns[i].name << ": " << field << ", ";
  }
  for ( ; i < PROTO_FIELDS_END; ++i) {
    // Protos are serialized and path-escaped so binary bytes stay
    // printable.
    os << g_metas_columns[i].name << ": "
       << net::EscapePath(
           kernel->ref(static_cast<ProtoField>(i)).SerializeAsString())
       << ", ";
  }
  os << "TempFlags: ";
  for ( ; i < BIT_TEMPS_END; ++i) {
    if (kernel->ref(static_cast<BitTemp>(i)))
      os << "#" << i - BIT_TEMPS_BEGIN << ", ";
  }
  return os;
}
2307 | |
2308 std::ostream& operator<<(std::ostream& s, const Blob& blob) { | |
2309 for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i) | |
2310 s << std::hex << std::setw(2) | |
2311 << std::setfill('0') << static_cast<unsigned int>(*i); | |
2312 return s << std::dec; | |
2313 } | |
2314 | |
// Returns the lower-bound position in parent_id_child_index for the key
// (parent_id, position_in_parent, item_id).  Reuses the kernel's
// |needle| scratch kernel as the lookup key; callers must hold the
// kernel lock (witnessed by |lock|).
Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
    const ScopedKernelLock& lock,
    const Id& parent_id,
    int64 position_in_parent,
    const Id& item_id_for_tiebreaking) {
  kernel_->needle.put(PARENT_ID, parent_id);
  kernel_->needle.put(SERVER_POSITION_IN_PARENT, position_in_parent);
  kernel_->needle.put(ID, item_id_for_tiebreaking);
  return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
}
2325 | |
// Returns the start of |parent_id|'s range in parent_id_child_index.
Directory::ParentIdChildIndex::iterator
Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
                                         const Id& parent_id) {
  // Peg the parent ID, and use the least values for the remaining
  // index variables.
  return LocateInParentChildIndex(lock, parent_id,
      std::numeric_limits<int64>::min(),
      Id::GetLeastIdForLexicographicComparison());
}
2335 | |
// Returns one-past-the-end of |parent_id|'s range in
// parent_id_child_index.
Directory::ParentIdChildIndex::iterator
Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
                                         const Id& parent_id) {
  // The upper bound of |parent_id|'s range is the lower
  // bound of |++parent_id|'s range.
  return GetParentChildIndexLowerBound(lock,
      parent_id.GetLexicographicSuccessor());
}
2344 | |
// Appends the metahandles of all children of |parent_id| (in index
// order) to |result|.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::ChildHandles* result) {
  typedef ParentIdChildIndex::iterator iterator;
  CHECK(result);
  for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
           end = GetParentChildIndexUpperBound(lock, parent_id);
       i != end; ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}
2357 | |
// Returns some child of |parent_id| that participates in the linked-list
// ordering (not necessarily the first), or NULL if there are none.
// Callers such as GetFirstChildId then walk PREV_ID to the true front.
EntryKernel* Directory::GetPossibleFirstChild(
    const ScopedKernelLock& lock, const Id& parent_id) {
  // We can use the server positional ordering as a hint because it's generally
  // in sync with the local (linked-list) positional ordering, and we have an
  // index on it.
  ParentIdChildIndex::iterator candidate =
      GetParentChildIndexLowerBound(lock, parent_id);
  ParentIdChildIndex::iterator end_range =
      GetParentChildIndexUpperBound(lock, parent_id);
  for (; candidate != end_range; ++candidate) {
    EntryKernel* entry = *candidate;
    // Filter out self-looped items, which are temporarily not in the child
    // ordering.
    if (entry->ref(PREV_ID).IsRoot() ||
        entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
      return entry;
    }
  }
  // There were no children in the linked list.
  return NULL;
}
2379 | |
// Test-only mirror of GetPossibleFirstChild: scans |parent_id|'s index
// range backwards and returns some linked child (not necessarily the
// last), or NULL if there are none.
EntryKernel* Directory::GetPossibleLastChildForTest(
    const ScopedKernelLock& lock, const Id& parent_id) {
  // We can use the server positional ordering as a hint because it's generally
  // in sync with the local (linked-list) positional ordering, and we have an
  // index on it.
  ParentIdChildIndex::iterator begin_range =
      GetParentChildIndexLowerBound(lock, parent_id);
  ParentIdChildIndex::iterator candidate =
      GetParentChildIndexUpperBound(lock, parent_id);

  while (begin_range != candidate) {
    --candidate;
    EntryKernel* entry = *candidate;

    // Filter out self-looped items, which are temporarily not in the child
    // ordering.
    if (entry->ref(NEXT_ID).IsRoot() ||
        entry->ref(NEXT_ID) != entry->ref(PREV_ID)) {
      return entry;
    }
  }
  // There were no children in the linked list.
  return NULL;
}
2404 | |
2405 } // namespace syncable | |
OLD | NEW |