Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(119)

Side by Side Diff: chrome/browser/sync/engine/syncer_util.cc

Issue 9699057: [Sync] Move 'sync' target to sync/ (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Address Tim's comments Created 8 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « chrome/browser/sync/engine/syncer_util.h ('k') | chrome/browser/sync/engine/syncproto.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "chrome/browser/sync/engine/syncer_util.h"
6
7 #include <algorithm>
8 #include <set>
9 #include <string>
10 #include <vector>
11
12 #include "base/location.h"
13 #include "base/metrics/histogram.h"
14 #include "chrome/browser/sync/engine/conflict_resolver.h"
15 #include "chrome/browser/sync/engine/nigori_util.h"
16 #include "chrome/browser/sync/engine/syncer_proto_util.h"
17 #include "chrome/browser/sync/engine/syncer_types.h"
18 #include "chrome/browser/sync/engine/syncproto.h"
19 #include "chrome/browser/sync/syncable/model_type.h"
20 #include "chrome/browser/sync/syncable/syncable.h"
21 #include "chrome/browser/sync/syncable/syncable_changes_version.h"
22 #include "chrome/browser/sync/util/cryptographer.h"
23 #include "chrome/browser/sync/util/time.h"
24 #include "sync/protocol/bookmark_specifics.pb.h"
25 #include "sync/protocol/nigori_specifics.pb.h"
26 #include "sync/protocol/password_specifics.pb.h"
27 #include "sync/protocol/sync.pb.h"
28
29 using syncable::BASE_VERSION;
30 using syncable::Blob;
31 using syncable::CHANGES_VERSION;
32 using syncable::CREATE;
33 using syncable::CREATE_NEW_UPDATE_ITEM;
34 using syncable::CTIME;
35 using syncable::Directory;
36 using syncable::Entry;
37 using syncable::GetModelTypeFromSpecifics;
38 using syncable::GET_BY_HANDLE;
39 using syncable::GET_BY_ID;
40 using syncable::ID;
41 using syncable::IS_DEL;
42 using syncable::IS_DIR;
43 using syncable::IS_UNAPPLIED_UPDATE;
44 using syncable::IS_UNSYNCED;
45 using syncable::Id;
46 using syncable::IsRealDataType;
47 using syncable::META_HANDLE;
48 using syncable::MTIME;
49 using syncable::MutableEntry;
50 using syncable::NEXT_ID;
51 using syncable::NON_UNIQUE_NAME;
52 using syncable::BASE_SERVER_SPECIFICS;
53 using syncable::PARENT_ID;
54 using syncable::PREV_ID;
55 using syncable::ReadTransaction;
56 using syncable::SERVER_CTIME;
57 using syncable::SERVER_IS_DEL;
58 using syncable::SERVER_IS_DIR;
59 using syncable::SERVER_MTIME;
60 using syncable::SERVER_NON_UNIQUE_NAME;
61 using syncable::SERVER_PARENT_ID;
62 using syncable::SERVER_POSITION_IN_PARENT;
63 using syncable::SERVER_SPECIFICS;
64 using syncable::SERVER_VERSION;
65 using syncable::UNIQUE_CLIENT_TAG;
66 using syncable::UNIQUE_SERVER_TAG;
67 using syncable::SPECIFICS;
68 using syncable::SYNCER;
69 using syncable::WriteTransaction;
70
71 namespace browser_sync {
72
73 // Returns the number of unsynced entries.
74 // static
75 int SyncerUtil::GetUnsyncedEntries(syncable::BaseTransaction* trans,
76 std::vector<int64> *handles) {
77 trans->directory()->GetUnsyncedMetaHandles(trans, handles);
78 DVLOG_IF(1, !handles->empty()) << "Have " << handles->size()
79 << " unsynced items.";
80 return handles->size();
81 }
82
// Changes |entry|'s ID to |new_id| and repairs every reference to the old
// ID: the PARENT_ID field of all children (whose handles are returned in
// |children|) and the NEXT_ID/PREV_ID links of the adjacent siblings.
// static
void SyncerUtil::ChangeEntryIDAndUpdateChildren(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry,
    const syncable::Id& new_id,
    syncable::Directory::ChildHandles* children) {
  syncable::Id old_id = entry->Get(ID);
  if (!entry->Put(ID, new_id)) {
    // Put(ID) fails only when some other entry already owns |new_id|; that
    // is an unrecoverable inconsistency, so dump both entries and crash.
    Entry old_entry(trans, GET_BY_ID, new_id);
    CHECK(old_entry.good());
    LOG(FATAL) << "Attempt to change ID to " << new_id
               << " conflicts with existing entry.\n\n"
               << *entry << "\n\n" << old_entry;
  }
  if (entry->Get(IS_DIR)) {
    // Get all child entries of the old id; each child still points at
    // |old_id| and must be re-parented to |new_id|.
    trans->directory()->GetChildHandlesById(trans, old_id, children);
    Directory::ChildHandles::iterator i = children->begin();
    while (i != children->end()) {
      MutableEntry child_entry(trans, GET_BY_HANDLE, *i++);
      CHECK(child_entry.good());
      // Use the unchecked setter here to avoid touching the child's NEXT_ID
      // and PREV_ID fields (which Put(PARENT_ID) would normally do to
      // maintain linked-list invariants).  In this case, NEXT_ID and PREV_ID
      // among the children will be valid after the loop, since we update all
      // the children at once.
      child_entry.PutParentIdPropertyOnly(new_id);
    }
  }
  // Update Id references on the previous and next nodes in the sibling
  // order.  Do this by reinserting into the linked list; the first
  // step in PutPredecessor is to Unlink from the existing order, which
  // will overwrite the stale Id value from the adjacent nodes.
  if (entry->Get(PREV_ID) == entry->Get(NEXT_ID) &&
      entry->Get(PREV_ID) == old_id) {
    // We just need a shallow update to |entry|'s fields since it is already
    // self looped (it is its own predecessor and successor).
    entry->Put(NEXT_ID, new_id);
    entry->Put(PREV_ID, new_id);
  } else {
    entry->PutPredecessor(entry->Get(PREV_ID));
  }
}
126
127 // static
128 void SyncerUtil::ChangeEntryIDAndUpdateChildren(
129 syncable::WriteTransaction* trans,
130 syncable::MutableEntry* entry,
131 const syncable::Id& new_id) {
132 syncable::Directory::ChildHandles children;
133 ChangeEntryIDAndUpdateChildren(trans, entry, new_id, &children);
134 }
135
// Returns the ID of the local entry that |update| should target, or the
// null ID if the update must be dropped.  Reconciles client-tag matches and
// lost commit responses; see the case analysis in the body.
// static
syncable::Id SyncerUtil::FindLocalIdToUpdate(
    syncable::BaseTransaction* trans,
    const SyncEntity& update) {
  // Expected entry points of this function:
  // SyncEntity has NOT been applied to SERVER fields.
  // SyncEntity has NOT been applied to LOCAL fields.
  // DB has not yet been modified, no entries created for this update.

  const std::string& client_id = trans->directory()->cache_guid();

  if (update.has_client_defined_unique_tag() &&
      !update.client_defined_unique_tag().empty()) {
    // When a server sends down a client tag, the following cases can occur:
    // 1) Client has entry for tag already, ID is server style, matches
    // 2) Client has entry for tag already, ID is server, doesn't match.
    // 3) Client has entry for tag already, ID is local, (never matches)
    // 4) Client has no entry for tag

    // Case 1, we don't have to do anything since the update will
    // work just fine. Update will end up in the proper entry, via ID lookup.
    // Case 2 - Happens very rarely due to lax enforcement of client tags
    // on the server, if two clients commit the same tag at the same time.
    // When this happens, we pick the lexically-least ID and ignore all other
    // items.
    // Case 3 - We need to replace the local ID with the server ID so that
    // this update gets targeted at the correct local entry; we expect conflict
    // resolution to occur.
    // Case 4 - Perfect. Same as case 1.

    syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
                                update.client_defined_unique_tag());

    // The SyncAPI equivalent of this function will return !good if IS_DEL.
    // The syncable version will return good even if IS_DEL.
    // TODO(chron): Unit test the case with IS_DEL and make sure.
    if (local_entry.good()) {
      if (local_entry.Get(ID).ServerKnows()) {
        if (local_entry.Get(ID) != update.id()) {
          // Case 2.
          LOG(WARNING) << "Duplicated client tag.";
          if (local_entry.Get(ID) < update.id()) {
            // Signal an error; drop this update on the floor.  Note that
            // we don't server delete the item, because we don't allow it to
            // exist locally at all.  So the item will remain orphaned on
            // the server, and we won't pay attention to it.
            return syncable::GetNullId();
          }
        }
        // Target this change to the existing local entry; later,
        // we'll change the ID of the local entry to update.id()
        // if needed.
        return local_entry.Get(ID);
      } else {
        // Case 3: We have a local entry with the same client tag.
        // We should change the ID of the local entry to the server entry.
        // This will result in an server ID with base version == 0, but that's
        // a legal state for an item with a client tag.  By changing the ID,
        // update will now be applied to local_entry.
        DCHECK(0 == local_entry.Get(BASE_VERSION) ||
               CHANGES_VERSION == local_entry.Get(BASE_VERSION));
        return local_entry.Get(ID);
      }
    }
  } else if (update.has_originator_cache_guid() &&
             update.originator_cache_guid() == client_id) {
    // If a commit succeeds, but the response does not come back fast enough
    // then the syncer might assume that it was never committed.
    // The server will track the client that sent up the original commit and
    // return this in a get updates response. When this matches a local
    // uncommitted item, we must mutate our local item and version to pick up
    // the committed version of the same item whose commit response was lost.
    // There is however still a race condition if the server has not
    // completed the commit by the time the syncer tries to get updates
    // again. To mitigate this, we need to have the server time out in
    // a reasonable span, our commit batches have to be small enough
    // to process within our HTTP response "assumed alive" time.

    // We need to check if we have an entry that didn't get its server
    // id updated correctly. The server sends down a client ID
    // and a local (negative) id. If we have a entry by that
    // description, we should update the ID and version to the
    // server side ones to avoid multiple copies of the same thing.

    syncable::Id client_item_id = syncable::Id::CreateFromClientString(
        update.originator_client_item_id());
    DCHECK(!client_item_id.ServerKnows());
    syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);

    // If it exists, then our local client lost a commit response.  Use
    // the local entry.
    if (local_entry.good() && !local_entry.Get(IS_DEL)) {
      int64 old_version = local_entry.Get(BASE_VERSION);
      int64 new_version = update.version();
      DCHECK_LE(old_version, 0);
      DCHECK_GT(new_version, 0);
      // Otherwise setting the base version could cause a consistency failure.
      // An entry should never be version 0 and SYNCED.
      DCHECK(local_entry.Get(IS_UNSYNCED));

      // Just a quick sanity check.
      DCHECK(!local_entry.Get(ID).ServerKnows());

      DVLOG(1) << "Reuniting lost commit response IDs. server id: "
               << update.id() << " local id: " << local_entry.Get(ID)
               << " new version: " << new_version;

      return local_entry.Get(ID);
    }
  }
  // Fallback: target an entry having the server ID, creating one if needed.
  return update.id();
}
249
// Attempts to apply the server-side data of an unapplied update to
// |entry|'s local fields.  Returns SUCCESS when the update was applied (or
// there was no unapplied update), CONFLICT_ENCRYPTION when the update (or
// its legacy password data) cannot yet be decrypted, CONFLICT_HIERARCHY
// when applying would break tree invariants, and CONFLICT_SIMPLE when
// |entry| has local unsynced changes that must be resolved first.
// static
UpdateAttemptResponse SyncerUtil::AttemptToUpdateEntry(
    syncable::WriteTransaction* const trans,
    syncable::MutableEntry* const entry,
    ConflictResolver* resolver,
    Cryptographer* cryptographer) {
  CHECK(entry->good());
  if (!entry->Get(IS_UNAPPLIED_UPDATE))
    return SUCCESS;  // No work to do.
  syncable::Id id = entry->Get(ID);
  const sync_pb::EntitySpecifics& specifics = entry->Get(SERVER_SPECIFICS);

  // We intercept updates to the Nigori node, update the Cryptographer and
  // encrypt any unsynced changes here because there is no Nigori
  // ChangeProcessor. We never put the nigori node in a state of
  // conflict_encryption.
  //
  // We always update the cryptographer with the server's nigori node,
  // even if we have a locally modified nigori node (we manually merge nigori
  // data in the conflict resolver in that case). This handles the case where
  // two clients both set a different passphrase. The second client to attempt
  // to commit will go into a state of having pending keys, unioned the set of
  // encrypted types, and eventually re-encrypt everything with the passphrase
  // of the first client and commit the set of merged encryption keys. Until the
  // second client provides the pending passphrase, the cryptographer will
  // preserve the encryption keys based on the local passphrase, while the
  // nigori node will preserve the server encryption keys.
  //
  // If non-encryption changes are made to the nigori node, they will be
  // lost as part of conflict resolution. This is intended, as we place a higher
  // priority on preserving the server's passphrase change to preserving local
  // non-encryption changes. Next time the non-encryption changes are made to
  // the nigori node (e.g. on restart), they will commit without issue.
  if (specifics.has_nigori()) {
    const sync_pb::NigoriSpecifics& nigori = specifics.nigori();
    cryptographer->Update(nigori);

    // Make sure any unsynced changes are properly encrypted as necessary.
    // We only perform this if the cryptographer is ready. If not, these are
    // re-encrypted at SetPassphrase time (via ReEncryptEverything). This logic
    // covers the case where the nigori update marked new datatypes for
    // encryption, but didn't change the passphrase.
    if (cryptographer->is_ready()) {
      // Note that we don't bother to encrypt any data for which IS_UNSYNCED
      // == false here. The machine that turned on encryption should know about
      // and re-encrypt all synced data. It's possible it could get interrupted
      // during this process, but we currently reencrypt everything at startup
      // as well, so as soon as a client is restarted with this datatype marked
      // for encryption, all the data should be updated as necessary.

      // If this fails, something is wrong with the cryptographer, but there's
      // nothing we can do about it here.
      DVLOG(1) << "Received new nigori, encrypting unsynced changes.";
      syncable::ProcessUnsyncedChangesForEncryption(trans, cryptographer);
    }
  }

  // Only apply updates that we can decrypt. If we can't decrypt the update, it
  // is likely because the passphrase has not arrived yet. Because the
  // passphrase may not arrive within this GetUpdates, we can't just return
  // conflict, else we try to perform normal conflict resolution prematurely or
  // the syncer may get stuck. As such, we return CONFLICT_ENCRYPTION, which is
  // treated as an unresolvable conflict. See the description in syncer_types.h.
  // This prevents any unsynced changes from committing and postpones conflict
  // resolution until all data can be decrypted.
  if (specifics.has_encrypted() &&
      !cryptographer->CanDecrypt(specifics.encrypted())) {
    // We can't decrypt this node yet.
    DVLOG(1) << "Received an undecryptable "
             << syncable::ModelTypeToString(entry->GetServerModelType())
             << " update, returning encryption_conflict.";
    return CONFLICT_ENCRYPTION;
  } else if (specifics.has_password() &&
             entry->Get(UNIQUE_SERVER_TAG).empty()) {
    // Passwords use their own legacy encryption scheme.
    const sync_pb::PasswordSpecifics& password = specifics.password();
    if (!cryptographer->CanDecrypt(password.encrypted())) {
      DVLOG(1) << "Received an undecryptable password update, returning "
               << "encryption_conflict.";
      return CONFLICT_ENCRYPTION;
    }
  }

  if (!entry->Get(SERVER_IS_DEL)) {
    syncable::Id new_parent = entry->Get(SERVER_PARENT_ID);
    Entry parent(trans, GET_BY_ID, new_parent);
    // A note on non-directory parents:
    // We catch most unfixable tree invariant errors at update receipt time,
    // however we deal with this case here because we may receive the child
    // first then the illegal parent. Instead of dealing with it twice in
    // different ways we deal with it once here to reduce the amount of code and
    // potential errors.
    if (!parent.good() || parent.Get(IS_DEL) || !parent.Get(IS_DIR)) {
      return CONFLICT_HIERARCHY;
    }
    if (entry->Get(PARENT_ID) != new_parent) {
      // Re-parenting: make sure the new parent wouldn't create a cycle.
      if (!entry->Get(IS_DEL) && !IsLegalNewParent(trans, id, new_parent)) {
        DVLOG(1) << "Not updating item " << id
                 << ", illegal new parent (would cause loop).";
        return CONFLICT_HIERARCHY;
      }
    }
  } else if (entry->Get(IS_DIR)) {
    Directory::ChildHandles handles;
    trans->directory()->GetChildHandlesById(trans, id, &handles);
    if (!handles.empty()) {
      // If we have still-existing children, then we need to deal with
      // them before we can process this change.
      DVLOG(1) << "Not deleting directory; it's not empty " << *entry;
      return CONFLICT_HIERARCHY;
    }
  }

  if (entry->Get(IS_UNSYNCED)) {
    DVLOG(1) << "Skipping update, returning conflict for: " << id
             << " ; it's unsynced.";
    return CONFLICT_SIMPLE;
  }

  if (specifics.has_encrypted()) {
    DVLOG(2) << "Received a decryptable "
             << syncable::ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  } else {
    DVLOG(2) << "Received an unencrypted "
             << syncable::ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  }

  SyncerUtil::UpdateLocalDataFromServerData(trans, entry);

  return SUCCESS;
}
383
384 namespace {
385 // Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
386 // when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
387 // protocol.
388 void UpdateBookmarkSpecifics(const std::string& singleton_tag,
389 const std::string& url,
390 const std::string& favicon_bytes,
391 MutableEntry* local_entry) {
392 // In the new-style protocol, the server no longer sends bookmark info for
393 // the "google_chrome" folder. Mimic that here.
394 if (singleton_tag == "google_chrome")
395 return;
396 sync_pb::EntitySpecifics pb;
397 sync_pb::BookmarkSpecifics* bookmark = pb.mutable_bookmark();
398 if (!url.empty())
399 bookmark->set_url(url);
400 if (!favicon_bytes.empty())
401 bookmark->set_favicon(favicon_bytes);
402 local_entry->Put(SERVER_SPECIFICS, pb);
403 }
404
405 } // namespace
406
// Copies the fields of |update| into |target|'s server-side (SERVER_*)
// fields.  Deletions get a special lightweight treatment; otherwise the
// entry is marked IS_UNAPPLIED_UPDATE only when the server version is newer
// than our base version.
// Pass in name and checksum because of UTF8 conversion.
// static
void SyncerUtil::UpdateServerFieldsFromUpdate(
    MutableEntry* target,
    const SyncEntity& update,
    const std::string& name) {
  if (update.deleted()) {
    if (target->Get(SERVER_IS_DEL)) {
      // If we already think the item is server-deleted, we're done.
      // Skipping these cases prevents our committed deletions from coming
      // back and overriding subsequent undeletions.  For non-deleted items,
      // the version number check has a similar effect.
      return;
    }
    // The server returns very lightweight replies for deletions, so we don't
    // clobber a bunch of fields on delete.
    target->Put(SERVER_IS_DEL, true);
    if (!target->Get(UNIQUE_CLIENT_TAG).empty()) {
      // Items identified by the client unique tag are undeletable; when
      // they're deleted, they go back to version 0.
      target->Put(SERVER_VERSION, 0);
    } else {
      // Otherwise, fake a server version by bumping the local number.
      target->Put(SERVER_VERSION,
                  std::max(target->Get(SERVER_VERSION),
                           target->Get(BASE_VERSION)) + 1);
    }
    target->Put(IS_UNAPPLIED_UPDATE, true);
    return;
  }

  DCHECK(target->Get(ID) == update.id())
      << "ID Changing not supported here";
  target->Put(SERVER_PARENT_ID, update.parent_id());
  target->Put(SERVER_NON_UNIQUE_NAME, name);
  target->Put(SERVER_VERSION, update.version());
  target->Put(SERVER_CTIME, ProtoTimeToTime(update.ctime()));
  target->Put(SERVER_MTIME, ProtoTimeToTime(update.mtime()));
  target->Put(SERVER_IS_DIR, update.IsFolder());
  if (update.has_server_defined_unique_tag()) {
    const std::string& tag = update.server_defined_unique_tag();
    target->Put(UNIQUE_SERVER_TAG, tag);
  }
  if (update.has_client_defined_unique_tag()) {
    const std::string& tag = update.client_defined_unique_tag();
    target->Put(UNIQUE_CLIENT_TAG, tag);
  }
  // Store the datatype-specific part as a protobuf.
  if (update.has_specifics()) {
    DCHECK(update.GetModelType() != syncable::UNSPECIFIED)
        << "Storing unrecognized datatype in sync database.";
    target->Put(SERVER_SPECIFICS, update.specifics());
  } else if (update.has_bookmarkdata()) {
    // Legacy protocol response for bookmark data.
    const SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
    UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
                            bookmark.bookmark_url(),
                            bookmark.bookmark_favicon(),
                            target);
  }
  if (update.has_position_in_parent())
    target->Put(SERVER_POSITION_IN_PARENT, update.position_in_parent());

  // update.deleted() is known false here (the deleted case returned above),
  // so this clears any stale SERVER_IS_DEL flag (an undeletion).
  target->Put(SERVER_IS_DEL, update.deleted());
  // We only mark the entry as unapplied if its version is greater than the
  // local data. If we're processing the update that corresponds to one of our
  // commit we don't apply it as time differences may occur.
  if (update.version() > target->Get(BASE_VERSION)) {
    target->Put(IS_UNAPPLIED_UPDATE, true);
  }
}
478
479 // Creates a new Entry iff no Entry exists with the given id.
480 // static
481 void SyncerUtil::CreateNewEntry(syncable::WriteTransaction *trans,
482 const syncable::Id& id) {
483 syncable::MutableEntry entry(trans, GET_BY_ID, id);
484 if (!entry.good()) {
485 syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
486 id);
487 }
488 }
489
// Splits |entry| into two entries: |entry| keeps the local data but is
// re-keyed to a fresh client-generated ID (base version 0), while a new
// update item is created under the original server-known ID to carry the
// server-side fields.
// static
void SyncerUtil::SplitServerInformationIntoNewEntry(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  syncable::Id id = entry->Get(ID);
  // Re-key the local entry first so the original ID is free for the server
  // copy created below.
  ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
  entry->Put(BASE_VERSION, 0);

  MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
  CopyServerFields(entry, &new_entry);
  ClearServerData(entry);

  DVLOG(1) << "Splitting server information, local entry: " << *entry
           << " server entry: " << new_entry;
}
505
// This function is called on an entry when we can update the user-facing data
// from the server data: it copies every SERVER_* field into the
// corresponding local field and clears the unapplied-update flag.
// static
void SyncerUtil::UpdateLocalDataFromServerData(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  DCHECK(!entry->Get(IS_UNSYNCED));
  DCHECK(entry->Get(IS_UNAPPLIED_UPDATE));

  DVLOG(2) << "Updating entry : " << *entry;
  // Start by setting the properties that determine the model_type.
  entry->Put(SPECIFICS, entry->Get(SERVER_SPECIFICS));
  // Clear the previous server specifics now that we're applying successfully.
  entry->Put(BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics());
  entry->Put(IS_DIR, entry->Get(SERVER_IS_DIR));
  // This strange dance around the IS_DEL flag avoids problems when setting
  // the name.
  // TODO(chron): Is this still an issue? Unit test this codepath.
  if (entry->Get(SERVER_IS_DEL)) {
    entry->Put(IS_DEL, true);
  } else {
    entry->Put(NON_UNIQUE_NAME, entry->Get(SERVER_NON_UNIQUE_NAME));
    entry->Put(PARENT_ID, entry->Get(SERVER_PARENT_ID));
    CHECK(entry->Put(IS_DEL, false));
    // Re-insert into the sibling order at the position dictated by the
    // server.
    Id new_predecessor =
        entry->ComputePrevIdFromServerPosition(entry->Get(SERVER_PARENT_ID));
    CHECK(entry->PutPredecessor(new_predecessor))
        << " Illegal predecessor after converting from server position.";
  }

  entry->Put(CTIME, entry->Get(SERVER_CTIME));
  entry->Put(MTIME, entry->Get(SERVER_MTIME));
  entry->Put(BASE_VERSION, entry->Get(SERVER_VERSION));
  // Redundant with the branch above but kept for clarity/safety: the local
  // deletion state always ends up matching the server's.
  entry->Put(IS_DEL, entry->Get(SERVER_IS_DEL));
  entry->Put(IS_UNAPPLIED_UPDATE, false);
}
542
543 // static
544 VerifyCommitResult SyncerUtil::ValidateCommitEntry(
545 syncable::Entry* entry) {
546 syncable::Id id = entry->Get(ID);
547 if (id == entry->Get(PARENT_ID)) {
548 CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
549 // If the root becomes unsynced it can cause us problems.
550 LOG(ERROR) << "Root item became unsynced " << *entry;
551 return VERIFY_UNSYNCABLE;
552 }
553 if (entry->IsRoot()) {
554 LOG(ERROR) << "Permanent item became unsynced " << *entry;
555 return VERIFY_UNSYNCABLE;
556 }
557 if (entry->Get(IS_DEL) && !entry->Get(ID).ServerKnows()) {
558 // Drop deleted uncommitted entries.
559 return VERIFY_UNSYNCABLE;
560 }
561 return VERIFY_OK;
562 }
563
564 // static
565 bool SyncerUtil::AddItemThenPredecessors(
566 syncable::BaseTransaction* trans,
567 syncable::Entry* item,
568 syncable::IndexedBitField inclusion_filter,
569 syncable::MetahandleSet* inserted_items,
570 std::vector<syncable::Id>* commit_ids) {
571
572 if (!inserted_items->insert(item->Get(META_HANDLE)).second)
573 return false;
574 commit_ids->push_back(item->Get(ID));
575 if (item->Get(IS_DEL))
576 return true; // Deleted items have no predecessors.
577
578 Id prev_id = item->Get(PREV_ID);
579 while (!prev_id.IsRoot()) {
580 Entry prev(trans, GET_BY_ID, prev_id);
581 CHECK(prev.good()) << "Bad id when walking predecessors.";
582 if (!prev.Get(inclusion_filter))
583 break;
584 if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
585 break;
586 commit_ids->push_back(prev_id);
587 prev_id = prev.Get(PREV_ID);
588 }
589 return true;
590 }
591
592 // static
593 void SyncerUtil::AddPredecessorsThenItem(
594 syncable::BaseTransaction* trans,
595 syncable::Entry* item,
596 syncable::IndexedBitField inclusion_filter,
597 syncable::MetahandleSet* inserted_items,
598 std::vector<syncable::Id>* commit_ids) {
599 size_t initial_size = commit_ids->size();
600 if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
601 commit_ids))
602 return;
603 // Reverse what we added to get the correct order.
604 std::reverse(commit_ids->begin() + initial_size, commit_ids->end());
605 }
606
// Marks as synced (IS_UNSYNCED = false) every locally deleted entry whose
// deletion is already covered by a committed deleted-folder ancestor in
// |deleted_folders|.
// static
void SyncerUtil::MarkDeletedChildrenSynced(
    syncable::Directory* dir,
    std::set<syncable::Id>* deleted_folders) {
  // There's two options here.
  // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
  // of the deleted folders.
  // 2. Take each folder and do a tree walk of all entries underneath it.
  // #2 has a lower big O cost, but writing code to limit the time spent inside
  // the transaction during each step is simpler with 1. Changing this decision
  // may be sensible if this code shows up in profiling.
  if (deleted_folders->empty())
    return;
  Directory::UnsyncedMetaHandles handles;
  {
    // Snapshot the unsynced handles under a short read transaction.
    ReadTransaction trans(FROM_HERE, dir);
    dir->GetUnsyncedMetaHandles(&trans, &handles);
  }
  if (handles.empty())
    return;
  Directory::UnsyncedMetaHandles::iterator it;
  for (it = handles.begin() ; it != handles.end() ; ++it) {
    // Single transaction / entry we deal with, to keep each write
    // transaction short.
    WriteTransaction trans(FROM_HERE, SYNCER, dir);
    MutableEntry entry(&trans, GET_BY_HANDLE, *it);
    if (!entry.Get(IS_UNSYNCED) || !entry.Get(IS_DEL))
      continue;
    // Walk up the (pre-delete) ancestor chain looking for a committed
    // deleted folder.
    syncable::Id id = entry.Get(PARENT_ID);
    while (id != trans.root_id()) {
      if (deleted_folders->find(id) != deleted_folders->end()) {
        // We've synced the deletion of this deleted entry's parent.
        entry.Put(IS_UNSYNCED, false);
        break;
      }
      Entry parent(&trans, GET_BY_ID, id);
      if (!parent.good() || !parent.Get(IS_DEL))
        break;
      id = parent.Get(PARENT_ID);
    }
  }
}
648
649 // static
650 VerifyResult SyncerUtil::VerifyNewEntry(
651 const SyncEntity& update,
652 syncable::Entry* target,
653 const bool deleted) {
654 if (target->good()) {
655 // Not a new update.
656 return VERIFY_UNDECIDED;
657 }
658 if (deleted) {
659 // Deletion of an item we've never seen can be ignored.
660 return VERIFY_SKIP;
661 }
662
663 return VERIFY_SUCCESS;
664 }
665
// Assumes we have an existing entry; check here for updates that break
// consistency rules.  Returns VERIFY_SKIP to ignore the update,
// VERIFY_FAIL when the update contradicts previously seen data, and
// VERIFY_SUCCESS when it may proceed.
// static
VerifyResult SyncerUtil::VerifyUpdateConsistency(
    syncable::WriteTransaction* trans,
    const SyncEntity& update,
    syncable::MutableEntry* target,
    const bool deleted,
    const bool is_directory,
    syncable::ModelType model_type) {

  CHECK(target->good());

  // If the update is a delete, we don't really need to worry at this stage.
  if (deleted)
    return VERIFY_SUCCESS;

  if (model_type == syncable::UNSPECIFIED) {
    // This update is to an item of a datatype we don't recognize. The server
    // shouldn't have sent it to us.  Throw it on the ground.
    return VERIFY_SKIP;
  }

  if (target->Get(SERVER_VERSION) > 0) {
    // Then we've had an update for this entry before.  The update must agree
    // with what we previously recorded about the item's type/dir-ness.
    if (is_directory != target->Get(SERVER_IS_DIR) ||
        model_type != target->GetServerModelType()) {
      if (target->Get(IS_DEL)) {  // If we've deleted the item, we don't care.
        return VERIFY_SKIP;
      } else {
        LOG(ERROR) << "Server update doesn't agree with previous updates. ";
        LOG(ERROR) << " Entry: " << *target;
        LOG(ERROR) << " Update: "
                   << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_FAIL;
      }
    }

    if (!deleted && (target->Get(ID) == update.id()) &&
        (target->Get(SERVER_IS_DEL) ||
         (!target->Get(IS_UNSYNCED) && target->Get(IS_DEL) &&
          target->Get(BASE_VERSION) > 0))) {
      // An undelete. The latter case in the above condition is for
      // when the server does not give us an update following the
      // commit of a delete, before undeleting.
      // Undeletion is common for items that reuse the client-unique tag.
      VerifyResult result =
          SyncerUtil::VerifyUndelete(trans, update, target);
      if (VERIFY_UNDECIDED != result)
        return result;
    }
  }
  if (target->Get(BASE_VERSION) > 0) {
    // We've committed this update in the past.  The update must also agree
    // with the locally committed type/dir-ness.
    if (is_directory != target->Get(IS_DIR) ||
        model_type != target->GetModelType()) {
      LOG(ERROR) << "Server update doesn't agree with committed item. ";
      LOG(ERROR) << " Entry: " << *target;
      LOG(ERROR) << " Update: "
                 << SyncerProtoUtil::SyncEntityDebugString(update);
      return VERIFY_FAIL;
    }
    if (target->Get(ID) == update.id()) {
      // Skip updates that are older than the server version we already
      // know about.
      if (target->Get(SERVER_VERSION) > update.version()) {
        LOG(WARNING) << "We've already seen a more recent version.";
        LOG(WARNING) << " Entry: " << *target;
        LOG(WARNING) << " Update: "
                     << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_SKIP;
      }
    }
  }
  return VERIFY_SUCCESS;
}
740
// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'.  May re-key a locally deleted |target| out of
// the way (move-aside) so the undeleted server item can be recreated
// cleanly.
// static
VerifyResult SyncerUtil::VerifyUndelete(syncable::WriteTransaction* trans,
                                        const SyncEntity& update,
                                        syncable::MutableEntry* target) {
  // TODO(nick): We hit this path for items deleted items that the server
  // tells us to re-create; only deleted items with positive base versions
  // will hit this path. However, it's not clear how such an undeletion
  // would actually succeed on the server; in the protocol, a base
  // version of 0 is required to undelete an object. This codepath
  // should be deprecated in favor of client-tag style undeletion
  // (where items go to version 0 when they're deleted), or else
  // removed entirely (if this type of undeletion is indeed impossible).
  CHECK(target->good());
  DVLOG(1) << "Server update is attempting undelete. " << *target
           << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update);
  // Move the old one aside and start over.  It's too tricky to get the old one
  // back into a state that would pass CheckTreeInvariants().
  if (target->Get(IS_DEL)) {
    DCHECK(target->Get(UNIQUE_CLIENT_TAG).empty())
        << "Doing move-aside undeletion on client-tagged item.";
    // Re-key the deleted local entry to a fresh local ID and reset its
    // versions so the server copy can take over the original ID.
    target->Put(ID, trans->directory()->NextId());
    target->Put(UNIQUE_CLIENT_TAG, "");
    target->Put(BASE_VERSION, CHANGES_VERSION);
    target->Put(SERVER_VERSION, 0);
    return VERIFY_SUCCESS;
  }
  if (update.version() < target->Get(SERVER_VERSION)) {
    LOG(WARNING) << "Update older than current server version for "
                 << *target << " Update:"
                 << SyncerProtoUtil::SyncEntityDebugString(update);
    return VERIFY_SUCCESS;  // Expected in new sync protocol.
  }
  return VERIFY_UNDECIDED;
}
777
778 } // namespace browser_sync
OLDNEW
« no previous file with comments | « chrome/browser/sync/engine/syncer_util.h ('k') | chrome/browser/sync/engine/syncproto.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698