Chromium Code Reviews

Unified Diff: components/sync/engine_impl/syncer_util.cc

Issue 2130453004: [Sync] Move //sync to //components/sync. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase. Created 4 years, 4 months ago
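
This patch set only moves code: the diff below rewrites include paths from the old //sync layout to the new //components/sync layout and re-wraps a few statements to match clang-format, with no functional change visible in this file. As a rough sketch inferred from the include changes in this one file (the general mapping is an assumption; other directories may map differently), a consumer of these headers would update its includes along these lines:

// Sketch only; the mapping below is inferred from this file's include changes.
//   "sync/engine/..."                    ->  "components/sync/engine_impl/..."
//   "sync/internal_api/public/base/..."  ->  "components/sync/base/..."
//   "sync/util/..."                      ->  "components/sync/base/..."
//   "sync/protocol/..."                  ->  "components/sync/protocol/..."
//   "sync/syncable/..."                  ->  "components/sync/syncable/..."
#include "components/sync/base/cryptographer.h"       // was "sync/util/cryptographer.h"
#include "components/sync/engine_impl/syncer_util.h"  // was "sync/engine/syncer_util.h"
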
 // Copyright 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#include "sync/engine/syncer_util.h"
+#include "components/sync/engine_impl/syncer_util.h"

 #include <stdint.h>

 #include <algorithm>
 #include <set>
 #include <string>
 #include <vector>

 #include "base/base64.h"
 #include "base/location.h"
 #include "base/metrics/histogram.h"
 #include "base/rand_util.h"
 #include "base/strings/string_number_conversions.h"
-#include "sync/engine/conflict_resolver.h"
-#include "sync/engine/syncer_proto_util.h"
-#include "sync/engine/syncer_types.h"
-#include "sync/internal_api/public/base/attachment_id_proto.h"
-#include "sync/internal_api/public/base/model_type.h"
-#include "sync/internal_api/public/base/unique_position.h"
-#include "sync/protocol/bookmark_specifics.pb.h"
-#include "sync/protocol/password_specifics.pb.h"
-#include "sync/protocol/sync.pb.h"
-#include "sync/syncable/directory.h"
-#include "sync/syncable/entry.h"
-#include "sync/syncable/model_neutral_mutable_entry.h"
-#include "sync/syncable/mutable_entry.h"
-#include "sync/syncable/syncable_changes_version.h"
-#include "sync/syncable/syncable_model_neutral_write_transaction.h"
-#include "sync/syncable/syncable_proto_util.h"
-#include "sync/syncable/syncable_read_transaction.h"
-#include "sync/syncable/syncable_util.h"
-#include "sync/syncable/syncable_write_transaction.h"
-#include "sync/util/cryptographer.h"
-#include "sync/util/time.h"
+#include "components/sync/base/attachment_id_proto.h"
+#include "components/sync/base/cryptographer.h"
+#include "components/sync/base/model_type.h"
+#include "components/sync/base/time.h"
+#include "components/sync/base/unique_position.h"
+#include "components/sync/engine_impl/conflict_resolver.h"
+#include "components/sync/engine_impl/syncer_proto_util.h"
+#include "components/sync/engine_impl/syncer_types.h"
+#include "components/sync/protocol/bookmark_specifics.pb.h"
+#include "components/sync/protocol/password_specifics.pb.h"
+#include "components/sync/protocol/sync.pb.h"
+#include "components/sync/syncable/directory.h"
+#include "components/sync/syncable/entry.h"
+#include "components/sync/syncable/model_neutral_mutable_entry.h"
+#include "components/sync/syncable/mutable_entry.h"
+#include "components/sync/syncable/syncable_changes_version.h"
+#include "components/sync/syncable/syncable_model_neutral_write_transaction.h"
+#include "components/sync/syncable/syncable_proto_util.h"
+#include "components/sync/syncable/syncable_read_transaction.h"
+#include "components/sync/syncable/syncable_util.h"
+#include "components/sync/syncable/syncable_write_transaction.h"

 namespace syncer {

 using syncable::CHANGES_VERSION;
 using syncable::Directory;
 using syncable::Entry;
 using syncable::GET_BY_HANDLE;
 using syncable::GET_BY_ID;
 using syncable::ID;
 using syncable::Id;

-syncable::Id FindLocalIdToUpdate(
-    syncable::BaseTransaction* trans,
-    const sync_pb::SyncEntity& update) {
+syncable::Id FindLocalIdToUpdate(syncable::BaseTransaction* trans,
+                                 const sync_pb::SyncEntity& update) {
   // Expected entry points of this function:
   // SyncEntity has NOT been applied to SERVER fields.
   // SyncEntity has NOT been applied to LOCAL fields.
   // DB has not yet been modified, no entries created for this update.

   const std::string& client_id = trans->directory()->cache_guid();
   const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

   if (update.has_client_defined_unique_tag() &&
       !update.client_defined_unique_tag().empty()) {
(...skipping 42 matching lines...)
         // We should change the ID of the local entry to the server entry.
         // This will result in a server ID with base version == 0, but that's
         // a legal state for an item with a client tag. By changing the ID,
         // update will now be applied to local_entry.
         DCHECK(0 == local_entry.GetBaseVersion() ||
                CHANGES_VERSION == local_entry.GetBaseVersion());
         return local_entry.GetId();
       }
     }
   } else if (update.has_originator_cache_guid() &&
              update.originator_cache_guid() == client_id) {
     // If a commit succeeds, but the response does not come back fast enough
     // then the syncer might assume that it was never committed.
     // The server will track the client that sent up the original commit and
     // return this in a get updates response. When this matches a local
     // uncommitted item, we must mutate our local item and version to pick up
     // the committed version of the same item whose commit response was lost.
     // There is however still a race condition if the server has not
     // completed the commit by the time the syncer tries to get updates
     // again. To mitigate this, we need to have the server time out in
     // a reasonable span, our commit batches have to be small enough
(...skipping 17 matching lines...)
       int64_t new_version = update.version();
       DCHECK_LE(old_version, 0);
       DCHECK_GT(new_version, 0);
       // Otherwise setting the base version could cause a consistency failure.
       // An entry should never be version 0 and SYNCED.
       DCHECK(local_entry.GetIsUnsynced());

       // Just a quick sanity check.
       DCHECK(!local_entry.GetId().ServerKnows());

-      DVLOG(1) << "Reuniting lost commit response IDs. server id: "
-               << update_id << " local id: " << local_entry.GetId()
+      DVLOG(1) << "Reuniting lost commit response IDs. server id: " << update_id
+               << " local id: " << local_entry.GetId()
                << " new version: " << new_version;

       return local_entry.GetId();
     }
   } else if (update.has_server_defined_unique_tag() &&
              !update.server_defined_unique_tag().empty()) {
     // The client creates type root folders with a local ID on demand when a
     // progress marker for the given type is initially set.
     // The server might also attempt to send a type root folder for the same
     // type (during the transition period until support for root folders is
(...skipping 31 matching lines...)
   // treated as an unresolvable conflict. See the description in syncer_types.h.
   // This prevents any unsynced changes from committing and postpones conflict
   // resolution until all data can be decrypted.
   if (specifics.has_encrypted() &&
       !cryptographer->CanDecrypt(specifics.encrypted())) {
     // We can't decrypt this node yet.
     DVLOG(1) << "Received an undecryptable "
              << ModelTypeToString(entry->GetServerModelType())
              << " update, returning conflict_encryption.";
     return CONFLICT_ENCRYPTION;
-  } else if (specifics.has_password() &&
-             entry->GetUniqueServerTag().empty()) {
+  } else if (specifics.has_password() && entry->GetUniqueServerTag().empty()) {
     // Passwords use their own legacy encryption scheme.
     const sync_pb::PasswordSpecifics& password = specifics.password();
     if (!cryptographer->CanDecrypt(password.encrypted())) {
       DVLOG(1) << "Received an undecryptable password update, returning "
                << "conflict_encryption.";
       return CONFLICT_ENCRYPTION;
     }
   }

   if (!entry->GetServerIsDel()) {
(...skipping 132 matching lines...)
   // Update our position.
   UniquePosition update_pos =
       GetUpdatePosition(update, local_entry->GetUniqueBookmarkTag());
   if (update_pos.IsValid()) {
     local_entry->PutServerUniquePosition(update_pos);
   }
 }

 }  // namespace

-void UpdateServerFieldsFromUpdate(
-    syncable::ModelNeutralMutableEntry* target,
-    const sync_pb::SyncEntity& update,
-    const std::string& name) {
+void UpdateServerFieldsFromUpdate(syncable::ModelNeutralMutableEntry* target,
+                                  const sync_pb::SyncEntity& update,
+                                  const std::string& name) {
   if (update.deleted()) {
     if (target->GetServerIsDel()) {
       // If we already think the item is server-deleted, we're done.
       // Skipping these cases prevents our committed deletions from coming
       // back and overriding subsequent undeletions. For non-deleted items,
       // the version number check has a similar effect.
       return;
     }
     // Mark entry as unapplied update first to ensure journaling the deletion.
     target->PutIsUnappliedUpdate(true);
(...skipping 35 matching lines...)
   // Store the datatype-specific part as a protobuf.
   if (update.has_specifics()) {
     DCHECK_NE(GetModelType(update), UNSPECIFIED)
         << "Storing unrecognized datatype in sync database.";
     target->PutServerSpecifics(update.specifics());
   } else if (update.has_bookmarkdata()) {
     // Legacy protocol response for bookmark data.
     const sync_pb::SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
     UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
                             bookmark.bookmark_url(),
-                            bookmark.bookmark_favicon(),
-                            target);
+                            bookmark.bookmark_favicon(), target);
   }
   target->PutServerAttachmentMetadata(
       CreateAttachmentMetadata(update.attachment_id()));
   if (SyncerProtoUtil::ShouldMaintainPosition(update)) {
     UpdateBookmarkPositioning(update, target);
   }

   // We only mark the entry as unapplied if its version is greater than the
   // local data. If we're processing the update that corresponds to one of our
   // commits we don't apply it as time differences may occur.
   if (update.version() > target->GetBaseVersion()) {
     target->PutIsUnappliedUpdate(true);
   }
   DCHECK(!update.deleted());
   target->PutServerIsDel(false);
 }

 // Creates a new Entry iff no Entry exists with the given id.
-void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
+void CreateNewEntry(syncable::ModelNeutralWriteTransaction* trans,
                     const syncable::Id& id) {
   syncable::Entry entry(trans, GET_BY_ID, id);
   if (!entry.good()) {
     syncable::ModelNeutralMutableEntry new_entry(
-        trans,
-        syncable::CREATE_NEW_UPDATE_ITEM,
-        id);
+        trans, syncable::CREATE_NEW_UPDATE_ITEM, id);
   }
 }

 // This function is called on an entry when we can update the user-facing data
 // from the server data.
-void UpdateLocalDataFromServerData(
-    syncable::WriteTransaction* trans,
-    syncable::MutableEntry* entry) {
+void UpdateLocalDataFromServerData(syncable::WriteTransaction* trans,
+                                   syncable::MutableEntry* entry) {
   DCHECK(!entry->GetIsUnsynced());
   DCHECK(entry->GetIsUnappliedUpdate());

   DVLOG(2) << "Updating entry : " << *entry;
   // Start by setting the properties that determine the model_type.
   entry->PutSpecifics(entry->GetServerSpecifics());
   // Clear the previous server specifics now that we're applying successfully.
   entry->PutBaseServerSpecifics(sync_pb::EntitySpecifics());
   entry->PutIsDir(entry->GetServerIsDir());
   // This strange dance around the IS_DEL flag avoids problems when setting
(...skipping 28 matching lines...)
     LOG(ERROR) << "Permanent item became unsynced " << *entry;
     return VERIFY_UNSYNCABLE;
   }
   if (entry->GetIsDel() && !entry->GetId().ServerKnows()) {
     // Drop deleted uncommitted entries.
     return VERIFY_UNSYNCABLE;
   }
   return VERIFY_OK;
 }

-void MarkDeletedChildrenSynced(
-    syncable::Directory* dir,
-    syncable::BaseWriteTransaction* trans,
-    std::set<syncable::Id>* deleted_folders) {
+void MarkDeletedChildrenSynced(syncable::Directory* dir,
+                               syncable::BaseWriteTransaction* trans,
+                               std::set<syncable::Id>* deleted_folders) {
   // There are two options here.
   // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
   //    of the deleted folders.
   // 2. Take each folder and do a tree walk of all entries underneath it.
   // #2 has a lower big O cost, but writing code to limit the time spent inside
   // the transaction during each step is simpler with 1. Changing this decision
   // may be sensible if this code shows up in profiling.
   if (deleted_folders->empty())
     return;
   Directory::Metahandles handles;
   dir->GetUnsyncedMetaHandles(trans, &handles);
   if (handles.empty())
     return;
   Directory::Metahandles::iterator it;
-  for (it = handles.begin() ; it != handles.end() ; ++it) {
+  for (it = handles.begin(); it != handles.end(); ++it) {
     syncable::ModelNeutralMutableEntry entry(trans, GET_BY_HANDLE, *it);
     if (!entry.GetIsUnsynced() || !entry.GetIsDel())
       continue;
     syncable::Id id = entry.GetParentId();
     while (id != trans->root_id()) {
       if (deleted_folders->find(id) != deleted_folders->end()) {
         // We've synced the deletion of this deleted entry's parent.
         entry.PutIsUnsynced(false);
         break;
       }
       Entry parent(trans, GET_BY_ID, id);
       if (!parent.good() || !parent.GetIsDel())
         break;
       id = parent.GetParentId();
     }
   }
 }

-VerifyResult VerifyNewEntry(
-    const sync_pb::SyncEntity& update,
-    syncable::Entry* target,
-    const bool deleted) {
+VerifyResult VerifyNewEntry(const sync_pb::SyncEntity& update,
+                            syncable::Entry* target,
+                            const bool deleted) {
   if (target->good()) {
     // Not a new update.
     return VERIFY_UNDECIDED;
   }
   if (deleted) {
     // Deletion of an item we've never seen can be ignored.
     return VERIFY_SKIP;
   }

   return VERIFY_SUCCESS;
 }

 // Assumes we have an existing entry; check here for updates that break
 // consistency rules.
 VerifyResult VerifyUpdateConsistency(
     syncable::ModelNeutralWriteTransaction* trans,
     const sync_pb::SyncEntity& update,
     const bool deleted,
     const bool is_directory,
     ModelType model_type,
     syncable::ModelNeutralMutableEntry* target) {
-
   CHECK(target->good());
   const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

   // If the update is a delete, we don't really need to worry at this stage.
   if (deleted)
     return VERIFY_SUCCESS;

   if (model_type == UNSPECIFIED) {
     // This update is to an item of a datatype we don't recognize. The server
     // shouldn't have sent it to us. Throw it on the ground.
(...skipping 72 matching lines...)
   if (target->GetIsDel()) {
     if (target->GetUniqueClientTag().empty())
       LOG(WARNING) << "Doing move-aside undeletion on client-tagged item.";
     target->PutId(trans->directory()->NextId());
     target->PutUniqueClientTag(std::string());
     target->PutBaseVersion(CHANGES_VERSION);
     target->PutServerVersion(0);
     return VERIFY_SUCCESS;
   }
   if (update.version() < target->GetServerVersion()) {
-    LOG(WARNING) << "Update older than current server version for "
-                 << *target << " Update:"
+    LOG(WARNING) << "Update older than current server version for " << *target
+                 << " Update:"
                  << SyncerProtoUtil::SyncEntityDebugString(update);
     return VERIFY_SUCCESS;  // Expected in new sync protocol.
   }
   return VERIFY_UNDECIDED;
 }

 }  // namespace syncer
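
Since this file only defines helpers, a minimal usage sketch may help orientation. It is hypothetical and not code from this CL: the real caller lives elsewhere in the syncer's update-processing code, the GET_BY_ID lookup on a mutable entry is assumed, and |name| is assumed to have been derived from the update already.

// Hypothetical sketch; only signatures visible in this file are relied on.
void SketchProcessDownloadedUpdate(syncable::ModelNeutralWriteTransaction* trans,
                                   const sync_pb::SyncEntity& update,
                                   const std::string& name) {
  // Decide which local entry the update should land on: an entry matched by
  // client tag, an uncommitted item whose commit response was lost, a local
  // type root, or simply the server id.
  const syncable::Id local_id = FindLocalIdToUpdate(trans, update);

  // Make sure an entry with that id exists; CreateNewEntry is a no-op when one
  // is already present.
  CreateNewEntry(trans, local_id);

  // Copy the update into the entry's SERVER fields; the entry is marked as an
  // unapplied update when the server version is newer than the local one.
  syncable::ModelNeutralMutableEntry target(trans, syncable::GET_BY_ID,
                                            local_id);  // Lookup assumed.
  if (target.good())
    UpdateServerFieldsFromUpdate(&target, update, name);
}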
