OLD | NEW |
1 // Copyright 2012 The Chromium Authors. All rights reserved. | 1 // Copyright 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sync/syncable/mutable_entry.h" | 5 #include "sync/syncable/mutable_entry.h" |
6 | 6 |
7 #include "base/memory/scoped_ptr.h" | 7 #include "base/memory/scoped_ptr.h" |
8 #include "sync/internal_api/public/base/unique_position.h" | 8 #include "sync/internal_api/public/base/unique_position.h" |
9 #include "sync/syncable/directory.h" | 9 #include "sync/syncable/directory.h" |
10 #include "sync/syncable/scoped_kernel_lock.h" | 10 #include "sync/syncable/scoped_kernel_lock.h" |
(...skipping 103 matching lines...)
114 const std::string& tag) | 114 const std::string& tag) |
115 : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) { | 115 : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) { |
116 } | 116 } |
117 | 117 |
118 MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag, | 118 MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag, |
119 const string& tag) | 119 const string& tag) |
120 : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) { | 120 : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) { |
121 } | 121 } |
122 | 122 |
123 void MutableEntry::PutBaseVersion(int64 value) { | 123 void MutableEntry::PutBaseVersion(int64 value) { |
124 Put(BASE_VERSION, value); | 124 DCHECK(kernel_); |
| 125 write_transaction_->SaveOriginal(kernel_); |
| 126 if (kernel_->ref(BASE_VERSION) != value) { |
| 127 kernel_->put(BASE_VERSION, value); |
| 128 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 129 } |
125 } | 130 } |
126 | 131 |
127 void MutableEntry::PutServerVersion(int64 value) { | 132 void MutableEntry::PutServerVersion(int64 value) { |
128 Put(SERVER_VERSION, value); | 133 DCHECK(kernel_); |
| 134 write_transaction_->SaveOriginal(kernel_); |
| 135 if (kernel_->ref(SERVER_VERSION) != value) { |
| 136 ScopedKernelLock lock(dir()); |
| 137 kernel_->put(SERVER_VERSION, value); |
| 138 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 139 } |
129 } | 140 } |
130 | 141 |
131 void MutableEntry::PutLocalExternalId(int64 value) { | 142 void MutableEntry::PutLocalExternalId(int64 value) { |
132 Put(LOCAL_EXTERNAL_ID, value); | 143 DCHECK(kernel_); |
| 144 write_transaction_->SaveOriginal(kernel_); |
| 145 if (kernel_->ref(LOCAL_EXTERNAL_ID) != value) { |
| 146 ScopedKernelLock lock(dir()); |
| 147 kernel_->put(LOCAL_EXTERNAL_ID, value); |
| 148 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 149 } |
133 } | 150 } |
134 | 151 |
135 void MutableEntry::PutMtime(base::Time value) { | 152 void MutableEntry::PutMtime(base::Time value) { |
136 Put(MTIME, value); | 153 DCHECK(kernel_); |
| 154 write_transaction_->SaveOriginal(kernel_); |
| 155 if (kernel_->ref(MTIME) != value) { |
| 156 kernel_->put(MTIME, value); |
| 157 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 158 } |
137 } | 159 } |
138 | 160 |
139 void MutableEntry::PutServerMtime(base::Time value) { | 161 void MutableEntry::PutServerMtime(base::Time value) { |
140 Put(SERVER_MTIME, value); | 162 DCHECK(kernel_); |
| 163 write_transaction_->SaveOriginal(kernel_); |
| 164 if (kernel_->ref(SERVER_MTIME) != value) { |
| 165 kernel_->put(SERVER_MTIME, value); |
| 166 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 167 } |
141 } | 168 } |
142 | 169 |
143 void MutableEntry::PutCtime(base::Time value) { | 170 void MutableEntry::PutCtime(base::Time value) { |
144 Put(CTIME, value); | 171 DCHECK(kernel_); |
| 172 write_transaction_->SaveOriginal(kernel_); |
| 173 if (kernel_->ref(CTIME) != value) { |
| 174 kernel_->put(CTIME, value); |
| 175 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 176 } |
145 } | 177 } |
146 | 178 |
147 void MutableEntry::PutServerCtime(base::Time value) { | 179 void MutableEntry::PutServerCtime(base::Time value) { |
148 Put(SERVER_CTIME, value); | 180 DCHECK(kernel_); |
| 181 write_transaction_->SaveOriginal(kernel_); |
| 182 if (kernel_->ref(SERVER_CTIME) != value) { |
| 183 kernel_->put(SERVER_CTIME, value); |
| 184 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 185 } |
149 } | 186 } |
150 | 187 |
151 bool MutableEntry::PutId(const Id& value) { | 188 bool MutableEntry::PutId(const Id& value) { |
152 return Put(ID, value); | 189 DCHECK(kernel_); |
| 190 write_transaction_->SaveOriginal(kernel_); |
| 191 if (kernel_->ref(ID) != value) { |
| 192 if (!dir()->ReindexId(write_transaction(), kernel_, value)) |
| 193 return false; |
| 194 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 195 } |
| 196 return true; |
153 } | 197 } |
154 | 198 |
155 void MutableEntry::PutParentId(const Id& value) { | 199 void MutableEntry::PutParentId(const Id& value) { |
156 Put(PARENT_ID, value); | 200 DCHECK(kernel_); |
| 201 write_transaction_->SaveOriginal(kernel_); |
| 202 if (kernel_->ref(PARENT_ID) != value) { |
| 203 PutParentIdPropertyOnly(value); |
| 204 if (!GetIsDel()) { |
| 205 if (!PutPredecessor(Id())) { |
| 206 // TODO(lipalani) : Propagate the error to caller. crbug.com/100444. |
| 207 NOTREACHED(); |
| 208 } |
| 209 } |
| 210 } |
157 } | 211 } |
158 | 212 |
159 void MutableEntry::PutServerParentId(const Id& value) { | 213 void MutableEntry::PutServerParentId(const Id& value) { |
160 Put(SERVER_PARENT_ID, value); | 214 DCHECK(kernel_); |
| 215 write_transaction_->SaveOriginal(kernel_); |
| 216 |
| 217 if (kernel_->ref(SERVER_PARENT_ID) != value) { |
| 218 kernel_->put(SERVER_PARENT_ID, value); |
| 219 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 220 } |
161 } | 221 } |
162 | 222 |
163 bool MutableEntry::PutIsUnsynced(bool value) { | 223 bool MutableEntry::PutIsUnsynced(bool value) { |
164 return Put(IS_UNSYNCED, value); | 224 DCHECK(kernel_); |
| 225 write_transaction_->SaveOriginal(kernel_); |
| 226 if (kernel_->ref(IS_UNSYNCED) != value) { |
| 227 MetahandleSet* index = &dir()->kernel_->unsynced_metahandles; |
| 228 |
| 229 ScopedKernelLock lock(dir()); |
| 230 if (value) { |
| 231 if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second, |
| 232 FROM_HERE, |
| 233 "Could not insert", |
| 234 write_transaction())) { |
| 235 return false; |
| 236 } |
| 237 } else { |
| 238 if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)), |
| 239 FROM_HERE, |
| 240 "Entry Not succesfully erased", |
| 241 write_transaction())) { |
| 242 return false; |
| 243 } |
| 244 } |
| 245 kernel_->put(IS_UNSYNCED, value); |
| 246 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 247 } |
| 248 return true; |
165 } | 249 } |
166 | 250 |
167 bool MutableEntry::PutIsUnappliedUpdate(bool value) { | 251 bool MutableEntry::PutIsUnappliedUpdate(bool value) { |
168 return Put(IS_UNAPPLIED_UPDATE, value); | 252 DCHECK(kernel_); |
| 253 write_transaction_->SaveOriginal(kernel_); |
| 254 if (kernel_->ref(IS_UNAPPLIED_UPDATE) != value) { |
| 255 // Use kernel_->GetServerModelType() instead of |
| 256 // GetServerModelType() as we may trigger some DCHECKs in the |
| 257 // latter. |
| 258 MetahandleSet* index = &dir()->kernel_->unapplied_update_metahandles[ |
| 259 kernel_->GetServerModelType()]; |
| 260 |
| 261 ScopedKernelLock lock(dir()); |
| 262 if (value) { |
| 263 if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second, |
| 264 FROM_HERE, |
| 265 "Could not insert", |
| 266 write_transaction())) { |
| 267 return false; |
| 268 } |
| 269 } else { |
| 270 if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)), |
| 271 FROM_HERE, |
| 272 "Entry Not succesfully erased", |
| 273 write_transaction())) { |
| 274 return false; |
| 275 } |
| 276 } |
| 277 kernel_->put(IS_UNAPPLIED_UPDATE, value); |
| 278 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 279 } |
| 280 return true; |
169 } | 281 } |
170 | 282 |
171 void MutableEntry::PutIsDir(bool value) { | 283 void MutableEntry::PutIsDir(bool value) { |
172 Put(IS_DIR, value); | 284 DCHECK(kernel_); |
| 285 write_transaction_->SaveOriginal(kernel_); |
| 286 bool old_value = kernel_->ref(IS_DIR); |
| 287 if (old_value != value) { |
| 288 kernel_->put(IS_DIR, value); |
| 289 kernel_->mark_dirty(GetDirtyIndexHelper()); |
| 290 } |
173 } | 291 } |
174 | 292 |
175 void MutableEntry::PutServerIsDir(bool value) { | 293 void MutableEntry::PutServerIsDir(bool value) { |
176 Put(SERVER_IS_DIR, value); | 294 DCHECK(kernel_); |
| 295 write_transaction_->SaveOriginal(kernel_); |
| 296 bool old_value = kernel_->ref(SERVER_IS_DIR); |
| 297 if (old_value != value) { |
| 298 kernel_->put(SERVER_IS_DIR, value); |
| 299 kernel_->mark_dirty(GetDirtyIndexHelper()); |
| 300 } |
177 } | 301 } |
178 | 302 |
179 void MutableEntry::PutServerIsDel(bool value) { | 303 void MutableEntry::PutIsDel(bool value) { |
180 Put(SERVER_IS_DEL, value); | |
181 } | |
182 | |
183 void MutableEntry::PutNonUniqueName(const std::string& value) { | |
184 Put(NON_UNIQUE_NAME, value); | |
185 } | |
186 | |
187 void MutableEntry::PutServerNonUniqueName(const std::string& value) { | |
188 Put(SERVER_NON_UNIQUE_NAME, value); | |
189 } | |
190 | |
191 void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) { | |
192 Put(SPECIFICS, value); | |
193 } | |
194 | |
195 void MutableEntry::PutServerSpecifics(const sync_pb::EntitySpecifics& value) { | |
196 Put(SERVER_SPECIFICS, value); | |
197 } | |
198 | |
199 void MutableEntry::PutBaseServerSpecifics( | |
200 const sync_pb::EntitySpecifics& value) { | |
201 Put(BASE_SERVER_SPECIFICS, value); | |
202 } | |
203 | |
204 void MutableEntry::PutUniquePosition(const UniquePosition& value) { | |
205 Put(UNIQUE_POSITION, value); | |
206 } | |
207 | |
208 void MutableEntry::PutServerUniquePosition(const UniquePosition& value) { | |
209 Put(SERVER_UNIQUE_POSITION, value); | |
210 } | |
211 | |
212 void MutableEntry::PutSyncing(bool value) { | |
213 Put(SYNCING, value); | |
214 } | |
215 | |
216 bool MutableEntry::PutIsDel(bool is_del) { | |
217 DCHECK(kernel_); | 304 DCHECK(kernel_); |
218 write_transaction_->SaveOriginal(kernel_); | 305 write_transaction_->SaveOriginal(kernel_); |
219 if (is_del == kernel_->ref(IS_DEL)) { | 306 if (value == kernel_->ref(IS_DEL)) { |
220 return true; | 307 return; |
221 } | 308 } |
222 if (is_del) { | 309 if (value) { |
223 // If the server never knew about this item and it's deleted then we don't | 310 // If the server never knew about this item and it's deleted then we don't |
224 // need to keep it around. Unsetting IS_UNSYNCED will: | 311 // need to keep it around. Unsetting IS_UNSYNCED will: |
225 // - Ensure that the item is never committed to the server. | 312 // - Ensure that the item is never committed to the server. |
226 // - Allow any items with the same UNIQUE_CLIENT_TAG created on other | 313 // - Allow any items with the same UNIQUE_CLIENT_TAG created on other |
227 // clients to override this entry. | 314 // clients to override this entry. |
228 // - Let us delete this entry permanently through | 315 // - Let us delete this entry permanently through |
229 // DirectoryBackingStore::DropDeletedEntries() when we next restart sync. | 316 // DirectoryBackingStore::DropDeletedEntries() when we next restart sync. |
230 // This will save memory and avoid crbug.com/125381. | 317 // This will save memory and avoid crbug.com/125381. |
231 if (!GetId().ServerKnows()) { | 318 if (!GetId().ServerKnows()) { |
232 Put(IS_UNSYNCED, false); | 319 PutIsUnsynced(false); |
233 } | 320 } |
234 } | 321 } |
235 | 322 |
236 { | 323 { |
237 ScopedKernelLock lock(dir()); | 324 ScopedKernelLock lock(dir()); |
238 // Some indices don't include deleted items and must be updated | 325 // Some indices don't include deleted items and must be updated |
239 // upon a value change. | 326 // upon a value change. |
240 ScopedParentChildIndexUpdater updater(lock, kernel_, | 327 ScopedParentChildIndexUpdater updater(lock, kernel_, |
241 &dir()->kernel_->parent_child_index); | 328 &dir()->kernel_->parent_child_index); |
242 | 329 |
243 kernel_->put(IS_DEL, is_del); | 330 kernel_->put(IS_DEL, value); |
244 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | 331 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
245 } | 332 } |
| 333 } |
| 334 |
| 335 void MutableEntry::PutServerIsDel(bool value) { |
| 336 DCHECK(kernel_); |
| 337 write_transaction_->SaveOriginal(kernel_); |
| 338 bool old_value = kernel_->ref(SERVER_IS_DEL); |
| 339 if (old_value != value) { |
| 340 kernel_->put(SERVER_IS_DEL, value); |
| 341 kernel_->mark_dirty(GetDirtyIndexHelper()); |
| 342 } |
| 343 |
| 344 // Update delete journal for existence status change on server side here |
| 345 // instead of in PutIsDel() because IS_DEL may not be updated due to |
| 346 // early returns when processing updates. And because |
| 347 // UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has |
| 348 // to be called on sync thread. |
| 349 dir()->delete_journal()->UpdateDeleteJournalForServerDelete( |
| 350 write_transaction(), old_value, *kernel_); |
| 351 } |
| 352 |
| 353 void MutableEntry::PutNonUniqueName(const std::string& value) { |
| 354 DCHECK(kernel_); |
| 355 write_transaction_->SaveOriginal(kernel_); |
| 356 |
| 357 if (kernel_->ref(NON_UNIQUE_NAME) != value) { |
| 358 kernel_->put(NON_UNIQUE_NAME, value); |
| 359 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 360 } |
| 361 } |
| 362 |
| 363 void MutableEntry::PutServerNonUniqueName(const std::string& value) { |
| 364 DCHECK(kernel_); |
| 365 write_transaction_->SaveOriginal(kernel_); |
| 366 |
| 367 if (kernel_->ref(SERVER_NON_UNIQUE_NAME) != value) { |
| 368 kernel_->put(SERVER_NON_UNIQUE_NAME, value); |
| 369 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 370 } |
| 371 } |
| 372 |
| 373 bool MutableEntry::PutUniqueServerTag(const string& new_tag) { |
| 374 if (new_tag == kernel_->ref(UNIQUE_SERVER_TAG)) { |
| 375 return true; |
| 376 } |
| 377 |
| 378 write_transaction_->SaveOriginal(kernel_); |
| 379 ScopedKernelLock lock(dir()); |
| 380 // Make sure your new value is not in there already. |
| 381 if (dir()->kernel_->server_tags_map.find(new_tag) != |
| 382 dir()->kernel_->server_tags_map.end()) { |
| 383 DVLOG(1) << "Detected duplicate server tag"; |
| 384 return false; |
| 385 } |
| 386 dir()->kernel_->server_tags_map.erase( |
| 387 kernel_->ref(UNIQUE_SERVER_TAG)); |
| 388 kernel_->put(UNIQUE_SERVER_TAG, new_tag); |
| 389 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 390 if (!new_tag.empty()) { |
| 391 dir()->kernel_->server_tags_map[new_tag] = kernel_; |
| 392 } |
246 | 393 |
247 return true; | 394 return true; |
248 } | 395 } |
249 | 396 |
250 bool MutableEntry::Put(Int64Field field, const int64& value) { | 397 bool MutableEntry::PutUniqueClientTag(const string& new_tag) { |
251 DCHECK(kernel_); | 398 if (new_tag == kernel_->ref(UNIQUE_CLIENT_TAG)) { |
252 | 399 return true; |
253 // We shouldn't set TRANSACTION_VERSION here. See UpdateTransactionVersion. | 400 } |
254 DCHECK_NE(TRANSACTION_VERSION, field); | |
255 | 401 |
256 write_transaction_->SaveOriginal(kernel_); | 402 write_transaction_->SaveOriginal(kernel_); |
257 if (kernel_->ref(field) != value) { | 403 ScopedKernelLock lock(dir()); |
258 ScopedKernelLock lock(dir()); | 404 // Make sure your new value is not in there already. |
259 kernel_->put(field, value); | 405 if (dir()->kernel_->client_tags_map.find(new_tag) != |
260 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | 406 dir()->kernel_->client_tags_map.end()) { |
| 407 DVLOG(1) << "Detected duplicate client tag"; |
| 408 return false; |
261 } | 409 } |
| 410 dir()->kernel_->client_tags_map.erase( |
| 411 kernel_->ref(UNIQUE_CLIENT_TAG)); |
| 412 kernel_->put(UNIQUE_CLIENT_TAG, new_tag); |
| 413 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 414 if (!new_tag.empty()) { |
| 415 dir()->kernel_->client_tags_map[new_tag] = kernel_; |
| 416 } |
| 417 |
262 return true; | 418 return true; |
263 } | 419 } |
264 | 420 |
265 bool MutableEntry::Put(TimeField field, const base::Time& value) { | 421 void MutableEntry::PutUniqueBookmarkTag(const std::string& tag) { |
266 DCHECK(kernel_); | 422 // This unique tag will eventually be used as the unique suffix when adjusting |
267 write_transaction_->SaveOriginal(kernel_); | 423 // this bookmark's position. Let's make sure it's a valid suffix. |
268 if (kernel_->ref(field) != value) { | 424 if (!UniquePosition::IsValidSuffix(tag)) { |
269 kernel_->put(field, value); | 425 NOTREACHED(); |
270 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | 426 return; |
271 } | 427 } |
272 return true; | |
273 } | |
274 | 428 |
275 bool MutableEntry::Put(IdField field, const Id& value) { | 429 if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty() && |
276 DCHECK(kernel_); | 430 tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) { |
277 write_transaction_->SaveOriginal(kernel_); | 431 // There is only one scenario where our tag is expected to change. That |
278 if (kernel_->ref(field) != value) { | 432 // scenario occurs when our current tag is a non-correct tag assigned during |
279 if (ID == field) { | 433 // the UniquePosition migration. |
280 if (!dir()->ReindexId(write_transaction(), kernel_, value)) | 434 std::string migration_generated_tag = |
281 return false; | 435 GenerateSyncableBookmarkHash(std::string(), |
282 } else if (PARENT_ID == field) { | 436 kernel_->ref(ID).GetServerId()); |
283 PutParentIdPropertyOnly(value); | 437 DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG)); |
284 if (!GetIsDel()) { | |
285 if (!PutPredecessor(Id())) { | |
286 // TODO(lipalani) : Propagate the error to caller. crbug.com/100444. | |
287 NOTREACHED(); | |
288 } | |
289 } | |
290 } else { | |
291 kernel_->put(field, value); | |
292 } | |
293 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
294 } | 438 } |
295 return true; | |
296 } | |
297 | 439 |
298 bool MutableEntry::Put(UniquePositionField field, const UniquePosition& value) { | 440 kernel_->put(UNIQUE_BOOKMARK_TAG, tag); |
299 DCHECK(kernel_); | |
300 write_transaction_->SaveOriginal(kernel_); | |
301 if(!kernel_->ref(field).Equals(value)) { | |
302 // We should never overwrite a valid position with an invalid one. | |
303 DCHECK(value.IsValid()); | |
304 ScopedKernelLock lock(dir()); | |
305 if (UNIQUE_POSITION == field) { | |
306 ScopedParentChildIndexUpdater updater( | |
307 lock, kernel_, &dir()->kernel_->parent_child_index); | |
308 kernel_->put(field, value); | |
309 } else { | |
310 kernel_->put(field, value); | |
311 } | |
312 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
313 } | |
314 return true; | |
315 } | |
316 | |
317 void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) { | |
318 write_transaction_->SaveOriginal(kernel_); | |
319 dir()->ReindexParentId(write_transaction(), kernel_, parent_id); | |
320 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | 441 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
321 } | 442 } |
322 | 443 |
323 bool MutableEntry::Put(BaseVersion field, int64 value) { | 444 void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) { |
324 DCHECK(kernel_); | |
325 write_transaction_->SaveOriginal(kernel_); | |
326 if (kernel_->ref(field) != value) { | |
327 kernel_->put(field, value); | |
328 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
329 } | |
330 return true; | |
331 } | |
332 | |
333 bool MutableEntry::Put(StringField field, const string& value) { | |
334 DCHECK(kernel_); | |
335 write_transaction_->SaveOriginal(kernel_); | |
336 if (field == UNIQUE_CLIENT_TAG) { | |
337 return PutUniqueClientTag(value); | |
338 } | |
339 | |
340 if (field == UNIQUE_SERVER_TAG) { | |
341 return PutUniqueServerTag(value); | |
342 } | |
343 | |
344 DCHECK_NE(UNIQUE_BOOKMARK_TAG, field) | |
345 << "Should use PutUniqueBookmarkTag instead of Put(UNIQUE_BOOKMARK_TAG)"; | |
346 | |
347 if (kernel_->ref(field) != value) { | |
348 kernel_->put(field, value); | |
349 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
350 } | |
351 return true; | |
352 } | |
353 | |
354 bool MutableEntry::Put(ProtoField field, | |
355 const sync_pb::EntitySpecifics& value) { | |
356 DCHECK(kernel_); | 445 DCHECK(kernel_); |
357 CHECK(!value.password().has_client_only_encrypted_data()); | 446 CHECK(!value.password().has_client_only_encrypted_data()); |
358 write_transaction_->SaveOriginal(kernel_); | 447 write_transaction_->SaveOriginal(kernel_); |
359 // TODO(ncarter): This is unfortunately heavyweight. Can we do | 448 // TODO(ncarter): This is unfortunately heavyweight. Can we do |
360 // better? | 449 // better? |
361 if (kernel_->ref(field).SerializeAsString() != value.SerializeAsString()) { | 450 if (kernel_->ref(SPECIFICS).SerializeAsString() != |
362 const bool update_unapplied_updates_index = | 451 value.SerializeAsString()) { |
363 (field == SERVER_SPECIFICS) && kernel_->ref(IS_UNAPPLIED_UPDATE); | 452 kernel_->put(SPECIFICS, value); |
364 if (update_unapplied_updates_index) { | 453 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 454 } |
| 455 } |
| 456 |
| 457 void MutableEntry::PutServerSpecifics(const sync_pb::EntitySpecifics& value) { |
| 458 DCHECK(kernel_); |
| 459 CHECK(!value.password().has_client_only_encrypted_data()); |
| 460 write_transaction_->SaveOriginal(kernel_); |
| 461 // TODO(ncarter): This is unfortunately heavyweight. Can we do |
| 462 // better? |
| 463 if (kernel_->ref(SERVER_SPECIFICS).SerializeAsString() != |
| 464 value.SerializeAsString()) { |
| 465 if (kernel_->ref(IS_UNAPPLIED_UPDATE)) { |
365 // Remove ourselves from unapplied_update_metahandles with our | 466 // Remove ourselves from unapplied_update_metahandles with our |
366 // old server type. | 467 // old server type. |
367 const ModelType old_server_type = kernel_->GetServerModelType(); | 468 const ModelType old_server_type = kernel_->GetServerModelType(); |
368 const int64 metahandle = kernel_->ref(META_HANDLE); | 469 const int64 metahandle = kernel_->ref(META_HANDLE); |
369 size_t erase_count = | 470 size_t erase_count = |
370 dir()->kernel_->unapplied_update_metahandles[old_server_type] | 471 dir()->kernel_->unapplied_update_metahandles[old_server_type] |
371 .erase(metahandle); | 472 .erase(metahandle); |
372 DCHECK_EQ(erase_count, 1u); | 473 DCHECK_EQ(erase_count, 1u); |
373 } | 474 } |
374 | 475 |
375 kernel_->put(field, value); | 476 kernel_->put(SERVER_SPECIFICS, value); |
376 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | 477 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
377 | 478 |
378 if (update_unapplied_updates_index) { | 479 if (kernel_->ref(IS_UNAPPLIED_UPDATE)) { |
379 // Add ourselves back into unapplied_update_metahandles with our | 480 // Add ourselves back into unapplied_update_metahandles with our |
380 // new server type. | 481 // new server type. |
381 const ModelType new_server_type = kernel_->GetServerModelType(); | 482 const ModelType new_server_type = kernel_->GetServerModelType(); |
382 const int64 metahandle = kernel_->ref(META_HANDLE); | 483 const int64 metahandle = kernel_->ref(META_HANDLE); |
383 dir()->kernel_->unapplied_update_metahandles[new_server_type] | 484 dir()->kernel_->unapplied_update_metahandles[new_server_type] |
384 .insert(metahandle); | 485 .insert(metahandle); |
385 } | 486 } |
386 } | 487 } |
387 return true; | |
388 } | 488 } |
389 | 489 |
390 bool MutableEntry::Put(BitField field, bool value) { | 490 void MutableEntry::PutBaseServerSpecifics( |
| 491 const sync_pb::EntitySpecifics& value) { |
| 492 DCHECK(kernel_); |
| 493 CHECK(!value.password().has_client_only_encrypted_data()); |
| 494 write_transaction_->SaveOriginal(kernel_); |
| 495 // TODO(ncarter): This is unfortunately heavyweight. Can we do |
| 496 // better? |
| 497 if (kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString() |
| 498 != value.SerializeAsString()) { |
| 499 kernel_->put(BASE_SERVER_SPECIFICS, value); |
| 500 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
| 501 } |
| 502 } |
| 503 |
| 504 void MutableEntry::PutUniquePosition(const UniquePosition& value) { |
391 DCHECK(kernel_); | 505 DCHECK(kernel_); |
392 write_transaction_->SaveOriginal(kernel_); | 506 write_transaction_->SaveOriginal(kernel_); |
393 bool old_value = kernel_->ref(field); | 507 if(!kernel_->ref(UNIQUE_POSITION).Equals(value)) { |
394 if (old_value != value) { | 508 // We should never overwrite a valid position with an invalid one. |
395 kernel_->put(field, value); | 509 DCHECK(value.IsValid()); |
396 kernel_->mark_dirty(GetDirtyIndexHelper()); | 510 ScopedKernelLock lock(dir()); |
| 511 ScopedParentChildIndexUpdater updater( |
| 512 lock, kernel_, &dir()->kernel_->parent_child_index); |
| 513 kernel_->put(UNIQUE_POSITION, value); |
| 514 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
397 } | 515 } |
| 516 } |
398 | 517 |
399 // Update delete journal for existence status change on server side here | 518 void MutableEntry::PutServerUniquePosition(const UniquePosition& value) { |
400 // instead of in PutIsDel() because IS_DEL may not be updated due to | 519 DCHECK(kernel_); |
401 // early returns when processing updates. And because | 520 write_transaction_->SaveOriginal(kernel_); |
402 // UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has | 521 if(!kernel_->ref(SERVER_UNIQUE_POSITION).Equals(value)) { |
403 // to be called on sync thread. | 522 // We should never overwrite a valid position with an invalid one. |
404 if (field == SERVER_IS_DEL) { | 523 DCHECK(value.IsValid()); |
405 dir()->delete_journal()->UpdateDeleteJournalForServerDelete( | 524 ScopedKernelLock lock(dir()); |
406 write_transaction(), old_value, *kernel_); | 525 kernel_->put(SERVER_UNIQUE_POSITION, value); |
| 526 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
407 } | 527 } |
| 528 } |
408 | 529 |
409 return true; | 530 void MutableEntry::PutSyncing(bool value) { |
| 531 kernel_->put(SYNCING, value); |
| 532 } |
| 533 |
| 534 void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) { |
| 535 write_transaction_->SaveOriginal(kernel_); |
| 536 dir()->ReindexParentId(write_transaction(), kernel_, parent_id); |
| 537 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); |
410 } | 538 } |
411 | 539 |
412 MetahandleSet* MutableEntry::GetDirtyIndexHelper() { | 540 MetahandleSet* MutableEntry::GetDirtyIndexHelper() { |
413 return &dir()->kernel_->dirty_metahandles; | 541 return &dir()->kernel_->dirty_metahandles; |
414 } | 542 } |
415 | 543 |
416 bool MutableEntry::PutUniqueClientTag(const string& new_tag) { | |
417 if (new_tag == kernel_->ref(UNIQUE_CLIENT_TAG)) { | |
418 return true; | |
419 } | |
420 | |
421 write_transaction_->SaveOriginal(kernel_); | |
422 ScopedKernelLock lock(dir()); | |
423 // Make sure your new value is not in there already. | |
424 if (dir()->kernel_->client_tags_map.find(new_tag) != | |
425 dir()->kernel_->client_tags_map.end()) { | |
426 DVLOG(1) << "Detected duplicate client tag"; | |
427 return false; | |
428 } | |
429 dir()->kernel_->client_tags_map.erase( | |
430 kernel_->ref(UNIQUE_CLIENT_TAG)); | |
431 kernel_->put(UNIQUE_CLIENT_TAG, new_tag); | |
432 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
433 if (!new_tag.empty()) { | |
434 dir()->kernel_->client_tags_map[new_tag] = kernel_; | |
435 } | |
436 | |
437 return true; | |
438 } | |
439 | |
440 bool MutableEntry::PutUniqueServerTag(const string& new_tag) { | |
441 if (new_tag == kernel_->ref(UNIQUE_SERVER_TAG)) { | |
442 return true; | |
443 } | |
444 | |
445 write_transaction_->SaveOriginal(kernel_); | |
446 ScopedKernelLock lock(dir()); | |
447 // Make sure your new value is not in there already. | |
448 if (dir()->kernel_->server_tags_map.find(new_tag) != | |
449 dir()->kernel_->server_tags_map.end()) { | |
450 DVLOG(1) << "Detected duplicate server tag"; | |
451 return false; | |
452 } | |
453 dir()->kernel_->server_tags_map.erase( | |
454 kernel_->ref(UNIQUE_SERVER_TAG)); | |
455 kernel_->put(UNIQUE_SERVER_TAG, new_tag); | |
456 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
457 if (!new_tag.empty()) { | |
458 dir()->kernel_->server_tags_map[new_tag] = kernel_; | |
459 } | |
460 | |
461 return true; | |
462 } | |
463 | |
464 bool MutableEntry::Put(IndexedBitField field, bool value) { | |
465 DCHECK(kernel_); | |
466 write_transaction_->SaveOriginal(kernel_); | |
467 if (kernel_->ref(field) != value) { | |
468 MetahandleSet* index; | |
469 if (IS_UNSYNCED == field) { | |
470 index = &dir()->kernel_->unsynced_metahandles; | |
471 } else { | |
472 // Use kernel_->GetServerModelType() instead of | |
473 // GetServerModelType() as we may trigger some DCHECKs in the | |
474 // latter. | |
475 index = | |
476 &dir()->kernel_->unapplied_update_metahandles[ | |
477 kernel_->GetServerModelType()]; | |
478 } | |
479 | |
480 ScopedKernelLock lock(dir()); | |
481 if (value) { | |
482 if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second, | |
483 FROM_HERE, | |
484 "Could not insert", | |
485 write_transaction())) { | |
486 return false; | |
487 } | |
488 } else { | |
489 if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)), | |
490 FROM_HERE, | |
491 "Entry Not succesfully erased", | |
492 write_transaction())) { | |
493 return false; | |
494 } | |
495 } | |
496 kernel_->put(field, value); | |
497 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
498 } | |
499 return true; | |
500 } | |
501 | |
502 void MutableEntry::PutUniqueBookmarkTag(const std::string& tag) { | |
503 // This unique tag will eventually be used as the unique suffix when adjusting | |
504 // this bookmark's position. Let's make sure it's a valid suffix. | |
505 if (!UniquePosition::IsValidSuffix(tag)) { | |
506 NOTREACHED(); | |
507 return; | |
508 } | |
509 | |
510 if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty() | |
511 && tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) { | |
512 // There is only one scenario where our tag is expected to change. That | |
513 // scenario occurs when our current tag is a non-correct tag assigned during | |
514 // the UniquePosition migration. | |
515 std::string migration_generated_tag = | |
516 GenerateSyncableBookmarkHash(std::string(), | |
517 kernel_->ref(ID).GetServerId()); | |
518 DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG)); | |
519 } | |
520 | |
521 kernel_->put(UNIQUE_BOOKMARK_TAG, tag); | |
522 kernel_->mark_dirty(&dir()->kernel_->dirty_metahandles); | |
523 } | |
524 | |
525 bool MutableEntry::PutPredecessor(const Id& predecessor_id) { | 544 bool MutableEntry::PutPredecessor(const Id& predecessor_id) { |
526 MutableEntry predecessor(write_transaction_, GET_BY_ID, predecessor_id); | 545 MutableEntry predecessor(write_transaction_, GET_BY_ID, predecessor_id); |
527 if (!predecessor.good()) | 546 if (!predecessor.good()) |
528 return false; | 547 return false; |
529 dir()->PutPredecessor(kernel_, predecessor.kernel_); | 548 dir()->PutPredecessor(kernel_, predecessor.kernel_); |
530 return true; | 549 return true; |
531 } | 550 } |
532 | 551 |
533 bool MutableEntry::Put(BitTemp field, bool value) { | |
534 DCHECK(kernel_); | |
535 kernel_->put(field, value); | |
536 return true; | |
537 } | |
538 | |
539 void MutableEntry::UpdateTransactionVersion(int64 value) { | 552 void MutableEntry::UpdateTransactionVersion(int64 value) { |
540 ScopedKernelLock lock(dir()); | 553 ScopedKernelLock lock(dir()); |
541 kernel_->put(TRANSACTION_VERSION, value); | 554 kernel_->put(TRANSACTION_VERSION, value); |
542 kernel_->mark_dirty(&(dir()->kernel_->dirty_metahandles)); | 555 kernel_->mark_dirty(&(dir()->kernel_->dirty_metahandles)); |
543 } | 556 } |
544 | 557 |
545 // This function sets only the flags needed to get this entry to sync. | 558 // This function sets only the flags needed to get this entry to sync. |
546 bool MarkForSyncing(MutableEntry* e) { | 559 bool MarkForSyncing(MutableEntry* e) { |
547 DCHECK_NE(static_cast<MutableEntry*>(NULL), e); | 560 DCHECK_NE(static_cast<MutableEntry*>(NULL), e); |
548 DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing."; | 561 DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing."; |
549 if (!(e->PutIsUnsynced(true))) | 562 if (!(e->PutIsUnsynced(true))) |
550 return false; | 563 return false; |
551 e->PutSyncing(false); | 564 e->PutSyncing(false); |
552 return true; | 565 return true; |
553 } | 566 } |
554 | 567 |
555 } // namespace syncable | 568 } // namespace syncable |
556 } // namespace syncer | 569 } // namespace syncer |
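Note on the pattern shown in this CL: the generic, enum-keyed Put(FIELD, value) overloads are replaced by typed setters such as PutBaseVersion() and PutIsDel(). Each setter saves the original kernel state, skips no-op writes, and marks the entry dirty only when the stored value actually changes. The standalone sketch below illustrates that setter pattern with simplified stand-in types; EntryKernel, MutableEntryLike, and the dirty_index set here are illustrative names only, not the real syncable classes, and the sketch is a minimal analogue rather than Chromium code.

// Standalone illustration (not Chromium code) of typed setters that
// track dirtiness only on real value changes.
#include <cstdint>
#include <iostream>
#include <set>

struct EntryKernel {
  int64_t base_version = 0;
  bool is_del = false;
  bool dirty = false;
  // Record this kernel in the directory-wide dirty index.
  void mark_dirty(std::set<EntryKernel*>* dirty_index) {
    dirty = true;
    dirty_index->insert(this);
  }
};

class MutableEntryLike {
 public:
  MutableEntryLike(EntryKernel* kernel, std::set<EntryKernel*>* dirty_index)
      : kernel_(kernel), dirty_index_(dirty_index) {}

  // Typed setter: a write with the current value leaves the dirty index alone.
  void PutBaseVersion(int64_t value) {
    if (kernel_->base_version != value) {
      kernel_->base_version = value;
      kernel_->mark_dirty(dirty_index_);
    }
  }

  void PutIsDel(bool value) {
    if (kernel_->is_del != value) {
      kernel_->is_del = value;
      kernel_->mark_dirty(dirty_index_);
    }
  }

 private:
  EntryKernel* kernel_;
  std::set<EntryKernel*>* dirty_index_;
};

int main() {
  std::set<EntryKernel*> dirty_index;
  EntryKernel kernel;
  MutableEntryLike entry(&kernel, &dirty_index);

  entry.PutBaseVersion(5);  // value changes -> entry marked dirty
  entry.PutBaseVersion(5);  // no-op write -> dirty index unchanged
  std::cout << "dirty entries: " << dirty_index.size() << "\n";  // prints 1
  return 0;
}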