Chromium Code Reviews

Side by Side Diff: chrome/browser/sync/syncable/directory_backing_store.cc

Issue 9699057: [Sync] Move 'sync' target to sync/ (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Address Tim's comments Created 8 years, 9 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "chrome/browser/sync/syncable/directory_backing_store.h"
6
7 #include "build/build_config.h"
8
9 #include <limits>
10
11 #include "base/base64.h"
12 #include "base/file_util.h"
13 #include "base/hash_tables.h"
14 #include "base/logging.h"
15 #include "base/metrics/histogram.h"
16 #include "base/rand_util.h"
17 #include "base/stl_util.h"
18 #include "base/string_number_conversions.h"
19 #include "base/stringprintf.h"
20 #include "base/time.h"
21 #include "chrome/browser/sync/protocol/service_constants.h"
22 #include "chrome/browser/sync/syncable/syncable-inl.h"
23 #include "chrome/browser/sync/syncable/syncable_columns.h"
24 #include "chrome/browser/sync/util/time.h"
25 #include "sql/connection.h"
26 #include "sql/statement.h"
27 #include "sql/transaction.h"
28 #include "sync/protocol/bookmark_specifics.pb.h"
29 #include "sync/protocol/sync.pb.h"
30
31 using std::string;
32
33 namespace syncable {
34
35 // This just has to be big enough to hold an UPDATE or INSERT statement that
36 // modifies all the columns in the entry table.
37 static const string::size_type kUpdateStatementBufferSize = 2048;
38
39 // Increment this version whenever updating DB tables.
40 extern const int32 kCurrentDBVersion; // Global visibility for our unittest.
41 const int32 kCurrentDBVersion = 78;
42
43 // Iterate over the fields of |entry| and bind each to |statement| for
44 // updating. Fields are bound in enum order, starting at parameter index 0.
45 void BindFields(const EntryKernel& entry,
46 sql::Statement* statement) {
47 int index = 0;
48 int i = 0;
49 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
50 statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
51 }
52 for ( ; i < TIME_FIELDS_END; ++i) {
53 statement->BindInt64(index++,
54 browser_sync::TimeToProtoTime(
55 entry.ref(static_cast<TimeField>(i))));
56 }
57 for ( ; i < ID_FIELDS_END; ++i) {
58 statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
59 }
60 for ( ; i < BIT_FIELDS_END; ++i) {
61 statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
62 }
63 for ( ; i < STRING_FIELDS_END; ++i) {
64 statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
65 }
66 std::string temp;
67 for ( ; i < PROTO_FIELDS_END; ++i) {
68 entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
69 statement->BindBlob(index++, temp.data(), temp.length());
70 }
71 }
72
73 // The caller owns the returned EntryKernel*. Assumes the statement currently
74 // points to a valid row in the metas table.
75 EntryKernel* UnpackEntry(sql::Statement* statement) {
76 EntryKernel* kernel = new EntryKernel();
77 DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
78 int i = 0;
79 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
80 kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
81 }
82 for ( ; i < TIME_FIELDS_END; ++i) {
83 kernel->put(static_cast<TimeField>(i),
84 browser_sync::ProtoTimeToTime(statement->ColumnInt64(i)));
85 }
86 for ( ; i < ID_FIELDS_END; ++i) {
87 kernel->mutable_ref(static_cast<IdField>(i)).s_ =
88 statement->ColumnString(i);
89 }
90 for ( ; i < BIT_FIELDS_END; ++i) {
91 kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
92 }
93 for ( ; i < STRING_FIELDS_END; ++i) {
94 kernel->put(static_cast<StringField>(i),
95 statement->ColumnString(i));
96 }
97 for ( ; i < PROTO_FIELDS_END; ++i) {
98 kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
99 statement->ColumnBlob(i), statement->ColumnByteLength(i));
100 }
101 return kernel;
102 }
103
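// Illustrative call pattern for the two helpers above (a sketch, not part of
// this CL): the SELECT must list columns in the same enum order that
// UnpackEntry() reads them, and the caller takes ownership of each result.
//
//   std::string select = "SELECT ";
//   AppendColumnList(&select);  // emits columns in enum order (see below)
//   select.append(" FROM metas");
//   sql::Statement s(db->GetUniqueStatement(select.c_str()));
//   while (s.Step()) {
//     EntryKernel* kernel = UnpackEntry(&s);  // caller owns |kernel|
//     index->insert(kernel);
//   }
//
// This mirrors LoadEntries() further down; |db| and |index| are placeholders.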
104 namespace {
105
106 string ComposeCreateTableColumnSpecs() {
107 const ColumnSpec* begin = g_metas_columns;
108 const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
109 string query;
110 query.reserve(kUpdateStatementBufferSize);
111 char separator = '(';
112 for (const ColumnSpec* column = begin; column != end; ++column) {
113 query.push_back(separator);
114 separator = ',';
115 query.append(column->name);
116 query.push_back(' ');
117 query.append(column->spec);
118 }
119 query.push_back(')');
120 return query;
121 }
122
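// For illustration (hypothetical specs, not the real g_metas_columns
// contents): given {{"metahandle", "bigint primary key"},
// {"base_version", "bigint default -1"}}, ComposeCreateTableColumnSpecs()
// above would return
//   "(metahandle bigint primary key,base_version bigint default -1)"
// which CreateMetasTable() appends directly after "CREATE TABLE metas".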
123 void AppendColumnList(std::string* output) {
124 const char* joiner = " ";
125 // Be explicit in SELECT order to match up with UnpackEntry.
126 for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
127 output->append(joiner);
128 output->append(ColumnName(i));
129 joiner = ", ";
130 }
131 }
132
133 } // namespace
134
135 ///////////////////////////////////////////////////////////////////////////////
136 // DirectoryBackingStore implementation.
137
138 DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
139 : db_(new sql::Connection()),
140 dir_name_(dir_name),
141 needs_column_refresh_(false) {
142 }
143
144 DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
145 sql::Connection* db)
146 : db_(db),
147 dir_name_(dir_name),
148 needs_column_refresh_(false) {
149 }
150
151 DirectoryBackingStore::~DirectoryBackingStore() {
152 }
153
154 bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
155 if (handles.empty())
156 return true;
157
158 sql::Statement statement(db_->GetCachedStatement(
159 SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
160
161 for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
162 ++i) {
163 statement.BindInt64(0, *i);
164 if (!statement.Run())
165 return false;
166 statement.Reset();
167 }
168 return true;
169 }
170
171 bool DirectoryBackingStore::SaveChanges(
172 const Directory::SaveChangesSnapshot& snapshot) {
173 DCHECK(CalledOnValidThread());
174 DCHECK(db_->is_open());
175
176 // Back out early if there is nothing to write.
177 bool save_info =
178 (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
179 if (snapshot.dirty_metas.size() < 1 && !save_info)
180 return true;
181
182 sql::Transaction transaction(db_.get());
183 if (!transaction.Begin())
184 return false;
185
186 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
187 i != snapshot.dirty_metas.end(); ++i) {
188 DCHECK(i->is_dirty());
189 if (!SaveEntryToDB(*i))
190 return false;
191 }
192
193 if (!DeleteEntries(snapshot.metahandles_to_purge))
194 return false;
195
196 if (save_info) {
197 const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
198 sql::Statement s1(db_->GetCachedStatement(
199 SQL_FROM_HERE,
200 "UPDATE share_info "
201 "SET store_birthday = ?, "
202 "next_id = ?, "
203 "notification_state = ?"));
204 s1.BindString(0, info.store_birthday);
205 s1.BindInt64(1, info.next_id);
206 s1.BindBlob(2, info.notification_state.data(),
207 info.notification_state.size());
208
209 if (!s1.Run())
210 return false;
211 DCHECK_EQ(db_->GetLastChangeCount(), 1);
212
213 sql::Statement s2(db_->GetCachedStatement(
214 SQL_FROM_HERE,
215 "INSERT OR REPLACE "
216 "INTO models (model_id, progress_marker, initial_sync_ended) "
217 "VALUES (?, ?, ?)"));
218
219 for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
220 // We persist not ModelType but rather a protobuf-derived ID.
221 string model_id = ModelTypeEnumToModelId(ModelTypeFromInt(i));
222 string progress_marker;
223 info.download_progress[i].SerializeToString(&progress_marker);
224 s2.BindBlob(0, model_id.data(), model_id.length());
225 s2.BindBlob(1, progress_marker.data(), progress_marker.length());
226 s2.BindBool(2, info.initial_sync_ended.Has(ModelTypeFromInt(i)));
227 if (!s2.Run())
228 return false;
229 DCHECK_EQ(db_->GetLastChangeCount(), 1);
230 s2.Reset();
231 }
232 }
233
234 return transaction.Commit();
235 }
236
237 bool DirectoryBackingStore::InitializeTables() {
238 sql::Transaction transaction(db_.get());
239 if (!transaction.Begin())
240 return false;
241
242 int version_on_disk = GetVersion();
243
244 // Upgrade from version 67. Version 67 was widely distributed as the original
245 // Bookmark Sync release. Version 68 removed unique naming.
246 if (version_on_disk == 67) {
247 if (MigrateVersion67To68())
248 version_on_disk = 68;
249 }
250 // Version 69 introduced additional datatypes.
251 if (version_on_disk == 68) {
252 if (MigrateVersion68To69())
253 version_on_disk = 69;
254 }
255
256 if (version_on_disk == 69) {
257 if (MigrateVersion69To70())
258 version_on_disk = 70;
259 }
260
261 // Version 71 changed the sync progress information to be per-datatype.
262 if (version_on_disk == 70) {
263 if (MigrateVersion70To71())
264 version_on_disk = 71;
265 }
266
267 // Version 72 removed extended attributes, a legacy way to do extensible
268 // key/value information, stored in their own table.
269 if (version_on_disk == 71) {
270 if (MigrateVersion71To72())
271 version_on_disk = 72;
272 }
273
274 // Version 73 added a field for notification state.
275 if (version_on_disk == 72) {
276 if (MigrateVersion72To73())
277 version_on_disk = 73;
278 }
279
280 // Version 74 added state for the autofill migration.
281 if (version_on_disk == 73) {
282 if (MigrateVersion73To74())
283 version_on_disk = 74;
284 }
285
286 // Version 75 migrated from int64-based timestamps to per-datatype tokens.
287 if (version_on_disk == 74) {
288 if (MigrateVersion74To75())
289 version_on_disk = 75;
290 }
291
292 // Version 76 removed all five autofill-migration-related columns.
293 if (version_on_disk == 75) {
294 if (MigrateVersion75To76())
295 version_on_disk = 76;
296 }
297
298 // Version 77 standardized all time fields to ms since the Unix
299 // epoch.
300 if (version_on_disk == 76) {
301 if (MigrateVersion76To77())
302 version_on_disk = 77;
303 }
304
305 // Version 78 added the column base_server_specifics to the metas table.
306 if (version_on_disk == 77) {
307 if (MigrateVersion77To78())
308 version_on_disk = 78;
309 }
310
311 // If one of the migrations requested it, drop columns that aren't current.
312 // It's only safe to do this after migrating all the way to the current
313 // version.
314 if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
315 if (!RefreshColumns())
316 version_on_disk = 0;
317 }
318
319 // A final, alternative catch-all migration to simply re-sync everything.
320 //
321 // TODO(rlarocque): It's wrong to recreate the database here unless the higher
322 // layers were expecting us to do so. See crbug.com/103824. We must leave
323 // this code as-is for now because this is the code that ends up creating the
324 // database in the first-time-sync case, where the higher layers are expecting
325 // us to create a fresh database. The solution to this should be to implement
326 // crbug.com/105018.
327 if (version_on_disk != kCurrentDBVersion) {
328 if (version_on_disk > kCurrentDBVersion)
329 return false;  // FAILED_NEWER_VERSION would convert to true (success) here.
330
331 // Fallback (re-sync everything) migration path.
332 DVLOG(1) << "Old/null sync database, version " << version_on_disk;
333 // Delete the existing database (if any), and create a fresh one.
334 DropAllTables();
335 if (!CreateTables())
336 return false;
337 }
338
339 sql::Statement s(db_->GetUniqueStatement(
340 "SELECT db_create_version, db_create_time FROM share_info"));
341 if (!s.Step())
342 return false;
343 string db_create_version = s.ColumnString(0);
344 int db_create_time = s.ColumnInt(1);
345 DVLOG(1) << "DB created at " << db_create_time << " by version " <<
346 db_create_version;
347
348 return transaction.Commit();
349 }
350
351 // This function drops unused columns by creating a new table that contains
352 // only the currently used columns, copying all rows from the old table into
353 // the new one, and then renaming the new table over the old one.
354 bool DirectoryBackingStore::RefreshColumns() {
355 DCHECK(needs_column_refresh_);
356
357 // Create a new table named temp_metas.
358 SafeDropTable("temp_metas");
359 if (!CreateMetasTable(true))
360 return false;
361
362 // Populate temp_metas from metas.
363 //
364 // At this point, the metas table may contain columns belonging to obsolete
365 // schema versions. This statement explicitly lists only the columns that
366 // belong to the current schema version, so the obsolete columns will be
367 // effectively dropped once we rename temp_metas over top of metas.
368 std::string query = "INSERT INTO temp_metas (";
369 AppendColumnList(&query);
370 query.append(") SELECT ");
371 AppendColumnList(&query);
372 query.append(" FROM metas");
373 if (!db_->Execute(query.c_str()))
374 return false;
375
376 // Drop metas.
377 SafeDropTable("metas");
378
379 // Rename temp_metas -> metas.
380 if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
381 return false;
382
383 // Repeat the process for share_info.
384 SafeDropTable("temp_share_info");
385 if (!CreateShareInfoTable(true))
386 return false;
387
388 if (!db_->Execute(
389 "INSERT INTO temp_share_info (id, name, store_birthday, "
390 "db_create_version, db_create_time, next_id, cache_guid,"
391 "notification_state) "
392 "SELECT id, name, store_birthday, db_create_version, "
393 "db_create_time, next_id, cache_guid, notification_state "
394 "FROM share_info"))
395 return false;
396
397 SafeDropTable("share_info");
398 if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
399 return false;
400
401 needs_column_refresh_ = false;
402 return true;
403 }
404
405 bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
406 string select;
407 select.reserve(kUpdateStatementBufferSize);
408 select.append("SELECT ");
409 AppendColumnList(&select);
410 select.append(" FROM metas ");
411
412 sql::Statement s(db_->GetUniqueStatement(select.c_str()));
413
414 while (s.Step()) {
415 EntryKernel *kernel = UnpackEntry(&s);
416 entry_bucket->insert(kernel);
417 }
418 return s.Succeeded();
419 }
420
421 bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
422 {
423 sql::Statement s(
424 db_->GetUniqueStatement(
425 "SELECT store_birthday, next_id, cache_guid, notification_state "
426 "FROM share_info"));
427 if (!s.Step())
428 return false;
429
430 info->kernel_info.store_birthday = s.ColumnString(0);
431 info->kernel_info.next_id = s.ColumnInt64(1);
432 info->cache_guid = s.ColumnString(2);
433 s.ColumnBlobAsString(3, &(info->kernel_info.notification_state));
434
435 // Verify there was only one row returned.
436 DCHECK(!s.Step());
437 DCHECK(s.Succeeded());
438 }
439
440 {
441 sql::Statement s(
442 db_->GetUniqueStatement(
443 "SELECT model_id, progress_marker, initial_sync_ended "
444 "FROM models"));
445
446 while (s.Step()) {
447 ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
448 s.ColumnByteLength(0));
449 if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
450 info->kernel_info.download_progress[type].ParseFromArray(
451 s.ColumnBlob(1), s.ColumnByteLength(1));
452 if (s.ColumnBool(2))
453 info->kernel_info.initial_sync_ended.Put(type);
454 }
455 }
456 if (!s.Succeeded())
457 return false;
458 }
459 {
460 sql::Statement s(
461 db_->GetUniqueStatement(
462 "SELECT MAX(metahandle) FROM metas"));
463 if (!s.Step())
464 return false;
465
466 info->max_metahandle = s.ColumnInt64(0);
467
468 // Verify only one row was returned.
469 DCHECK(!s.Step());
470 DCHECK(s.Succeeded());
471 }
472 return true;
473 }
474
475 bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
476 // This statement is constructed at runtime, so we can't use
477 // GetCachedStatement() to let the Connection cache it. We will construct
478 // and cache it ourselves the first time this function is called.
479 if (!save_entry_statement_.is_valid()) {
480 string query;
481 query.reserve(kUpdateStatementBufferSize);
482 query.append("INSERT OR REPLACE INTO metas ");
483 string values;
484 values.reserve(kUpdateStatementBufferSize);
485 values.append("VALUES ");
486 const char* separator = "( ";
487 int i = 0;
488 for (i = BEGIN_FIELDS; i < PROTO_FIELDS_END; ++i) {
489 query.append(separator);
490 values.append(separator);
491 separator = ", ";
492 query.append(ColumnName(i));
493 values.append("?");
494 }
495 query.append(" ) ");
496 values.append(" )");
497 query.append(values);
498
499 save_entry_statement_.Assign(
500 db_->GetUniqueStatement(query.c_str()));
501 } else {
502 save_entry_statement_.Reset();
503 }
504
505 BindFields(entry, &save_entry_statement_);
506 return save_entry_statement_.Run();
507 }
508
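// Shape of the statement assembled above (illustrative; the real column list
// comes from ColumnName() over BEGIN_FIELDS..PROTO_FIELDS_END, so the names
// shown here are examples only):
//
//   INSERT OR REPLACE INTO metas ( metahandle, base_version, ... )
//   VALUES ( ?, ?, ... )
//
// One '?' placeholder is emitted per column, matching the bind order used by
// BindFields().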
509 bool DirectoryBackingStore::DropDeletedEntries() {
510 return db_->Execute("DELETE FROM metas "
511 "WHERE is_del > 0 "
512 "AND is_unsynced < 1 "
513 "AND is_unapplied_update < 1");
514 }
515
516 bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
517 string query = "DROP TABLE IF EXISTS ";
518 query.append(table_name);
519 return db_->Execute(query.c_str());
520 }
521
522 void DirectoryBackingStore::DropAllTables() {
523 SafeDropTable("metas");
524 SafeDropTable("temp_metas");
525 SafeDropTable("share_info");
526 SafeDropTable("temp_share_info");
527 SafeDropTable("share_version");
528 SafeDropTable("extended_attributes");
529 SafeDropTable("models");
530 SafeDropTable("temp_models");
531 needs_column_refresh_ = false;
532 }
533
534 // static
535 ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
536 const void* data, int size) {
537 sync_pb::EntitySpecifics specifics;
538 if (!specifics.ParseFromArray(data, size))
539 return syncable::UNSPECIFIED;
540 return syncable::GetModelTypeFromSpecifics(specifics);
541 }
542
543 // static
544 string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
545 sync_pb::EntitySpecifics specifics;
546 syncable::AddDefaultFieldValue(model_type, &specifics);
547 return specifics.SerializeAsString();
548 }
549
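// A round-trip sketch of the two helpers above (illustrative): the model ID
// is an EntitySpecifics blob whose only populated field identifies the type,
// so converting back should recover the original enum value.
//
//   std::string id = ModelTypeEnumToModelId(BOOKMARKS);
//   DCHECK_EQ(BOOKMARKS, ModelIdToModelTypeEnum(id.data(), id.size()));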
550 // static
551 std::string DirectoryBackingStore::GenerateCacheGUID() {
552 // Generate a GUID with 128 bits of randomness.
553 const int kGuidBytes = 128 / 8;
554 std::string guid;
555 base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
556 return guid;
557 }
558
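// Note on the GUID format (for illustration): 16 random bytes base64-encode
// to a fixed 24-character string (including '==' padding), so:
//
//   std::string guid = GenerateCacheGUID();
//   DCHECK_EQ(24U, guid.size());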
559 bool DirectoryBackingStore::MigrateToSpecifics(
560 const char* old_columns,
561 const char* specifics_column,
562 void (*handler_function)(sql::Statement* old_value_query,
563 int old_value_column,
564 sync_pb::EntitySpecifics* mutable_new_value)) {
565 std::string query_sql = base::StringPrintf(
566 "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
567 std::string update_sql = base::StringPrintf(
568 "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);
569
570 sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
571 sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));
572
573 while (query.Step()) {
574 int64 metahandle = query.ColumnInt64(0);
575 std::string new_value_bytes;
576 query.ColumnBlobAsString(1, &new_value_bytes);
577 sync_pb::EntitySpecifics new_value;
578 new_value.ParseFromString(new_value_bytes);
579 handler_function(&query, 2, &new_value);
580 new_value.SerializeToString(&new_value_bytes);
581
582 update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
583 update.BindInt64(1, metahandle);
584 if (!update.Run())
585 return false;
586 update.Reset();
587 }
588 return query.Succeeded();
589 }
590
591 bool DirectoryBackingStore::SetVersion(int version) {
592 sql::Statement s(db_->GetCachedStatement(
593 SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
594 s.BindInt(0, version);
595
596 return s.Run();
597 }
598
599 int DirectoryBackingStore::GetVersion() {
600 if (!db_->DoesTableExist("share_version"))
601 return 0;
602
603 sql::Statement statement(db_->GetUniqueStatement(
604 "SELECT data FROM share_version"));
605 if (statement.Step()) {
606 return statement.ColumnInt(0);
607 } else {
608 return 0;
609 }
610 }
611
612 bool DirectoryBackingStore::MigrateVersion67To68() {
613 // This change simply removed three columns:
614 // string NAME
615 // string UNSANITIZED_NAME
616 // string SERVER_NAME
617 // No data migration is necessary, but we should do a column refresh.
618 SetVersion(68);
619 needs_column_refresh_ = true;
620 return true;
621 }
622
623 bool DirectoryBackingStore::MigrateVersion69To70() {
624 // Added the "unique_client_tag" column; renamed "singleton_tag" to "unique_server_tag".
625 SetVersion(70);
626 if (!db_->Execute(
627 "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
628 return false;
629 if (!db_->Execute(
630 "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
631 return false;
632 needs_column_refresh_ = true;
633
634 if (!db_->Execute(
635 "UPDATE metas SET unique_server_tag = singleton_tag"))
636 return false;
637
638 return true;
639 }
640
641 namespace {
642
643 // Callback passed to MigrateToSpecifics for the v68->v69 migration. See
644 // MigrateVersion68To69().
645 void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
646 int old_value_column,
647 sync_pb::EntitySpecifics* mutable_new_value) {
648 // Extract data from the column trio we expect.
649 bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
650 std::string old_url = old_value_query->ColumnString(old_value_column + 1);
651 std::string old_favicon;
652 old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
653 bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);
654
655 if (old_is_bookmark_object) {
656 sync_pb::BookmarkSpecifics* bookmark_data =
657 mutable_new_value->mutable_bookmark();
658 if (!old_is_dir) {
659 bookmark_data->set_url(old_url);
660 bookmark_data->set_favicon(old_favicon);
661 }
662 }
663 }
664
665 } // namespace
666
667 bool DirectoryBackingStore::MigrateVersion68To69() {
668 // In version 68, the 'metas' table had these columns:
669 // string BOOKMARK_URL
670 // string SERVER_BOOKMARK_URL
671 // blob BOOKMARK_FAVICON
672 // blob SERVER_BOOKMARK_FAVICON
673 // In version 69, these columns went away in favor of storing
674 // a serialized EntitySpecifics protobuf in the columns:
675 // protobuf blob SPECIFICS
676 // protobuf blob SERVER_SPECIFICS
677 // For bookmarks, EntitySpecifics is extended as per
678 // bookmark_specifics.proto. This migration converts bookmarks from the
679 // former scheme to the latter scheme.
680
681 // First, add the two new columns to the schema.
682 if (!db_->Execute(
683 "ALTER TABLE metas ADD COLUMN specifics blob"))
684 return false;
685 if (!db_->Execute(
686 "ALTER TABLE metas ADD COLUMN server_specifics blob"))
687 return false;
688
689 // Next, fold data from the old columns into the new protobuf columns.
690 if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
691 "bookmark_favicon, is_dir"),
692 "specifics",
693 &EncodeBookmarkURLAndFavicon)) {
694 return false;
695 }
696 if (!MigrateToSpecifics(("server_is_bookmark_object, "
697 "server_bookmark_url, "
698 "server_bookmark_favicon, "
699 "server_is_dir"),
700 "server_specifics",
701 &EncodeBookmarkURLAndFavicon)) {
702 return false;
703 }
704
705 // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
706 // ModelType: it shouldn't have BookmarkSpecifics.
707 if (!db_->Execute(
708 "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
709 "singleton_tag IN ('google_chrome')"))
710 return false;
711
712 SetVersion(69);
713 needs_column_refresh_ = true; // Trigger deletion of old columns.
714 return true;
715 }
716
717 // In version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
718 // were removed from the share_info table. They were replaced by
719 // the 'models' table, which has these values on a per-datatype basis.
720 bool DirectoryBackingStore::MigrateVersion70To71() {
721 if (!CreateV71ModelsTable())
722 return false;
723
724 // Move data from the old share_info columns to the new models table.
725 {
726 sql::Statement fetch(db_->GetUniqueStatement(
727 "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
728 if (!fetch.Step())
729 return false;
730
731 int64 last_sync_timestamp = fetch.ColumnInt64(0);
732 bool initial_sync_ended = fetch.ColumnBool(1);
733
734 // Verify there were no additional rows returned.
735 DCHECK(!fetch.Step());
736 DCHECK(fetch.Succeeded());
737
738 sql::Statement update(db_->GetUniqueStatement(
739 "INSERT INTO models (model_id, "
740 "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
741 string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
742 update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
743 update.BindInt64(1, last_sync_timestamp);
744 update.BindBool(2, initial_sync_ended);
745
746 if (!update.Run())
747 return false;
748 }
749
750 // Drop the columns from the old share_info table via a temp table.
751 const bool kCreateAsTempShareInfo = true;
752
753 if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
754 return false;
755 if (!db_->Execute(
756 "INSERT INTO temp_share_info (id, name, store_birthday, "
757 "db_create_version, db_create_time, next_id, cache_guid) "
758 "SELECT id, name, store_birthday, db_create_version, "
759 "db_create_time, next_id, cache_guid FROM share_info"))
760 return false;
761 SafeDropTable("share_info");
762 if (!db_->Execute(
763 "ALTER TABLE temp_share_info RENAME TO share_info"))
764 return false;
765 SetVersion(71);
766 return true;
767 }
768
769 bool DirectoryBackingStore::MigrateVersion71To72() {
770 // Version 72 removed a table 'extended_attributes', whose
771 // contents didn't matter.
772 SafeDropTable("extended_attributes");
773 SetVersion(72);
774 return true;
775 }
776
777 bool DirectoryBackingStore::MigrateVersion72To73() {
778 // Version 73 added one column to the table 'share_info': notification_state
779 if (!db_->Execute(
780 "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
781 return false;
782 SetVersion(73);
783 return true;
784 }
785
786 bool DirectoryBackingStore::MigrateVersion73To74() {
787 // Version 74 added the following columns to the table 'share_info':
788 // autofill_migration_state
789 // bookmarks_added_during_autofill_migration
790 // autofill_migration_time
791 // autofill_entries_added_during_migration
792 // autofill_profiles_added_during_migration
793
794 if (!db_->Execute(
795 "ALTER TABLE share_info ADD COLUMN "
796 "autofill_migration_state INT default 0"))
797 return false;
798
799 if (!db_->Execute(
800 "ALTER TABLE share_info ADD COLUMN "
801 "bookmarks_added_during_autofill_migration "
802 "INT default 0"))
803 return false;
804
805 if (!db_->Execute(
806 "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
807 "INT default 0"))
808 return false;
809
810 if (!db_->Execute(
811 "ALTER TABLE share_info ADD COLUMN "
812 "autofill_entries_added_during_migration "
813 "INT default 0"))
814 return false;
815
816 if (!db_->Execute(
817 "ALTER TABLE share_info ADD COLUMN "
818 "autofill_profiles_added_during_migration "
819 "INT default 0"))
820 return false;
821
822 SetVersion(74);
823 return true;
824 }
825
826 bool DirectoryBackingStore::MigrateVersion74To75() {
827 // In version 74, there was a table 'models':
828 // blob model_id (entity specifics, primary key)
829 // int last_download_timestamp
830 // boolean initial_sync_ended
831 // In version 75, we deprecated the integer-valued last_download_timestamp,
832 // using instead a protobuf-valued progress_marker field:
833 // blob progress_marker
834 // The progress_marker values are initialized from the value of
835 // last_download_timestamp, thereby preserving the download state.
836
837 // Move aside the old table and create a new empty one at the current schema.
838 if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
839 return false;
840 if (!CreateModelsTable())
841 return false;
842
843 sql::Statement query(db_->GetUniqueStatement(
844 "SELECT model_id, last_download_timestamp, initial_sync_ended "
845 "FROM temp_models"));
846
847 sql::Statement update(db_->GetUniqueStatement(
848 "INSERT INTO models (model_id, "
849 "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));
850
851 while (query.Step()) {
852 ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
853 query.ColumnByteLength(0));
854 if (type != UNSPECIFIED) {
855 // Set the |timestamp_token_for_migration| on a new
856 // DataTypeProgressMarker, using the old value of last_download_timestamp.
857 // The server will turn this into a real token on our behalf the next
858 // time we check for updates.
859 sync_pb::DataTypeProgressMarker progress_marker;
860 progress_marker.set_data_type_id(
861 GetSpecificsFieldNumberFromModelType(type));
862 progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
863 std::string progress_blob;
864 progress_marker.SerializeToString(&progress_blob);
865
866 update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
867 update.BindBlob(1, progress_blob.data(), progress_blob.length());
868 update.BindBool(2, query.ColumnBool(2));
869 if (!update.Run())
870 return false;
871 update.Reset();
872 }
873 }
874 if (!query.Succeeded())
875 return false;
876
877 // Drop the old table.
878 SafeDropTable("temp_models");
879
880 SetVersion(75);
881 return true;
882 }
883
884 bool DirectoryBackingStore::MigrateVersion75To76() {
885 // This change removed five columns:
886 // autofill_migration_state
887 // bookmarks_added_during_autofill_migration
888 // autofill_migration_time
889 // autofill_entries_added_during_migration
890 // autofill_profiles_added_during_migration
891 // No data migration is necessary, but we should do a column refresh.
892 SetVersion(76);
893 needs_column_refresh_ = true;
894 return true;
895 }
896
897 bool DirectoryBackingStore::MigrateVersion76To77() {
898 // This migration converts the stored timestamps to milliseconds since
899 // the Unix epoch.
900 #if defined(OS_WIN)
901 // On Windows, we used to store timestamps in FILETIME format (100s of
902 // ns since Jan 1, 1601). Magic numbers taken from
903 // http://stackoverflow.com/questions/5398557/java-library-for-dealing-with-win32-filetime
904 // .
905 #define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
906 #else
907 // On other platforms, we used to store timestamps in time_t format (s
908 // since the Unix epoch).
909 #define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
910 #endif
911 sql::Statement update_timestamps(db_->GetUniqueStatement(
912 "UPDATE metas SET "
913 TO_UNIX_TIME_MS(mtime) ", "
914 TO_UNIX_TIME_MS(server_mtime) ", "
915 TO_UNIX_TIME_MS(ctime) ", "
916 TO_UNIX_TIME_MS(server_ctime)));
917 #undef TO_UNIX_TIME_MS
918 if (!update_timestamps.Run())
919 return false;
920 SetVersion(77);
921 return true;
922 }
923
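// A worked check of the constants in MigrateVersion76To77() above (not part
// of the CL): FILETIME counts 100-ns ticks, so 10,000 ticks equal 1 ms, and
// the 1601-to-1970 epoch gap of 11,644,473,600 seconds is 11,644,473,600,000
// ms. For example, 1970-01-01 00:00:00 UTC is 116444736000000000 ticks:
//
//   116444736000000000 / 10000 - 11644473600000 == 0  // ms since Unix epoch
//
// On non-Windows builds, time_t seconds are simply multiplied by 1,000.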
924 bool DirectoryBackingStore::MigrateVersion77To78() {
925 // Version 78 added one column to table 'metas': base_server_specifics.
926 if (!db_->Execute(
927 "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
928 return false;
929 }
930 SetVersion(78);
931 return true;
932 }
933
934 bool DirectoryBackingStore::CreateTables() {
935 DVLOG(1) << "First run, creating tables";
936 // Create two little tables share_version and share_info
937 if (!db_->Execute(
938 "CREATE TABLE share_version ("
939 "id VARCHAR(128) primary key, data INT)")) {
940 return false;
941 }
942
943 {
944 sql::Statement s(db_->GetUniqueStatement(
945 "INSERT INTO share_version VALUES(?, ?)"));
946 s.BindString(0, dir_name_);
947 s.BindInt(1, kCurrentDBVersion);
948
949 if (!s.Run())
950 return false;
951 }
952
953 const bool kCreateAsTempShareInfo = false;
954 if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
955 return false;
956 }
957
958 {
959 sql::Statement s(db_->GetUniqueStatement(
960 "INSERT INTO share_info VALUES"
961 "(?, " // id
962 "?, " // name
963 "?, " // store_birthday
964 "?, " // db_create_version
965 "?, " // db_create_time
966 "-2, " // next_id
967 "?, " // cache_guid
968 "?);")); // notification_state
969 s.BindString(0, dir_name_); // id
970 s.BindString(1, dir_name_); // name
971 s.BindString(2, ""); // store_birthday
972 s.BindString(3, SYNC_ENGINE_VERSION_STRING); // db_create_version
973 s.BindInt(4, static_cast<int32>(time(0))); // db_create_time
974 s.BindString(5, GenerateCacheGUID()); // cache_guid
975 s.BindBlob(6, NULL, 0); // notification_state
976
977 if (!s.Run())
978 return false;
979 }
980
981 if (!CreateModelsTable())
982 return false;
983
984 // Create the big metas table.
985 if (!CreateMetasTable(false))
986 return false;
987
988 {
989 // Insert the entry for the root into the metas table.
990 const int64 now = browser_sync::TimeToProtoTime(base::Time::Now());
991 sql::Statement s(db_->GetUniqueStatement(
992 "INSERT INTO metas "
993 "( id, metahandle, is_dir, ctime, mtime) "
994 "VALUES ( \"r\", 1, 1, ?, ?)"));
995 s.BindInt64(0, now);
996 s.BindInt64(1, now);
997
998 if (!s.Run())
999 return false;
1000 }
1001
1002 return true;
1003 }
1004
1005 bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
1006 const char* name = is_temporary ? "temp_metas" : "metas";
1007 string query = "CREATE TABLE ";
1008 query.append(name);
1009 query.append(ComposeCreateTableColumnSpecs());
1010 return db_->Execute(query.c_str());
1011 }
1012
1013 bool DirectoryBackingStore::CreateV71ModelsTable() {
1014 // This is an old schema for the Models table, used from versions 71 to 74.
1015 return db_->Execute(
1016 "CREATE TABLE models ("
1017 "model_id BLOB primary key, "
1018 "last_download_timestamp INT, "
1019 // Gets set if the syncer ever gets updates from the
1020 // server and the server returns 0. Lets us detect the
1021 // end of the initial sync.
1022 "initial_sync_ended BOOLEAN default 0)");
1023 }
1024
1025 bool DirectoryBackingStore::CreateModelsTable() {
1026 // This is the current schema for the Models table, from version 75
1027 // onward. If you change the schema, you'll probably want to double-check
1028 // the use of this function in the v74-v75 migration.
1029 return db_->Execute(
1030 "CREATE TABLE models ("
1031 "model_id BLOB primary key, "
1032 "progress_marker BLOB, "
1033 // Gets set if the syncer ever gets updates from the
1034 // server and the server returns 0. Lets us detect the
1035 // end of the initial sync.
1036 "initial_sync_ended BOOLEAN default 0)");
1037 }
1038
1039 bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
1040 const char* name = is_temporary ? "temp_share_info" : "share_info";
1041 string query = "CREATE TABLE ";
1042 query.append(name);
1043 // This is the current schema for the ShareInfo table, from version 76
1044 // onward.
1045 query.append(" ("
1046 "id TEXT primary key, "
1047 "name TEXT, "
1048 "store_birthday TEXT, "
1049 "db_create_version TEXT, "
1050 "db_create_time INT, "
1051 "next_id INT default -2, "
1052 "cache_guid TEXT ");
1053
1054 query.append(", notification_state BLOB");
1055 query.append(")");
1056 return db_->Execute(query.c_str());
1057 }
1058
1059 bool DirectoryBackingStore::CreateShareInfoTableVersion71(
1060 bool is_temporary) {
1061 const char* name = is_temporary ? "temp_share_info" : "share_info";
1062 string query = "CREATE TABLE ";
1063 query.append(name);
1064 // This is the schema for the ShareInfo table used from versions 71 to 72.
1065 query.append(" ("
1066 "id TEXT primary key, "
1067 "name TEXT, "
1068 "store_birthday TEXT, "
1069 "db_create_version TEXT, "
1070 "db_create_time INT, "
1071 "next_id INT default -2, "
1072 "cache_guid TEXT )");
1073 return db_->Execute(query.c_str());
1074 }
1075
1076 } // namespace syncable
