From a387b77e0d6a278800176c59f6dac9dc89590d5c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Nov 2017 14:00:24 +0000 Subject: [PATCH] Vendor github.com/mattes/migrate --- vendor/manifest | 6 + .../github.com/mattes/migrate/CONTRIBUTING.md | 22 + vendor/src/github.com/mattes/migrate/FAQ.md | 67 ++ vendor/src/github.com/mattes/migrate/LICENSE | 23 + .../github.com/mattes/migrate/MIGRATIONS.md | 81 ++ vendor/src/github.com/mattes/migrate/Makefile | 123 +++ .../src/github.com/mattes/migrate/README.md | 140 +++ .../github.com/mattes/migrate/cli/README.md | 113 +++ .../mattes/migrate/cli/build_aws-s3.go | 7 + .../mattes/migrate/cli/build_cassandra.go | 7 + .../mattes/migrate/cli/build_clickhouse.go | 8 + .../mattes/migrate/cli/build_cockroachdb.go | 7 + .../mattes/migrate/cli/build_github.go | 7 + .../mattes/migrate/cli/build_go-bindata.go | 7 + .../migrate/cli/build_google-cloud-storage.go | 7 + .../mattes/migrate/cli/build_mysql.go | 7 + .../mattes/migrate/cli/build_postgres.go | 7 + .../github.com/mattes/migrate/cli/build_ql.go | 7 + .../mattes/migrate/cli/build_redshift.go | 7 + .../mattes/migrate/cli/build_spanner.go | 7 + .../mattes/migrate/cli/build_sqlite3.go | 7 + .../github.com/mattes/migrate/cli/commands.go | 96 ++ .../mattes/migrate/cli/examples/Dockerfile | 12 + .../src/github.com/mattes/migrate/cli/log.go | 45 + .../src/github.com/mattes/migrate/cli/main.go | 237 +++++ .../github.com/mattes/migrate/cli/version.go | 4 + .../migrate/database/cassandra/README.md | 31 + .../migrate/database/cassandra/cassandra.go | 228 +++++ .../database/cassandra/cassandra_test.go | 53 + .../migrate/database/clickhouse/README.md | 12 + .../migrate/database/clickhouse/clickhouse.go | 196 ++++ .../examples/migrations/001_init.down.sql | 1 + .../examples/migrations/001_init.up.sql | 3 + .../migrations/002_create_table.down.sql | 1 + .../migrations/002_create_table.up.sql | 3 + .../migrate/database/cockroachdb/README.md | 19 + .../database/cockroachdb/cockroachdb.go | 
338 +++++++ .../database/cockroachdb/cockroachdb_test.go | 91 ++ .../1085649617_create_users_table.down.sql | 1 + .../1085649617_create_users_table.up.sql | 5 + .../1185749658_add_city_to_users.down.sql | 1 + .../1185749658_add_city_to_users.up.sql | 1 + ...85849751_add_index_on_user_emails.down.sql | 1 + ...1285849751_add_index_on_user_emails.up.sql | 3 + .../1385949617_create_books_table.down.sql | 1 + .../1385949617_create_books_table.up.sql | 5 + .../1485949617_create_movies_table.down.sql | 1 + .../1485949617_create_movies_table.up.sql | 5 + .../1585849751_just_a_comment.up.sql | 1 + .../1685849751_another_comment.up.sql | 1 + .../1785849751_another_comment.up.sql | 1 + .../1885849751_another_comment.up.sql | 1 + .../mattes/migrate/database/crate/README.md | 0 .../mattes/migrate/database/driver.go | 112 +++ .../mattes/migrate/database/driver_test.go | 8 + .../mattes/migrate/database/error.go | 27 + .../mattes/migrate/database/mongodb/README.md | 0 .../mattes/migrate/database/mysql/README.md | 53 + .../mattes/migrate/database/mysql/mysql.go | 329 ++++++ .../migrate/database/mysql/mysql_test.go | 60 ++ .../mattes/migrate/database/neo4j/README.md | 0 .../migrate/database/postgres/README.md | 28 + .../1085649617_create_users_table.down.sql | 1 + .../1085649617_create_users_table.up.sql | 5 + .../1185749658_add_city_to_users.down.sql | 1 + .../1185749658_add_city_to_users.up.sql | 3 + ...85849751_add_index_on_user_emails.down.sql | 1 + ...1285849751_add_index_on_user_emails.up.sql | 3 + .../1385949617_create_books_table.down.sql | 1 + .../1385949617_create_books_table.up.sql | 5 + .../1485949617_create_movies_table.down.sql | 1 + .../1485949617_create_movies_table.up.sql | 5 + .../1585849751_just_a_comment.up.sql | 1 + .../1685849751_another_comment.up.sql | 1 + .../1785849751_another_comment.up.sql | 1 + .../1885849751_another_comment.up.sql | 1 + .../migrate/database/postgres/postgres.go | 273 +++++ .../database/postgres/postgres_test.go | 150 +++ 
.../mattes/migrate/database/ql/README.md | 0 .../ql/migration/33_create_table.down.sql | 1 + .../ql/migration/33_create_table.up.sql | 3 + .../ql/migration/44_alter_table.down.sql | 1 + .../ql/migration/44_alter_table.up.sql | 1 + .../mattes/migrate/database/ql/ql.go | 212 ++++ .../mattes/migrate/database/ql/ql_test.go | 62 ++ .../migrate/database/redshift/README.md | 6 + .../migrate/database/redshift/redshift.go | 46 + .../mattes/migrate/database/shell/README.md | 0 .../mattes/migrate/database/spanner/README.md | 35 + .../1481574547_create_users_table.down.sql | 1 + .../1481574547_create_users_table.up.sql | 5 + .../1496539702_add_city_to_users.down.sql | 1 + .../1496539702_add_city_to_users.up.sql | 1 + ...96601752_add_index_on_user_emails.down.sql | 1 + ...1496601752_add_index_on_user_emails.up.sql | 1 + .../1496602638_create_books_table.down.sql | 1 + .../1496602638_create_books_table.up.sql | 6 + .../migrate/database/spanner/spanner.go | 294 ++++++ .../migrate/database/spanner/spanner_test.go | 28 + .../mattes/migrate/database/sqlite3/README.md | 0 .../migration/33_create_table.down.sql | 1 + .../sqlite3/migration/33_create_table.up.sql | 3 + .../sqlite3/migration/44_alter_table.down.sql | 1 + .../sqlite3/migration/44_alter_table.up.sql | 1 + .../migrate/database/sqlite3/sqlite3.go | 214 ++++ .../migrate/database/sqlite3/sqlite3_test.go | 61 ++ .../mattes/migrate/database/stub/stub.go | 95 ++ .../mattes/migrate/database/stub/stub_test.go | 16 + .../migrate/database/testing/testing.go | 138 +++ .../mattes/migrate/database/util.go | 15 + .../mattes/migrate/database/util_test.go | 12 + vendor/src/github.com/mattes/migrate/log.go | 12 + .../src/github.com/mattes/migrate/migrate.go | 920 +++++++++++++++++ .../github.com/mattes/migrate/migrate_test.go | 941 ++++++++++++++++++ .../github.com/mattes/migrate/migration.go | 154 +++ .../mattes/migrate/migration_test.go | 56 ++ .../mattes/migrate/source/aws-s3/README.md | 3 + .../mattes/migrate/source/aws-s3/s3.go | 125 
+++ .../mattes/migrate/source/aws-s3/s3_test.go | 82 ++ .../mattes/migrate/source/driver.go | 107 ++ .../mattes/migrate/source/driver_test.go | 8 + .../mattes/migrate/source/file/README.md | 4 + .../mattes/migrate/source/file/file.go | 127 +++ .../mattes/migrate/source/file/file_test.go | 207 ++++ .../mattes/migrate/source/github/README.md | 11 + .../1085649617_create_users_table.down.sql | 1 + .../1085649617_create_users_table.up.sql | 5 + .../1185749658_add_city_to_users.down.sql | 1 + .../1185749658_add_city_to_users.up.sql | 3 + ...85849751_add_index_on_user_emails.down.sql | 1 + ...1285849751_add_index_on_user_emails.up.sql | 3 + .../1385949617_create_books_table.down.sql | 1 + .../1385949617_create_books_table.up.sql | 5 + .../1485949617_create_movies_table.down.sql | 1 + .../1485949617_create_movies_table.up.sql | 5 + .../1585849751_just_a_comment.up.sql | 1 + .../1685849751_another_comment.up.sql | 1 + .../1785849751_another_comment.up.sql | 1 + .../1885849751_another_comment.up.sql | 1 + .../mattes/migrate/source/github/github.go | 180 ++++ .../migrate/source/github/github_test.go | 32 + .../migrate/source/go-bindata/README.md | 43 + .../go-bindata/examples/migrations/bindata.go | 304 ++++++ .../migrate/source/go-bindata/go-bindata.go | 119 +++ .../source/go-bindata/go-bindata_test.go | 43 + .../source/go-bindata/testdata/bindata.go | 396 ++++++++ .../source/google-cloud-storage/README.md | 3 + .../source/google-cloud-storage/storage.go | 119 +++ .../google-cloud-storage/storage_test.go | 37 + .../mattes/migrate/source/migration.go | 143 +++ .../mattes/migrate/source/migration_test.go | 46 + .../github.com/mattes/migrate/source/parse.go | 39 + .../mattes/migrate/source/parse_test.go | 106 ++ .../mattes/migrate/source/stub/stub.go | 85 ++ .../mattes/migrate/source/stub/stub_test.go | 30 + .../mattes/migrate/source/testing/testing.go | 169 ++++ .../mattes/migrate/testing/docker.go | 254 +++++ .../mattes/migrate/testing/testing.go | 96 ++ 
.../mattes/migrate/testing/testing_test.go | 20 + vendor/src/github.com/mattes/migrate/util.go | 105 ++ .../github.com/mattes/migrate/util_test.go | 32 + 161 files changed, 9586 insertions(+) create mode 100644 vendor/src/github.com/mattes/migrate/CONTRIBUTING.md create mode 100644 vendor/src/github.com/mattes/migrate/FAQ.md create mode 100644 vendor/src/github.com/mattes/migrate/LICENSE create mode 100644 vendor/src/github.com/mattes/migrate/MIGRATIONS.md create mode 100644 vendor/src/github.com/mattes/migrate/Makefile create mode 100644 vendor/src/github.com/mattes/migrate/README.md create mode 100644 vendor/src/github.com/mattes/migrate/cli/README.md create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_aws-s3.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_cassandra.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_clickhouse.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_cockroachdb.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_github.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_go-bindata.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_google-cloud-storage.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_mysql.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_postgres.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_ql.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_redshift.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_spanner.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/build_sqlite3.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/commands.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/examples/Dockerfile create mode 100644 vendor/src/github.com/mattes/migrate/cli/log.go create mode 100644 vendor/src/github.com/mattes/migrate/cli/main.go create mode 100644 
vendor/src/github.com/mattes/migrate/cli/version.go create mode 100644 vendor/src/github.com/mattes/migrate/database/cassandra/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/cassandra/cassandra.go create mode 100644 vendor/src/github.com/mattes/migrate/database/cassandra/cassandra_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/clickhouse/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/clickhouse/clickhouse.go create mode 100644 vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb.go create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql create mode 100644 
vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/crate/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/driver.go create mode 100644 vendor/src/github.com/mattes/migrate/database/driver_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/error.go create mode 100644 vendor/src/github.com/mattes/migrate/database/mongodb/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/mysql/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/mysql/mysql.go create mode 100644 vendor/src/github.com/mattes/migrate/database/mysql/mysql_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/neo4j/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/README.md create 
mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/postgres/postgres.go create mode 100644 
vendor/src/github.com/mattes/migrate/database/postgres/postgres_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/ql/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/ql/ql.go create mode 100644 vendor/src/github.com/mattes/migrate/database/ql/ql_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/redshift/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/redshift/redshift.go create mode 100644 vendor/src/github.com/mattes/migrate/database/shell/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql create mode 100644 
vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/spanner.go create mode 100644 vendor/src/github.com/mattes/migrate/database/spanner/spanner_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/sqlite3/README.md create mode 100644 vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3.go create mode 100644 vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/stub/stub.go create mode 100644 vendor/src/github.com/mattes/migrate/database/stub/stub_test.go create mode 100644 vendor/src/github.com/mattes/migrate/database/testing/testing.go create mode 100644 vendor/src/github.com/mattes/migrate/database/util.go create mode 100644 vendor/src/github.com/mattes/migrate/database/util_test.go create mode 100644 vendor/src/github.com/mattes/migrate/log.go create mode 100644 vendor/src/github.com/mattes/migrate/migrate.go create mode 100644 vendor/src/github.com/mattes/migrate/migrate_test.go create mode 100644 vendor/src/github.com/mattes/migrate/migration.go create mode 100644 vendor/src/github.com/mattes/migrate/migration_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/aws-s3/README.md create mode 100644 vendor/src/github.com/mattes/migrate/source/aws-s3/s3.go create mode 100644 vendor/src/github.com/mattes/migrate/source/aws-s3/s3_test.go create mode 100644 
vendor/src/github.com/mattes/migrate/source/driver.go create mode 100644 vendor/src/github.com/mattes/migrate/source/driver_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/file/README.md create mode 100644 vendor/src/github.com/mattes/migrate/source/file/file.go create mode 100644 vendor/src/github.com/mattes/migrate/source/file/file_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/github/README.md create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql create mode 
100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql create mode 100644 vendor/src/github.com/mattes/migrate/source/github/github.go create mode 100644 vendor/src/github.com/mattes/migrate/source/github/github_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/go-bindata/README.md create mode 100644 vendor/src/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go create mode 100644 vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata.go create mode 100644 vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go create mode 100644 vendor/src/github.com/mattes/migrate/source/google-cloud-storage/README.md create mode 100644 vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage.go create mode 100644 vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/migration.go create mode 100644 vendor/src/github.com/mattes/migrate/source/migration_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/parse.go create mode 100644 vendor/src/github.com/mattes/migrate/source/parse_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/stub/stub.go create mode 100644 vendor/src/github.com/mattes/migrate/source/stub/stub_test.go create mode 100644 vendor/src/github.com/mattes/migrate/source/testing/testing.go create mode 100644 vendor/src/github.com/mattes/migrate/testing/docker.go create mode 100644 vendor/src/github.com/mattes/migrate/testing/testing.go create mode 100644 vendor/src/github.com/mattes/migrate/testing/testing_test.go create mode 100644 vendor/src/github.com/mattes/migrate/util.go create 
mode 100644 vendor/src/github.com/mattes/migrate/util_test.go diff --git a/vendor/manifest b/vendor/manifest index 830d3e2d..a5a816ba 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -150,6 +150,12 @@ "revision": "8b1c8ab81986c1ce7f06a52fce48f4a1156b66ee", "branch": "master" }, + { + "importpath": "github.com/mattes/migrate", + "repository": "https://github.com/mattes/migrate", + "revision": "69472d5f5cdca0fb2766d8d86f63cb2e78e1d869", + "branch": "master" + }, { "importpath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "repository": "https://github.com/matttproud/golang_protobuf_extensions", diff --git a/vendor/src/github.com/mattes/migrate/CONTRIBUTING.md b/vendor/src/github.com/mattes/migrate/CONTRIBUTING.md new file mode 100644 index 00000000..fcf82a42 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Development, Testing and Contributing + + 1. Make sure you have a running Docker daemon + (Install for [MacOS](https://docs.docker.com/docker-for-mac/)) + 2. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/%you%/migrate` + 3. `make rewrite-import-paths` to update imports to your local fork + 4. Confirm tests are working: `make test-short` + 5. Write awesome code ... + 6. `make test` to run all tests against all database versions + 7. `make restore-import-paths` to restore import paths + 8. Push code and open Pull Request + +Some more helpful commands: + + * You can specify which database/ source tests to run: + `make test-short SOURCE='file go-bindata' DATABASE='postgres cassandra'` + * After `make test`, run `make html-coverage` which opens a shiny test coverage overview. + * Missing imports? `make deps` + * `make build-cli` builds the CLI in directory `cli/build/`. + * `make list-external-deps` lists all external dependencies for each package + * `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server. 
+ Repeatedly call `make docs` to refresh the server. diff --git a/vendor/src/github.com/mattes/migrate/FAQ.md b/vendor/src/github.com/mattes/migrate/FAQ.md new file mode 100644 index 00000000..f8bb9a85 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/FAQ.md @@ -0,0 +1,67 @@ +# FAQ + +#### How is the code base structured? + ``` + / package migrate (the heart of everything) + /cli the CLI wrapper + /database database driver and sub directories have the actual driver implementations + /source source driver and sub directories have the actual driver implementations + ``` + +#### Why is there no `source/driver.go:Last()`? + It's not needed. And unless the source has a "native" way to read a directory in reversed order, + it might be expensive to do a full directory scan in order to get the last element. + +#### What is a NilMigration? NilVersion? + NilMigration defines a migration without a body. NilVersion is defined as const -1. + +#### What is the difference between uint(version) and int(targetVersion)? + version refers to an existing migration version coming from a source and therefor can never be negative. + targetVersion can either be a version OR represent a NilVersion, which equals -1. + +#### What's the difference between Next/Previous and Up/Down? + ``` + 1_first_migration.up.extension next -> 2_second_migration.up.extension ... + 1_first_migration.down.extension <- previous 2_second_migration.down.extension ... + ``` + +#### Why two separate files (up and down) for a migration? + It makes all of our lives easier. No new markup/syntax to learn for users + and existing database utility tools continue to work as expected. + +#### How many migrations can migrate handle? + Whatever the maximum positive signed integer value is for your platform. + For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to + the currently run and pre-fetched migrations in memory. 
Please note that some + source drivers need to do build a full "directory" tree first, which puts some + heat on the memory consumption. + +#### Are the table tests in migrate_test.go bloated? + Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact + the tests are very visual now and might help new users understand expected behaviors quickly. + Migrate from version x to y and y is the last migration? Just check out the test for + that particular case and know what's going on instantly. + +#### What is Docker being used for? + Only for testing. See [testing/docker.go](testing/docker.go) + +#### Why not just use docker-compose? + It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast + and whenever we want, not just once at the beginning of all tests. + +#### Can I maintain my driver in my own repository? + Yes, technically thats possible. We want to encourage you to contribute your driver to this respository though. + The driver's functionality is dictated by migrate's interfaces. That means there should really + just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing, + just implemented a bit differently, co-exist somewhere on Github. If users have to do research first to find the + "best" available driver for a database in order to get started, we would have failed as an open source community. + +#### Can I mix multiple sources during a batch of migrations? + No. + +#### What does "dirty" database mean? + Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists, + which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error + and then "force" the expected version. 
+ + diff --git a/vendor/src/github.com/mattes/migrate/LICENSE b/vendor/src/github.com/mattes/migrate/LICENSE new file mode 100644 index 00000000..62efa367 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/LICENSE @@ -0,0 +1,23 @@ +The MIT License (MIT) + +Copyright (c) 2016 Matthias Kadenbach + +https://github.com/mattes/migrate + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/src/github.com/mattes/migrate/MIGRATIONS.md b/vendor/src/github.com/mattes/migrate/MIGRATIONS.md new file mode 100644 index 00000000..fbefb927 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/MIGRATIONS.md @@ -0,0 +1,81 @@ +# Migrations + +## Migration Filename Format + +A single logical migration is represented as two separate migration files, one +to migrate "up" to the specified version from the previous version, and a second +to migrate back "down" to the previous version. 
These migrations can be provided +by any one of the supported [migration sources](./README.md#migration-sources). + +The ordering and direction of the migration files is determined by the filenames +used for them. `migrate` expects the filenames of migrations to have the format: + + {version}_{title}.up.{extension} + {version}_{title}.down.{extension} + +The `title` of each migration is unused, and is only for readability. Similarly, +the `extension` of the migration files is not checked by the library, and should +be an appropriate format for the database in use (`.sql` for SQL variants, for +instance). + +Versions of migrations may be represented as any 64 bit unsigned integer. +All migrations are applied upward in order of increasing version number, and +downward by decreasing version number. + +Common versioning schemes include incrementing integers: + + 1_initialize_schema.down.sql + 1_initialize_schema.up.sql + 2_add_table.down.sql + 2_add_table.up.sql + ... + +Or timestamps at an appropriate resolution: + + 1500360784_initialize_schema.down.sql + 1500360784_initialize_schema.up.sql + 1500445949_add_table.down.sql + 1500445949_add_table.up.sql + ... + +But any scheme resulting in distinct, incrementing integers as versions is valid. + +It is suggested that the version number of corresponding `up` and `down` migration +files be equivalent for clarity, but they are allowed to differ so long as the +relative ordering of the migrations is preserved. + +The migration files are permitted to be empty, so in the event that a migration +is a no-op or is irreversible, it is recommended to still include both migration +files, and either leaving them empty or adding a comment as appropriate. + +## Migration Content Format + +The format of the migration files themselves varies between database systems. 
+Different databases have different semantics around schema changes and when and +how they are allowed to occur (for instance, if schema changes can occur within +a transaction). + +As such, the `migrate` library has little to no checking around the format of +migration sources. The migration files are generally processed directly by the +drivers as raw operations. + +## Reversibility of Migrations + +Best practice for writing schema migration is that all migrations should be +reversible. It should in theory be possible for run migrations down and back up +through any and all versions with the state being fully cleaned and recreated +by doing so. + +By adhering to this recommended practice, development and deployment of new code +is cleaner and easier (cleaning database state for a new feature should be as +easy as migrating down to a prior version, and back up to the latest). + +As opposed to some other migration libraries, `migrate` represents up and down +migrations as separate files. This prevents any non-standard file syntax from +being introduced which may result in unintended behavior or errors, depending +on what database is processing the file. + +While it is technically possible for an up or down migration to exist on its own +without an equivalently versioned counterpart, it is strongly recommended to +always include a down migration which cleans up the state of the corresponding +up migration. diff --git a/vendor/src/github.com/mattes/migrate/Makefile b/vendor/src/github.com/mattes/migrate/Makefile new file mode 100644 index 00000000..e36394be --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/Makefile @@ -0,0 +1,123 @@ +SOURCE ?= file go-bindata github aws-s3 google-cloud-storage +DATABASE ?= postgres mysql redshift cassandra sqlite3 spanner cockroachdb clickhouse +VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-) +TEST_FLAGS ?= +REPO_OWNER ?= $(shell cd .. 
&& basename "$$(pwd)") + + +build-cli: clean + -mkdir ./cli/build + cd ./cli && CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -a -o build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli && CGO_ENABLED=1 GOOS=darwin GOARCH=amd64 go build -a -o build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli && CGO_ENABLED=1 GOOS=windows GOARCH=amd64 go build -a -o build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' . + cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {} + cd ./cli/build && shasum -a 256 * > sha256sum.txt + cat ./cli/build/sha256sum.txt + + +clean: + -rm -r ./cli/build + + +test-short: + make test-with-flags --ignore-errors TEST_FLAGS='-short' + + +test: + @-rm -r .coverage + @mkdir .coverage + make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile .coverage/_$$(RAND).txt -bench=. -benchmem' + @echo 'mode: atomic' > .coverage/combined.txt + @cat .coverage/*.txt | grep -v 'mode: atomic' >> .coverage/combined.txt + + +test-with-flags: + @echo SOURCE: $(SOURCE) + @echo DATABASE: $(DATABASE) + + @go test $(TEST_FLAGS) . + @go test $(TEST_FLAGS) ./cli/... + @go test $(TEST_FLAGS) ./testing/... + + @echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{} + @go test $(TEST_FLAGS) ./source/testing/... + @go test $(TEST_FLAGS) ./source/stub/... + + @echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{} + @go test $(TEST_FLAGS) ./database/testing/... + @go test $(TEST_FLAGS) ./database/stub/... + + +kill-orphaned-docker-containers: + docker rm -f $(shell docker ps -aq --filter label=migrate_test) + + +html-coverage: + go tool cover -html=.coverage/combined.txt + + +deps: + -go get -v -u ./... + -go test -v -i ./... + # TODO: why is this not being fetched with the command above? 
+ -go get -u github.com/fsouza/fake-gcs-server/fakestorage + + +list-external-deps: + $(call external_deps,'.') + $(call external_deps,'./cli/...') + $(call external_deps,'./testing/...') + + $(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...')) + $(call external_deps,'./source/testing/...') + $(call external_deps,'./source/stub/...') + + $(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...')) + $(call external_deps,'./database/testing/...') + $(call external_deps,'./database/stub/...') + + +restore-import-paths: + find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \; + + +rewrite-import-paths: + find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \; + + +# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs +docs: + -make kill-docs + nohup godoc -play -http=127.0.0.1:6064 /dev/null 2>&1 & echo $$! > .godoc.pid + cat .godoc.pid + + +kill-docs: + @cat .godoc.pid + kill -9 $$(cat .godoc.pid) + rm .godoc.pid + + +open-docs: + open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate + + +# example: make release V=0.0.0 +release: + git tag v$(V) + @read -p "Press enter to confirm and push to origin ..." 
&& git push origin v$(V) + + +define external_deps + @echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' + +endef + + +.PHONY: build-cli clean test-short test test-with-flags deps html-coverage \ + restore-import-paths rewrite-import-paths list-external-deps release \ + docs kill-docs open-docs kill-orphaned-docker-containers + +SHELL = /bin/bash +RAND = $(shell echo $$RANDOM) + diff --git a/vendor/src/github.com/mattes/migrate/README.md b/vendor/src/github.com/mattes/migrate/README.md new file mode 100644 index 00000000..d69a10b3 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/README.md @@ -0,0 +1,140 @@ +[![Build Status](https://travis-ci.org/mattes/migrate.svg?branch=master)](https://travis-ci.org/mattes/migrate) +[![GoDoc](https://godoc.org/github.com/mattes/migrate?status.svg)](https://godoc.org/github.com/mattes/migrate) +[![Coverage Status](https://coveralls.io/repos/github/mattes/migrate/badge.svg?branch=v3.0-prev)](https://coveralls.io/github/mattes/migrate?branch=v3.0-prev) +[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/mattes/migrate?filter=debs) + +# migrate + +__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__ + + * Migrate reads migrations from [sources](#migration-sources) + and applies them in correct order to a [database](#databases). + * Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof. + (Keeps the drivers lightweight, too.) + * Database drivers don't assume things or try to correct user input. When in doubt, fail. + + +Looking for [v1](https://github.com/mattes/migrate/tree/v1)? + + +## Databases + +Database drivers run migrations. 
[Add a new database?](database/driver.go) + + * [PostgreSQL](database/postgres) + * [Redshift](database/redshift) + * [Ql](database/ql) + * [Cassandra](database/cassandra) + * [SQLite](database/sqlite3) + * [MySQL/ MariaDB](database/mysql) + * [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167)) + * [MongoDB](database/mongodb) ([todo #169](https://github.com/mattes/migrate/issues/169)) + * [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170)) + * [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171)) + * [Google Cloud Spanner](database/spanner) + * [CockroachDB](database/cockroachdb) + * [ClickHouse](database/clickhouse) + + +## Migration Sources + +Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go) + + * [Filesystem](source/file) - read from fileystem + * [Go-Bindata](source/go-bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata)) + * [Github](source/github) - read from remote Github repositories + * [AWS S3](source/aws-s3) - read from Amazon Web Services S3 + * [Google Cloud Storage](source/google-cloud-storage) - read from Google Cloud Platform Storage + + + +## CLI usage + + * Simple wrapper around this library. + * Handles ctrl+c (SIGINT) gracefully. + * No config search paths, no config files, no magic ENV var injections. + +__[CLI Documentation](cli)__ + +([brew todo #156](https://github.com/mattes/migrate/issues/156)) + +``` +$ brew install migrate --with-postgres +$ migrate -database postgres://localhost:5432/database up 2 +``` + + +## Use in your Go project + + * API is stable and frozen for this release (v3.x). + * Package migrate has no external dependencies. + * Only import the drivers you need. 
+ (check [dependency_tree.txt](https://github.com/mattes/migrate/releases) for each driver) + * To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`. + * Bring your own logger. + * Uses `io.Reader` streams internally for low memory overhead. + * Thread-safe and no goroutine leaks. + +__[Go Documentation](https://godoc.org/github.com/mattes/migrate)__ + +```go +import ( + "github.com/mattes/migrate" + _ "github.com/mattes/migrate/database/postgres" + _ "github.com/mattes/migrate/source/github" +) + +func main() { + m, err := migrate.New( + "github://mattes:personal-access-token@mattes/migrate_test", + "postgres://localhost:5432/database?sslmode=enable") + m.Steps(2) +} +``` + +Want to use an existing database client? + +```go +import ( + "database/sql" + _ "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database/postgres" + _ "github.com/mattes/migrate/source/file" +) + +func main() { + db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=enable") + driver, err := postgres.WithInstance(db, &postgres.Config{}) + m, err := migrate.NewWithDatabaseInstance( + "file:///migrations", + "postgres", driver) + m.Steps(2) +} +``` + +## Migration files + +Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration) + +``` +1481574547_create_users_table.up.sql +1481574547_create_users_table.down.sql +``` + +[Best practices: How to write migrations.](MIGRATIONS.md) + + + +## Development and Contributing + +Yes, please! [`Makefile`](Makefile) is your friend, +read the [development guide](CONTRIBUTING.md). + +Also have a look at the [FAQ](FAQ.md). + + + +--- + +Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database). 
diff --git a/vendor/src/github.com/mattes/migrate/cli/README.md b/vendor/src/github.com/mattes/migrate/cli/README.md new file mode 100644 index 00000000..c0886d5a --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/README.md @@ -0,0 +1,113 @@ +# migrate CLI + +## Installation + +#### With Go toolchain + +``` +$ go get -u -d github.com/mattes/migrate/cli github.com/lib/pq +$ go build -tags 'postgres' -o /usr/local/bin/migrate github.com/mattes/migrate/cli +``` + +Note: This example builds the cli which will only work with postgres. In order +to build the cli for use with other databases, replace the `postgres` build tag +with the appropriate database tag(s) for the databases desired. The tags +correspond to the names of the sub-packages underneath the +[`database`](../database) package. + +#### MacOS + +([todo #156](https://github.com/mattes/migrate/issues/156)) + +``` +$ brew install migrate --with-postgres +``` + +#### Linux (*.deb package) + +``` +$ curl -L https://packagecloud.io/mattes/migrate/gpgkey | apt-key add - +$ echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list +$ apt-get update +$ apt-get install -y migrate +``` + +#### Download pre-build binary (Windows, MacOS, or Linux) + +[Release Downloads](https://github.com/mattes/migrate/releases) + +``` +$ curl -L https://github.com/mattes/migrate/releases/download/$version/migrate.$platform-amd64.tar.gz | tar xvz +``` + + + +## Usage + +``` +$ migrate -help +Usage: migrate OPTIONS COMMAND [arg...] 
+ migrate [ -version | -help ] + +Options: + -source Location of the migrations (driver://url) + -path Shorthand for -source=file://path + -database Run migrations against this database (driver://url) + -prefetch N Number of migrations to load in advance before executing (default 10) + -lock-timeout N Allow N seconds to acquire database lock (default 15) + -verbose Print verbose logging + -version Print version + -help Print usage + +Commands: + create [-ext E] [-dir D] NAME + Create a set of timestamped up/down migrations titled NAME, in directory D with extension E + goto V Migrate to version V + up [N] Apply all or N up migrations + down [N] Apply all or N down migrations + drop Drop everyting inside database + force V Set version V but don't run migration (ignores dirty state) + version Print current migration version +``` + + +So let's say you want to run the first two migrations + +``` +$ migrate -database postgres://localhost:5432/database up 2 +``` + +If your migrations are hosted on github + +``` +$ migrate -source github://mattes:personal-access-token@mattes/migrate_test \ + -database postgres://localhost:5432/database down 2 +``` + +The CLI will gracefully stop at a safe point when SIGINT (ctrl+c) is received. +Send SIGKILL for immediate halt. 
+ + + +## Reading CLI arguments from somewhere else + +##### ENV variables + +``` +$ migrate -database "$MY_MIGRATE_DATABASE" +``` + +##### JSON files + +Check out https://stedolan.github.io/jq/ + +``` +$ migrate -database "$(cat config.json | jq '.database')" +``` + +##### YAML files + +```` +$ migrate -database "$(cat config/database.yml | ruby -ryaml -e "print YAML.load(STDIN.read)['database']")" +$ migrate -database "$(cat config/database.yml | python -c 'import yaml,sys;print yaml.safe_load(sys.stdin)["database"]')" +``` diff --git a/vendor/src/github.com/mattes/migrate/cli/build_aws-s3.go b/vendor/src/github.com/mattes/migrate/cli/build_aws-s3.go new file mode 100644 index 00000000..766fd566 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_aws-s3.go @@ -0,0 +1,7 @@ +// +build aws-s3 + +package main + +import ( + _ "github.com/mattes/migrate/source/aws-s3" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_cassandra.go b/vendor/src/github.com/mattes/migrate/cli/build_cassandra.go new file mode 100644 index 00000000..319b52d2 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_cassandra.go @@ -0,0 +1,7 @@ +// +build cassandra + +package main + +import ( + _ "github.com/mattes/migrate/database/cassandra" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_clickhouse.go b/vendor/src/github.com/mattes/migrate/cli/build_clickhouse.go new file mode 100644 index 00000000..c9175e28 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_clickhouse.go @@ -0,0 +1,8 @@ +// +build clickhouse + +package main + +import ( + _ "github.com/kshvakov/clickhouse" + _ "github.com/mattes/migrate/database/clickhouse" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_cockroachdb.go b/vendor/src/github.com/mattes/migrate/cli/build_cockroachdb.go new file mode 100644 index 00000000..e5fdf073 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_cockroachdb.go @@ -0,0 +1,7 @@ +// +build 
cockroachdb + +package main + +import ( + _ "github.com/mattes/migrate/database/cockroachdb" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_github.go b/vendor/src/github.com/mattes/migrate/cli/build_github.go new file mode 100644 index 00000000..9c813b46 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_github.go @@ -0,0 +1,7 @@ +// +build github + +package main + +import ( + _ "github.com/mattes/migrate/source/github" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_go-bindata.go b/vendor/src/github.com/mattes/migrate/cli/build_go-bindata.go new file mode 100644 index 00000000..8a6a8934 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_go-bindata.go @@ -0,0 +1,7 @@ +// +build go-bindata + +package main + +import ( + _ "github.com/mattes/migrate/source/go-bindata" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_google-cloud-storage.go b/vendor/src/github.com/mattes/migrate/cli/build_google-cloud-storage.go new file mode 100644 index 00000000..04f31433 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_google-cloud-storage.go @@ -0,0 +1,7 @@ +// +build google-cloud-storage + +package main + +import ( + _ "github.com/mattes/migrate/source/google-cloud-storage" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_mysql.go b/vendor/src/github.com/mattes/migrate/cli/build_mysql.go new file mode 100644 index 00000000..177766f5 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_mysql.go @@ -0,0 +1,7 @@ +// +build mysql + +package main + +import ( + _ "github.com/mattes/migrate/database/mysql" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_postgres.go b/vendor/src/github.com/mattes/migrate/cli/build_postgres.go new file mode 100644 index 00000000..87f6be75 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_postgres.go @@ -0,0 +1,7 @@ +// +build postgres + +package main + +import ( + _ 
"github.com/mattes/migrate/database/postgres" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_ql.go b/vendor/src/github.com/mattes/migrate/cli/build_ql.go new file mode 100644 index 00000000..cd56ef95 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_ql.go @@ -0,0 +1,7 @@ +// +build ql + +package main + +import ( + _ "github.com/mattes/migrate/database/ql" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_redshift.go b/vendor/src/github.com/mattes/migrate/cli/build_redshift.go new file mode 100644 index 00000000..8153d0aa --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_redshift.go @@ -0,0 +1,7 @@ +// +build redshift + +package main + +import ( + _ "github.com/mattes/migrate/database/redshift" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_spanner.go b/vendor/src/github.com/mattes/migrate/cli/build_spanner.go new file mode 100644 index 00000000..7223d820 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_spanner.go @@ -0,0 +1,7 @@ +// +build spanner + +package main + +import ( + _ "github.com/mattes/migrate/database/spanner" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/build_sqlite3.go b/vendor/src/github.com/mattes/migrate/cli/build_sqlite3.go new file mode 100644 index 00000000..48ae8ebc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/build_sqlite3.go @@ -0,0 +1,7 @@ +// +build sqlite3 + +package main + +import ( + _ "github.com/mattes/migrate/database/sqlite3" +) diff --git a/vendor/src/github.com/mattes/migrate/cli/commands.go b/vendor/src/github.com/mattes/migrate/cli/commands.go new file mode 100644 index 00000000..703896dc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/commands.go @@ -0,0 +1,96 @@ +package main + +import ( + "github.com/mattes/migrate" + _ "github.com/mattes/migrate/database/stub" // TODO remove again + _ "github.com/mattes/migrate/source/file" + "os" + "fmt" +) + +func createCmd(dir string, timestamp int64, 
name string, ext string) { + base := fmt.Sprintf("%v%v_%v.", dir, timestamp, name) + os.MkdirAll(dir, os.ModePerm) + createFile(base + "up" + ext) + createFile(base + "down" + ext) +} + +func createFile(fname string) { + if _, err := os.Create(fname); err != nil { + log.fatalErr(err) + } +} + +func gotoCmd(m *migrate.Migrate, v uint) { + if err := m.Migrate(v); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } +} + +func upCmd(m *migrate.Migrate, limit int) { + if limit >= 0 { + if err := m.Steps(limit); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } else { + if err := m.Up(); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } +} + +func downCmd(m *migrate.Migrate, limit int) { + if limit >= 0 { + if err := m.Steps(-limit); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } else { + if err := m.Down(); err != nil { + if err != migrate.ErrNoChange { + log.fatalErr(err) + } else { + log.Println(err) + } + } + } +} + +func dropCmd(m *migrate.Migrate) { + if err := m.Drop(); err != nil { + log.fatalErr(err) + } +} + +func forceCmd(m *migrate.Migrate, v int) { + if err := m.Force(v); err != nil { + log.fatalErr(err) + } +} + +func versionCmd(m *migrate.Migrate) { + v, dirty, err := m.Version() + if err != nil { + log.fatalErr(err) + } + if dirty { + log.Printf("%v (dirty)\n", v) + } else { + log.Println(v) + } +} diff --git a/vendor/src/github.com/mattes/migrate/cli/examples/Dockerfile b/vendor/src/github.com/mattes/migrate/cli/examples/Dockerfile new file mode 100644 index 00000000..740f951f --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/examples/Dockerfile @@ -0,0 +1,12 @@ +FROM ubuntu:xenial + +RUN apt-get update && \ + apt-get install -y curl apt-transport-https + +RUN curl -L https://packagecloud.io/mattes/migrate/gpgkey | 
apt-key add - && \ + echo "deb https://packagecloud.io/mattes/migrate/ubuntu/ xenial main" > /etc/apt/sources.list.d/migrate.list && \ + apt-get update && \ + apt-get install -y migrate + +RUN migrate -version + diff --git a/vendor/src/github.com/mattes/migrate/cli/log.go b/vendor/src/github.com/mattes/migrate/cli/log.go new file mode 100644 index 00000000..a119d348 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/log.go @@ -0,0 +1,45 @@ +package main + +import ( + "fmt" + logpkg "log" + "os" +) + +type Log struct { + verbose bool +} + +func (l *Log) Printf(format string, v ...interface{}) { + if l.verbose { + logpkg.Printf(format, v...) + } else { + fmt.Fprintf(os.Stderr, format, v...) + } +} + +func (l *Log) Println(args ...interface{}) { + if l.verbose { + logpkg.Println(args...) + } else { + fmt.Fprintln(os.Stderr, args...) + } +} + +func (l *Log) Verbose() bool { + return l.verbose +} + +func (l *Log) fatalf(format string, v ...interface{}) { + l.Printf(format, v...) + os.Exit(1) +} + +func (l *Log) fatal(args ...interface{}) { + l.Println(args...) 
+ os.Exit(1) +} + +func (l *Log) fatalErr(err error) { + l.fatal("error:", err) +} diff --git a/vendor/src/github.com/mattes/migrate/cli/main.go b/vendor/src/github.com/mattes/migrate/cli/main.go new file mode 100644 index 00000000..4c727a97 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/main.go @@ -0,0 +1,237 @@ +package main + +import ( + "flag" + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + "time" + + "github.com/mattes/migrate" +) + +// set main log +var log = &Log{} + +func main() { + helpPtr := flag.Bool("help", false, "") + versionPtr := flag.Bool("version", false, "") + verbosePtr := flag.Bool("verbose", false, "") + prefetchPtr := flag.Uint("prefetch", 10, "") + lockTimeoutPtr := flag.Uint("lock-timeout", 15, "") + pathPtr := flag.String("path", "", "") + databasePtr := flag.String("database", "", "") + sourcePtr := flag.String("source", "", "") + + flag.Usage = func() { + fmt.Fprint(os.Stderr, + `Usage: migrate OPTIONS COMMAND [arg...] + migrate [ -version | -help ] + +Options: + -source Location of the migrations (driver://url) + -path Shorthand for -source=file://path + -database Run migrations against this database (driver://url) + -prefetch N Number of migrations to load in advance before executing (default 10) + -lock-timeout N Allow N seconds to acquire database lock (default 15) + -verbose Print verbose logging + -version Print version + -help Print usage + +Commands: + create [-ext E] [-dir D] NAME + Create a set of timestamped up/down migrations titled NAME, in directory D with extension E + goto V Migrate to version V + up [N] Apply all or N up migrations + down [N] Apply all or N down migrations + drop Drop everyting inside database + force V Set version V but don't run migration (ignores dirty state) + version Print current migration version +`) + } + + flag.Parse() + + // initialize logger + log.verbose = *verbosePtr + + // show cli version + if *versionPtr { + fmt.Fprintln(os.Stderr, Version) + os.Exit(0) + 
} + + // show help + if *helpPtr { + flag.Usage() + os.Exit(0) + } + + // translate -path into -source if given + if *sourcePtr == "" && *pathPtr != "" { + *sourcePtr = fmt.Sprintf("file://%v", *pathPtr) + } + + // initialize migrate + // don't catch migraterErr here and let each command decide + // how it wants to handle the error + migrater, migraterErr := migrate.New(*sourcePtr, *databasePtr) + defer func() { + if migraterErr == nil { + migrater.Close() + } + }() + if migraterErr == nil { + migrater.Log = log + migrater.PrefetchMigrations = *prefetchPtr + migrater.LockTimeout = time.Duration(int64(*lockTimeoutPtr)) * time.Second + + // handle Ctrl+c + signals := make(chan os.Signal, 1) + signal.Notify(signals, syscall.SIGINT) + go func() { + for range signals { + log.Println("Stopping after this running migration ...") + migrater.GracefulStop <- true + return + } + }() + } + + startTime := time.Now() + + switch flag.Arg(0) { + case "create": + args := flag.Args()[1:] + + createFlagSet := flag.NewFlagSet("create", flag.ExitOnError) + extPtr := createFlagSet.String("ext", "", "File extension") + dirPtr := createFlagSet.String("dir", "", "Directory to place file in (default: current working directory)") + createFlagSet.Parse(args) + + if createFlagSet.NArg() == 0 { + log.fatal("error: please specify name") + } + name := createFlagSet.Arg(0) + + if *extPtr != "" { + *extPtr = "." 
+ strings.TrimPrefix(*extPtr, ".") + } + if *dirPtr != "" { + *dirPtr = strings.Trim(*dirPtr, "/") + "/" + } + + timestamp := startTime.Unix() + + createCmd(*dirPtr, timestamp, name, *extPtr) + + case "goto": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + if flag.Arg(1) == "" { + log.fatal("error: please specify version argument V") + } + + v, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read version argument V") + } + + gotoCmd(migrater, uint(v)) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "up": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + limit := -1 + if flag.Arg(1) != "" { + n, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read limit argument N") + } + limit = int(n) + } + + upCmd(migrater, limit) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "down": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + limit := -1 + if flag.Arg(1) != "" { + n, err := strconv.ParseUint(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read limit argument N") + } + limit = int(n) + } + + downCmd(migrater, limit) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "drop": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + dropCmd(migrater) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case "force": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + if flag.Arg(1) == "" { + log.fatal("error: please specify version argument V") + } + + v, err := strconv.ParseInt(flag.Arg(1), 10, 64) + if err != nil { + log.fatal("error: can't read version argument V") + } + + if v < -1 { + log.fatal("error: argument V must be >= -1") + } + + forceCmd(migrater, int(v)) + + if log.verbose { + log.Println("Finished after", time.Now().Sub(startTime)) + } + + case 
"version": + if migraterErr != nil { + log.fatalErr(migraterErr) + } + + versionCmd(migrater) + + default: + flag.Usage() + os.Exit(0) + } +} diff --git a/vendor/src/github.com/mattes/migrate/cli/version.go b/vendor/src/github.com/mattes/migrate/cli/version.go new file mode 100644 index 00000000..6c3ec49f --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/cli/version.go @@ -0,0 +1,4 @@ +package main + +// Version is set in Makefile with build flags +var Version = "dev" diff --git a/vendor/src/github.com/mattes/migrate/database/cassandra/README.md b/vendor/src/github.com/mattes/migrate/database/cassandra/README.md new file mode 100644 index 00000000..f99b1105 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cassandra/README.md @@ -0,0 +1,31 @@ +# Cassandra + +* Drop command will not work on Cassandra 2.X because it rely on +system_schema table which comes with 3.X +* Other commands should work properly but are **not tested** + + +## Usage +`cassandra://host:port/keyspace?param1=value¶m2=value2` + + +| URL Query | Default value | Description | +|------------|-------------|-----------| +| `x-migrations-table` | schema_migrations | Name of the migrations table | +| `port` | 9042 | The port to bind to | +| `consistency` | ALL | Migration consistency +| `protocol` | | Cassandra protocol version (3 or 4) +| `timeout` | 1 minute | Migration timeout +| `username` | nil | Username to use when authenticating. | +| `password` | nil | Password to use when authenticating. | + + +`timeout` is parsed using [time.ParseDuration(s string)](https://golang.org/pkg/time/#ParseDuration) + + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +2. `DROP TABLE schema_migrations` +4. Download and install the latest migrate version. +5. Force the current migration version with `migrate force `. 
diff --git a/vendor/src/github.com/mattes/migrate/database/cassandra/cassandra.go b/vendor/src/github.com/mattes/migrate/database/cassandra/cassandra.go new file mode 100644 index 00000000..42563fdb --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cassandra/cassandra.go @@ -0,0 +1,228 @@ +package cassandra + +import ( + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "time" + + "github.com/gocql/gocql" + "github.com/mattes/migrate/database" +) + +func init() { + db := new(Cassandra) + database.Register("cassandra", db) +} + +var DefaultMigrationsTable = "schema_migrations" +var dbLocked = false + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoKeyspace = fmt.Errorf("no keyspace provided") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +type Config struct { + MigrationsTable string + KeyspaceName string +} + +type Cassandra struct { + session *gocql.Session + isLocked bool + + // Open and WithInstance need to guarantee that config is never nil + config *Config +} + +func (p *Cassandra) Open(url string) (database.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // Check for missing mandatory attributes + if len(u.Path) == 0 { + return nil, ErrNoKeyspace + } + + migrationsTable := u.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + p.config = &Config{ + KeyspaceName: u.Path, + MigrationsTable: migrationsTable, + } + + cluster := gocql.NewCluster(u.Host) + cluster.Keyspace = u.Path[1:len(u.Path)] + cluster.Consistency = gocql.All + cluster.Timeout = 1 * time.Minute + + if len(u.Query().Get("username")) > 0 && len(u.Query().Get("password")) > 0 { + authenticator := gocql.PasswordAuthenticator{ + Username: u.Query().Get("username"), + Password: u.Query().Get("password"), + } + cluster.Authenticator = authenticator + } + + // Retrieve query string configuration + if len(u.Query().Get("consistency")) > 0 { + var 
consistency gocql.Consistency + consistency, err = parseConsistency(u.Query().Get("consistency")) + if err != nil { + return nil, err + } + + cluster.Consistency = consistency + } + if len(u.Query().Get("protocol")) > 0 { + var protoversion int + protoversion, err = strconv.Atoi(u.Query().Get("protocol")) + if err != nil { + return nil, err + } + cluster.ProtoVersion = protoversion + } + if len(u.Query().Get("timeout")) > 0 { + var timeout time.Duration + timeout, err = time.ParseDuration(u.Query().Get("timeout")) + if err != nil { + return nil, err + } + cluster.Timeout = timeout + } + + p.session, err = cluster.CreateSession() + + if err != nil { + return nil, err + } + + if err := p.ensureVersionTable(); err != nil { + return nil, err + } + + return p, nil +} + +func (p *Cassandra) Close() error { + p.session.Close() + return nil +} + +func (p *Cassandra) Lock() error { + if dbLocked { + return database.ErrLocked + } + dbLocked = true + return nil +} + +func (p *Cassandra) Unlock() error { + dbLocked = false + return nil +} + +func (p *Cassandra) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + // run migration + query := string(migr[:]) + if err := p.session.Query(query).Exec(); err != nil { + // TODO: cast to Cassandra error and get line number + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (p *Cassandra) SetVersion(version int, dirty bool) error { + query := `TRUNCATE "` + p.config.MigrationsTable + `"` + if err := p.session.Query(query).Exec(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if version >= 0 { + query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES (?, ?)` + if err := p.session.Query(query, version, dirty).Exec(); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + return nil +} + +// Return current keyspace version +func (p 
*Cassandra) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` + err = p.session.Query(query).Scan(&version, &dirty) + switch { + case err == gocql.ErrNotFound: + return database.NilVersion, false, nil + + case err != nil: + if _, ok := err.(*gocql.Error); ok { + return database.NilVersion, false, nil + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (p *Cassandra) Drop() error { + // select all tables in current schema + query := fmt.Sprintf(`SELECT table_name from system_schema.tables WHERE keyspace_name='%s'`, p.config.KeyspaceName[1:]) // Skip '/' character + iter := p.session.Query(query).Iter() + var tableName string + for iter.Scan(&tableName) { + err := p.session.Query(fmt.Sprintf(`DROP TABLE %s`, tableName)).Exec() + if err != nil { + return err + } + } + // Re-create the version table + if err := p.ensureVersionTable(); err != nil { + return err + } + return nil +} + +// Ensure version table exists +func (p *Cassandra) ensureVersionTable() error { + err := p.session.Query(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (version bigint, dirty boolean, PRIMARY KEY(version))", p.config.MigrationsTable)).Exec() + if err != nil { + return err + } + if _, _, err = p.Version(); err != nil { + return err + } + return nil +} + +// ParseConsistency wraps gocql.ParseConsistency +// to return an error instead of a panicking. 
+func parseConsistency(consistencyStr string) (consistency gocql.Consistency, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + err = fmt.Errorf("Failed to parse consistency \"%s\": %v", consistencyStr, r) + } + } + }() + consistency = gocql.ParseConsistency(consistencyStr) + + return consistency, nil +} diff --git a/vendor/src/github.com/mattes/migrate/database/cassandra/cassandra_test.go b/vendor/src/github.com/mattes/migrate/database/cassandra/cassandra_test.go new file mode 100644 index 00000000..4ca764a0 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cassandra/cassandra_test.go @@ -0,0 +1,53 @@ +package cassandra + +import ( + "fmt" + "testing" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" + "github.com/gocql/gocql" + "time" + "strconv" +) + +var versions = []mt.Version{ + {Image: "cassandra:3.0.10"}, + {Image: "cassandra:3.0"}, +} + +func isReady(i mt.Instance) bool { + // Cassandra exposes 5 ports (7000, 7001, 7199, 9042 & 9160) + // We only need the port bound to 9042, but we can only access to the first one + // through 'i.Port()' (which calls DockerContainer.firstPortMapping()) + // So we need to get port mapping to retrieve correct port number bound to 9042 + portMap := i.NetworkSettings().Ports + port, _ := strconv.Atoi(portMap["9042/tcp"][0].HostPort) + + cluster := gocql.NewCluster(i.Host()) + cluster.Port = port + //cluster.ProtoVersion = 4 + cluster.Consistency = gocql.All + cluster.Timeout = 1 * time.Minute + p, err := cluster.CreateSession() + if err != nil { + return false + } + // Create keyspace for tests + p.Query("CREATE KEYSPACE testks WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor':1}").Exec() + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Cassandra{} + portMap := i.NetworkSettings().Ports + port, _ := 
strconv.Atoi(portMap["9042/tcp"][0].HostPort) + addr := fmt.Sprintf("cassandra://%v:%v/testks", i.Host(), port) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT table_name from system_schema.tables")) + }) +} diff --git a/vendor/src/github.com/mattes/migrate/database/clickhouse/README.md b/vendor/src/github.com/mattes/migrate/database/clickhouse/README.md new file mode 100644 index 00000000..16dbbf96 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/clickhouse/README.md @@ -0,0 +1,12 @@ +# ClickHouse + +`clickhouse://host:port?username=user&password=qwerty&database=clicks` + +| URL Query | Description | +|------------|-------------| +| `x-migrations-table`| Name of the migrations table | +| `database` | The name of the database to connect to | +| `username` | The user to sign in as | +| `password` | The user's password | +| `host` | The host to connect to. | +| `port` | The port to bind to. | diff --git a/vendor/src/github.com/mattes/migrate/database/clickhouse/clickhouse.go b/vendor/src/github.com/mattes/migrate/database/clickhouse/clickhouse.go new file mode 100644 index 00000000..fffc5585 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/clickhouse/clickhouse.go @@ -0,0 +1,196 @@ +package clickhouse + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + "net/url" + "time" + + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +var DefaultMigrationsTable = "schema_migrations" + +var ErrNilConfig = fmt.Errorf("no config") + +type Config struct { + DatabaseName string + MigrationsTable string +} + +func init() { + database.Register("clickhouse", &ClickHouse{}) +} + +func WithInstance(conn *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := conn.Ping(); err != nil { + return nil, err + } + + ch := &ClickHouse{ + conn: conn, + config: config, + } + + if err := ch.init(); err != nil { + return 
nil, err + } + + return ch, nil +} + +type ClickHouse struct { + conn *sql.DB + config *Config +} + +func (ch *ClickHouse) Open(dsn string) (database.Driver, error) { + purl, err := url.Parse(dsn) + if err != nil { + return nil, err + } + q := migrate.FilterCustomQuery(purl) + q.Scheme = "tcp" + conn, err := sql.Open("clickhouse", q.String()) + if err != nil { + return nil, err + } + + ch = &ClickHouse{ + conn: conn, + config: &Config{ + MigrationsTable: purl.Query().Get("x-migrations-table"), + DatabaseName: purl.Query().Get("database"), + }, + } + + if err := ch.init(); err != nil { + return nil, err + } + + return ch, nil +} + +func (ch *ClickHouse) init() error { + if len(ch.config.DatabaseName) == 0 { + if err := ch.conn.QueryRow("SELECT currentDatabase()").Scan(&ch.config.DatabaseName); err != nil { + return err + } + } + + if len(ch.config.MigrationsTable) == 0 { + ch.config.MigrationsTable = DefaultMigrationsTable + } + + return ch.ensureVersionTable() +} + +func (ch *ClickHouse) Run(r io.Reader) error { + migration, err := ioutil.ReadAll(r) + if err != nil { + return err + } + if _, err := ch.conn.Exec(string(migration)); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migration} + } + + return nil +} +func (ch *ClickHouse) Version() (int, bool, error) { + var ( + version int + dirty uint8 + query = "SELECT version, dirty FROM `" + ch.config.MigrationsTable + "` ORDER BY sequence DESC LIMIT 1" + ) + if err := ch.conn.QueryRow(query).Scan(&version, &dirty); err != nil { + if err == sql.ErrNoRows { + return database.NilVersion, false, nil + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + } + return version, dirty == 1, nil +} + +func (ch *ClickHouse) SetVersion(version int, dirty bool) error { + var ( + bool = func(v bool) uint8 { + if v { + return 1 + } + return 0 + } + tx, err = ch.conn.Begin() + ) + if err != nil { + return err + } + + query := "INSERT INTO " + ch.config.MigrationsTable + " 
(version, dirty, sequence) VALUES (?, ?, ?)" + if _, err := tx.Exec(query, version, bool(dirty), time.Now().UnixNano()); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + return tx.Commit() +} + +func (ch *ClickHouse) ensureVersionTable() error { + var ( + table string + query = "SHOW TABLES FROM " + ch.config.DatabaseName + " LIKE '" + ch.config.MigrationsTable + "'" + ) + // check if migration table exists + if err := ch.conn.QueryRow(query).Scan(&table); err != nil { + if err != sql.ErrNoRows { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + // if not, create the empty migration table + query = ` + CREATE TABLE ` + ch.config.MigrationsTable + ` ( + version UInt32, + dirty UInt8, + sequence UInt64 + ) Engine=TinyLog + ` + if _, err := ch.conn.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +func (ch *ClickHouse) Drop() error { + var ( + query = "SHOW TABLES FROM " + ch.config.DatabaseName + tables, err = ch.conn.Query(query) + ) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + for tables.Next() { + var table string + if err := tables.Scan(&table); err != nil { + return err + } + + query = "DROP TABLE IF EXISTS " + ch.config.DatabaseName + "." 
+ table + + if _, err := ch.conn.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + return ch.ensureVersionTable() +} + +func (ch *ClickHouse) Lock() error { return nil } +func (ch *ClickHouse) Unlock() error { return nil } +func (ch *ClickHouse) Close() error { return ch.conn.Close() } diff --git a/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.down.sql b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.down.sql new file mode 100644 index 00000000..51cd8bfb --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS test_1; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.up.sql b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.up.sql new file mode 100644 index 00000000..5436b6fd --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/001_init.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE test_1 ( + Date Date +) Engine=Memory; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql new file mode 100644 index 00000000..9d771223 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS test_2; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql new file mode 100644 index 00000000..6b49ed99 --- /dev/null +++ 
b/vendor/src/github.com/mattes/migrate/database/clickhouse/examples/migrations/002_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE test_2 ( + Date Date +) Engine=Memory; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/README.md b/vendor/src/github.com/mattes/migrate/database/cockroachdb/README.md new file mode 100644 index 00000000..7931c279 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/README.md @@ -0,0 +1,19 @@ +# cockroachdb + +`cockroachdb://user:password@host:port/dbname?query` (`cockroach://`, and `crdb-postgres://` work, too) + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `x-lock-table` | `LockTable` | Name of the table which maintains the migration lock | +| `x-force-lock` | `ForceLock` | Force lock acquisition to fix faulty migrations which may not have released the schema lock (Boolean, default is `false`) | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | +| `port` | | The port to bind to. (default is 5432) | +| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | +| `sslcert` | | Cert file location. The file must contain PEM encoded data. | +| `sslkey` | | Key file location. The file must contain PEM encoded data. | +| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. 
| +| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb.go b/vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb.go new file mode 100644 index 00000000..8da31d37 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb.go @@ -0,0 +1,338 @@ +package cockroachdb + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + + "github.com/cockroachdb/cockroach-go/crdb" + "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + "regexp" + "strconv" + "context" +) + +func init() { + db := CockroachDb{} + database.Register("cockroach", &db) + database.Register("cockroachdb", &db) + database.Register("crdb-postgres", &db) +} + +var DefaultMigrationsTable = "schema_migrations" +var DefaultLockTable = "schema_lock" + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") +) + +type Config struct { + MigrationsTable string + LockTable string + ForceLock bool + DatabaseName string +} + +type CockroachDb struct { + db *sql.DB + isLocked bool + + // Open and WithInstance need to guarantee that config is never nil + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT current_database()` + var databaseName string + if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + if len(config.LockTable) == 0 { + config.LockTable = DefaultLockTable + } + + px := 
&CockroachDb{ + db: instance, + config: config, + } + + if err := px.ensureVersionTable(); err != nil { + return nil, err + } + + if err := px.ensureLockTable(); err != nil { + return nil, err + } + + return px, nil +} + +func (c *CockroachDb) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // As Cockroach uses the postgres protocol, and 'postgres' is already a registered database, we need to replace the + // connect prefix, with the actual protocol, so that the library can differentiate between the implementations + re := regexp.MustCompile("^(cockroach(db)?|crdb-postgres)") + connectString := re.ReplaceAllString(migrate.FilterCustomQuery(purl).String(), "postgres") + + db, err := sql.Open("postgres", connectString) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + lockTable := purl.Query().Get("x-lock-table") + if len(lockTable) == 0 { + lockTable = DefaultLockTable + } + + forceLockQuery := purl.Query().Get("x-force-lock") + forceLock, err := strconv.ParseBool(forceLockQuery) + if err != nil { + forceLock = false + } + + px, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + LockTable: lockTable, + ForceLock: forceLock, + }) + if err != nil { + return nil, err + } + + return px, nil +} + +func (c *CockroachDb) Close() error { + return c.db.Close() +} + +// Locking is done manually with a separate lock table. 
Implementing advisory locks in CRDB is being discussed +// See: https://github.com/cockroachdb/cockroach/issues/13546 +func (c *CockroachDb) Lock() error { + err := crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { + aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) + if err != nil { + return err + } + + query := "SELECT * FROM " + c.config.LockTable + " WHERE lock_id = $1" + rows, err := tx.Query(query, aid) + if err != nil { + return database.Error{OrigErr: err, Err: "failed to fetch migration lock", Query: []byte(query)} + } + defer rows.Close() + + // If row exists at all, lock is present + locked := rows.Next() + if locked && !c.config.ForceLock { + return database.Error{Err: "lock could not be acquired; already locked", Query: []byte(query)} + } + + query = "INSERT INTO " + c.config.LockTable + " (lock_id) VALUES ($1)" + if _, err := tx.Exec(query, aid) ; err != nil { + return database.Error{OrigErr: err, Err: "failed to set migration lock", Query: []byte(query)} + } + + return nil + }) + + if err != nil { + return err + } else { + c.isLocked = true + return nil + } +} + +// Locking is done manually with a separate lock table. Implementing advisory locks in CRDB is being discussed +// See: https://github.com/cockroachdb/cockroach/issues/13546 +func (c *CockroachDb) Unlock() error { + aid, err := database.GenerateAdvisoryLockId(c.config.DatabaseName) + if err != nil { + return err + } + + // In the event of an implementation (non-migration) error, it is possible for the lock to not be released. 
Until + // a better locking mechanism is added, a manual purging of the lock table may be required in such circumstances + query := "DELETE FROM " + c.config.LockTable + " WHERE lock_id = $1" + if _, err := c.db.Exec(query, aid); err != nil { + if e, ok := err.(*pq.Error); ok { + // 42P01 is "UndefinedTableError" in CockroachDB + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go + if e.Code == "42P01" { + // On drops, the lock table is fully removed; This is fine, and is a valid "unlocked" state for the schema + c.isLocked = false + return nil + } + } + return database.Error{OrigErr: err, Err: "failed to release migration lock", Query: []byte(query)} + } + + c.isLocked = false + return nil +} + +func (c *CockroachDb) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + query := string(migr[:]) + if _, err := c.db.Exec(query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (c *CockroachDb) SetVersion(version int, dirty bool) error { + return crdb.ExecuteTx(context.Background(), c.db, nil, func(tx *sql.Tx) error { + if _, err := tx.Exec( `TRUNCATE "` + c.config.MigrationsTable + `"`); err != nil { + return err + } + + if version >= 0 { + if _, err := tx.Exec(`INSERT INTO "` + c.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)`, version, dirty); err != nil { + return err + } + } + + return nil + }) +} + +func (c *CockroachDb) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + c.config.MigrationsTable + `" LIMIT 1` + err = c.db.QueryRow(query).Scan(&version, &dirty) + + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*pq.Error); ok { + // 42P01 is "UndefinedTableError" in CockroachDB + // 
https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go + if e.Code == "42P01" { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (c *CockroachDb) Drop() error { + // select all tables in current schema + query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` + tables, err := c.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ... + for _, t := range tableNames { + query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := c.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (c *CockroachDb) ensureVersionTable() error { + // check if migration table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := c.db.QueryRow(query, c.config.MigrationsTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty migration table + query = `CREATE TABLE "` + c.config.MigrationsTable + `" (version INT NOT NULL PRIMARY KEY, dirty BOOL NOT NULL)` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + + +func (c *CockroachDb) ensureLockTable() error { + // check if 
lock table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := c.db.QueryRow(query, c.config.LockTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty lock table + query = `CREATE TABLE "` + c.config.LockTable + `" (lock_id INT NOT NULL PRIMARY KEY)` + if _, err := c.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + return nil +} diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb_test.go b/vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb_test.go new file mode 100644 index 00000000..e2dc1f86 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/cockroachdb_test.go @@ -0,0 +1,91 @@ +package cockroachdb + +// error codes https://github.com/lib/pq/blob/master/error.go + +import ( + //"bytes" + "database/sql" + "fmt" + "io" + "testing" + + "github.com/lib/pq" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" + "bytes" +) + +var versions = []mt.Version{ + {Image: "cockroachdb/cockroach:v1.0.2", Cmd: []string{"start", "--insecure"}}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://root@%v:%v?sslmode=disable", i.Host(), i.PortFor(26257))) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + if err == io.EOF { + _, err = db.Exec("CREATE DATABASE migrate") + return err == nil; + } else if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "cannot_connect_now" { + return false + } + } + + _, err = db.Exec("CREATE DATABASE migrate") + return err == nil; + + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := 
fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.PortFor(26257)) + d, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + }) +} + +func TestMultiStatement(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable", i.Host(), i.Port()) + d, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { + t.Fatalf("expected err to be nil, got %v", err) + } + + // make sure second table exists + var exists bool + if err := d.(*CockroachDb).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("expected table bar to exist") + } + }) +} + +func TestFilterCustomQuery(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + c := &CockroachDb{} + addr := fmt.Sprintf("cockroach://root@%v:%v/migrate?sslmode=disable&x-custom=foobar", i.Host(), i.PortFor(26257)) + _, err := c.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + }) +} diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 00000000..c99ddcdc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql 
b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 00000000..fc321018 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id INT UNIQUE, + name STRING(40), + email STRING(40) +); diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 00000000..940c6071 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 00000000..46204b0f --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1 @@ +ALTER TABLE users ADD COLUMN city TEXT; diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 00000000..3e87dd22 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git 
a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 00000000..61f8ba0b --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 00000000..1a0b1a21 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 00000000..0d3b9992 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id INT, + name STRING(40), + author STRING(40) +); diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql new file mode 
100644 index 00000000..3a518768 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 00000000..d533be90 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id INT, + name STRING(40), + director STRING(40) +); diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. 
Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/cockroachdb/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/crate/README.md b/vendor/src/github.com/mattes/migrate/database/crate/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/src/github.com/mattes/migrate/database/driver.go b/vendor/src/github.com/mattes/migrate/database/driver.go new file mode 100644 index 00000000..016eedcb --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/driver.go @@ -0,0 +1,112 @@ +// Package database provides the Database interface. 
+// All database drivers must implement this interface, register themselves, +// optionally provide a `WithInstance` function and pass the tests +// in package database/testing. +package database + +import ( + "fmt" + "io" + nurl "net/url" + "sync" +) + +var ( + ErrLocked = fmt.Errorf("can't acquire lock") +) + +const NilVersion int = -1 + +var driversMu sync.RWMutex +var drivers = make(map[string]Driver) + +// Driver is the interface every database driver must implement. +// +// How to implement a database driver? +// 1. Implement this interface. +// 2. Optionally, add a function named `WithInstance`. +// This function should accept an existing DB instance and a Config{} struct +// and return a driver instance. +// 3. Add a test that calls database/testing.go:Test() +// 4. Add own tests for Open(), WithInstance() (when provided) and Close(). +// All other functions are tested by tests in database/testing. +// Saves you some time and makes sure all database drivers behave the same way. +// 5. Call Register in init(). +// 6. Create a migrate/cli/build_.go file +// 7. Add driver name in 'DATABASE' variable in Makefile +// +// Guidelines: +// * Don't try to correct user input. Don't assume things. +// When in doubt, return an error and explain the situation to the user. +// * All configuration input must come from the URL string in func Open() +// or the Config{} struct in WithInstance. Don't os.Getenv(). +type Driver interface { + // Open returns a new driver instance configured with parameters + // coming from the URL string. Migrate will call this function + // only once per instance. + Open(url string) (Driver, error) + + // Close closes the underlying database instance managed by the driver. + // Migrate will call this function only once per instance. + Close() error + + // Lock should acquire a database lock so that only one migration process + // can run at a time. Migrate will call this function before Run is called. 
+ // If the implementation can't provide this functionality, return nil. + // Return database.ErrLocked if database is already locked. + Lock() error + + // Unlock should release the lock. Migrate will call this function after + // all migrations have been run. + Unlock() error + + // Run applies a migration to the database. migration is garantueed to be not nil. + Run(migration io.Reader) error + + // SetVersion saves version and dirty state. + // Migrate will call this function before and after each call to Run. + // version must be >= -1. -1 means NilVersion. + SetVersion(version int, dirty bool) error + + // Version returns the currently active version and if the database is dirty. + // When no migration has been applied, it must return version -1. + // Dirty means, a previous migration failed and user interaction is required. + Version() (version int, dirty bool, err error) + + // Drop deletes everything in the database. + Drop() error +} + +// Open returns a new driver instance. +func Open(url string) (Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.Scheme == "" { + return nil, fmt.Errorf("database driver: invalid URL scheme") + } + + driversMu.RLock() + d, ok := drivers[u.Scheme] + driversMu.RUnlock() + if !ok { + return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme) + } + + return d.Open(url) +} + +// Register globally registers a driver. 
+func Register(name string, driver Driver) { + driversMu.Lock() + defer driversMu.Unlock() + if driver == nil { + panic("Register driver is nil") + } + if _, dup := drivers[name]; dup { + panic("Register called twice for driver " + name) + } + drivers[name] = driver +} diff --git a/vendor/src/github.com/mattes/migrate/database/driver_test.go b/vendor/src/github.com/mattes/migrate/database/driver_test.go new file mode 100644 index 00000000..c0a29304 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/driver_test.go @@ -0,0 +1,8 @@ +package database + +func ExampleDriver() { + // see database/stub for an example + + // database/stub/stub.go has the driver implementation + // database/stub/stub_test.go runs database/testing/test.go:Test +} diff --git a/vendor/src/github.com/mattes/migrate/database/error.go b/vendor/src/github.com/mattes/migrate/database/error.go new file mode 100644 index 00000000..eb802c75 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/error.go @@ -0,0 +1,27 @@ +package database + +import ( + "fmt" +) + +// Error should be used for errors involving queries ran against the database +type Error struct { + // Optional: the line number + Line uint + + // Query is a query excerpt + Query []byte + + // Err is a useful/helping error message for humans + Err string + + // OrigErr is the underlying error + OrigErr error +} + +func (e Error) Error() string { + if len(e.Err) == 0 { + return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query) + } + return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr) +} diff --git a/vendor/src/github.com/mattes/migrate/database/mongodb/README.md b/vendor/src/github.com/mattes/migrate/database/mongodb/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/src/github.com/mattes/migrate/database/mysql/README.md b/vendor/src/github.com/mattes/migrate/database/mysql/README.md new file mode 100644 index 00000000..490e90b2 --- /dev/null 
+++ b/vendor/src/github.com/mattes/migrate/database/mysql/README.md @@ -0,0 +1,53 @@ +# MySQL + +`mysql://user:password@tcp(host:port)/dbname?query` + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. | +| `port` | | The port to bind to. | +| `x-tls-ca` | | The location of the root certificate file. | +| `x-tls-cert` | | Cert file location. | +| `x-tls-key` | | Key file location. | +| `x-tls-insecure-skip-verify` | | Whether or not to use SSL (true\|false) | + +## Use with existing client + +If you use the MySQL driver with existing database client, you must create the client with parameter `multiStatements=true`: + +```go +package main + +import ( + "database/sql" + + _ "github.com/go-sql-driver/mysql" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database/mysql" + _ "github.com/mattes/migrate/source/file" +) + +func main() { + db, _ := sql.Open("mysql", "user:password@tcp(host:port)/dbname?multiStatements=true") + driver, _ := mysql.WithInstance(db, &mysql.Config{}) + m, _ := migrate.NewWithDatabaseInstance( + "file:///migrations", + "mysql", + driver, + ) + + m.Steps(2) +} +``` + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +1. `DROP TABLE schema_migrations` +2. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://dev.mysql.com/doc/refman/5.7/en/commit.html)) if you use multiple statements within one migration. +3. Download and install the latest migrate version. +4. Force the current migration version with `migrate force `. 
diff --git a/vendor/src/github.com/mattes/migrate/database/mysql/mysql.go b/vendor/src/github.com/mattes/migrate/database/mysql/mysql.go new file mode 100644 index 00000000..f00f886e --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/mysql/mysql.go @@ -0,0 +1,329 @@ +package mysql + +import ( + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "strconv" + "strings" + + "github.com/go-sql-driver/mysql" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("mysql", &Mysql{}) +} + +var DefaultMigrationsTable = "schema_migrations" + +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Mysql struct { + db *sql.DB + isLocked bool + + config *Config +} + +// instance must have `multiStatements` set to true +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT DATABASE()` + var databaseName sql.NullString + if err := instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName.String) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName.String + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + mx := &Mysql{ + db: instance, + config: config, + } + + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + q := purl.Query() + 
q.Set("multiStatements", "true") + purl.RawQuery = q.Encode() + + db, err := sql.Open("mysql", strings.Replace( + migrate.FilterCustomQuery(purl).String(), "mysql://", "", 1)) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + // use custom TLS? + ctls := purl.Query().Get("tls") + if len(ctls) > 0 { + if _, isBool := readBool(ctls); !isBool && strings.ToLower(ctls) != "skip-verify" { + rootCertPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(purl.Query().Get("x-tls-ca")) + if err != nil { + return nil, err + } + + if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { + return nil, ErrAppendPEM + } + + certs, err := tls.LoadX509KeyPair(purl.Query().Get("x-tls-cert"), purl.Query().Get("x-tls-key")) + if err != nil { + return nil, err + } + + insecureSkipVerify := false + if len(purl.Query().Get("x-tls-insecure-skip-verify")) > 0 { + x, err := strconv.ParseBool(purl.Query().Get("x-tls-insecure-skip-verify")) + if err != nil { + return nil, err + } + insecureSkipVerify = x + } + + mysql.RegisterTLSConfig(ctls, &tls.Config{ + RootCAs: rootCertPool, + Certificates: []tls.Certificate{certs}, + InsecureSkipVerify: insecureSkipVerify, + }) + } + } + + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + + return mx, nil +} + +func (m *Mysql) Close() error { + return m.db.Close() +} + +func (m *Mysql) Lock() error { + if m.isLocked { + return database.ErrLocked + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := "SELECT GET_LOCK(?, 1)" + var success bool + if err := m.db.QueryRow(query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + } + + if success { 
+ m.isLocked = true + return nil + } + + return database.ErrLocked +} + +func (m *Mysql) Unlock() error { + if !m.isLocked { + return nil + } + + aid, err := database.GenerateAdvisoryLockId( + fmt.Sprintf("%s:%s", m.config.DatabaseName, m.config.MigrationsTable)) + if err != nil { + return err + } + + query := `SELECT RELEASE_LOCK(?)` + if _, err := m.db.Exec(query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + m.isLocked = false + return nil +} + +func (m *Mysql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + query := string(migr[:]) + if _, err := m.db.Exec(query); err != nil { + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (m *Mysql) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE `" + m.config.MigrationsTable + "`" + if _, err := m.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := "INSERT INTO `" + m.config.MigrationsTable + "` (version, dirty) VALUES (?, ?)" + if _, err := m.db.Exec(query, version, dirty); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Mysql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM `" + m.config.MigrationsTable + "` LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*mysql.MySQLError); ok { + if e.Number == 0 { + return database.NilVersion, false, nil + } + } + return 0, false, 
&database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (m *Mysql) Drop() error { + // select all tables + query := `SHOW TABLES LIKE '%'` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ... + for _, t := range tableNames { + query = "DROP TABLE IF EXISTS `" + t + "` CASCADE" + if _, err := m.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (m *Mysql) ensureVersionTable() error { + // check if migration table exists + var result string + query := `SHOW TABLES LIKE "` + m.config.MigrationsTable + `"` + if err := m.db.QueryRow(query).Scan(&result); err != nil { + if err != sql.ErrNoRows { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } else { + return nil + } + + // if not, create the empty migration table + query = "CREATE TABLE `" + m.config.MigrationsTable + "` (version bigint not null primary key, dirty boolean not null)" + if _, err := m.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +// See https://github.com/go-sql-driver/mysql/blob/a059889267dc7170331388008528b3b44479bffb/utils.go#L71 +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} diff --git a/vendor/src/github.com/mattes/migrate/database/mysql/mysql_test.go b/vendor/src/github.com/mattes/migrate/database/mysql/mysql_test.go new file mode 100644 index 00000000..f2b12e8e --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/mysql/mysql_test.go @@ -0,0 +1,60 @@ +package mysql + +import ( + "database/sql" + sqldriver "database/sql/driver" + "fmt" + // "io/ioutil" + // "log" + "testing" + + // "github.com/go-sql-driver/mysql" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" +) + +var versions = []mt.Version{ + {Image: "mysql:8", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.7", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.6", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, + {Image: "mysql:5.5", ENV: []string{"MYSQL_ROOT_PASSWORD=root", "MYSQL_DATABASE=public"}}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("mysql", fmt.Sprintf("root:root@tcp(%v:%v)/public", i.Host(), i.Port())) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + + if err == sqldriver.ErrBadConn { + return false + } + + return true +} + +func Test(t *testing.T) { + // mysql.SetLogger(mysql.Logger(log.New(ioutil.Discard, "", log.Ltime))) + + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Mysql{} + addr := fmt.Sprintf("mysql://root:root@tcp(%v:%v)/public", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + 
} + dt.Test(t, d, []byte("SELECT 1")) + + // check ensureVersionTable + if err := d.(*Mysql).ensureVersionTable(); err != nil { + t.Fatal(err) + } + // check again + if err := d.(*Mysql).ensureVersionTable(); err != nil { + t.Fatal(err) + } + }) +} diff --git a/vendor/src/github.com/mattes/migrate/database/neo4j/README.md b/vendor/src/github.com/mattes/migrate/database/neo4j/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/README.md b/vendor/src/github.com/mattes/migrate/database/postgres/README.md new file mode 100644 index 00000000..f6312392 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/README.md @@ -0,0 +1,28 @@ +# postgres + +`postgres://user:password@host:port/dbname?query` (`postgresql://` works, too) + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `dbname` | `DatabaseName` | The name of the database to connect to | +| `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. | +| `user` | | The user to sign in as | +| `password` | | The user's password | +| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) | +| `port` | | The port to bind to. (default is 5432) | +| `fallback_application_name` | | An application_name to fall back to if one isn't provided. | +| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. | +| `sslcert` | | Cert file location. The file must contain PEM encoded data. | +| `sslkey` | | Key file location. The file must contain PEM encoded data. | +| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. 
| +| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) | + + +## Upgrading from v1 + +1. Write down the current migration version from schema_migrations +1. `DROP TABLE schema_migrations` +2. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration. +3. Download and install the latest migrate version. +4. Force the current migration version with `migrate force `. diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 00000000..c99ddcdc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 00000000..92897dca --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id integer unique, + name varchar(40), + email varchar(40) +); diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 00000000..940c6071 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; 
diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 00000000..67823edc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE users ADD COLUMN city varchar(100); + + diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 00000000..3e87dd22 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 00000000..fbeb4ab4 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 00000000..1a0b1a21 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 00000000..f1503b51 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id integer, + name varchar(40), + author varchar(40) +); diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 00000000..3a518768 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 00000000..f0ef5943 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id integer, + name varchar(40), + director varchar(40) +); diff --git 
a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/postgres.go b/vendor/src/github.com/mattes/migrate/database/postgres/postgres.go new file mode 100644 index 00000000..fb2d61c2 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/postgres.go @@ -0,0 +1,273 @@ +package postgres + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + + "github.com/lib/pq" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + db := Postgres{} + database.Register("postgres", &db) + database.Register("postgresql", &db) +} + +var DefaultMigrationsTable = "schema_migrations" + +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrNoSchema = fmt.Errorf("no schema") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Postgres struct { + db *sql.DB + isLocked bool + + // Open and WithInstance need to garantuee that config is never nil + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + + query := `SELECT CURRENT_DATABASE()` + var databaseName string + if err := 
instance.QueryRow(query).Scan(&databaseName); err != nil { + return nil, &database.Error{OrigErr: err, Query: []byte(query)} + } + + if len(databaseName) == 0 { + return nil, ErrNoDatabaseName + } + + config.DatabaseName = databaseName + + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + px := &Postgres{ + db: instance, + config: config, + } + + if err := px.ensureVersionTable(); err != nil { + return nil, err + } + + return px, nil +} + +func (p *Postgres) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + db, err := sql.Open("postgres", migrate.FilterCustomQuery(purl).String()) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + px, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + + return px, nil +} + +func (p *Postgres) Close() error { + return p.db.Close() +} + +// https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS +func (p *Postgres) Lock() error { + if p.isLocked { + return database.ErrLocked + } + + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) + if err != nil { + return err + } + + // This will either obtain the lock immediately and return true, + // or return false if the lock cannot be acquired immediately. 
+ query := `SELECT pg_try_advisory_lock($1)` + var success bool + if err := p.db.QueryRow(query, aid).Scan(&success); err != nil { + return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)} + } + + if success { + p.isLocked = true + return nil + } + + return database.ErrLocked +} + +func (p *Postgres) Unlock() error { + if !p.isLocked { + return nil + } + + aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName) + if err != nil { + return err + } + + query := `SELECT pg_advisory_unlock($1)` + if _, err := p.db.Exec(query, aid); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + p.isLocked = false + return nil +} + +func (p *Postgres) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + query := string(migr[:]) + if _, err := p.db.Exec(query); err != nil { + // TODO: cast to postgress error and get line number + return database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +func (p *Postgres) SetVersion(version int, dirty bool) error { + tx, err := p.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := `TRUNCATE "` + p.config.MigrationsTable + `"` + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query = `INSERT INTO "` + p.config.MigrationsTable + `" (version, dirty) VALUES ($1, $2)` + if _, err := tx.Exec(query, version, dirty); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (p *Postgres) Version() (version int, dirty bool, err error) { + query := `SELECT version, dirty FROM "` + p.config.MigrationsTable + `" LIMIT 1` + err = 
p.db.QueryRow(query).Scan(&version, &dirty) + switch { + case err == sql.ErrNoRows: + return database.NilVersion, false, nil + + case err != nil: + if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "undefined_table" { + return database.NilVersion, false, nil + } + } + return 0, false, &database.Error{OrigErr: err, Query: []byte(query)} + + default: + return version, dirty, nil + } +} + +func (p *Postgres) Drop() error { + // select all tables in current schema + query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema())` + tables, err := p.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + + // delete one table after another + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + + if len(tableNames) > 0 { + // delete one by one ... 
+ for _, t := range tableNames { + query = `DROP TABLE IF EXISTS ` + t + ` CASCADE` + if _, err := p.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := p.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} + +func (p *Postgres) ensureVersionTable() error { + // check if migration table exists + var count int + query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1` + if err := p.db.QueryRow(query, p.config.MigrationsTable).Scan(&count); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if count == 1 { + return nil + } + + // if not, create the empty migration table + query = `CREATE TABLE "` + p.config.MigrationsTable + `" (version bigint not null primary key, dirty boolean not null)` + if _, err := p.db.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + return nil +} diff --git a/vendor/src/github.com/mattes/migrate/database/postgres/postgres_test.go b/vendor/src/github.com/mattes/migrate/database/postgres/postgres_test.go new file mode 100644 index 00000000..9a367a05 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/postgres/postgres_test.go @@ -0,0 +1,150 @@ +package postgres + +// error codes https://github.com/lib/pq/blob/master/error.go + +import ( + "bytes" + "database/sql" + "fmt" + "io" + "testing" + + "github.com/lib/pq" + dt "github.com/mattes/migrate/database/testing" + mt "github.com/mattes/migrate/testing" +) + +var versions = []mt.Version{ + {Image: "postgres:9.6"}, + {Image: "postgres:9.5"}, + {Image: "postgres:9.4"}, + {Image: "postgres:9.3"}, + {Image: "postgres:9.2"}, +} + +func isReady(i mt.Instance) bool { + db, err := sql.Open("postgres", fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port())) + if err != nil { + return false + } + defer db.Close() + err = db.Ping() + if 
err == io.EOF { + return false + + } else if e, ok := err.(*pq.Error); ok { + if e.Code.Name() == "cannot_connect_now" { + return false + } + } + + return true +} + +func Test(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) + }) +} + +func TestMultiStatement(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Run(bytes.NewReader([]byte("CREATE TABLE foo (foo text); CREATE TABLE bar (bar text);"))); err != nil { + t.Fatalf("expected err to be nil, got %v", err) + } + + // make sure second table exists + var exists bool + if err := d.(*Postgres).db.QueryRow("SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'bar' AND table_schema = (SELECT current_schema()))").Scan(&exists); err != nil { + t.Fatal(err) + } + if !exists { + t.Fatalf("expected table bar to exist") + } + }) +} + +func TestFilterCustomQuery(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&x-custom=foobar", i.Host(), i.Port()) + _, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + }) +} + +func TestWithSchema(t *testing.T) { + mt.ParallelTest(t, versions, isReady, + func(t *testing.T, i mt.Instance) { + p := &Postgres{} + addr := fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable", i.Host(), i.Port()) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + // create foobar schema + if err := 
d.Run(bytes.NewReader([]byte("CREATE SCHEMA foobar AUTHORIZATION postgres"))); err != nil { + t.Fatal(err) + } + if err := d.SetVersion(1, false); err != nil { + t.Fatal(err) + } + + // re-connect using that schema + d2, err := p.Open(fmt.Sprintf("postgres://postgres@%v:%v/postgres?sslmode=disable&search_path=foobar", i.Host(), i.Port())) + if err != nil { + t.Fatalf("%v", err) + } + + version, _, err := d2.Version() + if err != nil { + t.Fatal(err) + } + if version != -1 { + t.Fatal("expected NilVersion") + } + + // now update version and compare + if err := d2.SetVersion(2, false); err != nil { + t.Fatal(err) + } + version, _, err = d2.Version() + if err != nil { + t.Fatal(err) + } + if version != 2 { + t.Fatal("expected version 2") + } + + // meanwhile, the public schema still has the other version + version, _, err = d.Version() + if err != nil { + t.Fatal(err) + } + if version != 1 { + t.Fatal("expected version 1") + } + }) +} + +func TestWithInstance(t *testing.T) { + +} diff --git a/vendor/src/github.com/mattes/migrate/database/ql/README.md b/vendor/src/github.com/mattes/migrate/database/ql/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql b/vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql new file mode 100644 index 00000000..72d18c55 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql b/vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql new file mode 100644 index 00000000..5ad3404d --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/ql/migration/33_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE pets ( + name string +); \ No newline at end of file diff --git
a/vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql b/vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql new file mode 100644 index 00000000..72d18c55 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql b/vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql new file mode 100644 index 00000000..3993698d --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/ql/migration/44_alter_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE pets ADD predator bool;; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/ql/ql.go b/vendor/src/github.com/mattes/migrate/database/ql/ql.go new file mode 100644 index 00000000..46722a9c --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/ql/ql.go @@ -0,0 +1,212 @@ +package ql + +import ( + "database/sql" + "fmt" + "io" + "io/ioutil" + "strings" + + nurl "net/url" + + _ "github.com/cznic/ql/driver" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("ql", &Ql{}) +} + +var DefaultMigrationsTable = "schema_migrations" +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrAppendPEM = fmt.Errorf("failed to append PEM") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Ql struct { + db *sql.DB + isLocked bool + + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = 
DefaultMigrationsTable + } + + mx := &Ql{ + db: instance, + config: config, + } + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + return mx, nil +} +func (m *Ql) ensureVersionTable() error { + tx, err := m.db.Begin() + if err != nil { + return err + } + if _, err := tx.Exec(fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); + CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); +`, m.config.MigrationsTable, m.config.MigrationsTable)); err != nil { + if err := tx.Rollback(); err != nil { + return err + } + return err + } + if err := tx.Commit(); err != nil { + return err + } + return nil +} + +func (m *Ql) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "ql://", "", 1) + db, err := sql.Open("ql", dbfile) + if err != nil { + return nil, err + } + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + return mx, nil +} +func (m *Ql) Close() error { + return m.db.Close() +} +func (m *Ql) Drop() error { + query := `SELECT Name FROM __Table` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + defer tables.Close() + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + if strings.HasPrefix(tableName, "__") == false { + tableNames = append(tableNames, tableName) + } + } + } + if len(tableNames) > 0 { + for _, t := range tableNames { + query := "DROP TABLE " + t + err = m.executeQuery(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} 
+ } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + } + + return nil +} +func (m *Ql) Lock() error { + if m.isLocked { + return database.ErrLocked + } + m.isLocked = true + return nil +} +func (m *Ql) Unlock() error { + if !m.isLocked { + return nil + } + m.isLocked = false + return nil +} +func (m *Ql) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + query := string(migr[:]) + + return m.executeQuery(query) +} +func (m *Ql) executeQuery(query string) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + return nil +} +func (m *Ql) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "TRUNCATE TABLE " + m.config.MigrationsTable + if _, err := tx.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, %t)`, m.config.MigrationsTable, version, dirty) + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Ql) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + if err != nil { + return database.NilVersion, false, nil + } + return version, dirty, nil +} diff --git 
a/vendor/src/github.com/mattes/migrate/database/ql/ql_test.go b/vendor/src/github.com/mattes/migrate/database/ql/ql_test.go new file mode 100644 index 00000000..f04383fa --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/ql/ql_test.go @@ -0,0 +1,62 @@ +package ql + +import ( + "database/sql" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + _ "github.com/cznic/ql/driver" + "github.com/mattes/migrate" + dt "github.com/mattes/migrate/database/testing" + _ "github.com/mattes/migrate/source/file" +) + +func Test(t *testing.T) { + dir, err := ioutil.TempDir("", "ql-driver-test") + if err != nil { + return + } + defer func() { + os.RemoveAll(dir) + }() + fmt.Printf("DB path : %s\n", filepath.Join(dir, "ql.db")) + p := &Ql{} + addr := fmt.Sprintf("ql://%s", filepath.Join(dir, "ql.db")) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + db, err := sql.Open("ql", filepath.Join(dir, "ql.db")) + if err != nil { + return + } + defer func() { + if err := db.Close(); err != nil { + return + } + }() + dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) + driver, err := WithInstance(db, &Config{}) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Drop(); err != nil { + t.Fatal(err) + } + + m, err := migrate.NewWithDatabaseInstance( + "file://./migration", + "ql", driver) + if err != nil { + t.Fatalf("%v", err) + } + fmt.Println("UP") + err = m.Up() + if err != nil { + t.Fatalf("%v", err) + } +} diff --git a/vendor/src/github.com/mattes/migrate/database/redshift/README.md b/vendor/src/github.com/mattes/migrate/database/redshift/README.md new file mode 100644 index 00000000..a03d109a --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/redshift/README.md @@ -0,0 +1,6 @@ +Redshift +=== + +This provides a Redshift driver for migrations. It is used whenever the URL of the database starts with `redshift://`. 
+ +Redshift is PostgreSQL compatible but has some specific features (or lack thereof) that require slightly different behavior. diff --git a/vendor/src/github.com/mattes/migrate/database/redshift/redshift.go b/vendor/src/github.com/mattes/migrate/database/redshift/redshift.go new file mode 100644 index 00000000..99cdde72 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/redshift/redshift.go @@ -0,0 +1,46 @@ +package redshift + +import ( + "net/url" + + "github.com/mattes/migrate/database" + "github.com/mattes/migrate/database/postgres" +) + +// init registers the driver under the name 'redshift' +func init() { + db := new(Redshift) + db.Driver = new(postgres.Postgres) + + database.Register("redshift", db) +} + +// Redshift is a wrapper around the PostgreSQL driver which implements Redshift-specific behavior. +// +// Currently, the only different behaviour is the lack of locking in Redshift. The (Un)Lock() method(s) have been overridden from the PostgreSQL adapter to simply return nil. +type Redshift struct { + // The wrapped PostgreSQL driver. + database.Driver +} + +// Open implements the database.Driver interface by parsing the URL, switching the scheme from "redshift" to "postgres", and delegating to the underlying PostgreSQL driver. +func (driver *Redshift) Open(dsn string) (database.Driver, error) { + parsed, err := url.Parse(dsn) + if err != nil { + return nil, err + } + + parsed.Scheme = "postgres" + psql, err := driver.Driver.Open(parsed.String()) + if err != nil { + return nil, err + } + + return &Redshift{Driver: psql}, nil +} + +// Lock implements the database.Driver interface by not locking and returning nil. +func (driver *Redshift) Lock() error { return nil } + +// Unlock implements the database.Driver interface by not unlocking and returning nil. 
+func (driver *Redshift) Unlock() error { return nil } diff --git a/vendor/src/github.com/mattes/migrate/database/shell/README.md b/vendor/src/github.com/mattes/migrate/database/shell/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/README.md b/vendor/src/github.com/mattes/migrate/database/spanner/README.md new file mode 100644 index 00000000..0de867a8 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/README.md @@ -0,0 +1,36 @@ +# Google Cloud Spanner + +## Usage + +The DSN must be given in the following format. + +`spanner://projects/{projectId}/instances/{instanceId}/databases/{databaseName}` + +See [Google Spanner Documentation](https://cloud.google.com/spanner/docs) for details. + + +| Param | WithInstance Config | Description | +| ----- | ------------------- | ----------- | +| `x-migrations-table` | `MigrationsTable` | Name of the migrations table | +| `url` | `DatabaseName` | The full path to the Spanner database resource. If provided as part of `Config` it must not contain a scheme or query string to match the format `projects/{projectId}/instances/{instanceId}/databases/{databaseName}`| +| `projectId` || The Google Cloud Platform project id +| `instanceId` || The id of the instance running Spanner +| `databaseName` || The name of the Spanner database + + +> **Note:** Google Cloud Spanner migrations can take a considerable amount of +> time. The migrations provided as part of the example take about 6 minutes to +> run on a small instance. +> +> ```log +> 1481574547/u create_users_table (21.354507597s) +> 1496539702/u add_city_to_users (41.647359754s) +> 1496601752/u add_index_on_user_emails (2m12.155787369s) +> 1496602638/u create_books_table (2m30.77299181s) +> ``` + +## Testing + +To unit test the `spanner` driver, `SPANNER_DATABASE` needs to be set.
You'll +need to sign-up to Google Cloud Platform (GCP) and have a running Spanner +instance since it is not possible to run Google Spanner outside GCP. \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql new file mode 100644 index 00000000..7bd522c1 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE Users diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql new file mode 100644 index 00000000..97b8bdb7 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1481574547_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE Users ( + UserId INT64, + Name STRING(40), + Email STRING(83) +) PRIMARY KEY(UserId) \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql new file mode 100644 index 00000000..f0fcd085 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE Users DROP COLUMN city \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql new file mode 100644 index 00000000..b2d6c02b --- /dev/null +++ 
b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496539702_add_city_to_users.up.sql @@ -0,0 +1 @@ +ALTER TABLE Users ADD COLUMN city STRING(100) \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql new file mode 100644 index 00000000..29f92559 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX UsersEmailIndex diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql new file mode 100644 index 00000000..e77b7f2d --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496601752_add_index_on_user_emails.up.sql @@ -0,0 +1 @@ +CREATE UNIQUE INDEX UsersEmailIndex ON Users (Email) diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql new file mode 100644 index 00000000..bd2ce054 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE Books \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql new file mode 100644 index 00000000..0bfa0d48 --- /dev/null +++ 
b/vendor/src/github.com/mattes/migrate/database/spanner/examples/migrations/1496602638_create_books_table.up.sql @@ -0,0 +1,6 @@ +CREATE TABLE Books ( + UserId INT64, + Name STRING(40), + Author STRING(40) +) PRIMARY KEY(UserId, Name), +INTERLEAVE IN PARENT Users ON DELETE CASCADE diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/spanner.go b/vendor/src/github.com/mattes/migrate/database/spanner/spanner.go new file mode 100644 index 00000000..6c65bab3 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/spanner.go @@ -0,0 +1,294 @@ +package spanner + +import ( + "fmt" + "io" + "io/ioutil" + "log" + nurl "net/url" + "regexp" + "strings" + + "golang.org/x/net/context" + + "cloud.google.com/go/spanner" + sdb "cloud.google.com/go/spanner/admin/database/apiv1" + + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + + "google.golang.org/api/iterator" + adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +func init() { + db := Spanner{} + database.Register("spanner", &db) +} + +// DefaultMigrationsTable is used if no custom table is specified +const DefaultMigrationsTable = "SchemaMigrations" + +// Driver errors +var ( + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") + ErrNoSchema = fmt.Errorf("no schema") + ErrDatabaseDirty = fmt.Errorf("database is dirty") +) + +// Config used for a Spanner instance +type Config struct { + MigrationsTable string + DatabaseName string +} + +// Spanner implements database.Driver for Google Cloud Spanner +type Spanner struct { + db *DB + + config *Config +} + +type DB struct { + admin *sdb.DatabaseAdminClient + data *spanner.Client +} + +// WithInstance implements database.Driver +func WithInstance(instance *DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if len(config.DatabaseName) == 0 { + return nil, ErrNoDatabaseName + } + + if len(config.MigrationsTable) 
== 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + sx := &Spanner{ + db: instance, + config: config, + } + + if err := sx.ensureVersionTable(); err != nil { + return nil, err + } + + return sx, nil +} + +// Open implements database.Driver +func (s *Spanner) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + ctx := context.Background() + + adminClient, err := sdb.NewDatabaseAdminClient(ctx) + if err != nil { + return nil, err + } + dbname := strings.Replace(migrate.FilterCustomQuery(purl).String(), "spanner://", "", 1) + dataClient, err := spanner.NewClient(ctx, dbname) + if err != nil { + log.Fatal(err) + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + + db := &DB{admin: adminClient, data: dataClient} + return WithInstance(db, &Config{ + DatabaseName: dbname, + MigrationsTable: migrationsTable, + }) +} + +// Close implements database.Driver +func (s *Spanner) Close() error { + s.db.data.Close() + return s.db.admin.Close() +} + +// Lock implements database.Driver but doesn't do anything because Spanner only +// enqueues the UpdateDatabaseDdlRequest. +func (s *Spanner) Lock() error { + return nil +} + +// Unlock implements database.Driver but no action required, see Lock. 
+func (s *Spanner) Unlock() error { + return nil +} + +// Run implements database.Driver +func (s *Spanner) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + + // run migration + stmts := migrationStatements(migr) + ctx := context.Background() + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: stmts, + }) + + if err != nil { + return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Err: "migration failed", Query: migr} + } + + return nil +} + +// SetVersion implements database.Driver +func (s *Spanner) SetVersion(version int, dirty bool) error { + ctx := context.Background() + + _, err := s.db.data.ReadWriteTransaction(ctx, + func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + m := []*spanner.Mutation{ + spanner.Delete(s.config.MigrationsTable, spanner.AllKeys()), + spanner.Insert(s.config.MigrationsTable, + []string{"Version", "Dirty"}, + []interface{}{version, dirty}, + )} + return txn.BufferWrite(m) + }) + if err != nil { + return &database.Error{OrigErr: err} + } + + return nil +} + +// Version implements database.Driver +func (s *Spanner) Version() (version int, dirty bool, err error) { + ctx := context.Background() + + stmt := spanner.Statement{ + SQL: `SELECT Version, Dirty FROM ` + s.config.MigrationsTable + ` LIMIT 1`, + } + iter := s.db.data.Single().Query(ctx, stmt) + defer iter.Stop() + + row, err := iter.Next() + switch err { + case iterator.Done: + return database.NilVersion, false, nil + case nil: + var v int64 + if err = row.Columns(&v, &dirty); err != nil { + return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} + } + version = int(v) + default: + return 0, false, &database.Error{OrigErr: err, Query: []byte(stmt.SQL)} + } + + return version, dirty, nil +} + +// Drop 
implements database.Driver. Retrieves the database schema first and +// creates statements to drop the indexes and tables accordingly. +// Note: The drop statements are created in reverse order to how they're +// provided in the schema. Assuming the schema describes how the database can +// be "build up", it seems logical to "unbuild" the database simply by going the +// opposite direction. More testing +func (s *Spanner) Drop() error { + ctx := context.Background() + res, err := s.db.admin.GetDatabaseDdl(ctx, &adminpb.GetDatabaseDdlRequest{ + Database: s.config.DatabaseName, + }) + if err != nil { + return &database.Error{OrigErr: err, Err: "drop failed"} + } + if len(res.Statements) == 0 { + return nil + } + + r := regexp.MustCompile(`(CREATE TABLE\s(\S+)\s)|(CREATE.+INDEX\s(\S+)\s)`) + stmts := make([]string, 0) + for i := len(res.Statements) - 1; i >= 0; i-- { + s := res.Statements[i] + m := r.FindSubmatch([]byte(s)) + + if len(m) == 0 { + continue + } else if tbl := m[2]; len(tbl) > 0 { + stmts = append(stmts, fmt.Sprintf(`DROP TABLE %s`, tbl)) + } else if idx := m[4]; len(idx) > 0 { + stmts = append(stmts, fmt.Sprintf(`DROP INDEX %s`, idx)) + } + } + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: stmts, + }) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} + } + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Query: []byte(strings.Join(stmts, "; "))} + } + + if err := s.ensureVersionTable(); err != nil { + return err + } + + return nil +} + +func (s *Spanner) ensureVersionTable() error { + ctx := context.Background() + tbl := s.config.MigrationsTable + iter := s.db.data.Single().Read(ctx, tbl, spanner.AllKeys(), []string{"Version"}) + if err := iter.Do(func(r *spanner.Row) error { return nil }); err == nil { + return nil + } + + stmt := fmt.Sprintf(`CREATE TABLE %s ( + Version INT64 NOT NULL, + Dirty 
BOOL NOT NULL + ) PRIMARY KEY(Version)`, tbl) + + op, err := s.db.admin.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{ + Database: s.config.DatabaseName, + Statements: []string{stmt}, + }) + + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(stmt)} + } + if err := op.Wait(ctx); err != nil { + return &database.Error{OrigErr: err, Query: []byte(stmt)} + } + + return nil +} + +func migrationStatements(migration []byte) []string { + regex := regexp.MustCompile(";$") + migrationString := string(migration[:]) + migrationString = strings.TrimSpace(migrationString) + migrationString = regex.ReplaceAllString(migrationString, "") + + statements := strings.Split(migrationString, ";") + return statements +} diff --git a/vendor/src/github.com/mattes/migrate/database/spanner/spanner_test.go b/vendor/src/github.com/mattes/migrate/database/spanner/spanner_test.go new file mode 100644 index 00000000..43d475ca --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/spanner/spanner_test.go @@ -0,0 +1,28 @@ +package spanner + +import ( + "fmt" + "os" + "testing" + + dt "github.com/mattes/migrate/database/testing" +) + +func Test(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db, ok := os.LookupEnv("SPANNER_DATABASE") + if !ok { + t.Skip("SPANNER_DATABASE not set, skipping test.") + } + + s := &Spanner{} + addr := fmt.Sprintf("spanner://%v", db) + d, err := s.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + dt.Test(t, d, []byte("SELECT 1")) +} diff --git a/vendor/src/github.com/mattes/migrate/database/sqlite3/README.md b/vendor/src/github.com/mattes/migrate/database/sqlite3/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.down.sql b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.down.sql new file mode 100644 index 00000000..72d18c55 --- /dev/null +++ 
b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.up.sql b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.up.sql new file mode 100644 index 00000000..5ad3404d --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/33_create_table.up.sql @@ -0,0 +1,3 @@ +CREATE TABLE pets ( + name string +); \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.down.sql b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.down.sql new file mode 100644 index 00000000..72d18c55 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS pets; \ No newline at end of file diff --git a/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.up.sql b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.up.sql new file mode 100644 index 00000000..f0682fcc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/sqlite3/migration/44_alter_table.up.sql @@ -0,0 +1 @@ +ALTER TABLE pets ADD predator bool; diff --git a/vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3.go b/vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3.go new file mode 100644 index 00000000..bfd1a5b8 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3.go @@ -0,0 +1,214 @@ +package sqlite3 + +import ( + "database/sql" + "fmt" + "github.com/mattes/migrate" + "github.com/mattes/migrate/database" + _ "github.com/mattn/go-sqlite3" + "io" + "io/ioutil" + nurl "net/url" + "strings" +) + +func init() { + database.Register("sqlite3", &Sqlite{}) +} + +var DefaultMigrationsTable = 
"schema_migrations" +var ( + ErrDatabaseDirty = fmt.Errorf("database is dirty") + ErrNilConfig = fmt.Errorf("no config") + ErrNoDatabaseName = fmt.Errorf("no database name") +) + +type Config struct { + MigrationsTable string + DatabaseName string +} + +type Sqlite struct { + db *sql.DB + isLocked bool + + config *Config +} + +func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) { + if config == nil { + return nil, ErrNilConfig + } + + if err := instance.Ping(); err != nil { + return nil, err + } + if len(config.MigrationsTable) == 0 { + config.MigrationsTable = DefaultMigrationsTable + } + + mx := &Sqlite{ + db: instance, + config: config, + } + if err := mx.ensureVersionTable(); err != nil { + return nil, err + } + return mx, nil +} + +func (m *Sqlite) ensureVersionTable() error { + + query := fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool); + CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version); + `, DefaultMigrationsTable, DefaultMigrationsTable) + + if _, err := m.db.Exec(query); err != nil { + return err + } + return nil +} + +func (m *Sqlite) Open(url string) (database.Driver, error) { + purl, err := nurl.Parse(url) + if err != nil { + return nil, err + } + dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "sqlite3://", "", 1) + db, err := sql.Open("sqlite3", dbfile) + if err != nil { + return nil, err + } + + migrationsTable := purl.Query().Get("x-migrations-table") + if len(migrationsTable) == 0 { + migrationsTable = DefaultMigrationsTable + } + mx, err := WithInstance(db, &Config{ + DatabaseName: purl.Path, + MigrationsTable: migrationsTable, + }) + if err != nil { + return nil, err + } + return mx, nil +} + +func (m *Sqlite) Close() error { + return m.db.Close() +} + +func (m *Sqlite) Drop() error { + query := `SELECT name FROM sqlite_master WHERE type = 'table';` + tables, err := m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} 
+ } + defer tables.Close() + tableNames := make([]string, 0) + for tables.Next() { + var tableName string + if err := tables.Scan(&tableName); err != nil { + return err + } + if len(tableName) > 0 { + tableNames = append(tableNames, tableName) + } + } + if len(tableNames) > 0 { + for _, t := range tableNames { + query := "DROP TABLE " + t + err = m.executeQuery(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + if err := m.ensureVersionTable(); err != nil { + return err + } + query := "VACUUM" + _, err = m.db.Query(query) + if err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + return nil +} + +func (m *Sqlite) Lock() error { + if m.isLocked { + return database.ErrLocked + } + m.isLocked = true + return nil +} + +func (m *Sqlite) Unlock() error { + if !m.isLocked { + return nil + } + m.isLocked = false + return nil +} + +func (m *Sqlite) Run(migration io.Reader) error { + migr, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + query := string(migr[:]) + + return m.executeQuery(query) +} + +func (m *Sqlite) executeQuery(query string) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + return nil +} + +func (m *Sqlite) SetVersion(version int, dirty bool) error { + tx, err := m.db.Begin() + if err != nil { + return &database.Error{OrigErr: err, Err: "transaction start failed"} + } + + query := "DELETE FROM " + m.config.MigrationsTable + if _, err := tx.Exec(query); err != nil { + return &database.Error{OrigErr: err, Query: []byte(query)} + } + + if version >= 0 { + query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, '%t')`, 
m.config.MigrationsTable, version, dirty) + if _, err := tx.Exec(query); err != nil { + tx.Rollback() + return &database.Error{OrigErr: err, Query: []byte(query)} + } + } + + if err := tx.Commit(); err != nil { + return &database.Error{OrigErr: err, Err: "transaction commit failed"} + } + + return nil +} + +func (m *Sqlite) Version() (version int, dirty bool, err error) { + query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1" + err = m.db.QueryRow(query).Scan(&version, &dirty) + if err != nil { + return database.NilVersion, false, nil + } + return version, dirty, nil +} diff --git a/vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3_test.go b/vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3_test.go new file mode 100644 index 00000000..6a5c5c86 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/sqlite3/sqlite3_test.go @@ -0,0 +1,61 @@ +package sqlite3 + +import ( + "database/sql" + "fmt" + "github.com/mattes/migrate" + dt "github.com/mattes/migrate/database/testing" + _ "github.com/mattes/migrate/source/file" + _ "github.com/mattn/go-sqlite3" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Test(t *testing.T) { + dir, err := ioutil.TempDir("", "sqlite3-driver-test") + if err != nil { + return + } + defer func() { + os.RemoveAll(dir) + }() + fmt.Printf("DB path : %s\n", filepath.Join(dir, "sqlite3.db")) + p := &Sqlite{} + addr := fmt.Sprintf("sqlite3://%s", filepath.Join(dir, "sqlite3.db")) + d, err := p.Open(addr) + if err != nil { + t.Fatalf("%v", err) + } + + db, err := sql.Open("sqlite3", filepath.Join(dir, "sqlite3.db")) + if err != nil { + return + } + defer func() { + if err := db.Close(); err != nil { + return + } + }() + dt.Test(t, d, []byte("CREATE TABLE t (Qty int, Name string);")) + driver, err := WithInstance(db, &Config{}) + if err != nil { + t.Fatalf("%v", err) + } + if err := d.Drop(); err != nil { + t.Fatal(err) + } + + m, err := migrate.NewWithDatabaseInstance( + 
"file://./migration", + "ql", driver) + if err != nil { + t.Fatalf("%v", err) + } + fmt.Println("UP") + err = m.Up() + if err != nil { + t.Fatalf("%v", err) + } +} diff --git a/vendor/src/github.com/mattes/migrate/database/stub/stub.go b/vendor/src/github.com/mattes/migrate/database/stub/stub.go new file mode 100644 index 00000000..172bcd37 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/stub/stub.go @@ -0,0 +1,95 @@ +package stub + +import ( + "io" + "io/ioutil" + "reflect" + + "github.com/mattes/migrate/database" +) + +func init() { + database.Register("stub", &Stub{}) +} + +type Stub struct { + Url string + Instance interface{} + CurrentVersion int + MigrationSequence []string + LastRunMigration []byte // todo: make []string + IsDirty bool + IsLocked bool + + Config *Config +} + +func (s *Stub) Open(url string) (database.Driver, error) { + return &Stub{ + Url: url, + CurrentVersion: -1, + MigrationSequence: make([]string, 0), + Config: &Config{}, + }, nil +} + +type Config struct{} + +func WithInstance(instance interface{}, config *Config) (database.Driver, error) { + return &Stub{ + Instance: instance, + CurrentVersion: -1, + MigrationSequence: make([]string, 0), + Config: config, + }, nil +} + +func (s *Stub) Close() error { + return nil +} + +func (s *Stub) Lock() error { + if s.IsLocked { + return database.ErrLocked + } + s.IsLocked = true + return nil +} + +func (s *Stub) Unlock() error { + s.IsLocked = false + return nil +} + +func (s *Stub) Run(migration io.Reader) error { + m, err := ioutil.ReadAll(migration) + if err != nil { + return err + } + s.LastRunMigration = m + s.MigrationSequence = append(s.MigrationSequence, string(m[:])) + return nil +} + +func (s *Stub) SetVersion(version int, state bool) error { + s.CurrentVersion = version + s.IsDirty = state + return nil +} + +func (s *Stub) Version() (version int, dirty bool, err error) { + return s.CurrentVersion, s.IsDirty, nil +} + +const DROP = "DROP" + +func (s *Stub) Drop() error 
{ + s.CurrentVersion = -1 + s.LastRunMigration = nil + s.MigrationSequence = append(s.MigrationSequence, DROP) + return nil +} + +func (s *Stub) EqualSequence(seq []string) bool { + return reflect.DeepEqual(seq, s.MigrationSequence) +} diff --git a/vendor/src/github.com/mattes/migrate/database/stub/stub_test.go b/vendor/src/github.com/mattes/migrate/database/stub/stub_test.go new file mode 100644 index 00000000..3d8b8926 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/stub/stub_test.go @@ -0,0 +1,16 @@ +package stub + +import ( + "testing" + + dt "github.com/mattes/migrate/database/testing" +) + +func Test(t *testing.T) { + s := &Stub{} + d, err := s.Open("") + if err != nil { + t.Fatal(err) + } + dt.Test(t, d, []byte("/* foobar migration */")) +} diff --git a/vendor/src/github.com/mattes/migrate/database/testing/testing.go b/vendor/src/github.com/mattes/migrate/database/testing/testing.go new file mode 100644 index 00000000..4ab090d1 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/testing/testing.go @@ -0,0 +1,138 @@ +// Package testing has the database tests. +// All database drivers must pass the Test function. +// This lives in it's own package so it stays a test dependency. +package testing + +import ( + "bytes" + "fmt" + "io" + "testing" + "time" + + "github.com/mattes/migrate/database" +) + +// Test runs tests against database implementations. 
+func Test(t *testing.T, d database.Driver, migration []byte) { + if migration == nil { + panic("test must provide migration reader") + } + + TestNilVersion(t, d) // test first + TestLockAndUnlock(t, d) + TestRun(t, d, bytes.NewReader(migration)) + TestDrop(t, d) + TestSetVersion(t, d) // also tests Version() +} + +func TestNilVersion(t *testing.T, d database.Driver) { + v, _, err := d.Version() + if err != nil { + t.Fatal(err) + } + if v != database.NilVersion { + t.Fatalf("Version: expected version to be NilVersion (-1), got %v", v) + } +} + +func TestLockAndUnlock(t *testing.T, d database.Driver) { + // add a timeout, in case there is a deadlock + done := make(chan bool, 1) + go func() { + timeout := time.After(15 * time.Second) + for { + select { + case <-done: + return + case <-timeout: + panic(fmt.Sprintf("Timeout after 15 seconds. Looks like a deadlock in Lock/UnLock.\n%#v", d)) + } + } + }() + defer func() { + done <- true + }() + + // run the locking test ... + + if err := d.Lock(); err != nil { + t.Fatal(err) + } + + // try to acquire lock again + if err := d.Lock(); err == nil { + t.Fatal("Lock: expected err not to be nil") + } + + // unlock + if err := d.Unlock(); err != nil { + t.Fatal(err) + } + + // try to lock + if err := d.Lock(); err != nil { + t.Fatal(err) + } + if err := d.Unlock(); err != nil { + t.Fatal(err) + } +} + +func TestRun(t *testing.T, d database.Driver, migration io.Reader) { + if migration == nil { + panic("migration can't be nil") + } + + if err := d.Run(migration); err != nil { + t.Fatal(err) + } +} + +func TestDrop(t *testing.T, d database.Driver) { + if err := d.Drop(); err != nil { + t.Fatal(err) + } +} + +func TestSetVersion(t *testing.T, d database.Driver) { + if err := d.SetVersion(1, true); err != nil { + t.Fatal(err) + } + + // call again + if err := d.SetVersion(1, true); err != nil { + t.Fatal(err) + } + + v, dirty, err := d.Version() + if err != nil { + t.Fatal(err) + } + if !dirty { + t.Fatal("expected dirty") + } + if 
v != 1 { + t.Fatal("expected version to be 1") + } + + if err := d.SetVersion(2, false); err != nil { + t.Fatal(err) + } + + // call again + if err := d.SetVersion(2, false); err != nil { + t.Fatal(err) + } + + v, dirty, err = d.Version() + if err != nil { + t.Fatal(err) + } + if dirty { + t.Fatal("expected not dirty") + } + if v != 2 { + t.Fatal("expected version to be 2") + } +} diff --git a/vendor/src/github.com/mattes/migrate/database/util.go b/vendor/src/github.com/mattes/migrate/database/util.go new file mode 100644 index 00000000..c636a7ab --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/util.go @@ -0,0 +1,15 @@ +package database + +import ( + "fmt" + "hash/crc32" +) + +const advisoryLockIdSalt uint = 1486364155 + +// inspired by rails migrations, see https://goo.gl/8o9bCT +func GenerateAdvisoryLockId(databaseName string) (string, error) { + sum := crc32.ChecksumIEEE([]byte(databaseName)) + sum = sum * uint32(advisoryLockIdSalt) + return fmt.Sprintf("%v", sum), nil +} diff --git a/vendor/src/github.com/mattes/migrate/database/util_test.go b/vendor/src/github.com/mattes/migrate/database/util_test.go new file mode 100644 index 00000000..905c840b --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/database/util_test.go @@ -0,0 +1,12 @@ +package database + +func TestGenerateAdvisoryLockId(t *testing.T) { + id, err := p.generateAdvisoryLockId("database_name") + if err != nil { + t.Errorf("expected err to be nil, got %v", err) + } + if len(id) == 0 { + t.Errorf("expected generated id not to be empty") + } + t.Logf("generated id: %v", id) +} diff --git a/vendor/src/github.com/mattes/migrate/log.go b/vendor/src/github.com/mattes/migrate/log.go new file mode 100644 index 00000000..cb00b779 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/log.go @@ -0,0 +1,12 @@ +package migrate + +// Logger is an interface so you can pass in your own +// logging implementation. 
+type Logger interface { + + // Printf is like fmt.Printf + Printf(format string, v ...interface{}) + + // Verbose should return true when verbose logging output is wanted + Verbose() bool +} diff --git a/vendor/src/github.com/mattes/migrate/migrate.go b/vendor/src/github.com/mattes/migrate/migrate.go new file mode 100644 index 00000000..58414e8f --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/migrate.go @@ -0,0 +1,920 @@ +// Package migrate reads migrations from sources and runs them against databases. +// Sources are defined by the `source.Driver` and databases by the `database.Driver` +// interface. The driver interfaces are kept "dump", all migration logic is kept +// in this package. +package migrate + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/mattes/migrate/database" + "github.com/mattes/migrate/source" +) + +// DefaultPrefetchMigrations sets the number of migrations to pre-read +// from the source. This is helpful if the source is remote, but has little +// effect for a local source (i.e. file system). +// Please note that this setting has a major impact on the memory usage, +// since each pre-read migration is buffered in memory. See DefaultBufferSize. +var DefaultPrefetchMigrations = uint(10) + +// DefaultLockTimeout sets the max time a database driver has to acquire a lock. +var DefaultLockTimeout = 15 * time.Second + +var ( + ErrNoChange = fmt.Errorf("no change") + ErrNilVersion = fmt.Errorf("no migration") + ErrLocked = fmt.Errorf("database locked") + ErrLockTimeout = fmt.Errorf("timeout: can't acquire database lock") +) + +// ErrShortLimit is an error returned when not enough migrations +// can be returned by a source for a given limit. +type ErrShortLimit struct { + Short uint +} + +// Error implements the error interface. 
+func (e ErrShortLimit) Error() string { + return fmt.Sprintf("limit %v short", e.Short) +} + +type ErrDirty struct { + Version int +} + +func (e ErrDirty) Error() string { + return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version) +} + +type Migrate struct { + sourceName string + sourceDrv source.Driver + databaseName string + databaseDrv database.Driver + + // Log accepts a Logger interface + Log Logger + + // GracefulStop accepts `true` and will stop executing migrations + // as soon as possible at a safe break point, so that the database + // is not corrupted. + GracefulStop chan bool + isGracefulStop bool + + isLockedMu *sync.Mutex + isLocked bool + + // PrefetchMigrations defaults to DefaultPrefetchMigrations, + // but can be set per Migrate instance. + PrefetchMigrations uint + + // LockTimeout defaults to DefaultLockTimeout, + // but can be set per Migrate instance. + LockTimeout time.Duration +} + +// New returns a new Migrate instance from a source URL and a database URL. +// The URL scheme is defined by each driver. +func New(sourceUrl, databaseUrl string) (*Migrate, error) { + m := newCommon() + + sourceName, err := schemeFromUrl(sourceUrl) + if err != nil { + return nil, err + } + m.sourceName = sourceName + + databaseName, err := schemeFromUrl(databaseUrl) + if err != nil { + return nil, err + } + m.databaseName = databaseName + + sourceDrv, err := source.Open(sourceUrl) + if err != nil { + return nil, err + } + m.sourceDrv = sourceDrv + + databaseDrv, err := database.Open(databaseUrl) + if err != nil { + return nil, err + } + m.databaseDrv = databaseDrv + + return m, nil +} + +// NewWithDatabaseInstance returns a new Migrate instance from a source URL +// and an existing database instance. The source URL scheme is defined by each driver. +// Use any string that can serve as an identifier during logging as databaseName. +// You are responsible for closing the underlying database client if necessary. 
+func NewWithDatabaseInstance(sourceUrl string, databaseName string, databaseInstance database.Driver) (*Migrate, error) { + m := newCommon() + + sourceName, err := schemeFromUrl(sourceUrl) + if err != nil { + return nil, err + } + m.sourceName = sourceName + + m.databaseName = databaseName + + sourceDrv, err := source.Open(sourceUrl) + if err != nil { + return nil, err + } + m.sourceDrv = sourceDrv + + m.databaseDrv = databaseInstance + + return m, nil +} + +// NewWithSourceInstance returns a new Migrate instance from an existing source instance +// and a database URL. The database URL scheme is defined by each driver. +// Use any string that can serve as an identifier during logging as sourceName. +// You are responsible for closing the underlying source client if necessary. +func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseUrl string) (*Migrate, error) { + m := newCommon() + + databaseName, err := schemeFromUrl(databaseUrl) + if err != nil { + return nil, err + } + m.databaseName = databaseName + + m.sourceName = sourceName + + databaseDrv, err := database.Open(databaseUrl) + if err != nil { + return nil, err + } + m.databaseDrv = databaseDrv + + m.sourceDrv = sourceInstance + + return m, nil +} + +// NewWithInstance returns a new Migrate instance from an existing source and +// database instance. Use any string that can serve as an identifier during logging +// as sourceName and databaseName. You are responsible for closing down +// the underlying source and database client if necessary. 
+func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) { + m := newCommon() + + m.sourceName = sourceName + m.databaseName = databaseName + + m.sourceDrv = sourceInstance + m.databaseDrv = databaseInstance + + return m, nil +} + +func newCommon() *Migrate { + return &Migrate{ + GracefulStop: make(chan bool, 1), + PrefetchMigrations: DefaultPrefetchMigrations, + LockTimeout: DefaultLockTimeout, + isLockedMu: &sync.Mutex{}, + } +} + +// Close closes the the source and the database. +func (m *Migrate) Close() (source error, database error) { + databaseSrvClose := make(chan error) + sourceSrvClose := make(chan error) + + m.logVerbosePrintf("Closing source and database\n") + + go func() { + databaseSrvClose <- m.databaseDrv.Close() + }() + + go func() { + sourceSrvClose <- m.sourceDrv.Close() + }() + + return <-sourceSrvClose, <-databaseSrvClose +} + +// Migrate looks at the currently active migration version, +// then migrates either up or down to the specified version. +func (m *Migrate) Migrate(version uint) error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + go m.read(curVersion, int(version), ret) + + return m.unlockErr(m.runMigrations(ret)) +} + +// Steps looks at the currently active migration version. +// It will migrate up if n > 0, and down if n < 0. 
+func (m *Migrate) Steps(n int) error { + if n == 0 { + return ErrNoChange + } + + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + if n > 0 { + go m.readUp(curVersion, n, ret) + } else { + go m.readDown(curVersion, -n, ret) + } + + return m.unlockErr(m.runMigrations(ret)) +} + +// Up looks at the currently active migration version +// and will migrate all the way up (applying all up migrations). +func (m *Migrate) Up() error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + go m.readUp(curVersion, -1, ret) + return m.unlockErr(m.runMigrations(ret)) +} + +// Down looks at the currently active migration version +// and will migrate all the way down (applying all down migrations). +func (m *Migrate) Down() error { + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + go m.readDown(curVersion, -1, ret) + return m.unlockErr(m.runMigrations(ret)) +} + +// Drop deletes everything in the database. +func (m *Migrate) Drop() error { + if err := m.lock(); err != nil { + return err + } + if err := m.databaseDrv.Drop(); err != nil { + return m.unlockErr(err) + } + return m.unlock() +} + +// Run runs any migration provided by you against the database. +// It does not check any currently active version in database. +// Usually you don't need this function at all. Use Migrate, +// Steps, Up or Down instead. 
+func (m *Migrate) Run(migration ...*Migration) error { + if len(migration) == 0 { + return ErrNoChange + } + + if err := m.lock(); err != nil { + return err + } + + curVersion, dirty, err := m.databaseDrv.Version() + if err != nil { + return m.unlockErr(err) + } + + if dirty { + return m.unlockErr(ErrDirty{curVersion}) + } + + ret := make(chan interface{}, m.PrefetchMigrations) + + go func() { + defer close(ret) + for _, migr := range migration { + if m.PrefetchMigrations > 0 && migr.Body != nil { + m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) + } else { + m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) + } + + ret <- migr + go migr.Buffer() + } + }() + + return m.unlockErr(m.runMigrations(ret)) +} + +// Force sets a migration version. +// It does not check any currently active version in database. +// It resets the dirty state to false. +func (m *Migrate) Force(version int) error { + if version < -1 { + panic("version must be >= -1") + } + + if err := m.lock(); err != nil { + return err + } + + if err := m.databaseDrv.SetVersion(version, false); err != nil { + return m.unlockErr(err) + } + + return m.unlock() +} + +// Version returns the currently active migration version. +// If no migration has been applied, yet, it will return ErrNilVersion. +func (m *Migrate) Version() (version uint, dirty bool, err error) { + v, d, err := m.databaseDrv.Version() + if err != nil { + return 0, false, err + } + + if v == database.NilVersion { + return 0, false, ErrNilVersion + } + + return suint(v), d, nil +} + +// read reads either up or down migrations from source `from` to `to`. +// Each migration is then written to the ret channel. +// If an error occurs during reading, that error is written to the ret channel, too. +// Once read is done reading it will close the ret channel. 
+func (m *Migrate) read(from int, to int, ret chan<- interface{}) { + defer close(ret) + + // check if from version exists + if from >= 0 { + if m.versionExists(suint(from)) != nil { + ret <- os.ErrNotExist + return + } + } + + // check if to version exists + if to >= 0 { + if m.versionExists(suint(to)) != nil { + ret <- os.ErrNotExist + return + } + } + + // no change? + if from == to { + ret <- ErrNoChange + return + } + + if from < to { + // it's going up + // apply first migration if from is nil version + if from == -1 { + firstVersion, err := m.sourceDrv.First() + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(firstVersion, int(firstVersion)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(firstVersion) + } + + // run until we reach target ... + for from < to { + if m.stop() { + return + } + + next, err := m.sourceDrv.Next(suint(from)) + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(next, int(next)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(next) + } + + } else { + // it's going down + // run until we reach target ... + for from > to && from >= 0 { + if m.stop() { + return + } + + prev, err := m.sourceDrv.Prev(suint(from)) + if os.IsNotExist(err) && to == -1 { + // apply nil migration + migr, err := m.newMigration(suint(from), -1) + if err != nil { + ret <- err + return + } + ret <- migr + go migr.Buffer() + return + + } else if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(suint(from), int(prev)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(prev) + } + } +} + +// readUp reads up migrations from `from` limitted by `limit`. +// limit can be -1, implying no limit and reading until there are no more migrations. +// Each migration is then written to the ret channel. 
+// If an error occurs during reading, that error is written to the ret channel, too. +// Once readUp is done reading it will close the ret channel. +func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) { + defer close(ret) + + // check if from version exists + if from >= 0 { + if m.versionExists(suint(from)) != nil { + ret <- os.ErrNotExist + return + } + } + + if limit == 0 { + ret <- ErrNoChange + return + } + + count := 0 + for count < limit || limit == -1 { + if m.stop() { + return + } + + // apply first migration if from is nil version + if from == -1 { + firstVersion, err := m.sourceDrv.First() + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(firstVersion, int(firstVersion)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(firstVersion) + count++ + continue + } + + // apply next migration + next, err := m.sourceDrv.Next(suint(from)) + if os.IsNotExist(err) { + // no limit, but no migrations applied? + if limit == -1 && count == 0 { + ret <- ErrNoChange + return + } + + // no limit, reached end + if limit == -1 { + return + } + + // reached end, and didn't apply any migrations + if limit > 0 && count == 0 { + ret <- os.ErrNotExist + return + } + + // applied less migrations than limit? + if count < limit { + ret <- ErrShortLimit{suint(limit - count)} + return + } + } + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(next, int(next)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(next) + count++ + } +} + +// readDown reads down migrations from `from` limitted by `limit`. +// limit can be -1, implying no limit and reading until there are no more migrations. +// Each migration is then written to the ret channel. +// If an error occurs during reading, that error is written to the ret channel, too. +// Once readDown is done reading it will close the ret channel. 
+func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) { + defer close(ret) + + // check if from version exists + if from >= 0 { + if m.versionExists(suint(from)) != nil { + ret <- os.ErrNotExist + return + } + } + + if limit == 0 { + ret <- ErrNoChange + return + } + + // no change if already at nil version + if from == -1 && limit == -1 { + ret <- ErrNoChange + return + } + + // can't go over limit if already at nil version + if from == -1 && limit > 0 { + ret <- os.ErrNotExist + return + } + + count := 0 + for count < limit || limit == -1 { + if m.stop() { + return + } + + prev, err := m.sourceDrv.Prev(suint(from)) + if os.IsNotExist(err) { + // no limit or haven't reached limit, apply "first" migration + if limit == -1 || limit-count > 0 { + firstVersion, err := m.sourceDrv.First() + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(firstVersion, -1) + if err != nil { + ret <- err + return + } + ret <- migr + go migr.Buffer() + count++ + } + + if count < limit { + ret <- ErrShortLimit{suint(limit - count)} + } + return + } + if err != nil { + ret <- err + return + } + + migr, err := m.newMigration(suint(from), int(prev)) + if err != nil { + ret <- err + return + } + + ret <- migr + go migr.Buffer() + from = int(prev) + count++ + } +} + +// runMigrations reads *Migration and error from a channel. Any other type +// sent on this channel will result in a panic. Each migration is then +// proxied to the database driver and run against the database. +// Before running a newly received migration it will check if it's supposed +// to stop execution because it might have received a stop signal on the +// GracefulStop channel. 
+func (m *Migrate) runMigrations(ret <-chan interface{}) error { + for r := range ret { + + if m.stop() { + return nil + } + + switch r.(type) { + case error: + return r.(error) + + case *Migration: + migr := r.(*Migration) + + // set version with dirty state + if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil { + return err + } + + if migr.Body != nil { + m.logVerbosePrintf("Read and execute %v\n", migr.LogString()) + if err := m.databaseDrv.Run(migr.BufferedBody); err != nil { + return err + } + } + + // set clean state + if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil { + return err + } + + endTime := time.Now() + readTime := migr.FinishedReading.Sub(migr.StartedBuffering) + runTime := endTime.Sub(migr.FinishedReading) + + // log either verbose or normal + if m.Log != nil { + if m.Log.Verbose() { + m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime) + } else { + m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime) + } + } + + default: + panic("unknown type") + } + } + return nil +} + +// versionExists checks the source if either the up or down migration for +// the specified migration version exists. +func (m *Migrate) versionExists(version uint) error { + // try up migration first + up, _, err := m.sourceDrv.ReadUp(version) + if err == nil { + defer up.Close() + } + if os.IsExist(err) { + return nil + } else if !os.IsNotExist(err) { + return err + } + + // then try down migration + down, _, err := m.sourceDrv.ReadDown(version) + if err == nil { + defer down.Close() + } + if os.IsExist(err) { + return nil + } else if !os.IsNotExist(err) { + return err + } + + return os.ErrNotExist +} + +// stop returns true if no more migrations should be run against the database +// because a stop signal was received on the GracefulStop channel. +// Calls are cheap and this function is not blocking. 
+func (m *Migrate) stop() bool { + if m.isGracefulStop { + return true + } + + select { + case <-m.GracefulStop: + m.isGracefulStop = true + return true + + default: + return false + } +} + +// newMigration is a helper func that returns a *Migration for the +// specified version and targetVersion. +func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) { + var migr *Migration + + if targetVersion >= int(version) { + r, identifier, err := m.sourceDrv.ReadUp(version) + if os.IsNotExist(err) { + // create "empty" migration + migr, err = NewMigration(nil, "", version, targetVersion) + if err != nil { + return nil, err + } + + } else if err != nil { + return nil, err + + } else { + // create migration from up source + migr, err = NewMigration(r, identifier, version, targetVersion) + if err != nil { + return nil, err + } + } + + } else { + r, identifier, err := m.sourceDrv.ReadDown(version) + if os.IsNotExist(err) { + // create "empty" migration + migr, err = NewMigration(nil, "", version, targetVersion) + if err != nil { + return nil, err + } + + } else if err != nil { + return nil, err + + } else { + // create migration from down source + migr, err = NewMigration(r, identifier, version, targetVersion) + if err != nil { + return nil, err + } + } + } + + if m.PrefetchMigrations > 0 && migr.Body != nil { + m.logVerbosePrintf("Start buffering %v\n", migr.LogString()) + } else { + m.logVerbosePrintf("Scheduled %v\n", migr.LogString()) + } + + return migr, nil +} + +// lock is a thread safe helper function to lock the database. +// It should be called as late as possible when running migrations. 
+func (m *Migrate) lock() error { + m.isLockedMu.Lock() + defer m.isLockedMu.Unlock() + + if m.isLocked { + return ErrLocked + } + + // create done channel, used in the timeout goroutine + done := make(chan bool, 1) + defer func() { + done <- true + }() + + // use errchan to signal error back to this context + errchan := make(chan error, 2) + + // start timeout goroutine + timeout := time.After(m.LockTimeout) + go func() { + for { + select { + case <-done: + return + case <-timeout: + errchan <- ErrLockTimeout + return + } + } + }() + + // now try to acquire the lock + go func() { + if err := m.databaseDrv.Lock(); err != nil { + errchan <- err + } else { + errchan <- nil + } + return + }() + + // wait until we either receive ErrLockTimeout or error from Lock operation + err := <-errchan + if err == nil { + m.isLocked = true + } + return err +} + +// unlock is a thread safe helper function to unlock the database. +// It should be called as early as possible when no more migrations are +// expected to be executed. +func (m *Migrate) unlock() error { + m.isLockedMu.Lock() + defer m.isLockedMu.Unlock() + + if err := m.databaseDrv.Unlock(); err != nil { + // BUG: Can potentially create a deadlock. Add a timeout. + return err + } + + m.isLocked = false + return nil +} + +// unlockErr calls unlock and returns a combined error +// if a prevErr is not nil. +func (m *Migrate) unlockErr(prevErr error) error { + if err := m.unlock(); err != nil { + return NewMultiError(prevErr, err) + } + return prevErr +} + +// logPrintf writes to m.Log if not nil +func (m *Migrate) logPrintf(format string, v ...interface{}) { + if m.Log != nil { + m.Log.Printf(format, v...) + } +} + +// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output. +func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) { + if m.Log != nil && m.Log.Verbose() { + m.Log.Printf(format, v...) 
+ } +} diff --git a/vendor/src/github.com/mattes/migrate/migrate_test.go b/vendor/src/github.com/mattes/migrate/migrate_test.go new file mode 100644 index 00000000..0ec4bce2 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/migrate_test.go @@ -0,0 +1,941 @@ +package migrate + +import ( + "bytes" + "database/sql" + "io/ioutil" + "log" + "os" + "testing" + + dStub "github.com/mattes/migrate/database/stub" + "github.com/mattes/migrate/source" + sStub "github.com/mattes/migrate/source/stub" +) + +// sourceStubMigrations hold the following migrations: +// u = up migration, d = down migration, n = version +// | 1 | - | 3 | 4 | 5 | - | 7 | +// | u d | - | u | u d | d | - | u d | +var sourceStubMigrations *source.Migrations + +func init() { + sourceStubMigrations = source.NewMigrations() + sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 1, Direction: source.Down}) + sourceStubMigrations.Append(&source.Migration{Version: 3, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 4, Direction: source.Down}) + sourceStubMigrations.Append(&source.Migration{Version: 5, Direction: source.Down}) + sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Up}) + sourceStubMigrations.Append(&source.Migration{Version: 7, Direction: source.Down}) +} + +type DummyInstance struct{ Name string } + +func TestNew(t *testing.T) { + m, err := New("stub://", "stub://") + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNew() { + // Read 
migrations from /home/mattes/migrations and connect to a local postgres database. + m, err := New("file:///home/mattes/migrations", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") + if err != nil { + log.Fatal(err) + } + + // Migrate all the way up ... + if err := m.Up(); err != nil { + log.Fatal(err) + } +} + +func TestNewWithDatabaseInstance(t *testing.T) { + dummyDb := &DummyInstance{"database"} + dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) + if err != nil { + t.Fatal(err) + } + + m, err := NewWithDatabaseInstance("stub://", "stub", dbInst) + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNewWithDatabaseInstance() { + // Create and use an existing database instance. + db, err := sql.Open("postgres", "postgres://mattes:secret@localhost:5432/database?sslmode=disable") + if err != nil { + log.Fatal(err) + } + defer db.Close() + + // Create driver instance from db. + // Check each driver if it supports the WithInstance function. + // `import "github.com/mattes/migrate/database/postgres"` + instance, err := dStub.WithInstance(db, &dStub.Config{}) + if err != nil { + log.Fatal(err) + } + + // Read migrations from /home/mattes/migrations and connect to a local postgres database. + m, err := NewWithDatabaseInstance("file:///home/mattes/migrations", "postgres", instance) + if err != nil { + log.Fatal(err) + } + + // Migrate all the way up ... 
+ if err := m.Up(); err != nil { + log.Fatal(err) + } +} + +func TestNewWithSourceInstance(t *testing.T) { + dummySource := &DummyInstance{"source"} + sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) + if err != nil { + t.Fatal(err) + } + + m, err := NewWithSourceInstance("stub", sInst, "stub://") + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNewWithSourceInstance() { + di := &DummyInstance{"think any client required for a source here"} + + // Create driver instance from DummyInstance di. + // Check each driver if it supports the WithInstance function. + // `import "github.com/mattes/migrate/source/stub"` + instance, err := sStub.WithInstance(di, &sStub.Config{}) + if err != nil { + log.Fatal(err) + } + + // Read migrations from Stub and connect to a local postgres database. + m, err := NewWithSourceInstance("stub", instance, "postgres://mattes:secret@localhost:5432/database?sslmode=disable") + if err != nil { + log.Fatal(err) + } + + // Migrate all the way up ... 
+ if err := m.Up(); err != nil { + log.Fatal(err) + } +} + +func TestNewWithInstance(t *testing.T) { + dummyDb := &DummyInstance{"database"} + dbInst, err := dStub.WithInstance(dummyDb, &dStub.Config{}) + if err != nil { + t.Fatal(err) + } + + dummySource := &DummyInstance{"source"} + sInst, err := sStub.WithInstance(dummySource, &sStub.Config{}) + if err != nil { + t.Fatal(err) + } + + m, err := NewWithInstance("stub", sInst, "stub", dbInst) + if err != nil { + t.Fatal(err) + } + + if m.sourceName != "stub" { + t.Errorf("expected stub, got %v", m.sourceName) + } + if m.sourceDrv == nil { + t.Error("expected sourceDrv not to be nil") + } + + if m.databaseName != "stub" { + t.Errorf("expected stub, got %v", m.databaseName) + } + if m.databaseDrv == nil { + t.Error("expected databaseDrv not to be nil") + } +} + +func ExampleNewWithInstance() { + // See NewWithDatabaseInstance and NewWithSourceInstance for an example. +} + +func TestClose(t *testing.T) { + m, _ := New("stub://", "stub://") + sourceErr, databaseErr := m.Close() + if sourceErr != nil { + t.Error(sourceErr) + } + if databaseErr != nil { + t.Error(databaseErr) + } +} + +func TestMigrate(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + tt := []struct { + version uint + expectErr error + expectVersion uint + expectSeq migrationSequence + }{ + // migrate all the way Up in single steps + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, + {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, + {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add()}, // 5 has no up migration + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectVersion: 7, 
expectSeq: seq.add(M(7))}, + {version: 8, expectErr: os.ErrNotExist}, + + // migrate all the way Down in single steps + {version: 6, expectErr: os.ErrNotExist}, + {version: 5, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, + {version: 4, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, + {version: 3, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add()}, // 3 has no down migration + {version: 0, expectErr: os.ErrNotExist}, + + // migrate all the way Up in one step + {version: 7, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(3), M(4), M(7))}, + + // migrate all the way Down in one step + {version: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + + // can't migrate the same version twice + {version: 1, expectErr: ErrNoChange}, + } + + for i, v := range tt { + err := m.Migrate(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + version, _, err := m.Version() + if err != nil { + t.Error(err) + } + if version != v.expectVersion { + t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) + } + equalDbSeq(t, i, v.expectSeq, dbDrv) + } + } +} + +func TestMigrateDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Migrate(1) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestSteps(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + tt := []struct { + n int + expectErr error + 
expectVersion int + expectSeq migrationSequence + }{ + // step must be != 0 + {n: 0, expectErr: ErrNoChange}, + + // can't go Down if ErrNilVersion + {n: -1, expectErr: os.ErrNotExist}, + + // migrate all the way Up + {n: 1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(1))}, + {n: 1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(3))}, + {n: 1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(4))}, + {n: 1, expectErr: nil, expectVersion: 5, expectSeq: seq.add()}, + {n: 1, expectErr: nil, expectVersion: 7, expectSeq: seq.add(M(7))}, + {n: 1, expectErr: os.ErrNotExist}, + + // migrate all the way Down + {n: -1, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(7, 5))}, + {n: -1, expectErr: nil, expectVersion: 4, expectSeq: seq.add(M(5, 4))}, + {n: -1, expectErr: nil, expectVersion: 3, expectSeq: seq.add(M(4, 3))}, + {n: -1, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(3, 1))}, + {n: -1, expectErr: nil, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, + + // migrate Up in bigger step + {n: 4, expectErr: nil, expectVersion: 5, expectSeq: seq.add(M(1), M(3), M(4), M(5))}, + + // apply one migration, then reaches out of boundary + {n: 2, expectErr: ErrShortLimit{1}, expectVersion: 7, expectSeq: seq.add(M(7))}, + + // migrate Down in bigger step + {n: -4, expectErr: nil, expectVersion: 1, expectSeq: seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + + // apply one migration, then reaches out of boundary + {n: -2, expectErr: ErrShortLimit{1}, expectVersion: -1, expectSeq: seq.add(M(1, -1))}, + } + + for i, v := range tt { + err := m.Steps(v.n) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected err %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + version, _, err := m.Version() + if err != ErrNilVersion && err != nil { + t.Error(err) + } + if v.expectVersion == -1 && err != ErrNilVersion { + t.Errorf("expected ErrNilVersion, got 
%v, in %v", version, i) + + } else if v.expectVersion >= 0 && version != uint(v.expectVersion) { + t.Errorf("expected version %v, got %v, in %v", v.expectVersion, version, i) + } + equalDbSeq(t, i, v.expectSeq, dbDrv) + } + } +} + +func TestStepsDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Steps(1) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestUpAndDown(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + seq := newMigSeq() + + // go Up first + if err := m.Up(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 0, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) + + // go Down + if err := m.Down(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 1, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) + + // go 1 Up and then all the way Up + if err := m.Steps(1); err != nil { + t.Fatal(err) + } + if err := m.Up(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 2, seq.add(M(1), M(3), M(4), M(5), M(7)), dbDrv) + + // go 1 Down and then all the way Down + if err := m.Steps(-1); err != nil { + t.Fatal(err) + } + if err := m.Down(); err != nil { + t.Fatal(err) + } + equalDbSeq(t, 0, seq.add(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1)), dbDrv) +} + +func TestUpDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Up() + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestDownDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + err := m.Down() + if _, ok := err.(ErrDirty); !ok { + 
t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestDrop(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + dbDrv := m.databaseDrv.(*dStub.Stub) + + if err := m.Drop(); err != nil { + t.Fatal(err) + } + + if dbDrv.MigrationSequence[len(dbDrv.MigrationSequence)-1] != dStub.DROP { + t.Fatalf("expected database to DROP, got sequence %v", dbDrv.MigrationSequence) + } +} + +func TestVersion(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + + _, _, err := m.Version() + if err != ErrNilVersion { + t.Fatalf("expected ErrNilVersion, got %v", err) + } + + if err := dbDrv.Run(bytes.NewBufferString("1_up")); err != nil { + t.Fatal(err) + } + + if err := dbDrv.SetVersion(1, false); err != nil { + t.Fatal(err) + } + + v, _, err := m.Version() + if err != nil { + t.Fatal(err) + } + + if v != 1 { + t.Fatalf("expected version 1, got %v", v) + } +} + +func TestRun(t *testing.T) { + m, _ := New("stub://", "stub://") + + mx, err := NewMigration(nil, "", 1, 2) + if err != nil { + t.Fatal(err) + } + + if err := m.Run(mx); err != nil { + t.Fatal(err) + } + + v, _, err := m.Version() + if err != nil { + t.Fatal(err) + } + + if v != 2 { + t.Errorf("expected version 2, got %v", v) + } +} + +func TestRunDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + migr, err := NewMigration(nil, "", 1, 2) + if err != nil { + t.Fatal(err) + } + + err = m.Run(migr) + if _, ok := err.(ErrDirty); !ok { + t.Fatalf("expected ErrDirty, got %v", err) + } +} + +func TestForce(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + if err := m.Force(7); err != nil { + t.Fatal(err) + } + + v, dirty, err := m.Version() + if err != nil { + t.Fatal(err) + } + if dirty { + t.Errorf("expected dirty to be false") + } + if v 
!= 7 { + t.Errorf("expected version to be 7") + } +} + +func TestForceDirty(t *testing.T) { + m, _ := New("stub://", "stub://") + dbDrv := m.databaseDrv.(*dStub.Stub) + if err := dbDrv.SetVersion(0, true); err != nil { + t.Fatal(err) + } + + if err := m.Force(1); err != nil { + t.Fatal(err) + } +} + +func TestRead(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + to int + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, to: -1, expectErr: ErrNoChange}, + {from: -1, to: 0, expectErr: os.ErrNotExist}, + {from: -1, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, + {from: -1, to: 2, expectErr: os.ErrNotExist}, + {from: -1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, + {from: -1, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4))}, + {from: -1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5))}, + {from: -1, to: 6, expectErr: os.ErrNotExist}, + {from: -1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, + {from: -1, to: 8, expectErr: os.ErrNotExist}, + + {from: 0, to: -1, expectErr: os.ErrNotExist}, + {from: 0, to: 0, expectErr: os.ErrNotExist}, + {from: 0, to: 1, expectErr: os.ErrNotExist}, + {from: 0, to: 2, expectErr: os.ErrNotExist}, + {from: 0, to: 3, expectErr: os.ErrNotExist}, + {from: 0, to: 4, expectErr: os.ErrNotExist}, + {from: 0, to: 5, expectErr: os.ErrNotExist}, + {from: 0, to: 6, expectErr: os.ErrNotExist}, + {from: 0, to: 7, expectErr: os.ErrNotExist}, + {from: 0, to: 8, expectErr: os.ErrNotExist}, + + {from: 1, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, to: 0, expectErr: os.ErrNotExist}, + {from: 1, to: 1, expectErr: ErrNoChange}, + {from: 1, to: 2, expectErr: os.ErrNotExist}, + {from: 1, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(3))}, + {from: 1, to: 4, expectErr: nil, 
expectMigrations: newMigSeq(M(3), M(4))}, + {from: 1, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5))}, + {from: 1, to: 6, expectErr: os.ErrNotExist}, + {from: 1, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, + {from: 1, to: 8, expectErr: os.ErrNotExist}, + + {from: 2, to: -1, expectErr: os.ErrNotExist}, + {from: 2, to: 0, expectErr: os.ErrNotExist}, + {from: 2, to: 1, expectErr: os.ErrNotExist}, + {from: 2, to: 2, expectErr: os.ErrNotExist}, + {from: 2, to: 3, expectErr: os.ErrNotExist}, + {from: 2, to: 4, expectErr: os.ErrNotExist}, + {from: 2, to: 5, expectErr: os.ErrNotExist}, + {from: 2, to: 6, expectErr: os.ErrNotExist}, + {from: 2, to: 7, expectErr: os.ErrNotExist}, + {from: 2, to: 8, expectErr: os.ErrNotExist}, + + {from: 3, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + {from: 3, to: 0, expectErr: os.ErrNotExist}, + {from: 3, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, + {from: 3, to: 2, expectErr: os.ErrNotExist}, + {from: 3, to: 3, expectErr: ErrNoChange}, + {from: 3, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(4))}, + {from: 3, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, + {from: 3, to: 6, expectErr: os.ErrNotExist}, + {from: 3, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, + {from: 3, to: 8, expectErr: os.ErrNotExist}, + + {from: 4, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, + {from: 4, to: 0, expectErr: os.ErrNotExist}, + {from: 4, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, + {from: 4, to: 2, expectErr: os.ErrNotExist}, + {from: 4, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, + {from: 4, to: 4, expectErr: ErrNoChange}, + {from: 4, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(5))}, + {from: 4, to: 6, expectErr: os.ErrNotExist}, + {from: 4, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + {from: 
4, to: 8, expectErr: os.ErrNotExist}, + + {from: 5, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 5, to: 0, expectErr: os.ErrNotExist}, + {from: 5, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1))}, + {from: 5, to: 2, expectErr: os.ErrNotExist}, + {from: 5, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, + {from: 5, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(5, 4))}, + {from: 5, to: 5, expectErr: ErrNoChange}, + {from: 5, to: 6, expectErr: os.ErrNotExist}, + {from: 5, to: 7, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + {from: 5, to: 8, expectErr: os.ErrNotExist}, + + {from: 6, to: -1, expectErr: os.ErrNotExist}, + {from: 6, to: 0, expectErr: os.ErrNotExist}, + {from: 6, to: 1, expectErr: os.ErrNotExist}, + {from: 6, to: 2, expectErr: os.ErrNotExist}, + {from: 6, to: 3, expectErr: os.ErrNotExist}, + {from: 6, to: 4, expectErr: os.ErrNotExist}, + {from: 6, to: 5, expectErr: os.ErrNotExist}, + {from: 6, to: 6, expectErr: os.ErrNotExist}, + {from: 6, to: 7, expectErr: os.ErrNotExist}, + {from: 6, to: 8, expectErr: os.ErrNotExist}, + + {from: 7, to: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 7, to: 0, expectErr: os.ErrNotExist}, + {from: 7, to: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1))}, + {from: 7, to: 2, expectErr: os.ErrNotExist}, + {from: 7, to: 3, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3))}, + {from: 7, to: 4, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, + {from: 7, to: 5, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, + {from: 7, to: 6, expectErr: os.ErrNotExist}, + {from: 7, to: 7, expectErr: ErrNoChange}, + {from: 7, to: 8, expectErr: os.ErrNotExist}, + + {from: 8, to: -1, expectErr: os.ErrNotExist}, + {from: 8, to: 0, expectErr: os.ErrNotExist}, + {from: 8, to: 1, expectErr: 
os.ErrNotExist}, + {from: 8, to: 2, expectErr: os.ErrNotExist}, + {from: 8, to: 3, expectErr: os.ErrNotExist}, + {from: 8, to: 4, expectErr: os.ErrNotExist}, + {from: 8, to: 5, expectErr: os.ErrNotExist}, + {from: 8, to: 6, expectErr: os.ErrNotExist}, + {from: 8, to: 7, expectErr: os.ErrNotExist}, + {from: 8, to: 8, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.read(v.from, v.to, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestReadUp(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + limit int // -1 means no limit + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3), M(4), M(5), M(7))}, + {from: -1, limit: 0, expectErr: ErrNoChange}, + {from: -1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1))}, + {from: -1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(1), M(3))}, + + {from: 0, limit: -1, expectErr: os.ErrNotExist}, + {from: 0, limit: 0, expectErr: os.ErrNotExist}, + {from: 0, limit: 1, expectErr: os.ErrNotExist}, + {from: 0, limit: 2, expectErr: os.ErrNotExist}, + + {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4), M(5), M(7))}, + {from: 1, limit: 0, expectErr: ErrNoChange}, + {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3))}, + {from: 1, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3), M(4))}, + + {from: 2, limit: -1, expectErr: os.ErrNotExist}, + {from: 2, limit: 0, expectErr: os.ErrNotExist}, + {from: 2, limit: 1, 
expectErr: os.ErrNotExist}, + {from: 2, limit: 2, expectErr: os.ErrNotExist}, + + {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5), M(7))}, + {from: 3, limit: 0, expectErr: ErrNoChange}, + {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4))}, + {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4), M(5))}, + + {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + {from: 4, limit: 0, expectErr: ErrNoChange}, + {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(5))}, + {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5), M(7))}, + + {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + {from: 5, limit: 0, expectErr: ErrNoChange}, + {from: 5, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7))}, + {from: 5, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(7))}, + + {from: 6, limit: -1, expectErr: os.ErrNotExist}, + {from: 6, limit: 0, expectErr: os.ErrNotExist}, + {from: 6, limit: 1, expectErr: os.ErrNotExist}, + {from: 6, limit: 2, expectErr: os.ErrNotExist}, + + {from: 7, limit: -1, expectErr: ErrNoChange}, + {from: 7, limit: 0, expectErr: ErrNoChange}, + {from: 7, limit: 1, expectErr: os.ErrNotExist}, + {from: 7, limit: 2, expectErr: os.ErrNotExist}, + + {from: 8, limit: -1, expectErr: os.ErrNotExist}, + {from: 8, limit: 0, expectErr: os.ErrNotExist}, + {from: 8, limit: 1, expectErr: os.ErrNotExist}, + {from: 8, limit: 2, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.readUp(v.from, v.limit, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) 
+ } + } +} + +func TestReadDown(t *testing.T) { + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + + tt := []struct { + from int + limit int // -1 means no limit + expectErr error + expectMigrations migrationSequence + }{ + {from: -1, limit: -1, expectErr: ErrNoChange}, + {from: -1, limit: 0, expectErr: ErrNoChange}, + {from: -1, limit: 1, expectErr: os.ErrNotExist}, + {from: -1, limit: 2, expectErr: os.ErrNotExist}, + + {from: 0, limit: -1, expectErr: os.ErrNotExist}, + {from: 0, limit: 0, expectErr: os.ErrNotExist}, + {from: 0, limit: 1, expectErr: os.ErrNotExist}, + {from: 0, limit: 2, expectErr: os.ErrNotExist}, + + {from: 1, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, limit: 0, expectErr: ErrNoChange}, + {from: 1, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(1, -1))}, + {from: 1, limit: 2, expectErr: ErrShortLimit{1}, expectMigrations: newMigSeq(M(1, -1))}, + + {from: 2, limit: -1, expectErr: os.ErrNotExist}, + {from: 2, limit: 0, expectErr: os.ErrNotExist}, + {from: 2, limit: 1, expectErr: os.ErrNotExist}, + {from: 2, limit: 2, expectErr: os.ErrNotExist}, + + {from: 3, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + {from: 3, limit: 0, expectErr: ErrNoChange}, + {from: 3, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(3, 1))}, + {from: 3, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(3, 1), M(1, -1))}, + + {from: 4, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1), M(1, -1))}, + {from: 4, limit: 0, expectErr: ErrNoChange}, + {from: 4, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(4, 3))}, + {from: 4, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(4, 3), M(3, 1))}, + + {from: 5, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 5, limit: 0, expectErr: ErrNoChange}, + {from: 5, limit: 1, expectErr: nil, expectMigrations: 
newMigSeq(M(5, 4))}, + {from: 5, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(5, 4), M(4, 3))}, + + {from: 6, limit: -1, expectErr: os.ErrNotExist}, + {from: 6, limit: 0, expectErr: os.ErrNotExist}, + {from: 6, limit: 1, expectErr: os.ErrNotExist}, + {from: 6, limit: 2, expectErr: os.ErrNotExist}, + + {from: 7, limit: -1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4), M(4, 3), M(3, 1), M(1, -1))}, + {from: 7, limit: 0, expectErr: ErrNoChange}, + {from: 7, limit: 1, expectErr: nil, expectMigrations: newMigSeq(M(7, 5))}, + {from: 7, limit: 2, expectErr: nil, expectMigrations: newMigSeq(M(7, 5), M(5, 4))}, + + {from: 8, limit: -1, expectErr: os.ErrNotExist}, + {from: 8, limit: 0, expectErr: os.ErrNotExist}, + {from: 8, limit: 1, expectErr: os.ErrNotExist}, + {from: 8, limit: 2, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + ret := make(chan interface{}) + go m.readDown(v.from, v.limit, ret) + migrations, err := migrationsFromChannel(ret) + + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && v.expectErr != err) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + t.Logf("%v, in %v", migrations, i) + } + if len(v.expectMigrations) > 0 { + equalMigSeq(t, i, v.expectMigrations, migrations) + } + } +} + +func TestLock(t *testing.T) { + m, _ := New("stub://", "stub://") + if err := m.lock(); err != nil { + t.Fatal(err) + } + + if err := m.lock(); err == nil { + t.Fatal("should be locked already") + } +} + +func migrationsFromChannel(ret chan interface{}) ([]*Migration, error) { + slice := make([]*Migration, 0) + for r := range ret { + switch r.(type) { + case error: + return slice, r.(error) + + case *Migration: + slice = append(slice, r.(*Migration)) + } + } + return slice, nil +} + +type migrationSequence []*Migration + +func newMigSeq(migr ...*Migration) migrationSequence { + return migr +} + +func (m *migrationSequence) add(migr ...*Migration) migrationSequence { + *m = 
append(*m, migr...) + return *m +} + +func (m *migrationSequence) bodySequence() []string { + r := make([]string, 0) + for _, v := range *m { + if v.Body != nil { + body, err := ioutil.ReadAll(v.Body) + if err != nil { + panic(err) // that should never happen + } + + // reset body reader + // TODO: is there a better/nicer way? + v.Body = ioutil.NopCloser(bytes.NewReader(body)) + + r = append(r, string(body[:])) + } + } + return r +} + +// M is a convenience func to create a new *Migration +func M(version uint, targetVersion ...int) *Migration { + if len(targetVersion) > 1 { + panic("only one targetVersion allowed") + } + ts := int(version) + if len(targetVersion) == 1 { + ts = targetVersion[0] + } + + m, _ := New("stub://", "stub://") + m.sourceDrv.(*sStub.Stub).Migrations = sourceStubMigrations + migr, err := m.newMigration(version, ts) + if err != nil { + panic(err) + } + return migr +} + +func equalMigSeq(t *testing.T, i int, expected, got migrationSequence) { + if len(expected) != len(got) { + t.Errorf("expected migrations %v, got %v, in %v", expected, got, i) + + } else { + for ii := 0; ii < len(expected); ii++ { + if expected[ii].Version != got[ii].Version { + t.Errorf("expected version %v, got %v, in %v", expected[ii].Version, got[ii].Version, i) + } + + if expected[ii].TargetVersion != got[ii].TargetVersion { + t.Errorf("expected targetVersion %v, got %v, in %v", expected[ii].TargetVersion, got[ii].TargetVersion, i) + } + } + } +} + +func equalDbSeq(t *testing.T, i int, expected migrationSequence, got *dStub.Stub) { + bs := expected.bodySequence() + if !got.EqualSequence(bs) { + t.Fatalf("\nexpected sequence %v,\ngot %v, in %v", bs, got.MigrationSequence, i) + } +} diff --git a/vendor/src/github.com/mattes/migrate/migration.go b/vendor/src/github.com/mattes/migrate/migration.go new file mode 100644 index 00000000..069e7f03 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/migration.go @@ -0,0 +1,154 @@ +package migrate + +import ( + "bufio" + "fmt" + 
"io" + "time" +) + +// DefaultBufferSize sets the in memory buffer size (in Bytes) for every +// pre-read migration (see DefaultPrefetchMigrations). +var DefaultBufferSize = uint(100000) + +// Migration holds information about a migration. +// It is initially created from data coming from the source and then +// used when run against the database. +type Migration struct { + // Identifier can be any string to help identifying + // the migration in the source. + Identifier string + + // Version is the version of this migration. + Version uint + + // TargetVersion is the migration version after this migration + // has been applied to the database. + // Can be -1, implying that this is a NilVersion. + TargetVersion int + + // Body holds an io.ReadCloser to the source. + Body io.ReadCloser + + // BufferedBody holds an buffered io.Reader to the underlying Body. + BufferedBody io.Reader + + // BufferSize defaults to DefaultBufferSize + BufferSize uint + + // bufferWriter holds an io.WriteCloser and pipes to BufferBody. + // It's an *Closer for flow control. + bufferWriter io.WriteCloser + + // Scheduled is the time when the migration was scheduled/ queued. + Scheduled time.Time + + // StartedBuffering is the time when buffering of the migration source started. + StartedBuffering time.Time + + // FinishedBuffering is the time when buffering of the migration source finished. + FinishedBuffering time.Time + + // FinishedReading is the time when the migration source is fully read. + FinishedReading time.Time + + // BytesRead holds the number of Bytes read from the migration source. + BytesRead int64 +} + +// NewMigration returns a new Migration and sets the body, identifier, +// version and targetVersion. Body can be nil, which turns this migration +// into a "NilMigration". If no identifier is provided, it will default to "". +// targetVersion can be -1, implying it is a NilVersion. +// +// What is a NilMigration? 
+// Usually each migration version coming from source is expected to have an +// Up and Down migration. This is not a hard requirement though, leading to +// a situation where only the Up or Down migration is present. So let's say +// the user wants to migrate up to a version that doesn't have the actual Up +// migration, in that case we still want to apply the version, but with an empty +// body. We are calling that a NilMigration, a migration with an empty body. +// +// What is a NilVersion? +// NilVersion is a const(-1). When running down migrations and we are at the +// last down migration, there is no next down migration, the targetVersion should +// be nil. Nil in this case is represented by -1 (because type int). +func NewMigration(body io.ReadCloser, identifier string, + version uint, targetVersion int) (*Migration, error) { + tnow := time.Now() + m := &Migration{ + Identifier: identifier, + Version: version, + TargetVersion: targetVersion, + Scheduled: tnow, + } + + if body == nil { + if len(identifier) == 0 { + m.Identifier = "" + } + + m.StartedBuffering = tnow + m.FinishedBuffering = tnow + m.FinishedReading = tnow + return m, nil + } + + br, bw := io.Pipe() + m.Body = body // want to simulate low latency? newSlowReader(body) + m.BufferSize = DefaultBufferSize + m.BufferedBody = br + m.bufferWriter = bw + return m, nil +} + +// String implements string.Stringer and is used in tests. +func (m *Migration) String() string { + return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion) +} + +// LogString returns a string describing this migration to humans. +func (m *Migration) LogString() string { + directionStr := "u" + if m.TargetVersion < int(m.Version) { + directionStr = "d" + } + return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier) +} + +// Buffer buffers Body up to BufferSize. +// Calling this function blocks. Call with goroutine. 
+func (m *Migration) Buffer() error { + if m.Body == nil { + return nil + } + + m.StartedBuffering = time.Now() + + b := bufio.NewReaderSize(m.Body, int(m.BufferSize)) + + // start reading from body, peek won't move the read pointer though + // poor man's solution? + b.Peek(int(m.BufferSize)) + + m.FinishedBuffering = time.Now() + + // write to bufferWriter, this will block until + // something starts reading from m.Buffer + n, err := b.WriteTo(m.bufferWriter) + if err != nil { + return err + } + + m.FinishedReading = time.Now() + m.BytesRead = n + + // close bufferWriter so Buffer knows that there is no + // more data coming + m.bufferWriter.Close() + + // it's safe to close the Body too + m.Body.Close() + + return nil +} diff --git a/vendor/src/github.com/mattes/migrate/migration_test.go b/vendor/src/github.com/mattes/migrate/migration_test.go new file mode 100644 index 00000000..b6589f93 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/migration_test.go @@ -0,0 +1,56 @@ +package migrate + +import ( + "fmt" + "io/ioutil" + "log" + "strings" +) + +func ExampleNewMigration() { + // Create a dummy migration body, this is coming from the source usually. + body := ioutil.NopCloser(strings.NewReader("dumy migration that creates users table")) + + // Create a new Migration that represents version 1486686016. + // Once this migration has been applied to the database, the new + // migration version will be 1486689359. + migr, err := NewMigration(body, "create_users_table", 1486686016, 1486689359) + if err != nil { + log.Fatal(err) + } + + fmt.Print(migr.LogString()) + // Output: + // 1486686016/u create_users_table +} + +func ExampleNewMigration_nilMigration() { + // Create a new Migration that represents a NilMigration. + // Once this migration has been applied to the database, the new + // migration version will be 1486689359. 
+ migr, err := NewMigration(nil, "", 1486686016, 1486689359) + if err != nil { + log.Fatal(err) + } + + fmt.Print(migr.LogString()) + // Output: + // 1486686016/u +} + +func ExampleNewMigration_nilVersion() { + // Create a dummy migration body, this is coming from the source usually. + body := ioutil.NopCloser(strings.NewReader("dumy migration that deletes users table")) + + // Create a new Migration that represents version 1486686016. + // This is the last available down migration, so the migration version + // will be -1, meaning NilVersion once this migration ran. + migr, err := NewMigration(body, "drop_users_table", 1486686016, -1) + if err != nil { + log.Fatal(err) + } + + fmt.Print(migr.LogString()) + // Output: + // 1486686016/d drop_users_table +} diff --git a/vendor/src/github.com/mattes/migrate/source/aws-s3/README.md b/vendor/src/github.com/mattes/migrate/source/aws-s3/README.md new file mode 100644 index 00000000..3a59cfec --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/aws-s3/README.md @@ -0,0 +1,3 @@ +# aws-s3 + +`s3:///` diff --git a/vendor/src/github.com/mattes/migrate/source/aws-s3/s3.go b/vendor/src/github.com/mattes/migrate/source/aws-s3/s3.go new file mode 100644 index 00000000..8b581402 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/aws-s3/s3.go @@ -0,0 +1,125 @@ +package awss3 + +import ( + "fmt" + "io" + "net/url" + "os" + "path" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("s3", &s3Driver{}) +} + +type s3Driver struct { + s3client s3iface.S3API + bucket string + prefix string + migrations *source.Migrations +} + +func (s *s3Driver) Open(folder string) (source.Driver, error) { + u, err := url.Parse(folder) + if err != nil { + return nil, err + } + sess, err := session.NewSession() + if err != nil { + 
return nil, err + } + driver := s3Driver{ + bucket: u.Host, + prefix: strings.Trim(u.Path, "/") + "/", + s3client: s3.New(sess), + migrations: source.NewMigrations(), + } + err = driver.loadMigrations() + if err != nil { + return nil, err + } + return &driver, nil +} + +func (s *s3Driver) loadMigrations() error { + output, err := s.s3client.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(s.bucket), + Prefix: aws.String(s.prefix), + Delimiter: aws.String("/"), + }) + if err != nil { + return err + } + for _, object := range output.Contents { + _, fileName := path.Split(aws.StringValue(object.Key)) + m, err := source.DefaultParse(fileName) + if err != nil { + continue + } + if !s.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", aws.StringValue(object.Key)) + } + } + return nil +} + +func (s *s3Driver) Close() error { + return nil +} + +func (s *s3Driver) First() (uint, error) { + v, ok := s.migrations.First() + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (s *s3Driver) Prev(version uint) (uint, error) { + v, ok := s.migrations.Prev(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (s *s3Driver) Next(version uint) (uint, error) { + v, ok := s.migrations.Next(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (s *s3Driver) ReadUp(version uint) (io.ReadCloser, string, error) { + if m, ok := s.migrations.Up(version); ok { + return s.open(m) + } + return nil, "", os.ErrNotExist +} + +func (s *s3Driver) ReadDown(version uint) (io.ReadCloser, string, error) { + if m, ok := s.migrations.Down(version); ok { + return s.open(m) + } + return nil, "", os.ErrNotExist +} + +func (s *s3Driver) open(m *source.Migration) (io.ReadCloser, string, error) { + key := path.Join(s.prefix, m.Raw) + object, err := s.s3client.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if err != nil { + return nil, "", err + } + return object.Body, 
m.Identifier, nil +} diff --git a/vendor/src/github.com/mattes/migrate/source/aws-s3/s3_test.go b/vendor/src/github.com/mattes/migrate/source/aws-s3/s3_test.go new file mode 100644 index 00000000..f07d7ff2 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/aws-s3/s3_test.go @@ -0,0 +1,82 @@ +package awss3 + +import ( + "errors" + "io/ioutil" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + s3Client := fakeS3{ + bucket: "some-bucket", + objects: map[string]string{ + "staging/migrations/1_foobar.up.sql": "1 up", + "staging/migrations/1_foobar.down.sql": "1 down", + "prod/migrations/1_foobar.up.sql": "1 up", + "prod/migrations/1_foobar.down.sql": "1 down", + "prod/migrations/3_foobar.up.sql": "3 up", + "prod/migrations/4_foobar.up.sql": "4 up", + "prod/migrations/4_foobar.down.sql": "4 down", + "prod/migrations/5_foobar.down.sql": "5 down", + "prod/migrations/7_foobar.up.sql": "7 up", + "prod/migrations/7_foobar.down.sql": "7 down", + "prod/migrations/not-a-migration.txt": "", + "prod/migrations/0-random-stuff/whatever.txt": "", + }, + } + driver := s3Driver{ + bucket: "some-bucket", + prefix: "prod/migrations/", + migrations: source.NewMigrations(), + s3client: &s3Client, + } + err := driver.loadMigrations() + if err != nil { + t.Fatal(err) + } + st.Test(t, &driver) +} + +type fakeS3 struct { + s3.S3 + bucket string + objects map[string]string +} + +func (s *fakeS3) ListObjects(input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { + bucket := aws.StringValue(input.Bucket) + if bucket != s.bucket { + return nil, errors.New("bucket not found") + } + prefix := aws.StringValue(input.Prefix) + delimiter := aws.StringValue(input.Delimiter) + var output s3.ListObjectsOutput + for name := range s.objects { + if strings.HasPrefix(name, prefix) { + if delimiter == "" || 
!strings.Contains(strings.Replace(name, prefix, "", 1), delimiter) { + output.Contents = append(output.Contents, &s3.Object{ + Key: aws.String(name), + }) + } + } + } + return &output, nil +} + +func (s *fakeS3) GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error) { + bucket := aws.StringValue(input.Bucket) + if bucket != s.bucket { + return nil, errors.New("bucket not found") + } + if data, ok := s.objects[aws.StringValue(input.Key)]; ok { + body := ioutil.NopCloser(strings.NewReader(data)) + return &s3.GetObjectOutput{Body: body}, nil + } + return nil, errors.New("object not found") +} diff --git a/vendor/src/github.com/mattes/migrate/source/driver.go b/vendor/src/github.com/mattes/migrate/source/driver.go new file mode 100644 index 00000000..b9c052c1 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/driver.go @@ -0,0 +1,107 @@ +// Package source provides the Source interface. +// All source drivers must implement this interface, register themselves, +// optionally provide a `WithInstance` function and pass the tests +// in package source/testing. +package source + +import ( + "fmt" + "io" + nurl "net/url" + "sync" +) + +var driversMu sync.RWMutex +var drivers = make(map[string]Driver) + +// Driver is the interface every source driver must implement. +// +// How to implement a source driver? +// 1. Implement this interface. +// 2. Optionally, add a function named `WithInstance`. +// This function should accept an existing source instance and a Config{} struct +// and return a driver instance. +// 3. Add a test that calls source/testing.go:Test() +// 4. Add own tests for Open(), WithInstance() (when provided) and Close(). +// All other functions are tested by tests in source/testing. +// Saves you some time and makes sure all source drivers behave the same way. +// 5. Call Register in init(). +// +// Guidelines: +// * All configuration input must come from the URL string in func Open() +// or the Config{} struct in WithInstance. 
Don't os.Getenv(). +// * Drivers are supposed to be read only. +// * Ideally don't load any contents (into memory) in Open or WithInstance. +type Driver interface { + // Open returns a a new driver instance configured with parameters + // coming from the URL string. Migrate will call this function + // only once per instance. + Open(url string) (Driver, error) + + // Close closes the underlying source instance managed by the driver. + // Migrate will call this function only once per instance. + Close() error + + // First returns the very first migration version available to the driver. + // Migrate will call this function multiple times. + // If there is no version available, it must return os.ErrNotExist. + First() (version uint, err error) + + // Prev returns the previous version for a given version available to the driver. + // Migrate will call this function multiple times. + // If there is no previous version available, it must return os.ErrNotExist. + Prev(version uint) (prevVersion uint, err error) + + // Next returns the next version for a given version available to the driver. + // Migrate will call this function multiple times. + // If there is no next version available, it must return os.ErrNotExist. + Next(version uint) (nextVersion uint, err error) + + // ReadUp returns the UP migration body and an identifier that helps + // finding this migration in the source for a given version. + // If there is no up migration available for this version, + // it must return os.ErrNotExist. + // Do not start reading, just return the ReadCloser! + ReadUp(version uint) (r io.ReadCloser, identifier string, err error) + + // ReadDown returns the DOWN migration body and an identifier that helps + // finding this migration in the source for a given version. + // If there is no down migration available for this version, + // it must return os.ErrNotExist. + // Do not start reading, just return the ReadCloser! 
+ ReadDown(version uint) (r io.ReadCloser, identifier string, err error) +} + +// Open returns a new driver instance. +func Open(url string) (Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.Scheme == "" { + return nil, fmt.Errorf("source driver: invalid URL scheme") + } + + driversMu.RLock() + d, ok := drivers[u.Scheme] + driversMu.RUnlock() + if !ok { + return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme) + } + + return d.Open(url) +} + +// Register globally registers a driver. +func Register(name string, driver Driver) { + driversMu.Lock() + defer driversMu.Unlock() + if driver == nil { + panic("Register driver is nil") + } + if _, dup := drivers[name]; dup { + panic("Register called twice for driver " + name) + } + drivers[name] = driver +} diff --git a/vendor/src/github.com/mattes/migrate/source/driver_test.go b/vendor/src/github.com/mattes/migrate/source/driver_test.go new file mode 100644 index 00000000..82284a0b --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/driver_test.go @@ -0,0 +1,8 @@ +package source + +func ExampleDriver() { + // see source/stub for an example + + // source/stub/stub.go has the driver implementation + // source/stub/stub_test.go runs source/testing/test.go:Test +} diff --git a/vendor/src/github.com/mattes/migrate/source/file/README.md b/vendor/src/github.com/mattes/migrate/source/file/README.md new file mode 100644 index 00000000..7912eff6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/file/README.md @@ -0,0 +1,4 @@ +# file + +`file:///absolute/path` +`file://relative/path` diff --git a/vendor/src/github.com/mattes/migrate/source/file/file.go b/vendor/src/github.com/mattes/migrate/source/file/file.go new file mode 100644 index 00000000..b97d0aa3 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/file/file.go @@ -0,0 +1,127 @@ +package file + +import ( + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "os" + 
"path" + "path/filepath" + + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("file", &File{}) +} + +type File struct { + url string + path string + migrations *source.Migrations +} + +func (f *File) Open(url string) (source.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + // concat host and path to restore full path + // host might be `.` + p := u.Host + u.Path + + if len(p) == 0 { + // default to current directory if no path + wd, err := os.Getwd() + if err != nil { + return nil, err + } + p = wd + + } else if p[0:1] == "." || p[0:1] != "/" { + // make path absolute if relative + abs, err := filepath.Abs(p) + if err != nil { + return nil, err + } + p = abs + } + + // scan directory + files, err := ioutil.ReadDir(p) + if err != nil { + return nil, err + } + + nf := &File{ + url: url, + path: p, + migrations: source.NewMigrations(), + } + + for _, fi := range files { + if !fi.IsDir() { + m, err := source.DefaultParse(fi.Name()) + if err != nil { + continue // ignore files that we can't parse + } + if !nf.migrations.Append(m) { + return nil, fmt.Errorf("unable to parse file %v", fi.Name()) + } + } + } + return nf, nil +} + +func (f *File) Close() error { + // nothing do to here + return nil +} + +func (f *File) First() (version uint, err error) { + if v, ok := f.migrations.First(); !ok { + return 0, &os.PathError{"first", f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) Prev(version uint) (prevVersion uint, err error) { + if v, ok := f.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) Next(version uint) (nextVersion uint, err error) { + if v, ok := f.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), f.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (f *File) ReadUp(version uint) (r 
io.ReadCloser, identifier string, err error) { + if m, ok := f.migrations.Up(version); ok { + r, err := os.Open(path.Join(f.path, m.Raw)) + if err != nil { + return nil, "", err + } + return r, m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} +} + +func (f *File) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := f.migrations.Down(version); ok { + r, err := os.Open(path.Join(f.path, m.Raw)) + if err != nil { + return nil, "", err + } + return r, m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), f.path, os.ErrNotExist} +} diff --git a/vendor/src/github.com/mattes/migrate/source/file/file_test.go b/vendor/src/github.com/mattes/migrate/source/file/file_test.go new file mode 100644 index 00000000..310131c6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/file/file_test.go @@ -0,0 +1,207 @@ +package file + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + // write files that meet driver test requirements + mustWriteFile(t, tmpDir, "1_foobar.up.sql", "1 up") + mustWriteFile(t, tmpDir, "1_foobar.down.sql", "1 down") + + mustWriteFile(t, tmpDir, "3_foobar.up.sql", "3 up") + + mustWriteFile(t, tmpDir, "4_foobar.up.sql", "4 up") + mustWriteFile(t, tmpDir, "4_foobar.down.sql", "4 down") + + mustWriteFile(t, tmpDir, "5_foobar.down.sql", "5 down") + + mustWriteFile(t, tmpDir, "7_foobar.up.sql", "7 up") + mustWriteFile(t, tmpDir, "7_foobar.down.sql", "7 down") + + f := &File{} + d, err := f.Open("file://" + tmpDir) + if err != nil { + t.Fatal(err) + } + + st.Test(t, d) +} + +func TestOpen(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer 
os.RemoveAll(tmpDir) + + mustWriteFile(t, tmpDir, "1_foobar.up.sql", "") + mustWriteFile(t, tmpDir, "1_foobar.down.sql", "") + + if !filepath.IsAbs(tmpDir) { + t.Fatal("expected tmpDir to be absolute path") + } + + f := &File{} + _, err = f.Open("file://" + tmpDir) // absolute path + if err != nil { + t.Fatal(err) + } +} + +func TestOpenWithRelativePath(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer os.Chdir(wd) // rescue working dir after we are done + + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + if err := os.Mkdir(filepath.Join(tmpDir, "foo"), os.ModePerm); err != nil { + t.Fatal(err) + } + + mustWriteFile(t, filepath.Join(tmpDir, "foo"), "1_foobar.up.sql", "") + + f := &File{} + + // dir: foo + d, err := f.Open("file://foo") + if err != nil { + t.Fatal(err) + } + _, err = d.First() + if err != nil { + t.Fatalf("expected first file in working dir %v for foo", tmpDir) + } + + // dir: ./foo + d, err = f.Open("file://./foo") + if err != nil { + t.Fatal(err) + } + _, err = d.First() + if err != nil { + t.Fatalf("expected first file in working dir %v for ./foo", tmpDir) + } +} + +func TestOpenDefaultsToCurrentDirectory(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + f := &File{} + d, err := f.Open("file://") + if err != nil { + t.Fatal(err) + } + + if d.(*File).path != wd { + t.Fatal("expected driver to default to current directory") + } +} + +func TestOpenWithDuplicateVersion(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpenWithDuplicateVersion") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + mustWriteFile(t, tmpDir, "1_foo.up.sql", "") // 1 up + mustWriteFile(t, tmpDir, "1_bar.up.sql", "") // 1 up + + f := &File{} + _, err = f.Open("file://" + tmpDir) + if err == nil { + t.Fatal("expected err") + } +} + +func TestClose(t 
*testing.T) { + tmpDir, err := ioutil.TempDir("", "TestOpen") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + f := &File{} + d, err := f.Open("file://" + tmpDir) + if err != nil { + t.Fatal(err) + } + + if d.Close() != nil { + t.Fatal("expected nil") + } +} + +func mustWriteFile(t testing.TB, dir, file string, body string) { + if err := ioutil.WriteFile(path.Join(dir, file), []byte(body), 06444); err != nil { + t.Fatal(err) + } +} + +func mustCreateBenchmarkDir(t *testing.B) (dir string) { + tmpDir, err := ioutil.TempDir("", "Benchmark") + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1000; i++ { + mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.up.sql", i), "") + mustWriteFile(t, tmpDir, fmt.Sprintf("%v_foobar.down.sql", i), "") + } + + return tmpDir +} + +func BenchmarkOpen(b *testing.B) { + dir := mustCreateBenchmarkDir(b) + defer os.RemoveAll(dir) + b.ResetTimer() + for n := 0; n < b.N; n++ { + f := &File{} + f.Open("file://" + dir) + } + b.StopTimer() +} + +func BenchmarkNext(b *testing.B) { + dir := mustCreateBenchmarkDir(b) + defer os.RemoveAll(dir) + f := &File{} + d, _ := f.Open("file://" + dir) + b.ResetTimer() + v, err := d.First() + for n := 0; n < b.N; n++ { + for !os.IsNotExist(err) { + v, err = d.Next(v) + } + } + b.StopTimer() +} diff --git a/vendor/src/github.com/mattes/migrate/source/github/README.md b/vendor/src/github.com/mattes/migrate/source/github/README.md new file mode 100644 index 00000000..257f575c --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/README.md @@ -0,0 +1,11 @@ +# github + +`github://user:personal-access-token@owner/repo/path` + +| URL Query | WithInstance Config | Description | +|------------|---------------------|-------------| +| user | | The username of the user connecting | +| personal-access-token | | An access token from Github (https://github.com/settings/tokens) | +| owner | | the repo owner | +| repo | | the name of the repository | +| path | | path in repo to 
migrations | diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql new file mode 100644 index 00000000..c99ddcdc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS users; diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql new file mode 100644 index 00000000..92897dca --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1085649617_create_users_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE users ( + user_id integer unique, + name varchar(40), + email varchar(40) +); diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql new file mode 100644 index 00000000..940c6071 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.down.sql @@ -0,0 +1 @@ +ALTER TABLE users DROP COLUMN IF EXISTS city; diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql new file mode 100644 index 00000000..67823edc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1185749658_add_city_to_users.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE users ADD COLUMN city varchar(100); + + diff --git 
a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql new file mode 100644 index 00000000..3e87dd22 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS users_email_index; diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql new file mode 100644 index 00000000..fbeb4ab4 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1285849751_add_index_on_user_emails.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX CONCURRENTLY users_email_index ON users (email); + +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql new file mode 100644 index 00000000..1a0b1a21 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS books; diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql new file mode 100644 index 00000000..f1503b51 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1385949617_create_books_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE books ( + user_id integer, + name varchar(40), + author varchar(40) +); diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql new file mode 100644 index 00000000..3a518768 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS movies; diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql new file mode 100644 index 00000000..f0ef5943 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1485949617_create_movies_table.up.sql @@ -0,0 +1,5 @@ +CREATE TABLE movies ( + user_id integer, + name varchar(40), + director varchar(40) +); diff --git 
a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1585849751_just_a_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1685849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1785849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. 
diff --git a/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql new file mode 100644 index 00000000..9b6b57a6 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/examples/migrations/1885849751_another_comment.up.sql @@ -0,0 +1 @@ +-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean sed interdum velit, tristique iaculis justo. Pellentesque ut porttitor dolor. Donec sit amet pharetra elit. Cras vel ligula ex. Phasellus posuere. diff --git a/vendor/src/github.com/mattes/migrate/source/github/github.go b/vendor/src/github.com/mattes/migrate/source/github/github.go new file mode 100644 index 00000000..d534ed37 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/github.go @@ -0,0 +1,180 @@ +package github + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + nurl "net/url" + "os" + "path" + "strings" + + "github.com/google/go-github/github" + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("github", &Github{}) +} + +var ( + ErrNoUserInfo = fmt.Errorf("no username:token provided") + ErrNoAccessToken = fmt.Errorf("no access token") + ErrInvalidRepo = fmt.Errorf("invalid repo") + ErrInvalidGithubClient = fmt.Errorf("expected *github.Client") + ErrNoDir = fmt.Errorf("no directory") +) + +type Github struct { + client *github.Client + url string + + pathOwner string + pathRepo string + path string + migrations *source.Migrations +} + +type Config struct { +} + +func (g *Github) Open(url string) (source.Driver, error) { + u, err := nurl.Parse(url) + if err != nil { + return nil, err + } + + if u.User == nil { + return nil, ErrNoUserInfo + } + + password, ok := u.User.Password() + if !ok { + return nil, ErrNoUserInfo + } + + tr := &github.BasicAuthTransport{ + Username: u.User.Username(), + Password: password, + } + + gn := &Github{ 
+ client: github.NewClient(tr.Client()), + url: url, + migrations: source.NewMigrations(), + } + + // set owner, repo and path in repo + gn.pathOwner = u.Host + pe := strings.Split(strings.Trim(u.Path, "/"), "/") + if len(pe) < 1 { + return nil, ErrInvalidRepo + } + gn.pathRepo = pe[0] + if len(pe) > 1 { + gn.path = strings.Join(pe[1:], "/") + } + + if err := gn.readDirectory(); err != nil { + return nil, err + } + + return gn, nil +} + +func WithInstance(client *github.Client, config *Config) (source.Driver, error) { + gn := &Github{ + client: client, + migrations: source.NewMigrations(), + } + if err := gn.readDirectory(); err != nil { + return nil, err + } + return gn, nil +} + +func (g *Github) readDirectory() error { + fileContent, dirContents, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, g.path, &github.RepositoryContentGetOptions{}) + if err != nil { + return err + } + if fileContent != nil { + return ErrNoDir + } + + for _, fi := range dirContents { + m, err := source.DefaultParse(*fi.Name) + if err != nil { + continue // ignore files that we can't parse + } + if !g.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", *fi.Name) + } + } + + return nil +} + +func (g *Github) Close() error { + return nil +} + +func (g *Github) First() (version uint, er error) { + if v, ok := g.migrations.First(); !ok { + return 0, &os.PathError{"first", g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) Prev(version uint) (prevVersion uint, err error) { + if v, ok := g.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g *Github) Next(version uint) (nextVersion uint, err error) { + if v, ok := g.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), g.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (g 
*Github) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := g.migrations.Up(version); ok { + file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) + if err != nil { + return nil, "", err + } + if file != nil { + r, err := file.GetContent() + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil + } + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} +} + +func (g *Github) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := g.migrations.Down(version); ok { + file, _, _, err := g.client.Repositories.GetContents(context.Background(), g.pathOwner, g.pathRepo, path.Join(g.path, m.Raw), &github.RepositoryContentGetOptions{}) + if err != nil { + return nil, "", err + } + if file != nil { + r, err := file.GetContent() + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader([]byte(r))), m.Identifier, nil + } + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), g.path, os.ErrNotExist} +} diff --git a/vendor/src/github.com/mattes/migrate/source/github/github_test.go b/vendor/src/github.com/mattes/migrate/source/github/github_test.go new file mode 100644 index 00000000..83e86618 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/github/github_test.go @@ -0,0 +1,32 @@ +package github + +import ( + "bytes" + "io/ioutil" + "testing" + + st "github.com/mattes/migrate/source/testing" +) + +var GithubTestSecret = "" // username:token + +func init() { + secrets, err := ioutil.ReadFile(".github_test_secrets") + if err == nil { + GithubTestSecret = string(bytes.TrimSpace(secrets)[:]) + } +} + +func Test(t *testing.T) { + if len(GithubTestSecret) == 0 { + t.Skip("test requires .github_test_secrets") + } + + g := &Github{} + d, err := 
g.Open("github://" + GithubTestSecret + "@mattes/migrate_test_tmp/test") + if err != nil { + t.Fatal(err) + } + + st.Test(t, d) +} diff --git a/vendor/src/github.com/mattes/migrate/source/go-bindata/README.md b/vendor/src/github.com/mattes/migrate/source/go-bindata/README.md new file mode 100644 index 00000000..cd9dd4b7 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/go-bindata/README.md @@ -0,0 +1,43 @@ +# go-bindata + +## Usage + + + +### Read bindata with NewWithSourceInstance + +```shell +go get -u github.com/jteeuwen/go-bindata/... +cd examples/migrations && go-bindata -pkg migrations . +``` + +```go +import ( + "github.com/mattes/migrate" + "github.com/mattes/migrate/source/go-bindata" + "github.com/mattes/migrate/source/go-bindata/examples/migrations" +) + +func main() { + // wrap assets into Resource + s := bindata.Resource(migrations.AssetNames(), + func(name string) ([]byte, error) { + return migrations.Asset(name) + }) + + d, err := bindata.WithInstance(s) + m, err := migrate.NewWithSourceInstance("go-bindata", d, "database://foobar") + m.Up() // run your migrations and handle the errors above of course +} +``` + +### Read bindata with URL (todo) + +This will restore the assets in a tmp directory and then +proxy to source/file. go-bindata must be in your `$PATH`. + +``` +migrate -source go-bindata://examples/migrations/bindata.go +``` + + diff --git a/vendor/src/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go b/vendor/src/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go new file mode 100644 index 00000000..282d5ef5 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/go-bindata/examples/migrations/bindata.go @@ -0,0 +1,304 @@ +// Code generated by go-bindata. +// sources: +// 1085649617_create_users_table.down.sql +// 1085649617_create_users_table.up.sql +// 1185749658_add_city_to_users.down.sql +// 1185749658_add_city_to_users.up.sql +// DO NOT EDIT! 
+ +package testdata + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1085649617_create_users_tableDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x28\x2d\x4e\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x2c\x02\x3d\xa7\x1c\x00\x00\x00") + +func _1085649617_create_users_tableDownSqlBytes() ([]byte, error) { + return bindataRead( + __1085649617_create_users_tableDownSql, + "1085649617_create_users_table.down.sql", + ) +} + +func _1085649617_create_users_tableDownSql() (*asset, error) { + bytes, err := _1085649617_create_users_tableDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1085649617_create_users_table.down.sql", size: 28, mode: os.FileMode(420), modTime: time.Unix(1485750305, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1085649617_create_users_tableUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\xd0\xe0\x52\x00\xb3\xe2\x33\x53\x14\x32\xf3\x4a\x52\xd3\x53\x8b\x14\x4a\xf3\x32\x0b\x4b\x53\x75\xb8\x14\x14\xf2\x12\x73\x53\x15\x14\x14\x14\xca\x12\x8b\x92\x33\x12\x8b\x34\x4c\x0c\x34\x41\xc2\xa9\xb9\x89\x99\x39\xa8\xc2\x5c\x9a\xd6\x5c\x80\x00\x00\x00\xff\xff\xa3\x57\xbc\x0b\x5f\x00\x00\x00") + +func _1085649617_create_users_tableUpSqlBytes() ([]byte, error) { + return bindataRead( + __1085649617_create_users_tableUpSql, + "1085649617_create_users_table.up.sql", + ) +} + +func _1085649617_create_users_tableUpSql() (*asset, error) { + bytes, err := _1085649617_create_users_tableUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1085649617_create_users_table.up.sql", size: 95, mode: os.FileMode(420), modTime: time.Unix(1485803085, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1185749658_add_city_to_usersDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x09\xf5\xf5\x53\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\x48\xce\x2c\xa9\xb4\xe6\x02\x04\x00\x00\xff\xff\xb7\x52\x88\xd7\x2e\x00\x00\x00") + +func _1185749658_add_city_to_usersDownSqlBytes() ([]byte, error) { + return bindataRead( + __1185749658_add_city_to_usersDownSql, + "1185749658_add_city_to_users.down.sql", + ) +} + +func _1185749658_add_city_to_usersDownSql() (*asset, error) { + bytes, err := _1185749658_add_city_to_usersDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1185749658_add_city_to_users.down.sql", size: 46, mode: os.FileMode(420), modTime: time.Unix(1485750443, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1185749658_add_city_to_usersUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x2d\x4e\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x48\xce\x2c\xa9\x54\x28\x4b\x2c\x4a\xce\x48\x2c\xd2\x30\x34\x30\xd0\xb4\xe6\xe2\xe2\x02\x04\x00\x00\xff\xff\xa8\x0f\x49\xc6\x32\x00\x00\x00") + +func _1185749658_add_city_to_usersUpSqlBytes() ([]byte, error) { + return bindataRead( + __1185749658_add_city_to_usersUpSql, + "1185749658_add_city_to_users.up.sql", + ) +} + +func _1185749658_add_city_to_usersUpSql() (*asset, error) { + bytes, err := _1185749658_add_city_to_usersUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1185749658_add_city_to_users.up.sql", size: 50, mode: os.FileMode(420), modTime: time.Unix(1485843733, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1085649617_create_users_table.down.sql": _1085649617_create_users_tableDownSql, + "1085649617_create_users_table.up.sql": _1085649617_create_users_tableUpSql, + "1185749658_add_city_to_users.down.sql": _1185749658_add_city_to_usersDownSql, + "1185749658_add_city_to_users.up.sql": _1185749658_add_city_to_usersUpSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "1085649617_create_users_table.down.sql": &bintree{_1085649617_create_users_tableDownSql, map[string]*bintree{}}, + "1085649617_create_users_table.up.sql": &bintree{_1085649617_create_users_tableUpSql, map[string]*bintree{}}, + "1185749658_add_city_to_users.down.sql": &bintree{_1185749658_add_city_to_usersDownSql, map[string]*bintree{}}, + "1185749658_add_city_to_users.up.sql": &bintree{_1185749658_add_city_to_usersUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = 
RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} + diff --git a/vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata.go b/vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata.go new file mode 100644 index 00000000..7426db71 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata.go @@ -0,0 +1,119 @@ +package bindata + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mattes/migrate/source" +) + +type AssetFunc func(name string) ([]byte, error) + +func Resource(names []string, afn AssetFunc) *AssetSource { + return &AssetSource{ + Names: names, + AssetFunc: afn, + } +} + +type AssetSource struct { + Names []string + AssetFunc AssetFunc +} + +func init() { + source.Register("go-bindata", &Bindata{}) +} + +type Bindata struct { + path string + assetSource *AssetSource + migrations *source.Migrations +} + +func (b *Bindata) Open(url string) (source.Driver, error) { + return nil, fmt.Errorf("not yet implemented") +} + +var ( + ErrNoAssetSource = fmt.Errorf("expects *AssetSource") +) + +func WithInstance(instance interface{}) (source.Driver, error) { + if _, ok := instance.(*AssetSource); !ok { + return nil, ErrNoAssetSource + } + as := instance.(*AssetSource) + + bn := &Bindata{ + path: "", + assetSource: as, + migrations: source.NewMigrations(), + } + + for _, fi := range as.Names { + m, err := source.DefaultParse(fi) + if err != nil { + continue // ignore files that we can't parse + } + + if !bn.migrations.Append(m) { + return nil, fmt.Errorf("unable to parse file %v", fi) + } + } + + return bn, nil +} + +func (b *Bindata) Close() error { + return nil +} + +func (b *Bindata) First() (version uint, err error) { + if v, ok := b.migrations.First(); !ok 
{ + return 0, &os.PathError{"first", b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) Prev(version uint) (prevVersion uint, err error) { + if v, ok := b.migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) Next(version uint) (nextVersion uint, err error) { + if v, ok := b.migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), b.path, os.ErrNotExist} + } else { + return v, nil + } +} + +func (b *Bindata) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := b.migrations.Up(version); ok { + body, err := b.assetSource.AssetFunc(m.Raw) + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} +} + +func (b *Bindata) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := b.migrations.Down(version); ok { + body, err := b.assetSource.AssetFunc(m.Raw) + if err != nil { + return nil, "", err + } + return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil + } + return nil, "", &os.PathError{fmt.Sprintf("read version %v", version), b.path, os.ErrNotExist} +} diff --git a/vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go b/vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go new file mode 100644 index 00000000..746a7b91 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/go-bindata/go-bindata_test.go @@ -0,0 +1,43 @@ +package bindata + +import ( + "testing" + + "github.com/mattes/migrate/source/go-bindata/testdata" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + // wrap assets into Resource first + s := Resource(testdata.AssetNames(), + func(name string) ([]byte, 
error) { + return testdata.Asset(name) + }) + + d, err := WithInstance(s) + if err != nil { + t.Fatal(err) + } + st.Test(t, d) +} + +func TestWithInstance(t *testing.T) { + // wrap assets into Resource + s := Resource(testdata.AssetNames(), + func(name string) ([]byte, error) { + return testdata.Asset(name) + }) + + _, err := WithInstance(s) + if err != nil { + t.Fatal(err) + } +} + +func TestOpen(t *testing.T) { + b := &Bindata{} + _, err := b.Open("") + if err == nil { + t.Fatal("expected err, because it's not implemented yet") + } +} diff --git a/vendor/src/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go b/vendor/src/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go new file mode 100644 index 00000000..304f3d87 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/go-bindata/testdata/bindata.go @@ -0,0 +1,396 @@ +// Code generated by go-bindata. +// sources: +// 1_test.down.sql +// 1_test.up.sql +// 3_test.up.sql +// 4_test.down.sql +// 4_test.up.sql +// 5_test.down.sql +// 7_test.down.sql +// 7_test.up.sql +// DO NOT EDIT! 
+ +package testdata + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var __1_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _1_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __1_testDownSql, + "1_test.down.sql", + ) +} + +func _1_testDownSql() (*asset, error) { + bytes, err := _1_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "1_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440324, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __1_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _1_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __1_testUpSql, + "1_test.up.sql", + ) +} + +func _1_testUpSql() (*asset, error) { + bytes, err := _1_testUpSqlBytes() + if err != nil { + return nil, err + 
} + + info := bindataFileInfo{name: "1_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440319, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __3_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _3_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __3_testUpSql, + "3_test.up.sql", + ) +} + +func _3_testUpSql() (*asset, error) { + bytes, err := _3_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "3_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440331, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __4_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _4_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __4_testDownSql, + "4_test.down.sql", + ) +} + +func _4_testDownSql() (*asset, error) { + bytes, err := _4_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "4_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440337, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __4_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _4_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __4_testUpSql, + "4_test.up.sql", + ) +} + +func _4_testUpSql() (*asset, error) { + bytes, err := _4_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "4_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440335, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __5_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _5_testDownSqlBytes() ([]byte, error) { + return bindataRead( + 
__5_testDownSql, + "5_test.down.sql", + ) +} + +func _5_testDownSql() (*asset, error) { + bytes, err := _5_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "5_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440340, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __7_testDownSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _7_testDownSqlBytes() ([]byte, error) { + return bindataRead( + __7_testDownSql, + "7_test.down.sql", + ) +} + +func _7_testDownSql() (*asset, error) { + bytes, err := _7_testDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "7_test.down.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440343, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __7_testUpSql = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func _7_testUpSqlBytes() ([]byte, error) { + return bindataRead( + __7_testUpSql, + "7_test.up.sql", + ) +} + +func _7_testUpSql() (*asset, error) { + bytes, err := _7_testUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "7_test.up.sql", size: 0, mode: os.FileMode(420), modTime: time.Unix(1486440347, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. 
+// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "1_test.down.sql": _1_testDownSql, + "1_test.up.sql": _1_testUpSql, + "3_test.up.sql": _3_testUpSql, + "4_test.down.sql": _4_testDownSql, + "4_test.up.sql": _4_testUpSql, + "5_test.down.sql": _5_testDownSql, + "7_test.down.sql": _7_testDownSql, + "7_test.up.sql": _7_testUpSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "1_test.down.sql": &bintree{_1_testDownSql, map[string]*bintree{}}, + "1_test.up.sql": &bintree{_1_testUpSql, map[string]*bintree{}}, + "3_test.up.sql": &bintree{_3_testUpSql, map[string]*bintree{}}, + "4_test.down.sql": &bintree{_4_testDownSql, map[string]*bintree{}}, + "4_test.up.sql": &bintree{_4_testUpSql, map[string]*bintree{}}, + "5_test.down.sql": &bintree{_5_testDownSql, map[string]*bintree{}}, + "7_test.down.sql": &bintree{_7_testDownSql, map[string]*bintree{}}, + "7_test.up.sql": &bintree{_7_testUpSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { 
+ return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} + diff --git a/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/README.md b/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/README.md new file mode 100644 index 00000000..e61cb231 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/README.md @@ -0,0 +1,3 @@ +# google-cloud-storage + +`gcs:///` diff --git a/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage.go b/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage.go new file mode 100644 index 00000000..c1a18bc2 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage.go @@ -0,0 +1,119 @@ +package googlecloudstorage + +import ( + "fmt" + "io" + "net/url" + "os" + "path" + "strings" + + "cloud.google.com/go/storage" + "github.com/mattes/migrate/source" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func init() { + source.Register("gcs", &gcs{}) +} + +type gcs struct { + bucket *storage.BucketHandle + prefix string + migrations *source.Migrations +} + +func (g *gcs) Open(folder string) (source.Driver, error) { + u, err := url.Parse(folder) + if err != nil { + return nil, err + } + client, err := storage.NewClient(context.Background()) + if err != nil { + return nil, err + } + driver := gcs{ + bucket: client.Bucket(u.Host), + prefix: strings.Trim(u.Path, "/") + "/", + migrations: source.NewMigrations(), + } + err = driver.loadMigrations() + if err != nil { + return nil, err + } + return &driver, nil +} + +func (g *gcs) loadMigrations() error { + iter := 
g.bucket.Objects(context.Background(), &storage.Query{ + Prefix: g.prefix, + Delimiter: "/", + }) + object, err := iter.Next() + for ; err == nil; object, err = iter.Next() { + _, fileName := path.Split(object.Name) + m, parseErr := source.DefaultParse(fileName) + if parseErr != nil { + continue + } + if !g.migrations.Append(m) { + return fmt.Errorf("unable to parse file %v", object.Name) + } + } + if err != iterator.Done { + return err + } + return nil +} + +func (g *gcs) Close() error { + return nil +} + +func (g *gcs) First() (uint, error) { + v, ok := g.migrations.First() + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) Prev(version uint) (uint, error) { + v, ok := g.migrations.Prev(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) Next(version uint) (uint, error) { + v, ok := g.migrations.Next(version) + if !ok { + return 0, os.ErrNotExist + } + return v, nil +} + +func (g *gcs) ReadUp(version uint) (io.ReadCloser, string, error) { + if m, ok := g.migrations.Up(version); ok { + return g.open(m) + } + return nil, "", os.ErrNotExist +} + +func (g *gcs) ReadDown(version uint) (io.ReadCloser, string, error) { + if m, ok := g.migrations.Down(version); ok { + return g.open(m) + } + return nil, "", os.ErrNotExist +} + +func (g *gcs) open(m *source.Migration) (io.ReadCloser, string, error) { + objectPath := path.Join(g.prefix, m.Raw) + reader, err := g.bucket.Object(objectPath).NewReader(context.Background()) + if err != nil { + return nil, "", err + } + return reader, m.Identifier, nil +} diff --git a/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go b/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go new file mode 100644 index 00000000..2af4947c --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/google-cloud-storage/storage_test.go @@ -0,0 +1,37 @@ +package googlecloudstorage + +import ( + "testing" + + 
"github.com/fsouza/fake-gcs-server/fakestorage" + "github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + server := fakestorage.NewServer([]fakestorage.Object{ + {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.up.sql", Content: []byte("1 up")}, + {BucketName: "some-bucket", Name: "staging/migrations/1_foobar.down.sql", Content: []byte("1 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.up.sql", Content: []byte("1 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/1_foobar.down.sql", Content: []byte("1 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/3_foobar.up.sql", Content: []byte("3 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.up.sql", Content: []byte("4 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/4_foobar.down.sql", Content: []byte("4 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/5_foobar.down.sql", Content: []byte("5 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.up.sql", Content: []byte("7 up")}, + {BucketName: "some-bucket", Name: "prod/migrations/7_foobar.down.sql", Content: []byte("7 down")}, + {BucketName: "some-bucket", Name: "prod/migrations/not-a-migration.txt"}, + {BucketName: "some-bucket", Name: "prod/migrations/0-random-stuff/whatever.txt"}, + }) + defer server.Stop() + driver := gcs{ + bucket: server.Client().Bucket("some-bucket"), + prefix: "prod/migrations/", + migrations: source.NewMigrations(), + } + err := driver.loadMigrations() + if err != nil { + t.Fatal(err) + } + st.Test(t, &driver) +} diff --git a/vendor/src/github.com/mattes/migrate/source/migration.go b/vendor/src/github.com/mattes/migrate/source/migration.go new file mode 100644 index 00000000..97a4ee22 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/migration.go @@ -0,0 +1,143 @@ +package source + +import ( + "sort" +) + +// Direction is either up or down. 
+type Direction string + +const ( + Down Direction = "down" + Up = "up" +) + +// Migration is a helper struct for source drivers that need to +// build the full directory tree in memory. +// Migration is fully independent from migrate.Migration. +type Migration struct { + // Version is the version of this migration. + Version uint + + // Identifier can be any string that helps identifying + // this migration in the source. + Identifier string + + // Direction is either Up or Down. + Direction Direction + + // Raw holds the raw location path to this migration in source. + // ReadUp and ReadDown will use this. + Raw string +} + +// Migrations wraps Migration and has an internal index +// to keep track of Migration order. +type Migrations struct { + index uintSlice + migrations map[uint]map[Direction]*Migration +} + +func NewMigrations() *Migrations { + return &Migrations{ + index: make(uintSlice, 0), + migrations: make(map[uint]map[Direction]*Migration), + } +} + +func (i *Migrations) Append(m *Migration) (ok bool) { + if m == nil { + return false + } + + if i.migrations[m.Version] == nil { + i.migrations[m.Version] = make(map[Direction]*Migration) + } + + // reject duplicate versions + if _, dup := i.migrations[m.Version][m.Direction]; dup { + return false + } + + i.migrations[m.Version][m.Direction] = m + i.buildIndex() + + return true +} + +func (i *Migrations) buildIndex() { + i.index = make(uintSlice, 0) + for version, _ := range i.migrations { + i.index = append(i.index, version) + } + sort.Sort(i.index) +} + +func (i *Migrations) First() (version uint, ok bool) { + if len(i.index) == 0 { + return 0, false + } + return i.index[0], true +} + +func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) { + pos := i.findPos(version) + if pos >= 1 && len(i.index) > pos-1 { + return i.index[pos-1], true + } + return 0, false +} + +func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) { + pos := i.findPos(version) + if pos >= 0 && len(i.index) > 
pos+1 { + return i.index[pos+1], true + } + return 0, false +} + +func (i *Migrations) Up(version uint) (m *Migration, ok bool) { + if _, ok := i.migrations[version]; ok { + if mx, ok := i.migrations[version][Up]; ok { + return mx, true + } + } + return nil, false +} + +func (i *Migrations) Down(version uint) (m *Migration, ok bool) { + if _, ok := i.migrations[version]; ok { + if mx, ok := i.migrations[version][Down]; ok { + return mx, true + } + } + return nil, false +} + +func (i *Migrations) findPos(version uint) int { + if len(i.index) > 0 { + ix := i.index.Search(version) + if ix < len(i.index) && i.index[ix] == version { + return ix + } + } + return -1 +} + +type uintSlice []uint + +func (s uintSlice) Len() int { + return len(s) +} + +func (s uintSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s uintSlice) Less(i, j int) bool { + return s[i] < s[j] +} + +func (s uintSlice) Search(x uint) int { + return sort.Search(len(s), func(i int) bool { return s[i] >= x }) +} diff --git a/vendor/src/github.com/mattes/migrate/source/migration_test.go b/vendor/src/github.com/mattes/migrate/source/migration_test.go new file mode 100644 index 00000000..857cd26a --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/migration_test.go @@ -0,0 +1,46 @@ +package source + +import ( + "testing" +) + +func TestNewMigrations(t *testing.T) { + // TODO +} + +func TestAppend(t *testing.T) { + // TODO +} + +func TestBuildIndex(t *testing.T) { + // TODO +} + +func TestFirst(t *testing.T) { + // TODO +} + +func TestPrev(t *testing.T) { + // TODO +} + +func TestUp(t *testing.T) { + // TODO +} + +func TestDown(t *testing.T) { + // TODO +} + +func TestFindPos(t *testing.T) { + m := Migrations{index: uintSlice{1, 2, 3}} + if p := m.findPos(0); p != -1 { + t.Errorf("expected -1, got %v", p) + } + if p := m.findPos(1); p != 0 { + t.Errorf("expected 0, got %v", p) + } + if p := m.findPos(3); p != 2 { + t.Errorf("expected 2, got %v", p) + } +} diff --git 
a/vendor/src/github.com/mattes/migrate/source/parse.go b/vendor/src/github.com/mattes/migrate/source/parse.go new file mode 100644 index 00000000..2f888fe7 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/parse.go @@ -0,0 +1,39 @@ +package source + +import ( + "fmt" + "regexp" + "strconv" +) + +var ( + ErrParse = fmt.Errorf("no match") +) + +var ( + DefaultParse = Parse + DefaultRegex = Regex +) + +// Regex matches the following pattern: +// 123_name.up.ext +// 123_name.down.ext +var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`) + +// Parse returns Migration for matching Regex pattern. +func Parse(raw string) (*Migration, error) { + m := Regex.FindStringSubmatch(raw) + if len(m) == 5 { + versionUint64, err := strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, err + } + return &Migration{ + Version: uint(versionUint64), + Identifier: m[2], + Direction: Direction(m[3]), + Raw: raw, + }, nil + } + return nil, ErrParse +} diff --git a/vendor/src/github.com/mattes/migrate/source/parse_test.go b/vendor/src/github.com/mattes/migrate/source/parse_test.go new file mode 100644 index 00000000..d06356cc --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/parse_test.go @@ -0,0 +1,106 @@ +package source + +import ( + "testing" +) + +func TestParse(t *testing.T) { + tt := []struct { + name string + expectErr error + expectMigration *Migration + }{ + { + name: "1_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "foobar", + Direction: Up, + Raw: "1_foobar.up.sql", + }, + }, + { + name: "1_foobar.down.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "foobar", + Direction: Down, + Raw: "1_foobar.down.sql", + }, + }, + { + name: "1_f-o_ob+ar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1, + Identifier: "f-o_ob+ar", + Direction: Up, + Raw: "1_f-o_ob+ar.up.sql", + }, + }, + { + name: 
"1485385885_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 1485385885, + Identifier: "foobar", + Direction: Up, + Raw: "1485385885_foobar.up.sql", + }, + }, + { + name: "20170412214116_date_foobar.up.sql", + expectErr: nil, + expectMigration: &Migration{ + Version: 20170412214116, + Identifier: "date_foobar", + Direction: Up, + Raw: "20170412214116_date_foobar.up.sql", + }, + }, + { + name: "-1_foobar.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "foobar.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1.up.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.sql", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.up", + expectErr: ErrParse, + expectMigration: nil, + }, + { + name: "1_foobar.down", + expectErr: ErrParse, + expectMigration: nil, + }, + } + + for i, v := range tt { + f, err := Parse(v.name) + + if err != v.expectErr { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + } + + if v.expectMigration != nil && *f != *v.expectMigration { + t.Errorf("expected %+v, got %+v, in %v", *v.expectMigration, *f, i) + } + } +} diff --git a/vendor/src/github.com/mattes/migrate/source/stub/stub.go b/vendor/src/github.com/mattes/migrate/source/stub/stub.go new file mode 100644 index 00000000..0f4153c5 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/stub/stub.go @@ -0,0 +1,85 @@ +package stub + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/mattes/migrate/source" +) + +func init() { + source.Register("stub", &Stub{}) +} + +type Config struct{} + +// d, _ := source.Open("stub://") +// d.(*stub.Stub).Migrations = + +type Stub struct { + Url string + Instance interface{} + Migrations *source.Migrations + Config *Config +} + +func (s *Stub) Open(url string) (source.Driver, error) { + return &Stub{ + Url: url, + Migrations: source.NewMigrations(), + Config: &Config{}, + }, nil +} + +func 
WithInstance(instance interface{}, config *Config) (source.Driver, error) { + return &Stub{ + Instance: instance, + Migrations: source.NewMigrations(), + Config: config, + }, nil +} + +func (s *Stub) Close() error { + return nil +} + +func (s *Stub) First() (version uint, err error) { + if v, ok := s.Migrations.First(); !ok { + return 0, &os.PathError{"first", s.Url, os.ErrNotExist} // TODO: s.Url can be empty when called with WithInstance + } else { + return v, nil + } +} + +func (s *Stub) Prev(version uint) (prevVersion uint, err error) { + if v, ok := s.Migrations.Prev(version); !ok { + return 0, &os.PathError{fmt.Sprintf("prev for version %v", version), s.Url, os.ErrNotExist} + } else { + return v, nil + } +} + +func (s *Stub) Next(version uint) (nextVersion uint, err error) { + if v, ok := s.Migrations.Next(version); !ok { + return 0, &os.PathError{fmt.Sprintf("next for version %v", version), s.Url, os.ErrNotExist} + } else { + return v, nil + } +} + +func (s *Stub) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := s.Migrations.Up(version); ok { + return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.up.stub", version), nil + } + return nil, "", &os.PathError{fmt.Sprintf("read up version %v", version), s.Url, os.ErrNotExist} +} + +func (s *Stub) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) { + if m, ok := s.Migrations.Down(version); ok { + return ioutil.NopCloser(bytes.NewBufferString(m.Identifier)), fmt.Sprintf("%v.down.stub", version), nil + } + return nil, "", &os.PathError{fmt.Sprintf("read down version %v", version), s.Url, os.ErrNotExist} +} diff --git a/vendor/src/github.com/mattes/migrate/source/stub/stub_test.go b/vendor/src/github.com/mattes/migrate/source/stub/stub_test.go new file mode 100644 index 00000000..05ce819d --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/stub/stub_test.go @@ -0,0 +1,30 @@ +package stub + +import ( + "testing" + + 
"github.com/mattes/migrate/source" + st "github.com/mattes/migrate/source/testing" +) + +func Test(t *testing.T) { + s := &Stub{} + d, err := s.Open("") + if err != nil { + t.Fatal(err) + } + + m := source.NewMigrations() + m.Append(&source.Migration{Version: 1, Direction: source.Up}) + m.Append(&source.Migration{Version: 1, Direction: source.Down}) + m.Append(&source.Migration{Version: 3, Direction: source.Up}) + m.Append(&source.Migration{Version: 4, Direction: source.Up}) + m.Append(&source.Migration{Version: 4, Direction: source.Down}) + m.Append(&source.Migration{Version: 5, Direction: source.Down}) + m.Append(&source.Migration{Version: 7, Direction: source.Up}) + m.Append(&source.Migration{Version: 7, Direction: source.Down}) + + d.(*Stub).Migrations = m + + st.Test(t, d) +} diff --git a/vendor/src/github.com/mattes/migrate/source/testing/testing.go b/vendor/src/github.com/mattes/migrate/source/testing/testing.go new file mode 100644 index 00000000..3cc003c5 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/source/testing/testing.go @@ -0,0 +1,169 @@ +// Package testing has the source tests. +// All source drivers must pass the Test function. +// This lives in it's own package so it stays a test dependency. +package testing + +import ( + "os" + "testing" + + "github.com/mattes/migrate/source" +) + +// Test runs tests against source implementations. +// It assumes that the driver tests has access to the following migrations: +// +// u = up migration, d = down migration, n = version +// | 1 | - | 3 | 4 | 5 | - | 7 | +// | u d | - | u | u d | d | - | u d | +// +// See source/stub/stub_test.go or source/file/file_test.go for an example. 
+func Test(t *testing.T, d source.Driver) { + TestFirst(t, d) + TestPrev(t, d) + TestNext(t, d) + TestReadUp(t, d) + TestReadDown(t, d) +} + +func TestFirst(t *testing.T, d source.Driver) { + version, err := d.First() + if err != nil { + t.Fatalf("First: expected err to be nil, got %v", err) + } + if version != 1 { + t.Errorf("First: expected 1, got %v", version) + } +} + +func TestPrev(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectPrevVersion uint + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: os.ErrNotExist}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectPrevVersion: 1}, + {version: 4, expectErr: nil, expectPrevVersion: 3}, + {version: 5, expectErr: nil, expectPrevVersion: 4}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectPrevVersion: 5}, + {version: 8, expectErr: os.ErrNotExist}, + {version: 9, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + pv, err := d.Prev(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) && v.expectErr != err { + t.Errorf("Prev: expected %v, got %v, in %v", v.expectErr, err, i) + } + if err == nil && v.expectPrevVersion != pv { + t.Errorf("Prev: expected %v, got %v, in %v", v.expectPrevVersion, pv, i) + } + } +} + +func TestNext(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectNextVersion uint + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectNextVersion: 3}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectNextVersion: 4}, + {version: 4, expectErr: nil, expectNextVersion: 5}, + {version: 5, expectErr: nil, expectNextVersion: 7}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: os.ErrNotExist}, + {version: 8, expectErr: os.ErrNotExist}, + {version: 9, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + nv, err := d.Next(v.version) + 
if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) && v.expectErr != err { + t.Errorf("Next: expected %v, got %v, in %v", v.expectErr, err, i) + } + if err == nil && v.expectNextVersion != nv { + t.Errorf("Next: expected %v, got %v, in %v", v.expectNextVersion, nv, i) + } + } +} + +func TestReadUp(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectUp bool + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectUp: true}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: nil, expectUp: true}, + {version: 4, expectErr: nil, expectUp: true}, + {version: 5, expectErr: os.ErrNotExist}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectUp: true}, + {version: 8, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + up, identifier, err := d.ReadUp(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + if len(identifier) == 0 { + t.Errorf("expected identifier not to be empty, in %v", i) + } + + if v.expectUp == true && up == nil { + t.Errorf("expected up not to be nil, in %v", i) + } else if v.expectUp == false && up != nil { + t.Errorf("expected up to be nil, got %v, in %v", up, i) + } + } + } +} + +func TestReadDown(t *testing.T, d source.Driver) { + tt := []struct { + version uint + expectErr error + expectDown bool + }{ + {version: 0, expectErr: os.ErrNotExist}, + {version: 1, expectErr: nil, expectDown: true}, + {version: 2, expectErr: os.ErrNotExist}, + {version: 3, expectErr: os.ErrNotExist}, + {version: 4, expectErr: nil, expectDown: true}, + {version: 5, expectErr: nil, expectDown: true}, + {version: 6, expectErr: os.ErrNotExist}, + {version: 7, expectErr: nil, expectDown: true}, + {version: 8, expectErr: os.ErrNotExist}, + } + + for i, v := range tt { + down, 
identifier, err := d.ReadDown(v.version) + if (v.expectErr == os.ErrNotExist && !os.IsNotExist(err)) || + (v.expectErr != os.ErrNotExist && err != v.expectErr) { + t.Errorf("expected %v, got %v, in %v", v.expectErr, err, i) + + } else if err == nil { + if len(identifier) == 0 { + t.Errorf("expected identifier not to be empty, in %v", i) + } + + if v.expectDown == true && down == nil { + t.Errorf("expected down not to be nil, in %v", i) + } else if v.expectDown == false && down != nil { + t.Errorf("expected down to be nil, got %v, in %v", down, i) + } + } + } +} diff --git a/vendor/src/github.com/mattes/migrate/testing/docker.go b/vendor/src/github.com/mattes/migrate/testing/docker.go new file mode 100644 index 00000000..f7a7c415 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/testing/docker.go @@ -0,0 +1,254 @@ +// Package testing is used in driver tests. +package testing + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "strconv" + "strings" + "testing" + "time" + dockertypes "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" + dockernetwork "github.com/docker/docker/api/types/network" + dockerclient "github.com/docker/docker/client" +) + +func NewDockerContainer(t testing.TB, image string, env []string, cmd []string) (*DockerContainer, error) { + c, err := dockerclient.NewEnvClient() + if err != nil { + return nil, err + } + + if cmd == nil { + cmd = make([]string, 0) + } + + contr := &DockerContainer{ + t: t, + client: c, + ImageName: image, + ENV: env, + Cmd: cmd, + } + + if err := contr.PullImage(); err != nil { + return nil, err + } + + if err := contr.Start(); err != nil { + return nil, err + } + + return contr, nil +} + +// DockerContainer implements Instance interface +type DockerContainer struct { + t testing.TB + client *dockerclient.Client + ImageName string + ENV []string + Cmd []string + ContainerId string + ContainerName string + ContainerJSON 
dockertypes.ContainerJSON + containerInspected bool + keepForDebugging bool +} + +func (d *DockerContainer) PullImage() error { + d.t.Logf("Docker: Pull image %v", d.ImageName) + r, err := d.client.ImagePull(context.Background(), d.ImageName, dockertypes.ImagePullOptions{}) + if err != nil { + return err + } + defer r.Close() + + // read output and log relevant lines + bf := bufio.NewScanner(r) + for bf.Scan() { + var resp dockerImagePullOutput + if err := json.Unmarshal(bf.Bytes(), &resp); err != nil { + return err + } + if strings.HasPrefix(resp.Status, "Status: ") { + d.t.Logf("Docker: %v", resp.Status) + } + } + return bf.Err() +} + +func (d *DockerContainer) Start() error { + containerName := fmt.Sprintf("migrate_test_%v", pseudoRandStr(10)) + + // create container first + resp, err := d.client.ContainerCreate(context.Background(), + &dockercontainer.Config{ + Image: d.ImageName, + Labels: map[string]string{"migrate_test": "true"}, + Env: d.ENV, + Cmd: d.Cmd, + }, + &dockercontainer.HostConfig{ + PublishAllPorts: true, + }, + &dockernetwork.NetworkingConfig{}, + containerName) + if err != nil { + return err + } + + d.ContainerId = resp.ID + d.ContainerName = containerName + + // then start it + if err := d.client.ContainerStart(context.Background(), resp.ID, dockertypes.ContainerStartOptions{}); err != nil { + return err + } + + d.t.Logf("Docker: Started container %v (%v) for image %v listening at %v:%v", resp.ID[0:12], containerName, d.ImageName, d.Host(), d.Port()) + for _, v := range resp.Warnings { + d.t.Logf("Docker: Warning: %v", v) + } + return nil +} + +func (d *DockerContainer) KeepForDebugging() { + d.keepForDebugging = true +} + +func (d *DockerContainer) Remove() error { + if d.keepForDebugging { + return nil + } + + if len(d.ContainerId) == 0 { + return fmt.Errorf("missing containerId") + } + if err := d.client.ContainerRemove(context.Background(), d.ContainerId, + dockertypes.ContainerRemoveOptions{ + Force: true, + }); err != nil { + 
d.t.Log(err) + return err + } + d.t.Logf("Docker: Removed %v", d.ContainerName) + return nil +} + +func (d *DockerContainer) Inspect() error { + if len(d.ContainerId) == 0 { + return fmt.Errorf("missing containerId") + } + resp, err := d.client.ContainerInspect(context.Background(), d.ContainerId) + if err != nil { + return err + } + + d.ContainerJSON = resp + d.containerInspected = true + return nil +} + +func (d *DockerContainer) Logs() (io.ReadCloser, error) { + if len(d.ContainerId) == 0 { + return nil, fmt.Errorf("missing containerId") + } + + return d.client.ContainerLogs(context.Background(), d.ContainerId, dockertypes.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + }) +} + +func (d *DockerContainer) portMapping(selectFirst bool, cPort int) (containerPort uint, hostIP string, hostPort uint, err error) { + if !d.containerInspected { + if err := d.Inspect(); err != nil { + d.t.Fatal(err) + } + } + + for port, bindings := range d.ContainerJSON.NetworkSettings.Ports { + if !selectFirst && port.Int() != cPort { + // Skip ahead until we find the port we want + continue + } + for _, binding := range bindings { + + hostPortUint, err := strconv.ParseUint(binding.HostPort, 10, 64) + if err != nil { + return 0, "", 0, err + } + + return uint(port.Int()), binding.HostIP, uint(hostPortUint), nil + } + } + + if selectFirst { + return 0, "", 0, fmt.Errorf("no port binding") + } else { + return 0, "", 0, fmt.Errorf("specified port not bound") + } +} + +func (d *DockerContainer) Host() string { + _, hostIP, _, err := d.portMapping(true, -1) + if err != nil { + d.t.Fatal(err) + } + + if hostIP == "0.0.0.0" { + return "127.0.0.1" + } else { + return hostIP + } +} + +func (d *DockerContainer) Port() uint { + _, _, port, err := d.portMapping(true, -1) + if err != nil { + d.t.Fatal(err) + } + return port +} + +func (d *DockerContainer) PortFor(cPort int) uint { + _, _, port, err := d.portMapping(false, cPort) + if err != nil { + d.t.Fatal(err) + } + return port +} 
+ +func (d *DockerContainer) NetworkSettings() dockertypes.NetworkSettings { + netSettings := d.ContainerJSON.NetworkSettings + return *netSettings +} + +type dockerImagePullOutput struct { + Status string `json:"status"` + ProgressDetails struct { + Current int `json:"current"` + Total int `json:"total"` + } `json:"progressDetail"` + Id string `json:"id"` + Progress string `json:"progress"` +} + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func pseudoRandStr(n int) string { + var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz0123456789") + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/vendor/src/github.com/mattes/migrate/testing/testing.go b/vendor/src/github.com/mattes/migrate/testing/testing.go new file mode 100644 index 00000000..64e0a646 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/testing/testing.go @@ -0,0 +1,96 @@ +package testing + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + "time" + + dockertypes "github.com/docker/docker/api/types" +) + +type IsReadyFunc func(Instance) bool + +type TestFunc func(*testing.T, Instance) + +type Version struct { + Image string + ENV []string + Cmd []string +} + +func ParallelTest(t *testing.T, versions []Version, readyFn IsReadyFunc, testFn TestFunc) { + delay, err := strconv.Atoi(os.Getenv("MIGRATE_TEST_CONTAINER_BOOT_DELAY")) + if err != nil { + delay = 0 + } + + for i, version := range versions { + version := version // capture range variable, see https://goo.gl/60w3p2 + + // Only test against one version in short mode + // TODO: order is random, maybe always pick first version instead? 
+ if i > 0 && testing.Short() { + t.Logf("Skipping %v in short mode", version) + + } else { + t.Run(version.Image, func(t *testing.T) { + t.Parallel() + + // create new container + container, err := NewDockerContainer(t, version.Image, version.ENV, version.Cmd) + if err != nil { + t.Fatalf("%v\n%s", err, containerLogs(t, container)) + } + + // make sure to remove container once done + defer container.Remove() + + // wait until database is ready + tick := time.Tick(1000 * time.Millisecond) + timeout := time.After(time.Duration(delay + 60) * time.Second) + outer: + for { + select { + case <-tick: + if readyFn(container) { + break outer + } + + case <-timeout: + t.Fatalf("Docker: Container not ready, timeout for %v.\n%s", version, containerLogs(t, container)) + } + } + + time.Sleep(time.Duration(int64(delay)) * time.Second) + + // we can now run the tests + testFn(t, container) + }) + } + } +} + +func containerLogs(t *testing.T, c *DockerContainer) []byte { + r, err := c.Logs() + if err != nil { + t.Error("%v", err) + return nil + } + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + t.Error("%v", err) + return nil + } + return b +} + +type Instance interface { + Host() string + Port() uint + PortFor(int) uint + NetworkSettings() dockertypes.NetworkSettings + KeepForDebugging() +} diff --git a/vendor/src/github.com/mattes/migrate/testing/testing_test.go b/vendor/src/github.com/mattes/migrate/testing/testing_test.go new file mode 100644 index 00000000..8217decf --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/testing/testing_test.go @@ -0,0 +1,20 @@ +package testing + +import ( + "testing" +) + +func ExampleParallelTest(t *testing.T) { + var isReady = func(i Instance) bool { + // Return true if Instance is ready to run tests. + // Don't block here though. 
+ return true + } + + // t is *testing.T coming from parent Test(t *testing.T) + ParallelTest(t, []Version{{Image: "docker_image:9.6"}}, isReady, + func(t *testing.T, i Instance) { + // Run your test/s ... + t.Fatal("...") + }) +} diff --git a/vendor/src/github.com/mattes/migrate/util.go b/vendor/src/github.com/mattes/migrate/util.go new file mode 100644 index 00000000..67048ea5 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/util.go @@ -0,0 +1,105 @@ +package migrate + +import ( + "bufio" + "fmt" + "io" + nurl "net/url" + "strings" + "time" +) + +// MultiError holds multiple errors. +type MultiError struct { + Errs []error +} + +// NewMultiError returns an error type holding multiple errors. +func NewMultiError(errs ...error) MultiError { + compactErrs := make([]error, 0) + for _, e := range errs { + if e != nil { + compactErrs = append(compactErrs, e) + } + } + return MultiError{compactErrs} +} + +// Error implements error. Mulitple errors are concatenated with 'and's. +func (m MultiError) Error() string { + var strs = make([]string, 0) + for _, e := range m.Errs { + if len(e.Error()) > 0 { + strs = append(strs, e.Error()) + } + } + return strings.Join(strs, " and ") +} + +// suint safely converts int to uint +// see https://goo.gl/wEcqof +// see https://goo.gl/pai7Dr +func suint(n int) uint { + if n < 0 { + panic(fmt.Sprintf("suint(%v) expects input >= 0", n)) + } + return uint(n) +} + +// newSlowReader turns an io.ReadCloser into a slow io.ReadCloser. +// Use this to simulate a slow internet connection. 
+func newSlowReader(r io.ReadCloser) io.ReadCloser { + return &slowReader{ + rx: r, + reader: bufio.NewReader(r), + } +} + +type slowReader struct { + rx io.ReadCloser + reader *bufio.Reader +} + +func (b *slowReader) Read(p []byte) (n int, err error) { + time.Sleep(10 * time.Millisecond) + c, err := b.reader.ReadByte() + if err != nil { + return 0, err + } else { + copy(p, []byte{c}) + return 1, nil + } +} + +func (b *slowReader) Close() error { + return b.rx.Close() +} + +var errNoScheme = fmt.Errorf("no scheme") + +// schemeFromUrl returns the scheme from a URL string +func schemeFromUrl(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if len(u.Scheme) == 0 { + return "", errNoScheme + } + + return u.Scheme, nil +} + +// FilterCustomQuery filters all query values starting with `x-` +func FilterCustomQuery(u *nurl.URL) *nurl.URL { + ux := *u + vx := make(nurl.Values) + for k, v := range ux.Query() { + if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") { + vx[k] = v + } + } + ux.RawQuery = vx.Encode() + return &ux +} diff --git a/vendor/src/github.com/mattes/migrate/util_test.go b/vendor/src/github.com/mattes/migrate/util_test.go new file mode 100644 index 00000000..1ad23447 --- /dev/null +++ b/vendor/src/github.com/mattes/migrate/util_test.go @@ -0,0 +1,32 @@ +package migrate + +import ( + nurl "net/url" + "testing" +) + +func TestSuintPanicsWithNegativeInput(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Fatal("expected suint to panic for -1") + } + }() + suint(-1) +} + +func TestSuint(t *testing.T) { + if u := suint(0); u != 0 { + t.Fatalf("expected 0, got %v", u) + } +} + +func TestFilterCustomQuery(t *testing.T) { + n, err := nurl.Parse("foo://host?a=b&x-custom=foo&c=d") + if err != nil { + t.Fatal(err) + } + nx := FilterCustomQuery(n).Query() + if nx.Get("x-custom") != "" { + t.Fatalf("didn't expect x-custom") + } +}