Mirror of https://github.com/superseriousbusiness/gotosocial.git, synced 2025-11-09 03:27:29 -06:00

bumps uptrace/bun dependencies to v1.2.6 (#3569)

This commit is contained in:
parent a444adee97
commit 3fceb5fc1a

68 changed files with 6517 additions and 194 deletions
429 vendor/github.com/uptrace/bun/migrate/auto.go (generated, vendored, new file)

@@ -0,0 +1,429 @@
package migrate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/internal"
|
||||
"github.com/uptrace/bun/migrate/sqlschema"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
type AutoMigratorOption func(m *AutoMigrator)
|
||||
|
||||
// WithModel adds a bun.Model to the scope of migrations.
|
||||
func WithModel(models ...interface{}) AutoMigratorOption {
|
||||
return func(m *AutoMigrator) {
|
||||
m.includeModels = append(m.includeModels, models...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithExcludeTable tells the AutoMigrator to ignore a table in the database.
|
||||
// This prevents AutoMigrator from dropping tables which may exist in the schema
|
||||
// but which are not used by the application.
|
||||
//
|
||||
// Do not exclude tables included via WithModel, as BunModelInspector ignores this setting.
|
||||
func WithExcludeTable(tables ...string) AutoMigratorOption {
|
||||
return func(m *AutoMigrator) {
|
||||
m.excludeTables = append(m.excludeTables, tables...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithSchemaName changes the default database schema to migrate objects in.
|
||||
func WithSchemaName(schemaName string) AutoMigratorOption {
|
||||
return func(m *AutoMigrator) {
|
||||
m.schemaName = schemaName
|
||||
}
|
||||
}
|
||||
|
||||
// WithTableNameAuto overrides default migrations table name.
|
||||
func WithTableNameAuto(table string) AutoMigratorOption {
|
||||
return func(m *AutoMigrator) {
|
||||
m.table = table
|
||||
m.migratorOpts = append(m.migratorOpts, WithTableName(table))
|
||||
}
|
||||
}
|
||||
|
||||
// WithLocksTableNameAuto overrides default migration locks table name.
|
||||
func WithLocksTableNameAuto(table string) AutoMigratorOption {
|
||||
return func(m *AutoMigrator) {
|
||||
m.locksTable = table
|
||||
m.migratorOpts = append(m.migratorOpts, WithLocksTableName(table))
|
||||
}
|
||||
}
|
||||
|
||||
// WithMarkAppliedOnSuccessAuto sets the migrator to only mark migrations as applied/unapplied
|
||||
// when their up/down is successful.
|
||||
func WithMarkAppliedOnSuccessAuto(enabled bool) AutoMigratorOption {
|
||||
return func(m *AutoMigrator) {
|
||||
m.migratorOpts = append(m.migratorOpts, WithMarkAppliedOnSuccess(enabled))
|
||||
}
|
||||
}
|
||||
|
||||
// WithMigrationsDirectoryAuto overrides the default directory for migration files.
|
||||
func WithMigrationsDirectoryAuto(directory string) AutoMigratorOption {
|
||||
return func(m *AutoMigrator) {
|
||||
m.migrationsOpts = append(m.migrationsOpts, WithMigrationsDirectory(directory))
|
||||
}
|
||||
}
|
||||
|
||||
// AutoMigrator performs automated schema migrations.
|
||||
//
|
||||
// It is designed to be a drop-in replacement for some Migrator functionality and supports all existing
|
||||
// configuration options.
|
||||
// Similarly to Migrator, it has methods to create SQL migrations, write them to a file, and apply them.
|
||||
// Unlike Migrator, it detects the differences between the state defined by bun models and the current
|
||||
// database schema automatically.
|
||||
//
|
||||
// Usage:
|
||||
// 1. Generate migrations and apply them at once with AutoMigrator.Migrate().
|
||||
// 2. Create up- and down-SQL migration files and apply migrations using Migrator.Migrate().
|
||||
//
|
||||
// While both methods produce complete, reversible migrations (with entries in the database
|
||||
// and SQL migration files), prefer creating migrations and applying them separately for
|
||||
// any non-trivial cases to ensure AutoMigrator detects expected changes correctly.
|
||||
//
|
||||
// Limitations:
|
||||
// - AutoMigrator only supports a subset of the possible ALTER TABLE modifications.
|
||||
// - Some changes are not automatically reversible. For example, you would need to manually
|
||||
// add a CREATE TABLE query to the .down migration file to revert a DROP TABLE migration.
|
||||
// - Does not validate most dialect-specific constraints. For example, when changing column
|
||||
// data type, make sure the data can be auto-casted to the new type.
|
||||
// - Due to how the schema-state diff is calculated, it is not possible to rename a table and
|
||||
// modify any of its columns' _data type_ in a single run. This will cause the AutoMigrator
|
||||
// to drop and re-create the table under a different name; it is better to apply this change in 2 steps.
|
||||
// Renaming a table and renaming its columns at the same time is possible.
|
||||
// - Renaming table/column to an existing name, i.e. like this [A->B] [B->C], is not possible due to how
|
||||
// AutoMigrator distinguishes "rename" and "unchanged" columns.
|
||||
//
|
||||
// Dialect must implement both sqlschema.Inspector and sqlschema.Migrator to be used with AutoMigrator.
|
||||
type AutoMigrator struct {
|
||||
db *bun.DB
|
||||
|
||||
// dbInspector creates the current state for the target database.
|
||||
dbInspector sqlschema.Inspector
|
||||
|
||||
// modelInspector creates the desired state based on the model definitions.
|
||||
modelInspector sqlschema.Inspector
|
||||
|
||||
// dbMigrator executes ALTER TABLE queries.
|
||||
dbMigrator sqlschema.Migrator
|
||||
|
||||
table string // Migrations table (excluded from database inspection)
|
||||
locksTable string // Migration locks table (excluded from database inspection)
|
||||
|
||||
// schemaName is the database schema considered for migration.
|
||||
schemaName string
|
||||
|
||||
// includeModels define the migration scope.
|
||||
includeModels []interface{}
|
||||
|
||||
// excludeTables are excluded from database inspection.
|
||||
excludeTables []string
|
||||
|
||||
// diffOpts are passed to detector constructor.
|
||||
diffOpts []diffOption
|
||||
|
||||
// migratorOpts are passed to Migrator constructor.
|
||||
migratorOpts []MigratorOption
|
||||
|
||||
// migrationsOpts are passed to Migrations constructor.
|
||||
migrationsOpts []MigrationsOption
|
||||
}
|
||||
|
||||
func NewAutoMigrator(db *bun.DB, opts ...AutoMigratorOption) (*AutoMigrator, error) {
|
||||
am := &AutoMigrator{
|
||||
db: db,
|
||||
table: defaultTable,
|
||||
locksTable: defaultLocksTable,
|
||||
schemaName: db.Dialect().DefaultSchema(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(am)
|
||||
}
|
||||
am.excludeTables = append(am.excludeTables, am.table, am.locksTable)
|
||||
|
||||
dbInspector, err := sqlschema.NewInspector(db, sqlschema.WithSchemaName(am.schemaName), sqlschema.WithExcludeTables(am.excludeTables...))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
am.dbInspector = dbInspector
|
||||
am.diffOpts = append(am.diffOpts, withCompareTypeFunc(db.Dialect().(sqlschema.InspectorDialect).CompareType))
|
||||
|
||||
dbMigrator, err := sqlschema.NewMigrator(db, am.schemaName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
am.dbMigrator = dbMigrator
|
||||
|
||||
tables := schema.NewTables(db.Dialect())
|
||||
tables.Register(am.includeModels...)
|
||||
am.modelInspector = sqlschema.NewBunModelInspector(tables, sqlschema.WithSchemaName(am.schemaName))
|
||||
|
||||
return am, nil
|
||||
}
|
||||
|
||||
func (am *AutoMigrator) plan(ctx context.Context) (*changeset, error) {
|
||||
var err error
|
||||
|
||||
got, err := am.dbInspector.Inspect(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
want, err := am.modelInspector.Inspect(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes := diff(got, want, am.diffOpts...)
|
||||
if err := changes.ResolveDependencies(); err != nil {
|
||||
return nil, fmt.Errorf("plan migrations: %w", err)
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// Migrate writes required changes to a new migration file and runs the migration.
|
||||
// This will create an entry in the migrations table, making it possible to revert
|
||||
// the changes with Migrator.Rollback(). MigrationOptions are passed on to Migrator.Migrate().
|
||||
func (am *AutoMigrator) Migrate(ctx context.Context, opts ...MigrationOption) (*MigrationGroup, error) {
|
||||
migrations, _, err := am.createSQLMigrations(ctx, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("auto migrate: %w", err)
|
||||
}
|
||||
|
||||
migrator := NewMigrator(am.db, migrations, am.migratorOpts...)
|
||||
if err := migrator.Init(ctx); err != nil {
|
||||
return nil, fmt.Errorf("auto migrate: %w", err)
|
||||
}
|
||||
|
||||
group, err := migrator.Migrate(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("auto migrate: %w", err)
|
||||
}
|
||||
return group, nil
|
||||
}
|
||||
|
||||
// CreateSQLMigrations writes required changes to new migration files.
|
||||
// Use migrate.Migrator to apply the generated migrations.
|
||||
func (am *AutoMigrator) CreateSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
|
||||
_, files, err := am.createSQLMigrations(ctx, true)
|
||||
return files, err
|
||||
}
|
||||
|
||||
// CreateTxSQLMigrations writes required changes to new migration files, making sure they will be executed
|
||||
// in a transaction when applied. Use migrate.Migrator to apply the generated migrations.
|
||||
func (am *AutoMigrator) CreateTxSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
|
||||
_, files, err := am.createSQLMigrations(ctx, false)
|
||||
return files, err
|
||||
}
|
||||
|
||||
func (am *AutoMigrator) createSQLMigrations(ctx context.Context, transactional bool) (*Migrations, []*MigrationFile, error) {
|
||||
changes, err := am.plan(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("create sql migrations: %w", err)
|
||||
}
|
||||
|
||||
name, _ := genMigrationName(am.schemaName + "_auto")
|
||||
migrations := NewMigrations(am.migrationsOpts...)
|
||||
migrations.Add(Migration{
|
||||
Name: name,
|
||||
Up: changes.Up(am.dbMigrator),
|
||||
Down: changes.Down(am.dbMigrator),
|
||||
Comment: "Changes detected by bun.AutoMigrator",
|
||||
})
|
||||
|
||||
// Append .tx.up.sql or .up.sql to the migration name, depending on whether it should be transactional.
|
||||
fname := func(direction string) string {
|
||||
return name + map[bool]string{true: ".tx.", false: "."}[transactional] + direction + ".sql"
|
||||
}
|
||||
|
||||
up, err := am.createSQL(ctx, migrations, fname("up"), changes, transactional)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("create sql migration up: %w", err)
|
||||
}
|
||||
|
||||
down, err := am.createSQL(ctx, migrations, fname("down"), changes.GetReverse(), transactional)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("create sql migration down: %w", err)
|
||||
}
|
||||
return migrations, []*MigrationFile{up, down}, nil
|
||||
}
|
||||
|
||||
func (am *AutoMigrator) createSQL(_ context.Context, migrations *Migrations, fname string, changes *changeset, transactional bool) (*MigrationFile, error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
if transactional {
|
||||
buf.WriteString("SET statement_timeout = 0;")
|
||||
}
|
||||
|
||||
if err := changes.WriteTo(&buf, am.dbMigrator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
content := buf.Bytes()
|
||||
|
||||
fpath := filepath.Join(migrations.getDirectory(), fname)
|
||||
if err := os.WriteFile(fpath, content, 0o644); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mf := &MigrationFile{
|
||||
Name: fname,
|
||||
Path: fpath,
|
||||
Content: string(content),
|
||||
}
|
||||
return mf, nil
|
||||
}
|
||||
|
||||
// Func creates a MigrationFunc that applies all operations in the changeset.
|
||||
func (c *changeset) Func(m sqlschema.Migrator) MigrationFunc {
|
||||
return func(ctx context.Context, db *bun.DB) error {
|
||||
return c.apply(ctx, db, m)
|
||||
}
|
||||
}
|
||||
|
||||
// GetReverse returns a new changeset with each operation in it "reversed" and in reverse order.
|
||||
func (c *changeset) GetReverse() *changeset {
|
||||
var reverse changeset
|
||||
for i := len(c.operations) - 1; i >= 0; i-- {
|
||||
reverse.Add(c.operations[i].GetReverse())
|
||||
}
|
||||
return &reverse
|
||||
}
|
||||
|
||||
// Up is syntactic sugar.
|
||||
func (c *changeset) Up(m sqlschema.Migrator) MigrationFunc {
|
||||
return c.Func(m)
|
||||
}
|
||||
|
||||
// Down is syntactic sugar.
|
||||
func (c *changeset) Down(m sqlschema.Migrator) MigrationFunc {
|
||||
return c.GetReverse().Func(m)
|
||||
}
|
||||
|
||||
// apply generates SQL for each operation and executes it.
|
||||
func (c *changeset) apply(ctx context.Context, db *bun.DB, m sqlschema.Migrator) error {
|
||||
if len(c.operations) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, op := range c.operations {
|
||||
if _, isComment := op.(*comment); isComment {
|
||||
continue
|
||||
}
|
||||
|
||||
b := internal.MakeQueryBytes()
|
||||
b, err := m.AppendSQL(b, op)
|
||||
if err != nil {
|
||||
return fmt.Errorf("apply changes: %w", err)
|
||||
}
|
||||
|
||||
query := internal.String(b)
|
||||
if _, err = db.ExecContext(ctx, query); err != nil {
|
||||
return fmt.Errorf("apply changes: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *changeset) WriteTo(w io.Writer, m sqlschema.Migrator) error {
|
||||
var err error
|
||||
|
||||
b := internal.MakeQueryBytes()
|
||||
for _, op := range c.operations {
|
||||
if c, isComment := op.(*comment); isComment {
|
||||
b = append(b, "/*\n"...)
|
||||
b = append(b, *c...)
|
||||
b = append(b, "\n*/"...)
|
||||
continue
|
||||
}
|
||||
|
||||
b, err = m.AppendSQL(b, op)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write changeset: %w", err)
|
||||
}
|
||||
b = append(b, ";\n"...)
|
||||
}
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return fmt.Errorf("write changeset: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *changeset) ResolveDependencies() error {
|
||||
if len(c.operations) <= 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
unvisited = iota
|
||||
current
|
||||
visited
|
||||
)
|
||||
|
||||
status := make(map[Operation]int, len(c.operations))
|
||||
for _, op := range c.operations {
|
||||
status[op] = unvisited
|
||||
}
|
||||
|
||||
var resolved []Operation
|
||||
var nextOp Operation
|
||||
var visit func(op Operation) error
|
||||
|
||||
next := func() bool {
|
||||
for op, s := range status {
|
||||
if s == unvisited {
|
||||
nextOp = op
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// visit iterates over c.operations until it finds all operations that depend on the current one
|
||||
// or runs into a circular dependency, in which case it will return an error.
|
||||
visit = func(op Operation) error {
|
||||
switch status[op] {
|
||||
case visited:
|
||||
return nil
|
||||
case current:
|
||||
// TODO: add details (circle) to the error message
|
||||
return errors.New("detected circular dependency")
|
||||
}
|
||||
|
||||
status[op] = current
|
||||
|
||||
for _, another := range c.operations {
|
||||
if dop, hasDeps := another.(interface {
|
||||
DependsOn(Operation) bool
|
||||
}); another == op || !hasDeps || !dop.DependsOn(op) {
|
||||
continue
|
||||
}
|
||||
if err := visit(another); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
status[op] = visited
|
||||
|
||||
// Any dependent nodes would've already been added to the list by now, so we prepend.
|
||||
resolved = append([]Operation{op}, resolved...)
|
||||
return nil
|
||||
}
|
||||
|
||||
for next() {
|
||||
if err := visit(nextOp); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.operations = resolved
|
||||
return nil
|
||||
}
|
||||
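For orientation, here is a minimal sketch of the one-step flow described in the AutoMigrator doc comment above. It is not part of this change: the Book model, the table names, and the ready-made *bun.DB handle (whose dialect must implement sqlschema.Inspector and sqlschema.Migrator) are illustrative assumptions.

package migrations

import (
    "context"
    "log"

    "github.com/uptrace/bun"
    "github.com/uptrace/bun/migrate"
)

// Book is a hypothetical model; any bun model can be put in scope.
type Book struct {
    bun.BaseModel `bun:"table:books"`

    ID    int64 `bun:",pk,autoincrement"`
    Title string
}

func autoMigrate(ctx context.Context, db *bun.DB) error {
    auto, err := migrate.NewAutoMigrator(db,
        migrate.WithModel((*Book)(nil)),          // models that define the target schema
        migrate.WithExcludeTable("legacy_stats"), // keep unrelated tables out of the diff
    )
    if err != nil {
        return err
    }

    // Detect the schema diff, write the SQL migration files, and apply them at once.
    group, err := auto.Migrate(ctx)
    if err != nil {
        return err
    }
    log.Printf("applied migration group %v", group)
    return nil
}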
411 vendor/github.com/uptrace/bun/migrate/diff.go (generated, vendored, new file)

@@ -0,0 +1,411 @@
package migrate
|
||||
|
||||
import (
|
||||
"github.com/uptrace/bun/migrate/sqlschema"
|
||||
)
|
||||
|
||||
// changeset is a set of changes to the database schema definition.
|
||||
type changeset struct {
|
||||
operations []Operation
|
||||
}
|
||||
|
||||
// Add new operations to the changeset.
|
||||
func (c *changeset) Add(op ...Operation) {
|
||||
c.operations = append(c.operations, op...)
|
||||
}
|
||||
|
||||
// diff calculates the diff between the current database schema and the target state.
|
||||
// The changeset is not sorted -- the caller should resolve dependencies before applying the changes.
|
||||
func diff(got, want sqlschema.Database, opts ...diffOption) *changeset {
|
||||
d := newDetector(got, want, opts...)
|
||||
return d.detectChanges()
|
||||
}
|
||||
|
||||
func (d *detector) detectChanges() *changeset {
|
||||
currentTables := d.current.GetTables()
|
||||
targetTables := d.target.GetTables()
|
||||
|
||||
RenameCreate:
|
||||
for wantName, wantTable := range targetTables.FromOldest() {
|
||||
|
||||
// A table with this name exists in the database. We assume that schema objects won't
|
||||
// be renamed to an already existing name, nor do we support such cases.
|
||||
// Simply check if the table definition has changed.
|
||||
if haveTable, ok := currentTables.Get(wantName); ok {
|
||||
d.detectColumnChanges(haveTable, wantTable, true)
|
||||
d.detectConstraintChanges(haveTable, wantTable)
|
||||
continue
|
||||
}
|
||||
|
||||
// Find all renamed tables. We assume that renamed tables have the same signature.
|
||||
for haveName, haveTable := range currentTables.FromOldest() {
|
||||
if _, exists := targetTables.Get(haveName); !exists && d.canRename(haveTable, wantTable) {
|
||||
d.changes.Add(&RenameTableOp{
|
||||
TableName: haveTable.GetName(),
|
||||
NewName: wantName,
|
||||
})
|
||||
d.refMap.RenameTable(haveTable.GetName(), wantName)
|
||||
|
||||
// Find renamed columns, if any, and check if constraints (PK, UNIQUE) have been updated.
|
||||
// We need not check wantTable any further.
|
||||
d.detectColumnChanges(haveTable, wantTable, false)
|
||||
d.detectConstraintChanges(haveTable, wantTable)
|
||||
currentTables.Delete(haveName)
|
||||
continue RenameCreate
|
||||
}
|
||||
}
|
||||
|
||||
// If wantTable does not exist in the database and was not renamed
|
||||
// then we need to create this table in the database.
|
||||
additional := wantTable.(*sqlschema.BunTable)
|
||||
d.changes.Add(&CreateTableOp{
|
||||
TableName: wantTable.GetName(),
|
||||
Model: additional.Model,
|
||||
})
|
||||
}
|
||||
|
||||
// Drop any remaining "current" tables which do not have a model.
|
||||
for name, table := range currentTables.FromOldest() {
|
||||
if _, keep := targetTables.Get(name); !keep {
|
||||
d.changes.Add(&DropTableOp{
|
||||
TableName: table.GetName(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
targetFKs := d.target.GetForeignKeys()
|
||||
currentFKs := d.refMap.Deref()
|
||||
|
||||
for fk := range targetFKs {
|
||||
if _, ok := currentFKs[fk]; !ok {
|
||||
d.changes.Add(&AddForeignKeyOp{
|
||||
ForeignKey: fk,
|
||||
ConstraintName: "", // leave empty to let each dialect apply their convention
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for fk, name := range currentFKs {
|
||||
if _, ok := targetFKs[fk]; !ok {
|
||||
d.changes.Add(&DropForeignKeyOp{
|
||||
ConstraintName: name,
|
||||
ForeignKey: fk,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return &d.changes
|
||||
}
|
||||
|
||||
// detectColumnChanges finds renamed columns and, if checkType == true, columns with changed type.
|
||||
func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkType bool) {
|
||||
currentColumns := current.GetColumns()
|
||||
targetColumns := target.GetColumns()
|
||||
|
||||
ChangeRename:
|
||||
for tName, tCol := range targetColumns.FromOldest() {
|
||||
|
||||
// This column exists in the database, so it hasn't been renamed, dropped, or added.
|
||||
// Still, we should not delete(columns, thisColumn), because later we will need to
|
||||
// check that we do not try to rename a column to a name that already exists.
|
||||
if cCol, ok := currentColumns.Get(tName); ok {
|
||||
if checkType && !d.equalColumns(cCol, tCol) {
|
||||
d.changes.Add(&ChangeColumnTypeOp{
|
||||
TableName: target.GetName(),
|
||||
Column: tName,
|
||||
From: cCol,
|
||||
To: d.makeTargetColDef(cCol, tCol),
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Column tName does not exist in the database -- it's been either renamed or added.
|
||||
// Find renamed columns first.
|
||||
for cName, cCol := range currentColumns.FromOldest() {
|
||||
// Cannot rename if a column with this name already exists or the types differ.
|
||||
if _, exists := targetColumns.Get(cName); exists || !d.equalColumns(tCol, cCol) {
|
||||
continue
|
||||
}
|
||||
d.changes.Add(&RenameColumnOp{
|
||||
TableName: target.GetName(),
|
||||
OldName: cName,
|
||||
NewName: tName,
|
||||
})
|
||||
d.refMap.RenameColumn(target.GetName(), cName, tName)
|
||||
currentColumns.Delete(cName) // no need to check this column again
|
||||
|
||||
// Update primary key definition to avoid superficially recreating the constraint.
|
||||
current.GetPrimaryKey().Columns.Replace(cName, tName)
|
||||
|
||||
continue ChangeRename
|
||||
}
|
||||
|
||||
d.changes.Add(&AddColumnOp{
|
||||
TableName: target.GetName(),
|
||||
ColumnName: tName,
|
||||
Column: tCol,
|
||||
})
|
||||
}
|
||||
|
||||
// Drop columns which do not exist in the target schema and were not renamed.
|
||||
for cName, cCol := range currentColumns.FromOldest() {
|
||||
if _, keep := targetColumns.Get(cName); !keep {
|
||||
d.changes.Add(&DropColumnOp{
|
||||
TableName: target.GetName(),
|
||||
ColumnName: cName,
|
||||
Column: cCol,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *detector) detectConstraintChanges(current, target sqlschema.Table) {
|
||||
Add:
|
||||
for _, want := range target.GetUniqueConstraints() {
|
||||
for _, got := range current.GetUniqueConstraints() {
|
||||
if got.Equals(want) {
|
||||
continue Add
|
||||
}
|
||||
}
|
||||
d.changes.Add(&AddUniqueConstraintOp{
|
||||
TableName: target.GetName(),
|
||||
Unique: want,
|
||||
})
|
||||
}
|
||||
|
||||
Drop:
|
||||
for _, got := range current.GetUniqueConstraints() {
|
||||
for _, want := range target.GetUniqueConstraints() {
|
||||
if got.Equals(want) {
|
||||
continue Drop
|
||||
}
|
||||
}
|
||||
|
||||
d.changes.Add(&DropUniqueConstraintOp{
|
||||
TableName: target.GetName(),
|
||||
Unique: got,
|
||||
})
|
||||
}
|
||||
|
||||
targetPK := target.GetPrimaryKey()
|
||||
currentPK := current.GetPrimaryKey()
|
||||
|
||||
// Detect primary key changes
|
||||
if targetPK == nil && currentPK == nil {
|
||||
return
|
||||
}
|
||||
switch {
|
||||
case targetPK == nil && currentPK != nil:
|
||||
d.changes.Add(&DropPrimaryKeyOp{
|
||||
TableName: target.GetName(),
|
||||
PrimaryKey: *currentPK,
|
||||
})
|
||||
case currentPK == nil && targetPK != nil:
|
||||
d.changes.Add(&AddPrimaryKeyOp{
|
||||
TableName: target.GetName(),
|
||||
PrimaryKey: *targetPK,
|
||||
})
|
||||
case targetPK.Columns != currentPK.Columns:
|
||||
d.changes.Add(&ChangePrimaryKeyOp{
|
||||
TableName: target.GetName(),
|
||||
Old: *currentPK,
|
||||
New: *targetPK,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newDetector(got, want sqlschema.Database, opts ...diffOption) *detector {
|
||||
cfg := &detectorConfig{
|
||||
cmpType: func(c1, c2 sqlschema.Column) bool {
|
||||
return c1.GetSQLType() == c2.GetSQLType() && c1.GetVarcharLen() == c2.GetVarcharLen()
|
||||
},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(cfg)
|
||||
}
|
||||
|
||||
return &detector{
|
||||
current: got,
|
||||
target: want,
|
||||
refMap: newRefMap(got.GetForeignKeys()),
|
||||
cmpType: cfg.cmpType,
|
||||
}
|
||||
}
|
||||
|
||||
type diffOption func(*detectorConfig)
|
||||
|
||||
func withCompareTypeFunc(f CompareTypeFunc) diffOption {
|
||||
return func(cfg *detectorConfig) {
|
||||
cfg.cmpType = f
|
||||
}
|
||||
}
|
||||
|
||||
// detectorConfig controls how differences in the model states are resolved.
|
||||
type detectorConfig struct {
|
||||
cmpType CompareTypeFunc
|
||||
}
|
||||
|
||||
// detector may modify the passed database schemas, so it isn't safe to re-use them.
|
||||
type detector struct {
|
||||
// current state represents the existing database schema.
|
||||
current sqlschema.Database
|
||||
|
||||
// target state represents the database schema defined in bun models.
|
||||
target sqlschema.Database
|
||||
|
||||
changes changeset
|
||||
refMap refMap
|
||||
|
||||
// cmpType determines column type equivalence.
|
||||
// Default is direct comparison with '==' operator, which is inaccurate
|
||||
// due to the existence of dialect-specific type aliases. The caller
|
||||
// should pass a concrete InspectorDialect.CompareType for robust comparison.
|
||||
cmpType CompareTypeFunc
|
||||
}
|
||||
|
||||
// canRename checks if t1 can be renamed to t2.
|
||||
func (d detector) canRename(t1, t2 sqlschema.Table) bool {
|
||||
return t1.GetSchema() == t2.GetSchema() && equalSignatures(t1, t2, d.equalColumns)
|
||||
}
|
||||
|
||||
func (d detector) equalColumns(col1, col2 sqlschema.Column) bool {
|
||||
return d.cmpType(col1, col2) &&
|
||||
col1.GetDefaultValue() == col2.GetDefaultValue() &&
|
||||
col1.GetIsNullable() == col2.GetIsNullable() &&
|
||||
col1.GetIsAutoIncrement() == col2.GetIsAutoIncrement() &&
|
||||
col1.GetIsIdentity() == col2.GetIsIdentity()
|
||||
}
|
||||
|
||||
func (d detector) makeTargetColDef(current, target sqlschema.Column) sqlschema.Column {
|
||||
// Avoid unnecessary type-change migrations if the types are equivalent.
|
||||
if d.cmpType(current, target) {
|
||||
target = &sqlschema.BaseColumn{
|
||||
Name: target.GetName(),
|
||||
DefaultValue: target.GetDefaultValue(),
|
||||
IsNullable: target.GetIsNullable(),
|
||||
IsAutoIncrement: target.GetIsAutoIncrement(),
|
||||
IsIdentity: target.GetIsIdentity(),
|
||||
|
||||
SQLType: current.GetSQLType(),
|
||||
VarcharLen: current.GetVarcharLen(),
|
||||
}
|
||||
}
|
||||
return target
|
||||
}
|
||||
|
||||
type CompareTypeFunc func(sqlschema.Column, sqlschema.Column) bool
|
||||
|
||||
// equalSignatures determines if two tables have the same "signature".
|
||||
func equalSignatures(t1, t2 sqlschema.Table, eq CompareTypeFunc) bool {
|
||||
sig1 := newSignature(t1, eq)
|
||||
sig2 := newSignature(t2, eq)
|
||||
return sig1.Equals(sig2)
|
||||
}
|
||||
|
||||
// signature is a set of column definitions, which allows "relation/name-agnostic" comparison between them;
|
||||
// meaning that two columns are considered equal if their types are the same.
|
||||
type signature struct {
|
||||
|
||||
// underlying stores the number of occurrences for each unique column type.
|
||||
// It helps to account for the fact that a table might have multiple columns that have the same type.
|
||||
underlying map[sqlschema.BaseColumn]int
|
||||
|
||||
eq CompareTypeFunc
|
||||
}
|
||||
|
||||
func newSignature(t sqlschema.Table, eq CompareTypeFunc) signature {
|
||||
s := signature{
|
||||
underlying: make(map[sqlschema.BaseColumn]int),
|
||||
eq: eq,
|
||||
}
|
||||
s.scan(t)
|
||||
return s
|
||||
}
|
||||
|
||||
// scan iterates over the table's fields and counts occurrences of each unique column definition.
|
||||
func (s *signature) scan(t sqlschema.Table) {
|
||||
for _, icol := range t.GetColumns().FromOldest() {
|
||||
scanCol := icol.(*sqlschema.BaseColumn)
|
||||
// This is slightly more expensive than if the columns could be compared directly
|
||||
// and we always did s.underlying[col]++, but we get type-equivalence in return.
|
||||
col, count := s.getCount(*scanCol)
|
||||
if count == 0 {
|
||||
s.underlying[*scanCol] = 1
|
||||
} else {
|
||||
s.underlying[col]++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getCount uses CompareTypeFunc to find a column with the same (equivalent) SQL type
|
||||
// and returns its count. Count 0 means there are no columns of this type.
|
||||
func (s *signature) getCount(keyCol sqlschema.BaseColumn) (key sqlschema.BaseColumn, count int) {
|
||||
for col, cnt := range s.underlying {
|
||||
if s.eq(&col, &keyCol) {
|
||||
return col, cnt
|
||||
}
|
||||
}
|
||||
return keyCol, 0
|
||||
}
|
||||
|
||||
// Equals returns true if 2 signatures share an identical set of columns.
|
||||
func (s *signature) Equals(other signature) bool {
|
||||
if len(s.underlying) != len(other.underlying) {
|
||||
return false
|
||||
}
|
||||
for col, count := range s.underlying {
|
||||
if _, countOther := other.getCount(col); countOther != count {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// refMap is a utility for tracking superficial changes in foreign keys,
|
||||
// which do not require any modification in the database.
|
||||
// Modern SQL dialects automatically update foreign key constraints whenever
|
||||
// a column or a table is renamed. Detector can use refMap to ignore any
|
||||
// differences in foreign keys which were caused by renamed column/table.
|
||||
type refMap map[*sqlschema.ForeignKey]string
|
||||
|
||||
func newRefMap(fks map[sqlschema.ForeignKey]string) refMap {
|
||||
rm := make(map[*sqlschema.ForeignKey]string)
|
||||
for fk, name := range fks {
|
||||
rm[&fk] = name
|
||||
}
|
||||
return rm
|
||||
}
|
||||
|
||||
// RenameTable updates the table name in all foreign key definitions which depend on it.
|
||||
func (rm refMap) RenameTable(tableName string, newName string) {
|
||||
for fk := range rm {
|
||||
switch tableName {
|
||||
case fk.From.TableName:
|
||||
fk.From.TableName = newName
|
||||
case fk.To.TableName:
|
||||
fk.To.TableName = newName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RenameColumn updates the column name in all foreign key definitions which depend on it.
|
||||
func (rm refMap) RenameColumn(tableName string, column, newName string) {
|
||||
for fk := range rm {
|
||||
if tableName == fk.From.TableName {
|
||||
fk.From.Column.Replace(column, newName)
|
||||
}
|
||||
if tableName == fk.To.TableName {
|
||||
fk.To.Column.Replace(column, newName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Deref returns a map with copies of the ForeignKey values.
|
||||
func (rm refMap) Deref() map[sqlschema.ForeignKey]string {
|
||||
out := make(map[sqlschema.ForeignKey]string)
|
||||
for fk, name := range rm {
|
||||
out[*fk] = name
|
||||
}
|
||||
return out
|
||||
}
|
||||
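The changeset built by this diff step also feeds the two-step workflow that the auto.go comment recommends for non-trivial changes: generate the SQL files, review them, then apply them with the regular Migrator. A rough sketch under the same illustrative assumptions as the previous example; the "migrations" directory is an assumption and would be configured via WithMigrationsDirectoryAuto.

package migrations

import (
    "context"
    "log"
    "os"

    "github.com/uptrace/bun"
    "github.com/uptrace/bun/migrate"
)

func createAndApply(ctx context.Context, db *bun.DB, auto *migrate.AutoMigrator) error {
    // Step 1: write the detected changes out as reviewable .up.sql/.down.sql files.
    files, err := auto.CreateSQLMigrations(ctx)
    if err != nil {
        return err
    }
    for _, f := range files {
        log.Printf("wrote %s", f.Path)
    }

    // Step 2: apply them, possibly in a later deploy step, with the regular Migrator.
    migrations := migrate.NewMigrations()
    if err := migrations.Discover(os.DirFS("migrations")); err != nil {
        return err
    }
    migrator := migrate.NewMigrator(db, migrations)
    if err := migrator.Init(ctx); err != nil {
        return err
    }
    _, err = migrator.Migrate(ctx)
    return err
}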
23 vendor/github.com/uptrace/bun/migrate/migrator.go (generated, vendored)

@@ -12,14 +12,21 @@ import (
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultTable = "bun_migrations"
|
||||
defaultLocksTable = "bun_migration_locks"
|
||||
)
|
||||
|
||||
type MigratorOption func(m *Migrator)
|
||||
|
||||
// WithTableName overrides default migrations table name.
|
||||
func WithTableName(table string) MigratorOption {
|
||||
return func(m *Migrator) {
|
||||
m.table = table
|
||||
}
|
||||
}
|
||||
|
||||
// WithLocksTableName overrides default migration locks table name.
|
||||
func WithLocksTableName(table string) MigratorOption {
|
||||
return func(m *Migrator) {
|
||||
m.locksTable = table
|
||||
|
|
@@ -27,7 +34,7 @@ func WithLocksTableName(table string) MigratorOption {
|
|||
}
|
||||
|
||||
// WithMarkAppliedOnSuccess sets the migrator to only mark migrations as applied/unapplied
|
||||
// when their up/down is successful
|
||||
// when their up/down is successful.
|
||||
func WithMarkAppliedOnSuccess(enabled bool) MigratorOption {
|
||||
return func(m *Migrator) {
|
||||
m.markAppliedOnSuccess = enabled
|
||||
|
|
@@ -52,8 +59,8 @@ func NewMigrator(db *bun.DB, migrations *Migrations, opts ...MigratorOption) *Mi
|
|||
|
||||
ms: migrations.ms,
|
||||
|
||||
table: "bun_migrations",
|
||||
locksTable: "bun_migration_locks",
|
||||
table: defaultTable,
|
||||
locksTable: defaultLocksTable,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(m)
|
||||
|
|
@@ -246,7 +253,7 @@ func (m *Migrator) CreateGoMigration(
|
|||
opt(cfg)
|
||||
}
|
||||
|
||||
name, err := m.genMigrationName(name)
|
||||
name, err := genMigrationName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@@ -269,7 +276,7 @@ func (m *Migrator) CreateGoMigration(
|
|||
|
||||
// CreateTxSQLMigration creates transactional up and down SQL migration files.
|
||||
func (m *Migrator) CreateTxSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
|
||||
name, err := m.genMigrationName(name)
|
||||
name, err := genMigrationName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@@ -289,7 +296,7 @@ func (m *Migrator) CreateTxSQLMigrations(ctx context.Context, name string) ([]*M
|
|||
|
||||
// CreateSQLMigrations creates up and down SQL migration files.
|
||||
func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*MigrationFile, error) {
|
||||
name, err := m.genMigrationName(name)
|
||||
name, err := genMigrationName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@@ -307,7 +314,7 @@ func (m *Migrator) CreateSQLMigrations(ctx context.Context, name string) ([]*Mig
|
|||
return []*MigrationFile{up, down}, nil
|
||||
}
|
||||
|
||||
func (m *Migrator) createSQL(ctx context.Context, fname string, transactional bool) (*MigrationFile, error) {
|
||||
func (m *Migrator) createSQL(_ context.Context, fname string, transactional bool) (*MigrationFile, error) {
|
||||
fpath := filepath.Join(m.migrations.getDirectory(), fname)
|
||||
|
||||
template := sqlTemplate
|
||||
|
|
@@ -329,7 +336,7 @@ func (m *Migrator) createSQL(ctx context.Context, fname string, transactional bo
|
|||
|
||||
var nameRE = regexp.MustCompile(`^[0-9a-z_\-]+$`)
|
||||
|
||||
func (m *Migrator) genMigrationName(name string) (string, error) {
|
||||
func genMigrationName(name string) (string, error) {
|
||||
const timeFormat = "20060102150405"
|
||||
|
||||
if name == "" {
|
||||
|
|
|
|||
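The hunk above makes Migrator and AutoMigrator share the same default table names; when overriding them, the *Auto options defined in auto.go keep both sides in sync. A small hedged sketch with made-up table names:

package migrations

import (
    "github.com/uptrace/bun"
    "github.com/uptrace/bun/migrate"
)

// newAutoMigrator points both the AutoMigrator and its underlying Migrator
// at the same custom bookkeeping tables.
func newAutoMigrator(db *bun.DB) (*migrate.AutoMigrator, error) {
    return migrate.NewAutoMigrator(db,
        // Forwarded to the underlying Migrator as WithTableName / WithLocksTableName.
        migrate.WithTableNameAuto("custom_migrations"),
        migrate.WithLocksTableNameAuto("custom_migration_locks"),
    )
}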
340 vendor/github.com/uptrace/bun/migrate/operations.go (generated, vendored, new file)

@@ -0,0 +1,340 @@
package migrate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/uptrace/bun/migrate/sqlschema"
|
||||
)
|
||||
|
||||
// Operation encapsulates the request to change a database definition
|
||||
// and knows which operation can revert it.
|
||||
//
|
||||
// It is useful to define "monolith" Operations whenever possible,
|
||||
// even though a dialect may require several distinct steps to apply them.
|
||||
// For example, changing a primary key involves first dropping the old constraint
|
||||
// before generating the new one. Yet, this is only an implementation detail and
|
||||
// passing a higher-level ChangePrimaryKeyOp will give the dialect more information
|
||||
// about the applied change.
|
||||
//
|
||||
// Some operations might be irreversible due to technical limitations. Returning
|
||||
// a *comment from GetReverse() will add an explanatory note to the generated migration file.
|
||||
//
|
||||
// To declare dependency on another Operation, operations should implement
|
||||
// { DependsOn(Operation) bool } interface, which Changeset will use to resolve dependencies.
|
||||
type Operation interface {
|
||||
GetReverse() Operation
|
||||
}
|
||||
|
||||
// CreateTableOp creates a new table in the schema.
|
||||
//
|
||||
// It does not report dependency on any other migration and may be executed first.
|
||||
// Make sure the dialect does not include FOREIGN KEY constraints in the CREATE TABLE
|
||||
// statement, as those may potentially reference not-yet-existing columns/tables.
|
||||
type CreateTableOp struct {
|
||||
TableName string
|
||||
Model interface{}
|
||||
}
|
||||
|
||||
var _ Operation = (*CreateTableOp)(nil)
|
||||
|
||||
func (op *CreateTableOp) GetReverse() Operation {
|
||||
return &DropTableOp{TableName: op.TableName}
|
||||
}
|
||||
|
||||
// DropTableOp drops a database table. This operation is not reversible.
|
||||
type DropTableOp struct {
|
||||
TableName string
|
||||
}
|
||||
|
||||
var _ Operation = (*DropTableOp)(nil)
|
||||
|
||||
func (op *DropTableOp) DependsOn(another Operation) bool {
|
||||
drop, ok := another.(*DropForeignKeyOp)
|
||||
return ok && drop.ForeignKey.DependsOnTable(op.TableName)
|
||||
}
|
||||
|
||||
// GetReverse for a DropTable returns a no-op migration. Logically, CreateTable is the reverse,
|
||||
// but DropTable does not have the table's definition to create one.
|
||||
func (op *DropTableOp) GetReverse() Operation {
|
||||
c := comment(fmt.Sprintf("WARNING: \"DROP TABLE %s\" cannot be reversed automatically because table definition is not available", op.TableName))
|
||||
return &c
|
||||
}
|
||||
|
||||
// RenameTableOp renames the table. Changing the "schema" part of the table's FQN (moving tables between schemas) is not allowed.
|
||||
type RenameTableOp struct {
|
||||
TableName string
|
||||
NewName string
|
||||
}
|
||||
|
||||
var _ Operation = (*RenameTableOp)(nil)
|
||||
|
||||
func (op *RenameTableOp) GetReverse() Operation {
|
||||
return &RenameTableOp{
|
||||
TableName: op.NewName,
|
||||
NewName: op.TableName,
|
||||
}
|
||||
}
|
||||
|
||||
// RenameColumnOp renames a column in the table. If the changeset includes a rename operation
|
||||
// for the column's table, it should be executed first.
|
||||
type RenameColumnOp struct {
|
||||
TableName string
|
||||
OldName string
|
||||
NewName string
|
||||
}
|
||||
|
||||
var _ Operation = (*RenameColumnOp)(nil)
|
||||
|
||||
func (op *RenameColumnOp) GetReverse() Operation {
|
||||
return &RenameColumnOp{
|
||||
TableName: op.TableName,
|
||||
OldName: op.NewName,
|
||||
NewName: op.OldName,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *RenameColumnOp) DependsOn(another Operation) bool {
|
||||
rename, ok := another.(*RenameTableOp)
|
||||
return ok && op.TableName == rename.NewName
|
||||
}
|
||||
|
||||
// AddColumnOp adds a new column to the table.
|
||||
type AddColumnOp struct {
|
||||
TableName string
|
||||
ColumnName string
|
||||
Column sqlschema.Column
|
||||
}
|
||||
|
||||
var _ Operation = (*AddColumnOp)(nil)
|
||||
|
||||
func (op *AddColumnOp) GetReverse() Operation {
|
||||
return &DropColumnOp{
|
||||
TableName: op.TableName,
|
||||
ColumnName: op.ColumnName,
|
||||
Column: op.Column,
|
||||
}
|
||||
}
|
||||
|
||||
// DropColumnOp drops a column from the table.
|
||||
//
|
||||
// While some dialects allow DROP CASCADE to drop dependent constraints,
|
||||
// explicit handling on constraints is preferred for transparency and debugging.
|
||||
// DropColumnOp depends on DropForeignKeyOp, DropPrimaryKeyOp, and ChangePrimaryKeyOp
|
||||
// if any of the constraints is defined on this table.
|
||||
type DropColumnOp struct {
|
||||
TableName string
|
||||
ColumnName string
|
||||
Column sqlschema.Column
|
||||
}
|
||||
|
||||
var _ Operation = (*DropColumnOp)(nil)
|
||||
|
||||
func (op *DropColumnOp) GetReverse() Operation {
|
||||
return &AddColumnOp{
|
||||
TableName: op.TableName,
|
||||
ColumnName: op.ColumnName,
|
||||
Column: op.Column,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *DropColumnOp) DependsOn(another Operation) bool {
|
||||
switch drop := another.(type) {
|
||||
case *DropForeignKeyOp:
|
||||
return drop.ForeignKey.DependsOnColumn(op.TableName, op.ColumnName)
|
||||
case *DropPrimaryKeyOp:
|
||||
return op.TableName == drop.TableName && drop.PrimaryKey.Columns.Contains(op.ColumnName)
|
||||
case *ChangePrimaryKeyOp:
|
||||
return op.TableName == drop.TableName && drop.Old.Columns.Contains(op.ColumnName)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// AddForeignKeyOp adds a new FOREIGN KEY constraint.
|
||||
type AddForeignKeyOp struct {
|
||||
ForeignKey sqlschema.ForeignKey
|
||||
ConstraintName string
|
||||
}
|
||||
|
||||
var _ Operation = (*AddForeignKeyOp)(nil)
|
||||
|
||||
func (op *AddForeignKeyOp) TableName() string {
|
||||
return op.ForeignKey.From.TableName
|
||||
}
|
||||
|
||||
func (op *AddForeignKeyOp) DependsOn(another Operation) bool {
|
||||
switch another := another.(type) {
|
||||
case *RenameTableOp:
|
||||
return op.ForeignKey.DependsOnTable(another.TableName) || op.ForeignKey.DependsOnTable(another.NewName)
|
||||
case *CreateTableOp:
|
||||
return op.ForeignKey.DependsOnTable(another.TableName)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (op *AddForeignKeyOp) GetReverse() Operation {
|
||||
return &DropForeignKeyOp{
|
||||
ForeignKey: op.ForeignKey,
|
||||
ConstraintName: op.ConstraintName,
|
||||
}
|
||||
}
|
||||
|
||||
// DropForeignKeyOp drops a FOREIGN KEY constraint.
|
||||
type DropForeignKeyOp struct {
|
||||
ForeignKey sqlschema.ForeignKey
|
||||
ConstraintName string
|
||||
}
|
||||
|
||||
var _ Operation = (*DropForeignKeyOp)(nil)
|
||||
|
||||
func (op *DropForeignKeyOp) TableName() string {
|
||||
return op.ForeignKey.From.TableName
|
||||
}
|
||||
|
||||
func (op *DropForeignKeyOp) GetReverse() Operation {
|
||||
return &AddForeignKeyOp{
|
||||
ForeignKey: op.ForeignKey,
|
||||
ConstraintName: op.ConstraintName,
|
||||
}
|
||||
}
|
||||
|
||||
// AddUniqueConstraintOp adds a new UNIQUE constraint to the table.
|
||||
type AddUniqueConstraintOp struct {
|
||||
TableName string
|
||||
Unique sqlschema.Unique
|
||||
}
|
||||
|
||||
var _ Operation = (*AddUniqueConstraintOp)(nil)
|
||||
|
||||
func (op *AddUniqueConstraintOp) GetReverse() Operation {
|
||||
return &DropUniqueConstraintOp{
|
||||
TableName: op.TableName,
|
||||
Unique: op.Unique,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *AddUniqueConstraintOp) DependsOn(another Operation) bool {
|
||||
switch another := another.(type) {
|
||||
case *AddColumnOp:
|
||||
return op.TableName == another.TableName && op.Unique.Columns.Contains(another.ColumnName)
|
||||
case *RenameTableOp:
|
||||
return op.TableName == another.NewName
|
||||
case *DropUniqueConstraintOp:
|
||||
// We want to drop the constraint with the same name before adding this one.
|
||||
return op.TableName == another.TableName && op.Unique.Name == another.Unique.Name
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// DropUniqueConstraintOp drops a UNIQUE constraint.
|
||||
type DropUniqueConstraintOp struct {
|
||||
TableName string
|
||||
Unique sqlschema.Unique
|
||||
}
|
||||
|
||||
var _ Operation = (*DropUniqueConstraintOp)(nil)
|
||||
|
||||
func (op *DropUniqueConstraintOp) DependsOn(another Operation) bool {
|
||||
if rename, ok := another.(*RenameTableOp); ok {
|
||||
return op.TableName == rename.NewName
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (op *DropUniqueConstraintOp) GetReverse() Operation {
|
||||
return &AddUniqueConstraintOp{
|
||||
TableName: op.TableName,
|
||||
Unique: op.Unique,
|
||||
}
|
||||
}
|
||||
|
||||
// ChangeColumnTypeOp sets a new data type for the column.
|
||||
// The two types should be such that the data can be auto-casted from one to another.
|
||||
// E.g. reducing VARCHAR length is not possible in most dialects.
|
||||
// AutoMigrator does not enforce or validate these rules.
|
||||
type ChangeColumnTypeOp struct {
|
||||
TableName string
|
||||
Column string
|
||||
From sqlschema.Column
|
||||
To sqlschema.Column
|
||||
}
|
||||
|
||||
var _ Operation = (*ChangeColumnTypeOp)(nil)
|
||||
|
||||
func (op *ChangeColumnTypeOp) GetReverse() Operation {
|
||||
return &ChangeColumnTypeOp{
|
||||
TableName: op.TableName,
|
||||
Column: op.Column,
|
||||
From: op.To,
|
||||
To: op.From,
|
||||
}
|
||||
}
|
||||
|
||||
// DropPrimaryKeyOp drops the table's PRIMARY KEY.
|
||||
type DropPrimaryKeyOp struct {
|
||||
TableName string
|
||||
PrimaryKey sqlschema.PrimaryKey
|
||||
}
|
||||
|
||||
var _ Operation = (*DropPrimaryKeyOp)(nil)
|
||||
|
||||
func (op *DropPrimaryKeyOp) GetReverse() Operation {
|
||||
return &AddPrimaryKeyOp{
|
||||
TableName: op.TableName,
|
||||
PrimaryKey: op.PrimaryKey,
|
||||
}
|
||||
}
|
||||
|
||||
// AddPrimaryKeyOp adds a new PRIMARY KEY to the table.
|
||||
type AddPrimaryKeyOp struct {
|
||||
TableName string
|
||||
PrimaryKey sqlschema.PrimaryKey
|
||||
}
|
||||
|
||||
var _ Operation = (*AddPrimaryKeyOp)(nil)
|
||||
|
||||
func (op *AddPrimaryKeyOp) GetReverse() Operation {
|
||||
return &DropPrimaryKeyOp{
|
||||
TableName: op.TableName,
|
||||
PrimaryKey: op.PrimaryKey,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *AddPrimaryKeyOp) DependsOn(another Operation) bool {
|
||||
switch another := another.(type) {
|
||||
case *AddColumnOp:
|
||||
return op.TableName == another.TableName && op.PrimaryKey.Columns.Contains(another.ColumnName)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ChangePrimaryKeyOp changes the PRIMARY KEY of the table.
|
||||
type ChangePrimaryKeyOp struct {
|
||||
TableName string
|
||||
Old sqlschema.PrimaryKey
|
||||
New sqlschema.PrimaryKey
|
||||
}
|
||||
|
||||
var _ Operation = (*ChangePrimaryKeyOp)(nil)
|
||||
|
||||
func (op *ChangePrimaryKeyOp) GetReverse() Operation {
|
||||
return &ChangePrimaryKeyOp{
|
||||
TableName: op.TableName,
|
||||
Old: op.New,
|
||||
New: op.Old,
|
||||
}
|
||||
}
|
||||
|
||||
// comment denotes an Operation that cannot be executed.
|
||||
//
|
||||
// Operations, which cannot be reversed due to current technical limitations,
|
||||
// may return &comment with a helpful message from their GetReverse() method.
|
||||
//
|
||||
// The changeset should skip it when applying operations or output it as a log message,
|
||||
// and write it as an SQL comment when creating migration files.
|
||||
type comment string
|
||||
|
||||
var _ Operation = (*comment)(nil)
|
||||
|
||||
func (c *comment) GetReverse() Operation { return c }
|
||||
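As a quick illustration of the GetReverse/DependsOn contract described in the Operation doc comment above, the exported operation types can be exercised directly. This is not part of the change; the table and column names are made up.

package migrations

import (
    "fmt"

    "github.com/uptrace/bun/migrate"
    "github.com/uptrace/bun/migrate/sqlschema"
)

func dependsOnDemo() {
    createUsers := &migrate.CreateTableOp{TableName: "users"}
    addFK := &migrate.AddForeignKeyOp{
        ForeignKey: sqlschema.ForeignKey{
            From: sqlschema.NewColumnReference("books", "author_id"),
            To:   sqlschema.NewColumnReference("users", "id"),
        },
    }

    // The foreign key references the freshly created table, so the changeset
    // must order AddForeignKeyOp after CreateTableOp.
    fmt.Println(addFK.DependsOn(createUsers)) // true

    // Reversing the FK addition yields the corresponding drop operation.
    fmt.Printf("%T\n", addFK.GetReverse()) // *migrate.DropForeignKeyOp
}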
75 vendor/github.com/uptrace/bun/migrate/sqlschema/column.go (generated, vendored, new file)

@@ -0,0 +1,75 @@
package sqlschema
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
type Column interface {
|
||||
GetName() string
|
||||
GetSQLType() string
|
||||
GetVarcharLen() int
|
||||
GetDefaultValue() string
|
||||
GetIsNullable() bool
|
||||
GetIsAutoIncrement() bool
|
||||
GetIsIdentity() bool
|
||||
AppendQuery(schema.Formatter, []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
var _ Column = (*BaseColumn)(nil)
|
||||
|
||||
// BaseColumn is a base column definition that stores various attributes of a column.
|
||||
//
|
||||
// Dialects and only dialects can use it to implement the Column interface.
|
||||
// Other packages must use the Column interface.
|
||||
type BaseColumn struct {
|
||||
Name string
|
||||
SQLType string
|
||||
VarcharLen int
|
||||
DefaultValue string
|
||||
IsNullable bool
|
||||
IsAutoIncrement bool
|
||||
IsIdentity bool
|
||||
// TODO: add Precision and Cardinality for timestamps/bit-strings/floats and arrays respectively.
|
||||
}
|
||||
|
||||
func (cd BaseColumn) GetName() string {
|
||||
return cd.Name
|
||||
}
|
||||
|
||||
func (cd BaseColumn) GetSQLType() string {
|
||||
return cd.SQLType
|
||||
}
|
||||
|
||||
func (cd BaseColumn) GetVarcharLen() int {
|
||||
return cd.VarcharLen
|
||||
}
|
||||
|
||||
func (cd BaseColumn) GetDefaultValue() string {
|
||||
return cd.DefaultValue
|
||||
}
|
||||
|
||||
func (cd BaseColumn) GetIsNullable() bool {
|
||||
return cd.IsNullable
|
||||
}
|
||||
|
||||
func (cd BaseColumn) GetIsAutoIncrement() bool {
|
||||
return cd.IsAutoIncrement
|
||||
}
|
||||
|
||||
func (cd BaseColumn) GetIsIdentity() bool {
|
||||
return cd.IsIdentity
|
||||
}
|
||||
|
||||
// AppendQuery appends full SQL data type.
|
||||
func (c *BaseColumn) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, err error) {
|
||||
b = append(b, c.SQLType...)
|
||||
if c.VarcharLen == 0 {
|
||||
return b, nil
|
||||
}
|
||||
b = append(b, "("...)
|
||||
b = append(b, fmt.Sprint(c.VarcharLen)...)
|
||||
b = append(b, ")"...)
|
||||
return b, nil
|
||||
}
|
||||
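To make the AppendQuery behaviour above concrete, here is a tiny hedged sketch of what it renders for a length-qualified type; the no-op formatter from bun/schema is used only because the method needs a Formatter argument.

package migrations

import (
    "fmt"

    "github.com/uptrace/bun/migrate/sqlschema"
    "github.com/uptrace/bun/schema"
)

func columnTypeDemo() {
    col := &sqlschema.BaseColumn{SQLType: "varchar", VarcharLen: 255}

    // AppendQuery appends the full SQL data type, including the VARCHAR length.
    b, err := col.AppendQuery(schema.NewNopFormatter(), nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // varchar(255)
}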
127 vendor/github.com/uptrace/bun/migrate/sqlschema/database.go (generated, vendored, new file)

@@ -0,0 +1,127 @@
package sqlschema
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
orderedmap "github.com/wk8/go-ordered-map/v2"
|
||||
)
|
||||
|
||||
type Database interface {
|
||||
GetTables() *orderedmap.OrderedMap[string, Table]
|
||||
GetForeignKeys() map[ForeignKey]string
|
||||
}
|
||||
|
||||
var _ Database = (*BaseDatabase)(nil)
|
||||
|
||||
// BaseDatabase is a base database definition.
|
||||
//
|
||||
// Dialects and only dialects can use it to implement the Database interface.
|
||||
// Other packages must use the Database interface.
|
||||
type BaseDatabase struct {
|
||||
Tables *orderedmap.OrderedMap[string, Table]
|
||||
ForeignKeys map[ForeignKey]string
|
||||
}
|
||||
|
||||
func (ds BaseDatabase) GetTables() *orderedmap.OrderedMap[string, Table] {
|
||||
return ds.Tables
|
||||
}
|
||||
|
||||
func (ds BaseDatabase) GetForeignKeys() map[ForeignKey]string {
|
||||
return ds.ForeignKeys
|
||||
}
|
||||
|
||||
type ForeignKey struct {
|
||||
From ColumnReference
|
||||
To ColumnReference
|
||||
}
|
||||
|
||||
func NewColumnReference(tableName string, columns ...string) ColumnReference {
|
||||
return ColumnReference{
|
||||
TableName: tableName,
|
||||
Column: NewColumns(columns...),
|
||||
}
|
||||
}
|
||||
|
||||
func (fk ForeignKey) DependsOnTable(tableName string) bool {
|
||||
return fk.From.TableName == tableName || fk.To.TableName == tableName
|
||||
}
|
||||
|
||||
func (fk ForeignKey) DependsOnColumn(tableName string, column string) bool {
|
||||
return fk.DependsOnTable(tableName) &&
|
||||
(fk.From.Column.Contains(column) || fk.To.Column.Contains(column))
|
||||
}
|
||||
|
||||
// Columns is a hashable representation of []string used to define schema constraints that depend on multiple columns.
|
||||
// Although having duplicated column references in these constraints is illegal, Columns neither validates nor enforces this constraint on the caller.
|
||||
type Columns string
|
||||
|
||||
// NewColumns creates a composite column from a slice of column names.
|
||||
func NewColumns(columns ...string) Columns {
|
||||
slices.Sort(columns)
|
||||
return Columns(strings.Join(columns, ","))
|
||||
}
|
||||
|
||||
func (c *Columns) String() string {
|
||||
return string(*c)
|
||||
}
|
||||
|
||||
func (c *Columns) AppendQuery(fmter schema.Formatter, b []byte) ([]byte, error) {
|
||||
return schema.Safe(*c).AppendQuery(fmter, b)
|
||||
}
|
||||
|
||||
// Split returns a slice of column names that make up the composite.
|
||||
func (c *Columns) Split() []string {
|
||||
return strings.Split(c.String(), ",")
|
||||
}
|
||||
|
||||
// ContainsColumns checks that columns in "other" are a subset of current columns.
|
||||
func (c *Columns) ContainsColumns(other Columns) bool {
|
||||
columns := c.Split()
|
||||
Outer:
|
||||
for _, check := range other.Split() {
|
||||
for _, column := range columns {
|
||||
if check == column {
|
||||
continue Outer
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Contains checks that the composite column contains the given column.
|
||||
func (c *Columns) Contains(other string) bool {
|
||||
return c.ContainsColumns(Columns(other))
|
||||
}
|
||||
|
||||
// Replace renames a column if it is part of the composite.
|
||||
// If a composite consists of multiple columns, only one column will be renamed.
|
||||
func (c *Columns) Replace(oldColumn, newColumn string) bool {
|
||||
columns := c.Split()
|
||||
for i, column := range columns {
|
||||
if column == oldColumn {
|
||||
columns[i] = newColumn
|
||||
*c = NewColumns(columns...)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Unique represents a unique constraint defined on 1 or more columns.
|
||||
type Unique struct {
|
||||
Name string
|
||||
Columns Columns
|
||||
}
|
||||
|
||||
// Equals checks that two unique constraints are the same, assuming both are defined for the same table.
|
||||
func (u Unique) Equals(other Unique) bool {
|
||||
return u.Columns == other.Columns
|
||||
}
|
||||
|
||||
type ColumnReference struct {
|
||||
TableName string
|
||||
Column Columns
|
||||
}
|
||||
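The Columns helper above normalizes column order, which is what allows composite constraints and foreign keys to be compared and renamed reliably. A short hedged sketch with made-up column names:

package migrations

import (
    "fmt"

    "github.com/uptrace/bun/migrate/sqlschema"
)

func columnsDemo() {
    // NewColumns sorts the names, so declaration order does not matter.
    a := sqlschema.NewColumns("title", "author_id")
    b := sqlschema.NewColumns("author_id", "title")
    fmt.Println(a == b) // true

    // Replace renames a single member of the composite in place.
    a.Replace("author_id", "user_id")
    fmt.Println(a.Contains("user_id")) // true
}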
241 vendor/github.com/uptrace/bun/migrate/sqlschema/inspector.go (generated, vendored, new file)

@@ -0,0 +1,241 @@
package sqlschema
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/schema"
|
||||
orderedmap "github.com/wk8/go-ordered-map/v2"
|
||||
)
|
||||
|
||||
type InspectorDialect interface {
|
||||
schema.Dialect
|
||||
|
||||
// Inspector returns a new instance of Inspector for the dialect.
|
||||
// Dialects MAY set their default InspectorConfig values in constructor
|
||||
// but MUST apply InspectorOptions to ensure they can be overridden.
|
||||
//
|
||||
// Use ApplyInspectorOptions to reduce boilerplate.
|
||||
NewInspector(db *bun.DB, options ...InspectorOption) Inspector
|
||||
|
||||
// CompareType returns true if col1 and col2 SQL types are equivalent,
|
||||
// i.e. they might use dialect-specific type aliases (SERIAL ~ SMALLINT)
|
||||
// or specify the same VARCHAR length differently (VARCHAR(255) ~ VARCHAR).
|
||||
CompareType(Column, Column) bool
|
||||
}
|
||||
|
||||
// InspectorConfig controls the scope of migration by limiting the objects Inspector should return.
|
||||
// Inspectors SHOULD use the configuration directly instead of copying it, or MAY choose to embed it,
|
||||
// to make sure options are always applied correctly.
|
||||
type InspectorConfig struct {
|
||||
// SchemaName limits inspection to tables in a particular schema.
|
||||
SchemaName string
|
||||
|
||||
// ExcludeTables from inspection.
|
||||
ExcludeTables []string
|
||||
}
|
||||
|
||||
// Inspector reads schema state.
|
||||
type Inspector interface {
|
||||
Inspect(ctx context.Context) (Database, error)
|
||||
}
|
||||
|
||||
func WithSchemaName(schemaName string) InspectorOption {
|
||||
return func(cfg *InspectorConfig) {
|
||||
cfg.SchemaName = schemaName
|
||||
}
|
||||
}
|
||||
|
||||
// WithExcludeTables works in append-only mode, i.e. tables cannot be re-included.
|
||||
func WithExcludeTables(tables ...string) InspectorOption {
|
||||
return func(cfg *InspectorConfig) {
|
||||
cfg.ExcludeTables = append(cfg.ExcludeTables, tables...)
|
||||
}
|
||||
}
|
||||
|
||||
// NewInspector creates a new database inspector, if the dialect supports it.
|
||||
func NewInspector(db *bun.DB, options ...InspectorOption) (Inspector, error) {
|
||||
dialect, ok := (db.Dialect()).(InspectorDialect)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s does not implement sqlschema.Inspector", db.Dialect().Name())
|
||||
}
|
||||
return &inspector{
|
||||
Inspector: dialect.NewInspector(db, options...),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewBunModelInspector(tables *schema.Tables, options ...InspectorOption) *BunModelInspector {
|
||||
bmi := &BunModelInspector{
|
||||
tables: tables,
|
||||
}
|
||||
ApplyInspectorOptions(&bmi.InspectorConfig, options...)
|
||||
return bmi
|
||||
}
|
||||
|
||||
type InspectorOption func(*InspectorConfig)
|
||||
|
||||
func ApplyInspectorOptions(cfg *InspectorConfig, options ...InspectorOption) {
|
||||
for _, opt := range options {
|
||||
opt(cfg)
|
||||
}
|
||||
}
|
||||
|
||||
// inspector is an opaque pointer to a database inspector.
|
||||
type inspector struct {
|
||||
Inspector
|
||||
}
|
||||
|
||||
// BunModelInspector creates the current project state from the passed bun.Models.
|
||||
// Do not recycle BunModelInspector for different sets of models, as older models will not be de-registered before the next run.
|
||||
type BunModelInspector struct {
|
||||
InspectorConfig
|
||||
tables *schema.Tables
|
||||
}
|
||||
|
||||
var _ Inspector = (*BunModelInspector)(nil)
|
||||
|
||||
func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) {
	state := BunModelSchema{
		BaseDatabase: BaseDatabase{
			ForeignKeys: make(map[ForeignKey]string),
		},
		Tables: orderedmap.New[string, Table](),
	}
	for _, t := range bmi.tables.All() {
		if t.Schema != bmi.SchemaName {
			continue
		}

		columns := orderedmap.New[string, Column]()
		for _, f := range t.Fields {
			sqlType, length, err := parseLen(f.CreateTableSQLType)
			if err != nil {
				return nil, fmt.Errorf("parse length in %q: %w", f.CreateTableSQLType, err)
			}
			columns.Set(f.Name, &BaseColumn{
				Name:            f.Name,
				SQLType:         strings.ToLower(sqlType), // TODO(dyma): maybe this is not necessary after Column.Eq()
				VarcharLen:      length,
				DefaultValue:    exprToLower(f.SQLDefault),
				IsNullable:      !f.NotNull,
				IsAutoIncrement: f.AutoIncrement,
				IsIdentity:      f.Identity,
			})
		}

		var unique []Unique
		for name, group := range t.Unique {
			// Create a separate unique index for single-column unique constraints
			// and let each dialect apply the default naming convention.
			if name == "" {
				for _, f := range group {
					unique = append(unique, Unique{Columns: NewColumns(f.Name)})
				}
				continue
			}

			// Set the name if it is a "unique group", in which case the user has provided the name.
			var columns []string
			for _, f := range group {
				columns = append(columns, f.Name)
			}
			unique = append(unique, Unique{Name: name, Columns: NewColumns(columns...)})
		}

		var pk *PrimaryKey
		if len(t.PKs) > 0 {
			var columns []string
			for _, f := range t.PKs {
				columns = append(columns, f.Name)
			}
			pk = &PrimaryKey{Columns: NewColumns(columns...)}
		}

		// In cases where a table is defined in a non-default schema in the `bun:table` tag,
		// schema.Table only extracts the name of the schema, but passes the entire tag value to t.Name
		// for backwards-compatibility. For example, a bun model like this:
		//	type Model struct { bun.BaseModel `bun:"table:favourite.books"` }
		// produces
		//	schema.Table{ Schema: "favourite", Name: "favourite.books" }
		tableName := strings.TrimPrefix(t.Name, t.Schema+".")
		state.Tables.Set(tableName, &BunTable{
			BaseTable: BaseTable{
				Schema:            t.Schema,
				Name:              tableName,
				Columns:           columns,
				UniqueConstraints: unique,
				PrimaryKey:        pk,
			},
			Model: t.ZeroIface,
		})

		for _, rel := range t.Relations {
			// These relations are nominal and do not need a foreign key to be declared in the current table.
			// They will either be expressed as N:1 relations in an m2m mapping table, or will be referenced by the other table if it's a 1:N.
			if rel.Type == schema.ManyToManyRelation ||
				rel.Type == schema.HasManyRelation {
				continue
			}

			var fromCols, toCols []string
			for _, f := range rel.BasePKs {
				fromCols = append(fromCols, f.Name)
			}
			for _, f := range rel.JoinPKs {
				toCols = append(toCols, f.Name)
			}

			target := rel.JoinTable
			state.ForeignKeys[ForeignKey{
				From: NewColumnReference(t.Name, fromCols...),
				To:   NewColumnReference(target.Name, toCols...),
			}] = ""
		}
	}
	return state, nil
}

// parseLen splits an SQL type of the form "TYPE(n)" into the base type and its length.
// Types without a parenthesized length are returned as-is with a length of 0.
func parseLen(typ string) (string, int, error) {
	paren := strings.Index(typ, "(")
	if paren == -1 {
		return typ, 0, nil
	}
	length, err := strconv.Atoi(typ[paren+1 : len(typ)-1])
	if err != nil {
		return typ, 0, err
	}
	return typ[:paren], length, nil
}

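// Illustrative examples of parseLen's behaviour (derived from the implementation above):
//
//	base, n, _ := parseLen("varchar(255)") // base == "varchar", n == 255
//	base, n, _ = parseLen("bigint")        // base == "bigint", n == 0
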
// exprToLower converts a default-value expression to lowercase, unless it is a quoted
// string literal such as 'lit', in which case it is returned unchanged.
// Use it to ensure that user-defined default values in the models are always comparable
// to those returned by the database inspector, regardless of the case convention in individual drivers.
func exprToLower(s string) string {
	if strings.HasPrefix(s, "'") && strings.HasSuffix(s, "'") {
		return s
	}
	return strings.ToLower(s)
}

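// Illustrative examples of exprToLower's behaviour (derived from the implementation above):
//
//	exprToLower("CURRENT_TIMESTAMP") // "current_timestamp" -- expressions are lowercased
//	exprToLower("'Draft'")           // "'Draft'" -- quoted literals keep their case
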
// BunModelSchema is the schema state derived from bun table models.
type BunModelSchema struct {
	BaseDatabase

	Tables *orderedmap.OrderedMap[string, Table]
}

func (ms BunModelSchema) GetTables() *orderedmap.OrderedMap[string, Table] {
	return ms.Tables
}

// BunTable provides additional table metadata that is only accessible from scanning bun models.
type BunTable struct {
	BaseTable

	// Model stores the zero interface to the underlying Go struct.
	Model interface{}
}
49
vendor/github.com/uptrace/bun/migrate/sqlschema/migrator.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
package sqlschema

import (
	"fmt"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/schema"
)

type MigratorDialect interface {
	schema.Dialect
	NewMigrator(db *bun.DB, schemaName string) Migrator
}

type Migrator interface {
	AppendSQL(b []byte, operation interface{}) ([]byte, error)
}

// migrator is a dialect-agnostic wrapper for sqlschema.MigratorDialect.
type migrator struct {
	Migrator
}

// NewMigrator creates a new schema migrator, if the dialect supports it.
func NewMigrator(db *bun.DB, schemaName string) (Migrator, error) {
	md, ok := db.Dialect().(MigratorDialect)
	if !ok {
		return nil, fmt.Errorf("%q dialect does not implement sqlschema.Migrator", db.Dialect().Name())
	}
	return &migrator{
		Migrator: md.NewMigrator(db, schemaName),
	}, nil
}

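// Illustrative sketch (not part of the vendored file): NewMigrator is used to render
// a schema-change operation into SQL. The db and operation variables are assumptions
// for the sketch; the concrete operation types are defined elsewhere in the migrate
// package.
//
//	m, err := NewMigrator(db, "public")
//	if err != nil {
//		return err // the dialect cannot generate schema-change SQL
//	}
//	sql, err := m.AppendSQL(nil, operation)
//	if err != nil {
//		return err
//	}
//	fmt.Printf("%s\n", sql)
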
// BaseMigrator can be embedded by a dialect's Migrator implementation to re-use some of the existing bun queries.
type BaseMigrator struct {
	db *bun.DB
}

func NewBaseMigrator(db *bun.DB) *BaseMigrator {
	return &BaseMigrator{db: db}
}

// AppendCreateTable appends a CREATE TABLE query for the model to b.
func (m *BaseMigrator) AppendCreateTable(b []byte, model interface{}) ([]byte, error) {
	return m.db.NewCreateTable().Model(model).AppendQuery(m.db.Formatter(), b)
}

// AppendDropTable appends a DROP TABLE query for the given table to b.
func (m *BaseMigrator) AppendDropTable(b []byte, schemaName, tableName string) ([]byte, error) {
	return m.db.NewDropTable().TableExpr("?.?", bun.Ident(schemaName), bun.Ident(tableName)).AppendQuery(m.db.Formatter(), b)
}
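// Illustrative sketch (not part of the vendored file): a dialect's Migrator can embed
// BaseMigrator and delegate table creation to it. The hypotheticalMigrator and
// CreateTableOp names are assumptions for the sketch; the real operation types live
// in the migrate package.
//
//	type hypotheticalMigrator struct {
//		*BaseMigrator
//	}
//
//	func (m *hypotheticalMigrator) AppendSQL(b []byte, operation interface{}) ([]byte, error) {
//		switch op := operation.(type) {
//		case *CreateTableOp:
//			return m.AppendCreateTable(b, op.Model)
//		default:
//			return nil, fmt.Errorf("unsupported operation %T", operation)
//		}
//	}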
60
vendor/github.com/uptrace/bun/migrate/sqlschema/table.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
package sqlschema

import (
	orderedmap "github.com/wk8/go-ordered-map/v2"
)

type Table interface {
	GetSchema() string
	GetName() string
	GetColumns() *orderedmap.OrderedMap[string, Column]
	GetPrimaryKey() *PrimaryKey
	GetUniqueConstraints() []Unique
}

var _ Table = (*BaseTable)(nil)

// BaseTable is a base table definition.
//
// Dialects, and only dialects, can use it to implement the Table interface.
// Other packages must use the Table interface.
type BaseTable struct {
	Schema string
	Name   string

	// Columns maps each column name to its column definition.
	Columns *orderedmap.OrderedMap[string, Column]

	// PrimaryKey holds the primary key definition.
	// A nil value means that no primary key is defined for the table.
	PrimaryKey *PrimaryKey

	// UniqueConstraints defined on the table.
	UniqueConstraints []Unique
}
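// Illustrative sketch (not part of the vendored file): constructing a BaseTable by
// hand, e.g. inside a dialect's inspector. The schema, table, and column values are
// assumptions for the sketch.
//
//	cols := orderedmap.New[string, Column]()
//	cols.Set("id", &BaseColumn{Name: "id", SQLType: "bigint", IsIdentity: true})
//	table := &BaseTable{
//		Schema:     "public",
//		Name:       "books",
//		Columns:    cols,
//		PrimaryKey: &PrimaryKey{Columns: NewColumns("id")},
//	}
//	_ = table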

// PrimaryKey represents a primary key constraint defined on 1 or more columns.
type PrimaryKey struct {
	Name    string
	Columns Columns
}

func (td *BaseTable) GetSchema() string {
	return td.Schema
}

func (td *BaseTable) GetName() string {
	return td.Name
}

func (td *BaseTable) GetColumns() *orderedmap.OrderedMap[string, Column] {
	return td.Columns
}

func (td *BaseTable) GetPrimaryKey() *PrimaryKey {
	return td.PrimaryKey
}

func (td *BaseTable) GetUniqueConstraints() []Unique {
	return td.UniqueConstraints
}