[chore] update go dependencies (#4304)

- github.com/KimMachineGun/automemlimit v0.7.2 => v0.7.3
- github.com/gin-contrib/cors v1.7.5 => v1.7.6
- github.com/minio/minio-go/v7 v7.0.92 => v7.0.94
- github.com/spf13/cast v1.8.0 => v1.9.2
- github.com/uptrace/bun{,/*} v1.2.11 => v1.2.14
- golang.org/x/image v0.27.0 => v0.28.0
- golang.org/x/net v0.40.0 => v0.41.0
- code.superseriousbusiness.org/go-swagger v0.31.0-gts-go1.23-fix => v0.32.3-gts-go1.23-fix

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4304
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
This commit is contained in:
kim 2025-06-30 15:19:09 +02:00 committed by kim
commit 8b0ea56027
294 changed files with 139999 additions and 21873 deletions

View file

@ -1,3 +1,52 @@
## [1.2.14](https://github.com/uptrace/bun/compare/v1.2.13...v1.2.14) (2025-06-16)
### Bug Fixes
* restore q.limit check ([07d32c1](https://github.com/uptrace/bun/commit/07d32c1662015a398322fdbc0dc34c5f0d10ce44))
## [1.2.13](https://github.com/uptrace/bun/compare/v1.2.12...v1.2.13) (2025-06-11)
### Bug Fixes
* **query:** scanAndCount without model ([07fb7ec](https://github.com/uptrace/bun/commit/07fb7ec540979d0625cfeb771a0679c5982c6e2a)), closes [#1209](https://github.com/uptrace/bun/issues/1209)
* sort fk constraints before appending ([c87fa90](https://github.com/uptrace/bun/commit/c87fa903c56743e24a2cb677e8e96fd5c802fba5))
* use slices sort ([8555900](https://github.com/uptrace/bun/commit/8555900ad840d9b6e73c8655af4f1b6766bc943b))
## [1.2.12](https://github.com/uptrace/bun/compare/v1.2.11...v1.2.12) (2025-06-05)
### Bug Fixes
* **automigrate:** append SQL to separate []byte slices ([f44a349](https://github.com/uptrace/bun/commit/f44a349ec61b09f9f0240a923e121cbaa3ab1d14))
* **gh-1160:** add WithExcludeForeignKeys option ([63141cb](https://github.com/uptrace/bun/commit/63141cb6c9a6d0d2abf4b41eac5b1c6078884326)), closes [#1160](https://github.com/uptrace/bun/issues/1160)
* iss-824 to allow mssql to support non unicode strings ([0565763](https://github.com/uptrace/bun/commit/056576355a0a7ff75f616cedb5d81144f6657a6a))
* **migrations:** skip template rendering if no data + fix tests ([4055827](https://github.com/uptrace/bun/commit/4055827e1af4f0b7e13879d393c1131ab497d962))
* **pgdriver:** rename channelOverflowHandler to ChannelOverflowHandler for public API ([65760a9](https://github.com/uptrace/bun/commit/65760a9e648a1ae379982e5d8737d6d864f6a8e3))
* relation join data race ([37971d7](https://github.com/uptrace/bun/commit/37971d7f83042ab83e52be1c122083f8a98a1efa))
* report BIGSERIAL ~ BIGINT in pgdialect ([ad7356a](https://github.com/uptrace/bun/commit/ad7356a772324950cf866b86d23771fc53f83505))
* skip automigrator test early ([5b22710](https://github.com/uptrace/bun/commit/5b22710f0b4d980ebec38fcd306bf459dc1eb615))
* start sequence with last+1 ([7fbf34a](https://github.com/uptrace/bun/commit/7fbf34a69ff249c72af522331a4f6116f240630a))
### Features
* add support for netip.Addr and netip.Prefix ([63ccc8f](https://github.com/uptrace/bun/commit/63ccc8f530092c3dfc71179b94a43db452fa54ec))
* exclude tables using LIKE pattern ([5351f7e](https://github.com/uptrace/bun/commit/5351f7ed4fe53662386e697cc551ba54487da018))
* **migrations:** support Go templates in SQL migrations ([d92e29e](https://github.com/uptrace/bun/commit/d92e29e459ae2804ad48e1b4f6a8147211a47a57))
* **pg:** allow user config buffer size of pg's connect ([e2f2650](https://github.com/uptrace/bun/commit/e2f2650950d13442d45694b7cd186b77b4e8e0bb)), closes [#1201](https://github.com/uptrace/bun/issues/1201)
* **pgdriver:** add option for tracing ([80c5e3c](https://github.com/uptrace/bun/commit/80c5e3c684c410dfc02170cfb8671bb8b1db2e35)), closes [#1150](https://github.com/uptrace/bun/issues/1150)
* **pgdriver:** add overflow handler to listener channel ([6f0e3a1](https://github.com/uptrace/bun/commit/6f0e3a1d33de5a61625d22ba6464bfe5da404a11))
* set notnull=true for autoincrement columns ([1bd5dd7](https://github.com/uptrace/bun/commit/1bd5dd73ce943235a403c5896b6e70401b194093))
* support changing column type to SERIAL ([136b480](https://github.com/uptrace/bun/commit/136b480e6835dd9a12b4925f57225fb73d0aa7ae))
## [1.2.11](https://github.com/uptrace/bun/compare/v1.2.10...v1.2.11) (2025-03-05)

View file

@ -6,6 +6,7 @@ test:
echo "go test in $${dir}"; \
(cd "$${dir}" && \
go test && \
env RACETEST=1 go test -race && \
env GOOS=linux GOARCH=386 TZ= go test && \
go vet); \
done

View file

@ -1,6 +1,7 @@
package pgdialect
import (
"context"
"fmt"
"strings"
@ -57,6 +58,11 @@ func (m *migrator) AppendSQL(b []byte, operation interface{}) (_ []byte, err err
case *migrate.DropUniqueConstraintOp:
b, err = m.dropConstraint(fmter, appendAlterTable(b, change.TableName), change.Unique.Name)
case *migrate.ChangeColumnTypeOp:
// If column changes to SERIAL, create sequence first.
// https://gist.github.com/oleglomako/185df689706c5499612a0d54d3ffe856
if !change.From.GetIsAutoIncrement() && change.To.GetIsAutoIncrement() {
change.To, b, err = m.createDefaultSequence(fmter, b, change)
}
b, err = m.changeColumnType(fmter, appendAlterTable(b, change.TableName), change)
case *migrate.AddForeignKeyOp:
b, err = m.addForeignKey(fmter, appendAlterTable(b, change.TableName()), change)
@ -187,6 +193,39 @@ func (m *migrator) addForeignKey(fmter schema.Formatter, b []byte, add *migrate.
return b, nil
}
// createDefaultSequence creates a SEQUENCE to back a serial column.
// Having a backing sequence is necessary to change column type to SERIAL.
// The updated Column's default is set to "nextval" of the new sequence.
//
// NOTE(review): the sequence is started at MAX(column)+1 so that existing
// rows keep their values and newly inserted rows do not collide with them.
func (m *migrator) createDefaultSequence(_ schema.Formatter, b []byte, op *migrate.ChangeColumnTypeOp) (_ sqlschema.Column, _ []byte, err error) {
	// Query the current maximum value of the column; the new sequence
	// starts one past it.
	// NOTE(review): if the table is empty, MAX(...) yields NULL — confirm
	// that scanning NULL into an int does not fail here.
	// NOTE(review): op.Column is bound via a ? placeholder — confirm the
	// dialect renders it as an identifier rather than a string literal.
	var last int
	if err = m.db.NewSelect().Table(op.TableName).
		ColumnExpr("MAX(?)", op.Column).Scan(context.TODO(), &last); err != nil {
		return nil, b, err
	}

	// Name of the backing sequence and the fully-qualified column it owns.
	seq := op.TableName + "_" + op.Column + "_seq"
	fqn := op.TableName + "." + op.Column

	// A sequence that is OWNED BY a table will be dropped
	// if the table is dropped with CASCADE action.
	b = append(b, "CREATE SEQUENCE "...)
	b = append(b, seq...)
	b = append(b, " START WITH "...)
	b = append(b, fmt.Sprint(last+1)...) // start with next value
	b = append(b, " OWNED BY "...)
	b = append(b, fqn...)
	b = append(b, ";\n"...)

	// Return a copy of the target column definition whose default draws
	// values from the newly created sequence.
	return &Column{
		Name:            op.To.GetName(),
		SQLType:         op.To.GetSQLType(),
		VarcharLen:      op.To.GetVarcharLen(),
		DefaultValue:    fmt.Sprintf("nextval('%s'::regclass)", seq),
		IsNullable:      op.To.GetIsNullable(),
		IsAutoIncrement: op.To.GetIsAutoIncrement(),
		IsIdentity:      op.To.GetIsIdentity(),
	}, b, nil
}
func (m *migrator) changeColumnType(fmter schema.Formatter, b []byte, colDef *migrate.ChangeColumnTypeOp) (_ []byte, err error) {
// alterColumn never re-assigns err, so there is no need to check for err != nil after calling it
var i int

View file

@ -5,7 +5,6 @@ import (
"strings"
"github.com/uptrace/bun"
"github.com/uptrace/bun/internal/ordered"
"github.com/uptrace/bun/migrate/sqlschema"
)
@ -34,13 +33,12 @@ func newInspector(db *bun.DB, options ...sqlschema.InspectorOption) *Inspector {
func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
dbSchema := Schema{
Tables: ordered.NewMap[string, sqlschema.Table](),
ForeignKeys: make(map[sqlschema.ForeignKey]string),
}
exclude := in.ExcludeTables
if len(exclude) == 0 {
// Avoid getting NOT IN (NULL) if bun.In() is called with an empty slice.
// Avoid getting NOT LIKE ALL (ARRAY[NULL]) if bun.In() is called with an empty slice.
exclude = []string{""}
}
@ -61,7 +59,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
return dbSchema, err
}
colDefs := ordered.NewMap[string, sqlschema.Column]()
var colDefs []sqlschema.Column
uniqueGroups := make(map[string][]string)
for _, c := range columns {
@ -72,7 +70,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
def = strings.ToLower(def)
}
colDefs.Store(c.Name, &Column{
colDefs = append(colDefs, &Column{
Name: c.Name,
SQLType: c.DataType,
VarcharLen: c.VarcharLen,
@ -103,7 +101,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
}
}
dbSchema.Tables.Store(table.Name, &Table{
dbSchema.Tables = append(dbSchema.Tables, &Table{
Schema: table.Schema,
Name: table.Name,
Columns: colDefs,
@ -113,10 +111,14 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
}
for _, fk := range fks {
dbSchema.ForeignKeys[sqlschema.ForeignKey{
dbFK := sqlschema.ForeignKey{
From: sqlschema.NewColumnReference(fk.SourceTable, fk.SourceColumns...),
To: sqlschema.NewColumnReference(fk.TargetTable, fk.TargetColumns...),
}] = fk.ConstraintName
}
if _, exclude := in.ExcludeForeignKeys[dbFK]; exclude {
continue
}
dbSchema.ForeignKeys[dbFK] = fk.ConstraintName
}
return dbSchema, nil
}
@ -185,7 +187,7 @@ FROM information_schema.tables "t"
WHERE table_type = 'BASE TABLE'
AND "t".table_schema = ?
AND "t".table_schema NOT LIKE 'pg_%'
AND "table_name" NOT IN (?)
AND "table_name" NOT LIKE ALL (ARRAY[?])
ORDER BY "t".table_schema, "t".table_name
`
@ -291,7 +293,8 @@ WHERE co.contype = 'f'
AND co.conrelid IN (SELECT oid FROM pg_class WHERE relkind = 'r')
AND ARRAY_POSITION(co.conkey, sc.attnum) = ARRAY_POSITION(co.confkey, tc.attnum)
AND ss.nspname = ?
AND s.relname NOT IN (?) AND "t".relname NOT IN (?)
AND s.relname NOT LIKE ALL (ARRAY[?])
AND "t".relname NOT LIKE ALL (ARRAY[?])
GROUP BY "constraint_name", "schema_name", "table_name", target_schema, target_table
`
)

View file

@ -127,6 +127,9 @@ var (
char = newAliases(pgTypeChar, pgTypeCharacter)
varchar = newAliases(pgTypeVarchar, pgTypeCharacterVarying)
timestampTz = newAliases(sqltype.Timestamp, pgTypeTimestampTz, pgTypeTimestampWithTz)
bigint = newAliases(sqltype.BigInt, pgTypeBigSerial)
integer = newAliases(sqltype.Integer, pgTypeSerial)
smallint = newAliases(sqltype.SmallInt, pgTypeSmallSerial)
)
func (d *Dialect) CompareType(col1, col2 sqlschema.Column) bool {
@ -143,6 +146,10 @@ func (d *Dialect) CompareType(col1, col2 sqlschema.Column) bool {
return checkVarcharLen(col1, col2, d.DefaultVarcharLen())
case timestampTz.IsAlias(typ1) && timestampTz.IsAlias(typ2):
return true
case bigint.IsAlias(typ1) && bigint.IsAlias(typ2),
integer.IsAlias(typ1) && integer.IsAlias(typ2),
smallint.IsAlias(typ1) && smallint.IsAlias(typ2):
return true
}
return false
}

View file

@ -2,5 +2,5 @@ package pgdialect
// Version is the current release version.
func Version() string {
return "1.2.11"
return "1.2.14"
}

View file

@ -2,5 +2,5 @@ package sqlitedialect
// Version is the current release version.
func Version() string {
return "1.2.11"
return "1.2.14"
}

View file

@ -17,17 +17,21 @@ import (
type AutoMigratorOption func(m *AutoMigrator)
// WithModel adds a bun.Model to the scope of migrations.
// WithModel adds a bun.Model to the migration scope.
func WithModel(models ...interface{}) AutoMigratorOption {
return func(m *AutoMigrator) {
m.includeModels = append(m.includeModels, models...)
}
}
// WithExcludeTable tells the AutoMigrator to ignore a table in the database.
// WithExcludeTable tells AutoMigrator to exclude database tables from the migration scope.
// This prevents AutoMigrator from dropping tables which may exist in the schema
// but which are not used by the application.
//
// Expressions may make use of the wildcards supported by the SQL LIKE operator:
// - % as a wildcard
// - _ as a single character
//
// Do not exclude tables included via WithModel, as BunModelInspector ignores this setting.
func WithExcludeTable(tables ...string) AutoMigratorOption {
return func(m *AutoMigrator) {
@ -35,7 +39,17 @@ func WithExcludeTable(tables ...string) AutoMigratorOption {
}
}
// WithSchemaName changes the default database schema to migrate objects in.
// WithExcludeForeignKeys tells AutoMigrator to exclude a foreign key constraint
// from the migration scope. This prevents AutoMigrator from dropping foreign keys
// that are defined manually via CreateTableQuery.ForeignKey().
func WithExcludeForeignKeys(fks ...sqlschema.ForeignKey) AutoMigratorOption {
	return func(m *AutoMigrator) {
		m.excludeForeignKeys = append(m.excludeForeignKeys, fks...)
	}
}
// WithSchemaName sets the database schema to migrate objects in.
// By default, dialects' default schema is used.
func WithSchemaName(schemaName string) AutoMigratorOption {
return func(m *AutoMigrator) {
m.schemaName = schemaName
@ -82,7 +96,7 @@ func WithMigrationsDirectoryAuto(directory string) AutoMigratorOption {
// database schema automatically.
//
// Usage:
// 1. Generate migrations and apply them au once with AutoMigrator.Migrate().
// 1. Generate migrations and apply them at once with AutoMigrator.Migrate().
// 2. Create up- and down-SQL migration files and apply migrations using Migrator.Migrate().
//
// While both methods produce complete, reversible migrations (with entries in the database
@ -124,8 +138,8 @@ type AutoMigrator struct {
// includeModels define the migration scope.
includeModels []interface{}
// excludeTables are excluded from database inspection.
excludeTables []string
excludeTables []string // excludeTables are excluded from database inspection.
excludeForeignKeys []sqlschema.ForeignKey // excludeForeignKeys are excluded from database inspection.
// diffOpts are passed to detector constructor.
diffOpts []diffOption
@ -150,7 +164,11 @@ func NewAutoMigrator(db *bun.DB, opts ...AutoMigratorOption) (*AutoMigrator, err
}
am.excludeTables = append(am.excludeTables, am.table, am.locksTable)
dbInspector, err := sqlschema.NewInspector(db, sqlschema.WithSchemaName(am.schemaName), sqlschema.WithExcludeTables(am.excludeTables...))
dbInspector, err := sqlschema.NewInspector(db,
sqlschema.WithSchemaName(am.schemaName),
sqlschema.WithExcludeTables(am.excludeTables...),
sqlschema.WithExcludeForeignKeys(am.excludeForeignKeys...),
)
if err != nil {
return nil, err
}
@ -252,12 +270,12 @@ func (am *AutoMigrator) createSQLMigrations(ctx context.Context, transactional b
migrations := NewMigrations(am.migrationsOpts...)
migrations.Add(Migration{
Name: name,
Up: changes.Up(am.dbMigrator),
Down: changes.Down(am.dbMigrator),
Up: wrapMigrationFunc(changes.Up(am.dbMigrator)),
Down: wrapMigrationFunc(changes.Down(am.dbMigrator)),
Comment: "Changes detected by bun.AutoMigrator",
})
// Append .tx.up.sql or .up.sql to migration name, dependin if it should be transactional.
// Append .tx.up.sql or .up.sql to migration name, depending if it should be transactional.
fname := func(direction string) string {
return name + map[bool]string{true: ".tx.", false: "."}[transactional] + direction + ".sql"
}
@ -336,7 +354,7 @@ func (c *changeset) apply(ctx context.Context, db *bun.DB, m sqlschema.Migrator)
}
for _, op := range c.operations {
if _, isComment := op.(*comment); isComment {
if _, skip := op.(*Unimplemented); skip {
continue
}
@ -359,17 +377,22 @@ func (c *changeset) WriteTo(w io.Writer, m sqlschema.Migrator) error {
b := internal.MakeQueryBytes()
for _, op := range c.operations {
if c, isComment := op.(*comment); isComment {
if comment, isComment := op.(*Unimplemented); isComment {
b = append(b, "/*\n"...)
b = append(b, *c...)
b = append(b, *comment...)
b = append(b, "\n*/"...)
continue
}
b, err = m.AppendSQL(b, op)
// Append each query separately, merge later.
// Dialects assume that the []byte only holds
// the contents of a single query and may be misled.
queryBytes := internal.MakeQueryBytes()
queryBytes, err = m.AppendSQL(queryBytes, op)
if err != nil {
return fmt.Errorf("write changeset: %w", err)
}
b = append(b, queryBytes...)
b = append(b, ";\n"...)
}
if _, err := w.Write(b); err != nil {
@ -409,7 +432,7 @@ func (c *changeset) ResolveDependencies() error {
}
// visit iterates over c.operations until it finds all operations that depend on the current one
// or runs into cirtular dependency, in which case it will return an error.
// or runs into circular dependency, in which case it will return an error.
visit = func(op Operation) error {
switch status[op] {
case visited:

View file

@ -1,6 +1,7 @@
package migrate
import (
"github.com/uptrace/bun/internal/ordered"
"github.com/uptrace/bun/migrate/sqlschema"
)
@ -22,8 +23,8 @@ func diff(got, want sqlschema.Database, opts ...diffOption) *changeset {
}
func (d *detector) detectChanges() *changeset {
currentTables := d.current.GetTables()
targetTables := d.target.GetTables()
currentTables := toOrderedMap(d.current.GetTables())
targetTables := toOrderedMap(d.target.GetTables())
RenameCreate:
for _, wantPair := range targetTables.Pairs() {
@ -99,10 +100,19 @@ RenameCreate:
return &d.changes
}
// detechColumnChanges finds renamed columns and, if checkType == true, columns with changed type.
// toOrderedMap builds an ordered map from the given slice, keyed by each
// element's GetName() result. Entries keep the slice's iteration order.
func toOrderedMap[V interface{ GetName() string }](named []V) *ordered.Map[string, V] {
	result := ordered.NewMap[string, V]()
	for i := range named {
		result.Store(named[i].GetName(), named[i])
	}
	return result
}
// detectColumnChanges finds renamed columns and, if checkType == true, columns with changed type.
func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkType bool) {
currentColumns := current.GetColumns()
targetColumns := target.GetColumns()
currentColumns := toOrderedMap(current.GetColumns())
targetColumns := toOrderedMap(target.GetColumns())
ChangeRename:
for _, tPair := range targetColumns.Pairs() {
@ -265,7 +275,7 @@ type detector struct {
// cmpType determines column type equivalence.
// Default is direct comparison with '==' operator, which is inaccurate
// due to the existence of dialect-specific type aliases. The caller
// should pass a concrete InspectorDialect.EquuivalentType for robust comparison.
// should pass a concrete InspectorDialect.EquivalentType for robust comparison.
cmpType CompareTypeFunc
}
@ -283,7 +293,7 @@ func (d detector) equalColumns(col1, col2 sqlschema.Column) bool {
}
func (d detector) makeTargetColDef(current, target sqlschema.Column) sqlschema.Column {
// Avoid unneccessary type-change migrations if the types are equivalent.
// Avoid unnecessary type-change migrations if the types are equivalent.
if d.cmpType(current, target) {
target = &sqlschema.BaseColumn{
Name: target.GetName(),
@ -311,8 +321,7 @@ func equalSignatures(t1, t2 sqlschema.Table, eq CompareTypeFunc) bool {
// signature is a set of column definitions, which allows "relation/name-agnostic" comparison between them;
// meaning that two columns are considered equal if their types are the same.
type signature struct {
// underlying stores the number of occurences for each unique column type.
// underlying stores the number of occurrences for each unique column type.
// It helps to account for the fact that a table might have multiple columns that have the same type.
underlying map[sqlschema.BaseColumn]int
@ -330,7 +339,7 @@ func newSignature(t sqlschema.Table, eq CompareTypeFunc) signature {
// scan iterates over table's field and counts occurrences of each unique column definition.
func (s *signature) scan(t sqlschema.Table) {
for _, icol := range t.GetColumns().Values() {
for _, icol := range t.GetColumns() {
scanCol := icol.(*sqlschema.BaseColumn)
// This is slightly more expensive than if the columns could be compared directly
// and we always did s.underlying[col]++, but we get type-equivalence in return.
@ -368,7 +377,7 @@ func (s *signature) Equals(other signature) bool {
}
// refMap is a utility for tracking superficial changes in foreign keys,
// which do not require any modificiation in the database.
// which do not require any modification in the database.
// Modern SQL dialects automatically updated foreign key constraints whenever
// a column or a table is renamed. Detector can use refMap to ignore any
// differences in foreign keys which were caused by renamed column/table.

View file

@ -9,6 +9,7 @@ import (
"io/fs"
"sort"
"strings"
"text/template"
"time"
"github.com/uptrace/bun"
@ -23,8 +24,8 @@ type Migration struct {
GroupID int64
MigratedAt time.Time `bun:",notnull,nullzero,default:current_timestamp"`
Up MigrationFunc `bun:"-"`
Down MigrationFunc `bun:"-"`
Up internalMigrationFunc `bun:"-"`
Down internalMigrationFunc `bun:"-"`
}
func (m Migration) String() string {
@ -35,23 +36,57 @@ func (m Migration) IsApplied() bool {
return m.ID > 0
}
type internalMigrationFunc func(ctx context.Context, db *bun.DB, templateData any) error
type MigrationFunc func(ctx context.Context, db *bun.DB) error
func NewSQLMigrationFunc(fsys fs.FS, name string) MigrationFunc {
return func(ctx context.Context, db *bun.DB) error {
func NewSQLMigrationFunc(fsys fs.FS, name string) internalMigrationFunc {
return func(ctx context.Context, db *bun.DB, templateData any) error {
f, err := fsys.Open(name)
if err != nil {
return err
}
isTx := strings.HasSuffix(name, ".tx.up.sql") || strings.HasSuffix(name, ".tx.down.sql")
return Exec(ctx, db, f, isTx)
return Exec(ctx, db, f, templateData, isTx)
}
}
// wrapMigrationFunc adapts a public MigrationFunc to the internal migration
// signature. The template data argument is ignored, as plain Go migrations
// do not use SQL templates.
func wrapMigrationFunc(fn MigrationFunc) internalMigrationFunc {
	return func(ctx context.Context, db *bun.DB, templateData any) error {
		return fn(ctx, db)
	}
}
// renderTemplate parses contents as a text/template and executes it with
// templateData, returning a buffer holding the rendered output.
func renderTemplate(contents []byte, templateData any) (*bytes.Buffer, error) {
	tmpl, err := template.New("migration").Parse(string(contents))
	if err != nil {
		return nil, fmt.Errorf("failed to parse template: %w", err)
	}

	out := new(bytes.Buffer)
	if execErr := tmpl.Execute(out, templateData); execErr != nil {
		return nil, fmt.Errorf("failed to execute template: %w", execErr)
	}
	return out, nil
}
// Exec reads and executes the SQL migration in the f.
func Exec(ctx context.Context, db *bun.DB, f io.Reader, isTx bool) error {
scanner := bufio.NewScanner(f)
func Exec(ctx context.Context, db *bun.DB, f io.Reader, templateData any, isTx bool) error {
contents, err := io.ReadAll(f)
if err != nil {
return err
}
var reader io.Reader = bytes.NewReader(contents)
if templateData != nil {
buf, err := renderTemplate(contents, templateData)
if err != nil {
return err
}
reader = buf
}
scanner := bufio.NewScanner(reader)
var queries []string
var query []byte

View file

@ -58,8 +58,8 @@ func (m *Migrations) Register(up, down MigrationFunc) error {
m.Add(Migration{
Name: name,
Comment: comment,
Up: up,
Down: down,
Up: wrapMigrationFunc(up),
Down: wrapMigrationFunc(down),
})
return nil

View file

@ -41,6 +41,12 @@ func WithMarkAppliedOnSuccess(enabled bool) MigratorOption {
}
}
// WithTemplateData sets the data passed to SQL migration templates when the
// Migrator renders them during Migrate and Rollback.
func WithTemplateData(data any) MigratorOption {
	return func(m *Migrator) {
		m.templateData = data
	}
}
type Migrator struct {
db *bun.DB
migrations *Migrations
@ -50,6 +56,8 @@ type Migrator struct {
table string
locksTable string
markAppliedOnSuccess bool
templateData any
}
func NewMigrator(db *bun.DB, migrations *Migrations, opts ...MigratorOption) *Migrator {
@ -168,7 +176,7 @@ func (m *Migrator) Migrate(ctx context.Context, opts ...MigrationOption) (*Migra
group.Migrations = migrations[:i+1]
if !cfg.nop && migration.Up != nil {
if err := migration.Up(ctx, m.db); err != nil {
if err := migration.Up(ctx, m.db, m.templateData); err != nil {
return group, err
}
}
@ -207,7 +215,7 @@ func (m *Migrator) Rollback(ctx context.Context, opts ...MigrationOption) (*Migr
}
if !cfg.nop && migration.Down != nil {
if err := migration.Down(ctx, m.db); err != nil {
if err := migration.Down(ctx, m.db, m.templateData); err != nil {
return lastGroup, err
}
}

View file

@ -17,7 +17,7 @@ import (
// about the applied change.
//
// Some operations might be irreversible due to technical limitations. Returning
// a *comment from GetReverse() will add an explanatory note to the generate migation file.
// a *comment from GetReverse() will add an explanatory note to the generated migration file.
//
// To declare dependency on another Operation, operations should implement
// { DependsOn(Operation) bool } interface, which Changeset will use to resolve dependencies.
@ -56,7 +56,7 @@ func (op *DropTableOp) DependsOn(another Operation) bool {
// GetReverse for a DropTable returns a no-op migration. Logically, CreateTable is the reverse,
// but DropTable does not have the table's definition to create one.
func (op *DropTableOp) GetReverse() Operation {
c := comment(fmt.Sprintf("WARNING: \"DROP TABLE %s\" cannot be reversed automatically because table definition is not available", op.TableName))
c := Unimplemented(fmt.Sprintf("WARNING: \"DROP TABLE %s\" cannot be reversed automatically because table definition is not available", op.TableName))
return &c
}
@ -224,7 +224,6 @@ func (op *AddUniqueConstraintOp) DependsOn(another Operation) bool {
default:
return false
}
}
// DropUniqueConstraintOp drops a UNIQUE constraint.
@ -326,15 +325,15 @@ func (op *ChangePrimaryKeyOp) GetReverse() Operation {
}
}
// comment denotes an Operation that cannot be executed.
// Unimplemented denotes an Operation that cannot be executed.
//
// Operations, which cannot be reversed due to current technical limitations,
// may return &comment with a helpful message from their GetReverse() method.
// may have their GetReverse() return &Unimplemented with a helpful message.
//
// Chnagelog should skip it when applying operations or output as log message,
// and write it as an SQL comment when creating migration files.
type comment string
// When applying operations, changelog should skip it or output as a log message,
// and write it as an SQL Unimplemented when creating migration files.
type Unimplemented string
var _ Operation = (*comment)(nil)
var _ Operation = (*Unimplemented)(nil)
func (c *comment) GetReverse() Operation { return c }
func (reason *Unimplemented) GetReverse() Operation { return reason }

View file

@ -4,12 +4,11 @@ import (
"slices"
"strings"
"github.com/uptrace/bun/internal/ordered"
"github.com/uptrace/bun/schema"
)
type Database interface {
GetTables() *ordered.Map[string, Table]
GetTables() []Table
GetForeignKeys() map[ForeignKey]string
}
@ -20,11 +19,11 @@ var _ Database = (*BaseDatabase)(nil)
// Dialects and only dialects can use it to implement the Database interface.
// Other packages must use the Database interface.
type BaseDatabase struct {
Tables *ordered.Map[string, Table]
Tables []Table
ForeignKeys map[ForeignKey]string
}
func (ds BaseDatabase) GetTables() *ordered.Map[string, Table] {
func (ds BaseDatabase) GetTables() []Table {
return ds.Tables
}

View file

@ -7,7 +7,6 @@ import (
"strings"
"github.com/uptrace/bun"
"github.com/uptrace/bun/internal/ordered"
"github.com/uptrace/bun/schema"
)
@ -30,12 +29,23 @@ type InspectorDialect interface {
// InspectorConfig controls the scope of migration by limiting the objects Inspector should return.
// Inspectors SHOULD use the configuration directly instead of copying it, or MAY choose to embed it,
// to make sure options are always applied correctly.
//
// ExcludeTables and ExcludeForeignKeys are intended for database inspectors,
// to compensate for the fact that model structs may not wholly reflect the
// state of the database schema.
// Database inspectors MUST respect these exclusions to prevent relations
// from being dropped unintentionally.
type InspectorConfig struct {
// SchemaName limits inspection to tables in a particular schema.
SchemaName string
// ExcludeTables from inspection.
// ExcludeTables from inspection. Patterns MAY make use of wildcards
// like % and _ and dialects MUST acknowledge that by using them
// with the SQL LIKE operator.
ExcludeTables []string
// ExcludeForeignKeys from inspection.
ExcludeForeignKeys map[ForeignKey]string
}
// Inspector reads schema state.
@ -49,13 +59,26 @@ func WithSchemaName(schemaName string) InspectorOption {
}
}
// WithExcludeTables works in append-only mode, i.e. tables cannot be re-included.
// WithExcludeTables forces inspector to exclude tables from the reported schema state.
// It works in append-only mode, i.e. tables cannot be re-included.
//
// Patterns MAY make use of % and _ wildcards, as if writing a LIKE clause in SQL.
func WithExcludeTables(tables ...string) InspectorOption {
return func(cfg *InspectorConfig) {
cfg.ExcludeTables = append(cfg.ExcludeTables, tables...)
}
}
// WithExcludeForeignKeys forces inspector to exclude foreign keys
// from the reported schema state.
func WithExcludeForeignKeys(fks ...ForeignKey) InspectorOption {
return func(cfg *InspectorConfig) {
for _, fk := range fks {
cfg.ExcludeForeignKeys[fk] = ""
}
}
}
// NewInspector creates a new database inspector, if the dialect supports it.
func NewInspector(db *bun.DB, options ...InspectorOption) (Inspector, error) {
dialect, ok := (db.Dialect()).(InspectorDialect)
@ -78,6 +101,9 @@ func NewBunModelInspector(tables *schema.Tables, options ...InspectorOption) *Bu
type InspectorOption func(*InspectorConfig)
func ApplyInspectorOptions(cfg *InspectorConfig, options ...InspectorOption) {
if cfg.ExcludeForeignKeys == nil {
cfg.ExcludeForeignKeys = make(map[ForeignKey]string)
}
for _, opt := range options {
opt(cfg)
}
@ -90,6 +116,10 @@ type inspector struct {
// BunModelInspector creates the current project state from the passed bun.Models.
// Do not recycle BunModelInspector for different sets of models, as older models will not be de-registerred before the next run.
//
// BunModelInspector does not know the database's dialect, so it does not
// assume any default schema name. Always specify the target schema name via
// WithSchemaName option to receive meaningful results.
type BunModelInspector struct {
InspectorConfig
tables *schema.Tables
@ -102,21 +132,21 @@ func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) {
BaseDatabase: BaseDatabase{
ForeignKeys: make(map[ForeignKey]string),
},
Tables: ordered.NewMap[string, Table](),
}
for _, t := range bmi.tables.All() {
if t.Schema != bmi.SchemaName {
continue
}
columns := ordered.NewMap[string, Column]()
var columns []Column
for _, f := range t.Fields {
sqlType, length, err := parseLen(f.CreateTableSQLType)
if err != nil {
return nil, fmt.Errorf("parse length in %q: %w", f.CreateTableSQLType, err)
}
columns.Store(f.Name, &BaseColumn{
columns = append(columns, &BaseColumn{
Name: f.Name,
SQLType: strings.ToLower(sqlType), // TODO(dyma): maybe this is not necessary after Column.Eq()
VarcharLen: length,
@ -162,7 +192,7 @@ func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) {
// produces
// schema.Table{ Schema: "favourite", Name: "favourite.books" }
tableName := strings.TrimPrefix(t.Name, t.Schema+".")
state.Tables.Store(tableName, &BunTable{
state.Tables = append(state.Tables, &BunTable{
BaseTable: BaseTable{
Schema: t.Schema,
Name: tableName,
@ -212,7 +242,7 @@ func parseLen(typ string) (string, int, error) {
}
// exprOrLiteral converts string to lowercase, if it does not contain a string literal 'lit'
// and trims the surrounding '' otherwise.
// and trims the surrounding quotes otherwise.
// Use it to ensure that user-defined default values in the models are always comparable
// to those returned by the database inspector, regardless of the case convention in individual drivers.
func exprOrLiteral(s string) string {
@ -226,10 +256,10 @@ func exprOrLiteral(s string) string {
type BunModelSchema struct {
BaseDatabase
Tables *ordered.Map[string, Table]
Tables []Table
}
func (ms BunModelSchema) GetTables() *ordered.Map[string, Table] {
func (ms BunModelSchema) GetTables() []Table {
return ms.Tables
}

View file

@ -1,13 +1,9 @@
package sqlschema
import (
"github.com/uptrace/bun/internal/ordered"
)
type Table interface {
GetSchema() string
GetName() string
GetColumns() *ordered.Map[string, Column]
GetColumns() []Column
GetPrimaryKey() *PrimaryKey
GetUniqueConstraints() []Unique
}
@ -23,7 +19,7 @@ type BaseTable struct {
Name string
// ColumnDefinitions map each column name to the column definition.
Columns *ordered.Map[string, Column]
Columns []Column
// PrimaryKey holds the primary key definition.
// A nil value means that no primary key is defined for the table.
@ -47,7 +43,7 @@ func (td *BaseTable) GetName() string {
return td.Name
}
func (td *BaseTable) GetColumns() *ordered.Map[string, Column] {
func (td *BaseTable) GetColumns() []Column {
return td.Columns
}

View file

@ -39,6 +39,7 @@ type TableModel interface {
getJoin(string) *relationJoin
getJoins() []relationJoin
addJoin(relationJoin) *relationJoin
clone() TableModel
rootValue() reflect.Value
parentIndex() []int

View file

@ -5,7 +5,7 @@ import (
"context"
"database/sql"
"reflect"
"sort"
"slices"
"github.com/uptrace/bun/schema"
)
@ -121,7 +121,7 @@ func (m *mapModel) appendColumnsValues(fmter schema.Formatter, b []byte) []byte
for k := range m.m {
keys = append(keys, k)
}
sort.Strings(keys)
slices.Sort(keys)
b = append(b, " ("...)
@ -157,7 +157,7 @@ func (m *mapModel) appendSet(fmter schema.Formatter, b []byte) []byte {
for k := range m.m {
keys = append(keys, k)
}
sort.Strings(keys)
slices.Sort(keys)
isTemplate := fmter.IsNop()
for i, k := range keys {

View file

@ -4,7 +4,7 @@ import (
"context"
"database/sql"
"errors"
"sort"
"slices"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/schema"
@ -155,7 +155,7 @@ func (m *mapSliceModel) initKeys() error {
keys = append(keys, k)
}
sort.Strings(keys)
slices.Sort(keys)
m.keys = keys
return nil

View file

@ -130,6 +130,16 @@ func (m *hasManyModel) parkStruct() error {
return nil
}
func (m *hasManyModel) clone() TableModel {
return &hasManyModel{
sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel),
baseTable: m.baseTable,
rel: m.rel,
baseValues: m.baseValues,
structKey: m.structKey,
}
}
func baseValues(model TableModel, fields []*schema.Field) map[internal.MapKey][]reflect.Value {
fieldIndex := model.Relation().Field.Index
m := make(map[internal.MapKey][]reflect.Value)

View file

@ -130,3 +130,13 @@ func (m *m2mModel) parkStruct() error {
return nil
}
func (m *m2mModel) clone() TableModel {
return &m2mModel{
sliceTableModel: m.sliceTableModel.clone().(*sliceTableModel),
baseTable: m.baseTable,
rel: m.rel,
baseValues: m.baseValues,
structKey: m.structKey,
}
}

View file

@ -124,3 +124,13 @@ func (m *sliceTableModel) updateSoftDeleteField(tm time.Time) error {
}
return nil
}
func (m *sliceTableModel) clone() TableModel {
return &sliceTableModel{
structTableModel: *m.structTableModel.clone().(*structTableModel),
slice: m.slice,
sliceLen: m.sliceLen,
sliceOfPtr: m.sliceOfPtr,
nextElem: m.nextElem,
}
}

View file

@ -337,6 +337,23 @@ func (m *structTableModel) AppendNamedArg(
return m.table.AppendNamedArg(fmter, b, name, m.strct)
}
func (m *structTableModel) clone() TableModel {
return &structTableModel{
db: m.db,
table: m.table,
rel: m.rel,
joins: append([]relationJoin{}, m.joins...),
dest: m.dest,
root: m.root,
index: append([]int{}, m.index...),
strct: m.strct,
structInited: m.structInited,
structInitErr: m.structInitErr,
columns: append([]string{}, m.columns...),
scanIndex: m.scanIndex,
}
}
// sqlite3 sometimes does not unquote columns.
func unquote(s string) string {
if s == "" {

View file

@ -1,6 +1,6 @@
{
"name": "gobun",
"version": "1.2.11",
"version": "1.2.14",
"main": "index.js",
"repository": "git@github.com:uptrace/bun.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",

View file

@ -384,12 +384,13 @@ func (q *DeleteQuery) afterDeleteHook(ctx context.Context) error {
return nil
}
// String returns the generated SQL query string. The DeleteQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *DeleteQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}

View file

@ -694,11 +694,12 @@ func (q *InsertQuery) tryLastInsertID(res sql.Result, dest []interface{}) error
return nil
}
// String returns the generated SQL query string. The InsertQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *InsertQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}

View file

@ -281,12 +281,13 @@ func (q *MergeQuery) scanOrExec(
return res, nil
}
// String returns the generated SQL query string. The MergeQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *MergeQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}

View file

@ -96,11 +96,12 @@ func (q *RawQuery) Operation() string {
return "SELECT"
}
// String returns the generated SQL query string. The RawQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *RawQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}

View file

@ -979,20 +979,25 @@ func (q *SelectQuery) scanAndCountConcurrently(
var mu sync.Mutex
var firstErr error
// FIXME: clone should not be needed, because the query is not modified here
// and should not be implicitly modified by the Bun lib.
countQuery := q.Clone()
wg.Add(1)
go func() {
defer wg.Done()
// Don't scan results if the user explicitly set Limit(-1).
if q.limit >= 0 {
wg.Add(1)
go func() {
defer wg.Done()
if err := q.Scan(ctx, dest...); err != nil {
mu.Lock()
if firstErr == nil {
firstErr = err
if err := q.Scan(ctx, dest...); err != nil {
mu.Lock()
if firstErr == nil {
firstErr = err
}
mu.Unlock()
}
mu.Unlock()
}
}()
}()
}
wg.Add(1)
go func() {
@ -1016,6 +1021,7 @@ func (q *SelectQuery) scanAndCountConcurrently(
func (q *SelectQuery) scanAndCountSeq(ctx context.Context, dest ...interface{}) (int, error) {
var firstErr error
// Don't scan results if the user explicitly set Limit(-1).
if q.limit >= 0 {
firstErr = q.Scan(ctx, dest...)
}
@ -1086,12 +1092,13 @@ func (q *SelectQuery) whereExists(ctx context.Context) (bool, error) {
return n == 1, nil
}
// String returns the generated SQL query string. The SelectQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *SelectQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}
@ -1120,13 +1127,17 @@ func (q *SelectQuery) Clone() *SelectQuery {
}
}
var tableModel TableModel
if q.tableModel != nil {
tableModel = q.tableModel.clone()
}
clone := &SelectQuery{
whereBaseQuery: whereBaseQuery{
baseQuery: baseQuery{
db: q.db,
table: q.table,
model: q.model,
tableModel: q.tableModel,
tableModel: tableModel,
with: make([]withQuery, len(q.with)),
tables: cloneArgs(q.tables),
columns: cloneArgs(q.columns),

View file

@ -5,7 +5,7 @@ import (
"context"
"database/sql"
"fmt"
"sort"
"slices"
"strconv"
"strings"
@ -276,7 +276,7 @@ func (q *CreateTableQuery) appendUniqueConstraints(fmter schema.Formatter, b []b
for key := range unique {
keys = append(keys, key)
}
sort.Strings(keys)
slices.Sort(keys)
for _, key := range keys {
if key == "" {
@ -308,8 +308,16 @@ func (q *CreateTableQuery) appendUniqueConstraint(
// appendFKConstraintsRel appends a FOREIGN KEY clause for each of the model's existing relations.
func (q *CreateTableQuery) appendFKConstraintsRel(fmter schema.Formatter, b []byte) (_ []byte, err error) {
for _, rel := range q.tableModel.Table().Relations {
if rel.References() {
relations := q.tableModel.Table().Relations
keys := make([]string, 0, len(relations))
for key := range relations {
keys = append(keys, key)
}
slices.Sort(keys)
for _, key := range keys {
if rel := relations[key]; rel.References() {
b, err = q.appendFK(fmter, b, schema.QueryWithArgs{
Query: "(?) REFERENCES ? (?) ? ?",
Args: []interface{}{
@ -400,11 +408,12 @@ func (q *CreateTableQuery) afterCreateTableHook(ctx context.Context) error {
return nil
}
// String returns the generated SQL query string. The CreateTableQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *CreateTableQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}

View file

@ -165,11 +165,12 @@ func (q *DropTableQuery) afterDropTableHook(ctx context.Context) error {
return nil
}
// String returns the generated SQL query string. The DropTableQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *DropTableQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}

View file

@ -635,12 +635,13 @@ func (q *UpdateQuery) hasTableAlias(fmter schema.Formatter) bool {
return fmter.HasFeature(feature.UpdateMultiTable | feature.UpdateTableAlias)
}
// String returns the generated SQL query string. The UpdateQuery instance must not be
// modified during query generation to ensure multiple calls to String() return identical results.
func (q *UpdateQuery) String() string {
buf, err := q.AppendQuery(q.db.Formatter(), nil)
if err != nil {
panic(err)
}
return string(buf)
}

View file

@ -5,6 +5,7 @@ import (
"database/sql"
"fmt"
"net"
"net/netip"
"reflect"
"strconv"
"strings"
@ -102,6 +103,10 @@ func scanner(typ reflect.Type) ScannerFunc {
return scanIP
case ipNetType:
return scanIPNet
case netipAddrType:
return scanNetIpAddr
case netipPrefixType:
return scanNetIpPrefix
case jsonRawMessageType:
return scanBytes
}
@ -413,6 +418,48 @@ func scanIPNet(dest reflect.Value, src interface{}) error {
return nil
}
func scanNetIpAddr(dest reflect.Value, src interface{}) error {
if src == nil {
return scanNull(dest)
}
b, err := toBytes(src)
if err != nil {
return err
}
val, _ := netip.ParseAddr(internal.String(b))
if !val.IsValid() {
return fmt.Errorf("bun: invalid ip: %q", b)
}
ptr := dest.Addr().Interface().(*netip.Addr)
*ptr = val
return nil
}
func scanNetIpPrefix(dest reflect.Value, src interface{}) error {
if src == nil {
return scanNull(dest)
}
b, err := toBytes(src)
if err != nil {
return err
}
val, _ := netip.ParsePrefix(internal.String(b))
if !val.IsValid() {
return fmt.Errorf("bun: invalid prefix: %q", b)
}
ptr := dest.Addr().Interface().(*netip.Prefix)
*ptr = val
return nil
}
func addrScanner(fn ScannerFunc) ScannerFunc {
return func(dest reflect.Value, src interface{}) error {
if !dest.CanAddr() {

View file

@ -1,10 +1,11 @@
package schema
import (
"cmp"
"database/sql"
"fmt"
"reflect"
"sort"
"slices"
"strings"
"time"
@ -299,15 +300,14 @@ func (t *Table) processFields(typ reflect.Type) {
}
func sortFieldsByStruct(fields []*Field) {
sort.Slice(fields, func(i, j int) bool {
left, right := fields[i], fields[j]
slices.SortFunc(fields, func(left, right *Field) int {
for k := 0; k < len(left.Index) && k < len(right.Index); k++ {
if left.Index[k] != right.Index[k] {
return left.Index[k] < right.Index[k]
if res := cmp.Compare(left.Index[k], right.Index[k]); res != 0 {
return res
}
}
// NOTE: should not reach
return true
return 0
})
}
@ -538,6 +538,7 @@ func (t *Table) newField(sf reflect.StructField, tag tagparser.Tag) *Field {
}
if tag.HasOption("autoincrement") {
field.AutoIncrement = true
field.NotNull = true
field.NullZero = true
}
if tag.HasOption("identity") {

View file

@ -2,5 +2,5 @@ package bun
// Version is the current release version.
func Version() string {
return "1.2.11"
return "1.2.14"
}