Compare commits

..

No commits in common. "57cb4fe7482962aa8e5a05874a343474d5a453e7" and "bd1c43d55e0bf113a49abb3aefdb89a07e8cf259" have entirely different histories.

41 changed files with 606 additions and 1251 deletions

View file

@ -33,8 +33,6 @@ These contribution guidelines were adapted from / inspired by those of Gitea (ht
- [Federation](#federation) - [Federation](#federation)
- [Updating Swagger docs](#updating-swagger-docs) - [Updating Swagger docs](#updating-swagger-docs)
- [CI/CD configuration](#ci-cd-configuration) - [CI/CD configuration](#ci-cd-configuration)
- [Other Useful Stuff](#other-useful-stuff)
- [Running migrations on a Postgres DB backup locally](#running-migrations-on-a-postgres-db-backup-locally)
## Introduction ## Introduction
@ -527,38 +525,3 @@ The `woodpecker` pipeline files are in the `.woodpecker` directory of this repos
The Woodpecker instance for GoToSocial is [here](https://woodpecker.superseriousbusiness.org/repos/2). The Woodpecker instance for GoToSocial is [here](https://woodpecker.superseriousbusiness.org/repos/2).
Documentation for Woodpecker is [here](https://woodpecker-ci.org/docs/intro). Documentation for Woodpecker is [here](https://woodpecker-ci.org/docs/intro).
## Other Useful Stuff
Various bits and bobs.
### Running migrations on a Postgres DB backup locally
It may be useful when testing or debugging migrations to be able to run them against a copy of a real instance's Postgres database locally.
Basic steps for this:
First dump the Postgres database on the remote machine, and copy the dump over to your development machine.
Now create a local Postgres container and mount the dump into it with, for example:
```bash
docker run -it --name postgres --network host -e POSTGRES_PASSWORD=postgres -v /path/to/db_dump:/db_dump postgres
```
In a separate terminal window, execute a command inside the running container to load the dump into the "postgres" database:
```bash
docker exec -it --user postgres postgres psql -X -f /db_dump postgres
```
With the Postgres container still running, run GoToSocial and point it towards the container. Use the appropriate `GTS_HOST` (and `GTS_ACCOUNT_DOMAIN`) values for the instance you dumped:
```bash
GTS_HOST=example.org \
GTS_DB_TYPE=postgres \
GTS_DB_POSTGRES_CONNECTION_STRING=postgres://postgres:postgres@localhost:5432/postgres \
./gotosocial migrations run
```
When you're done messing around, don't forget to remove any containers that you started up, and remove any lingering volumes with `docker volume prune`, else you might end up filling your disk with unused temporary volumes.

8
go.mod
View file

@ -25,13 +25,13 @@ require (
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf
codeberg.org/gruf/go-kv/v2 v2.0.7 codeberg.org/gruf/go-kv/v2 v2.0.7
codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f
codeberg.org/gruf/go-mempool v0.0.0-20251003110531-b54adae66253 codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760
codeberg.org/gruf/go-mutexes v1.5.8 codeberg.org/gruf/go-mutexes v1.5.3
codeberg.org/gruf/go-runners v1.6.3 codeberg.org/gruf/go-runners v1.6.3
codeberg.org/gruf/go-sched v1.2.4 codeberg.org/gruf/go-sched v1.2.4
codeberg.org/gruf/go-split v1.2.0 codeberg.org/gruf/go-split v1.2.0
codeberg.org/gruf/go-storage v0.3.1 codeberg.org/gruf/go-storage v0.3.1
codeberg.org/gruf/go-structr v0.9.12 codeberg.org/gruf/go-structr v0.9.9
github.com/DmitriyVTitov/size v1.5.0 github.com/DmitriyVTitov/size v1.5.0
github.com/KimMachineGun/automemlimit v0.7.4 github.com/KimMachineGun/automemlimit v0.7.4
github.com/SherClockHolmes/webpush-go v1.4.0 github.com/SherClockHolmes/webpush-go v1.4.0
@ -53,7 +53,7 @@ require (
github.com/miekg/dns v1.1.68 github.com/miekg/dns v1.1.68
github.com/minio/minio-go/v7 v7.0.95 github.com/minio/minio-go/v7 v7.0.95
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/ncruces/go-sqlite3 v0.29.1 github.com/ncruces/go-sqlite3 v0.29.0
github.com/oklog/ulid v1.3.1 github.com/oklog/ulid v1.3.1
github.com/pquerna/otp v1.5.0 github.com/pquerna/otp v1.5.0
github.com/rivo/uniseg v0.4.7 github.com/rivo/uniseg v0.4.7

16
go.sum generated
View file

@ -42,10 +42,10 @@ codeberg.org/gruf/go-mangler/v2 v2.0.6 h1:c3cwnI6Mi17EAwGSYGNMN6+9PMzaIj2GLAKx9D
codeberg.org/gruf/go-mangler/v2 v2.0.6/go.mod h1:CXIm7zAWPdNmZVAGM1NRiF/ekJTPE7YTb8kiRxiEFaQ= codeberg.org/gruf/go-mangler/v2 v2.0.6/go.mod h1:CXIm7zAWPdNmZVAGM1NRiF/ekJTPE7YTb8kiRxiEFaQ=
codeberg.org/gruf/go-maps v1.0.4 h1:K+Ww4vvR3TZqm5jqrKVirmguZwa3v1VUvmig2SE8uxY= codeberg.org/gruf/go-maps v1.0.4 h1:K+Ww4vvR3TZqm5jqrKVirmguZwa3v1VUvmig2SE8uxY=
codeberg.org/gruf/go-maps v1.0.4/go.mod h1:ASX7osM7kFwt5O8GfGflcFjrwYGD8eIuRLl/oMjhEi8= codeberg.org/gruf/go-maps v1.0.4/go.mod h1:ASX7osM7kFwt5O8GfGflcFjrwYGD8eIuRLl/oMjhEi8=
codeberg.org/gruf/go-mempool v0.0.0-20251003110531-b54adae66253 h1:qPAY72xCWlySVROSNZecfLGAyeV/SiXmPmfhUU+o3Xw= codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 h1:m2/UCRXhjDwAg4vyji6iKCpomKw6P4PmBOUi5DvAMH4=
codeberg.org/gruf/go-mempool v0.0.0-20251003110531-b54adae66253/go.mod h1:761koiXmqfgzvu5mez2Rk7YlwWilpqJ/zv5hIA6NoNI= codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760/go.mod h1:E3RcaCFNq4zXpvaJb8lfpPqdUAmSkP5F1VmMiEUYTEk=
codeberg.org/gruf/go-mutexes v1.5.8 h1:HRGnvT4COb3jX9xdeoSUUbjPgmk5kXPuDfld9ksUJKA= codeberg.org/gruf/go-mutexes v1.5.3 h1:RIEy1UuDxKgAiINRMrPxTUWSGW6pFx9DzeJN4WPqra8=
codeberg.org/gruf/go-mutexes v1.5.8/go.mod h1:21sy/hWH8dDQBk7ocsxqo2GNpWiIir+e82RG3hjnN20= codeberg.org/gruf/go-mutexes v1.5.3/go.mod h1:AnhagsMzUISL/nBVwhnHwDwTZOAxMILwCOG8/wKOblg=
codeberg.org/gruf/go-runners v1.6.3 h1:To/AX7eTrWuXrTkA3RA01YTP5zha1VZ68LQ+0D4RY7E= codeberg.org/gruf/go-runners v1.6.3 h1:To/AX7eTrWuXrTkA3RA01YTP5zha1VZ68LQ+0D4RY7E=
codeberg.org/gruf/go-runners v1.6.3/go.mod h1:oXAaUmG2VxoKttpCqZGv5nQBeSvZSR2BzIk7h1yTRlU= codeberg.org/gruf/go-runners v1.6.3/go.mod h1:oXAaUmG2VxoKttpCqZGv5nQBeSvZSR2BzIk7h1yTRlU=
codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw= codeberg.org/gruf/go-sched v1.2.4 h1:ddBB9o0D/2oU8NbQ0ldN5aWxogpXPRBATWi58+p++Hw=
@ -54,8 +54,8 @@ codeberg.org/gruf/go-split v1.2.0 h1:PmzL23nVEVHm8VxjsJmv4m4wGQz2bGgQw52dgSSj65c
codeberg.org/gruf/go-split v1.2.0/go.mod h1:0rejWJpqvOoFAd7nwm5tIXYKaAqjtFGOXmTqQV+VO38= codeberg.org/gruf/go-split v1.2.0/go.mod h1:0rejWJpqvOoFAd7nwm5tIXYKaAqjtFGOXmTqQV+VO38=
codeberg.org/gruf/go-storage v0.3.1 h1:g66UIM/xXnEk9ejT+W0T9s/PODBZhXa/8ajzeY/MELI= codeberg.org/gruf/go-storage v0.3.1 h1:g66UIM/xXnEk9ejT+W0T9s/PODBZhXa/8ajzeY/MELI=
codeberg.org/gruf/go-storage v0.3.1/go.mod h1:r43n/zi7YGOCl2iSl7AMI27D1zcWS65Bi2+5xDzypeo= codeberg.org/gruf/go-storage v0.3.1/go.mod h1:r43n/zi7YGOCl2iSl7AMI27D1zcWS65Bi2+5xDzypeo=
codeberg.org/gruf/go-structr v0.9.12 h1:yMopvexnuKgZme9WgvIhrJaAuAjfper/x38xsVuJOOo= codeberg.org/gruf/go-structr v0.9.9 h1:fwIzi/94yBNSWleXZIfVW/QyNK5+/xxI2reVYzu5V/c=
codeberg.org/gruf/go-structr v0.9.12/go.mod h1:sP2ZSjM5X5XKlxuhAbTKuVQm9DWbHsrQRuTl3MUwbHw= codeberg.org/gruf/go-structr v0.9.9/go.mod h1:5dsazOsIeJyV8Dl2DdSXqCDEZUx3e3dc41N6f2mPtgw=
codeberg.org/gruf/go-xunsafe v0.0.0-20250809104800-512a9df57d73 h1:pRaOwIOS1WSZoPCAvE0H1zpv+D4gF37OVppybffqdI8= codeberg.org/gruf/go-xunsafe v0.0.0-20250809104800-512a9df57d73 h1:pRaOwIOS1WSZoPCAvE0H1zpv+D4gF37OVppybffqdI8=
codeberg.org/gruf/go-xunsafe v0.0.0-20250809104800-512a9df57d73/go.mod h1:9wkq+dmHjUhB/0ZxDUWAwsWuXwwGyx5N1dDCB9hpWs8= codeberg.org/gruf/go-xunsafe v0.0.0-20250809104800-512a9df57d73/go.mod h1:9wkq+dmHjUhB/0ZxDUWAwsWuXwwGyx5N1dDCB9hpWs8=
codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix h1:k76/Th+bruqU/d+dB0Ru466ctTF2aVjKpisy/471ILE= codeberg.org/superseriousbusiness/go-swagger v0.32.3-gts-go1.23-fix h1:k76/Th+bruqU/d+dB0Ru466ctTF2aVjKpisy/471ILE=
@ -338,8 +338,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-sqlite3 v0.29.1 h1:NIi8AISWBToRHyoz01FXiTNvU147Tqdibgj2tFzJCqM= github.com/ncruces/go-sqlite3 v0.29.0 h1:1tsLiagCoqZEfcHDeKsNSv5jvrY/Iu393pAnw2wLNJU=
github.com/ncruces/go-sqlite3 v0.29.1/go.mod h1:PpccBNNhvjwUOwDQEn2gXQPFPTWdlromj0+fSkd5KSg= github.com/ncruces/go-sqlite3 v0.29.0/go.mod h1:r1hSvYKPNJ+OlUA1O3r8o9LAawzPAlqeZiIdxTBBBJ0=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M= github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=

View file

@ -24,16 +24,13 @@ import (
"reflect" "reflect"
"slices" "slices"
"strings" "strings"
"time"
"code.superseriousbusiness.org/gotosocial/internal/db" "code.superseriousbusiness.org/gotosocial/internal/db"
newmodel "code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new" newmodel "code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20250415111056_thread_all_statuses/new"
oldmodel "code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20250415111056_thread_all_statuses/old" oldmodel "code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20250415111056_thread_all_statuses/old"
"code.superseriousbusiness.org/gotosocial/internal/db/bundb/migrations/20250415111056_thread_all_statuses/util"
"code.superseriousbusiness.org/gotosocial/internal/gtserror" "code.superseriousbusiness.org/gotosocial/internal/gtserror"
"code.superseriousbusiness.org/gotosocial/internal/id" "code.superseriousbusiness.org/gotosocial/internal/id"
"code.superseriousbusiness.org/gotosocial/internal/log" "code.superseriousbusiness.org/gotosocial/internal/log"
"code.superseriousbusiness.org/gotosocial/internal/util/xslices"
"github.com/uptrace/bun" "github.com/uptrace/bun"
) )
@ -52,26 +49,10 @@ func init() {
"thread_id", "thread_id_new", 1) "thread_id", "thread_id_new", 1)
var sr statusRethreader var sr statusRethreader
var updatedTotal int64 var count int
var maxID string var maxID string
var statuses []*oldmodel.Status var statuses []*oldmodel.Status
// Create thread_id_new already
// so we can populate it as we go.
log.Info(ctx, "creating statuses column thread_id_new")
if _, err := db.NewAddColumn().
Table("statuses").
ColumnExpr(newColDef).
Exec(ctx); err != nil {
return gtserror.Newf("error adding statuses column thread_id_new: %w", err)
}
// Try to merge the wal so we're
// not working on the wal file.
if err := doWALCheckpoint(ctx, db); err != nil {
return err
}
// Get a total count of all statuses before migration. // Get a total count of all statuses before migration.
total, err := db.NewSelect().Table("statuses").Count(ctx) total, err := db.NewSelect().Table("statuses").Count(ctx)
if err != nil { if err != nil {
@ -82,129 +63,74 @@ func init() {
// possible ULID value. // possible ULID value.
maxID = id.Highest maxID = id.Highest
log.Warnf(ctx, "rethreading %d statuses, this will take a *long* time", total) log.Warn(ctx, "rethreading top-level statuses, this will take a *long* time")
for /* TOP LEVEL STATUS LOOP */ {
// Open initial transaction.
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
for i := 1; ; i++ {
// Reset slice. // Reset slice.
clear(statuses) clear(statuses)
statuses = statuses[:0] statuses = statuses[:0]
batchStart := time.Now()
// Select top-level statuses. // Select top-level statuses.
if err := tx.NewSelect(). if err := db.NewSelect().
Model(&statuses). Model(&statuses).
Column("id"). Column("id", "thread_id").
// We specifically use in_reply_to_account_id instead of in_reply_to_id as // We specifically use in_reply_to_account_id instead of in_reply_to_id as
// they should both be set / unset in unison, but we specifically have an // they should both be set / unset in unison, but we specifically have an
// index on in_reply_to_account_id with ID ordering, unlike in_reply_to_id. // index on in_reply_to_account_id with ID ordering, unlike in_reply_to_id.
Where("? IS NULL", bun.Ident("in_reply_to_account_id")). Where("? IS NULL", bun.Ident("in_reply_to_account_id")).
Where("? < ?", bun.Ident("id"), maxID). Where("? < ?", bun.Ident("id"), maxID).
OrderExpr("? DESC", bun.Ident("id")). OrderExpr("? DESC", bun.Ident("id")).
Limit(500). Limit(5000).
Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) { Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) {
return gtserror.Newf("error selecting top level statuses: %w", err) return gtserror.Newf("error selecting top level statuses: %w", err)
} }
l := len(statuses) // Reached end of block.
if l == 0 { if len(statuses) == 0 {
// No more statuses!
//
// Transaction will be closed
// after leaving the loop.
break break
} else if i%200 == 0 {
// Begin a new transaction every
// 200 batches (~100,000 statuses),
// to avoid massive commits.
// Close existing transaction.
if err := tx.Commit(); err != nil {
return err
}
// Try to flush the wal
// to avoid silly wal sizes.
if err := doWALCheckpoint(ctx, db); err != nil {
return err
}
// Open new transaction.
tx, err = db.BeginTx(ctx, nil)
if err != nil {
return err
}
} }
// Set next maxID value from statuses. // Set next maxID value from statuses.
maxID = statuses[len(statuses)-1].ID maxID = statuses[len(statuses)-1].ID
// Rethread using the // Rethread each selected batch of top-level statuses in a transaction.
// open transaction. if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
var updatedInBatch int64
for _, status := range statuses { // Rethread each top-level status.
n, err := sr.rethreadStatus(ctx, tx, status, false) for _, status := range statuses {
if err != nil { n, err := sr.rethreadStatus(ctx, tx, status)
return gtserror.Newf("error rethreading status %s: %w", status.URI, err) if err != nil {
return gtserror.Newf("error rethreading status %s: %w", status.URI, err)
}
count += n
} }
updatedInBatch += n
updatedTotal += n return nil
}); err != nil {
return err
} }
// Show speed for this batch. log.Infof(ctx, "[approx %d of %d] rethreading statuses (top-level)", count, total)
timeTaken := time.Since(batchStart).Milliseconds()
msPerRow := float64(timeTaken) / float64(updatedInBatch)
rowsPerMs := float64(1) / float64(msPerRow)
rowsPerSecond := 1000 * rowsPerMs
// Show percent migrated overall.
totalDone := (float64(updatedTotal) / float64(total)) * 100
log.Infof(
ctx,
"[~%.2f%% done; ~%.0f rows/s] migrating threads",
totalDone, rowsPerSecond,
)
} }
// Close transaction. // Attempt to merge any sqlite write-ahead-log.
if err := tx.Commit(); err != nil { if err := doWALCheckpoint(ctx, db); err != nil {
return err return err
} }
// Create a partial index on thread_id_new to find stragglers. log.Warn(ctx, "rethreading straggler statuses, this will take a *long* time")
// This index will be removed at the end of the migration. for /* STRAGGLER STATUS LOOP */ {
log.Info(ctx, "creating temporary statuses thread_id_new index")
if _, err := db.NewCreateIndex().
Table("statuses").
Index("statuses_thread_id_new_idx").
Column("thread_id_new").
Where("? = ?", bun.Ident("thread_id_new"), id.Lowest).
Exec(ctx); err != nil {
return gtserror.Newf("error creating new thread_id index: %w", err)
}
for i := 1; ; i++ {
// Reset slice. // Reset slice.
clear(statuses) clear(statuses)
statuses = statuses[:0] statuses = statuses[:0]
batchStart := time.Now()
// Select straggler statuses. // Select straggler statuses.
if err := db.NewSelect(). if err := db.NewSelect().
Model(&statuses). Model(&statuses).
Column("id"). Column("id", "in_reply_to_id", "thread_id").
Where("? = ?", bun.Ident("thread_id_new"), id.Lowest). Where("? IS NULL", bun.Ident("thread_id")).
// We select in smaller batches for this part // We select in smaller batches for this part
// of the migration as there is a chance that // of the migration as there is a chance that
@ -212,7 +138,7 @@ func init() {
// part of the same thread, i.e. one call to // part of the same thread, i.e. one call to
// rethreadStatus() may effect other statuses // rethreadStatus() may effect other statuses
// later in the slice. // later in the slice.
Limit(250). Limit(1000).
Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) { Scan(ctx); err != nil && !errors.Is(err, sql.ErrNoRows) {
return gtserror.Newf("error selecting straggler statuses: %w", err) return gtserror.Newf("error selecting straggler statuses: %w", err)
} }
@ -223,35 +149,23 @@ func init() {
} }
// Rethread each selected batch of straggler statuses in a transaction. // Rethread each selected batch of straggler statuses in a transaction.
var updatedInBatch int64
if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error { if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
// Rethread each top-level status.
for _, status := range statuses { for _, status := range statuses {
n, err := sr.rethreadStatus(ctx, tx, status, true) n, err := sr.rethreadStatus(ctx, tx, status)
if err != nil { if err != nil {
return gtserror.Newf("error rethreading status %s: %w", status.URI, err) return gtserror.Newf("error rethreading status %s: %w", status.URI, err)
} }
updatedInBatch += n count += n
updatedTotal += n
} }
return nil return nil
}); err != nil { }); err != nil {
return err return err
} }
// Show speed for this batch. log.Infof(ctx, "[approx %d of %d] rethreading statuses (stragglers)", count, total)
timeTaken := time.Since(batchStart).Milliseconds()
msPerRow := float64(timeTaken) / float64(updatedInBatch)
rowsPerMs := float64(1) / float64(msPerRow)
rowsPerSecond := 1000 * rowsPerMs
// Show percent migrated overall.
totalDone := (float64(updatedTotal) / float64(total)) * 100
log.Infof(
ctx,
"[~%.2f%% done; ~%.0f rows/s] migrating stragglers",
totalDone, rowsPerSecond,
)
} }
// Attempt to merge any sqlite write-ahead-log. // Attempt to merge any sqlite write-ahead-log.
@ -259,13 +173,6 @@ func init() {
return err return err
} }
log.Info(ctx, "dropping temporary thread_id_new index")
if _, err := db.NewDropIndex().
Index("statuses_thread_id_new_idx").
Exec(ctx); err != nil {
return gtserror.Newf("error dropping temporary thread_id_new index: %w", err)
}
log.Info(ctx, "dropping old thread_to_statuses table") log.Info(ctx, "dropping old thread_to_statuses table")
if _, err := db.NewDropTable(). if _, err := db.NewDropTable().
Table("thread_to_statuses"). Table("thread_to_statuses").
@ -273,6 +180,33 @@ func init() {
return gtserror.Newf("error dropping old thread_to_statuses table: %w", err) return gtserror.Newf("error dropping old thread_to_statuses table: %w", err)
} }
log.Info(ctx, "creating new statuses thread_id column")
if _, err := db.NewAddColumn().
Table("statuses").
ColumnExpr(newColDef).
Exec(ctx); err != nil {
return gtserror.Newf("error adding new thread_id column: %w", err)
}
log.Info(ctx, "setting thread_id_new = thread_id (this may take a while...)")
if err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
return batchUpdateByID(ctx, tx,
"statuses", // table
"id", // batchByCol
"UPDATE ? SET ? = ?", // updateQuery
[]any{bun.Ident("statuses"),
bun.Ident("thread_id_new"),
bun.Ident("thread_id")},
)
}); err != nil {
return err
}
// Attempt to merge any sqlite write-ahead-log.
if err := doWALCheckpoint(ctx, db); err != nil {
return err
}
log.Info(ctx, "dropping old statuses thread_id index") log.Info(ctx, "dropping old statuses thread_id index")
if _, err := db.NewDropIndex(). if _, err := db.NewDropIndex().
Index("statuses_thread_id_idx"). Index("statuses_thread_id_idx").
@ -340,11 +274,6 @@ type statusRethreader struct {
// its contents are ephemeral. // its contents are ephemeral.
statuses []*oldmodel.Status statuses []*oldmodel.Status
// newThreadIDSet is used to track whether
// statuses in statusIDs have already have
// thread_id_new set on them.
newThreadIDSet map[string]struct{}
// seenIDs tracks the unique status and // seenIDs tracks the unique status and
// thread IDs we have seen, ensuring we // thread IDs we have seen, ensuring we
// don't append duplicates to statusIDs // don't append duplicates to statusIDs
@ -360,15 +289,14 @@ type statusRethreader struct {
} }
// rethreadStatus is the main logic handler for statusRethreader{}. this is what gets called from the migration // rethreadStatus is the main logic handler for statusRethreader{}. this is what gets called from the migration
// in order to trigger a status rethreading operation for the given status, returning total number of rows changed. // in order to trigger a status rethreading operation for the given status, returning total number rethreaded.
func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, status *oldmodel.Status, straggler bool) (int64, error) { func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, status *oldmodel.Status) (int, error) {
// Zero slice and // Zero slice and
// map ptr values. // map ptr values.
clear(sr.statusIDs) clear(sr.statusIDs)
clear(sr.threadIDs) clear(sr.threadIDs)
clear(sr.statuses) clear(sr.statuses)
clear(sr.newThreadIDSet)
clear(sr.seenIDs) clear(sr.seenIDs)
// Reset slices and values for use. // Reset slices and values for use.
@ -377,11 +305,6 @@ func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, statu
sr.statuses = sr.statuses[:0] sr.statuses = sr.statuses[:0]
sr.allThreaded = true sr.allThreaded = true
if sr.newThreadIDSet == nil {
// Allocate new hash set for newThreadIDSet.
sr.newThreadIDSet = make(map[string]struct{})
}
if sr.seenIDs == nil { if sr.seenIDs == nil {
// Allocate new hash set for status IDs. // Allocate new hash set for status IDs.
sr.seenIDs = make(map[string]struct{}) sr.seenIDs = make(map[string]struct{})
@ -394,22 +317,12 @@ func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, statu
// to the rethreadStatus() call. // to the rethreadStatus() call.
if err := tx.NewSelect(). if err := tx.NewSelect().
Model(status). Model(status).
Column("in_reply_to_id", "thread_id", "thread_id_new"). Column("in_reply_to_id", "thread_id").
Where("? = ?", bun.Ident("id"), status.ID). Where("? = ?", bun.Ident("id"), status.ID).
Scan(ctx); err != nil { Scan(ctx); err != nil {
return 0, gtserror.Newf("error selecting status: %w", err) return 0, gtserror.Newf("error selecting status: %w", err)
} }
// If we've just threaded this status by setting
// thread_id_new, then by definition anything we
// could find from the entire thread must now be
// threaded, so we can save some database calls
// by skipping iterating up + down from here.
if status.ThreadIDNew != id.Lowest {
log.Debugf(ctx, "skipping just rethreaded status: %s", status.ID)
return 0, nil
}
// status and thread ID cursor // status and thread ID cursor
// index values. these are used // index values. these are used
// to keep track of newly loaded // to keep track of newly loaded
@ -458,14 +371,14 @@ func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, statu
threadIdx = len(sr.threadIDs) threadIdx = len(sr.threadIDs)
} }
// Total number of
// statuses threaded.
total := len(sr.statusIDs)
// Check for the case where the entire // Check for the case where the entire
// batch of statuses is already correctly // batch of statuses is already correctly
// threaded. Then we have nothing to do! // threaded. Then we have nothing to do!
// if sr.allThreaded && len(sr.threadIDs) == 1 {
// Skip this check for straggler statuses
// that are part of broken threads.
if !straggler && sr.allThreaded && len(sr.threadIDs) == 1 {
log.Debug(ctx, "skipping just rethreaded thread")
return 0, nil return 0, nil
} }
@ -504,120 +417,36 @@ func (sr *statusRethreader) rethreadStatus(ctx context.Context, tx bun.Tx, statu
} }
} }
var ( // Update all the statuses to
res sql.Result // use determined thread_id.
err error if _, err := tx.NewUpdate().
) Table("statuses").
Where("? IN (?)", bun.Ident("id"), bun.In(sr.statusIDs)).
if len(sr.statusIDs) == 1 { Set("? = ?", bun.Ident("thread_id"), threadID).
Exec(ctx); err != nil {
// If we're only updating one status
// we can use a simple update query.
res, err = tx.NewUpdate().
// Update the status model.
TableExpr("? AS ?", bun.Ident("statuses"), bun.Ident("status")).
// Set the new thread ID, which we can use as
// an indication that we've migrated this batch.
Set("? = ?", bun.Ident("thread_id_new"), threadID).
// While we're here, also set old thread_id, as
// we'll use it for further rethreading purposes.
Set("? = ?", bun.Ident("thread_id"), threadID).
Where("? = ?", bun.Ident("status.id"), sr.statusIDs[0]).
Exec(ctx)
} else {
// If we're updating multiple statuses at once,
// build up a common table expression to update
// all statuses in this thread to use threadID.
//
// This ought to be a little more readable than
// using an "IN(*)" query, and PG or SQLite *may*
// be able to optimize it better.
//
// See:
//
// - https://sqlite.org/lang_with.html
// - https://www.postgresql.org/docs/current/queries-with.html
// - https://bun.uptrace.dev/guide/query-update.html#bulk-update
values := make([]*util.Status, 0, len(sr.statusIDs))
for _, statusID := range sr.statusIDs {
// Filter out statusIDs that have already had
// thread_id_new set, to avoid spurious writes.
if _, set := sr.newThreadIDSet[statusID]; !set {
values = append(values, &util.Status{
ID: statusID,
})
}
}
// Resulting query will look something like this:
//
// WITH "_data" ("id") AS (
// VALUES
// ('01JR6PZED0DDR2VZHQ8H87ZW98'),
// ('01JR6PZED0J91MJCAFDTCCCG8Q')
// )
// UPDATE "statuses" AS "status"
// SET
// "thread_id_new" = '01K6MGKX54BBJ3Y1FBPQY45E5P',
// "thread_id" = '01K6MGKX54BBJ3Y1FBPQY45E5P'
// FROM _data
// WHERE "status"."id" = "_data"."id"
res, err = tx.NewUpdate().
// Update the status model.
Model((*oldmodel.Status)(nil)).
// Provide the CTE values as "_data".
With("_data", tx.NewValues(&values)).
// Include `FROM _data` statement so we can use
// `_data` table in SET and WHERE components.
TableExpr("_data").
// Set the new thread ID, which we can use as
// an indication that we've migrated this batch.
Set("? = ?", bun.Ident("thread_id_new"), threadID).
// While we're here, also set old thread_id, as
// we'll use it for further rethreading purposes.
Set("? = ?", bun.Ident("thread_id"), threadID).
// "Join" to the CTE on status ID.
Where("? = ?", bun.Ident("status.id"), bun.Ident("_data.id")).
Exec(ctx)
}
if err != nil {
return 0, gtserror.Newf("error updating status thread ids: %w", err) return 0, gtserror.Newf("error updating status thread ids: %w", err)
} }
rowsAffected, err := res.RowsAffected()
if err != nil {
return 0, gtserror.Newf("error counting rows affected: %w", err)
}
if len(sr.threadIDs) > 0 { if len(sr.threadIDs) > 0 {
// Update any existing thread // Update any existing thread
// mutes to use latest thread_id. // mutes to use latest thread_id.
// Dedupe thread IDs before query
// to avoid ludicrous "IN" clause.
threadIDs := sr.threadIDs
threadIDs = xslices.Deduplicate(threadIDs)
if _, err := tx.NewUpdate(). if _, err := tx.NewUpdate().
Table("thread_mutes"). Table("thread_mutes").
Where("? IN (?)", bun.Ident("thread_id"), bun.In(threadIDs)). Where("? IN (?)", bun.Ident("thread_id"), bun.In(sr.threadIDs)).
Set("? = ?", bun.Ident("thread_id"), threadID). Set("? = ?", bun.Ident("thread_id"), threadID).
Exec(ctx); err != nil { Exec(ctx); err != nil {
return 0, gtserror.Newf("error updating mute thread ids: %w", err) return 0, gtserror.Newf("error updating mute thread ids: %w", err)
} }
} }
return rowsAffected, nil return total, nil
} }
// append will append the given status to the internal tracking of statusRethreader{} for // append will append the given status to the internal tracking of statusRethreader{} for
// potential future operations, checking for uniqueness. it tracks the inReplyToID value // potential future operations, checking for uniqueness. it tracks the inReplyToID value
// for the next call to getParents(), it tracks the status ID for list of statuses that // for the next call to getParents(), it tracks the status ID for list of statuses that
// may need updating, whether a new thread ID has been set for each status, the thread ID // need updating, the thread ID for the list of thread links and mutes that need updating,
// for the list of thread links and mutes that need updating, and whether all the statuses // and whether all the statuses all have a provided thread ID (i.e. allThreaded).
// all have a provided thread ID (i.e. allThreaded).
func (sr *statusRethreader) append(status *oldmodel.Status) { func (sr *statusRethreader) append(status *oldmodel.Status) {
// Check if status already seen before. // Check if status already seen before.
@ -650,14 +479,7 @@ func (sr *statusRethreader) append(status *oldmodel.Status) {
} }
// Add status ID to map of seen IDs. // Add status ID to map of seen IDs.
mark := struct{}{} sr.seenIDs[status.ID] = struct{}{}
sr.seenIDs[status.ID] = mark
// If new thread ID has already been
// set, add status ID to map of set IDs.
if status.ThreadIDNew != id.Lowest {
sr.newThreadIDSet[status.ID] = mark
}
} }
func (sr *statusRethreader) getParents(ctx context.Context, tx bun.Tx) error { func (sr *statusRethreader) getParents(ctx context.Context, tx bun.Tx) error {
@ -674,7 +496,7 @@ func (sr *statusRethreader) getParents(ctx context.Context, tx bun.Tx) error {
// Select next parent status. // Select next parent status.
if err := tx.NewSelect(). if err := tx.NewSelect().
Model(&parent). Model(&parent).
Column("id", "in_reply_to_id", "thread_id", "thread_id_new"). Column("id", "in_reply_to_id", "thread_id").
Where("? = ?", bun.Ident("id"), id). Where("? = ?", bun.Ident("id"), id).
Scan(ctx); err != nil && err != db.ErrNoEntries { Scan(ctx); err != nil && err != db.ErrNoEntries {
return err return err
@ -713,7 +535,7 @@ func (sr *statusRethreader) getChildren(ctx context.Context, tx bun.Tx, idx int)
// Select children of ID. // Select children of ID.
if err := tx.NewSelect(). if err := tx.NewSelect().
Model(&sr.statuses). Model(&sr.statuses).
Column("id", "thread_id", "thread_id_new"). Column("id", "thread_id").
Where("? = ?", bun.Ident("in_reply_to_id"), id). Where("? = ?", bun.Ident("in_reply_to_id"), id).
Scan(ctx); err != nil && err != db.ErrNoEntries { Scan(ctx); err != nil && err != db.ErrNoEntries {
return err return err
@ -738,19 +560,14 @@ func (sr *statusRethreader) getStragglers(ctx context.Context, tx bun.Tx, idx in
clear(sr.statuses) clear(sr.statuses)
sr.statuses = sr.statuses[:0] sr.statuses = sr.statuses[:0]
// Dedupe thread IDs before query
// to avoid ludicrous "IN" clause.
threadIDs := sr.threadIDs[idx:]
threadIDs = xslices.Deduplicate(threadIDs)
// Select stragglers that // Select stragglers that
// also have thread IDs. // also have thread IDs.
if err := tx.NewSelect(). if err := tx.NewSelect().
Model(&sr.statuses). Model(&sr.statuses).
Column("id", "thread_id", "in_reply_to_id", "thread_id_new"). Column("id", "thread_id", "in_reply_to_id").
Where("? IN (?) AND ? NOT IN (?)", Where("? IN (?) AND ? NOT IN (?)",
bun.Ident("thread_id"), bun.Ident("thread_id"),
bun.In(threadIDs), bun.In(sr.threadIDs[idx:]),
bun.Ident("id"), bun.Ident("id"),
bun.In(sr.statusIDs), bun.In(sr.statusIDs),
). ).

View file

@ -23,45 +23,45 @@ import (
// Status represents a user-created 'post' or 'status' in the database, either remote or local // Status represents a user-created 'post' or 'status' in the database, either remote or local
type Status struct { type Status struct {
ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database
CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created
EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set) EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set)
FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched. FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched.
PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time. PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time.
URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status
URL string `bun:",nullzero"` // web url for viewing this status URL string `bun:",nullzero"` // web url for viewing this status
Content string `bun:""` // Content HTML for this status. Content string `bun:""` // Content HTML for this status.
AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status
TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status
MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status
EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status
Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account? Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account?
AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status? AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status?
AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status
InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to
InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to
InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to
InReplyTo *Status `bun:"-"` // status corresponding to inReplyToID InReplyTo *Status `bun:"-"` // status corresponding to inReplyToID
BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of
BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes. BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes.
BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status
BoostOf *Status `bun:"-"` // status that corresponds to boostOfID BoostOf *Status `bun:"-"` // status that corresponds to boostOfID
ThreadID string `bun:"type:CHAR(26),nullzero,notnull,default:'00000000000000000000000000'"` // id of the thread to which this status belongs ThreadID string `bun:"type:CHAR(26),nullzero,notnull,default:00000000000000000000000000"` // id of the thread to which this status belongs
EditIDs []string `bun:"edits,array"` // EditIDs []string `bun:"edits,array"` //
PollID string `bun:"type:CHAR(26),nullzero"` // PollID string `bun:"type:CHAR(26),nullzero"` //
ContentWarning string `bun:",nullzero"` // Content warning HTML for this status. ContentWarning string `bun:",nullzero"` // Content warning HTML for this status.
ContentWarningText string `bun:""` // Original text of the content warning without formatting ContentWarningText string `bun:""` // Original text of the content warning without formatting
Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status
Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive? Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive?
Language string `bun:",nullzero"` // what language is this status written in? Language string `bun:",nullzero"` // what language is this status written in?
CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status? CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status?
ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!. ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!.
Text string `bun:""` // Original text of the status without formatting Text string `bun:""` // Original text of the status without formatting
ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status
Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s) Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s)
PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed. PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed.
PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB. PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB.
ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to. ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to.
} }
// enumType is the type we (at least, should) use // enumType is the type we (at least, should) use

View file

@ -21,10 +21,7 @@ import (
"time" "time"
) )
// Status represents a user-created 'post' or 'status' in the database, either remote or local. // Status represents a user-created 'post' or 'status' in the database, either remote or local
//
// Note: this model differs from an exact representation of the old model at the time of migration,
// as it includes the intermediate field "ThreadIDNew", which is only used during the migration.
type Status struct { type Status struct {
ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database
CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created
@ -63,9 +60,6 @@ type Status struct {
PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed. PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed.
PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB. PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB.
ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to. ApprovedByURI string `bun:",nullzero"` // URI of an Accept Activity that approves the Announce or Create Activity that this status was/will be attached to.
// This field is *only* used during the migration, it was not on the original status model.
ThreadIDNew string `bun:"type:CHAR(26),nullzero,notnull,default:'00000000000000000000000000'"`
} }
// enumType is the type we (at least, should) use // enumType is the type we (at least, should) use

View file

@ -1,24 +0,0 @@
// GoToSocial
// Copyright (C) GoToSocial Authors admin@gotosocial.org
// SPDX-License-Identifier: AGPL-3.0-or-later
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package util
// Status is a helper type specifically
// for updating the thread ID of a status.
type Status struct {
ID string `bun:"type:CHAR(26)"`
}

View file

@ -66,6 +66,98 @@ func doWALCheckpoint(ctx context.Context, db *bun.DB) error {
return nil return nil
} }
// batchUpdateByID performs the given updateQuery with updateArgs
// over the entire given table, batching by the ID of batchByCol.
func batchUpdateByID(
ctx context.Context,
tx bun.Tx,
table string,
batchByCol string,
updateQuery string,
updateArgs []any,
) error {
// Get a count of all in table.
total, err := tx.NewSelect().
Table(table).
Count(ctx)
if err != nil {
return gtserror.Newf("error selecting total count: %w", err)
}
// Query batch size
// in number of rows.
const batchsz = 5000
// Stores highest batch value
// used in iterate queries,
// starting at highest possible.
highest := id.Highest
// Total updated rows.
var updated int
for {
// Limit to batchsz
// items at once.
batchQ := tx.
NewSelect().
Table(table).
Column(batchByCol).
Where("? < ?", bun.Ident(batchByCol), highest).
OrderExpr("? DESC", bun.Ident(batchByCol)).
Limit(batchsz)
// Finalize UPDATE to act only on batch.
qStr := updateQuery + " WHERE ? IN (?)"
args := append(slices.Clone(updateArgs),
bun.Ident(batchByCol),
batchQ,
)
// Execute the prepared raw query with arguments.
res, err := tx.NewRaw(qStr, args...).Exec(ctx)
if err != nil {
return gtserror.Newf("error updating old column values: %w", err)
}
// Check how many items we updated.
thisUpdated, err := res.RowsAffected()
if err != nil {
return gtserror.Newf("error counting affected rows: %w", err)
}
if thisUpdated == 0 {
// Nothing updated
// means we're done.
break
}
// Update the overall count.
updated += int(thisUpdated)
// Log helpful message to admin.
log.Infof(ctx, "migrated %d of %d %s (up to %s)",
updated, total, table, highest)
// Get next highest
// id for next batch.
if err := tx.
NewSelect().
With("batch_query", batchQ).
ColumnExpr("min(?) FROM ?", bun.Ident(batchByCol), bun.Ident("batch_query")).
Scan(ctx, &highest); err != nil {
return gtserror.Newf("error selecting next highest: %w", err)
}
}
if total != int(updated) {
// Return error here in order to rollback the whole transaction.
return fmt.Errorf("total=%d does not match updated=%d", total, updated)
}
return nil
}
// convertEnums performs a transaction that converts // convertEnums performs a transaction that converts
// a table's column of our old-style enums (strings) to // a table's column of our old-style enums (strings) to
// more performant and space-saving integer types. // more performant and space-saving integer types.

View file

@ -277,6 +277,18 @@ func (d *Dereferencer) enrichStatusSafely(
) (*gtsmodel.Status, ap.Statusable, bool, error) { ) (*gtsmodel.Status, ap.Statusable, bool, error) {
uriStr := status.URI uriStr := status.URI
var isNew bool
// Check if this is a new status (to us).
if isNew = (status.ID == ""); !isNew {
// This is an existing status, first try to populate it. This
// is required by the checks below for existing tags, media etc.
if err := d.state.DB.PopulateStatus(ctx, status); err != nil {
log.Errorf(ctx, "error populating existing status %s: %v", uriStr, err)
}
}
// Acquire per-URI deref lock, wraping unlock // Acquire per-URI deref lock, wraping unlock
// to safely defer in case of panic, while still // to safely defer in case of panic, while still
// performing more granular unlocks when needed. // performing more granular unlocks when needed.
@ -284,23 +296,6 @@ func (d *Dereferencer) enrichStatusSafely(
unlock = util.DoOnce(unlock) unlock = util.DoOnce(unlock)
defer unlock() defer unlock()
var err error
var isNew bool
// Check if this is a new status (to us).
if isNew = (status.ID == ""); !isNew {
// We reload the existing status, just to ensure we have the
// latest version of it. e.g. another racing thread might have
// just input a change but we still have an old status copy.
//
// Note: returned status will be fully populated, required below.
status, err = d.state.DB.GetStatusByID(ctx, status.ID)
if err != nil {
return nil, nil, false, gtserror.Newf("error getting up-to-date existing status: %w", err)
}
}
// Perform status enrichment with passed vars. // Perform status enrichment with passed vars.
latest, statusable, err := d.enrichStatus(ctx, latest, statusable, err := d.enrichStatus(ctx,
requestUser, requestUser,
@ -484,10 +479,12 @@ func (d *Dereferencer) enrichStatus(
// Ensure the final parsed status URI or URL matches // Ensure the final parsed status URI or URL matches
// the input URI we fetched (or received) it as. // the input URI we fetched (or received) it as.
matches, err := util.URIMatches(uri, append( matches, err := util.URIMatches(uri,
ap.GetURL(statusable), // status URL(s) append(
ap.GetJSONLDId(statusable), // status URI ap.GetURL(statusable), // status URL(s)
)...) ap.GetJSONLDId(statusable), // status URI
)...,
)
if err != nil { if err != nil {
return nil, nil, gtserror.Newf( return nil, nil, gtserror.Newf(
"error checking dereferenced status uri %s: %w", "error checking dereferenced status uri %s: %w",

View file

@ -18,6 +18,7 @@
package dereferencing_test package dereferencing_test
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
"time" "time"
@ -236,7 +237,9 @@ func (suite *StatusTestSuite) TestDereferenceStatusWithNonMatchingURI() {
} }
func (suite *StatusTestSuite) TestDereferencerRefreshStatusUpdated() { func (suite *StatusTestSuite) TestDereferencerRefreshStatusUpdated() {
ctx := suite.T().Context() // Create a new context for this test.
ctx, cncl := context.WithCancel(suite.T().Context())
defer cncl()
// The local account we will be fetching statuses as. // The local account we will be fetching statuses as.
fetchingAccount := suite.testAccounts["local_account_1"] fetchingAccount := suite.testAccounts["local_account_1"]
@ -340,104 +343,6 @@ func (suite *StatusTestSuite) TestDereferencerRefreshStatusUpdated() {
} }
} }
func (suite *StatusTestSuite) TestDereferencerRefreshStatusRace() {
ctx := suite.T().Context()
// The local account we will be fetching statuses as.
fetchingAccount := suite.testAccounts["local_account_1"]
// The test status in question that we will be dereferencing from "remote".
testURIStr := "https://unknown-instance.com/users/brand_new_person/statuses/01FE4NTHKWW7THT67EF10EB839"
testURI := testrig.URLMustParse(testURIStr)
testStatusable := suite.client.TestRemoteStatuses[testURIStr]
// Fetch the remote status first to load it into instance.
testStatus, statusable, err := suite.dereferencer.GetStatusByURI(ctx,
fetchingAccount.Username,
testURI,
)
suite.NotNil(statusable)
suite.NoError(err)
// Take a snapshot of current
// state of the test status.
beforeEdit := copyStatus(testStatus)
// Edit the "remote" statusable obj.
suite.editStatusable(testStatusable,
"updated status content!",
"CW: edited status content",
beforeEdit.Language, // no change
*beforeEdit.Sensitive, // no change
beforeEdit.AttachmentIDs, // no change
getPollOptions(beforeEdit), // no change
getPollVotes(beforeEdit), // no change
time.Now(),
)
// Refresh with a given statusable to updated to edited copy.
afterEdit, statusable, err := suite.dereferencer.RefreshStatus(ctx,
fetchingAccount.Username,
testStatus,
testStatusable,
instantFreshness,
)
suite.NotNil(statusable)
suite.NoError(err)
// verify updated status details.
suite.verifyEditedStatusUpdate(
// the original status
// before any changes.
beforeEdit,
// latest status
// being tested.
afterEdit,
// expected current state.
&gtsmodel.StatusEdit{
Content: "updated status content!",
ContentWarning: "CW: edited status content",
Language: beforeEdit.Language,
Sensitive: beforeEdit.Sensitive,
AttachmentIDs: beforeEdit.AttachmentIDs,
PollOptions: getPollOptions(beforeEdit),
PollVotes: getPollVotes(beforeEdit),
// createdAt never changes
},
// expected historic edit.
&gtsmodel.StatusEdit{
Content: beforeEdit.Content,
ContentWarning: beforeEdit.ContentWarning,
Language: beforeEdit.Language,
Sensitive: beforeEdit.Sensitive,
AttachmentIDs: beforeEdit.AttachmentIDs,
PollOptions: getPollOptions(beforeEdit),
PollVotes: getPollVotes(beforeEdit),
CreatedAt: beforeEdit.UpdatedAt(),
},
)
// Now make another attempt to refresh, using the old copy of the
// status. This should still successfully update based on our passed
// freshness window, but it *should* refetch the provided status to
// check for race shenanigans and realize that no edit has occurred.
afterBodge, statusable, err := suite.dereferencer.RefreshStatus(ctx,
fetchingAccount.Username,
beforeEdit,
testStatusable,
instantFreshness,
)
suite.NotNil(statusable)
suite.NoError(err)
// Check that no further edit occurred on status.
suite.Equal(afterEdit.EditIDs, afterBodge.EditIDs)
}
// editStatusable updates the given statusable attributes. // editStatusable updates the given statusable attributes.
// note that this acts on the original object, no copying. // note that this acts on the original object, no copying.
func (suite *StatusTestSuite) editStatusable( func (suite *StatusTestSuite) editStatusable(

View file

@ -27,56 +27,56 @@ import (
// Status represents a user-created 'post' or 'status' in the database, either remote or local // Status represents a user-created 'post' or 'status' in the database, either remote or local
type Status struct { type Status struct {
ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database ID string `bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database
CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created CreatedAt time.Time `bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created
EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set) EditedAt time.Time `bun:"type:timestamptz,nullzero"` // when this status was last edited (if set)
FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched. FetchedAt time.Time `bun:"type:timestamptz,nullzero"` // when was item (remote) last fetched.
PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time. PinnedAt time.Time `bun:"type:timestamptz,nullzero"` // Status was pinned by owning account at this time.
URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status URI string `bun:",unique,nullzero,notnull"` // activitypub URI of this status
URL string `bun:",nullzero"` // web url for viewing this status URL string `bun:",nullzero"` // web url for viewing this status
Content string `bun:""` // Content HTML for this status. Content string `bun:""` // Content HTML for this status.
AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status AttachmentIDs []string `bun:"attachments,array"` // Database IDs of any media attachments associated with this status
Attachments []*MediaAttachment `bun:"attached_media,rel:has-many"` // Attachments corresponding to attachmentIDs Attachments []*MediaAttachment `bun:"attached_media,rel:has-many"` // Attachments corresponding to attachmentIDs
TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status TagIDs []string `bun:"tags,array"` // Database IDs of any tags used in this status
Tags []*Tag `bun:"attached_tags,m2m:status_to_tags"` // Tags corresponding to tagIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation Tags []*Tag `bun:"attached_tags,m2m:status_to_tags"` // Tags corresponding to tagIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation
MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status MentionIDs []string `bun:"mentions,array"` // Database IDs of any mentions in this status
Mentions []*Mention `bun:"attached_mentions,rel:has-many"` // Mentions corresponding to mentionIDs Mentions []*Mention `bun:"attached_mentions,rel:has-many"` // Mentions corresponding to mentionIDs
EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status EmojiIDs []string `bun:"emojis,array"` // Database IDs of any emojis used in this status
Emojis []*Emoji `bun:"attached_emojis,m2m:status_to_emojis"` // Emojis corresponding to emojiIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation Emojis []*Emoji `bun:"attached_emojis,m2m:status_to_emojis"` // Emojis corresponding to emojiIDs. https://bun.uptrace.dev/guide/relations.html#many-to-many-relation
Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account? Local *bool `bun:",nullzero,notnull,default:false"` // is this status from a local account?
AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status? AccountID string `bun:"type:CHAR(26),nullzero,notnull"` // which account posted this status?
Account *Account `bun:"rel:belongs-to"` // account corresponding to accountID Account *Account `bun:"rel:belongs-to"` // account corresponding to accountID
AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status AccountURI string `bun:",nullzero,notnull"` // activitypub uri of the owner of this status
InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to InReplyToID string `bun:"type:CHAR(26),nullzero"` // id of the status this status replies to
InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to InReplyToURI string `bun:",nullzero"` // activitypub uri of the status this status is a reply to
InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to InReplyToAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that this status replies to
InReplyTo *Status `bun:"-"` // status corresponding to inReplyToID InReplyTo *Status `bun:"-"` // status corresponding to inReplyToID
InReplyToAccount *Account `bun:"rel:belongs-to"` // account corresponding to inReplyToAccountID InReplyToAccount *Account `bun:"rel:belongs-to"` // account corresponding to inReplyToAccountID
BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of BoostOfID string `bun:"type:CHAR(26),nullzero"` // id of the status this status is a boost of
BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes. BoostOfURI string `bun:"-"` // URI of the status this status is a boost of; field not inserted in the db, just for dereferencing purposes.
BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status BoostOfAccountID string `bun:"type:CHAR(26),nullzero"` // id of the account that owns the boosted status
BoostOf *Status `bun:"-"` // status that corresponds to boostOfID BoostOf *Status `bun:"-"` // status that corresponds to boostOfID
BoostOfAccount *Account `bun:"rel:belongs-to"` // account that corresponds to boostOfAccountID BoostOfAccount *Account `bun:"rel:belongs-to"` // account that corresponds to boostOfAccountID
ThreadID string `bun:"type:CHAR(26),nullzero,notnull,default:'00000000000000000000000000'"` // id of the thread to which this status belongs ThreadID string `bun:"type:CHAR(26),nullzero,notnull,default:00000000000000000000000000"` // id of the thread to which this status belongs
EditIDs []string `bun:"edits,array"` // IDs of status edits for this status, ordered from smallest (oldest) -> largest (newest) ID. EditIDs []string `bun:"edits,array"` // IDs of status edits for this status, ordered from smallest (oldest) -> largest (newest) ID.
Edits []*StatusEdit `bun:"-"` // Edits of this status, ordered from oldest -> newest edit. Edits []*StatusEdit `bun:"-"` // Edits of this status, ordered from oldest -> newest edit.
PollID string `bun:"type:CHAR(26),nullzero"` // PollID string `bun:"type:CHAR(26),nullzero"` //
Poll *Poll `bun:"-"` // Poll *Poll `bun:"-"` //
ContentWarning string `bun:",nullzero"` // Content warning HTML for this status. ContentWarning string `bun:",nullzero"` // Content warning HTML for this status.
ContentWarningText string `bun:""` // Original text of the content warning without formatting ContentWarningText string `bun:""` // Original text of the content warning without formatting
Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status Visibility Visibility `bun:",nullzero,notnull"` // visibility entry for this status
Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive? Sensitive *bool `bun:",nullzero,notnull,default:false"` // mark the status as sensitive?
Language string `bun:",nullzero"` // what language is this status written in? Language string `bun:",nullzero"` // what language is this status written in?
CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status? CreatedWithApplicationID string `bun:"type:CHAR(26),nullzero"` // Which application was used to create this status?
CreatedWithApplication *Application `bun:"rel:belongs-to"` // application corresponding to createdWithApplicationID CreatedWithApplication *Application `bun:"rel:belongs-to"` // application corresponding to createdWithApplicationID
ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!. ActivityStreamsType string `bun:",nullzero,notnull"` // What is the activitystreams type of this status? See: https://www.w3.org/TR/activitystreams-vocabulary/#object-types. Will probably almost always be Note but who knows!.
Text string `bun:""` // Original text of the status without formatting Text string `bun:""` // Original text of the status without formatting
ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status ContentType StatusContentType `bun:",nullzero"` // Content type used to process the original text of the status
Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s) Federated *bool `bun:",notnull"` // This status will be federated beyond the local timeline(s)
InteractionPolicy *InteractionPolicy `bun:""` // InteractionPolicy for this status. If null then the default InteractionPolicy should be assumed for this status's Visibility. Always null for boost wrappers. InteractionPolicy *InteractionPolicy `bun:""` // InteractionPolicy for this status. If null then the default InteractionPolicy should be assumed for this status's Visibility. Always null for boost wrappers.
PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed. PendingApproval *bool `bun:",nullzero,notnull,default:false"` // If true then status is a reply or boost wrapper that must be Approved by the reply-ee or boost-ee before being fully distributed.
PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB. PreApproved bool `bun:"-"` // If true, then status is a reply to or boost wrapper of a status on our instance, has permission to do the interaction, and an Accept should be sent out for it immediately. Field not stored in the DB.
ApprovedByURI string `bun:",nullzero"` // URI of *either* an Accept Activity, or a ReplyAuthorization or AnnounceAuthorization, which approves the Announce, Create or interaction request Activity that this status was/will be attached to. ApprovedByURI string `bun:",nullzero"` // URI of *either* an Accept Activity, or a ReplyAuthorization or AnnounceAuthorization, which approves the Announce, Create or interaction request Activity that this status was/will be attached to.
} }
// GetID implements timeline.Timelineable{}. // GetID implements timeline.Timelineable{}.

View file

@ -31,7 +31,7 @@ import (
// elements to reduce overall memory usage. // elements to reduce overall memory usage.
type SimpleQueue[T any] struct { type SimpleQueue[T any] struct {
l list.List[T] l list.List[T]
p mempool.UnsafeSimplePool p mempool.UnsafePool
w chan struct{} w chan struct{}
m sync.Mutex m sync.Mutex
} }

View file

@ -1,3 +1,3 @@
# go-mempool # go-mempool
very simple memory pool implementation very simple memory pool implementation

View file

@ -1,17 +1,17 @@
package mempool package mempool
import ( import (
"sync"
"sync/atomic"
"unsafe" "unsafe"
"golang.org/x/sys/cpu"
) )
// Pool provides a form of SimplePool const DefaultDirtyFactor = 128
// with the addition of concurrency safety.
// Pool provides a type-safe form
// of UnsafePool using generics.
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
type Pool[T any] struct { type Pool[T any] struct {
UnsafePool
// New is an optionally provided // New is an optionally provided
// allocator used when no value // allocator used when no value
@ -21,119 +21,79 @@ type Pool[T any] struct {
// Reset is an optionally provided // Reset is an optionally provided
// value resetting function called // value resetting function called
// on passed value to Put(). // on passed value to Put().
Reset func(T) bool Reset func(T)
}
func NewPool[T any](new func() T, reset func(T) bool, check func(current, victim int) bool) Pool[T] { UnsafePool
return Pool[T]{
New: new,
Reset: reset,
UnsafePool: NewUnsafePool(check),
}
} }
func (p *Pool[T]) Get() T { func (p *Pool[T]) Get() T {
if ptr := p.UnsafePool.Get(); ptr != nil { if ptr := p.UnsafePool.Get(); ptr != nil {
return *(*T)(ptr) return *(*T)(ptr)
} else if p.New != nil {
return p.New()
} }
var t T var z T
if p.New != nil { return z
t = p.New()
}
return t
} }
func (p *Pool[T]) Put(t T) { func (p *Pool[T]) Put(t T) {
if p.Reset != nil && !p.Reset(t) { if p.Reset != nil {
return p.Reset(t)
} }
ptr := unsafe.Pointer(&t) ptr := unsafe.Pointer(&t)
p.UnsafePool.Put(ptr) p.UnsafePool.Put(ptr)
} }
// UnsafePool provides a form of UnsafeSimplePool // UnsafePool provides an incredibly
// with the addition of concurrency safety. // simple memory pool implementation
// that stores ptrs to memory values,
// and regularly flushes internal pool
// structures according to DirtyFactor.
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
type UnsafePool struct { type UnsafePool struct {
internal
_ [cache_line_size - unsafe.Sizeof(internal{})%cache_line_size]byte // DirtyFactor determines the max
// number of $dirty count before
// pool is garbage collected. Where:
// $dirty = len(current) - len(victim)
DirtyFactor int
current []unsafe.Pointer
victim []unsafe.Pointer
} }
func NewUnsafePool(check func(current, victim int) bool) UnsafePool { func (p *UnsafePool) Get() unsafe.Pointer {
return UnsafePool{internal: internal{ // First try current list.
pool: UnsafeSimplePool{Check: check}, if len(p.current) > 0 {
}} ptr := p.current[len(p.current)-1]
} p.current = p.current[:len(p.current)-1]
const (
// current platform integer size.
int_size = 32 << (^uint(0) >> 63)
// platform CPU cache line size to avoid false sharing.
cache_line_size = unsafe.Sizeof(cpu.CacheLinePad{})
)
type internal struct {
// fast-access ring-buffer of
// pointers accessible by index.
//
// if Go ever exposes goroutine IDs
// to us we can make this a lot faster.
ring [int_size / 4]unsafe.Pointer
index atomic.Uint64
// underlying pool and
// slow mutex protection.
pool UnsafeSimplePool
mutex sync.Mutex
}
func (p *internal) Check(fn func(current, victim int) bool) func(current, victim int) bool {
p.mutex.Lock()
if fn == nil {
if p.pool.Check == nil {
fn = defaultCheck
} else {
fn = p.pool.Check
}
} else {
p.pool.Check = fn
}
p.mutex.Unlock()
return fn
}
func (p *internal) Get() unsafe.Pointer {
if ptr := atomic.SwapPointer(&p.ring[p.index.Load()%uint64(cap(p.ring))], nil); ptr != nil {
p.index.Add(^uint64(0)) // i.e. -1
return ptr return ptr
} }
p.mutex.Lock()
ptr := p.pool.Get()
p.mutex.Unlock()
return ptr
}
func (p *internal) Put(ptr unsafe.Pointer) { // Fallback to victim.
if atomic.CompareAndSwapPointer(&p.ring[p.index.Add(1)%uint64(cap(p.ring))], nil, ptr) { if len(p.victim) > 0 {
return ptr := p.victim[len(p.victim)-1]
p.victim = p.victim[:len(p.victim)-1]
return ptr
} }
p.mutex.Lock()
p.pool.Put(ptr) return nil
p.mutex.Unlock()
} }
func (p *internal) GC() { func (p *UnsafePool) Put(ptr unsafe.Pointer) {
for i := range p.ring { p.current = append(p.current, ptr)
atomic.StorePointer(&p.ring[i], nil)
// Get dirty factor.
df := p.DirtyFactor
if df == 0 {
df = DefaultDirtyFactor
} }
p.mutex.Lock()
p.pool.GC()
p.mutex.Unlock()
}
func (p *internal) Size() int { if len(p.current)-len(p.victim) > df {
p.mutex.Lock() // Garbage collection!
sz := p.pool.Size() p.victim = p.current
p.mutex.Unlock() p.current = nil
return sz }
} }

View file

@ -1,111 +0,0 @@
package mempool
import (
"unsafe"
)
// SimplePool provides a type-safe form
// of UnsafePool using generics.
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
type SimplePool[T any] struct {
UnsafeSimplePool
// New is an optionally provided
// allocator used when no value
// is available for use in pool.
New func() T
// Reset is an optionally provided
// value resetting function called
// on passed value to Put().
Reset func(T) bool
}
func (p *SimplePool[T]) Get() T {
if ptr := p.UnsafeSimplePool.Get(); ptr != nil {
return *(*T)(ptr)
}
var t T
if p.New != nil {
t = p.New()
}
return t
}
func (p *SimplePool[T]) Put(t T) {
if p.Reset != nil && !p.Reset(t) {
return
}
ptr := unsafe.Pointer(&t)
p.UnsafeSimplePool.Put(ptr)
}
// UnsafeSimplePool provides an incredibly
// simple memory pool implementation
// that stores ptrs to memory values,
// and regularly flushes internal pool
// structures according to CheckGC().
//
// Note it is NOT safe for concurrent
// use, you must protect it yourself!
type UnsafeSimplePool struct {
// Check determines how often to flush
// internal pools based on underlying
// current and victim pool sizes. It gets
// called on every pool Put() operation.
//
// A flush will start a new current
// pool, make victim the old current,
// and drop the existing victim pool.
Check func(current, victim int) bool
current []unsafe.Pointer
victim []unsafe.Pointer
}
func (p *UnsafeSimplePool) Get() unsafe.Pointer {
// First try current list.
if len(p.current) > 0 {
ptr := p.current[len(p.current)-1]
p.current = p.current[:len(p.current)-1]
return ptr
}
// Fallback to victim.
if len(p.victim) > 0 {
ptr := p.victim[len(p.victim)-1]
p.victim = p.victim[:len(p.victim)-1]
return ptr
}
return nil
}
func (p *UnsafeSimplePool) Put(ptr unsafe.Pointer) {
p.current = append(p.current, ptr)
// Get GC check func.
if p.Check == nil {
p.Check = defaultCheck
}
if p.Check(len(p.current), len(p.victim)) {
p.GC() // garbage collection time!
}
}
func (p *UnsafeSimplePool) GC() {
p.victim = p.current
p.current = nil
}
func (p *UnsafeSimplePool) Size() int {
return len(p.current) + len(p.victim)
}
func defaultCheck(current, victim int) bool {
return current-victim > 128 || victim > 256
}

View file

@ -26,13 +26,14 @@ const (
type MutexMap struct { type MutexMap struct {
mapmu sync.Mutex mapmu sync.Mutex
mumap hashmap mumap hashmap
mupool mempool.UnsafeSimplePool mupool mempool.UnsafePool
} }
// checkInit ensures MutexMap is initialized (UNSAFE). // checkInit ensures MutexMap is initialized (UNSAFE).
func (mm *MutexMap) checkInit() { func (mm *MutexMap) checkInit() {
if mm.mumap.m == nil { if mm.mumap.m == nil {
mm.mumap.init(0) mm.mumap.init(0)
mm.mupool.DirtyFactor = 256
} }
} }
@ -174,9 +175,13 @@ func (mu *rwmutex) Lock(lt uint8) bool {
// sleeping goroutines waiting on this mutex. // sleeping goroutines waiting on this mutex.
func (mu *rwmutex) Unlock() bool { func (mu *rwmutex) Unlock() bool {
switch mu.l--; { switch mu.l--; {
case mu.l > 0 && mu.t == lockTypeWrite:
panic("BUG: multiple writer locks")
case mu.l < 0:
panic("BUG: negative lock count")
case mu.l == 0: case mu.l == 0:
// Fully // Fully unlocked.
// unlock.
mu.t = 0 mu.t = 0
// Awake all blocked goroutines and check // Awake all blocked goroutines and check
@ -192,15 +197,11 @@ func (mu *rwmutex) Unlock() bool {
// (before == after) => (waiters = 0) // (before == after) => (waiters = 0)
return (before == after) return (before == after)
case mu.l < 0: default:
panic("BUG: negative lock count") // i.e. mutex still
case mu.t == lockTypeWrite: // locked by others.
panic("BUG: multiple write locks") return false
} }
// i.e. mutex still
// locked by others.
return false
} }
// WaitRelock expects a mutex to be passed in, already in the // WaitRelock expects a mutex to be passed in, already in the

View file

@ -4,10 +4,10 @@ import (
"os" "os"
"reflect" "reflect"
"strings" "strings"
"sync"
"unsafe" "unsafe"
"codeberg.org/gruf/go-byteutil" "codeberg.org/gruf/go-byteutil"
"codeberg.org/gruf/go-mempool"
"codeberg.org/gruf/go-xunsafe" "codeberg.org/gruf/go-xunsafe"
) )
@ -371,15 +371,17 @@ type index_entry struct {
key string key string
} }
var index_entry_pool mempool.UnsafePool var index_entry_pool sync.Pool
// new_index_entry returns a new prepared index_entry. // new_index_entry returns a new prepared index_entry.
func new_index_entry() *index_entry { func new_index_entry() *index_entry {
if ptr := index_entry_pool.Get(); ptr != nil { v := index_entry_pool.Get()
return (*index_entry)(ptr) if v == nil {
e := new(index_entry)
e.elem.data = unsafe.Pointer(e)
v = e
} }
entry := new(index_entry) entry := v.(*index_entry)
entry.elem.data = unsafe.Pointer(entry)
return entry return entry
} }
@ -394,8 +396,7 @@ func free_index_entry(entry *index_entry) {
entry.key = "" entry.key = ""
entry.index = nil entry.index = nil
entry.item = nil entry.item = nil
ptr := unsafe.Pointer(entry) index_entry_pool.Put(entry)
index_entry_pool.Put(ptr)
} }
func is_unique(f uint8) bool { func is_unique(f uint8) bool {

View file

@ -2,9 +2,8 @@ package structr
import ( import (
"os" "os"
"sync"
"unsafe" "unsafe"
"codeberg.org/gruf/go-mempool"
) )
type indexed_item struct { type indexed_item struct {
@ -20,15 +19,17 @@ type indexed_item struct {
indexed []*index_entry indexed []*index_entry
} }
var indexed_item_pool mempool.UnsafePool var indexed_item_pool sync.Pool
// new_indexed_item returns a new prepared indexed_item. // new_indexed_item returns a new prepared indexed_item.
func new_indexed_item() *indexed_item { func new_indexed_item() *indexed_item {
if ptr := indexed_item_pool.Get(); ptr != nil { v := indexed_item_pool.Get()
return (*indexed_item)(ptr) if v == nil {
i := new(indexed_item)
i.elem.data = unsafe.Pointer(i)
v = i
} }
item := new(indexed_item) item := v.(*indexed_item)
item.elem.data = unsafe.Pointer(item)
return item return item
} }
@ -42,8 +43,7 @@ func free_indexed_item(item *indexed_item) {
return return
} }
item.data = nil item.data = nil
ptr := unsafe.Pointer(item) indexed_item_pool.Put(item)
indexed_item_pool.Put(ptr)
} }
// drop_index will drop the given index entry from item's indexed. // drop_index will drop the given index entry from item's indexed.

View file

@ -2,9 +2,8 @@ package structr
import ( import (
"os" "os"
"sync"
"unsafe" "unsafe"
"codeberg.org/gruf/go-mempool"
) )
// elem represents an elem // elem represents an elem
@ -28,14 +27,16 @@ type list struct {
len int len int
} }
var list_pool mempool.UnsafePool var list_pool sync.Pool
// new_list returns a new prepared list. // new_list returns a new prepared list.
func new_list() *list { func new_list() *list {
if ptr := list_pool.Get(); ptr != nil { v := list_pool.Get()
return (*list)(ptr) if v == nil {
v = new(list)
} }
return new(list) list := v.(*list)
return list
} }
// free_list releases the list. // free_list releases the list.
@ -47,13 +48,11 @@ func free_list(list *list) {
os.Stderr.WriteString(msg + "\n") os.Stderr.WriteString(msg + "\n")
return return
} }
ptr := unsafe.Pointer(list) list_pool.Put(list)
list_pool.Put(ptr)
} }
// push_front will push the given elem to front (head) of list. // push_front will push the given elem to front (head) of list.
func (l *list) push_front(elem *list_elem) { func (l *list) push_front(elem *list_elem) {
// Set new head. // Set new head.
oldHead := l.head oldHead := l.head
l.head = elem l.head = elem
@ -67,14 +66,12 @@ func (l *list) push_front(elem *list_elem) {
l.tail = elem l.tail = elem
} }
// Incr // Incr count
// count
l.len++ l.len++
} }
// push_back will push the given elem to back (tail) of list. // push_back will push the given elem to back (tail) of list.
func (l *list) push_back(elem *list_elem) { func (l *list) push_back(elem *list_elem) {
// Set new tail. // Set new tail.
oldTail := l.tail oldTail := l.tail
l.tail = elem l.tail = elem
@ -88,8 +85,7 @@ func (l *list) push_back(elem *list_elem) {
l.head = elem l.head = elem
} }
// Incr // Incr count
// count
l.len++ l.len++
} }
@ -135,8 +131,7 @@ func (l *list) insert(elem *list_elem, at *list_elem) {
elem.next = oldNext elem.next = oldNext
} }
// Incr // Incr count
// count
l.len++ l.len++
} }
@ -179,7 +174,6 @@ func (l *list) remove(elem *list_elem) {
prev.next = next prev.next = next
} }
// Decr // Decr count
// count
l.len-- l.len--
} }

View file

@ -146,7 +146,7 @@ func find_field(t xunsafe.TypeIter, names []string) (sfield struct_field, ftype
sfield.mangle = mangler.Get(t) sfield.mangle = mangler.Get(t)
// Calculate zero value string. // Calculate zero value string.
zptr := zero_value_ptr(o, sfield.offsets) zptr := zero_value_field(o, sfield.offsets)
zstr := string(sfield.mangle(nil, zptr)) zstr := string(sfield.mangle(nil, zptr))
sfield.zerostr = zstr sfield.zerostr = zstr
sfield.zero = zptr sfield.zero = zptr
@ -154,9 +154,7 @@ func find_field(t xunsafe.TypeIter, names []string) (sfield struct_field, ftype
return return
} }
// zero_value iterates the type contained in TypeIter{} along the given // zero_value ...
// next_offset{} values, creating new ptrs where necessary, returning the
// zero reflect.Value{} after fully iterating the next_offset{} slice.
func zero_value(t xunsafe.TypeIter, offsets []next_offset) reflect.Value { func zero_value(t xunsafe.TypeIter, offsets []next_offset) reflect.Value {
v := reflect.New(t.Type).Elem() v := reflect.New(t.Type).Elem()
for _, offset := range offsets { for _, offset := range offsets {
@ -177,8 +175,8 @@ func zero_value(t xunsafe.TypeIter, offsets []next_offset) reflect.Value {
return v return v
} }
// zero_value_ptr returns the unsafe pointer address of the result of zero_value(). // zero_value_field ...
func zero_value_ptr(t xunsafe.TypeIter, offsets []next_offset) unsafe.Pointer { func zero_value_field(t xunsafe.TypeIter, offsets []next_offset) unsafe.Pointer {
return zero_value(t, offsets).Addr().UnsafePointer() return zero_value(t, offsets).Addr().UnsafePointer()
} }

View file

@ -8,8 +8,6 @@ import (
"strings" "strings"
"sync" "sync"
"unsafe" "unsafe"
"codeberg.org/gruf/go-mempool"
) )
// Direction defines a direction // Direction defines a direction
@ -1135,16 +1133,18 @@ func to_timeline_item(item *indexed_item) *timeline_item {
return to return to
} }
var timeline_item_pool mempool.UnsafePool var timeline_item_pool sync.Pool
// new_timeline_item returns a new prepared timeline_item. // new_timeline_item returns a new prepared timeline_item.
func new_timeline_item() *timeline_item { func new_timeline_item() *timeline_item {
if ptr := timeline_item_pool.Get(); ptr != nil { v := timeline_item_pool.Get()
return (*timeline_item)(ptr) if v == nil {
i := new(timeline_item)
i.elem.data = unsafe.Pointer(i)
i.ck = ^uint(0)
v = i
} }
item := new(timeline_item) item := v.(*timeline_item)
item.elem.data = unsafe.Pointer(item)
item.ck = ^uint(0)
return item return item
} }
@ -1159,6 +1159,5 @@ func free_timeline_item(item *timeline_item) {
} }
item.data = nil item.data = nil
item.pk = nil item.pk = nil
ptr := unsafe.Pointer(item) timeline_item_pool.Put(item)
timeline_item_pool.Put(ptr)
} }

View file

@ -444,27 +444,20 @@ func (c *Conn) Status(op DBStatus, reset bool) (current, highwater int, err erro
// https://sqlite.org/c3ref/table_column_metadata.html // https://sqlite.org/c3ref/table_column_metadata.html
func (c *Conn) TableColumnMetadata(schema, table, column string) (declType, collSeq string, notNull, primaryKey, autoInc bool, err error) { func (c *Conn) TableColumnMetadata(schema, table, column string) (declType, collSeq string, notNull, primaryKey, autoInc bool, err error) {
defer c.arena.mark()() defer c.arena.mark()()
var (
declTypePtr ptr_t var schemaPtr, columnPtr ptr_t
collSeqPtr ptr_t declTypePtr := c.arena.new(ptrlen)
notNullPtr ptr_t collSeqPtr := c.arena.new(ptrlen)
primaryKeyPtr ptr_t notNullPtr := c.arena.new(ptrlen)
autoIncPtr ptr_t autoIncPtr := c.arena.new(ptrlen)
columnPtr ptr_t primaryKeyPtr := c.arena.new(ptrlen)
schemaPtr ptr_t
)
if column != "" {
declTypePtr = c.arena.new(ptrlen)
collSeqPtr = c.arena.new(ptrlen)
notNullPtr = c.arena.new(ptrlen)
primaryKeyPtr = c.arena.new(ptrlen)
autoIncPtr = c.arena.new(ptrlen)
columnPtr = c.arena.string(column)
}
if schema != "" { if schema != "" {
schemaPtr = c.arena.string(schema) schemaPtr = c.arena.string(schema)
} }
tablePtr := c.arena.string(table) tablePtr := c.arena.string(table)
if column != "" {
columnPtr = c.arena.string(column)
}
rc := res_t(c.call("sqlite3_table_column_metadata", stk_t(c.handle), rc := res_t(c.call("sqlite3_table_column_metadata", stk_t(c.handle),
stk_t(schemaPtr), stk_t(tablePtr), stk_t(columnPtr), stk_t(schemaPtr), stk_t(tablePtr), stk_t(columnPtr),

View file

@ -1,6 +1,7 @@
package sqlite3 package sqlite3
import ( import (
"encoding/json"
"errors" "errors"
"math" "math"
"time" "time"
@ -172,6 +173,21 @@ func (ctx Context) ResultPointer(ptr any) {
stk_t(ctx.handle), stk_t(valPtr)) stk_t(ctx.handle), stk_t(valPtr))
} }
// ResultJSON sets the result of the function to the JSON encoding of value.
//
// https://sqlite.org/c3ref/result_blob.html
func (ctx Context) ResultJSON(value any) {
err := json.NewEncoder(callbackWriter(func(p []byte) (int, error) {
ctx.ResultRawText(p[:len(p)-1]) // remove the newline
return 0, nil
})).Encode(value)
if err != nil {
ctx.ResultError(err)
return // notest
}
}
// ResultValue sets the result of the function to a copy of [Value]. // ResultValue sets the result of the function to a copy of [Value].
// //
// https://sqlite.org/c3ref/result_blob.html // https://sqlite.org/c3ref/result_blob.html

View file

@ -607,24 +607,14 @@ func (r resultRowsAffected) RowsAffected() (int64, error) {
type scantype byte type scantype byte
const ( const (
_ANY scantype = iota _ANY scantype = iota
_INT _INT scantype = scantype(sqlite3.INTEGER)
_REAL _REAL scantype = scantype(sqlite3.FLOAT)
_TEXT _TEXT scantype = scantype(sqlite3.TEXT)
_BLOB _BLOB scantype = scantype(sqlite3.BLOB)
_NULL _NULL scantype = scantype(sqlite3.NULL)
_BOOL _BOOL scantype = iota
_TIME _TIME
_NOT_NULL
)
var (
_ [0]struct{} = [scantype(sqlite3.INTEGER) - _INT]struct{}{}
_ [0]struct{} = [scantype(sqlite3.FLOAT) - _REAL]struct{}{}
_ [0]struct{} = [scantype(sqlite3.TEXT) - _TEXT]struct{}{}
_ [0]struct{} = [scantype(sqlite3.BLOB) - _BLOB]struct{}{}
_ [0]struct{} = [scantype(sqlite3.NULL) - _NULL]struct{}{}
_ [0]struct{} = [_NOT_NULL & (_NOT_NULL - 1)]struct{}{}
) )
func scanFromDecl(decl string) scantype { func scanFromDecl(decl string) scantype {
@ -654,8 +644,8 @@ type rows struct {
*stmt *stmt
names []string names []string
types []string types []string
nulls []bool
scans []scantype scans []scantype
dest []driver.Value
} }
var ( var (
@ -685,36 +675,34 @@ func (r *rows) Columns() []string {
func (r *rows) scanType(index int) scantype { func (r *rows) scanType(index int) scantype {
if r.scans == nil { if r.scans == nil {
count := len(r.names) count := r.Stmt.ColumnCount()
scans := make([]scantype, count) scans := make([]scantype, count)
for i := range scans { for i := range scans {
scans[i] = scanFromDecl(strings.ToUpper(r.Stmt.ColumnDeclType(i))) scans[i] = scanFromDecl(strings.ToUpper(r.Stmt.ColumnDeclType(i)))
} }
r.scans = scans r.scans = scans
} }
return r.scans[index] &^ _NOT_NULL return r.scans[index]
} }
func (r *rows) loadColumnMetadata() { func (r *rows) loadColumnMetadata() {
if r.types == nil { if r.nulls == nil {
c := r.Stmt.Conn() c := r.Stmt.Conn()
count := len(r.names) count := r.Stmt.ColumnCount()
nulls := make([]bool, count)
types := make([]string, count) types := make([]string, count)
scans := make([]scantype, count) scans := make([]scantype, count)
for i := range types { for i := range nulls {
var notnull bool
if col := r.Stmt.ColumnOriginName(i); col != "" { if col := r.Stmt.ColumnOriginName(i); col != "" {
types[i], _, notnull, _, _, _ = c.TableColumnMetadata( types[i], _, nulls[i], _, _, _ = c.TableColumnMetadata(
r.Stmt.ColumnDatabaseName(i), r.Stmt.ColumnDatabaseName(i),
r.Stmt.ColumnTableName(i), r.Stmt.ColumnTableName(i),
col) col)
types[i] = strings.ToUpper(types[i]) types[i] = strings.ToUpper(types[i])
scans[i] = scanFromDecl(types[i]) scans[i] = scanFromDecl(types[i])
if notnull {
scans[i] |= _NOT_NULL
}
} }
} }
r.nulls = nulls
r.types = types r.types = types
r.scans = scans r.scans = scans
} }
@ -733,13 +721,15 @@ func (r *rows) ColumnTypeDatabaseTypeName(index int) string {
func (r *rows) ColumnTypeNullable(index int) (nullable, ok bool) { func (r *rows) ColumnTypeNullable(index int) (nullable, ok bool) {
r.loadColumnMetadata() r.loadColumnMetadata()
nullable = r.scans[index]&^_NOT_NULL == 0 if r.nulls[index] {
return nullable, !nullable return false, true
}
return true, false
} }
func (r *rows) ColumnTypeScanType(index int) (typ reflect.Type) { func (r *rows) ColumnTypeScanType(index int) (typ reflect.Type) {
r.loadColumnMetadata() r.loadColumnMetadata()
scan := r.scans[index] &^ _NOT_NULL scan := r.scans[index]
if r.Stmt.Busy() { if r.Stmt.Busy() {
// SQLite is dynamically typed and we now have a row. // SQLite is dynamically typed and we now have a row.
@ -782,7 +772,6 @@ func (r *rows) ColumnTypeScanType(index int) (typ reflect.Type) {
} }
func (r *rows) Next(dest []driver.Value) error { func (r *rows) Next(dest []driver.Value) error {
r.dest = nil
c := r.Stmt.Conn() c := r.Stmt.Conn()
if old := c.SetInterrupt(r.ctx); old != r.ctx { if old := c.SetInterrupt(r.ctx); old != r.ctx {
defer c.SetInterrupt(old) defer c.SetInterrupt(old)
@ -801,7 +790,18 @@ func (r *rows) Next(dest []driver.Value) error {
} }
for i := range dest { for i := range dest {
scan := r.scanType(i) scan := r.scanType(i)
if v, ok := dest[i].([]byte); ok { switch v := dest[i].(type) {
case int64:
if scan == _BOOL {
switch v {
case 1:
dest[i] = true
case 0:
dest[i] = false
}
continue
}
case []byte:
if len(v) == cap(v) { // a BLOB if len(v) == cap(v) { // a BLOB
continue continue
} }
@ -816,49 +816,38 @@ func (r *rows) Next(dest []driver.Value) error {
} }
} }
dest[i] = string(v) dest[i] = string(v)
case float64:
break
default:
continue
} }
switch scan { if scan == _TIME {
case _TIME:
t, err := r.tmRead.Decode(dest[i]) t, err := r.tmRead.Decode(dest[i])
if err == nil { if err == nil {
dest[i] = t dest[i] = t
} continue
case _BOOL:
switch dest[i] {
case int64(0):
dest[i] = false
case int64(1):
dest[i] = true
} }
} }
} }
r.dest = dest
return nil return nil
} }
func (r *rows) ScanColumn(dest any, index int) (err error) { func (r *rows) ScanColumn(dest any, index int) error {
// notest // Go 1.26 // notest // Go 1.26
var tm *time.Time var ptr *time.Time
var ok *bool
switch d := dest.(type) { switch d := dest.(type) {
case *time.Time: case *time.Time:
tm = d ptr = d
case *sql.NullTime: case *sql.NullTime:
tm = &d.Time ptr = &d.Time
ok = &d.Valid
case *sql.Null[time.Time]: case *sql.Null[time.Time]:
tm = &d.V ptr = &d.V
ok = &d.Valid
default: default:
return driver.ErrSkip return driver.ErrSkip
} }
value := r.dest[index] if t := r.Stmt.ColumnTime(index, r.tmRead); !t.IsZero() {
*tm, err = r.tmRead.Decode(value) *ptr = t
if ok != nil { return nil
*ok = err == nil
if value == nil {
return nil
}
} }
return err return driver.ErrSkip
} }

View file

@ -1,5 +1,3 @@
//go:build !goexperiment.jsonv2
package util package util
import ( import (

View file

@ -1,52 +0,0 @@
//go:build goexperiment.jsonv2
package util
import (
"encoding/json/v2"
"math"
"strconv"
"time"
"unsafe"
)
type JSON struct{ Value any }
func (j JSON) Scan(value any) error {
var buf []byte
switch v := value.(type) {
case []byte:
buf = v
case string:
buf = unsafe.Slice(unsafe.StringData(v), len(v))
case int64:
buf = strconv.AppendInt(nil, v, 10)
case float64:
buf = AppendNumber(nil, v)
case time.Time:
buf = append(buf, '"')
buf = v.AppendFormat(buf, time.RFC3339Nano)
buf = append(buf, '"')
case nil:
buf = []byte("null")
default:
panic(AssertErr())
}
return json.Unmarshal(buf, j.Value)
}
func AppendNumber(dst []byte, f float64) []byte {
switch {
case math.IsNaN(f):
dst = append(dst, "null"...)
case math.IsInf(f, 1):
dst = append(dst, "9.0e999"...)
case math.IsInf(f, -1):
dst = append(dst, "-9.0e999"...)
default:
return strconv.AppendFloat(dst, f, 'g', -1, 64)
}
return dst
}

View file

@ -1,13 +1,6 @@
//go:build !goexperiment.jsonv2
package sqlite3 package sqlite3
import ( import "github.com/ncruces/go-sqlite3/internal/util"
"encoding/json"
"strconv"
"github.com/ncruces/go-sqlite3/internal/util"
)
// JSON returns a value that can be used as an argument to // JSON returns a value that can be used as an argument to
// [database/sql.DB.Exec], [database/sql.Row.Scan] and similar methods to // [database/sql.DB.Exec], [database/sql.Row.Scan] and similar methods to
@ -17,77 +10,3 @@ import (
func JSON(value any) any { func JSON(value any) any {
return util.JSON{Value: value} return util.JSON{Value: value}
} }
// ResultJSON sets the result of the function to the JSON encoding of value.
//
// https://sqlite.org/c3ref/result_blob.html
func (ctx Context) ResultJSON(value any) {
err := json.NewEncoder(callbackWriter(func(p []byte) (int, error) {
ctx.ResultRawText(p[:len(p)-1]) // remove the newline
return 0, nil
})).Encode(value)
if err != nil {
ctx.ResultError(err)
return // notest
}
}
// BindJSON binds the JSON encoding of value to the prepared statement.
// The leftmost SQL parameter has an index of 1.
//
// https://sqlite.org/c3ref/bind_blob.html
func (s *Stmt) BindJSON(param int, value any) error {
return json.NewEncoder(callbackWriter(func(p []byte) (int, error) {
return 0, s.BindRawText(param, p[:len(p)-1]) // remove the newline
})).Encode(value)
}
// ColumnJSON parses the JSON-encoded value of the result column
// and stores it in the value pointed to by ptr.
// The leftmost column of the result set has the index 0.
//
// https://sqlite.org/c3ref/column_blob.html
func (s *Stmt) ColumnJSON(col int, ptr any) error {
var data []byte
switch s.ColumnType(col) {
case NULL:
data = []byte("null")
case TEXT:
data = s.ColumnRawText(col)
case BLOB:
data = s.ColumnRawBlob(col)
case INTEGER:
data = strconv.AppendInt(nil, s.ColumnInt64(col), 10)
case FLOAT:
data = util.AppendNumber(nil, s.ColumnFloat(col))
default:
panic(util.AssertErr())
}
return json.Unmarshal(data, ptr)
}
// JSON parses a JSON-encoded value
// and stores the result in the value pointed to by ptr.
func (v Value) JSON(ptr any) error {
var data []byte
switch v.Type() {
case NULL:
data = []byte("null")
case TEXT:
data = v.RawText()
case BLOB:
data = v.RawBlob()
case INTEGER:
data = strconv.AppendInt(nil, v.Int64(), 10)
case FLOAT:
data = util.AppendNumber(nil, v.Float())
default:
panic(util.AssertErr())
}
return json.Unmarshal(data, ptr)
}
type callbackWriter func(p []byte) (int, error)
func (fn callbackWriter) Write(p []byte) (int, error) { return fn(p) }

View file

@ -1,113 +0,0 @@
//go:build goexperiment.jsonv2
package sqlite3
import (
"encoding/json/v2"
"strconv"
"github.com/ncruces/go-sqlite3/internal/util"
)
// JSON returns a value that can be used as an argument to
// [database/sql.DB.Exec], [database/sql.Row.Scan] and similar methods to
// store value as JSON, or decode JSON into value.
// JSON should NOT be used with [Stmt.BindJSON], [Stmt.ColumnJSON],
// [Value.JSON], or [Context.ResultJSON].
func JSON(value any) any {
return util.JSON{Value: value}
}
// ResultJSON sets the result of the function to the JSON encoding of value.
//
// https://sqlite.org/c3ref/result_blob.html
func (ctx Context) ResultJSON(value any) {
w := bytesWriter{sqlite: ctx.c.sqlite}
if err := json.MarshalWrite(&w, value); err != nil {
ctx.c.free(w.ptr)
ctx.ResultError(err)
return // notest
}
ctx.c.call("sqlite3_result_text_go",
stk_t(ctx.handle), stk_t(w.ptr), stk_t(len(w.buf)))
}
// BindJSON binds the JSON encoding of value to the prepared statement.
// The leftmost SQL parameter has an index of 1.
//
// https://sqlite.org/c3ref/bind_blob.html
func (s *Stmt) BindJSON(param int, value any) error {
w := bytesWriter{sqlite: s.c.sqlite}
if err := json.MarshalWrite(&w, value); err != nil {
s.c.free(w.ptr)
return err
}
rc := res_t(s.c.call("sqlite3_bind_text_go",
stk_t(s.handle), stk_t(param),
stk_t(w.ptr), stk_t(len(w.buf))))
return s.c.error(rc)
}
// ColumnJSON parses the JSON-encoded value of the result column
// and stores it in the value pointed to by ptr.
// The leftmost column of the result set has the index 0.
//
// https://sqlite.org/c3ref/column_blob.html
func (s *Stmt) ColumnJSON(col int, ptr any) error {
var data []byte
switch s.ColumnType(col) {
case NULL:
data = []byte("null")
case TEXT:
data = s.ColumnRawText(col)
case BLOB:
data = s.ColumnRawBlob(col)
case INTEGER:
data = strconv.AppendInt(nil, s.ColumnInt64(col), 10)
case FLOAT:
data = util.AppendNumber(nil, s.ColumnFloat(col))
default:
panic(util.AssertErr())
}
return json.Unmarshal(data, ptr)
}
// JSON parses a JSON-encoded value
// and stores the result in the value pointed to by ptr.
func (v Value) JSON(ptr any) error {
var data []byte
switch v.Type() {
case NULL:
data = []byte("null")
case TEXT:
data = v.RawText()
case BLOB:
data = v.RawBlob()
case INTEGER:
data = strconv.AppendInt(nil, v.Int64(), 10)
case FLOAT:
data = util.AppendNumber(nil, v.Float())
default:
panic(util.AssertErr())
}
return json.Unmarshal(data, ptr)
}
type bytesWriter struct {
*sqlite
buf []byte
ptr ptr_t
}
func (b *bytesWriter) Write(p []byte) (n int, err error) {
if len(p) > cap(b.buf)-len(b.buf) {
want := int64(len(b.buf)) + int64(len(p))
grow := int64(cap(b.buf))
grow += grow >> 1
want = max(want, grow)
b.ptr = b.realloc(b.ptr, want)
b.buf = util.View(b.mod, b.ptr, want)[:len(b.buf)]
}
b.buf = append(b.buf, p...)
return len(p), nil
}

View file

@ -5,7 +5,6 @@ import (
"context" "context"
"math/bits" "math/bits"
"os" "os"
"strings"
"sync" "sync"
"unsafe" "unsafe"
@ -129,10 +128,11 @@ func (sqlt *sqlite) error(rc res_t, handle ptr_t, sql ...string) error {
var msg, query string var msg, query string
if ptr := ptr_t(sqlt.call("sqlite3_errmsg", stk_t(handle))); ptr != 0 { if ptr := ptr_t(sqlt.call("sqlite3_errmsg", stk_t(handle))); ptr != 0 {
msg = util.ReadString(sqlt.mod, ptr, _MAX_LENGTH) msg = util.ReadString(sqlt.mod, ptr, _MAX_LENGTH)
if msg == "not an error" { switch {
case msg == "not an error":
msg = ""
case msg == util.ErrorCodeString(uint32(rc))[len("sqlite3: "):]:
msg = "" msg = ""
} else {
msg = strings.TrimPrefix(msg, util.ErrorCodeString(uint32(rc))[len("sqlite3: "):])
} }
} }

View file

@ -1,7 +1,9 @@
package sqlite3 package sqlite3
import ( import (
"encoding/json"
"math" "math"
"strconv"
"time" "time"
"github.com/ncruces/go-sqlite3/internal/util" "github.com/ncruces/go-sqlite3/internal/util"
@ -360,6 +362,16 @@ func (s *Stmt) BindPointer(param int, ptr any) error {
return s.c.error(rc) return s.c.error(rc)
} }
// BindJSON binds the JSON encoding of value to the prepared statement.
// The leftmost SQL parameter has an index of 1.
//
// https://sqlite.org/c3ref/bind_blob.html
func (s *Stmt) BindJSON(param int, value any) error {
return json.NewEncoder(callbackWriter(func(p []byte) (int, error) {
return 0, s.BindRawText(param, p[:len(p)-1]) // remove the newline
})).Encode(value)
}
// BindValue binds a copy of value to the prepared statement. // BindValue binds a copy of value to the prepared statement.
// The leftmost SQL parameter has an index of 1. // The leftmost SQL parameter has an index of 1.
// //
@ -586,6 +598,30 @@ func (s *Stmt) columnRawBytes(col int, ptr ptr_t, nul int32) []byte {
return util.View(s.c.mod, ptr, int64(n+nul))[:n] return util.View(s.c.mod, ptr, int64(n+nul))[:n]
} }
// ColumnJSON parses the JSON-encoded value of the result column
// and stores it in the value pointed to by ptr.
// The leftmost column of the result set has the index 0.
//
// https://sqlite.org/c3ref/column_blob.html
func (s *Stmt) ColumnJSON(col int, ptr any) error {
var data []byte
switch s.ColumnType(col) {
case NULL:
data = []byte("null")
case TEXT:
data = s.ColumnRawText(col)
case BLOB:
data = s.ColumnRawBlob(col)
case INTEGER:
data = strconv.AppendInt(nil, s.ColumnInt64(col), 10)
case FLOAT:
data = util.AppendNumber(nil, s.ColumnFloat(col))
default:
panic(util.AssertErr())
}
return json.Unmarshal(data, ptr)
}
// ColumnValue returns the unprotected value of the result column. // ColumnValue returns the unprotected value of the result column.
// The leftmost column of the result set has the index 0. // The leftmost column of the result set has the index 0.
// //
@ -712,3 +748,7 @@ func (s *Stmt) columns(count int64) ([]byte, ptr_t, error) {
return util.View(s.c.mod, typePtr, count), dataPtr, nil return util.View(s.c.mod, typePtr, count), dataPtr, nil
} }
type callbackWriter func(p []byte) (int, error)
func (fn callbackWriter) Write(p []byte) (int, error) { return fn(p) }

View file

@ -94,7 +94,7 @@ func (f TimeFormat) Encode(t time.Time) any {
case TimeFormatUnix: case TimeFormatUnix:
return t.Unix() return t.Unix()
case TimeFormatUnixFrac: case TimeFormatUnixFrac:
return math.FMA(1e-9, float64(t.Nanosecond()), float64(t.Unix())) return float64(t.Unix()) + float64(t.Nanosecond())*1e-9
case TimeFormatUnixMilli: case TimeFormatUnixMilli:
return t.UnixMilli() return t.UnixMilli()
case TimeFormatUnixMicro: case TimeFormatUnixMicro:

View file

@ -1,7 +1,9 @@
package sqlite3 package sqlite3
import ( import (
"encoding/json"
"math" "math"
"strconv"
"time" "time"
"github.com/ncruces/go-sqlite3/internal/util" "github.com/ncruces/go-sqlite3/internal/util"
@ -160,6 +162,27 @@ func (v Value) Pointer() any {
return util.GetHandle(v.c.ctx, ptr) return util.GetHandle(v.c.ctx, ptr)
} }
// JSON parses a JSON-encoded value
// and stores the result in the value pointed to by ptr.
func (v Value) JSON(ptr any) error {
var data []byte
switch v.Type() {
case NULL:
data = []byte("null")
case TEXT:
data = v.RawText()
case BLOB:
data = v.RawBlob()
case INTEGER:
data = strconv.AppendInt(nil, v.Int64(), 10)
case FLOAT:
data = util.AppendNumber(nil, v.Float())
default:
panic(util.AssertErr())
}
return json.Unmarshal(data, ptr)
}
// NoChange returns true if and only if the value is unchanged // NoChange returns true if and only if the value is unchanged
// in a virtual table update operation. // in a virtual table update operation.
// //

View file

@ -94,10 +94,6 @@ const (
OPEN_PRIVATECACHE OpenFlag = 0x00040000 /* Ok for sqlite3_open_v2() */ OPEN_PRIVATECACHE OpenFlag = 0x00040000 /* Ok for sqlite3_open_v2() */
OPEN_WAL OpenFlag = 0x00080000 /* VFS only */ OPEN_WAL OpenFlag = 0x00080000 /* VFS only */
OPEN_NOFOLLOW OpenFlag = 0x01000000 /* Ok for sqlite3_open_v2() */ OPEN_NOFOLLOW OpenFlag = 0x01000000 /* Ok for sqlite3_open_v2() */
_FLAG_ATOMIC OpenFlag = 0x10000000
_FLAG_KEEP_WAL OpenFlag = 0x20000000
_FLAG_PSOW OpenFlag = 0x40000000
_FLAG_SYNC_DIR OpenFlag = 0x80000000
) )
// AccessFlag is a flag for the [VFS] Access method. // AccessFlag is a flag for the [VFS] Access method.

View file

@ -51,7 +51,7 @@ func (vfsOS) Delete(path string, syncDir bool) error {
return _OK return _OK
} }
defer f.Close() defer f.Close()
err = osSync(f, 0, SYNC_FULL) err = osSync(f, false, false)
if err != nil { if err != nil {
return _IOERR_DIR_FSYNC return _IOERR_DIR_FSYNC
} }
@ -131,24 +131,27 @@ func (vfsOS) OpenFilename(name *Filename, flags OpenFlag) (File, OpenFlag, error
} }
file := vfsFile{ file := vfsFile{
File: f, File: f,
flags: flags | _FLAG_PSOW, psow: true,
shm: NewSharedMemory(name.String()+"-shm", flags), atomic: osBatchAtomic(f),
} readOnly: flags&OPEN_READONLY != 0,
if osBatchAtomic(f) { syncDir: isUnix && isCreate && isJournl,
file.flags |= _FLAG_ATOMIC delete: !isUnix && flags&OPEN_DELETEONCLOSE != 0,
} shm: NewSharedMemory(name.String()+"-shm", flags),
if isUnix && isCreate && isJournl {
file.flags |= _FLAG_SYNC_DIR
} }
return &file, flags, nil return &file, flags, nil
} }
type vfsFile struct { type vfsFile struct {
*os.File *os.File
shm SharedMemory shm SharedMemory
lock LockLevel lock LockLevel
flags OpenFlag readOnly bool
keepWAL bool
syncDir bool
atomic bool
delete bool
psow bool
} }
var ( var (
@ -161,7 +164,7 @@ var (
) )
func (f *vfsFile) Close() error { func (f *vfsFile) Close() error {
if !isUnix && f.flags&OPEN_DELETEONCLOSE != 0 { if f.delete {
defer os.Remove(f.Name()) defer os.Remove(f.Name())
} }
if f.shm != nil { if f.shm != nil {
@ -180,18 +183,21 @@ func (f *vfsFile) WriteAt(p []byte, off int64) (n int, err error) {
} }
func (f *vfsFile) Sync(flags SyncFlag) error { func (f *vfsFile) Sync(flags SyncFlag) error {
err := osSync(f.File, f.flags, flags) dataonly := (flags & SYNC_DATAONLY) != 0
fullsync := (flags & 0x0f) == SYNC_FULL
err := osSync(f.File, fullsync, dataonly)
if err != nil { if err != nil {
return err return err
} }
if isUnix && f.flags&_FLAG_SYNC_DIR != 0 { if isUnix && f.syncDir {
f.flags ^= _FLAG_SYNC_DIR f.syncDir = false
d, err := os.Open(filepath.Dir(f.File.Name())) d, err := os.Open(filepath.Dir(f.File.Name()))
if err != nil { if err != nil {
return nil return nil
} }
defer d.Close() defer d.Close()
err = osSync(f.File, f.flags, flags) err = osSync(d, false, false)
if err != nil { if err != nil {
return _IOERR_DIR_FSYNC return _IOERR_DIR_FSYNC
} }
@ -209,10 +215,10 @@ func (f *vfsFile) SectorSize() int {
func (f *vfsFile) DeviceCharacteristics() DeviceCharacteristic { func (f *vfsFile) DeviceCharacteristics() DeviceCharacteristic {
ret := IOCAP_SUBPAGE_READ ret := IOCAP_SUBPAGE_READ
if f.flags&_FLAG_ATOMIC != 0 { if f.atomic {
ret |= IOCAP_BATCH_ATOMIC ret |= IOCAP_BATCH_ATOMIC
} }
if f.flags&_FLAG_PSOW != 0 { if f.psow {
ret |= IOCAP_POWERSAFE_OVERWRITE ret |= IOCAP_POWERSAFE_OVERWRITE
} }
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
@ -243,20 +249,8 @@ func (f *vfsFile) HasMoved() (bool, error) {
return !os.SameFile(fi, pi), nil return !os.SameFile(fi, pi), nil
} }
func (f *vfsFile) LockState() LockLevel { return f.lock } func (f *vfsFile) LockState() LockLevel { return f.lock }
func (f *vfsFile) PowersafeOverwrite() bool { return f.flags&_FLAG_PSOW != 0 } func (f *vfsFile) PowersafeOverwrite() bool { return f.psow }
func (f *vfsFile) PersistWAL() bool { return f.flags&_FLAG_KEEP_WAL != 0 } func (f *vfsFile) PersistWAL() bool { return f.keepWAL }
func (f *vfsFile) SetPowersafeOverwrite(psow bool) { f.psow = psow }
func (f *vfsFile) SetPowersafeOverwrite(psow bool) { func (f *vfsFile) SetPersistWAL(keepWAL bool) { f.keepWAL = keepWAL }
f.flags &^= _FLAG_PSOW
if psow {
f.flags |= _FLAG_PSOW
}
}
func (f *vfsFile) SetPersistWAL(keepWAL bool) {
f.flags &^= _FLAG_KEEP_WAL
if keepWAL {
f.flags |= _FLAG_KEEP_WAL
}
}

View file

@ -41,7 +41,7 @@ func (f *vfsFile) Lock(lock LockLevel) error {
} }
// Do not allow any kind of write-lock on a read-only database. // Do not allow any kind of write-lock on a read-only database.
if lock >= LOCK_RESERVED && f.flags&OPEN_READONLY != 0 { if f.readOnly && lock >= LOCK_RESERVED {
return _IOERR_LOCK return _IOERR_LOCK
} }

View file

@ -6,7 +6,4 @@ SQLite VFS in pure Go.
It has some benefits over the C version: It has some benefits over the C version:
- the memory backing the database needs not be contiguous, - the memory backing the database needs not be contiguous,
- the database can grow/shrink incrementally without copying, - the database can grow/shrink incrementally without copying,
- reader-writer concurrency is slightly improved. - reader-writer concurrency is slightly improved.
[`memdb.TestDB`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/memdb#TestDB)
is the preferred way to setup an in-memory database for testing.

View file

@ -10,7 +10,6 @@
package memdb package memdb
import ( import (
"crypto/rand"
"fmt" "fmt"
"net/url" "net/url"
"sync" "sync"
@ -75,27 +74,11 @@ func Delete(name string) {
// TestDB creates an empty shared memory database for the test to use. // TestDB creates an empty shared memory database for the test to use.
// The database is automatically deleted when the test and all its subtests complete. // The database is automatically deleted when the test and all its subtests complete.
// Returns a URI filename appropriate to call Open with.
// Each subsequent call to TestDB returns a unique database. // Each subsequent call to TestDB returns a unique database.
//
// func Test_something(t *testing.T) {
// t.Parallel()
// dsn := memdb.TestDB(t, url.Values{
// "_pragma": {"busy_timeout(1000)"},
// })
//
// db, err := sql.Open("sqlite3", dsn)
// if err != nil {
// t.Fatal(err)
// }
// defer db.Close()
//
// // ...
// }
func TestDB(tb testing.TB, params ...url.Values) string { func TestDB(tb testing.TB, params ...url.Values) string {
tb.Helper() tb.Helper()
name := fmt.Sprintf("%s_%s", tb.Name(), rand.Text()) name := fmt.Sprintf("%s_%p", tb.Name(), tb)
tb.Cleanup(func() { Delete(name) }) tb.Cleanup(func() { Delete(name) })
Create(name, nil) Create(name, nil)

View file

@ -23,26 +23,12 @@ type flocktimeout_t struct {
timeout unix.Timespec timeout unix.Timespec
} }
func osSync(file *os.File, open OpenFlag, sync SyncFlag) error { func osSync(file *os.File, fullsync, _ /*dataonly*/ bool) error {
var cmd int if fullsync {
if sync&SYNC_FULL == SYNC_FULL { return file.Sync()
// For rollback journals all we really need is a barrier.
if open&OPEN_MAIN_JOURNAL != 0 {
cmd = unix.F_BARRIERFSYNC
} else {
cmd = unix.F_FULLFSYNC
}
} }
fd := file.Fd()
for { for {
err := error(unix.ENOTSUP) err := unix.Fsync(int(file.Fd()))
if cmd != 0 {
_, err = unix.FcntlInt(fd, cmd, 0)
}
if err == unix.ENOTSUP {
err = unix.Fsync(int(fd))
}
if err != unix.EINTR { if err != unix.EINTR {
return err return err
} }

View file

@ -10,7 +10,7 @@ import (
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
func osSync(file *os.File, _ OpenFlag, _ SyncFlag) error { func osSync(file *os.File, _ /*fullsync*/, _ /*dataonly*/ bool) error {
// SQLite trusts Linux's fdatasync for all fsync's. // SQLite trusts Linux's fdatasync for all fsync's.
for { for {
err := unix.Fdatasync(int(file.Fd())) err := unix.Fdatasync(int(file.Fd()))

View file

@ -4,6 +4,6 @@ package vfs
import "os" import "os"
func osSync(file *os.File, _ OpenFlag, _ SyncFlag) error { func osSync(file *os.File, _ /*fullsync*/, _ /*dataonly*/ bool) error {
return file.Sync() return file.Sync()
} }

12
vendor/modules.txt vendored
View file

@ -271,11 +271,11 @@ codeberg.org/gruf/go-mangler/v2
# codeberg.org/gruf/go-maps v1.0.4 # codeberg.org/gruf/go-maps v1.0.4
## explicit; go 1.20 ## explicit; go 1.20
codeberg.org/gruf/go-maps codeberg.org/gruf/go-maps
# codeberg.org/gruf/go-mempool v0.0.0-20251003110531-b54adae66253 # codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760
## explicit; go 1.24.0 ## explicit; go 1.22.2
codeberg.org/gruf/go-mempool codeberg.org/gruf/go-mempool
# codeberg.org/gruf/go-mutexes v1.5.8 # codeberg.org/gruf/go-mutexes v1.5.3
## explicit; go 1.24.0 ## explicit; go 1.22.2
codeberg.org/gruf/go-mutexes codeberg.org/gruf/go-mutexes
# codeberg.org/gruf/go-runners v1.6.3 # codeberg.org/gruf/go-runners v1.6.3
## explicit; go 1.19 ## explicit; go 1.19
@ -293,7 +293,7 @@ codeberg.org/gruf/go-storage/disk
codeberg.org/gruf/go-storage/internal codeberg.org/gruf/go-storage/internal
codeberg.org/gruf/go-storage/memory codeberg.org/gruf/go-storage/memory
codeberg.org/gruf/go-storage/s3 codeberg.org/gruf/go-storage/s3
# codeberg.org/gruf/go-structr v0.9.12 # codeberg.org/gruf/go-structr v0.9.9
## explicit; go 1.24.5 ## explicit; go 1.24.5
codeberg.org/gruf/go-structr codeberg.org/gruf/go-structr
# codeberg.org/gruf/go-xunsafe v0.0.0-20250809104800-512a9df57d73 # codeberg.org/gruf/go-xunsafe v0.0.0-20250809104800-512a9df57d73
@ -727,7 +727,7 @@ github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit ## explicit
github.com/munnerz/goautoneg github.com/munnerz/goautoneg
# github.com/ncruces/go-sqlite3 v0.29.1 # github.com/ncruces/go-sqlite3 v0.29.0
## explicit; go 1.24.0 ## explicit; go 1.24.0
github.com/ncruces/go-sqlite3 github.com/ncruces/go-sqlite3
github.com/ncruces/go-sqlite3/driver github.com/ncruces/go-sqlite3/driver