[chore] update dependencies (#4188)

Update dependencies:
- github.com/gin-gonic/gin v1.10.0 -> v1.10.1
- github.com/gin-contrib/sessions v1.10.3 -> v1.10.4
- github.com/jackc/pgx/v5 v5.7.4 -> v5.7.5
- github.com/minio/minio-go/v7 v7.0.91 -> v7.0.92
- github.com/pquerna/otp v1.4.0 -> v1.5.0
- github.com/tdewolff/minify/v2 v2.23.5 -> v2.23.8
- github.com/yuin/goldmark v1.7.11 -> v1.7.12
- go.opentelemetry.io/otel{,/*} v1.35.0 -> v1.36.0
- modernc.org/sqlite v1.37.0 -> v1.37.1

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4188
Reviewed-by: Daenney <daenney@noreply.codeberg.org>
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
This commit is contained in:
kim 2025-05-22 16:27:55 +02:00 committed by kim
commit b6ff55662e
214 changed files with 44839 additions and 32023 deletions

5
vendor/modernc.org/libc/Makefile generated vendored
View file

@ -32,13 +32,12 @@ download:
@if [ ! -f $(TAR) ]; then wget $(URL) ; fi
edit:
@if [ -f "Session.vim" ]; then novim -S & else novim -p Makefile go.mod builder.json & fi
@if [ -f "Session.vim" ]; then gvim -S & else gvim -p Makefile go.mod builder.json & fi
editor:
gofmt -l -s -w *.go
go test -c -o /dev/null
go install -v
go build -o /dev/null generator*.go
go build -o /dev/null -v generator*.go
generate: download
mkdir -p $(DIR) || true

21
vendor/modernc.org/libc/README.md generated vendored
View file

@ -1,20 +1,9 @@
# libc
Package libc provides C-runtime services. Work in progress.
[![LiberaPay](https://liberapay.com/assets/widgets/donate.svg)](https://liberapay.com/jnml/donate)
[![receives](https://img.shields.io/liberapay/receives/jnml.svg?logo=liberapay)](https://liberapay.com/jnml/donate)
[![patrons](https://img.shields.io/liberapay/patrons/jnml.svg?logo=liberapay)](https://liberapay.com/jnml/donate)
This package is a continuation of the Crt package in modernc.org/crt/v3.
[![Go Reference](https://pkg.go.dev/badge/modernc.org/libc.svg)](https://pkg.go.dev/modernc.org/libc)
Installation
$ go get [-u] modernc.org/libc
Documentation: [godoc.org/modernc.org/libc](http://godoc.org/modernc.org/libc)
Building with `make` requires the following Go packages
* github.com/golang/lint/golint
* github.com/mdempsky/maligned
* github.com/mdempsky/unconvert
* honnef.co/go/tools/cmd/unused
* honnef.co/go/tools/cmd/gosimple
* github.com/client9/misspell/cmd/misspell
Package libc is a partial reimplementation of C libc in pure Go.

View file

@ -8,15 +8,18 @@ do
echo "GOOS=darwin GOARCH=arm64"
GOOS=darwin GOARCH=arm64 go build -tags=$tag -v ./...
GOOS=darwin GOARCH=arm64 go test -tags=$tag -c -o /dev/null
echo "GOOS=freebsd GOARCH=386"
GOOS=freebsd GOARCH=386 go build -tags=$tag -v ./...
GOOS=freebsd GOARCH=386 go test -tags=$tag -c -o /dev/null
#TODO echo "GOOS=freebsd GOARCH=386"
#TODO GOOS=freebsd GOARCH=386 go build -tags=$tag -v ./...
#TODO GOOS=freebsd GOARCH=386 go test -tags=$tag -c -o /dev/null
echo "GOOS=freebsd GOARCH=amd64"
GOOS=freebsd GOARCH=amd64 go build -tags=$tag -v ./...
GOOS=freebsd GOARCH=amd64 go test -tags=$tag -c -o /dev/null
echo "GOOS=freebsd GOARCH=arm"
GOOS=freebsd GOARCH=arm go build -tags=$tag -v ./...
GOOS=freebsd GOARCH=arm go test -tags=$tag -c -o /dev/null
echo "GOOS=freebsd GOARCH=arm64"
GOOS=freebsd GOARCH=arm64 go build -tags=$tag -v ./...
GOOS=freebsd GOARCH=arm64 go test -tags=$tag -c -o /dev/null
#TODO echo "GOOS=freebsd GOARCH=arm"
#TODO GOOS=freebsd GOARCH=arm go build -tags=$tag -v ./...
#TODO GOOS=freebsd GOARCH=arm go test -tags=$tag -c -o /dev/null
echo "GOOS=linux GOARCH=386"
GOOS=linux GOARCH=386 go build -tags=$tag -v ./...
GOOS=linux GOARCH=386 go test -tags=$tag -c -o /dev/null

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

2
vendor/modernc.org/libc/etc.go generated vendored
View file

@ -237,6 +237,7 @@ func (t *TLS) Close() {
// t.Free(11)
// t.Free(22)
func (t *TLS) Alloc(n int) (r uintptr) {
t.sp++
if memgrind {
if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
panic(todo("concurrent use of TLS instance %p", t))
@ -321,6 +322,7 @@ const stackFrameKeepalive = 2
// Free deallocates n bytes of thread-local storage. See TLS.Alloc for details
// on correct usage.
func (t *TLS) Free(n int) {
t.sp--
if memgrind {
if atomic.SwapInt32(&t.reentryGuard, 1) != 0 {
panic(todo("concurrent use of TLS instance %p", t))

951
vendor/modernc.org/libc/libc.go generated vendored
View file

@ -4,11 +4,9 @@
//go:build !linux || mips64le
//go.generate echo package libc > ccgo.go
//go:generate go fmt ./...
// Package libc provides run time support for ccgo generated programs and
// implements selected parts of the C standard library.
// go.generate echo package libc > ccgo.go
//
//go:generate go fmt -l -s -w ./...
package libc // import "modernc.org/libc"
//TODO use O_RDONLY etc. from fcntl header
@ -1952,7 +1950,7 @@ func getLocalLocation() (loc *gotime.Location) {
}
// time_t mktime(struct tm *tm);
func Xmktime(t *TLS, ptm uintptr) time.Time_t {
func Xmktime(t *TLS, ptm uintptr) (r time.Time_t) {
if __ccgo_strace {
trc("t=%v ptm=%v, (%v:)", t, ptm, origin(2))
}
@ -1969,7 +1967,8 @@ func Xmktime(t *TLS, ptm uintptr) time.Time_t {
)
(*time.Tm)(unsafe.Pointer(ptm)).Ftm_wday = int32(tt.Weekday())
(*time.Tm)(unsafe.Pointer(ptm)).Ftm_yday = int32(tt.YearDay() - 1)
return time.Time_t(tt.Unix())
r = time.Time_t(tt.Unix())
return r
}
// char *strpbrk(const char *s, const char *accept);
@ -2314,14 +2313,6 @@ func Xwcwidth(t *TLS, c wchar_t) int32 {
panic(todo(""))
}
// int clock_gettime(clockid_t clk_id, struct timespec *tp);
func Xclock_gettime(t *TLS, clk_id int32, tp uintptr) int32 {
if __ccgo_strace {
trc("t=%v clk_id=%v tp=%v, (%v:)", t, clk_id, tp, origin(2))
}
panic(todo(""))
}
// AtExit will attempt to run f at process exit. The execution cannot be
// guaranteed, neither its ordering with respect to any other handlers
// registered by AtExit.
@ -2650,3 +2641,933 @@ func Xlrint(tls *TLS, x float64) (r long) {
}
return long(Xrint(tls, x))
}
func X__builtin_trunc(tls *TLS, x float64) (r float64) {
return Xtrunc(tls, x)
}
func X__builtin_fmin(tls *TLS, x float64, y float64) (r float64) {
return Xfmin(tls, x, y)
}
func Xfmin(tls *TLS, x float64, y float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x=%v y=%v, (%v:)", tls, x, y, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var v1, v10, v3, v5, v7 uint64
var v12, v9 float64
var _ /* __u at bp+0 */ struct {
F__i [0]uint64
F__f float64
}
_, _, _, _, _, _, _ = v1, v10, v12, v3, v5, v7, v9
*(*float64)(unsafe.Pointer(bp)) = x
v1 = *(*uint64)(unsafe.Pointer(bp))
goto _2
_2:
if BoolInt32(v1&(-Uint64FromUint64(1)>>Int32FromInt32(1)) > Uint64FromUint64(0x7ff)<<Int32FromInt32(52)) != 0 {
return y
}
*(*float64)(unsafe.Pointer(bp)) = y
v3 = *(*uint64)(unsafe.Pointer(bp))
goto _4
_4:
if BoolInt32(v3&(-Uint64FromUint64(1)>>Int32FromInt32(1)) > Uint64FromUint64(0x7ff)<<Int32FromInt32(52)) != 0 {
return x
}
/* handle signed zeros, see C99 Annex F.9.9.2 */
*(*float64)(unsafe.Pointer(bp)) = x
v5 = *(*uint64)(unsafe.Pointer(bp))
goto _6
_6:
*(*float64)(unsafe.Pointer(bp)) = y
v7 = *(*uint64)(unsafe.Pointer(bp))
goto _8
_8:
if Int32FromUint64(v5>>Int32FromInt32(63)) != Int32FromUint64(v7>>Int32FromInt32(63)) {
*(*float64)(unsafe.Pointer(bp)) = x
v10 = *(*uint64)(unsafe.Pointer(bp))
goto _11
_11:
if Int32FromUint64(v10>>Int32FromInt32(63)) != 0 {
v9 = x
} else {
v9 = y
}
return v9
}
if x < y {
v12 = x
} else {
v12 = y
}
return v12
}
func Xfminf(tls *TLS, x float32, y float32) (r float32) {
if __ccgo_strace {
trc("tls=%v x=%v y=%v, (%v:)", tls, x, y, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var v1, v10, v3, v5, v7 uint32
var v12, v9 float32
var _ /* __u at bp+0 */ struct {
F__i [0]uint32
F__f float32
}
_, _, _, _, _, _, _ = v1, v10, v12, v3, v5, v7, v9
*(*float32)(unsafe.Pointer(bp)) = x
v1 = *(*uint32)(unsafe.Pointer(bp))
goto _2
_2:
if BoolInt32(v1&uint32(0x7fffffff) > uint32(0x7f800000)) != 0 {
return y
}
*(*float32)(unsafe.Pointer(bp)) = y
v3 = *(*uint32)(unsafe.Pointer(bp))
goto _4
_4:
if BoolInt32(v3&uint32(0x7fffffff) > uint32(0x7f800000)) != 0 {
return x
}
/* handle signed zeros, see C99 Annex F.9.9.2 */
*(*float32)(unsafe.Pointer(bp)) = x
v5 = *(*uint32)(unsafe.Pointer(bp))
goto _6
_6:
*(*float32)(unsafe.Pointer(bp)) = y
v7 = *(*uint32)(unsafe.Pointer(bp))
goto _8
_8:
if Int32FromUint32(v5>>Int32FromInt32(31)) != Int32FromUint32(v7>>Int32FromInt32(31)) {
*(*float32)(unsafe.Pointer(bp)) = x
v10 = *(*uint32)(unsafe.Pointer(bp))
goto _11
_11:
if Int32FromUint32(v10>>Int32FromInt32(31)) != 0 {
v9 = x
} else {
v9 = y
}
return v9
}
if x < y {
v12 = x
} else {
v12 = y
}
return v12
}
func Xfminl(tls *TLS, x float64, y float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x=%v y=%v, (%v:)", tls, x, y, origin(2))
defer func() { trc("-> %v", r) }()
}
return Xfmin(tls, x, y)
}
func Xfmax(tls *TLS, x float64, y float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x=%v y=%v, (%v:)", tls, x, y, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var v1, v10, v3, v5, v7 uint64
var v12, v9 float64
var _ /* __u at bp+0 */ struct {
F__i [0]uint64
F__f float64
}
_, _, _, _, _, _, _ = v1, v10, v12, v3, v5, v7, v9
*(*float64)(unsafe.Pointer(bp)) = x
v1 = *(*uint64)(unsafe.Pointer(bp))
goto _2
_2:
if BoolInt32(v1&(-Uint64FromUint64(1)>>Int32FromInt32(1)) > Uint64FromUint64(0x7ff)<<Int32FromInt32(52)) != 0 {
return y
}
*(*float64)(unsafe.Pointer(bp)) = y
v3 = *(*uint64)(unsafe.Pointer(bp))
goto _4
_4:
if BoolInt32(v3&(-Uint64FromUint64(1)>>Int32FromInt32(1)) > Uint64FromUint64(0x7ff)<<Int32FromInt32(52)) != 0 {
return x
}
/* handle signed zeros, see C99 Annex F.9.9.2 */
*(*float64)(unsafe.Pointer(bp)) = x
v5 = *(*uint64)(unsafe.Pointer(bp))
goto _6
_6:
*(*float64)(unsafe.Pointer(bp)) = y
v7 = *(*uint64)(unsafe.Pointer(bp))
goto _8
_8:
if Int32FromUint64(v5>>Int32FromInt32(63)) != Int32FromUint64(v7>>Int32FromInt32(63)) {
*(*float64)(unsafe.Pointer(bp)) = x
v10 = *(*uint64)(unsafe.Pointer(bp))
goto _11
_11:
if Int32FromUint64(v10>>Int32FromInt32(63)) != 0 {
v9 = y
} else {
v9 = x
}
return v9
}
if x < y {
v12 = y
} else {
v12 = x
}
return v12
}
func Xfmaxf(tls *TLS, x float32, y float32) (r float32) {
if __ccgo_strace {
trc("tls=%v x=%v y=%v, (%v:)", tls, x, y, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var v1, v10, v3, v5, v7 uint32
var v12, v9 float32
var _ /* __u at bp+0 */ struct {
F__i [0]uint32
F__f float32
}
_, _, _, _, _, _, _ = v1, v10, v12, v3, v5, v7, v9
*(*float32)(unsafe.Pointer(bp)) = x
v1 = *(*uint32)(unsafe.Pointer(bp))
goto _2
_2:
if BoolInt32(v1&uint32(0x7fffffff) > uint32(0x7f800000)) != 0 {
return y
}
*(*float32)(unsafe.Pointer(bp)) = y
v3 = *(*uint32)(unsafe.Pointer(bp))
goto _4
_4:
if BoolInt32(v3&uint32(0x7fffffff) > uint32(0x7f800000)) != 0 {
return x
}
/* handle signed zeroes, see C99 Annex F.9.9.2 */
*(*float32)(unsafe.Pointer(bp)) = x
v5 = *(*uint32)(unsafe.Pointer(bp))
goto _6
_6:
*(*float32)(unsafe.Pointer(bp)) = y
v7 = *(*uint32)(unsafe.Pointer(bp))
goto _8
_8:
if Int32FromUint32(v5>>Int32FromInt32(31)) != Int32FromUint32(v7>>Int32FromInt32(31)) {
*(*float32)(unsafe.Pointer(bp)) = x
v10 = *(*uint32)(unsafe.Pointer(bp))
goto _11
_11:
if Int32FromUint32(v10>>Int32FromInt32(31)) != 0 {
v9 = y
} else {
v9 = x
}
return v9
}
if x < y {
v12 = y
} else {
v12 = x
}
return v12
}
func Xfmaxl(tls *TLS, x float64, y float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x=%v y=%v, (%v:)", tls, x, y, origin(2))
defer func() { trc("-> %v", r) }()
}
return Xfmax(tls, x, y)
}
func X__builtin_fmax(tls *TLS, x float64, y float64) (r float64) {
return Xfmax(tls, x, y)
}
func Xexpm1(tls *TLS, x3 float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x3=%v, (%v:)", tls, x3, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var c, e, hfx, hi, hxs, lo, r1, t, twopk, y3 Tdouble_t
var hx Tuint32_t
var k, sign int32
var y float32
var y1, y2, v3 float64
var v1 uint64
var _ /* __u at bp+0 */ struct {
F__i [0]uint64
F__f float64
}
var _ /* u at bp+8 */ struct {
Fi [0]Tuint64_t
Ff float64
}
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = c, e, hfx, hi, hx, hxs, k, lo, r1, sign, t, twopk, y, y1, y2, y3, v1, v3
*(*struct {
Fi [0]Tuint64_t
Ff float64
})(unsafe.Pointer(bp + 8)) = struct {
Fi [0]Tuint64_t
Ff float64
}{}
*(*float64)(unsafe.Pointer(bp + 8)) = x3
hx = uint32(*(*Tuint64_t)(unsafe.Pointer(bp + 8)) >> int32(32) & uint64(0x7fffffff))
sign = Int32FromUint64(*(*Tuint64_t)(unsafe.Pointer(bp + 8)) >> int32(63))
/* filter out huge and non-finite argument */
if hx >= uint32(0x4043687A) { /* if |x|>=56*ln2 */
*(*float64)(unsafe.Pointer(bp)) = x3
v1 = *(*uint64)(unsafe.Pointer(bp))
goto _2
_2:
if BoolInt32(v1&(-Uint64FromUint64(1)>>Int32FromInt32(1)) > Uint64FromUint64(0x7ff)<<Int32FromInt32(52)) != 0 {
return x3
}
if sign != 0 {
return float64(-Int32FromInt32(1))
}
if x3 > _o_threshold {
x3 *= float64(8.98846567431158e+307)
return x3
}
}
/* argument reduction */
if hx > uint32(0x3fd62e42) { /* if |x| > 0.5 ln2 */
if hx < uint32(0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
if !(sign != 0) {
hi = x3 - _ln2_hi
lo = _ln2_lo
k = int32(1)
} else {
hi = x3 + _ln2_hi
lo = -_ln2_lo
k = -int32(1)
}
} else {
if sign != 0 {
v3 = -Float64FromFloat64(0.5)
} else {
v3 = float64(0.5)
}
k = int32(float64(_invln2*x3) + v3)
t = float64(k)
hi = x3 - float64(t*_ln2_hi) /* t*ln2_hi is exact here */
lo = Tdouble_t(t * _ln2_lo)
}
x3 = hi - lo
c = hi - x3 - lo
} else {
if hx < uint32(0x3c900000) { /* |x| < 2**-54, return x */
if hx < uint32(0x00100000) {
if uint64(4) == uint64(4) {
y = float32(x3)
} else {
if uint64(4) == uint64(8) {
y1 = float64(float32(x3))
} else {
y2 = float64(float32(x3))
}
}
}
return x3
} else {
k = 0
}
}
/* x is now in primary range */
hfx = Tdouble_t(float64(0.5) * x3)
hxs = Tdouble_t(x3 * hfx)
r1 = float64(1) + float64(hxs*(_Q1+float64(hxs*(_Q2+float64(hxs*(_Q3+float64(hxs*(_Q4+float64(hxs*_Q5)))))))))
t = float64(3) - float64(r1*hfx)
e = Tdouble_t(hxs * ((r1 - t) / (Float64FromFloat64(6) - float64(x3*t))))
if k == 0 { /* c is 0 */
return x3 - (float64(x3*e) - hxs)
}
e = float64(x3*(e-c)) - c
e -= hxs
/* exp(x) ~ 2^k (Xreduced - e + 1) */
if k == -int32(1) {
return float64(float64(0.5)*(x3-e)) - float64(0.5)
}
if k == int32(1) {
if x3 < -Float64FromFloat64(0.25) {
return float64(-Float64FromFloat64(2) * (e - (x3 + Float64FromFloat64(0.5))))
}
return float64(1) + float64(float64(2)*(x3-e))
}
*(*Tuint64_t)(unsafe.Pointer(bp + 8)) = Uint64FromInt32(Int32FromInt32(0x3ff)+k) << int32(52) /* 2^k */
twopk = *(*float64)(unsafe.Pointer(bp + 8))
if k < 0 || k > int32(56) { /* suffice to return exp(x)-1 */
y3 = x3 - e + float64(1)
if k == int32(1024) {
y3 = Tdouble_t(Tdouble_t(y3*float64(2)) * float64(8.98846567431158e+307))
} else {
y3 = Tdouble_t(y3 * twopk)
}
return y3 - float64(1)
}
*(*Tuint64_t)(unsafe.Pointer(bp + 8)) = Uint64FromInt32(Int32FromInt32(0x3ff)-k) << int32(52) /* 2^-k */
if k < int32(20) {
y3 = Tdouble_t((x3 - e + (Float64FromInt32(1) - *(*float64)(unsafe.Pointer(bp + 8)))) * twopk)
} else {
y3 = Tdouble_t((x3 - (e + *(*float64)(unsafe.Pointer(bp + 8))) + Float64FromInt32(1)) * twopk)
}
return y3
}
var _ln2_hi1 = float32(0.69313812256) /* 0x3f317180 */
var _ln2_lo1 = float32(9.0580006145e-06) /* 0x3717f7d1 */
var _invln21 = float32(1.4426950216) /* 0x3fb8aa3b */
/*
* Domain [-0.34568, 0.34568], range ~[-6.694e-10, 6.696e-10]:
* |6 / x * (1 + 2 * (1 / (exp(x) - 1) - 1 / x)) - q(x)| < 2**-30.04
* Scaled coefficients: Qn_here = 2**n * Qn_for_q (see s_expm1.c):
*/
var _Q11 = float32(-Float64FromFloat64(0.033333212137)) /* -0x888868.0p-28 */
var _Q21 = float32(0.0015807170421) /* 0xcf3010.0p-33 */
func Xexpm1f(tls *TLS, x3 float32) (r float32) {
if __ccgo_strace {
trc("tls=%v x3=%v, (%v:)", tls, x3, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var c, e, hfx, hi, hxs, lo, r1, t, twopk, y3 Tfloat_t
var hx Tuint32_t
var k, sign int32
var y, v1 float32
var y1, y2 float64
var _ /* u at bp+0 */ struct {
Fi [0]Tuint32_t
Ff float32
}
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = c, e, hfx, hi, hx, hxs, k, lo, r1, sign, t, twopk, y, y1, y2, y3, v1
*(*struct {
Fi [0]Tuint32_t
Ff float32
})(unsafe.Pointer(bp)) = struct {
Fi [0]Tuint32_t
Ff float32
}{}
*(*float32)(unsafe.Pointer(bp)) = x3
hx = *(*Tuint32_t)(unsafe.Pointer(bp)) & uint32(0x7fffffff)
sign = Int32FromUint32(*(*Tuint32_t)(unsafe.Pointer(bp)) >> int32(31))
/* filter out huge and non-finite argument */
if hx >= uint32(0x4195b844) { /* if |x|>=27*ln2 */
if hx > uint32(0x7f800000) { /* NaN */
return x3
}
if sign != 0 {
return float32(-Int32FromInt32(1))
}
if hx > uint32(0x42b17217) { /* x > log(FLT_MAX) */
x3 *= Float32FromFloat32(1.7014118346046923e+38)
return x3
}
}
/* argument reduction */
if hx > uint32(0x3eb17218) { /* if |x| > 0.5 ln2 */
if hx < uint32(0x3F851592) { /* and |x| < 1.5 ln2 */
if !(sign != 0) {
hi = x3 - _ln2_hi1
lo = _ln2_lo1
k = int32(1)
} else {
hi = x3 + _ln2_hi1
lo = -_ln2_lo1
k = -int32(1)
}
} else {
if sign != 0 {
v1 = -Float32FromFloat32(0.5)
} else {
v1 = Float32FromFloat32(0.5)
}
k = int32(float32(_invln21*x3) + v1)
t = float32(k)
hi = x3 - float32(t*_ln2_hi1) /* t*ln2_hi is exact here */
lo = Tfloat_t(t * _ln2_lo1)
}
x3 = hi - lo
c = hi - x3 - lo
} else {
if hx < uint32(0x33000000) { /* when |x|<2**-25, return x */
if hx < uint32(0x00800000) {
if uint64(4) == uint64(4) {
y = float32(x3 * x3)
} else {
if uint64(4) == uint64(8) {
y1 = float64(x3 * x3)
} else {
y2 = float64(x3 * x3)
}
}
}
return x3
} else {
k = 0
}
}
/* x is now in primary range */
hfx = Tfloat_t(Float32FromFloat32(0.5) * x3)
hxs = Tfloat_t(x3 * hfx)
r1 = Float32FromFloat32(1) + float32(hxs*(_Q11+float32(hxs*_Q21)))
t = Float32FromFloat32(3) - float32(r1*hfx)
e = Tfloat_t(hxs * ((r1 - t) / (Float32FromFloat32(6) - float32(x3*t))))
if k == 0 { /* c is 0 */
return x3 - (float32(x3*e) - hxs)
}
e = float32(x3*(e-c)) - c
e -= hxs
/* exp(x) ~ 2^k (Xreduced - e + 1) */
if k == -int32(1) {
return float32(Float32FromFloat32(0.5)*(x3-e)) - Float32FromFloat32(0.5)
}
if k == int32(1) {
if x3 < -Float32FromFloat32(0.25) {
return float32(-Float32FromFloat32(2) * (e - (x3 + Float32FromFloat32(0.5))))
}
return Float32FromFloat32(1) + float32(Float32FromFloat32(2)*(x3-e))
}
*(*Tuint32_t)(unsafe.Pointer(bp)) = Uint32FromInt32((int32(0x7f) + k) << int32(23)) /* 2^k */
twopk = *(*float32)(unsafe.Pointer(bp))
if k < 0 || k > int32(56) { /* suffice to return exp(x)-1 */
y3 = x3 - e + Float32FromFloat32(1)
if k == int32(128) {
y3 = Tfloat_t(Tfloat_t(y3*Float32FromFloat32(2)) * Float32FromFloat32(1.7014118346046923e+38))
} else {
y3 = Tfloat_t(y3 * twopk)
}
return y3 - Float32FromFloat32(1)
}
*(*Tuint32_t)(unsafe.Pointer(bp)) = Uint32FromInt32((int32(0x7f) - k) << int32(23)) /* 2^-k */
if k < int32(23) {
y3 = Tfloat_t((x3 - e + (Float32FromInt32(1) - *(*float32)(unsafe.Pointer(bp)))) * twopk)
} else {
y3 = Tfloat_t((x3 - (e + *(*float32)(unsafe.Pointer(bp))) + Float32FromInt32(1)) * twopk)
}
return y3
}
func Xexpm1l(tls *TLS, x float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x=%v, (%v:)", tls, x, origin(2))
defer func() { trc("-> %v", r) }()
}
return Xexpm1(tls, x)
}
type Tdouble_t = float64
type Tuint32_t = uint32
type Tuint64_t = uint64
var _o_threshold = float64(709.782712893384) /* 0x40862E42, 0xFEFA39EF */
var _ln2_hi = float64(0.6931471803691238) /* 0x3fe62e42, 0xfee00000 */
var _ln2_lo = float64(1.9082149292705877e-10) /* 0x3dea39ef, 0x35793c76 */
var _invln2 = float64(1.4426950408889634) /* 0x3ff71547, 0x652b82fe */
/* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs = x*x/2: */
var _Q1 = -Float64FromFloat64(0.03333333333333313) /* BFA11111 111110F4 */
var _Q2 = float64(0.0015873015872548146) /* 3F5A01A0 19FE5585 */
var _Q3 = -Float64FromFloat64(7.93650757867488e-05) /* BF14CE19 9EAADBB7 */
var _Q4 = float64(4.008217827329362e-06) /* 3ED0CFCA 86E65239 */
var _Q5 = -Float64FromFloat64(2.0109921818362437e-07) /* BE8AFDB7 6E09C32D */
var _ln2_hi2 = float64(0.6931471803691238) /* 3fe62e42 fee00000 */
var _ln2_lo2 = float64(1.9082149292705877e-10) /* 3dea39ef 35793c76 */
var _Lg12 = float64(0.6666666666666735) /* 3FE55555 55555593 */
var _Lg22 = float64(0.3999999999940942) /* 3FD99999 9997FA04 */
var _Lg32 = float64(0.2857142874366239) /* 3FD24924 94229359 */
var _Lg42 = float64(0.22222198432149784) /* 3FCC71C5 1D8E78AF */
var _Lg51 = float64(0.1818357216161805) /* 3FC74664 96CB03DE */
var _Lg61 = float64(0.15313837699209373) /* 3FC39A09 D078C69F */
var _Lg71 = float64(0.14798198605116586) /* 3FC2F112 DF3E5244 */
func Xlog1p(tls *TLS, x3 float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x3=%v, (%v:)", tls, x3, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var R, c, dk, f, hfsq, s, t1, t2, w, z Tdouble_t
var hu, hx Tuint32_t
var k int32
var y float32
var y1, y2, v1 float64
var _ /* u at bp+0 */ struct {
Fi [0]Tuint64_t
Ff float64
}
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = R, c, dk, f, hfsq, hu, hx, k, s, t1, t2, w, y, y1, y2, z, v1
*(*struct {
Fi [0]Tuint64_t
Ff float64
})(unsafe.Pointer(bp)) = struct {
Fi [0]Tuint64_t
Ff float64
}{}
*(*float64)(unsafe.Pointer(bp)) = x3
hx = uint32(*(*Tuint64_t)(unsafe.Pointer(bp)) >> int32(32))
k = int32(1)
if hx < uint32(0x3fda827a) || hx>>int32(31) != 0 { /* 1+x < sqrt(2)+ */
if hx >= uint32(0xbff00000) { /* x <= -1.0 */
if x3 == float64(-Int32FromInt32(1)) {
return x3 / float64(0)
} /* log1p(-1) = -inf */
return (x3 - x3) / float64(0) /* log1p(x<-1) = NaN */
}
if hx<<int32(1) < Uint32FromInt32(Int32FromInt32(0x3ca00000)<<Int32FromInt32(1)) { /* |x| < 2**-53 */
/* underflow if subnormal */
if hx&uint32(0x7ff00000) == uint32(0) {
if uint64(4) == uint64(4) {
y = float32(x3)
} else {
if uint64(4) == uint64(8) {
y1 = float64(float32(x3))
} else {
y2 = float64(float32(x3))
}
}
}
return x3
}
if hx <= uint32(0xbfd2bec4) { /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
k = 0
c = Float64FromInt32(0)
f = x3
}
} else {
if hx >= uint32(0x7ff00000) {
return x3
}
}
if k != 0 {
*(*float64)(unsafe.Pointer(bp)) = Float64FromInt32(1) + x3
hu = uint32(*(*Tuint64_t)(unsafe.Pointer(bp)) >> int32(32))
hu += Uint32FromInt32(Int32FromInt32(0x3ff00000) - Int32FromInt32(0x3fe6a09e))
k = Int32FromUint32(hu>>Int32FromInt32(20)) - int32(0x3ff)
/* correction term ~ log(1+x)-log(u), avoid underflow in c/u */
if k < int32(54) {
if k >= int32(2) {
v1 = Float64FromInt32(1) - (*(*float64)(unsafe.Pointer(bp)) - x3)
} else {
v1 = x3 - (*(*float64)(unsafe.Pointer(bp)) - Float64FromInt32(1))
}
c = v1
c /= *(*float64)(unsafe.Pointer(bp))
} else {
c = Float64FromInt32(0)
}
/* reduce u into [sqrt(2)/2, sqrt(2)] */
hu = hu&uint32(0x000fffff) + uint32(0x3fe6a09e)
*(*Tuint64_t)(unsafe.Pointer(bp)) = uint64(hu)<<int32(32) | *(*Tuint64_t)(unsafe.Pointer(bp))&uint64(0xffffffff)
f = *(*float64)(unsafe.Pointer(bp)) - Float64FromInt32(1)
}
hfsq = Tdouble_t(float64(float64(0.5)*f) * f)
s = f / (Float64FromFloat64(2) + f)
z = Tdouble_t(s * s)
w = Tdouble_t(z * z)
t1 = Tdouble_t(w * (_Lg22 + float64(w*(_Lg42+float64(w*_Lg61)))))
t2 = Tdouble_t(z * (_Lg12 + float64(w*(_Lg32+float64(w*(_Lg51+float64(w*_Lg71)))))))
R = t2 + t1
dk = float64(k)
return Tdouble_t(s*(hfsq+R)) + (Tdouble_t(dk*_ln2_lo2) + c) - hfsq + f + Tdouble_t(dk*_ln2_hi2)
}
var _ln2_hi3 = float32(0.69313812256) /* 0x3f317180 */
var _ln2_lo3 = float32(9.0580006145e-06) /* 0x3717f7d1 */
/* |(log(1+s)-log(1-s))/s - Lg(s)| < 2**-34.24 (~[-4.95e-11, 4.97e-11]). */
var _Lg13 = float32(0.6666666269302368) /* 0.66666662693 */
var _Lg23 = float32(0.40000972151756287) /* 0.40000972152 */
var _Lg33 = float32(0.2849878668785095) /* 0.28498786688 */
var _Lg43 = float32(0.24279078841209412) /* 0.24279078841 */
func Xlog1pf(tls *TLS, x3 float32) (r float32) {
if __ccgo_strace {
trc("tls=%v x3=%v, (%v:)", tls, x3, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var R, c, dk, f, hfsq, s, t1, t2, w, z Tfloat_t
var iu, ix Tuint32_t
var k int32
var y, v1 float32
var y1, y2 float64
var _ /* u at bp+0 */ struct {
Fi [0]Tuint32_t
Ff float32
}
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = R, c, dk, f, hfsq, iu, ix, k, s, t1, t2, w, y, y1, y2, z, v1
*(*struct {
Fi [0]Tuint32_t
Ff float32
})(unsafe.Pointer(bp)) = struct {
Fi [0]Tuint32_t
Ff float32
}{}
*(*float32)(unsafe.Pointer(bp)) = x3
ix = *(*Tuint32_t)(unsafe.Pointer(bp))
k = int32(1)
if ix < uint32(0x3ed413d0) || ix>>int32(31) != 0 { /* 1+x < sqrt(2)+ */
if ix >= uint32(0xbf800000) { /* x <= -1.0 */
if x3 == float32(-Int32FromInt32(1)) {
return x3 / Float32FromFloat32(0)
} /* log1p(-1)=+inf */
return (x3 - x3) / Float32FromFloat32(0) /* log1p(x<-1)=NaN */
}
if ix<<int32(1) < Uint32FromInt32(Int32FromInt32(0x33800000)<<Int32FromInt32(1)) { /* |x| < 2**-24 */
/* underflow if subnormal */
if ix&uint32(0x7f800000) == uint32(0) {
if uint64(4) == uint64(4) {
y = float32(x3 * x3)
} else {
if uint64(4) == uint64(8) {
y1 = float64(x3 * x3)
} else {
y2 = float64(x3 * x3)
}
}
}
return x3
}
if ix <= uint32(0xbe95f619) { /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
k = 0
c = Float32FromInt32(0)
f = x3
}
} else {
if ix >= uint32(0x7f800000) {
return x3
}
}
if k != 0 {
*(*float32)(unsafe.Pointer(bp)) = Float32FromInt32(1) + x3
iu = *(*Tuint32_t)(unsafe.Pointer(bp))
iu += Uint32FromInt32(Int32FromInt32(0x3f800000) - Int32FromInt32(0x3f3504f3))
k = Int32FromUint32(iu>>Int32FromInt32(23)) - int32(0x7f)
/* correction term ~ log(1+x)-log(u), avoid underflow in c/u */
if k < int32(25) {
if k >= int32(2) {
v1 = Float32FromInt32(1) - (*(*float32)(unsafe.Pointer(bp)) - x3)
} else {
v1 = x3 - (*(*float32)(unsafe.Pointer(bp)) - Float32FromInt32(1))
}
c = v1
c /= *(*float32)(unsafe.Pointer(bp))
} else {
c = Float32FromInt32(0)
}
/* reduce u into [sqrt(2)/2, sqrt(2)] */
iu = iu&uint32(0x007fffff) + uint32(0x3f3504f3)
*(*Tuint32_t)(unsafe.Pointer(bp)) = iu
f = *(*float32)(unsafe.Pointer(bp)) - Float32FromInt32(1)
}
s = f / (Float32FromFloat32(2) + f)
z = Tfloat_t(s * s)
w = Tfloat_t(z * z)
t1 = Tfloat_t(w * (_Lg23 + float32(w*_Lg43)))
t2 = Tfloat_t(z * (_Lg13 + float32(w*_Lg33)))
R = t2 + t1
hfsq = Tfloat_t(float32(Float32FromFloat32(0.5)*f) * f)
dk = float32(k)
return Tfloat_t(s*(hfsq+R)) + (Tfloat_t(dk*_ln2_lo3) + c) - hfsq + f + Tfloat_t(dk*_ln2_hi3)
}
func Xlog1pl(tls *TLS, x float64) (r float64) {
if __ccgo_strace {
trc("tls=%v x=%v, (%v:)", tls, x, origin(2))
defer func() { trc("-> %v", r) }()
}
return Xlog1p(tls, x)
}
type Tfloat_t = float32
var _B1 = uint32(715094163) /* B1 = (1023-1023/3-0.03306235651)*2**20 */
var _B2 = uint32(696219795) /* B2 = (1023-1023/3-54/3-0.03306235651)*2**20 */
// C documentation
//
// /* |1/cbrt(x) - p(x)| < 2**-23.5 (~[-7.93e-8, 7.929e-8]). */
var _P0 = float64(1.87595182427177) /* 0x3ffe03e6, 0x0f61e692 */
var _P1 = -Float64FromFloat64(1.8849797954337717) /* 0xbffe28e0, 0x92f02420 */
var _P2 = float64(1.6214297201053545) /* 0x3ff9f160, 0x4a49d6c2 */
var _P3 = -Float64FromFloat64(0.758397934778766) /* 0xbfe844cb, 0xbee751d9 */
var _P4 = float64(0.14599619288661245) /* 0x3fc2b000, 0xd4e4edd7 */
func Xcbrt(tls *TLS, x float64) (r1 float64) {
if __ccgo_strace {
trc("tls=%v x=%v, (%v:)", tls, x, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(16)
defer tls.Free(16)
var hx Tuint32_t
var r, s, t, w Tdouble_t
var p1 uintptr
var _ /* u at bp+0 */ struct {
Fi [0]Tuint64_t
Ff float64
}
_, _, _, _, _, _ = hx, r, s, t, w, p1
*(*struct {
Fi [0]Tuint64_t
Ff float64
})(unsafe.Pointer(bp)) = struct {
Fi [0]Tuint64_t
Ff float64
}{}
*(*float64)(unsafe.Pointer(bp)) = x
hx = uint32(*(*Tuint64_t)(unsafe.Pointer(bp)) >> int32(32) & uint64(0x7fffffff))
if hx >= uint32(0x7ff00000) { /* cbrt(NaN,INF) is itself */
return x + x
}
/*
* Rough cbrt to 5 bits:
* cbrt(2**e*(1+m) ~= 2**(e/3)*(1+(e%3+m)/3)
* where e is integral and >= 0, m is real and in [0, 1), and "/" and
* "%" are integer division and modulus with rounding towards minus
* infinity. The RHS is always >= the LHS and has a maximum relative
* error of about 1 in 16. Adding a bias of -0.03306235651 to the
* (e%3+m)/3 term reduces the error to about 1 in 32. With the IEEE
* floating point representation, for finite positive normal values,
* ordinary integer divison of the value in bits magically gives
* almost exactly the RHS of the above provided we first subtract the
* exponent bias (1023 for doubles) and later add it back. We do the
* subtraction virtually to keep e >= 0 so that ordinary integer
* division rounds towards minus infinity; this is also efficient.
*/
if hx < uint32(0x00100000) { /* zero or subnormal? */
*(*float64)(unsafe.Pointer(bp)) = float64(x * float64(1.8014398509481984e+16))
hx = uint32(*(*Tuint64_t)(unsafe.Pointer(bp)) >> int32(32) & uint64(0x7fffffff))
if hx == uint32(0) {
return x
} /* cbrt(0) is itself */
hx = hx/uint32(3) + _B2
} else {
hx = hx/uint32(3) + _B1
}
p1 = bp
*(*Tuint64_t)(unsafe.Pointer(p1)) = Tuint64_t(*(*Tuint64_t)(unsafe.Pointer(p1)) & (Uint64FromUint64(1) << Int32FromInt32(63)))
*(*Tuint64_t)(unsafe.Pointer(bp)) |= uint64(hx) << int32(32)
t = *(*float64)(unsafe.Pointer(bp))
/*
* New cbrt to 23 bits:
* cbrt(x) = t*cbrt(x/t**3) ~= t*P(t**3/x)
* where P(r) is a polynomial of degree 4 that approximates 1/cbrt(r)
* to within 2**-23.5 when |r - 1| < 1/10. The rough approximation
* has produced t such than |t/cbrt(x) - 1| ~< 1/32, and cubing this
* gives us bounds for r = t**3/x.
*
* Try to optimize for parallel evaluation as in __tanf.c.
*/
r = Tdouble_t(Tdouble_t(t*t) * (t / x))
t = Tdouble_t(t * (_P0 + float64(r*(_P1+float64(r*_P2))) + float64(Tdouble_t(Tdouble_t(r*r)*r)*(_P3+float64(r*_P4)))))
/*
* Round t away from zero to 23 bits (sloppily except for ensuring that
* the result is larger in magnitude than cbrt(x) but not much more than
* 2 23-bit ulps larger). With rounding towards zero, the error bound
* would be ~5/6 instead of ~4/6. With a maximum error of 2 23-bit ulps
* in the rounded t, the infinite-precision error in the Newton
* approximation barely affects third digit in the final error
* 0.667; the error in the rounded t can be up to about 3 23-bit ulps
* before the final error is larger than 0.667 ulps.
*/
*(*float64)(unsafe.Pointer(bp)) = t
*(*Tuint64_t)(unsafe.Pointer(bp)) = uint64(*(*Tuint64_t)(unsafe.Pointer(bp))+Uint64FromUint32(0x80000000)) & uint64(0xffffffffc0000000)
t = *(*float64)(unsafe.Pointer(bp))
/* one step Newton iteration to 53 bits with error < 0.667 ulps */
s = Tdouble_t(t * t) /* t*t is exact */
r = x / s /* error <= 0.5 ulps; |r| < |t| */
w = t + t /* t+t is exact */
r = (r - t) / (w + r) /* r-t is exact; w+r ~= 3*t */
t = t + Tdouble_t(t*r) /* error <= 0.5 + 0.5/3 + epsilon */
return t
}
var _B11 = uint32(709958130) /* B1 = (127-127.0/3-0.03306235651)*2**23 */
var _B21 = uint32(642849266) /* B2 = (127-127.0/3-24/3-0.03306235651)*2**23 */
// Xcbrtf implements C cbrtf(3): the cube root of a float32, computed via
// bit-level manipulation of the IEEE-754 representation (translated C;
// the comments below match the musl/FreeBSD s_cbrtf.c lineage). The two
// Newton iterations run in float64 so intermediate terms neither overflow
// nor lose the bits needed for a correctly rounded float32 result.
func Xcbrtf(tls *TLS, x float32) (r1 float32) {
	if __ccgo_strace {
		trc("tls=%v x=%v, (%v:)", tls, x, origin(2))
		defer func() { trc("-> %v", r1) }()
	}
	// bp is scratch TLS memory playing the role of the C union
	// { uint32_t i; float f; }: the float bits are read and written
	// through it as an integer.
	bp := tls.Alloc(16)
	defer tls.Free(16)
	var T, r Tdouble_t
	var hx Tuint32_t
	var _ /* u at bp+0 */ struct {
		Fi [0]Tuint32_t
		Ff float32
	}
	_, _, _ = T, hx, r
	*(*struct {
		Fi [0]Tuint32_t
		Ff float32
	})(unsafe.Pointer(bp)) = struct {
		Fi [0]Tuint32_t
		Ff float32
	}{}
	*(*float32)(unsafe.Pointer(bp)) = x
	hx = *(*Tuint32_t)(unsafe.Pointer(bp)) & uint32(0x7fffffff) // bits of |x|
	if hx >= uint32(0x7f800000) { /* cbrt(NaN,INF) is itself */
		return x + x
	}
	/* rough cbrt to 5 bits */
	if hx < uint32(0x00800000) { /* zero or subnormal? */
		if hx == uint32(0) {
			return x
		} /* cbrt(+-0) is itself */
		// Scale subnormals by 2**24 so the exponent-bias trick below
		// (divide the biased exponent by 3 and add a magic constant)
		// also works for them; _B21 compensates for the scaling.
		*(*float32)(unsafe.Pointer(bp)) = float32(x * Float32FromFloat32(1.6777216e+07))
		hx = *(*Tuint32_t)(unsafe.Pointer(bp)) & uint32(0x7fffffff)
		hx = hx/uint32(3) + _B21
	} else {
		hx = hx/uint32(3) + _B11
	}
	// Reassemble the 5-bit estimate: keep x's sign bit, install the
	// estimated magnitude bits.
	*(*Tuint32_t)(unsafe.Pointer(bp)) &= uint32(0x80000000)
	*(*Tuint32_t)(unsafe.Pointer(bp)) |= hx
	/*
	 * First step Newton iteration (solving t*t-x/t == 0) to 16 bits. In
	 * double precision so that its terms can be arranged for efficiency
	 * without causing overflow or underflow.
	 */
	T = float64(*(*float32)(unsafe.Pointer(bp)))
	r = Tdouble_t(Tdouble_t(T*T) * T)
	T = Tdouble_t(T*(float64(x)+float64(x)+r)) / (float64(x) + r + r)
	/*
	 * Second step Newton iteration to 47 bits. In double precision for
	 * efficiency and accuracy.
	 */
	r = Tdouble_t(Tdouble_t(T*T) * T)
	T = Tdouble_t(T*(float64(x)+float64(x)+r)) / (float64(x) + r + r)
	/* rounding to 24 bits is perfect in round-to-nearest mode */
	return float32(T)
}
// Xcbrtl implements C cbrtl(3). On this platform the long double argument
// arrives as a float64, so the work is delegated to Xcbrt unchanged.
func Xcbrtl(tls *TLS, x float64) (r float64) {
	if __ccgo_strace {
		trc("tls=%v x=%v, (%v:)", tls, x, origin(2))
		defer func() { trc("-> %v", r) }()
	}
	r = Xcbrt(tls, x)
	return r
}

View file

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package libc is a partial reimplementation of C libc in pure Go.
package libc // import "modernc.org/libc"
import (

View file

@ -7,8 +7,11 @@ package libc // import "modernc.org/libc"
import (
crand "crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"io/fs"
mbits "math/bits"
"os"
"os/exec"
gosignal "os/signal"
@ -39,6 +42,7 @@ import (
"modernc.org/libc/unistd"
"modernc.org/libc/uuid/uuid"
"modernc.org/libc/wctype"
"modernc.org/memory"
)
const (
@ -49,16 +53,14 @@ const (
// in6_addr_any in.In6_addr
// )
type Tsize_t = types.Size_t
type (
syscallErrno = unix.Errno
long = types.User_long_t
ulong = types.User_ulong_t
)
type pthreadAttr struct {
detachState int32
}
// // Keep these outside of the var block otherwise go generate will miss them.
var X__stderrp = Xstdout
var X__stdinp = Xstdin
@ -94,6 +96,18 @@ func (f file) setErr() {
(*stdio.FILE)(unsafe.Pointer(f)).F_flags |= 1
}
func (f file) clearErr() {
(*stdio.FILE)(unsafe.Pointer(f)).F_flags &^= 3
}
func (f file) eof() bool {
return (*stdio.FILE)(unsafe.Pointer(f)).F_flags&2 != 0
}
func (f file) setEOF() {
(*stdio.FILE)(unsafe.Pointer(f)).F_flags |= 2
}
func (f file) close(t *TLS) int32 {
r := Xclose(t, f.fd())
Xfree(t, uintptr(f))
@ -125,6 +139,19 @@ func fwrite(fd int32, b []byte) (int, error) {
return unix.Write(int(fd), b)
}
// Xclearerr implements C clearerr(3): clear the error and end-of-file
// indicator bits on the stream f (a *stdio.FILE passed as a uintptr).
func Xclearerr(tls *TLS, f uintptr) {
	file(f).clearErr()
}
// Xfeof implements C feof(3): report whether the end-of-file indicator is
// set on the stream f. Returns 1 when the EOF flag is set, 0 otherwise.
func Xfeof(t *TLS, f uintptr) (r int32) {
	if __ccgo_strace {
		trc("t=%v f=%v, (%v:)", t, f, origin(2))
		defer func() { trc("-> %v", r) }()
	}
	r = BoolInt32(file(f).eof())
	return r
}
func X__inline_isnand(t *TLS, x float64) int32 {
if __ccgo_strace {
trc("t=%v x=%v, (%v:)", t, x, origin(2))
@ -355,6 +382,9 @@ func Xlocaltime(_ *TLS, timep uintptr) uintptr {
localtime.Ftm_wday = int32(t.Weekday())
localtime.Ftm_yday = int32(t.YearDay())
localtime.Ftm_isdst = Bool32(isTimeDST(t))
_, off := t.Zone()
localtime.Ftm_gmtoff = int64(off)
localtime.Ftm_zone = 0
return uintptr(unsafe.Pointer(&localtime))
}
@ -372,6 +402,9 @@ func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr {
(*time.Tm)(unsafe.Pointer(result)).Ftm_wday = int32(t.Weekday())
(*time.Tm)(unsafe.Pointer(result)).Ftm_yday = int32(t.YearDay())
(*time.Tm)(unsafe.Pointer(result)).Ftm_isdst = Bool32(isTimeDST(t))
_, off := t.Zone()
(*time.Tm)(unsafe.Pointer(result)).Ftm_gmtoff = int64(off)
(*time.Tm)(unsafe.Pointer(result)).Ftm_zone = 0
return result
}
@ -1539,6 +1572,9 @@ func Xfread(t *TLS, ptr uintptr, size, nmemb types.Size_t, stream uintptr) types
n, err = unix.Read(int(fd), nil)
default:
n, err = unix.Read(int(fd), (*RawMem)(unsafe.Pointer(ptr))[:count:count])
if n == 0 {
file(stream).setEOF()
}
if dmesgs && err == nil {
dmesg("%v: fd %v, n %#x\n%s", origin(1), fd, n, hex.Dump((*RawMem)(unsafe.Pointer(ptr))[:n:n]))
}
@ -1670,12 +1706,11 @@ func Xfputs(t *TLS, s, stream uintptr) int32 {
if __ccgo_strace {
trc("t=%v stream=%v, (%v:)", t, stream, origin(2))
}
panic(todo(""))
// if _, _, err := unix.Syscall(unix.SYS_WRITE, uintptr(file(stream).fd()), s, uintptr(Xstrlen(t, s))); err != 0 {
// return -1
// }
if _, _, err := unix.Syscall(unix.SYS_WRITE, uintptr(file(stream).fd()), s, uintptr(Xstrlen(t, s))); err != 0 {
return -1
}
// return 0
return 0
}
var getservbynameStaticResult netdb.Servent
@ -2174,11 +2209,13 @@ func Xpthread_attr_getdetachstate(tls *TLS, a uintptr, state uintptr) int32 {
panic(todo(""))
}
func Xpthread_attr_setdetachstate(tls *TLS, a uintptr, state int32) int32 {
if __ccgo_strace {
trc("tls=%v a=%v state=%v, (%v:)", tls, a, state, origin(2))
func Xpthread_attr_setdetachstate(tls *TLS, a uintptr, state int32) (r int32) {
if uint32(state) > 1 {
return errno.EINVAL
}
panic(todo(""))
(*pthreadAttr)(unsafe.Pointer(a)).detachState = state
return 0
}
func Xpthread_mutexattr_destroy(tls *TLS, a uintptr) int32 {
@ -2447,11 +2484,20 @@ func Xnanosleep(t *TLS, req, rem uintptr) int32 {
// }
// size_t malloc_size(const void *ptr);
func Xmalloc_size(t *TLS, ptr uintptr) types.Size_t {
func Xmalloc_size(t *TLS, p uintptr) (r types.Size_t) {
if __ccgo_strace {
trc("t=%v ptr=%v, (%v:)", t, ptr, origin(2))
trc("t=%v p=%v, (%v:)", t, p, origin(2))
defer func() { trc("-> %v", r) }()
}
panic(todo(""))
if p == 0 {
return 0
}
allocMu.Lock()
defer allocMu.Unlock()
return types.Size_t(memory.UintptrUsableSize(p))
}
// int open(const char *pathname, int flags, ...);
@ -2518,3 +2564,226 @@ func X__builtin_lround(tls *TLS, x float64) (r long) {
func Xlround(tls *TLS, x float64) (r long) {
return long(Xround(tls, x))
}
// https://g.co/gemini/share/2c37d5b57994
// Constants mirroring C's ftw type flags
const (
	FTW_F   = 0 // Regular file
	FTW_D   = 1 // Directory (visited pre-order)
	FTW_DNR = 2 // Directory that cannot be read
	FTW_NS  = 4 // Stat failed (permissions, broken link, etc.)
	FTW_SL  = 4 // Symbolic link (lstat was used)
	// NOTE(review): FTW_NS and FTW_SL share the value 4, so a callback
	// cannot distinguish a stat failure from a symlink. glibc defines
	// FTW_NS == 3 and FTW_SL == 4 — confirm against the ftw.h the
	// translated C code was compiled with before relying on either flag.
	// Note: C's ftw might have other flags like FTW_DP (post-order dir) or FTW_SLN
	// which are not directly supported by filepath.WalkDir's simple pre-order traversal.
	// This emulation focuses on the most common flags associated with stat/lstat results.
)
// ftwStopError carries a callback's non-zero return value out of
// filepath.WalkDir, terminating the walk so ftw can report that value.
type ftwStopError struct {
	stopValue int // the non-zero value the user callback returned
}

// Error implements the error interface so the stop value can travel
// through filepath.WalkDir's error return.
func (e *ftwStopError) Error() string {
	return fmt.Sprintf("ftw walk stopped by callback with return value %d", e.stopValue)
}
// goFtwFunc is the callback function type, mirroring the C ftw callback.
// It receives the path, file info (if available), and a type flag.
// Returning a non-zero value stops the walk and becomes the return value of Ftw.
// Returning 0 continues the walk.
type goFtwFunc func(path string, info os.FileInfo, typeflag int) int
// Ftw emulates the C standard library function ftw(3).
// It walks the directory tree starting at 'dirpath' and calls the 'callback'
// function for each entry encountered.
//
// Parameters:
// - dirpath: The root directory path for the traversal.
// - callback: The goFtwFunc to call for each file system entry.
// - nopenfd: This parameter is part of the C ftw signature but is IGNORED
// in this Go implementation. Go's filepath.WalkDir manages concurrency
// and file descriptors internally.
//
// Returns:
// - 0 on successful completion of the walk.
// - The non-zero value returned by the callback, if the callback terminated the walk.
// - -1 if an error occurred during the walk that wasn't handled by calling
// the callback with FTW_DNR or FTW_NS (e.g., error accessing the initial dirpath).
func ftw(dirpath string, callback goFtwFunc, nopenfd int) int {
// nopenfd is ignored in this Go implementation.
walkErr := filepath.WalkDir(dirpath, func(path string, d fs.DirEntry, err error) error {
var info os.FileInfo
var typeflag int
// --- Handle errors passed by WalkDir ---
if err != nil {
// Check if the error is related to accessing a directory
if errors.Is(err, fs.ErrPermission) || errors.Is(err, unix.EACCES) { // Added syscall.EACCES check
// Try to determine if it's a directory we can't read
// We might not have 'd' if the error occurred trying to list 'path' contents
// Let's try a direct Lstat on the path itself if d is nil
lstatInfo, lstatErr := os.Lstat(path)
if lstatErr == nil && lstatInfo.IsDir() {
typeflag = FTW_DNR // Directory, but WalkDir errored (likely reading it)
info = lstatInfo // Provide the info we could get
} else {
// Can't confirm it's a directory, or Lstat itself failed
typeflag = FTW_NS // Treat as general stat failure
// info remains nil
}
} else {
// Other errors (e.g., broken symlink during traversal, I/O error)
typeflag = FTW_NS
// Attempt to get Lstat info even if WalkDir had an error, maybe it's available
lstatInfo, _ := os.Lstat(path) // Ignore error here, if it fails info stays nil
info = lstatInfo
}
// Even with errors, call the callback with the path and appropriate flag
stopVal := callback(path, info, typeflag)
if stopVal != 0 {
return &ftwStopError{stopValue: stopVal}
}
// If the error was on a directory, returning the error might stop WalkDir
// from descending. If it was fs.ErrPermission on a dir, WalkDir might
// pass filepath.SkipDir implicitly or continue depending on implementation.
// Let's return nil here to *try* to continue the walk for other siblings
// if the callback didn't stop it. The callback *was* notified.
// If the error prevents further progress WalkDir will stop anyway.
return nil // Allow walk to potentially continue elsewhere
}
// --- No error from WalkDir, process the DirEntry ---
info, err = d.Info() // Get FileInfo (like C's stat/lstat result)
if err != nil {
// Error getting info for an entry WalkDir *could* list (rare, maybe permissions changed?)
typeflag = FTW_NS
// info remains nil
} else {
// Determine type flag based on file mode
mode := info.Mode()
if mode&fs.ModeSymlink != 0 {
typeflag = FTW_SL
} else if mode.IsDir() {
typeflag = FTW_D // Visited pre-order
} else if mode.IsRegular() {
typeflag = FTW_F
} else {
// Other types (device, socket, pipe, etc.) - C ftw usually lumps these under FTW_F
// or might have FTW_NS if stat fails. Let's treat non-dir, non-link, non-regular
// as FTW_F for simplicity, aligning with common C practice, or FTW_NS if stat failed above.
// Since we have info here, we know stat didn't fail.
// Let's be more specific, maybe treat others as FTW_NS? Or stick to FTW_F?
// C ftw man page isn't super specific about all types. FTW_F seems reasonable.
typeflag = FTW_F // Treat other valid types as 'files' for simplicity
}
}
// --- Call the user callback ---
stopVal := callback(path, info, typeflag)
if stopVal != 0 {
// User wants to stop the walk
return &ftwStopError{stopValue: stopVal}
}
return nil // Continue walk
})
// --- Handle WalkDir's final return value ---
if walkErr == nil {
return 0 // Success
}
// Check if the error was our custom stop signal
var stopErr *ftwStopError
if errors.As(walkErr, &stopErr) {
return stopErr.stopValue // Return the value from the callback
}
// Otherwise, it was an unhandled error during the walk
// (e.g., initial dirpath access error, or other error not mapped to FTW_NS/DNR)
return -1 // General error return
}
// Xftw implements C ftw(3) on top of the Go ftw helper above: it walks the
// tree rooted at the C string path and invokes the C callback fn (shaped
// as int (*)(const char *, const struct stat *, int)) for each entry.
// fd_limit is forwarded but ignored by the Go walker.
func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
	// A single stat buffer is reused across all callback invocations.
	statp := tls.Alloc(int(unsafe.Sizeof(unix.Stat_t{})))
	defer tls.Free(int(unsafe.Sizeof(unix.Stat_t{})))
	return int32(ftw(
		GoString(path),
		func(path string, info os.FileInfo, typeflag int) int {
			cs, _ := CString(path) // NOTE(review): allocation error ignored; cs would be 0 — confirm acceptable
			defer Xfree(tls, cs)
			// NOTE(review): the Xstat return value is ignored, so on a
			// stat failure (FTW_NS) the callback sees whatever the
			// previous iteration left in statp — confirm against the
			// ftw(3) contract for FTW_NS.
			Xstat(tls, cs, statp)
			return int((*(*func(*TLS, uintptr, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, cs, statp, int32(typeflag)))
		},
		int(fd_limit),
	))
}
// Xexecve implements C execve(2): replace the current process image with
// the program at path, passing the NULL-terminated argv and envp pointer
// arrays. On success it does not return; on failure it sets errno on the
// TLS and returns -1.
func Xexecve(tls *TLS, path uintptr, argv uintptr, envp uintptr) (r int32) {
	goPath := GoString(path)
	var goArgv, goEnvp []string
	// argv and envp are C-style NULL-terminated arrays of char*; walk them
	// one pointer-sized element at a time, converting each to a Go string.
	// NOTE(review): a NULL argv/envp pointer itself would fault here —
	// C callers conventionally pass valid arrays; confirm NULL need not
	// be tolerated.
	for p := *(*uintptr)(unsafe.Pointer(argv)); p != 0; p = *(*uintptr)(unsafe.Pointer(argv)) {
		goArgv = append(goArgv, GoString(p))
		argv += unsafe.Sizeof(uintptr(0))
	}
	for p := *(*uintptr)(unsafe.Pointer(envp)); p != 0; p = *(*uintptr)(unsafe.Pointer(envp)) {
		goEnvp = append(goEnvp, GoString(p))
		envp += unsafe.Sizeof(uintptr(0))
	}
	if err := unix.Exec(goPath, goArgv, goEnvp); err != nil {
		tls.setErrno(err)
		return -1
	}
	// unix.Exec only returns on error, so this point is unreachable.
	panic("unreachable")
}
// Xsetuid implements C setuid(2): set the user ID of the calling process
// to uid. Returns 0 on success; on failure it records errno on the TLS
// and returns -1.
func Xsetuid(tls *TLS, uid uint32) (r int32) {
	if __ccgo_strace {
		trc("tls=%v uid=%v, (%v:)", tls, uid, origin(2))
		defer func() { trc("-> %v", r) }()
	}
	err := unix.Setuid(int(uid))
	if err == nil {
		return 0
	}
	tls.setErrno(err)
	return -1
}
// Xsetgid implements C setgid(2): set the group ID of the calling process
// to gid. Returns 0 on success; on failure it records errno on the TLS
// and returns -1.
func Xsetgid(tls *TLS, gid uint32) (r int32) {
	if __ccgo_strace {
		trc("tls=%v gid=%v, (%v:)", tls, gid, origin(2))
		defer func() { trc("-> %v", r) }()
	}
	err := unix.Setgid(int(gid))
	if err == nil {
		return 0
	}
	tls.setErrno(err)
	return -1
}
// Xdup implements C dup(2): duplicate the open file descriptor fd onto the
// lowest-numbered free descriptor. Returns the new descriptor on success,
// or -1 with errno recorded on the TLS on failure.
func Xdup(tls *TLS, fd int32) (r int32) {
	if __ccgo_strace {
		trc("tls=%v fd=%v, (%v:)", tls, fd, origin(2))
		defer func() { trc("-> %v", r) }()
	}
	nfd, err := unix.Dup(int(fd))
	if err == nil {
		return int32(nfd)
	}
	tls.setErrno(err)
	return -1
}
// X__builtin_ctz implements GCC's __builtin_ctz: the number of trailing
// zero bits in n. Unlike the C builtin (undefined for 0), n == 0 yields
// 32 here, per math/bits.TrailingZeros32.
func X__builtin_ctz(t *TLS, n uint32) int32 {
	return int32(mbits.TrailingZeros32(n))
}

View file

@ -17,6 +17,11 @@ import (
"modernc.org/libc/utime"
)
// #define FE_DOWNWARD 0x0400
// #define FE_UPWARD 0x0800
const FE_DOWNWARD = 0x0400
const FE_UPWARD = 0x0800
// int sigaction(int signum, const struct sigaction *act, struct sigaction *oldact);
func Xsigaction(t *TLS, signum int32, act, oldact uintptr) int32 {
if __ccgo_strace {

View file

@ -17,6 +17,11 @@ import (
"modernc.org/libc/utime"
)
// #define FE_UPWARD 0x00400000
// #define FE_DOWNWARD 0x00800000
const FE_UPWARD = 0x00400000
const FE_DOWNWARD = 0x00800000
// int sigaction(int signum, const struct sigaction *act, struct sigaction *oldact);
func Xsigaction(t *TLS, signum int32, act, oldact uintptr) int32 {
if __ccgo_strace {

View file

@ -5,8 +5,12 @@
package libc // import "modernc.org/libc"
import (
"errors"
"fmt"
"io"
"io/fs"
"math"
mbits "math/bits"
"os"
"os/exec"
"path/filepath"
@ -65,6 +69,8 @@ func X__runes_for_locale(t *TLS, l locale_t, p uintptr) uintptr {
panic(todo(""))
}
type Tsize_t = types.Size_t
type syscallErrno = unix.Errno
type file uintptr
@ -80,6 +86,18 @@ func (f file) setErr() {
(*stdio.FILE)(unsafe.Pointer(f)).F_flags |= 1
}
func (f file) clearErr() {
(*stdio.FILE)(unsafe.Pointer(f)).F_flags &^= 3
}
func (f file) eof() bool {
return (*stdio.FILE)(unsafe.Pointer(f)).F_flags&2 != 0
}
func (f file) setEOF() {
(*stdio.FILE)(unsafe.Pointer(f)).F_flags |= 2
}
func (f file) close(t *TLS) int32 {
r := Xclose(t, f.fd())
Xfree(t, uintptr(f))
@ -110,6 +128,19 @@ func fwrite(fd int32, b []byte) (int, error) {
return unix.Write(int(fd), b) //TODO use Xwrite
}
func Xclearerr(tls *TLS, f uintptr) {
file(f).clearErr()
}
func Xfeof(t *TLS, f uintptr) (r int32) {
if __ccgo_strace {
trc("t=%v f=%v, (%v:)", t, f, origin(2))
defer func() { trc("-> %v", r) }()
}
r = BoolInt32(file(f).eof())
return r
}
// unsigned long ___runetype(__ct_rune_t) __pure;
func X___runetype(t *TLS, x types.X__ct_rune_t) ulong {
if __ccgo_strace {
@ -199,6 +230,7 @@ var localtime time.Tm
// struct tm *localtime(const time_t *timep);
func Xlocaltime(_ *TLS, timep uintptr) uintptr {
// trc("%T timep=%+v", time.Time_t(0), *(*time.Time_t)(unsafe.Pointer(timep)))
loc := getLocalLocation()
ut := *(*time.Time_t)(unsafe.Pointer(timep))
t := gotime.Unix(int64(ut), 0).In(loc)
@ -211,11 +243,16 @@ func Xlocaltime(_ *TLS, timep uintptr) uintptr {
localtime.Ftm_wday = int32(t.Weekday())
localtime.Ftm_yday = int32(t.YearDay())
localtime.Ftm_isdst = Bool32(isTimeDST(t))
_, off := t.Zone()
localtime.Ftm_gmtoff = int64(off)
localtime.Ftm_zone = 0
// trc("%T localtime=%+v", localtime, localtime)
return uintptr(unsafe.Pointer(&localtime))
}
// struct tm *localtime_r(const time_t *timep, struct tm *result);
func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr {
// trc("%T timep=%+v", time.Time_t(0), *(*time.Time_t)(unsafe.Pointer(timep)))
loc := getLocalLocation()
ut := *(*unix.Time_t)(unsafe.Pointer(timep))
t := gotime.Unix(int64(ut), 0).In(loc)
@ -228,6 +265,10 @@ func Xlocaltime_r(_ *TLS, timep, result uintptr) uintptr {
(*time.Tm)(unsafe.Pointer(result)).Ftm_wday = int32(t.Weekday())
(*time.Tm)(unsafe.Pointer(result)).Ftm_yday = int32(t.YearDay())
(*time.Tm)(unsafe.Pointer(result)).Ftm_isdst = Bool32(isTimeDST(t))
_, off := t.Zone()
(*time.Tm)(unsafe.Pointer(result)).Ftm_gmtoff = int64(off)
(*time.Tm)(unsafe.Pointer(result)).Ftm_zone = 0
// trc("%T localtime_r=%+v", localtime, (*time.Tm)(unsafe.Pointer(result)))
return result
}
@ -501,6 +542,7 @@ func Xgettimeofday(t *TLS, tv, tz uintptr) int32 {
return -1
}
//trc("tvs=%+v", tvs)
*(*unix.Timeval)(unsafe.Pointer(tv)) = tvs
return 0
}
@ -1563,12 +1605,167 @@ func fcntlCmdStr(cmd int32) string {
}
}
// int setenv(const char *name, const char *value, int overwrite);
func Xsetenv(t *TLS, name, value uintptr, overwrite int32) int32 {
func X__strchrnul(tls *TLS, s uintptr, c int32) uintptr { /* strchrnul.c:10:6: */
if __ccgo_strace {
trc("t=%v value=%v overwrite=%v, (%v:)", t, value, overwrite, origin(2))
trc("tls=%v s=%v c=%v, (%v:)", tls, s, c, origin(2))
}
panic(todo(""))
c = int32(uint8(c))
if !(c != 0) {
return s + uintptr(Xstrlen(tls, s))
}
var w uintptr
for ; uintptr_t(s)%uintptr_t(unsafe.Sizeof(size_t(0))) != 0; s++ {
if !(int32(*(*int8)(unsafe.Pointer(s))) != 0) || int32(*(*uint8)(unsafe.Pointer(s))) == c {
return s
}
}
var k size_t = Uint64(Uint64FromInt32(-1)) / uint64(255) * size_t(c)
for w = s; !((*(*uint64)(unsafe.Pointer(w))-Uint64(Uint64FromInt32(-1))/uint64(255)) & ^*(*uint64)(unsafe.Pointer(w)) & (Uint64(Uint64FromInt32(-1))/uint64(255)*uint64(255/2+1)) != 0) && !((*(*uint64)(unsafe.Pointer(w))^k-Uint64(Uint64FromInt32(-1))/uint64(255)) & ^(*(*uint64)(unsafe.Pointer(w))^k) & (Uint64(Uint64FromInt32(-1))/uint64(255)*uint64(255/2+1)) != 0); w += 8 {
}
s = w
for ; *(*int8)(unsafe.Pointer(s)) != 0 && int32(*(*uint8)(unsafe.Pointer(s))) != c; s++ {
}
return s
}
var _soldenv uintptr /* putenv.c:22:14: */
// int setenv(const char *name, const char *value, int overwrite);
//
// Xsetenv (translated from musl's setenv.c) sets environment variable
// var1 to value, replacing an existing entry only when overwrite is
// nonzero. Returns 0 on success, -1 with errno set on invalid input or
// allocation failure.
func Xsetenv(tls *TLS, var1 uintptr, value uintptr, overwrite int32) int32 { /* setenv.c:26:5: */
	if __ccgo_strace {
		trc("tls=%v var1=%v value=%v overwrite=%v, (%v:)", tls, var1, value, overwrite, origin(2))
	}
	var s uintptr
	var l1 size_t
	var l2 size_t
	// Invalid when var1 is NULL, empty, or contains '=': l1 is the span up
	// to the first '=' or NUL; the name is rejected if l1 is 0 or the scan
	// stopped before the terminating NUL (i.e. at an '=').
	if !(var1 != 0) || !(int32(AssignUint64(&l1, size_t((int64(X__strchrnul(tls, var1, '='))-int64(var1))/1))) != 0) || *(*int8)(unsafe.Pointer(var1 + uintptr(l1))) != 0 {
		*(*int32)(unsafe.Pointer(X___errno_location(tls))) = 22 // 22 == EINVAL
		return -1
	}
	// Without overwrite, an existing definition wins.
	if !(overwrite != 0) && Xgetenv(tls, var1) != 0 {
		return 0
	}
	// Build the "name=value\0" string: l1 name bytes, '=', l2 value bytes,
	// and the terminating NUL (copied along with value).
	l2 = Xstrlen(tls, value)
	s = Xmalloc(tls, l1+l2+uint64(2))
	if !(s != 0) {
		return -1
	}
	Xmemcpy(tls, s, var1, l1)
	*(*int8)(unsafe.Pointer(s + uintptr(l1))) = int8('=')
	Xmemcpy(tls, s+uintptr(l1)+uintptr(1), value, l2+uint64(1))
	// X__putenv takes ownership of s (also passed as the record to free on
	// replacement).
	return X__putenv(tls, s, l1, s)
}
func X__putenv(tls *TLS, s uintptr, l size_t, r uintptr) int32 { /* putenv.c:8:5: */
if __ccgo_strace {
trc("tls=%v s=%v l=%v r=%v, (%v:)", tls, s, l, r, origin(2))
}
var i size_t
var newenv uintptr
var tmp uintptr
//TODO for (char **e = __environ; *e; e++, i++)
var e uintptr
i = uint64(0)
if !(Environ() != 0) {
goto __1
}
//TODO for (char **e = __environ; *e; e++, i++)
e = Environ()
__2:
if !(*(*uintptr)(unsafe.Pointer(e)) != 0) {
goto __4
}
if !!(Xstrncmp(tls, s, *(*uintptr)(unsafe.Pointer(e)), l+uint64(1)) != 0) {
goto __5
}
tmp = *(*uintptr)(unsafe.Pointer(e))
*(*uintptr)(unsafe.Pointer(e)) = s
X__env_rm_add(tls, tmp, r)
return 0
__5:
;
goto __3
__3:
e += 8
i++
goto __2
goto __4
__4:
;
__1:
;
if !(Environ() == _soldenv) {
goto __6
}
newenv = Xrealloc(tls, _soldenv, uint64(unsafe.Sizeof(uintptr(0)))*(i+uint64(2)))
if !!(newenv != 0) {
goto __8
}
goto oom
__8:
;
goto __7
__6:
newenv = Xmalloc(tls, uint64(unsafe.Sizeof(uintptr(0)))*(i+uint64(2)))
if !!(newenv != 0) {
goto __9
}
goto oom
__9:
;
if !(i != 0) {
goto __10
}
Xmemcpy(tls, newenv, Environ(), uint64(unsafe.Sizeof(uintptr(0)))*i)
__10:
;
Xfree(tls, _soldenv)
__7:
;
*(*uintptr)(unsafe.Pointer(newenv + uintptr(i)*8)) = s
*(*uintptr)(unsafe.Pointer(newenv + uintptr(i+uint64(1))*8)) = uintptr(0)
*(*uintptr)(unsafe.Pointer(EnvironP())) = AssignPtrUintptr(uintptr(unsafe.Pointer(&_soldenv)), newenv)
if !(r != 0) {
goto __11
}
X__env_rm_add(tls, uintptr(0), r)
__11:
;
return 0
oom:
Xfree(tls, r)
return -1
}
var _senv_alloced uintptr /* setenv.c:7:14: */
var _senv_alloced_n size_t /* setenv.c:8:16: */
// X__env_rm_add (translated from musl setenv.c's __env_rm_add) tracks
// ownership of environment strings this libc allocated. It scans the
// _senv_alloced table of heap-allocated "name=value" strings: when old is
// found it is replaced by new and freed; otherwise new is stored in the
// first empty slot, growing the table by one entry when no slot is free.
// Allocation failure while growing silently drops the record, matching
// musl's behavior.
func X__env_rm_add(tls *TLS, old uintptr, new uintptr) { /* setenv.c:5:6: */
	if __ccgo_strace {
		trc("tls=%v old=%v new=%v, (%v:)", tls, old, new, origin(2))
	}
	//TODO for (size_t i=0; i < env_alloced_n; i++)
	var i size_t = uint64(0)
	for ; i < _senv_alloced_n; i++ {
		if *(*uintptr)(unsafe.Pointer(_senv_alloced + uintptr(i)*8)) == old {
			*(*uintptr)(unsafe.Pointer(_senv_alloced + uintptr(i)*8)) = new
			Xfree(tls, old)
			return
		} else if *(*uintptr)(unsafe.Pointer(_senv_alloced+uintptr(i)*8)) == 0 && new != 0 {
			// Empty slot: park new here but keep scanning in case old
			// still appears later in the table.
			// Bug fix: this test previously read int32(ptr) != 0, which
			// truncates the 64-bit pointer to its low 32 bits and
			// misclassifies a live entry whose address has zero low
			// bits as a free slot (musl's test is `!env_alloced[i]`).
			*(*uintptr)(unsafe.Pointer(_senv_alloced + uintptr(i)*8)) = new
			new = uintptr(0)
		}
	}
	if !(new != 0) {
		return
	}
	// Grow the table by one pointer and append new.
	var t uintptr = Xrealloc(tls, _senv_alloced, uint64(unsafe.Sizeof(uintptr(0)))*(_senv_alloced_n+uint64(1)))
	if !(t != 0) {
		return
	}
	*(*uintptr)(unsafe.Pointer(AssignPtrUintptr(uintptr(unsafe.Pointer(&_senv_alloced)), t) + uintptr(PostIncUint64(&_senv_alloced_n, 1))*8)) = new
}
// int unsetenv(const char *name);
@ -2036,3 +2233,238 @@ __3:
;
return Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(l)))) - Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(r))))
}
// X__isfinite implements C __isfinite for double: returns 1 when d is a
// finite value (neither an infinity nor NaN) and 0 otherwise.
func X__isfinite(tls *TLS, d float64) int32 {
	if math.IsInf(d, 0) || math.IsNaN(d) {
		return 0
	}
	return 1
}
// X__signbit implements C __signbit for double: returns 1 when the IEEE-754
// sign bit of x is set (negative values, -0, and sign-set NaNs), else 0.
func X__signbit(tls *TLS, x float64) (r int32) {
	return int32(math.Float64bits(x) >> 63)
}
func X__builtin_ctz(t *TLS, n uint32) int32 {
return int32(mbits.TrailingZeros32(n))
}
// https://g.co/gemini/share/2c37d5b57994
// Constants mirroring C's ftw type flags
const (
FTW_F = 0 // Regular file
FTW_D = 1 // Directory (visited pre-order)
FTW_DNR = 2 // Directory that cannot be read
FTW_NS = 4 // Stat failed (permissions, broken link, etc.)
FTW_SL = 4 // Symbolic link (lstat was used)
// Note: C's ftw might have other flags like FTW_DP (post-order dir) or FTW_SLN
// which are not directly supported by filepath.WalkDir's simple pre-order traversal.
// This emulation focuses on the most common flags associated with stat/lstat results.
)
// ftwStopError is used internally to signal that the walk should stop
// because the user callback returned a non-zero value.
type ftwStopError struct {
stopValue int
}
func (e *ftwStopError) Error() string {
return fmt.Sprintf("ftw walk stopped by callback with return value %d", e.stopValue)
}
// goFtwFunc is the callback function type, mirroring the C ftw callback.
// It receives the path, file info (if available), and a type flag.
// Returning a non-zero value stops the walk and becomes the return value of Ftw.
// Returning 0 continues the walk.
type goFtwFunc func(path string, info os.FileInfo, typeflag int) int
// Ftw emulates the C standard library function ftw(3).
// It walks the directory tree starting at 'dirpath' and calls the 'callback'
// function for each entry encountered.
//
// Parameters:
// - dirpath: The root directory path for the traversal.
// - callback: The goFtwFunc to call for each file system entry.
// - nopenfd: This parameter is part of the C ftw signature but is IGNORED
// in this Go implementation. Go's filepath.WalkDir manages concurrency
// and file descriptors internally.
//
// Returns:
// - 0 on successful completion of the walk.
// - The non-zero value returned by the callback, if the callback terminated the walk.
// - -1 if an error occurred during the walk that wasn't handled by calling
// the callback with FTW_DNR or FTW_NS (e.g., error accessing the initial dirpath).
func ftw(dirpath string, callback goFtwFunc, nopenfd int) int {
// nopenfd is ignored in this Go implementation.
walkErr := filepath.WalkDir(dirpath, func(path string, d fs.DirEntry, err error) error {
var info os.FileInfo
var typeflag int
// --- Handle errors passed by WalkDir ---
if err != nil {
// Check if the error is related to accessing a directory
if errors.Is(err, fs.ErrPermission) || errors.Is(err, unix.EACCES) { // Added syscall.EACCES check
// Try to determine if it's a directory we can't read
// We might not have 'd' if the error occurred trying to list 'path' contents
// Let's try a direct Lstat on the path itself if d is nil
lstatInfo, lstatErr := os.Lstat(path)
if lstatErr == nil && lstatInfo.IsDir() {
typeflag = FTW_DNR // Directory, but WalkDir errored (likely reading it)
info = lstatInfo // Provide the info we could get
} else {
// Can't confirm it's a directory, or Lstat itself failed
typeflag = FTW_NS // Treat as general stat failure
// info remains nil
}
} else {
// Other errors (e.g., broken symlink during traversal, I/O error)
typeflag = FTW_NS
// Attempt to get Lstat info even if WalkDir had an error, maybe it's available
lstatInfo, _ := os.Lstat(path) // Ignore error here, if it fails info stays nil
info = lstatInfo
}
// Even with errors, call the callback with the path and appropriate flag
stopVal := callback(path, info, typeflag)
if stopVal != 0 {
return &ftwStopError{stopValue: stopVal}
}
// If the error was on a directory, returning the error might stop WalkDir
// from descending. If it was fs.ErrPermission on a dir, WalkDir might
// pass filepath.SkipDir implicitly or continue depending on implementation.
// Let's return nil here to *try* to continue the walk for other siblings
// if the callback didn't stop it. The callback *was* notified.
// If the error prevents further progress WalkDir will stop anyway.
return nil // Allow walk to potentially continue elsewhere
}
// --- No error from WalkDir, process the DirEntry ---
info, err = d.Info() // Get FileInfo (like C's stat/lstat result)
if err != nil {
// Error getting info for an entry WalkDir *could* list (rare, maybe permissions changed?)
typeflag = FTW_NS
// info remains nil
} else {
// Determine type flag based on file mode
mode := info.Mode()
if mode&fs.ModeSymlink != 0 {
typeflag = FTW_SL
} else if mode.IsDir() {
typeflag = FTW_D // Visited pre-order
} else if mode.IsRegular() {
typeflag = FTW_F
} else {
// Other types (device, socket, pipe, etc.) - C ftw usually lumps these under FTW_F
// or might have FTW_NS if stat fails. Let's treat non-dir, non-link, non-regular
// as FTW_F for simplicity, aligning with common C practice, or FTW_NS if stat failed above.
// Since we have info here, we know stat didn't fail.
// Let's be more specific, maybe treat others as FTW_NS? Or stick to FTW_F?
// C ftw man page isn't super specific about all types. FTW_F seems reasonable.
typeflag = FTW_F // Treat other valid types as 'files' for simplicity
}
}
// --- Call the user callback ---
stopVal := callback(path, info, typeflag)
if stopVal != 0 {
// User wants to stop the walk
return &ftwStopError{stopValue: stopVal}
}
return nil // Continue walk
})
// --- Handle WalkDir's final return value ---
if walkErr == nil {
return 0 // Success
}
// Check if the error was our custom stop signal
var stopErr *ftwStopError
if errors.As(walkErr, &stopErr) {
return stopErr.stopValue // Return the value from the callback
}
// Otherwise, it was an unhandled error during the walk
// (e.g., initial dirpath access error, or other error not mapped to FTW_NS/DNR)
return -1 // General error return
}
func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
statp := tls.Alloc(int(unsafe.Sizeof(unix.Stat_t{})))
defer tls.Free(int(unsafe.Sizeof(unix.Stat_t{})))
return int32(ftw(
GoString(path),
func(path string, info os.FileInfo, typeflag int) int {
cs, _ := CString(path)
defer Xfree(tls, cs)
Xstat(tls, cs, statp)
return int((*(*func(*TLS, uintptr, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, cs, statp, int32(typeflag)))
},
int(fd_limit),
))
}
func Xexecve(tls *TLS, path uintptr, argv uintptr, envp uintptr) (r int32) {
goPath := GoString(path)
var goArgv, goEnvp []string
for p := *(*uintptr)(unsafe.Pointer(argv)); p != 0; p = *(*uintptr)(unsafe.Pointer(argv)) {
goArgv = append(goArgv, GoString(p))
argv += unsafe.Sizeof(uintptr(0))
}
for p := *(*uintptr)(unsafe.Pointer(envp)); p != 0; p = *(*uintptr)(unsafe.Pointer(envp)) {
goEnvp = append(goEnvp, GoString(p))
envp += unsafe.Sizeof(uintptr(0))
}
if err := unix.Exec(goPath, goArgv, goEnvp); err != nil {
tls.setErrno(err)
return -1
}
panic("unreachable")
}
func Xsetuid(tls *TLS, uid uint32) (r int32) {
if __ccgo_strace {
trc("tls=%v uid=%v, (%v:)", tls, uid, origin(2))
defer func() { trc("-> %v", r) }()
}
if err := unix.Setuid(int(uid)); err != nil {
tls.setErrno(err)
return -1
}
return 0
}
func Xsetgid(tls *TLS, gid uint32) (r int32) {
if __ccgo_strace {
trc("tls=%v gid=%v, (%v:)", tls, gid, origin(2))
defer func() { trc("-> %v", r) }()
}
if err := unix.Setgid(int(gid)); err != nil {
tls.setErrno(err)
return -1
}
return 0
}
func Xdup(tls *TLS, fd int32) (r int32) {
if __ccgo_strace {
trc("tls=%v fd=%v, (%v:)", tls, fd, origin(2))
defer func() { trc("-> %v", r) }()
}
nfd, err := unix.Dup(int(fd))
if err != nil {
tls.setErrno(err)
return -1
}
return int32(nfd)
}

View file

@ -2094,3 +2094,11 @@ func (s *byteScanner) UnreadByte() error {
Xungetc(s.t, int32(s.last), s.stream)
return nil
}
// int clock_gettime(clockid_t clk_id, struct timespec *tp);
func Xclock_gettime(t *TLS, clk_id int32, tp uintptr) int32 {
if __ccgo_strace {
trc("t=%v clk_id=%v tp=%v, (%v:)", t, clk_id, tp, origin(2))
}
panic(todo(""))
}

View file

@ -15,7 +15,7 @@
// have generated some Go code from C you should stick to the version of this
// package that you used at that time and was tested with your payload. The
// correct way to upgrade to a newer version of this package is to first
// recompile (C to Go) your code with a newwer version if ccgo that depends on
// recompile (C to Go) your code with a newer version of ccgo that depends on
// the new libc version.
//
// If you use C to Go translated code provided by others, stick to the version
@ -296,6 +296,11 @@ func NewTLS() (r *TLS) {
}
}
// StackSlots reports the number of tls stack slots currently in use.
func (tls *TLS) StackSlots() int {
return tls.sp
}
// int *__errno_location(void)
func X__errno_location(tls *TLS) (r uintptr) {
return tls.pthread + unsafe.Offsetof(t__pthread{}.Ferrno_val)

View file

@ -86,6 +86,8 @@ func X__runes_for_locale(t *TLS, l locale_t, p uintptr) uintptr {
panic(todo(""))
}
type Tsize_t = types.Size_t
type file uintptr
func (f file) fd() int32 { return int32((*stdio.FILE)(unsafe.Pointer(f)).F_file) }

View file

@ -43,6 +43,8 @@ var (
in6_addr_any in.In6_addr
)
type Tsize_t = types.Size_t
type syscallErrno = unix.Errno
// // Keep these outside of the var block otherwise go generate will miss them.

16
vendor/modernc.org/libc/libc_unix.go generated vendored
View file

@ -1386,3 +1386,19 @@ func x___secs_to_tm(tls *TLS, t int64, tm uintptr) (r int32) {
(*ctime.Tm)(unsafe.Pointer(tm)).Ftm_sec = remsecs % int32(60)
return 0
}
// int clock_gettime(clockid_t clk_id, struct timespec *tp);
//
// Xclock_gettime retrieves the time of clock clk_id into the struct
// timespec at tp. Returns 0 on success; on failure it sets errno on the
// TLS and returns -1.
func Xclock_gettime(t *TLS, clk_id int32, tp uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v clk_id=%v tp=%v, (%v:)", t, clk_id, tp, origin(2))
	}
	var ts unix.Timespec
	if err := unix.ClockGettime(clk_id, &ts); err != nil {
		t.setErrno(err)
		// Consistency fix: gate trace output on __ccgo_strace like every
		// other trc call in this package; it previously ran
		// unconditionally on the failure path.
		if __ccgo_strace {
			trc("FAIL: %v", err)
		}
		return -1
	}
	*(*unix.Timespec)(unsafe.Pointer(tp)) = ts
	return 0
}

View file

@ -9,6 +9,7 @@ import (
"fmt"
"golang.org/x/sys/windows"
"math"
mbits "math/bits"
"os"
"os/exec"
"os/user"
@ -42,6 +43,8 @@ var X_iob [stdio.X_IOB_ENTRIES]stdio.FILE
var Xin6addr_any [16]byte
var Xtimezone long // extern long timezone;
type Tsize_t = types.Size_t
var (
iobMap = map[uintptr]int32{} // &_iob[fd] -> fd
wenvValid bool
@ -845,25 +848,49 @@ func Xmunmap(t *TLS, addr uintptr, length types.Size_t) int32 {
// return 0
}
// Timeval mirrors the C `struct timeval` layout used on this platform:
// whole seconds and microseconds, both 32-bit.
type Timeval = struct {
	Ftv_sec  int32
	Ftv_usec int32
}
// int gettimeofday(struct timeval *tv, struct timezone *tz);
//
// Fills *tv with the current wall-clock time. The obsolete tz argument is
// ignored. Always returns 0.
func Xgettimeofday(t *TLS, tv, tz uintptr) int32 {
	if __ccgo_strace {
		// Fix: the trace previously omitted tv, the one argument this
		// function actually uses.
		trc("t=%v tv=%v tz=%v, (%v:)", t, tv, tz, origin(2))
	}
	// POSIX permits a NULL tv; there is then nothing to fill in.
	if tv == 0 {
		return 0
	}
	// golang.org/x/sys/windows performs the FILETIME -> Unix epoch
	// conversion for us; windows.Timeval matches our Timeval layout.
	windows.Gettimeofday((*windows.Timeval)(unsafe.Pointer(tv)))
	return 0
}
// Timespec mirrors the C `struct timespec` layout used on this platform:
// whole seconds (time.Time_t) and 32-bit nanoseconds.
type Timespec = struct {
	Ftv_sec  time.Time_t
	Ftv_nsec int32
}
// int clock_gettime(clockid_t clk_id, struct timespec *tp);
//
// Fills the timespec at tp from the system FILETIME clock. The clk_id
// argument is not consulted. Always returns 0.
func Xclock_gettime(t *TLS, clk_id int32, tp uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v clk_id=%v tp=%v, (%v:)", t, clk_id, tp, origin(2))
	}
	// FILETIME: 100ns ticks since 1601-01-01 UTC.
	var ticks uint64
	procGetSystemTimeAsFileTime.Call(uintptr(unsafe.Pointer(&ticks)), 0, 0)
	// 1601 -> 1970 epoch offset, expressed in microseconds.
	const epochShiftMicros = 11644473600000000
	micros := ticks/10 - epochShiftMicros
	out := (*Timespec)(unsafe.Pointer(tp))
	out.Ftv_sec = time.Time_t(micros / 1e6)
	// The epoch offset is a whole number of seconds, so taking the
	// sub-second remainder before shifting is equivalent.
	out.Ftv_nsec = int32((ticks * 100) % 1e9)
	return 0
}
// int getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen);
@ -1770,14 +1797,6 @@ func Xrealpath(t *TLS, path, resolved_path uintptr) uintptr {
return resolved_path
}
// struct tm *gmtime_r(const time_t *timep, struct tm *result);
func Xgmtime_r(t *TLS, timep, result uintptr) uintptr {
if __ccgo_strace {
trc("t=%v result=%v, (%v:)", t, result, origin(2))
}
panic(todo(""))
}
// // char *inet_ntoa(struct in_addr in);
// func Xinet_ntoa(t *TLS, in1 in.In_addr) uintptr {
// panic(todo(""))
@ -7108,22 +7127,49 @@ func Xsscanf(t *TLS, str, format, va uintptr) int32 {
return r
}
var _toint4 = Float64FromInt32(1) / Float64FromFloat64(2.220446049250313e-16)
func Xrint(tls *TLS, x float64) float64 {
if __ccgo_strace {
trc("tls=%v x=%v, (%v:)", tls, x, origin(2))
}
switch {
case x == 0: // also +0 and -0
return 0
case math.IsInf(x, 0), math.IsNaN(x):
return x
case x >= math.MinInt64 && x <= math.MaxInt64 && float64(int64(x)) == x:
return x
case x >= 0:
return math.Floor(x + 0.5)
default:
return math.Ceil(x - 0.5)
bp := tls.Alloc(16)
defer tls.Free(16)
var e, s int32
var y Tdouble_t
var v1 float64
var _ /* u at bp+0 */ struct {
Fi [0]Tuint64_t
Ff float64
}
_, _, _, _ = e, s, y, v1
*(*struct {
Fi [0]Tuint64_t
Ff float64
})(unsafe.Pointer(bp)) = struct {
Fi [0]Tuint64_t
Ff float64
}{}
*(*float64)(unsafe.Pointer(bp)) = x
e = Int32FromUint64(*(*Tuint64_t)(unsafe.Pointer(bp)) >> int32(52) & uint64(0x7ff))
s = Int32FromUint64(*(*Tuint64_t)(unsafe.Pointer(bp)) >> int32(63))
if e >= Int32FromInt32(0x3ff)+Int32FromInt32(52) {
return x
}
if s != 0 {
y = x - _toint4 + _toint4
} else {
y = x + _toint4 - _toint4
}
if y == Float64FromInt32(0) {
if s != 0 {
v1 = -Float64FromFloat64(0)
} else {
v1 = Float64FromInt32(0)
}
return v1
}
return y
}
// FILE *fdopen(int fd, const char *mode);
@ -7471,15 +7517,131 @@ func AtomicLoadNUint8(ptr uintptr, memorder int32) uint8 {
}
// struct tm *gmtime( const time_t *sourceTime );
func Xgmtime(t *TLS, sourceTime uintptr) uintptr {
// func Xgmtime(t *TLS, sourceTime uintptr) uintptr {
// if __ccgo_strace {
// trc("t=%v sourceTime=%v, (%v:)", t, sourceTime, origin(2))
// }
// r0, _, err := procGmtime.Call(uintptr(sourceTime))
// if err != windows.NOERROR {
// t.setErrno(err)
// }
// return uintptr(r0)
// }
var _tm time.Tm
// /tmp/libc/musl-master/src/time/gmtime.c:4:11:
func Xgmtime(tls *TLS, t uintptr) (r uintptr) { // /tmp/libc/musl-master/src/time/gmtime.c:7:2:
if __ccgo_strace {
trc("t=%v sourceTime=%v, (%v:)", t, sourceTime, origin(2))
trc("tls=%v t=%v, (%v:)", tls, t, origin(2))
defer func() { trc("-> %v", r) }()
}
r0, _, err := procGmtime.Call(uintptr(sourceTime))
if err != windows.NOERROR {
t.setErrno(err)
r = Xgmtime_r(tls, t, uintptr(unsafe.Pointer(&_tm)))
return r
}
// _days_in_month gives the length of each month of a March-anchored year
// (index 0 == March, 11 == February), as used by x___secs_to_tm. February
// is listed with 29 days — presumably because the March-based year places
// the leap day last, with non-leap handling done via the leap flag in
// x___secs_to_tm; TODO confirm against musl's __secs_to_tm.
var _days_in_month = [12]int8{
	0:  int8(31),
	1:  int8(30),
	2:  int8(31),
	3:  int8(30),
	4:  int8(31),
	5:  int8(31),
	6:  int8(30),
	7:  int8(31),
	8:  int8(30),
	9:  int8(31),
	10: int8(31),
	11: int8(29),
}
var x___utc = [4]int8{'U', 'T', 'C'}
// struct tm *gmtime_r(const time_t *timep, struct tm *result);
//
// Converts the time_t at t to broken-down UTC time in the struct tm at tm.
// Returns tm on success; if the year would overflow an int, sets errno to
// EOVERFLOW and returns NULL (0).
func Xgmtime_r(tls *TLS, t uintptr, tm uintptr) (r uintptr) {
	if __ccgo_strace {
		trc("tls=%v t=%v tm=%v, (%v:)", tls, t, tm, origin(2))
		defer func() { trc("-> %v", r) }()
	}
	if x___secs_to_tm(tls, int64(*(*time.Time_t)(unsafe.Pointer(t))), tm) < 0 {
		*(*int32)(unsafe.Pointer(X__errno_location(tls))) = int32(errno.EOVERFLOW)
		return uintptr(0)
	}
	// UTC never observes daylight saving time.
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_isdst = 0
	return tm
}
// x___secs_to_tm converts t, seconds since the Unix epoch, to broken-down
// time in the struct tm at tm (musl's __secs_to_tm). Returns 0 on success,
// -1 if the resulting year would overflow an int.
//
// The algorithm re-anchors the epoch at 2000-03-01 so the leap day falls
// at the end of each cycle, then decomposes the day count into 400-year,
// 100-year and 4-year Gregorian cycles.
func x___secs_to_tm(tls *TLS, t int64, tm uintptr) (r int32) {
	var c_cycles, leap, months, q_cycles, qc_cycles, remdays, remsecs, remyears, wday, yday int32
	var days, secs, years int64
	_, _, _, _, _, _, _, _, _, _, _, _, _ = c_cycles, days, leap, months, q_cycles, qc_cycles, remdays, remsecs, remyears, secs, wday, yday, years
	/* Reject time_t values whose year would overflow int */
	if t < int64(-Int32FromInt32(1)-Int32FromInt32(0x7fffffff))*Int64FromInt64(31622400) || t > Int64FromInt32(limits.INT_MAX)*Int64FromInt64(31622400) {
		return -int32(1)
	}
	// Shift the origin to 2000-03-01 (946684800 == 2000-01-01 UTC;
	// 31+29 days skip January and February of the leap year 2000).
	secs = t - (Int64FromInt64(946684800) + int64(Int32FromInt32(86400)*(Int32FromInt32(31)+Int32FromInt32(29))))
	days = secs / int64(86400)
	remsecs = int32(secs % int64(86400))
	// Normalize so 0 <= remsecs < 86400.
	if remsecs < 0 {
		remsecs += int32(86400)
		days--
	}
	// Day of week; the constant 3 makes days==0 (2000-03-01) a Wednesday.
	wday = int32((int64(3) + days) % int64(7))
	if wday < 0 {
		wday += int32(7)
	}
	// 146097 = 365*400 + 97 days per 400-year Gregorian cycle (qc),
	// then 100-year (c) and 4-year (q) sub-cycles.
	qc_cycles = int32(days / int64(Int32FromInt32(365)*Int32FromInt32(400)+Int32FromInt32(97)))
	remdays = int32(days % int64(Int32FromInt32(365)*Int32FromInt32(400)+Int32FromInt32(97)))
	if remdays < 0 {
		remdays += Int32FromInt32(365)*Int32FromInt32(400) + Int32FromInt32(97)
		qc_cycles--
	}
	c_cycles = remdays / (Int32FromInt32(365)*Int32FromInt32(100) + Int32FromInt32(24))
	if c_cycles == int32(4) {
		c_cycles--
	}
	remdays -= c_cycles * (Int32FromInt32(365)*Int32FromInt32(100) + Int32FromInt32(24))
	q_cycles = remdays / (Int32FromInt32(365)*Int32FromInt32(4) + Int32FromInt32(1))
	if q_cycles == int32(25) {
		q_cycles--
	}
	remdays -= q_cycles * (Int32FromInt32(365)*Int32FromInt32(4) + Int32FromInt32(1))
	remyears = remdays / int32(365)
	if remyears == int32(4) {
		remyears--
	}
	remdays -= remyears * int32(365)
	// Leap iff first year of a 4-year cycle that is either not the first
	// of a 100-year cycle, or is the first of a 400-year cycle.
	leap = BoolInt32(!(remyears != 0) && (q_cycles != 0 || !(c_cycles != 0)))
	// Convert day-of-March-based-year back to calendar day-of-year
	// (January + February precede the March anchor).
	yday = remdays + int32(31) + int32(28) + leap
	if yday >= int32(365)+leap {
		yday -= int32(365) + leap
	}
	years = int64(remyears+int32(4)*q_cycles+int32(100)*c_cycles) + int64(400)*int64(int64(qc_cycles))
	// Walk the March-based month table to split remdays into month/day.
	months = 0
	for {
		if !(int32(_days_in_month[months]) <= remdays) {
			break
		}
		remdays -= int32(_days_in_month[months])
		goto _1
	_1:
		months++
	}
	// Months 10 and 11 of the March-based year are Jan/Feb of the next
	// calendar year.
	if months >= int32(10) {
		months -= int32(12)
		years++
	}
	if years+int64(100) > int64(limits.INT_MAX) || years+int64(100) < int64(-Int32FromInt32(1)-Int32FromInt32(0x7fffffff)) {
		return -int32(1)
	}
	// struct tm counts years from 1900; years here are relative to 2000,
	// hence the +100. tm_mon is 0-based with months+2 mapping March -> 2.
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_year = int32(years + int64(100))
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_mon = months + int32(2)
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_mday = remdays + int32(1)
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_wday = wday
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_yday = yday
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_hour = remsecs / int32(3600)
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_min = remsecs / int32(60) % int32(60)
	(*time.Tm)(unsafe.Pointer(tm)).Ftm_sec = remsecs % int32(60)
	return 0
}
// size_t strftime(
@ -7609,3 +7771,7 @@ func X_strnicmp(tls *TLS, __Str1 uintptr, __Str2 uintptr, __MaxCount types.Size_
}
return int32(r0)
}
// X__builtin_ctz implements GCC's __builtin_ctz: the count of trailing
// zero bits in n. C leaves n == 0 undefined; this implementation returns
// 32 for it (math/bits.TrailingZeros32 semantics).
func X__builtin_ctz(t *TLS, n uint32) int32 {
	return int32(mbits.TrailingZeros32(n))
}

10
vendor/modernc.org/libc/mem.go generated vendored
View file

@ -24,7 +24,9 @@ func Xmalloc(t *TLS, n types.Size_t) uintptr {
trc("t=%v n=%v, (%v:)", t, n, origin(2))
}
if n == 0 {
return 0
// malloc(0) should return unique pointers
// (often expected and gnulib replaces malloc if malloc(0) returns 0)
n = 1
}
allocMu.Lock()
@ -43,18 +45,18 @@ func Xmalloc(t *TLS, n types.Size_t) uintptr {
// void *calloc(size_t nmemb, size_t size);
func Xcalloc(t *TLS, n, size types.Size_t) uintptr {
if __ccgo_strace {
trc("t=%v size=%v, (%v:)", t, size, origin(2))
trc("t=%v n=%v size=%v, (%v:)", t, n, size, origin(2))
}
rq := int(n * size)
if rq == 0 {
return 0
rq = 1
}
allocMu.Lock()
defer allocMu.Unlock()
p, err := allocator.UintptrCalloc(int(n * size))
p, err := allocator.UintptrCalloc(rq)
if err != nil {
t.setErrno(errno.ENOMEM)
return 0

6
vendor/modernc.org/libc/mem_brk.go generated vendored
View file

@ -35,7 +35,9 @@ func Xmalloc(t *TLS, n types.Size_t) uintptr {
trc("t=%v n=%v, (%v:)", t, n, origin(2))
}
if n == 0 {
return 0
// malloc(0) should return unique pointers
// (often expected and gnulib replaces malloc if malloc(0) returns 0)
n = 1
}
allocMu.Lock()
@ -57,7 +59,7 @@ func Xmalloc(t *TLS, n types.Size_t) uintptr {
// void *calloc(size_t nmemb, size_t size);
func Xcalloc(t *TLS, n, size types.Size_t) uintptr {
if __ccgo_strace {
trc("t=%v size=%v, (%v:)", t, size, origin(2))
trc("t=%v n=%v size=%v, (%v:)", t, n, size, origin(2))
}
return Xmalloc(t, n*size)
}

14
vendor/modernc.org/libc/memgrind.go generated vendored
View file

@ -77,7 +77,9 @@ func Xmalloc(t *TLS, size types.Size_t) uintptr {
trc("t=%v size=%v, (%v:)", t, size, origin(2))
}
if size == 0 {
return 0
// malloc(0) should return unique pointers
// (often expected and gnulib replaces malloc if malloc(0) returns 0)
size = 1
}
allocMu.Lock()
@ -113,18 +115,18 @@ func Xmalloc(t *TLS, size types.Size_t) uintptr {
// void *calloc(size_t nmemb, size_t size);
func Xcalloc(t *TLS, n, size types.Size_t) uintptr {
if __ccgo_strace {
trc("t=%v size=%v, (%v:)", t, size, origin(2))
trc("t=%v n=%v size=%v, (%v:)", t, n, size, origin(2))
}
rq := int(n * size)
if rq == 0 {
return 0
rq = 1
}
allocMu.Lock()
defer allocMu.Unlock()
p, err := allocator.UintptrCalloc(int(n * size))
p, err := allocator.UintptrCalloc(rq)
// if dmesgs {
// dmesg("%v: %v -> %#x, %v", origin(1), n*size, p, err)
// }
@ -269,6 +271,10 @@ func UsableSize(p uintptr) types.Size_t {
return types.Size_t(memory.UintptrUsableSize(p))
}
// Xmalloc_usable_size returns the usable size of the allocation at p, as
// reported by the underlying allocator (see UsableSize).
func Xmalloc_usable_size(tls *TLS, p uintptr) (r Tsize_t) {
	return UsableSize(p)
}
type MemAllocatorStat struct {
Allocs int
Bytes int

View file

@ -77,7 +77,9 @@ func Xmalloc(t *TLS, size Tsize_t) uintptr {
trc("t=%v size=%v, (%v:)", t, size, origin(2))
}
if size == 0 {
return 0
// malloc(0) should return unique pointers
// (often expected and gnulib replaces malloc if malloc(0) returns 0)
size = 1
}
allocatorMu.Lock()
@ -113,18 +115,18 @@ func Xmalloc(t *TLS, size Tsize_t) uintptr {
// void *calloc(size_t nmemb, size_t size);
func Xcalloc(t *TLS, n, size Tsize_t) uintptr {
if __ccgo_strace {
trc("t=%v size=%v, (%v:)", t, size, origin(2))
trc("t=%v n=%v size=%v, (%v:)", t, n, size, origin(2))
}
rq := int(n * size)
if rq == 0 {
return 0
rq = 1
}
allocatorMu.Lock()
defer allocatorMu.Unlock()
p, err := allocator.UintptrCalloc(int(n * size))
p, err := allocator.UintptrCalloc(rq)
// if dmesgs {
// dmesg("%v: %v -> %#x, %v", origin(1), n*size, p, err)
// }

View file

@ -357,7 +357,11 @@ type size_t = uint64 /* <builtin>:9:23 */
type wchar_t = int32 /* <builtin>:15:24 */
var X__darwin_check_fd_set_overflow uintptr /* <builtin>:146:5: */
// /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include/sys/_types/_fd_def.h:54
// int __darwin_check_fd_set_overflow(int, const void *, int) __API_AVAILABLE(macosx(11.0), ios(14.0), tvos(14.0), watchos(7.0));
// Stub for Darwin's fd_set overflow checker: always returns 1, i.e. the
// check passes — presumably adequate because generated code sizes fd_sets
// statically; TODO confirm against the macOS SDK semantics.
func X__darwin_check_fd_set_overflow(tls *TLS, _ int32, _ uintptr, _ int32) int32 {
	return 1
}
// pthread opaque structures

8
vendor/modernc.org/libc/pthread.go generated vendored
View file

@ -42,6 +42,7 @@ type TLS struct {
jumpBuffers []uintptr
lastError uint32
pthreadData
sp int
stack stackHeader
ID int32
@ -67,6 +68,11 @@ func newTLS(detached bool) *TLS {
return t
}
// StackSlots reports the number of tls stack slots currently in use.
// Useful for detecting unbalanced tls.Alloc/tls.Free pairs in generated
// code.
func (tls *TLS) StackSlots() int {
	return tls.sp
}
func (t *TLS) alloca(n size_t) (r uintptr) {
r = Xmalloc(t, n)
t.allocas = append(t.allocas, r)
// int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
//
// Accepted but ignored — the requested stack size is never stored —
// presumably because thread stacks are managed by the Go runtime. Always
// returns 0 (success).
func Xpthread_attr_setstacksize(t *TLS, attr uintptr, stackSize types.Size_t) int32 {
	if __ccgo_strace {
		trc("t=%v attr=%v stackSize=%v, (%v:)", t, attr, stackSize, origin(2))
	}
	return 0
}
// Go side data of pthread_cond_t.

View file

@ -12,12 +12,16 @@ import (
"modernc.org/libc/pthread"
)
// pthreadAttr is the Go-side representation backing a C pthread_attr_t;
// only the detach state is currently tracked.
type pthreadAttr struct {
	detachState int32
}
// int pthread_attr_init(pthread_attr_t *attr);
//
// Initializes the attribute object at pAttr to the default (zero-valued)
// pthreadAttr. Always returns 0.
//
// NOTE(review): this span was garbled in the diff rendering — the removed
// pthread.Pthread_attr_t assignment was interleaved with the added
// pthreadAttr one. This is the coherent added version.
func Xpthread_attr_init(t *TLS, pAttr uintptr) int32 {
	if __ccgo_strace {
		trc("t=%v pAttr=%v, (%v:)", t, pAttr, origin(2))
	}
	*(*pthreadAttr)(unsafe.Pointer(pAttr)) = pthreadAttr{}
	return 0
}

508
vendor/modernc.org/libc/stdatomic.go generated vendored
View file

@ -6,6 +6,7 @@ package libc // import "modernc.org/libc"
import (
"sync"
"sync/atomic"
"unsafe"
)
@ -21,6 +22,10 @@ var (
// { tmp = *ptr; *ptr op= val; return tmp; }
// { tmp = *ptr; *ptr = ~(*ptr & val); return tmp; } // nand
// X__c11_atomic_fetch_addInt8 is the C11 spelling of atomic fetch-add on
// an int8; it delegates to the GNU-style, mutex-serialized
// X__atomic_fetch_addInt8. The trailing memory-order argument is ignored.
func X__c11_atomic_fetch_addInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
	return X__atomic_fetch_addInt8(t, ptr, val, 0)
}
func X__atomic_fetch_addInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
int8Mu.Lock()
@ -31,6 +36,10 @@ func X__atomic_fetch_addInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return r
}
func X__c11_atomic_fetch_addUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
return X__atomic_fetch_addUint8(t, ptr, val, 0)
}
func X__atomic_fetch_addUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
int8Mu.Lock()
@ -41,6 +50,10 @@ func X__atomic_fetch_addUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
return r
}
func X__c11_atomic_fetch_addInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
return X__atomic_fetch_addInt16(t, ptr, val, 0)
}
func X__atomic_fetch_addInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
int16Mu.Lock()
@ -51,6 +64,10 @@ func X__atomic_fetch_addInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
return r
}
func X__c11_atomic_fetch_addUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
return X__atomic_fetch_addUint16(t, ptr, val, 0)
}
func X__atomic_fetch_addUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
int16Mu.Lock()
@ -61,6 +78,10 @@ func X__atomic_fetch_addUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_addInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
return X__atomic_fetch_addInt32(t, ptr, val, 0)
}
func X__atomic_fetch_addInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
int32Mu.Lock()
@ -71,6 +92,10 @@ func X__atomic_fetch_addInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
return r
}
func X__c11_atomic_fetch_addUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
return X__atomic_fetch_addUint32(t, ptr, val, 0)
}
func X__atomic_fetch_addUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
int32Mu.Lock()
@ -81,6 +106,10 @@ func X__atomic_fetch_addUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_addInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
return X__atomic_fetch_addInt64(t, ptr, val, 0)
}
func X__atomic_fetch_addInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
int64Mu.Lock()
@ -91,6 +120,10 @@ func X__atomic_fetch_addInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
return r
}
func X__c11_atomic_fetch_addUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
return X__atomic_fetch_addUint64(t, ptr, val, 0)
}
func X__atomic_fetch_addUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
int64Mu.Lock()
@ -103,6 +136,10 @@ func X__atomic_fetch_addUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint
// ----
// X__c11_atomic_fetch_andInt8 is the C11 spelling of atomic fetch-and on
// an int8; it delegates to the GNU-style X__atomic_fetch_andInt8. The
// trailing memory-order argument is ignored.
func X__c11_atomic_fetch_andInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
	return X__atomic_fetch_andInt8(t, ptr, val, 0)
}
func X__atomic_fetch_andInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
int8Mu.Lock()
@ -113,6 +150,10 @@ func X__atomic_fetch_andInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return r
}
func X__c11_atomic_fetch_andUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
return X__atomic_fetch_andUint8(t, ptr, val, 0)
}
func X__atomic_fetch_andUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
int8Mu.Lock()
@ -123,6 +164,10 @@ func X__atomic_fetch_andUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
return r
}
func X__c11_atomic_fetch_andInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
return X__atomic_fetch_andInt16(t, ptr, val, 0)
}
func X__atomic_fetch_andInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
int16Mu.Lock()
@ -133,6 +178,10 @@ func X__atomic_fetch_andInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
return r
}
func X__c11_atomic_fetch_andUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
return X__atomic_fetch_andUint16(t, ptr, val, 0)
}
func X__atomic_fetch_andUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
int16Mu.Lock()
@ -143,6 +192,10 @@ func X__atomic_fetch_andUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_andInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
return X__atomic_fetch_andInt32(t, ptr, val, 0)
}
func X__atomic_fetch_andInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
int32Mu.Lock()
@ -153,6 +206,10 @@ func X__atomic_fetch_andInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
return r
}
func X__c11_atomic_fetch_andUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
return X__atomic_fetch_andUint32(t, ptr, val, 0)
}
func X__atomic_fetch_andUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
int32Mu.Lock()
@ -163,6 +220,10 @@ func X__atomic_fetch_andUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_andInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
return X__atomic_fetch_andInt64(t, ptr, val, 0)
}
func X__atomic_fetch_andInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
int64Mu.Lock()
@ -173,6 +234,10 @@ func X__atomic_fetch_andInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
return r
}
func X__c11_atomic_fetch_andUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
return X__atomic_fetch_andUint64(t, ptr, val, 0)
}
func X__atomic_fetch_andUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
int64Mu.Lock()
@ -185,6 +250,10 @@ func X__atomic_fetch_andUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint
// ----
func X__c11_atomic_fetch_orInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return X__atomic_fetch_orInt8(t, ptr, val, 0)
}
func X__atomic_fetch_orInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
int8Mu.Lock()
@ -195,6 +264,10 @@ func X__atomic_fetch_orInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return r
}
func X__c11_atomic_fetch_orUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
return X__atomic_fetch_orUint8(t, ptr, val, 0)
}
func X__atomic_fetch_orUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
int8Mu.Lock()
@ -205,6 +278,10 @@ func X__atomic_fetch_orUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
return r
}
func X__c11_atomic_fetch_orInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
return X__atomic_fetch_orInt16(t, ptr, val, 0)
}
func X__atomic_fetch_orInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
int16Mu.Lock()
@ -215,6 +292,10 @@ func X__atomic_fetch_orInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
return r
}
func X__c11_atomic_fetch_orUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
return X__atomic_fetch_orUint16(t, ptr, val, 0)
}
func X__atomic_fetch_orUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
int16Mu.Lock()
@ -225,6 +306,10 @@ func X__atomic_fetch_orUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint1
return r
}
func X__c11_atomic_fetch_orInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
return X__atomic_fetch_orInt32(t, ptr, val, 0)
}
func X__atomic_fetch_orInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
int32Mu.Lock()
@ -235,6 +320,10 @@ func X__atomic_fetch_orInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
return r
}
func X__c11_atomic_fetch_orUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
return X__atomic_fetch_orUint32(t, ptr, val, 0)
}
func X__atomic_fetch_orUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
int32Mu.Lock()
@ -245,6 +334,10 @@ func X__atomic_fetch_orUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint3
return r
}
func X__c11_atomic_fetch_orInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
return X__atomic_fetch_orInt64(t, ptr, val, 0)
}
func X__atomic_fetch_orInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
int64Mu.Lock()
@ -255,6 +348,10 @@ func X__atomic_fetch_orInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
return r
}
func X__c11_atomic_fetch_orUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
return X__atomic_fetch_orUint64(t, ptr, val, 0)
}
func X__atomic_fetch_orUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
int64Mu.Lock()
@ -267,6 +364,10 @@ func X__atomic_fetch_orUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint6
// ----
func X__c11_atomic_fetch_subInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return X__atomic_fetch_subInt8(t, ptr, val, 0)
}
func X__atomic_fetch_subInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
int8Mu.Lock()
@ -277,6 +378,10 @@ func X__atomic_fetch_subInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return r
}
func X__c11_atomic_fetch_subUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
return X__atomic_fetch_subUint8(t, ptr, val, 0)
}
func X__atomic_fetch_subUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
int8Mu.Lock()
@ -287,6 +392,10 @@ func X__atomic_fetch_subUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
return r
}
func X__c11_atomic_fetch_subInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
return X__atomic_fetch_subInt16(t, ptr, val, 0)
}
func X__atomic_fetch_subInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
int16Mu.Lock()
@ -297,6 +406,10 @@ func X__atomic_fetch_subInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
return r
}
func X__c11_atomic_fetch_subUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
return X__atomic_fetch_subUint16(t, ptr, val, 0)
}
func X__atomic_fetch_subUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
int16Mu.Lock()
@ -307,6 +420,10 @@ func X__atomic_fetch_subUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_subInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
return X__atomic_fetch_subInt32(t, ptr, val, 0)
}
func X__atomic_fetch_subInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
int32Mu.Lock()
@ -317,6 +434,10 @@ func X__atomic_fetch_subInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
return r
}
func X__c11_atomic_fetch_subUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
return X__atomic_fetch_subUint32(t, ptr, val, 0)
}
func X__atomic_fetch_subUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
int32Mu.Lock()
@ -327,6 +448,10 @@ func X__atomic_fetch_subUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_subInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
return X__atomic_fetch_subInt64(t, ptr, val, 0)
}
func X__atomic_fetch_subInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
int64Mu.Lock()
@ -337,6 +462,10 @@ func X__atomic_fetch_subInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
return r
}
func X__c11_atomic_fetch_subUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
return X__atomic_fetch_subUint64(t, ptr, val, 0)
}
func X__atomic_fetch_subUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
int64Mu.Lock()
@ -349,6 +478,10 @@ func X__atomic_fetch_subUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint
// ----
func X__c11_atomic_fetch_xorInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return X__atomic_fetch_xorInt8(t, ptr, val, 0)
}
func X__atomic_fetch_xorInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
int8Mu.Lock()
@ -359,6 +492,10 @@ func X__atomic_fetch_xorInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
return r
}
func X__c11_atomic_fetch_xorUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
return X__atomic_fetch_xorUint8(t, ptr, val, 0)
}
func X__atomic_fetch_xorUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
int8Mu.Lock()
@ -369,6 +506,10 @@ func X__atomic_fetch_xorUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
return r
}
func X__c11_atomic_fetch_xorInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
return X__atomic_fetch_xorInt16(t, ptr, val, 0)
}
func X__atomic_fetch_xorInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
int16Mu.Lock()
@ -379,6 +520,10 @@ func X__atomic_fetch_xorInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
return r
}
func X__c11_atomic_fetch_xorUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
return X__atomic_fetch_xorUint16(t, ptr, val, 0)
}
func X__atomic_fetch_xorUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
int16Mu.Lock()
@ -389,6 +534,10 @@ func X__atomic_fetch_xorUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_xorInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
return X__atomic_fetch_xorInt32(t, ptr, val, 0)
}
func X__atomic_fetch_xorInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
int32Mu.Lock()
@ -399,6 +548,10 @@ func X__atomic_fetch_xorInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
return r
}
func X__c11_atomic_fetch_xorUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
return X__atomic_fetch_xorUint32(t, ptr, val, 0)
}
func X__atomic_fetch_xorUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
int32Mu.Lock()
@ -409,6 +562,10 @@ func X__atomic_fetch_xorUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint
return r
}
func X__c11_atomic_fetch_xorInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
return X__atomic_fetch_xorInt64(t, ptr, val, 0)
}
func X__atomic_fetch_xorInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
int64Mu.Lock()
@ -419,6 +576,10 @@ func X__atomic_fetch_xorInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
return r
}
func X__c11_atomic_fetch_xorUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
return X__atomic_fetch_xorUint64(t, ptr, val, 0)
}
func X__atomic_fetch_xorUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
int64Mu.Lock()
@ -433,6 +594,16 @@ func X__atomic_fetch_xorUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint
// void __atomic_exchange (type *ptr, type *val, type *ret, int memorder)
// X__c11_atomic_exchangeInt8 atomically stores val at *ptr and returns the
// previous value, serialized by int8Mu.
func X__c11_atomic_exchangeInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8) {
	int8Mu.Lock()
	defer int8Mu.Unlock()
	p := (*int8)(unsafe.Pointer(ptr))
	r, *p = *p, val
	return r
}
func X__atomic_exchangeInt8(t *TLS, ptr, val, ret uintptr, _ int32) {
int8Mu.Lock()
@ -442,6 +613,16 @@ func X__atomic_exchangeInt8(t *TLS, ptr, val, ret uintptr, _ int32) {
*(*int8)(unsafe.Pointer(ptr)) = *(*int8)(unsafe.Pointer(val))
}
// X__c11_atomic_exchangeUint8 atomically stores val at *ptr and returns
// the previous value, serialized by int8Mu.
func X__c11_atomic_exchangeUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8) {
	int8Mu.Lock()
	defer int8Mu.Unlock()
	p := (*uint8)(unsafe.Pointer(ptr))
	r, *p = *p, val
	return r
}
func X__atomic_exchangeUint8(t *TLS, ptr, val, ret uintptr, _ int32) {
int8Mu.Lock()
@ -451,6 +632,16 @@ func X__atomic_exchangeUint8(t *TLS, ptr, val, ret uintptr, _ int32) {
*(*uint8)(unsafe.Pointer(ptr)) = *(*uint8)(unsafe.Pointer(val))
}
// X__c11_atomic_exchangeInt16 atomically stores val at *ptr and returns
// the previous value, serialized by int16Mu.
func X__c11_atomic_exchangeInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16) {
	int16Mu.Lock()
	defer int16Mu.Unlock()
	p := (*int16)(unsafe.Pointer(ptr))
	r, *p = *p, val
	return r
}
func X__atomic_exchangeInt16(t *TLS, ptr, val, ret uintptr, _ int32) {
int16Mu.Lock()
@ -460,6 +651,16 @@ func X__atomic_exchangeInt16(t *TLS, ptr, val, ret uintptr, _ int32) {
*(*int16)(unsafe.Pointer(ptr)) = *(*int16)(unsafe.Pointer(val))
}
// X__c11_atomic_exchangeUint16 atomically stores val at *ptr and returns
// the previous value, serialized by int16Mu.
func X__c11_atomic_exchangeUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16) {
	int16Mu.Lock()
	defer int16Mu.Unlock()
	p := (*uint16)(unsafe.Pointer(ptr))
	r, *p = *p, val
	return r
}
func X__atomic_exchangeUint16(t *TLS, ptr, val, ret uintptr, _ int32) {
int16Mu.Lock()
@ -469,40 +670,36 @@ func X__atomic_exchangeUint16(t *TLS, ptr, val, ret uintptr, _ int32) {
*(*uint16)(unsafe.Pointer(ptr)) = *(*uint16)(unsafe.Pointer(val))
}
func X__c11_atomic_exchangeInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32) {
return atomic.SwapInt32((*int32)(unsafe.Pointer(ptr)), val)
}
func X__atomic_exchangeInt32(t *TLS, ptr, val, ret uintptr, _ int32) {
int32Mu.Lock()
*(*int32)(unsafe.Pointer(ret)) = atomic.SwapInt32((*int32)(unsafe.Pointer(ptr)), *(*int32)(unsafe.Pointer(val)))
}
defer int32Mu.Unlock()
*(*int32)(unsafe.Pointer(ret)) = *(*int32)(unsafe.Pointer(ptr))
*(*int32)(unsafe.Pointer(ptr)) = *(*int32)(unsafe.Pointer(val))
func X__c11_atomic_exchangeUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32) {
return uint32(atomic.SwapInt32((*int32)(unsafe.Pointer(ptr)), int32(val)))
}
func X__atomic_exchangeUint32(t *TLS, ptr, val, ret uintptr, _ int32) {
int32Mu.Lock()
*(*uint32)(unsafe.Pointer(ret)) = atomic.SwapUint32((*uint32)(unsafe.Pointer(ptr)), *(*uint32)(unsafe.Pointer(val)))
}
defer int32Mu.Unlock()
*(*uint32)(unsafe.Pointer(ret)) = *(*uint32)(unsafe.Pointer(ptr))
*(*uint32)(unsafe.Pointer(ptr)) = *(*uint32)(unsafe.Pointer(val))
func X__c11_atomic_exchangeInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64) {
return atomic.SwapInt64((*int64)(unsafe.Pointer(ptr)), val)
}
func X__atomic_exchangeInt64(t *TLS, ptr, val, ret uintptr, _ int32) {
int64Mu.Lock()
*(*int64)(unsafe.Pointer(ret)) = atomic.SwapInt64((*int64)(unsafe.Pointer(ptr)), *(*int64)(unsafe.Pointer(val)))
}
defer int64Mu.Unlock()
*(*int64)(unsafe.Pointer(ret)) = *(*int64)(unsafe.Pointer(ptr))
*(*int64)(unsafe.Pointer(ptr)) = *(*int64)(unsafe.Pointer(val))
func X__c11_atomic_exchangeUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64) {
return uint64(atomic.SwapInt64((*int64)(unsafe.Pointer(ptr)), int64(val)))
}
func X__atomic_exchangeUint64(t *TLS, ptr, val, ret uintptr, _ int32) {
int64Mu.Lock()
defer int64Mu.Unlock()
*(*uint64)(unsafe.Pointer(ret)) = *(*uint64)(unsafe.Pointer(ptr))
*(*uint64)(unsafe.Pointer(ptr)) = *(*uint64)(unsafe.Pointer(val))
*(*uint64)(unsafe.Pointer(ret)) = atomic.SwapUint64((*uint64)(unsafe.Pointer(ptr)), *(*uint64)(unsafe.Pointer(val)))
}
// ----
@ -605,10 +802,94 @@ func X__atomic_compare_exchangeUint64(t *TLS, ptr, expected, desired uintptr, we
return X__atomic_compare_exchangeInt64(t, ptr, expected, desired, weak, success, failure)
}
// X__c11_atomic_compare_exchange_strongInt8 implements C11 strong CAS on
// an int8: if *ptr equals *expected, *ptr becomes desired and 1 is
// returned; otherwise *expected is updated to the current value and 0 is
// returned. Serialized by int8Mu; the memory-order arguments are ignored.
func X__c11_atomic_compare_exchange_strongInt8(t *TLS, ptr, expected uintptr, desired int8, success, failure int32) int32 {
	int8Mu.Lock()
	defer int8Mu.Unlock()
	cur := (*int8)(unsafe.Pointer(ptr))
	exp := (*int8)(unsafe.Pointer(expected))
	if *cur != *exp {
		*exp = *cur
		return 0
	}
	*cur = desired
	return 1
}

// X__c11_atomic_compare_exchange_strongUint8 is the uint8 flavor; the
// comparison and store are bit-identical to the int8 implementation.
func X__c11_atomic_compare_exchange_strongUint8(t *TLS, ptr, expected uintptr, desired uint8, success, failure int32) int32 {
	return X__c11_atomic_compare_exchange_strongInt8(t, ptr, expected, int8(desired), success, failure)
}
func X__c11_atomic_compare_exchange_strongInt16(t *TLS, ptr, expected uintptr, desired int16, success, failure int32) int32 {
int16Mu.Lock()
defer int16Mu.Unlock()
have := *(*int16)(unsafe.Pointer(ptr))
if have == *(*int16)(unsafe.Pointer(expected)) {
*(*int16)(unsafe.Pointer(ptr)) = desired
return 1
}
*(*int16)(unsafe.Pointer(expected)) = have
return 0
}
func X__c11_atomic_compare_exchange_strongUint16(t *TLS, ptr, expected uintptr, desired uint16, success, failure int32) int32 {
return X__c11_atomic_compare_exchange_strongInt16(t, ptr, expected, int16(desired), success, failure)
}
func X__c11_atomic_compare_exchange_strongInt32(t *TLS, ptr, expected uintptr, desired, success, failure int32) int32 {
int32Mu.Lock()
defer int32Mu.Unlock()
have := *(*int32)(unsafe.Pointer(ptr))
if have == *(*int32)(unsafe.Pointer(expected)) {
*(*int32)(unsafe.Pointer(ptr)) = desired
return 1
}
*(*int32)(unsafe.Pointer(expected)) = have
return 0
}
func X__c11_atomic_compare_exchange_strongUint32(t *TLS, ptr, expected uintptr, desired uint32, success, failure int32) int32 {
return X__c11_atomic_compare_exchange_strongInt32(t, ptr, expected, int32(desired), success, failure)
}
func X__c11_atomic_compare_exchange_strongInt64(t *TLS, ptr, expected uintptr, desired int64, success, failure int32) int32 {
int64Mu.Lock()
defer int64Mu.Unlock()
have := *(*int64)(unsafe.Pointer(ptr))
if have == *(*int64)(unsafe.Pointer(expected)) {
*(*int64)(unsafe.Pointer(ptr)) = desired
return 1
}
*(*int64)(unsafe.Pointer(expected)) = have
return 0
}
func X__c11_atomic_compare_exchange_strongUint64(t *TLS, ptr, expected uintptr, desired uint64, success, failure int32) int32 {
return X__c11_atomic_compare_exchange_strongInt64(t, ptr, expected, int64(desired), success, failure)
}
// ----
// void __atomic_load (type *ptr, type *ret, int memorder)
func X__c11_atomic_loadInt8(t *TLS, ptr uintptr, memorder int32) (r int8) {
int8Mu.Lock()
defer int8Mu.Unlock()
return *(*int8)(unsafe.Pointer(ptr))
}
func X__atomic_loadInt8(t *TLS, ptr, ret uintptr, memorder int32) {
int8Mu.Lock()
@ -617,10 +898,22 @@ func X__atomic_loadInt8(t *TLS, ptr, ret uintptr, memorder int32) {
*(*int8)(unsafe.Pointer(ret)) = *(*int8)(unsafe.Pointer(ptr))
}
func X__c11_atomic_loadUint8(t *TLS, ptr uintptr, memorder int32) (r uint8) {
return uint8(X__c11_atomic_loadInt8(t, ptr, memorder))
}
func X__atomic_loadUint8(t *TLS, ptr, ret uintptr, memorder int32) {
X__atomic_loadInt8(t, ptr, ret, memorder)
}
func X__c11_atomic_loadInt16(t *TLS, ptr uintptr, memorder int32) (r int16) {
int16Mu.Lock()
defer int16Mu.Unlock()
return *(*int16)(unsafe.Pointer(ptr))
}
func X__atomic_loadInt16(t *TLS, ptr, ret uintptr, memorder int32) {
int16Mu.Lock()
@ -629,28 +922,40 @@ func X__atomic_loadInt16(t *TLS, ptr, ret uintptr, memorder int32) {
*(*int16)(unsafe.Pointer(ret)) = *(*int16)(unsafe.Pointer(ptr))
}
func X__c11_atomic_loadUint16(t *TLS, ptr uintptr, memorder int32) (r uint16) {
return uint16(X__c11_atomic_loadInt16(t, ptr, memorder))
}
func X__atomic_loadUint16(t *TLS, ptr, ret uintptr, memorder int32) {
X__atomic_loadInt16(t, ptr, ret, memorder)
}
func X__c11_atomic_loadInt32(t *TLS, ptr uintptr, memorder int32) (r int32) {
return atomic.LoadInt32((*int32)(unsafe.Pointer(ptr)))
}
func X__atomic_loadInt32(t *TLS, ptr, ret uintptr, memorder int32) {
int32Mu.Lock()
*(*int32)(unsafe.Pointer(ret)) = atomic.LoadInt32((*int32)(unsafe.Pointer(ptr)))
}
defer int32Mu.Unlock()
*(*int32)(unsafe.Pointer(ret)) = *(*int32)(unsafe.Pointer(ptr))
func X__c11_atomic_loadUint32(t *TLS, ptr uintptr, memorder int32) (r uint32) {
return uint32(X__c11_atomic_loadInt32(t, ptr, memorder))
}
func X__atomic_loadUint32(t *TLS, ptr, ret uintptr, memorder int32) {
X__atomic_loadInt32(t, ptr, ret, memorder)
}
func X__c11_atomic_loadInt64(t *TLS, ptr uintptr, memorder int32) (r int64) {
return atomic.LoadInt64((*int64)(unsafe.Pointer(ptr)))
}
func X__atomic_loadInt64(t *TLS, ptr, ret uintptr, memorder int32) {
int64Mu.Lock()
*(*int64)(unsafe.Pointer(ret)) = atomic.LoadInt64((*int64)(unsafe.Pointer(ptr)))
}
defer int64Mu.Unlock()
*(*int64)(unsafe.Pointer(ret)) = *(*int64)(unsafe.Pointer(ptr))
func X__c11_atomic_loadUint64(t *TLS, ptr uintptr, memorder int32) (r uint64) {
return uint64(X__c11_atomic_loadInt64(t, ptr, memorder))
}
func X__atomic_loadUint64(t *TLS, ptr, ret uintptr, memorder int32) {
@ -661,6 +966,14 @@ func X__atomic_loadUint64(t *TLS, ptr, ret uintptr, memorder int32) {
// void __atomic_store (type *ptr, type *val, int memorder)
func X__c11_atomic_storeInt8(t *TLS, ptr uintptr, val int8, memorder int32) {
int8Mu.Lock()
defer int8Mu.Unlock()
*(*int8)(unsafe.Pointer(ptr)) = val
}
func X__atomic_storeInt8(t *TLS, ptr, val uintptr, memorder int32) {
int8Mu.Lock()
@ -669,10 +982,22 @@ func X__atomic_storeInt8(t *TLS, ptr, val uintptr, memorder int32) {
*(*int8)(unsafe.Pointer(ptr)) = *(*int8)(unsafe.Pointer(val))
}
func X__c11_atomic_storeUint8(t *TLS, ptr uintptr, val uint8, memorder int32) {
X__c11_atomic_storeInt8(t, ptr, int8(val), memorder)
}
func X__atomic_storeUint8(t *TLS, ptr, val uintptr, memorder int32) {
X__atomic_storeInt8(t, ptr, val, memorder)
}
func X__c11_atomic_storeInt16(t *TLS, ptr uintptr, val int16, memorder int32) {
int16Mu.Lock()
defer int16Mu.Unlock()
*(*int16)(unsafe.Pointer(ptr)) = val
}
func X__atomic_storeInt16(t *TLS, ptr, val uintptr, memorder int32) {
int16Mu.Lock()
@ -681,30 +1006,139 @@ func X__atomic_storeInt16(t *TLS, ptr, val uintptr, memorder int32) {
*(*int16)(unsafe.Pointer(ptr)) = *(*int16)(unsafe.Pointer(val))
}
func X__c11_atomic_storeUint16(t *TLS, ptr uintptr, val uint16, memorder int32) {
X__c11_atomic_storeInt16(t, ptr, int16(val), memorder)
}
func X__atomic_storeUint16(t *TLS, ptr, val uintptr, memorder int32) {
X__atomic_storeInt16(t, ptr, val, memorder)
}
func X__c11_atomic_storeInt32(t *TLS, ptr uintptr, val int32, memorder int32) {
atomic.StoreInt32((*int32)(unsafe.Pointer(ptr)), val)
}
func X__atomic_storeInt32(t *TLS, ptr, val uintptr, memorder int32) {
int32Mu.Lock()
atomic.StoreInt32((*int32)(unsafe.Pointer(ptr)), *(*int32)(unsafe.Pointer(val)))
}
defer int32Mu.Unlock()
*(*int32)(unsafe.Pointer(ptr)) = *(*int32)(unsafe.Pointer(val))
func X__c11_atomic_storeUint32(t *TLS, ptr uintptr, val uint32, memorder int32) {
X__c11_atomic_storeInt32(t, ptr, int32(val), memorder)
}
func X__atomic_storeUint32(t *TLS, ptr, val uintptr, memorder int32) {
X__atomic_storeInt32(t, ptr, val, memorder)
}
func X__c11_atomic_storeInt64(t *TLS, ptr uintptr, val int64, memorder int32) {
atomic.StoreInt64((*int64)(unsafe.Pointer(ptr)), val)
}
func X__atomic_storeInt64(t *TLS, ptr, val uintptr, memorder int32) {
int64Mu.Lock()
atomic.StoreInt64((*int64)(unsafe.Pointer(ptr)), *(*int64)(unsafe.Pointer(val)))
}
defer int64Mu.Unlock()
*(*int64)(unsafe.Pointer(ptr)) = *(*int64)(unsafe.Pointer(val))
func X__c11_atomic_storeUint64(t *TLS, ptr uintptr, val uint64, memorder int32) {
X__c11_atomic_storeInt64(t, ptr, int64(val), memorder)
}
func X__atomic_storeUint64(t *TLS, ptr, val uintptr, memorder int32) {
X__atomic_storeInt64(t, ptr, val, memorder)
}
// type __sync_val_compare_and_swap (type *ptr, type oldval type newval, ...)
// X__sync_val_compare_and_swapInt8 implements GCC's legacy
// __sync_val_compare_and_swap for int8, serialized with int8Mu: if *ptr
// equals oldval it is replaced with newval; the value *ptr held before the
// operation is returned in either case.
func X__sync_val_compare_and_swapInt8(t *TLS, ptr uintptr, oldval, newval int8) (r int8) {
	int8Mu.Lock()
	defer int8Mu.Unlock()
	p := (*int8)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

// X__sync_val_compare_and_swapUint8 is the uint8 variant; it shares int8Mu
// with the signed form and returns the prior value of *ptr.
func X__sync_val_compare_and_swapUint8(t *TLS, ptr uintptr, oldval, newval uint8) (r uint8) {
	int8Mu.Lock()
	defer int8Mu.Unlock()
	p := (*uint8)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

// X__sync_val_compare_and_swapInt16 is the int16 variant, serialized with
// int16Mu; it returns the prior value of *ptr.
func X__sync_val_compare_and_swapInt16(t *TLS, ptr uintptr, oldval, newval int16) (r int16) {
	int16Mu.Lock()
	defer int16Mu.Unlock()
	p := (*int16)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

// X__sync_val_compare_and_swapUint16 is the uint16 variant, serialized with
// int16Mu; it returns the prior value of *ptr.
func X__sync_val_compare_and_swapUint16(t *TLS, ptr uintptr, oldval, newval uint16) (r uint16) {
	int16Mu.Lock()
	defer int16Mu.Unlock()
	p := (*uint16)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

// X__sync_val_compare_and_swapInt32 is the int32 variant, serialized with
// int32Mu; it returns the prior value of *ptr.
func X__sync_val_compare_and_swapInt32(t *TLS, ptr uintptr, oldval, newval int32) (r int32) {
	int32Mu.Lock()
	defer int32Mu.Unlock()
	p := (*int32)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

// X__sync_val_compare_and_swapUint32 is the uint32 variant, serialized with
// int32Mu; it returns the prior value of *ptr.
func X__sync_val_compare_and_swapUint32(t *TLS, ptr uintptr, oldval, newval uint32) (r uint32) {
	int32Mu.Lock()
	defer int32Mu.Unlock()
	p := (*uint32)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

// X__sync_val_compare_and_swapInt64 is the int64 variant, serialized with
// int64Mu; it returns the prior value of *ptr.
func X__sync_val_compare_and_swapInt64(t *TLS, ptr uintptr, oldval, newval int64) (r int64) {
	int64Mu.Lock()
	defer int64Mu.Unlock()
	p := (*int64)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

// X__sync_val_compare_and_swapUint64 is the uint64 variant, serialized with
// int64Mu; it returns the prior value of *ptr.
func X__sync_val_compare_and_swapUint64(t *TLS, ptr uintptr, oldval, newval uint64) (r uint64) {
	int64Mu.Lock()
	defer int64Mu.Unlock()
	p := (*uint64)(unsafe.Pointer(ptr))
	r = *p
	if r == oldval {
		*p = newval
	}
	return r
}

View file

@ -1,6 +0,0 @@
# Echo each command and exit on the first failure.
set -evx
# Run unconvert until it reports a clean tree: while it still finds redundant
# conversions it exits non-zero, so keep applying its fixes in a loop.
until unconvert -fastmath ./...
do
	unconvert -fastmath -apply ./...
done
# NOTE(review): presumably reverts unwanted unconvert edits to
# pthread_musl.go so that file keeps its hand-written form — confirm intent.
git checkout -f pthread_musl.go