[chore] update dependencies, bump to Go 1.19.1 (#826)

* update dependencies, bump Go version to 1.19

* bump test image Go version

* update golangci-lint

* update gotosocial-drone-build

* sign

* linting, go fmt

* update swagger docs

* update swagger docs

* whitespace

* update contributing.md

* fuckin whoopsie doopsie

* linterino, linteroni

* fix followrequest test not starting processor

* fix other api/client tests not starting processor

* fix remaining tests where processor not started

* bump go-runners version

* don't check last-webfingered-at, processor may have updated this

* update swagger command

* update bun to latest version

* fix embed to work the same as before with new bun

Signed-off-by: kim <grufwub@gmail.com>
Co-authored-by: tsmethurst <tobi.smethurst@protonmail.com>
kim 2022-09-28 18:30:40 +01:00 committed by GitHub
commit a156188b3e
1135 changed files with 258905 additions and 137146 deletions

vendor/golang.org/x/crypto/AUTHORS generated vendored

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.


@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.


@ -306,6 +306,20 @@ func (c *Client) UpdateReg(ctx context.Context, acct *Account) (*Account, error)
return c.updateRegRFC(ctx, acct)
}
// AccountKeyRollover attempts to transition a client's account key to a new key.
// On success client's Key is updated which is not concurrency safe.
// On failure an error will be returned.
// The new key is already registered with the ACME provider if the following is true:
// - error is of type acme.Error
// - StatusCode should be 409 (Conflict)
// - Location header will have the KID of the associated account
//
// More about account key rollover can be found at
// https://tools.ietf.org/html/rfc8555#section-7.3.5.
func (c *Client) AccountKeyRollover(ctx context.Context, newKey crypto.Signer) error {
return c.accountKeyRollover(ctx, newKey)
}
// Authorize performs the initial step in the pre-authorization flow,
// as opposed to order-based flow.
// The caller will then need to choose from and perform a set of returned
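
The hunk above exposes key rollover as a public method on acme.Client. A minimal caller sketch, assuming c is an *acme.Client that has already registered an account (the helper name and key choice are illustrative, not part of the diff):

package acmeutil // hypothetical helper package

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"

	"golang.org/x/crypto/acme"
)

// rotateAccountKey moves an already-registered account onto a fresh key.
func rotateAccountKey(ctx context.Context, c *acme.Client) error {
	// Any crypto.Signer works; P-256 ECDSA keeps the example short.
	newKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	// On success c.Key is replaced with newKey; as the doc comment notes,
	// that update is not concurrency safe, so keep c quiescent meanwhile.
	return c.AccountKeyRollover(ctx, newKey)
}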


@ -41,7 +41,7 @@ type DirCache string
// Get reads a certificate data from the specified file name.
func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {
name = filepath.Join(string(d), name)
name = filepath.Join(string(d), filepath.Clean("/"+name))
var (
data []byte
err error
@ -82,7 +82,7 @@ func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
case <-ctx.Done():
// Don't overwrite the file if the context was canceled.
default:
newName := filepath.Join(string(d), name)
newName := filepath.Join(string(d), filepath.Clean("/"+name))
err = os.Rename(tmp, newName)
}
}()
@ -96,7 +96,7 @@ func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
// Delete removes the specified file name.
func (d DirCache) Delete(ctx context.Context, name string) error {
name = filepath.Join(string(d), name)
name = filepath.Join(string(d), filepath.Clean("/"+name))
var (
err error
done = make(chan struct{})
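
All three changed lines in this hunk apply the same fix: the cache entry name is passed through filepath.Clean("/"+name) before being joined onto the cache directory, so a crafted name can no longer escape it. A standalone illustration of the difference (paths are made up, Unix separators assumed):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	dir, name := "/var/cache/autocert", "../../etc/passwd"

	// Old behaviour: the ".." segments survive the join and walk out of dir.
	fmt.Println(filepath.Join(dir, name)) // /var/etc/passwd

	// New behaviour: Clean("/"+name) collapses the leading ".." segments first.
	fmt.Println(filepath.Join(dir, filepath.Clean("/"+name))) // /var/cache/autocert/etc/passwd
}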


@ -33,6 +33,10 @@ const noKeyID = KeyID("")
// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details.
const noPayload = ""
// noNonce indicates that the nonce should be omitted from the protected header.
// See jwsEncodeJSON for details.
const noNonce = ""
// jsonWebSignature can be easily serialized into a JWS following
// https://tools.ietf.org/html/rfc7515#section-3.2.
type jsonWebSignature struct {
@ -45,10 +49,15 @@ type jsonWebSignature struct {
// The result is serialized in JSON format containing either kid or jwk
// fields based on the provided KeyID value.
//
// If kid is non-empty, its quoted value is inserted in the protected head
// The claimset is marshalled using json.Marshal unless it is a string.
// In which case it is inserted directly into the message.
//
// If kid is non-empty, its quoted value is inserted in the protected header
// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted
// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive.
//
// If nonce is non-empty, its quoted value is inserted in the protected header.
//
// See https://tools.ietf.org/html/rfc7515#section-7.
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid KeyID, nonce, url string) ([]byte, error) {
if key == nil {
@ -58,20 +67,36 @@ func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid KeyID, nonce, ur
if alg == "" || !sha.Available() {
return nil, ErrUnsupportedKey
}
var phead string
headers := struct {
Alg string `json:"alg"`
KID string `json:"kid,omitempty"`
JWK json.RawMessage `json:"jwk,omitempty"`
Nonce string `json:"nonce,omitempty"`
URL string `json:"url"`
}{
Alg: alg,
Nonce: nonce,
URL: url,
}
switch kid {
case noKeyID:
jwk, err := jwkEncode(key.Public())
if err != nil {
return nil, err
}
phead = fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q,"url":%q}`, alg, jwk, nonce, url)
headers.JWK = json.RawMessage(jwk)
default:
phead = fmt.Sprintf(`{"alg":%q,"kid":%q,"nonce":%q,"url":%q}`, alg, kid, nonce, url)
headers.KID = string(kid)
}
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
phJSON, err := json.Marshal(headers)
if err != nil {
return nil, err
}
phead := base64.RawURLEncoding.EncodeToString([]byte(phJSON))
var payload string
if claimset != noPayload {
if val, ok := claimset.(string); ok {
payload = val
} else {
cs, err := json.Marshal(claimset)
if err != nil {
return nil, err
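
The rewrite above swaps the hand-formatted fmt.Sprintf protected header for json.Marshal of a headers struct, so empty "kid", "jwk", and "nonce" values are dropped via omitempty instead of being emitted as empty strings. A standalone sketch of the same marshalling pattern, with the field layout copied from the diff and purely illustrative values:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	headers := struct {
		Alg   string          `json:"alg"`
		KID   string          `json:"kid,omitempty"`
		JWK   json.RawMessage `json:"jwk,omitempty"`
		Nonce string          `json:"nonce,omitempty"`
		URL   string          `json:"url"`
	}{
		Alg: "ES256",
		KID: "https://acme.example/acct/123", // hypothetical account URL
		URL: "https://acme.example/new-order",
		// Nonce left empty on purpose: omitempty omits it, as needed for the
		// inner JWS of a key rollover (see noNonce above).
	}
	phJSON, err := json.Marshal(headers)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(phJSON))
	fmt.Println(base64.RawURLEncoding.EncodeToString(phJSON))
}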


@ -24,6 +24,9 @@ import (
//
// It only works with CAs implementing RFC 8555.
func (c *Client) DeactivateReg(ctx context.Context) error {
if _, err := c.Discover(ctx); err != nil { // required by c.accountKID
return err
}
url := string(c.accountKID(ctx))
if url == "" {
return ErrNoAccount
@ -148,6 +151,42 @@ func responseAccount(res *http.Response) (*Account, error) {
}, nil
}
// accountKeyRollover attempts to perform account key rollover.
// On success it will change client.Key to the new key.
func (c *Client) accountKeyRollover(ctx context.Context, newKey crypto.Signer) error {
dir, err := c.Discover(ctx) // Also required by c.accountKID
if err != nil {
return err
}
kid := c.accountKID(ctx)
if kid == noKeyID {
return ErrNoAccount
}
oldKey, err := jwkEncode(c.Key.Public())
if err != nil {
return err
}
payload := struct {
Account string `json:"account"`
OldKey json.RawMessage `json:"oldKey"`
}{
Account: string(kid),
OldKey: json.RawMessage(oldKey),
}
inner, err := jwsEncodeJSON(payload, newKey, noKeyID, noNonce, dir.KeyChangeURL)
if err != nil {
return err
}
res, err := c.post(ctx, nil, dir.KeyChangeURL, base64.RawURLEncoding.EncodeToString(inner), wantStatus(http.StatusOK))
if err != nil {
return err
}
defer res.Body.Close()
c.Key = newKey
return nil
}
// AuthorizeOrder initiates the order-based application for certificate issuance,
// as opposed to pre-authorization in Authorize.
// It is only supported by CAs implementing RFC 8555.


@ -12,7 +12,7 @@ import (
"errors"
"math/bits"
"golang.org/x/crypto/internal/subtle"
"golang.org/x/crypto/internal/alias"
)
const (
@ -189,7 +189,7 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
panic("chacha20: output smaller than input")
}
dst = dst[:len(src)]
if subtle.InexactOverlap(dst, src) {
if alias.InexactOverlap(dst, src) {
panic("chacha20: invalid buffer overlap")
}


@ -9,7 +9,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519"
import (
"crypto/subtle"
"fmt"
"errors"
"strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
@ -124,10 +125,10 @@ func X25519(scalar, point []byte) ([]byte, error) {
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32)
return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32)
return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
@ -138,7 +139,7 @@ func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
copy(base[:], point)
ScalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
return nil, fmt.Errorf("bad input point: low order point")
return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil


@ -5,9 +5,8 @@
//go:build !purego
// +build !purego
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
// Package alias implements memory aliasing tests.
package alias
import "unsafe"


@ -5,9 +5,8 @@
//go:build purego
// +build purego
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
// Package alias implements memory aliasing tests.
package alias
// This is the Google App Engine standard variant based on reflect
// because the unsafe package and cgo are disallowed.


@ -460,6 +460,8 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
// certKeyAlgoNames is a mapping from known certificate algorithm names to the
// corresponding public key signature algorithm.
//
// This map must be kept in sync with the one in agent/client.go.
var certKeyAlgoNames = map[string]string{
CertAlgoRSAv01: KeyAlgoRSA,
CertAlgoRSASHA256v01: KeyAlgoRSASHA256,

vendor/golang.org/x/image/AUTHORS generated vendored

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.


@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.


@ -85,7 +85,7 @@ func decodeRGB(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
// decodeNRGBA reads a 32 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
func decodeNRGBA(r io.Reader, c image.Config, topDown, allowAlpha bool) (image.Image, error) {
rgba := image.NewNRGBA(image.Rect(0, 0, c.Width, c.Height))
if c.Width == 0 || c.Height == 0 {
return rgba, nil
@ -102,6 +102,9 @@ func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error)
for i := 0; i < len(p); i += 4 {
// BMP images are stored in BGRA order rather than RGBA order.
p[i+0], p[i+2] = p[i+2], p[i+0]
if !allowAlpha {
p[i+3] = 0xFF
}
}
}
return rgba, nil
@ -110,7 +113,7 @@ func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error)
// Decode reads a BMP image from r and returns it as an image.Image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func Decode(r io.Reader) (image.Image, error) {
c, bpp, topDown, err := decodeConfig(r)
c, bpp, topDown, allowAlpha, err := decodeConfig(r)
if err != nil {
return nil, err
}
@ -120,7 +123,7 @@ func Decode(r io.Reader) (image.Image, error) {
case 24:
return decodeRGB(r, c, topDown)
case 32:
return decodeNRGBA(r, c, topDown)
return decodeNRGBA(r, c, topDown, allowAlpha)
}
panic("unreachable")
}
@ -129,13 +132,15 @@ func Decode(r io.Reader) (image.Image, error) {
// decoding the entire image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func DecodeConfig(r io.Reader) (image.Config, error) {
config, _, _, err := decodeConfig(r)
config, _, _, _, err := decodeConfig(r)
return config, err
}
func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown bool, err error) {
// We only support those BMP images that are a BITMAPFILEHEADER
// immediately followed by a BITMAPINFOHEADER.
func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown bool, allowAlpha bool, err error) {
// We only support those BMP images with one of the following DIB headers:
// - BITMAPINFOHEADER (40 bytes)
// - BITMAPV4HEADER (108 bytes)
// - BITMAPV5HEADER (124 bytes)
const (
fileHeaderLen = 14
infoHeaderLen = 40
@ -144,18 +149,24 @@ func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown b
)
var b [1024]byte
if _, err := io.ReadFull(r, b[:fileHeaderLen+4]); err != nil {
return image.Config{}, 0, false, err
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return image.Config{}, 0, false, false, err
}
if string(b[:2]) != "BM" {
return image.Config{}, 0, false, errors.New("bmp: invalid format")
return image.Config{}, 0, false, false, errors.New("bmp: invalid format")
}
offset := readUint32(b[10:14])
infoLen := readUint32(b[14:18])
if infoLen != infoHeaderLen && infoLen != v4InfoHeaderLen && infoLen != v5InfoHeaderLen {
return image.Config{}, 0, false, ErrUnsupported
return image.Config{}, 0, false, false, ErrUnsupported
}
if _, err := io.ReadFull(r, b[fileHeaderLen+4:fileHeaderLen+infoLen]); err != nil {
return image.Config{}, 0, false, err
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return image.Config{}, 0, false, false, err
}
width := int(int32(readUint32(b[18:22])))
height := int(int32(readUint32(b[22:26])))
@ -163,12 +174,12 @@ func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown b
height, topDown = -height, true
}
if width < 0 || height < 0 {
return image.Config{}, 0, false, ErrUnsupported
return image.Config{}, 0, false, false, ErrUnsupported
}
// We only support 1 plane and 8, 24 or 32 bits per pixel and no
// compression.
planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34])
// if compression is set to BITFIELDS, but the bitmask is set to the default bitmask
// if compression is set to BI_BITFIELDS, but the bitmask is set to the default bitmask
// that would be used if compression was set to 0, we can continue as if compression was 0
if compression == 3 && infoLen > infoHeaderLen &&
readUint32(b[54:58]) == 0xff0000 && readUint32(b[58:62]) == 0xff00 &&
@ -176,16 +187,16 @@ func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown b
compression = 0
}
if planes != 1 || compression != 0 {
return image.Config{}, 0, false, ErrUnsupported
return image.Config{}, 0, false, false, ErrUnsupported
}
switch bpp {
case 8:
if offset != fileHeaderLen+infoLen+256*4 {
return image.Config{}, 0, false, ErrUnsupported
return image.Config{}, 0, false, false, ErrUnsupported
}
_, err = io.ReadFull(r, b[:256*4])
if err != nil {
return image.Config{}, 0, false, err
return image.Config{}, 0, false, false, err
}
pcm := make(color.Palette, 256)
for i := range pcm {
@ -193,19 +204,40 @@ func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown b
// Every 4th byte is padding.
pcm[i] = color.RGBA{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF}
}
return image.Config{ColorModel: pcm, Width: width, Height: height}, 8, topDown, nil
return image.Config{ColorModel: pcm, Width: width, Height: height}, 8, topDown, false, nil
case 24:
if offset != fileHeaderLen+infoLen {
return image.Config{}, 0, false, ErrUnsupported
return image.Config{}, 0, false, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 24, topDown, nil
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 24, topDown, false, nil
case 32:
if offset != fileHeaderLen+infoLen {
return image.Config{}, 0, false, ErrUnsupported
return image.Config{}, 0, false, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 32, topDown, nil
// 32 bits per pixel is possibly RGBX (X is padding) or RGBA (A is
// alpha transparency). However, for BMP images, "Alpha is a
// poorly-documented and inconsistently-used feature" says
// https://source.chromium.org/chromium/chromium/src/+/bc0a792d7ebc587190d1a62ccddba10abeea274b:third_party/blink/renderer/platform/image-decoders/bmp/bmp_image_reader.cc;l=621
//
// That goes on to say "BITMAPV3HEADER+ have an alpha bitmask in the
// info header... so we respect it at all times... [For earlier
// (smaller) headers we] ignore alpha in Windows V3 BMPs except inside
// ICO files".
//
// "Ignore" means to always set alpha to 0xFF (fully opaque):
// https://source.chromium.org/chromium/chromium/src/+/bc0a792d7ebc587190d1a62ccddba10abeea274b:third_party/blink/renderer/platform/image-decoders/bmp/bmp_image_reader.h;l=272
//
// Confusingly, "Windows V3" does not correspond to BITMAPV3HEADER, but
// instead corresponds to the earlier (smaller) BITMAPINFOHEADER:
// https://source.chromium.org/chromium/chromium/src/+/bc0a792d7ebc587190d1a62ccddba10abeea274b:third_party/blink/renderer/platform/image-decoders/bmp/bmp_image_reader.cc;l=258
//
// This Go package does not support ICO files and the (infoLen >
// infoHeaderLen) condition distinguishes BITMAPINFOHEADER (40 bytes)
// vs later (larger) headers.
allowAlpha = infoLen > infoHeaderLen
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 32, topDown, allowAlpha, nil
}
return image.Config{}, 0, false, ErrUnsupported
return image.Config{}, 0, false, false, ErrUnsupported
}
func init() {
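
The allowAlpha plumbing above changes what callers get back for 32-bit images: alpha is honoured only when the file carries the larger BITMAPV4HEADER or BITMAPV5HEADER, and is forced to opaque for plain 40-byte BITMAPINFOHEADER files. A small caller sketch showing where that difference becomes visible (the file name is a placeholder):

package main

import (
	"fmt"
	"image"
	"log"
	"os"

	"golang.org/x/image/bmp"
)

func main() {
	f, err := os.Open("logo-32bpp.bmp") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	img, err := bmp.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	// 32-bit BMPs decode to *image.NRGBA. With a V4/V5 header the stored
	// alpha survives; with a plain BITMAPINFOHEADER it is now forced to 0xFF.
	nrgba := img.(*image.NRGBA)
	fmt.Println("alpha at (0,0):", nrgba.NRGBAAt(0, 0).A)
}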


@ -16,6 +16,7 @@ import (
)
var (
errIncompleteCode = errors.New("ccitt: incomplete code")
errInvalidBounds = errors.New("ccitt: invalid bounds")
errInvalidCode = errors.New("ccitt: invalid code")
errInvalidMode = errors.New("ccitt: invalid mode")
@ -48,6 +49,10 @@ const (
Group4
)
// AutoDetectHeight is passed as the height argument to NewReader to indicate
// that the image height (the number of rows) is not known in advance.
const AutoDetectHeight = -1
// Options are optional parameters.
type Options struct {
// Align means that some variable-bit-width codes are byte-aligned.
@ -205,10 +210,14 @@ func decode(b *bitReader, decodeTable [][2]int16) (uint32, error) {
for {
bit, err := b.nextBit()
if err != nil {
if err == io.EOF {
err = errIncompleteCode
}
return 0, err
}
bitsRead |= bit << (63 - nBitsRead)
nBitsRead++
// The "&1" is redundant, but can eliminate a bounds check.
state = int32(decodeTable[state][bit&1])
if state < 0 {
@ -222,6 +231,35 @@ func decode(b *bitReader, decodeTable [][2]int16) (uint32, error) {
}
}
// decodeEOL decodes the 12-bit EOL code 0000_0000_0001.
func decodeEOL(b *bitReader) error {
nBitsRead, bitsRead := uint32(0), uint64(0)
for {
bit, err := b.nextBit()
if err != nil {
if err == io.EOF {
err = errMissingEOL
}
return err
}
bitsRead |= bit << (63 - nBitsRead)
nBitsRead++
if nBitsRead < 12 {
if bit&1 == 0 {
continue
}
} else if bit&1 != 0 {
return nil
}
// Unread the bits we've read, then return errMissingEOL.
b.bits = (b.bits >> nBitsRead) | bitsRead
b.nBits += nBitsRead
return errMissingEOL
}
}
type reader struct {
br bitReader
subFormat SubFormat
@ -231,7 +269,10 @@ type reader struct {
// rowsRemaining starts at the image height in pixels, when the reader is
// driven through the io.Reader interface, and decrements to zero as rows
// are decoded. When driven through DecodeIntoGray, this field is unused.
// are decoded. Alternatively, it may be negative if the image height is
// not known in advance at the time of the NewReader call.
//
// When driven through DecodeIntoGray, this field is unused.
rowsRemaining int
// curr and prev hold the current and previous rows. Each element is either
@ -269,6 +310,19 @@ type reader struct {
// seenStartOfImage is whether we've called the startDecode method.
seenStartOfImage bool
// truncated is whether the input is missing the final 6 consecutive EOL's
// (for Group3) or 2 consecutive EOL's (for Group4). Omitting that trailer
// (but otherwise padding to a byte boundary, with either all 0 bits or all
// 1 bits) is invalid according to the spec, but happens in practice when
// exporting from Adobe Acrobat to TIFF + CCITT. This package silently
// ignores the format error for CCITT input that has been truncated in that
// fashion, returning the full decoded image.
//
// Detecting trailer truncation (just after the final row of pixels)
// requires knowing which row is the final row, and therefore does not
// trigger if the image height is not known in advance.
truncated bool
// readErr is a sticky error for the Read method.
readErr error
}
@ -294,17 +348,50 @@ func (z *reader) Read(p []byte) (int, error) {
// Decode the next row, if necessary.
if z.atStartOfRow {
if z.rowsRemaining <= 0 {
if z.readErr = z.finishDecode(); z.readErr != nil {
if z.rowsRemaining < 0 {
// We do not know the image height in advance. See if the next
// code is an EOL. If it is, it is consumed. If it isn't, the
// bitReader shouldn't advance along the bit stream, and we
// simply decode another row of pixel data.
//
// For the Group4 subFormat, we may need to align to a byte
// boundary. For the Group3 subFormat, the previous z.decodeRow
// call (or z.startDecode call) has already consumed one of the
// 6 consecutive EOL's. The next EOL is actually the second of
// 6, in the middle, and we shouldn't align at that point.
if z.align && (z.subFormat == Group4) {
z.br.alignToByteBoundary()
}
if err := z.decodeEOL(); err == errMissingEOL {
// No-op. It's another row of pixel data.
} else if err != nil {
z.readErr = err
break
} else {
if z.readErr = z.finishDecode(true); z.readErr != nil {
break
}
z.readErr = io.EOF
break
}
} else if z.rowsRemaining == 0 {
// We do know the image height in advance, and we have already
// decoded exactly that many rows.
if z.readErr = z.finishDecode(false); z.readErr != nil {
break
}
z.readErr = io.EOF
break
} else {
z.rowsRemaining--
}
if z.readErr = z.decodeRow(); z.readErr != nil {
if z.readErr = z.decodeRow(z.rowsRemaining == 0); z.readErr != nil {
break
}
z.rowsRemaining--
}
// Pack from z.curr (1 byte per pixel) to p (1 bit per pixel).
@ -351,32 +438,44 @@ func (z *reader) startDecode() error {
return nil
}
func (z *reader) finishDecode() error {
func (z *reader) finishDecode(alreadySeenEOL bool) error {
numberOfEOLs := 0
switch z.subFormat {
case Group3:
if z.truncated {
return nil
}
// The stream ends with a RTC (Return To Control) of 6 consecutive
// EOL's, but we should have already just seen an EOL, either in
// z.startDecode (for a zero-height image) or in z.decodeRow.
numberOfEOLs = 5
case Group4:
// The stream ends with two EOL's, the first of which is possibly
// byte-aligned.
numberOfEOLs = 2
if err := z.decodeEOL(); err == nil {
numberOfEOLs--
} else if err == errInvalidCode {
// Try again, this time starting from a byte boundary.
autoDetectHeight := z.rowsRemaining < 0
if autoDetectHeight {
// Aligning to a byte boundary was already handled by reader.Read.
} else if z.align {
z.br.alignToByteBoundary()
} else {
}
// The stream ends with two EOL's. If the first one is missing, and we
// had an explicit image height, we just assume that the trailing two
// EOL's were truncated and return a nil error.
if err := z.decodeEOL(); err != nil {
if (err == errMissingEOL) && !autoDetectHeight {
z.truncated = true
return nil
}
return err
}
numberOfEOLs = 1
default:
return errUnsupportedSubFormat
}
if alreadySeenEOL {
numberOfEOLs--
}
for ; numberOfEOLs > 0; numberOfEOLs-- {
if err := z.decodeEOL(); err != nil {
return err
@ -386,19 +485,10 @@ func (z *reader) finishDecode() error {
}
func (z *reader) decodeEOL() error {
// TODO: EOL doesn't have to be in the modeDecodeTable. It could be in its
// own table, or we could just hard-code it, especially if we might need to
// cater for optional byte-alignment, or an arbitrary number (potentially
// more than 8) of 0-valued padding bits.
if mode, err := decode(&z.br, modeDecodeTable[:]); err != nil {
return err
} else if mode != modeEOL {
return errMissingEOL
}
return nil
return decodeEOL(&z.br)
}
func (z *reader) decodeRow() error {
func (z *reader) decodeRow(finalRow bool) error {
z.wi = 0
z.atStartOfRow = true
z.penColorIsWhite = true
@ -414,7 +504,12 @@ func (z *reader) decodeRow() error {
return err
}
}
return z.decodeEOL()
err := z.decodeEOL()
if finalRow && (err == errMissingEOL) {
z.truncated = true
return nil
}
return err
case Group4:
for ; z.wi < len(z.curr); z.atStartOfRow = false {
@ -654,13 +749,13 @@ func DecodeIntoGray(dst *image.Gray, r io.Reader, order Order, sf SubFormat, opt
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
p := (y - bounds.Min.Y) * dst.Stride
z.curr = dst.Pix[p : p+width]
if err := z.decodeRow(); err != nil {
if err := z.decodeRow(y+1 == bounds.Max.Y); err != nil {
return err
}
z.curr, z.prev = nil, z.curr
}
if err := z.finishDecode(); err != nil {
if err := z.finishDecode(false); err != nil {
return err
}
@ -677,9 +772,12 @@ func DecodeIntoGray(dst *image.Gray, r io.Reader, order Order, sf SubFormat, opt
// NewReader returns an io.Reader that decodes the CCITT-formatted data in r.
// The resultant byte stream is one bit per pixel (MSB first), with 1 meaning
// white and 0 meaning black. Each row in the result is byte-aligned.
//
// A negative height, such as passing AutoDetectHeight, means that the image
// height is not known in advance. A negative width is invalid.
func NewReader(r io.Reader, order Order, sf SubFormat, width int, height int, opts *Options) io.Reader {
readErr := error(nil)
if (width < 0) || (height < 0) {
if width < 0 {
readErr = errInvalidBounds
} else if width > maxWidth {
readErr = errUnsupportedWidth
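
The new AutoDetectHeight constant, together with the relaxed bounds check in NewReader, lets callers decode Group3/Group4 streams whose row count is not known up front, which is common for CCITT data pulled out of TIFF or PDF containers. A minimal sketch, assuming a raw Group4 stream 1728 pixels wide (the file name and width are placeholders):

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/image/ccitt"
)

func main() {
	f, err := os.Open("page.g4") // placeholder: raw Group4 data
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Height unknown: pass ccitt.AutoDetectHeight (-1) and let the reader
	// stop at the trailing EOL codes instead of counting rows.
	r := ccitt.NewReader(f, ccitt.MSB, ccitt.Group4, 1728, ccitt.AutoDetectHeight, nil)

	pixels, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded %d bytes (1 bit per pixel, each row byte-aligned)", len(pixels))
}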


@ -31,37 +31,27 @@ package ccitt
// modeDecodeTable represents Table 1 and the End-of-Line code.
//
// +=XXXXX
// b015 +-+
// | +=v0010
// b014 +-+
// | +=XXXXX
// b013 +-+
// | +=XXXXX
// b012 +-+
// | +=XXXXX
// b011 +-+
// | +=XXXXX
// b009 +-+
// | +=v0009
// b007 +-+
// | | +=v0008
// b010 | +-+
// | +=v0005
// b006 +-+
// | | +=v0007
// b008 | +-+
// | +=v0004
// b005 +-+
// | +=v0000
// b003 +-+
// | +=v0001
// b002 +-+
// | | +=v0006
// b004 | +-+
// | +=v0003
// b001 +-+
// +=v0002
// +=XXXXX
// b009 +-+
// | +=v0009
// b007 +-+
// | | +=v0008
// b010 | +-+
// | +=v0005
// b006 +-+
// | | +=v0007
// b008 | +-+
// | +=v0004
// b005 +-+
// | +=v0000
// b003 +-+
// | +=v0001
// b002 +-+
// | | +=v0006
// b004 | +-+
// | +=v0003
// b001 +-+
// +=v0002
var modeDecodeTable = [...][2]int16{
0: {0, 0},
1: {2, ^2},
@ -72,226 +62,221 @@ var modeDecodeTable = [...][2]int16{
6: {7, 8},
7: {9, 10},
8: {^7, ^4},
9: {11, ^9},
9: {0, ^9},
10: {^8, ^5},
11: {12, 0},
12: {13, 0},
13: {14, 0},
14: {15, 0},
15: {0, ^10},
}
// whiteDecodeTable represents Tables 2 and 3 for a white run.
//
// +=XXXXX
// b059 +-+
// | | +=v1792
// b096 | | +-+
// | | | | +=v1984
// b100 | | | +-+
// | | | +=v2048
// b094 | | +-+
// | | | | +=v2112
// b101 | | | | +-+
// | | | | | +=v2176
// b097 | | | +-+
// | | | | +=v2240
// b102 | | | +-+
// | | | +=v2304
// b085 | +-+
// | | +=v1856
// b098 | | +-+
// | | | +=v1920
// b095 | +-+
// | | +=v2368
// b103 | | +-+
// | | | +=v2432
// b099 | +-+
// | | +=v2496
// b104 | +-+
// | +=v2560
// b040 +-+
// | | +=v0029
// b060 | +-+
// | +=v0030
// b026 +-+
// | | +=v0045
// b061 | | +-+
// | | | +=v0046
// b041 | +-+
// | +=v0022
// b016 +-+
// | | +=v0023
// b042 | | +-+
// | | | | +=v0047
// b062 | | | +-+
// | | | +=v0048
// b027 | +-+
// | +=v0013
// b008 +-+
// | | +=v0020
// b043 | | +-+
// | | | | +=v0033
// b063 | | | +-+
// | | | +=v0034
// b028 | | +-+
// | | | | +=v0035
// b064 | | | | +-+
// | | | | | +=v0036
// b044 | | | +-+
// | | | | +=v0037
// b065 | | | +-+
// | | | +=v0038
// b017 | +-+
// | | +=v0019
// b045 | | +-+
// | | | | +=v0031
// b066 | | | +-+
// | | | +=v0032
// b029 | +-+
// | +=v0001
// b004 +-+
// | | +=v0012
// b030 | | +-+
// | | | | +=v0053
// b067 | | | | +-+
// | | | | | +=v0054
// b046 | | | +-+
// | | | +=v0026
// b018 | | +-+
// | | | | +=v0039
// b068 | | | | +-+
// | | | | | +=v0040
// b047 | | | | +-+
// | | | | | | +=v0041
// b069 | | | | | +-+
// | | | | | +=v0042
// b031 | | | +-+
// | | | | +=v0043
// b070 | | | | +-+
// | | | | | +=v0044
// b048 | | | +-+
// | | | +=v0021
// b009 | +-+
// | | +=v0028
// b049 | | +-+
// | | | | +=v0061
// b071 | | | +-+
// | | | +=v0062
// b032 | | +-+
// | | | | +=v0063
// b072 | | | | +-+
// | | | | | +=v0000
// b050 | | | +-+
// | | | | +=v0320
// b073 | | | +-+
// | | | +=v0384
// b019 | +-+
// | +=v0010
// b002 +-+
// | | +=v0011
// b020 | | +-+
// | | | | +=v0027
// b051 | | | | +-+
// | | | | | | +=v0059
// b074 | | | | | +-+
// | | | | | +=v0060
// b033 | | | +-+
// | | | | +=v1472
// b086 | | | | +-+
// | | | | | +=v1536
// b075 | | | | +-+
// | | | | | | +=v1600
// b087 | | | | | +-+
// | | | | | +=v1728
// b052 | | | +-+
// | | | +=v0018
// b010 | | +-+
// | | | | +=v0024
// b053 | | | | +-+
// | | | | | | +=v0049
// b076 | | | | | +-+
// | | | | | +=v0050
// b034 | | | | +-+
// | | | | | | +=v0051
// b077 | | | | | | +-+
// | | | | | | | +=v0052
// b054 | | | | | +-+
// | | | | | +=v0025
// b021 | | | +-+
// | | | | +=v0055
// b078 | | | | +-+
// | | | | | +=v0056
// b055 | | | | +-+
// | | | | | | +=v0057
// b079 | | | | | +-+
// | | | | | +=v0058
// b035 | | | +-+
// | | | +=v0192
// b005 | +-+
// | | +=v1664
// b036 | | +-+
// | | | | +=v0448
// b080 | | | | +-+
// | | | | | +=v0512
// b056 | | | +-+
// | | | | +=v0704
// b088 | | | | +-+
// | | | | | +=v0768
// b081 | | | +-+
// | | | +=v0640
// b022 | | +-+
// | | | | +=v0576
// b082 | | | | +-+
// | | | | | | +=v0832
// b089 | | | | | +-+
// | | | | | +=v0896
// b057 | | | | +-+
// | | | | | | +=v0960
// b090 | | | | | | +-+
// | | | | | | | +=v1024
// b083 | | | | | +-+
// | | | | | | +=v1088
// b091 | | | | | +-+
// | | | | | +=v1152
// b037 | | | +-+
// | | | | +=v1216
// b092 | | | | +-+
// | | | | | +=v1280
// b084 | | | | +-+
// | | | | | | +=v1344
// b093 | | | | | +-+
// | | | | | +=v1408
// b058 | | | +-+
// | | | +=v0256
// b011 | +-+
// | +=v0002
// b001 +-+
// | +=v0003
// b012 | +-+
// | | | +=v0128
// b023 | | +-+
// | | +=v0008
// b006 | +-+
// | | | +=v0009
// b024 | | | +-+
// | | | | | +=v0016
// b038 | | | | +-+
// | | | | +=v0017
// b013 | | +-+
// | | +=v0004
// b003 +-+
// | +=v0005
// b014 | +-+
// | | | +=v0014
// b039 | | | +-+
// | | | | +=v0015
// b025 | | +-+
// | | +=v0064
// b007 +-+
// | +=v0006
// b015 +-+
// +=v0007
// +=XXXXX
// b059 +-+
// | | +=v1792
// b096 | | +-+
// | | | | +=v1984
// b100 | | | +-+
// | | | +=v2048
// b094 | | +-+
// | | | | +=v2112
// b101 | | | | +-+
// | | | | | +=v2176
// b097 | | | +-+
// | | | | +=v2240
// b102 | | | +-+
// | | | +=v2304
// b085 | +-+
// | | +=v1856
// b098 | | +-+
// | | | +=v1920
// b095 | +-+
// | | +=v2368
// b103 | | +-+
// | | | +=v2432
// b099 | +-+
// | | +=v2496
// b104 | +-+
// | +=v2560
// b040 +-+
// | | +=v0029
// b060 | +-+
// | +=v0030
// b026 +-+
// | | +=v0045
// b061 | | +-+
// | | | +=v0046
// b041 | +-+
// | +=v0022
// b016 +-+
// | | +=v0023
// b042 | | +-+
// | | | | +=v0047
// b062 | | | +-+
// | | | +=v0048
// b027 | +-+
// | +=v0013
// b008 +-+
// | | +=v0020
// b043 | | +-+
// | | | | +=v0033
// b063 | | | +-+
// | | | +=v0034
// b028 | | +-+
// | | | | +=v0035
// b064 | | | | +-+
// | | | | | +=v0036
// b044 | | | +-+
// | | | | +=v0037
// b065 | | | +-+
// | | | +=v0038
// b017 | +-+
// | | +=v0019
// b045 | | +-+
// | | | | +=v0031
// b066 | | | +-+
// | | | +=v0032
// b029 | +-+
// | +=v0001
// b004 +-+
// | | +=v0012
// b030 | | +-+
// | | | | +=v0053
// b067 | | | | +-+
// | | | | | +=v0054
// b046 | | | +-+
// | | | +=v0026
// b018 | | +-+
// | | | | +=v0039
// b068 | | | | +-+
// | | | | | +=v0040
// b047 | | | | +-+
// | | | | | | +=v0041
// b069 | | | | | +-+
// | | | | | +=v0042
// b031 | | | +-+
// | | | | +=v0043
// b070 | | | | +-+
// | | | | | +=v0044
// b048 | | | +-+
// | | | +=v0021
// b009 | +-+
// | | +=v0028
// b049 | | +-+
// | | | | +=v0061
// b071 | | | +-+
// | | | +=v0062
// b032 | | +-+
// | | | | +=v0063
// b072 | | | | +-+
// | | | | | +=v0000
// b050 | | | +-+
// | | | | +=v0320
// b073 | | | +-+
// | | | +=v0384
// b019 | +-+
// | +=v0010
// b002 +-+
// | | +=v0011
// b020 | | +-+
// | | | | +=v0027
// b051 | | | | +-+
// | | | | | | +=v0059
// b074 | | | | | +-+
// | | | | | +=v0060
// b033 | | | +-+
// | | | | +=v1472
// b086 | | | | +-+
// | | | | | +=v1536
// b075 | | | | +-+
// | | | | | | +=v1600
// b087 | | | | | +-+
// | | | | | +=v1728
// b052 | | | +-+
// | | | +=v0018
// b010 | | +-+
// | | | | +=v0024
// b053 | | | | +-+
// | | | | | | +=v0049
// b076 | | | | | +-+
// | | | | | +=v0050
// b034 | | | | +-+
// | | | | | | +=v0051
// b077 | | | | | | +-+
// | | | | | | | +=v0052
// b054 | | | | | +-+
// | | | | | +=v0025
// b021 | | | +-+
// | | | | +=v0055
// b078 | | | | +-+
// | | | | | +=v0056
// b055 | | | | +-+
// | | | | | | +=v0057
// b079 | | | | | +-+
// | | | | | +=v0058
// b035 | | | +-+
// | | | +=v0192
// b005 | +-+
// | | +=v1664
// b036 | | +-+
// | | | | +=v0448
// b080 | | | | +-+
// | | | | | +=v0512
// b056 | | | +-+
// | | | | +=v0704
// b088 | | | | +-+
// | | | | | +=v0768
// b081 | | | +-+
// | | | +=v0640
// b022 | | +-+
// | | | | +=v0576
// b082 | | | | +-+
// | | | | | | +=v0832
// b089 | | | | | +-+
// | | | | | +=v0896
// b057 | | | | +-+
// | | | | | | +=v0960
// b090 | | | | | | +-+
// | | | | | | | +=v1024
// b083 | | | | | +-+
// | | | | | | +=v1088
// b091 | | | | | +-+
// | | | | | +=v1152
// b037 | | | +-+
// | | | | +=v1216
// b092 | | | | +-+
// | | | | | +=v1280
// b084 | | | | +-+
// | | | | | | +=v1344
// b093 | | | | | +-+
// | | | | | +=v1408
// b058 | | | +-+
// | | | +=v0256
// b011 | +-+
// | +=v0002
// b001 +-+
// | +=v0003
// b012 | +-+
// | | | +=v0128
// b023 | | +-+
// | | +=v0008
// b006 | +-+
// | | | +=v0009
// b024 | | | +-+
// | | | | | +=v0016
// b038 | | | | +-+
// | | | | +=v0017
// b013 | | +-+
// | | +=v0004
// b003 +-+
// | +=v0005
// b014 | +-+
// | | | +=v0014
// b039 | | | +-+
// | | | | +=v0015
// b025 | | +-+
// | | +=v0064
// b007 +-+
// | +=v0006
// b015 +-+
// +=v0007
var whiteDecodeTable = [...][2]int16{
0: {0, 0},
1: {2, 3},
@ -402,215 +387,215 @@ var whiteDecodeTable = [...][2]int16{
// blackDecodeTable represents Tables 2 and 3 for a black run.
//
// +=XXXXX
// b017 +-+
// | | +=v1792
// b042 | | +-+
// | | | | +=v1984
// b063 | | | +-+
// | | | +=v2048
// b029 | | +-+
// | | | | +=v2112
// b064 | | | | +-+
// | | | | | +=v2176
// b043 | | | +-+
// | | | | +=v2240
// b065 | | | +-+
// | | | +=v2304
// b022 | +-+
// | | +=v1856
// b044 | | +-+
// | | | +=v1920
// b030 | +-+
// | | +=v2368
// b066 | | +-+
// | | | +=v2432
// b045 | +-+
// | | +=v2496
// b067 | +-+
// | +=v2560
// b013 +-+
// | | +=v0018
// b031 | | +-+
// | | | | +=v0052
// b068 | | | | +-+
// | | | | | | +=v0640
// b095 | | | | | +-+
// | | | | | +=v0704
// b046 | | | +-+
// | | | | +=v0768
// b096 | | | | +-+
// | | | | | +=v0832
// b069 | | | +-+
// | | | +=v0055
// b023 | | +-+
// | | | | +=v0056
// b070 | | | | +-+
// | | | | | | +=v1280
// b097 | | | | | +-+
// | | | | | +=v1344
// b047 | | | | +-+
// | | | | | | +=v1408
// b098 | | | | | | +-+
// | | | | | | | +=v1472
// b071 | | | | | +-+
// | | | | | +=v0059
// b032 | | | +-+
// | | | | +=v0060
// b072 | | | | +-+
// | | | | | | +=v1536
// b099 | | | | | +-+
// | | | | | +=v1600
// b048 | | | +-+
// | | | +=v0024
// b018 | +-+
// | | +=v0025
// b049 | | +-+
// | | | | +=v1664
// b100 | | | | +-+
// | | | | | +=v1728
// b073 | | | +-+
// | | | +=v0320
// b033 | | +-+
// | | | | +=v0384
// b074 | | | | +-+
// | | | | | +=v0448
// b050 | | | +-+
// | | | | +=v0512
// b101 | | | | +-+
// | | | | | +=v0576
// b075 | | | +-+
// | | | +=v0053
// b024 | +-+
// | | +=v0054
// b076 | | +-+
// | | | | +=v0896
// b102 | | | +-+
// | | | +=v0960
// b051 | | +-+
// | | | | +=v1024
// b103 | | | | +-+
// | | | | | +=v1088
// b077 | | | +-+
// | | | | +=v1152
// b104 | | | +-+
// | | | +=v1216
// b034 | +-+
// | +=v0064
// b010 +-+
// | | +=v0013
// b019 | | +-+
// | | | | +=v0023
// b052 | | | | +-+
// | | | | | | +=v0050
// b078 | | | | | +-+
// | | | | | +=v0051
// b035 | | | | +-+
// | | | | | | +=v0044
// b079 | | | | | | +-+
// | | | | | | | +=v0045
// b053 | | | | | +-+
// | | | | | | +=v0046
// b080 | | | | | +-+
// | | | | | +=v0047
// b025 | | | +-+
// | | | | +=v0057
// b081 | | | | +-+
// | | | | | +=v0058
// b054 | | | | +-+
// | | | | | | +=v0061
// b082 | | | | | +-+
// | | | | | +=v0256
// b036 | | | +-+
// | | | +=v0016
// b014 | +-+
// | | +=v0017
// b037 | | +-+
// | | | | +=v0048
// b083 | | | | +-+
// | | | | | +=v0049
// b055 | | | +-+
// | | | | +=v0062
// b084 | | | +-+
// | | | +=v0063
// b026 | | +-+
// | | | | +=v0030
// b085 | | | | +-+
// | | | | | +=v0031
// b056 | | | | +-+
// | | | | | | +=v0032
// b086 | | | | | +-+
// | | | | | +=v0033
// b038 | | | +-+
// | | | | +=v0040
// b087 | | | | +-+
// | | | | | +=v0041
// b057 | | | +-+
// | | | +=v0022
// b020 | +-+
// | +=v0014
// b008 +-+
// | | +=v0010
// b015 | | +-+
// | | | +=v0011
// b011 | +-+
// | | +=v0015
// b027 | | +-+
// | | | | +=v0128
// b088 | | | | +-+
// | | | | | +=v0192
// b058 | | | | +-+
// | | | | | | +=v0026
// b089 | | | | | +-+
// | | | | | +=v0027
// b039 | | | +-+
// | | | | +=v0028
// b090 | | | | +-+
// | | | | | +=v0029
// b059 | | | +-+
// | | | +=v0019
// b021 | | +-+
// | | | | +=v0020
// b060 | | | | +-+
// | | | | | | +=v0034
// b091 | | | | | +-+
// | | | | | +=v0035
// b040 | | | | +-+
// | | | | | | +=v0036
// b092 | | | | | | +-+
// | | | | | | | +=v0037
// b061 | | | | | +-+
// | | | | | | +=v0038
// b093 | | | | | +-+
// | | | | | +=v0039
// b028 | | | +-+
// | | | | +=v0021
// b062 | | | | +-+
// | | | | | | +=v0042
// b094 | | | | | +-+
// | | | | | +=v0043
// b041 | | | +-+
// | | | +=v0000
// b016 | +-+
// | +=v0012
// b006 +-+
// | | +=v0009
// b012 | | +-+
// | | | +=v0008
// b009 | +-+
// | +=v0007
// b004 +-+
// | | +=v0006
// b007 | +-+
// | +=v0005
// b002 +-+
// | | +=v0001
// b005 | +-+
// | +=v0004
// b001 +-+
// | +=v0003
// b003 +-+
// +=v0002
// +=XXXXX
// b017 +-+
// | | +=v1792
// b042 | | +-+
// | | | | +=v1984
// b063 | | | +-+
// | | | +=v2048
// b029 | | +-+
// | | | | +=v2112
// b064 | | | | +-+
// | | | | | +=v2176
// b043 | | | +-+
// | | | | +=v2240
// b065 | | | +-+
// | | | +=v2304
// b022 | +-+
// | | +=v1856
// b044 | | +-+
// | | | +=v1920
// b030 | +-+
// | | +=v2368
// b066 | | +-+
// | | | +=v2432
// b045 | +-+
// | | +=v2496
// b067 | +-+
// | +=v2560
// b013 +-+
// | | +=v0018
// b031 | | +-+
// | | | | +=v0052
// b068 | | | | +-+
// | | | | | | +=v0640
// b095 | | | | | +-+
// | | | | | +=v0704
// b046 | | | +-+
// | | | | +=v0768
// b096 | | | | +-+
// | | | | | +=v0832
// b069 | | | +-+
// | | | +=v0055
// b023 | | +-+
// | | | | +=v0056
// b070 | | | | +-+
// | | | | | | +=v1280
// b097 | | | | | +-+
// | | | | | +=v1344
// b047 | | | | +-+
// | | | | | | +=v1408
// b098 | | | | | | +-+
// | | | | | | | +=v1472
// b071 | | | | | +-+
// | | | | | +=v0059
// b032 | | | +-+
// | | | | +=v0060
// b072 | | | | +-+
// | | | | | | +=v1536
// b099 | | | | | +-+
// | | | | | +=v1600
// b048 | | | +-+
// | | | +=v0024
// b018 | +-+
// | | +=v0025
// b049 | | +-+
// | | | | +=v1664
// b100 | | | | +-+
// | | | | | +=v1728
// b073 | | | +-+
// | | | +=v0320
// b033 | | +-+
// | | | | +=v0384
// b074 | | | | +-+
// | | | | | +=v0448
// b050 | | | +-+
// | | | | +=v0512
// b101 | | | | +-+
// | | | | | +=v0576
// b075 | | | +-+
// | | | +=v0053
// b024 | +-+
// | | +=v0054
// b076 | | +-+
// | | | | +=v0896
// b102 | | | +-+
// | | | +=v0960
// b051 | | +-+
// | | | | +=v1024
// b103 | | | | +-+
// | | | | | +=v1088
// b077 | | | +-+
// | | | | +=v1152
// b104 | | | +-+
// | | | +=v1216
// b034 | +-+
// | +=v0064
// b010 +-+
// | | +=v0013
// b019 | | +-+
// | | | | +=v0023
// b052 | | | | +-+
// | | | | | | +=v0050
// b078 | | | | | +-+
// | | | | | +=v0051
// b035 | | | | +-+
// | | | | | | +=v0044
// b079 | | | | | | +-+
// | | | | | | | +=v0045
// b053 | | | | | +-+
// | | | | | | +=v0046
// b080 | | | | | +-+
// | | | | | +=v0047
// b025 | | | +-+
// | | | | +=v0057
// b081 | | | | +-+
// | | | | | +=v0058
// b054 | | | | +-+
// | | | | | | +=v0061
// b082 | | | | | +-+
// | | | | | +=v0256
// b036 | | | +-+
// | | | +=v0016
// b014 | +-+
// | | +=v0017
// b037 | | +-+
// | | | | +=v0048
// b083 | | | | +-+
// | | | | | +=v0049
// b055 | | | +-+
// | | | | +=v0062
// b084 | | | +-+
// | | | +=v0063
// b026 | | +-+
// | | | | +=v0030
// b085 | | | | +-+
// | | | | | +=v0031
// b056 | | | | +-+
// | | | | | | +=v0032
// b086 | | | | | +-+
// | | | | | +=v0033
// b038 | | | +-+
// | | | | +=v0040
// b087 | | | | +-+
// | | | | | +=v0041
// b057 | | | +-+
// | | | +=v0022
// b020 | +-+
// | +=v0014
// b008 +-+
// | | +=v0010
// b015 | | +-+
// | | | +=v0011
// b011 | +-+
// | | +=v0015
// b027 | | +-+
// | | | | +=v0128
// b088 | | | | +-+
// | | | | | +=v0192
// b058 | | | | +-+
// | | | | | | +=v0026
// b089 | | | | | +-+
// | | | | | +=v0027
// b039 | | | +-+
// | | | | +=v0028
// b090 | | | | +-+
// | | | | | +=v0029
// b059 | | | +-+
// | | | +=v0019
// b021 | | +-+
// | | | | +=v0020
// b060 | | | | +-+
// | | | | | | +=v0034
// b091 | | | | | +-+
// | | | | | +=v0035
// b040 | | | | +-+
// | | | | | | +=v0036
// b092 | | | | | | +-+
// | | | | | | | +=v0037
// b061 | | | | | +-+
// | | | | | | +=v0038
// b093 | | | | | +-+
// | | | | | +=v0039
// b028 | | | +-+
// | | | | +=v0021
// b062 | | | | +-+
// | | | | | | +=v0042
// b094 | | | | | +-+
// | | | | | +=v0043
// b041 | | | +-+
// | | | +=v0000
// b016 | +-+
// | +=v0012
// b006 +-+
// | | +=v0009
// b012 | | +-+
// | | | +=v0008
// b009 | +-+
// | +=v0007
// b004 +-+
// | | +=v0006
// b007 | +-+
// | +=v0005
// b002 +-+
// | | +=v0001
// b005 | +-+
// | +=v0004
// b001 +-+
// | +=v0003
// b003 +-+
// +=v0002
var blackDecodeTable = [...][2]int16{
0: {0, 0},
1: {2, 3},
@ -733,17 +718,16 @@ type bitString struct {
// modeEncodeTable represents Table 1 and the End-of-Line code.
var modeEncodeTable = [...]bitString{
0: {0x0001, 4}, // "0001"
1: {0x0001, 3}, // "001"
2: {0x0001, 1}, // "1"
3: {0x0003, 3}, // "011"
4: {0x0003, 6}, // "000011"
5: {0x0003, 7}, // "0000011"
6: {0x0002, 3}, // "010"
7: {0x0002, 6}, // "000010"
8: {0x0002, 7}, // "0000010"
9: {0x0001, 7}, // "0000001"
10: {0x0001, 12}, // "000000000001"
0: {0x0001, 4}, // "0001"
1: {0x0001, 3}, // "001"
2: {0x0001, 1}, // "1"
3: {0x0003, 3}, // "011"
4: {0x0003, 6}, // "000011"
5: {0x0003, 7}, // "0000011"
6: {0x0002, 3}, // "010"
7: {0x0002, 6}, // "000010"
8: {0x0002, 7}, // "0000010"
9: {0x0001, 7}, // "0000001"
}
// whiteEncodeTable2 represents Table 2 for a white run.
@ -983,7 +967,6 @@ const (
modeVL2 // Vertical-Left-2
modeVL3 // Vertical-Left-3
modeExt // Extension
modeEOL // End-of-Line
)
// COPY PASTE table.go END


@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gofuzz
// +build gofuzz
package tiff


@ -3,8 +3,8 @@
// license that can be found in the LICENSE file.
// Package lzw implements the Lempel-Ziv-Welch compressed data format,
// described in T. A. Welch, ``A Technique for High-Performance Data
// Compression'', Computer, 17(6) (June 1984), pp 8-19.
// described in T. A. Welch, A Technique for High-Performance Data
// Compression, Computer, 17(6) (June 1984), pp 8-19.
//
// In particular, it implements LZW as used by the TIFF file format, including
// an "off by one" algorithmic difference when compared to standard LZW.
@ -30,7 +30,7 @@ Aldus "off by one" algorithm.
The Go code doesn't read (invalid) TIFF files written by old versions of
libtiff, but the LZW algorithm in this package still differs from the one in
Go's standard package library to accomodate this "off by one" in valid TIFFs.
Go's standard package library to accommodate this "off by one" in valid TIFFs.
*/
import (


@ -404,6 +404,9 @@ func newDecoder(r io.Reader) (*decoder, error) {
p := make([]byte, 8)
if _, err := d.r.ReadAt(p, 0); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, err
}
switch string(p[0:4]) {


@ -8,6 +8,7 @@ import (
"bytes"
"compress/zlib"
"encoding/binary"
"errors"
"image"
"io"
"sort"
@ -338,6 +339,8 @@ func Encode(w io.Writer, m image.Image, opt *Options) error {
}
case cDeflate:
dst = zlib.NewWriter(&buf)
default:
return errors.New("tiff: unsupported compression")
}
pr := uint32(prNone)

vendor/golang.org/x/net/AUTHORS generated vendored

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.


@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.


@ -94,7 +94,7 @@ func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value u
func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
offset := int(ins.Off)
size := int(ins.Size)
size := ins.Size
return loadCommon(in, offset, size)
}
@ -121,7 +121,7 @@ func loadExtension(ins LoadExtension, in []byte) uint32 {
func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
offset := int(ins.Off) + int(regX)
size := int(ins.Size)
size := ins.Size
return loadCommon(in, offset, size)
}


@ -32,7 +32,7 @@ var DeadlineExceeded = context.DeadlineExceeded
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, CancelFunc(f)
return ctx, f
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
@ -46,7 +46,7 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, CancelFunc(f)
return ctx, f
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).


@ -85,7 +85,7 @@ func render1(w writer, n *Node) error {
if _, err := w.WriteString("<!--"); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
if err := escape(w, n.Data); err != nil {
return err
}
if _, err := w.WriteString("-->"); err != nil {
@ -96,7 +96,7 @@ func render1(w writer, n *Node) error {
if _, err := w.WriteString("<!DOCTYPE "); err != nil {
return err
}
if _, err := w.WriteString(n.Data); err != nil {
if err := escape(w, n.Data); err != nil {
return err
}
if n.Attr != nil {


@ -110,9 +110,9 @@ func (t Token) String() string {
case SelfClosingTagToken:
return "<" + t.tagString() + "/>"
case CommentToken:
return "<!--" + t.Data + "-->"
return "<!--" + EscapeString(t.Data) + "-->"
case DoctypeToken:
return "<!DOCTYPE " + t.Data + ">"
return "<!DOCTYPE " + EscapeString(t.Data) + ">"
}
return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}
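
Together with the render.go hunk above, this routes comment and doctype data through the package's escaping instead of writing it verbatim, so crafted comment contents can no longer inject raw markup into re-rendered output. A standalone illustration using the exported EscapeString (the input is made up):

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	// A comment body that tries to terminate the comment early and smuggle in a tag.
	data := "x--><script>alert(1)</script>"

	// Token.String previously concatenated data verbatim between "<!--" and "-->";
	// after this change the angle brackets are escaped, as shown here.
	fmt.Println("<!--" + html.EscapeString(data) + "-->")
	// Output: <!--x--&gt;&lt;script&gt;alert(1)&lt;/script&gt;-->
}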

vendor/golang.org/x/net/http2/.gitignore generated vendored Normal file

@ -0,0 +1,2 @@
*~
h2i/h2i

vendor/golang.org/x/net/http2/Dockerfile generated vendored Normal file

@ -0,0 +1,51 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#
FROM ubuntu:trusty
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y git-core build-essential wget
RUN apt-get install -y --no-install-recommends \
autotools-dev libtool pkg-config zlib1g-dev \
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
automake autoconf
# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
autoconf automake autotools-dev \
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
cython python3.4-dev python-setuptools
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install
WORKDIR /root
RUN wget https://curl.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig
CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]

vendor/golang.org/x/net/http2/Makefile generated vendored Normal file

@ -0,0 +1,3 @@
curlimage:
docker build -t gohttp2/curl .

vendor/golang.org/x/net/http2/ascii.go generated vendored Normal file

@ -0,0 +1,53 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "strings"
// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
// contains helper functions which may use Unicode-aware functions which would
// otherwise be unsafe and could introduce vulnerabilities if used improperly.
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
if len(s) != len(t) {
return false
}
for i := 0; i < len(s); i++ {
if lower(s[i]) != lower(t[i]) {
return false
}
}
return true
}
// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
if 'A' <= b && b <= 'Z' {
return b + ('a' - 'A')
}
return b
}
// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] < ' ' || s[i] > '~' {
return false
}
}
return true
}
// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
if !isASCIIPrint(s) {
return "", false
}
return strings.ToLower(s), true
}

vendor/golang.org/x/net/http2/ciphers.go generated vendored Normal file

@ -0,0 +1,641 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
// A list of the possible cipher suite ids. Taken from
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
// Reserved uint16 = 0x001C-1D
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
// Reserved uint16 = 0x0047-4F
// Reserved uint16 = 0x0050-58
// Reserved uint16 = 0x0059-5C
// Unassigned uint16 = 0x005D-5F
// Reserved uint16 = 0x0060-66
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
// Unassigned uint16 = 0x006E-83
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
// Unassigned uint16 = 0x00C6-FE
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
// Unassigned uint16 = 0x01-55,*
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
// Unassigned uint16 = 0x5601 - 0xC000
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
// Unassigned uint16 = 0xC0B0-FF
// Unassigned uint16 = 0xC1-CB,*
// Unassigned uint16 = 0xCC00-A7
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
switch cipher {
case cipher_TLS_NULL_WITH_NULL_NULL,
cipher_TLS_RSA_WITH_NULL_MD5,
cipher_TLS_RSA_WITH_NULL_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_RSA_WITH_RC4_128_MD5,
cipher_TLS_RSA_WITH_RC4_128_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_RSA_WITH_DES_CBC_SHA,
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_RC4_128_SHA,
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
cipher_TLS_KRB5_WITH_RC4_128_MD5,
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_PSK_WITH_NULL_SHA,
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_NULL_SHA256,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_PSK_WITH_RC4_128_SHA,
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_PSK_WITH_NULL_SHA256,
cipher_TLS_PSK_WITH_NULL_SHA384,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_AES_128_CCM,
cipher_TLS_RSA_WITH_AES_256_CCM,
cipher_TLS_RSA_WITH_AES_128_CCM_8,
cipher_TLS_RSA_WITH_AES_256_CCM_8,
cipher_TLS_PSK_WITH_AES_128_CCM,
cipher_TLS_PSK_WITH_AES_256_CCM,
cipher_TLS_PSK_WITH_AES_128_CCM_8,
cipher_TLS_PSK_WITH_AES_256_CCM_8:
return true
default:
return false
}
}
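
For orientation, a minimal standalone sketch of how a predicate in this style is typically applied to a negotiated TLS suite. isHTTP2ApprovedCipher and its tiny allow list are hypothetical stand-ins for the full Appendix A check implemented above, not part of this vendored file.

package main

import (
	"crypto/tls"
	"fmt"
)

// isHTTP2ApprovedCipher is a hypothetical, drastically shortened allow list;
// the real check above walks the entire RFC 7540 Appendix A block list.
func isHTTP2ApprovedCipher(suite uint16) bool {
	switch suite {
	case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
		return true
	}
	return false
}

func main() {
	// Pretend this came from tls.Conn.ConnectionState() after a handshake.
	state := tls.ConnectionState{CipherSuite: tls.TLS_RSA_WITH_AES_128_CBC_SHA}
	fmt.Println("approved for HTTP/2:", isHTTP2ApprovedCipher(state.CipherSuite))
}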

311
vendor/golang.org/x/net/http2/client_conn_pool.go generated vendored Normal file

@ -0,0 +1,311 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Transport code's client connection pooling.
package http2
import (
"context"
"crypto/tls"
"errors"
"net/http"
"sync"
)
// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
// GetClientConn returns a specific HTTP/2 connection (usually
// a TLS-TCP connection) to an HTTP/2 server. On success, the
// returned ClientConn accounts for the upcoming RoundTrip
// call, so the caller should not omit it. If the caller needs
// to, ClientConn.RoundTrip can be called with a bogus
// new(http.Request) to release the stream reservation.
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
MarkDead(*ClientConn)
}
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
// implementations which can close their idle connections.
type clientConnPoolIdleCloser interface {
ClientConnPool
closeIdleConnections()
}
var (
_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
_ clientConnPoolIdleCloser = noDialClientConnPool{}
)
// TODO: use singleflight for dialing and addConnCalls?
type clientConnPool struct {
t *Transport
mu sync.Mutex // TODO: maybe switch to RWMutex
// TODO: add support for sharing conns based on cert names
// (e.g. share conn for googleapis.com and appspot.com)
conns map[string][]*ClientConn // key is host:port
dialing map[string]*dialCall // currently in-flight dials
keys map[*ClientConn][]string
addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
}
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
return p.getClientConn(req, addr, dialOnMiss)
}
const (
dialOnMiss = true
noDialOnMiss = false
)
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
// TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
traceGetConn(req, addr)
const singleUse = true
cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
if err != nil {
return nil, err
}
return cc, nil
}
for {
p.mu.Lock()
for _, cc := range p.conns[addr] {
if cc.ReserveNewRequest() {
// When a connection is presented to us by the net/http package,
// the GetConn hook has already been called.
// Don't call it a second time here.
if !cc.getConnCalled {
traceGetConn(req, addr)
}
cc.getConnCalled = false
p.mu.Unlock()
return cc, nil
}
}
if !dialOnMiss {
p.mu.Unlock()
return nil, ErrNoCachedConn
}
traceGetConn(req, addr)
call := p.getStartDialLocked(req.Context(), addr)
p.mu.Unlock()
<-call.done
if shouldRetryDial(call, req) {
continue
}
cc, err := call.res, call.err
if err != nil {
return nil, err
}
if cc.ReserveNewRequest() {
return cc, nil
}
}
}
// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
_ incomparable
p *clientConnPool
// the context associated with the request
// that created this dialCall
ctx context.Context
done chan struct{} // closed when done
res *ClientConn // valid after done is closed
err error // valid after done is closed
}
// requires p.mu is held.
func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall {
if call, ok := p.dialing[addr]; ok {
// A dial is already in-flight. Don't start another.
return call
}
call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx}
if p.dialing == nil {
p.dialing = make(map[string]*dialCall)
}
p.dialing[addr] = call
go call.dial(call.ctx, addr)
return call
}
// run in its own goroutine.
func (c *dialCall) dial(ctx context.Context, addr string) {
const singleUse = false // shared conn
c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
c.p.mu.Lock()
delete(c.p.dialing, addr)
if c.err == nil {
c.p.addConnLocked(addr, c.res)
}
c.p.mu.Unlock()
close(c.done)
}
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The return value reports whether c was used.
// c is never closed.
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
p.mu.Lock()
for _, cc := range p.conns[key] {
if cc.CanTakeNewRequest() {
p.mu.Unlock()
return false, nil
}
}
call, dup := p.addConnCalls[key]
if !dup {
if p.addConnCalls == nil {
p.addConnCalls = make(map[string]*addConnCall)
}
call = &addConnCall{
p: p,
done: make(chan struct{}),
}
p.addConnCalls[key] = call
go call.run(t, key, c)
}
p.mu.Unlock()
<-call.done
if call.err != nil {
return false, call.err
}
return !dup, nil
}
type addConnCall struct {
_ incomparable
p *clientConnPool
done chan struct{} // closed when done
err error
}
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
cc, err := t.NewClientConn(tc)
p := c.p
p.mu.Lock()
if err != nil {
c.err = err
} else {
cc.getConnCalled = true // already called by the net/http package
p.addConnLocked(key, cc)
}
delete(p.addConnCalls, key)
p.mu.Unlock()
close(c.done)
}
// p.mu must be held
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
for _, v := range p.conns[key] {
if v == cc {
return
}
}
if p.conns == nil {
p.conns = make(map[string][]*ClientConn)
}
if p.keys == nil {
p.keys = make(map[*ClientConn][]string)
}
p.conns[key] = append(p.conns[key], cc)
p.keys[cc] = append(p.keys[cc], key)
}
func (p *clientConnPool) MarkDead(cc *ClientConn) {
p.mu.Lock()
defer p.mu.Unlock()
for _, key := range p.keys[cc] {
vv, ok := p.conns[key]
if !ok {
continue
}
newList := filterOutClientConn(vv, cc)
if len(newList) > 0 {
p.conns[key] = newList
} else {
delete(p.conns, key)
}
}
delete(p.keys, cc)
}
func (p *clientConnPool) closeIdleConnections() {
p.mu.Lock()
defer p.mu.Unlock()
// TODO: don't close a cc if it was just added to the pool
// milliseconds ago and has never been used. There's currently
// a small race window with the HTTP/1 Transport's integration
// where it can add an idle conn just before using it, and
// somebody else can concurrently call CloseIdleConns and
// break some caller's RoundTrip.
for _, vv := range p.conns {
for _, cc := range vv {
cc.closeIfIdle()
}
}
}
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
out := in[:0]
for _, v := range in {
if v != exclude {
out = append(out, v)
}
}
// If we filtered it out, zero out the last item to prevent
// the GC from seeing it.
if len(in) != len(out) {
in[len(in)-1] = nil
}
return out
}
// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
return p.getClientConn(req, addr, noDialOnMiss)
}
// shouldRetryDial reports whether the current request should
// retry dialing after the call finished unsuccessfully, for example
// if the dial was canceled because of a context cancellation or
// deadline expiry.
func shouldRetryDial(call *dialCall, req *http.Request) bool {
if call.err == nil {
// No error, no need to retry
return false
}
if call.ctx == req.Context() {
// If the call has the same context as the request, the dial
// should not be retried, since any cancellation will have come
// from this request.
return false
}
if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
// If the call error is not because of a context cancellation or a deadline expiry,
// the dial should not be retried.
return false
}
// Only retry if the error is a context cancellation error or deadline expiry
// and the context associated with the call was canceled or expired.
return call.ctx.Err() != nil
}
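
A brief usage sketch (not part of this diff): callers never touch clientConnPool directly; the exported http2.Transport owns the pool, coalesces dials via dialCall, and hands back a reserved connection for each RoundTrip, so back-to-back requests to the same host reuse one conn.

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// The hostname is a placeholder; any HTTP/2-capable server works.
	t := &http2.Transport{
		TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
	}
	client := &http.Client{Transport: t}
	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			fmt.Println("request failed:", err)
			return
		}
		resp.Body.Close()
		// Both iterations are served over the same pooled ClientConn.
		fmt.Println(resp.Proto)
	}
}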

146
vendor/golang.org/x/net/http2/databuffer.go generated vendored Normal file

@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
"sync"
)
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
chunks [][]byte
r int // next byte to read is chunks[0][r]
w int // next byte to write is chunks[len(chunks)-1][w]
size int // total buffered bytes
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
var errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
if b.size == 0 {
return 0, errReadEmpty
}
var ntotal int
for len(p) > 0 && b.size > 0 {
readFrom := b.bytesFromFirstChunk()
n := copy(p, readFrom)
p = p[n:]
ntotal += n
b.r += n
b.size -= n
// If the first chunk has been consumed, advance to the next chunk.
if b.r == len(b.chunks[0]) {
putDataBufferChunk(b.chunks[0])
end := len(b.chunks) - 1
copy(b.chunks[:end], b.chunks[1:])
b.chunks[end] = nil
b.chunks = b.chunks[:end]
b.r = 0
}
}
return ntotal, nil
}
func (b *dataBuffer) bytesFromFirstChunk() []byte {
if len(b.chunks) == 1 {
return b.chunks[0][b.r:b.w]
}
return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
return b.size
}
// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
ntotal := len(p)
for len(p) > 0 {
// If the last chunk is empty, allocate a new chunk. Try to allocate
// enough to fully copy p plus any additional bytes we expect to
// receive. However, this may allocate less than len(p).
want := int64(len(p))
if b.expected > want {
want = b.expected
}
chunk := b.lastChunkOrAlloc(want)
n := copy(chunk[b.w:], p)
p = p[n:]
b.w += n
b.size += n
b.expected -= int64(n)
}
return ntotal, nil
}
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
if len(b.chunks) != 0 {
last := b.chunks[len(b.chunks)-1]
if b.w < len(last) {
return last
}
}
chunk := getDataBufferChunk(want)
b.chunks = append(b.chunks, chunk)
b.w = 0
return chunk
}
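
To make the chunk sizing concrete, here is a standalone re-implementation (an illustration, not the package code) of how getDataBufferChunk above selects the smallest pooled size class that fits a requested size.

package main

import "fmt"

// sizeClasses mirrors dataChunkSizeClasses above: 1K, 2K, 4K, 8K, 16K.
var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

// pickClass returns the smallest class that holds size, capped at the largest class.
func pickClass(size int64) int {
	i := 0
	for ; i < len(sizeClasses)-1; i++ {
		if size <= int64(sizeClasses[i]) {
			break
		}
	}
	return sizeClasses[i]
}

func main() {
	fmt.Println(pickClass(100))   // 1024
	fmt.Println(pickClass(3000))  // 4096
	fmt.Println(pickClass(64000)) // 16384: oversized requests use the largest class
}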

145
vendor/golang.org/x/net/http2/errors.go generated vendored Normal file

@ -0,0 +1,145 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
)
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32
const (
ErrCodeNo ErrCode = 0x0
ErrCodeProtocol ErrCode = 0x1
ErrCodeInternal ErrCode = 0x2
ErrCodeFlowControl ErrCode = 0x3
ErrCodeSettingsTimeout ErrCode = 0x4
ErrCodeStreamClosed ErrCode = 0x5
ErrCodeFrameSize ErrCode = 0x6
ErrCodeRefusedStream ErrCode = 0x7
ErrCodeCancel ErrCode = 0x8
ErrCodeCompression ErrCode = 0x9
ErrCodeConnect ErrCode = 0xa
ErrCodeEnhanceYourCalm ErrCode = 0xb
ErrCodeInadequateSecurity ErrCode = 0xc
ErrCodeHTTP11Required ErrCode = 0xd
)
var errCodeName = map[ErrCode]string{
ErrCodeNo: "NO_ERROR",
ErrCodeProtocol: "PROTOCOL_ERROR",
ErrCodeInternal: "INTERNAL_ERROR",
ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
ErrCodeStreamClosed: "STREAM_CLOSED",
ErrCodeFrameSize: "FRAME_SIZE_ERROR",
ErrCodeRefusedStream: "REFUSED_STREAM",
ErrCodeCancel: "CANCEL",
ErrCodeCompression: "COMPRESSION_ERROR",
ErrCodeConnect: "CONNECT_ERROR",
ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
}
func (e ErrCode) String() string {
if s, ok := errCodeName[e]; ok {
return s
}
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}
func (e ErrCode) stringToken() string {
if s, ok := errCodeName[e]; ok {
return s
}
return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
}
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
// StreamError is an error that only affects one stream within an
// HTTP/2 connection.
type StreamError struct {
StreamID uint32
Code ErrCode
Cause error // optional additional detail
}
// errFromPeer is a sentinel error value for StreamError.Cause to
// indicate that the StreamError was sent from the peer over the wire
// and wasn't locally generated in the Transport.
var errFromPeer = errors.New("received from peer")
func streamError(id uint32, code ErrCode) StreamError {
return StreamError{StreamID: id, Code: code}
}
func (e StreamError) Error() string {
if e.Cause != nil {
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
}
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}
// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
Code ErrCode // the ConnectionError error code
Reason string // additional reason
}
func (e connError) Error() string {
return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}
type pseudoHeaderError string
func (e pseudoHeaderError) Error() string {
return fmt.Sprintf("invalid pseudo-header %q", string(e))
}
type duplicatePseudoHeaderError string
func (e duplicatePseudoHeaderError) Error() string {
return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}
type headerFieldNameError string
func (e headerFieldNameError) Error() string {
return fmt.Sprintf("invalid header field name %q", string(e))
}
type headerFieldValueError string
func (e headerFieldValueError) Error() string {
return fmt.Sprintf("invalid header field value for %q", string(e))
}
var (
errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
errPseudoAfterRegular = errors.New("pseudo header field after regular")
)
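
A hedged sketch of how a caller can tell these exported error kinds apart; note that in practice a transport error may arrive wrapped (for example inside *url.Error), which is why errors.As is used rather than a plain type switch.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/net/http2"
)

func describe(err error) string {
	var se http2.StreamError
	if errors.As(err, &se) {
		// Only one stream was reset; the connection is still usable.
		return fmt.Sprintf("stream %d reset: %v", se.StreamID, se.Code)
	}
	var ce http2.ConnectionError
	if errors.As(err, &ce) {
		// The whole connection is being torn down.
		return fmt.Sprintf("connection error: %v", http2.ErrCode(ce))
	}
	return "other error: " + err.Error()
}

func main() {
	err := http2.StreamError{StreamID: 3, Code: http2.ErrCodeRefusedStream}
	fmt.Println(describe(err))
}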

52
vendor/golang.org/x/net/http2/flow.go generated vendored Normal file

@ -0,0 +1,52 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Flow control
package http2
// flow is the flow control window's size.
type flow struct {
_ incomparable
// n is the number of DATA bytes we're allowed to send.
// A flow is kept both on a conn and a per-stream.
n int32
// conn points to the shared connection-level flow that is
// shared by all streams on that conn. It is nil for the flow
// that's on the conn directly.
conn *flow
}
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
func (f *flow) available() int32 {
n := f.n
if f.conn != nil && f.conn.n < n {
n = f.conn.n
}
return n
}
func (f *flow) take(n int32) {
if n > f.available() {
panic("internal error: took too much")
}
f.n -= n
if f.conn != nil {
f.conn.n -= n
}
}
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
sum := f.n + n
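// Overflow check: when f.n > 0 the sum must come out greater than n, and when
// f.n <= 0 it must not; a mismatch means the signed int32 addition wrapped.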
if (sum > n) == (f.n > 0) {
f.n = sum
return true
}
return false
}

1649
vendor/golang.org/x/net/http2/frame.go generated vendored Normal file

File diff suppressed because it is too large

30
vendor/golang.org/x/net/http2/go111.go generated vendored Normal file

@ -0,0 +1,30 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.11
// +build go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
}
return nil
}

27
vendor/golang.org/x/net/http2/go115.go generated vendored Normal file

@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.15
// +build go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
// connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
dialer := &tls.Dialer{
Config: cfg,
}
cn, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
return tlsCn, nil
}
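
For reference, the same tls.Dialer pattern in a standalone form (the target address is a placeholder):

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	d := &tls.Dialer{Config: &tls.Config{MinVersion: tls.VersionTLS12}}
	conn, err := d.DialContext(ctx, "tcp", "example.com:443")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	// DialContext documents that the returned net.Conn is always a *tls.Conn.
	fmt.Println("TLS version:", conn.(*tls.Conn).ConnectionState().Version)
}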

17
vendor/golang.org/x/net/http2/go118.go generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.18
// +build go1.18
package http2
import (
"crypto/tls"
"net"
)
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
return tc.NetConn()
}

170
vendor/golang.org/x/net/http2/gotrack.go generated vendored Normal file

@ -0,0 +1,170 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Defensive debug-only utility to track that functions run on the
// goroutine that they're supposed to.
package http2
import (
"bytes"
"errors"
"fmt"
"os"
"runtime"
"strconv"
"sync"
)
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
if !DebugGoroutines {
return 0
}
return goroutineLock(curGoroutineID())
}
func (g goroutineLock) check() {
if !DebugGoroutines {
return
}
if curGoroutineID() != uint64(g) {
panic("running on the wrong goroutine")
}
}
func (g goroutineLock) checkNotOn() {
if !DebugGoroutines {
return
}
if curGoroutineID() == uint64(g) {
panic("running on the wrong goroutine")
}
}
var goroutineSpace = []byte("goroutine ")
func curGoroutineID() uint64 {
bp := littleBuf.Get().(*[]byte)
defer littleBuf.Put(bp)
b := *bp
b = b[:runtime.Stack(b, false)]
// Parse the 4707 out of "goroutine 4707 ["
b = bytes.TrimPrefix(b, goroutineSpace)
i := bytes.IndexByte(b, ' ')
if i < 0 {
panic(fmt.Sprintf("No space found in %q", b))
}
b = b[:i]
n, err := parseUintBytes(b, 10, 64)
if err != nil {
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
}
return n
}
var littleBuf = sync.Pool{
New: func() interface{} {
buf := make([]byte, 64)
return &buf
},
}
// parseUintBytes is like strconv.ParseUint, but using a []byte.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
var cutoff, maxVal uint64
if bitSize == 0 {
bitSize = int(strconv.IntSize)
}
s0 := s
switch {
case len(s) < 1:
err = strconv.ErrSyntax
goto Error
case 2 <= base && base <= 36:
// valid base; nothing to do
case base == 0:
// Look for octal, hex prefix.
switch {
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
base = 16
s = s[2:]
if len(s) < 1 {
err = strconv.ErrSyntax
goto Error
}
case s[0] == '0':
base = 8
default:
base = 10
}
default:
err = errors.New("invalid base " + strconv.Itoa(base))
goto Error
}
n = 0
cutoff = cutoff64(base)
maxVal = 1<<uint(bitSize) - 1
for i := 0; i < len(s); i++ {
var v byte
d := s[i]
switch {
case '0' <= d && d <= '9':
v = d - '0'
case 'a' <= d && d <= 'z':
v = d - 'a' + 10
case 'A' <= d && d <= 'Z':
v = d - 'A' + 10
default:
n = 0
err = strconv.ErrSyntax
goto Error
}
if int(v) >= base {
n = 0
err = strconv.ErrSyntax
goto Error
}
if n >= cutoff {
// n*base overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
}
n *= uint64(base)
n1 := n + uint64(v)
if n1 < n || n1 > maxVal {
// n+v overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
}
n = n1
}
return n, nil
Error:
return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}
// Return the first number n such that n*base >= 1<<64.
func cutoff64(base int) uint64 {
if base < 2 {
return 0
}
return (1<<64-1)/uint64(base) + 1
}
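
The goroutine-ID trick above can be illustrated in isolation; this is a sketch mirroring curGoroutineID (parsing the ID out of the first runtime.Stack line), not a supported API.

package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

func goroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	// The first line looks like "goroutine 42 [running]:".
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	i := bytes.IndexByte(buf, ' ')
	if i < 0 {
		panic(fmt.Sprintf("unexpected stack header %q", buf))
	}
	id, err := strconv.ParseUint(string(buf[:i]), 10, 64)
	if err != nil {
		panic(err)
	}
	return id
}

func main() {
	fmt.Println("running on goroutine", goroutineID())
}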

225
vendor/golang.org/x/net/http2/h2c/h2c.go generated vendored Normal file

@ -0,0 +1,225 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package h2c implements the unencrypted "h2c" form of HTTP/2.
//
// The h2c protocol is the non-TLS version of HTTP/2 which is not available from
// net/http or golang.org/x/net/http2.
package h2c
import (
"bufio"
"bytes"
"encoding/base64"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/textproto"
"os"
"strings"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2"
)
var (
http2VerboseLogs bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") {
http2VerboseLogs = true
}
}
// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic
// that should be h2c traffic. There are two ways to begin an h2c connection
// (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this
// works by starting an h2c connection with a string of bytes that is valid
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occur we hijack the HTTP/1 connection,
// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
}
// NewHandler returns an http.Handler that wraps h, intercepting any h2c
// traffic. If a request is an h2c connection, it's hijacked and redirected to
// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This
// works because h2c is designed to be parseable as valid HTTP/1, but ignored by
// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1
// compatible parts of the Go http library to parse and recognize h2c requests.
// Once a request is recognized as h2c, we hijack the connection and convert it
// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn
// understands HTTP/2 except for the h2c part of it.)
//
// The first request on an h2c connection is read entirely into memory before
// the Handler is called. To limit the memory consumed by this request, wrap
// the result of NewHandler in an http.MaxBytesHandler.
func NewHandler(h http.Handler, s *http2.Server) http.Handler {
return &h2cHandler{
Handler: h,
s: s,
}
}
// ServeHTTP implements the h2c support that is enabled by h2c.NewHandler.
func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Handle h2c with prior knowledge (RFC 7540 Section 3.4)
if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" {
if http2VerboseLogs {
log.Print("h2c: attempting h2c with prior knowledge.")
}
conn, err := initH2CWithPriorKnowledge(w)
if err != nil {
if http2VerboseLogs {
log.Printf("h2c: error h2c with prior knowledge: %v", err)
}
return
}
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{
Context: r.Context(),
Handler: s.Handler,
SawClientPreface: true,
})
return
}
// Handle Upgrade to h2c (RFC 7540 Section 3.2)
if isH2CUpgrade(r.Header) {
conn, settings, err := h2cUpgrade(w, r)
if err != nil {
if http2VerboseLogs {
log.Printf("h2c: error h2c upgrade: %v", err)
}
return
}
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{
Context: r.Context(),
Handler: s.Handler,
UpgradeRequest: r,
Settings: settings,
})
return
}
s.Handler.ServeHTTP(w, r)
return
}
// initH2CWithPriorKnowledge implements creating an h2c connection with prior
// knowledge (Section 3.4) and creates a net.Conn suitable for http2.ServeConn.
// All we have to do is look for the client preface that is supposed to be part
// of the body, and re-forward the client preface on the net.Conn this function
// creates.
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
hijacker, ok := w.(http.Hijacker)
if !ok {
return nil, errors.New("h2c: connection does not support Hijack")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
return nil, err
}
const expectedBody = "SM\r\n\r\n"
buf := make([]byte, len(expectedBody))
n, err := io.ReadFull(rw, buf)
if err != nil {
return nil, fmt.Errorf("h2c: error reading client preface: %s", err)
}
if string(buf[:n]) == expectedBody {
return newBufConn(conn, rw), nil
}
conn.Close()
return nil, errors.New("h2c: invalid client preface")
}
// h2cUpgrade establishes an h2c connection using the HTTP/1 upgrade (Section 3.2).
func h2cUpgrade(w http.ResponseWriter, r *http.Request) (_ net.Conn, settings []byte, err error) {
settings, err = getH2Settings(r.Header)
if err != nil {
return nil, nil, err
}
hijacker, ok := w.(http.Hijacker)
if !ok {
return nil, nil, errors.New("h2c: connection does not support Hijack")
}
body, _ := io.ReadAll(r.Body)
r.Body = io.NopCloser(bytes.NewBuffer(body))
conn, rw, err := hijacker.Hijack()
if err != nil {
return nil, nil, err
}
rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" +
"Connection: Upgrade\r\n" +
"Upgrade: h2c\r\n\r\n"))
return newBufConn(conn, rw), settings, nil
}
// isH2CUpgrade returns true if the header properly requests an upgrade to h2c
// as specified by Section 3.2.
func isH2CUpgrade(h http.Header) bool {
return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") &&
httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings")
}
// getH2Settings returns the settings in the HTTP2-Settings header.
func getH2Settings(h http.Header) ([]byte, error) {
vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")]
if !ok {
return nil, errors.New("missing HTTP2-Settings header")
}
if len(vals) != 1 {
return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals)
}
settings, err := base64.RawURLEncoding.DecodeString(vals[0])
if err != nil {
return nil, err
}
return settings, nil
}
func newBufConn(conn net.Conn, rw *bufio.ReadWriter) net.Conn {
rw.Flush()
if rw.Reader.Buffered() == 0 {
// If there's no buffered data to be read,
// we can just discard the bufio.ReadWriter.
return conn
}
return &bufConn{conn, rw.Reader}
}
// bufConn wraps a net.Conn, but reads drain the bufio.Reader first.
type bufConn struct {
net.Conn
*bufio.Reader
}
func (c *bufConn) Read(p []byte) (int, error) {
if c.Reader == nil {
return c.Conn.Read(p)
}
n := c.Reader.Buffered()
if n == 0 {
c.Reader = nil
return c.Conn.Read(p)
}
if n < len(p) {
p = p[:n]
}
return c.Reader.Read(p)
}
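
Typical use of this package (a sketch, not part of the diff): wrap an ordinary handler so the same cleartext listener serves both HTTP/1.1 and h2c.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "hello over %s\n", r.Proto) // "HTTP/2.0" for h2c clients
	})
	h2s := &http2.Server{}
	srv := &http.Server{
		Addr:    ":8080", // placeholder address
		Handler: h2c.NewHandler(mux, h2s),
	}
	log.Fatal(srv.ListenAndServe())
}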

87
vendor/golang.org/x/net/http2/headermap.go generated vendored Normal file

@ -0,0 +1,87 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"net/http"
"sync"
)
var (
commonBuildOnce sync.Once
commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
)
func buildCommonHeaderMapsOnce() {
commonBuildOnce.Do(buildCommonHeaderMaps)
}
func buildCommonHeaderMaps() {
common := []string{
"accept",
"accept-charset",
"accept-encoding",
"accept-language",
"accept-ranges",
"age",
"access-control-allow-origin",
"allow",
"authorization",
"cache-control",
"content-disposition",
"content-encoding",
"content-language",
"content-length",
"content-location",
"content-range",
"content-type",
"cookie",
"date",
"etag",
"expect",
"expires",
"from",
"host",
"if-match",
"if-modified-since",
"if-none-match",
"if-unmodified-since",
"last-modified",
"link",
"location",
"max-forwards",
"proxy-authenticate",
"proxy-authorization",
"range",
"referer",
"refresh",
"retry-after",
"server",
"set-cookie",
"strict-transport-security",
"trailer",
"transfer-encoding",
"user-agent",
"vary",
"via",
"www-authenticate",
}
commonLowerHeader = make(map[string]string, len(common))
commonCanonHeader = make(map[string]string, len(common))
for _, v := range common {
chk := http.CanonicalHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
}
}
func lowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s, true
}
return asciiToLower(v)
}

240
vendor/golang.org/x/net/http2/hpack/encode.go generated vendored Normal file
View file

@ -0,0 +1,240 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"io"
)
const (
uint32Max = ^uint32(0)
initialHeaderTableSize = 4096
)
type Encoder struct {
dynTab dynamicTable
// minSize is the minimum table size set by
// SetMaxDynamicTableSize after the previous Header Table Size
// Update.
minSize uint32
// maxSizeLimit is the maximum table size this encoder
// supports. This will protect the encoder from too large
// size.
maxSizeLimit uint32
// tableSizeUpdate indicates whether "Header Table Size
// Update" is required.
tableSizeUpdate bool
w io.Writer
buf []byte
}
// NewEncoder returns a new Encoder which performs HPACK encoding. The
// encoded data is written to w.
func NewEncoder(w io.Writer) *Encoder {
e := &Encoder{
minSize: uint32Max,
maxSizeLimit: initialHeaderTableSize,
tableSizeUpdate: false,
w: w,
}
e.dynTab.table.init()
e.dynTab.setMaxSize(initialHeaderTableSize)
return e
}
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
e.buf = e.buf[:0]
if e.tableSizeUpdate {
e.tableSizeUpdate = false
if e.minSize < e.dynTab.maxSize {
e.buf = appendTableSize(e.buf, e.minSize)
}
e.minSize = uint32Max
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
}
idx, nameValueMatch := e.searchTable(f)
if nameValueMatch {
e.buf = appendIndexed(e.buf, idx)
} else {
indexing := e.shouldIndex(f)
if indexing {
e.dynTab.add(f)
}
if idx == 0 {
e.buf = appendNewName(e.buf, f, indexing)
} else {
e.buf = appendIndexedName(e.buf, f, idx, indexing)
}
}
n, err := e.w.Write(e.buf)
if err == nil && n != len(e.buf) {
err = io.ErrShortWrite
}
return err
}
// searchTable searches f in both static and dynamic header tables.
// The static header table is searched first. Only when there is no
// exact match for both name and value, the dynamic header table is
// then searched. If there is no match, i is 0. If both name and value
// match, i is the matched index and nameValueMatch becomes true. If
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
i, nameValueMatch = staticTable.search(f)
if nameValueMatch {
return i, true
}
j, nameValueMatch := e.dynTab.table.search(f)
if nameValueMatch || (i == 0 && j != 0) {
return j + uint64(staticTable.len()), nameValueMatch
}
return i, false
}
// SetMaxDynamicTableSize changes the dynamic header table size to v.
// The actual size is bounded by the value passed to
// SetMaxDynamicTableSizeLimit.
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
if v > e.maxSizeLimit {
v = e.maxSizeLimit
}
if v < e.minSize {
e.minSize = v
}
e.tableSizeUpdate = true
e.dynTab.setMaxSize(v)
}
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same as the default dynamic header table
// size described in the HPACK specification. If the current maximum
// dynamic header table size is strictly greater than v, "Header Table
// Size Update" will be done in the next WriteField call and the
// maximum dynamic header table size is truncated to v.
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
e.maxSizeLimit = v
if e.dynTab.maxSize > v {
e.tableSizeUpdate = true
e.dynTab.setMaxSize(v)
}
}
// shouldIndex reports whether f should be indexed.
func (e *Encoder) shouldIndex(f HeaderField) bool {
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
}
// appendIndexed appends index i, as encoded in "Indexed Header Field"
// representation, to dst and returns the extended buffer.
func appendIndexed(dst []byte, i uint64) []byte {
first := len(dst)
dst = appendVarInt(dst, 7, i)
dst[first] |= 0x80
return dst
}
// appendNewName appends f, as encoded in one of "Literal Header field
// - New Name" representation variants, to dst and returns the
// extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
dst = appendHpackString(dst, f.Name)
return appendHpackString(dst, f.Value)
}
// appendIndexedName appends f and index i referring indexed name
// entry, as encoded in one of "Literal Header field - Indexed Name"
// representation variants, to dst and returns the extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
first := len(dst)
var n byte
if indexing {
n = 6
} else {
n = 4
}
dst = appendVarInt(dst, n, i)
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
return appendHpackString(dst, f.Value)
}
// appendTableSize appends v, as encoded in "Header Table Size Update"
// representation, to dst and returns the extended buffer.
func appendTableSize(dst []byte, v uint32) []byte {
first := len(dst)
dst = appendVarInt(dst, 5, uint64(v))
dst[first] |= 0x20
return dst
}
// appendVarInt appends i, as encoded in variable integer form using n
// bit prefix, to dst and returns the extended buffer.
//
// See
// https://httpwg.org/specs/rfc7541.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
k := uint64((1 << n) - 1)
if i < k {
return append(dst, byte(i))
}
dst = append(dst, byte(k))
i -= k
for ; i >= 128; i >>= 7 {
dst = append(dst, byte(0x80|(i&0x7f)))
}
return append(dst, byte(i))
}
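// Worked example (cf. RFC 7541, Appendix C.1.2), assuming appendVarInt as
// defined above: 10 fits in a 5-bit prefix, while 1337 needs the prefix byte
// 0x1f (all prefix bits set) followed by the remainder 1306 in 7-bit groups:
//
//	appendVarInt(nil, 5, 10)   // => [0x0a]
//	appendVarInt(nil, 5, 1337) // => [0x1f, 0x9a, 0x0a]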
// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces a strictly
// shorter byte string.
func appendHpackString(dst []byte, s string) []byte {
huffmanLength := HuffmanEncodeLength(s)
if huffmanLength < uint64(len(s)) {
first := len(dst)
dst = appendVarInt(dst, 7, huffmanLength)
dst = AppendHuffmanString(dst, s)
dst[first] |= 0x80
} else {
dst = appendVarInt(dst, 7, uint64(len(s)))
dst = append(dst, s...)
}
return dst
}
// encodeTypeByte returns type byte. If sensitive is true, type byte
// for "Never Indexed" representation is returned. If sensitive is
// false and indexing is true, type byte for "Incremental Indexing"
// representation is returned. Otherwise, type byte for "Without
// Indexing" is returned.
func encodeTypeByte(indexing, sensitive bool) byte {
if sensitive {
return 0x10
}
if indexing {
return 0x40
}
return 0
}
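// A minimal usage sketch, assuming only the exported API above: encode a few
// header fields into an in-memory buffer. Fields marked Sensitive are written
// as "Never Indexed" and never enter the dynamic table.
//
//	var buf bytes.Buffer
//	enc := NewEncoder(&buf)
//	_ = enc.WriteField(HeaderField{Name: ":method", Value: "GET"})
//	_ = enc.WriteField(HeaderField{Name: "cookie", Value: "secret", Sensitive: true})
//	// buf.Bytes() now holds the HPACK-encoded header block.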

504
vendor/golang.org/x/net/http2/hpack/hpack.go generated vendored Normal file
View file

@ -0,0 +1,504 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hpack implements HPACK, a compression format for
// efficiently representing HTTP header fields in the context of HTTP/2.
//
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
package hpack
import (
"bytes"
"errors"
"fmt"
)
// A DecodingError is something the spec defines as a decoding error.
type DecodingError struct {
Err error
}
func (de DecodingError) Error() string {
return fmt.Sprintf("decoding error: %v", de.Err)
}
// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
type InvalidIndexError int
func (e InvalidIndexError) Error() string {
return fmt.Sprintf("invalid indexed representation index %d", int(e))
}
// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
Name, Value string
// Sensitive means that this header field should never be
// indexed.
Sensitive bool
}
// IsPseudo reports whether the header field is an http2 pseudo header.
// That is, it reports whether it starts with a colon.
// It is not otherwise guaranteed to be a valid pseudo header field,
// though.
func (hf HeaderField) IsPseudo() bool {
return len(hf.Name) != 0 && hf.Name[0] == ':'
}
func (hf HeaderField) String() string {
var suffix string
if hf.Sensitive {
suffix = " (sensitive)"
}
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}
// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
// https://httpwg.org/specs/rfc7541.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of
// its entries. The size of an entry is the sum of its name's
// length in octets (as defined in Section 5.2), its value's
// length in octets (see Section 5.2), plus 32. The size of
// an entry is calculated using the length of the name and
// value without any Huffman encoding applied."
// This can overflow if somebody makes a large HeaderField
// Name and/or Value by hand, but we don't care, because that
// won't happen on the wire because the encoding doesn't allow
// it.
return uint32(len(hf.Name) + len(hf.Value) + 32)
}
// A Decoder is the decoding context for incremental processing of
// header blocks.
type Decoder struct {
dynTab dynamicTable
emit func(f HeaderField)
emitEnabled bool // whether calls to emit are enabled
maxStrLen int // 0 means unlimited
// buf is the unparsed buffer. It's only written to
// saveBuf if it was truncated in the middle of a header
// block. Because it's usually not owned, we can only
// process it under Write.
buf []byte // not owned; only valid during Write
// saveBuf is previous data passed to Write which we weren't able
// to fully parse before. Unlike buf, we own this data.
saveBuf bytes.Buffer
firstField bool // processing the first field of the header block
}
// NewDecoder returns a new decoder with the provided maximum dynamic
// table size. The emitFunc will be called for each valid field
// parsed, in the same goroutine as calls to Write, before Write returns.
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
d := &Decoder{
emit: emitFunc,
emitEnabled: true,
firstField: true,
}
d.dynTab.table.init()
d.dynTab.allowedMaxSize = maxDynamicTableSize
d.dynTab.setMaxSize(maxDynamicTableSize)
return d
}
// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")
// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
d.maxStrLen = n
}
// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
d.emit = emitFunc
}
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
// underlying buffers for garbage reasons.
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
d.dynTab.setMaxSize(v)
}
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
d.dynTab.allowedMaxSize = v
}
type dynamicTable struct {
// https://httpwg.org/specs/rfc7541.html#rfc.section.2.3.2
table headerFieldTable
size uint32 // in bytes
maxSize uint32 // current maxSize
allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
func (dt *dynamicTable) setMaxSize(v uint32) {
dt.maxSize = v
dt.evict()
}
func (dt *dynamicTable) add(f HeaderField) {
dt.table.addEntry(f)
dt.size += f.Size()
dt.evict()
}
// If we're too big, evict old stuff.
func (dt *dynamicTable) evict() {
var n int
for dt.size > dt.maxSize && n < dt.table.len() {
dt.size -= dt.table.ents[n].Size()
n++
}
dt.table.evictOldest(n)
}
func (d *Decoder) maxTableIndex() int {
// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
// the dynamic table to 2^32 bytes, where each entry will occupy more than
// one byte. Further, the staticTable has a fixed, small length.
return d.dynTab.table.len() + staticTable.len()
}
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
// See Section 2.3.3.
if i == 0 {
return
}
if i <= uint64(staticTable.len()) {
return staticTable.ents[i-1], true
}
if i > uint64(d.maxTableIndex()) {
return
}
// In the dynamic table, newer entries have lower indices.
// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
// the reversed dynamic table.
dt := d.dynTab.table
return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}
// Decode decodes an entire block.
//
// TODO: remove this method and make it incremental later? This is
// easier for debugging now.
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
var hf []HeaderField
saveFunc := d.emit
defer func() { d.emit = saveFunc }()
d.emit = func(f HeaderField) { hf = append(hf, f) }
if _, err := d.Write(p); err != nil {
return nil, err
}
if err := d.Close(); err != nil {
return nil, err
}
return hf, nil
}
// Close declares that the decoding is complete and resets the Decoder
// to be reused again for a new header block. If there is any remaining
// data in the decoder's buffer, Close returns an error.
func (d *Decoder) Close() error {
if d.saveBuf.Len() > 0 {
d.saveBuf.Reset()
return DecodingError{errors.New("truncated headers")}
}
d.firstField = true
return nil
}
func (d *Decoder) Write(p []byte) (n int, err error) {
if len(p) == 0 {
// Prevent state machine CPU attacks (making us redo
// work up to the point of finding out we don't have
// enough data)
return
}
// Only copy the data if we have to. Optimistically assume
// that p will contain a complete header block.
if d.saveBuf.Len() == 0 {
d.buf = p
} else {
d.saveBuf.Write(p)
d.buf = d.saveBuf.Bytes()
d.saveBuf.Reset()
}
for len(d.buf) > 0 {
err = d.parseHeaderFieldRepr()
if err == errNeedMore {
// Extra paranoia, making sure saveBuf won't
// get too large. All the varint and string
// reading code earlier should already catch
// overlong things and return ErrStringLength,
// but keep this as a last resort.
const varIntOverhead = 8 // conservative
if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
return 0, ErrStringLength
}
d.saveBuf.Write(d.buf)
return len(p), nil
}
d.firstField = false
if err != nil {
break
}
}
return len(p), err
}
// errNeedMore is an internal sentinel error value that means the
// buffer is truncated and we need to read more data before we can
// continue parsing.
var errNeedMore = errors.New("need more data")
type indexType int
const (
indexedTrue indexType = iota
indexedFalse
indexedNever
)
func (v indexType) indexed() bool { return v == indexedTrue }
func (v indexType) sensitive() bool { return v == indexedNever }
// returns errNeedMore if there isn't enough data available.
// any other error is fatal.
// consumes d.buf iff it returns nil.
// precondition: must be called with len(d.buf) > 0
func (d *Decoder) parseHeaderFieldRepr() error {
b := d.buf[0]
switch {
case b&128 != 0:
// Indexed representation.
// High bit set?
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.1
return d.parseFieldIndexed()
case b&192 == 64:
// 6.2.1 Literal Header Field with Incremental Indexing
// 0b01xxxxxx: top two bits are 01
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1
return d.parseFieldLiteral(6, indexedTrue)
case b&240 == 0:
// 6.2.2 Literal Header Field without Indexing
// 0b0000xxxx: top four bits are 0000
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2
return d.parseFieldLiteral(4, indexedFalse)
case b&240 == 16:
// 6.2.3 Literal Header Field never Indexed
// 0b0001xxxx: top four bits are 0001
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3
return d.parseFieldLiteral(4, indexedNever)
case b&224 == 32:
// 6.3 Dynamic Table Size Update
// Top three bits are '001'.
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.3
return d.parseDynamicTableSizeUpdate()
}
return DecodingError{errors.New("invalid encoding")}
}
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldIndexed() error {
buf := d.buf
idx, buf, err := readVarInt(7, buf)
if err != nil {
return err
}
hf, ok := d.at(idx)
if !ok {
return DecodingError{InvalidIndexError(idx)}
}
d.buf = buf
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
}
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
buf := d.buf
nameIdx, buf, err := readVarInt(n, buf)
if err != nil {
return err
}
var hf HeaderField
wantStr := d.emitEnabled || it.indexed()
if nameIdx > 0 {
ihf, ok := d.at(nameIdx)
if !ok {
return DecodingError{InvalidIndexError(nameIdx)}
}
hf.Name = ihf.Name
} else {
hf.Name, buf, err = d.readString(buf, wantStr)
if err != nil {
return err
}
}
hf.Value, buf, err = d.readString(buf, wantStr)
if err != nil {
return err
}
d.buf = buf
if it.indexed() {
d.dynTab.add(hf)
}
hf.Sensitive = it.sensitive()
return d.callEmit(hf)
}
func (d *Decoder) callEmit(hf HeaderField) error {
if d.maxStrLen != 0 {
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
return ErrStringLength
}
}
if d.emitEnabled {
d.emit(hf)
}
return nil
}
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
// beginning of the first header block following the change to the dynamic table size.
if !d.firstField && d.dynTab.size > 0 {
return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
}
buf := d.buf
size, buf, err := readVarInt(5, buf)
if err != nil {
return err
}
if size > uint64(d.dynTab.allowedMaxSize) {
return DecodingError{errors.New("dynamic table size update too large")}
}
d.dynTab.setMaxSize(uint32(size))
d.buf = buf
return nil
}
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
// readVarInt reads an unsigned variable length integer off the
// beginning of p. n is the parameter as described in
// https://httpwg.org/specs/rfc7541.html#rfc.section.5.1.
//
// n must always be between 1 and 8.
//
// The returned remain buffer is either a smaller suffix of p, or err != nil.
// The error is errNeedMore if p doesn't contain a complete integer.
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
if n < 1 || n > 8 {
panic("bad n")
}
if len(p) == 0 {
return 0, p, errNeedMore
}
i = uint64(p[0])
if n < 8 {
i &= (1 << uint64(n)) - 1
}
if i < (1<<uint64(n))-1 {
return i, p[1:], nil
}
origP := p
p = p[1:]
var m uint64
for len(p) > 0 {
b := p[0]
p = p[1:]
i += uint64(b&127) << m
if b&128 == 0 {
return i, p, nil
}
m += 7
if m >= 63 { // TODO: proper overflow check. making this up.
return 0, origP, errVarintOverflow
}
}
return 0, origP, errNeedMore
}
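// Decoding counterpart of the appendVarInt example in encode.go, assuming
// readVarInt as defined above:
//
//	readVarInt(5, []byte{0x0a})             // => 10, [], nil
//	readVarInt(5, []byte{0x1f, 0x9a, 0x0a}) // => 1337, [], nil
//	readVarInt(5, []byte{0x1f, 0x9a})       // => 0, [0x1f, 0x9a], errNeedMore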
// readString decodes an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
if len(p) == 0 {
return "", p, errNeedMore
}
isHuff := p[0]&128 != 0
strLen, p, err := readVarInt(7, p)
if err != nil {
return "", p, err
}
if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
return "", nil, ErrStringLength
}
if uint64(len(p)) < strLen {
return "", p, errNeedMore
}
if !isHuff {
if wantStr {
s = string(p[:strLen])
}
return s, p[strLen:], nil
}
if wantStr {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset() // don't trust others
defer bufPool.Put(buf)
if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
buf.Reset()
return "", nil, err
}
s = buf.String()
buf.Reset() // be nice to GC
}
return s, p[strLen:], nil
}
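// A minimal decode sketch, assuming the Encoder from encode.go and the
// Decoder above; DecodeFull collects every field of a complete header block:
//
//	var buf bytes.Buffer
//	enc := NewEncoder(&buf)
//	_ = enc.WriteField(HeaderField{Name: ":status", Value: "200"})
//	dec := NewDecoder(4096, nil)
//	fields, err := dec.DecodeFull(buf.Bytes())
//	// err == nil; fields[0] == HeaderField{Name: ":status", Value: "200"}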

226
vendor/golang.org/x/net/http2/hpack/huffman.go generated vendored Normal file
View file

@ -0,0 +1,226 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"bytes"
"errors"
"io"
"sync"
)
var bufPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
// HuffmanDecode decodes the string in v and writes the expanded
// result to w, returning the number of bytes written to w and the
// Write call's return value. At most one Write call is made.
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
if err := huffmanDecode(buf, 0, v); err != nil {
return 0, err
}
return w.Write(buf.Bytes())
}
// HuffmanDecodeToString decodes the string in v.
func HuffmanDecodeToString(v []byte) (string, error) {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
if err := huffmanDecode(buf, 0, v); err != nil {
return "", err
}
return buf.String(), nil
}
// ErrInvalidHuffman is returned for errors found decoding
// Huffman-encoded strings.
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
// huffmanDecode decodes v to buf.
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
rootHuffmanNode := getRootHuffmanNode()
n := rootHuffmanNode
// cur is the bit buffer that has not been fed into n.
// cbits is the number of low order bits in cur that are valid.
// sbits is the number of bits of the symbol prefix being decoded.
cur, cbits, sbits := uint(0), uint8(0), uint8(0)
for _, b := range v {
cur = cur<<8 | uint(b)
cbits += 8
sbits += 8
for cbits >= 8 {
idx := byte(cur >> (cbits - 8))
n = n.children[idx]
if n == nil {
return ErrInvalidHuffman
}
if n.children == nil {
if maxLen != 0 && buf.Len() == maxLen {
return ErrStringLength
}
buf.WriteByte(n.sym)
cbits -= n.codeLen
n = rootHuffmanNode
sbits = cbits
} else {
cbits -= 8
}
}
}
for cbits > 0 {
n = n.children[byte(cur<<(8-cbits))]
if n == nil {
return ErrInvalidHuffman
}
if n.children != nil || n.codeLen > cbits {
break
}
if maxLen != 0 && buf.Len() == maxLen {
return ErrStringLength
}
buf.WriteByte(n.sym)
cbits -= n.codeLen
n = rootHuffmanNode
sbits = cbits
}
if sbits > 7 {
// Either there was an incomplete symbol, or overlong padding.
// Both are decoding errors per RFC 7541 section 5.2.
return ErrInvalidHuffman
}
if mask := uint(1<<cbits - 1); cur&mask != mask {
// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
return ErrInvalidHuffman
}
return nil
}
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
type node struct {
_ incomparable
// children is non-nil for internal nodes
children *[256]*node
// The following are only valid if children is nil:
codeLen uint8 // number of bits that led to the output of sym
sym byte // output symbol
}
func newInternalNode() *node {
return &node{children: new([256]*node)}
}
var (
buildRootOnce sync.Once
lazyRootHuffmanNode *node
)
func getRootHuffmanNode() *node {
buildRootOnce.Do(buildRootHuffmanNode)
return lazyRootHuffmanNode
}
func buildRootHuffmanNode() {
if len(huffmanCodes) != 256 {
panic("unexpected size")
}
lazyRootHuffmanNode = newInternalNode()
// allocate a leaf node for each of the 256 symbols
leaves := new([256]node)
for sym, code := range huffmanCodes {
codeLen := huffmanCodeLen[sym]
cur := lazyRootHuffmanNode
for codeLen > 8 {
codeLen -= 8
i := uint8(code >> codeLen)
if cur.children[i] == nil {
cur.children[i] = newInternalNode()
}
cur = cur.children[i]
}
shift := 8 - codeLen
start, end := int(uint8(code<<shift)), int(1<<shift)
leaves[sym].sym = byte(sym)
leaves[sym].codeLen = codeLen
for i := start; i < start+end; i++ {
cur.children[i] = &leaves[sym]
}
}
}
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
// and returns the extended buffer.
func AppendHuffmanString(dst []byte, s string) []byte {
// This relies on the maximum huffman code length being 30 (See tables.go huffmanCodeLen array)
// So if a uint64 buffer has less than 32 valid bits, it can always accommodate another huffmanCode.
var (
x uint64 // buffer
n uint // number of valid bits present in x
)
for i := 0; i < len(s); i++ {
c := s[i]
n += uint(huffmanCodeLen[c])
x <<= huffmanCodeLen[c] % 64
x |= uint64(huffmanCodes[c])
if n >= 32 {
n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift
y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32
dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
}
}
// Add padding bits if necessary
if over := n % 8; over > 0 {
const (
eosCode = 0x3fffffff
eosNBits = 30
eosPadByte = eosCode >> (eosNBits - 8)
)
pad := 8 - over
x = (x << pad) | (eosPadByte >> over)
n += pad // 8 now divides into n exactly
}
// n in (0, 8, 16, 24, 32)
switch n / 8 {
case 0:
return dst
case 1:
return append(dst, byte(x))
case 2:
y := uint16(x)
return append(dst, byte(y>>8), byte(y))
case 3:
y := uint16(x >> 8)
return append(dst, byte(y>>8), byte(y), byte(x))
}
// case 4:
y := uint32(x)
return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
}
// HuffmanEncodeLength returns the number of bytes required to encode
// s in Huffman codes. The result is round up to byte boundary.
func HuffmanEncodeLength(s string) uint64 {
n := uint64(0)
for i := 0; i < len(s); i++ {
n += uint64(huffmanCodeLen[s[i]])
}
return (n + 7) / 8
}
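// A minimal round-trip sketch, assuming only the exported helpers above:
//
//	enc := AppendHuffmanString(nil, "www.example.com")
//	// uint64(len(enc)) == HuffmanEncodeLength("www.example.com")
//	s, err := HuffmanDecodeToString(enc)
//	// s == "www.example.com", err == nil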

479
vendor/golang.org/x/net/http2/hpack/tables.go generated vendored Normal file
View file

@ -0,0 +1,479 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"fmt"
)
// headerFieldTable implements a list of HeaderFields.
// This is used to implement the static and dynamic tables.
type headerFieldTable struct {
// For static tables, entries are never evicted.
//
// For dynamic tables, entries are evicted from ents[0] and added to the end.
// Each entry has a unique id that starts at one and increments for each
// entry that is added. This unique id is stable across evictions, meaning
// it can be used as a pointer to a specific entry. As in hpack, unique ids
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
//
// Zero is not a valid unique id.
//
// evictCount should not overflow in any remotely practical situation. In
// practice, we will have one dynamic table per HTTP/2 connection. If we
// assume a very powerful server that handles 1M QPS per connection and each
// request adds (then evicts) 100 entries from the table, it would still take
// 2M years for evictCount to overflow.
ents []HeaderField
evictCount uint64
// byName maps a HeaderField name to the unique id of the newest entry with
// the same name. See above for a definition of "unique id".
byName map[string]uint64
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
// entry with the same name and value. See above for a definition of "unique id".
byNameValue map[pairNameValue]uint64
}
type pairNameValue struct {
name, value string
}
func (t *headerFieldTable) init() {
t.byName = make(map[string]uint64)
t.byNameValue = make(map[pairNameValue]uint64)
}
// len reports the number of entries in the table.
func (t *headerFieldTable) len() int {
return len(t.ents)
}
// addEntry adds a new entry.
func (t *headerFieldTable) addEntry(f HeaderField) {
id := uint64(t.len()) + t.evictCount + 1
t.byName[f.Name] = id
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
t.ents = append(t.ents, f)
}
// evictOldest evicts the n oldest entries in the table.
func (t *headerFieldTable) evictOldest(n int) {
if n > t.len() {
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
}
for k := 0; k < n; k++ {
f := t.ents[k]
id := t.evictCount + uint64(k) + 1
if t.byName[f.Name] == id {
delete(t.byName, f.Name)
}
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
delete(t.byNameValue, p)
}
}
copy(t.ents, t.ents[n:])
for k := t.len() - n; k < t.len(); k++ {
t.ents[k] = HeaderField{} // so strings can be garbage collected
}
t.ents = t.ents[:t.len()-n]
if t.evictCount+uint64(n) < t.evictCount {
panic("evictCount overflow")
}
t.evictCount += uint64(n)
}
// search finds f in the table. If there is no match, i is 0.
// If both name and value match, i is the matched index and nameValueMatch
// becomes true. If only name matches, i points to that index and
// nameValueMatch becomes false.
//
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
// table, the return value i actually refers to the entry t.ents[t.len()-i].
//
// All tables are assumed to be dynamic tables except for the global
// staticTable pointer.
//
// See Section 2.3.3.
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
if !f.Sensitive {
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
return t.idToIndex(id), true
}
}
if id := t.byName[f.Name]; id != 0 {
return t.idToIndex(id), false
}
return 0, false
}
// idToIndex converts a unique id to an HPACK index.
// See Section 2.3.3.
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
if id <= t.evictCount {
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
}
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
if t != staticTable {
return uint64(t.len()) - k // dynamic table
}
return k + 1
}
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = newStaticTable()
var staticTableEntries = [...]HeaderField{
{Name: ":authority"},
{Name: ":method", Value: "GET"},
{Name: ":method", Value: "POST"},
{Name: ":path", Value: "/"},
{Name: ":path", Value: "/index.html"},
{Name: ":scheme", Value: "http"},
{Name: ":scheme", Value: "https"},
{Name: ":status", Value: "200"},
{Name: ":status", Value: "204"},
{Name: ":status", Value: "206"},
{Name: ":status", Value: "304"},
{Name: ":status", Value: "400"},
{Name: ":status", Value: "404"},
{Name: ":status", Value: "500"},
{Name: "accept-charset"},
{Name: "accept-encoding", Value: "gzip, deflate"},
{Name: "accept-language"},
{Name: "accept-ranges"},
{Name: "accept"},
{Name: "access-control-allow-origin"},
{Name: "age"},
{Name: "allow"},
{Name: "authorization"},
{Name: "cache-control"},
{Name: "content-disposition"},
{Name: "content-encoding"},
{Name: "content-language"},
{Name: "content-length"},
{Name: "content-location"},
{Name: "content-range"},
{Name: "content-type"},
{Name: "cookie"},
{Name: "date"},
{Name: "etag"},
{Name: "expect"},
{Name: "expires"},
{Name: "from"},
{Name: "host"},
{Name: "if-match"},
{Name: "if-modified-since"},
{Name: "if-none-match"},
{Name: "if-range"},
{Name: "if-unmodified-since"},
{Name: "last-modified"},
{Name: "link"},
{Name: "location"},
{Name: "max-forwards"},
{Name: "proxy-authenticate"},
{Name: "proxy-authorization"},
{Name: "range"},
{Name: "referer"},
{Name: "refresh"},
{Name: "retry-after"},
{Name: "server"},
{Name: "set-cookie"},
{Name: "strict-transport-security"},
{Name: "transfer-encoding"},
{Name: "user-agent"},
{Name: "vary"},
{Name: "via"},
{Name: "www-authenticate"},
}
func newStaticTable() *headerFieldTable {
t := &headerFieldTable{}
t.init()
for _, e := range staticTableEntries[:] {
t.addEntry(e)
}
return t
}
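// Illustrative lookups against the static table above (indices follow
// RFC 7541, Appendix A), assuming the search method defined earlier:
//
//	staticTable.search(HeaderField{Name: ":method", Value: "GET"}) // => (2, true)
//	staticTable.search(HeaderField{Name: ":method", Value: "PUT"}) // => (3, false): name-only match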
var huffmanCodes = [256]uint32{
0x1ff8,
0x7fffd8,
0xfffffe2,
0xfffffe3,
0xfffffe4,
0xfffffe5,
0xfffffe6,
0xfffffe7,
0xfffffe8,
0xffffea,
0x3ffffffc,
0xfffffe9,
0xfffffea,
0x3ffffffd,
0xfffffeb,
0xfffffec,
0xfffffed,
0xfffffee,
0xfffffef,
0xffffff0,
0xffffff1,
0xffffff2,
0x3ffffffe,
0xffffff3,
0xffffff4,
0xffffff5,
0xffffff6,
0xffffff7,
0xffffff8,
0xffffff9,
0xffffffa,
0xffffffb,
0x14,
0x3f8,
0x3f9,
0xffa,
0x1ff9,
0x15,
0xf8,
0x7fa,
0x3fa,
0x3fb,
0xf9,
0x7fb,
0xfa,
0x16,
0x17,
0x18,
0x0,
0x1,
0x2,
0x19,
0x1a,
0x1b,
0x1c,
0x1d,
0x1e,
0x1f,
0x5c,
0xfb,
0x7ffc,
0x20,
0xffb,
0x3fc,
0x1ffa,
0x21,
0x5d,
0x5e,
0x5f,
0x60,
0x61,
0x62,
0x63,
0x64,
0x65,
0x66,
0x67,
0x68,
0x69,
0x6a,
0x6b,
0x6c,
0x6d,
0x6e,
0x6f,
0x70,
0x71,
0x72,
0xfc,
0x73,
0xfd,
0x1ffb,
0x7fff0,
0x1ffc,
0x3ffc,
0x22,
0x7ffd,
0x3,
0x23,
0x4,
0x24,
0x5,
0x25,
0x26,
0x27,
0x6,
0x74,
0x75,
0x28,
0x29,
0x2a,
0x7,
0x2b,
0x76,
0x2c,
0x8,
0x9,
0x2d,
0x77,
0x78,
0x79,
0x7a,
0x7b,
0x7ffe,
0x7fc,
0x3ffd,
0x1ffd,
0xffffffc,
0xfffe6,
0x3fffd2,
0xfffe7,
0xfffe8,
0x3fffd3,
0x3fffd4,
0x3fffd5,
0x7fffd9,
0x3fffd6,
0x7fffda,
0x7fffdb,
0x7fffdc,
0x7fffdd,
0x7fffde,
0xffffeb,
0x7fffdf,
0xffffec,
0xffffed,
0x3fffd7,
0x7fffe0,
0xffffee,
0x7fffe1,
0x7fffe2,
0x7fffe3,
0x7fffe4,
0x1fffdc,
0x3fffd8,
0x7fffe5,
0x3fffd9,
0x7fffe6,
0x7fffe7,
0xffffef,
0x3fffda,
0x1fffdd,
0xfffe9,
0x3fffdb,
0x3fffdc,
0x7fffe8,
0x7fffe9,
0x1fffde,
0x7fffea,
0x3fffdd,
0x3fffde,
0xfffff0,
0x1fffdf,
0x3fffdf,
0x7fffeb,
0x7fffec,
0x1fffe0,
0x1fffe1,
0x3fffe0,
0x1fffe2,
0x7fffed,
0x3fffe1,
0x7fffee,
0x7fffef,
0xfffea,
0x3fffe2,
0x3fffe3,
0x3fffe4,
0x7ffff0,
0x3fffe5,
0x3fffe6,
0x7ffff1,
0x3ffffe0,
0x3ffffe1,
0xfffeb,
0x7fff1,
0x3fffe7,
0x7ffff2,
0x3fffe8,
0x1ffffec,
0x3ffffe2,
0x3ffffe3,
0x3ffffe4,
0x7ffffde,
0x7ffffdf,
0x3ffffe5,
0xfffff1,
0x1ffffed,
0x7fff2,
0x1fffe3,
0x3ffffe6,
0x7ffffe0,
0x7ffffe1,
0x3ffffe7,
0x7ffffe2,
0xfffff2,
0x1fffe4,
0x1fffe5,
0x3ffffe8,
0x3ffffe9,
0xffffffd,
0x7ffffe3,
0x7ffffe4,
0x7ffffe5,
0xfffec,
0xfffff3,
0xfffed,
0x1fffe6,
0x3fffe9,
0x1fffe7,
0x1fffe8,
0x7ffff3,
0x3fffea,
0x3fffeb,
0x1ffffee,
0x1ffffef,
0xfffff4,
0xfffff5,
0x3ffffea,
0x7ffff4,
0x3ffffeb,
0x7ffffe6,
0x3ffffec,
0x3ffffed,
0x7ffffe7,
0x7ffffe8,
0x7ffffe9,
0x7ffffea,
0x7ffffeb,
0xffffffe,
0x7ffffec,
0x7ffffed,
0x7ffffee,
0x7ffffef,
0x7fffff0,
0x3ffffee,
}
var huffmanCodeLen = [256]uint8{
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
}

385
vendor/golang.org/x/net/http2/http2.go generated vendored Normal file
View file

@ -0,0 +1,385 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package http2 implements the HTTP/2 protocol.
//
// This package is low-level and intended to be used directly by very
// few people. Most users will use it indirectly through the automatic
// use by the net/http package (from Go 1.6 and later).
// For use in earlier Go versions see ConfigureServer. (Transport support
// requires Go 1.6 or later)
//
// See https://http2.github.io/ for more information on HTTP/2.
//
// See https://http2.golang.org/ for a test server running this code.
package http2 // import "golang.org/x/net/http2"
import (
"bufio"
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"sort"
"strconv"
"strings"
"sync"
"golang.org/x/net/http/httpguts"
)
var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") {
VerboseLogs = true
}
if strings.Contains(e, "http2debug=2") {
VerboseLogs = true
logFrameWrites = true
logFrameReads = true
}
}
const (
// ClientPreface is the string that must be sent by new
// connections from clients.
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
// SETTINGS_MAX_FRAME_SIZE default
// https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2
initialMaxFrameSize = 16384
// NextProtoTLS is the NPN/ALPN protocol negotiated during
// HTTP/2's TLS setup.
NextProtoTLS = "h2"
// https://httpwg.org/specs/rfc7540.html#SettingValues
initialHeaderTableSize = 4096
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
defaultMaxReadFrameSize = 1 << 20
)
var (
clientPreface = []byte(ClientPreface)
)
type streamState int
// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
stateIdle streamState = iota
stateOpen
stateHalfClosedLocal
stateHalfClosedRemote
stateClosed
)
var stateName = [...]string{
stateIdle: "Idle",
stateOpen: "Open",
stateHalfClosedLocal: "HalfClosedLocal",
stateHalfClosedRemote: "HalfClosedRemote",
stateClosed: "Closed",
}
func (st streamState) String() string {
return stateName[st]
}
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
// ID is which setting is being set.
// See https://httpwg.org/specs/rfc7540.html#SettingFormat
ID SettingID
// Val is the value.
Val uint32
}
func (s Setting) String() string {
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}
// Valid reports whether the setting is valid.
func (s Setting) Valid() error {
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
switch s.ID {
case SettingEnablePush:
if s.Val != 1 && s.Val != 0 {
return ConnectionError(ErrCodeProtocol)
}
case SettingInitialWindowSize:
if s.Val > 1<<31-1 {
return ConnectionError(ErrCodeFlowControl)
}
case SettingMaxFrameSize:
if s.Val < 16384 || s.Val > 1<<24-1 {
return ConnectionError(ErrCodeProtocol)
}
}
return nil
}
// A SettingID is an HTTP/2 setting as defined in
// https://httpwg.org/specs/rfc7540.html#iana-settings
type SettingID uint16
const (
SettingHeaderTableSize SettingID = 0x1
SettingEnablePush SettingID = 0x2
SettingMaxConcurrentStreams SettingID = 0x3
SettingInitialWindowSize SettingID = 0x4
SettingMaxFrameSize SettingID = 0x5
SettingMaxHeaderListSize SettingID = 0x6
)
var settingName = map[SettingID]string{
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
SettingEnablePush: "ENABLE_PUSH",
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
SettingMaxFrameSize: "MAX_FRAME_SIZE",
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
}
func (s SettingID) String() string {
if v, ok := settingName[s]; ok {
return v
}
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
}
// validWireHeaderFieldName reports whether v is a valid header field
// name (key). See httpguts.ValidHeaderName for the base rules.
//
// Further, http2 says:
//
// "Just as in HTTP/1.x, header field names are strings of ASCII
// characters that are compared in a case-insensitive
// fashion. However, header field names MUST be converted to
// lowercase prior to their encoding in HTTP/2. "
func validWireHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
}
for _, r := range v {
if !httpguts.IsTokenRune(r) {
return false
}
if 'A' <= r && r <= 'Z' {
return false
}
}
return true
}
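// Illustrative calls, assuming validWireHeaderFieldName as defined above:
//
//	validWireHeaderFieldName("content-type") // => true
//	validWireHeaderFieldName("Content-Type") // => false (uppercase is rejected on the wire)
//	validWireHeaderFieldName("")             // => false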
func httpCodeString(code int) string {
switch code {
case 200:
return "200"
case 404:
return "404"
}
return strconv.Itoa(code)
}
// from pkg io
type stringWriter interface {
WriteString(s string) (n int, err error)
}
// A gate lets two goroutines coordinate their activities.
type gate chan struct{}
func (g gate) Done() { g <- struct{}{} }
func (g gate) Wait() { <-g }
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}
// Init makes a closeWaiter usable.
// It exists so that a closeWaiter value can be placed inside a
// larger struct and have the Mutex and Cond's memory in the same
// allocation.
func (cw *closeWaiter) Init() {
*cw = make(chan struct{})
}
// Close marks the closeWaiter as closed and unblocks any waiters.
func (cw closeWaiter) Close() {
close(cw)
}
// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
<-cw
}
// bufferedWriter is a buffered writer that writes to w.
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
w io.Writer // immutable
bw *bufio.Writer // non-nil when data is buffered
}
func newBufferedWriter(w io.Writer) *bufferedWriter {
return &bufferedWriter{w: w}
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
// buffers created using bufWriterPool.
//
// TODO: pick a less arbitrary value? this is a bit under
// (3 x typical 1500 byte MTU) at least. Other than that,
// not much thought went into it.
const bufWriterPoolBufferSize = 4 << 10
var bufWriterPool = sync.Pool{
New: func() interface{} {
return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
},
}
func (w *bufferedWriter) Available() int {
if w.bw == nil {
return bufWriterPoolBufferSize
}
return w.bw.Available()
}
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
bw.Reset(w.w)
w.bw = bw
}
return w.bw.Write(p)
}
func (w *bufferedWriter) Flush() error {
bw := w.bw
if bw == nil {
return nil
}
err := bw.Flush()
bw.Reset(nil)
bufWriterPool.Put(bw)
w.bw = nil
return err
}
func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")
}
return uint32(v)
}
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 7230, section 3.3.
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == 204:
return false
case status == 304:
return false
}
return true
}
type httpError struct {
_ incomparable
msg string
timeout bool
}
func (e *httpError) Error() string { return e.msg }
func (e *httpError) Timeout() bool { return e.timeout }
func (e *httpError) Temporary() bool { return true }
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
type connectionStater interface {
ConnectionState() tls.ConnectionState
}
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
type sorter struct {
v []string // owned by sorter
}
func (s *sorter) Len() int { return len(s.v) }
func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
// Keys returns the sorted keys of h.
//
// The returned slice is only valid until s is used again or returned to
// its pool.
func (s *sorter) Keys(h http.Header) []string {
keys := s.v[:0]
for k := range h {
keys = append(keys, k)
}
s.v = keys
sort.Sort(s)
return keys
}
func (s *sorter) SortStrings(ss []string) {
// Our sorter works on s.v, which sorter owns, so
// stash it away while we sort the user's buffer.
save := s.v
s.v = ss
sort.Sort(s)
s.v = save
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()

21
vendor/golang.org/x/net/http2/not_go111.go generated vendored Normal file
View file

@ -0,0 +1,21 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.11
// +build !go1.11
package http2
import (
"net/http/httptrace"
"net/textproto"
)
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
return nil
}

31
vendor/golang.org/x/net/http2/not_go115.go generated vendored Normal file
View file

@ -0,0 +1,31 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.15
// +build !go1.15
package http2
import (
"context"
"crypto/tls"
)
// dialTLSWithContext opens a TLS connection.
func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
cn, err := tls.Dial(network, addr, cfg)
if err != nil {
return nil, err
}
if err := cn.Handshake(); err != nil {
return nil, err
}
if cfg.InsecureSkipVerify {
return cn, nil
}
if err := cn.VerifyHostname(cfg.ServerName); err != nil {
return nil, err
}
return cn, nil
}

17
vendor/golang.org/x/net/http2/not_go118.go generated vendored Normal file
View file

@ -0,0 +1,17 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.18
// +build !go1.18
package http2
import (
"crypto/tls"
"net"
)
func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
return nil
}

179
vendor/golang.org/x/net/http2/pipe.go generated vendored Normal file
View file

@ -0,0 +1,179 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"io"
"sync"
)
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu
b pipeBuffer // nil when done reading
unread int // bytes unread when done
err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error
readFn func() // optional code to run in Read before error
}
type pipeBuffer interface {
Len() int
io.Writer
io.Reader
}
// setBuffer initializes the pipe buffer.
// It has no effect if the pipe is already closed.
func (p *pipe) setBuffer(b pipeBuffer) {
p.mu.Lock()
defer p.mu.Unlock()
if p.err != nil || p.breakErr != nil {
return
}
p.b = b
}
func (p *pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.b == nil {
return p.unread
}
return p.b.Len()
}
// Read waits until data is available and copies bytes
// from the buffer into p.
func (p *pipe) Read(d []byte) (n int, err error) {
p.mu.Lock()
defer p.mu.Unlock()
if p.c.L == nil {
p.c.L = &p.mu
}
for {
if p.breakErr != nil {
return 0, p.breakErr
}
if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d)
}
if p.err != nil {
if p.readFn != nil {
p.readFn() // e.g. copy trailers
p.readFn = nil // not sticky like p.err
}
p.b = nil
return 0, p.err
}
p.c.Wait()
}
}
var errClosedPipeWrite = errors.New("write on closed buffer")
// Write copies bytes from p into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
func (p *pipe) Write(d []byte) (n int, err error) {
p.mu.Lock()
defer p.mu.Unlock()
if p.c.L == nil {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}
// CloseWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err after all data has been
// read.
//
// The error must be non-nil.
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
// BreakWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err immediately, without
// waiting for unread data.
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
// closeWithErrorAndCode is like CloseWithError but also sets some code to run
// in the caller's goroutine before returning the error.
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
if err == nil {
panic("err must be non-nil")
}
p.mu.Lock()
defer p.mu.Unlock()
if p.c.L == nil {
p.c.L = &p.mu
}
defer p.c.Signal()
if *dst != nil {
// Already been done.
return
}
p.readFn = fn
if dst == &p.breakErr {
if p.b != nil {
p.unread += p.b.Len()
}
p.b = nil
}
*dst = err
p.closeDoneLocked()
}
// requires p.mu be held.
func (p *pipe) closeDoneLocked() {
if p.donec == nil {
return
}
// Close if unclosed. This isn't racy since we always
// hold p.mu while closing.
select {
case <-p.donec:
default:
close(p.donec)
}
}
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
func (p *pipe) Err() error {
p.mu.Lock()
defer p.mu.Unlock()
if p.breakErr != nil {
return p.breakErr
}
return p.err
}
// Done returns a channel which is closed if and when this pipe is closed
// with CloseWithError.
func (p *pipe) Done() <-chan struct{} {
p.mu.Lock()
defer p.mu.Unlock()
if p.donec == nil {
p.donec = make(chan struct{})
if p.err != nil || p.breakErr != nil {
// Already hit an error.
p.closeDoneLocked()
}
}
return p.donec
}
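// A minimal usage sketch: a pipe backed by a bytes.Buffer, with the writer
// signalling end-of-stream via CloseWithError(io.EOF):
//
//	p := &pipe{b: new(bytes.Buffer)}
//	go func() {
//		p.Write([]byte("hello"))
//		p.CloseWithError(io.EOF)
//	}()
//	data, err := io.ReadAll(p) // data == []byte("hello"), err == nil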

3131
vendor/golang.org/x/net/http2/server.go generated vendored Normal file

File diff suppressed because it is too large

3066
vendor/golang.org/x/net/http2/transport.go generated vendored Normal file

File diff suppressed because it is too large

370
vendor/golang.org/x/net/http2/write.go generated vendored Normal file
View file

@ -0,0 +1,370 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"bytes"
"fmt"
"log"
"net/http"
"net/url"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
)
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
writeFrame(writeContext) error
// staysWithinBuffer reports whether this writer promises that
// it will only write less than or equal to size bytes, and it
// won't Flush the write context.
staysWithinBuffer(size int) bool
}
// writeContext is the interface needed by the various frame writer
// types below. All the writeFrame methods below are scheduled via the
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
//
// TODO: decide whether to a) use this in the client code (which didn't
// end up using this yet, because it has a simpler design, not
// currently implementing priorities), or b) delete this and
// make the server code a bit more concrete.
type writeContext interface {
Framer() *Framer
Flush() error
CloseConn() error
// HeaderEncoder returns an HPACK encoder that writes to the
// returned buffer.
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
// writeEndsStream reports whether w writes a frame that will transition
// the stream to a half-closed local state. This returns false for RST_STREAM,
// which closes the entire stream (not just the local half).
func writeEndsStream(w writeFramer) bool {
switch v := w.(type) {
case *writeData:
return v.endStream
case *writeResHeaders:
return v.endStream
case nil:
// This can only happen if the caller reuses w after it's
// been intentionally nil'ed out to prevent use. Keep this
// here to catch future refactoring breaking it.
panic("writeEndsStream called on nil writeFramer")
}
return false
}
type flushFrameWriter struct{}
func (flushFrameWriter) writeFrame(ctx writeContext) error {
return ctx.Flush()
}
func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
type writeSettings []Setting
func (s writeSettings) staysWithinBuffer(max int) bool {
const settingSize = 6 // uint16 + uint32
return frameHeaderLen+settingSize*len(s) <= max
}
func (s writeSettings) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettings([]Setting(s)...)
}
type writeGoAway struct {
maxStreamID uint32
code ErrCode
}
func (p *writeGoAway) writeFrame(ctx writeContext) error {
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
ctx.Flush() // ignore error: we're hanging up on them anyway
return err
}
func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
type writeData struct {
streamID uint32
p []byte
endStream bool
}
func (w *writeData) String() string {
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
}
func (w *writeData) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}
func (w *writeData) staysWithinBuffer(max int) bool {
return frameHeaderLen+len(w.p) <= max
}
// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type handlerPanicRST struct {
StreamID uint32
}
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}
func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
func (se StreamError) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WritePing(true, w.pf.Data)
}
func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
type writeSettingsAck struct{}
func (writeSettingsAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettingsAck()
}
func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
// for the first/last fragment, respectively.
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
// that all peers must support (16KB). Later we could care
// more and send larger frames if the peer advertised it, but
// there's little point. Most headers are small anyway (so we
// generally won't have CONTINUATION frames), and extra frames
// only waste 9 bytes anyway.
const maxFrameSize = 16384
first := true
for len(headerBlock) > 0 {
frag := headerBlock
if len(frag) > maxFrameSize {
frag = frag[:maxFrameSize]
}
headerBlock = headerBlock[len(frag):]
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
return err
}
first = false
}
return nil
}
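For reference, a minimal standalone sketch (not part of the vendored file) of the fragmentation arithmetic above: a hypothetical 40,000-byte header block is emitted as one HEADERS frame followed by CONTINUATION frames, with END_HEADERS set only on the last fragment.

package main

import "fmt"

func main() {
	const maxFrameSize = 16384     // same fixed limit splitHeaderBlock uses
	block := make([]byte, 40000)   // hypothetical encoded header block

	first := true
	for len(block) > 0 {
		frag := block
		if len(frag) > maxFrameSize {
			frag = frag[:maxFrameSize]
		}
		block = block[len(frag):]
		frameType := "CONTINUATION"
		if first {
			frameType = "HEADERS"
		}
		fmt.Printf("%s: %d-byte fragment, END_HEADERS=%v\n", frameType, len(frag), len(block) == 0)
		first = false
	}
	// Prints: HEADERS 16384, CONTINUATION 16384, CONTINUATION 7232 (END_HEADERS=true).
}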
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
streamID uint32
httpResCode int // 0 means no ":status" line
h http.Header // may be nil
trailers []string // if non-nil, which keys of h to write. nil means all.
endStream bool
date string
contentType string
contentLength string
}
func encKV(enc *hpack.Encoder, k, v string) {
if VerboseLogs {
log.Printf("http2: server encoding header %q = %q", k, v)
}
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
// TODO: this is a common one. It'd be nice to return true
// here and get into the fast path if we could be clever and
// calculate the size fast enough, or at least a conservative
// upper bound that usually fires. (Maybe if w.h and
// w.trailers are nil, so we don't need to enumerate it.)
// Otherwise I'm afraid that just calculating the length to
// answer this question would be slower than the ~2µs benefit.
return false
}
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
if w.httpResCode != 0 {
encKV(enc, ":status", httpCodeString(w.httpResCode))
}
encodeHeaders(enc, w.h, w.trailers)
if w.contentType != "" {
encKV(enc, "content-type", w.contentType)
}
if w.contentLength != "" {
encKV(enc, "content-length", w.contentLength)
}
if w.date != "" {
encKV(enc, "date", w.date)
}
headerBlock := buf.Bytes()
if len(headerBlock) == 0 && w.trailers == nil {
panic("unexpected empty hpack")
}
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}
func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WriteHeaders(HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: frag,
EndStream: w.endStream,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
type writePushPromise struct {
streamID uint32 // pusher stream
method string // for :method
url *url.URL // for :scheme, :authority, :path
h http.Header
// Creates an ID for a pushed stream. This runs on serveG just before
// the frame is written. The returned ID is copied to promisedID.
allocatePromisedID func() (uint32, error)
promisedID uint32
}
func (w *writePushPromise) staysWithinBuffer(max int) bool {
// TODO: see writeResHeaders.staysWithinBuffer
return false
}
func (w *writePushPromise) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
encKV(enc, ":method", w.method)
encKV(enc, ":scheme", w.url.Scheme)
encKV(enc, ":authority", w.url.Host)
encKV(enc, ":path", w.url.RequestURI())
encodeHeaders(enc, w.h, nil)
headerBlock := buf.Bytes()
if len(headerBlock) == 0 {
panic("unexpected empty hpack")
}
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}
func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
if firstFrag {
return ctx.Framer().WritePushPromise(PushPromiseParam{
StreamID: w.streamID,
PromiseID: w.promisedID,
BlockFragment: frag,
EndHeaders: lastFrag,
})
} else {
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
}
type write100ContinueHeadersFrame struct {
streamID uint32
}
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
encKV(enc, ":status", "100")
return ctx.Framer().WriteHeaders(HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: buf.Bytes(),
EndStream: false,
EndHeaders: true,
})
}
func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
// Sloppy but conservative:
return 9+2*(len(":status")+len("100")) <= max
}
type writeWindowUpdate struct {
streamID uint32 // or 0 for conn-level
n uint32
}
func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
// is encoded only if k is in keys.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
if keys == nil {
sorter := sorterPool.Get().(*sorter)
// Using defer here, since the returned keys from the
// sorter.Keys method is only valid until the sorter
// is returned:
defer sorterPool.Put(sorter)
keys = sorter.Keys(h)
}
for _, k := range keys {
vv := h[k]
k, ascii := lowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
continue
}
if !validWireHeaderFieldName(k) {
// Skip it as backup paranoia. Per
// golang.org/issue/14048, these should
// already be rejected at a higher level.
continue
}
isTE := k == "transfer-encoding"
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// TODO: return an error? golang.org/issue/14048
// For now just omit it.
continue
}
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
if isTE && v != "trailers" {
continue
}
encKV(enc, k, v)
}
}
}
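As an illustration only (not part of the vendored file), a sketch of the filtering that encodeHeaders applies before HPACK encoding, assuming strings.ToLower plus httpguts.ValidHeaderFieldValue as stand-ins for the package's lowerHeader/validWireHeaderFieldName checks: invalid field values are dropped, and Transfer-Encoding is only forwarded when its value is "trailers".

package main

import (
	"fmt"
	"net/http"
	"strings"

	"golang.org/x/net/http/httpguts"
)

func main() {
	h := http.Header{
		"Content-Type":      {"text/plain"},
		"Transfer-Encoding": {"chunked"}, // dropped: only "trailers" is forwarded
		"X-Bad-Value":       {"a\x00b"},  // dropped: invalid header field value
	}
	for k, vv := range h {
		lk := strings.ToLower(k) // stand-in for lowerHeader
		isTE := lk == "transfer-encoding"
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				continue
			}
			if isTE && v != "trailers" {
				continue
			}
			fmt.Printf("encode %q = %q\n", lk, v)
		}
	}
	// Only content-type: text/plain survives the filtering.
}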

250
vendor/golang.org/x/net/http2/writesched.go generated vendored Normal file
View file

@ -0,0 +1,250 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "fmt"
// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
// Methods are never called concurrently.
type WriteScheduler interface {
// OpenStream opens a new stream in the write scheduler.
// It is illegal to call this with streamID=0 or with a streamID that is
// already open -- the call may panic.
OpenStream(streamID uint32, options OpenStreamOptions)
// CloseStream closes a stream in the write scheduler. Any frames queued on
// this stream should be discarded. It is illegal to call this on a stream
// that is not open -- the call may panic.
CloseStream(streamID uint32)
// AdjustStream adjusts the priority of the given stream. This may be called
// on a stream that has not yet been opened or has been closed. Note that
// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
// https://tools.ietf.org/html/rfc7540#section-5.1
AdjustStream(streamID uint32, priority PriorityParam)
// Push queues a frame in the scheduler. In most cases, this will not be
// called with wr.StreamID()!=0 unless that stream is currently open. The one
// exception is RST_STREAM frames, which may be sent on idle or closed streams.
Push(wr FrameWriteRequest)
// Pop dequeues the next frame to write. Returns false if no frames can
// be written. Frames with a given wr.StreamID() are Pop'd in the same
// order they are Push'd, except RST_STREAM frames. No frames should be
// discarded except by CloseStream.
Pop() (wr FrameWriteRequest, ok bool)
}
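For context, a hedged sketch (not part of the vendored file) of how a server selects a WriteScheduler through the exported http2.Server.NewWriteScheduler hook; the address and certificate paths are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// Select the RFC 7540 priority scheduler; leaving NewWriteScheduler nil
	// lets the package pick its default scheduler instead.
	if err := http2.ConfigureServer(srv, &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	}); err != nil {
		log.Fatal(err)
	}

	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert/key paths
}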
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
}
// FrameWriteRequest is a request to write a frame.
type FrameWriteRequest struct {
// write is the interface value that does the writing, once the
// WriteScheduler has selected this frame to write. The write
// functions are all defined in write.go.
write writeFramer
// stream is the stream on which this frame will be written.
// nil for non-stream frames like PING and SETTINGS.
// nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
stream *stream
// done, if non-nil, must be a buffered channel with space for
// 1 message and is sent the return value from write (or an
// earlier error) when the frame has been written.
done chan error
}
// StreamID returns the id of the stream this frame will be written to.
// 0 is used for non-stream frames such as PING and SETTINGS.
func (wr FrameWriteRequest) StreamID() uint32 {
if wr.stream == nil {
if se, ok := wr.write.(StreamError); ok {
// (*serverConn).resetStream doesn't set
// stream because it doesn't necessarily have
// one. So special case this type of write
// message.
return se.StreamID
}
return 0
}
return wr.stream.id
}
// isControl reports whether wr is a control frame for MaxQueuedControlFrames
// purposes. That includes non-stream frames and RST_STREAM frames.
func (wr FrameWriteRequest) isControl() bool {
return wr.stream == nil
}
// DataSize returns the number of flow control bytes that must be consumed
// to write this entire frame. This is 0 for non-DATA frames.
func (wr FrameWriteRequest) DataSize() int {
if wd, ok := wr.write.(*writeData); ok {
return len(wd.p)
}
return 0
}
// Consume consumes min(n, available) bytes from this frame, where available
// is the number of flow control bytes available on the stream. Consume returns
// 0, 1, or 2 frames, where the integer return value gives the number of frames
// returned.
//
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
// underlying stream's flow control budget.
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
var empty FrameWriteRequest
// Non-DATA frames are always consumed whole.
wd, ok := wr.write.(*writeData)
if !ok || len(wd.p) == 0 {
return wr, empty, 1
}
// Might need to split after applying limits.
allowed := wr.stream.flow.available()
if n < allowed {
allowed = n
}
if wr.stream.sc.maxFrameSize < allowed {
allowed = wr.stream.sc.maxFrameSize
}
if allowed <= 0 {
return empty, empty, 0
}
if len(wd.p) > int(allowed) {
wr.stream.flow.take(allowed)
consumed := FrameWriteRequest{
stream: wr.stream,
write: &writeData{
streamID: wd.streamID,
p: wd.p[:allowed],
// Even if the original had endStream set, there
// are bytes remaining because len(wd.p) > allowed,
// so we know endStream is false.
endStream: false,
},
// Our caller is blocking on the final DATA frame, not
// this intermediate frame, so no need to wait.
done: nil,
}
rest := FrameWriteRequest{
stream: wr.stream,
write: &writeData{
streamID: wd.streamID,
p: wd.p[allowed:],
endStream: wd.endStream,
},
done: wr.done,
}
return consumed, rest, 2
}
// The frame is consumed whole.
// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
wr.stream.flow.take(int32(len(wd.p)))
return wr, empty, 1
}
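A standalone sketch (not part of the vendored file) of Consume's splitting arithmetic with hypothetical numbers; the real method also hands the done channel to the remainder and charges the stream's flow-control account via flow.take.

package main

import "fmt"

func main() {
	var (
		payload      int32 = 100   // len(wd.p): bytes queued on the DATA frame
		streamWindow int32 = 70    // wr.stream.flow.available()
		n            int32 = 40    // caller's allowance, e.g. the connection window
		maxFrameSize int32 = 16384 // wr.stream.sc.maxFrameSize
	)

	allowed := streamWindow
	if n < allowed {
		allowed = n
	}
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}

	switch {
	case allowed <= 0:
		fmt.Println("0 frames: flow control blocks any write")
	case payload > allowed:
		fmt.Printf("2 frames: consumed=%d bytes (endStream=false), rest=%d bytes\n", allowed, payload-allowed)
	default:
		fmt.Printf("1 frame: the whole %d-byte DATA frame is consumed\n", payload)
	}
	// With these numbers: 2 frames, consumed=40 bytes, rest=60 bytes.
}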
// String is for debugging only.
func (wr FrameWriteRequest) String() string {
var des string
if s, ok := wr.write.(fmt.Stringer); ok {
des = s.String()
} else {
des = fmt.Sprintf("%T", wr.write)
}
return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
}
// replyToWriter sends err to wr.done and panics if the send must block
// This does nothing if wr.done is nil.
func (wr *FrameWriteRequest) replyToWriter(err error) {
if wr.done == nil {
return
}
select {
case wr.done <- err:
default:
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
}
wr.write = nil // prevent use (assume it's tainted after wr.done send)
}
// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
s []FrameWriteRequest
}
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
func (q *writeQueue) push(wr FrameWriteRequest) {
q.s = append(q.s, wr)
}
func (q *writeQueue) shift() FrameWriteRequest {
if len(q.s) == 0 {
panic("invalid use of queue")
}
wr := q.s[0]
// TODO: less copy-happy queue.
copy(q.s, q.s[1:])
q.s[len(q.s)-1] = FrameWriteRequest{}
q.s = q.s[:len(q.s)-1]
return wr
}
// consume consumes up to n bytes from q.s[0]. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
if len(q.s) == 0 {
return FrameWriteRequest{}, false
}
consumed, rest, numresult := q.s[0].Consume(n)
switch numresult {
case 0:
return FrameWriteRequest{}, false
case 1:
q.shift()
case 2:
q.s[0] = rest
}
return consumed, true
}
type writeQueuePool []*writeQueue
// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
for i := range q.s {
q.s[i] = FrameWriteRequest{}
}
q.s = q.s[:0]
*p = append(*p, q)
}
// get returns an empty writeQueue.
func (p *writeQueuePool) get() *writeQueue {
ln := len(*p)
if ln == 0 {
return new(writeQueue)
}
x := ln - 1
q := (*p)[x]
(*p)[x] = nil
*p = (*p)[:x]
return q
}

451
vendor/golang.org/x/net/http2/writesched_priority.go generated vendored Normal file
View file

@ -0,0 +1,451 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
"sort"
)
// RFC 7540, Section 5.3.5: the default weight is 16.
const priorityDefaultWeight = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
// MaxClosedNodesInTree controls the maximum number of closed streams to
// retain in the priority tree. Setting this to zero saves a small amount
// of memory at the cost of performance.
//
// See RFC 7540, Section 5.3.4:
// "It is possible for a stream to become closed while prioritization
// information ... is in transit. ... This potentially creates suboptimal
// prioritization, since the stream could be given a priority that is
// different from what is intended. To avoid these problems, an endpoint
// SHOULD retain stream prioritization state for a period after streams
// become closed. The longer state is retained, the lower the chance that
// streams are assigned incorrect or default priority values."
MaxClosedNodesInTree int
// MaxIdleNodesInTree controls the maximum number of idle streams to
// retain in the priority tree. Setting this to zero saves a small amount
// of memory at the cost of performance.
//
// See RFC 7540, Section 5.3.4:
// Similarly, streams that are in the "idle" state can be assigned
// priority or become a parent of other streams. This allows for the
// creation of a grouping node in the dependency tree, which enables
// more flexible expressions of priority. Idle streams begin with a
// default priority (Section 5.3.5).
MaxIdleNodesInTree int
// ThrottleOutOfOrderWrites enables write throttling to help ensure that
// data is delivered in priority order. This works around a race where
// stream B depends on stream A and both streams are about to call Write
// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
// write as much data from B as possible, but this is suboptimal because A
// is a higher-priority stream. With throttling enabled, we write a small
// amount of data from B to minimize the amount of bandwidth that B can
// steal from A.
ThrottleOutOfOrderWrites bool
}
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
// If cfg is nil, default options are used.
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
if cfg == nil {
// For justification of these defaults, see:
// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
cfg = &PriorityWriteSchedulerConfig{
MaxClosedNodesInTree: 10,
MaxIdleNodesInTree: 10,
ThrottleOutOfOrderWrites: false,
}
}
ws := &priorityWriteScheduler{
nodes: make(map[uint32]*priorityNode),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
}
ws.nodes[0] = &ws.root
if cfg.ThrottleOutOfOrderWrites {
ws.writeThrottleLimit = 1024
} else {
ws.writeThrottleLimit = math.MaxInt32
}
return ws
}
type priorityNodeState int
const (
priorityNodeOpen priorityNodeState = iota
priorityNodeClosed
priorityNodeIdle
)
// priorityNode is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
type priorityNode struct {
q writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state priorityNodeState // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
parent *priorityNode
kids *priorityNode // start of the kids list
prev, next *priorityNode // doubly-linked list of siblings
}
func (n *priorityNode) setParent(parent *priorityNode) {
if n == parent {
panic("setParent to self")
}
if n.parent == parent {
return
}
// Unlink from current parent.
if parent := n.parent; parent != nil {
if n.prev == nil {
parent.kids = n.next
} else {
n.prev.next = n.next
}
if n.next != nil {
n.next.prev = n.prev
}
}
// Link to new parent.
// If parent=nil, remove n from the tree.
// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
n.parent = parent
if parent == nil {
n.next = nil
n.prev = nil
} else {
n.next = parent.kids
n.prev = nil
if n.next != nil {
n.next.prev = n
}
parent.kids = n
}
}
func (n *priorityNode) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
}
}
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
// with a non-empty write queue. When f returns true, this function returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
if n.kids == nil {
return false
}
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
openParent = openParent || (n.state == priorityNodeOpen)
}
// Common case: only one kid or all kids have the same weight.
// Some clients don't use weights; other clients (like web browsers)
// use mostly-linear priority trees.
w := n.kids.weight
needSort := false
for k := n.kids.next; k != nil; k = k.next {
if k.weight != w {
needSort = true
break
}
}
if !needSort {
for k := n.kids; k != nil; k = k.next {
if k.walkReadyInOrder(openParent, tmp, f) {
return true
}
}
return false
}
// Uncommon case: sort the child nodes. We remove the kids from the parent,
// then re-insert after sorting so we can reuse tmp for future sort calls.
*tmp = (*tmp)[:0]
for n.kids != nil {
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
sort.Sort(sortPriorityNodeSiblings(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
for k := n.kids; k != nil; k = k.next {
if k.walkReadyInOrder(openParent, tmp, f) {
return true
}
}
return false
}
type sortPriorityNodeSiblings []*priorityNode
func (z sortPriorityNodeSiblings) Len() int { return len(z) }
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
return wi >= wk
}
if bk == 0 {
return false
}
return bi/bk <= wi/wk
}
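A worked example (not part of the vendored file) of the ordering rule above with two hypothetical siblings: weight fields 31 and 15 encode effective weights 32 and 16, and the sibling that has sent less than its weight share of bytes is scheduled first.

package main

import "fmt"

func main() {
	// Sibling A: weight field 31 (effective 32), 4000 subtree bytes sent.
	// Sibling B: weight field 15 (effective 16), 1000 subtree bytes sent.
	wA, bA := float64(31+1), float64(4000)
	wB, bB := float64(15+1), float64(1000)

	// Less(A, B): 4000/1000 = 4.0 is not <= 32/16 = 2.0, so A does not sort
	// before B -- A has already consumed more than its weight share.
	fmt.Println("A before B:", bA/bB <= wA/wB) // false
	fmt.Println("B before A:", bB/bA <= wB/wA) // true
}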
type priorityWriteScheduler struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
root priorityNode
// nodes maps stream ids to priority tree nodes.
nodes map[uint32]*priorityNode
// maxID is the maximum stream id in nodes.
maxID uint32
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
closedNodes, idleNodes []*priorityNode
// From the config.
maxClosedNodesInTree int
maxIdleNodesInTree int
writeThrottleLimit int32
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
tmp []*priorityNode
// pool of empty queues for reuse.
queuePool writeQueuePool
}
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
if curr.state != priorityNodeIdle {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
curr.state = priorityNodeOpen
return
}
// RFC 7540, Section 5.3.5:
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
// Pushed streams initially depend on their associated stream. In both cases,
// streams are assigned a default weight of 16."
parent := ws.nodes[options.PusherID]
if parent == nil {
parent = &ws.root
}
n := &priorityNode{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeOpen,
}
n.setParent(parent)
ws.nodes[streamID] = n
if streamID > ws.maxID {
ws.maxID = streamID
}
}
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
if ws.nodes[streamID].state != priorityNodeOpen {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
n.state = priorityNodeClosed
n.addBytes(-n.bytes)
q := n.q
ws.queuePool.put(&q)
n.q.s = nil
if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else {
ws.removeNode(n)
}
}
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
// If streamID does not exist, there are two cases:
// - A closed stream that has been removed (this will have ID <= maxID)
// - An idle stream that is being used for "grouping" (this will have ID > maxID)
n := ws.nodes[streamID]
if n == nil {
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
return
}
ws.maxID = streamID
n = &priorityNode{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeIdle,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
}
// Section 5.3.1: A dependency on a stream that is not currently in the tree
// results in that stream being given a default priority (Section 5.3.5).
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
n.weight = priorityDefaultWeight
return
}
// Ignore if the client tries to make a node its own parent.
if n == parent {
return
}
// Section 5.3.3:
// "If a stream is made dependent on one of its own dependencies, the
// formerly dependent stream is first moved to be dependent on the
// reprioritized stream's previous parent. The moved dependency retains
// its weight."
//
// That is: if parent depends on n, move parent to depend on n.parent.
for x := parent.parent; x != nil; x = x.parent {
if x == n {
parent.setParent(n.parent)
break
}
}
// Section 5.3.3: The exclusive flag causes the stream to become the sole
// dependency of its parent stream, causing other dependencies to become
// dependent on the exclusive stream.
if priority.Exclusive {
k := parent.kids
for k != nil {
next := k.next
if k != n {
k.setParent(n)
}
k = next
}
}
n.setParent(parent)
n.weight = priority.Weight
}
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
var n *priorityNode
if wr.isControl() {
n = &ws.root
} else {
id := wr.StreamID()
n = ws.nodes[id]
if n == nil {
// id is an idle or closed stream. wr should not be a HEADERS or
// DATA frame. In either case, we push wr onto the root, rather
// than creating a new priorityNode.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
n = &ws.root
}
}
n.q.push(wr)
}
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
}
wr, ok = n.q.consume(limit)
if !ok {
return false
}
n.addBytes(int64(wr.DataSize()))
// If B depends on A and B continuously has data available but A
// does not, gradually increase the throttling limit to allow B to
// steal more and more bandwidth from A.
if openParent {
ws.writeThrottleLimit += 1024
if ws.writeThrottleLimit < 0 {
ws.writeThrottleLimit = math.MaxInt32
}
} else if ws.enableWriteThrottle {
ws.writeThrottleLimit = 1024
}
return true
})
return wr, ok
}
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
if maxSize == 0 {
return
}
if len(*list) == maxSize {
// Remove the oldest node, then shift left.
ws.removeNode((*list)[0])
x := (*list)[1:]
copy(*list, x)
*list = (*list)[:len(x)]
}
*list = append(*list, n)
}
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
for k := n.kids; k != nil; k = k.next {
k.setParent(n.parent)
}
n.setParent(nil)
delete(ws.nodes, n.id)
}

77
vendor/golang.org/x/net/http2/writesched_random.go generated vendored Normal file
View file

@ -0,0 +1,77 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import "math"
// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
// priorities. Control frames like SETTINGS and PING are written before DATA
// frames, but if no control frames are queued and multiple streams have queued
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
func NewRandomWriteScheduler() WriteScheduler {
return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
}
type randomWriteScheduler struct {
// zero are frames not associated with a specific stream.
zero writeQueue
// sq contains the stream-specific queues, keyed by stream ID.
// When a stream is idle, closed, or emptied, it's deleted
// from the map.
sq map[uint32]*writeQueue
// pool of empty queues for reuse.
queuePool writeQueuePool
}
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
// no-op: idle streams are not tracked
}
func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
q, ok := ws.sq[streamID]
if !ok {
return
}
delete(ws.sq, streamID)
ws.queuePool.put(q)
}
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
// no-op: priorities are ignored
}
func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
if wr.isControl() {
ws.zero.push(wr)
return
}
id := wr.StreamID()
q, ok := ws.sq[id]
if !ok {
q = ws.queuePool.get()
ws.sq[id] = q
}
q.push(wr)
}
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
// Control and RST_STREAM frames first.
if !ws.zero.empty() {
return ws.zero.shift(), true
}
// Iterate over all non-idle streams until finding one that can be consumed.
for streamID, q := range ws.sq {
if wr, ok := q.consume(math.MaxInt32); ok {
if q.empty() {
delete(ws.sq, streamID)
ws.queuePool.put(q)
}
return wr, true
}
}
return FrameWriteRequest{}, false
}

View file

@ -8,22 +8,21 @@
package socket
import (
"net"
"os"
)
func (c *Conn) recvMsg(m *Message, flags int) error {
m.raceWrite()
var h msghdr
vs := make([]iovec, len(m.Buffers))
var sa []byte
if c.network != "tcp" {
sa = make([]byte, sizeofSockaddrInet6)
}
h.pack(vs, m.Buffers, m.OOB, sa)
var operr error
var n int
var (
operr error
n int
oobn int
recvflags int
from net.Addr
)
fn := func(s uintptr) bool {
n, operr = recvmsg(s, &h, flags)
n, oobn, recvflags, from, operr = recvmsg(s, m.Buffers, m.OOB, flags, c.network)
return ioComplete(flags, operr)
}
if err := c.c.Read(fn); err != nil {
@ -32,34 +31,21 @@ func (c *Conn) recvMsg(m *Message, flags int) error {
if operr != nil {
return os.NewSyscallError("recvmsg", operr)
}
if c.network != "tcp" {
var err error
m.Addr, err = parseInetAddr(sa[:], c.network)
if err != nil {
return err
}
}
m.Addr = from
m.N = n
m.NN = h.controllen()
m.Flags = h.flags()
m.NN = oobn
m.Flags = recvflags
return nil
}
func (c *Conn) sendMsg(m *Message, flags int) error {
m.raceRead()
var h msghdr
vs := make([]iovec, len(m.Buffers))
var sa []byte
if m.Addr != nil {
var a [sizeofSockaddrInet6]byte
n := marshalInetAddr(m.Addr, a[:])
sa = a[:n]
}
h.pack(vs, m.Buffers, m.OOB, sa)
var operr error
var n int
var (
operr error
n int
)
fn := func(s uintptr) bool {
n, operr = sendmsg(s, &h, flags)
n, operr = sendmsg(s, m.Buffers, m.OOB, m.Addr, flags)
return ioComplete(flags, operr)
}
if err := c.c.Write(fn); err != nil {

View file

@ -36,11 +36,11 @@ func setsockopt(s uintptr, level, name int, b []byte) error {
return errNotImplemented
}
func recvmsg(s uintptr, h *msghdr, flags int) (int, error) {
return 0, errNotImplemented
func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) {
return 0, 0, 0, nil, errNotImplemented
}
func sendmsg(s uintptr, h *msghdr, flags int) (int, error) {
func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) {
return 0, errNotImplemented
}

View file

@ -8,8 +8,10 @@
package socket
import (
"syscall"
"net"
"unsafe"
"golang.org/x/sys/unix"
)
//go:linkname syscall_getsockopt syscall.getsockopt
@ -18,12 +20,6 @@ func syscall_getsockopt(s, level, name int, val unsafe.Pointer, vallen *uint32)
//go:linkname syscall_setsockopt syscall.setsockopt
func syscall_setsockopt(s, level, name int, val unsafe.Pointer, vallen uintptr) error
//go:linkname syscall_recvmsg syscall.recvmsg
func syscall_recvmsg(s int, msg *syscall.Msghdr, flags int) (int, error)
//go:linkname syscall_sendmsg syscall.sendmsg
func syscall_sendmsg(s int, msg *syscall.Msghdr, flags int) (int, error)
func getsockopt(s uintptr, level, name int, b []byte) (int, error) {
l := uint32(len(b))
err := syscall_getsockopt(int(s), level, name, unsafe.Pointer(&b[0]), &l)
@ -34,10 +30,93 @@ func setsockopt(s uintptr, level, name int, b []byte) error {
return syscall_setsockopt(int(s), level, name, unsafe.Pointer(&b[0]), uintptr(len(b)))
}
func recvmsg(s uintptr, h *msghdr, flags int) (int, error) {
return syscall_recvmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags)
func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) {
var unixFrom unix.Sockaddr
n, oobn, recvflags, unixFrom, err = unix.RecvmsgBuffers(int(s), buffers, oob, flags)
if unixFrom != nil {
from = sockaddrToAddr(unixFrom, network)
}
return
}
func sendmsg(s uintptr, h *msghdr, flags int) (int, error) {
return syscall_sendmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags)
func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) {
var unixTo unix.Sockaddr
if to != nil {
unixTo = addrToSockaddr(to)
}
return unix.SendmsgBuffers(int(s), buffers, oob, unixTo, flags)
}
// addrToSockaddr converts a net.Addr to a unix.Sockaddr.
func addrToSockaddr(a net.Addr) unix.Sockaddr {
var (
ip net.IP
port int
zone string
)
switch a := a.(type) {
case *net.TCPAddr:
ip = a.IP
port = a.Port
zone = a.Zone
case *net.UDPAddr:
ip = a.IP
port = a.Port
zone = a.Zone
case *net.IPAddr:
ip = a.IP
zone = a.Zone
default:
return nil
}
if ip4 := ip.To4(); ip4 != nil {
sa := unix.SockaddrInet4{Port: port}
copy(sa.Addr[:], ip4)
return &sa
}
if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil {
sa := unix.SockaddrInet6{Port: port}
copy(sa.Addr[:], ip6)
if zone != "" {
sa.ZoneId = uint32(zoneCache.index(zone))
}
return &sa
}
return nil
}
// sockaddrToAddr converts a unix.Sockaddr to a net.Addr.
func sockaddrToAddr(sa unix.Sockaddr, network string) net.Addr {
var (
ip net.IP
port int
zone string
)
switch sa := sa.(type) {
case *unix.SockaddrInet4:
ip = make(net.IP, net.IPv4len)
copy(ip, sa.Addr[:])
port = sa.Port
case *unix.SockaddrInet6:
ip = make(net.IP, net.IPv6len)
copy(ip, sa.Addr[:])
port = sa.Port
if sa.ZoneId > 0 {
zone = zoneCache.name(int(sa.ZoneId))
}
default:
return nil
}
switch network {
case "tcp", "tcp4", "tcp6":
return &net.TCPAddr{IP: ip, Port: port, Zone: zone}
case "udp", "udp4", "udp6":
return &net.UDPAddr{IP: ip, Port: port, Zone: zone}
default:
return &net.IPAddr{IP: ip, Zone: zone}
}
}
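For illustration only (not part of the vendored file), the same mapping expressed with the exported x/sys/unix types the new code relies on: a *net.UDPAddr becomes a *unix.SockaddrInet4 of the kind accepted by unix.SendmsgBuffers. The address and port are placeholders.

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	to := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 5353} // placeholder destination

	// Equivalent of the IPv4 branch of addrToSockaddr.
	sa := &unix.SockaddrInet4{Port: to.Port}
	copy(sa.Addr[:], to.IP.To4())

	fmt.Printf("%v -> unix.SockaddrInet4{Port: %d, Addr: %v}\n", to, sa.Port, sa.Addr)
}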

View file

@ -5,6 +5,7 @@
package socket
import (
"net"
"syscall"
"unsafe"
@ -37,11 +38,11 @@ func setsockopt(s uintptr, level, name int, b []byte) error {
return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b)))
}
func recvmsg(s uintptr, h *msghdr, flags int) (int, error) {
return 0, errNotImplemented
func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) {
return 0, 0, 0, nil, errNotImplemented
}
func sendmsg(s uintptr, h *msghdr, flags int) (int, error) {
func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) {
return 0, errNotImplemented
}

View file

@ -5,6 +5,7 @@
package socket
import (
"net"
"syscall"
"unsafe"
)
@ -27,12 +28,39 @@ func setsockopt(s uintptr, level, name int, b []byte) error {
return errnoErr(errno)
}
func recvmsg(s uintptr, h *msghdr, flags int) (int, error) {
n, _, errno := syscall_syscall(syscall.SYS___RECVMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags))
return int(n), errnoErr(errno)
func recvmsg(s uintptr, buffers [][]byte, oob []byte, flags int, network string) (n, oobn int, recvflags int, from net.Addr, err error) {
var h msghdr
vs := make([]iovec, len(buffers))
var sa []byte
if network != "tcp" {
sa = make([]byte, sizeofSockaddrInet6)
}
h.pack(vs, buffers, oob, sa)
sn, _, errno := syscall_syscall(syscall.SYS___RECVMSG_A, s, uintptr(unsafe.Pointer(&h)), uintptr(flags))
n = int(sn)
oobn = h.controllen()
recvflags = h.flags()
err = errnoErr(errno)
if network != "tcp" {
var err2 error
from, err2 = parseInetAddr(sa, network)
if err2 != nil && err == nil {
err = err2
}
}
return
}
func sendmsg(s uintptr, h *msghdr, flags int) (int, error) {
n, _, errno := syscall_syscall(syscall.SYS___SENDMSG_A, s, uintptr(unsafe.Pointer(h)), uintptr(flags))
func sendmsg(s uintptr, buffers [][]byte, oob []byte, to net.Addr, flags int) (int, error) {
var h msghdr
vs := make([]iovec, len(buffers))
var sa []byte
if to != nil {
var a [sizeofSockaddrInet6]byte
n := marshalInetAddr(to, a[:])
sa = a[:n]
}
h.pack(vs, buffers, oob, sa)
n, _, errno := syscall_syscall(syscall.SYS___SENDMSG_A, s, uintptr(unsafe.Pointer(&h)), uintptr(flags))
return int(n), errnoErr(errno)
}

View file

@ -1,30 +0,0 @@
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_darwin.go
package socket
type iovec struct {
Base *byte
Len uint32
}
type msghdr struct {
Name *byte
Namelen uint32
Iov *iovec
Iovlen int32
Control *byte
Controllen uint32
Flags int32
}
type cmsghdr struct {
Len uint32
Level int32
Type int32
}
const (
sizeofIovec = 0x8
sizeofMsghdr = 0x1c
)

View file

@ -1,11 +1,11 @@
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_darwin.go
// cgo -godefs defs_freebsd.go
package socket
type iovec struct {
Base *byte
Len uint32
Len uint64
}
type msghdr struct {
@ -25,6 +25,6 @@ type cmsghdr struct {
}
const (
sizeofIovec = 0x8
sizeofMsghdr = 0x1c
sizeofIovec = 0x10
sizeofMsghdr = 0x30
)

52
vendor/golang.org/x/net/ipv4/zsys_freebsd_riscv64.go generated vendored Normal file
View file

@ -0,0 +1,52 @@
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_freebsd.go
package ipv4
const (
sizeofSockaddrStorage = 0x80
sizeofSockaddrInet = 0x10
sizeofIPMreq = 0x8
sizeofIPMreqSource = 0xc
sizeofGroupReq = 0x88
sizeofGroupSourceReq = 0x108
)
type sockaddrStorage struct {
Len uint8
Family uint8
X__ss_pad1 [6]uint8
X__ss_align int64
X__ss_pad2 [112]uint8
}
type sockaddrInet struct {
Len uint8
Family uint8
Port uint16
Addr [4]byte /* in_addr */
Zero [8]uint8
}
type ipMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type ipMreqSource struct {
Multiaddr [4]byte /* in_addr */
Sourceaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type groupReq struct {
Interface uint32
Group sockaddrStorage
}
type groupSourceReq struct {
Interface uint32
Group sockaddrStorage
Source sockaddrStorage
}

64
vendor/golang.org/x/net/ipv6/zsys_freebsd_riscv64.go generated vendored Normal file
View file

@ -0,0 +1,64 @@
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_freebsd.go
package ipv6
const (
sizeofSockaddrStorage = 0x80
sizeofSockaddrInet6 = 0x1c
sizeofInet6Pktinfo = 0x14
sizeofIPv6Mtuinfo = 0x20
sizeofIPv6Mreq = 0x14
sizeofGroupReq = 0x88
sizeofGroupSourceReq = 0x108
sizeofICMPv6Filter = 0x20
)
type sockaddrStorage struct {
Len uint8
Family uint8
X__ss_pad1 [6]uint8
X__ss_align int64
X__ss_pad2 [112]uint8
}
type sockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type inet6Pktinfo struct {
Addr [16]byte /* in6_addr */
Ifindex uint32
}
type ipv6Mtuinfo struct {
Addr sockaddrInet6
Mtu uint32
}
type ipv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type groupReq struct {
Interface uint32
Group sockaddrStorage
}
type groupSourceReq struct {
Interface uint32
Group sockaddrStorage
Source sockaddrStorage
}
type icmpv6Filter struct {
Filt [8]uint32
}

3
vendor/golang.org/x/oauth2/AUTHORS generated vendored
View file

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View file

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View file

@ -6,7 +6,10 @@ package cpu
import "runtime"
const cacheLineSize = 64
// cacheLineSize is used to prevent false sharing of cache lines.
// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size.
// It doesn't cost much and is much more future-proof.
const cacheLineSize = 128
func initOptions() {
options = []option{
@ -41,13 +44,10 @@ func archInit() {
switch runtime.GOOS {
case "freebsd":
readARM64Registers()
case "linux", "netbsd":
case "linux", "netbsd", "openbsd":
doinit()
default:
// Most platforms don't seem to allow reading these registers.
//
// OpenBSD:
// See https://golang.org/issue/31746
// Many platforms don't seem to allow reading these registers.
setMinimalFeatures()
}
}

65
vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cpu
import (
"syscall"
"unsafe"
)
// Minimal copy of functionality from x/sys/unix so the cpu package can call
// sysctl without depending on x/sys/unix.
const (
// From OpenBSD's sys/sysctl.h.
_CTL_MACHDEP = 7
// From OpenBSD's machine/cpu.h.
_CPU_ID_AA64ISAR0 = 2
_CPU_ID_AA64ISAR1 = 3
)
// Implemented in the runtime package (runtime/sys_openbsd3.go)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
//go:linkname syscall_syscall6 syscall.syscall6
func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
_, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
if errno != 0 {
return errno
}
return nil
}
var libc_sysctl_trampoline_addr uintptr
//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
func sysctlUint64(mib []uint32) (uint64, bool) {
var out uint64
nout := unsafe.Sizeof(out)
if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil {
return 0, false
}
return out, true
}
func doinit() {
setMinimalFeatures()
// Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl.
isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0})
if !ok {
return
}
isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1})
if !ok {
return
}
parseARM64SystemRegisters(isar0, isar1, 0)
Initialized = true
}

11
vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s generated vendored Normal file
View file

@ -0,0 +1,11 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !linux && !netbsd && arm64
// +build !linux,!netbsd,arm64
//go:build !linux && !netbsd && !openbsd && arm64
// +build !linux,!netbsd,!openbsd,arm64
package cpu

View file

@ -4,9 +4,7 @@
package unix
import (
"unsafe"
)
import "unsafe"
// IoctlRetInt performs an ioctl operation specified by req on a device
// associated with opened file descriptor fd, and returns a non-negative
@ -217,3 +215,19 @@ func IoctlKCMAttach(fd int, info KCMAttach) error {
func IoctlKCMUnattach(fd int, info KCMUnattach) error {
return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info))
}
// IoctlLoopGetStatus64 gets the status of the loop device associated with the
// file descriptor fd using the LOOP_GET_STATUS64 operation.
func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
var value LoopInfo64
if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil {
return nil, err
}
return &value, nil
}
// IoctlLoopSetStatus64 sets the status of the loop device associated with the
// file descriptor fd using the LOOP_SET_STATUS64 operation.
func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
}
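A hedged usage sketch (not part of the vendored file) for the new loop-device ioctl wrappers; /dev/loop0 is a placeholder and the program assumes a Linux host with an attached loop device and sufficient privileges.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/dev/loop0", unix.O_RDWR, 0) // placeholder device path
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	info, err := unix.IoctlLoopGetStatus64(fd)
	if err != nil {
		log.Fatal(err)
	}

	// File_name is a fixed-size, NUL-padded byte array; trim it for printing.
	name := info.File_name[:]
	if i := bytes.IndexByte(name, 0); i >= 0 {
		name = name[:i]
	}
	fmt.Printf("backing file: %s\n", name)
}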

View file

@ -73,12 +73,12 @@ aix_ppc64)
darwin_amd64)
mkerrors="$mkerrors -m64"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
mkasm="go run mkasm.go"
;;
darwin_arm64)
mkerrors="$mkerrors -m64"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
mkasm="go run mkasm.go"
;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
@ -142,33 +142,33 @@ netbsd_arm64)
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_386)
mkasm="go run mkasm.go"
mkerrors="$mkerrors -m32"
mksyscall="go run mksyscall.go -l32 -openbsd"
mksyscall="go run mksyscall.go -l32 -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
mkasm="go run mkasm.go"
mkerrors="$mkerrors -m64"
mksyscall="go run mksyscall.go -openbsd"
mksyscall="go run mksyscall.go -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
mkasm="go run mkasm.go"
mkerrors="$mkerrors"
mksyscall="go run mksyscall.go -l32 -openbsd -arm"
mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_arm64)
mkasm="go run mkasm.go"
mkerrors="$mkerrors -m64"
mksyscall="go run mksyscall.go -openbsd"
mksyscall="go run mksyscall.go -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
@ -232,5 +232,5 @@ esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi
) | $run

27
vendor/golang.org/x/sys/unix/str.go generated vendored
View file

@ -1,27 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix
func itoa(val int) string { // do it here rather than with fmt to avoid dependency
if val < 0 {
return "-" + uitoa(uint(-val))
}
return uitoa(uint(val))
}
func uitoa(val uint) string {
var buf [32]byte // big enough for int64
i := len(buf) - 1
for val >= 10 {
buf[i] = byte(val%10 + '0')
i--
val /= 10
}
buf[i] = byte(val + '0')
return string(buf[i:])
}

View file

@ -29,8 +29,6 @@ import (
"bytes"
"strings"
"unsafe"
"golang.org/x/sys/internal/unsafeheader"
)
// ByteSliceFromString returns a NUL-terminated slice of bytes
@ -82,12 +80,7 @@ func BytePtrToString(p *byte) string {
ptr = unsafe.Pointer(uintptr(ptr) + 1)
}
var s []byte
h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
h.Data = unsafe.Pointer(p)
h.Len = n
h.Cap = n
s := unsafe.Slice((*byte)(unsafe.Pointer(p)), n)
return string(s)
}

View file

@ -253,7 +253,7 @@ func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Sockle
var empty bool
if len(oob) > 0 {
// send at least one normal byte
empty := emptyIovecs(iov)
empty = emptyIovecs(iov)
if empty {
var iova [1]Iovec
iova[0].Base = &dummy

View file

@ -363,7 +363,7 @@ func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Sockle
var empty bool
if len(oob) > 0 {
// send at least one normal byte
empty := emptyIovecs(iov)
empty = emptyIovecs(iov)
if empty {
var iova [1]Iovec
iova[0].Base = &dummy

View file

@ -7,11 +7,7 @@
package unix
import (
"unsafe"
"golang.org/x/sys/internal/unsafeheader"
)
import "unsafe"
//sys closedir(dir uintptr) (err error)
//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
@ -86,11 +82,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
}
// Copy entry into return buffer.
var s []byte
hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s))
hdr.Data = unsafe.Pointer(&entry)
hdr.Cap = reclen
hdr.Len = reclen
s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen)
copy(buf, s)
buf = buf[reclen:]

View file

@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
}
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)}
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
}
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)}
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View file

@ -13,6 +13,7 @@ package unix
import (
"encoding/binary"
"strconv"
"syscall"
"time"
"unsafe"
@ -233,7 +234,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error {
func Futimes(fd int, tv []Timeval) (err error) {
// Believe it or not, this is the best we can do on Linux
// (and is what glibc does).
return Utimes("/proc/self/fd/"+itoa(fd), tv)
return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv)
}
const ImplementsGetwd = true
@ -1541,7 +1542,7 @@ func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Sockle
var dummy byte
var empty bool
if len(oob) > 0 {
empty := emptyIovecs(iov)
empty = emptyIovecs(iov)
if empty {
var sockType int
sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
@ -1891,17 +1892,28 @@ func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uint
return int(ret), nil
}
// issue 1435.
// On linux Setuid and Setgid only affects the current thread, not the process.
// This does not match what most callers expect so we must return an error
// here rather than letting the caller think that the call succeeded.
func Setuid(uid int) (err error) {
return EOPNOTSUPP
return syscall.Setuid(uid)
}
func Setgid(uid int) (err error) {
return EOPNOTSUPP
func Setgid(gid int) (err error) {
return syscall.Setgid(gid)
}
func Setreuid(ruid, euid int) (err error) {
return syscall.Setreuid(ruid, euid)
}
func Setregid(rgid, egid int) (err error) {
return syscall.Setregid(rgid, egid)
}
func Setresuid(ruid, euid, suid int) (err error) {
return syscall.Setresuid(ruid, euid, suid)
}
func Setresgid(rgid, egid, sgid int) (err error) {
return syscall.Setresgid(rgid, egid, sgid)
}
// SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set.

View file

@ -41,10 +41,6 @@ func setTimeval(sec, usec int64) Timeval {
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)

View file

@ -46,11 +46,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)

View file

@ -62,10 +62,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64

View file

@ -39,11 +39,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)

View file

@ -34,10 +34,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)


@ -37,11 +37,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Statfs(path string, buf *Statfs_t) (err error)


@ -32,10 +32,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)


@ -34,10 +34,6 @@ import (
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64


@ -34,11 +34,7 @@ package unix
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)


@ -38,11 +38,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)


@ -34,11 +34,7 @@ import (
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)
//sys Statfs(path string, buf *Statfs_t) (err error)


@ -31,11 +31,7 @@ package unix
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)

27
vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go generated vendored Normal file

@ -0,0 +1,27 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (openbsd && 386) || (openbsd && amd64) || (openbsd && arm) || (openbsd && arm64)
// +build openbsd,386 openbsd,amd64 openbsd,arm openbsd,arm64

package unix

import _ "unsafe"

// Implemented in the runtime package (runtime/sys_openbsd3.go)
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2 uintptr, err Errno)
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)

//go:linkname syscall_syscall syscall.syscall
//go:linkname syscall_syscall6 syscall.syscall6
//go:linkname syscall_syscall10 syscall.syscall10
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6

func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) {
	return syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, 0)
}
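
The //go:linkname directives above bind the bodyless declarations in this file to functions implemented elsewhere (the libc-call helpers the runtime provides for OpenBSD). A minimal sketch of that pattern outside x/sys, with every name below chosen purely for illustration (runtime.fastrand exists as of Go 1.19 but is not a stable API), might look like this; note that a package containing a bodyless declaration also needs an empty .s file to satisfy the compiler:

// fastrand.go in a hypothetical package; an empty stub.s must sit next to it.
package fastrand

import _ "unsafe" // blank-importing unsafe is what permits //go:linkname

// Bind the local, bodyless declaration to a function implemented in the
// runtime, just as syscall_openbsd_libc.go binds its syscall_* declarations
// to helpers provided by the syscall and runtime packages.
//
//go:linkname fastrand runtime.fastrand
func fastrand() uint32

// Uint32 exposes the linked-in function to callers of this package.
func Uint32() uint32 { return fastrand() }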


@ -750,8 +750,8 @@ type EventPort struct {
// we should handle things gracefully. To do so, we need to keep an extra
// reference to the cookie around until the event is processed
// thus the otherwise seemingly extraneous "cookies" map
// The key of this map is a pointer to the corresponding &fCookie.cookie
cookies map[*interface{}]*fileObjCookie
// The key of this map is a pointer to the corresponding fCookie
cookies map[*fileObjCookie]struct{}
}
// PortEvent is an abstraction of the port_event C struct.
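
The comment above is the crux of this change: port_associate hands a raw pointer to the kernel, so the Go side must keep its own strong reference until the event is retrieved, and keying the map by the *fileObjCookie itself (rather than by a pointer to its cookie field) lets peIntToExt later verify that an address coming back from the kernel is one this port actually handed out. A standalone sketch of that bookkeeping pattern, with all names hypothetical, could look like this:

package main

import (
	"fmt"
	"unsafe"
)

// cookie stands in for fileObjCookie: a payload whose address is handed to
// an external system (the kernel, in the real code).
type cookie struct{ label string }

// registry keeps every handed-out *cookie reachable, so the garbage
// collector cannot free it while only the external side holds its address.
type registry struct{ live map[*cookie]struct{} }

// handOut records c as live and returns the raw address to pass outside.
func (r *registry) handOut(c *cookie) uintptr {
	r.live[c] = struct{}{}
	return uintptr(unsafe.Pointer(c))
}

// takeBack converts an address received from outside back into a *cookie,
// but only trusts it if it is one we actually handed out.
func (r *registry) takeBack(addr uintptr) (*cookie, bool) {
	c := (*cookie)(unsafe.Pointer(addr))
	if _, ok := r.live[c]; !ok {
		return nil, false
	}
	delete(r.live, c)
	return c, true
}

func main() {
	r := &registry{live: make(map[*cookie]struct{})}
	addr := r.handOut(&cookie{label: "watch /tmp"})
	c, ok := r.takeBack(addr)
	fmt.Println(c.label, ok) // "watch /tmp true"
}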
@ -778,7 +778,7 @@ func NewEventPort() (*EventPort, error) {
port: port,
fds: make(map[uintptr]*fileObjCookie),
paths: make(map[string]*fileObjCookie),
cookies: make(map[*interface{}]*fileObjCookie),
cookies: make(map[*fileObjCookie]struct{}),
}
return e, nil
}
@ -799,6 +799,7 @@ func (e *EventPort) Close() error {
}
e.fds = nil
e.paths = nil
e.cookies = nil
return nil
}
@ -826,17 +827,16 @@ func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, coo
if _, found := e.paths[path]; found {
return fmt.Errorf("%v is already associated with this Event Port", path)
}
fobj, err := createFileObj(path, stat)
fCookie, err := createFileObjCookie(path, stat, cookie)
if err != nil {
return err
}
fCookie := &fileObjCookie{fobj, cookie}
_, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie)))
_, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fCookie.fobj)), events, (*byte)(unsafe.Pointer(fCookie)))
if err != nil {
return err
}
e.paths[path] = fCookie
e.cookies[&fCookie.cookie] = fCookie
e.cookies[fCookie] = struct{}{}
return nil
}
@ -858,7 +858,7 @@ func (e *EventPort) DissociatePath(path string) error {
if err == nil {
// dissociate was successful, safe to delete the cookie
fCookie := e.paths[path]
delete(e.cookies, &fCookie.cookie)
delete(e.cookies, fCookie)
}
delete(e.paths, path)
return err
@ -871,13 +871,16 @@ func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) erro
if _, found := e.fds[fd]; found {
return fmt.Errorf("%v is already associated with this Event Port", fd)
}
fCookie := &fileObjCookie{nil, cookie}
_, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(&fCookie.cookie)))
fCookie, err := createFileObjCookie("", nil, cookie)
if err != nil {
return err
}
_, err = port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(fCookie)))
if err != nil {
return err
}
e.fds[fd] = fCookie
e.cookies[&fCookie.cookie] = fCookie
e.cookies[fCookie] = struct{}{}
return nil
}
@ -896,27 +899,31 @@ func (e *EventPort) DissociateFd(fd uintptr) error {
if err == nil {
// dissociate was successful, safe to delete the cookie
fCookie := e.fds[fd]
delete(e.cookies, &fCookie.cookie)
delete(e.cookies, fCookie)
}
delete(e.fds, fd)
return err
}
func createFileObj(name string, stat os.FileInfo) (*fileObj, error) {
fobj := new(fileObj)
bs, err := ByteSliceFromString(name)
if err != nil {
return nil, err
func createFileObjCookie(name string, stat os.FileInfo, cookie interface{}) (*fileObjCookie, error) {
fCookie := new(fileObjCookie)
fCookie.cookie = cookie
if name != "" && stat != nil {
fCookie.fobj = new(fileObj)
bs, err := ByteSliceFromString(name)
if err != nil {
return nil, err
}
fCookie.fobj.Name = (*int8)(unsafe.Pointer(&bs[0]))
s := stat.Sys().(*syscall.Stat_t)
fCookie.fobj.Atim.Sec = s.Atim.Sec
fCookie.fobj.Atim.Nsec = s.Atim.Nsec
fCookie.fobj.Mtim.Sec = s.Mtim.Sec
fCookie.fobj.Mtim.Nsec = s.Mtim.Nsec
fCookie.fobj.Ctim.Sec = s.Ctim.Sec
fCookie.fobj.Ctim.Nsec = s.Ctim.Nsec
}
fobj.Name = (*int8)(unsafe.Pointer(&bs[0]))
s := stat.Sys().(*syscall.Stat_t)
fobj.Atim.Sec = s.Atim.Sec
fobj.Atim.Nsec = s.Atim.Nsec
fobj.Mtim.Sec = s.Mtim.Sec
fobj.Mtim.Nsec = s.Mtim.Nsec
fobj.Ctim.Sec = s.Ctim.Sec
fobj.Ctim.Nsec = s.Ctim.Nsec
return fobj, nil
return fCookie, nil
}
// GetOne wraps port_get(3c) and returns a single PortEvent.
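
createFileObjCookie fills in the fileObj timestamps by asserting the platform-specific stat buffer out of os.FileInfo.Sys(). Outside this Solaris-only code, the same assertion pattern looks roughly like the following (Unix-only sketch; the path is arbitrary):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	fi, err := os.Stat("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}
	// On Unix, Sys() returns the raw *syscall.Stat_t behind the FileInfo;
	// the ok-check guards against other platforms or FS implementations.
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		fmt.Println("no syscall.Stat_t available")
		return
	}
	fmt.Println("inode:", st.Ino, "links:", st.Nlink)
}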
@ -929,44 +936,50 @@ func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) {
p := new(PortEvent)
e.mu.Lock()
defer e.mu.Unlock()
e.peIntToExt(pe, p)
err = e.peIntToExt(pe, p)
if err != nil {
return nil, err
}
return p, nil
}
// peIntToExt converts a cgo portEvent struct into the friendlier PortEvent
// NOTE: Always call this function while holding the e.mu mutex
func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) {
func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) error {
if e.cookies == nil {
return fmt.Errorf("this EventPort is already closed")
}
peExt.Events = peInt.Events
peExt.Source = peInt.Source
cookie := (*interface{})(unsafe.Pointer(peInt.User))
peExt.Cookie = *cookie
fCookie := (*fileObjCookie)(unsafe.Pointer(peInt.User))
_, found := e.cookies[fCookie]
if !found {
panic("unexpected event port address; may be due to kernel bug; see https://go.dev/issue/54254")
}
peExt.Cookie = fCookie.cookie
delete(e.cookies, fCookie)
switch peInt.Source {
case PORT_SOURCE_FD:
delete(e.cookies, cookie)
peExt.Fd = uintptr(peInt.Object)
// Only remove the fds entry if it exists and this cookie matches
if fobj, ok := e.fds[peExt.Fd]; ok {
if &fobj.cookie == cookie {
if fobj == fCookie {
delete(e.fds, peExt.Fd)
}
}
case PORT_SOURCE_FILE:
if fCookie, ok := e.cookies[cookie]; ok && uintptr(unsafe.Pointer(fCookie.fobj)) == uintptr(peInt.Object) {
// Use our stashed reference rather than using unsafe on what we got back
// the unsafe version would be (*fileObj)(unsafe.Pointer(uintptr(peInt.Object)))
peExt.fobj = fCookie.fobj
} else {
panic("mismanaged memory")
}
delete(e.cookies, cookie)
peExt.fobj = fCookie.fobj
peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name)))
// Only remove the paths entry if it exists and this cookie matches
if fobj, ok := e.paths[peExt.Path]; ok {
if &fobj.cookie == cookie {
if fobj == fCookie {
delete(e.paths, peExt.Path)
}
}
}
return nil
}
// Pending wraps port_getn(3c) and returns how many events are pending.
@ -990,7 +1003,7 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error)
got := uint32(min)
max := uint32(len(s))
var err error
ps := make([]portEvent, max, max)
ps := make([]portEvent, max)
_, err = port_getn(e.port, &ps[0], max, &got, timeout)
// got will be trustworthy with ETIME, but not any other error.
if err != nil && err != ETIME {
@ -998,8 +1011,18 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error)
}
e.mu.Lock()
defer e.mu.Unlock()
valid := 0
for i := 0; i < int(got); i++ {
e.peIntToExt(&ps[i], &s[i])
err2 := e.peIntToExt(&ps[i], &s[i])
if err2 != nil {
if valid == 0 && err == nil {
// If err2 is the only error and there are no valid events
// to return, return it to the caller.
err = err2
}
break
}
valid = i + 1
}
return int(got), err
return valid, err
}
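
Taken together, the GetOne and Get changes make event retrieval fail loudly, or return only the valid prefix of events, when the kernel hands back an address the port does not recognize (see https://go.dev/issue/54254 referenced above). A rough usage sketch of this API, relying only on the methods visible in this diff plus the Solaris FILE_MODIFIED constant (assumed to be exported by the package), might be (Solaris/illumos only):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	port, err := unix.NewEventPort()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer port.Close()

	const path = "/tmp/watched.txt"
	fi, err := os.Stat(path)
	if err != nil {
		fmt.Println(err)
		return
	}

	// The cookie can be any value; it is returned untouched with the event.
	if err := port.AssociatePath(path, fi, unix.FILE_MODIFIED, "my-cookie"); err != nil {
		fmt.Println(err)
		return
	}

	// Block until one event arrives (a nil timeout means wait indefinitely).
	ev, err := port.GetOne(nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("event on", ev.Path, "cookie:", ev.Cookie)
}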


@ -13,8 +13,6 @@ import (
"sync"
"syscall"
"unsafe"
"golang.org/x/sys/internal/unsafeheader"
)
var (
@ -117,11 +115,7 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d
}
// Use unsafe to convert addr into a []byte.
var b []byte
hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
hdr.Data = unsafe.Pointer(addr)
hdr.Cap = length
hdr.Len = length
b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), length)
// Register mapping in m and return it.
p := &b[cap(b)-1]
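
The hand-rolled slice-header construction through the internal unsafeheader package is replaced by unsafe.Slice, available since Go 1.17, which builds a slice over existing memory in a single call; Mmap applies it to the address returned by the kernel. A tiny standalone illustration of what that call does:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	arr := [4]byte{1, 2, 3, 4}

	// unsafe.Slice(ptr, n) yields a []byte of length and capacity n whose
	// backing memory starts at ptr; no manual slice-header poking needed.
	b := unsafe.Slice(&arr[0], len(arr))
	b[0] = 9
	fmt.Println(arr) // [9 2 3 4]: the slice aliases the array's memory
}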

Some files were not shown because too many files have changed in this diff.