chore: bump libp2p related dependencies

Richard Ramos, 2023-06-30 09:41:32 -04:00; committed by richΛrd
parent 1efecbf73b
commit 28229faec0
260 changed files with 9521 additions and 15523 deletions


@@ -1 +1 @@
-0.160.2
+0.161.0

go.mod

@@ -31,29 +31,29 @@ require (
github.com/keighl/metabolize v0.0.0-20150915210303-97ab655d4034
github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f
github.com/lib/pq v1.10.4
-github.com/libp2p/go-libp2p v0.27.3
+github.com/libp2p/go-libp2p v0.28.1
github.com/libp2p/go-libp2p-pubsub v0.9.3
github.com/lucasb-eyer/go-colorful v1.0.3
github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7
github.com/multiformats/go-multiaddr v0.9.0
github.com/multiformats/go-multibase v0.2.0
-github.com/multiformats/go-multihash v0.2.1
+github.com/multiformats/go-multihash v0.2.3
github.com/multiformats/go-varint v0.0.7
github.com/nfnt/resize v0.0.0-00010101000000-000000000000
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
github.com/oliamb/cutter v0.2.2
github.com/pborman/uuid v1.2.0
github.com/pkg/errors v0.9.1
-github.com/prometheus/client_golang v1.14.0
+github.com/prometheus/client_golang v1.16.0
github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a
github.com/status-im/doubleratchet v3.0.0+incompatible
github.com/status-im/markdown v0.0.0-20230314100416-26c6f74522d5
github.com/status-im/migrate/v4 v4.6.2-status.3
-github.com/status-im/rendezvous v1.3.6
+github.com/status-im/rendezvous v1.3.7
github.com/status-im/status-go/extkeys v1.1.2
github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501
github.com/status-im/zxcvbn-go v0.0.0-20220311183720-5e8676676857
-github.com/stretchr/testify v1.8.2
+github.com/stretchr/testify v1.8.4
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a
github.com/tsenart/tb v0.0.0-20181025101425-0d2499c8b6e9
github.com/vacp2p/mvds v0.0.24-0.20201124060106-26d8e94130d8
@@ -64,7 +64,7 @@ require (
go.uber.org/zap v1.24.0
golang.org/x/crypto v0.7.0
golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb
-google.golang.org/protobuf v1.30.1-0.20230508203708-b8fc77060104
+google.golang.org/protobuf v1.31.0
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
gopkg.in/go-playground/validator.v9 v9.31.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
@@ -82,12 +82,12 @@ require (
github.com/mutecomm/go-sqlcipher/v4 v4.4.2
github.com/schollz/peerdiscovery v1.7.0
github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
-github.com/waku-org/go-waku v0.6.1-0.20230605200314-b0c094b0b663
+github.com/waku-org/go-waku v0.7.1-0.20230630125546-47cdb86aaf07
github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1
go.uber.org/multierr v1.11.0
golang.org/x/exp v0.0.0-20230321023759-10a507213a29
-golang.org/x/net v0.8.0
+golang.org/x/net v0.10.0
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
)
@@ -113,13 +113,13 @@ require (
github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect
github.com/anacrolix/utp v0.1.0 // indirect
github.com/andybalholm/cascadia v1.2.0 // indirect
-github.com/benbjohnson/clock v1.3.0 // indirect
+github.com/benbjohnson/clock v1.3.5 // indirect
github.com/benbjohnson/immutable v0.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.2.0 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/btcsuite/btcd v0.22.1 // indirect
-github.com/btcsuite/btcd/btcec/v2 v2.3.0 // indirect
+github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -128,7 +128,7 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cruxic/go-hmac-drbg v0.0.0-20170206035330-84c46983886d // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
@@ -151,7 +151,7 @@ require (
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gopacket v1.1.19 // indirect
-github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -162,11 +162,11 @@ require (
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
-github.com/huin/goupnp v1.1.0 // indirect
+github.com/huin/goupnp v1.2.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
-github.com/klauspost/compress v1.16.4 // indirect
-github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+github.com/klauspost/compress v1.16.5 // indirect
+github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
@@ -175,19 +175,19 @@ require (
github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
github.com/libp2p/go-mplex v0.7.0 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
-github.com/libp2p/go-nat v0.1.0 // indirect
+github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
-github.com/libp2p/go-reuseport v0.2.0 // indirect
+github.com/libp2p/go-reuseport v0.3.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
-github.com/mattn/go-isatty v0.0.18 // indirect
+github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-github.com/miekg/dns v1.1.53 // indirect
+github.com/miekg/dns v1.1.54 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
-github.com/minio/sha256-simd v1.0.0 // indirect
+github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
@@ -196,10 +196,10 @@ require (
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
-github.com/multiformats/go-multicodec v0.8.1 // indirect
+github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
-github.com/onsi/ginkgo/v2 v2.9.2 // indirect
+github.com/onsi/ginkgo/v2 v2.9.7 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.2 // indirect
@@ -220,15 +220,15 @@ require (
github.com/pion/udp v0.1.1 // indirect
github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
-github.com/prometheus/client_model v0.3.0 // indirect
+github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
-github.com/prometheus/procfs v0.9.0 // indirect
+github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-19 v0.3.2 // indirect
github.com/quic-go/qtls-go1-20 v0.2.2 // indirect
github.com/quic-go/quic-go v0.33.0 // indirect
-github.com/quic-go/webtransport-go v0.5.2 // indirect
+github.com/quic-go/webtransport-go v0.5.3 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
@@ -241,14 +241,14 @@ require (
github.com/shirou/gopsutil v3.21.5+incompatible // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
-github.com/status-im/go-multiaddr-ethv4 v1.2.4 // indirect
+github.com/status-im/go-multiaddr-ethv4 v1.2.5 // indirect
github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 // indirect
github.com/tklauser/go-sysconf v0.3.6 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/urfave/cli/v2 v2.24.4 // indirect
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 // indirect
-github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230601172541-0fad5ff68671 // indirect
+github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 // indirect
github.com/waku-org/go-zerokit-rln v0.1.12 // indirect
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230331231302-258cacb91327 // indirect
github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230331223149-f90e66aebb0d // indirect
@@ -260,23 +260,22 @@ require (
github.com/yeqown/reedsolomon v1.0.0 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.opencensus.io v0.24.0 // indirect
-go.uber.org/atomic v1.10.0 // indirect
-go.uber.org/dig v1.16.1 // indirect
+go.uber.org/atomic v1.11.0 // indirect
+go.uber.org/dig v1.17.0 // indirect
go.uber.org/fx v1.19.2 // indirect
golang.org/x/mod v0.10.0 // indirect
-golang.org/x/sync v0.1.0 // indirect
-golang.org/x/sys v0.7.0 // indirect
-golang.org/x/term v0.6.0 // indirect
-golang.org/x/text v0.8.0 // indirect
-golang.org/x/tools v0.7.0 // indirect
+golang.org/x/sync v0.2.0 // indirect
+golang.org/x/sys v0.8.0 // indirect
+golang.org/x/term v0.8.0 // indirect
+golang.org/x/text v0.9.0 // indirect
+golang.org/x/tools v0.9.1 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
-lukechampine.com/blake3 v1.1.7 // indirect
+lukechampine.com/blake3 v1.2.1 // indirect
modernc.org/libc v1.11.82 // indirect
modernc.org/mathutil v1.4.1 // indirect
modernc.org/memory v1.0.5 // indirect
modernc.org/sqlite v1.14.2-0.20211125151325-d4ed92c0a70f // indirect
nhooyr.io/websocket v1.8.7 // indirect
zombiezen.com/go/sqlite v0.8.0 // indirect
)
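
For reviewers who want to reproduce or sanity-check a bump like this, the usual Go toolchain flow is roughly the following (a sketch, using two of the direct dependencies from the require blocks above; go mod tidy is what drags the // indirect entries such as benbjohnson/clock and klauspost/compress along):

    go get github.com/libp2p/go-libp2p@v0.28.1
    go get github.com/waku-org/go-waku@v0.7.1-0.20230630125546-47cdb86aaf07
    go mod tidy

go mod tidy also rewrites go.sum, which is where most of the remaining churn in this commit comes from.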

go.sum

@@ -388,8 +388,9 @@ github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw=
github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/benbjohnson/immutable v0.3.0 h1:TVRhuZx2wG9SZ0LRdqlbs9S5BZ6Y24hJEHTCgWHZEIw=
github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
@@ -421,8 +422,8 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P
github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c=
github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y=
-github.com/btcsuite/btcd/btcec/v2 v2.3.0 h1:S/6K1GEwlEsFzZP4cOOl5mg6PEd/pr0zz7hvXcaxhJ4=
-github.com/btcsuite/btcd/btcec/v2 v2.3.0/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
+github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
+github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -657,9 +658,9 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vs
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
-github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
@@ -783,10 +784,6 @@ github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
-github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
-github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
@@ -821,7 +818,7 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
@@ -841,15 +838,10 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
-github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
-github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@@ -886,12 +878,6 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
-github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
-github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
-github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
@@ -1004,7 +990,6 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -1026,8 +1011,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4=
-github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -1065,7 +1050,6 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -1137,11 +1121,10 @@ github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
-github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
-github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU=
-github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY=
+github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -1254,7 +1237,6 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@@ -1287,26 +1269,23 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
-github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
-github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -1314,8 +1293,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@@ -1331,7 +1310,6 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/ladydascalie/currency v1.6.0 h1:r5s/TMCYcpn6jPRHLV3F8nI7YjpY8trvstfuixxiHns=
github.com/ladydascalie/currency v1.6.0/go.mod h1:C9eil8e6tthhBb5yhwoH1U0LT5hm1BP/g+v/V82KYjY=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -1348,8 +1326,8 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.27.3 h1:tkV/zm3KCZ4R5er9Xcs2pt0YNB4JH0iBfGAtHJdLHRs=
-github.com/libp2p/go-libp2p v0.27.3/go.mod h1:FAvvfQa/YOShUYdiSS03IR9OXzkcJXwcNA2FUCh9ImE=
+github.com/libp2p/go-libp2p v0.28.1 h1:YurK+ZAI6cKfASLJBVFkpVBdl3wGhFi6fusOt725ii8=
+github.com/libp2p/go-libp2p v0.28.1/go.mod h1:s3Xabc9LSwOcnv9UD4nORnXKTsWkPMkIMB/JIGXVnzk=
github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo=
@@ -1360,14 +1338,12 @@ github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY
github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
-github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg=
-github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM=
-github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
-github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
-github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
-github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
+github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw=
+github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI=
github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
@@ -1426,8 +1402,8 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
-github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -1458,8 +1434,8 @@ github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8/go.mod h1:Uz8uoD6o+eQ
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
-github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
+github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
@@ -1469,8 +1445,9 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdn
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -1498,11 +1475,9 @@ github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/f
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
@@ -1536,14 +1511,14 @@ github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDu
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
-github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8=
-github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
-github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
-github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo=
github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
@@ -1604,8 +1579,8 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
-github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss=
+github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
@@ -1620,7 +1595,7 @@ github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQ
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
+github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1798,8 +1773,8 @@ github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
+github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -1807,8 +1782,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -1842,8 +1817,8 @@ github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.2/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
+github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
@@ -1855,8 +1830,8 @@ github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8G
github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
-github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk=
-github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
+github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU=
+github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -1877,8 +1852,8 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/dnscache v0.0.0-20190621150935-06bb5526f76b/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
@@ -1995,8 +1970,8 @@ github.com/status-im/doubleratchet v3.0.0+incompatible h1:aJ1ejcSERpSzmWZBgtfYti
github.com/status-im/doubleratchet v3.0.0+incompatible/go.mod h1:1sqR0+yhiM/bd+wrdX79AOt2csZuJOni0nUDzKNuqOU=
github.com/status-im/go-ethereum v1.10.25-status.6 h1:5YC8k1inTBqA6LpON0uX6y86niOKukA/LnKxzd5GFyI=
github.com/status-im/go-ethereum v1.10.25-status.6/go.mod h1:Dt4K5JYMhJRdtXJwBEyGZLZn9iz/chSOZyjVmt5ZhwQ=
-github.com/status-im/go-multiaddr-ethv4 v1.2.4 h1:7fw0Y48TJXEqx4fOHlDOUiM/uBq9zG5w4x975Mjh4E0=
-github.com/status-im/go-multiaddr-ethv4 v1.2.4/go.mod h1:PDh4D7h5CvecPIy0ji0rLNwTnzzEcyz9uTPHD42VyH4=
+github.com/status-im/go-multiaddr-ethv4 v1.2.5 h1:pN+ey6wYKbvNNu5/xq9+VL0N8Yq0pZUTbZp0URg+Yn4=
+github.com/status-im/go-multiaddr-ethv4 v1.2.5/go.mod h1:Fhe/18yWU5QwlAYiOO3Bb1BLe0bn5YobcNBHsjRr4kk=
github.com/status-im/go-sqlcipher/v4 v4.5.4-status.2 h1:Oi9JTAI2DZEe5UKlpUcvKBCCSn3ULsLIrix7jPnEoPE=
github.com/status-im/go-sqlcipher/v4 v4.5.4-status.2/go.mod h1:mF2UmIpBnzFeBdu/ypTDb/LdbS0nk0dfSN1WUsWTjMA=
github.com/status-im/gomoji v1.1.3-0.20220213022530-e5ac4a8732d4 h1:CtobZoiNdHpx+xurFxnuJ1xsGm3oKMfcZkB3vmomJmA=
@@ -2009,8 +1984,8 @@ github.com/status-im/markdown v0.0.0-20230314100416-26c6f74522d5/go.mod h1:5rjPy
github.com/status-im/migrate/v4 v4.6.2-status.2/go.mod h1:c/kc90n47GZu/58nnz1OMLTf7uE4Da4gZP5qmU+A/v8=
github.com/status-im/migrate/v4 v4.6.2-status.3 h1:Khwjb59NzniloUr5i9s9AtkEyqBbQFt1lkoAu66sAu0=
github.com/status-im/migrate/v4 v4.6.2-status.3/go.mod h1:c/kc90n47GZu/58nnz1OMLTf7uE4Da4gZP5qmU+A/v8=
-github.com/status-im/rendezvous v1.3.6 h1:iZTmTjNjy0aHtwpr+qoqZfDcwHDlxp/JMh3AOCi3gnc=
-github.com/status-im/rendezvous v1.3.6/go.mod h1:wznFLwGWJl8s0EFEBn9mCsSmLvEvvMTpW9qTNJEZLFY=
+github.com/status-im/rendezvous v1.3.7 h1:rZGWsFCjPV3MWeUkLkZSOGTAvyRf+rxx5hnEGLE4OHg=
+github.com/status-im/rendezvous v1.3.7/go.mod h1:r0vCbQJByTteMajN0f+Mcet/Vd7uAXxFPfewNpI2iXQ=
github.com/status-im/resize v0.0.0-20201215164250-7c6d9f0d3088 h1:ClCAP2FPCvl8hGMhbUx/tq/sOu2wibztAa5jAvQEe4Q=
github.com/status-im/resize v0.0.0-20201215164250-7c6d9f0d3088/go.mod h1:+92j1tN27DypDeBFxkg0uzkqfh1bNHTZe3Bv2PjvxpM=
github.com/status-im/status-go/extkeys v1.1.2 h1:FSjARgDathJ3rIapJt851LsIXP9Oyuu2M2jPJKuzloU=
@@ -2046,8 +2021,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syncthing/syncthing v0.14.48-rc.4/go.mod h1:nw3siZwHPA6M8iSfjDCWQ402eqvEIasMQOE8nFOxy7M=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -2082,10 +2057,6 @@ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:s
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -2110,10 +2081,10 @@ github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98 h1:xwY0kW5XZFimdqfZb9cZwT1S3VJP9j3AE6bdNd9boXM=
github.com/waku-org/go-discover v0.0.0-20221209174356-61c833f34d98/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw=
-github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230601172541-0fad5ff68671 h1:iOCDabjZ11Zk0ejdWBR54OEFA/rRZdQgIrX6Rv4U7AM=
-github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230601172541-0fad5ff68671/go.mod h1:/1YwD6sx3xsbrSkVa4++e8AUDcUjC035bgKwDsZo+0Q=
-github.com/waku-org/go-waku v0.6.1-0.20230605200314-b0c094b0b663 h1:BBRrzCEy0p+TEdspf+gAvl6uc3FR27oHL0t4BgrLaDk=
-github.com/waku-org/go-waku v0.6.1-0.20230605200314-b0c094b0b663/go.mod h1:qz1a/J+lTA+hJy61aVgGP9E85SnAQ2gldRgU97aUB/U=
+github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7 h1:0e1h+p84yBp0IN7AqgbZlV7lgFBjm214lgSOE7CeJmE=
+github.com/waku-org/go-libp2p-rendezvous v0.0.0-20230628220917-7b4e5ae4c0e7/go.mod h1:pFvOZ9YTFsW0o5zJW7a0B5tr1owAijRWJctXJ2toL04=
+github.com/waku-org/go-waku v0.7.1-0.20230630125546-47cdb86aaf07 h1:lH7mSbj0CIjCpCSQDDj9j/YNbI5XEyKXBNFP0lGucE4=
+github.com/waku-org/go-waku v0.7.1-0.20230630125546-47cdb86aaf07/go.mod h1:+7GhIHEjU3g6dMsYYlmLMJYIGc6ufnEkFxSQ83cSclI=
github.com/waku-org/go-zerokit-rln v0.1.12 h1:66+tU6sTlmUpuUlEv7kCFOGZ37MwZYFJBXHcm8QquwU=
github.com/waku-org/go-zerokit-rln v0.1.12/go.mod h1:MUW+wB6Yj7UBMdZrhko7oHfUZeY2wchggXYjpUiMoac=
github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230331231302-258cacb91327 h1:Q5XQqo+PEmvrybT8D7BEsKCwIYDi80s+00Q49cfm9Gs=
@@ -2231,10 +2202,10 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8=
-go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
+go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY=
go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
@@ -2449,8 +2420,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -2486,8 +2457,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2507,7 +2478,6 @@ golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -2643,20 +2613,20 @@ golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2667,8 +2637,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -2768,8 +2738,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -2968,8 +2938,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.1-0.20230508203708-b8fc77060104 h1:3QvSsKaCYve39lBVTCnmryh9VdHhPfRJJXquZqsEqgI=
google.golang.org/protobuf v1.30.1-0.20230508203708-b8fc77060104/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -3080,8 +3050,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878=
@ -3210,8 +3180,6 @@ modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
modernc.org/z v1.1.2/go.mod h1:sj9T1AGBG0dm6SCVzldPOHWrif6XBpooJtbttMn1+Js=
modernc.org/z v1.2.19/go.mod h1:+ZpP0pc4zz97eukOzW3xagV/lS82IpPN9NGG5pNF9vY=
modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3 h1:slmdOY3vp8a7KQbHkL+FLbvbkgMqmXojpFUO/jENuqQ=
olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3/go.mod h1:oVgVk4OWVDi43qWBEyGhXgYxt7+ED4iYNpTngSLX2Iw=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=


@ -74,7 +74,10 @@ func (c *clock) WithTimeout(parent context.Context, t time.Duration) (context.Co
// Mock represents a mock clock that only moves forward programmatically.
// It can be preferable to a real-time clock when testing time-based functionality.
type Mock struct {
mu sync.Mutex
// mu protects all other fields in this struct, and the data that they
// point to.
mu sync.Mutex
now time.Time // current time
timers clockTimers // tickers & timers
}
@ -89,7 +92,9 @@ func NewMock() *Mock {
// This should only be called from a single goroutine at a time.
func (m *Mock) Add(d time.Duration) {
// Calculate the final current time.
m.mu.Lock()
t := m.now.Add(d)
m.mu.Unlock()
// Continue to execute timers until there are no more before the new time.
for {
@ -126,6 +131,23 @@ func (m *Mock) Set(t time.Time) {
gosched()
}
// WaitForAllTimers advances the clock until all timers have expired
func (m *Mock) WaitForAllTimers() time.Time {
// Continue to execute timers until there are no more
for {
m.mu.Lock()
if len(m.timers) == 0 {
m.mu.Unlock()
return m.Now()
}
sort.Sort(m.timers)
next := m.timers[len(m.timers)-1].Next()
m.mu.Unlock()
m.Set(next)
}
}
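For context, a minimal sketch (not part of this diff) of how the new WaitForAllTimers API drains every outstanding timer on a mock clock; note the AfterFunc callbacks now run in their own goroutines, hence the channel synchronization:

package main

import (
    "fmt"
    "time"

    "github.com/benbjohnson/clock"
)

func main() {
    mock := clock.NewMock()
    fired := make(chan struct{}, 2)
    mock.AfterFunc(time.Second, func() { fired <- struct{}{} })
    mock.AfterFunc(time.Minute, func() { fired <- struct{}{} })

    // Advance the mock clock past every registered deadline in one call.
    end := mock.WaitForAllTimers()
    <-fired
    <-fired
    fmt.Println("mock time after draining:", end)
}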
// runNextTimer executes the next timer in chronological order and moves the
// current time to the timer's next tick time. The next time is not executed if
// its next time is after the max time. Returns true if a timer was executed.
@ -150,10 +172,11 @@ func (m *Mock) runNextTimer(max time.Time) bool {
// Move "now" forward and unlock clock.
m.now = t.Next()
now := m.now
m.mu.Unlock()
// Execute timer.
t.Tick(m.now)
t.Tick(now)
return true
}
@ -162,12 +185,20 @@ func (m *Mock) After(d time.Duration) <-chan time.Time {
return m.Timer(d).C
}
// AfterFunc waits for the duration to elapse and then executes a function.
// AfterFunc waits for the duration to elapse and then executes a function in its own goroutine.
// A Timer is returned that can be stopped.
func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
t := m.Timer(d)
t.C = nil
t.fn = f
m.mu.Lock()
defer m.mu.Unlock()
ch := make(chan time.Time, 1)
t := &Timer{
c: ch,
fn: f,
mock: m,
next: m.now.Add(d),
stopped: false,
}
m.timers = append(m.timers, (*internalTimer)(t))
return t
}
@ -219,7 +250,6 @@ func (m *Mock) Ticker(d time.Duration) *Ticker {
// Timer creates a new instance of Timer.
func (m *Mock) Timer(d time.Duration) *Timer {
m.mu.Lock()
defer m.mu.Unlock()
ch := make(chan time.Time, 1)
t := &Timer{
C: ch,
@ -229,9 +259,14 @@ func (m *Mock) Timer(d time.Duration) *Timer {
stopped: false,
}
m.timers = append(m.timers, (*internalTimer)(t))
now := m.now
m.mu.Unlock()
m.runNextTimer(now)
return t
}
// removeClockTimer removes a timer from m.timers. m.mu MUST be held
// when this method is called.
func (m *Mock) removeClockTimer(t clockTimer) {
for i, timer := range m.timers {
if timer == t {
@ -313,7 +348,7 @@ func (t *internalTimer) Tick(now time.Time) {
t.mock.mu.Lock()
if t.fn != nil {
// defer function execution until the lock is released, and run it in its own goroutine
defer t.fn()
defer func() { go t.fn() }()
} else {
t.c <- now
}
@ -324,12 +359,13 @@ func (t *internalTimer) Tick(now time.Time) {
// Ticker holds a channel that receives "ticks" at regular intervals.
type Ticker struct {
C <-chan time.Time
c chan time.Time
ticker *time.Ticker // realtime impl, if set
next time.Time // next tick time
mock *Mock // mock clock, if set
d time.Duration // time between ticks
C <-chan time.Time
c chan time.Time
ticker *time.Ticker // realtime impl, if set
next time.Time // next tick time
mock *Mock // mock clock, if set
d time.Duration // time between ticks
stopped bool // True if stopped, false if running
}
// Stop turns off the ticker.
@ -339,6 +375,7 @@ func (t *Ticker) Stop() {
} else {
t.mock.mu.Lock()
t.mock.removeClockTimer((*internalTicker)(t))
t.stopped = true
t.mock.mu.Unlock()
}
}
@ -353,6 +390,11 @@ func (t *Ticker) Reset(dur time.Duration) {
t.mock.mu.Lock()
defer t.mock.mu.Unlock()
if t.stopped {
t.mock.timers = append(t.mock.timers, (*internalTicker)(t))
t.stopped = false
}
t.d = dur
t.next = t.mock.now.Add(dur)
}
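A sketch of the behavior this hunk fixes: resetting a stopped mock ticker now re-registers it, where previously the Reset was silently lost. Hypothetical usage, not from this diff:

package main

import (
    "fmt"
    "time"

    "github.com/benbjohnson/clock"
)

func main() {
    mock := clock.NewMock()
    t := mock.Ticker(time.Second)
    t.Stop()                 // removes the ticker and marks it stopped
    t.Reset(2 * time.Second) // re-adds the stopped ticker with the new period
    mock.Add(2 * time.Second)
    fmt.Println(<-t.C) // first tick arrives at the new interval
}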
@ -365,7 +407,9 @@ func (t *internalTicker) Tick(now time.Time) {
case t.c <- now:
default:
}
t.mock.mu.Lock()
t.next = now.Add(t.d)
t.mock.mu.Unlock()
gosched()
}


@ -1,5 +1,5 @@
// Copyright (c) 2015 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (c) 2015-2023 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
@ -9,7 +9,7 @@ package secp256k1
// public key using Diffie-Hellman key exchange (ECDH) (RFC 5903).
// RFC5903 Section 9 states we should only return x.
//
// It is recommended to securily hash the result before using as a cryptographic
// It is recommended to securely hash the result before using as a cryptographic
// key.
func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte {
var point, result JacobianPoint
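A hedged usage sketch of GenerateSharedSecret, hashing the returned x coordinate as the comment recommends; the import path is assumed to be the vendored github.com/decred/dcrd/dcrec/secp256k1/v4:

package main

import (
    "crypto/sha256"
    "fmt"

    secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
    alice, _ := secp256k1.GeneratePrivateKey()
    bob, _ := secp256k1.GeneratePrivateKey()

    // RFC 5903 section 9: only x is returned, so hash it before use as a key.
    k1 := sha256.Sum256(secp256k1.GenerateSharedSecret(alice, bob.PubKey()))
    k2 := sha256.Sum256(secp256k1.GenerateSharedSecret(bob, alice.PubKey()))
    fmt.Println(k1 == k2) // true: both sides derive the same key
}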


@ -1,12 +1,13 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015-2022 The Decred developers
// Copyright (c) 2015-2023 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package secp256k1
import (
csprng "crypto/rand"
cryptorand "crypto/rand"
"io"
)
// PrivateKey provides facilities for working with secp256k1 private keys within
@ -26,29 +27,36 @@ func NewPrivateKey(key *ModNScalar) *PrivateKey {
// interpreted as an unsigned 256-bit big-endian integer in the range [0, N-1],
// where N is the order of the curve.
//
// Note that this means passing a slice with more than 32 bytes is truncated and
// that truncated value is reduced modulo N. It is up to the caller to either
// provide a value in the appropriate range or choose to accept the described
// behavior.
// WARNING: This means passing a slice with more than 32 bytes is truncated and
// that truncated value is reduced modulo N. Further, 0 is not a valid private
// key. It is up to the caller to provide a value in the appropriate range of
// [1, N-1]. Failure to do so will either result in an invalid private key or
// potentially weak private keys that have bias that could be exploited.
//
// Typically callers should simply make use of GeneratePrivateKey when creating
// private keys which properly handles generation of appropriate values.
// This function primarily exists to provide a mechanism for converting
// serialized private keys that are already known to be good.
//
// Typically callers should make use of GeneratePrivateKey or
// GeneratePrivateKeyFromRand when creating private keys since they properly
// handle generation of appropriate values.
func PrivKeyFromBytes(privKeyBytes []byte) *PrivateKey {
var privKey PrivateKey
privKey.Key.SetByteSlice(privKeyBytes)
return &privKey
}
// GeneratePrivateKey generates and returns a new cryptographically secure
// private key that is suitable for use with secp256k1.
func GeneratePrivateKey() (*PrivateKey, error) {
// generatePrivateKey generates and returns a new private key that is suitable
// for use with secp256k1 using the provided reader as a source of entropy. The
// provided reader must be a source of cryptographically secure randomness to
// avoid weak private keys.
func generatePrivateKey(rand io.Reader) (*PrivateKey, error) {
// The group order is close enough to 2^256 that there is only roughly a 1
// in 2^128 chance of generating an invalid private key, so this loop will
// virtually never run more than a single iteration in practice.
var key PrivateKey
var b32 [32]byte
for valid := false; !valid; {
if _, err := csprng.Read(b32[:]); err != nil {
if _, err := io.ReadFull(rand, b32[:]); err != nil {
return nil, err
}
@ -62,6 +70,20 @@ func GeneratePrivateKey() (*PrivateKey, error) {
return &key, nil
}
// GeneratePrivateKey generates and returns a new cryptographically secure
// private key that is suitable for use with secp256k1.
func GeneratePrivateKey() (*PrivateKey, error) {
return generatePrivateKey(cryptorand.Reader)
}
// GeneratePrivateKeyFromRand generates a private key that is suitable for use
// with secp256k1 using the provided reader as a source of entropy. The
// provided reader must be a source of cryptographically secure randomness, such
// as [crypto/rand.Reader], to avoid weak private keys.
func GeneratePrivateKeyFromRand(rand io.Reader) (*PrivateKey, error) {
return generatePrivateKey(rand)
}
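The new GeneratePrivateKeyFromRand entry point makes the entropy source injectable. A sketch of a reproducible test key; a fixed reader is only acceptable in tests, and production code must use a CSPRNG such as crypto/rand.Reader:

package main

import (
    "bytes"
    "fmt"

    secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
    // 64 bytes of fixed "entropy": enough for the retry loop's 32-byte reads.
    seed := bytes.NewReader(bytes.Repeat([]byte{0x42}, 64))
    priv, err := secp256k1.GeneratePrivateKeyFromRand(seed)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", priv.Serialize()) // same output across runs
}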
// PubKey computes and returns the public key corresponding to this private key.
func (p *PrivateKey) PubKey() *PublicKey {
var result JacobianPoint


@ -258,10 +258,10 @@ func (p *Profile) postDecode() error {
// If this a main linux kernel mapping with a relocation symbol suffix
// ("[kernel.kallsyms]_text"), extract said suffix.
// It is fairly hacky to handle at this level, but the alternatives appear even worse.
if strings.HasPrefix(m.File, "[kernel.kallsyms]") {
m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "")
const prefix = "[kernel.kallsyms]"
if strings.HasPrefix(m.File, prefix) {
m.KernelRelocationSymbol = m.File[len(prefix):]
}
}
functions := make(map[uint64]*Function, len(p.Function))
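The switch from strings.ReplaceAll to prefix slicing only strips the leading marker; a standalone illustration with hypothetical values:

package main

import (
    "fmt"
    "strings"
)

func main() {
    const prefix = "[kernel.kallsyms]"
    file := "[kernel.kallsyms]_text"
    if strings.HasPrefix(file, prefix) {
        // Slicing removes exactly the leading prefix, whereas ReplaceAll
        // would also delete any later occurrence of the marker.
        fmt.Println(file[len(prefix):]) // "_text"
    }
}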


@ -194,9 +194,13 @@ type soapBody struct {
// SOAPFaultError implements error, and contains SOAP fault information.
type SOAPFaultError struct {
FaultCode string `xml:"faultCode"`
FaultString string `xml:"faultString"`
FaultCode string `xml:"faultcode"`
FaultString string `xml:"faultstring"`
Detail struct {
UPnPError struct {
Errorcode int `xml:"errorCode"`
ErrorDescription string `xml:"errorDescription"`
} `xml:"UPnPError"`
Raw []byte `xml:",innerxml"`
} `xml:"detail"`
}
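encoding/xml matches element names against struct tags case-sensitively, so the old "faultCode"/"faultString" tags never matched SOAP 1.1's lowercase elements. A minimal sketch of the corrected decoding, with a hypothetical payload:

package main

import (
    "encoding/xml"
    "fmt"
)

// Mirrors the corrected tags above.
type soapFault struct {
    FaultCode   string `xml:"faultcode"`
    FaultString string `xml:"faultstring"`
}

func main() {
    raw := `<Fault><faultcode>s:Client</faultcode><faultstring>UPnPError</faultstring></Fault>`
    var f soapFault
    if err := xml.Unmarshal([]byte(raw), &f); err != nil {
        panic(err)
    }
    fmt.Println(f.FaultCode, f.FaultString) // s:Client UPnPError
}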


@ -16,6 +16,15 @@ This package provides various compression algorithms.
# changelog
* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767


@ -1,989 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright (c) 2015 Klaus Post
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"fmt"
"io"
"math"
)
const (
NoCompression = 0
BestSpeed = 1
BestCompression = 9
DefaultCompression = -1
// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
// entropy encoding. This mode is useful in compressing data that has
// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
// that lacks an entropy encoder. Compression gains are achieved when
// certain bytes in the input stream occur more frequently than others.
//
// Note that HuffmanOnly produces a compressed output that is
// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
// continue to be able to decompress this output.
HuffmanOnly = -2
ConstantCompression = HuffmanOnly // compatibility alias.
logWindowSize = 15
windowSize = 1 << logWindowSize
windowMask = windowSize - 1
logMaxOffsetSize = 15 // Standard DEFLATE
minMatchLength = 4 // The smallest match that the compressor looks for
maxMatchLength = 258 // The longest match for the compressor
minOffsetSize = 1 // The shortest offset that makes any sense
// The maximum number of tokens we will encode at a time.
// Smaller sizes usually create less optimal blocks.
// Bigger can make context switching slow.
// We use this for levels 7-9, so we make it big.
maxFlateBlockTokens = 1 << 15
maxStoreBlockSize = 65535
hashBits = 17 // After 17 performance degrades
hashSize = 1 << hashBits
hashMask = (1 << hashBits) - 1
hashShift = (hashBits + minMatchLength - 1) / minMatchLength
maxHashOffset = 1 << 28
skipNever = math.MaxInt32
debugDeflate = false
)
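The HuffmanOnly mode described in the constants above is still exposed by the library; a sketch of entropy-coding already-LZ-compressed data with it, assuming the github.com/klauspost/compress/flate import path:

package main

import (
    "bytes"
    "fmt"

    "github.com/klauspost/compress/flate"
)

func main() {
    var buf bytes.Buffer
    // HuffmanOnly skips LZ match searching; useful for data that is already
    // LZ-compressed (e.g. Snappy) but has skewed byte frequencies.
    w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
    if err != nil {
        panic(err)
    }
    w.Write(bytes.Repeat([]byte("aab"), 100))
    w.Close()
    fmt.Println(buf.Len(), "bytes of RFC 1951-compliant output")
}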
type compressionLevel struct {
good, lazy, nice, chain, fastSkipHashing, level int
}
// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
var levels = []compressionLevel{
{}, // 0
// Level 1-6 uses specialized algorithm - values not used
{0, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 2},
{0, 0, 0, 0, 0, 3},
{0, 0, 0, 0, 0, 4},
{0, 0, 0, 0, 0, 5},
{0, 0, 0, 0, 0, 6},
// Levels 7-9 use increasingly more lazy matching
// and increasingly stringent conditions for "good enough".
{8, 12, 16, 24, skipNever, 7},
{16, 30, 40, 64, skipNever, 8},
{32, 258, 258, 1024, skipNever, 9},
}
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
type advancedState struct {
// deflate state
length int
offset int
maxInsertIndex int
chainHead int
hashOffset int
ii uint16 // position of last match, intended to overflow to reset.
// input window: unprocessed data is window[index:windowEnd]
index int
estBitsPerByte int
hashMatch [maxMatchLength + minMatchLength]uint32
// Input hash chains
// hashHead[hashValue] contains the largest inputIndex with the specified hash value
// If hashHead[hashValue] is within the current window, then
// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
// with the same hash value.
hashHead [hashSize]uint32
hashPrev [windowSize]uint32
}
type compressor struct {
compressionLevel
h *huffmanEncoder
w *huffmanBitWriter
// compression algorithm
fill func(*compressor, []byte) int // copy data to window
step func(*compressor) // process window
window []byte
windowEnd int
blockStart int // window index where current tokens start
err error
// queued output tokens
tokens tokens
fast fastEnc
state *advancedState
sync bool // requesting flush
byteAvailable bool // if true, still need to process window[index-1].
}
func (d *compressor) fillDeflate(b []byte) int {
s := d.state
if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
// shift the window by windowSize
//copy(d.window[:], d.window[windowSize:2*windowSize])
*(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
s.index -= windowSize
d.windowEnd -= windowSize
if d.blockStart >= windowSize {
d.blockStart -= windowSize
} else {
d.blockStart = math.MaxInt32
}
s.hashOffset += windowSize
if s.hashOffset > maxHashOffset {
delta := s.hashOffset - 1
s.hashOffset -= delta
s.chainHead -= delta
// Iterate over slices instead of arrays to avoid copying
// the entire table onto the stack (Issue #18625).
for i, v := range s.hashPrev[:] {
if int(v) > delta {
s.hashPrev[i] = uint32(int(v) - delta)
} else {
s.hashPrev[i] = 0
}
}
for i, v := range s.hashHead[:] {
if int(v) > delta {
s.hashHead[i] = uint32(int(v) - delta)
} else {
s.hashHead[i] = 0
}
}
}
}
n := copy(d.window[d.windowEnd:], b)
d.windowEnd += n
return n
}
func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
if index > 0 || eof {
var window []byte
if d.blockStart <= index {
window = d.window[d.blockStart:index]
}
d.blockStart = index
//d.w.writeBlock(tok, eof, window)
d.w.writeBlockDynamic(tok, eof, window, d.sync)
return d.w.err
}
return nil
}
// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored when there are no matches, or
// only huffman encoded.
func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
if index > 0 || eof {
if d.blockStart <= index {
window := d.window[d.blockStart:index]
// If we removed less than a 64th of all literals
// we huffman compress the block.
if int(tok.n) > len(window)-int(tok.n>>6) {
d.w.writeBlockHuff(eof, window, d.sync)
} else {
// Write a dynamic huffman block.
d.w.writeBlockDynamic(tok, eof, window, d.sync)
}
} else {
d.w.writeBlock(tok, eof, nil)
}
d.blockStart = index
return d.w.err
}
return nil
}
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
// Do not fill window if we are in store-only or huffman mode.
if d.level <= 0 {
return
}
if d.fast != nil {
// encode the last data, but discard the result
if len(b) > maxMatchOffset {
b = b[len(b)-maxMatchOffset:]
}
d.fast.Encode(&d.tokens, b)
d.tokens.Reset()
return
}
s := d.state
// If we are given too much, cut it.
if len(b) > windowSize {
b = b[len(b)-windowSize:]
}
// Add all to window.
n := copy(d.window[d.windowEnd:], b)
// Calculate 256 hashes at a time (more L1 cache hits)
loops := (n + 256 - minMatchLength) / 256
for j := 0; j < loops; j++ {
startindex := j * 256
end := startindex + 256 + minMatchLength - 1
if end > n {
end = n
}
tocheck := d.window[startindex:end]
dstSize := len(tocheck) - minMatchLength + 1
if dstSize <= 0 {
continue
}
dst := s.hashMatch[:dstSize]
bulkHash4(tocheck, dst)
var newH uint32
for i, val := range dst {
di := i + startindex
newH = val & hashMask
// Get previous value with the same hash.
// Our chain should point to the previous value.
s.hashPrev[di&windowMask] = s.hashHead[newH]
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
}
// Update window information.
d.windowEnd += n
s.index = n
}
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
minMatchLook = lookahead
}
win := d.window[0 : pos+minMatchLook]
// We quit when we get a match that's at least nice long
nice := len(win) - pos
if d.nice < nice {
nice = d.nice
}
// If we've got a match that's good enough, only look in 1/4 the chain.
tries := d.chain
length = minMatchLength - 1
wEnd := win[pos+length]
wPos := win[pos:]
minIndex := pos - windowSize
if minIndex < 0 {
minIndex = 0
}
offset = 0
if d.chain < 100 {
for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] {
n := matchLen(win[i:i+minMatchLook], wPos)
if n > length {
length = n
offset = pos - i
ok = true
if n >= nice {
// The match is good enough that we don't try to find a better one.
break
}
wEnd = win[pos+n]
}
}
if i <= minIndex {
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
if i < minIndex {
break
}
}
return
}
// Minimum gain to accept a match.
cGain := 4
// Some like it higher (CSV), some like it lower (JSON)
const baseCost = 3
// The base is a 4-byte match, with an additional cost on top.
// Matches must be better than this.
for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] {
n := matchLen(win[i:i+minMatchLook], wPos)
if n > length {
// Calculate gain. Estimate
newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
//fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
if newGain > cGain {
length = n
offset = pos - i
cGain = newGain
ok = true
if n >= nice {
// The match is good enough that we don't try to find a better one.
break
}
wEnd = win[pos+n]
}
}
}
if i <= minIndex {
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
if i < minIndex {
break
}
}
return
}
func (d *compressor) writeStoredBlock(buf []byte) error {
if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
return d.w.err
}
d.w.writeBytes(buf)
return d.w.err
}
// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
return hash4u(binary.LittleEndian.Uint32(b), hashBits)
}
// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
return (u * prime4bytes) >> (32 - h)
}
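hash4u is a multiply-shift hash: multiplying by a large odd constant mixes low input bits into the high bits, and the shift keeps the top h of them. A standalone demonstration, not part of the original file:

package main

import "fmt"

const prime4bytes = 2654435761

func hash4u(u uint32, h uint8) uint32 {
    return (u * prime4bytes) >> (32 - h)
}

func main() {
    // Inputs differing only in their low byte still land far apart
    // in a 2^17-entry table.
    fmt.Println(hash4u(0x64656667, 17), hash4u(0x64656668, 17))
}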
// bulkHash4 will compute hashes using the same
// algorithm as hash4
func bulkHash4(b []byte, dst []uint32) {
if len(b) < 4 {
return
}
hb := binary.LittleEndian.Uint32(b)
dst[0] = hash4u(hb, hashBits)
end := len(b) - 4 + 1
for i := 1; i < end; i++ {
hb = (hb >> 8) | uint32(b[i+3])<<24
dst[i] = hash4u(hb, hashBits)
}
}
func (d *compressor) initDeflate() {
d.window = make([]byte, 2*windowSize)
d.byteAvailable = false
d.err = nil
if d.state == nil {
return
}
s := d.state
s.index = 0
s.hashOffset = 1
s.length = minMatchLength - 1
s.offset = 0
s.chainHead = -1
}
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
func (d *compressor) deflateLazy() {
s := d.state
// Sanity enables additional runtime tests.
// It's intended to be used during development
// to supplement the currently ad-hoc unit tests.
const sanity = debugDeflate
if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
if d.windowEnd != s.index && d.chain > 100 {
// Get literal huffman coder.
if d.h == nil {
d.h = newHuffmanEncoder(maxFlateBlockTokens)
}
var tmp [256]uint16
for _, v := range d.window[s.index:d.windowEnd] {
tmp[v]++
}
d.h.generate(tmp[:], 15)
}
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
for {
if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
lookahead := d.windowEnd - s.index
if lookahead < minMatchLength+maxMatchLength {
if !d.sync {
return
}
if sanity && s.index > d.windowEnd {
panic("index > windowEnd")
}
if lookahead == 0 {
// Flush current output block if any.
if d.byteAvailable {
// There is still one pending token that needs to be flushed
d.tokens.AddLiteral(d.window[s.index-1])
d.byteAvailable = false
}
if d.tokens.n > 0 {
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
return
}
}
if s.index < s.maxInsertIndex {
// Update the hash
hash := hash4(d.window[s.index:])
ch := s.hashHead[hash]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
s.hashHead[hash] = uint32(s.index + s.hashOffset)
}
prevLength := s.length
prevOffset := s.offset
s.length = minMatchLength - 1
s.offset = 0
minIndex := s.index - windowSize
if minIndex < 0 {
minIndex = 0
}
if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
s.length = newLength
s.offset = newOffset
}
}
if prevLength >= minMatchLength && s.length <= prevLength {
// No better match, but check for better match at end...
//
// Skip forward a number of bytes.
// Offset of 2 seems to yield best results. 3 is sometimes better.
const checkOff = 2
// Check all, except full length
if prevLength < maxMatchLength-checkOff {
prevIndex := s.index - 1
if prevIndex+prevLength < s.maxInsertIndex {
end := lookahead
if lookahead > maxMatchLength+checkOff {
end = maxMatchLength + checkOff
}
end += prevIndex
// Hash at match end.
h := hash4(d.window[prevIndex+prevLength:])
ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
// It seems like a pure length metric is best.
if length > prevLength {
prevLength = length
prevOffset = prevIndex - ch2
// Extend back...
for i := checkOff - 1; i >= 0; i-- {
if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
// Emit tokens we "owe"
for j := 0; j <= i; j++ {
d.tokens.AddLiteral(d.window[prevIndex+j])
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
s.index++
if s.index < s.maxInsertIndex {
h := hash4(d.window[s.index:])
ch := s.hashHead[h]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
s.hashHead[h] = uint32(s.index + s.hashOffset)
}
}
break
} else {
prevLength++
}
}
} else if false {
// Check one further ahead.
// Only rarely better, disabled for now.
prevIndex++
h := hash4(d.window[prevIndex+prevLength:])
ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
// It seems like a pure length metric is best.
if length > prevLength+checkOff {
prevLength = length
prevOffset = prevIndex - ch2
prevIndex--
// Extend back...
for i := checkOff; i >= 0; i-- {
if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
// Emit tokens we "owe"
for j := 0; j <= i; j++ {
d.tokens.AddLiteral(d.window[prevIndex+j])
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
s.index++
if s.index < s.maxInsertIndex {
h := hash4(d.window[s.index:])
ch := s.hashHead[h]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
s.hashHead[h] = uint32(s.index + s.hashOffset)
}
}
break
} else {
prevLength++
}
}
}
}
}
}
}
}
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
// Insert in the hash table all strings up to the end of the match.
// index and index-1 are already inserted. If there is not enough
// lookahead, the last two strings are not inserted into the hash
// table.
newIndex := s.index + prevLength - 1
// Calculate missing hashes
end := newIndex
if end > s.maxInsertIndex {
end = s.maxInsertIndex
}
end += minMatchLength - 1
startindex := s.index + 1
if startindex > s.maxInsertIndex {
startindex = s.maxInsertIndex
}
tocheck := d.window[startindex:end]
dstSize := len(tocheck) - minMatchLength + 1
if dstSize > 0 {
dst := s.hashMatch[:dstSize]
bulkHash4(tocheck, dst)
var newH uint32
for i, val := range dst {
di := i + startindex
newH = val & hashMask
// Get previous value with the same hash.
// Our chain should point to the previous value.
s.hashPrev[di&windowMask] = s.hashHead[newH]
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
}
s.index = newIndex
d.byteAvailable = false
s.length = minMatchLength - 1
if d.tokens.n == maxFlateBlockTokens {
// The block includes the current character
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
s.ii = 0
} else {
// Reset, if we got a match this run.
if s.length >= minMatchLength {
s.ii = 0
}
// We have a byte waiting. Emit it.
if d.byteAvailable {
s.ii++
d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
s.index++
// If we have a long run of no matches, skip additional bytes
// Resets when s.ii overflows after 64KB.
if n := int(s.ii) - d.chain; n > 0 {
n = 1 + int(n>>6)
for j := 0; j < n; j++ {
if s.index >= d.windowEnd-1 {
break
}
d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
// Index...
if s.index < s.maxInsertIndex {
h := hash4(d.window[s.index:])
ch := s.hashHead[h]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
s.hashHead[h] = uint32(s.index + s.hashOffset)
}
s.index++
}
// Flush last byte
d.tokens.AddLiteral(d.window[s.index-1])
d.byteAvailable = false
// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
if d.tokens.n == maxFlateBlockTokens {
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
return
}
d.tokens.Reset()
}
}
} else {
s.index++
d.byteAvailable = true
}
}
}
}
func (d *compressor) store() {
if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
d.err = d.writeStoredBlock(d.window[:d.windowEnd])
d.windowEnd = 0
}
}
// fillBlock will fill the buffer with data for huffman-only compression.
// The number of bytes copied is returned.
func (d *compressor) fillBlock(b []byte) int {
n := copy(d.window[d.windowEnd:], b)
d.windowEnd += n
return n
}
// storeHuff will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
return
}
d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
d.err = d.w.err
d.windowEnd = 0
}
// storeFast will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeFast() {
// We only compress if we have maxStoreBlockSize.
if d.windowEnd < len(d.window) {
if !d.sync {
return
}
// Handle extremely small sizes.
if d.windowEnd < 128 {
if d.windowEnd == 0 {
return
}
if d.windowEnd <= 32 {
d.err = d.writeStoredBlock(d.window[:d.windowEnd])
} else {
d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
d.err = d.w.err
}
d.tokens.Reset()
d.windowEnd = 0
d.fast.Reset()
return
}
}
d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
// If we made zero matches, store the block as is.
if d.tokens.n == 0 {
d.err = d.writeStoredBlock(d.window[:d.windowEnd])
// If we removed less than 1/16th, huffman compress the block.
} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
d.err = d.w.err
} else {
d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
d.err = d.w.err
}
d.tokens.Reset()
d.windowEnd = 0
}
// write will add input byte to the stream.
// Unless an error occurs all bytes will be consumed.
func (d *compressor) write(b []byte) (n int, err error) {
if d.err != nil {
return 0, d.err
}
n = len(b)
for len(b) > 0 {
if d.windowEnd == len(d.window) || d.sync {
d.step(d)
}
b = b[d.fill(d, b):]
if d.err != nil {
return 0, d.err
}
}
return n, d.err
}
func (d *compressor) syncFlush() error {
d.sync = true
if d.err != nil {
return d.err
}
d.step(d)
if d.err == nil {
d.w.writeStoredHeader(0, false)
d.w.flush()
d.err = d.w.err
}
d.sync = false
return d.err
}
func (d *compressor) init(w io.Writer, level int) (err error) {
d.w = newHuffmanBitWriter(w)
switch {
case level == NoCompression:
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).store
case level == ConstantCompression:
d.w.logNewTablePenalty = 10
d.window = make([]byte, 32<<10)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeHuff
case level == DefaultCompression:
level = 5
fallthrough
case level >= 1 && level <= 6:
d.w.logNewTablePenalty = 7
d.fast = newFastEnc(level)
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeFast
case 7 <= level && level <= 9:
d.w.logNewTablePenalty = 8
d.state = &advancedState{}
d.compressionLevel = levels[level]
d.initDeflate()
d.fill = (*compressor).fillDeflate
d.step = (*compressor).deflateLazy
default:
return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
}
d.level = level
return nil
}
// reset the state of the compressor.
func (d *compressor) reset(w io.Writer) {
d.w.reset(w)
d.sync = false
d.err = nil
// We only need to reset a few things for Snappy.
if d.fast != nil {
d.fast.Reset()
d.windowEnd = 0
d.tokens.Reset()
return
}
switch d.compressionLevel.chain {
case 0:
// level was NoCompression or ConstantCompression.
d.windowEnd = 0
default:
s := d.state
s.chainHead = -1
for i := range s.hashHead {
s.hashHead[i] = 0
}
for i := range s.hashPrev {
s.hashPrev[i] = 0
}
s.hashOffset = 1
s.index, d.windowEnd = 0, 0
d.blockStart, d.byteAvailable = 0, false
d.tokens.Reset()
s.length = minMatchLength - 1
s.offset = 0
s.ii = 0
s.maxInsertIndex = 0
}
}
func (d *compressor) close() error {
if d.err != nil {
return d.err
}
d.sync = true
d.step(d)
if d.err != nil {
return d.err
}
if d.w.writeStoredHeader(0, true); d.w.err != nil {
return d.w.err
}
d.w.flush()
d.w.reset(nil)
return d.w.err
}
// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more.
// Level 0 (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (ConstantCompression) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
var dw Writer
if err := dw.d.init(w, level); err != nil {
return nil, err
}
return &dw, nil
}
// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
zw, err := NewWriter(w, level)
if err != nil {
return nil, err
}
zw.d.fillWindow(dict)
zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
return zw, err
}
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
d compressor
dict []byte
}
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
return w.d.write(data)
}
// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
// For more about flushing:
// http://www.bolet.org/~pornin/deflate-flush.html
return w.d.syncFlush()
}
// Close flushes and closes the writer.
func (w *Writer) Close() error {
return w.d.close()
}
// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
if len(w.dict) > 0 {
// w was created with NewWriterDict
w.d.reset(dst)
if dst != nil {
w.d.fillWindow(w.dict)
}
} else {
// w was created with NewWriter
w.d.reset(dst)
}
}
// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
w.dict = dict
w.d.reset(dst)
w.d.fillWindow(w.dict)
}
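For reference, the Reset-preserves-dictionary contract documented above can be sketched as follows; a hypothetical round trip, with the expectation that both streams come out byte-identical since the encoder is deterministic:

package main

import (
    "bytes"
    "fmt"

    "github.com/klauspost/compress/flate"
)

func main() {
    dict := []byte("example dictionary")
    var b1, b2 bytes.Buffer

    w, _ := flate.NewWriterDict(&b1, flate.BestCompression, dict)
    w.Write([]byte("example payload"))
    w.Close()

    // Reset keeps w's level and dictionary, so the second stream should
    // match one produced by a fresh NewWriterDict.
    w.Reset(&b2)
    w.Write([]byte("example payload"))
    w.Close()
    fmt.Println(bytes.Equal(b1.Bytes(), b2.Bytes())) // true
}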


@ -1,184 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// - Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// - Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
hist []byte // Sliding window history
// Invariant: 0 <= rdPos <= wrPos <= len(hist)
wrPos int // Current output position in buffer
rdPos int // Have emitted hist[:rdPos] already
full bool // Has a full window length been written yet?
}
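The "length greater than distance" case described in the comment above is what gives LZ77 run-length encoding for free; a tiny standalone sketch of the forward copy that writeCopy performs:

package main

import "fmt"

func main() {
    // A backward copy with dist=2, length=6 starting from "ab":
    hist := []byte("ab")
    dist, length := 2, 6
    src := len(hist) - dist
    for i := 0; i < length; i++ {
        hist = append(hist, hist[src+i]) // forward copy may read its own output
    }
    fmt.Println(string(hist)) // "abababab"
}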
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
*dd = dictDecoder{hist: dd.hist}
if cap(dd.hist) < size {
dd.hist = make([]byte, size)
}
dd.hist = dd.hist[:size]
if len(dict) > len(dd.hist) {
dict = dict[len(dict)-len(dd.hist):]
}
dd.wrPos = copy(dd.hist, dict)
if dd.wrPos == len(dd.hist) {
dd.wrPos = 0
dd.full = true
}
dd.rdPos = dd.wrPos
}
// histSize reports the total amount of historical data in the dictionary.
func (dd *dictDecoder) histSize() int {
if dd.full {
return len(dd.hist)
}
return dd.wrPos
}
// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
return dd.wrPos - dd.rdPos
}
// availWrite reports the available amount of output buffer space.
func (dd *dictDecoder) availWrite() int {
return len(dd.hist) - dd.wrPos
}
// writeSlice returns a slice of the available buffer to write data to.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
return dd.hist[dd.wrPos:]
}
// writeMark advances the writer pointer by cnt.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
dd.wrPos += cnt
}
// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
dd.hist[dd.wrPos] = c
dd.wrPos++
}
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
dstBase := dd.wrPos
dstPos := dstBase
srcPos := dstPos - dist
endPos := dstPos + length
if endPos > len(dd.hist) {
endPos = len(dd.hist)
}
// Copy non-overlapping section after destination position.
//
// This section is non-overlapping in that the copy length for this section
// is always less than or equal to the backwards distance. This can occur
// if a distance refers to data that wraps-around in the buffer.
// Thus, a backwards copy is performed here; that is, the exact bytes in
// the source prior to the copy is placed in the destination.
if srcPos < 0 {
srcPos += len(dd.hist)
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
srcPos = 0
}
// Copy possibly overlapping section before destination position.
//
// This section can overlap if the copy length for this section is larger
// than the backwards distance. This is allowed by LZ77 so that repeated
// strings can be succinctly represented using (dist, length) pairs.
// Thus, a forwards copy is performed here; that is, the bytes copied is
// possibly dependent on the resulting bytes in the destination as the copy
// progresses along. This is functionally equivalent to the following:
//
// for i := 0; i < endPos-dstPos; i++ {
// dd.hist[dstPos+i] = dd.hist[srcPos+i]
// }
// dstPos = endPos
//
for dstPos < endPos {
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
}
dd.wrPos = dstPos
return dstPos - dstBase
}
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
dstPos := dd.wrPos
endPos := dstPos + length
if dstPos < dist || endPos > len(dd.hist) {
return 0
}
dstBase := dstPos
srcPos := dstPos - dist
// Copy possibly overlapping section before destination position.
loop:
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
if dstPos < endPos {
goto loop // Avoid for-loop so that this function can be inlined
}
dd.wrPos = dstPos
return dstPos - dstBase
}
// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
toRead := dd.hist[dd.rdPos:dd.wrPos]
dd.rdPos = dd.wrPos
if dd.wrPos == len(dd.hist) {
dd.wrPos, dd.rdPos = 0, 0
dd.full = true
}
return toRead
}


@ -1,216 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"fmt"
"math/bits"
)
type fastEnc interface {
Encode(dst *tokens, src []byte)
Reset()
}
func newFastEnc(level int) fastEnc {
switch level {
case 1:
return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
case 2:
return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
case 3:
return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
case 4:
return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
case 5:
return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
case 6:
return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
default:
panic("invalid level specified")
}
}
const (
tableBits = 15 // Bits used in the table
tableSize = 1 << tableBits // Size of the table
tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
baseMatchOffset = 1 // The smallest match offset
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
maxMatchOffset = 1 << 15 // The largest match offset
bTableBits = 17 // Bits used in the big tables
bTableSize = 1 << bTableBits // Size of the table
allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)
const (
prime3bytes = 506832829
prime4bytes = 2654435761
prime5bytes = 889523592379
prime6bytes = 227718039650203
prime7bytes = 58295818150454627
prime8bytes = 0xcf1bbcdcb7a56463
)
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
type tableEntry struct {
offset int32
}
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
hist []byte
cur int32
}
func (e *fastGen) addBlock(src []byte) int32 {
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
if cap(e.hist) == 0 {
e.hist = make([]byte, 0, allocHistory)
} else {
if cap(e.hist) < maxMatchOffset*2 {
panic("unexpected buffer size")
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
}
s := int32(len(e.hist))
e.hist = append(e.hist, src...)
return s
}
type tableEntryPrev struct {
Cur tableEntry
Prev tableEntry
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}
// hashLen returns a hash of the lowest mls bytes of u with length output bits.
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// length should always be < 32.
// Preferably length and mls should be a constant for inlining.
func hashLen(u uint64, length, mls uint8) uint32 {
switch mls {
case 3:
return (uint32(u<<8) * prime3bytes) >> (32 - length)
case 5:
return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
case 6:
return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
case 7:
return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
case 8:
return uint32((u * prime8bytes) >> (64 - length))
default:
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
if debugDecode {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > maxMatchOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
s1 := int(s) + maxMatchLength - 4
if s1 > len(src) {
s1 = len(src)
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:s1], src[t:]))
}
// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > maxMatchOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:], src[t:]))
}
// Reset the encoding table.
func (e *fastGen) Reset() {
if cap(e.hist) < allocHistory {
e.hist = make([]byte, 0, allocHistory)
}
// We offset current position so everything will be out of reach.
// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
if e.cur <= bufferReset {
e.cur += maxMatchOffset + int32(len(e.hist))
}
e.hist = e.hist[:0]
}
// matchLen returns the maximum length.
// 'a' must be the shorter of the two.
func matchLen(a, b []byte) int {
var checked int
for len(a) >= 8 {
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
return checked + (bits.TrailingZeros64(diff) >> 3)
}
checked += 8
a = a[8:]
b = b[8:]
}
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
return i + checked
}
}
return len(a) + checked
}
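matchLen's XOR trick locates the first differing byte eight bytes at a time; a worked example outside the original file:

package main

import (
    "encoding/binary"
    "fmt"
    "math/bits"
)

func main() {
    a := []byte("deflate!")
    b := []byte("deflateX")
    diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
    // Bytes 0-6 agree, so only bits 56+ of the XOR are set;
    // TrailingZeros64(diff)>>3 recovers the match length of 7 bytes.
    fmt.Println(bits.TrailingZeros64(diff) >> 3) // 7
}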

File diff suppressed because it is too large


@ -1,417 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"math"
"math/bits"
)
const (
maxBitsLimit = 16
// number of valid literals
literalCount = 286
)
// hcode is a huffman code with a bit code and bit length.
type hcode uint32
func (h hcode) len() uint8 {
return uint8(h)
}
func (h hcode) code64() uint64 {
return uint64(h >> 8)
}
func (h hcode) zero() bool {
return h == 0
}
type huffmanEncoder struct {
codes []hcode
bitCount [17]int32
// Allocate a reusable buffer with the longest possible frequency table.
// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
// The largest of these is literalCount, so we allocate for that case.
freqcache [literalCount + 1]literalNode
}
type literalNode struct {
literal uint16
freq uint16
}
// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
// Our level, for better printing
level int32
// The frequency of the last node at this level
lastFreq int32
// The frequency of the next character to add to this level
nextCharFreq int32
// The frequency of the next pair (from level below) to add to this level.
// Only valid if the "needed" value of the next lower level is 0.
nextPairFreq int32
// The number of chains remaining to generate for this level before moving
// up to the next level
needed int32
}
// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint8) {
*h = hcode(length) | (hcode(code) << 8)
}
func newhcode(code uint16, length uint8) hcode {
return hcode(length) | (hcode(code) << 8)
}
func reverseBits(number uint16, bitLength byte) uint16 {
return bits.Reverse16(number << ((16 - bitLength) & 15))
}
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
func newHuffmanEncoder(size int) *huffmanEncoder {
// Make capacity to next power of two.
c := uint(bits.Len32(uint32(size - 1)))
return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
}
// Generates a HuffmanCode corresponding to the fixed literal table
func generateFixedLiteralEncoding() *huffmanEncoder {
h := newHuffmanEncoder(literalCount)
codes := h.codes
var ch uint16
for ch = 0; ch < literalCount; ch++ {
var bits uint16
var size uint8
switch {
case ch < 144:
// size 8, 00110000 .. 10111111
bits = ch + 48
size = 8
case ch < 256:
// size 9, 110010000 .. 111111111
bits = ch + 400 - 144
size = 9
case ch < 280:
// size 7, 0000000 .. 0010111
bits = ch - 256
size = 7
default:
// size 8, 11000000 .. 11000111
bits = ch + 192 - 280
size = 8
}
codes[ch] = newhcode(reverseBits(bits, size), size)
}
return h
}
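The case arms mirror RFC 1951's fixed literal table; for example, literals below 144 get the 8-bit code ch+48, spanning 00110000 through 10111111. A one-line check of that arithmetic:

package main

import "fmt"

func main() {
    // Fixed literal codes for the first range (ch < 144):
    fmt.Printf("%08b %08b\n", 0+48, 143+48) // 00110000 10111111
}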
func generateFixedOffsetEncoding() *huffmanEncoder {
h := newHuffmanEncoder(30)
codes := h.codes
for ch := range codes {
codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
}
return h
}
var fixedLiteralEncoding = generateFixedLiteralEncoding()
var fixedOffsetEncoding = generateFixedOffsetEncoding()
func (h *huffmanEncoder) bitLength(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
total += int(f) * int(h.codes[i].len())
}
}
return total
}
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
var total int
for _, f := range b {
total += int(h.codes[f].len())
}
return total
}
// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
code := h.codes[i]
if code.zero() {
return math.MaxInt32
}
total += int(f) * int(code.len())
}
}
return total
}
// Return the number of literals assigned to each bit size in the Huffman encoding.
//
// This method is only called when list.length >= 3.
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list: an array of the literals with non-zero frequencies and their
// associated frequencies. The array is in order of increasing frequency and
// has as its last element a special element with frequency MaxInt32.
//
// maxBits: the maximum number of bits that should be used to encode any
// literal. Must be less than 16.
//
// return: an integer array in which array[i] indicates the number of literals
// that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
if maxBits >= maxBitsLimit {
panic("flate: maxBits too large")
}
n := int32(len(list))
list = list[0 : n+1]
list[n] = maxNode()
// The tree can't have greater depth than n - 1, no matter what. This
// saves a little bit of work in some small cases
if maxBits > n-1 {
maxBits = n - 1
}
// Create information about each of the levels.
// A bogus "Level 0" whose sole purpose is so that
// level1.prev.needed==0. This makes level1.nextPairFreq
// be a legitimate value that never gets chosen.
var levels [maxBitsLimit]levelInfo
// leafCounts[i] counts the number of literals at the left
// of ancestors of the rightmost node at level i.
// leafCounts[i][j] is the number of literals at the left
// of the level j ancestor.
var leafCounts [maxBitsLimit][maxBitsLimit]int32
// Read in descending index order so only one bounds check is needed.
l2f := int32(list[2].freq)
l1f := int32(list[1].freq)
l0f := int32(list[0].freq) + int32(list[1].freq)
for level := int32(1); level <= maxBits; level++ {
// For every level, the first two items are the first two characters.
// We initialize the levels as if we had already figured this out.
levels[level] = levelInfo{
level: level,
lastFreq: l1f,
nextCharFreq: l2f,
nextPairFreq: l0f,
}
leafCounts[level][level] = 2
if level == 1 {
levels[level].nextPairFreq = math.MaxInt32
}
}
// We need a total of 2*n - 2 items at top level and have already generated 2.
levels[maxBits].needed = 2*n - 4
level := uint32(maxBits)
for level < 16 {
l := &levels[level]
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
// We've run out of both leafs and pairs.
// End all calculations for this level.
// To make sure we never come back to this level or any lower level,
// set nextPairFreq impossibly large.
l.needed = 0
levels[level+1].nextPairFreq = math.MaxInt32
level++
continue
}
prevFreq := l.lastFreq
if l.nextCharFreq < l.nextPairFreq {
// The next item on this row is a leaf node.
n := leafCounts[level][level] + 1
l.lastFreq = l.nextCharFreq
// Lower leafCounts are the same as for the previous node.
leafCounts[level][level] = n
e := list[n]
if e.literal < math.MaxUint16 {
l.nextCharFreq = int32(e.freq)
} else {
l.nextCharFreq = math.MaxInt32
}
} else {
// The next item on this row is a pair from the previous row.
// nextPairFreq isn't valid until we generate two
// more values in the level below
l.lastFreq = l.nextPairFreq
// Take leaf counts from the lower level, except counts[level] remains the same.
save := leafCounts[level][level]
leafCounts[level] = leafCounts[level-1] // array copy
leafCounts[level][level] = save
levels[l.level-1].needed = 2
}
if l.needed--; l.needed == 0 {
// We've done everything we need to do for this level.
// Continue calculating one level up. Fill in nextPairFreq
// of that level with the sum of the two nodes we've just calculated on
// this level.
if l.level == maxBits {
// All done!
break
}
levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
level++
} else {
// If we stole from below, move down temporarily to replenish it.
for levels[level-1].needed > 0 {
level--
}
}
}
// Something is wrong if, at the end, the top level is null or hasn't used
// all of the leaves.
if leafCounts[maxBits][maxBits] != n {
panic("leafCounts[maxBits][maxBits] != n")
}
bitCount := h.bitCount[:maxBits+1]
bits := 1
counts := &leafCounts[maxBits]
for level := maxBits; level > 0; level-- {
// chain.leafCount gives the number of literals requiring at least "bits"
// bits to encode.
bitCount[bits] = counts[level] - counts[level-1]
bits++
}
return bitCount
}
// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
code := uint16(0)
for n, bits := range bitCount {
code <<= 1
if n == 0 || bits == 0 {
continue
}
// The literals list[len(list)-bits] .. list[len(list)-1]
// are encoded using "bits" bits, and get the values
// code, code + 1, .... The code values are
// assigned in literal order (not frequency order).
chunk := list[len(list)-int(bits):]
sortByLiteral(chunk)
for _, node := range chunk {
h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
code++
}
list = list[0 : len(list)-int(bits)]
}
}
// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list := h.freqcache[:len(freq)+1]
codes := h.codes[:len(freq)]
// Number of non-zero literals
count := 0
// Set list to be the set of all non-zero literals and their frequencies
for i, f := range freq {
if f != 0 {
list[count] = literalNode{uint16(i), f}
count++
} else {
codes[i] = 0
}
}
list[count] = literalNode{}
list = list[:count]
if count <= 2 {
// Handle the small cases here, because they are awkward for the general case code. With
// two or fewer literals, everything has bit length 1.
for i, node := range list {
// "list" is in order of increasing literal value.
h.codes[node.literal].set(uint16(i), 1)
}
return
}
sortByFreq(list)
// Get the number of literals for each bit count
bitCount := h.bitCounts(list, maxBits)
// And do the assignment
h.assignEncodingAndSize(bitCount, list)
}
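// Usage sketch (illustrative, not part of the original file): three
// equally frequent literals produce the canonical n == 3 Huffman shape,
// one 1-bit code and two 2-bit codes:
//
//	h := newHuffmanEncoder(3)
//	h.generate([]uint16{1, 1, 1}, 15)
//	// exactly one h.codes[i].len() is 1; the other two are 2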
// atLeastOne clamps the result between 1 and 15.
func atLeastOne(v float32) float32 {
if v < 1 {
return 1
}
if v > 15 {
return 15
}
return v
}
func histogram(b []byte, h []uint16) {
if len(b) >= 8<<10 {
// Split for bigger inputs
histogramSplit(b, h)
} else {
h = h[:256]
for _, t := range b {
h[t]++
}
}
}
func histogramSplit(b []byte, h []uint16) {
// Tested, and slightly faster than 2-way.
// Writing to separate arrays and combining is also slightly slower.
h = h[:256]
for len(b)&3 != 0 {
h[b[0]]++
b = b[1:]
}
n := len(b) / 4
x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
for i, t := range x {
v0 := &h[t]
v1 := &h[y[i]]
v3 := &h[w[i]]
v2 := &h[z[i]]
*v0++
*v1++
*v2++
*v3++
}
}
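// Usage sketch (illustrative): counting byte frequencies of a short input,
// which takes the scalar path since it is under 8KiB:
//
//	var h [256]uint16
//	histogram([]byte("abracadabra"), h[:])
//	// h['a'] == 5, h['b'] == 2, h['r'] == 2, h['c'] == 1, h['d'] == 1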

@@ -1,178 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// Sort sorts data.
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
// data.Less and data.Swap. The sort is not guaranteed to be stable.
func sortByFreq(data []literalNode) {
n := len(data)
quickSortByFreq(data, 0, n, maxDepth(n))
}
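// Ordering sketch (illustrative, not part of the original file):
// sortByFreq orders by frequency first and breaks ties by literal value:
//
//	nodes := []literalNode{{literal: 0, freq: 3}, {literal: 2, freq: 1}, {literal: 1, freq: 1}}
//	sortByFreq(nodes)
//	// nodes is now [{1 1} {2 1} {0 3}]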
func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
for b-a > 12 { // Use ShellSort for slices <= 12 elements
if maxDepth == 0 {
heapSort(data, a, b)
return
}
maxDepth--
mlo, mhi := doPivotByFreq(data, a, b)
// Avoiding recursion on the larger subproblem guarantees
// a stack depth of at most lg(b-a).
if mlo-a < b-mhi {
quickSortByFreq(data, a, mlo, maxDepth)
a = mhi // i.e., quickSortByFreq(data, mhi, b)
} else {
quickSortByFreq(data, mhi, b, maxDepth)
b = mlo // i.e., quickSortByFreq(data, a, mlo)
}
}
if b-a > 1 {
// Do ShellSort pass with gap 6
// It could be written in this simplified form because b-a <= 12
for i := a + 6; i < b; i++ {
if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
data[i], data[i-6] = data[i-6], data[i]
}
}
insertionSortByFreq(data, a, b)
}
}
// siftDownByFreq implements the heap property on data[lo, hi).
// first is an offset into the array where the root of the heap lies.
func siftDownByFreq(data []literalNode, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
child++
}
if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
if hi-lo > 40 {
// Tukey's ``Ninther,'' median of three medians of three.
s := (hi - lo) / 8
medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
medianOfThreeSortByFreq(data, m, m-s, m+s)
medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
}
medianOfThreeSortByFreq(data, lo, m, hi-1)
// Invariants are:
// data[lo] = pivot (set up by ChoosePivot)
// data[lo < i < a] < pivot
// data[a <= i < b] <= pivot
// data[b <= i < c] unexamined
// data[c <= i < hi-1] > pivot
// data[hi-1] >= pivot
pivot := lo
a, c := lo+1, hi-1
for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
}
b := a
for {
for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
}
for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
}
if b >= c {
break
}
// data[b] > pivot; data[c-1] <= pivot
data[b], data[c-1] = data[c-1], data[b]
b++
c--
}
// If hi-c<3 then there are duplicates (by property of median of nine).
// Let's be a bit more conservative, and set border to 5.
protect := hi-c < 5
if !protect && hi-c < (hi-lo)/4 {
// Let's test some points for equality to the pivot
dups := 0
if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
data[c], data[hi-1] = data[hi-1], data[c]
c++
dups++
}
if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
b--
dups++
}
// m-lo = (hi-lo)/2 > 6
// b-lo > (hi-lo)*3/4-1 > 8
// ==> m < b ==> data[m] <= pivot
if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
data[m], data[b-1] = data[b-1], data[m]
b--
dups++
}
// if at least 2 points are equal to pivot, assume skewed distribution
protect = dups > 1
}
if protect {
// Protect against a lot of duplicates
// Add invariant:
// data[a <= i < b] unexamined
// data[b <= i < c] = pivot
for {
for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
}
for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
}
if a >= b {
break
}
// data[a] == pivot; data[b-1] < pivot
data[a], data[b-1] = data[b-1], data[a]
a++
b--
}
}
// Swap pivot into middle
data[pivot], data[b-1] = data[b-1], data[pivot]
return b - 1, c
}
// Insertion sort
func insertionSortByFreq(data []literalNode, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// quickSortByFreq, loosely following Bentley and McIlroy,
// ``Engineering a Sort Function,'' SP&E November 1993.
// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
// sort 3 elements
if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
data[m1], data[m0] = data[m0], data[m1]
}
// data[m0] <= data[m1]
if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
data[m2], data[m1] = data[m1], data[m2]
// data[m0] <= data[m2] && data[m1] < data[m2]
if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
data[m1], data[m0] = data[m0], data[m1]
}
}
// now data[m0] <= data[m1] <= data[m2]
}

@@ -1,201 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// Sort sorts data.
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
// data.Less and data.Swap. The sort is not guaranteed to be stable.
func sortByLiteral(data []literalNode) {
n := len(data)
quickSort(data, 0, n, maxDepth(n))
}
func quickSort(data []literalNode, a, b, maxDepth int) {
for b-a > 12 { // Use ShellSort for slices <= 12 elements
if maxDepth == 0 {
heapSort(data, a, b)
return
}
maxDepth--
mlo, mhi := doPivot(data, a, b)
// Avoiding recursion on the larger subproblem guarantees
// a stack depth of at most lg(b-a).
if mlo-a < b-mhi {
quickSort(data, a, mlo, maxDepth)
a = mhi // i.e., quickSort(data, mhi, b)
} else {
quickSort(data, mhi, b, maxDepth)
b = mlo // i.e., quickSort(data, a, mlo)
}
}
if b-a > 1 {
// Do ShellSort pass with gap 6
// It could be written in this simplified form because b-a <= 12
for i := a + 6; i < b; i++ {
if data[i].literal < data[i-6].literal {
data[i], data[i-6] = data[i-6], data[i]
}
}
insertionSort(data, a, b)
}
}
func heapSort(data []literalNode, a, b int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDown(data, i, hi, first)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDown(data, lo, i, first)
}
}
// siftDown implements the heap property on data[lo, hi).
// first is an offset into the array where the root of the heap lies.
func siftDown(data []literalNode, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
child++
}
if data[first+root].literal > data[first+child].literal {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
if hi-lo > 40 {
// Tukey's ``Ninther,'' median of three medians of three.
s := (hi - lo) / 8
medianOfThree(data, lo, lo+s, lo+2*s)
medianOfThree(data, m, m-s, m+s)
medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
}
medianOfThree(data, lo, m, hi-1)
// Invariants are:
// data[lo] = pivot (set up by ChoosePivot)
// data[lo < i < a] < pivot
// data[a <= i < b] <= pivot
// data[b <= i < c] unexamined
// data[c <= i < hi-1] > pivot
// data[hi-1] >= pivot
pivot := lo
a, c := lo+1, hi-1
for ; a < c && data[a].literal < data[pivot].literal; a++ {
}
b := a
for {
for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
}
for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
}
if b >= c {
break
}
// data[b] > pivot; data[c-1] <= pivot
data[b], data[c-1] = data[c-1], data[b]
b++
c--
}
// If hi-c<3 then there are duplicates (by property of median of nine).
// Let's be a bit more conservative, and set border to 5.
protect := hi-c < 5
if !protect && hi-c < (hi-lo)/4 {
// Let's test some points for equality to the pivot
dups := 0
if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
data[c], data[hi-1] = data[hi-1], data[c]
c++
dups++
}
if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
b--
dups++
}
// m-lo = (hi-lo)/2 > 6
// b-lo > (hi-lo)*3/4-1 > 8
// ==> m < b ==> data[m] <= pivot
if data[m].literal > data[pivot].literal { // data[m] = pivot
data[m], data[b-1] = data[b-1], data[m]
b--
dups++
}
// if at least 2 points are equal to pivot, assume skewed distribution
protect = dups > 1
}
if protect {
// Protect against a lot of duplicates
// Add invariant:
// data[a <= i < b] unexamined
// data[b <= i < c] = pivot
for {
for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
}
for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
}
if a >= b {
break
}
// data[a] == pivot; data[b-1] < pivot
data[a], data[b-1] = data[b-1], data[a]
a++
b--
}
}
// Swap pivot into middle
data[pivot], data[b-1] = data[b-1], data[pivot]
return b - 1, c
}
// Insertion sort
func insertionSort(data []literalNode, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// maxDepth returns a threshold at which quicksort should switch
// to heapsort. It returns 2*ceil(lg(n+1)).
func maxDepth(n int) int {
var depth int
for i := n; i > 0; i >>= 1 {
depth++
}
return depth * 2
}
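// For example, maxDepth(12) == 8: 12 fits in 4 bits, so quicksort switches
// to heapsort after 8 levels of recursion make no progress.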
// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
func medianOfThree(data []literalNode, m1, m0, m2 int) {
// sort 3 elements
if data[m1].literal < data[m0].literal {
data[m1], data[m0] = data[m0], data[m1]
}
// data[m0] <= data[m1]
if data[m2].literal < data[m1].literal {
data[m2], data[m1] = data[m1], data[m2]
// data[m0] <= data[m2] && data[m1] < data[m2]
if data[m1].literal < data[m0].literal {
data[m1], data[m0] = data[m0], data[m1]
}
}
// now data[m0] <= data[m1] <= data[m2]
}

@@ -1,793 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package flate implements the DEFLATE compressed data format, described in
// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
// formats.
package flate
import (
"bufio"
"compress/flate"
"fmt"
"io"
"math/bits"
"sync"
)
const (
maxCodeLen = 16 // max length of Huffman code
maxCodeLenMask = 15 // mask for max length of Huffman code
// The next three numbers come from the RFC section 3.2.7, with the
// additional proviso in section 3.2.5 which implies that distance codes
// 30 and 31 should never occur in compressed data.
maxNumLit = 286
maxNumDist = 30
numCodes = 19 // number of codes in Huffman meta-code
debugDecode = false
)
// Value of length - 3 and extra bits.
type lengthExtra struct {
length, extra uint8
}
var decCodeToLen = [32]lengthExtra{
{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0},
{length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0},
{length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1},
{length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2},
{length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3},
{length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4},
{length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5},
{length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0},
}
var bitMask32 = [32]uint32{
0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
} // up to 32 bits
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError = flate.CorruptInputError
// An InternalError reports an error in the flate code itself.
type InternalError string
func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
// A ReadError reports an error encountered while reading input.
//
// Deprecated: No longer returned.
type ReadError = flate.ReadError
// A WriteError reports an error encountered while writing output.
//
// Deprecated: No longer returned.
type WriteError = flate.WriteError
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
// Reset discards any buffered data and resets the Resetter as if it was
// newly initialized with the given reader.
Reset(r io.Reader, dict []byte) error
}
// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
// http://www.gzip.org/algorithm.txt
// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const (
huffmanChunkBits = 9
huffmanNumChunks = 1 << huffmanChunkBits
huffmanCountMask = 15
huffmanValueShift = 4
)
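// Illustrative sketch of the chunk layout described above (assumed from
// the comments, not part of the original file):
//
//	chunk := uint16(value<<huffmanValueShift | length)
//	length = int(chunk & huffmanCountMask)  // low 4 bits: number of bits
//	value = int(chunk >> huffmanValueShift) // symbol, or link-table index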
type huffmanDecoder struct {
maxRead int // the maximum number of bits we can read and not overread
chunks *[huffmanNumChunks]uint16 // chunks as described above
links [][]uint16 // overflow links
linkMask uint32 // mask the width of the link table
}
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
func (h *huffmanDecoder) init(lengths []int) bool {
// Sanity enables additional runtime tests during Huffman
// table construction. It's intended to be used during
// development to supplement the currently ad-hoc unit tests.
const sanity = false
if h.chunks == nil {
h.chunks = &[huffmanNumChunks]uint16{}
}
if h.maxRead != 0 {
*h = huffmanDecoder{chunks: h.chunks, links: h.links}
}
// Count number of codes of each length,
// compute maxRead and max length.
var count [maxCodeLen]int
var min, max int
for _, n := range lengths {
if n == 0 {
continue
}
if min == 0 || n < min {
min = n
}
if n > max {
max = n
}
count[n&maxCodeLenMask]++
}
// Empty tree. The decompressor.huffSym function will fail later if the tree
// is used. Technically, an empty tree is only valid for the HDIST tree and
// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
// is guaranteed to fail since it will attempt to use the tree to decode the
// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
// guaranteed to fail later since the compressed data section must be
// composed of at least one symbol (the end-of-block marker).
if max == 0 {
return true
}
code := 0
var nextcode [maxCodeLen]int
for i := min; i <= max; i++ {
code <<= 1
nextcode[i&maxCodeLenMask] = code
code += count[i&maxCodeLenMask]
}
// Check that the coding is complete (i.e., that we've
// assigned all 2-to-the-max possible bit sequences).
// Exception: To be compatible with zlib, we also need to
// accept degenerate single-code codings. See also
// TestDegenerateHuffmanCoding.
if code != 1<<uint(max) && !(code == 1 && max == 1) {
if debugDecode {
fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
}
return false
}
h.maxRead = min
chunks := h.chunks[:]
for i := range chunks {
chunks[i] = 0
}
if max > huffmanChunkBits {
numLinks := 1 << (uint(max) - huffmanChunkBits)
h.linkMask = uint32(numLinks - 1)
// create link tables
link := nextcode[huffmanChunkBits+1] >> 1
if cap(h.links) < huffmanNumChunks-link {
h.links = make([][]uint16, huffmanNumChunks-link)
} else {
h.links = h.links[:huffmanNumChunks-link]
}
for j := uint(link); j < huffmanNumChunks; j++ {
reverse := int(bits.Reverse16(uint16(j)))
reverse >>= uint(16 - huffmanChunkBits)
off := j - uint(link)
if sanity && h.chunks[reverse] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
if cap(h.links[off]) < numLinks {
h.links[off] = make([]uint16, numLinks)
} else {
links := h.links[off][:0]
h.links[off] = links[:numLinks]
}
}
} else {
h.links = h.links[:0]
}
for i, n := range lengths {
if n == 0 {
continue
}
code := nextcode[n]
nextcode[n]++
chunk := uint16(i<<huffmanValueShift | n)
reverse := int(bits.Reverse16(uint16(code)))
reverse >>= uint(16 - n)
if n <= huffmanChunkBits {
for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
// We should never need to overwrite
// an existing chunk. Also, 0 is
// never a valid chunk, because the
// lower 4 "count" bits should be
// between 1 and 15.
if sanity && h.chunks[off] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[off] = chunk
}
} else {
j := reverse & (huffmanNumChunks - 1)
if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
// Longer codes should have been
// associated with a link table above.
panic("impossible: not an indirect chunk")
}
value := h.chunks[j] >> huffmanValueShift
linktab := h.links[value]
reverse >>= huffmanChunkBits
for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
if sanity && linktab[off] != 0 {
panic("impossible: overwriting existing chunk")
}
linktab[off] = chunk
}
}
}
if sanity {
// Above we've sanity checked that we never overwrote
// an existing entry. Here we additionally check that
// we filled the tables completely.
for i, chunk := range h.chunks {
if chunk == 0 {
// As an exception, in the degenerate
// single-code case, we allow odd
// chunks to be missing.
if code == 1 && i%2 == 1 {
continue
}
panic("impossible: missing chunk")
}
}
for _, linktab := range h.links {
for _, chunk := range linktab {
if chunk == 0 {
panic("impossible: missing chunk")
}
}
}
}
return true
}
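// Usage sketch (illustrative): building a decoder from code lengths.
//
//	var h huffmanDecoder
//	ok := h.init([]int{2, 2, 2, 2}) // four symbols, each with a 2-bit code
//	// ok == true: four 2-bit codes exactly fill the code space (1<<2 == 4)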
// The actual read interface needed by NewReader.
// If the passed in io.Reader does not also have ReadByte,
// the NewReader will introduce its own buffering.
type Reader interface {
io.Reader
io.ByteReader
}
// Decompress state.
type decompressor struct {
// Input source.
r Reader
roffset int64
// Huffman decoders for literal/length, distance.
h1, h2 huffmanDecoder
// Length arrays used to define Huffman codes.
bits *[maxNumLit + maxNumDist]int
codebits *[numCodes]int
// Output history, buffer.
dict dictDecoder
// Next step in the decompression,
// and decompression state.
step func(*decompressor)
stepState int
err error
toRead []byte
hl, hd *huffmanDecoder
copyLen int
copyDist int
// Temporary buffer (avoids repeated allocation).
buf [4]byte
// Input bits, in top of b.
b uint32
nb uint
final bool
}
func (f *decompressor) nextBlock() {
for f.nb < 1+2 {
if f.err = f.moreBits(); f.err != nil {
return
}
}
f.final = f.b&1 == 1
f.b >>= 1
typ := f.b & 3
f.b >>= 2
f.nb -= 1 + 2
switch typ {
case 0:
f.dataBlock()
if debugDecode {
fmt.Println("stored block")
}
case 1:
// compressed, fixed Huffman tables
f.hl = &fixedHuffmanDecoder
f.hd = nil
f.huffmanBlockDecoder()()
if debugDecode {
fmt.Println("predefinied huffman block")
}
case 2:
// compressed, dynamic Huffman tables
if f.err = f.readHuffman(); f.err != nil {
break
}
f.hl = &f.h1
f.hd = &f.h2
f.huffmanBlockDecoder()()
if debugDecode {
fmt.Println("dynamic huffman block")
}
default:
// 3 is reserved.
if debugDecode {
fmt.Println("reserved data block encountered")
}
f.err = CorruptInputError(f.roffset)
}
}
func (f *decompressor) Read(b []byte) (int, error) {
for {
if len(f.toRead) > 0 {
n := copy(b, f.toRead)
f.toRead = f.toRead[n:]
if len(f.toRead) == 0 {
return n, f.err
}
return n, nil
}
if f.err != nil {
return 0, f.err
}
f.step(f)
if f.err != nil && len(f.toRead) == 0 {
f.toRead = f.dict.readFlush() // Flush what's left in case of error
}
}
}
// Support the io.WriteTo interface for io.Copy and friends.
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
total := int64(0)
flushed := false
for {
if len(f.toRead) > 0 {
n, err := w.Write(f.toRead)
total += int64(n)
if err != nil {
f.err = err
return total, err
}
if n != len(f.toRead) {
return total, io.ErrShortWrite
}
f.toRead = f.toRead[:0]
}
if f.err != nil && flushed {
if f.err == io.EOF {
return total, nil
}
return total, f.err
}
if f.err == nil {
f.step(f)
}
if len(f.toRead) == 0 && f.err != nil && !flushed {
f.toRead = f.dict.readFlush() // Flush what's left in case of error
flushed = true
}
}
}
func (f *decompressor) Close() error {
if f.err == io.EOF {
return nil
}
return f.err
}
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
func (f *decompressor) readHuffman() error {
// HLIT[5], HDIST[5], HCLEN[4].
for f.nb < 5+5+4 {
if err := f.moreBits(); err != nil {
return err
}
}
nlit := int(f.b&0x1F) + 257
if nlit > maxNumLit {
if debugDecode {
fmt.Println("nlit > maxNumLit", nlit)
}
return CorruptInputError(f.roffset)
}
f.b >>= 5
ndist := int(f.b&0x1F) + 1
if ndist > maxNumDist {
if debugDecode {
fmt.Println("ndist > maxNumDist", ndist)
}
return CorruptInputError(f.roffset)
}
f.b >>= 5
nclen := int(f.b&0xF) + 4
// numCodes is 19, so nclen is always valid.
f.b >>= 4
f.nb -= 5 + 5 + 4
// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
for i := 0; i < nclen; i++ {
for f.nb < 3 {
if err := f.moreBits(); err != nil {
return err
}
}
f.codebits[codeOrder[i]] = int(f.b & 0x7)
f.b >>= 3
f.nb -= 3
}
for i := nclen; i < len(codeOrder); i++ {
f.codebits[codeOrder[i]] = 0
}
if !f.h1.init(f.codebits[0:]) {
if debugDecode {
fmt.Println("init codebits failed")
}
return CorruptInputError(f.roffset)
}
// HLIT + 257 code lengths, HDIST + 1 code lengths,
// using the code length Huffman code.
for i, n := 0, nlit+ndist; i < n; {
x, err := f.huffSym(&f.h1)
if err != nil {
return err
}
if x < 16 {
// Actual length.
f.bits[i] = x
i++
continue
}
// Repeat previous length or zero.
var rep int
var nb uint
var b int
switch x {
default:
return InternalError("unexpected length code")
case 16:
rep = 3
nb = 2
if i == 0 {
if debugDecode {
fmt.Println("i==0")
}
return CorruptInputError(f.roffset)
}
b = f.bits[i-1]
case 17:
rep = 3
nb = 3
b = 0
case 18:
rep = 11
nb = 7
b = 0
}
for f.nb < nb {
if err := f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits:", err)
}
return err
}
}
rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
f.b >>= nb & regSizeMaskUint32
f.nb -= nb
if i+rep > n {
if debugDecode {
fmt.Println("i+rep > n", i, rep, n)
}
return CorruptInputError(f.roffset)
}
for j := 0; j < rep; j++ {
f.bits[i] = b
i++
}
}
if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
if debugDecode {
fmt.Println("init2 failed")
}
return CorruptInputError(f.roffset)
}
// As an optimization, we can initialize the maxRead bits to read at a time
// for the HLIT tree to the length of the EOB marker since we know that
// every block must terminate with one. This preserves the property that
// we never read any extra bytes after the end of the DEFLATE stream.
if f.h1.maxRead < f.bits[endBlockMarker] {
f.h1.maxRead = f.bits[endBlockMarker]
}
if !f.final {
// If not the final block, the smallest block possible is
// a predefined table, BTYPE=01, with a single EOB marker.
// This will take up 3 + 7 bits.
f.h1.maxRead += 10
}
return nil
}
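// Consequently a dynamic-Huffman block header costs at least
// 5 + 5 + 4 = 14 bits for HLIT/HDIST/HCLEN, plus nclen*3 >= 12 bits of
// code-length codes, before any literal/distance lengths are read.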
// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
// Uncompressed.
// Discard current half-byte.
left := (f.nb) & 7
f.nb -= left
f.b >>= left
offBytes := f.nb >> 3
// Unfilled values will be overwritten.
f.buf[0] = uint8(f.b)
f.buf[1] = uint8(f.b >> 8)
f.buf[2] = uint8(f.b >> 16)
f.buf[3] = uint8(f.b >> 24)
f.roffset += int64(offBytes)
f.nb, f.b = 0, 0
// Length then ones-complement of length.
nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
f.roffset += int64(nr)
if err != nil {
f.err = noEOF(err)
return
}
n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
if nn != ^n {
if debugDecode {
ncomp := ^n
fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
}
f.err = CorruptInputError(f.roffset)
return
}
if n == 0 {
f.toRead = f.dict.readFlush()
f.finishBlock()
return
}
f.copyLen = int(n)
f.copyData()
}
// copyData copies f.copyLen bytes from the underlying reader into f.hist.
// It pauses for reads when f.hist is full.
func (f *decompressor) copyData() {
buf := f.dict.writeSlice()
if len(buf) > f.copyLen {
buf = buf[:f.copyLen]
}
cnt, err := io.ReadFull(f.r, buf)
f.roffset += int64(cnt)
f.copyLen -= cnt
f.dict.writeMark(cnt)
if err != nil {
f.err = noEOF(err)
return
}
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
f.step = (*decompressor).copyData
return
}
f.finishBlock()
}
func (f *decompressor) finishBlock() {
if f.final {
if f.dict.availRead() > 0 {
f.toRead = f.dict.readFlush()
}
f.err = io.EOF
}
f.step = (*decompressor).nextBlock
}
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
func noEOF(e error) error {
if e == io.EOF {
return io.ErrUnexpectedEOF
}
return e
}
func (f *decompressor) moreBits() error {
c, err := f.r.ReadByte()
if err != nil {
return noEOF(err)
}
f.roffset++
f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
f.nb += 8
return nil
}
// Read the next Huffman-encoded symbol from f according to h.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with a single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it to
// satisfy the n == 0 check below.
n := uint(h.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
nb, b := f.nb, f.b
for {
for nb < n {
c, err := f.r.ReadByte()
if err != nil {
f.b = b
f.nb = nb
return 0, noEOF(err)
}
f.roffset++
b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := h.chunks[b&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= nb {
if n == 0 {
f.b = b
f.nb = nb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return 0, f.err
}
f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
return int(chunk >> huffmanValueShift), nil
}
}
}
func makeReader(r io.Reader) Reader {
if rr, ok := r.(Reader); ok {
return rr
}
return bufio.NewReader(r)
}
func fixedHuffmanDecoderInit() {
fixedOnce.Do(func() {
// These come from the RFC section 3.2.6.
var bits [288]int
for i := 0; i < 144; i++ {
bits[i] = 8
}
for i := 144; i < 256; i++ {
bits[i] = 9
}
for i := 256; i < 280; i++ {
bits[i] = 7
}
for i := 280; i < 288; i++ {
bits[i] = 8
}
fixedHuffmanDecoder.init(bits[:])
})
}
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
*f = decompressor{
r: makeReader(r),
bits: f.bits,
codebits: f.codebits,
h1: f.h1,
h2: f.h2,
dict: f.dict,
step: (*decompressor).nextBlock,
}
f.dict.init(maxMatchOffset, dict)
return nil
}
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = (*decompressor).nextBlock
f.dict.init(maxMatchOffset, nil)
return &f
}
// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = (*decompressor).nextBlock
f.dict.init(maxMatchOffset, dict)
return &f
}
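// Usage sketch (illustrative, mirroring the stdlib compress/flate API;
// "deflated" is an assumed raw DEFLATE stream):
//
//	r := NewReader(bytes.NewReader(deflated))
//	out, err := io.ReadAll(r)
//	if err == nil {
//		err = r.Close()
//	}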

File diff suppressed because it is too large.

@@ -1,241 +0,0 @@
package flate
import (
"encoding/binary"
"fmt"
"math/bits"
)
// fastEncL1 maintains the table for matches,
// and the previous byte block.
// This is the generic implementation.
type fastEncL1 struct {
fastGen
table [tableSize]tableEntry
}
// Encode implements level 1 compression using a Snappy-like matcher.
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 5
const doEvery = 2
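// E.g. with skipLog == 5 and doEvery == 2 the step below is
// 2 + (bytes-since-last-emit)/32: after 32 unmatched bytes we advance
// 3 at a time, after 64 bytes 4 at a time, and so on, so incompressible
// data is scanned progressively faster.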
nextS := s
var candidate tableEntry
for {
nextHash := hashLen(cv, tableBits, hashBytes)
candidate = e.table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
nextHash = hashLen(now, tableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
now >>= 8
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
cv = now
s = nextS
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
var l = int32(4)
if false {
l = e.matchlenLong(s+4, t+4, src) + 4
} else {
// inlined:
a := src[s+4:]
b := src[t+4:]
for len(a) >= 8 {
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
l += int32(bits.TrailingZeros64(diff) >> 3)
break
}
l += 8
a = a[8:]
b = b[8:]
}
if len(a) < 8 {
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
break
}
l++
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
// Save the match found
if false {
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
} else {
// Inlined...
xoffset := uint32(s - t - baseMatchOffset)
xlength := l
oc := offsetCode(xoffset)
xoffset |= oc << 16
for xlength > 0 {
xl := xlength
if xl > 258 {
if xl > 258+baseMatchLength {
xl = 258
} else {
xl = 258 - baseMatchLength
}
}
xlength -= xl
xl -= baseMatchLength
dst.extraHist[lengthCodes1[uint8(xl)]]++
dst.offHist[oc]++
dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
dst.n++
}
}
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index first pair after match end.
if int(s+l+8) < len(src) {
cv := load6432(src, s)
e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
x >>= 16
currHash := hashLen(x, tableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
cv = x >> 8
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
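// Usage sketch (illustrative; assumes a freshly zero-valued encoder, as in
// this package's internal usage):
//
//	var e fastEncL1
//	var dst tokens
//	e.Encode(&dst, src) // dst accumulates literal/match tokens for src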

@@ -1,214 +0,0 @@
package flate
import "fmt"
// fastEncL2 maintains the table for matches,
// and the previous byte block.
// This is the generic implementation.
type fastEncL2 struct {
fastGen
table [bTableSize]tableEntry
}
// Encode uses an algorithm similar to level 1, but is capable
// of matching across blocks, giving better compression at a small slowdown.
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
// When should we start skipping if we haven't found matches in a long while.
const skipLog = 5
const doEvery = 2
nextS := s
var candidate tableEntry
for {
nextHash := hashLen(cv, bTableBits, hashBytes)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
candidate = e.table[nextHash]
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
nextHash = hashLen(now, bTableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
now >>= 8
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
cv = now
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
l := e.matchlenLong(s+4, t+4, src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index first pair after match end.
if int(s+l+8) < len(src) {
cv := load6432(src, s)
e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
// Store every second hash in-between, but offset by 1.
for i := s - l + 2; i < s-5; i += 7 {
x := load6432(src, i)
nextHash := hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i}
// Skip one
x >>= 16
nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
// Skip one
x >>= 16
nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
}
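// I.e. each iteration above indexes positions i, i+2 and i+4, then jumps
// ahead 7, so roughly every second position in the span gets indexed at
// low cost.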
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 to s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
prevHash := hashLen(x, bTableBits, hashBytes)
prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
e.table[prevHash2] = tableEntry{offset: o + 1}
currHash := hashLen(x>>16, bTableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
cv = x >> 24
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}

@@ -1,241 +0,0 @@
package flate
import "fmt"
// fastEncL3 is the level 3 encoder; its table keeps the two most recent
// entries per hash (Cur and Prev).
type fastEncL3 struct {
fastGen
table [1 << 16]tableEntryPrev
}
// Encode uses an algorithm similar to level 2, and will check up to two candidates.
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
tableBits = 16
tableSize = 1 << tableBits
hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
}
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
e.table[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// Skip if too small.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 7
nextS := s
var candidate tableEntry
for {
nextHash := hashLen(cv, tableBits, hashBytes)
s = nextS
nextS = s + 1 + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
candidates := e.table[nextHash]
now := load6432(src, nextS)
// Safe offset distance until s + 4...
minOffset := e.cur + s - (maxMatchOffset - 4)
e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
// Check both candidates
candidate = candidates.Cur
if candidate.offset < minOffset {
cv = now
// Previous will also be invalid, we have nothing.
continue
}
if uint32(cv) == load3232(src, candidate.offset-e.cur) {
if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
break
}
// Both match and are valid, pick longest.
offset := s - (candidate.offset - e.cur)
o2 := s - (candidates.Prev.offset - e.cur)
l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
if l2 > l1 {
candidate = candidates.Prev
}
break
} else {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
}
cv = now
}
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
//
t := candidate.offset - e.cur
l := e.matchlenLong(s+4, t+4, src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
t += l
// Index first pair after match end.
if int(t+8) < len(src) && t > 0 {
cv = load6432(src, t)
nextHash := hashLen(cv, tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + t},
}
}
goto emitRemainder
}
// Store a hash at every 6th position in-between.
for i := s - l + 2; i < s-5; i += 6 {
nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + i}}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 to s.
x := load6432(src, s-2)
prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 2},
}
x >>= 8
prevHash = hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 1},
}
x >>= 8
currHash := hashLen(x, tableBits, hashBytes)
candidates := e.table[currHash]
cv = x
e.table[currHash] = tableEntryPrev{
Prev: candidates.Cur,
Cur: tableEntry{offset: s + e.cur},
}
// Check both candidates
candidate = candidates.Cur
minOffset := e.cur + s - (maxMatchOffset - 4)
if candidate.offset > minOffset {
if uint32(cv) == load3232(src, candidate.offset-e.cur) {
// Found a match...
continue
}
candidate = candidates.Prev
if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
// Match at prev...
continue
}
}
cv = x >> 8
s++
break
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}

@@ -1,221 +0,0 @@
package flate
import "fmt"
type fastEncL4 struct {
fastGen
table [tableSize]tableEntry
bTable [tableSize]tableEntry
}
func (e *fastEncL4) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntry{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.bTable[i].offset = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
e.bTable[nextHashL] = entry
t = lCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
// We got a long match. Use that.
break
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4-byte match...
lCandidate = e.bTable[hash7(next, tableBits)]
// If the next long is a candidate, check if we should use that instead...
lOff := nextS - (lCandidate.offset - e.cur)
if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
if l2 > l1 {
s = nextS
t = lCandidate.offset - e.cur
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
// Extend the 4-byte match as long as possible.
l := e.matchlenLong(s+4, t+4, src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic("s-t")
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index first pair after match end.
if int(s+8) < len(src) {
cv := load6432(src, s)
e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
// Store every 3rd hash in-between
if true {
i := nextS
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
i += 3
for ; i < s-1; i += 3 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
e.bTable[prevHashL] = tableEntry{offset: o}
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}

View file

@ -1,310 +0,0 @@
package flate
import "fmt"
type fastEncL5 struct {
fastGen
table [tableSize]tableEntry
bTable [tableSize]tableEntryPrev
}
func (e *fastEncL5) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4-byte match...
l = e.matchlen(s+4, t+4, src) + 4
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
if l == 0 {
// Extend the 4-byte match as long as possible.
l = e.matchlenLong(s+4, t+4, src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
}
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is 2/3 bytes depending on input.
// 3 is only a little better when it is but sometimes a lot worse.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
t2 := eLong - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// Store every 3rd hash in-between.
if true {
const hashEvery = 3
i := s - l + 1
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// Do a long entry at i+1
cv >>= 8
t = tableEntry{offset: t.offset + 1}
eLong = &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
for ; i < s-1; i += hashEvery {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}

View file

@ -1,325 +0,0 @@
package flate
import "fmt"
type fastEncL6 struct {
fastGen
table [tableSize]tableEntry
bTable [tableSize]tableEntryPrev
}
func (e *fastEncL6) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
// Repeat MUST be > 1 and within range
repeat := int32(1)
for {
const skipLog = 7
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
// Calculate hashes of 'next'
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
// Long candidate matches at least 4 bytes.
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// Check the previous long candidate as well.
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
// Current value did not match, but check if previous long value does.
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4-byte match...
l = e.matchlen(s+4, t+4, src) + 4
// Look up next long candidate (at nextS)
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// Check repeat at s + repOff
const repOff = 1
t2 := s - repeat + repOff
if load3232(src, t2) == uint32(cv>>(8*repOff)) {
ml := e.matchlen(s+4+repOff, t2+4, src) + 4
if ml > l {
t = t2
l = ml
s += repOff
// Not worth checking more.
break
}
}
// If the next long is a candidate, use that...
t2 = lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
// This is ok, but check previous as well.
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
// Extend the 4-byte match as long as possible.
if l == 0 {
l = e.matchlenLong(s+4, t+4, src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
}
// Try to locate a better match by checking the end-of-match...
if sAt := s + l; sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is 2/3 bytes depending on input.
// 3 is only a little better when it is but sometimes a lot worse.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
// Test current
t2 := eLong.Cur.offset - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if off < maxMatchOffset {
if off > 0 && t2 >= 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
// Test next:
t2 = eLong.Prev.offset - e.cur - l + skipBeginning
off := s2 - t2
if off > 0 && off < maxMatchOffset && t2 >= 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if false {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
repeat = s - t
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index after match end.
for i := nextS + 1; i < int32(len(src))-8; i += 2 {
cv := load6432(src, i)
e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
}
goto emitRemainder
}
// Store every long hash in-between and every second short.
if true {
for i := nextS + 1; i < s-1; i += 2 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong.Cur, eLong.Prev = t, eLong.Cur
eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
cv = load6432(src, s)
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}

View file

@ -1,37 +0,0 @@
package flate
const (
// Masks for shifts with register sizes of the shift value.
// This can be used to work around the x86 design of shifting by mod register size.
// It can be used when a variable shift is always smaller than the register size.
// reg8SizeMaskX - shift value is 8 bits, shifted is X
reg8SizeMask8 = 7
reg8SizeMask16 = 15
reg8SizeMask32 = 31
reg8SizeMask64 = 63
// reg16SizeMaskX - shift value is 16 bits, shifted is X
reg16SizeMask8 = reg8SizeMask8
reg16SizeMask16 = reg8SizeMask16
reg16SizeMask32 = reg8SizeMask32
reg16SizeMask64 = reg8SizeMask64
// reg32SizeMaskX - shift value is 32 bits, shifted is X
reg32SizeMask8 = reg8SizeMask8
reg32SizeMask16 = reg8SizeMask16
reg32SizeMask32 = reg8SizeMask32
reg32SizeMask64 = reg8SizeMask64
// reg64SizeMaskX - shift value is 64 bits, shifted is X
reg64SizeMask8 = reg8SizeMask8
reg64SizeMask16 = reg8SizeMask16
reg64SizeMask32 = reg8SizeMask32
reg64SizeMask64 = reg8SizeMask64
// regSizeMaskUintX - shift value is uint, shifted is X
regSizeMaskUint8 = reg8SizeMask8
regSizeMaskUint16 = reg8SizeMask16
regSizeMaskUint32 = reg8SizeMask32
regSizeMaskUint64 = reg8SizeMask64
)
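For context, a minimal sketch of how such masks are typically consumed (the helper name is illustrative, not part of this diff): masking the shift amount proves to the compiler that the shift is always in range, so on amd64 it can emit a single shift instruction, while on the other architectures (next file) the all-ones masks make the expression a no-op.
func shiftRight(v uint64, n uint) uint64 {
	// amd64: n & regSizeMaskUint64 == n & 63, matching the CPU's
	// mod-64 shift semantics; elsewhere the mask leaves n unchanged.
	return v >> (n & regSizeMaskUint64)
}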

View file

@ -1,40 +0,0 @@
//go:build !amd64
// +build !amd64
package flate
const (
// Masks for shifts with register sizes of the shift value.
// This can be used to work around the x86 design of shifting by mod register size.
// It can be used when a variable shift is always smaller than the register size.
// reg8SizeMaskX - shift value is 8 bits, shifted is X
reg8SizeMask8 = 0xff
reg8SizeMask16 = 0xff
reg8SizeMask32 = 0xff
reg8SizeMask64 = 0xff
// reg16SizeMaskX - shift value is 16 bits, shifted is X
reg16SizeMask8 = 0xffff
reg16SizeMask16 = 0xffff
reg16SizeMask32 = 0xffff
reg16SizeMask64 = 0xffff
// reg32SizeMaskX - shift value is 32 bits, shifted is X
reg32SizeMask8 = 0xffffffff
reg32SizeMask16 = 0xffffffff
reg32SizeMask32 = 0xffffffff
reg32SizeMask64 = 0xffffffff
// reg64SizeMaskX - shift value is 64 bits, shifted is X
reg64SizeMask8 = 0xffffffffffffffff
reg64SizeMask16 = 0xffffffffffffffff
reg64SizeMask32 = 0xffffffffffffffff
reg64SizeMask64 = 0xffffffffffffffff
// regSizeMaskUintX - shift value is uint, shifted is X
regSizeMaskUint8 = ^uint(0)
regSizeMaskUint16 = ^uint(0)
regSizeMaskUint32 = ^uint(0)
regSizeMaskUint64 = ^uint(0)
)

View file

@ -1,318 +0,0 @@
package flate
import (
"io"
"math"
"sync"
)
const (
maxStatelessBlock = math.MaxInt16
// dictionary will be taken from maxStatelessBlock, so limit it.
maxStatelessDict = 8 << 10
slTableBits = 13
slTableSize = 1 << slTableBits
slTableShift = 32 - slTableBits
)
type statelessWriter struct {
dst io.Writer
closed bool
}
func (s *statelessWriter) Close() error {
if s.closed {
return nil
}
s.closed = true
// Emit EOF block
return StatelessDeflate(s.dst, nil, true, nil)
}
func (s *statelessWriter) Write(p []byte) (n int, err error) {
err = StatelessDeflate(s.dst, p, false, nil)
if err != nil {
return 0, err
}
return len(p), nil
}
func (s *statelessWriter) Reset(w io.Writer) {
s.dst = w
s.closed = false
}
// NewStatelessWriter will do compression without maintaining any state
// between Write calls: no memory is retained from one Write to the next,
// so compression and speed will be suboptimal.
// Because of this, the size of individual Write calls will affect output size.
func NewStatelessWriter(dst io.Writer) io.WriteCloser {
return &statelessWriter{dst: dst}
}
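A hedged usage sketch of the writer above (`buf` and `data` are illustrative):
	var buf bytes.Buffer
	w := NewStatelessWriter(&buf)
	if _, err := w.Write(data); err != nil {
		return err
	}
	return w.Close() // Close emits the final EOF block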
// bitWriterPool contains bit writers that can be reused.
var bitWriterPool = sync.Pool{
New: func() interface{} {
return newHuffmanBitWriter(nil)
},
}
// StatelessDeflate allows compressing directly to a Writer without retaining state.
// Everything will have been flushed when it returns.
// Up to 8KB of an optional dictionary can be given; it is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending a nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
var dst tokens
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
defer func() {
// don't keep a reference to our output
bw.reset(nil)
bitWriterPool.Put(bw)
}()
if eof && len(in) == 0 {
// Just write an EOF block.
// Could be faster...
bw.writeStoredHeader(0, true)
bw.flush()
return bw.err
}
// Truncate dict
if len(dict) > maxStatelessDict {
dict = dict[len(dict)-maxStatelessDict:]
}
// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
var inDict []byte
for len(in) > 0 {
todo := in
if len(inDict) > 0 {
if len(todo) > maxStatelessBlock-maxStatelessDict {
todo = todo[:maxStatelessBlock-maxStatelessDict]
}
} else if len(todo) > maxStatelessBlock-len(dict) {
todo = todo[:maxStatelessBlock-len(dict)]
}
inOrg := in
in = in[len(todo):]
uncompressed := todo
if len(dict) > 0 {
// combine dict and source
bufLen := len(todo) + len(dict)
combined := make([]byte, bufLen)
copy(combined, dict)
copy(combined[len(dict):], todo)
todo = combined
}
// Compress
if len(inDict) == 0 {
statelessEnc(&dst, todo, int16(len(dict)))
} else {
statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
}
isEof := eof && len(in) == 0
if dst.n == 0 {
bw.writeStoredHeader(len(uncompressed), isEof)
if bw.err != nil {
return bw.err
}
bw.writeBytes(uncompressed)
} else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
// If we removed less than 1/16th, huffman compress the block.
bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
} else {
bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
}
if len(in) > 0 {
// Retain a dict if we have more
inDict = inOrg[len(uncompressed)-maxStatelessDict:]
dict = nil
dst.Reset()
}
if bw.err != nil {
return bw.err
}
}
if !eof {
// Align, only a stored block can do that.
bw.writeStoredHeader(0, false)
}
bw.flush()
return bw.err
}
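And the one-shot form, assuming the caller supplies `out`, `in` and `dict` (a nil dict is allowed, per the doc comment above):
	// Compress `in` as a final (eof) stream; `dict` is presumed to
	// precede the block.
	if err := StatelessDeflate(out, in, true, dict); err != nil {
		return err
	}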
func hashSL(u uint32) uint32 {
return (u * 0x1e35a7bd) >> slTableShift
}
func load3216(b []byte, i int16) uint32 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:4]
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load6416(b []byte, i int16) uint64 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:8]
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func statelessEnc(dst *tokens, src []byte, startAt int16) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
type tableEntry struct {
offset int16
}
var table [slTableSize]tableEntry
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src)-int(startAt) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = 0
return
}
// Index until startAt
if startAt > 0 {
cv := load3232(src, 0)
for i := int16(0); i < startAt; i++ {
table[hashSL(cv)] = tableEntry{offset: i}
cv = (cv >> 8) | (uint32(src[i+4]) << 24)
}
}
s := startAt + 1
nextEmit := startAt
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int16(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load3216(src, s)
for {
const skipLog = 5
const doEvery = 2
nextS := s
var candidate tableEntry
for {
nextHash := hashSL(cv)
candidate = table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit || nextS <= 0 {
goto emitRemainder
}
now := load6416(src, nextS)
table[nextHash] = tableEntry{offset: s}
nextHash = hashSL(uint32(now))
if cv == load3216(src, candidate.offset) {
table[nextHash] = tableEntry{offset: nextS}
break
}
// Do one right away...
cv = uint32(now)
s = nextS
nextS++
candidate = table[nextHash]
now >>= 8
table[nextHash] = tableEntry{offset: s}
if cv == load3216(src, candidate.offset) {
table[nextHash] = tableEntry{offset: nextS}
break
}
cv = uint32(now)
s = nextS
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset
l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
// Save the match found
dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6416(src, s-2)
o := s - 2
prevHash := hashSL(uint32(x))
table[prevHash] = tableEntry{offset: o}
x >>= 16
currHash := hashSL(uint32(x))
candidate = table[currHash]
table[currHash] = tableEntry{offset: o + 2}
if uint32(x) != load3216(src, candidate.offset) {
cv = uint32(x >> 8)
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}

View file

@ -1,379 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
)
const (
// bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
// bits 16-22 offsetcode - 5 bits
// bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
// bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
lengthShift = 22
offsetMask = 1<<lengthShift - 1
typeMask = 3 << 30
literalType = 0 << 30
matchType = 1 << 30
matchOffsetOnlyMask = 0xffff
)
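As a worked example of this layout (values chosen purely for illustration): a match with xlength 5, offset code 6 and raw offset 100 is stored as `matchType | 5<<lengthShift | (6<<16 | 100)`; the accessors further down recover the pieces, with `length()` reading bits 22-29 and `offset()` reading bits 0-21.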
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH]
var lengthCodes = [256]uint8{
0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 28,
}
// lengthCodes1 is length codes, but starting at 1.
var lengthCodes1 = [256]uint8{
1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 29,
}
var offsetCodes = [256]uint32{
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
// offsetCodes14 are offsetCodes, but with 14 added.
var offsetCodes14 = [256]uint32{
14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
}
type token uint32
type tokens struct {
extraHist [32]uint16 // codes 256->maxnumlit
offHist [32]uint16 // offset codes
litHist [256]uint16 // codes 0->255
nFilled int
n uint16 // Must be able to contain maxStoreBlockSize
tokens [maxStoreBlockSize + 1]token
}
func (t *tokens) Reset() {
if t.n == 0 {
return
}
t.n = 0
t.nFilled = 0
for i := range t.litHist[:] {
t.litHist[i] = 0
}
for i := range t.extraHist[:] {
t.extraHist[i] = 0
}
for i := range t.offHist[:] {
t.offHist[i] = 0
}
}
func (t *tokens) Fill() {
if t.n == 0 {
return
}
for i, v := range t.litHist[:] {
if v == 0 {
t.litHist[i] = 1
t.nFilled++
}
}
for i, v := range t.extraHist[:literalCount-256] {
if v == 0 {
t.nFilled++
t.extraHist[i] = 1
}
}
for i, v := range t.offHist[:offsetCodeCount] {
if v == 0 {
t.offHist[i] = 1
}
}
}
func indexTokens(in []token) tokens {
var t tokens
t.indexTokens(in)
return t
}
func (t *tokens) indexTokens(in []token) {
t.Reset()
for _, tok := range in {
if tok < matchType {
t.AddLiteral(tok.literal())
continue
}
t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
}
}
// emitLiteral writes a literal chunk.
func emitLiteral(dst *tokens, lit []byte) {
for _, v := range lit {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
func (t *tokens) AddLiteral(lit byte) {
t.tokens[t.n] = token(lit)
t.litHist[lit]++
t.n++
}
// from https://stackoverflow.com/a/28730362
func mFastLog2(val float32) float32 {
ux := int32(math.Float32bits(val))
log2 := (float32)(((ux >> 23) & 255) - 128)
ux &= -0x7f800001
ux += 127 << 23
uval := math.Float32frombits(uint32(ux))
log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
return log2
}
// EstimatedBits will return a minimum size, in bits, estimated by an *optimal*
// compression of the block.
func (t *tokens) EstimatedBits() int {
shannon := float32(0)
bits := int(0)
nMatches := 0
total := int(t.n) + t.nFilled
if total > 0 {
invTotal := 1.0 / float32(total)
for _, v := range t.litHist[:] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
}
}
// Just add 15 for EOB
shannon += 15
for i, v := range t.extraHist[1 : literalCount-256] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
bits += int(lengthExtraBits[i&31]) * int(v)
nMatches += int(v)
}
}
}
if nMatches > 0 {
invTotal := 1.0 / float32(nMatches)
for i, v := range t.offHist[:offsetCodeCount] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
bits += int(offsetExtraBits[i&31]) * int(v)
}
}
}
return int(shannon) + bits
}
// AddMatch adds a match to the tokens.
// This function is very sensitive to inlining and right on the border.
func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
if debugDeflate {
if xlength >= maxMatchLength+baseMatchLength {
panic(fmt.Errorf("invalid length: %v", xlength))
}
if xoffset >= maxMatchOffset+baseMatchOffset {
panic(fmt.Errorf("invalid offset: %v", xoffset))
}
}
oCode := offsetCode(xoffset)
xoffset |= oCode << 16
t.extraHist[lengthCodes1[uint8(xlength)]]++
t.offHist[oCode&31]++
t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
t.n++
}
// AddMatchLong adds a match to the tokens, potentially longer than max match length.
// Length should NOT have the base subtracted, only offset should.
func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
if debugDeflate {
if xoffset >= maxMatchOffset+baseMatchOffset {
panic(fmt.Errorf("invalid offset: %v", xoffset))
}
}
oc := offsetCode(xoffset)
xoffset |= oc << 16
for xlength > 0 {
xl := xlength
if xl > 258 {
// We need to have at least baseMatchLength left over for next loop.
if xl > 258+baseMatchLength {
xl = 258
} else {
xl = 258 - baseMatchLength
}
}
xlength -= xl
xl -= baseMatchLength
t.extraHist[lengthCodes1[uint8(xl)]]++
t.offHist[oc&31]++
t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
t.n++
}
}
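For example (with baseMatchLength = 3, as in this package): a 600-byte match is emitted as three tokens covering 258, 258 and 84 bytes, each stored with the base length subtracted. The `258 + baseMatchLength` check guarantees the remainder left for the next iteration never drops below the minimum match length.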
func (t *tokens) AddEOB() {
t.tokens[t.n] = token(endBlockMarker)
t.extraHist[0]++
t.n++
}
func (t *tokens) Slice() []token {
return t.tokens[:t.n]
}
// VarInt returns the tokens as varint encoded bytes.
func (t *tokens) VarInt() []byte {
var b = make([]byte, binary.MaxVarintLen32*int(t.n))
var off int
for _, v := range t.tokens[:t.n] {
off += binary.PutUvarint(b[off:], uint64(v))
}
return b[:off]
}
// FromVarInt restores t to the varint encoded tokens provided.
// Any data in t is removed.
func (t *tokens) FromVarInt(b []byte) error {
var buf = bytes.NewReader(b)
var toks []token
for {
r, err := binary.ReadUvarint(buf)
if err == io.EOF {
break
}
if err != nil {
return err
}
toks = append(toks, token(r))
}
t.indexTokens(toks)
return nil
}
// Returns the type of a token
func (t token) typ() uint32 { return uint32(t) & typeMask }
// Returns the literal of a literal token
func (t token) literal() uint8 { return uint8(t) }
// Returns the extra offset of a match token
func (t token) offset() uint32 { return uint32(t) & offsetMask }
func (t token) length() uint8 { return uint8(t >> lengthShift) }
// Convert length to code.
func lengthCode(len uint8) uint8 { return lengthCodes[len] }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
if false {
if off < uint32(len(offsetCodes)) {
return offsetCodes[off&255]
} else if off>>7 < uint32(len(offsetCodes)) {
return offsetCodes[(off>>7)&255] + 14
} else {
return offsetCodes[(off>>14)&255] + 28
}
}
if off < uint32(len(offsetCodes)) {
return offsetCodes[uint8(off)]
}
return offsetCodes14[uint8(off>>7)]
}

View file

@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
}
func (r *readerWrapper) readByte() (byte, error) {
n2, err := r.r.Read(r.tmp[:1])
n2, err := io.ReadFull(r.r, r.tmp[:1])
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF

View file

@ -435,6 +435,7 @@ Exit Code 1
| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
| SYSEE | SYSENTER and SYSEXIT instructions |
| TBM | AMD Trailing Bit Manipulation |
| TDX_GUEST | Intel Trust Domain Extensions Guest |
| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |

View file

@ -226,6 +226,7 @@ const (
SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
SYSEE // SYSENTER and SYSEXIT instructions
TBM // AMD Trailing Bit Manipulation
TDX_GUEST // Intel Trust Domain Extensions Guest
TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
@ -1186,13 +1187,8 @@ func support() flagSet {
fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
// CPUID.(EAX=7, ECX=1).EDX
fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8)
fs.setIf(edx&(1<<5) != 0, AVXNECONVERT)
fs.setIf(edx&(1<<14) != 0, PREFETCHI)
// CPUID.(EAX=7, ECX=1).EAX
eax1, _, _, _ := cpuidex(7, 1)
eax1, _, _, edx1 := cpuidex(7, 1)
fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
@ -1202,6 +1198,11 @@ func support() flagSet {
fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
fs.setIf(eax1&(1<<26) != 0, LAM)
// CPUID.(EAX=7, ECX=1).EDX
fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
// Only detect AVX-512 features if XGETBV is supported
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
// Check for OS support
@ -1393,6 +1394,13 @@ func support() flagSet {
fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
}
if mfi >= 0x21 {
// Intel Trust Domain Extensions guests have their own cpuid leaf (0x21).
_, ebx, ecx, edx := cpuid(0x21)
identity := string(valAsString(ebx, edx, ecx))
fs.setIf(identity == "IntelTDX ", TDX_GUEST)
}
return fs
}

View file

@ -166,59 +166,60 @@ func _() {
_ = x[SYSCALL-156]
_ = x[SYSEE-157]
_ = x[TBM-158]
_ = x[TLB_FLUSH_NESTED-159]
_ = x[TME-160]
_ = x[TOPEXT-161]
_ = x[TSCRATEMSR-162]
_ = x[TSXLDTRK-163]
_ = x[VAES-164]
_ = x[VMCBCLEAN-165]
_ = x[VMPL-166]
_ = x[VMSA_REGPROT-167]
_ = x[VMX-168]
_ = x[VPCLMULQDQ-169]
_ = x[VTE-170]
_ = x[WAITPKG-171]
_ = x[WBNOINVD-172]
_ = x[WRMSRNS-173]
_ = x[X87-174]
_ = x[XGETBV1-175]
_ = x[XOP-176]
_ = x[XSAVE-177]
_ = x[XSAVEC-178]
_ = x[XSAVEOPT-179]
_ = x[XSAVES-180]
_ = x[AESARM-181]
_ = x[ARMCPUID-182]
_ = x[ASIMD-183]
_ = x[ASIMDDP-184]
_ = x[ASIMDHP-185]
_ = x[ASIMDRDM-186]
_ = x[ATOMICS-187]
_ = x[CRC32-188]
_ = x[DCPOP-189]
_ = x[EVTSTRM-190]
_ = x[FCMA-191]
_ = x[FP-192]
_ = x[FPHP-193]
_ = x[GPA-194]
_ = x[JSCVT-195]
_ = x[LRCPC-196]
_ = x[PMULL-197]
_ = x[SHA1-198]
_ = x[SHA2-199]
_ = x[SHA3-200]
_ = x[SHA512-201]
_ = x[SM3-202]
_ = x[SM4-203]
_ = x[SVE-204]
_ = x[lastID-205]
_ = x[TDX_GUEST-159]
_ = x[TLB_FLUSH_NESTED-160]
_ = x[TME-161]
_ = x[TOPEXT-162]
_ = x[TSCRATEMSR-163]
_ = x[TSXLDTRK-164]
_ = x[VAES-165]
_ = x[VMCBCLEAN-166]
_ = x[VMPL-167]
_ = x[VMSA_REGPROT-168]
_ = x[VMX-169]
_ = x[VPCLMULQDQ-170]
_ = x[VTE-171]
_ = x[WAITPKG-172]
_ = x[WBNOINVD-173]
_ = x[WRMSRNS-174]
_ = x[X87-175]
_ = x[XGETBV1-176]
_ = x[XOP-177]
_ = x[XSAVE-178]
_ = x[XSAVEC-179]
_ = x[XSAVEOPT-180]
_ = x[XSAVES-181]
_ = x[AESARM-182]
_ = x[ARMCPUID-183]
_ = x[ASIMD-184]
_ = x[ASIMDDP-185]
_ = x[ASIMDHP-186]
_ = x[ASIMDRDM-187]
_ = x[ATOMICS-188]
_ = x[CRC32-189]
_ = x[DCPOP-190]
_ = x[EVTSTRM-191]
_ = x[FCMA-192]
_ = x[FP-193]
_ = x[FPHP-194]
_ = x[GPA-195]
_ = x[JSCVT-196]
_ = x[LRCPC-197]
_ = x[PMULL-198]
_ = x[SHA1-199]
_ = x[SHA2-200]
_ = x[SHA3-201]
_ = x[SHA512-202]
_ = x[SM3-203]
_ = x[SM4-204]
_ = x[SVE-205]
_ = x[lastID-206]
_ = x[firstID-0]
}
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1198, 1201, 1207, 1217, 1225, 1229, 1238, 1242, 1254, 1257, 1267, 1270, 1277, 1285, 1292, 1295, 1302, 1305, 1310, 1316, 1324, 1330, 1336, 1344, 1349, 1356, 1363, 1371, 1378, 1383, 1388, 1395, 1399, 1401, 1405, 1408, 1413, 1418, 1423, 1427, 1431, 1435, 1441, 1444, 1447, 1450, 1456}
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1191, 1207, 1210, 1216, 1226, 1234, 1238, 1247, 1251, 1263, 1266, 1276, 1279, 1286, 1294, 1301, 1304, 1311, 1314, 1319, 1325, 1333, 1339, 1345, 1353, 1358, 1365, 1372, 1380, 1387, 1392, 1397, 1404, 1408, 1410, 1414, 1417, 1422, 1427, 1432, 1436, 1440, 1444, 1450, 1453, 1456, 1459, 1465}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {

View file

@ -1,4 +1,5 @@
# Table Of Contents <!-- omit in toc -->
- [v0.28.0](#v0280)
- [v0.27.0](#v0270)
- [v0.26.4](#v0264)
- [v0.26.3](#v0263)
@ -8,6 +9,73 @@
- [v0.25.1](#v0251)
- [v0.25.0](#v0250)
# [v0.28.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.28.0)
## 🔦 Highlights <!-- omit in toc -->
### Smart Dialing <!-- omit in toc -->
This release introduces smart dialing logic. Currently, libp2p dials all addresses of a remote peer in parallel, and
aborts all outstanding dials as soon as the first one succeeds.
Dialing many addresses in parallel creates a lot of churn on the client side, puts unnecessary load on the network and
on the server side, and is heavily discouraged by the networking community (see [RFC 8305](https://www.rfc-editor.org/rfc/rfc8305) for example).
When connecting to a peer, we first determine the order in which to dial its addresses. This ranking logic considers a number of corner cases
described in detail in the documentation of the swarm package (`swarm.DefaultDialRanker`).
At a high level, this is what happens:
* If a peer offers a WebTransport and a QUIC address (on the same IP:port), the QUIC address is preferred.
* If a peer has both a QUIC and a TCP address, the QUIC address is dialed first; a TCP connection is attempted only if the QUIC connection attempt doesn't succeed within 250ms.
Our measurements on the IPFS network show that for >90% of established libp2p connections, the first connection attempt succeeds,
leading to a dramatic decrease in the number of aborted connection attempts.
We also added new metrics to the swarm Grafana dashboard, showing:
* The number of connection attempts it took to establish a connection
* The delay introduced by the ranking logic
This feature should be safe to enable for nodes running in data centers and for most nodes in home networks.
However, some networks (mostly home and corporate ones) block all UDP traffic. On such networks, the current implementation
of the smart dialing logic will lead to a regression, since it prefers QUIC addresses over TCP addresses. Nodes would still be
able to connect, but establishment of the TCP connection would be delayed by 250ms.
In a future release (see #1605 for details), we will introduce a feature called blackhole detection. By observing the outcome of
QUIC connection attempts, we can determine if UDP traffic is blocked (namely, if all QUIC connection attempts fail), and stop
dialing QUIC in this case altogether. Once this detection logic is in place, smart dialing will be enabled by default.
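To make this concrete, here is a minimal, hedged sketch of a custom ranker implementing the QUIC-first behaviour described above (the function name is illustrative; the `DialRanker` and `AddrDelay` types appear in the core/network changes later in this diff):
import (
	"time"
	"github.com/libp2p/go-libp2p/core/network"
	ma "github.com/multiformats/go-multiaddr"
)
// quicFirst dials QUIC addresses immediately and delays everything else
// (e.g. TCP) by 250ms, mirroring the ranking described above.
func quicFirst(addrs []ma.Multiaddr) []network.AddrDelay {
	out := make([]network.AddrDelay, 0, len(addrs))
	for _, a := range addrs {
		delay := 250 * time.Millisecond
		if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
			delay = 0 // dial QUIC right away
		}
		out = append(out, network.AddrDelay{Addr: a, Delay: delay})
	}
	return out
}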
### More Metrics! <!-- omit in toc -->
Since the last release, we've added metrics for:
* [Holepunching](https://github.com/libp2p/go-libp2p/pull/2246)
* Smart Dialing (see above)
### WebTransport <!-- omit in toc -->
* [#2251](https://github.com/libp2p/go-libp2p/pull/2251): Infer the public WebTransport address from `quic-v1` addresses if both transports use the same port.
* [#2271](https://github.com/libp2p/go-libp2p/pull/2271): Only add certificate hashes to the WebTransport multiaddress if listening on WebTransport
## Housekeeping updates <!-- omit in toc -->
* Identify
* [#2303](https://github.com/libp2p/go-libp2p/pull/2303): Don't send default protocol version
* Prevent polluting PeerStore with local addrs
* [#2325](https://github.com/libp2p/go-libp2p/pull/2325): Don't save signed peer records
* [#2300](https://github.com/libp2p/go-libp2p/pull/2300): Filter received addresses based on the node's remote address
* WebSocket
* [#2280](https://github.com/libp2p/go-libp2p/pull/2280): Reverted back to the Gorilla library for WebSocket
* NAT
* [#2248](https://github.com/libp2p/go-libp2p/pull/2248): Move NAT mapping logic out of the host
## 🐞 Bugfixes <!-- omit in toc -->
* Identify
* [Reject signed peer records on peer ID mismatch](https://github.com/libp2p/go-libp2p/commit/8d771355b41297623e05b04a865d029a2522a074)
* [#2299](https://github.com/libp2p/go-libp2p/pull/2299): Avoid spuriously pushing updates
* Swarm
* [#2322](https://github.com/libp2p/go-libp2p/pull/2322): Dedup addresses to dial
* [#2284](https://github.com/libp2p/go-libp2p/pull/2284): Change maps with multiaddress keys to use strings
* QUIC
* [#2262](https://github.com/libp2p/go-libp2p/pull/2262): Prioritize listen connections for reuse
* [#2276](https://github.com/libp2p/go-libp2p/pull/2276): Don't panic when quic-go's accept call errors
* [#2263](https://github.com/libp2p/go-libp2p/pull/2263): Fix race condition when generating random holepunch packet
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.27.0...v0.28.0
# [v0.27.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.27.0)
### Breaking Changes <!-- omit in toc -->
@ -82,7 +150,7 @@ Since the last release, we've added additional metrics to different components.
Metrics were added to:
* [AutoNat](https://github.com/libp2p/go-libp2p/pull/2086): Current Reachability Status and Confidence, Client and Server DialResponses, Server DialRejections. The dashboard is [available here](https://github.com/libp2p/go-libp2p/blob/master/dashboards/autonat/autonat.json).
* Swarm:
- [Early Muxer Selection](https://github.com/libp2p/go-libp2p/pull/2119): Added early_muxer label indicating whether a connection was established using early muxer selection.
- [IP Version](https://github.com/libp2p/go-libp2p/pull/2114): Added ip_version label to connection metrics
* Identify:
- Metrics for Identify, IdentifyPush, PushesTriggered (https://github.com/libp2p/go-libp2p/pull/2069)
@ -155,7 +223,7 @@ We therefore concluded that it's safe to drop this code path altogether, and the
* Introduces a new `ResourceLimits` type which uses `LimitVal` instead of ints so it can encode the above for the resources.
* Changes `LimitConfig` to `PartialLimitConfig` and uses `ResourceLimits`. This along with the marshalling changes means you can now marshal the fact that some resource limit is set to block all.
* Because unset values fall back to the defaults, this avoids the footgun of initializing the resource manager with 0 limits (which would block everything).
In general, you can go from a resource config with defaults to a concrete one with `.Build()`, e.g. `ResourceLimits.Build() => BaseLimit`, `PartialLimitConfig.Build() => ConcreteLimitConfig`, `LimitVal.Build() => int`. See PR #2000 for more details; a hedged usage sketch follows below.
If you're using the defaults for the resource manager, there should be no changes needed.
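For anyone customizing limits, a sketch of the `PartialLimitConfig` flow described above (assuming the resource manager package's `DefaultLimits`, `NewFixedLimiter` and `NewResourceManager`, which are not shown in this diff):
import (
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/host"
	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
)
func newHostWithLimits() (host.Host, error) {
	// Override a single resource, leave everything else at the defaults.
	partial := rcmgr.PartialLimitConfig{
		System: rcmgr.ResourceLimits{Conns: rcmgr.Unlimited},
	}
	// PartialLimitConfig.Build() => ConcreteLimitConfig, as described above.
	concrete := partial.Build(rcmgr.DefaultLimits.AutoScale())
	rm, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(concrete))
	if err != nil {
		return nil, err
	}
	return libp2p.New(libp2p.ResourceManager(rm))
}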

View file

@ -123,6 +123,8 @@ type Config struct {
DisableMetrics bool
PrometheusRegisterer prometheus.Registerer
DialRanker network.DialRanker
}
func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swarm, error) {
@ -173,6 +175,11 @@ func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swa
if cfg.MultiaddrResolver != nil {
opts = append(opts, swarm.WithMultiaddrResolver(cfg.MultiaddrResolver))
}
dialRanker := cfg.DialRanker
if dialRanker == nil {
dialRanker = swarm.NoDelayDialRanker
}
opts = append(opts, swarm.WithDialRanker(dialRanker))
if enableMetrics {
opts = append(opts,
swarm.WithMetricsTracer(swarm.NewMetricsTracer(swarm.WithRegisterer(cfg.PrometheusRegisterer))))
@ -405,6 +412,7 @@ func (cfg *Config) NewNode() (host.Host, error) {
Reporter: cfg.Reporter,
PeerKey: autonatPrivKey,
Peerstore: ps,
DialRanker: swarm.NoDelayDialRanker,
}
dialer, err := autoNatCfg.makeSwarm(eventbus.NewBus(), false)

View file

@ -52,7 +52,6 @@ import (
// DisconnectReasons is that we require stream multiplexing capability to open a
// control protocol stream to transmit the message.
type ConnectionGater interface {
// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
//
// This is called by the network.Network implementation when dialling a peer.

View file

@ -6,8 +6,10 @@
package network
import (
"bytes"
"context"
"io"
"sort"
"time"
"github.com/libp2p/go-libp2p/core/peer"
@ -184,3 +186,33 @@ type Dialer interface {
Notify(Notifiee)
StopNotify(Notifiee)
}
// AddrDelay provides an address along with the delay after which the address
// should be dialed
type AddrDelay struct {
Addr ma.Multiaddr
Delay time.Duration
}
// DialRanker provides a schedule of dialing the provided addresses
type DialRanker func([]ma.Multiaddr) []AddrDelay
// DedupAddrs deduplicates addresses in place, leaving only unique addresses.
// It doesn't allocate.
func DedupAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
if len(addrs) == 0 {
return addrs
}
sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) < 0 })
idx := 1
for i := 1; i < len(addrs); i++ {
if !addrs[i-1].Equal(addrs[i]) {
addrs[idx] = addrs[i]
idx++
}
}
for i := idx; i < len(addrs); i++ {
addrs[i] = nil
}
return addrs[:idx]
}
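A quick usage sketch (the addresses are illustrative); note that the result aliases the input slice and reorders it by byte value:
	a1 := ma.StringCast("/ip4/1.2.3.4/tcp/1")
	a2 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
	deduped := DedupAddrs([]ma.Multiaddr{a1, a2, a1})
	// len(deduped) == 2: the repeated a1 has been dropped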

View file

@ -106,11 +106,6 @@ func Seal(rec Record, privateKey crypto.PrivKey) (*Envelope, error) {
// doSomethingWithPeerRecord(peerRec)
// }
//
// Important: you MUST check the error value before using the returned Envelope. In some error
// cases, including when the envelope signature is invalid, both the Envelope and an error will
// be returned. This allows you to inspect the unmarshalled but invalid Envelope. As a result,
// you must not assume that any non-nil Envelope returned from this function is valid.
//
// If the Envelope signature is valid, but no Record type is registered for the Envelope's
// PayloadType, ErrPayloadTypeNotRegistered will be returned, along with the Envelope and
// a nil Record.
@ -122,12 +117,12 @@ func ConsumeEnvelope(data []byte, domain string) (envelope *Envelope, rec Record
err = e.validate(domain)
if err != nil {
return e, nil, fmt.Errorf("failed to validate envelope: %w", err)
return nil, nil, fmt.Errorf("failed to validate envelope: %w", err)
}
rec, err = e.Record()
if err != nil {
return e, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err)
return nil, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err)
}
return e, rec, nil
}
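A caller-side sketch of the new contract (the peer-record domain is just one example): with this change the returned *Envelope is nil whenever validation or payload unmarshalling fails, so callers can no longer inspect an invalid envelope.

package example

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/record"
)

func consumePeerRecord(data []byte) {
	envelope, rec, err := record.ConsumeEnvelope(data, peer.PeerRecordEnvelopeDomain)
	if err != nil {
		// envelope and rec are both nil here; there is nothing left to inspect.
		fmt.Println("bad envelope:", err)
		return
	}
	fmt.Println("payload type:", envelope.PayloadType)
	if pr, ok := rec.(*peer.PeerRecord); ok {
		fmt.Println("peer record for", pr.PeerID)
	}
}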


@ -4,6 +4,7 @@ package transport
import (
"context"
"errors"
"net"
"github.com/libp2p/go-libp2p/core/network"
@ -94,6 +95,9 @@ type Listener interface {
Multiaddr() ma.Multiaddr
}
// ErrListenerClosed is returned by Listener.Accept when the listener is gracefully closed.
var ErrListenerClosed = errors.New("listener closed")
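A sketch of the intended use in an accept loop (the handler is hypothetical); the swarm's own shutdown path later in this diff performs the same check to avoid logging spurious errors:

package example

import (
	"errors"
	"log"

	"github.com/libp2p/go-libp2p/core/transport"
)

func acceptLoop(l transport.Listener) {
	for {
		c, err := l.Accept()
		if errors.Is(err, transport.ErrListenerClosed) {
			return // graceful shutdown, not a real failure
		}
		if err != nil {
			log.Printf("accept error: %v", err)
			return
		}
		go handle(c)
	}
}

func handle(c transport.CapableConn) { _ = c.Close() } // hypothetical handler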
// TransportNetwork is an inet.Network with methods for managing transports.
type TransportNetwork interface {
network.Network


@ -574,3 +574,16 @@ func PrometheusRegisterer(reg prometheus.Registerer) Option {
return nil
}
}
// DialRanker configures libp2p to use d as the dial ranker. To enable smart
// dialing, use `swarm.DefaultDialRanker`. Use `swarm.NoDelayDialRanker` to
// disable smart dialing.
func DialRanker(d network.DialRanker) Option {
return func(cfg *Config) error {
if cfg.DialRanker != nil {
return errors.New("dial ranker already configured")
}
cfg.DialRanker = d
return nil
}
}
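Since makeSwarm above falls back to swarm.NoDelayDialRanker when nothing is configured, opting into smart dialing is a one-line option (sketch):

package main

import (
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/net/swarm"
)

func main() {
	h, err := libp2p.New(libp2p.DialRanker(swarm.DefaultDialRanker))
	if err != nil {
		panic(err)
	}
	defer h.Close()
}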


@ -1,13 +1,11 @@
package basichost
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net"
"sort"
"sync"
"time"
@ -25,7 +23,6 @@ import (
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
"github.com/libp2p/go-libp2p/p2p/host/pstoremanager"
"github.com/libp2p/go-libp2p/p2p/host/relaysvc"
inat "github.com/libp2p/go-libp2p/p2p/net/nat"
relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
"github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
@ -254,6 +251,12 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
}
if opts.EnableHolePunching {
if opts.EnableMetrics {
hpOpts := []holepunch.Option{
holepunch.WithMetricsTracer(holepunch.NewMetricsTracer(holepunch.WithRegisterer(opts.PrometheusRegisterer)))}
opts.HolePunchingOptions = append(hpOpts, opts.HolePunchingOptions...)
}
h.hps, err = holepunch.NewService(h, h.ids, opts.HolePunchingOptions...)
if err != nil {
return nil, fmt.Errorf("failed to create hole punch service: %w", err)
@ -592,7 +595,6 @@ func (h *BasicHost) EventBus() event.Bus {
func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
h.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
is.SetProtocol(p)
handler(is)
return nil
})
@ -606,7 +608,6 @@ func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHand
func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
h.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
is.SetProtocol(p)
handler(is)
return nil
})
@ -816,26 +817,6 @@ func (h *BasicHost) NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
return addr
}
// dedupAddrs deduplicates addresses in place, leave only unique addresses.
// It doesn't allocate.
func dedupAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
if len(addrs) == 0 {
return addrs
}
sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) < 0 })
idx := 1
for i := 1; i < len(addrs); i++ {
if !addrs[i-1].Equal(addrs[i]) {
addrs[idx] = addrs[i]
idx++
}
}
for i := idx; i < len(addrs); i++ {
addrs[i] = nil
}
return addrs[:idx]
}
// AllAddrs returns all the addresses of BasicHost at this moment in time.
// It's ok to not include addresses if they're not available to be used now.
func (h *BasicHost) AllAddrs() []ma.Multiaddr {
@ -860,101 +841,20 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
finalAddrs = append(finalAddrs, resolved...)
}
finalAddrs = dedupAddrs(finalAddrs)
finalAddrs = network.DedupAddrs(finalAddrs)
var natMappings []inat.Mapping
// natmgr is nil if we do not use nat option;
// h.natmgr.NAT() is nil if not ready, or no nat is available.
if h.natmgr != nil && h.natmgr.NAT() != nil {
natMappings = h.natmgr.NAT().Mappings()
}
if len(natMappings) > 0 {
// use nat mappings if we have them
if h.natmgr != nil && h.natmgr.HasDiscoveredNAT() {
// We have successfully mapped ports on our NAT. Use those
// instead of observed addresses (mostly).
// First, generate a mapping table.
// protocol -> internal port -> external addr
ports := make(map[string]map[int]net.Addr)
for _, m := range natMappings {
addr, err := m.ExternalAddr()
if err != nil {
// mapping not ready yet.
continue
}
protoPorts, ok := ports[m.Protocol()]
if !ok {
protoPorts = make(map[int]net.Addr)
ports[m.Protocol()] = protoPorts
}
protoPorts[m.InternalPort()] = addr
}
// Next, apply this mapping to our addresses.
for _, listen := range listenAddrs {
found := false
transport, rest := ma.SplitFunc(listen, func(c ma.Component) bool {
if found {
return true
}
switch c.Protocol().Code {
case ma.P_TCP, ma.P_UDP:
found = true
}
return false
})
if !manet.IsThinWaist(transport) {
extMaddr := h.natmgr.GetMapping(listen)
if extMaddr == nil {
// not mapped
continue
}
naddr, err := manet.ToNetAddr(transport)
if err != nil {
log.Error("error parsing net multiaddr %q: %s", transport, err)
continue
}
var (
ip net.IP
iport int
protocol string
)
switch naddr := naddr.(type) {
case *net.TCPAddr:
ip = naddr.IP
iport = naddr.Port
protocol = "tcp"
case *net.UDPAddr:
ip = naddr.IP
iport = naddr.Port
protocol = "udp"
default:
continue
}
if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
// We only map global unicast & unspecified addresses ports.
// Not broadcast, multicast, etc.
continue
}
mappedAddr, ok := ports[protocol][iport]
if !ok {
// Not mapped.
continue
}
mappedMaddr, err := manet.FromNetAddr(mappedAddr)
if err != nil {
log.Errorf("mapped addr can't be turned into a multiaddr %q: %s", mappedAddr, err)
continue
}
extMaddr := mappedMaddr
if rest != nil {
extMaddr = ma.Join(extMaddr, rest)
}
// if the router reported a sane address
if !manet.IsIPUnspecified(extMaddr) {
// Add in the mapped addr.
@ -1010,7 +910,7 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr {
}
finalAddrs = append(finalAddrs, observedAddrs...)
}
finalAddrs = dedupAddrs(finalAddrs)
finalAddrs = network.DedupAddrs(finalAddrs)
finalAddrs = inferWebtransportAddrsFromQuic(finalAddrs)
return finalAddrs


@ -0,0 +1,6 @@
//go:build gomock || generate
package basichost
//go:generate sh -c "go run github.com/golang/mock/mockgen -build_flags=\"-tags=gomock\" -package basichost -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/host/basic NAT"
type NAT nat


@ -4,6 +4,7 @@ import (
"context"
"io"
"net"
"net/netip"
"strconv"
"sync"
"time"
@ -12,24 +13,38 @@ import (
inat "github.com/libp2p/go-libp2p/p2p/net/nat"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
// NATManager is a simple interface to manage NAT devices.
// It listens for Listen and ListenClose notifications from the network.Network,
// and tries to obtain port mappings for those.
type NATManager interface {
// NAT gets the NAT device managed by the NAT manager.
NAT() *inat.NAT
// Ready receives a notification when the NAT device is ready for use.
Ready() <-chan struct{}
GetMapping(ma.Multiaddr) ma.Multiaddr
HasDiscoveredNAT() bool
io.Closer
}
// NewNATManager creates a NAT manager.
func NewNATManager(net network.Network) NATManager {
return newNatManager(net)
return newNATManager(net)
}
type entry struct {
protocol string
port int
}
type nat interface {
AddMapping(ctx context.Context, protocol string, port int) error
RemoveMapping(ctx context.Context, protocol string, port int) error
GetMapping(protocol string, port int) (netip.AddrPort, bool)
io.Closer
}
// so we can mock it in tests
var discoverNAT = func(ctx context.Context) (nat, error) { return inat.DiscoverNAT(ctx) }
// natManager takes care of adding + removing port mappings to the nat.
// Initialized with the host if it has a NATPortMap option enabled.
// natManager receives signals from the network, and checks on nat mappings:
@ -39,22 +54,25 @@ func NewNATManager(net network.Network) NATManager {
type natManager struct {
net network.Network
natMx sync.RWMutex
nat *inat.NAT
nat nat
ready chan struct{} // closed once the nat is ready to process port mappings
syncFlag chan struct{}
syncFlag chan struct{} // cap: 1
tracked map[entry]bool // the bool is only used in doSync and has no meaning outside of that function
refCount sync.WaitGroup
ctx context.Context
ctxCancel context.CancelFunc
}
func newNatManager(net network.Network) *natManager {
func newNATManager(net network.Network) *natManager {
ctx, cancel := context.WithCancel(context.Background())
nmgr := &natManager{
net: net,
ready: make(chan struct{}),
syncFlag: make(chan struct{}, 1),
ctx: ctx,
ctxCancel: cancel,
tracked: make(map[entry]bool),
}
nmgr.refCount.Add(1)
go nmgr.background(ctx)
@ -69,10 +87,10 @@ func (nmgr *natManager) Close() error {
return nil
}
// Ready returns a channel which will be closed when the NAT has been found
// and is ready to be used, or the search process is done.
func (nmgr *natManager) Ready() <-chan struct{} {
return nmgr.ready
func (nmgr *natManager) HasDiscoveredNAT() bool {
nmgr.natMx.RLock()
defer nmgr.natMx.RUnlock()
return nmgr.nat != nil
}
func (nmgr *natManager) background(ctx context.Context) {
@ -80,25 +98,24 @@ func (nmgr *natManager) background(ctx context.Context) {
defer func() {
nmgr.natMx.Lock()
defer nmgr.natMx.Unlock()
if nmgr.nat != nil {
nmgr.nat.Close()
}
nmgr.natMx.Unlock()
}()
discoverCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
natInstance, err := inat.DiscoverNAT(discoverCtx)
natInstance, err := discoverNAT(discoverCtx)
if err != nil {
log.Info("DiscoverNAT error:", err)
close(nmgr.ready)
return
}
nmgr.natMx.Lock()
nmgr.nat = natInstance
nmgr.natMx.Unlock()
close(nmgr.ready)
// sign natManager up for network notifications
// we need to sign up here to avoid missing some notifs
@ -127,10 +144,10 @@ func (nmgr *natManager) sync() {
// doSync syncs the current NAT mappings, removing any outdated mappings and adding any
// new mappings.
func (nmgr *natManager) doSync() {
ports := map[string]map[int]bool{
"tcp": {},
"udp": {},
for e := range nmgr.tracked {
nmgr.tracked[e] = false
}
var newAddresses []entry
for _, maddr := range nmgr.net.ListenAddresses() {
// Strip the IP
maIP, rest := ma.SplitFirst(maddr)
@ -144,10 +161,9 @@ func (nmgr *natManager) doSync() {
continue
}
// Only bother if we're listening on a
// unicast/unspecified IP.
// Only bother if we're listening on a unicast / unspecified IP.
ip := net.IP(maIP.RawValue())
if !(ip.IsGlobalUnicast() || ip.IsUnspecified()) {
if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
continue
}
@ -166,74 +182,118 @@ func (nmgr *natManager) doSync() {
default:
continue
}
port, err := strconv.ParseUint(proto.Value(), 10, 16)
if err != nil {
// bug in multiaddr
panic(err)
}
ports[protocol][int(port)] = false
e := entry{protocol: protocol, port: int(port)}
if _, ok := nmgr.tracked[e]; ok {
nmgr.tracked[e] = true
} else {
newAddresses = append(newAddresses, e)
}
}
var wg sync.WaitGroup
defer wg.Wait()
// Close old mappings
for _, m := range nmgr.nat.Mappings() {
mappedPort := m.InternalPort()
if _, ok := ports[m.Protocol()][mappedPort]; !ok {
// No longer need this mapping.
wg.Add(1)
go func(m inat.Mapping) {
defer wg.Done()
m.Close()
}(m)
} else {
// already mapped
ports[m.Protocol()][mappedPort] = true
for e, v := range nmgr.tracked {
if !v {
nmgr.nat.RemoveMapping(nmgr.ctx, e.protocol, e.port)
delete(nmgr.tracked, e)
}
}
// Create new mappings.
for proto, pports := range ports {
for port, mapped := range pports {
if mapped {
continue
}
wg.Add(1)
go func(proto string, port int) {
defer wg.Done()
_, err := nmgr.nat.NewMapping(proto, port)
if err != nil {
log.Errorf("failed to port-map %s port %d: %s", proto, port, err)
}
}(proto, port)
for _, e := range newAddresses {
if err := nmgr.nat.AddMapping(nmgr.ctx, e.protocol, e.port); err != nil {
log.Errorf("failed to port-map %s port %d: %s", e.protocol, e.port, err)
}
nmgr.tracked[e] = false
}
}
// NAT returns the natManager's nat object. this may be nil, if
// (a) the search process is still ongoing, or (b) the search process
// found no nat. Clients must check whether the return value is nil.
func (nmgr *natManager) NAT() *inat.NAT {
func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
nmgr.natMx.Lock()
defer nmgr.natMx.Unlock()
return nmgr.nat
if nmgr.nat == nil { // NAT not yet initialized
return nil
}
var found bool
var proto int // ma.P_TCP or ma.P_UDP
transport, rest := ma.SplitFunc(addr, func(c ma.Component) bool {
if found {
return true
}
proto = c.Protocol().Code
found = proto == ma.P_TCP || proto == ma.P_UDP
return false
})
if !manet.IsThinWaist(transport) {
return nil
}
naddr, err := manet.ToNetAddr(transport)
if err != nil {
log.Error("error parsing net multiaddr %q: %s", transport, err)
return nil
}
var (
ip net.IP
port int
protocol string
)
switch naddr := naddr.(type) {
case *net.TCPAddr:
ip = naddr.IP
port = naddr.Port
protocol = "tcp"
case *net.UDPAddr:
ip = naddr.IP
port = naddr.Port
protocol = "udp"
default:
return nil
}
if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
// We only map global unicast & unspecified addresses ports, not broadcast, multicast, etc.
return nil
}
extAddr, ok := nmgr.nat.GetMapping(protocol, port)
if !ok {
return nil
}
var mappedAddr net.Addr
switch naddr.(type) {
case *net.TCPAddr:
mappedAddr = net.TCPAddrFromAddrPort(extAddr)
case *net.UDPAddr:
mappedAddr = net.UDPAddrFromAddrPort(extAddr)
}
mappedMaddr, err := manet.FromNetAddr(mappedAddr)
if err != nil {
log.Errorf("mapped addr can't be turned into a multiaddr %q: %s", mappedAddr, err)
return nil
}
extMaddr := mappedMaddr
if rest != nil {
extMaddr = ma.Join(extMaddr, rest)
}
return extMaddr
}
type nmgrNetNotifiee natManager
func (nn *nmgrNetNotifiee) natManager() *natManager {
return (*natManager)(nn)
}
func (nn *nmgrNetNotifiee) Listen(n network.Network, addr ma.Multiaddr) {
nn.natManager().sync()
}
func (nn *nmgrNetNotifiee) ListenClose(n network.Network, addr ma.Multiaddr) {
nn.natManager().sync()
}
func (nn *nmgrNetNotifiee) Connected(network.Network, network.Conn) {}
func (nn *nmgrNetNotifiee) Disconnected(network.Network, network.Conn) {}
func (nn *nmgrNetNotifiee) natManager() *natManager { return (*natManager)(nn) }
func (nn *nmgrNetNotifiee) Listen(network.Network, ma.Multiaddr) { nn.natManager().sync() }
func (nn *nmgrNetNotifiee) ListenClose(n network.Network, addr ma.Multiaddr) { nn.natManager().sync() }
func (nn *nmgrNetNotifiee) Connected(network.Network, network.Conn) {}
func (nn *nmgrNetNotifiee) Disconnected(network.Network, network.Conn) {}


@ -119,16 +119,16 @@ func (rh *RoutedHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
}
// Build lookup map
lookup := make(map[ma.Multiaddr]struct{}, len(addrs))
lookup := make(map[string]struct{}, len(addrs))
for _, addr := range addrs {
lookup[addr] = struct{}{}
lookup[string(addr.Bytes())] = struct{}{}
}
// if there's any address that's not in the previous set
// of addresses, try to connect again. If all addresses
// were known previously we return the original error.
for _, newAddr := range newAddrs {
if _, found := lookup[newAddr]; found {
if _, found := lookup[string(newAddr.Bytes())]; found {
continue
}


@ -0,0 +1,29 @@
package metricshelper
import ma "github.com/multiformats/go-multiaddr"
var transports = [...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP}
func GetTransport(a ma.Multiaddr) string {
for _, t := range transports {
if _, err := a.ValueForProtocol(t); err == nil {
return ma.ProtocolWithCode(t).Name
}
}
return "other"
}
func GetIPVersion(addr ma.Multiaddr) string {
version := "unknown"
ma.ForEach(addr, func(c ma.Component) bool {
if c.Protocol().Code == ma.P_IP4 {
version = "ip4"
return false
} else if c.Protocol().Code == ma.P_IP6 {
version = "ip6"
return false
}
return true
})
return version
}
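For a typical address these helpers would report (sketch; the address is hypothetical):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/p2p/metricshelper"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	a := ma.StringCast("/ip4/192.0.2.1/udp/4001/quic-v1")
	fmt.Println(metricshelper.GetTransport(a)) // "quic-v1"
	fmt.Println(metricshelper.GetIPVersion(a)) // "ip4"
}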


@ -1,119 +0,0 @@
package nat
import (
"fmt"
"net"
"sync"
"time"
)
// Mapping represents a port mapping in a NAT.
type Mapping interface {
// NAT returns the NAT object this Mapping belongs to.
NAT() *NAT
// Protocol returns the protocol of this port mapping. This is either
// "tcp" or "udp" as no other protocols are likely to be NAT-supported.
Protocol() string
// InternalPort returns the internal device port. Mapping will continue to
// try to map InternalPort() to an external facing port.
InternalPort() int
// ExternalPort returns the external facing port. If the mapping is not
// established, port will be 0
ExternalPort() int
// ExternalAddr returns the external facing address. If the mapping is not
// established, addr will be nil, and ErrNoMapping will be returned.
ExternalAddr() (addr net.Addr, err error)
// Close closes the port mapping
Close() error
}
// keeps republishing
type mapping struct {
sync.Mutex // guards all fields
nat *NAT
proto string
intport int
extport int
cached net.IP
cacheTime time.Time
cacheLk sync.Mutex
}
func (m *mapping) NAT() *NAT {
m.Lock()
defer m.Unlock()
return m.nat
}
func (m *mapping) Protocol() string {
m.Lock()
defer m.Unlock()
return m.proto
}
func (m *mapping) InternalPort() int {
m.Lock()
defer m.Unlock()
return m.intport
}
func (m *mapping) ExternalPort() int {
m.Lock()
defer m.Unlock()
return m.extport
}
func (m *mapping) setExternalPort(p int) {
m.Lock()
defer m.Unlock()
m.extport = p
}
func (m *mapping) ExternalAddr() (net.Addr, error) {
m.cacheLk.Lock()
defer m.cacheLk.Unlock()
oport := m.ExternalPort()
if oport == 0 {
// dont even try right now.
return nil, ErrNoMapping
}
if time.Since(m.cacheTime) >= CacheTime {
m.nat.natmu.Lock()
cval, err := m.nat.nat.GetExternalAddress()
m.nat.natmu.Unlock()
if err != nil {
return nil, err
}
m.cached = cval
m.cacheTime = time.Now()
}
switch m.Protocol() {
case "tcp":
return &net.TCPAddr{
IP: m.cached,
Port: oport,
}, nil
case "udp":
return &net.UDPAddr{
IP: m.cached,
Port: oport,
}, nil
default:
panic(fmt.Sprintf("invalid protocol %q", m.Protocol()))
}
}
func (m *mapping) Close() error {
m.nat.removeMapping(m)
return nil
}


@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"net/netip"
"sync"
"time"
@ -19,18 +20,30 @@ var log = logging.Logger("nat")
// MappingDuration is a default port mapping duration.
// Port mappings are renewed every (MappingDuration / 3)
const MappingDuration = time.Second * 60
const MappingDuration = time.Minute
// CacheTime is the time a mapping will cache an external address for
const CacheTime = time.Second * 15
const CacheTime = 15 * time.Second
// DiscoverNAT looks for a NAT device in the network and
// returns an object that can manage port mappings.
type entry struct {
protocol string
port int
}
// so we can mock it in tests
var discoverGateway = nat.DiscoverGateway
// DiscoverNAT looks for a NAT device in the network and returns an object that can manage port mappings.
func DiscoverNAT(ctx context.Context) (*NAT, error) {
natInstance, err := nat.DiscoverGateway(ctx)
natInstance, err := discoverGateway(ctx)
if err != nil {
return nil, err
}
var extAddr netip.Addr
extIP, err := natInstance.GetExternalAddress()
if err == nil {
extAddr, _ = netip.AddrFromSlice(extIP)
}
// Log the device addr.
addr, err := natInstance.GetDeviceAddress()
@ -40,7 +53,20 @@ func DiscoverNAT(ctx context.Context) (*NAT, error) {
log.Debug("DiscoverGateway address:", addr)
}
return newNAT(natInstance), nil
ctx, cancel := context.WithCancel(context.Background())
nat := &NAT{
nat: natInstance,
extAddr: extAddr,
mappings: make(map[entry]int),
ctx: ctx,
ctxCancel: cancel,
}
nat.refCount.Add(1)
go func() {
defer nat.refCount.Done()
nat.background()
}()
return nat, nil
}
// NAT is an object that manages address port mappings in
@ -50,6 +76,8 @@ func DiscoverNAT(ctx context.Context) (*NAT, error) {
type NAT struct {
natmu sync.Mutex
nat nat.NAT
// External IP of the NAT. Will be renewed periodically (every CacheTime).
extAddr netip.Addr
refCount sync.WaitGroup
ctx context.Context
@ -57,17 +85,7 @@ type NAT struct {
mappingmu sync.RWMutex // guards mappings
closed bool
mappings map[*mapping]struct{}
}
func newNAT(realNAT nat.NAT) *NAT {
ctx, cancel := context.WithCancel(context.Background())
return &NAT{
nat: realNAT,
mappings: make(map[*mapping]struct{}),
ctx: ctx,
ctxCancel: cancel,
}
mappings map[entry]int
}
// Close shuts down all port mappings. NAT can no longer be used.
@ -81,99 +99,141 @@ func (nat *NAT) Close() error {
return nil
}
// Mappings returns a slice of all NAT mappings
func (nat *NAT) Mappings() []Mapping {
func (nat *NAT) GetMapping(protocol string, port int) (addr netip.AddrPort, found bool) {
nat.mappingmu.Lock()
maps2 := make([]Mapping, 0, len(nat.mappings))
for m := range nat.mappings {
maps2 = append(maps2, m)
defer nat.mappingmu.Unlock()
if !nat.extAddr.IsValid() {
return netip.AddrPort{}, false
}
nat.mappingmu.Unlock()
return maps2
extPort, found := nat.mappings[entry{protocol: protocol, port: port}]
if !found {
return netip.AddrPort{}, false
}
return netip.AddrPortFrom(nat.extAddr, uint16(extPort)), true
}
// NewMapping attempts to construct a mapping on protocol and internal port
// It will also periodically renew the mapping until the returned Mapping
// -- or its parent NAT -- is Closed.
// AddMapping attempts to construct a mapping on protocol and internal port.
// It blocks until a mapping is established. Once added, it periodically renews the mapping.
//
// May not succeed, and mappings may change over time;
// NAT devices may not respect our port requests, and even lie.
// Clients should not store the mapped results, but rather always
// poll our object for the latest mappings.
func (nat *NAT) NewMapping(protocol string, port int) (Mapping, error) {
if nat == nil {
return nil, fmt.Errorf("no nat available")
}
func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error {
switch protocol {
case "tcp", "udp":
default:
return nil, fmt.Errorf("invalid protocol: %s", protocol)
}
m := &mapping{
intport: port,
nat: nat,
proto: protocol,
return fmt.Errorf("invalid protocol: %s", protocol)
}
nat.mappingmu.Lock()
defer nat.mappingmu.Unlock()
if nat.closed {
nat.mappingmu.Unlock()
return nil, errors.New("closed")
return errors.New("closed")
}
nat.mappings[m] = struct{}{}
nat.refCount.Add(1)
nat.mappingmu.Unlock()
go nat.refreshMappings(m)
// do it once synchronously, so first mapping is done right away, and before exiting,
// allowing users -- in the optimistic case -- to use results right after.
nat.establishMapping(m)
return m, nil
extPort := nat.establishMapping(ctx, protocol, port)
nat.mappings[entry{protocol: protocol, port: port}] = extPort
return nil
}
func (nat *NAT) removeMapping(m *mapping) {
// RemoveMapping removes a port mapping.
// It blocks until the NAT has removed the mapping.
func (nat *NAT) RemoveMapping(ctx context.Context, protocol string, port int) error {
nat.mappingmu.Lock()
delete(nat.mappings, m)
nat.mappingmu.Unlock()
nat.natmu.Lock()
nat.nat.DeletePortMapping(m.Protocol(), m.InternalPort())
nat.natmu.Unlock()
defer nat.mappingmu.Unlock()
switch protocol {
case "tcp", "udp":
e := entry{protocol: protocol, port: port}
if _, ok := nat.mappings[e]; ok {
delete(nat.mappings, e)
return nat.nat.DeletePortMapping(ctx, protocol, port)
}
return errors.New("unknown mapping")
default:
return fmt.Errorf("invalid protocol: %s", protocol)
}
}
func (nat *NAT) refreshMappings(m *mapping) {
defer nat.refCount.Done()
t := time.NewTicker(MappingDuration / 3)
func (nat *NAT) background() {
const mappingUpdate = MappingDuration / 3
now := time.Now()
nextMappingUpdate := now.Add(mappingUpdate)
nextAddrUpdate := now.Add(CacheTime)
t := time.NewTimer(minTime(nextMappingUpdate, nextAddrUpdate).Sub(now)) // don't use a ticker here. We don't know how long establishing the mappings takes.
defer t.Stop()
var in []entry
var out []int // port numbers
for {
select {
case <-t.C:
nat.establishMapping(m)
case now := <-t.C:
if now.After(nextMappingUpdate) {
in = in[:0]
out = out[:0]
nat.mappingmu.Lock()
for e := range nat.mappings {
in = append(in, e)
}
nat.mappingmu.Unlock()
// Establishing the mapping involves network requests.
// Don't hold the mutex, just save the ports.
for _, e := range in {
out = append(out, nat.establishMapping(nat.ctx, e.protocol, e.port))
}
nat.mappingmu.Lock()
for i, p := range in {
if _, ok := nat.mappings[p]; !ok {
continue // entry might have been deleted
}
nat.mappings[p] = out[i]
}
nat.mappingmu.Unlock()
nextMappingUpdate = time.Now().Add(mappingUpdate)
}
if now.After(nextAddrUpdate) {
var extAddr netip.Addr
extIP, err := nat.nat.GetExternalAddress()
if err == nil {
extAddr, _ = netip.AddrFromSlice(extIP)
}
nat.extAddr = extAddr
nextAddrUpdate = time.Now().Add(CacheTime)
}
t.Reset(time.Until(minTime(nextAddrUpdate, nextMappingUpdate)))
case <-nat.ctx.Done():
m.Close()
nat.mappingmu.Lock()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
for e := range nat.mappings {
delete(nat.mappings, e)
nat.nat.DeletePortMapping(ctx, e.protocol, e.port)
}
nat.mappingmu.Unlock()
return
}
}
}
func (nat *NAT) establishMapping(m *mapping) {
oldport := m.ExternalPort()
log.Debugf("Attempting port map: %s/%d", m.Protocol(), m.InternalPort())
func (nat *NAT) establishMapping(ctx context.Context, protocol string, internalPort int) (externalPort int) {
log.Debugf("Attempting port map: %s/%d", protocol, internalPort)
const comment = "libp2p"
nat.natmu.Lock()
newport, err := nat.nat.AddPortMapping(m.Protocol(), m.InternalPort(), comment, MappingDuration)
var err error
externalPort, err = nat.nat.AddPortMapping(ctx, protocol, internalPort, comment, MappingDuration)
if err != nil {
// Some hardware does not support mappings with timeout, so try that
newport, err = nat.nat.AddPortMapping(m.Protocol(), m.InternalPort(), comment, 0)
externalPort, err = nat.nat.AddPortMapping(ctx, protocol, internalPort, comment, 0)
}
nat.natmu.Unlock()
if err != nil || newport == 0 {
m.setExternalPort(0) // clear mapping
if err != nil || externalPort == 0 {
// TODO: log.Event
if err != nil {
log.Warnf("failed to establish port mapping: %s", err)
@ -182,12 +242,16 @@ func (nat *NAT) establishMapping(m *mapping) {
}
// we do not close if the mapping failed,
// because it may work again next time.
return
return 0
}
m.setExternalPort(newport)
log.Debugf("NAT Mapping: %d --> %d (%s)", m.ExternalPort(), m.InternalPort(), m.Protocol())
if oldport != 0 && newport != oldport {
log.Debugf("failed to renew same port mapping: ch %d -> %d", oldport, newport)
}
log.Debugf("NAT Mapping: %d --> %d (%s)", externalPort, internalPort, protocol)
return externalPort
}
func minTime(a, b time.Time) time.Time {
if a.Before(b) {
return a
}
return b
}
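Putting the reworked API together, a usage sketch under the assumption that a UPnP/NAT-PMP gateway is reachable (port 4001 is arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	nat "github.com/libp2p/go-libp2p/p2p/net/nat"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	n, err := nat.DiscoverNAT(ctx) // also caches the gateway's external address
	if err != nil {
		fmt.Println("no NAT found:", err)
		return
	}
	defer n.Close()

	// Blocks until the first mapping attempt completes; renewed in the background.
	if err := n.AddMapping(ctx, "tcp", 4001); err != nil {
		fmt.Println("mapping failed:", err)
		return
	}
	if ext, ok := n.GetMapping("tcp", 4001); ok {
		fmt.Println("external addr:", ext) // a netip.AddrPort
	}
}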


@ -0,0 +1,49 @@
package swarm
import "time"
// InstantTimer is a timer that triggers at some instant rather than some duration
type InstantTimer interface {
Reset(d time.Time) bool
Stop() bool
Ch() <-chan time.Time
}
// Clock is a clock that can create timers that trigger at some
// instant rather than some duration
type Clock interface {
Now() time.Time
Since(t time.Time) time.Duration
InstantTimer(when time.Time) InstantTimer
}
type RealTimer struct{ t *time.Timer }
var _ InstantTimer = (*RealTimer)(nil)
func (t RealTimer) Ch() <-chan time.Time {
return t.t.C
}
func (t RealTimer) Reset(d time.Time) bool {
return t.t.Reset(time.Until(d))
}
func (t RealTimer) Stop() bool {
return t.t.Stop()
}
type RealClock struct{}
var _ Clock = RealClock{}
func (RealClock) Now() time.Time {
return time.Now()
}
func (RealClock) Since(t time.Time) time.Duration {
return time.Since(t)
}
func (RealClock) InstantTimer(when time.Time) InstantTimer {
t := time.NewTimer(time.Until(when))
return &RealTimer{t}
}
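A sketch of the abstraction in use; RealClock simply wraps time.Timer, while tests can substitute a mock Clock to step through dial schedules deterministically:

package main

import (
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/p2p/net/swarm"
)

func main() {
	var cl swarm.Clock = swarm.RealClock{}
	deadline := cl.Now().Add(100 * time.Millisecond)
	t := cl.InstantTimer(deadline) // fires at an instant, not after a duration
	defer t.Stop()
	fmt.Println("fired at", <-t.Ch())
}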


@ -0,0 +1,170 @@
package swarm
import (
"sort"
"strconv"
"time"
"github.com/libp2p/go-libp2p/core/network"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
// The 250ms value is from happy eyeballs RFC 8305. This is a rough estimate of 1 RTT
const (
// duration by which TCP dials are delayed relative to QUIC dial
PublicTCPDelay = 250 * time.Millisecond
PrivateTCPDelay = 30 * time.Millisecond
// duration by which QUIC dials are delayed relative to first QUIC dial
PublicQUICDelay = 250 * time.Millisecond
PrivateQUICDelay = 30 * time.Millisecond
// RelayDelay is the duration by which relay dials are delayed relative to direct addresses
RelayDelay = 250 * time.Millisecond
)
// NoDelayDialRanker ranks addresses with no delay. This is useful for simultaneous connect requests.
func NoDelayDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
return getAddrDelay(addrs, 0, 0, 0)
}
// DefaultDialRanker determines the ranking of outgoing connection attempts.
//
// Addresses are grouped into four distinct groups:
//
// - private addresses (localhost and local networks (RFC 1918))
// - public IPv4 addresses
// - public IPv6 addresses
// - relay addresses
//
// Within each group, the addresses are ranked according to the ranking logic described below.
// We then dial addresses according to this ranking, with short timeouts applied between dial attempts.
// This ranking logic dramatically reduces the number of simultaneous dial attempts, while introducing
// no additional latency in the vast majority of cases.
//
// The private, public IPv4 and public IPv6 groups are dialed in parallel.
// Dialing relay addresses is delayed by 250 ms (RelayDelay) if we have any non-relay alternatives.
//
// In a future iteration, IPv6 will be given a headstart over IPv4, as recommended by Happy Eyeballs RFC 8305.
// This is not enabled yet, since some ISPs are still IPv4-only, and dialing IPv6 addresses will therefore
// always fail.
// The correct solution is to detect this situation, and not attempt to dial IPv6 addresses at all.
// IPv6 blackhole detection is tracked in https://github.com/libp2p/go-libp2p/issues/1605.
//
// Within each group (private, public IPv4, public IPv6, relay addresses) we apply the following
// ranking logic:
//
// 1. If two QUIC addresses are present, dial the QUIC address with the lowest port first:
// This is more likely to be the listen port. After this we dial the rest of the QUIC addresses delayed by
// 250ms (PublicQUICDelay) for public addresses, and 30ms (PrivateQUICDelay) for local addresses.
// 2. If a QUIC or WebTransport address is present, dials to TCP addresses are delayed relative to the last QUIC dial:
// We prefer to end up with a QUIC connection. For public addresses, the delay introduced is 250ms (PublicTCPDelay),
// and for private addresses 30ms (PrivateTCPDelay).
func DefaultDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
relay, addrs := filterAddrs(addrs, isRelayAddr)
pvt, addrs := filterAddrs(addrs, manet.IsPrivateAddr)
ip4, addrs := filterAddrs(addrs, func(a ma.Multiaddr) bool { return isProtocolAddr(a, ma.P_IP4) })
ip6, addrs := filterAddrs(addrs, func(a ma.Multiaddr) bool { return isProtocolAddr(a, ma.P_IP6) })
var relayOffset time.Duration = 0
if len(ip4) > 0 || len(ip6) > 0 {
// if there is a public direct address available delay relay dials
relayOffset = RelayDelay
}
res := make([]network.AddrDelay, 0, len(addrs))
for i := 0; i < len(addrs); i++ {
res = append(res, network.AddrDelay{Addr: addrs[i], Delay: 0})
}
res = append(res, getAddrDelay(pvt, PrivateTCPDelay, PrivateQUICDelay, 0)...)
res = append(res, getAddrDelay(ip4, PublicTCPDelay, PublicQUICDelay, 0)...)
res = append(res, getAddrDelay(ip6, PublicTCPDelay, PublicQUICDelay, 0)...)
res = append(res, getAddrDelay(relay, PublicTCPDelay, PublicQUICDelay, relayOffset)...)
return res
}
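To make the schedule concrete, a sketch with three hypothetical public addresses for the same peer; per the rules above, the QUIC dial starts immediately, while the TCP and relay dials are pushed back by 250ms:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/p2p/net/swarm"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	addrs := []ma.Multiaddr{
		ma.StringCast("/ip4/203.0.113.1/tcp/4001"),
		ma.StringCast("/ip4/203.0.113.1/udp/4001/quic-v1"),
		ma.StringCast("/ip4/203.0.113.1/udp/4001/quic-v1/p2p-circuit"),
	}
	for _, ad := range swarm.DefaultDialRanker(addrs) {
		fmt.Printf("%-55s +%s\n", ad.Addr, ad.Delay)
	}
	// Expected: quic-v1 at +0s, tcp at +250ms (PublicTCPDelay),
	// and the p2p-circuit address at +250ms (RelayDelay).
}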
// getAddrDelay ranks a group of addresses (private, ip4, ip6) according to the ranking logic
// explained in DefaultDialRanker.
// offset is used to delay all addresses by a fixed duration. This is useful for delaying all relay
// addresses relative to direct addresses
func getAddrDelay(addrs []ma.Multiaddr, tcpDelay time.Duration, quicDelay time.Duration,
offset time.Duration) []network.AddrDelay {
sort.Slice(addrs, func(i, j int) bool { return score(addrs[i]) < score(addrs[j]) })
res := make([]network.AddrDelay, 0, len(addrs))
quicCount := 0
for _, a := range addrs {
delay := offset
switch {
case isProtocolAddr(a, ma.P_QUIC) || isProtocolAddr(a, ma.P_QUIC_V1):
// For QUIC addresses we dial a single address first and then wait for QUICDelay.
// After QUICDelay we dial the rest of the QUIC addresses.
if quicCount > 0 {
delay += quicDelay
}
quicCount++
case isProtocolAddr(a, ma.P_TCP):
if quicCount >= 2 {
delay += 2 * quicDelay
} else if quicCount == 1 {
delay += tcpDelay
}
}
res = append(res, network.AddrDelay{Addr: a, Delay: delay})
}
return res
}
// score scores a multiaddress for dialing delay. lower is better
func score(a ma.Multiaddr) int {
// the lower 16 bits of the result are the relevant port
// the higher bits rank the protocol
// low ports are ranked higher because they're more likely to
// be listen addresses
if _, err := a.ValueForProtocol(ma.P_WEBTRANSPORT); err == nil {
p, _ := a.ValueForProtocol(ma.P_UDP)
pi, _ := strconv.Atoi(p) // cannot error
return pi + (1 << 18)
}
if _, err := a.ValueForProtocol(ma.P_QUIC); err == nil {
p, _ := a.ValueForProtocol(ma.P_UDP)
pi, _ := strconv.Atoi(p) // cannot error
return pi + (1 << 17)
}
if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
p, _ := a.ValueForProtocol(ma.P_UDP)
pi, _ := strconv.Atoi(p) // cannot error
return pi
}
if p, err := a.ValueForProtocol(ma.P_TCP); err == nil {
pi, _ := strconv.Atoi(p) // cannot error
return pi + (1 << 19)
}
return (1 << 30)
}
func isProtocolAddr(a ma.Multiaddr, p int) bool {
found := false
ma.ForEach(a, func(c ma.Component) bool {
if c.Protocol().Code == p {
found = true
return false
}
return true
})
return found
}
// filterAddrs filters an address slice in place
func filterAddrs(addrs []ma.Multiaddr, f func(a ma.Multiaddr) bool) (filtered, rest []ma.Multiaddr) {
j := 0
for i := 0; i < len(addrs); i++ {
if f(addrs[i]) {
addrs[i], addrs[j] = addrs[j], addrs[i]
j++
}
}
return addrs[:j], addrs[j:]
}


@ -2,13 +2,14 @@ package swarm
import (
"context"
"math"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
// /////////////////////////////////////////////////////////////////////////////////
@ -16,78 +17,159 @@ import (
// TODO explain how all this works
// ////////////////////////////////////////////////////////////////////////////////
// dialRequest is a structure used to request dials to the peer associated with a
// worker loop
type dialRequest struct {
ctx context.Context
// ctx is the context that may be used for the request
// if another concurrent request is made, any of the concurrent request's ctx may be used for
// dials to the peer's addresses
// ctx for simultaneous connect requests have higher priority than normal requests
ctx context.Context
// resch is the channel used to send the response for this query
resch chan dialResponse
}
// dialResponse is the response sent to dialRequests on the request's resch channel
type dialResponse struct {
// conn is the connection to the peer on success
conn *Conn
err error
// err is the error in dialing the peer
// nil on connection success
err error
}
// pendRequest is used to track progress on a dialRequest.
type pendRequest struct {
req dialRequest // the original request
err *DialError // dial error accumulator
addrs map[ma.Multiaddr]struct{} // pending addr dials
// req is the original dialRequest
req dialRequest
// err comprises errors of all failed dials
err *DialError
// addrs are the addresses on which we are waiting for pending dials
// At the time of creation addrs is initialised to all the addresses of the peer. On a failed dial,
// the addr is removed from the map and err is updated. On a successful dial, the dialRequest is
// completed and response is sent with the connection
addrs map[string]struct{}
}
// addrDial tracks dials to a particular multiaddress.
type addrDial struct {
addr ma.Multiaddr
ctx context.Context
conn *Conn
err error
// addr is the address dialed
addr ma.Multiaddr
// ctx is the context used for dialing the address
ctx context.Context
// conn is the established connection on success
conn *Conn
// err is the err on dialing the address
err error
// requests is the list of pendRequests interested in this dial
// the value in the slice is the request number assigned to this request by the dialWorker
requests []int
dialed bool
// dialed indicates whether we have triggered the dial to the address
dialed bool
// createdAt is the time this struct was created
createdAt time.Time
// dialRankingDelay is the delay in dialing this address introduced by the ranking logic
dialRankingDelay time.Duration
}
// dialWorker synchronises concurrent dials to a peer. It ensures that we make at most one dial to a
// peer's address
type dialWorker struct {
s *Swarm
peer peer.ID
reqch <-chan dialRequest
reqno int
requests map[int]*pendRequest
pending map[ma.Multiaddr]*addrDial
resch chan dialResult
s *Swarm
peer peer.ID
// reqch is used to send dial requests to the worker. close reqch to end the worker loop
reqch <-chan dialRequest
// reqno is the request number used to track different dialRequests for a peer.
// Each incoming request is assigned a reqno. This reqno is used in pendingRequests and in
// addrDial objects in trackedDials to track this request
reqno int
// pendingRequests maps reqno to the pendRequest object for a dialRequest
pendingRequests map[int]*pendRequest
// trackedDials tracks dials to the peer's addresses. An entry here is used to ensure that
// we dial an address at most once
trackedDials map[string]*addrDial
// resch is used to receive responses for dials to the peer's addresses.
resch chan dialResult
connected bool // true when a connection has been successfully established
nextDial []ma.Multiaddr
// ready when we have more addresses to dial (nextDial is not empty)
triggerDial <-chan struct{}
// for testing
wg sync.WaitGroup
cl Clock
}
func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest) *dialWorker {
func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest, cl Clock) *dialWorker {
if cl == nil {
cl = RealClock{}
}
return &dialWorker{
s: s,
peer: p,
reqch: reqch,
requests: make(map[int]*pendRequest),
pending: make(map[ma.Multiaddr]*addrDial),
resch: make(chan dialResult),
s: s,
peer: p,
reqch: reqch,
pendingRequests: make(map[int]*pendRequest),
trackedDials: make(map[string]*addrDial),
resch: make(chan dialResult),
cl: cl,
}
}
// loop implements the core dial worker loop. Requests are received on w.reqch.
// The loop exits when w.reqch is closed.
func (w *dialWorker) loop() {
w.wg.Add(1)
defer w.wg.Done()
defer w.s.limiter.clearAllPeerDials(w.peer)
// used to signal readiness to dial and completion of the dial
ready := make(chan struct{})
close(ready)
// dq is used to pace dials to different addresses of the peer
dq := newDialQueue()
// dialsInFlight is the number of dials in flight.
dialsInFlight := 0
startTime := w.cl.Now()
// dialTimer is the dialTimer used to trigger dials
dialTimer := w.cl.InstantTimer(startTime.Add(math.MaxInt64))
timerRunning := true
// scheduleNextDial updates timer for triggering the next dial
scheduleNextDial := func() {
if timerRunning && !dialTimer.Stop() {
<-dialTimer.Ch()
}
timerRunning = false
if dq.len() > 0 {
if dialsInFlight == 0 && !w.connected {
// if there are no dials in flight, trigger the next dials immediately
dialTimer.Reset(startTime)
} else {
dialTimer.Reset(startTime.Add(dq.top().Delay))
}
timerRunning = true
}
}
// totalDials is used to track number of dials made by this worker for metrics
totalDials := 0
loop:
for {
// The loop has three parts
// 1. Input requests are received on w.reqch. If a suitable connection is not available we create
// a pendRequest object to track the dialRequest and add the addresses to dq.
// 2. Addresses from the dialQueue are dialed at appropriate time intervals depending on delay logic.
// We are notified of the completion of these dials on w.resch.
// 3. Responses for dials are received on w.resch. On receiving a response, we update the pendRequests
// interested in dials on this address.
select {
case req, ok := <-w.reqch:
if !ok {
if w.s.metricsTracer != nil {
w.s.metricsTracer.DialCompleted(w.connected, totalDials)
}
return
}
// We have received a new request. If we do not have a suitable connection,
// track this dialRequest with a pendRequest.
// Enqueue the peer's addresses relevant to this request in dq and
// track dials to the addresses relevant to this request.
c, err := w.s.bestAcceptableConnToPeer(req.ctx, w.peer)
if c != nil || err != nil {
@ -101,29 +183,34 @@ loop:
continue loop
}
// at this point, len(addrs) > 0 or else it would be an error from addrsForDial
// rank them to process in order
addrs = w.rankAddrs(addrs)
// get the delays to dial these addrs from the swarms dialRanker
simConnect, _, _ := network.GetSimultaneousConnect(req.ctx)
addrRanking := w.rankAddrs(addrs, simConnect)
addrDelay := make(map[string]time.Duration, len(addrRanking))
// create the pending request object
pr := &pendRequest{
req: req,
err: &DialError{Peer: w.peer},
addrs: make(map[ma.Multiaddr]struct{}),
addrs: make(map[string]struct{}, len(addrRanking)),
}
for _, a := range addrs {
pr.addrs[a] = struct{}{}
for _, adelay := range addrRanking {
pr.addrs[string(adelay.Addr.Bytes())] = struct{}{}
addrDelay[string(adelay.Addr.Bytes())] = adelay.Delay
}
// check if any of the addrs has been successfully dialed and accumulate
// errors from complete dials while collecting new addrs to dial/join
// Check if dials to any of the addrs have completed already
// If they have errored, record the error in pr. If they have succeeded,
// respond with the connection.
// If they are pending, add them to tojoin.
// If we haven't seen any of the addresses before, add them to todial.
var todial []ma.Multiaddr
var tojoin []*addrDial
for _, a := range addrs {
ad, ok := w.pending[a]
for _, adelay := range addrRanking {
ad, ok := w.trackedDials[string(adelay.Addr.Bytes())]
if !ok {
todial = append(todial, a)
todial = append(todial, adelay.Addr)
continue
}
@ -135,8 +222,8 @@ loop:
if ad.err != nil {
// dial to this addr errored, accumulate the error
pr.err.recordErr(a, ad.err)
delete(pr.addrs, a)
pr.err.recordErr(ad.addr, ad.err)
delete(pr.addrs, string(ad.addr.Bytes()))
continue
}
@ -150,52 +237,91 @@ loop:
continue loop
}
// the request has some pending or new dials, track it and schedule new dials
// The request has some pending or new dials. We assign this request a request number.
// This value of w.reqno is used to track this request in all the structures
w.reqno++
w.requests[w.reqno] = pr
w.pendingRequests[w.reqno] = pr
for _, ad := range tojoin {
if !ad.dialed {
// we haven't dialed this address. update the ad.ctx to have simultaneous connect values
// set correctly
if simConnect, isClient, reason := network.GetSimultaneousConnect(req.ctx); simConnect {
if simConnect, _, _ := network.GetSimultaneousConnect(ad.ctx); !simConnect {
ad.ctx = network.WithSimultaneousConnect(ad.ctx, isClient, reason)
// update the element in dq to use the simultaneous connect delay.
dq.Add(network.AddrDelay{
Addr: ad.addr,
Delay: addrDelay[string(ad.addr.Bytes())],
})
}
}
}
// add the request to the addrDial
ad.requests = append(ad.requests, w.reqno)
}
if len(todial) > 0 {
now := time.Now()
// these are new addresses, track them and add them to dq
for _, a := range todial {
w.pending[a] = &addrDial{addr: a, ctx: req.ctx, requests: []int{w.reqno}}
w.trackedDials[string(a.Bytes())] = &addrDial{
addr: a,
ctx: req.ctx,
requests: []int{w.reqno},
createdAt: now,
}
dq.Add(network.AddrDelay{Addr: a, Delay: addrDelay[string(a.Bytes())]})
}
w.nextDial = append(w.nextDial, todial...)
w.nextDial = w.rankAddrs(w.nextDial)
// trigger a new dial now to account for the new addrs we added
w.triggerDial = ready
}
// setup dialTimer for updates to dq
scheduleNextDial()
case <-w.triggerDial:
for _, addr := range w.nextDial {
case <-dialTimer.Ch():
// It's time to dial the next batch of addresses.
// We don't check the delay of the addresses received from the queue here
// because if the timer triggered before the delay, it means that all
// the inflight dials have errored and we should dial the next batch of
// addresses
now := time.Now()
for _, adelay := range dq.NextBatch() {
// spawn the dial
ad := w.pending[addr]
err := w.s.dialNextAddr(ad.ctx, w.peer, addr, w.resch)
ad, ok := w.trackedDials[string(adelay.Addr.Bytes())]
if !ok {
log.Errorf("SWARM BUG: no entry for address %s in trackedDials", adelay.Addr)
continue
}
ad.dialed = true
ad.dialRankingDelay = now.Sub(ad.createdAt)
err := w.s.dialNextAddr(ad.ctx, w.peer, ad.addr, w.resch)
if err != nil {
// the actual dial happens in a different go routine. An err here
// only happens in case of backoff. handle that.
w.dispatchError(ad, err)
} else {
// the dial was successful. update inflight dials
dialsInFlight++
totalDials++
}
}
w.nextDial = nil
w.triggerDial = nil
timerRunning = false
// schedule more dials
scheduleNextDial()
case res := <-w.resch:
if res.Conn != nil {
w.connected = true
}
// A dial to an address has completed.
// Update all requests waiting on this address. On success, complete the request.
// On error, record the error
ad := w.pending[res.Addr]
dialsInFlight--
ad, ok := w.trackedDials[string(res.Addr.Bytes())]
if !ok {
log.Errorf("SWARM BUG: no entry for address %s in trackedDials", res.Addr)
if res.Conn != nil {
res.Conn.Close()
}
continue
}
if res.Conn != nil {
// we got a connection, add it to the swarm
@ -207,21 +333,27 @@ loop:
continue loop
}
// dispatch to still pending requests
// request succeeded, respond to all pending requests
for _, reqno := range ad.requests {
pr, ok := w.requests[reqno]
pr, ok := w.pendingRequests[reqno]
if !ok {
// it has already dispatched a connection
// some other dial for this request succeeded before this one
continue
}
pr.req.resch <- dialResponse{conn: conn}
delete(w.requests, reqno)
delete(w.pendingRequests, reqno)
}
ad.conn = conn
ad.requests = nil
if !w.connected {
w.connected = true
if w.s.metricsTracer != nil {
w.s.metricsTracer.DialRankingDelay(ad.dialRankingDelay)
}
}
continue loop
}
@ -231,8 +363,11 @@ loop:
// for consistency with the old dialer behavior.
w.s.backf.AddBackoff(w.peer, res.Addr)
}
w.dispatchError(ad, res.Err)
// Only schedule next dial on error.
// If we scheduleNextDial on success, we will end up making one dial more than
// required because the final successful dial will spawn one more dial
scheduleNextDial()
}
}
}
@ -241,16 +376,16 @@ loop:
func (w *dialWorker) dispatchError(ad *addrDial, err error) {
ad.err = err
for _, reqno := range ad.requests {
pr, ok := w.requests[reqno]
pr, ok := w.pendingRequests[reqno]
if !ok {
// has already been dispatched
// some other dial for this request succeeded before this one
continue
}
// accumulate the error
pr.err.recordErr(ad.addr, err)
delete(pr.addrs, ad.addr)
delete(pr.addrs, string(ad.addr.Bytes()))
if len(pr.addrs) == 0 {
// all addrs have erred, dispatch dial error
// but first do a last one check in case an acceptable connection has landed from
@ -261,7 +396,7 @@ func (w *dialWorker) dispatchError(ad *addrDial, err error) {
} else {
pr.req.resch <- dialResponse{err: pr.err}
}
delete(w.requests, reqno)
delete(w.pendingRequests, reqno)
}
}
@ -271,46 +406,82 @@ func (w *dialWorker) dispatchError(ad *addrDial, err error) {
// this is necessary to support active listen scenarios, where a new dial comes in while
// another dial is in progress, and needs to do a direct connection without inhibitions from
// dial backoff.
// it is also necessary to preserve consistent behaviour with the old dialer -- TestDialBackoff
// regresses without this.
if err == ErrDialBackoff {
delete(w.pending, ad.addr)
delete(w.trackedDials, string(ad.addr.Bytes()))
}
}
// ranks addresses in descending order of preference for dialing, with the following rules:
// NonRelay > Relay
// NonWS > WS
// Private > Public
// UDP > TCP
func (w *dialWorker) rankAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
addrTier := func(a ma.Multiaddr) (tier int) {
if isRelayAddr(a) {
tier |= 0b1000
}
if isExpensiveAddr(a) {
tier |= 0b0100
}
if !manet.IsPrivateAddr(a) {
tier |= 0b0010
}
if isFdConsumingAddr(a) {
tier |= 0b0001
}
return tier
// rankAddrs ranks addresses for dialing. If it's a simConnect request, we
// dial all addresses immediately without any delay.
func (w *dialWorker) rankAddrs(addrs []ma.Multiaddr, isSimConnect bool) []network.AddrDelay {
if isSimConnect {
return NoDelayDialRanker(addrs)
}
tiers := make([][]ma.Multiaddr, 16)
for _, a := range addrs {
tier := addrTier(a)
tiers[tier] = append(tiers[tier], a)
}
result := make([]ma.Multiaddr, 0, len(addrs))
for _, tier := range tiers {
result = append(result, tier...)
}
return result
return w.s.dialRanker(addrs)
}
// dialQueue is a priority queue used to schedule dials
type dialQueue struct {
// q contains dials ordered by delay
q []network.AddrDelay
}
// newDialQueue returns a new dialQueue
func newDialQueue() *dialQueue {
return &dialQueue{q: make([]network.AddrDelay, 0, 16)}
}
// Add adds adelay to the queue. If another element exists in the queue with
// the same address, it replaces that element.
func (dq *dialQueue) Add(adelay network.AddrDelay) {
for i := 0; i < dq.len(); i++ {
if dq.q[i].Addr.Equal(adelay.Addr) {
if dq.q[i].Delay == adelay.Delay {
// existing element is the same. nothing to do
return
}
// remove the element
copy(dq.q[i:], dq.q[i+1:])
dq.q = dq.q[:len(dq.q)-1]
break
}
}
for i := 0; i < dq.len(); i++ {
if dq.q[i].Delay > adelay.Delay {
dq.q = append(dq.q, network.AddrDelay{}) // extend the slice
copy(dq.q[i+1:], dq.q[i:])
dq.q[i] = adelay
return
}
}
dq.q = append(dq.q, adelay)
}
// NextBatch returns all the elements in the queue with the highest priority
func (dq *dialQueue) NextBatch() []network.AddrDelay {
if dq.len() == 0 {
return nil
}
// i is the index of the second highest priority element
var i int
for i = 0; i < dq.len(); i++ {
if dq.q[i].Delay != dq.q[0].Delay {
break
}
}
res := dq.q[:i]
dq.q = dq.q[i:]
return res
}
// top returns the top element of the queue
func (dq *dialQueue) top() network.AddrDelay {
return dq.q[0]
}
// len returns the number of elements in the queue
func (dq *dialQueue) len() int {
return len(dq.q)
}
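Since dialQueue is unexported, the following is an in-package sketch of its semantics rather than client code (a1..a3 stand for distinct multiaddrs; it assumes the network, ma and time imports already used in this file):

func dialQueueExample(a1, a2, a3 ma.Multiaddr) {
	dq := newDialQueue()
	dq.Add(network.AddrDelay{Addr: a1, Delay: 0})
	dq.Add(network.AddrDelay{Addr: a2, Delay: 250 * time.Millisecond})
	dq.Add(network.AddrDelay{Addr: a3, Delay: 0})
	first := dq.NextBatch()  // [a1 a3]: every element sharing the lowest delay
	second := dq.NextBatch() // [a2]
	_, _ = first, second
}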


@ -100,6 +100,14 @@ func WithResourceManager(m network.ResourceManager) Option {
}
}
// WithDialRanker configures swarm to use d as the DialRanker
func WithDialRanker(d network.DialRanker) Option {
return func(s *Swarm) error {
s.dialRanker = d
return nil
}
}
// Swarm is a connection muxer, allowing connections to other peers to
// be opened and closed, while still using the same Chan for all
// communication. The Chan sends/receives Messages, which note the
@ -163,6 +171,8 @@ type Swarm struct {
bwc metrics.Reporter
metricsTracer MetricsTracer
dialRanker network.DialRanker
}
// NewSwarm constructs a Swarm.
@ -181,6 +191,7 @@ func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts
dialTimeout: defaultDialTimeout,
dialTimeoutLocal: defaultDialTimeoutLocal,
maResolver: madns.DefaultResolver,
dialRanker: DefaultDialRanker,
}
s.conns.m = make(map[peer.ID][]*Conn)
@ -229,7 +240,7 @@ func (s *Swarm) close() {
for l := range listeners {
go func(l transport.Listener) {
if err := l.Close(); err != nil {
if err := l.Close(); err != nil && err != transport.ErrListenerClosed {
log.Errorf("error when shutting down listener: %s", err)
}
}(l)
@ -329,12 +340,7 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn,
}
c.streams.m = make(map[*Stream]struct{})
if len(s.conns.m[p]) == 0 { // first connection
s.emitter.Emit(event.EvtPeerConnectednessChanged{
Peer: p,
Connectedness: network.Connected,
})
}
isFirstConnection := len(s.conns.m[p]) == 0
s.conns.m[p] = append(s.conns.m[p], c)
// Add two swarm refs:
@ -347,6 +353,15 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn,
c.notifyLk.Lock()
s.conns.Unlock()
// Emit event after releasing `s.conns` lock so that a consumer can still
// use swarm methods that need the `s.conns` lock.
if isFirstConnection {
s.emitter.Emit(event.EvtPeerConnectednessChanged{
Peer: p,
Connectedness: network.Connected,
})
}
s.notifyAll(func(f network.Notifiee) {
f.Connected(s, c)
})
@ -626,25 +641,32 @@ func (s *Swarm) removeConn(c *Conn) {
p := c.RemotePeer()
s.conns.Lock()
defer s.conns.Unlock()
cs := s.conns.m[p]
if len(cs) == 1 {
delete(s.conns.m, p)
s.conns.Unlock()
// Emit event after releasing `s.conns` lock so that a consumer can still
// use swarm methods that need the `s.conns` lock.
s.emitter.Emit(event.EvtPeerConnectednessChanged{
Peer: p,
Connectedness: network.NotConnected,
})
return
}
defer s.conns.Unlock()
for i, ci := range cs {
if ci == c {
if len(cs) == 1 {
delete(s.conns.m, p)
s.emitter.Emit(event.EvtPeerConnectednessChanged{
Peer: p,
Connectedness: network.NotConnected,
})
} else {
// NOTE: We're intentionally preserving order.
// This way, connections to a peer are always
// sorted oldest to newest.
copy(cs[i:], cs[i+1:])
cs[len(cs)-1] = nil
s.conns.m[p] = cs[:len(cs)-1]
}
// NOTE: We're intentionally preserving order.
// This way, connections to a peer are always
// sorted oldest to newest.
copy(cs[i:], cs[i+1:])
cs[len(cs)-1] = nil
s.conns.m[p] = cs[:len(cs)-1]
break
}
}


@ -130,6 +130,7 @@ func (c *Conn) start() {
// We only get an error here when the swarm is closed or closing.
if err != nil {
scope.Done()
return
}


@ -4,6 +4,8 @@ import (
"context"
"errors"
"fmt"
"net/netip"
"strconv"
"sync"
"time"
@ -15,7 +17,6 @@ import (
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/quic-go/quic-go"
)
// The maximum number of address resolution steps we'll perform for a single
@ -74,33 +75,12 @@ const ConcurrentFdDials = 160
// per peer
var DefaultPerPeerRateLimit = 8
// dialbackoff is a struct used to avoid over-dialing the same, dead peers.
// Whenever we totally time out on a peer (all three attempts), we add them
// to dialbackoff. Then, whenevers goroutines would _wait_ (dialsync), they
// check dialbackoff. If it's there, they don't wait and exit promptly with
// an error. (the single goroutine that is actually dialing continues to
// dial). If a dial is successful, the peer is removed from backoff.
// Example:
//
// for {
// if ok, wait := dialsync.Lock(p); !ok {
// if backoff.Backoff(p) {
// return errDialFailed
// }
// <-wait
// continue
// }
// defer dialsync.Unlock(p)
// c, err := actuallyDial(p)
// if err != nil {
// dialbackoff.AddBackoff(p)
// continue
// }
// dialbackoff.Clear(p)
// }
//
// DialBackoff is a type for tracking peer dial backoffs.
// DialBackoff is a type for tracking peer dial backoffs. DialBackoff is used to
// avoid over-dialing the same, dead peers. Whenever we totally time out on all
// addresses of a peer, we add the addresses to DialBackoff. Then, whenever we
// attempt to dial the peer again, we check each address for backoff. If it's on
// backoff, we don't dial the address and exit promptly. If a dial is
// successful, the peer and all its addresses are removed from backoff.
//
// * It's safe to use its zero value.
// * It's thread-safe.
@ -138,8 +118,8 @@ func (db *DialBackoff) background(ctx context.Context) {
// Backoff returns whether the client should backoff from dialing
// peer p at address addr
func (db *DialBackoff) Backoff(p peer.ID, addr ma.Multiaddr) (backoff bool) {
db.lock.Lock()
defer db.lock.Unlock()
db.lock.RLock()
defer db.lock.RUnlock()
ap, found := db.entries[p][string(addr.Bytes())]
return found && time.Now().Before(ap.until)
@ -154,9 +134,7 @@ var BackoffCoef = time.Second
// BackoffMax is the maximum backoff time (default: 5m).
var BackoffMax = time.Minute * 5
// AddBackoff lets other nodes know that we've entered backoff with
// peer p, so dialers should not wait unnecessarily. We still will
// attempt to dial with one goroutine, in case we get through.
// AddBackoff adds peer's address to backoff.
//
// Backoff is not exponential, it's quadratic and computed according to the
// following formula:
@ -295,7 +273,7 @@ func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
// dialWorkerLoop synchronizes and executes concurrent dials to a single peer
func (s *Swarm) dialWorkerLoop(p peer.ID, reqch <-chan dialRequest) {
w := newDialWorker(s, p, reqch)
w := newDialWorker(s, p, reqch, nil)
w.loop()
}
@ -334,6 +312,7 @@ func (s *Swarm) addrsForDial(ctx context.Context, p peer.ID) ([]ma.Multiaddr, er
if forceDirect, _ := network.GetForceDirectDial(ctx); forceDirect {
goodAddrs = ma.FilterAddrs(goodAddrs, s.nonProxyAddr)
}
goodAddrs = network.DedupAddrs(goodAddrs)
if len(goodAddrs) == 0 {
return nil, ErrNoGoodAddresses
@ -433,30 +412,39 @@ func (s *Swarm) nonProxyAddr(addr ma.Multiaddr) bool {
// filterKnownUndialables takes a list of multiaddrs, and removes those
// that we definitely don't want to dial: addresses configured to be blocked,
// IPv6 link-local addresses, addresses without a dial-capable transport,
// and addresses that we know to be our own.
// This is an optimization to avoid wasting time on dials that we know are going to fail.
// addresses that we know to be our own, and addresses with a better transport
// available. This is an optimization to avoid wasting time on dials that we
// know are going to fail or for which we have a better alternative.
func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) []ma.Multiaddr {
lisAddrs, _ := s.InterfaceListenAddresses()
var ourAddrs []ma.Multiaddr
for _, addr := range lisAddrs {
protos := addr.Protocols()
// we're only sure about filtering out /ip4 and /ip6 addresses, so far
if protos[0].Code == ma.P_IP4 || protos[0].Code == ma.P_IP6 {
ourAddrs = append(ourAddrs, addr)
}
ma.ForEach(addr, func(c ma.Component) bool {
if c.Protocol().Code == ma.P_IP4 || c.Protocol().Code == ma.P_IP6 {
ourAddrs = append(ourAddrs, addr)
}
return false
})
}
return maybeRemoveWebTransportAddrs(
maybeRemoveQUICDraft29(
ma.FilterAddrs(addrs,
func(addr ma.Multiaddr) bool { return !ma.Contains(ourAddrs, addr) },
s.canDial,
// TODO: Consider allowing link-local addresses
func(addr ma.Multiaddr) bool { return !manet.IsIP6LinkLocal(addr) },
func(addr ma.Multiaddr) bool {
return s.gater == nil || s.gater.InterceptAddrDial(p, addr)
},
)))
// The order of these two filters is important. If we can only dial /webtransport,
// we don't want to filter /webtransport addresses out because the peer had a /quic-v1
// address
// filter addresses we cannot dial
addrs = ma.FilterAddrs(addrs, s.canDial)
// filter low priority addresses among the addresses we can dial
addrs = filterLowPriorityAddresses(addrs)
return ma.FilterAddrs(addrs,
func(addr ma.Multiaddr) bool { return !ma.Contains(ourAddrs, addr) },
// TODO: Consider allowing link-local addresses
func(addr ma.Multiaddr) bool { return !manet.IsIP6LinkLocal(addr) },
func(addr ma.Multiaddr) bool {
return s.gater == nil || s.gater.InterceptAddrDial(p, addr)
},
)
}
// limitedDial will start a dial to the given peer when
@ -542,110 +530,84 @@ func isFdConsumingAddr(addr ma.Multiaddr) bool {
return err1 == nil || err2 == nil
}
func isExpensiveAddr(addr ma.Multiaddr) bool {
_, wsErr := addr.ValueForProtocol(ma.P_WS)
_, wssErr := addr.ValueForProtocol(ma.P_WSS)
_, wtErr := addr.ValueForProtocol(ma.P_WEBTRANSPORT)
return wsErr == nil || wssErr == nil || wtErr == nil
}
func isRelayAddr(addr ma.Multiaddr) bool {
_, err := addr.ValueForProtocol(ma.P_CIRCUIT)
return err == nil
}
func isWebTransport(addr ma.Multiaddr) bool {
_, err := addr.ValueForProtocol(ma.P_WEBTRANSPORT)
return err == nil
// filterLowPriorityAddresses removes addresses in place for which we have a better alternative:
// 1. If a /quic-v1 address is present, filter out /quic and /webtransport address on the same 2-tuple:
// QUIC v1 is preferred over the deprecated QUIC draft-29, and given the choice, we prefer using
// raw QUIC over using WebTransport.
// 2. If a /tcp address is present, filter out /ws or /wss addresses on the same 2-tuple:
// We prefer using raw TCP over using WebSocket.
func filterLowPriorityAddresses(addrs []ma.Multiaddr) []ma.Multiaddr {
// make a map of QUIC v1 and TCP AddrPorts.
quicV1Addr := make(map[netip.AddrPort]struct{})
tcpAddr := make(map[netip.AddrPort]struct{})
for _, a := range addrs {
switch {
case isProtocolAddr(a, ma.P_WEBTRANSPORT):
case isProtocolAddr(a, ma.P_QUIC_V1):
ap, err := addrPort(a, ma.P_UDP)
if err != nil {
continue
}
quicV1Addr[ap] = struct{}{}
case isProtocolAddr(a, ma.P_WS) || isProtocolAddr(a, ma.P_WSS):
case isProtocolAddr(a, ma.P_TCP):
ap, err := addrPort(a, ma.P_TCP)
if err != nil {
continue
}
tcpAddr[ap] = struct{}{}
}
}
i := 0
for _, a := range addrs {
switch {
case isProtocolAddr(a, ma.P_WEBTRANSPORT) || isProtocolAddr(a, ma.P_QUIC):
ap, err := addrPort(a, ma.P_UDP)
if err != nil {
break
}
if _, ok := quicV1Addr[ap]; ok {
continue
}
case isProtocolAddr(a, ma.P_WS) || isProtocolAddr(a, ma.P_WSS):
ap, err := addrPort(a, ma.P_TCP)
if err != nil {
break
}
if _, ok := tcpAddr[ap]; ok {
continue
}
}
addrs[i] = a
i++
}
return addrs[:i]
}
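A worked example of the two rules, with illustrative addresses:

// Input (same host):
//   /ip4/1.2.3.4/udp/443/quic-v1               kept
//   /ip4/1.2.3.4/udp/443/quic                  dropped: /quic-v1 exists on the same 2-tuple
//   /ip4/1.2.3.4/udp/443/quic-v1/webtransport  dropped: raw QUIC v1 is preferred
//   /ip4/1.2.3.4/udp/9999/quic                 kept: no /quic-v1 on this 2-tuple
//   /ip4/1.2.3.4/tcp/80                        kept
//   /ip4/1.2.3.4/tcp/80/ws                     dropped: raw TCP is preferred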
func quicVersion(addr ma.Multiaddr) (quic.VersionNumber, bool) {
found := false
foundWebTransport := false
var version quic.VersionNumber
ma.ForEach(addr, func(c ma.Component) bool {
switch c.Protocol().Code {
case ma.P_QUIC:
version = quic.VersionDraft29
found = true
return true
case ma.P_QUIC_V1:
version = quic.Version1
found = true
return true
case ma.P_WEBTRANSPORT:
version = quic.Version1
foundWebTransport = true
return false
default:
return true
}
})
if foundWebTransport {
return 0, false
// addrPort returns the IP and port for a. p should be either ma.P_TCP or ma.P_UDP.
// a must be an (IP, TCP) or (IP, UDP) address.
func addrPort(a ma.Multiaddr, p int) (netip.AddrPort, error) {
ip, err := manet.ToIP(a)
if err != nil {
return netip.AddrPort{}, err
}
return version, found
}
// If we have QUIC addresses, we don't want to dial WebTransport addresses.
// It's better to have a native QUIC connection.
// Note that this is a hack. The correct solution would be a proper
// Happy-Eyeballs-style dialing.
func maybeRemoveWebTransportAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
var hasQuic, hasWebTransport bool
for _, addr := range addrs {
if _, isQuic := quicVersion(addr); isQuic {
hasQuic = true
}
if isWebTransport(addr) {
hasWebTransport = true
}
}
if !hasWebTransport || !hasQuic {
return addrs
}
var c int
for _, addr := range addrs {
if isWebTransport(addr) {
continue
}
addrs[c] = addr
c++
}
return addrs[:c]
}
// If we have QUIC V1 addresses, we don't want to dial QUIC draft29 addresses.
// This is a similar hack to the above. If we add one more hack like this, let's
// define a `Filterer` interface like the `Resolver` interface that transports
// can optionally implement if they want to filter the multiaddrs.
//
// This mutates the input
func maybeRemoveQUICDraft29(addrs []ma.Multiaddr) []ma.Multiaddr {
var hasQuicV1, hasQuicDraft29 bool
for _, addr := range addrs {
v, isQuic := quicVersion(addr)
if !isQuic {
continue
}
if v == quic.Version1 {
hasQuicV1 = true
}
if v == quic.VersionDraft29 {
hasQuicDraft29 = true
}
}
if !hasQuicDraft29 || !hasQuicV1 {
return addrs
}
var c int
for _, addr := range addrs {
if v, isQuic := quicVersion(addr); isQuic && v == quic.VersionDraft29 {
continue
}
addrs[c] = addr
c++
}
return addrs[:c]
port, err := a.ValueForProtocol(p)
if err != nil {
return netip.AddrPort{}, err
}
pi, err := strconv.Atoi(port)
if err != nil {
return netip.AddrPort{}, err
}
addr, ok := netip.AddrFromSlice(ip)
if !ok {
return netip.AddrPort{}, fmt.Errorf("failed to parse IP %s", ip)
}
return netip.AddrPortFrom(addr, uint16(pi)), nil
}
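For instance, inside this package (illustrative values; ma.StringCast panics on malformed input):

a := ma.StringCast("/ip4/127.0.0.1/tcp/4001")
ap, err := addrPort(a, ma.P_TCP)
// ap == 127.0.0.1:4001 (a netip.AddrPort), err == nil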

View file

@ -1,6 +1,7 @@
package swarm
import (
"errors"
"fmt"
"time"
@ -127,6 +128,9 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
for {
c, err := list.Accept()
if err != nil {
if !errors.Is(err, transport.ErrListenerClosed) {
log.Errorf("swarm listener for %s accept error: %s", a, err)
}
return
}
canonicallog.LogPeerStatus(100, c.RemotePeer(), c.RemoteMultiaddr(), "connection_status", "established", "dir", "inbound")

View file

@ -69,6 +69,22 @@ var (
},
[]string{"transport", "security", "muxer", "early_muxer", "ip_version"},
)
dialsPerPeer = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: metricNamespace,
Name: "dials_per_peer_total",
Help: "Number of addresses dialed per peer",
},
[]string{"outcome", "num_dials"},
)
dialRankingDelay = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: metricNamespace,
Name: "dial_ranking_delay_seconds",
Help: "delay introduced by the dial ranking logic",
Buckets: []float64{0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 2},
},
)
collectors = []prometheus.Collector{
connsOpened,
keyTypes,
@ -76,6 +92,8 @@ var (
dialError,
connDuration,
connHandshakeLatency,
dialsPerPeer,
dialRankingDelay,
}
)
@ -84,6 +102,8 @@ type MetricsTracer interface {
ClosedConnection(network.Direction, time.Duration, network.ConnectionState, ma.Multiaddr)
CompletedHandshake(time.Duration, network.ConnectionState, ma.Multiaddr)
FailedDialing(ma.Multiaddr, error)
DialCompleted(success bool, totalDials int)
DialRankingDelay(d time.Duration)
}
type metricsTracer struct{}
@ -133,28 +153,13 @@ func appendConnectionState(tags []string, cs network.ConnectionState) []string {
return tags
}
func getIPVersion(addr ma.Multiaddr) string {
version := "unknown"
ma.ForEach(addr, func(c ma.Component) bool {
if c.Protocol().Code == ma.P_IP4 {
version = "ip4"
return false
} else if c.Protocol().Code == ma.P_IP6 {
version = "ip6"
return false
}
return true
})
return version
}
func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, cs network.ConnectionState, laddr ma.Multiaddr) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, metricshelper.GetDirection(dir))
*tags = appendConnectionState(*tags, cs)
*tags = append(*tags, getIPVersion(laddr))
*tags = append(*tags, metricshelper.GetIPVersion(laddr))
connsOpened.WithLabelValues(*tags...).Inc()
*tags = (*tags)[:0]
@ -169,7 +174,7 @@ func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Du
*tags = append(*tags, metricshelper.GetDirection(dir))
*tags = appendConnectionState(*tags, cs)
*tags = append(*tags, getIPVersion(laddr))
*tags = append(*tags, metricshelper.GetIPVersion(laddr))
connsClosed.WithLabelValues(*tags...).Inc()
connDuration.WithLabelValues(*tags...).Observe(duration.Seconds())
}
@ -179,19 +184,12 @@ func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.Connectio
defer metricshelper.PutStringSlice(tags)
*tags = appendConnectionState(*tags, cs)
*tags = append(*tags, getIPVersion(laddr))
*tags = append(*tags, metricshelper.GetIPVersion(laddr))
connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds())
}
var transports = [...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP}
func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, err error) {
var transport string
for _, t := range transports {
if _, err := addr.ValueForProtocol(t); err == nil {
transport = ma.ProtocolWithCode(t).Name
}
}
transport := metricshelper.GetTransport(addr)
e := "other"
if errors.Is(err, context.Canceled) {
e = "canceled"
@ -210,6 +208,30 @@ func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, err error) {
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, transport, e)
*tags = append(*tags, getIPVersion(addr))
*tags = append(*tags, metricshelper.GetIPVersion(addr))
dialError.WithLabelValues(*tags...).Inc()
}
func (m *metricsTracer) DialCompleted(success bool, totalDials int) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
if success {
*tags = append(*tags, "success")
} else {
*tags = append(*tags, "failed")
}
numDialLabels := [...]string{"0", "1", "2", "3", "4", "5", ">=6"}
var numDials string
if totalDials < len(numDialLabels) {
numDials = numDialLabels[totalDials]
} else {
numDials = numDialLabels[len(numDialLabels)-1]
}
*tags = append(*tags, numDials)
dialsPerPeer.WithLabelValues(*tags...).Inc()
}
func (m *metricsTracer) DialRankingDelay(d time.Duration) {
dialRankingDelay.Observe(d.Seconds())
}
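The num_dials label is bucketed: 0 through 5 map to themselves and anything larger collapses into ">=6". For example, with mt assumed to be a swarm MetricsTracer:

mt.DialCompleted(true, 8)  // dials_per_peer_total{outcome="success", num_dials=">=6"}++
mt.DialCompleted(false, 2) // dials_per_peer_total{outcome="failed", num_dials="2"}++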

View file

@ -3,6 +3,7 @@ package upgrader
import (
"context"
"fmt"
"strings"
"sync"
"github.com/libp2p/go-libp2p/core/network"
@ -165,6 +166,9 @@ func (l *listener) Accept() (transport.CapableConn, error) {
return c, nil
}
}
if strings.Contains(l.err.Error(), "use of closed network connection") {
return nil, transport.ErrListenerClosed
}
return nil, l.err
}

View file

@ -1,9 +1,9 @@
package client
import (
"errors"
"net"
"github.com/libp2p/go-libp2p/core/transport"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
@ -33,7 +33,7 @@ func (l *Listener) Accept() (manet.Conn, error) {
return evt.conn, nil
case <-l.ctx.Done():
return nil, errors.New("circuit v2 client closed")
return nil, transport.ErrListenerClosed
}
}
}

View file

@ -101,10 +101,8 @@ func (hp *holePuncher) DirectConnect(p peer.ID) error {
func (hp *holePuncher) directConnect(rp peer.ID) error {
// short-circuit check to see if we already have a direct connection
for _, c := range hp.host.Network().ConnsToPeer(rp) {
if !isRelayAddress(c.RemoteMultiaddr()) {
return nil
}
if getDirectConnection(hp.host, rp) != nil {
return nil
}
// short-circuit hole punching if a direct dial works.
@ -133,8 +131,8 @@ func (hp *holePuncher) directConnect(rp peer.ID) error {
log.Debugw("got inbound proxy conn", "peer", rp)
// hole punch
for i := 0; i < maxRetries; i++ {
addrs, rtt, err := hp.initiateHolePunch(rp)
for i := 1; i <= maxRetries; i++ {
addrs, obsAddrs, rtt, err := hp.initiateHolePunch(rp)
if err != nil {
log.Debugw("hole punching failed", "peer", rp, "error", err)
hp.tracer.ProtocolError(rp, err)
@ -159,44 +157,48 @@ func (hp *holePuncher) directConnect(rp peer.ID) error {
hp.tracer.EndHolePunch(rp, dt, err)
if err == nil {
log.Debugw("hole punching with successful", "peer", rp, "time", dt)
hp.tracer.HolePunchFinished("initiator", i, addrs, obsAddrs, getDirectConnection(hp.host, rp))
return nil
}
case <-hp.ctx.Done():
timer.Stop()
return hp.ctx.Err()
}
if i == maxRetries {
hp.tracer.HolePunchFinished("initiator", maxRetries, addrs, obsAddrs, nil)
}
}
return fmt.Errorf("all retries for hole punch with peer %s failed", rp)
}
// initiateHolePunch opens a new hole punching coordination stream,
// exchanges the addresses and measures the RTT.
func (hp *holePuncher) initiateHolePunch(rp peer.ID) ([]ma.Multiaddr, time.Duration, error) {
func (hp *holePuncher) initiateHolePunch(rp peer.ID) ([]ma.Multiaddr, []ma.Multiaddr, time.Duration, error) {
hpCtx := network.WithUseTransient(hp.ctx, "hole-punch")
sCtx := network.WithNoDial(hpCtx, "hole-punch")
str, err := hp.host.NewStream(sCtx, rp, Protocol)
if err != nil {
return nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err)
return nil, nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err)
}
defer str.Close()
addr, rtt, err := hp.initiateHolePunchImpl(str)
addr, obsAddr, rtt, err := hp.initiateHolePunchImpl(str)
if err != nil {
log.Debugf("%s", err)
str.Reset()
return addr, rtt, err
return addr, obsAddr, rtt, err
}
return addr, rtt, err
return addr, obsAddr, rtt, err
}
func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr, time.Duration, error) {
func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr, []ma.Multiaddr, time.Duration, error) {
if err := str.Scope().SetService(ServiceName); err != nil {
return nil, 0, fmt.Errorf("error attaching stream to holepunch service: %s", err)
return nil, nil, 0, fmt.Errorf("error attaching stream to holepunch service: %s", err)
}
if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
return nil, 0, fmt.Errorf("error reserving memory for stream: %s", err)
return nil, nil, 0, fmt.Errorf("error reserving memory for stream: %s", err)
}
defer str.Scope().ReleaseMemory(maxMsgSize)
@ -211,7 +213,7 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
obsAddrs = hp.filter.FilterLocal(str.Conn().RemotePeer(), obsAddrs)
}
if len(obsAddrs) == 0 {
return nil, 0, errors.New("aborting hole punch initiation as we have no public address")
return nil, nil, 0, errors.New("aborting hole punch initiation as we have no public address")
}
start := time.Now()
@ -220,17 +222,17 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
ObsAddrs: addrsToBytes(obsAddrs),
}); err != nil {
str.Reset()
return nil, 0, err
return nil, nil, 0, err
}
// wait for a CONNECT message from the remote peer
var msg pb.HolePunch
if err := rd.ReadMsg(&msg); err != nil {
return nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err)
return nil, nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err)
}
rtt := time.Since(start)
if t := msg.GetType(); t != pb.HolePunch_CONNECT {
return nil, 0, fmt.Errorf("expect CONNECT message, got %s", t)
return nil, nil, 0, fmt.Errorf("expect CONNECT message, got %s", t)
}
addrs := removeRelayAddrs(addrsFromBytes(msg.ObsAddrs))
@ -239,13 +241,13 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr
}
if len(addrs) == 0 {
return nil, 0, errors.New("didn't receive any public addresses in CONNECT")
return nil, nil, 0, errors.New("didn't receive any public addresses in CONNECT")
}
if err := w.WriteMsg(&pb.HolePunch{Type: pb.HolePunch_SYNC.Enum()}); err != nil {
return nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err)
return nil, nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err)
}
return addrs, rtt, nil
return addrs, obsAddrs, rtt, nil
}
func (hp *holePuncher) Close() error {

View file

@ -0,0 +1,187 @@
package holepunch
import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/p2p/metricshelper"
ma "github.com/multiformats/go-multiaddr"
"github.com/prometheus/client_golang/prometheus"
)
const metricNamespace = "libp2p_holepunch"
var (
directDialsTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: metricNamespace,
Name: "direct_dials_total",
Help: "Direct Dials Total",
},
[]string{"outcome"},
)
hpAddressOutcomesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: metricNamespace,
Name: "address_outcomes_total",
Help: "Hole Punch outcomes by Transport",
},
[]string{"side", "num_attempts", "ipv", "transport", "outcome"},
)
hpOutcomesTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: metricNamespace,
Name: "outcomes_total",
Help: "Hole Punch outcomes overall",
},
[]string{"side", "num_attempts", "outcome"},
)
collectors = []prometheus.Collector{
directDialsTotal,
hpAddressOutcomesTotal,
hpOutcomesTotal,
}
)
type MetricsTracer interface {
HolePunchFinished(side string, attemptNum int, theirAddrs []ma.Multiaddr, ourAddr []ma.Multiaddr, directConn network.ConnMultiaddrs)
DirectDialFinished(success bool)
}
type metricsTracer struct{}
var _ MetricsTracer = &metricsTracer{}
type metricsTracerSetting struct {
reg prometheus.Registerer
}
type MetricsTracerOption func(*metricsTracerSetting)
func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
return func(s *metricsTracerSetting) {
if reg != nil {
s.reg = reg
}
}
}
func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
for _, opt := range opts {
opt(setting)
}
metricshelper.RegisterCollectors(setting.reg, collectors...)
// initialise the metrics' labels so that the first data point is handled correctly
for _, side := range []string{"initiator", "receiver"} {
for _, numAttempts := range []string{"1", "2", "3", "4"} {
for _, outcome := range []string{"success", "failed", "cancelled", "no_suitable_address"} {
for _, ipv := range []string{"ip4", "ip6"} {
for _, transport := range []string{"quic", "quic-v1", "tcp", "webtransport"} {
hpAddressOutcomesTotal.WithLabelValues(side, numAttempts, ipv, transport, outcome)
}
}
if outcome == "cancelled" {
// not a valid outcome for the overall holepunch metric
continue
}
hpOutcomesTotal.WithLabelValues(side, numAttempts, outcome)
}
}
}
return &metricsTracer{}
}
// HolePunchFinished tracks metrics on completion of a hole punch. Metrics are tracked at
// the hole punch attempt level and for the individual addresses involved.
//
// outcome for an address is computed as:
//
// - success:
// A direct connection was established with the peer using this address
// - cancelled:
// A direct connection was established with the peer but not using this address
// - failed:
// No direct connection was made to the peer and the peer reported an address
// with the same transport as this address
// - no_suitable_address:
// The peer reported no address with the same transport as this address
func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int,
remoteAddrs []ma.Multiaddr, localAddrs []ma.Multiaddr, directConn network.ConnMultiaddrs) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
*tags = append(*tags, side, getNumAttemptString(numAttempts))
var dipv, dtransport string
if directConn != nil {
dipv = metricshelper.GetIPVersion(directConn.LocalMultiaddr())
dtransport = metricshelper.GetTransport(directConn.LocalMultiaddr())
}
matchingAddressCount := 0
// calculate holepunch outcome for all the addresses involved
for _, la := range localAddrs {
lipv := metricshelper.GetIPVersion(la)
ltransport := metricshelper.GetTransport(la)
matchingAddress := false
for _, ra := range remoteAddrs {
ripv := metricshelper.GetIPVersion(ra)
rtransport := metricshelper.GetTransport(ra)
if ripv == lipv && rtransport == ltransport {
// the peer reported an address with the same transport
matchingAddress = true
matchingAddressCount++
*tags = append(*tags, ripv, rtransport)
if directConn != nil && dipv == ripv && dtransport == rtransport {
// the connection was made using this address
*tags = append(*tags, "success")
} else if directConn != nil {
// connection was made but not using this address
*tags = append(*tags, "cancelled")
} else {
// no connection was made
*tags = append(*tags, "failed")
}
hpAddressOutcomesTotal.WithLabelValues(*tags...).Inc()
*tags = (*tags)[:2] // 2 because we want to keep (side, numAttempts)
break
}
}
if !matchingAddress {
*tags = append(*tags, lipv, ltransport, "no_suitable_address")
hpAddressOutcomesTotal.WithLabelValues(*tags...).Inc()
*tags = (*tags)[:2] // 2 because we want to keep (side, numAttempts)
}
}
outcome := "failed"
if directConn != nil {
outcome = "success"
} else if matchingAddressCount == 0 {
// there were no matching addresses, this attempt was going to fail
outcome = "no_suitable_address"
}
*tags = append(*tags, outcome)
hpOutcomesTotal.WithLabelValues(*tags...).Inc()
}
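To make the outcome rules concrete, suppose the hole punch succeeds over QUIC v1 (addresses illustrative):

// local:  /ip4/.../udp/1/quic-v1 and /ip4/.../tcp/1
// remote: /ip4/.../udp/2/quic-v1 only
//   quic-v1 address -> "success"             (matching transport, carried the direct conn)
//   tcp address     -> "no_suitable_address" (peer reported no tcp address)
// overall outcome   -> "success"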
func getNumAttemptString(numAttempt int) string {
var attemptStr = [...]string{"0", "1", "2", "3", "4", "5"}
if numAttempt > 5 {
return "> 5"
}
return attemptStr[numAttempt]
}
func (mt *metricsTracer) DirectDialFinished(success bool) {
tags := metricshelper.GetStringSlice()
defer metricshelper.PutStringSlice(tags)
if success {
*tags = append(*tags, "success")
} else {
*tags = append(*tags, "failed")
}
directDialsTotal.WithLabelValues(*tags...).Inc()
}

View file

@ -84,6 +84,7 @@ func NewService(h host.Host, ids identify.IDService, opts ...Option) (*Service,
return nil, err
}
}
s.tracer.Start()
s.refCount.Add(1)
go s.watchForPublicAddr()
@ -165,24 +166,24 @@ func (s *Service) Close() error {
return err
}
func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addrs []ma.Multiaddr, err error) {
func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remoteAddrs []ma.Multiaddr, ownAddrs []ma.Multiaddr, err error) {
// sanity check: a hole punch request should only come from peers behind a relay
if !isRelayAddress(str.Conn().RemoteMultiaddr()) {
return 0, nil, fmt.Errorf("received hole punch stream: %s", str.Conn().RemoteMultiaddr())
return 0, nil, nil, fmt.Errorf("received hole punch stream: %s", str.Conn().RemoteMultiaddr())
}
ownAddrs := removeRelayAddrs(s.ids.OwnObservedAddrs())
ownAddrs = removeRelayAddrs(s.ids.OwnObservedAddrs())
if s.filter != nil {
ownAddrs = s.filter.FilterLocal(str.Conn().RemotePeer(), ownAddrs)
}
// If we can't tell the peer where to dial us, there's no point in starting the hole punching.
if len(ownAddrs) == 0 {
return 0, nil, errors.New("rejecting hole punch request, as we don't have any public addresses")
return 0, nil, nil, errors.New("rejecting hole punch request, as we don't have any public addresses")
}
if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
log.Debugf("error reserving memory for stream: %s, err")
return 0, nil, err
return 0, nil, nil, err
}
defer str.Scope().ReleaseMemory(maxMsgSize)
@ -195,10 +196,10 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addr
str.SetDeadline(time.Now().Add(StreamTimeout))
if err := rd.ReadMsg(msg); err != nil {
return 0, nil, fmt.Errorf("failed to read message from initator: %w", err)
return 0, nil, nil, fmt.Errorf("failed to read message from initator: %w", err)
}
if t := msg.GetType(); t != pb.HolePunch_CONNECT {
return 0, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t)
return 0, nil, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t)
}
obsDial := removeRelayAddrs(addrsFromBytes(msg.ObsAddrs))
@ -208,7 +209,7 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addr
log.Debugw("received hole punch request", "peer", str.Conn().RemotePeer(), "addrs", obsDial)
if len(obsDial) == 0 {
return 0, nil, errors.New("expected CONNECT message to contain at least one address")
return 0, nil, nil, errors.New("expected CONNECT message to contain at least one address")
}
// Write CONNECT message
@ -217,18 +218,18 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addr
msg.ObsAddrs = addrsToBytes(ownAddrs)
tstart := time.Now()
if err := wr.WriteMsg(msg); err != nil {
return 0, nil, fmt.Errorf("failed to write CONNECT message to initator: %w", err)
return 0, nil, nil, fmt.Errorf("failed to write CONNECT message to initator: %w", err)
}
// Read SYNC message
msg.Reset()
if err := rd.ReadMsg(msg); err != nil {
return 0, nil, fmt.Errorf("failed to read message from initator: %w", err)
return 0, nil, nil, fmt.Errorf("failed to read message from initator: %w", err)
}
if t := msg.GetType(); t != pb.HolePunch_SYNC {
return 0, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t)
return 0, nil, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t)
}
return time.Since(tstart), obsDial, nil
return time.Since(tstart), obsDial, ownAddrs, nil
}
func (s *Service) handleNewStream(str network.Stream) {
@ -249,7 +250,7 @@ func (s *Service) handleNewStream(str network.Stream) {
}
rp := str.Conn().RemotePeer()
rtt, addrs, err := s.incomingHolePunch(str)
rtt, addrs, ownAddrs, err := s.incomingHolePunch(str)
if err != nil {
s.tracer.ProtocolError(rp, err)
log.Debugw("error handling holepunching stream from", "peer", rp, "error", err)
@ -270,6 +271,7 @@ func (s *Service) handleNewStream(str network.Stream) {
err = holePunchConnect(s.ctx, s.host, pi, false)
dt := time.Since(start)
s.tracer.EndHolePunch(rp, dt, err)
s.tracer.HolePunchFinished("receiver", 1, addrs, ownAddrs, getDirectConnection(s.host, rp))
}
// DirectConnect is only exposed for testing purposes.

View file

@ -2,10 +2,10 @@ package holepunch
import (
"context"
"fmt"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
@ -16,27 +16,57 @@ const (
tracerCacheDuration = 5 * time.Minute
)
// WithTracer is a Service option that enables hole punching tracing
func WithTracer(tr EventTracer) Option {
// WithTracer enables holepunch tracing with EventTracer et
func WithTracer(et EventTracer) Option {
return func(hps *Service) error {
t := &tracer{
tr: tr,
hps.tracer = &tracer{
et: et,
mt: nil,
self: hps.host.ID(),
peers: make(map[peer.ID]struct {
counter int
last time.Time
}),
}
return nil
}
}
// WithMetricsTracer enables holepunch Tracing with MetricsTracer mt
func WithMetricsTracer(mt MetricsTracer) Option {
return func(hps *Service) error {
hps.tracer = &tracer{
et: nil,
mt: mt,
self: hps.host.ID(),
peers: make(map[peer.ID]struct {
counter int
last time.Time
}),
}
return nil
}
}
// WithMetricsAndEventTracer enables holepunch tracking with MetricsTracer and EventTracer
func WithMetricsAndEventTracer(mt MetricsTracer, et EventTracer) Option {
return func(hps *Service) error {
hps.tracer = &tracer{
et: et,
mt: mt,
self: hps.host.ID(),
peers: make(map[peer.ID]struct {
counter int
last time.Time
}),
}
t.refCount.Add(1)
t.ctx, t.ctxCancel = context.WithCancel(context.Background())
go t.gc()
hps.tracer = t
return nil
}
}
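Typical wiring of the new options, assuming a host h, an identify service ids, and a hypothetical myEventTracer:

// Metrics only:
svc, err := holepunch.NewService(h, ids, holepunch.WithMetricsTracer(holepunch.NewMetricsTracer()))
// Metrics plus event tracing:
svc, err = holepunch.NewService(h, ids, holepunch.WithMetricsAndEventTracer(holepunch.NewMetricsTracer(), myEventTracer))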
type tracer struct {
tr EventTracer
et EventTracer
mt MetricsTracer
self peer.ID
refCount sync.WaitGroup
@ -103,16 +133,22 @@ func (t *tracer) DirectDialSuccessful(p peer.ID, dt time.Duration) {
return
}
t.tr.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: DirectDialEvtT,
Evt: &DirectDialEvt{
Success: true,
EllapsedTime: dt,
},
})
if t.et != nil {
t.et.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: DirectDialEvtT,
Evt: &DirectDialEvt{
Success: true,
EllapsedTime: dt,
},
})
}
if t.mt != nil {
t.mt.DirectDialFinished(true)
}
}
func (t *tracer) DirectDialFailed(p peer.ID, dt time.Duration, err error) {
@ -120,108 +156,110 @@ func (t *tracer) DirectDialFailed(p peer.ID, dt time.Duration, err error) {
return
}
t.tr.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: DirectDialEvtT,
Evt: &DirectDialEvt{
Success: false,
EllapsedTime: dt,
Error: err.Error(),
},
})
if t.et != nil {
t.et.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: DirectDialEvtT,
Evt: &DirectDialEvt{
Success: false,
EllapsedTime: dt,
Error: err.Error(),
},
})
}
if t.mt != nil {
t.mt.DirectDialFinished(false)
}
}
func (t *tracer) ProtocolError(p peer.ID, err error) {
if t == nil {
return
if t != nil && t.et != nil {
t.et.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: ProtocolErrorEvtT,
Evt: &ProtocolErrorEvt{
Error: err.Error(),
},
})
}
t.tr.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: ProtocolErrorEvtT,
Evt: &ProtocolErrorEvt{
Error: err.Error(),
},
})
}
func (t *tracer) StartHolePunch(p peer.ID, obsAddrs []ma.Multiaddr, rtt time.Duration) {
if t == nil {
return
}
if t != nil && t.et != nil {
addrs := make([]string, 0, len(obsAddrs))
for _, a := range obsAddrs {
addrs = append(addrs, a.String())
}
addrs := make([]string, 0, len(obsAddrs))
for _, a := range obsAddrs {
addrs = append(addrs, a.String())
t.et.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: StartHolePunchEvtT,
Evt: &StartHolePunchEvt{
RemoteAddrs: addrs,
RTT: rtt,
},
})
}
t.tr.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: StartHolePunchEvtT,
Evt: &StartHolePunchEvt{
RemoteAddrs: addrs,
RTT: rtt,
},
})
}
func (t *tracer) EndHolePunch(p peer.ID, dt time.Duration, err error) {
if t == nil {
return
}
if t != nil && t.et != nil {
evt := &EndHolePunchEvt{
Success: err == nil,
EllapsedTime: dt,
}
if err != nil {
evt.Error = err.Error()
}
evt := &EndHolePunchEvt{
Success: err == nil,
EllapsedTime: dt,
}
if err != nil {
evt.Error = err.Error()
t.et.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: EndHolePunchEvtT,
Evt: evt,
})
}
}
t.tr.Trace(&Event{
Timestamp: time.Now().UnixNano(),
Peer: t.self,
Remote: p,
Type: EndHolePunchEvtT,
Evt: evt,
})
func (t *tracer) HolePunchFinished(side string, numAttempts int, theirAddrs []ma.Multiaddr, ourAddrs []ma.Multiaddr, directConn network.Conn) {
if t != nil && t.mt != nil {
t.mt.HolePunchFinished(side, numAttempts, theirAddrs, ourAddrs, directConn)
}
}
func (t *tracer) HolePunchAttempt(p peer.ID) {
if t == nil {
return
if t != nil && t.et != nil {
now := time.Now()
t.mutex.Lock()
attempt := t.peers[p]
attempt.counter++
counter := attempt.counter
attempt.last = now
t.peers[p] = attempt
t.mutex.Unlock()
t.et.Trace(&Event{
Timestamp: now.UnixNano(),
Peer: t.self,
Remote: p,
Type: HolePunchAttemptEvtT,
Evt: &HolePunchAttemptEvt{Attempt: counter},
})
}
now := time.Now()
t.mutex.Lock()
attempt := t.peers[p]
attempt.counter++
counter := attempt.counter
attempt.last = now
t.peers[p] = attempt
t.mutex.Unlock()
t.tr.Trace(&Event{
Timestamp: now.UnixNano(),
Peer: t.self,
Remote: p,
Type: HolePunchAttemptEvtT,
Evt: &HolePunchAttemptEvt{Attempt: counter},
})
}
// gc cleans up the peers map. This is only run when the tracer is initialised with a non-nil
// EventTracer.
func (t *tracer) gc() {
defer func() {
fmt.Println("done")
t.refCount.Done()
}()
defer t.refCount.Done()
timer := time.NewTicker(tracerGCInterval)
defer timer.Stop()
@ -242,12 +280,18 @@ func (t *tracer) gc() {
}
}
func (t *tracer) Close() error {
if t == nil {
return nil
func (t *tracer) Start() {
if t != nil && t.et != nil {
t.ctx, t.ctxCancel = context.WithCancel(context.Background())
t.refCount.Add(1)
go t.gc()
}
}
t.ctxCancel()
t.refCount.Wait()
func (t *tracer) Close() error {
if t != nil && t.et != nil {
t.ctxCancel()
t.refCount.Wait()
}
return nil
}

View file

@ -55,6 +55,15 @@ func addrsFromBytes(bzs [][]byte) []ma.Multiaddr {
return addrs
}
func getDirectConnection(h host.Host, p peer.ID) network.Conn {
for _, c := range h.Network().ConnsToPeer(p) {
if !isRelayAddress(c.RemoteMultiaddr()) {
return c
}
}
return nil
}
func holePunchConnect(ctx context.Context, host host.Host, pi peer.AddrInfo, isClient bool) error {
holePunchCtx := network.WithSimultaneousConnect(ctx, isClient, "hole-punching")
forceDirectConnCtx := network.WithForceDirectDial(holePunchCtx, "hole-punching")

View file

@ -1,12 +1,17 @@
package identify
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"sort"
"sync"
"time"
"golang.org/x/exp/slices"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/event"
"github.com/libp2p/go-libp2p/core/host"
@ -38,14 +43,11 @@ const (
IDPush = "/ipfs/id/push/1.0.0"
)
const DefaultProtocolVersion = "ipfs/0.1.0"
const ServiceName = "libp2p.identify"
const maxPushConcurrency = 32
// StreamReadTimeout is the read timeout on all incoming Identify family streams.
var StreamReadTimeout = 60 * time.Second
var Timeout = 60 * time.Second // timeout on all incoming Identify interactions
const (
legacyIDSize = 2 * 1024 // 2k Bytes
@ -62,6 +64,31 @@ type identifySnapshot struct {
record *record.Envelope
}
// Equal reports whether two snapshots are identical.
// It does NOT compare the sequence number.
func (s identifySnapshot) Equal(other *identifySnapshot) bool {
hasRecord := s.record != nil
otherHasRecord := other.record != nil
if hasRecord != otherHasRecord {
return false
}
if hasRecord && !s.record.Equal(other.record) {
return false
}
if !slices.Equal(s.protocols, other.protocols) {
return false
}
if len(s.addrs) != len(other.addrs) {
return false
}
for i, a := range s.addrs {
if !a.Equal(other.addrs[i]) {
return false
}
}
return true
}
type IDService interface {
// IdentifyConn synchronously triggers an identify request on the connection and
// waits for it to complete. If the connection is being identified by another
@ -160,16 +187,11 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) {
userAgent = cfg.userAgent
}
protocolVersion := DefaultProtocolVersion
if cfg.protocolVersion != "" {
protocolVersion = cfg.protocolVersion
}
ctx, cancel := context.WithCancel(context.Background())
s := &idService{
Host: h,
UserAgent: userAgent,
ProtocolVersion: protocolVersion,
ProtocolVersion: cfg.protocolVersion,
ctx: ctx,
ctxCancel: cancel,
conns: make(map[network.Conn]entry),
@ -249,10 +271,12 @@ func (ids *idService) loop(ctx context.Context) {
if !ok {
return
}
if updated := ids.updateSnapshot(); !updated {
continue
}
if ids.metricsTracer != nil {
ids.metricsTracer.TriggeredPushes(e)
}
ids.updateSnapshot()
select {
case triggerPush <- struct{}{}:
default: // we already have one more push queued, no need to queue another one
@ -385,11 +409,14 @@ func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
}
func (ids *idService) identifyConn(c network.Conn) error {
s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify"))
ctx, cancel := context.WithTimeout(context.Background(), Timeout)
defer cancel()
s, err := c.NewStream(network.WithUseTransient(ctx, "identify"))
if err != nil {
log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err)
return err
}
s.SetDeadline(time.Now().Add(Timeout))
if err := s.SetProtocol(ID); err != nil {
log.Warnf("error setting identify protocol for stream: %s", err)
@ -408,6 +435,7 @@ func (ids *idService) identifyConn(c network.Conn) error {
// handlePush handles incoming identify push streams
func (ids *idService) handlePush(s network.Stream) {
s.SetDeadline(time.Now().Add(Timeout))
ids.handleIdentifyResponse(s, true)
}
@ -469,8 +497,6 @@ func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) erro
}
defer s.Scope().ReleaseMemory(signedIDSize)
_ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout))
c := s.Conn()
r := pbio.NewDelimitedReader(s, signedIDSize)
@ -529,11 +555,16 @@ func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error {
return fmt.Errorf("too many parts")
}
func (ids *idService) updateSnapshot() {
func (ids *idService) updateSnapshot() (updated bool) {
addrs := ids.Host.Addrs()
sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) == -1 })
protos := ids.Host.Mux().Protocols()
sort.Slice(protos, func(i, j int) bool { return protos[i] < protos[j] })
snapshot := identifySnapshot{
addrs: ids.Host.Addrs(),
protocols: ids.Host.Mux().Protocols(),
addrs: addrs,
protocols: protos,
}
if !ids.disableSignedPeerRecord {
if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
snapshot.record = cab.GetPeerRecord(ids.Host.ID())
@ -541,11 +572,17 @@ func (ids *idService) updateSnapshot() {
}
ids.currentSnapshot.Lock()
defer ids.currentSnapshot.Unlock()
if ids.currentSnapshot.snapshot.Equal(&snapshot) {
return false
}
snapshot.seq = ids.currentSnapshot.snapshot.seq + 1
ids.currentSnapshot.snapshot = snapshot
ids.currentSnapshot.Unlock()
log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs)
return true
}
func (ids *idService) writeChunkedIdentifyMsg(s network.Stream, mes *pb.Identify) error {
@ -722,16 +759,18 @@ func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn, isPush bo
ids.Host.Peerstore().UpdateAddrs(p, ttl, peerstore.TempAddrTTL)
}
// add signed addrs if we have them and the peerstore supports them
cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore())
if ok && signedPeerRecord != nil {
_, addErr := cab.ConsumePeerRecord(signedPeerRecord, ttl)
if addErr != nil {
log.Debugf("error adding signed addrs to peerstore: %v", addErr)
var addrs []ma.Multiaddr
if signedPeerRecord != nil {
signedAddrs, err := ids.consumeSignedPeerRecord(c.RemotePeer(), signedPeerRecord)
if err != nil {
log.Debugf("failed to consume signed peer record: %s", err)
} else {
addrs = signedAddrs
}
} else {
ids.Host.Peerstore().AddAddrs(p, lmaddrs, ttl)
addrs = lmaddrs
}
ids.Host.Peerstore().AddAddrs(p, filterAddrs(addrs, c.RemoteMultiaddr()), ttl)
// Finally, expire all temporary addrs.
ids.Host.Peerstore().UpdateAddrs(p, peerstore.TempAddrTTL, 0)
@ -750,6 +789,34 @@ func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn, isPush bo
ids.consumeReceivedPubKey(c, mes.PublicKey)
}
func (ids *idService) consumeSignedPeerRecord(p peer.ID, signedPeerRecord *record.Envelope) ([]ma.Multiaddr, error) {
if signedPeerRecord.PublicKey == nil {
return nil, errors.New("missing pubkey")
}
id, err := peer.IDFromPublicKey(signedPeerRecord.PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to derive peer ID: %s", err)
}
if id != p {
return nil, fmt.Errorf("received signed peer record envelope for unexpected peer ID. expected %s, got %s", p, id)
}
r, err := signedPeerRecord.Record()
if err != nil {
return nil, fmt.Errorf("failed to obtain record: %w", err)
}
rec, ok := r.(*peer.PeerRecord)
if !ok {
return nil, errors.New("not a peer record")
}
if rec.PeerID != p {
return nil, fmt.Errorf("received signed peer record for unexpected peer ID. expected %s, got %s", p, rec.PeerID)
}
// Don't put the signed peer record into the peer store.
// They're not used anywhere.
// All we care about are the addresses.
return rec.Addrs, nil
}
func (ids *idService) consumeReceivedPubKey(c network.Conn, kb []byte) {
lp := c.LocalPeer()
rp := c.RemotePeer()
@ -920,3 +987,17 @@ func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {}
// filterAddrs filters the address slice based on the remote multiaddr:
// * if it's a localhost address, no filtering is applied
// * if it's a local network address, all localhost addresses are filtered out
// * if it's a public address, all localhost and local network addresses are filtered out
func filterAddrs(addrs []ma.Multiaddr, remote ma.Multiaddr) []ma.Multiaddr {
if manet.IsIPLoopback(remote) {
return addrs
}
if manet.IsPrivateAddr(remote) {
return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) })
}
return ma.FilterAddrs(addrs, manet.IsPublicAddr)
}
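Concretely (addresses illustrative):

// remote = /ip4/203.0.113.7/tcp/4001 (public)  -> keep only public addresses
// remote = /ip4/192.168.1.5/tcp/4001 (private) -> drop loopback, keep LAN and public
// remote = /ip4/127.0.0.1/tcp/4001 (loopback)  -> keep everything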

View file

@ -1,7 +1,6 @@
package libp2pquic
import (
"errors"
"sync"
tpt "github.com/libp2p/go-libp2p/core/transport"
@ -30,7 +29,7 @@ func (l *virtualListener) Multiaddr() ma.Multiaddr {
}
func (l *virtualListener) Close() error {
l.acceptRunnner.RmAcceptForVersion(l.version)
l.acceptRunnner.RmAcceptForVersion(l.version, tpt.ErrListenerClosed)
return l.t.CloseVirtualListener(l)
}
@ -65,7 +64,7 @@ func (r *acceptLoopRunner) AcceptForVersion(v quic.VersionNumber) chan acceptVal
return ch
}
func (r *acceptLoopRunner) RmAcceptForVersion(v quic.VersionNumber) {
func (r *acceptLoopRunner) RmAcceptForVersion(v quic.VersionNumber, err error) {
r.muxerMu.Lock()
defer r.muxerMu.Unlock()
@ -78,7 +77,7 @@ func (r *acceptLoopRunner) RmAcceptForVersion(v quic.VersionNumber) {
if !ok {
panic("expected chan in accept muxer")
}
ch <- acceptVal{err: errors.New("listener Accept closed")}
ch <- acceptVal{err: err}
delete(r.muxer, v)
}
@ -104,7 +103,7 @@ func (r *acceptLoopRunner) innerAccept(l *listener, expectedVersion quic.Version
// Check if we have a buffered connection first from an earlier Accept call
case v, ok := <-bufferedConnChan:
if !ok {
return nil, errors.New("listener closed")
return nil, tpt.ErrListenerClosed
}
return v.conn, v.err
default:
@ -166,7 +165,7 @@ func (r *acceptLoopRunner) Accept(l *listener, expectedVersion quic.VersionNumbe
}
case v, ok := <-bufferedConnChan:
if !ok {
return nil, errors.New("listener closed")
return nil, tpt.ErrListenerClosed
}
conn = v.conn
err = v.err

View file

@ -7,8 +7,10 @@ import (
"fmt"
"io"
"net"
"strings"
"sync"
"github.com/libp2p/go-libp2p/core/transport"
ma "github.com/multiformats/go-multiaddr"
"github.com/quic-go/quic-go"
)
@ -134,6 +136,9 @@ func (l *connListener) Run() error {
for {
conn, err := l.l.Accept(context.Background())
if err != nil {
if errors.Is(err, quic.ErrServerClosed) || strings.Contains(err.Error(), "use of closed network connection") {
return transport.ErrListenerClosed
}
return err
}
proto := conn.ConnectionState().TLS.NegotiatedProtocol
@ -192,10 +197,10 @@ func (l *listener) Accept(ctx context.Context) (quic.Connection, error) {
case <-ctx.Done():
return nil, ctx.Err()
case <-l.acceptLoopRunning:
return nil, errors.New("accept goroutine finished")
return nil, transport.ErrListenerClosed
case c, ok := <-l.queue:
if !ok {
return nil, errors.New("listener closed")
return nil, transport.ErrListenerClosed
}
return c, nil
}

View file

@ -74,16 +74,21 @@ type reuse struct {
routes routing.Router
unicast map[string] /* IP.String() */ map[int] /* port */ *reuseConn
// global contains connections that are listening on 0.0.0.0 / ::
global map[int]*reuseConn
// globalListeners contains connections that are listening on 0.0.0.0 / ::
globalListeners map[int]*reuseConn
// globalDialers contains connections that we've dialed out from. These connections are listening on 0.0.0.0 / ::
// On Dial, connections are reused from this map if no connection is available in the globalListeners
// On Listen, connections are reused from this map if the requested port is 0, and then moved to globalListeners
globalDialers map[int]*reuseConn
}
func newReuse() *reuse {
r := &reuse{
unicast: make(map[string]map[int]*reuseConn),
global: make(map[int]*reuseConn),
closeChan: make(chan struct{}),
gcStopChan: make(chan struct{}),
unicast: make(map[string]map[int]*reuseConn),
globalListeners: make(map[int]*reuseConn),
globalDialers: make(map[int]*reuseConn),
closeChan: make(chan struct{}),
gcStopChan: make(chan struct{}),
}
go r.gc()
return r
@ -92,7 +97,10 @@ func newReuse() *reuse {
func (r *reuse) gc() {
defer func() {
r.mutex.Lock()
for _, conn := range r.global {
for _, conn := range r.globalListeners {
conn.Close()
}
for _, conn := range r.globalDialers {
conn.Close()
}
for _, conns := range r.unicast {
@ -113,10 +121,16 @@ func (r *reuse) gc() {
case <-ticker.C:
now := time.Now()
r.mutex.Lock()
for key, conn := range r.global {
for key, conn := range r.globalListeners {
if conn.ShouldGarbageCollect(now) {
conn.Close()
delete(r.global, key)
delete(r.globalListeners, key)
}
}
for key, conn := range r.globalDialers {
if conn.ShouldGarbageCollect(now) {
conn.Close()
delete(r.globalDialers, key)
}
}
for ukey, conns := range r.unicast {
@ -185,7 +199,12 @@ func (r *reuse) dialLocked(network string, source *net.IP) (*reuseConn, error) {
// Use a connection listening on 0.0.0.0 (or ::).
// Again, we don't care about the port number.
for _, conn := range r.global {
for _, conn := range r.globalListeners {
return conn, nil
}
// Use a connection we've previously dialed from
for _, conn := range r.globalDialers {
return conn, nil
}
@ -203,29 +222,59 @@ func (r *reuse) dialLocked(network string, source *net.IP) (*reuseConn, error) {
return nil, err
}
rconn := newReuseConn(conn)
r.global[conn.LocalAddr().(*net.UDPAddr).Port] = rconn
r.globalDialers[conn.LocalAddr().(*net.UDPAddr).Port] = rconn
return rconn, nil
}
func (r *reuse) Listen(network string, laddr *net.UDPAddr) (*reuseConn, error) {
r.mutex.Lock()
defer r.mutex.Unlock()
// Check if we can reuse a connection we have already dialed out from.
// We reuse a connection from globalDialers when the requested port is 0 or the requested
// port is already in the globalDialers.
// If we are reusing a connection from globalDialers, we move the globalDialers entry to
// globalListeners
if laddr.IP.IsUnspecified() {
var rconn *reuseConn
var localAddr *net.UDPAddr
if laddr.Port == 0 {
// the requested port is 0, we can reuse any connection
for _, conn := range r.globalDialers {
rconn = conn
localAddr = rconn.UDPConn.LocalAddr().(*net.UDPAddr)
delete(r.globalDialers, localAddr.Port)
break
}
} else if _, ok := r.globalDialers[laddr.Port]; ok {
rconn = r.globalDialers[laddr.Port]
localAddr = rconn.UDPConn.LocalAddr().(*net.UDPAddr)
delete(r.globalDialers, localAddr.Port)
}
// found a match
if rconn != nil {
rconn.IncreaseCount()
r.globalListeners[localAddr.Port] = rconn
return rconn, nil
}
}
conn, err := net.ListenUDP(network, laddr)
if err != nil {
return nil, err
}
localAddr := conn.LocalAddr().(*net.UDPAddr)
rconn := newReuseConn(conn)
rconn.IncreaseCount()
r.mutex.Lock()
defer r.mutex.Unlock()
rconn.IncreaseCount()
// Deal with listen on a global address
if localAddr.IP.IsUnspecified() {
// The kernel already checked that the laddr is not already listened on,
// so we need not check here (when we create ListenUDP).
r.global[localAddr.Port] = rconn
return rconn, err
r.globalListeners[localAddr.Port] = rconn
return rconn, nil
}
// Deal with listen on a unicast address
@ -239,7 +288,7 @@ func (r *reuse) Listen(network string, laddr *net.UDPAddr) (*reuseConn, error) {
// The kernel already checked that the laddr is not already listened on,
// so we need not check here (when we create ListenUDP).
r.unicast[localAddr.IP.String()][localAddr.Port] = rconn
return rconn, err
return rconn, nil
}
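A sketch of the resulting reuse behaviour, using package-internal names and an assumed Dial helper on reuse:

rc, _ := r.Dial("udp4", &net.UDPAddr{IP: net.IPv4(203, 0, 113, 7), Port: 4001})
// rc is bound to 0.0.0.0:<ephemeral> and tracked in globalDialers.
lc, _ := r.Listen("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
// The wildcard listen with port 0 adopts the dialer's socket (lc == rc),
// moving it from globalDialers to globalListeners, so inbound and outbound
// QUIC share one UDP port — which is what hole punching needs.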
func (r *reuse) Close() error {

View file

@ -10,28 +10,52 @@ import (
manet "github.com/multiformats/go-multiaddr/net"
)
// addrWrapper is an implementation of net.Addr for WebSocket.
type addrWrapper struct {
// Addr is an implementation of net.Addr for WebSocket.
type Addr struct {
*url.URL
}
var _ net.Addr = addrWrapper{}
var _ net.Addr = (*Addr)(nil)
// Network returns the network type for a WebSocket, "websocket".
func (a addrWrapper) Network() string {
func (addr *Addr) Network() string {
return "websocket"
}
// NewAddr creates an Addr with `ws` scheme (insecure).
//
// Deprecated: Use NewAddrWithScheme instead.
func NewAddr(host string) *Addr {
// Older versions of the transport only supported insecure connections (i.e.
// WS instead of WSS). Assume that is the case here.
return NewAddrWithScheme(host, false)
}
// NewAddrWithScheme creates a new Addr using the given host string. isSecure
// should be true for WSS connections and false for WS.
func NewAddrWithScheme(host string, isSecure bool) *Addr {
scheme := "ws"
if isSecure {
scheme = "wss"
}
return &Addr{
URL: &url.URL{
Scheme: scheme,
Host: host,
},
}
}
func ConvertWebsocketMultiaddrToNetAddr(maddr ma.Multiaddr) (net.Addr, error) {
url, err := parseMultiaddr(maddr)
if err != nil {
return nil, err
}
return addrWrapper{URL: url}, nil
return &Addr{URL: url}, nil
}
func ParseWebsocketNetAddr(a net.Addr) (ma.Multiaddr, error) {
wsa, ok := a.(addrWrapper)
wsa, ok := a.(*Addr)
if !ok {
return nil, fmt.Errorf("not a websocket address")
}

View file

@ -1,44 +1,156 @@
package websocket
import (
"io"
"net"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/transport"
ws "github.com/gorilla/websocket"
)
const maxReadAttempts = 5
// GracefulCloseTimeout is the time to wait trying to gracefully close a
// connection before simply cutting it.
var GracefulCloseTimeout = 100 * time.Millisecond
type conn struct {
net.Conn
readAttempts uint8
localAddr addrWrapper
remoteAddr addrWrapper
// Conn implements net.Conn interface for gorilla/websocket.
type Conn struct {
*ws.Conn
secure bool
DefaultMessageType int
reader io.Reader
closeOnce sync.Once
readLock, writeLock sync.Mutex
}
var _ net.Conn = conn{}
var _ net.Conn = (*Conn)(nil)
func (c conn) LocalAddr() net.Addr {
return c.localAddr
}
func (c conn) RemoteAddr() net.Addr {
return c.remoteAddr
}
func (c conn) Read(b []byte) (int, error) {
n, err := c.Conn.Read(b)
if err == nil && n == 0 && c.readAttempts < maxReadAttempts {
c.readAttempts++
// Nothing happened, let's read again. We reached the end of the frame
// (https://github.com/nhooyr/websocket/blob/master/netconn.go#L118).
// The next read will block until we get
// the next frame. We limit here to avoid looping in case of a bunch of
// empty frames. Would be better if the websocket library did not
// return 0, nil here (see https://github.com/nhooyr/websocket/issues/367). But until then, this is our workaround.
return c.Read(b)
// NewConn creates a Conn given a regular gorilla/websocket Conn.
func NewConn(raw *ws.Conn, secure bool) *Conn {
return &Conn{
Conn: raw,
secure: secure,
DefaultMessageType: ws.BinaryMessage,
}
return n, err
}
func (c *Conn) Read(b []byte) (int, error) {
c.readLock.Lock()
defer c.readLock.Unlock()
if c.reader == nil {
if err := c.prepNextReader(); err != nil {
return 0, err
}
}
for {
n, err := c.reader.Read(b)
switch err {
case io.EOF:
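// gorilla/websocket hands out one reader per message and ends it with
// io.EOF; clear the exhausted reader and fetch the next frame below so
// this Conn presents a continuous byte stream.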
c.reader = nil
if n > 0 {
return n, nil
}
if err := c.prepNextReader(); err != nil {
return 0, err
}
// explicitly looping
default:
return n, err
}
}
}
func (c *Conn) prepNextReader() error {
t, r, err := c.Conn.NextReader()
if err != nil {
if wserr, ok := err.(*ws.CloseError); ok {
if wserr.Code == 1000 || wserr.Code == 1005 {
return io.EOF
}
}
return err
}
if t == ws.CloseMessage {
return io.EOF
}
c.reader = r
return nil
}
func (c *Conn) Write(b []byte) (n int, err error) {
c.writeLock.Lock()
defer c.writeLock.Unlock()
if err := c.Conn.WriteMessage(c.DefaultMessageType, b); err != nil {
return 0, err
}
return len(b), nil
}
// Close closes the connection. Only the first call to Close will receive the
// close error, subsequent and concurrent calls will return nil.
// This method is thread-safe.
func (c *Conn) Close() error {
var err error
c.closeOnce.Do(func() {
err1 := c.Conn.WriteControl(
ws.CloseMessage,
ws.FormatCloseMessage(ws.CloseNormalClosure, "closed"),
time.Now().Add(GracefulCloseTimeout),
)
err2 := c.Conn.Close()
switch {
case err1 != nil:
err = err1
case err2 != nil:
err = err2
}
})
return err
}
func (c *Conn) LocalAddr() net.Addr {
return NewAddrWithScheme(c.Conn.LocalAddr().String(), c.secure)
}
func (c *Conn) RemoteAddr() net.Addr {
return NewAddrWithScheme(c.Conn.RemoteAddr().String(), c.secure)
}
func (c *Conn) SetDeadline(t time.Time) error {
if err := c.SetReadDeadline(t); err != nil {
return err
}
return c.SetWriteDeadline(t)
}
func (c *Conn) SetReadDeadline(t time.Time) error {
// Don't lock when setting the read deadline. That would prevent us from
// interrupting an in-progress read.
return c.Conn.SetReadDeadline(t)
}
func (c *Conn) SetWriteDeadline(t time.Time) error {
// Unlike the read deadline, we need to lock when setting the write
// deadline.
c.writeLock.Lock()
defer c.writeLock.Unlock()
return c.Conn.SetWriteDeadline(t)
}
type capableConn struct {

View file

@ -1,20 +1,16 @@
package websocket
import (
"context"
"crypto/tls"
"fmt"
"math"
"net"
"net/http"
"net/url"
"strings"
"github.com/libp2p/go-libp2p/core/transport"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
ws "nhooyr.io/websocket"
)
type listener struct {
@ -27,7 +23,7 @@ type listener struct {
laddr ma.Multiaddr
closed chan struct{}
incoming chan net.Conn
incoming chan *Conn
}
func (pwma *parsedWebsocketMultiaddr) toMultiaddr() ma.Multiaddr {
@ -79,7 +75,7 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) {
ln := &listener{
nl: nl,
laddr: parsed.toMultiaddr(),
incoming: make(chan net.Conn),
incoming: make(chan *Conn),
closed: make(chan struct{}),
}
ln.server = http.Server{Handler: ln}
@ -100,38 +96,16 @@ func (l *listener) serve() {
}
func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) {
scheme := "ws"
if l.isWss {
scheme = "wss"
}
c, err := ws.Accept(w, r, &ws.AcceptOptions{
// Allow requests from *all* origins.
InsecureSkipVerify: true,
})
c, err := upgrader.Upgrade(w, r, nil)
if err != nil {
// The upgrader writes a response for us.
return
}
// Set an arbitrarily large read limit since we don't actually want to limit the message size here.
// See https://github.com/nhooyr/websocket/issues/382 for details.
c.SetReadLimit(math.MaxInt64 - 1) // -1 because the library adds a byte for the fin frame
select {
case l.incoming <- conn{
Conn: ws.NetConn(context.Background(), c, ws.MessageBinary),
localAddr: addrWrapper{&url.URL{
Host: r.Context().Value(http.LocalAddrContextKey).(net.Addr).String(),
Scheme: scheme,
}},
remoteAddr: addrWrapper{&url.URL{
Host: r.RemoteAddr,
Scheme: scheme,
}},
}:
case l.incoming <- NewConn(c, l.isWss):
case <-l.closed:
c.Close(ws.StatusNormalClosure, "closed")
c.Close()
}
// The connection has been hijacked, it's safe to return.
}
@ -140,7 +114,7 @@ func (l *listener) Accept() (manet.Conn, error) {
select {
case c, ok := <-l.incoming:
if !ok {
return nil, fmt.Errorf("listener is closed")
return nil, transport.ErrListenerClosed
}
mnc, err := manet.WrapNetConn(c)
@ -151,7 +125,7 @@ func (l *listener) Accept() (manet.Conn, error) {
return mnc, nil
case <-l.closed:
return nil, fmt.Errorf("listener is closed")
return nil, transport.ErrListenerClosed
}
}
@ -163,6 +137,9 @@ func (l *listener) Close() error {
l.server.Close()
err := l.nl.Close()
<-l.closed
if strings.Contains(err.Error(), "use of closed network connection") {
return transport.ErrListenerClosed
}
return err
}

View file

@ -4,11 +4,8 @@ package websocket
import (
"context"
"crypto/tls"
"fmt"
"math"
"net"
"net/http"
"net/url"
"time"
"github.com/libp2p/go-libp2p/core/network"
@ -19,7 +16,7 @@ import (
mafmt "github.com/multiformats/go-multiaddr-fmt"
manet "github.com/multiformats/go-multiaddr/net"
ws "nhooyr.io/websocket"
ws "github.com/gorilla/websocket"
)
// WsFmt is multiaddr formatter for WsProtocol
@ -53,6 +50,14 @@ func init() {
manet.RegisterToNetAddr(ConvertWebsocketMultiaddrToNetAddr, "wss")
}
// Default gorilla upgrader
var upgrader = ws.Upgrader{
// Allow requests from *all* origins.
CheckOrigin: func(r *http.Request) bool {
return true
},
}
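CheckOrigin returning true mirrors the nhooyr InsecureSkipVerify option it replaces: the transport accepts upgrades from any Origin, since libp2p authenticates peers with its own security handshake rather than browser origin checks. For contrast, a hedged sketch of what a stricter, application-level upgrader could look like (hypothetical, not part of this change):

package main

import (
	"net/http"

	ws "github.com/gorilla/websocket"
)

// restrictedUpgrader only accepts upgrades from one known web origin.
var restrictedUpgrader = ws.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return r.Header.Get("Origin") == "https://app.example.com"
	},
}

func handler(w http.ResponseWriter, r *http.Request) {
	c, err := restrictedUpgrader.Upgrade(w, r, nil)
	if err != nil {
		return // Upgrade already wrote the error response
	}
	defer c.Close()
}

func main() {
	http.HandleFunc("/ws", handler)
	_ = http.ListenAndServe(":8080", nil)
}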
type Option func(*WebsocketTransport) error
// WithTLSClientConfig sets a TLS client configuration on the WebSocket Dialer. Only
@ -182,26 +187,7 @@ func (t *WebsocketTransport) maDial(ctx context.Context, raddr ma.Multiaddr) (ma
return nil, err
}
isWss := wsurl.Scheme == "wss"
wsurlCopy := *wsurl
remoteAddr := addrWrapper{URL: &wsurlCopy}
localAddrChan := make(chan addrWrapper, 1)
transport := &http.Transport{
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
conn, err := net.Dial(network, addr)
if err != nil {
close(localAddrChan)
return nil, err
}
localAddrChan <- addrWrapper{URL: &url.URL{Host: conn.LocalAddr().String(), Scheme: wsurl.Scheme}}
return conn, nil
},
}
dialer := http.Client{
Timeout: 30 * time.Second,
Transport: transport,
}
dialer := ws.Dialer{HandshakeTimeout: 30 * time.Second}
if isWss {
sni := ""
sni, err = raddr.ValueForProtocol(ma.P_SNI)
@ -212,55 +198,31 @@ func (t *WebsocketTransport) maDial(ctx context.Context, raddr ma.Multiaddr) (ma
if sni != "" {
copytlsClientConf := t.tlsClientConf.Clone()
copytlsClientConf.ServerName = sni
transport.TLSClientConfig = copytlsClientConf
dialer.TLSClientConfig = copytlsClientConf
ipAddr := wsurl.Host
// Setting the Dial because we already have the resolved IP address, so we don't want to do another resolution.
// Setting the NetDial because we already have the resolved IP address, so we don't want to do another resolution.
// We set the `.Host` to the sni field so that the host header gets properly set.
transport.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) {
dialer.NetDial = func(network, address string) (net.Conn, error) {
tcpAddr, err := net.ResolveTCPAddr(network, ipAddr)
if err != nil {
close(localAddrChan)
return nil, err
}
conn, err := net.DialTCP("tcp", nil, tcpAddr)
if err != nil {
close(localAddrChan)
return nil, err
}
localAddrChan <- addrWrapper{URL: &url.URL{Host: conn.LocalAddr().String(), Scheme: wsurl.Scheme}}
return conn, nil
return net.DialTCP("tcp", nil, tcpAddr)
}
wsurl.Host = sni + ":" + wsurl.Port()
} else {
transport.TLSClientConfig = t.tlsClientConf
dialer.TLSClientConfig = t.tlsClientConf
}
}
wscon, _, err := ws.Dial(ctx, wsurl.String(), &ws.DialOptions{
HTTPClient: &dialer,
})
wscon, _, err := dialer.DialContext(ctx, wsurl.String(), nil)
if err != nil {
return nil, err
}
// We need the local address of this connection, and afaict there's no other
// way of getting it besides hooking into the dial context func.
localAdddr, ok := <-localAddrChan
if !ok {
wscon.Close(ws.StatusNormalClosure, "closed. no local address")
return nil, fmt.Errorf("failed to get local address")
}
// Set an arbitrarily large read limit since we don't actually want to limit the message size here.
wscon.SetReadLimit(math.MaxInt64 - 1) // -1 because the library adds a byte for the fin frame
mnc, err := manet.WrapNetConn(
conn{
Conn: ws.NetConn(context.Background(), wscon, ws.MessageBinary),
localAddr: localAdddr,
remoteAddr: remoteAddr,
})
mnc, err := manet.WrapNetConn(NewConn(wscon, isWss))
if err != nil {
wscon.Close(ws.StatusNormalClosure, "closed. err")
wscon.Close()
return nil, err
}
return mnc, nil
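The removed nhooyr-era code threaded a localAddrChan through http.Transport.DialContext purely to learn the connection's local address; gorilla's *ws.Conn exposes LocalAddr and RemoteAddr directly (wrapped by NewConn), which is what lets all of that machinery go. A compact sketch of the surviving dial pattern, pre-resolved IP plus SNI override (function name, parameters, and the port 443 are illustrative):

package wsdial

import (
	"crypto/tls"
	"net"
	"time"

	ws "github.com/gorilla/websocket"
)

// dialWSS dials a wss endpoint whose IP is already resolved, sending sni as
// both the TLS server name and the Host header, the same shape as above.
func dialWSS(resolvedHostPort, sni string) (*ws.Conn, error) {
	d := ws.Dialer{
		HandshakeTimeout: 30 * time.Second,
		TLSClientConfig:  &tls.Config{ServerName: sni},
		// NetDial skips a second DNS resolution: we already hold the IP.
		NetDial: func(network, _ string) (net.Conn, error) {
			return net.Dial(network, resolvedHostPort)
		},
	}
	c, _, err := d.Dial("wss://"+sni+":443", nil)
	return c, err
}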


@ -30,12 +30,12 @@ type conn struct {
transport *transport
session *webtransport.Session
scope network.ConnScope
scope network.ConnManagementScope
}
var _ tpt.CapableConn = &conn{}
func newConn(tr *transport, sess *webtransport.Session, sconn *connSecurityMultiaddrs, scope network.ConnScope) *conn {
func newConn(tr *transport, sess *webtransport.Session, sconn *connSecurityMultiaddrs, scope network.ConnManagementScope) *conn {
return &conn{
connSecurityMultiaddrs: sconn,
transport: tr,
@ -68,6 +68,7 @@ func (c *conn) allowWindowIncrease(size uint64) bool {
// It must be called even if the peer closed the connection in order for
// garbage collection to properly work in this package.
func (c *conn) Close() error {
c.scope.Done()
c.transport.removeConn(c.session)
return c.session.CloseWithError(0, "")
}


@ -18,8 +18,6 @@ import (
"github.com/quic-go/webtransport-go"
)
var errClosed = errors.New("closed")
const queueLen = 16
const handshakeTimeout = 10 * time.Second
@ -155,7 +153,7 @@ func (l *listener) httpHandlerWithConnScope(w http.ResponseWriter, r *http.Reque
func (l *listener) Accept() (tpt.CapableConn, error) {
select {
case <-l.ctx.Done():
return nil, errClosed
return nil, tpt.ErrListenerClosed
case c := <-l.queue:
return c, nil
}


@ -1,3 +1,3 @@
{
"version": "v0.27.3"
"version": "v0.28.1"
}


@ -29,10 +29,10 @@ type NAT interface {
GetInternalAddress() (addr net.IP, err error)
// AddPortMapping maps a port on the local host to an external port.
AddPortMapping(protocol string, internalPort int, description string, timeout time.Duration) (mappedExternalPort int, err error)
AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (mappedExternalPort int, err error)
// DeletePortMapping removes a port mapping.
DeletePortMapping(protocol string, internalPort int) (err error)
DeletePortMapping(ctx context.Context, protocol string, internalPort int) (err error)
}
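A sketch of a mapping round trip against the new context-aware interface; the protocol, port 4001, description, and lease duration are illustrative, and the import path is taken from the vendored tree below:

package main

import (
	"context"
	"fmt"
	"time"

	nat "github.com/libp2p/go-nat"
)

// Discovery and each mapping call now honor a caller-supplied context,
// so the whole round trip can be bounded by one deadline.
func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	gw, err := nat.DiscoverGateway(ctx)
	if err != nil {
		fmt.Println("no NAT gateway:", err)
		return
	}
	ext, err := gw.AddPortMapping(ctx, "tcp", 4001, "libp2p", 30*time.Minute)
	if err != nil {
		fmt.Println("mapping failed:", err)
		return
	}
	fmt.Println("mapped to external port", ext)
	_ = gw.DeletePortMapping(ctx, "tcp", 4001)
}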
// DiscoverNATs returns all NATs discovered in the network.
@ -118,7 +118,8 @@ func DiscoverGateway(ctx context.Context) (NAT, error) {
return bestNAT, nil
}
var random = rand.New(rand.NewSource(time.Now().UnixNano()))
func randomPort() int {
rand.Seed(time.Now().UnixNano())
return rand.Intn(math.MaxUint16-10000) + 10000
return random.Intn(math.MaxUint16-10000) + 10000
}
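This replaces re-seeding the global source on every call (math/rand's Seed is deprecated as of Go 1.20) with a single package-level seeded source. One caveat worth noting: a source created with rand.New, unlike the global one, is not safe for concurrent use, so if randomPort were ever called from multiple goroutines it would need a lock. A sketch of that variant, under that assumption:

package natutil

import (
	"math"
	"math/rand"
	"sync"
	"time"
)

var (
	randMu sync.Mutex
	random = rand.New(rand.NewSource(time.Now().UnixNano()))
)

// randomPort picks a port in [10000, 65535); the mutex makes the shared
// *rand.Rand safe under concurrency.
func randomPort() int {
	randMu.Lock()
	defer randMu.Unlock()
	return random.Intn(math.MaxUint16-10000) + 10000
}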


@ -5,7 +5,7 @@ import (
"net"
"time"
"github.com/jackpal/go-nat-pmp"
natpmp "github.com/jackpal/go-nat-pmp"
)
var (
@ -95,7 +95,7 @@ func (n *natpmpNAT) GetExternalAddress() (addr net.IP, err error) {
return net.IPv4(d[0], d[1], d[2], d[3]), nil
}
func (n *natpmpNAT) AddPortMapping(protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
func (n *natpmpNAT) AddPortMapping(_ context.Context, protocol string, internalPort int, _ string, timeout time.Duration) (int, error) {
var (
err error
)
@ -122,7 +122,7 @@ func (n *natpmpNAT) AddPortMapping(protocol string, internalPort int, descriptio
return 0, err
}
func (n *natpmpNAT) DeletePortMapping(protocol string, internalPort int) (err error) {
func (n *natpmpNAT) DeletePortMapping(_ context.Context, _ string, internalPort int) (err error) {
delete(n.ports, internalPort)
return nil
}


@ -14,9 +14,7 @@ import (
"github.com/koron/go-ssdp"
)
var (
_ NAT = (*upnp_NAT)(nil)
)
var _ NAT = (*upnp_NAT)(nil)
func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
res := make(chan NAT)
@ -24,7 +22,7 @@ func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
defer close(res)
// find devices
devs, err := goupnp.DiscoverDevices(internetgateway1.URN_WANConnectionDevice_1)
devs, err := goupnp.DiscoverDevicesCtx(ctx, internetgateway1.URN_WANConnectionDevice_1)
if err != nil {
return
}
@ -45,7 +43,7 @@ func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
_, isNat, err := client.GetNATRSIPStatus()
_, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-IP1)", dev.Root}:
@ -59,7 +57,7 @@ func discoverUPNP_IG1(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
_, isNat, err := client.GetNATRSIPStatus()
_, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-PPP1)", dev.Root}:
@ -81,7 +79,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
defer close(res)
// find devices
devs, err := goupnp.DiscoverDevices(internetgateway2.URN_WANConnectionDevice_2)
devs, err := goupnp.DiscoverDevicesCtx(ctx, internetgateway2.URN_WANConnectionDevice_2)
if err != nil {
return
}
@ -102,7 +100,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
_, isNat, err := client.GetNATRSIPStatus()
_, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG2-IP1)", dev.Root}:
@ -116,7 +114,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
_, isNat, err := client.GetNATRSIPStatus()
_, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG2-IP2)", dev.Root}:
@ -130,7 +128,7 @@ func discoverUPNP_IG2(ctx context.Context) <-chan NAT {
RootDevice: dev.Root,
Service: srv,
}}
_, isNat, err := client.GetNATRSIPStatus()
_, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG2-PPP1)", dev.Root}:
@ -167,7 +165,7 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
if err != nil {
return
}
RootDevice, err := goupnp.DeviceByURL(DeviceURL)
RootDevice, err := goupnp.DeviceByURLCtx(ctx, DeviceURL)
if err != nil {
return
}
@ -183,7 +181,7 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
RootDevice: RootDevice,
Service: srv,
}}
_, isNat, err := client.GetNATRSIPStatus()
_, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-IP1)", RootDevice}:
@ -197,7 +195,7 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
RootDevice: RootDevice,
Service: srv,
}}
_, isNat, err := client.GetNATRSIPStatus()
_, isNat, err := client.GetNATRSIPStatusCtx(ctx)
if err == nil && isNat {
select {
case res <- &upnp_NAT{client, make(map[int]int), "UPNP (IG1-PPP1)", RootDevice}:
@ -213,8 +211,8 @@ func discoverUPNP_GenIGDev(ctx context.Context) <-chan NAT {
type upnp_NAT_Client interface {
GetExternalIPAddress() (string, error)
AddPortMapping(string, uint16, string, uint16, string, bool, string, uint32) error
DeletePortMapping(string, uint16, string) error
AddPortMappingCtx(context.Context, string, uint16, string, uint16, string, bool, string, uint32) error
DeletePortMappingCtx(context.Context, string, uint16, string) error
}
type upnp_NAT struct {
@ -249,7 +247,7 @@ func mapProtocol(s string) string {
}
}
func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
func (u *upnp_NAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
ip, err := u.GetInternalAddress()
if err != nil {
return 0, nil
@ -258,7 +256,7 @@ func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description
timeoutInSeconds := uint32(timeout / time.Second)
if externalPort := u.ports[internalPort]; externalPort > 0 {
err = u.c.AddPortMapping("", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
if err == nil {
return externalPort, nil
}
@ -266,7 +264,7 @@ func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description
for i := 0; i < 3; i++ {
externalPort := randomPort()
err = u.c.AddPortMapping("", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
if err == nil {
u.ports[internalPort] = externalPort
return externalPort, nil
@ -276,10 +274,10 @@ func (u *upnp_NAT) AddPortMapping(protocol string, internalPort int, description
return 0, err
}
func (u *upnp_NAT) DeletePortMapping(protocol string, internalPort int) error {
func (u *upnp_NAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error {
if externalPort := u.ports[internalPort]; externalPort > 0 {
delete(u.ports, internalPort)
return u.c.DeletePortMapping("", uint16(externalPort), mapProtocol(protocol))
return u.c.DeletePortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol))
}
return nil

vendor/github.com/libp2p/go-nat/version.json generated vendored Normal file

@ -0,0 +1,3 @@
{
"version": "v0.2.0"
}


@ -1,14 +1,13 @@
# go-reuseport
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
[![GoDoc](https://godoc.org/github.com/libp2p/go-reuseport?status.svg)](https://godoc.org/github.com/libp2p/go-reuseport)
[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23libp2p)
[![codecov](https://codecov.io/gh/libp2p/go-reuseport/branch/master/graph/badge.svg)](https://codecov.io/gh/libp2p/go-reuseport)
[![Travis CI](https://travis-ci.org/libp2p/go-reuseport.svg?branch=master)](https://travis-ci.org/libp2p/go-reuseport)
[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
**NOTE:** This package REQUIRES go >= 1.11.
This package enables listening and dialing from _the same_ TCP or UDP port.
This means that the following sockopts may be set:
@ -17,8 +16,6 @@ SO_REUSEADDR
SO_REUSEPORT
```
- godoc: https://godoc.org/github.com/libp2p/go-reuseport
This is a simple package to help with address reuse. This is particularly
important when attempting to do TCP NAT holepunching, which requires a process
to both Listen and Dial on the same TCP port. This package provides some


@ -1,5 +1,4 @@
//go:build !plan9 && !windows && !wasm
// +build !plan9,!windows,!wasm
package reuseport
@ -9,18 +8,16 @@ import (
"golang.org/x/sys/unix"
)
func Control(network, address string, c syscall.RawConn) error {
var err error
c.Control(func(fd uintptr) {
func Control(network, address string, c syscall.RawConn) (err error) {
controlErr := c.Control(func(fd uintptr) {
err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
if err != nil {
return
}
err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
if err != nil {
return
}
})
return err
if controlErr != nil {
err = controlErr
}
return
}


@ -1,5 +1,4 @@
//go:build wasm
// +build wasm
package reuseport


@ -7,7 +7,11 @@ import (
)
func Control(network, address string, c syscall.RawConn) (err error) {
return c.Control(func(fd uintptr) {
controlErr := c.Control(func(fd uintptr) {
err = windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_REUSEADDR, 1)
})
if controlErr != nil {
err = controlErr
}
return
}


@ -4,14 +4,14 @@
//
// For example:
//
// // listen on the same port. oh yeah.
// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
// l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
// // listen on the same port. oh yeah.
// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
// l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
//
// // dial from the same port. oh yeah.
// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
// l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
// c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
// // dial from the same port. oh yeah.
// l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
// l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
// c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
//
// Note: can't dial self because tcp/ip stacks use 4-tuples to identify connections,
// and doing so would clash.
@ -21,6 +21,7 @@ import (
"context"
"fmt"
"net"
"time"
)
// Available returns whether or not SO_REUSEPORT or equivalent behaviour is
@ -47,10 +48,17 @@ func ListenPacket(network, address string) (net.PacketConn, error) {
return listenConfig.ListenPacket(context.Background(), network, address)
}
// Dial dials the given network and address. see net.Dialer.Dial
// Dial dials the given network and address. see net.Dial
// Returns a net.Conn created from a file descriptor for a socket
// with SO_REUSEPORT and SO_REUSEADDR option set.
func Dial(network, laddr, raddr string) (net.Conn, error) {
return DialTimeout(network, laddr, raddr, time.Duration(0))
}
// DialTimeout dials the given network and address, with the given timeout. see
// net.DialTimeout Returns a net.Conn created from a file descriptor for
// a socket with SO_REUSEPORT and SO_REUSEADDR option set.
func DialTimeout(network, laddr, raddr string, timeout time.Duration) (net.Conn, error) {
nla, err := ResolveAddr(network, laddr)
if err != nil {
return nil, fmt.Errorf("failed to resolve local addr: %w", err)
@ -58,6 +66,7 @@ func Dial(network, laddr, raddr string) (net.Conn, error) {
d := net.Dialer{
Control: Control,
LocalAddr: nla,
Timeout: timeout,
}
return d.Dial(network, raddr)
}
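The new DialTimeout mirrors the package's doc-comment example while bounding how long the underlying net.Dialer may take. A runnable sketch (the loopback ports are illustrative, following the example above):

package main

import (
	"fmt"
	"time"

	reuse "github.com/libp2p/go-reuseport"
)

func main() {
	// Two listeners on fixed ports, as in the package example.
	l1, err := reuse.Listen("tcp", "127.0.0.1:1234")
	if err != nil {
		panic(err)
	}
	defer l1.Close()
	l2, err := reuse.Listen("tcp", "127.0.0.1:1235")
	if err != nil {
		panic(err)
	}
	defer l2.Close()

	// Dial from l1's port to l2's port, now with a 5s upper bound.
	c, err := reuse.DialTimeout("tcp", "127.0.0.1:1234", "127.0.0.1:1235", 5*time.Second)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer c.Close()
	fmt.Println("dialed from", c.LocalAddr(), "to", c.RemoteAddr())
}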


@ -1,3 +1,3 @@
{
"version": "v0.2.0"
"version": "v0.3.0"
}


@ -6,7 +6,6 @@ import (
"context"
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strings"
@ -56,14 +55,20 @@ type Client struct {
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
// Client.Dialer) or context.Context.Deadline (see ExchangeContext)
Timeout time.Duration
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
Timeout time.Duration
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
// SingleInflight previously serialised multiple concurrent queries for the
// same Qname, Qtype and Qclass to ensure only one would be in flight at a
// time.
//
// Deprecated: This is a no-op. Callers should implement their own in flight
// query caching if needed. See github.com/miekg/dns/issues/1449.
SingleInflight bool
}
// Exchange performs a synchronous UDP query. It sends the message m to the address
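Since SingleInflight is now a no-op, the deprecation note points callers at implementing their own in-flight suppression. One possible caller-side replacement using golang.org/x/sync/singleflight; the helper name is ours, and the key deliberately mirrors what the removed code used (Qname, Qtype, Qclass):

package dnsdedup

import (
	"fmt"

	"github.com/miekg/dns"
	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

// exchangeDeduped collapses concurrent identical queries into one exchange.
func exchangeDeduped(c *dns.Client, m *dns.Msg, addr string) (*dns.Msg, error) {
	q := m.Question[0]
	key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
	v, err, shared := group.Do(key, func() (interface{}, error) {
		r, _, err := c.Exchange(m, addr)
		return r, err
	})
	if err != nil {
		return nil, err
	}
	r := v.(*dns.Msg)
	if shared {
		r = r.Copy() // don't hand the same *Msg to multiple callers
	}
	return r, nil
}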
@ -185,26 +190,7 @@ func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration
return c.exchangeWithConnContext(context.Background(), m, conn)
}
func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
return c.exchangeContext(ctx, m, conn)
}
q := m.Question[0]
key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
// When we're doing singleflight we don't want one context cancellation, cancel _all_ outstanding queries.
// Hence we ignore the context and use Background().
return c.exchangeContext(context.Background(), m, conn)
})
if r != nil && shared {
r = r.Copy()
}
return r, rtt, err
}
func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {


@ -272,18 +272,24 @@ func IsMsg(buf []byte) error {
// IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool {
s2 := strings.TrimSuffix(s, ".")
if s == s2 {
// Check for (and remove) a trailing dot, returning if there isn't one.
if s == "" || s[len(s)-1] != '.' {
return false
}
s = s[:len(s)-1]
i := strings.LastIndexFunc(s2, func(r rune) bool {
// If we don't have an escape sequence before the final dot, we know it's
// fully qualified and can return here.
if s == "" || s[len(s)-1] != '\\' {
return true
}
// Otherwise we have to check if the dot is escaped or not by checking if
// there are an odd or even number of escape sequences before the dot.
i := strings.LastIndexFunc(s, func(r rune) bool {
return r != '\\'
})
// Test whether we have an even number of escape sequences before
// the dot or none.
return (len(s2)-i)%2 != 0
return (len(s)-i)%2 != 0
}
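Worked examples of the escape-aware check above; the printed values follow directly from the new logic:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.IsFqdn("example.org."))   // true:  plain trailing dot
	fmt.Println(dns.IsFqdn("example.org"))    // false: no trailing dot
	fmt.Println(dns.IsFqdn(`example.org\.`))  // false: the dot is escaped data
	fmt.Println(dns.IsFqdn(`example.org\\.`)) // true:  escaped backslash, real dot
	fmt.Println(dns.IsFqdn(""))               // false
}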
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.

vendor/github.com/miekg/dns/scan.go generated vendored

@ -10,13 +10,13 @@ import (
"strings"
)
const maxTok = 2048 // Largest token we can return.
const maxTok = 512 // Token buffer start size, and growth size amount.
// The maximum depth of $INCLUDE directives supported by the
// ZoneParser API.
const maxIncludeDepth = 7
// Tokinize a RFC 1035 zone file. The tokenizer will normalize it:
// Tokenize a RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (_NEWLINE is sent as last)
@ -765,8 +765,8 @@ func (zl *zlexer) Next() (lex, bool) {
}
var (
str [maxTok]byte // Hold string text
com [maxTok]byte // Hold comment text
str = make([]byte, maxTok) // Hold string text
com = make([]byte, maxTok) // Hold comment text
stri int // Offset in str (0 means empty)
comi int // Offset in com (0 means empty)
@ -785,14 +785,12 @@ func (zl *zlexer) Next() (lex, bool) {
l.line, l.column = zl.line, zl.column
if stri >= len(str) {
l.token = "token length insufficient for parsing"
l.err = true
return *l, true
// if buffer length is insufficient, increase it.
str = append(str[:], make([]byte, maxTok)...)
}
if comi >= len(com) {
l.token = "comment length insufficient for parsing"
l.err = true
return *l, true
// if buffer length is insufficient, increase it.
com = append(com[:], make([]byte, maxTok)...)
}
switch x {
@ -816,7 +814,7 @@ func (zl *zlexer) Next() (lex, bool) {
if stri == 0 {
// Space directly in the beginning, handled in the grammar
} else if zl.owner {
// If we have a string and its the first, make it an owner
// If we have a string and it's the first, make it an owner
l.value = zOwner
l.token = string(str[:stri])


@ -904,11 +904,18 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank
l, _ = c.Next()
i, e := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err {
if l.err {
return &ParseError{"", "bad RRSIG Algorithm", l}
}
rr.Algorithm = uint8(i)
i, e := strconv.ParseUint(l.token, 10, 8)
rr.Algorithm = uint8(i) // if 0 we'll check the mnemonic in the if
if e != nil {
v, ok := StringToAlgorithm[l.token]
if !ok {
return &ParseError{"", "bad RRSIG Algorithm", l}
}
rr.Algorithm = v
}
c.Next() // zBlank
l, _ = c.Next()


@ -1,61 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted for dns package usage by Miek Gieben.
package dns
import "sync"
import "time"
// call is an in-flight or completed singleflight.Do call
type call struct {
wg sync.WaitGroup
val *Msg
rtt time.Duration
err error
dups int
}
// singleflight represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type singleflight struct {
sync.Mutex // protects m
m map[string]*call // lazily initialized
dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
g.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
c.dups++
g.Unlock()
c.wg.Wait()
return c.val, c.rtt, c.err, true
}
c := new(call)
c.wg.Add(1)
g.m[key] = c
g.Unlock()
c.val, c.rtt, c.err = fn()
c.wg.Done()
if !g.dontDeleteForTesting {
g.Lock()
delete(g.m, key)
g.Unlock()
}
return c.val, c.rtt, c.err, c.dups > 0
}


@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
var Version = v{1, 1, 53}
var Version = v{1, 1, 54}
// v holds the version of this library.
type v struct {


@ -23,6 +23,11 @@ import (
"github.com/klauspost/cpuid/v2"
)
var (
hasIntelSha = runtime.GOARCH == "amd64" && cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4)
hasAvx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
)
func hasArmSha2() bool {
if cpuid.CPU.Has(cpuid.SHA2) {
return true
@ -42,5 +47,4 @@ func hasArmSha2() bool {
return false
}
return bytes.Contains(cpuInfo, []byte(sha256Feature))
}


@ -19,10 +19,8 @@ package sha256
import (
"crypto/sha256"
"encoding/binary"
"errors"
"hash"
"runtime"
"github.com/klauspost/cpuid/v2"
)
// Size - The size of a SHA256 checksum in bytes.
@ -68,42 +66,34 @@ func (d *digest) Reset() {
type blockfuncType int
const (
blockfuncGeneric blockfuncType = iota
blockfuncSha blockfuncType = iota
blockfuncArm blockfuncType = iota
blockfuncStdlib blockfuncType = iota
blockfuncIntelSha
blockfuncArmSha2
blockfuncForceGeneric = -1
)
var blockfunc blockfuncType
func init() {
blockfunc = blockfuncGeneric
switch {
case hasSHAExtensions():
blockfunc = blockfuncSha
case hasIntelSha:
blockfunc = blockfuncIntelSha
case hasArmSha2():
blockfunc = blockfuncArm
default:
blockfunc = blockfuncGeneric
blockfunc = blockfuncArmSha2
}
}
var avx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, cpuid.AVX512VL)
// hasSHAExtensions return whether the cpu supports SHA extensions.
func hasSHAExtensions() bool {
return cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, cpuid.SSE4) && runtime.GOARCH == "amd64"
}
// New returns a new hash.Hash computing the SHA256 checksum.
func New() hash.Hash {
if blockfunc != blockfuncGeneric {
d := new(digest)
d.Reset()
return d
if blockfunc == blockfuncStdlib {
// Fallback to the standard golang implementation
// if no features were found.
return sha256.New()
}
// Fallback to the standard golang implementation
// if no features were found.
return sha256.New()
d := new(digest)
d.Reset()
return d
}
// Sum256 - single caller sha256 helper
@ -272,11 +262,11 @@ func (d *digest) checkSum() (digest [Size]byte) {
}
func block(dig *digest, p []byte) {
if blockfunc == blockfuncSha {
blockShaGo(dig, p)
} else if blockfunc == blockfuncArm {
blockArmGo(dig, p)
} else if blockfunc == blockfuncGeneric {
if blockfunc == blockfuncIntelSha {
blockIntelShaGo(dig, p)
} else if blockfunc == blockfuncArmSha2 {
blockArmSha2Go(dig, p)
} else {
blockGeneric(dig, p)
}
}
@ -397,3 +387,82 @@ var _K = []uint32{
0xbef9a3f7,
0xc67178f2,
}
const (
magic256 = "sha\x03"
marshaledSize = len(magic256) + 8*4 + chunk + 8
)
func (d *digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic256...)
b = appendUint32(b, d.h[0])
b = appendUint32(b, d.h[1])
b = appendUint32(b, d.h[2])
b = appendUint32(b, d.h[3])
b = appendUint32(b, d.h[4])
b = appendUint32(b, d.h[5])
b = appendUint32(b, d.h[6])
b = appendUint32(b, d.h[7])
b = append(b, d.x[:d.nx]...)
b = b[:len(b)+len(d.x)-d.nx] // already zero
b = appendUint64(b, d.len)
return b, nil
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 {
return errors.New("crypto/sha256: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/sha256: invalid hash state size")
}
b = b[len(magic256):]
b, d.h[0] = consumeUint32(b)
b, d.h[1] = consumeUint32(b)
b, d.h[2] = consumeUint32(b)
b, d.h[3] = consumeUint32(b)
b, d.h[4] = consumeUint32(b)
b, d.h[5] = consumeUint32(b)
b, d.h[6] = consumeUint32(b)
b, d.h[7] = consumeUint32(b)
b = b[copy(d.x[:], b):]
b, d.len = consumeUint64(b)
d.nx = int(d.len % chunk)
return nil
}
func appendUint32(b []byte, v uint32) []byte {
return append(b,
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func appendUint64(b []byte, v uint64) []byte {
return append(b,
byte(v>>56),
byte(v>>48),
byte(v>>40),
byte(v>>32),
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func consumeUint64(b []byte) ([]byte, uint64) {
_ = b[7]
x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
return b[8:], x
}
func consumeUint32(b []byte) ([]byte, uint32) {
_ = b[3]
x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
return b[4:], x
}
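A sketch of what the new MarshalBinary/UnmarshalBinary enable, assuming the vendored version above: checkpointing a hash mid-stream and resuming later, matching crypto/sha256's behavior. Note that New() only returns this package's digest when a SHA extension is detected; otherwise it returns the stdlib hash, which also implements the same interfaces, so the assertion holds either way.

package main

import (
	"encoding"
	"fmt"

	sha256 "github.com/minio/sha256-simd"
)

func main() {
	h := sha256.New()
	h.Write([]byte("hello "))

	// Snapshot the internal state after the first chunk of input.
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Resume from the snapshot in a fresh hash and finish the input.
	h2 := sha256.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h2.Write([]byte("world"))
	fmt.Printf("%x\n", h2.Sum(nil)) // same digest as hashing "hello world" at once
}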


@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.


@ -1,4 +1,4 @@
//+build !noasm,!appengine
//+build !noasm,!appengine,gc
TEXT ·sha256X16Avx512(SB), 7, $0
MOVQ digests+0(FP), DI


@ -1,6 +0,0 @@
//+build !noasm,!appengine,gc
package sha256
//go:noescape
func blockSha(h *[8]uint32, message []uint8)


@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
@ -18,10 +19,13 @@
package sha256
func blockArmGo(dig *digest, p []byte) {
panic("blockArmGo called unexpectedly")
func blockArmSha2Go(dig *digest, p []byte) {
panic("blockArmSha2Go called unexpectedly")
}
func blockShaGo(dig *digest, p []byte) {
blockSha(&dig.h, p)
//go:noescape
func blockIntelSha(h *[8]uint32, message []uint8)
func blockIntelShaGo(dig *digest, p []byte) {
blockIntelSha(&dig.h, p)
}


@ -1,4 +1,4 @@
//+build !noasm,!appengine
//+build !noasm,!appengine,gc
// SHA intrinsic version of SHA256
@ -106,7 +106,7 @@ GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16
// X13 saved hash state // CDGH
// X15 data shuffle mask (constant)
TEXT ·blockSha(SB), NOSPLIT, $0-32
TEXT ·blockIntelSha(SB), NOSPLIT, $0-32
MOVQ h+0(FP), DX
MOVQ message_base+8(FP), SI
MOVQ message_len+16(FP), DI


@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
@ -18,18 +19,18 @@
package sha256
func blockShaGo(dig *digest, p []byte) {
panic("blockShaGoc called unexpectedly")
func blockIntelShaGo(dig *digest, p []byte) {
panic("blockIntelShaGo called unexpectedly")
}
//go:noescape
func blockArm(h []uint32, message []uint8)
func blockArmSha2(h []uint32, message []uint8)
func blockArmGo(dig *digest, p []byte) {
func blockArmSha2Go(dig *digest, p []byte) {
h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]}
blockArm(h[:], p[:])
blockArmSha2(h[:], p[:])
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4],
h[5], h[6], h[7]


@ -1,4 +1,4 @@
//+build !noasm,!appengine
//+build !noasm,!appengine,gc
// ARM64 version of SHA256
@ -25,7 +25,7 @@
// their Plan9 equivalents
//
TEXT ·blockArm(SB), 7, $0
TEXT ·blockArmSha2(SB), 7, $0
MOVD h+0(FP), R0
MOVD message+24(FP), R1
MOVD message_len+32(FP), R2 // length of message


@ -1,4 +1,5 @@
//+build appengine noasm !amd64,!arm64 !gc
//go:build appengine || noasm || (!amd64 && !arm64) || !gc
// +build appengine noasm !amd64,!arm64 !gc
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
@ -18,11 +19,11 @@
package sha256
func blockShaGo(dig *digest, p []byte) {
panic("blockShaGo called unexpectedly")
func blockIntelShaGo(dig *digest, p []byte) {
panic("blockIntelShaGo called unexpectedly")
}
func blockArmGo(dig *digest, p []byte) {
panic("blockArmGo called unexpectedly")
func blockArmSha2Go(dig *digest, p []byte) {
panic("blockArmSha2Go called unexpectedly")
}

File diff suppressed because one or more lines are too long


@ -288,6 +288,9 @@ const (
// Bls12_381G1g2Pub is a draft code tagged "key" and described by: BLS12-381 concatenated public keys in both the G1 and G2 fields.
Bls12_381G1g2Pub Code = 0xee // bls12_381-g1g2-pub
// Sr25519Pub is a draft code tagged "key" and described by: Sr25519 public key.
Sr25519Pub Code = 0xef // sr25519-pub
// DashBlock is a draft code tagged "ipld" and described by: Dash Block.
DashBlock Code = 0xf0 // dash-block
@ -306,17 +309,20 @@ const (
// Udp is a draft code tagged "multiaddr".
Udp Code = 0x0111 // udp
// P2pWebrtcStar is a draft code tagged "multiaddr".
// P2pWebrtcStar is a deprecated code tagged "multiaddr" and described by: Use webrtc or webrtc-direct instead.
P2pWebrtcStar Code = 0x0113 // p2p-webrtc-star
// P2pWebrtcDirect is a draft code tagged "multiaddr".
// P2pWebrtcDirect is a deprecated code tagged "multiaddr" and described by: Use webrtc or webrtc-direct instead.
P2pWebrtcDirect Code = 0x0114 // p2p-webrtc-direct
// P2pStardust is a draft code tagged "multiaddr".
// P2pStardust is a deprecated code tagged "multiaddr".
P2pStardust Code = 0x0115 // p2p-stardust
// Webrtc is a draft code tagged "multiaddr" and described by: WebRTC.
Webrtc Code = 0x0118 // webrtc
// WebrtcDirect is a draft code tagged "multiaddr" and described by: ICE-lite webrtc transport with SDP munging during connection establishment and without use of a STUN server.
WebrtcDirect Code = 0x0118 // webrtc-direct
// Webrtc is a draft code tagged "multiaddr" and described by: webrtc transport where connection establishment is according to w3c spec.
Webrtc Code = 0x0119 // webrtc
// P2pCircuit is a permanent code tagged "multiaddr".
P2pCircuit Code = 0x0122 // p2p-circuit
@ -429,6 +435,9 @@ const (
// TransportGraphsyncFilecoinv1 is a draft code tagged "transport" and described by: Filecoin graphsync datatransfer.
TransportGraphsyncFilecoinv1 Code = 0x0910 // transport-graphsync-filecoinv1
// TransportIpfsGatewayHttp is a draft code tagged "transport" and described by: HTTP IPFS Gateway trustless datatransfer.
TransportIpfsGatewayHttp Code = 0x0920 // transport-ipfs-gateway-http
// Multidid is a draft code tagged "multiformat" and described by: Compact encoding for Decentralized Identifiers.
Multidid Code = 0x0d1d // multidid
@ -492,12 +501,27 @@ const (
// X25519Priv is a draft code tagged "key" and described by: Curve25519 private key.
X25519Priv Code = 0x1302 // x25519-priv
// Sr25519Priv is a draft code tagged "key" and described by: Sr25519 private key.
Sr25519Priv Code = 0x1303 // sr25519-priv
// RsaPriv is a draft code tagged "key" and described by: RSA private key.
RsaPriv Code = 0x1305 // rsa-priv
// P256Priv is a draft code tagged "key" and described by: P-256 private key.
P256Priv Code = 0x1306 // p256-priv
// P384Priv is a draft code tagged "key" and described by: P-384 private key.
P384Priv Code = 0x1307 // p384-priv
// P521Priv is a draft code tagged "key" and described by: P-521 private key.
P521Priv Code = 0x1308 // p521-priv
// Kangarootwelve is a draft code tagged "multihash" and described by: KangarooTwelve is an extendable-output hash function based on Keccak-p.
Kangarootwelve Code = 0x1d01 // kangarootwelve
// AesGcm256 is a draft code tagged "encryption" and described by: AES Galois/Counter Mode with 256-bit key and 12-byte IV.
AesGcm256 Code = 0x2000 // aes-gcm-256
// Silverpine is a draft code tagged "multiaddr" and described by: Experimental QUIC over yggdrasil and ironwood routing protocol.
Silverpine Code = 0x3f42 // silverpine
@ -1669,6 +1693,7 @@ var knownCodes = []Code{
X25519Pub,
Ed25519Pub,
Bls12_381G1g2Pub,
Sr25519Pub,
DashBlock,
DashTx,
SwarmManifest,
@ -1678,6 +1703,7 @@ var knownCodes = []Code{
P2pWebrtcStar,
P2pWebrtcDirect,
P2pStardust,
WebrtcDirect,
Webrtc,
P2pCircuit,
DagJson,
@ -1716,6 +1742,7 @@ var knownCodes = []Code{
CarMultihashIndexSorted,
TransportBitswap,
TransportGraphsyncFilecoinv1,
TransportIpfsGatewayHttp,
Multidid,
Sha2_256Trunc254Padded,
Sha2_224,
@ -1737,8 +1764,13 @@ var knownCodes = []Code{
Ed25519Priv,
Secp256k1Priv,
X25519Priv,
Sr25519Priv,
RsaPriv,
P256Priv,
P384Priv,
P521Priv,
Kangarootwelve,
AesGcm256,
Silverpine,
Sm3_256,
Blake2b8,
@ -2106,6 +2138,9 @@ func (c Code) Tag() string {
Cidv3:
return "cid"
case AesGcm256:
return "encryption"
case FilCommitmentUnsealed,
FilCommitmentSealed:
return "filecoin"
@ -2185,6 +2220,7 @@ func (c Code) Tag() string {
X25519Pub,
Ed25519Pub,
Bls12_381G1g2Pub,
Sr25519Pub,
P256Pub,
P384Pub,
P521Pub,
@ -2195,7 +2231,11 @@ func (c Code) Tag() string {
Ed25519Priv,
Secp256k1Priv,
X25519Priv,
Sr25519Priv,
RsaPriv,
P256Priv,
P384Priv,
P521Priv,
Jwk_jcsPub:
return "key"
@ -2219,6 +2259,7 @@ func (c Code) Tag() string {
P2pWebrtcStar,
P2pWebrtcDirect,
P2pStardust,
WebrtcDirect,
Webrtc,
P2pCircuit,
Udt,
@ -2638,7 +2679,8 @@ func (c Code) Tag() string {
return "softhash"
case TransportBitswap,
TransportGraphsyncFilecoinv1:
TransportGraphsyncFilecoinv1,
TransportIpfsGatewayHttp:
return "transport"
case Varsig,

Some files were not shown because too many files have changed in this diff.