Fix iconcache

The iconcache library needed to be upgraded to stay compatible with the
libraries required by the latest geth.
Andrea Maria Piana 2021-06-28 10:41:19 +02:00
parent d092a2bb49
commit c1db89e657
88 changed files with 5435 additions and 1453 deletions
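
In go.mod terms the upgrade amounts to a handful of version bumps; as a minimal sketch, the main require-block changes look like this, with the versions copied verbatim from the go.mod hunks below (nothing here goes beyond what the diff itself records):

    require (
        // iconcache dependency: besticon moves from the v3.12.0+incompatible tag
        // to a newer pseudo-version (see the go.mod hunk below).
        github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7
        // other direct bumps visible in the same diff:
        github.com/prometheus/client_golang v1.9.0
        golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb
    )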

go.mod (9 changed lines)

@ -11,7 +11,6 @@ replace github.com/docker/docker => github.com/docker/engine v1.4.2-0.2019071716
replace github.com/nfnt/resize => github.com/status-im/resize v0.0.0-20201215164250-7c6d9f0d3088
require (
github.com/PuerkitoBio/goquery v1.6.0 // indirect
github.com/beevik/ntp v0.2.0
github.com/btcsuite/btcd v0.22.0-beta
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
@ -21,10 +20,10 @@ require (
github.com/ethereum/go-ethereum v1.10.4
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/golang-migrate/migrate/v4 v4.8.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.4.1
github.com/golang/protobuf v1.5.2
github.com/google/uuid v1.2.0
github.com/gorilla/mux v1.7.3 // indirect
github.com/ipfs/go-log v1.0.4
github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
github.com/keighl/metabolize v0.0.0-20150915210303-97ab655d4034
@ -33,7 +32,7 @@ require (
github.com/lib/pq v1.9.0
github.com/libp2p/go-libp2p-core v0.8.5
github.com/lucasb-eyer/go-colorful v1.0.3
github.com/mat/besticon v3.12.0+incompatible
github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7
github.com/mattn/go-colorable v0.1.4 // indirect
github.com/mattn/go-isatty v0.0.10 // indirect
github.com/multiformats/go-multiaddr v0.3.2
@ -45,7 +44,7 @@ require (
github.com/oliamb/cutter v0.2.2
github.com/pborman/uuid v1.2.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.5.0
github.com/prometheus/client_golang v1.9.0
github.com/russolsen/ohyeah v0.0.0-20160324131710-f4938c005315 // indirect
github.com/russolsen/same v0.0.0-20160222130632-f089df61f51d // indirect
github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a
@ -64,7 +63,7 @@ require (
github.com/xeipuuv/gojsonschema v1.2.0
go.uber.org/zap v1.15.0
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5
golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb
golang.org/x/text v0.3.6
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
gopkg.in/go-playground/validator.v9 v9.31.0

go.sum (134 changed lines)

@ -47,13 +47,14 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/goquery v1.6.0 h1:j7taAbelrdcsOlGeMenZxc2AWXD5fieT1/znArdnx94=
github.com/PuerkitoBio/goquery v1.6.0/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/PuerkitoBio/goquery v1.6.1 h1:FgjbQZKl5HTmcn4sKBgvx8vv63nhyhIpv7lJpFGCWpk=
github.com/PuerkitoBio/goquery v1.6.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
@ -61,7 +62,9 @@ github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjl
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -72,8 +75,9 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/andybalholm/cascadia v1.2.0 h1:vuRCkM5Ozh/BfmsaTm26kbjm0mIOM3yS5Ek/F5h18aE=
github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
@ -82,7 +86,11 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo=
github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y=
github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo=
@ -127,6 +135,9 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@ -140,12 +151,14 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
@ -157,11 +170,14 @@ github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8Nz
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cruxic/go-hmac-drbg v0.0.0-20170206035330-84c46983886d h1:bE1UyBQ5aE6FjhNY4lbPtMqh7VDldoVkvZMtFEbd+CE=
github.com/cruxic/go-hmac-drbg v0.0.0-20170206035330-84c46983886d/go.mod h1:HAe1wsCrwH2uFnFaCC2vlcyEohnxs8KeShAFqGIHvmM=
@ -211,6 +227,7 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dvyukov/go-fuzz v0.0.0-20191022152526-8cb203812681/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
@ -223,6 +240,7 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.0.0-20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@ -235,6 +253,8 @@ github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@ -250,11 +270,13 @@ github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclK
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
@ -270,6 +292,7 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@ -282,10 +305,13 @@ github.com/golang-migrate/migrate/v4 v4.8.0/go.mod h1:F6bGIGAA7xSb2k17sF1+eHl2gR
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@ -348,20 +374,25 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/gyuho/goraph v0.0.0-20171001060514-a7a4454fd3eb h1:1mTTeUpJ+imP9A0eSfNnzif1pkpiEoy+ait7XiSqGNc=
github.com/gyuho/goraph v0.0.0-20171001060514-a7a4454fd3eb/go.mod h1:NtSxZCD+s3sZFwbW6WceOcUD83HM9XD5OE2r4c0P8eg=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@ -374,6 +405,7 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -401,6 +433,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v0.0.0-20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
@ -471,9 +504,12 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
@ -481,6 +517,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v0.0.0-20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
@ -506,12 +544,12 @@ github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@ -715,8 +753,11 @@ github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux/v2 v2.0.0 h1:vSGhAy5u6iHBq11ZDcyHH4Blcf9xlBhT4WQDoOE90LU=
github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
@ -740,6 +781,7 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
@ -845,6 +887,7 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f h1:hd3r+uv9DNLScbOrnlj82rBldHQf3XWmCeXAWbw8euQ=
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f/go.mod h1:MyUWrZlB1aI5bs7j9/pJ8ckLLZ4QcCYcNiSbsAW32D4=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.0.0-20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
@ -852,6 +895,8 @@ github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo
github.com/nsf/termbox-go v0.0.0-20170211012700-3540b76b9c77/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
@ -874,6 +919,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
@ -884,7 +930,11 @@ github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mo
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
@ -898,12 +948,14 @@ github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssy
github.com/peterh/liner v1.2.1 h1:O4BlKaq/LWu6VRWmol4ByWfzx6MfXc5Op5HETyIy5yg=
github.com/peterh/liner v1.2.1/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -914,12 +966,15 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.5.0 h1:Ctq0iGpCmr3jeP77kbF2UxgvRwzWWz+4Bh9/vJTyg1A=
github.com/prometheus/client_golang v1.5.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@ -928,14 +983,16 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y=
github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/prometheus v0.0.0-20170814170113-3101606756c5/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
@ -966,6 +1023,7 @@ github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a/go.mod h1:TPq+fc
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@ -979,12 +1037,14 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
@ -999,6 +1059,7 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
@ -1028,6 +1089,9 @@ github.com/status-im/status-go/extkeys v1.0.0/go.mod h1:GdqJbrcpkNm5ZsSCpp+PdMxn
github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501 h1:oa0KU5jJRNtXaM/P465MhvSFo/HM2O8qi2DDuPcd7ro=
github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501/go.mod h1:RYo/itke1oU5k/6sj9DNM3QAwtE5rZSgg5JnkOv83hk=
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20170809224252-890a5c3458b4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@ -1052,6 +1116,7 @@ github.com/tklauser/go-sysconf v0.3.6 h1:oc1sJWvKkmvIxhDHeKWvZS4f6AW+YcoguSfRF2/
github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tsenart/tb v0.0.0-20181025101425-0d2499c8b6e9 h1:kjbwitOGH46vD01f2s3leBfrMnePQa3NSAIlW35MvY8=
github.com/tsenart/tb v0.0.0-20181025101425-0d2499c8b6e9/go.mod h1:EcGP24b8DY+bWHnpfJDP7fM+o8Nmz4fYH0l2xTtNr3I=
@ -1061,6 +1126,8 @@ github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3C
github.com/uber/jaeger-client-go v0.0.0-20180607151842-f7e0d4744fa6/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v0.0.0-20180615202729-a51202d6f4a7/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/vacp2p/mvds v0.0.24-0.20201124060106-26d8e94130d8 h1:aSQuY64yglPb7I6lZRXt/xWD4ExM1DZo8Gpb7xCz6zk=
github.com/vacp2p/mvds v0.0.24-0.20201124060106-26d8e94130d8/go.mod h1:uUmtiahU7efOVl/5w5yk9jOze5xYpDZDrSrT8TvHXjQ=
@ -1096,8 +1163,11 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
@ -1107,17 +1177,20 @@ go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo=
go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
@ -1137,6 +1210,7 @@ golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -1163,8 +1237,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM=
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb h1:fqpd0EBDzlHRCjiphRR5Zo/RSWWQlWv34418dnEixWk=
golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1207,10 +1281,12 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@ -1232,6 +1308,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1261,6 +1338,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1271,18 +1349,24 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210301091718-77cc2087c03b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1303,8 +1387,10 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1342,6 +1428,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@ -1366,6 +1453,7 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1378,6 +1466,7 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
@ -1391,9 +1480,13 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaR
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@ -1418,8 +1511,10 @@ gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.31.0 h1:bmXmP2RSNtFES+bn4uYuHT7iJFJv7Vj+an+ZQdDaD1M=
@ -1441,6 +1536,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -1480,3 +1576,5 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=

View File

@ -1,3 +1,6 @@
arch:
- amd64
- ppc64le
language: go
go:
@ -15,3 +18,27 @@ go:
- 1.13.x
- tip
jobs:
exclude:
- arch: ppc64le
go: 1.2.x
- arch: ppc64le
go: 1.3.x
- arch: ppc64le
go: 1.4.x
- arch: ppc64le
go: 1.5.x
- arch: ppc64le
go: 1.6.x
- arch: ppc64le
go: 1.7.x
- arch: ppc64le
go: 1.8.x
- arch: ppc64le
go: 1.9.x
- arch: ppc64le
go: 1.10.x
- arch: ppc64le
go: 1.11.x
- arch: ppc64le
go: 1.12.x

View File

@ -37,6 +37,7 @@ Please note that because of the net/html dependency, goquery requires Go1.1+.
**Note that goquery's API is now stable, and will not break.**
* **2021-01-11 (v1.6.1)** : Fix panic when calling `{Prepend,Append,Set}Html` on a `Selection` that contains non-Element nodes.
* **2020-10-08 (v1.6.0)** : Parse html in context of the container node for all functions that deal with html strings (`AfterHtml`, `AppendHtml`, etc.). Thanks to [@thiemok][thiemok] and [@davidjwilkins][djw] for their work on this.
* **2020-02-04 (v1.5.1)** : Update module dependencies.
* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
@ -147,6 +148,7 @@ func main() {
- [Dataflow kit](https://github.com/slotix/dataflowkit), Web Scraping framework for Gophers.
- [Geziyor](https://github.com/geziyor/geziyor), a fast web crawling & scraping framework for Go. Supports JS rendering.
- [Pagser](https://github.com/foolin/pagser), a simple, easy, extensible, configurable HTML parser to struct based on goquery and struct tags.
- [stitcherd](https://github.com/vhodges/stitcherd), A server for doing server side includes using css selectors and DOM updates.
## Support

View File

@ -661,6 +661,9 @@ func (s *Selection) eachNodeHtml(htmlStr string, isParent bool, mergeFn func(n *
if isParent {
context = n.Parent
} else {
if n.Type != html.ElementNode {
continue
}
context = n
}
if context != nil {
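
The new guard skips non-element nodes when HTML is parsed in the context of the node itself, which is the panic fix noted in the v1.6.1 changelog entry above. A minimal sketch of the situation it covers, using goquery's standard entry points (the HTML fragment is an arbitrary example):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

func main() {
	doc, err := goquery.NewDocumentFromReader(
		strings.NewReader(`<div>plain text<span>old</span></div>`))
	if err != nil {
		panic(err)
	}

	// Contents() yields both the text node and the <span> element; the
	// guard above means SetHtml now skips the text node instead of
	// trying to parse HTML in its context.
	doc.Find("div").Contents().SetHtml("<b>new</b>")

	out, _ := doc.Find("div").Html()
	fmt.Println(out)
}
```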

View File

@ -1,3 +1,5 @@
module "github.com/andybalholm/cascadia"
module github.com/andybalholm/cascadia
require "golang.org/x/net" v0.0.0-20180218175443-cbe0f9307d01
require golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01
go 1.13

View File

@ -13,6 +13,10 @@ import (
type parser struct {
s string // the source text
i int // the current position
// if `false`, parsing a pseudo-element
// returns an error.
acceptPseudoElements bool
}
// parseEscape parses a backslash escape.
@ -29,7 +33,7 @@ func (p *parser) parseEscape() (result string, err error) {
case hexDigit(c):
// unicode escape (hex)
var i int
for i = start; i < p.i+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
for i = start; i < start+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
// empty
}
v, _ := strconv.ParseUint(p.s[start:i], 16, 21)
@ -422,17 +426,25 @@ var errExpectedParenthesis = errors.New("expected '(' but didn't find it")
var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
var errUnmatchedParenthesis = errors.New("unmatched '('")
// parsePseudoclassSelector parses a pseudoclass selector like :not(p)
func (p *parser) parsePseudoclassSelector() (out Sel, err error) {
// parsePseudoclassSelector parses a pseudoclass selector like :not(p) or a pseudo-element
// For backwards compatibility, both the ':' and '::' prefixes are allowed for pseudo-elements.
// https://drafts.csswg.org/selectors-3/#pseudo-elements
// Returning a nil `Sel` (and a nil `error`) means we found a pseudo-element.
func (p *parser) parsePseudoclassSelector() (out Sel, pseudoElement string, err error) {
if p.i >= len(p.s) {
return nil, fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
return nil, "", fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
}
if p.s[p.i] != ':' {
return nil, fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
return nil, "", fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
}
p.i++
var mustBePseudoElement bool
if p.i >= len(p.s) {
return nil, "", fmt.Errorf("got empty pseudoclass (or pseudoelement)")
}
if p.s[p.i] == ':' { // we found a pseudo-element
mustBePseudoElement = true
p.i++
}
@ -441,27 +453,33 @@ func (p *parser) parsePseudoclassSelector() (out Sel, err error) {
return
}
name = toLowerASCII(name)
if mustBePseudoElement && (name != "after" && name != "backdrop" && name != "before" &&
name != "cue" && name != "first-letter" && name != "first-line" && name != "grammar-error" &&
name != "marker" && name != "placeholder" && name != "selection" && name != "spelling-error") {
return out, "", fmt.Errorf("unknown pseudoelement :%s", name)
}
switch name {
case "not", "has", "haschild":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
return out, "", errExpectedParenthesis
}
sel, parseErr := p.parseSelectorGroup()
if parseErr != nil {
return out, parseErr
return out, "", parseErr
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
return out, "", errExpectedClosingParenthesis
}
out = relativePseudoClassSelector{name: name, match: sel}
case "contains", "containsown":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
return out, "", errExpectedParenthesis
}
if p.i == len(p.s) {
return out, errUnmatchedParenthesis
return out, "", errUnmatchedParenthesis
}
var val string
switch p.s[p.i] {
@ -471,46 +489,46 @@ func (p *parser) parsePseudoclassSelector() (out Sel, err error) {
val, err = p.parseIdentifier()
}
if err != nil {
return out, err
return out, "", err
}
val = strings.ToLower(val)
p.skipWhitespace()
if p.i >= len(p.s) {
return out, errors.New("unexpected EOF in pseudo selector")
return out, "", errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
return out, "", errExpectedClosingParenthesis
}
out = containsPseudoClassSelector{own: name == "containsown", value: val}
case "matches", "matchesown":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
return out, "", errExpectedParenthesis
}
rx, err := p.parseRegex()
if err != nil {
return out, err
return out, "", err
}
if p.i >= len(p.s) {
return out, errors.New("unexpected EOF in pseudo selector")
return out, "", errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
return out, "", errExpectedClosingParenthesis
}
out = regexpPseudoClassSelector{own: name == "matchesown", regexp: rx}
case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
return out, "", errExpectedParenthesis
}
a, b, err := p.parseNth()
if err != nil {
return out, err
return out, "", err
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
return out, "", errExpectedClosingParenthesis
}
last := name == "nth-last-child" || name == "nth-last-of-type"
ofType := name == "nth-of-type" || name == "nth-last-of-type"
@ -535,9 +553,9 @@ func (p *parser) parsePseudoclassSelector() (out Sel, err error) {
case "root":
out = rootPseudoClassSelector{}
case "after", "backdrop", "before", "cue", "first-letter", "first-line", "grammar-error", "marker", "placeholder", "selection", "spelling-error":
return out, errors.New("pseudo-elements are not yet supported")
return nil, name, nil
default:
return out, fmt.Errorf("unknown pseudoclass or pseudoelement :%s", name)
return out, "", fmt.Errorf("unknown pseudoclass or pseudoelement :%s", name)
}
return
}
@ -706,11 +724,13 @@ func (p *parser) parseSimpleSelectorSequence() (Sel, error) {
selectors = append(selectors, r)
}
var pseudoElement string
loop:
for p.i < len(p.s) {
var (
ns Sel
err error
ns Sel
newPseudoElement string
err error
)
switch p.s[p.i] {
case '#':
@ -720,20 +740,37 @@ loop:
case '[':
ns, err = p.parseAttributeSelector()
case ':':
ns, err = p.parsePseudoclassSelector()
ns, newPseudoElement, err = p.parsePseudoclassSelector()
default:
break loop
}
if err != nil {
return nil, err
}
// From https://drafts.csswg.org/selectors-3/#pseudo-elements :
// "Only one pseudo-element may appear per selector, and if present
// it must appear after the sequence of simple selectors that
// represents the subjects of the selector."
if ns == nil { // we found a pseudo-element
if pseudoElement != "" {
return nil, fmt.Errorf("only one pseudo-element is accepted per selector, got %s and %s", pseudoElement, newPseudoElement)
}
if !p.acceptPseudoElements {
return nil, fmt.Errorf("pseudo-element %s found, but pseudo-elements support is disabled", newPseudoElement)
}
pseudoElement = newPseudoElement
} else {
if pseudoElement != "" {
return nil, fmt.Errorf("pseudo-element %s must be at the end of selector", pseudoElement)
}
selectors = append(selectors, ns)
}
selectors = append(selectors, ns)
}
if len(selectors) == 1 { // no need to wrap the selectors in compoundSelector
if len(selectors) == 1 && pseudoElement == "" { // no need to wrap the selectors in compoundSelector
return selectors[0], nil
}
return compoundSelector{selectors: selectors}, nil
return compoundSelector{selectors: selectors, pseudoElement: pseudoElement}, nil
}
// parseSelector parses a selector that may include combinators.
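
A minimal sketch of the parsing rules documented above, assuming the selector strings are arbitrary examples: the legacy ':' prefix still resolves to a pseudo-element for the known names, and at most one pseudo-element is accepted per selector.

```go
package main

import (
	"fmt"

	"github.com/andybalholm/cascadia"
)

func main() {
	// Both the legacy single-colon and the CSS3 double-colon forms are
	// accepted for the known pseudo-elements.
	for _, s := range []string{"p:before", "p::before"} {
		sel, err := cascadia.ParseWithPseudoElement(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(sel.PseudoElement()) // "before" in both cases
	}

	// Only one pseudo-element is accepted per selector.
	if _, err := cascadia.ParseWithPseudoElement("p::before::after"); err != nil {
		fmt.Println(err)
	}
}
```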

View File

@ -16,14 +16,19 @@ type Matcher interface {
}
// Sel is the interface for all the functionality provided by selectors.
// It is currently the same as Matcher, but other methods may be added in the
// future.
type Sel interface {
Matcher
Specificity() Specificity
// Returns a CSS input compiling to this selector.
String() string
// Returns a pseudo-element, or an empty string.
PseudoElement() string
}
// Parse parses a selector.
// Parse parses a selector. Use `ParseWithPseudoElement`
// if you need support for pseudo-elements.
func Parse(sel string) (Sel, error) {
p := &parser{s: sel}
compiled, err := p.parseSelector()
@ -38,7 +43,25 @@ func Parse(sel string) (Sel, error) {
return compiled, nil
}
// ParseWithPseudoElement parses a single selector,
// with support for pseudo-elements.
func ParseWithPseudoElement(sel string) (Sel, error) {
p := &parser{s: sel, acceptPseudoElements: true}
compiled, err := p.parseSelector()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// ParseGroup parses a selector, or a group of selectors separated by commas.
// Use `ParseGroupWithPseudoElements`
// if you need support for pseudo-elements.
func ParseGroup(sel string) (SelectorGroup, error) {
p := &parser{s: sel}
compiled, err := p.parseSelectorGroup()
@ -53,6 +76,22 @@ func ParseGroup(sel string) (SelectorGroup, error) {
return compiled, nil
}
// ParseGroupWithPseudoElements parses a selector, or a group of selectors separated by commas.
// It supports pseudo-elements.
func ParseGroupWithPseudoElements(sel string) (SelectorGroup, error) {
p := &parser{s: sel, acceptPseudoElements: true}
compiled, err := p.parseSelectorGroup()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// A Selector is a function which tells whether a node matches or not.
//
// This type is maintained for compatibility; I recommend using the newer and
@ -182,6 +221,10 @@ func (c tagSelector) Specificity() Specificity {
return Specificity{0, 0, 1}
}
func (c tagSelector) PseudoElement() string {
return ""
}
type classSelector struct {
class string
}
@ -197,6 +240,10 @@ func (c classSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c classSelector) PseudoElement() string {
return ""
}
type idSelector struct {
id string
}
@ -212,6 +259,10 @@ func (c idSelector) Specificity() Specificity {
return Specificity{1, 0, 0}
}
func (c idSelector) PseudoElement() string {
return ""
}
type attrSelector struct {
key, val, operation string
regexp *regexp.Regexp
@ -352,6 +403,10 @@ func (c attrSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c attrSelector) PseudoElement() string {
return ""
}
// ---------------- Pseudo class selectors ----------------
// we use several concrete types of pseudo-class selectors
@ -415,6 +470,10 @@ func (s relativePseudoClassSelector) Specificity() Specificity {
return max
}
func (c relativePseudoClassSelector) PseudoElement() string {
return ""
}
type containsPseudoClassSelector struct {
own bool
value string
@ -436,6 +495,10 @@ func (s containsPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c containsPseudoClassSelector) PseudoElement() string {
return ""
}
type regexpPseudoClassSelector struct {
own bool
regexp *regexp.Regexp
@ -488,6 +551,10 @@ func (s regexpPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c regexpPseudoClassSelector) PseudoElement() string {
return ""
}
type nthPseudoClassSelector struct {
a, b int
last, ofType bool
@ -623,6 +690,10 @@ func (s nthPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c nthPseudoClassSelector) PseudoElement() string {
return ""
}
type onlyChildPseudoClassSelector struct {
ofType bool
}
@ -661,6 +732,10 @@ func (s onlyChildPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c onlyChildPseudoClassSelector) PseudoElement() string {
return ""
}
type inputPseudoClassSelector struct{}
// Matches input, select, textarea and button elements.
@ -672,6 +747,10 @@ func (s inputPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c inputPseudoClassSelector) PseudoElement() string {
return ""
}
type emptyElementPseudoClassSelector struct{}
// Matches empty elements.
@ -694,6 +773,10 @@ func (s emptyElementPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c emptyElementPseudoClassSelector) PseudoElement() string {
return ""
}
type rootPseudoClassSelector struct{}
// Match implements :root
@ -711,8 +794,13 @@ func (s rootPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c rootPseudoClassSelector) PseudoElement() string {
return ""
}
type compoundSelector struct {
selectors []Sel
selectors []Sel
pseudoElement string
}
// Matches elements if each sub-selectors matches.
@ -734,9 +822,17 @@ func (s compoundSelector) Specificity() Specificity {
for _, sel := range s.selectors {
out = out.Add(sel.Specificity())
}
if s.pseudoElement != "" {
// https://drafts.csswg.org/selectors-3/#specificity
out = out.Add(Specificity{0, 0, 1})
}
return out
}
func (c compoundSelector) PseudoElement() string {
return c.pseudoElement
}
type combinedSelector struct {
first Sel
combinator byte
@ -818,6 +914,15 @@ func (s combinedSelector) Specificity() Specificity {
return spec
}
// on combinedSelector, a pseudo-element only makes sense on the last
// selector, although others increase specificity.
func (c combinedSelector) PseudoElement() string {
if c.second == nil {
return ""
}
return c.second.PseudoElement()
}
// A SelectorGroup is a list of selectors, which matches if any of the
// individual selectors matches.
type SelectorGroup []Sel
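
A minimal usage sketch of the new entry points (selector strings are arbitrary examples): the plain parsers keep rejecting pseudo-elements, while the WithPseudoElement variants accept them and expose the result through Sel.PseudoElement.

```go
package main

import (
	"fmt"

	"github.com/andybalholm/cascadia"
)

func main() {
	// The plain parsers still refuse pseudo-elements...
	if _, err := cascadia.ParseGroup("p::first-line, h1"); err != nil {
		fmt.Println("ParseGroup:", err)
	}

	// ...while the new entry points accept them and expose the
	// pseudo-element on each Sel in the group.
	group, err := cascadia.ParseGroupWithPseudoElements("p.intro::first-line, h1")
	if err != nil {
		panic(err)
	}
	for _, sel := range group {
		fmt.Printf("%q -> pseudo-element %q, specificity %v\n",
			sel.String(), sel.PseudoElement(), sel.Specificity())
	}
}
```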

120
vendor/github.com/andybalholm/cascadia/serialize.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
package cascadia
import (
"fmt"
"strings"
)
// implements the reverse operation Sel -> string
func (c tagSelector) String() string {
return c.tag
}
func (c idSelector) String() string {
return "#" + c.id
}
func (c classSelector) String() string {
return "." + c.class
}
func (c attrSelector) String() string {
val := c.val
if c.operation == "#=" {
val = c.regexp.String()
} else if c.operation != "" {
val = fmt.Sprintf(`"%s"`, val)
}
return fmt.Sprintf(`[%s%s%s]`, c.key, c.operation, val)
}
func (c relativePseudoClassSelector) String() string {
return fmt.Sprintf(":%s(%s)", c.name, c.match.String())
}
func (c containsPseudoClassSelector) String() string {
s := "contains"
if c.own {
s += "Own"
}
return fmt.Sprintf(`:%s("%s")`, s, c.value)
}
func (c regexpPseudoClassSelector) String() string {
s := "matches"
if c.own {
s += "Own"
}
return fmt.Sprintf(":%s(%s)", s, c.regexp.String())
}
func (c nthPseudoClassSelector) String() string {
if c.a == 0 && c.b == 1 { // special cases
s := ":first-"
if c.last {
s = ":last-"
}
if c.ofType {
s += "of-type"
} else {
s += "child"
}
return s
}
var name string
switch [2]bool{c.last, c.ofType} {
case [2]bool{true, true}:
name = "nth-last-of-type"
case [2]bool{true, false}:
name = "nth-last-child"
case [2]bool{false, true}:
name = "nth-of-type"
case [2]bool{false, false}:
name = "nth-child"
}
return fmt.Sprintf(":%s(%dn+%d)", name, c.a, c.b)
}
func (c onlyChildPseudoClassSelector) String() string {
if c.ofType {
return ":only-of-type"
}
return ":only-child"
}
func (c inputPseudoClassSelector) String() string {
return ":input"
}
func (c emptyElementPseudoClassSelector) String() string {
return ":empty"
}
func (c rootPseudoClassSelector) String() string {
return ":root"
}
func (c compoundSelector) String() string {
if len(c.selectors) == 0 && c.pseudoElement == "" {
return "*"
}
chunks := make([]string, len(c.selectors))
for i, sel := range c.selectors {
chunks[i] = sel.String()
}
s := strings.Join(chunks, "")
if c.pseudoElement != "" {
s += "::" + c.pseudoElement
}
return s
}
func (c combinedSelector) String() string {
start := c.first.String()
if c.second != nil {
start += fmt.Sprintf(" %s %s", string(c.combinator), c.second.String())
}
return start
}
func (c SelectorGroup) String() string {
ck := make([]string, len(c))
for i, s := range c {
ck[i] = s.String()
}
return strings.Join(ck, ", ")
}
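
A minimal sketch of what the new file enables, assuming an arbitrary selector group: any parsed selector can be rendered back to CSS, which is handy for debugging and error messages.

```go
package main

import (
	"fmt"

	"github.com/andybalholm/cascadia"
)

func main() {
	group, err := cascadia.ParseGroup("div#main > p.note, li:nth-child(2n+1)")
	if err != nil {
		panic(err)
	}
	// String() is the reverse operation Sel -> string added above.
	fmt.Println(group.String()) // expected: div#main > p.note, li:nth-child(2n+1)
}
```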

View File

@ -192,7 +192,7 @@ var bufferPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
func (h *httpGetter) Get(context context.Context, in *pb.GetRequest, out *pb.GetResponse) error {
func (h *httpGetter) Get(ctx context.Context, in *pb.GetRequest, out *pb.GetResponse) error {
u := fmt.Sprintf(
"%v%v/%v",
h.baseURL,
@ -203,9 +203,10 @@ func (h *httpGetter) Get(context context.Context, in *pb.GetRequest, out *pb.Get
if err != nil {
return err
}
req = req.WithContext(ctx)
tr := http.DefaultTransport
if h.transport != nil {
tr = h.transport(context)
tr = h.transport(ctx)
}
res, err := tr.RoundTrip(req)
if err != nil {
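
With the context now attached to the outgoing request, cancellation and deadlines propagate to peer fetches. A minimal, self-contained sketch of the same pattern (URL and timeout are arbitrary):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// fetch attaches the caller's context to the outgoing request, the same
// pattern httpGetter.Get now uses, so an expired or cancelled context
// aborts the round trip.
func fetch(ctx context.Context, url string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	return http.DefaultTransport.RoundTrip(req)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	resp, err := fetch(ctx, "http://example.com/_groupcache/group/key")
	if err != nil {
		fmt.Println("request aborted:", err)
		return
	}
	resp.Body.Close()
}
```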

View File

@ -24,9 +24,12 @@ import (
pb "github.com/golang/groupcache/groupcachepb"
)
// Context is an alias to context.Context for backwards compatibility purposes.
type Context = context.Context
// ProtoGetter is the interface that must be implemented by a peer.
type ProtoGetter interface {
Get(context context.Context, in *pb.GetRequest, out *pb.GetResponse) error
Get(ctx context.Context, in *pb.GetRequest, out *pb.GetResponse) error
}
// PeerPicker is the interface that must be implemented to locate
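
Because Context is a type alias rather than a distinct interface, values typed as groupcache.Context and context.Context are now interchangeable at compile time. A minimal sketch, assuming the vendored version above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/golang/groupcache"
)

// takesStdContext only knows about the standard-library type.
func takesStdContext(ctx context.Context) {
	fmt.Println(ctx.Err()) // <nil> for a background context
}

func main() {
	// With `type Context = context.Context`, the alias and the standard
	// type are identical, so no conversion is needed in either direction.
	var ctx groupcache.Context = context.Background()
	takesStdContext(ctx)
}
```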

View File

@ -1,7 +1,7 @@
THIRD PARTY NOTICES
*****
github.com/PuerkitoBio/goquery@2d2796f41742ece03e8086188fa4db16a3a0b458
github.com/PuerkitoBio/goquery@v1.5.0
Copyright (c) 2012-2016, Martin Angers & Contributors
All rights reserved.
@ -37,7 +37,7 @@ The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia'
[goq]: https://github.com/andrewstuart/goq
*****
github.com/andybalholm/cascadia@901648c87902174f774fac311d7f176f8647bdaa
github.com/andybalholm/cascadia@v1.0.0
Copyright (c) 2011 Andy Balholm. All rights reserved.
@ -65,7 +65,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****
github.com/beorn7/perks/quantile@4b2b341e8d7715fae06375aa633dbb6e91b3fb46
github.com/beorn7/perks/quantile@v1.0.0
Copyright (C) 2013 Blake Mizerany
@ -89,7 +89,7 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*****
github.com/golang/freetype/raster@e2365dfdc4a05e4b8299a783240d4a7d5a65d4e4
github.com/golang/freetype/raster@v0.0.0-20170609003504-e2365dfdc4a0
Use of the Freetype-Go software is subject to your choice of exactly one of
the following two licenses:
@ -105,7 +105,7 @@ The Luxi fonts in the testdata directory are licensed separately. See the
testdata/COPYING file for details.
*****
github.com/golang/freetype/truetype@e2365dfdc4a05e4b8299a783240d4a7d5a65d4e4
github.com/golang/freetype/truetype@v0.0.0-20170609003504-e2365dfdc4a0
Use of the Freetype-Go software is subject to your choice of exactly one of
the following two licenses:
@ -121,7 +121,7 @@ The Luxi fonts in the testdata directory are licensed separately. See the
testdata/COPYING file for details.
*****
github.com/golang/groupcache@869f871628b6baa9cfbc11732cdf6546b17c1298
github.com/golang/groupcache@v0.0.0-20190702054246-869f871628b6
Apache License
Version 2.0, January 2004
@ -316,7 +316,7 @@ third-party archives.
limitations under the License.
*****
github.com/golang/groupcache/consistenthash@869f871628b6baa9cfbc11732cdf6546b17c1298
github.com/golang/groupcache/consistenthash@v0.0.0-20190702054246-869f871628b6
Apache License
Version 2.0, January 2004
@ -511,7 +511,7 @@ third-party archives.
limitations under the License.
*****
github.com/golang/groupcache/groupcachepb@869f871628b6baa9cfbc11732cdf6546b17c1298
github.com/golang/groupcache/groupcachepb@v0.0.0-20190702054246-869f871628b6
Apache License
Version 2.0, January 2004
@ -706,7 +706,7 @@ third-party archives.
limitations under the License.
*****
github.com/golang/groupcache/lru@869f871628b6baa9cfbc11732cdf6546b17c1298
github.com/golang/groupcache/lru@v0.0.0-20190702054246-869f871628b6
Apache License
Version 2.0, January 2004
@ -901,7 +901,7 @@ third-party archives.
limitations under the License.
*****
github.com/golang/groupcache/singleflight@869f871628b6baa9cfbc11732cdf6546b17c1298
github.com/golang/groupcache/singleflight@v0.0.0-20190702054246-869f871628b6
Apache License
Version 2.0, January 2004
@ -1096,7 +1096,7 @@ third-party archives.
limitations under the License.
*****
github.com/golang/protobuf/proto@6c65a5562fc06764971b7c5d05c76c75e84bdbf7
github.com/golang/protobuf/proto@v1.3.2
Copyright 2010 The Go Authors. All rights reserved.
@ -1127,7 +1127,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****
github.com/matttproud/golang_protobuf_extensions/pbutil@c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/matttproud/golang_protobuf_extensions/pbutil@v1.0.1
Apache License
Version 2.0, January 2004
@ -1332,7 +1332,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/client_golang/prometheus@4ab88e80c249ed361d3299e2930427d9ac43ef8d
github.com/prometheus/client_golang/prometheus@v1.0.0
Apache License
Version 2.0, January 2004
@ -1537,7 +1537,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/client_golang/prometheus/internal@4ab88e80c249ed361d3299e2930427d9ac43ef8d
github.com/prometheus/client_golang/prometheus/internal@v1.0.0
Apache License
Version 2.0, January 2004
@ -1742,7 +1742,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/client_golang/prometheus/promhttp@4ab88e80c249ed361d3299e2930427d9ac43ef8d
github.com/prometheus/client_golang/prometheus/promhttp@v1.0.0
Apache License
Version 2.0, January 2004
@ -1947,7 +1947,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/client_model/go@fd36f4220a901265f90734c3183c5f0c91daa0b8
github.com/prometheus/client_model/go@v0.0.0-20190129233127-fd36f4220a90
Apache License
Version 2.0, January 2004
@ -2152,7 +2152,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/common/expfmt@31bed53e4047fd6c510e43a941f90cb31be0972a
github.com/prometheus/common/expfmt@v0.6.0
Apache License
Version 2.0, January 2004
@ -2357,7 +2357,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg@31bed53e4047fd6c510e43a941f90cb31be0972a
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg@v0.6.0
Apache License
Version 2.0, January 2004
@ -2562,7 +2562,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/common/model@31bed53e4047fd6c510e43a941f90cb31be0972a
github.com/prometheus/common/model@v0.6.0
Apache License
Version 2.0, January 2004
@ -2767,7 +2767,7 @@ Apache License
limitations under the License.
*****
github.com/prometheus/procfs@3f98efb27840a48a7a2898ec80be07674d19f9c8
github.com/prometheus/procfs@v0.0.3
Apache License
Version 2.0, January 2004
@ -2982,7 +2982,7 @@ This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).
*****
github.com/prometheus/procfs/internal/fs@3f98efb27840a48a7a2898ec80be07674d19f9c8
github.com/prometheus/procfs/internal/fs@v0.0.3
Apache License
Version 2.0, January 2004
@ -3187,7 +3187,7 @@ Apache License
limitations under the License.
*****
github.com/rs/cors@db0fe48135e83b5812a5a31be0eea66984b1b521
github.com/rs/cors@v1.7.0
Copyright (c) 2014 Olivier Poitrey <rs@dailymotion.com>
@ -3210,7 +3210,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*****
golang.org/x/image/bmp@d6a02ce849c95fa57d9927fab7b6d9071876dfe0
golang.org/x/image/bmp@v0.0.0-20190703141733-d6a02ce849c9
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3266,7 +3266,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/image/font@d6a02ce849c95fa57d9927fab7b6d9071876dfe0
golang.org/x/image/font@v0.0.0-20190703141733-d6a02ce849c9
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3322,7 +3322,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/image/math/fixed@d6a02ce849c95fa57d9927fab7b6d9071876dfe0
golang.org/x/image/math/fixed@v0.0.0-20190703141733-d6a02ce849c9
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3378,7 +3378,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/net/html@ca1201d0de80cfde86cb01aea620983605dfe99b
golang.org/x/net/html@v0.0.0-20190724013045-ca1201d0de80
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3434,7 +3434,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/net/html/atom@ca1201d0de80cfde86cb01aea620983605dfe99b
golang.org/x/net/html/atom@v0.0.0-20190724013045-ca1201d0de80
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3490,7 +3490,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/net/html/charset@ca1201d0de80cfde86cb01aea620983605dfe99b
golang.org/x/net/html/charset@v0.0.0-20190724013045-ca1201d0de80
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3546,7 +3546,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/net/publicsuffix@ca1201d0de80cfde86cb01aea620983605dfe99b
golang.org/x/net/publicsuffix@v0.0.0-20190724013045-ca1201d0de80
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3602,7 +3602,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/sys/windows@fc99dfbffb4e5ed5758a37e31dd861afe285406b
golang.org/x/text/encoding@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3658,7 +3658,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/collate@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/charmap@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3714,7 +3714,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/collate/build@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/htmlindex@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3770,7 +3770,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/internal@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3826,7 +3826,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/charmap@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/internal/identifier@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3882,7 +3882,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/htmlindex@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/japanese@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3938,7 +3938,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/internal@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/korean@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -3994,7 +3994,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/internal/identifier@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/simplifiedchinese@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4050,7 +4050,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/japanese@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/traditionalchinese@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4106,7 +4106,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/korean@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/encoding/unicode@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4162,7 +4162,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/simplifiedchinese@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/internal/language@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4218,7 +4218,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/traditionalchinese@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/internal/language/compact@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4274,7 +4274,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/encoding/unicode@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/internal/tag@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4330,7 +4330,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/colltab@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/internal/utf8internal@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4386,7 +4386,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/gen@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/language@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.
@ -4442,511 +4442,7 @@ rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/language@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/language/compact@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/tag@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/triegen@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/ucd@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/internal/utf8internal@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/language@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/runes@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/unicode/cldr@342b2e1fbaa52c93f31447ad2c6abc048c63e475
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
*****
golang.org/x/text/unicode/rangetable@342b2e1fbaa52c93f31447ad2c6abc048c63e475
golang.org/x/text/runes@v0.3.2
Copyright (c) 2009 The Go Authors. All rights reserved.

View File

@ -15,7 +15,6 @@ import (
"net/http/cookiejar"
"net/url"
"os"
"sort"
"strings"
"time"
@ -23,13 +22,13 @@ import (
// Load supported image formats.
_ "image/gif"
_ "image/jpeg"
_ "image/png"
_ "github.com/mat/besticon/ico"
"github.com/mat/besticon/colorfinder"
"github.com/PuerkitoBio/goquery"
"golang.org/x/net/html/charset"
"golang.org/x/net/idna"
"golang.org/x/net/publicsuffix"
@ -38,6 +37,7 @@ import (
var defaultFormats []string
const MinIconSize = 0
// TODO: Turn into env var: https://github.com/rendomnet/besticon/commit/c85867cc80c00c898053ce8daf40d51a93b9d39f#diff-37b57e3fdbe4246771791e86deb4d69dL41
const MaxIconSize = 500
@ -102,7 +102,14 @@ func (f *IconFinder) stripIfNecessary(URL string) string {
func (f *IconFinder) IconInSizeRange(r SizeRange) *Icon {
icons := f.Icons()
// Try to return smallest in range perfect..max
// 1. SVG always wins
for _, ico := range icons {
if ico.Format == "svg" {
return &ico
}
}
// 2. Try to return smallest in range perfect..max
sortIcons(icons, false)
for _, ico := range icons {
if (ico.Width >= r.Perfect && ico.Height >= r.Perfect) && (ico.Width <= r.Max && ico.Height <= r.Max) {
@ -110,7 +117,7 @@ func (f *IconFinder) IconInSizeRange(r SizeRange) *Icon {
}
}
// Try to return biggest in range perfect..min
// 3. Try to return biggest in range perfect..min
sortIcons(icons, true)
for _, ico := range icons {
if (ico.Width >= r.Min && ico.Height >= r.Min) && (ico.Width <= r.Perfect && ico.Height <= r.Perfect) {
@ -167,14 +174,22 @@ func includesString(arr []string, str string) bool {
}
func fetchIcons(siteURL string) ([]Icon, error) {
html, urlAfterRedirect, e := fetchHTML(siteURL)
if e != nil {
return nil, e
}
var links []string
links, e := findIconLinks(urlAfterRedirect, html)
if e != nil {
return nil, e
html, urlAfterRedirect, e := fetchHTML(siteURL)
if e == nil {
// Search HTML for icons
links, e = findIconLinks(urlAfterRedirect, html)
if e != nil {
return nil, e
}
} else {
// Unable to fetch the response or got a bad HTTP status code. Try default
// icon paths. https://github.com/mat/besticon/discussions/47
links, e = defaultIconURLs(siteURL)
if e != nil {
return nil, e
}
}
icons := fetchAllIcons(links)
@ -218,111 +233,15 @@ func fetchHTML(url string) ([]byte, *url.URL, error) {
return utf8bytes, r.Request.URL, nil
}
var iconPaths = []string{
"/favicon.ico",
"/apple-touch-icon.png",
"/apple-touch-icon-precomposed.png",
}
type empty struct{}
func findIconLinks(siteURL *url.URL, html []byte) ([]string, error) {
doc, e := docFromHTML(html)
if e != nil {
return nil, e
}
baseURL := determineBaseURL(siteURL, doc)
links := make(map[string]empty)
// Add common, hard coded icon paths
for _, path := range iconPaths {
links[urlFromBase(baseURL, path)] = empty{}
}
// Add icons found in page
urls := extractIconTags(doc)
for _, u := range urls {
absoluteURL, e := absoluteURL(baseURL, u)
if e == nil {
links[absoluteURL] = empty{}
}
}
// Turn unique keys into array
var result []string
for u := range links {
result = append(result, u)
}
sort.Strings(result)
return result, nil
}
func determineBaseURL(siteURL *url.URL, doc *goquery.Document) *url.URL {
baseTagHref := extractBaseTag(doc)
if baseTagHref != "" {
baseTagURL, e := url.Parse(baseTagHref)
if e != nil {
return siteURL
}
return baseTagURL
}
return siteURL
}
func docFromHTML(html []byte) (*goquery.Document, error) {
doc, e := goquery.NewDocumentFromReader(bytes.NewReader(html))
if e != nil || doc == nil {
return nil, errParseHTML
}
return doc, nil
}
var csspaths = strings.Join([]string{
"link[rel='icon']",
"link[rel='shortcut icon']",
"link[rel='apple-touch-icon']",
"link[rel='apple-touch-icon-precomposed']",
// Capitalized variants, TODO: refactor
"link[rel='ICON']",
"link[rel='SHORTCUT ICON']",
"link[rel='APPLE-TOUCH-ICON']",
"link[rel='APPLE-TOUCH-ICON-PRECOMPOSED']",
}, ", ")
var errParseHTML = errors.New("besticon: could not parse html")
func extractBaseTag(doc *goquery.Document) string {
href := ""
doc.Find("head base[href]").First().Each(func(i int, s *goquery.Selection) {
href, _ = s.Attr("href")
})
return href
}
func extractIconTags(doc *goquery.Document) []string {
var hits []string
doc.Find(csspaths).Each(func(i int, s *goquery.Selection) {
href, ok := s.Attr("href")
if ok && href != "" {
hits = append(hits, href)
}
})
return hits
}
func MainColorForIcons(icons []Icon) *color.RGBA {
if len(icons) == 0 {
return nil
}
var icon *Icon
// Prefer .png and .gif
// Prefer gif, jpg, png
for _, ico := range icons {
if ico.Format == "png" || ico.Format == "gif" {
if ico.Format == "gif" || ico.Format == "jpg" || ico.Format == "png" {
icon = &ico
break
}
@ -355,6 +274,25 @@ func MainColorForIcons(icons []Icon) *color.RGBA {
return &mainColor
}
// Construct default icon URLs. A fallback if we can't fetch the HTML.
func defaultIconURLs(siteURL string) ([]string, error) {
baseURL, e := url.Parse(siteURL)
if e != nil {
return nil, e
}
var links []string
for _, path := range iconPaths {
absoluteURL, e := absoluteURL(baseURL, path)
if e != nil {
return nil, e
}
links = append(links, absoluteURL)
}
return links, nil
}
func fetchAllIcons(urls []string) []Icon {
ch := make(chan Icon)
@ -385,15 +323,30 @@ func fetchIconDetails(url string) Icon {
return i
}
cfg, format, e := image.DecodeConfig(bytes.NewReader(b))
if e != nil {
i.Error = fmt.Errorf("besticon: unknown image format: %s", e)
return i
if isSVG(b) {
// Special handling for svg, which golang can't decode with
// image.DecodeConfig. Fill in an absurdly large width/height so SVG always
// wins size contests.
i.Format = "svg"
i.Width = 9999
i.Height = 9999
} else {
cfg, format, e := image.DecodeConfig(bytes.NewReader(b))
if e != nil {
i.Error = fmt.Errorf("besticon: unknown image format: %s", e)
return i
}
// jpeg => jpg
if format == "jpeg" {
format = "jpg"
}
i.Width = cfg.Width
i.Height = cfg.Height
i.Format = format
}
i.Width = cfg.Width
i.Height = cfg.Height
i.Format = format
i.Bytes = len(b)
i.Sha1sum = sha1Sum(b)
if keepImageBytes {
@ -403,6 +356,32 @@ func fetchIconDetails(url string) Icon {
return i
}
// SVG detector. We can't use image.RegisterFormat, since RegisterFormat is
// limited to a simple magic number check. It's easy to confuse the first few
// bytes of HTML with SVG.
func isSVG(body []byte) bool {
// is it long enough?
if len(body) < 10 {
return false
}
// does it start with something reasonable?
switch {
case bytes.Equal(body[0:2], []byte("<!")):
case bytes.Equal(body[0:2], []byte("<?")):
case bytes.Equal(body[0:4], []byte("<svg")):
default:
return false
}
// is there an <svg in the first 300 bytes?
if off := bytes.Index(body, []byte("<svg")); off == -1 || off > 300 {
return false
}
return true
}
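A standalone sketch of the same heuristic, using a hypothetical helper name (svgLike) and made-up inputs, to show how the prefix check plus the "<svg"-within-300-bytes rule classifies a few payloads:

package main

import (
	"bytes"
	"fmt"
)

// svgLike mirrors the vendored isSVG checks: minimum length, an XML-ish prefix,
// and an "<svg" occurrence within the first 300 bytes.
func svgLike(body []byte) bool {
	if len(body) < 10 {
		return false
	}
	switch {
	case bytes.Equal(body[0:2], []byte("<!")):
	case bytes.Equal(body[0:2], []byte("<?")):
	case bytes.Equal(body[0:4], []byte("<svg")):
	default:
		return false
	}
	off := bytes.Index(body, []byte("<svg"))
	return off != -1 && off <= 300
}

func main() {
	fmt.Println(svgLike([]byte(`<svg xmlns="http://www.w3.org/2000/svg"></svg>`))) // true: starts with "<svg"
	fmt.Println(svgLike([]byte(`<?xml version="1.0"?><svg/>`)))                    // true: "<?" prefix, "<svg" early
	fmt.Println(svgLike([]byte(`<!DOCTYPE html><html><body></body></html>`)))      // false: no "<svg" at all
	fmt.Println(svgLike([]byte("\x89PNG\r\n\x1a\n not markup at all")))            // false: prefix is binary
}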
func Get(urlstring string) (*http.Response, error) {
u, e := url.Parse(urlstring)
if e != nil {
@ -538,8 +517,11 @@ func init() {
}
setHTTPClient(&http.Client{Timeout: duration})
// Needs to be kept in sync with those image/... imports
defaultFormats = []string{"png", "gif", "ico"}
// see
// https://github.com/mat/besticon/pull/52/commits/208e9dcbdbdeb7ef7491bb42f1bc449e87e084a2
// when we are ready to add support for the FORMATS env variable
defaultFormats = []string{"gif", "ico", "jpg", "png"}
}
func setHTTPClient(c *http.Client) {
@ -561,11 +543,19 @@ func init() {
}
func getenvOrFallback(key string, fallbackValue string) string {
value := os.Getenv(key)
if len(strings.TrimSpace(value)) != 0 {
value := strings.TrimSpace(os.Getenv(key))
if len(value) != 0 {
return value
}
return fallbackValue
}
func getenvOrFallbackArray(key string, fallbackValue []string) []string {
value := strings.TrimSpace(os.Getenv(key))
if len(value) != 0 {
return strings.Split(value, ",")
}
return fallbackValue
}
var BuildDate string // set via ldflags on Make
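A minimal caller-side sketch of the behaviour these changes introduce, assuming the package's exported IconFinder, FetchIcons, SizeRange and Icon types; the site URL and size range are illustrative only. With the new ordering, an SVG icon (reported as 9999x9999) is returned ahead of the perfect..max and min..perfect passes:

package main

import (
	"fmt"
	"log"

	"github.com/mat/besticon/besticon"
)

func main() {
	finder := besticon.IconFinder{}
	// FetchIcons falls back to the default icon paths when the HTML cannot be fetched.
	if _, err := finder.FetchIcons("https://example.org"); err != nil {
		log.Fatal(err)
	}
	// An SVG, if present, wins before the size-range passes run.
	icon := finder.IconInSizeRange(besticon.SizeRange{Min: 32, Perfect: 120, Max: 256})
	if icon != nil {
		fmt.Println(icon.URL, icon.Format, icon.Width, icon.Height)
	}
}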

View File

@ -1,6 +1,7 @@
package besticon
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -11,6 +12,8 @@ import (
var iconCache *groupcache.Group
const contextKeySiteURL = "siteURL"
type result struct {
Icons []Icon
Error string
@ -21,11 +24,9 @@ func resultFromCache(siteURL string) ([]Icon, error) {
return fetchIcons(siteURL)
}
// Let results expire after a day
now := time.Now()
key := fmt.Sprintf("%d-%02d-%02d-%s", now.Year(), now.Month(), now.Day(), siteURL)
c := context.WithValue(context.Background(), contextKeySiteURL, siteURL)
var data []byte
err := iconCache.Get(siteURL, key, groupcache.AllocatingByteSliceSink(&data))
err := iconCache.Get(c, cacheKey(siteURL), groupcache.AllocatingByteSliceSink(&data))
if err != nil {
logger.Println("ERR:", err)
return fetchIcons(siteURL)
@ -43,8 +44,14 @@ func resultFromCache(siteURL string) ([]Icon, error) {
return res.Icons, nil
}
func generatorFunc(ctx groupcache.Context, key string, sink groupcache.Sink) error {
siteURL := ctx.(string)
func cacheKey(siteURL string) string {
// Let results expire after a day
now := time.Now()
return fmt.Sprintf("%d-%02d-%02d-%s", now.Year(), now.Month(), now.Day(), siteURL)
}
func generatorFunc(ctx context.Context, key string, sink groupcache.Sink) error {
siteURL := ctx.Value(contextKeySiteURL).(string)
icons, err := fetchIcons(siteURL)
if err != nil {
// Don't cache errors
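A self-contained sketch of the groupcache API this cache now targets, with a hypothetical group name and cache size: the getter receives a context.Context rather than the old groupcache.Context, so the site URL travels via context.WithValue and is read back inside the getter, mirroring generatorFunc above:

package main

import (
	"context"
	"fmt"

	"github.com/golang/groupcache"
)

const contextKeySiteURL = "siteURL"

func main() {
	group := groupcache.NewGroup("icons-demo", 1<<20, groupcache.GetterFunc(
		func(ctx context.Context, key string, dest groupcache.Sink) error {
			// The request-scoped site URL is carried in the context, not in the key.
			siteURL, _ := ctx.Value(contextKeySiteURL).(string)
			return dest.SetString("fetched:" + siteURL)
		}))

	ctx := context.WithValue(context.Background(), contextKeySiteURL, "https://example.org")
	var out string
	// The key is day-scoped, as cacheKey above produces, so entries expire daily.
	if err := group.Get(ctx, "2021-06-28-https://example.org", groupcache.StringSink(&out)); err != nil {
		fmt.Println("ERR:", err)
		return
	}
	fmt.Println(out)
}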

151
vendor/github.com/mat/besticon/besticon/extract.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
package besticon
import (
"bytes"
"errors"
"fmt"
"net/url"
"regexp"
"sort"
"strings"
"github.com/PuerkitoBio/goquery"
)
var iconPaths = []string{
"/favicon.ico",
"/apple-touch-icon.png",
"/apple-touch-icon-precomposed.png",
}
const (
favIcon = "icon"
appleTouchIcon = "apple-touch-icon"
appleTouchIconPrecomposed = "apple-touch-icon-precomposed"
)
type empty struct{}
// Find all icons in this html. We use siteURL as the base url unless we detect
// another base url in <head>
func findIconLinks(siteURL *url.URL, html []byte) ([]string, error) {
doc, e := docFromHTML(html)
if e != nil {
return nil, e
}
baseURL := determineBaseURL(siteURL, doc)
// Use a map to avoid dups
links := make(map[string]empty)
// Add common, hard coded icon paths
for _, path := range iconPaths {
links[urlFromBase(baseURL, path)] = empty{}
}
// Add icons found in page
urls := extractIconTags(doc)
for _, u := range urls {
absoluteURL, e := absoluteURL(baseURL, u)
if e == nil {
links[absoluteURL] = empty{}
}
}
// Turn unique keys into array
var result []string
for u := range links {
result = append(result, u)
}
sort.Strings(result)
return result, nil
}
// What is the baseURL for this doc?
func determineBaseURL(siteURL *url.URL, doc *goquery.Document) *url.URL {
baseTagHref := extractBaseTag(doc)
if baseTagHref != "" {
baseTagURL, e := url.Parse(baseTagHref)
if e != nil {
return siteURL
}
return baseTagURL
}
return siteURL
}
// Convert bytes => doc
func docFromHTML(html []byte) (*goquery.Document, error) {
doc, e := goquery.NewDocumentFromReader(bytes.NewReader(html))
if e != nil || doc == nil {
return nil, errParseHTML
}
return doc, nil
}
var errParseHTML = errors.New("besticon: could not parse html")
// Find <head><base href="xxx">
func extractBaseTag(doc *goquery.Document) string {
href := ""
doc.Find("head base[href]").First().Each(func(i int, s *goquery.Selection) {
href, _ = s.Attr("href")
})
return href
}
var (
iconTypes = []string{favIcon, appleTouchIcon, appleTouchIconPrecomposed}
iconTypesRe = regexp.MustCompile(fmt.Sprintf("^(%s)$", strings.Join(regexpQuoteMetaArray(iconTypes), "|")))
)
// Find icons from doc using goquery
func extractIconTags(doc *goquery.Document) []string {
var hits []string
doc.Find("link[href][rel]").Each(func(i int, s *goquery.Selection) {
href := extractIconTag(s)
if href != "" {
hits = append(hits, href)
}
})
return hits
}
func extractIconTag(s *goquery.Selection) string {
// What sort of iconType is in this <rel>?
rel, _ := s.Attr("rel")
if rel == "" {
return ""
}
rel = strings.ToLower(rel)
var iconType string
for _, i := range strings.Fields(rel) {
if iconTypesRe.MatchString(i) {
iconType = i
break
}
}
if iconType == "" {
return ""
}
href, _ := s.Attr("href")
if href == "" {
return ""
}
return href
}
// regexp.QuoteMeta an array of strings
func regexpQuoteMetaArray(a []string) []string {
quoted := make([]string, len(a))
for i, s := range a {
quoted[i] = regexp.QuoteMeta(s)
}
return quoted
}
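A standalone sketch of the rel-attribute matching used by extractIconTag above: the same anchored regexp is built from the three icon types and applied to each space-separated token of the lower-cased rel value; the sample rel values are illustrative only:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	iconTypes := []string{"icon", "apple-touch-icon", "apple-touch-icon-precomposed"}
	quoted := make([]string, len(iconTypes))
	for i, s := range iconTypes {
		quoted[i] = regexp.QuoteMeta(s)
	}
	iconTypesRe := regexp.MustCompile(fmt.Sprintf("^(%s)$", strings.Join(quoted, "|")))

	for _, rel := range []string{"SHORTCUT ICON", "apple-touch-icon", "stylesheet", "icon mask-icon"} {
		matched := false
		// Match token by token, so values like "shortcut icon" still qualify.
		for _, token := range strings.Fields(strings.ToLower(rel)) {
			if iconTypesRe.MatchString(token) {
				matched = true
				break
			}
		}
		fmt.Printf("%q -> %v\n", rel, matched)
	}
}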

View File

@ -17,9 +17,11 @@ import (
"image/color"
// Load supported image formats
_ "github.com/mat/besticon/ico"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
_ "github.com/mat/besticon/ico"
)
func main() {

View File

@ -163,7 +163,7 @@ func (c *counter) updateExemplar(v float64, l Labels) {
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
type CounterVec struct {
*metricVec
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@ -176,11 +176,11 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
return result
}),
@ -188,7 +188,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
}
// GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created.
//
// It is possible to call this method without using the returned Counter to only
@ -202,7 +202,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -211,7 +211,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
@ -219,19 +219,19 @@ func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
}
// GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
@ -275,7 +275,7 @@ func (v *CounterVec) With(labels Labels) Counter {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &CounterVec{vec}, err
}
@ -309,6 +309,8 @@ type CounterFunc interface {
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
//
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
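A brief usage sketch of the CounterVec surface documented above, with a hypothetical metric name and labels: WithLabelValues and With for the common cases, and MustCurryWith to pre-bind one label (the curried and uncurried vectors share the underlying metrics):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "myapp_http_requests_total",
		Help: "HTTP requests partitioned by status code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(requests)

	requests.WithLabelValues("200", "GET").Inc()
	requests.With(prometheus.Labels{"code": "404", "method": "POST"}).Inc()

	// Curry the method label away; only the uncurried vector is registered.
	getOnly := requests.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("500").Inc()
}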

View File

@ -20,6 +20,7 @@ import (
"strings"
"github.com/cespare/xxhash/v2"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
@ -50,7 +51,7 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// VariableLabels contains names of labels for which the metric
// variableLabels contains names of labels for which the metric
// maintains variable values.
variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This

View File

@ -132,7 +132,7 @@ func (g *gauge) Write(out *dto.Metric) error {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
*metricVec
*MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@ -145,11 +145,11 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
return result
}),
@ -157,7 +157,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
}
// GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created.
//
// It is possible to call this method without using the returned Gauge to only
@ -172,7 +172,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -180,7 +180,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
@ -188,19 +188,19 @@ func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
}
// GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
@ -244,7 +244,7 @@ func (v *GaugeVec) With(labels Labels) Gauge {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &GaugeVec{vec}, err
}

View File

@ -58,9 +58,10 @@ type goCollector struct {
// collector will use the memstats from a previous collection if
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
// collected memstats, or their collection is more than 5m ago, the collection
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
// issue.)
// will block until runtime.ReadMemStats succeeds.
//
// NOTE: The problem is solved in Go 1.15, see
// https://github.com/golang/go/issues/19812 for the related Go issue.
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(

View File

@ -22,6 +22,7 @@ import (
"sync/atomic"
"time"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@ -191,7 +192,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{{}, {}},
now: time.Now,
}
@ -408,7 +409,7 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
*metricVec
*MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@ -421,14 +422,14 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
@ -443,7 +444,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -452,7 +453,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
@ -460,19 +461,19 @@ func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error)
}
// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
@ -516,7 +517,7 @@ func (v *HistogramVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &HistogramVec{vec}, err
}
@ -601,12 +602,12 @@ func NewConstHistogram(
count: count,
sum: sum,
buckets: buckets,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}
// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstMetric would have returned an error.
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
desc *Desc,
count uint64,

View File

@ -17,6 +17,7 @@ import (
"strings"
"time"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
@ -88,7 +89,7 @@ type Opts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
}

View File

@ -15,7 +15,11 @@ package prometheus
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
type processCollector struct {
@ -149,3 +153,20 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error)
}
ch <- NewInvalidMetric(desc, err)
}
// NewPidFileFn returns a function that retrieves a pid from the specified file.
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
content, err := ioutil.ReadFile(pidFilePath)
if err != nil {
return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
}
return pid, nil
}
}
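A short sketch of wiring the new NewPidFileFn helper into a process collector, with a hypothetical pid-file path, namespace and listen address:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Collect process metrics for a process identified by a pid file.
	prometheus.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		PidFn:        prometheus.NewPidFileFn("/var/run/myapp.pid"),
		Namespace:    "myapp",
		ReportErrors: true,
	}))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9100", nil))
}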

View File

@ -33,18 +33,22 @@ var (
)
type processMemoryCounters struct {
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
// System interface description
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
// Refer to the Golang internal implementation
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
_ uint32
PageFaultCount uint32
PeakWorkingSetSize uint64
WorkingSetSize uint64
QuotaPeakPagedPoolUsage uint64
QuotaPagedPoolUsage uint64
QuotaPeakNonPagedPoolUsage uint64
QuotaNonPagedPoolUsage uint64
PagefileUsage uint64
PeakPagefileUsage uint64
PrivateUsage uint64
PeakWorkingSetSize uintptr
WorkingSetSize uintptr
QuotaPeakPagedPoolUsage uintptr
QuotaPagedPoolUsage uintptr
QuotaPeakNonPagedPoolUsage uintptr
QuotaNonPagedPoolUsage uintptr
PagefileUsage uintptr
PeakPagefileUsage uintptr
PrivateUsage uintptr
}
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {

View File

@ -53,12 +53,16 @@ func (r *responseWriterDelegator) Written() int64 {
}
func (r *responseWriterDelegator) WriteHeader(code int) {
if r.observeWriteHeader != nil && !r.wroteHeader {
// Only call observeWriteHeader for the 1st time. It's a bug if
// WriteHeader is called more than once, but we want to protect
// against it here. Note that we still delegate the WriteHeader
// to the original ResponseWriter to not mask the bug from it.
r.observeWriteHeader(code)
}
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
if r.observeWriteHeader != nil {
r.observeWriteHeader(code)
}
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {

View File

@ -99,7 +99,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
}
if opts.Registry != nil {
// Initialize all possibilites that can occur below.
// Initialize all possibilities that can occur below.
errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil {
@ -167,15 +167,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
enc := expfmt.NewEncoder(w, contentType)
var lastErr error
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
handleError := func(err error) bool {
if err == nil {
return false
}
lastErr = err
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
@ -184,7 +181,10 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
case PanicOnError:
panic(err)
case HTTPErrorOnError:
httpError(rsp, err)
// We cannot really send an HTTP error at this
// point because we most likely have written
// something to rsp already. But at least we can
// stop sending.
return true
}
// Do nothing in all other cases, including ContinueOnError.
@ -202,10 +202,6 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return
}
}
if lastErr != nil {
httpError(rsp, lastErr)
}
})
if opts.Timeout <= 0 {
@ -276,7 +272,12 @@ type HandlerErrorHandling int
// errors are encountered.
const (
// Serve an HTTP status code 500 upon the first error
// encountered. Report the error message in the body.
// encountered. Report the error message in the body. Note that HTTP
// errors cannot be served anymore once the beginning of a regular
// payload has been sent. Thus, in the (unlikely) case that encoding the
// payload into the negotiated wire format fails, serving the response
// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
// those errors.
HTTPErrorOnError HandlerErrorHandling = iota
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
@ -302,8 +303,12 @@ type Logger interface {
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
// ErrorLog specifies an optional logger for errors collecting and
// serving metrics. If nil, errors are not logged at all.
// ErrorLog specifies an optional Logger for errors collecting and
// serving metrics. If nil, errors are not logged at all. Note that the
// type of a reported error is often prometheus.MultiError, which
// formats into a multi-line error string. If you want to avoid the
// latter, create a Logger implementation that detects a
// prometheus.MultiError and formats the contained errors into one line.
ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling provided ErrorLog
@ -365,11 +370,9 @@ func gzipAccepted(header http.Header) bool {
}
// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerErrer. Error contents is
// supposed to be uncompressed plain text. However, same as with a plain
// http.Error, any header settings will be void if the header has already been
// sent. The error message will still be written to the writer, but it will
// probably be of limited use.
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
// must not be called if the header or any payload has already been sent.
func httpError(rsp http.ResponseWriter, err error) {
rsp.Header().Del(contentEncodingHeader)
http.Error(
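A small sketch of serving metrics with HandlerFor and the HandlerOpts fields discussed above; the registry contents, address and log prefix are illustrative. A *log.Logger satisfies the Logger interface, and ContinueOnError serves whatever could be gathered while reporting errors to ErrorLog:

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	// Errors (often a prometheus.MultiError) are logged instead of aborting the scrape.
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling: promhttp.ContinueOnError,
	})
	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":9100", nil))
}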

View File

@ -43,14 +43,14 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
// The ObserverVec must have valid metric and label names and must have zero,
// one, or two non-const non-curried labels. For those, the only allowed label
// names are "code" and "method". The function panics otherwise. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -79,12 +79,13 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
// to observe the request result with the provided CounterVec. The CounterVec
// must have zero, one, or two non-const non-curried labels. For those, the only
// allowed label names are "code" and "method". The function panics
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
// HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
// to observe the request result with the provided CounterVec. The CounterVec
// must have valid metric and label names and must have zero, one, or two
// non-const non-curried labels. For those, the only allowed label names are
// "code" and "method". The function panics otherwise. Partitioning of the
// CounterVec happens by HTTP status code and/or HTTP method if the respective
// instance label names are present in the CounterVec. For unpartitioned
// counting, use a CounterVec with zero labels.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -110,14 +111,15 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
// until the response headers are written. The ObserverVec must have valid
// metric and label names and must have zero, one, or two non-const non-curried
// labels. For those, the only allowed label names are "code" and "method". The
// function panics otherwise. The Observe method of the Observer in the
// ObserverVec is called with the request duration in seconds. Partitioning
// happens by HTTP status code and/or HTTP method if the respective instance
// label names are present in the ObserverVec. For unpartitioned observations,
// use an ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler panics before calling WriteHeader, no value is
// reported.
@ -139,15 +141,15 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -174,15 +176,15 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the response size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the response size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -198,6 +200,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
})
}
// checkLabels returns whether the provided Collector has a non-const,
// non-curried label named "code" and/or "method". It panics if the provided
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
func checkLabels(c prometheus.Collector) (code bool, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
@ -225,6 +232,10 @@ func checkLabels(c prometheus.Collector) (code bool, method bool) {
close(descc)
// Make sure the Collector has a valid Desc by registering it with a
// temporary registry.
prometheus.NewRegistry().MustRegister(c)
// Create a ConstMetric with the Desc. Since we don't know how many
// variable labels there are, try for as long as it needs.
for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
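A sketch of chaining the instrumentation middlewares documented above around a hypothetical handler; metric names and the listen address are made up. Each wrapper partitions by the "code" and "method" labels when those labels are present on the supplied collector:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_in_flight_requests", Help: "In-flight HTTP requests.",
	})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "myapp_request_duration_seconds", Help: "Request latency.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "myapp_requests_total", Help: "Requests by code and method.",
	}, []string{"code", "method"})
	prometheus.MustRegister(inFlight, duration, counter)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Wrap innermost to outermost: counter, then duration, then in-flight gauge.
	chained := promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerDuration(duration,
			promhttp.InstrumentHandlerCounter(counter, api)))

	http.Handle("/", chained)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}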

View File

@ -26,6 +26,7 @@ import (
"unicode/utf8"
"github.com/cespare/xxhash/v2"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/expfmt"
@ -214,6 +215,8 @@ func (err AlreadyRegisteredError) Error() string {
// by a Gatherer to report multiple errors during MetricFamily gathering.
type MultiError []error
// Error formats the contained errors as a bullet point list, preceded by the
// total number of errors. Note that this results in a multi-line string.
func (errs MultiError) Error() string {
if len(errs) == 0 {
return ""

View File

@ -23,6 +23,7 @@ import (
"time"
"github.com/beorn7/perks/quantile"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@ -109,7 +110,7 @@ type SummaryOpts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
@ -207,7 +208,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
desc: desc,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
@ -220,7 +221,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
hotBuf: make([]float64, 0, opts.BufCap),
coldBuf: make([]float64, 0, opts.BufCap),
@ -512,7 +513,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
*metricVec
*MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@ -534,14 +535,14 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Summary for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created.
//
// It is possible to call this method without using the returned Summary to only
@ -556,7 +557,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -565,7 +566,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
@ -573,19 +574,19 @@ func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
}
// GetMetricWith returns the Summary for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
@ -629,7 +630,7 @@ func (v *SummaryVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &SummaryVec{vec}, err
}
@ -715,7 +716,7 @@ func NewConstSummary(
count: count,
sum: sum,
quantiles: quantiles,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}

View File

@ -19,6 +19,7 @@ import (
"time"
"unicode/utf8"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
@ -62,7 +63,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
desc: desc,
valType: valueType,
function: function,
labelPairs: makeLabelPairs(desc, nil),
labelPairs: MakeLabelPairs(desc, nil),
}
result.init(result)
return result
@ -94,7 +95,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
desc: desc,
valType: valueType,
val: value,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}
@ -144,7 +145,14 @@ func populateMetric(
return nil
}
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
// variable and constant labels in the provided Desc. The values for the
// variable labels are defined by the labelValues slice, which must be in the
// same order as the corresponding variable labels in the Desc.
//
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.

View File

@ -20,12 +20,20 @@ import (
"github.com/prometheus/common/model"
)
// metricVec is a Collector to bundle metrics of the same name that differ in
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
// It also handles label currying.
type metricVec struct {
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
// used for custom Metric implementations.
//
// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
// FooVec and initialize it with NewMetricVec. Implement wrappers for
// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
// add the convenience methods WithLabelValues, With, and MustCurryWith, which
// panic instead of returning errors. See also the MetricVec example.
type MetricVec struct {
*metricMap
curry []curriedLabelValue
@ -35,9 +43,9 @@ type metricVec struct {
hashAddByte func(h uint64, b byte) uint64
}
// newMetricVec returns an initialized metricVec.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
return &metricVec{
// NewMetricVec returns an initialized metricVec.
func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &MetricVec{
metricMap: &metricMap{
metrics: map[uint64][]metricWithLabelValues{},
desc: desc,
@ -63,7 +71,7 @@ func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@ -82,7 +90,7 @@ func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *metricVec) Delete(labels Labels) bool {
func (m *MetricVec) Delete(labels Labels) bool {
h, err := m.hashLabels(labels)
if err != nil {
return false
@ -95,15 +103,32 @@ func (m *metricVec) Delete(labels Labels) bool {
// show up in GoDoc.
// Describe implements Collector.
func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
// Collect implements Collector.
func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
// Reset deletes all metrics in this vector.
func (m *metricVec) Reset() { m.metricMap.Reset() }
func (m *MetricVec) Reset() { m.metricMap.Reset() }
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the MetricVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
//
// Note that CurryWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
var (
newCurry []curriedLabelValue
oldCurry = m.curry
@ -128,7 +153,7 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
}
return &metricVec{
return &MetricVec{
metricMap: m.metricMap,
curry: newCurry,
hashAdd: m.hashAdd,
@ -136,7 +161,34 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
}, nil
}
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created (by
// calling the newMetric function provided during construction of the
// MetricVec).
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it in its initial state.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
//
// Note that GetMetricWithLabelValues is usually not called directly but through
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@ -145,7 +197,23 @@ func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
// GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
//
// Note that GetMetricWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@ -154,7 +222,7 @@ func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
@ -177,7 +245,7 @@ func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
return h, nil
}
func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
@ -276,7 +344,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
}
if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else {
delete(m.metrics, h)
}
@ -302,7 +372,9 @@ func (m *metricMap) deleteByHashWithLabels(
}
if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else {
delete(m.metrics, h)
}
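Editor's sketch (not part of this diff): the embedding pattern described in the MetricVec doc comment above, using the newly exported NewMetricVec and GetMetricWithLabelValues. InfoVec and its gauge-at-1 children are made-up names for illustration only.

package custommetrics

import "github.com/prometheus/client_golang/prometheus"

// InfoVec is a hypothetical vector type built on the exported MetricVec.
// Each child is a constant gauge fixed at 1, created lazily per label-value
// combination by the factory passed to NewMetricVec.
type InfoVec struct {
    *prometheus.MetricVec
}

func NewInfoVec(name, help string, labelNames []string) *InfoVec {
    desc := prometheus.NewDesc(name, help, labelNames, nil)
    return &InfoVec{
        MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
            return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, lvs...)
        }),
    }
}

// WithLabelValues mirrors the convenience method of the built-in vectors and
// panics on a label-count mismatch.
func (v *InfoVec) WithLabelValues(lvs ...string) prometheus.Metric {
    m, err := v.GetMetricWithLabelValues(lvs...)
    if err != nil {
        panic(err)
    }
    return m
}

A CurryWith wrapper would follow the same shape, converting the returned *MetricVec back into the custom type before handing it to callers.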

View File

@ -17,6 +17,7 @@ import (
"fmt"
"sort"
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
@ -27,10 +28,13 @@ import (
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
// duplicate any of those labels. Wrapping a nil value is valid, resulting
// in a no-op Registerer.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed.
// Collectors. It should not be used to add fixed labels to all metrics
// exposed. See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be
@ -50,6 +54,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided prefix to the name of all Metrics it collects.
// Wrapping a nil value is valid, resulting in a no-op Registerer.
//
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
// a sub-system. To make this work, register metrics of the sub-system with the
@ -80,6 +85,9 @@ type wrappingRegisterer struct {
}
func (r *wrappingRegisterer) Register(c Collector) error {
if r.wrappedRegisterer == nil {
return nil
}
return r.wrappedRegisterer.Register(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
@ -88,6 +96,9 @@ func (r *wrappingRegisterer) Register(c Collector) error {
}
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
if r.wrappedRegisterer == nil {
return
}
for _, c := range cs {
if err := r.Register(c); err != nil {
panic(err)
@ -96,6 +107,9 @@ func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
}
func (r *wrappingRegisterer) Unregister(c Collector) bool {
if r.wrappedRegisterer == nil {
return false
}
return r.wrappedRegisterer.Unregister(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
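Editor's sketch (not part of this diff): typical use of the wrapping registerers, whose nil handling this diff turns into a no-op. The "myapp_" prefix and "subsystem" label are made-up values.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
    // Every metric registered through reg carries the const label
    // subsystem="cache" and the name prefix "myapp_".
    reg := prometheus.WrapRegistererWithPrefix("myapp_",
        prometheus.WrapRegistererWith(
            prometheus.Labels{"subsystem": "cache"},
            prometheus.DefaultRegisterer,
        ),
    )

    requests := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "requests_total",
        Help: "Requests handled by the cache subsystem.",
    })
    reg.MustRegister(requests) // exposed as myapp_requests_total{subsystem="cache"}
    requests.Inc()
}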

View File

@ -164,7 +164,7 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
}
// ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurrs during sample extraction, it continues to
// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
// error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {

View File

@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
return nil
}
// Check for duplicate label names.
labels := make(map[string]struct{})
for _, l := range p.currentMetric.Label {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
return nil
}
}
return p.startLabelValue
}
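Editor's sketch (not part of this diff): the duplicate-label check added above surfaces as a parse error from TextToMetricFamilies. The exposition text is a made-up example.

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/common/expfmt"
)

func main() {
    // The label name "path" appears twice on the same sample; the upgraded
    // parser now rejects this instead of silently accepting it.
    input := `# TYPE http_requests_total counter
http_requests_total{path="/",path="/metrics"} 42
`
    var parser expfmt.TextParser
    _, err := parser.TextToMetricFamilies(strings.NewReader(input))
    fmt.Println(err) // prints a parse error mentioning duplicate label names
}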

View File

@ -20,7 +20,7 @@ const (
prime64 = 1099511628211
)
// hashNew initializies a new fnv64a hash value.
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}

View File

@ -181,73 +181,89 @@ func (d *Duration) Type() string {
return "duration"
}
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
switch durationStr {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, fmt.Errorf("empty duration string")
}
matches := durationRE.FindStringSubmatch(durationStr)
if len(matches) != 3 {
if matches == nil {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
}
var (
n, _ = strconv.Atoi(matches[1])
dur = time.Duration(n) * time.Millisecond
)
switch unit := matches[2]; unit {
case "y":
dur *= 1000 * 60 * 60 * 24 * 365
case "w":
dur *= 1000 * 60 * 60 * 24 * 7
case "d":
dur *= 1000 * 60 * 60 * 24
case "h":
dur *= 1000 * 60 * 60
case "m":
dur *= 1000 * 60
case "s":
dur *= 1000
case "ms":
// Value already correct
default:
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
var dur time.Duration
// Parse the match at pos `pos` in the regex and use `mult` to turn that
// into ms, then add that value to the total parsed duration.
m := func(pos int, mult time.Duration) {
if matches[pos] == "" {
return
}
n, _ := strconv.Atoi(matches[pos])
d := time.Duration(n) * time.Millisecond
dur += d * mult
}
m(2, 1000*60*60*24*365) // y
m(4, 1000*60*60*24*7) // w
m(6, 1000*60*60*24) // d
m(8, 1000*60*60) // h
m(10, 1000*60) // m
m(12, 1000) // s
m(14, 1) // ms
return Duration(dur), nil
}
func (d Duration) String() string {
var (
ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms"
ms = int64(time.Duration(d) / time.Millisecond)
r = ""
)
if ms == 0 {
return "0s"
}
factors := map[string]int64{
"y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7,
"d": 1000 * 60 * 60 * 24,
"h": 1000 * 60 * 60,
"m": 1000 * 60,
"s": 1000,
"ms": 1,
f := func(unit string, mult int64, exact bool) {
if exact && ms%mult != 0 {
return
}
if v := ms / mult; v > 0 {
r += fmt.Sprintf("%d%s", v, unit)
ms -= v * mult
}
}
switch int64(0) {
case ms % factors["y"]:
unit = "y"
case ms % factors["w"]:
unit = "w"
case ms % factors["d"]:
unit = "d"
case ms % factors["h"]:
unit = "h"
case ms % factors["m"]:
unit = "m"
case ms % factors["s"]:
unit = "s"
}
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
// Only format years and weeks if the remainder is zero, as it is often
// easier to read 90d than 12w6d.
f("y", 1000*60*60*24*365, true)
f("w", 1000*60*60*24*7, true)
f("d", 1000*60*60*24, false)
f("h", 1000*60*60, false)
f("m", 1000*60, false)
f("s", 1000, false)
f("ms", 1, false)
return r
}
// MarshalText implements the encoding.TextMarshaler interface.
func (d *Duration) MarshalText() ([]byte, error) {
return []byte(d.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (d *Duration) UnmarshalText(text []byte) error {
var err error
*d, err = ParseDuration(string(text))
return err
}
// MarshalYAML implements the yaml.Marshaler interface.
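Editor's sketch (not part of this diff): the widened regexp lets ParseDuration accept compound values, and the rewritten String folds them back in the same spirit.

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    // The upgraded pattern accepts compound durations such as "1h30m",
    // which the previous single-unit regexp rejected.
    d, err := model.ParseDuration("1h30m")
    if err != nil {
        panic(err)
    }
    fmt.Println(d) // 1h30m

    // String only folds into weeks or years when they divide evenly,
    // so 90 days stays "90d" rather than "12w6d".
    d, _ = model.ParseDuration("90d")
    fmt.Println(d) // 90d
}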

View File

@ -1,4 +1,4 @@
---
linters:
enable:
- staticcheck
- govet
- golint

View File

@ -0,0 +1,3 @@
## Prometheus Community Code of Conduct
Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View File

@ -69,12 +69,21 @@ else
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
endif
PROMU_VERSION ?= 0.4.0
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell which gotestsum),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
PROMU_VERSION ?= 0.7.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.16.0
GOLANGCI_LINT_VERSION ?= v1.18.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -86,7 +95,8 @@ endif
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
DOCKERFILE_PATH ?= ./
DOCKERFILE_PATH ?= ./Dockerfile
DOCKERBUILD_CONTEXT ?= ./
DOCKER_REPO ?= prom
DOCKER_ARCHS ?= amd64
@ -140,15 +150,29 @@ else
$(GO) get $(GOOPTS) -t ./...
endif
.PHONY: update-go-deps
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
$(GO) get $$m; \
done
GO111MODULE=$(GO111MODULE) $(GO) mod tidy
ifneq (,$(wildcard vendor))
GO111MODULE=$(GO111MODULE) $(GO) mod vendor
endif
.PHONY: common-test-short
common-test-short:
common-test-short: $(GOTEST_DIR)
@echo ">> running short tests"
GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
.PHONY: common-test
common-test:
common-test: $(GOTEST_DIR)
@echo ">> running all tests"
GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
$(GOTEST_DIR):
@mkdir -p $@
.PHONY: common-format
common-format:
@ -200,7 +224,7 @@ endif
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
.PHONY: common-tarball
common-tarball: promu
@ -211,19 +235,22 @@ common-tarball: promu
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
$(DOCKERFILE_PATH)
$(DOCKERBUILD_CONTEXT)
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:

6
vendor/github.com/prometheus/procfs/SECURITY.md generated vendored Normal file
View File

@ -0,0 +1,6 @@
# Reporting a security issue
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
https://prometheus.io/docs/operating/security/

View File

@ -36,7 +36,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@ -59,7 +59,7 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
}
entries = append(entries, entry)
} else {

View File

@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
}
}

View File

@ -11,11 +11,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
import (
"bufio"
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
@ -52,6 +57,11 @@ type CPUInfo struct {
PowerManagement string
}
var (
cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
)
// CPUInfo returns information about current system CPUs.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) CPUInfo() ([]CPUInfo, error) {
@ -62,14 +72,26 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) {
return parseCPUInfo(data)
}
// parseCPUInfo parses data from /proc/cpuinfo
func parseCPUInfo(info []byte) ([]CPUInfo, error) {
cpuinfo := []CPUInfo{}
i := -1
func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) == "" {
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
@ -82,7 +104,7 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "vendor_id":
case "vendor", "vendor_id":
cpuinfo[i].VendorID = field[1]
case "cpu family":
cpuinfo[i].CPUFamily = field[1]
@ -163,5 +185,297 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
}
}
return cpuinfo, nil
}
func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
featuresLine := ""
commonCPUInfo := CPUInfo{}
i := 0
if strings.TrimSpace(field[0]) == "Processor" {
commonCPUInfo = CPUInfo{ModelName: field[1]}
i = -1
} else {
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo = []CPUInfo{firstcpu}
}
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "BogoMIPS":
if i == -1 {
cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
i++
cpuinfo[i].Processor = 0
}
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
case "Features":
featuresLine = line
case "model name":
cpuinfo[i].ModelName = field[1]
}
}
fields := strings.SplitN(featuresLine, ": ", 2)
for i := range cpuinfo {
cpuinfo[i].Flags = strings.Fields(fields[1])
}
return cpuinfo, nil
}
func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
commonCPUInfo := CPUInfo{VendorID: field[1]}
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "bogomips per cpu":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
commonCPUInfo.BogoMips = v
case "features":
commonCPUInfo.Flags = strings.Fields(field[1])
}
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
if err != nil {
return nil, err
}
cpu.Processor = uint(v)
cpuinfo = append(cpuinfo, cpu)
}
if strings.HasPrefix(line, "cpu number") {
break
}
}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "cpu number":
i++
case "cpu MHz dynamic":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
}
}
return cpuinfo, nil
}
func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
systemType := field[1]
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
cpuinfo[i].VendorID = systemType
case "cpu model":
cpuinfo[i].ModelName = field[1]
case "BogoMIPS":
v, err := strconv.ParseFloat(field[1], 64)
if err != nil {
return nil, err
}
cpuinfo[i].BogoMips = v
}
}
return cpuinfo, nil
}
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
i++
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Processor = uint(v)
case "cpu":
cpuinfo[i].VendorID = field[1]
case "clock":
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
v, err := strconv.ParseFloat(clock, 64)
if err != nil {
return nil, err
}
cpuinfo[i].CPUMHz = v
}
}
return cpuinfo, nil
}
func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
firstcpu := CPUInfo{Processor: uint(v)}
cpuinfo := []CPUInfo{firstcpu}
i := 0
for scanner.Scan() {
line := scanner.Text()
if !strings.Contains(line, ":") {
continue
}
field := strings.SplitN(line, ": ", 2)
switch strings.TrimSpace(field[0]) {
case "processor":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
i = int(v)
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
cpuinfo[i].Processor = uint(v)
case "hart":
cpuinfo[i].CoreID = field[1]
case "isa":
cpuinfo[i].ModelName = field[1]
}
}
return cpuinfo, nil
}
func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
return nil, errors.New("not implemented")
}
// firstNonEmptyLine advances the scanner to the first non-empty line
// and returns the contents of that line
func firstNonEmptyLine(scanner *bufio.Scanner) string {
for scanner.Scan() {
line := scanner.Text()
if strings.TrimSpace(line) != "" {
return line
}
}
return ""
}
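Editor's sketch (not part of this diff): the architecture-specific parsers above are selected at build time (see the cpuinfo_*.go files that follow) and reached through the public FS.CPUInfo method; this only builds on Linux.

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    cpus, err := fs.CPUInfo() // dispatches to the architecture-specific parser
    if err != nil {
        panic(err)
    }
    for _, c := range cpus {
        fmt.Printf("cpu %d: %s @ %.0f MHz\n", c.Processor, c.ModelName, c.CPUMHz)
    }
}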

19
vendor/github.com/prometheus/procfs/cpuinfo_armx.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build arm arm64
package procfs
var parseCPUInfo = parseCPUInfoARM

19
vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build mips mipsle mips64 mips64le
package procfs
var parseCPUInfo = parseCPUInfoMips

19
vendor/github.com/prometheus/procfs/cpuinfo_others.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
var parseCPUInfo = parseCPUInfoDummy

19
vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build ppc64 ppc64le
package procfs
var parseCPUInfo = parseCPUInfoPPC

19
vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build riscv riscv64
package procfs
var parseCPUInfo = parseCPUInfoRISCV

18
vendor/github.com/prometheus/procfs/cpuinfo_s390x.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package procfs
var parseCPUInfo = parseCPUInfoS390X

19
vendor/github.com/prometheus/procfs/cpuinfo_x86.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build 386 amd64
package procfs
var parseCPUInfo = parseCPUInfoX86

View File

@ -14,10 +14,10 @@
package procfs
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"strconv"
"io"
"strings"
"github.com/prometheus/procfs/internal/util"
@ -52,80 +52,102 @@ type Crypto struct {
// structs containing the relevant info. More information available here:
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
func (fs FS) Crypto() ([]Crypto, error) {
data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
}
crypto, err := parseCrypto(data)
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
}
return crypto, nil
}
func parseCrypto(cryptoData []byte) ([]Crypto, error) {
crypto := []Crypto{}
// parseCrypto parses a /proc/crypto stream into Crypto elements.
func parseCrypto(r io.Reader) ([]Crypto, error) {
var out []Crypto
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
for _, block := range cryptoBlocks {
var newCryptoElem Crypto
lines := strings.Split(string(block), "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || line[0] == ' ' {
continue
}
fields := strings.Split(line, ":")
key := strings.TrimSpace(fields[0])
value := strings.TrimSpace(fields[1])
vp := util.NewValueParser(value)
switch strings.TrimSpace(key) {
case "async":
b, err := strconv.ParseBool(value)
if err == nil {
newCryptoElem.Async = b
}
case "blocksize":
newCryptoElem.Blocksize = vp.PUInt64()
case "chunksize":
newCryptoElem.Chunksize = vp.PUInt64()
case "digestsize":
newCryptoElem.Digestsize = vp.PUInt64()
case "driver":
newCryptoElem.Driver = value
case "geniv":
newCryptoElem.Geniv = value
case "internal":
newCryptoElem.Internal = value
case "ivsize":
newCryptoElem.Ivsize = vp.PUInt64()
case "maxauthsize":
newCryptoElem.Maxauthsize = vp.PUInt64()
case "max keysize":
newCryptoElem.MaxKeysize = vp.PUInt64()
case "min keysize":
newCryptoElem.MinKeysize = vp.PUInt64()
case "module":
newCryptoElem.Module = value
case "name":
newCryptoElem.Name = value
case "priority":
newCryptoElem.Priority = vp.PInt64()
case "refcnt":
newCryptoElem.Refcnt = vp.PInt64()
case "seedsize":
newCryptoElem.Seedsize = vp.PUInt64()
case "selftest":
newCryptoElem.Selftest = value
case "type":
newCryptoElem.Type = value
case "walksize":
newCryptoElem.Walksize = vp.PUInt64()
}
s := bufio.NewScanner(r)
for s.Scan() {
text := s.Text()
switch {
case strings.HasPrefix(text, "name"):
// Each crypto element begins with its name.
out = append(out, Crypto{})
case text == "":
continue
}
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
}
k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])
// Parse the key/value pair into the currently focused element.
c := &out[len(out)-1]
if err := c.parseKV(k, v); err != nil {
return nil, err
}
crypto = append(crypto, newCryptoElem)
}
return crypto, nil
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
// parseKV parses a key/value pair into the appropriate field of c.
func (c *Crypto) parseKV(k, v string) error {
vp := util.NewValueParser(v)
switch k {
case "async":
// Interpret literal yes as true.
c.Async = v == "yes"
case "blocksize":
c.Blocksize = vp.PUInt64()
case "chunksize":
c.Chunksize = vp.PUInt64()
case "digestsize":
c.Digestsize = vp.PUInt64()
case "driver":
c.Driver = v
case "geniv":
c.Geniv = v
case "internal":
c.Internal = v
case "ivsize":
c.Ivsize = vp.PUInt64()
case "maxauthsize":
c.Maxauthsize = vp.PUInt64()
case "max keysize":
c.MaxKeysize = vp.PUInt64()
case "min keysize":
c.MinKeysize = vp.PUInt64()
case "module":
c.Module = v
case "name":
c.Name = v
case "priority":
c.Priority = vp.PInt64()
case "refcnt":
c.Refcnt = vp.PInt64()
case "seedsize":
c.Seedsize = vp.PUInt64()
case "selftest":
c.Selftest = v
case "type":
c.Type = v
case "walksize":
c.Walksize = vp.PUInt64()
}
return vp.Err()
}
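Editor's sketch (not part of this diff): the rewritten /proc/crypto parser is reached through FS.Crypto; the fields printed below are plain string fields of the Crypto struct.

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    entries, err := fs.Crypto() // parsed line by line via parseKV
    if err != nil {
        panic(err)
    }
    for _, c := range entries {
        fmt.Printf("%s (driver %s, module %s)\n", c.Name, c.Driver, c.Module)
    }
}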

File diff suppressed because it is too large Load Diff

422
vendor/github.com/prometheus/procfs/fscache.go generated vendored Normal file
View File

@ -0,0 +1,422 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Fscacheinfo represents fscache statistics.
type Fscacheinfo struct {
// Number of index cookies allocated
IndexCookiesAllocated uint64
// Number of data storage cookies allocated
DataStorageCookiesAllocated uint64
// Number of special cookies allocated
SpecialCookiesAllocated uint64
// Number of objects allocated
ObjectsAllocated uint64
// Number of object allocation failures
ObjectAllocationsFailure uint64
// Number of objects that reached the available state
ObjectsAvailable uint64
// Number of objects that reached the dead state
ObjectsDead uint64
// Number of objects that didn't have a coherency check
ObjectsWithoutCoherencyCheck uint64
// Number of objects that passed a coherency check
ObjectsWithCoherencyCheck uint64
// Number of objects that needed a coherency data update
ObjectsNeedCoherencyCheckUpdate uint64
// Number of objects that were declared obsolete
ObjectsDeclaredObsolete uint64
// Number of pages marked as being cached
PagesMarkedAsBeingCached uint64
// Number of uncache page requests seen
UncachePagesRequestSeen uint64
// Number of acquire cookie requests seen
AcquireCookiesRequestSeen uint64
// Number of acq reqs given a NULL parent
AcquireRequestsWithNullParent uint64
// Number of acq reqs rejected due to no cache available
AcquireRequestsRejectedNoCacheAvailable uint64
// Number of acq reqs succeeded
AcquireRequestsSucceeded uint64
// Number of acq reqs rejected due to error
AcquireRequestsRejectedDueToError uint64
// Number of acq reqs failed on ENOMEM
AcquireRequestsFailedDueToEnomem uint64
// Number of lookup calls made on cache backends
LookupsNumber uint64
// Number of negative lookups made
LookupsNegative uint64
// Number of positive lookups made
LookupsPositive uint64
// Number of objects created by lookup
ObjectsCreatedByLookup uint64
// Number of lookups timed out and requeued
LookupsTimedOutAndRequed uint64
InvalidationsNumber uint64
InvalidationsRunning uint64
// Number of update cookie requests seen
UpdateCookieRequestSeen uint64
// Number of upd reqs given a NULL parent
UpdateRequestsWithNullParent uint64
// Number of upd reqs granted CPU time
UpdateRequestsRunning uint64
// Number of relinquish cookie requests seen
RelinquishCookiesRequestSeen uint64
// Number of rlq reqs given a NULL parent
RelinquishCookiesWithNullParent uint64
// Number of rlq reqs waited on completion of creation
RelinquishRequestsWaitingCompleteCreation uint64
// Relinqs rtr
RelinquishRetries uint64
// Number of attribute changed requests seen
AttributeChangedRequestsSeen uint64
// Number of attr changed requests queued
AttributeChangedRequestsQueued uint64
// Number of attr changed rejected -ENOBUFS
AttributeChangedRejectDueToEnobufs uint64
// Number of attr changed failed -ENOMEM
AttributeChangedFailedDueToEnomem uint64
// Number of attr changed ops given CPU time
AttributeChangedOps uint64
// Number of allocation requests seen
AllocationRequestsSeen uint64
// Number of successful alloc reqs
AllocationOkRequests uint64
// Number of alloc reqs that waited on lookup completion
AllocationWaitingOnLookup uint64
// Number of alloc reqs rejected -ENOBUFS
AllocationsRejectedDueToEnobufs uint64
// Number of alloc reqs aborted -ERESTARTSYS
AllocationsAbortedDueToErestartsys uint64
// Number of alloc reqs submitted
AllocationOperationsSubmitted uint64
// Number of alloc reqs waited for CPU time
AllocationsWaitedForCPU uint64
// Number of alloc reqs aborted due to object death
AllocationsAbortedDueToObjectDeath uint64
// Number of retrieval (read) requests seen
RetrievalsReadRequests uint64
// Number of successful retr reqs
RetrievalsOk uint64
// Number of retr reqs that waited on lookup completion
RetrievalsWaitingLookupCompletion uint64
// Number of retr reqs returned -ENODATA
RetrievalsReturnedEnodata uint64
// Number of retr reqs rejected -ENOBUFS
RetrievalsRejectedDueToEnobufs uint64
// Number of retr reqs aborted -ERESTARTSYS
RetrievalsAbortedDueToErestartsys uint64
// Number of retr reqs failed -ENOMEM
RetrievalsFailedDueToEnomem uint64
// Number of retr reqs submitted
RetrievalsRequests uint64
// Number of retr reqs waited for CPU time
RetrievalsWaitingCPU uint64
// Number of retr reqs aborted due to object death
RetrievalsAbortedDueToObjectDeath uint64
// Number of storage (write) requests seen
StoreWriteRequests uint64
// Number of successful store reqs
StoreSuccessfulRequests uint64
// Number of store reqs on a page already pending storage
StoreRequestsOnPendingStorage uint64
// Number of store reqs rejected -ENOBUFS
StoreRequestsRejectedDueToEnobufs uint64
// Number of store reqs failed -ENOMEM
StoreRequestsFailedDueToEnomem uint64
// Number of store reqs submitted
StoreRequestsSubmitted uint64
// Number of store reqs granted CPU time
StoreRequestsRunning uint64
// Number of pages given store req processing time
StorePagesWithRequestsProcessing uint64
// Number of store reqs deleted from tracking tree
StoreRequestsDeleted uint64
// Number of store reqs over store limit
StoreRequestsOverStoreLimit uint64
// Number of release reqs against pages with no pending store
ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
// Number of release reqs against pages stored by time lock granted
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
// Number of page stores cancelled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
OpsPending uint64
// Number of times async ops given CPU time
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
// Number of async ops cancelled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
// Number of async ops initialised
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
// Number of async ops released (should equal ini=N when idle)
OpsReleased uint64
// Number of deferred-release async ops garbage collected
OpsGarbageCollected uint64
// Number of in-progress alloc_object() cache ops
CacheopAllocationsinProgress uint64
// Number of in-progress lookup_object() cache ops
CacheopLookupObjectInProgress uint64
// Number of in-progress lookup_complete() cache ops
CacheopLookupCompleteInPorgress uint64
// Number of in-progress grab_object() cache ops
CacheopGrabObjectInProgress uint64
CacheopInvalidations uint64
// Number of in-progress update_object() cache ops
CacheopUpdateObjectInProgress uint64
// Number of in-progress drop_object() cache ops
CacheopDropObjectInProgress uint64
// Number of in-progress put_object() cache ops
CacheopPutObjectInProgress uint64
// Number of in-progress attr_changed() cache ops
CacheopAttributeChangeInProgress uint64
// Number of in-progress sync_cache() cache ops
CacheopSyncCacheInProgress uint64
// Number of in-progress read_or_alloc_page() cache ops
CacheopReadOrAllocPageInProgress uint64
// Number of in-progress read_or_alloc_pages() cache ops
CacheopReadOrAllocPagesInProgress uint64
// Number of in-progress allocate_page() cache ops
CacheopAllocatePageInProgress uint64
// Number of in-progress allocate_pages() cache ops
CacheopAllocatePagesInProgress uint64
// Number of in-progress write_page() cache ops
CacheopWritePagesInProgress uint64
// Number of in-progress uncache_page() cache ops
CacheopUncachePagesInProgress uint64
// Number of in-progress dissociate_pages() cache ops
CacheopDissociatePagesInProgress uint64
// Number of object lookups/creations rejected due to lack of space
CacheevLookupsAndCreationsRejectedLackSpace uint64
// Number of stale objects deleted
CacheevStaleObjectsDeleted uint64
// Number of objects retired when relinquished
CacheevRetiredWhenReliquished uint64
// Number of objects culled
CacheevObjectsCulled uint64
}
// Fscacheinfo returns information about current fscache statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
if err != nil {
return Fscacheinfo{}, err
}
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
}
return *m, nil
}
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
}
for i := range setFields {
*setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
if err != nil {
return err
}
}
return nil
}
func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
var m Fscacheinfo
s := bufio.NewScanner(r)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
}
switch fields[0] {
case "Cookies:":
err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
&m.SpecialCookiesAllocated)
if err != nil {
return &m, err
}
case "Objects:":
err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
&m.ObjectsAvailable, &m.ObjectsDead)
if err != nil {
return &m, err
}
case "ChkAux":
err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
&m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
if err != nil {
return &m, err
}
case "Pages":
err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
if err != nil {
return &m, err
}
case "Acquire:":
err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
&m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
&m.AcquireRequestsFailedDueToEnomem)
if err != nil {
return &m, err
}
case "Lookups:":
err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
&m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
if err != nil {
return &m, err
}
case "Invals":
err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
if err != nil {
return &m, err
}
case "Updates:":
err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
&m.UpdateRequestsRunning)
if err != nil {
return &m, err
}
case "Relinqs:":
err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
&m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
if err != nil {
return &m, err
}
case "AttrChg:":
err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
&m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
if err != nil {
return &m, err
}
case "Allocs":
if strings.Split(fields[2], "=")[0] == "n" {
err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
&m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
&m.AllocationsAbortedDueToObjectDeath)
if err != nil {
return &m, err
}
}
case "Retrvls:":
if strings.Split(fields[1], "=")[0] == "n" {
err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
&m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
&m.RetrievalsFailedDueToEnomem)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
if err != nil {
return &m, err
}
}
case "Stores":
if strings.Split(fields[2], "=")[0] == "n" {
err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
&m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
&m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
if err != nil {
return &m, err
}
}
case "VmScan":
err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
&m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
if err != nil {
return &m, err
}
case "Ops":
if strings.Split(fields[2], "=")[0] == "pend" {
err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
if err != nil {
return &m, err
}
}
case "CacheOp:":
if strings.Split(fields[1], "=")[0] == "alo" {
err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
if err != nil {
return &m, err
}
} else if strings.Split(fields[1], "=")[0] == "inv" {
err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
&m.CacheopSyncCacheInProgress)
if err != nil {
return &m, err
}
} else {
err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
if err != nil {
return &m, err
}
}
case "CacheEv:":
err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
if err != nil {
return &m, err
}
}
}
return &m, nil
}
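Editor's sketch (not part of this diff): reading the new fscache statistics through FS.Fscacheinfo; only a couple of the many counters are printed.

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    info, err := fs.Fscacheinfo() // reads /proc/fs/fscache/stats
    if err != nil {
        panic(err)
    }
    fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
    fmt.Println("lookups (total/negative):", info.LookupsNumber, info.LookupsNegative)
}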

View File

@ -1,8 +1,9 @@
module github.com/prometheus/procfs
go 1.12
go 1.13
require (
github.com/google/go-cmp v0.3.1
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
github.com/google/go-cmp v0.5.4
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)

View File

@ -1,4 +1,8 @@
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -39,10 +39,10 @@ type FS string
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
}
return FS(mountPoint), nil

View File

@ -73,6 +73,15 @@ func ReadUintFromFile(path string) (uint64, error) {
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
// ReadIntFromFile reads a file and attempts to parse a int64 from it.
func ReadIntFromFile(path string) (int64, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
}
// ParseBool parses a string into a boolean pointer.
func ParseBool(b string) *bool {
var truth bool

62
vendor/github.com/prometheus/procfs/kernel_random.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"os"
"github.com/prometheus/procfs/internal/util"
)
// KernelRandom contains information about the kernel's random number generator.
type KernelRandom struct {
// EntropyAvaliable gives the available entropy, in bits.
EntropyAvaliable *uint64
// PoolSize gives the size of the entropy pool, in bits.
PoolSize *uint64
// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
URandomMinReseedSeconds *uint64
// WriteWakeupThreshold is the number of bits of entropy below which we wake up processes
// that do a select(2) or poll(2) for write access to /dev/random.
WriteWakeupThreshold *uint64
// ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
// waiting for entropy from /dev/random.
ReadWakeupThreshold *uint64
}
// KernelRandom returns values from /proc/sys/kernel/random.
func (fs FS) KernelRandom() (KernelRandom, error) {
random := KernelRandom{}
for file, p := range map[string]**uint64{
"entropy_avail": &random.EntropyAvaliable,
"poolsize": &random.PoolSize,
"urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
"write_wakeup_threshold": &random.WriteWakeupThreshold,
"read_wakeup_threshold": &random.ReadWakeupThreshold,
} {
val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
if os.IsNotExist(err) {
continue
}
if err != nil {
return random, err
}
*p = &val
}
return random, nil
}
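Editor's sketch (not part of this diff): the KernelRandom fields are pointers because each underlying file under /proc/sys/kernel/random may be missing, so callers should nil-check them.

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    kr, err := fs.KernelRandom()
    if err != nil {
        panic(err)
    }
    if kr.EntropyAvaliable != nil {
        fmt.Println("entropy available:", *kr.EntropyAvaliable)
    }
    if kr.PoolSize != nil {
        fmt.Println("pool size:", *kr.PoolSize)
    }
}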

62
vendor/github.com/prometheus/procfs/loadavg.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// LoadAvg represents an entry in /proc/loadavg
type LoadAvg struct {
Load1 float64
Load5 float64
Load15 float64
}
// LoadAvg returns loadavg from /proc.
func (fs FS) LoadAvg() (*LoadAvg, error) {
path := fs.proc.Path("loadavg")
data, err := util.ReadFileNoStat(path)
if err != nil {
return nil, err
}
return parseLoad(data)
}
// Parse /proc loadavg and return 1m, 5m and 15m.
func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("could not parse load %q: %w", load, err)
}
}
return &LoadAvg{
Load1: loads[0],
Load5: loads[1],
Load15: loads[2],
}, nil
}
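
A short sketch, under the same import assumptions, of reading the new LoadAvg accessor added above:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    load, err := fs.LoadAvg()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("load averages: %.2f %.2f %.2f\n", load.Load1, load.Load5, load.Load15)
}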

View File

@ -22,8 +22,9 @@ import (
)
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
)
// MDStat holds info parsed from /proc/mdstat.
@ -44,6 +45,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// Name of md component devices
Devices []string
}
// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
@ -52,11 +55,11 @@ type MDStat struct {
func (fs FS) MDStat() ([]MDStat, error) {
data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
return nil, err
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@ -82,10 +85,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
return nil, fmt.Errorf(
"error parsing %s: too few lines for md device",
mdName,
)
return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@ -94,7 +94,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %s", err)
return nil, fmt.Errorf("error parsing md device lines: %w", err)
}
syncLineIdx := i + 2
@ -107,11 +107,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
syncedBlocks := size
recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
checking := strings.Contains(lines[syncLineIdx], "check")
// Append recovery and resyncing state info.
if recovering || resyncing {
if recovering || resyncing || checking {
if recovering {
state = "recovering"
} else if checking {
state = "checking"
} else {
state = "resyncing"
}
@ -123,7 +126,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
}
}
}
@ -137,6 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
Devices: evalComponentDevices(deviceFields),
})
}
@ -148,7 +152,7 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@ -168,12 +172,12 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
return active, total, size, nil
@ -187,8 +191,23 @@ func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, nil
}
func evalComponentDevices(deviceFields []string) []string {
mdComponentDevices := make([]string, 0)
if len(deviceFields) > 3 {
for _, field := range deviceFields[4:] {
match := componentDeviceRE.FindStringSubmatch(field)
if match == nil {
continue
}
mdComponentDevices = append(mdComponentDevices, match[1])
}
}
return mdComponentDevices
}
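
The mdstat changes above add a Devices field and a "checking" activity state. A hedged example that reads only the fields visible in this hunk:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    mdstats, err := fs.MDStat()
    if err != nil {
        log.Fatal(err)
    }
    for _, md := range mdstats {
        // Devices carries the names of the md component devices.
        fmt.Printf("disks=%d blocks=%d synced=%d components=%v\n",
            md.DisksTotal, md.BlocksTotal, md.BlocksSynced, md.Devices)
    }
}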

View File

@ -28,9 +28,9 @@ import (
type Meminfo struct {
// Total usable ram (i.e. physical ram minus a few reserved
// bits and the kernel binary code)
MemTotal uint64
MemTotal *uint64
// The sum of LowFree+HighFree
MemFree uint64
MemFree *uint64
// An estimate of how much memory is available for starting
// new applications, without swapping. Calculated from
// MemFree, SReclaimable, the size of the file LRU lists, and
@ -39,59 +39,59 @@ type Meminfo struct {
// well, and that not all reclaimable slab will be
// reclaimable, due to items being in use. The impact of those
// factors will vary from system to system.
MemAvailable uint64
MemAvailable *uint64
// Relatively temporary storage for raw disk blocks shouldn't
// get tremendously large (20MB or so)
Buffers uint64
Cached uint64
Buffers *uint64
Cached *uint64
// Memory that once was swapped out, is swapped back in but
// still also is in the swapfile (if memory is needed it
// doesn't need to be swapped out AGAIN because it is already
// in the swapfile. This saves I/O)
SwapCached uint64
SwapCached *uint64
// Memory that has been used more recently and usually not
// reclaimed unless absolutely necessary.
Active uint64
Active *uint64
// Memory which has been less recently used. It is more
// eligible to be reclaimed for other purposes
Inactive uint64
ActiveAnon uint64
InactiveAnon uint64
ActiveFile uint64
InactiveFile uint64
Unevictable uint64
Mlocked uint64
Inactive *uint64
ActiveAnon *uint64
InactiveAnon *uint64
ActiveFile *uint64
InactiveFile *uint64
Unevictable *uint64
Mlocked *uint64
// total amount of swap space available
SwapTotal uint64
SwapTotal *uint64
// Memory which has been evicted from RAM, and is temporarily
// on the disk
SwapFree uint64
SwapFree *uint64
// Memory which is waiting to get written back to the disk
Dirty uint64
Dirty *uint64
// Memory which is actively being written back to the disk
Writeback uint64
Writeback *uint64
// Non-file backed pages mapped into userspace page tables
AnonPages uint64
AnonPages *uint64
// files which have been mapped, such as libraries
Mapped uint64
Shmem uint64
Mapped *uint64
Shmem *uint64
// in-kernel data structures cache
Slab uint64
Slab *uint64
// Part of Slab, that might be reclaimed, such as caches
SReclaimable uint64
SReclaimable *uint64
// Part of Slab, that cannot be reclaimed on memory pressure
SUnreclaim uint64
KernelStack uint64
SUnreclaim *uint64
KernelStack *uint64
// amount of memory dedicated to the lowest level of page
// tables.
PageTables uint64
PageTables *uint64
// NFS pages sent to the server, but not yet committed to
// stable storage
NFSUnstable uint64
NFSUnstable *uint64
// Memory used for block device "bounce buffers"
Bounce uint64
Bounce *uint64
// Memory used by FUSE for temporary writeback buffers
WritebackTmp uint64
WritebackTmp *uint64
// Based on the overcommit ratio ('vm.overcommit_ratio'),
// this is the total amount of memory currently available to
// be allocated on the system. This limit is only adhered to
@ -105,7 +105,7 @@ type Meminfo struct {
// yield a CommitLimit of 7.3G.
// For more details, see the memory overcommit documentation
// in vm/overcommit-accounting.
CommitLimit uint64
CommitLimit *uint64
// The amount of memory presently allocated on the system.
// The committed memory is a sum of all of the memory which
// has been allocated by processes, even if it has not been
@ -119,27 +119,27 @@ type Meminfo struct {
// This is useful if one needs to guarantee that processes will
// not fail due to lack of memory once that memory has been
// successfully allocated.
CommittedAS uint64
CommittedAS *uint64
// total size of vmalloc memory area
VmallocTotal uint64
VmallocTotal *uint64
// amount of vmalloc area which is used
VmallocUsed uint64
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk uint64
HardwareCorrupted uint64
AnonHugePages uint64
ShmemHugePages uint64
ShmemPmdMapped uint64
CmaTotal uint64
CmaFree uint64
HugePagesTotal uint64
HugePagesFree uint64
HugePagesRsvd uint64
HugePagesSurp uint64
Hugepagesize uint64
DirectMap4k uint64
DirectMap2M uint64
DirectMap1G uint64
VmallocChunk *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
ShmemPmdMapped *uint64
CmaTotal *uint64
CmaFree *uint64
HugePagesTotal *uint64
HugePagesFree *uint64
HugePagesRsvd *uint64
HugePagesSurp *uint64
Hugepagesize *uint64
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
}
// Meminfo returns information about current kernel/system memory statistics.
@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
}
return *m, nil
@ -175,101 +175,101 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
switch fields[0] {
case "MemTotal:":
m.MemTotal = v
m.MemTotal = &v
case "MemFree:":
m.MemFree = v
m.MemFree = &v
case "MemAvailable:":
m.MemAvailable = v
m.MemAvailable = &v
case "Buffers:":
m.Buffers = v
m.Buffers = &v
case "Cached:":
m.Cached = v
m.Cached = &v
case "SwapCached:":
m.SwapCached = v
m.SwapCached = &v
case "Active:":
m.Active = v
m.Active = &v
case "Inactive:":
m.Inactive = v
m.Inactive = &v
case "Active(anon):":
m.ActiveAnon = v
m.ActiveAnon = &v
case "Inactive(anon):":
m.InactiveAnon = v
m.InactiveAnon = &v
case "Active(file):":
m.ActiveFile = v
m.ActiveFile = &v
case "Inactive(file):":
m.InactiveFile = v
m.InactiveFile = &v
case "Unevictable:":
m.Unevictable = v
m.Unevictable = &v
case "Mlocked:":
m.Mlocked = v
m.Mlocked = &v
case "SwapTotal:":
m.SwapTotal = v
m.SwapTotal = &v
case "SwapFree:":
m.SwapFree = v
m.SwapFree = &v
case "Dirty:":
m.Dirty = v
m.Dirty = &v
case "Writeback:":
m.Writeback = v
m.Writeback = &v
case "AnonPages:":
m.AnonPages = v
m.AnonPages = &v
case "Mapped:":
m.Mapped = v
m.Mapped = &v
case "Shmem:":
m.Shmem = v
m.Shmem = &v
case "Slab:":
m.Slab = v
m.Slab = &v
case "SReclaimable:":
m.SReclaimable = v
m.SReclaimable = &v
case "SUnreclaim:":
m.SUnreclaim = v
m.SUnreclaim = &v
case "KernelStack:":
m.KernelStack = v
m.KernelStack = &v
case "PageTables:":
m.PageTables = v
m.PageTables = &v
case "NFS_Unstable:":
m.NFSUnstable = v
m.NFSUnstable = &v
case "Bounce:":
m.Bounce = v
m.Bounce = &v
case "WritebackTmp:":
m.WritebackTmp = v
m.WritebackTmp = &v
case "CommitLimit:":
m.CommitLimit = v
m.CommitLimit = &v
case "Committed_AS:":
m.CommittedAS = v
m.CommittedAS = &v
case "VmallocTotal:":
m.VmallocTotal = v
m.VmallocTotal = &v
case "VmallocUsed:":
m.VmallocUsed = v
m.VmallocUsed = &v
case "VmallocChunk:":
m.VmallocChunk = v
m.VmallocChunk = &v
case "HardwareCorrupted:":
m.HardwareCorrupted = v
m.HardwareCorrupted = &v
case "AnonHugePages:":
m.AnonHugePages = v
m.AnonHugePages = &v
case "ShmemHugePages:":
m.ShmemHugePages = v
m.ShmemHugePages = &v
case "ShmemPmdMapped:":
m.ShmemPmdMapped = v
m.ShmemPmdMapped = &v
case "CmaTotal:":
m.CmaTotal = v
m.CmaTotal = &v
case "CmaFree:":
m.CmaFree = v
m.CmaFree = &v
case "HugePages_Total:":
m.HugePagesTotal = v
m.HugePagesTotal = &v
case "HugePages_Free:":
m.HugePagesFree = v
m.HugePagesFree = &v
case "HugePages_Rsvd:":
m.HugePagesRsvd = v
m.HugePagesRsvd = &v
case "HugePages_Surp:":
m.HugePagesSurp = v
m.HugePagesSurp = &v
case "Hugepagesize:":
m.Hugepagesize = v
m.Hugepagesize = &v
case "DirectMap4k:":
m.DirectMap4k = v
m.DirectMap4k = &v
case "DirectMap2M:":
m.DirectMap2M = v
m.DirectMap2M = &v
case "DirectMap1G:":
m.DirectMap1G = v
m.DirectMap1G = &v
}
}
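
The switch from uint64 to *uint64 is the breaking part of this file: rows missing from /proc/meminfo now show up as nil instead of zero, so callers have to nil-check before dereferencing. An illustrative sketch under the same import assumptions:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    mi, err := fs.Meminfo()
    if err != nil {
        log.Fatal(err)
    }
    // Values are reported in kB, as in /proc/meminfo itself.
    if mi.MemTotal != nil && mi.MemAvailable != nil {
        fmt.Printf("memory: %d kB total, %d kB available\n", *mi.MemTotal, *mi.MemAvailable)
    }
}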

View File

@ -29,10 +29,10 @@ import (
// is described in the following man page.
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
// Unique Id for the mount
MountId int
// The Id of the parent mount
ParentId int
// Unique ID for the mount
MountID int
// The ID of the parent mount
ParentID int
// The value of `st_dev` for the files on this FS
MajorMinorVer string
// The pathname of the directory in the FS that forms
@ -77,7 +77,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo)
if mountInfoLength < 11 {
if mountInfoLength < 10 {
return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
}
@ -96,11 +96,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
}
mount.MountId, err = strconv.Atoi(mountInfo[0])
mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil {
return nil, fmt.Errorf("failed to parse mount ID")
}
mount.ParentId, err = strconv.Atoi(mountInfo[1])
mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil {
return nil, fmt.Errorf("failed to parse parent ID")
}
@ -144,7 +144,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
return optionalFields, nil
}
// Parses the mount options, superblock options.
// mountOptionsParser parses the mount options, superblock options.
func mountOptionsParser(mountOptions string) map[string]string {
opts := make(map[string]string)
options := strings.Split(mountOptions, ",")
@ -161,7 +161,7 @@ func mountOptionsParser(mountOptions string) map[string]string {
return opts
}
// Retrieves mountinfo information from `/proc/self/mountinfo`.
// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
data, err := util.ReadFileNoStat("/proc/self/mountinfo")
if err != nil {
@ -170,7 +170,7 @@ func GetMounts() ([]*MountInfo, error) {
return parseMountInfo(data)
}
// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
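
MountId and ParentId were renamed to MountID and ParentID, so downstream code needs the new spelling. A brief sketch using the package-level GetMounts documented above:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    mounts, err := procfs.GetMounts()
    if err != nil {
        log.Fatal(err)
    }
    for _, m := range mounts {
        fmt.Printf("mount %d (parent %d) on device %s\n", m.MountID, m.ParentID, m.MajorMinorVer)
    }
}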

View File

@ -186,6 +186,8 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
@ -336,12 +338,12 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
if len(ss) == 0 {
break
}
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
switch ss[0] {
case fieldOpts:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
if stats.Opts == nil {
stats.Opts = map[string]string{}
}
@ -354,6 +356,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
}
case fieldAge:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
@ -362,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d
case fieldBytes:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
@ -369,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats
case fieldEvents:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err
@ -494,8 +505,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
const (
// Number of expected fields in each per-operation statistics set
numFields = 9
// Minimum number of expected fields in each per-operation statistics set
minFields = 9
)
var ops []NFSOperationStats
@ -508,12 +519,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
break
}
if len(ss) != numFields {
if len(ss) < minFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
}
// Skip string operation name for integers
ns := make([]uint64, 0, numFields-1)
ns := make([]uint64, 0, minFields-1)
for _, st := range ss[1:] {
n, err := strconv.ParseUint(st, 10, 64)
if err != nil {
@ -523,7 +534,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n)
}
ops = append(ops, NFSOperationStats{
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
@ -533,7 +544,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeQueueMilliseconds: ns[5],
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
})
}
if len(ns) > 8 {
opStats.Errors = ns[8]
}
ops = append(ops, opStats)
}
return ops, s.Err()

View File

@ -0,0 +1,153 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
// and contains netfilter conntrack statistics at one CPU core
type ConntrackStatEntry struct {
Entries uint64
Found uint64
Invalid uint64
Ignore uint64
Insert uint64
InsertFailed uint64
Drop uint64
EarlyDrop uint64
SearchRestart uint64
}
// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
}
// Parses a slice of ConntrackStatEntries from the given filepath
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(path)
if err != nil {
// Do not wrap this error so the caller can detect os.IsNotExist and
// similar conditions.
return nil, err
}
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
}
return stat, nil
}
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
var entries []ConntrackStatEntry
scanner := bufio.NewScanner(r)
scanner.Scan()
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
conntrackEntry, err := parseConntrackStatEntry(fields)
if err != nil {
return nil, err
}
entries = append(entries, *conntrackEntry)
}
return entries, nil
}
// Parses a ConntrackStatEntry from given array of fields
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
if len(fields) != 17 {
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
}
entry := &ConntrackStatEntry{}
entries, err := parseConntrackStatField(fields[0])
if err != nil {
return nil, err
}
entry.Entries = entries
found, err := parseConntrackStatField(fields[2])
if err != nil {
return nil, err
}
entry.Found = found
invalid, err := parseConntrackStatField(fields[4])
if err != nil {
return nil, err
}
entry.Invalid = invalid
ignore, err := parseConntrackStatField(fields[5])
if err != nil {
return nil, err
}
entry.Ignore = ignore
insert, err := parseConntrackStatField(fields[8])
if err != nil {
return nil, err
}
entry.Insert = insert
insertFailed, err := parseConntrackStatField(fields[9])
if err != nil {
return nil, err
}
entry.InsertFailed = insertFailed
drop, err := parseConntrackStatField(fields[10])
if err != nil {
return nil, err
}
entry.Drop = drop
earlyDrop, err := parseConntrackStatField(fields[11])
if err != nil {
return nil, err
}
entry.EarlyDrop = earlyDrop
searchRestart, err := parseConntrackStatField(fields[16])
if err != nil {
return nil, err
}
entry.SearchRestart = searchRestart
return entry, nil
}
// parseConntrackStatField parses a uint64 from the given hex string.
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
}
return val, err
}
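
A sketch of reading the new per-CPU conntrack statistics (import path and mount point assumed as before); the slice index corresponds to the CPU line order in /proc/net/stat/nf_conntrack:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    stats, err := fs.ConntrackStat()
    if err != nil {
        log.Fatal(err)
    }
    for cpu, s := range stats {
        fmt.Printf("cpu%d: entries=%d found=%d drop=%d\n", cpu, s.Entries, s.Found, s.Drop)
    }
}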

220
vendor/github.com/prometheus/procfs/net_ip_socket.go generated vendored Normal file
View File

@ -0,0 +1,220 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
// readLimit is used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
// With e.g. 150 Byte per line and the maximum number of 65535,
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
readLimit = 4294967296 // Byte -> 4 GiB
)
// this contains generic data structures for both udp and tcp sockets
type (
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
NetIPSocket []*netIPSocketLine
// NetIPSocketSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetIPSocket it does not collect
// the parsed lines into a slice.
NetIPSocketSummary struct {
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
TxQueueLength uint64
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
RxQueueLength uint64
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
LocalAddr net.IP
LocalPort uint64
RemAddr net.IP
RemPort uint64
St uint64
TxQueue uint64
RxQueue uint64
UID uint64
}
)
func newNetIPSocket(file string) (NetIPSocket, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocket NetIPSocket
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocket = append(netIPSocket, line)
}
if err := s.Err(); err != nil {
return nil, err
}
return netIPSocket, nil
}
// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
}
if err := s.Err(); err != nil {
return nil, err
}
return &netIPSocketSummary, nil
}
// The /proc/net/{t,u}dp{,6} files store addresses in network byte order for IPv4; for IPv6 the address is four words of four bytes each, and within each word the bytes are written in reverse order.
func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
}
switch len(byteIP) {
case 4:
return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
case 16:
i := net.IP{
byteIP[3], byteIP[2], byteIP[1], byteIP[0],
byteIP[7], byteIP[6], byteIP[5], byteIP[4],
byteIP[11], byteIP[10], byteIP[9], byteIP[8],
byteIP[15], byteIP[14], byteIP[13], byteIP[12],
}
return i, nil
default:
return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 8 {
return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 8 columns %q",
strings.Join(fields, " "),
)
}
var err error // parse error
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in socket line as it has a missing colon %q",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
}
return line, nil
}

180
vendor/github.com/prometheus/procfs/net_protocols.go generated vendored Normal file
View File

@ -0,0 +1,180 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// NetProtocolStats stores the contents from /proc/net/protocols
type NetProtocolStats map[string]NetProtocolStatLine
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
// only care about the first six columns as the rest are not likely to change
// and only serve to provide a set of capabilities for each protocol.
type NetProtocolStatLine struct {
Name string // 0 The name of the protocol
Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
Sockets int64 // 2 Number of sockets in use by this protocol
Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure.
MaxHeader uint64 // 5 Protocol specific max header size
Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
Capabilities NetProtocolCapabilities
}
// NetProtocolCapabilities contains a list of capabilities for each protocol
type NetProtocolCapabilities struct {
Close bool // 8
Connect bool // 9
Disconnect bool // 10
Accept bool // 11
IoCtl bool // 12
Init bool // 13
Destroy bool // 14
Shutdown bool // 15
SetSockOpt bool // 16
GetSockOpt bool // 17
SendMsg bool // 18
RecvMsg bool // 19
SendPage bool // 20
Bind bool // 21
BacklogRcv bool // 22
Hash bool // 23
UnHash bool // 24
GetPort bool // 25
EnterMemoryPressure bool // 26
}
// NetProtocols reads stats from /proc/net/protocols and returns a map of
// NetProtocolStatLine entries. As of this writing no official Linux documentation
// exists; however, the source is fairly self-explanatory and the format seems
// stable since its introduction in 2.6.12-rc2.
// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
func (fs FS) NetProtocols() (NetProtocolStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
if err != nil {
return NetProtocolStats{}, err
}
return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
}
func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
nps := NetProtocolStats{}
// Skip the header line
s.Scan()
for s.Scan() {
line, err := nps.parseLine(s.Text())
if err != nil {
return NetProtocolStats{}, err
}
nps[line.Name] = *line
}
return nps, nil
}
func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
var err error
const enabled = "yes"
const disabled = "no"
fields := strings.Fields(rawLine)
line.Name = fields[0]
line.Size, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
if err != nil {
return nil, err
}
line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
if err != nil {
return nil, err
}
if fields[4] == enabled {
line.Pressure = 1
} else if fields[4] == disabled {
line.Pressure = 0
} else {
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
if fields[6] == enabled {
line.Slab = true
} else if fields[6] == disabled {
line.Slab = false
} else {
return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
}
line.ModuleName = fields[7]
err = line.Capabilities.parseCapabilities(fields[8:])
if err != nil {
return nil, err
}
return line, nil
}
func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
// The capabilities are all bools so we can loop over to map them
capabilityFields := [...]*bool{
&pc.Close,
&pc.Connect,
&pc.Disconnect,
&pc.Accept,
&pc.IoCtl,
&pc.Init,
&pc.Destroy,
&pc.Shutdown,
&pc.SetSockOpt,
&pc.GetSockOpt,
&pc.SendMsg,
&pc.RecvMsg,
&pc.SendPage,
&pc.Bind,
&pc.BacklogRcv,
&pc.Hash,
&pc.UnHash,
&pc.GetPort,
&pc.EnterMemoryPressure,
}
for i := 0; i < len(capabilities); i++ {
if capabilities[i] == "y" {
*capabilityFields[i] = true
} else if capabilities[i] == "n" {
*capabilityFields[i] = false
} else {
return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
}
}
return nil
}
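
A hedged example of the new NetProtocols accessor; the map key is whatever name appears in the first column of /proc/net/protocols, so "TCP" below is illustrative rather than guaranteed:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    protocols, err := fs.NetProtocols()
    if err != nil {
        log.Fatal(err)
    }
    if tcp, ok := protocols["TCP"]; ok {
        fmt.Printf("TCP: %d sockets in use, %d pages allocated\n", tcp.Sockets, tcp.Memory)
    }
}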

View File

@ -70,7 +70,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
}
return stat, nil
@ -90,7 +90,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.

View File

@ -14,78 +14,89 @@
package procfs
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct {
// SoftnetStat contains a single row of data from /proc/net/softnet_stat
type SoftnetStat struct {
// Number of processed packets
Processed uint
Processed uint32
// Number of dropped packets
Dropped uint
Dropped uint32
// Number of times processing packets ran out of quota
TimeSqueezed uint
TimeSqueezed uint32
}
// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
// and then return a slice of SoftnetEntry's.
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
var softNetProcFile = "net/softnet_stat"
// NetSoftnetStat reads data from /proc/net/softnet_stat.
func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
if err != nil {
return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
return nil, err
}
return parseSoftnetEntries(data)
}
func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
lines := strings.Split(string(data), "\n")
entries := make([]SoftnetEntry, 0)
var err error
const (
expectedColumns = 11
)
for _, line := range lines {
columns := strings.Fields(line)
width := len(columns)
if width == 0 {
continue
}
if width != expectedColumns {
return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
}
var entry SoftnetEntry
if entry, err = parseSoftnetEntry(columns); err != nil {
return []SoftnetEntry{}, err
}
entries = append(entries, entry)
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
}
return entries, nil
}
func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
var err error
var processed, dropped, timeSqueezed uint64
if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
const minColumns = 9
s := bufio.NewScanner(r)
var stats []SoftnetStat
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
}
// We only parse the first three columns at the moment.
us, err := parseHexUint32s(columns[0:3])
if err != nil {
return nil, err
}
stats = append(stats, SoftnetStat{
Processed: us[0],
Dropped: us[1],
TimeSqueezed: us[2],
})
}
if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
}
if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
}
return SoftnetEntry{
Processed: uint(processed),
Dropped: uint(dropped),
TimeSqueezed: uint(timeSqueezed),
}, nil
return stats, nil
}
func parseHexUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
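
GatherSoftnetStats and SoftnetEntry become NetSoftnetStat and SoftnetStat, with uint32 fields and a relaxed minimum column count. A caller-side sketch:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    stats, err := fs.NetSoftnetStat()
    if err != nil {
        log.Fatal(err)
    }
    for cpu, s := range stats {
        fmt.Printf("cpu%d: processed=%d dropped=%d squeezed=%d\n", cpu, s.Processed, s.Dropped, s.TimeSqueezed)
    }
}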

64
vendor/github.com/prometheus/procfs/net_tcp.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
type (
// NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
NetTCP []*netIPSocketLine
// NetTCPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetTCP it does not collect
// the parsed lines into a slice.
NetTCPSummary NetIPSocketSummary
)
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
func newNetTCP(file string) (NetTCP, error) {
n, err := newNetIPSocket(file)
n1 := NetTCP(n)
return n1, err
}
func newNetTCPSummary(file string) (*NetTCPSummary, error) {
n, err := newNetIPSocketSummary(file)
if n == nil {
return nil, err
}
n1 := NetTCPSummary(*n)
return &n1, err
}
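
A sketch of the new TCP accessors; NetUDP, NetUDP6 and their summaries in the next file follow exactly the same pattern, and the per-line fields come from the shared netIPSocketLine type shown earlier:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    summary, err := fs.NetTCPSummary()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("tcp4: %d sockets, tx queue %d, rx queue %d\n",
        summary.UsedSockets, summary.TxQueueLength, summary.RxQueueLength)
    tcp, err := fs.NetTCP()
    if err != nil {
        log.Fatal(err)
    }
    for _, line := range tcp {
        fmt.Printf("%v:%d -> %v:%d state=%d\n",
            line.LocalAddr, line.LocalPort, line.RemAddr, line.RemPort, line.St)
    }
}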

64
vendor/github.com/prometheus/procfs/net_udp.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
type (
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
NetUDP []*netIPSocketLine
// NetUDPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetUDP it does not collect
// the parsed lines into a slice.
NetUDPSummary NetIPSocketSummary
)
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp.
func (fs FS) NetUDP() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp"))
}
// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp6.
func (fs FS) NetUDP6() (NetUDP, error) {
return newNetUDP(fs.proc.Path("net/udp6"))
}
// NetUDPSummary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp.
func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp"))
}
// NetUDP6Summary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp6.
func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
return newNetUDPSummary(fs.proc.Path("net/udp6"))
}
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
n, err := newNetIPSocket(file)
n1 := NetUDP(n)
return n1, err
}
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
n, err := newNetIPSocketSummary(file)
if n == nil {
return nil, err
}
n1 := NetUDPSummary(*n)
return &n1, err
}

View File

@ -15,7 +15,6 @@ package procfs
import (
"bufio"
"errors"
"fmt"
"io"
"os"
@ -27,25 +26,15 @@ import (
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
const (
netUnixKernelPtrIdx = iota
netUnixRefCountIdx
_
netUnixFlagsIdx
netUnixTypeIdx
netUnixStateIdx
netUnixInodeIdx
// Inode and Path are optional.
netUnixStaticFieldsCnt = 6
)
// Constants for the various /proc/net/unix enumerations.
// TODO: match against x/sys/unix or similar?
const (
netUnixTypeStream = 1
netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5
netUnixFlagListen = 1 << 16
netUnixFlagDefault = 0
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1
netUnixStateConnecting = 2
@ -53,129 +42,127 @@ const (
netUnixStateDisconnected = 4
)
var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
// NetUNIXType is the type of the type field.
type NetUNIXType uint64
// NetUnixType is the type of the type field.
type NetUnixType uint64
// NetUNIXFlags is the type of the flags field.
type NetUNIXFlags uint64
// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64
// NetUNIXState is the type of the state field.
type NetUNIXState uint64
// NetUnixState is the type of the state field.
type NetUnixState uint64
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
// NetUNIXLine represents a line of /proc/net/unix.
type NetUNIXLine struct {
KernelPtr string
RefCount uint64
Protocol uint64
Flags NetUnixFlags
Type NetUnixType
State NetUnixState
Flags NetUNIXFlags
Type NetUNIXType
State NetUNIXState
Inode uint64
Path string
}
// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
Rows []*NetUnixLine
// NetUNIX holds the data read from /proc/net/unix.
type NetUNIX struct {
Rows []*NetUNIXLine
}
// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetUnix()
// NetUNIX returns data read from /proc/net/unix.
func (fs FS) NetUNIX() (*NetUNIX, error) {
return readNetUNIX(fs.proc.Path("net/unix"))
}
// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
return NewNetUnixByPath(fs.proc.Path("net/unix"))
}
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
f, err := os.Open(path)
// readNetUNIX reads data in /proc/net/unix format from the specified file.
func readNetUNIX(file string) (*NetUNIX, error) {
// This file could be quite large and a streaming read is desirable versus
// reading the entire contents at once.
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
return NewNetUnixByReader(f)
return parseNetUNIX(f)
}
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
nu := &NetUnix{
Rows: make([]*NetUnixLine, 0, 32),
}
scanner := bufio.NewScanner(reader)
// Omit the header line.
scanner.Scan()
header := scanner.Text()
// From the man page of proc(5), it does not contain an Inode field,
// but in actually it exists.
// This code works for both cases.
hasInode := strings.Contains(header, "Inode")
// parseNetUNIX creates a NetUnix structure from the incoming stream.
func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
// Begin scanning by checking for the existence of Inode.
s := bufio.NewScanner(r)
s.Scan()
minFieldsCnt := netUnixStaticFieldsCnt
// From the man page of proc(5), it does not contain an Inode field,
// but in reality it exists. This code works for both cases.
hasInode := strings.Contains(s.Text(), "Inode")
// Expect a minimum number of fields, but Inode and Path are optional:
// Num RefCount Protocol Flags Type St Inode Path
minFields := 6
if hasInode {
minFieldsCnt++
minFields++
}
for scanner.Scan() {
line := scanner.Text()
item, err := nu.parseLine(line, hasInode, minFieldsCnt)
var nu NetUNIX
for s.Scan() {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nu, err
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
}
nu.Rows = append(nu.Rows, item)
}
return nu, scanner.Err()
if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
}
return &nu, nil
}
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
fieldsLen := len(fields)
if fieldsLen < minFieldsCnt {
return nil, fmt.Errorf(
"Parse Unix domain failed: expect at least %d fields but got %d",
minFieldsCnt, fieldsLen)
l := len(fields)
if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
}
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
// Field offsets are as follows:
// Num RefCount Protocol Flags Type St Inode Path
kernelPtr := strings.TrimSuffix(fields[0], ":")
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
}
users, err := u.parseUsers(fields[netUnixRefCountIdx])
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
}
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
}
typ, err := u.parseType(fields[netUnixTypeIdx])
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
}
state, err := u.parseState(fields[netUnixStateIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
}
var inode uint64
if hasInode {
inodeStr := fields[netUnixInodeIdx]
inode, err = u.parseInode(inodeStr)
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
}
}
nuLine := &NetUnixLine{
n := &NetUNIXLine{
KernelPtr: kernelPtr,
RefCount: users,
Type: typ,
@ -185,57 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
}
// Path field is optional.
if fieldsLen > minFieldsCnt {
pathIdx := netUnixInodeIdx + 1
if l > min {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
if !hasInode {
pathIdx--
}
nuLine.Path = fields[pathIdx]
n.Path = fields[pathIdx]
}
return nuLine, nil
return n, nil
}
func (u NetUnix) parseKernelPtr(str string) (string, error) {
if !strings.HasSuffix(str, ":") {
return "", errInvalidKernelPtrFmt
}
return str[:len(str)-1], nil
func (u NetUNIX) parseUsers(s string) (uint64, error) {
return strconv.ParseUint(s, 16, 32)
}
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
typ, err := strconv.ParseUint(s, 16, 16)
if err != nil {
return 0, err
}
return NetUnixType(typ), nil
return NetUNIXType(typ), nil
}
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
flags, err := strconv.ParseUint(hexStr, 16, 32)
func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
flags, err := strconv.ParseUint(s, 16, 32)
if err != nil {
return 0, err
}
return NetUnixFlags(flags), nil
return NetUNIXFlags(flags), nil
}
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
st, err := strconv.ParseInt(hexStr, 16, 8)
func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
st, err := strconv.ParseInt(s, 16, 8)
if err != nil {
return 0, err
}
return NetUnixState(st), nil
return NetUNIXState(st), nil
}
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
return strconv.ParseUint(inodeStr, 10, 64)
func (u NetUNIX) parseInode(s string) (uint64, error) {
return strconv.ParseUint(s, 10, 64)
}
func (t NetUnixType) String() string {
func (t NetUNIXType) String() string {
switch t {
case netUnixTypeStream:
return "stream"
@ -247,7 +233,7 @@ func (t NetUnixType) String() string {
return "unknown"
}
func (f NetUnixFlags) String() string {
func (f NetUNIXFlags) String() string {
switch f {
case netUnixFlagListen:
return "listen"
@ -256,7 +242,7 @@ func (f NetUnixFlags) String() string {
}
}
func (s NetUnixState) String() string {
func (s NetUNIXState) String() string {
switch s {
case netUnixStateUnconnected:
return "unconnected"
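
The NetUnix API is renamed to NetUNIX and moved onto FS, so NewNetUnix/NewNetUnixByPath callers migrate to fs.NetUNIX(). A hedged sketch of the replacement call path; Type, Flags and State keep their String helpers:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    nu, err := fs.NetUNIX()
    if err != nil {
        log.Fatal(err)
    }
    for _, row := range nu.Rows {
        fmt.Printf("%s refs=%d type=%s state=%s path=%q\n",
            row.KernelPtr, row.RefCount, row.Type, row.State, row.Path)
    }
}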

View File

@ -105,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
p := Procs{}
@ -134,6 +134,27 @@ func (p Proc) CmdLine() ([]string, error) {
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
}
// Wchan returns the wchan (wait channel) of a process.
func (p Proc) Wchan() (string, error) {
f, err := os.Open(p.path("wchan"))
if err != nil {
return "", err
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
wchan := string(data)
if wchan == "" || wchan == "0" {
return "", nil
}
return wchan, nil
}
// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
data, err := util.ReadFileNoStat(p.path("comm"))
@ -185,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
}
fds[i] = uintptr(fd)
}
@ -257,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
return names, nil
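
A short example of the new Proc.Wchan helper together with the AllProcs and Comm accessors that appear in this hunk (import assumptions as before):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    procs, err := fs.AllProcs()
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range procs {
        comm, err := p.Comm()
        if err != nil {
            continue
        }
        wchan, err := p.Wchan()
        if err != nil || wchan == "" {
            continue // Wchan returns "" when the process is not waiting.
        }
        fmt.Printf("pid %d (%s) is waiting in %s\n", p.PID, comm, wchan)
    }
}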

98
vendor/github.com/prometheus/procfs/proc_cgroup.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
// in this hierarchy
//
// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
type Cgroup struct {
// HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
// hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
HierarchyID int
// Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
// Cgroups V2 this may be empty, as all active controllers use the same hierarchy
Controllers []string
// Path of this control group, relative to the mount point of the cgroupfs representing this specific
// hierarchy
Path string
}
// parseCgroupString parses each line of the /proc/[pid]/cgroup file
// Line format is hierarchyID:[controller1,controller2]:path
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
var err error
fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
}
cgroup := &Cgroup{
Path: fields[2],
Controllers: nil,
}
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID")
}
if fields[1] != "" {
ssNames := strings.Split(fields[1], ",")
cgroup.Controllers = append(cgroup.Controllers, ssNames...)
}
return cgroup, nil
}
// parseCgroups reads each line of the /proc/[pid]/cgroup file
func parseCgroups(data []byte) ([]Cgroup, error) {
var cgroups []Cgroup
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
mountString := scanner.Text()
parsedMounts, err := parseCgroupString(mountString)
if err != nil {
return nil, err
}
cgroups = append(cgroups, *parsedMounts)
}
err := scanner.Err()
return cgroups, err
}
// Cgroups reads from /proc/<pid>/cgroup and returns a []Cgroup locating this PID in each process
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
// so the length of the returned slice is equal to the number of active hierarchies on this system.
func (p Proc) Cgroups() ([]Cgroup, error) {
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
if err != nil {
return nil, err
}
return parseCgroups(data)
}
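
Not from the commit itself — a hedged sketch of how the new Proc.Cgroups accessor could be consumed to list the hierarchies a PID belongs to (procfs.NewProc is a pre-existing constructor in the same package):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.NewProc(1) // PID 1 as an example
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		// One line per hierarchy; cgroup v2 systems report a single entry with HierarchyID 0.
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}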


@ -16,6 +16,7 @@ package procfs
import (
"bufio"
"bytes"
"fmt"
"regexp"
"github.com/prometheus/procfs/internal/util"
@ -23,10 +24,11 @@ import (
// Regexp variables
var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
// ProcFDInfo represents file descriptor information.
@ -39,7 +41,7 @@ type ProcFDInfo struct {
Flags string
// Mount point ID
MntID string
// List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only)
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
@ -96,15 +98,21 @@ type InotifyInfo struct {
// InotifyInfo constructor. Only available on kernel 3.8+.
func parseInotifyInfo(line string) (*InotifyInfo, error) {
r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
m := r.FindStringSubmatch(line)
i := &InotifyInfo{
WD: m[1],
Ino: m[2],
Sdev: m[3],
Mask: m[4],
m := rInotifyParts.FindStringSubmatch(line)
if len(m) >= 4 {
var mask string
if len(m) == 5 {
mask = m[4]
}
i := &InotifyInfo{
WD: m[1],
Ino: m[2],
Sdev: m[3],
Mask: mask,
}
return i, nil
}
return i, nil
return nil, fmt.Errorf("invalid inode entry: %q", line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.
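
A standalone sketch (not part of the diff) of why the shared rInotifyParts pattern matters: the trailing mask group is optional, so fdinfo inotify lines without a mask field still match and simply yield an empty fourth group instead of causing a panic on a nil match:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
	lines := []string{
		"inotify wd:3 ino:9e7e sdev:800013 mask:800afce ignored_mask:0",
		"inotify wd:2 ino:a111 sdev:800013", // no mask field
	}
	for _, line := range lines {
		m := re.FindStringSubmatch(line)
		// m is non-nil for both lines; m[4] is "" when the mask is absent.
		fmt.Printf("wd=%s ino=%s sdev=%s mask=%q\n", m[1], m[2], m[3], m[4])
	}
}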


@ -26,55 +26,55 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
CPUTime int64
CPUTime uint64
// Maximum size of files that the process may create.
FileSize int64
FileSize uint64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
DataSize int64
DataSize uint64
// Maximum size of the process stack in bytes.
StackSize int64
StackSize uint64
// Maximum size of a core file.
CoreFileSize int64
CoreFileSize uint64
// Limit of the process's resident set in pages.
ResidentSet int64
ResidentSet uint64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
Processes int64
Processes uint64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
OpenFiles int64
OpenFiles uint64
// Maximum number of bytes of memory that may be locked into RAM.
LockedMemory int64
LockedMemory uint64
// Maximum size of the process's virtual memory address space in bytes.
AddressSpace int64
AddressSpace uint64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
FileLocks int64
FileLocks uint64
// Limit of signals that may be queued for the real user ID of the calling
// process.
PendingSignals int64
PendingSignals uint64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
MsqqueueSize int64
MsqqueueSize uint64
// Limit of the nice priority set using setpriority(2) or nice(2).
NicePriority int64
NicePriority uint64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
RealtimePriority int64
RealtimePriority uint64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
RealtimeTimeout int64
RealtimeTimeout uint64
}
const (
limitsFields = 3
limitsFields = 4
limitsUnlimited = "unlimited"
)
var (
limitsDelimiter = regexp.MustCompile(" +")
limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
)
// NewLimits returns the current soft limits of the process.
@ -96,46 +96,49 @@ func (p Proc) Limits() (ProcLimits, error) {
l = ProcLimits{}
s = bufio.NewScanner(f)
)
s.Scan() // Skip limits header
for s.Scan() {
fields := limitsDelimiter.Split(s.Text(), limitsFields)
//fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
return ProcLimits{}, fmt.Errorf(
"couldn't parse %s line %s", f.Name(), s.Text())
return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
}
switch fields[0] {
switch fields[1] {
case "Max cpu time":
l.CPUTime, err = parseInt(fields[1])
l.CPUTime, err = parseUint(fields[2])
case "Max file size":
l.FileSize, err = parseInt(fields[1])
l.FileSize, err = parseUint(fields[2])
case "Max data size":
l.DataSize, err = parseInt(fields[1])
l.DataSize, err = parseUint(fields[2])
case "Max stack size":
l.StackSize, err = parseInt(fields[1])
l.StackSize, err = parseUint(fields[2])
case "Max core file size":
l.CoreFileSize, err = parseInt(fields[1])
l.CoreFileSize, err = parseUint(fields[2])
case "Max resident set":
l.ResidentSet, err = parseInt(fields[1])
l.ResidentSet, err = parseUint(fields[2])
case "Max processes":
l.Processes, err = parseInt(fields[1])
l.Processes, err = parseUint(fields[2])
case "Max open files":
l.OpenFiles, err = parseInt(fields[1])
l.OpenFiles, err = parseUint(fields[2])
case "Max locked memory":
l.LockedMemory, err = parseInt(fields[1])
l.LockedMemory, err = parseUint(fields[2])
case "Max address space":
l.AddressSpace, err = parseInt(fields[1])
l.AddressSpace, err = parseUint(fields[2])
case "Max file locks":
l.FileLocks, err = parseInt(fields[1])
l.FileLocks, err = parseUint(fields[2])
case "Max pending signals":
l.PendingSignals, err = parseInt(fields[1])
l.PendingSignals, err = parseUint(fields[2])
case "Max msgqueue size":
l.MsqqueueSize, err = parseInt(fields[1])
l.MsqqueueSize, err = parseUint(fields[2])
case "Max nice priority":
l.NicePriority, err = parseInt(fields[1])
l.NicePriority, err = parseUint(fields[2])
case "Max realtime priority":
l.RealtimePriority, err = parseInt(fields[1])
l.RealtimePriority, err = parseUint(fields[2])
case "Max realtime timeout":
l.RealtimeTimeout, err = parseInt(fields[1])
l.RealtimeTimeout, err = parseUint(fields[2])
}
if err != nil {
return ProcLimits{}, err
@ -145,13 +148,13 @@ func (p Proc) Limits() (ProcLimits, error) {
return l, s.Err()
}
func parseInt(s string) (int64, error) {
func parseUint(s string) (uint64, error) {
if s == limitsUnlimited {
return -1, nil
return 18446744073709551615, nil
}
i, err := strconv.ParseInt(s, 10, 64)
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
}
return i, nil
}
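
Not part of the diff — a short sketch of the caller-visible effect of the switch from parseInt to parseUint: "unlimited" entries now come back as the maximum uint64 rather than -1, so comparisons go against math.MaxUint64 (procfs.Self is a pre-existing constructor):

package main

import (
	"fmt"
	"log"
	"math"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.Limits()
	if err != nil {
		log.Fatal(err)
	}
	if limits.OpenFiles == math.MaxUint64 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Println("open files:", limits.OpenFiles)
	}
}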

209
vendor/github.com/prometheus/procfs/proc_maps.go generated vendored Normal file

@ -0,0 +1,209 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package procfs
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"golang.org/x/sys/unix"
)
// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
type ProcMapPermissions struct {
// mapping has the [R]ead flag set
Read bool
// mapping has the [W]rite flag set
Write bool
// mapping has the [X]ecutable flag set
Execute bool
// mapping has the [S]hared flag set
Shared bool
// mapping is marked as [P]rivate (copy on write)
Private bool
}
// ProcMap contains the process memory-mappings of the process,
// read from /proc/[pid]/maps
type ProcMap struct {
// The start address of current mapping.
StartAddr uintptr
// The end address of the current mapping
EndAddr uintptr
// The permissions for this mapping
Perms *ProcMapPermissions
// The current offset into the file/fd (e.g., shared libs)
Offset int64
// Device owner of this mapping (major:minor) in Mkdev format.
Dev uint64
// The inode of the device above
Inode uint64
// The file or pseudofile (or empty == anonymous)
Pathname string
}
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
toks := strings.Split(s, ":")
if len(toks) < 2 {
return 0, fmt.Errorf("unexpected number of fields")
}
major, err := strconv.ParseUint(toks[0], 16, 0)
if err != nil {
return 0, err
}
minor, err := strconv.ParseUint(toks[1], 16, 0)
if err != nil {
return 0, err
}
return unix.Mkdev(uint32(major), uint32(minor)), nil
}
// parseAddress just converts a hex-string to a uintptr
func parseAddress(s string) (uintptr, error) {
a, err := strconv.ParseUint(s, 16, 0)
if err != nil {
return 0, err
}
return uintptr(a), nil
}
// parseAddresses parses the start-end address
func parseAddresses(s string) (uintptr, uintptr, error) {
toks := strings.Split(s, "-")
if len(toks) < 2 {
return 0, 0, fmt.Errorf("invalid address")
}
saddr, err := parseAddress(toks[0])
if err != nil {
return 0, 0, err
}
eaddr, err := parseAddress(toks[1])
if err != nil {
return 0, 0, err
}
return saddr, eaddr, nil
}
// parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 {
return nil, fmt.Errorf("invalid permissions token")
}
perms := ProcMapPermissions{}
for _, ch := range s {
switch ch {
case 'r':
perms.Read = true
case 'w':
perms.Write = true
case 'x':
perms.Execute = true
case 'p':
perms.Private = true
case 's':
perms.Shared = true
}
}
return &perms, nil
}
// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
// buffer.
func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text)
if len(fields) < 5 {
return nil, fmt.Errorf("truncated procmap entry")
}
saddr, eaddr, err := parseAddresses(fields[0])
if err != nil {
return nil, err
}
perms, err := parsePermissions(fields[1])
if err != nil {
return nil, err
}
offset, err := strconv.ParseInt(fields[2], 16, 0)
if err != nil {
return nil, err
}
device, err := parseDevice(fields[3])
if err != nil {
return nil, err
}
inode, err := strconv.ParseUint(fields[4], 10, 0)
if err != nil {
return nil, err
}
pathname := ""
if len(fields) >= 5 {
pathname = strings.Join(fields[5:], " ")
}
return &ProcMap{
StartAddr: saddr,
EndAddr: eaddr,
Perms: perms,
Offset: offset,
Dev: device,
Inode: inode,
Pathname: pathname,
}, nil
}
// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
// process.
func (p Proc) ProcMaps() ([]*ProcMap, error) {
file, err := os.Open(p.path("maps"))
if err != nil {
return nil, err
}
defer file.Close()
maps := []*ProcMap{}
scan := bufio.NewScanner(file)
for scan.Scan() {
m, err := parseProcMap(scan.Text())
if err != nil {
return nil, err
}
maps = append(maps, m)
}
return maps, nil
}
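
Not part of the diff — a minimal sketch of the new Proc.ProcMaps API, listing each mapping's address range, permissions, and backing path:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	maps, err := p.ProcMaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range maps {
		// Pathname is empty for anonymous mappings.
		fmt.Printf("%x-%x r=%t w=%t x=%t %s\n",
			m.StartAddr, m.EndAddr, m.Perms.Read, m.Perms.Write, m.Perms.Execute, m.Pathname)
	}
}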


@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
}
ns := make(Namespaces, len(names))
@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}


@ -59,7 +59,7 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
}
return parsePSIStats(resource, bytes.NewReader(data))

165
vendor/github.com/prometheus/procfs/proc_smaps.go generated vendored Normal file

@ -0,0 +1,165 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package procfs
import (
"bufio"
"errors"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
var (
// match the header line before each mapped zone in /proc/pid/smaps
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
type ProcSMapsRollup struct {
// Amount of the mapping that is currently resident in RAM
Rss uint64
// Process's proportional share of this mapping
Pss uint64
// Size in bytes of clean shared pages
SharedClean uint64
// Size in bytes of dirty shared pages
SharedDirty uint64
// Size in bytes of clean private pages
PrivateClean uint64
// Size in bytes of dirty private pages
PrivateDirty uint64
// Amount of memory currently marked as referenced or accessed
Referenced uint64
// Amount of memory that does not belong to any file
Anonymous uint64
// Amount would-be-anonymous memory currently on swap
Swap uint64
// Process's proportional memory on swap
SwapPss uint64
}
// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the
// process.
//
// If smaps_rollup does not exist (requires kernel >= 4.15), the content of /proc/pid/smaps will
// be read and summed.
func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
if err != nil && os.IsNotExist(err) {
return p.procSMapsRollupManual()
}
if err != nil {
return ProcSMapsRollup{}, err
}
lines := strings.Split(string(data), "\n")
smaps := ProcSMapsRollup{}
// skip the first line, which doesn't contain the information we need
lines = lines[1:]
for _, line := range lines {
if line == "" {
continue
}
if err := smaps.parseLine(line); err != nil {
return ProcSMapsRollup{}, err
}
}
return smaps, nil
}
// Read /proc/pid/smaps and do the roll-up in Go code.
func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
file, err := os.Open(p.path("smaps"))
if err != nil {
return ProcSMapsRollup{}, err
}
defer file.Close()
smaps := ProcSMapsRollup{}
scan := bufio.NewScanner(file)
for scan.Scan() {
line := scan.Text()
if procSMapsHeaderLine.MatchString(line) {
continue
}
if err := smaps.parseLine(line); err != nil {
return ProcSMapsRollup{}, err
}
}
return smaps, nil
}
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
fmt.Println(line)
return errors.New("invalid net/dev line, missing colon")
}
k := kv[0]
if k == "VmFlags" {
return nil
}
v := strings.TrimSpace(kv[1])
v = strings.TrimRight(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return err
}
vBytes := vKBytes * 1024
s.addValue(k, v, vKBytes, vBytes)
return nil
}
func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
switch k {
case "Rss":
s.Rss += vUintBytes
case "Pss":
s.Pss += vUintBytes
case "Shared_Clean":
s.SharedClean += vUintBytes
case "Shared_Dirty":
s.SharedDirty += vUintBytes
case "Private_Clean":
s.PrivateClean += vUintBytes
case "Private_Dirty":
s.PrivateDirty += vUintBytes
case "Referenced":
s.Referenced += vUintBytes
case "Anonymous":
s.Anonymous += vUintBytes
case "Swap":
s.Swap += vUintBytes
case "SwapPss":
s.SwapPss += vUintBytes
}
}
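
Not part of the diff — a hedged usage sketch for Proc.ProcSMapsRollup; note the reported values are already converted to bytes (the parser multiplies the kB fields by 1024), and the manual smaps roll-up is used as a fallback on kernels older than 4.15:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	rollup, err := p.ProcSMapsRollup()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rss=%d bytes pss=%d bytes swap=%d bytes\n", rollup.Rss, rollup.Pss, rollup.Swap)
}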


@ -127,10 +127,7 @@ func (p Proc) Stat() (ProcStat, error) {
)
if l < 0 || r < 0 {
return ProcStat{}, fmt.Errorf(
"unexpected format, couldn't extract comm: %s",
data,
)
return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
}
s.Comm = string(data[l+1 : r])


@ -33,37 +33,37 @@ type ProcStatus struct {
TGID int
// Peak virtual memory size.
VmPeak uint64
VmPeak uint64 // nolint:golint
// Virtual memory size.
VmSize uint64
VmSize uint64 // nolint:golint
// Locked memory size.
VmLck uint64
VmLck uint64 // nolint:golint
// Pinned memory size.
VmPin uint64
VmPin uint64 // nolint:golint
// Peak resident set size.
VmHWM uint64
VmHWM uint64 // nolint:golint
// Resident set size (sum of RssAnon, RssFile and RssShmem).
VmRSS uint64
VmRSS uint64 // nolint:golint
// Size of resident anonymous memory.
RssAnon uint64
RssAnon uint64 // nolint:golint
// Size of resident file mappings.
RssFile uint64
RssFile uint64 // nolint:golint
// Size of resident shared memory.
RssShmem uint64
RssShmem uint64 // nolint:golint
// Size of data segments.
VmData uint64
VmData uint64 // nolint:golint
// Size of stack segments.
VmStk uint64
VmStk uint64 // nolint:golint
// Size of text segments.
VmExe uint64
VmExe uint64 // nolint:golint
// Shared library code size.
VmLib uint64
VmLib uint64 // nolint:golint
// Page table entries size.
VmPTE uint64
VmPTE uint64 // nolint:golint
// Size of second-level page tables.
VmPMD uint64
VmPMD uint64 // nolint:golint
// Swapped-out virtual memory size by anonymous private.
VmSwap uint64
VmSwap uint64 // nolint:golint
// Size of hugetlb memory portions
HugetlbPages uint64
@ -71,6 +71,11 @@ type ProcStatus struct {
VoluntaryCtxtSwitches uint64
// Number of involuntary context switches.
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
}
// NewStatus returns the current status information of the process.
@ -114,6 +119,10 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
copy(s.UIDs[:], strings.Split(vString, "\t"))
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":


@ -95,24 +95,27 @@ func (fs FS) Schedstat() (*Schedstat, error) {
return stats, nil
}
func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
func parseProcSchedstat(contents string) (ProcSchedstat, error) {
var (
stats ProcSchedstat
err error
)
match := procLineRE.FindStringSubmatch(contents)
if match != nil {
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
if err != nil {
return
return stats, err
}
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
if err != nil {
return
return stats, err
}
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
return
return stats, err
}
err = errors.New("could not parse schedstat")
return
return stats, errors.New("could not parse schedstat")
}

151
vendor/github.com/prometheus/procfs/slab.go generated vendored Normal file

@ -0,0 +1,151 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
var (
slabSpace = regexp.MustCompile(`\s+`)
slabVer = regexp.MustCompile(`slabinfo -`)
slabHeader = regexp.MustCompile(`# name`)
)
// Slab represents a slab pool in the kernel.
type Slab struct {
Name string
ObjActive int64
ObjNum int64
ObjSize int64
ObjPerSlab int64
PagesPerSlab int64
// tunables
Limit int64
Batch int64
SharedFactor int64
SlabActive int64
SlabNum int64
SharedAvail int64
}
// SlabInfo represents info for all slabs.
type SlabInfo struct {
Slabs []*Slab
}
func shouldParseSlab(line string) bool {
if slabVer.MatchString(line) {
return false
}
if slabHeader.MatchString(line) {
return false
}
return true
}
// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
func parseV21SlabEntry(line string) (*Slab, error) {
// First cleanup whitespace.
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
return nil, fmt.Errorf("unable to parse: %q", line)
}
var err error
i := &Slab{Name: s[0]}
i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
if err != nil {
return nil, err
}
i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
if err != nil {
return nil, err
}
i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
if err != nil {
return nil, err
}
i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
if err != nil {
return nil, err
}
i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
if err != nil {
return nil, err
}
i.Limit, err = strconv.ParseInt(s[8], 10, 64)
if err != nil {
return nil, err
}
i.Batch, err = strconv.ParseInt(s[9], 10, 64)
if err != nil {
return nil, err
}
i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
if err != nil {
return nil, err
}
i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
if err != nil {
return nil, err
}
i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
if err != nil {
return nil, err
}
i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
if err != nil {
return nil, err
}
return i, nil
}
// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
scanner := bufio.NewScanner(r)
s := SlabInfo{Slabs: []*Slab{}}
for scanner.Scan() {
line := scanner.Text()
if !shouldParseSlab(line) {
continue
}
slab, err := parseV21SlabEntry(line)
if err != nil {
return s, err
}
s.Slabs = append(s.Slabs, slab)
}
return s, nil
}
// SlabInfo reads data from /proc/slabinfo
func (fs FS) SlabInfo() (SlabInfo, error) {
// TODO: Consider passing options to allow for parsing different
// slabinfo versions. However, slabinfo 2.1 has been stable since
// kernel 2.6.10 and later.
data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
if err != nil {
return SlabInfo{}, err
}
return parseSlabInfo21(bytes.NewReader(data))
}
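
Not part of the diff — a sketch of the new FS.SlabInfo accessor; reading /proc/slabinfo typically requires root, so the error path matters here (procfs.NewFS is a pre-existing constructor):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.SlabInfo() // usually needs root to read /proc/slabinfo
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range info.Slabs {
		fmt.Printf("%-30s active=%d total=%d objsize=%d\n", s.Name, s.ObjActive, s.ObjNum, s.ObjSize)
	}
}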


@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
}
if count == 0 {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
}
cpuStat.User /= userHZ
@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
}
return cpuStat, cpuID, nil
@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
}
return softIRQStat, total, nil
@ -184,34 +184,34 @@ func (fs FS) Stat() (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) {
}
if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err)
return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
}
return stat, nil

89
vendor/github.com/prometheus/procfs/swaps.go generated vendored Normal file

@ -0,0 +1,89 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Swap represents an entry in /proc/swaps.
type Swap struct {
Filename string
Type string
Size int
Used int
Priority int
}
// Swaps returns a slice of all configured swap devices on the system.
func (fs FS) Swaps() ([]*Swap, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
if err != nil {
return nil, err
}
return parseSwaps(data)
}
func parseSwaps(info []byte) ([]*Swap, error) {
swaps := []*Swap{}
scanner := bufio.NewScanner(bytes.NewReader(info))
scanner.Scan() // ignore header line
for scanner.Scan() {
swapString := scanner.Text()
parsedSwap, err := parseSwapString(swapString)
if err != nil {
return nil, err
}
swaps = append(swaps, parsedSwap)
}
err := scanner.Err()
return swaps, err
}
func parseSwapString(swapString string) (*Swap, error) {
var err error
swapFields := strings.Fields(swapString)
swapLength := len(swapFields)
if swapLength < 5 {
return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
}
swap := &Swap{
Filename: swapFields[0],
Type: swapFields[1],
}
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
}
return swap, nil
}
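
Not part of the diff — a minimal sketch of the new FS.Swaps accessor (sizes are the kB values straight from /proc/swaps):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	swaps, err := fs.Swaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range swaps {
		fmt.Printf("%s type=%s size=%dkB used=%dkB prio=%d\n", s.Filename, s.Type, s.Size, s.Used, s.Priority)
	}
}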


@ -112,8 +112,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf(
"couldn't parse %s line %s", file.Name(), s.Text())
return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
}
name := fields[0]


@ -74,11 +74,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}

16
vendor/modules.txt vendored

@ -1,10 +1,10 @@
# github.com/PuerkitoBio/goquery v1.6.0
# github.com/PuerkitoBio/goquery v1.6.1
github.com/PuerkitoBio/goquery
# github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46
github.com/StackExchange/wmi
# github.com/VictoriaMetrics/fastcache v1.6.0
github.com/VictoriaMetrics/fastcache
# github.com/andybalholm/cascadia v1.1.0
# github.com/andybalholm/cascadia v1.2.0
github.com/andybalholm/cascadia
# github.com/beevik/ntp v0.2.0
github.com/beevik/ntp
@ -153,7 +153,7 @@ github.com/golang-migrate/migrate/v4
github.com/golang-migrate/migrate/v4/database
github.com/golang-migrate/migrate/v4/internal/url
github.com/golang-migrate/migrate/v4/source
# github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/golang/groupcache
github.com/golang/groupcache/consistenthash
github.com/golang/groupcache/groupcachepb
@ -340,7 +340,7 @@ github.com/libp2p/go-ws-transport
github.com/libp2p/go-yamux/v2
# github.com/lucasb-eyer/go-colorful v1.0.3
github.com/lucasb-eyer/go-colorful
# github.com/mat/besticon v3.12.0+incompatible
# github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7
github.com/mat/besticon/besticon
github.com/mat/besticon/colorfinder
github.com/mat/besticon/ico
@ -406,17 +406,17 @@ github.com/peterh/liner
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
# github.com/prometheus/client_golang v1.5.0
# github.com/prometheus/client_golang v1.9.0
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.2.0
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.9.1
# github.com/prometheus/common v0.18.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.0.8
# github.com/prometheus/procfs v0.6.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@ -570,7 +570,7 @@ golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/scrypt
golang.org/x/crypto/sha3
golang.org/x/crypto/ssh/terminal
# golang.org/x/image v0.0.0-20200927104501-e162460cd6b5
# golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb
golang.org/x/image/bmp
golang.org/x/image/riff
golang.org/x/image/vp8