Update geth to v1.8.15 (#1213)

* Update geth to v1.8.15
* Apply patches

parent ff7c0e0a17
commit 529e18af33

70 changed files with 1245 additions and 575 deletions
Gopkg.lock (generated, 7 changes)

@@ -88,7 +88,7 @@
revision = "935e0e8a636ca4ba70b713f3e38a19e1b77739e8"

[[projects]]
digest = "1:d670c508dc01984c721d0d968936412e3edcd8ca58caf82fcfd0df9044013a0f"
digest = "1:ba11b65320bfa1a5e8e43b050833bca23490e508a67c45d7e430156cefc2ab7f"
name = "github.com/ethereum/go-ethereum"
packages = [
".",

@@ -101,6 +101,7 @@
"accounts/usbwallet/internal/trezor",
"common",
"common/bitutil",
"common/fdlimit",
"common/hexutil",
"common/math",
"common/mclock",

@@ -156,8 +157,8 @@
"whisper/whisperv6",
]
pruneopts = "T"
revision = "316fc7ecfc10d06603f1358c1f4c1020ec36dd2a"
version = "v1.8.14"
revision = "89451f7c382ad2185987ee369f16416f89c28a7d"
version = "v1.8.15"

[[projects]]
digest = "1:5ac7ecd476a2355a5201229081df2e5f57333ecf703e1f69dde699ae34169c1b"
@@ -27,7 +27,7 @@ ignored = [ "github.com/ethereum/go-ethereum/ethapi" ]

[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "=v1.8.14"
version = "=v1.8.15"

[[override]]
name = "github.com/golang/protobuf"
@@ -104,7 +104,7 @@ index 880cced09..702556079 100644
+ return nil
+}
+
 // HasKeyPair checks if the the whisper node is configured with the private key
 // HasKeyPair checks if the whisper node is configured with the private key
 // of the specified public pair.
 func (whisper *Whisper) HasKeyPair(id string) bool {
+ deterministicID, err := toDeterministicID(id, keyIDSize)
@@ -1,13 +0,0 @@
diff --git a/whisper/whisperv6/whisper.go b/whisper/whisperv6/whisper.go
index 48bbca3c..482457cb 100644
--- a/whisper/whisperv6/whisper.go
+++ b/whisper/whisperv6/whisper.go
@@ -1060,7 +1060,7 @@ func (whisper *Whisper) expire() {
 whisper.stats.messagesCleared++
 whisper.stats.memoryCleared += sz
 whisper.stats.memoryUsed -= sz
- return true
+ return false
 })
 whisper.expirations[expiry].Clear()
 delete(whisper.expirations, expiry)
vendor/github.com/ethereum/go-ethereum/accounts/abi/bind/bind.go (generated, vendored, 4 changes)

@@ -207,7 +207,7 @@ func bindTypeGo(kind abi.Type) string {
// The inner function of bindTypeGo, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
// The length of the matched part is returned, with the translated type.
func bindUnnestedTypeGo(stringKind string) (int, string) {
switch {

@@ -255,7 +255,7 @@ func bindTypeJava(kind abi.Type) string {
// The inner function of bindTypeJava, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
// The length of the matched part is returned, with the translated type.
func bindUnnestedTypeJava(stringKind string) (int, string) {
switch {
vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go (generated, vendored, 23 changes)

@@ -101,6 +101,7 @@ var (
utils.MinerNotifyFlag,
utils.MinerGasTargetFlag,
utils.MinerLegacyGasTargetFlag,
utils.MinerGasLimitFlag,
utils.MinerGasPriceFlag,
utils.MinerLegacyGasPriceFlag,
utils.MinerEtherbaseFlag,

@@ -108,6 +109,7 @@ var (
utils.MinerExtraDataFlag,
utils.MinerLegacyExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.MinerNoVerfiyFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,

@@ -235,7 +237,6 @@ func init() {
// Start system runtime metrics collection
go metrics.CollectProcessMetrics(3 * time.Second)
utils.SetupNetwork(ctx)
return nil
}

@@ -336,26 +337,18 @@ func startNode(ctx *cli.Context, stack *node.Node) {
if err := stack.Service(&ethereum); err != nil {
utils.Fatalf("Ethereum service not running: %v", err)
}
// Use a reduced number of threads if requested
threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
}
if threads > 0 {
type threaded interface {
SetThreads(threads int)
}
if th, ok := ethereum.Engine().(threaded); ok {
th.SetThreads(threads)
}
}
// Set the gas price to the limits from the CLI and start mining
gasprice := utils.GlobalBig(ctx, utils.MinerLegacyGasPriceFlag.Name)
if ctx.IsSet(utils.MinerGasPriceFlag.Name) {
gasprice = utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
}
ethereum.TxPool().SetGasPrice(gasprice)
if err := ethereum.StartMining(true); err != nil {
threads := ctx.GlobalInt(utils.MinerLegacyThreadsFlag.Name)
if ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) {
threads = ctx.GlobalInt(utils.MinerThreadsFlag.Name)
}
if err := ethereum.StartMining(threads); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}
}
vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go (generated, vendored, 2 changes)

@@ -189,9 +189,11 @@ var AppHelpFlagGroups = []flagGroup{
utils.MinerNotifyFlag,
utils.MinerGasPriceFlag,
utils.MinerGasTargetFlag,
utils.MinerGasLimitFlag,
utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
utils.MinerNoVerfiyFlag,
},
},
{
vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go (generated, vendored, 11 changes)

@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gasprice {{.GasPrice}}' >> geth.sh
echo $'exec geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--miner.etherbase {{.Etherbase}} --mine --miner.threads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --miner.gastarget {{.GasTarget}} --miner.gaslimit {{.GasLimit}} --miner.gasprice {{.GasPrice}}' >> geth.sh

ENTRYPOINT ["/bin/sh", "geth.sh"]
`

@@ -68,6 +68,7 @@ services:
- STATS_NAME={{.Ethstats}}
- MINER_NAME={{.Etherbase}}
- GAS_TARGET={{.GasTarget}}
- GAS_LIMIT={{.GasLimit}}
- GAS_PRICE={{.GasPrice}}
logging:
driver: "json-file"

@@ -104,6 +105,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
"Ethstats": config.ethstats,
"Etherbase": config.etherbase,
"GasTarget": uint64(1000000 * config.gasTarget),
"GasLimit": uint64(1000000 * config.gasLimit),
"GasPrice": uint64(1000000000 * config.gasPrice),
"Unlock": config.keyJSON != "",
})

@@ -122,6 +124,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
"Etherbase": config.etherbase,
"GasTarget": config.gasTarget,
"GasLimit": config.gasLimit,
"GasPrice": config.gasPrice,
})
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()

@@ -160,6 +163,7 @@ type nodeInfos struct {
keyJSON string
keyPass string
gasTarget float64
gasLimit float64
gasPrice float64
}

@@ -175,8 +179,9 @@ func (info *nodeInfos) Report() map[string]string {
}
if info.gasTarget > 0 {
// Miner or signer node
report["Gas limit (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
report["Gas price (minimum accepted)"] = fmt.Sprintf("%0.3f GWei", info.gasPrice)
report["Gas floor (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
report["Gas ceil (target maximum)"] = fmt.Sprintf("%0.3f MGas", info.gasLimit)
if info.etherbase != "" {
// Ethash proof-of-work miner

@@ -217,6 +222,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
totalPeers, _ := strconv.Atoi(infos.envvars["TOTAL_PEERS"])
lightPeers, _ := strconv.Atoi(infos.envvars["LIGHT_PEERS"])
gasTarget, _ := strconv.ParseFloat(infos.envvars["GAS_TARGET"], 64)
gasLimit, _ := strconv.ParseFloat(infos.envvars["GAS_LIMIT"], 64)
gasPrice, _ := strconv.ParseFloat(infos.envvars["GAS_PRICE"], 64)
// Container available, retrieve its node ID and its genesis json

@@ -256,6 +262,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
keyJSON: keyJSON,
keyPass: keyPass,
gasTarget: gasTarget,
gasLimit: gasLimit,
gasPrice: gasPrice,
}
stats.enode = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.port)
vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go (generated, vendored, 6 changes)

@@ -50,7 +50,7 @@ func (w *wizard) deployNode(boot bool) {
if boot {
infos = &nodeInfos{port: 30303, peersTotal: 512, peersLight: 256}
} else {
infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 7.5, gasLimit: 10, gasPrice: 1}
}
}
existed := err == nil

@@ -152,6 +152,10 @@ func (w *wizard) deployNode(boot bool) {
fmt.Printf("What gas limit should empty blocks target (MGas)? (default = %0.3f)\n", infos.gasTarget)
infos.gasTarget = w.readDefaultFloat(infos.gasTarget)
fmt.Println()
fmt.Printf("What gas limit should full blocks target (MGas)? (default = %0.3f)\n", infos.gasLimit)
infos.gasLimit = w.readDefaultFloat(infos.gasLimit)
fmt.Println()
fmt.Printf("What gas price should the signer require (GWei)? (default = %0.3f)\n", infos.gasPrice)
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)
vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go (generated, vendored, 44 changes)

@@ -329,12 +329,17 @@ var (
MinerGasTargetFlag = cli.Uint64Flag{
Name: "miner.gastarget",
Usage: "Target gas floor for mined blocks",
Value: params.GenesisGasLimit,
Value: eth.DefaultConfig.MinerGasFloor,
}
MinerLegacyGasTargetFlag = cli.Uint64Flag{
Name: "targetgaslimit",
Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)",
Value: params.GenesisGasLimit,
Value: eth.DefaultConfig.MinerGasFloor,
}
MinerGasLimitFlag = cli.Uint64Flag{
Name: "miner.gaslimit",
Usage: "Target gas ceiling for mined blocks",
Value: eth.DefaultConfig.MinerGasCeil,
}
MinerGasPriceFlag = BigFlag{
Name: "miner.gasprice",

@@ -366,9 +371,13 @@ var (
}
MinerRecommitIntervalFlag = cli.DurationFlag{
Name: "miner.recommit",
Usage: "Time interval to recreate the block being mined.",
Usage: "Time interval to recreate the block being mined",
Value: eth.DefaultConfig.MinerRecommit,
}
MinerNoVerfiyFlag = cli.BoolFlag{
Name: "miner.noverify",
Usage: "Disable remote sealing verification",
}
// Account settings
UnlockedAccountFlag = cli.StringFlag{
Name: "unlock",

@@ -1130,12 +1139,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
if ctx.GlobalIsSet(MinerLegacyThreadsFlag.Name) {
cfg.MinerThreads = ctx.GlobalInt(MinerLegacyThreadsFlag.Name)
}
if ctx.GlobalIsSet(MinerThreadsFlag.Name) {
cfg.MinerThreads = ctx.GlobalInt(MinerThreadsFlag.Name)
}
if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
}

@@ -1148,6 +1151,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(MinerExtraDataFlag.Name) {
cfg.MinerExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name))
}
if ctx.GlobalIsSet(MinerLegacyGasTargetFlag.Name) {
cfg.MinerGasFloor = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name)
}
if ctx.GlobalIsSet(MinerGasTargetFlag.Name) {
cfg.MinerGasFloor = ctx.GlobalUint64(MinerGasTargetFlag.Name)
}
if ctx.GlobalIsSet(MinerGasLimitFlag.Name) {
cfg.MinerGasCeil = ctx.GlobalUint64(MinerGasLimitFlag.Name)
}
if ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
cfg.MinerGasPrice = GlobalBig(ctx, MinerLegacyGasPriceFlag.Name)
}

@@ -1157,6 +1169,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) {
cfg.MinerRecommit = ctx.Duration(MinerRecommitIntervalFlag.Name)
}
if ctx.GlobalIsSet(MinerNoVerfiyFlag.Name) {
cfg.MinerNoverify = ctx.Bool(MinerNoVerfiyFlag.Name)
}
if ctx.GlobalIsSet(VMEnableDebugFlag.Name) {
// TODO(fjl): force-enable this in --dev mode
cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name)

@@ -1269,15 +1284,6 @@ func RegisterEthStatsService(stack *node.Node, url string) {
}
}
// SetupNetwork configures the system for either the main net or some test network.
func SetupNetwork(ctx *cli.Context) {
// TODO(fjl): move target gas limit into config
params.TargetGasLimit = ctx.GlobalUint64(MinerLegacyGasTargetFlag.Name)
if ctx.GlobalIsSet(MinerGasTargetFlag.Name) {
params.TargetGasLimit = ctx.GlobalUint64(MinerGasTargetFlag.Name)
}
}
func SetupMetrics(ctx *cli.Context) {
if metrics.Enabled {
log.Info("Enabling metrics collection")

@@ -1351,7 +1357,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
}, nil)
}, nil, false)
}
}
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
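For orientation, the new ceiling knob mirrors the existing floor knob: the CLI value lands in eth.Config and the miner reads it from there. A hedged sketch using only the field names visible in the SetEthConfig hunk above; the numbers and the standalone program are illustrative, not part of this change.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth"
)

func main() {
	cfg := eth.DefaultConfig
	cfg.MinerGasFloor = 8000000  // baseline target, what --miner.gastarget would set
	cfg.MinerGasCeil = 10000000  // ceiling, what the new --miner.gaslimit would set
	fmt.Println(cfg.MinerGasFloor, cfg.MinerGasCeil)
}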
vendor/github.com/ethereum/go-ethereum/cmd/wnode/main.go (generated, vendored, 2 changes)

@@ -754,7 +754,7 @@ func extractIDFromEnode(s string) []byte {
return n.ID[:]
}
// obfuscateBloom adds 16 random bits to the the bloom
// obfuscateBloom adds 16 random bits to the bloom
// filter, in order to obfuscate the containing topics.
// it does so deterministically within every session.
// despite additional bits, it will match on average
vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go (generated, vendored, 44 changes)

@@ -590,17 +590,17 @@ func (c *Clique) Authorize(signer common.Address, signFn SignerFn) {
// Seal implements consensus.Engine, attempting to create a sealed block using
// the local signing credentials.
func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
header := block.Header()
// Sealing the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return nil, errUnknownBlock
return errUnknownBlock
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
return nil, errWaitTransactions
return errWaitTransactions
}
// Don't hold the signer fields for the entire sealing procedure
c.lock.RLock()

@@ -610,10 +610,10 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
// Bail out if we're unauthorized to sign a block
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return nil, err
return err
}
if _, authorized := snap.Signers[signer]; !authorized {
return nil, errUnauthorized
return errUnauthorized
}
// If we're amongst the recent signers, wait for the next block
for seen, recent := range snap.Recents {

@@ -621,8 +621,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
// Signer is among recents, only wait if the current block doesn't shift it out
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
log.Info("Signed recently, must wait for others")
<-stop
return nil, nil
return nil
}
}
}

@@ -635,21 +634,29 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
log.Trace("Out-of-turn signing requested", "wiggle", common.PrettyDuration(wiggle))
}
log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
select {
case <-stop:
return nil, nil
case <-time.After(delay):
}
// Sign all the things!
sighash, err := signFn(accounts.Account{Address: signer}, sigHash(header).Bytes())
if err != nil {
return nil, err
return err
}
copy(header.Extra[len(header.Extra)-extraSeal:], sighash)
// Wait until sealing is terminated or delay timeout.
log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
go func() {
select {
case <-stop:
return
case <-time.After(delay):
}
return block.WithSeal(header), nil
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "sealhash", c.SealHash(header))
}
}()
return nil
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty

@@ -673,6 +680,11 @@ func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
return new(big.Int).Set(diffNoTurn)
}
// SealHash returns the hash of a block prior to it being sealed.
func (c *Clique) SealHash(header *types.Header) common.Hash {
return sigHash(header)
}
// Close implements consensus.Engine. It's a noop for clique as there is are no background threads.
func (c *Clique) Close() error {
return nil
vendor/github.com/ethereum/go-ethereum/consensus/consensus.go (generated, vendored, 12 changes)

@@ -86,9 +86,15 @@ type Engine interface {
Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
// Seal generates a new block for the given input block with the local miner's
// seal place on top.
Seal(chain ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.
//
// Note, the method returns immediately and will send the result async. More
// than one result may also be returned depending on the consensus algorothm.
Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error
// SealHash returns the hash of a block prior to it being sealed.
SealHash(header *types.Header) common.Hash
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have.
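The hunk above turns Seal from a blocking call into an asynchronous request. A minimal sketch of how a caller could drive the new contract, assuming only the Engine interface shown here; the helper itself and the timeout are illustrative, not part of the diff.

package example

import (
	"errors"
	"time"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
)

// sealOnce drives one sealing request: the engine returns immediately, zero or
// more sealed blocks arrive on results, and closing stop aborts the request.
func sealOnce(engine consensus.Engine, chain consensus.ChainReader, block *types.Block) (*types.Block, error) {
	results := make(chan *types.Block, 1) // buffered so the engine's send never blocks
	stop := make(chan struct{})
	defer close(stop)

	if err := engine.Seal(chain, block, results, stop); err != nil {
		return nil, err // the request itself was rejected (e.g. unauthorized clique signer)
	}
	select {
	case sealed := <-results:
		return sealed, nil // first of possibly several results
	case <-time.After(30 * time.Second):
		return nil, errors.New("sealing timed out")
	}
}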
vendor/github.com/ethereum/go-ethereum/consensus/ethash/consensus.go (generated, vendored, 37 changes)

@@ -31,15 +31,17 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// Ethash proof-of-work protocol constants.
var (
FrontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
ByzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
)
// Various error messages to mark blocks invalid. These should be private to

@@ -495,7 +497,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Head
if fulldag {
dataset := ethash.dataset(number, true)
if dataset.generated() {
digest, result = hashimotoFull(dataset.dataset, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
digest, result = hashimotoFull(dataset.dataset, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
// Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
// until after the call to hashimotoFull so it's not unmapped while being used.

@@ -513,7 +515,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Head
if ethash.config.PowMode == ModeTest {
size = 32 * 1024
}
digest, result = hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
digest, result = hashimotoLight(size, cache.cache, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
// Caches are unmapped in a finalizer. Ensure that the cache stays alive
// until after the call to hashimotoLight so it's not unmapped while being used.

@@ -552,6 +554,29 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header
return types.NewBlock(header, txs, uncles, receipts), nil
}
// SealHash returns the hash of a block prior to it being sealed.
func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewKeccak256()
rlp.Encode(hasher, []interface{}{
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra,
})
hasher.Sum(hash[:0])
return hash
}
// Some weird constants to avoid constant memory allocs for them.
var (
big8 = big.NewInt(8)
vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go (generated, vendored, 33 changes)

@@ -50,7 +50,7 @@ var (
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil)
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false)
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23

@@ -405,6 +405,12 @@ type Config struct {
PowMode Mode
}
// sealTask wraps a seal block with relative result channel for remote sealer thread.
type sealTask struct {
block *types.Block
results chan<- *types.Block
}
// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
nonce types.BlockNonce

@@ -444,12 +450,11 @@ type Ethash struct {
hashrate metrics.Meter // Meter tracking the average hashrate
// Remote sealer related fields
workCh chan *types.Block // Notification channel to push new work to remote sealer
resultCh chan *types.Block // Channel used by mining threads to return result
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
// The fields below are hooks for testing
shared *Ethash // Shared PoW verifier to avoid cache regeneration

@@ -464,7 +469,7 @@ type Ethash struct {
// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string) *Ethash {
func New(config Config, notify []string, noverify bool) *Ethash {
if config.CachesInMem <= 0 {
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.CachesInMem = 1

@@ -481,36 +486,34 @@ func New(config Config, notify []string) *Ethash {
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeter(),
workCh: make(chan *types.Block),
resultCh: make(chan *types.Block),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
}
go ethash.remote(notify)
go ethash.remote(notify, noverify)
return ethash
}
// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester(notify []string) *Ethash {
func NewTester(notify []string, noverify bool) *Ethash {
ethash := &Ethash{
config: Config{PowMode: ModeTest},
caches: newlru("cache", 1, newCache),
datasets: newlru("dataset", 1, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeter(),
workCh: make(chan *types.Block),
resultCh: make(chan *types.Block),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
}
go ethash.remote(notify)
go ethash.remote(notify, noverify)
return ethash
}
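Both constructors now take a trailing noverify flag that is forwarded to the remote sealer loop. A short usage sketch against the new signatures shown above; the Config values are placeholders.

package example

import "github.com/ethereum/go-ethereum/consensus/ethash"

// newEngines is a sketch only: it shows the new constructor signatures.
func newEngines() (*ethash.Ethash, *ethash.Ethash) {
	// Test-sized engine; remote submissions are still verified.
	tester := ethash.NewTester(nil, false)

	// Full engine; passing true as the last argument would skip remote seal
	// verification (the behaviour behind --miner.noverify elsewhere in this diff).
	full := ethash.New(ethash.Config{
		CacheDir:       "ethash",
		CachesInMem:    2,
		CachesOnDisk:   3,
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
	}, nil, false)

	return tester, full
}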
vendor/github.com/ethereum/go-ethereum/consensus/ethash/sealer.go (generated, vendored, 138 changes)

@@ -35,6 +35,11 @@ import (
"github.com/ethereum/go-ethereum/log"
)
const (
// staleThreshold is the maximum depth of the acceptable stale but valid ethash solution.
staleThreshold = 7
)
var (
errNoMiningWork = errors.New("no mining work available yet")
errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")

@@ -42,16 +47,21 @@ var (
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
// If we're running a fake PoW, simply return a 0 nonce immediately
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
header := block.Header()
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
return block.WithSeal(header), nil
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
}
return nil
}
// If we're running a shared PoW, delegate sealing to it
if ethash.shared != nil {
return ethash.shared.Seal(chain, block, stop)
return ethash.shared.Seal(chain, block, results, stop)
}
// Create a runner and the multiple search threads it directs
abort := make(chan struct{})

@@ -62,7 +72,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
ethash.lock.Unlock()
return nil, err
return err
}
ethash.rand = rand.New(rand.NewSource(seed.Int64()))
}

@@ -75,34 +85,45 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
}
// Push new work to remote sealer
if ethash.workCh != nil {
ethash.workCh <- block
ethash.workCh <- &sealTask{block: block, results: results}
}
var pend sync.WaitGroup
var (
pend sync.WaitGroup
locals = make(chan *types.Block)
)
for i := 0; i < threads; i++ {
pend.Add(1)
go func(id int, nonce uint64) {
defer pend.Done()
ethash.mine(block, id, nonce, abort, ethash.resultCh)
ethash.mine(block, id, nonce, abort, locals)
}(i, uint64(ethash.rand.Int63()))
}
// Wait until sealing is terminated or a nonce is found
var result *types.Block
select {
case <-stop:
// Outside abort, stop all miner threads
close(abort)
case result = <-ethash.resultCh:
// One of the threads found a block, abort all others
close(abort)
case <-ethash.update:
// Thread count was changed on user request, restart
close(abort)
go func() {
var result *types.Block
select {
case <-stop:
// Outside abort, stop all miner threads
close(abort)
case result = <-locals:
// One of the threads found a block, abort all others
select {
case results <- result:
default:
log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
}
close(abort)
case <-ethash.update:
// Thread count was changed on user request, restart
close(abort)
if err := ethash.Seal(chain, block, results, stop); err != nil {
log.Error("Failed to restart sealing after update", "err", err)
}
}
// Wait for all miners to terminate and return the block
pend.Wait()
return ethash.Seal(chain, block, stop)
}
// Wait for all miners to terminate and return the block
pend.Wait()
return result, nil
}()
return nil
}
// mine is the actual proof-of-work miner that searches for a nonce starting from

@@ -111,7 +132,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
// Extract some data from the header
var (
header = block.Header()
hash = header.HashNoNonce().Bytes()
hash = ethash.SealHash(header).Bytes()
target = new(big.Int).Div(two256, header.Difficulty)
number = header.Number.Uint64()
dataset = ethash.dataset(number, false)

@@ -165,11 +186,12 @@ search:
}
// remote is a standalone goroutine to handle remote mining related stuff.
func (ethash *Ethash) remote(notify []string) {
func (ethash *Ethash) remote(notify []string, noverify bool) {
var (
works = make(map[common.Hash]*types.Block)
rates = make(map[common.Hash]hashrate)
results chan<- *types.Block
currentBlock *types.Block
currentWork [3]string

@@ -213,7 +235,7 @@ func (ethash *Ethash) remote(notify []string) {
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
makeWork := func(block *types.Block) {
hash := block.HashNoNonce()
hash := ethash.SealHash(block.Header())
currentWork[0] = hash.Hex()
currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()

@@ -226,11 +248,15 @@ func (ethash *Ethash) remote(notify []string) {
// submitWork verifies the submitted pow solution, returning
// whether the solution was accepted or not (not can be both a bad pow as well as
// any other error, like no pending work or stale mining result).
submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, hash common.Hash) bool {
submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
if currentBlock == nil {
log.Error("Pending work without block", "sealhash", sealhash)
return false
}
// Make sure the work submitted is present
block := works[hash]
block := works[sealhash]
if block == nil {
log.Info("Work submitted but none pending", "hash", hash)
log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
return false
}
// Verify the correctness of submitted result.

@@ -239,26 +265,36 @@ func (ethash *Ethash) remote(notify []string) {
header.MixDigest = mixDigest
start := time.Now()
if err := ethash.verifySeal(nil, header, true); err != nil {
log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err)
return false
if !noverify {
if err := ethash.verifySeal(nil, header, true); err != nil {
log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", time.Since(start), "err", err)
return false
}
}
// Make sure the result channel is created.
if ethash.resultCh == nil {
// Make sure the result channel is assigned.
if results == nil {
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start))
log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", time.Since(start))
// Solutions seems to be valid, return to the miner and notify acceptance.
select {
case ethash.resultCh <- block.WithSeal(header):
delete(works, hash)
return true
default:
log.Info("Work submitted is stale", "hash", hash)
return false
solution := block.WithSeal(header)
// The submitted solution is within the scope of acceptance.
if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
select {
case results <- solution:
log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return true
default:
log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
return false
}
}
// The submitted block is too old to accept, drop it.
log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return false
}
ticker := time.NewTicker(5 * time.Second)

@@ -266,14 +302,12 @@ func (ethash *Ethash) remote(notify []string) {
for {
select {
case block := <-ethash.workCh:
if currentBlock != nil && block.ParentHash() != currentBlock.ParentHash() {
// Start new round mining, throw out all previous work.
works = make(map[common.Hash]*types.Block)
}
case work := <-ethash.workCh:
// Update current work with new received block.
// Note same work can be past twice, happens when changing CPU threads.
makeWork(block)
results = work.results
makeWork(work.block)
// Notify and requested URLs of the new work availability
notifyWork()

@@ -315,6 +349,14 @@ func (ethash *Ethash) remote(notify []string) {
delete(rates, id)
}
}
// Clear stale pending blocks
if currentBlock != nil {
for hash, block := range works {
if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
delete(works, hash)
}
}
}
case errc := <-ethash.exitCh:
// Exit remote loop if ethash is closed and return relevant error.
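The remote sealer now gates submissions on a staleness window rather than a single pending hash. A tiny standalone illustration of the acceptance check above; the constant is copied from the hunk, the helper itself is illustrative.

package main

import "fmt"

// staleThreshold is the maximum depth of an acceptable stale but valid solution.
const staleThreshold = 7

// acceptable mirrors the test used by the remote sealer's submitWork closure.
func acceptable(solutionNumber, currentNumber uint64) bool {
	return solutionNumber+staleThreshold > currentNumber
}

func main() {
	fmt.Println(acceptable(100, 106)) // true: six blocks behind, still forwarded
	fmt.Println(acceptable(100, 107)) // false: too old, dropped
}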
vendor/github.com/ethereum/go-ethereum/console/prompter.go (generated, vendored, 4 changes)

@@ -43,7 +43,7 @@ type UserPrompter interface {
// choice to be made, returning that choice.
PromptConfirm(prompt string) (bool, error)
// SetHistory sets the the input scrollback history that the prompter will allow
// SetHistory sets the input scrollback history that the prompter will allow
// the user to scroll back to.
SetHistory(history []string)

@@ -149,7 +149,7 @@ func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
return false, err
}
// SetHistory sets the the input scrollback history that the prompter will allow
// SetHistory sets the input scrollback history that the prompter will allow
// the user to scroll back to.
func (p *terminalPrompter) SetHistory(history []string) {
p.State.ReadHistory(strings.NewReader(strings.Join(history, "\n")))
vendor/github.com/ethereum/go-ethereum/core/block_validator.go (generated, vendored, 24 changes)

@@ -45,7 +45,7 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin
return validator
}
// ValidateBody validates the given block's uncles and verifies the the block
// ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
func (v *BlockValidator) ValidateBody(block *types.Block) error {

@@ -101,9 +101,11 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
return nil
}
// CalcGasLimit computes the gas limit of the next block after parent.
// This is miner strategy, not consensus protocol.
func CalcGasLimit(parent *types.Block) uint64 {
// CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas above the provided floor, and increase it towards the
// ceil if the blocks are full. If the ceil is exceeded, it will always decrease
// the gas allowance.
func CalcGasLimit(parent *types.Block, gasFloor, gasCeil uint64) uint64 {
// contrib = (parentGasUsed * 3 / 2) / 1024
contrib := (parent.GasUsed() + parent.GasUsed()/2) / params.GasLimitBoundDivisor

@@ -121,12 +123,16 @@ func CalcGasLimit(parent *types.Block) uint64 {
if limit < params.MinGasLimit {
limit = params.MinGasLimit
}
// however, if we're now below the target (TargetGasLimit) we increase the
// limit as much as we can (parentGasLimit / 1024 -1)
if limit < params.TargetGasLimit {
// If we're outside our allowed gas range, we try to hone towards them
if limit < gasFloor {
limit = parent.GasLimit() + decay
if limit > params.TargetGasLimit {
limit = params.TargetGasLimit
if limit > gasFloor {
limit = gasFloor
}
} else if limit > gasCeil {
limit = parent.GasLimit() - decay
if limit < gasCeil {
limit = gasCeil
}
}
return limit
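To make the floor/ceil steering concrete, here is a small standalone illustration of the clamping rule introduced above; the function and the numbers are illustrative, not the vendored code.

package main

import "fmt"

// steer mirrors the new CalcGasLimit behaviour: the next limit drifts from the
// parent limit towards gasFloor when below it and towards gasCeil when above
// it, by at most one decay step per block.
func steer(parentLimit, decay, gasFloor, gasCeil uint64) uint64 {
	limit := parentLimit
	if limit < gasFloor {
		limit = parentLimit + decay
		if limit > gasFloor {
			limit = gasFloor
		}
	} else if limit > gasCeil {
		limit = parentLimit - decay
		if limit < gasCeil {
			limit = gasCeil
		}
	}
	return limit
}

func main() {
	// A 4.7 MGas parent with an 8 MGas floor moves up by one decay step.
	fmt.Println(steer(4700000, 4589, 8000000, 10000000)) // 4704589
}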
vendor/github.com/ethereum/go-ethereum/core/chain_indexer.go (generated, vendored, 57 changes)

@@ -85,6 +85,9 @@ type ChainIndexer struct {
knownSections uint64 // Number of sections known to be complete (block wise)
cascadedHead uint64 // Block number of the last completed section cascaded to subindexers
checkpointSections uint64 // Number of sections covered by the checkpoint
checkpointHead common.Hash // Section head belonging to the checkpoint
throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources
log log.Logger

@@ -115,12 +118,19 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
return c
}
// AddKnownSectionHead marks a new section head as known/processed if it is newer
// than the already known best section head
func (c *ChainIndexer) AddKnownSectionHead(section uint64, shead common.Hash) {
// AddCheckpoint adds a checkpoint. Sections are never processed and the chain
// is not expected to be available before this point. The indexer assumes that
// the backend has sufficient information available to process subsequent sections.
//
// Note: knownSections == 0 and storedSections == checkpointSections until
// syncing reaches the checkpoint
func (c *ChainIndexer) AddCheckpoint(section uint64, shead common.Hash) {
c.lock.Lock()
defer c.lock.Unlock()
c.checkpointSections = section + 1
c.checkpointHead = shead
if section < c.storedSections {
return
}

@@ -233,16 +243,23 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
// If a reorg happened, invalidate all sections until that point
if reorg {
// Revert the known section number to the reorg point
changed := head / c.sectionSize
if changed < c.knownSections {
c.knownSections = changed
known := head / c.sectionSize
stored := known
if known < c.checkpointSections {
known = 0
}
if stored < c.checkpointSections {
stored = c.checkpointSections
}
if known < c.knownSections {
c.knownSections = known
}
// Revert the stored sections from the database to the reorg point
if changed < c.storedSections {
c.setValidSections(changed)
if stored < c.storedSections {
c.setValidSections(stored)
}
// Update the new head number to the finalized section end and notify children
head = changed * c.sectionSize
head = known * c.sectionSize
if head < c.cascadedHead {
c.cascadedHead = head

@@ -256,7 +273,18 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
var sections uint64
if head >= c.confirmsReq {
sections = (head + 1 - c.confirmsReq) / c.sectionSize
if sections < c.checkpointSections {
sections = 0
}
if sections > c.knownSections {
if c.knownSections < c.checkpointSections {
// syncing reached the checkpoint, verify section head
syncedHead := rawdb.ReadCanonicalHash(c.chainDb, c.checkpointSections*c.sectionSize-1)
if syncedHead != c.checkpointHead {
c.log.Error("Synced chain does not match checkpoint", "number", c.checkpointSections*c.sectionSize-1, "expected", c.checkpointHead, "synced", syncedHead)
return
}
}
c.knownSections = sections
select {

@@ -322,7 +350,6 @@ func (c *ChainIndexer) updateLoop() {
updating = false
c.log.Info("Finished upgrading chain index")
}
c.cascadedHead = c.storedSections*c.sectionSize - 1
for _, child := range c.children {
c.log.Trace("Cascading chain index update", "head", c.cascadedHead)

@@ -402,8 +429,14 @@ func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
c.children = append(c.children, indexer)
// Cascade any pending updates to new children too
if c.storedSections > 0 {
indexer.newHead(c.storedSections*c.sectionSize-1, false)
sections := c.storedSections
if c.knownSections < sections {
// if a section is "stored" but not "known" then it is a checkpoint without
// available chain data so we should not cascade it yet
sections = c.knownSections
}
if sections > 0 {
indexer.newHead(sections*c.sectionSize-1, false)
}
}
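AddKnownSectionHead is replaced by AddCheckpoint, which records both the checkpointed section count and the section head that syncing must later reproduce. A hedged usage sketch against the signature above; the indexer value, section number and hash are placeholders, not real checkpoint data.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

// registerCheckpoint shows the new API: the indexer skips processing everything
// up to and including the given section and later checks that the synced chain
// reproduces the given head at that section boundary.
func registerCheckpoint(indexer *core.ChainIndexer) {
	section := uint64(106)
	head := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
	indexer.AddCheckpoint(section, head)
}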
vendor/github.com/ethereum/go-ethereum/core/chain_makers.go (generated, vendored, 2 changes)

@@ -240,7 +240,7 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S
Difficulty: parent.Difficulty(),
UncleHash: parent.UncleHash(),
}),
GasLimit: CalcGasLimit(parent),
GasLimit: CalcGasLimit(parent, parent.GasLimit(), parent.GasLimit()),
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: time,
}
vendor/github.com/ethereum/go-ethereum/core/state/statedb.go (generated, vendored, 9 changes)

@@ -489,10 +489,13 @@ func (self *StateDB) Copy() *StateDB {
state.stateObjectsDirty[addr] = struct{}{}
}
}
for hash, logs := range self.logs {
state.logs[hash] = make([]*types.Log, len(logs))
copy(state.logs[hash], logs)
cpy := make([]*types.Log, len(logs))
for i, l := range logs {
cpy[i] = new(types.Log)
*cpy[i] = *l
}
state.logs[hash] = cpy
}
for hash, preimage := range self.preimages {
state.preimages[hash] = preimage
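The Copy change above replaces a shallow slice copy with per-entry clones of the logs. A standalone illustration of the difference, using a toy struct instead of types.Log; everything here is illustrative.

package main

import "fmt"

type logEntry struct{ TxIndex uint }

func main() {
	orig := []*logEntry{{TxIndex: 1}}

	// Old behaviour: a new slice, but both slices share the same pointers,
	// so a mutation through the original shows up in the copy.
	shallow := make([]*logEntry, len(orig))
	copy(shallow, orig)

	// New behaviour: clone every entry, as the hunk now does for *types.Log.
	deep := make([]*logEntry, len(orig))
	for i, l := range orig {
		cpy := *l
		deep[i] = &cpy
	}

	orig[0].TxIndex = 42
	fmt.Println(shallow[0].TxIndex, deep[0].TxIndex) // 42 1
}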
vendor/github.com/ethereum/go-ethereum/core/state_processor.go (generated, vendored, 2 changes)

@@ -61,7 +61,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit())
)
// Mutate the the block and state according to any hard-fork specs
// Mutate the block and state according to any hard-fork specs
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
misc.ApplyDAOHardFork(statedb)
}
vendor/github.com/ethereum/go-ethereum/core/tx_journal.go (generated, vendored, 8 changes)

@@ -34,7 +34,7 @@ var errNoActiveJournal = errors.New("no active journal")
// devNull is a WriteCloser that just discards anything written into it. Its
// goal is to allow the transaction journal to write into a fake journal when
// loading transactions on startup without printing warnings due to no file
// being readt for write.
// being read for write.
type devNull struct{}
func (*devNull) Write(p []byte) (n int, err error) { return len(p), nil }

@@ -57,7 +57,7 @@ func newTxJournal(path string) *txJournal {
// load parses a transaction journal dump from disk, loading its contents into
// the specified pool.
func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
// Skip the parsing if the journal file doens't exist at all
// Skip the parsing if the journal file doesn't exist at all
if _, err := os.Stat(journal.path); os.IsNotExist(err) {
return nil
}

@@ -78,7 +78,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
// Create a method to load a limited batch of transactions and bump the
// appropriate progress counters. Then use this method to load all the
// journalled transactions in small-ish batches.
// journaled transactions in small-ish batches.
loadBatch := func(txs types.Transactions) {
for _, err := range add(txs) {
if err != nil {

@@ -103,7 +103,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
}
break
}
// New transaction parsed, queue up for later, import if threnshold is reached
// New transaction parsed, queue up for later, import if threshold is reached
total++
if batch = append(batch, tx); batch.Len() > 1024 {
vendor/github.com/ethereum/go-ethereum/core/types/block.go (generated, vendored, 23 changes)

@@ -102,25 +102,6 @@ func (h *Header) Hash() common.Hash {
return rlpHash(h)
}
// HashNoNonce returns the hash which is used as input for the proof-of-work search.
func (h *Header) HashNoNonce() common.Hash {
return rlpHash([]interface{}{
h.ParentHash,
h.UncleHash,
h.Coinbase,
h.Root,
h.TxHash,
h.ReceiptHash,
h.Bloom,
h.Difficulty,
h.Number,
h.GasLimit,
h.GasUsed,
h.Time,
h.Extra,
})
}
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
func (h *Header) Size() common.StorageSize {

@@ -324,10 +305,6 @@ func (b *Block) Header() *Header { return CopyHeader(b.header) }
// Body returns the non-header content of the block.
func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} }
func (b *Block) HashNoNonce() common.Hash {
return b.header.HashNoNonce()
}
// Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previsouly cached value.
func (b *Block) Size() common.StorageSize {
vendor/github.com/ethereum/go-ethereum/core/vm/contract.go (generated, vendored, 2 changes)

@@ -40,7 +40,7 @@ type AccountRef common.Address
func (ar AccountRef) Address() common.Address { return (common.Address)(ar) }
// Contract represents an ethereum contract in the state database. It contains
// the the contract code, calling arguments. Contract implements ContractRef
// the contract code, calling arguments. Contract implements ContractRef
type Contract struct {
// CallerAddress is the result of the caller which initialised this
// contract. However when the "call method" is delegated this value
vendor/github.com/ethereum/go-ethereum/crypto/secp256k1/secp256.go (generated, vendored, 2 changes)

@@ -86,7 +86,7 @@ func Sign(msg []byte, seckey []byte) ([]byte, error) {
return sig, nil
}
// RecoverPubkey returns the the public key of the signer.
// RecoverPubkey returns the public key of the signer.
// msg must be the 32-byte hash of the message to be signed.
// sig must be a 65-byte compact ECDSA signature containing the
// recovery id as the last element.
47
vendor/github.com/ethereum/go-ethereum/eth/api.go
generated
vendored
@@ -24,6 +24,7 @@ import (
"io"
"math/big"
"os"
"runtime"
"strings"
"time"

@@ -34,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
@@ -94,47 +94,22 @@ func NewPrivateMinerAPI(e *Ethereum) *PrivateMinerAPI {
return &PrivateMinerAPI{e: e}
}

// Start the miner with the given number of threads. If threads is nil the number
// of workers started is equal to the number of logical CPUs that are usable by
// this process. If mining is already running, this method adjust the number of
// threads allowed to use and updates the minimum price required by the transaction
// pool.
// Start starts the miner with the given number of threads. If threads is nil,
// the number of workers started is equal to the number of logical CPUs that are
// usable by this process. If mining is already running, this method adjust the
// number of threads allowed to use and updates the minimum price required by the
// transaction pool.
func (api *PrivateMinerAPI) Start(threads *int) error {
// Set the number of threads if the seal engine supports it
if threads == nil {
threads = new(int)
} else if *threads == 0 {
*threads = -1 // Disable the miner from within
return api.e.StartMining(runtime.NumCPU())
}
type threaded interface {
SetThreads(threads int)
}
if th, ok := api.e.engine.(threaded); ok {
log.Info("Updated mining threads", "threads", *threads)
th.SetThreads(*threads)
}
// Start the miner and return
if !api.e.IsMining() {
// Propagate the initial price point to the transaction pool
api.e.lock.RLock()
price := api.e.gasPrice
api.e.lock.RUnlock()
api.e.txPool.SetGasPrice(price)
return api.e.StartMining(true)
}
return nil
return api.e.StartMining(*threads)
}

// Stop the miner
func (api *PrivateMinerAPI) Stop() bool {
type threaded interface {
SetThreads(threads int)
}
if th, ok := api.e.engine.(threaded); ok {
th.SetThreads(-1)
}
// Stop terminates the miner, both at the consensus engine level as well as at
// the block creation level.
func (api *PrivateMinerAPI) Stop() {
api.e.StopMining()
return true
}

// SetExtra sets the extra data string that is included when this miner mines a block.
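The Start/Stop rewrite above keeps geth's trick of probing the engine for a SetThreads method through a locally declared interface instead of a concrete type, so engines without it (for example clique) are simply skipped. A minimal standalone sketch of that pattern; fakeEngine and setEngineThreads are illustrative names, not geth code:

package main

import "fmt"

// fakeEngine stands in for a consensus engine that supports SetThreads.
type fakeEngine struct{ threads int }

func (e *fakeEngine) SetThreads(threads int) { e.threads = threads }

// setEngineThreads mirrors the pattern above: declare a tiny local interface
// and use a type assertion; engines that lack SetThreads are left untouched.
func setEngineThreads(engine interface{}, threads int) {
	type threaded interface {
		SetThreads(threads int)
	}
	if th, ok := engine.(threaded); ok {
		if threads == 0 {
			threads = -1 // 0 means "disable the miner from within", as in the diff
		}
		th.SetThreads(threads)
	}
}

func main() {
	e := &fakeEngine{}
	setEngineThreads(e, 4)
	fmt.Println("threads:", e.threads) // threads: 4
	setEngineThreads(e, 0)
	fmt.Println("threads:", e.threads) // threads: -1
}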
2
vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
generated
vendored
@@ -294,7 +294,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
failed = err
break
}
// Reference the trie twice, once for us, once for the trancer
// Reference the trie twice, once for us, once for the tracer
database.TrieDB().Reference(root, common.Hash{})
if number >= origin {
database.TrieDB().Reference(root, common.Hash{})
90
vendor/github.com/ethereum/go-ethereum/eth/backend.go
generated
vendored
@@ -102,12 +102,18 @@ func (s *Ethereum) AddLesServer(ls LesServer) {
// New creates a new Ethereum object (including the
// initialisation of the common Ethereum object)
func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
// Ensure configuration values are compatible and sane
if config.SyncMode == downloader.LightSync {
return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
}
if !config.SyncMode.IsValid() {
return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode)
}
if config.MinerGasPrice == nil || config.MinerGasPrice.Cmp(common.Big0) <= 0 {
log.Warn("Sanitizing invalid miner gas price", "provided", config.MinerGasPrice, "updated", DefaultConfig.MinerGasPrice)
config.MinerGasPrice = new(big.Int).Set(DefaultConfig.MinerGasPrice)
}
// Assemble the Ethereum object
chainDb, err := CreateDB(ctx, config, "chaindata")
if err != nil {
return nil, err
@@ -124,13 +130,13 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
chainConfig: chainConfig,
eventMux: ctx.EventMux,
accountManager: ctx.AccountManager,
engine: CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.MinerNotify, chainDb),
engine: CreateConsensusEngine(ctx, chainConfig, &config.Ethash, config.MinerNotify, config.MinerNoverify, chainDb),
shutdownChan: make(chan bool),
networkID: config.NetworkId,
gasPrice: config.MinerGasPrice,
etherbase: config.Etherbase,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks, bloomConfirms),
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),
}

log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
@@ -167,7 +173,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
return nil, err
}

eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit)
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine, config.MinerRecommit, config.MinerGasFloor, config.MinerGasCeil)
eth.miner.SetExtra(makeExtraData(config.MinerExtraData))

eth.APIBackend = &EthAPIBackend{eth, nil}
@@ -210,7 +216,7 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
}

// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, db ethdb.Database) consensus.Engine {
func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
// If proof-of-authority is requested, set it up
if chainConfig.Clique != nil {
return clique.New(chainConfig.Clique, db)
@@ -222,7 +228,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainCo
return ethash.NewFaker()
case ethash.ModeTest:
log.Warn("Ethash used in test mode")
return ethash.NewTester(nil)
return ethash.NewTester(nil, noverify)
case ethash.ModeShared:
log.Warn("Ethash used in shared mode")
return ethash.NewShared()
@@ -234,7 +240,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, chainConfig *params.ChainCo
DatasetDir: config.DatasetDir,
DatasetsInMem: config.DatasetsInMem,
DatasetsOnDisk: config.DatasetsOnDisk,
}, notify)
}, notify, noverify)
engine.SetThreads(-1) // Disable CPU mining
return engine
}
@@ -333,32 +339,66 @@ func (s *Ethereum) SetEtherbase(etherbase common.Address) {
s.miner.SetEtherbase(etherbase)
}

func (s *Ethereum) StartMining(local bool) error {
eb, err := s.Etherbase()
if err != nil {
log.Error("Cannot start mining without etherbase", "err", err)
return fmt.Errorf("etherbase missing: %v", err)
// StartMining starts the miner with the given number of CPU threads. If mining
// is already running, this method adjust the number of threads allowed to use
// and updates the minimum price required by the transaction pool.
func (s *Ethereum) StartMining(threads int) error {
// Update the thread count within the consensus engine
type threaded interface {
SetThreads(threads int)
}
if clique, ok := s.engine.(*clique.Clique); ok {
wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
if wallet == nil || err != nil {
log.Error("Etherbase account unavailable locally", "err", err)
return fmt.Errorf("signer missing: %v", err)
if th, ok := s.engine.(threaded); ok {
log.Info("Updated mining threads", "threads", threads)
if threads == 0 {
threads = -1 // Disable the miner from within
}
clique.Authorize(eb, wallet.SignHash)
th.SetThreads(threads)
}
if local {
// If local (CPU) mining is started, we can disable the transaction rejection
// mechanism introduced to speed sync times. CPU mining on mainnet is ludicrous
// so none will ever hit this path, whereas marking sync done on CPU mining
// will ensure that private networks work in single miner mode too.
// If the miner was not running, initialize it
if !s.IsMining() {
// Propagate the initial price point to the transaction pool
s.lock.RLock()
price := s.gasPrice
s.lock.RUnlock()
s.txPool.SetGasPrice(price)

// Configure the local mining addess
eb, err := s.Etherbase()
if err != nil {
log.Error("Cannot start mining without etherbase", "err", err)
return fmt.Errorf("etherbase missing: %v", err)
}
if clique, ok := s.engine.(*clique.Clique); ok {
wallet, err := s.accountManager.Find(accounts.Account{Address: eb})
if wallet == nil || err != nil {
log.Error("Etherbase account unavailable locally", "err", err)
return fmt.Errorf("signer missing: %v", err)
}
clique.Authorize(eb, wallet.SignHash)
}
// If mining is started, we can disable the transaction rejection mechanism
// introduced to speed sync times.
atomic.StoreUint32(&s.protocolManager.acceptTxs, 1)

go s.miner.Start(eb)
}
go s.miner.Start(eb)
return nil
}

func (s *Ethereum) StopMining() { s.miner.Stop() }
// StopMining terminates the miner, both at the consensus engine level as well as
// at the block creation level.
func (s *Ethereum) StopMining() {
// Update the thread count within the consensus engine
type threaded interface {
SetThreads(threads int)
}
if th, ok := s.engine.(threaded); ok {
th.SetThreads(-1)
}
// Stop the block creating itself
s.miner.Stop()
}

func (s *Ethereum) IsMining() bool { return s.miner.Mining() }
func (s *Ethereum) Miner() *miner.Miner { return s.miner }

@@ -386,7 +426,7 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
// Ethereum protocol implementation.
func (s *Ethereum) Start(srvr *p2p.Server) error {
// Start the bloom bits servicing goroutines
s.startBloomHandlers()
s.startBloomHandlers(params.BloomBitsBlocks)

// Start the RPC service
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.NetVersion())
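The sanitization added to New above falls back to the default when the configured MinerGasPrice is nil or non-positive. A minimal standalone sketch of that check, assuming the v1.8.15 default of 1 gwei; sanitizeGasPrice is an illustrative helper, not geth code:

package main

import (
	"fmt"
	"math/big"
)

// Mirrors DefaultConfig.MinerGasPrice after this diff (params.GWei = 1e9 wei).
var defaultMinerGasPrice = big.NewInt(1000000000)

// sanitizeGasPrice keeps a positive provided price and otherwise falls back to
// the default, like the new check in eth.New.
func sanitizeGasPrice(provided *big.Int) *big.Int {
	if provided == nil || provided.Cmp(big.NewInt(0)) <= 0 {
		return new(big.Int).Set(defaultMinerGasPrice)
	}
	return provided
}

func main() {
	fmt.Println(sanitizeGasPrice(nil))                       // 1000000000
	fmt.Println(sanitizeGasPrice(big.NewInt(-5)))            // 1000000000
	fmt.Println(sanitizeGasPrice(big.NewInt(2000000000)))    // 2000000000
}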
15
vendor/github.com/ethereum/go-ethereum/eth/bloombits.go
generated
vendored
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)

const (
@@ -50,7 +49,7 @@ const (

// startBloomHandlers starts a batch of goroutines to accept bloom bit database
// retrievals from possibly a range of filters and serving the data to satisfy.
func (eth *Ethereum) startBloomHandlers() {
func (eth *Ethereum) startBloomHandlers(sectionSize uint64) {
for i := 0; i < bloomServiceThreads; i++ {
go func() {
for {
@@ -62,9 +61,9 @@ func (eth *Ethereum) startBloomHandlers() {
task := <-request
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
head := rawdb.ReadCanonicalHash(eth.chainDb, (section+1)*params.BloomBitsBlocks-1)
head := rawdb.ReadCanonicalHash(eth.chainDb, (section+1)*sectionSize-1)
if compVector, err := rawdb.ReadBloomBits(eth.chainDb, task.Bit, section, head); err == nil {
if blob, err := bitutil.DecompressBytes(compVector, int(params.BloomBitsBlocks)/8); err == nil {
if blob, err := bitutil.DecompressBytes(compVector, int(sectionSize/8)); err == nil {
task.Bitsets[i] = blob
} else {
task.Error = err
@@ -81,10 +80,6 @@ func (eth *Ethereum) startBloomHandlers() {
}

const (
// bloomConfirms is the number of confirmation blocks before a bloom section is
// considered probably final and its rotated bits are calculated.
bloomConfirms = 256

// bloomThrottling is the time to wait between processing two consecutive index
// sections. It's useful during chain upgrades to prevent disk overload.
bloomThrottling = 100 * time.Millisecond
@@ -102,14 +97,14 @@ type BloomIndexer struct {

// NewBloomIndexer returns a chain indexer that generates bloom bits data for the
// canonical chain for fast logs filtering.
func NewBloomIndexer(db ethdb.Database, size, confReq uint64) *core.ChainIndexer {
func NewBloomIndexer(db ethdb.Database, size, confirms uint64) *core.ChainIndexer {
backend := &BloomIndexer{
db: db,
size: size,
}
table := ethdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix))

return core.NewChainIndexer(db, table, backend, size, confReq, bloomThrottling, "bloombits")
return core.NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits")
}

// Reset implements core.ChainIndexerBackend, starting a new bloombits index
vendor/github.com/ethereum/go-ethereum/eth/config.go
generated
vendored
8
vendor/github.com/ethereum/go-ethereum/eth/config.go
generated
vendored
|
@ -48,7 +48,9 @@ var DefaultConfig = Config{
|
|||
DatabaseCache: 768,
|
||||
TrieCache: 256,
|
||||
TrieTimeout: 60 * time.Minute,
|
||||
MinerGasPrice: big.NewInt(18 * params.Shannon),
|
||||
MinerGasFloor: 8000000,
|
||||
MinerGasCeil: 8000000,
|
||||
MinerGasPrice: big.NewInt(params.GWei),
|
||||
MinerRecommit: 3 * time.Second,
|
||||
|
||||
TxPool: core.DefaultTxPoolConfig,
|
||||
|
@ -97,11 +99,13 @@ type Config struct {
|
|||
|
||||
// Mining-related options
|
||||
Etherbase common.Address `toml:",omitempty"`
|
||||
MinerThreads int `toml:",omitempty"`
|
||||
MinerNotify []string `toml:",omitempty"`
|
||||
MinerExtraData []byte `toml:",omitempty"`
|
||||
MinerGasFloor uint64
|
||||
MinerGasCeil uint64
|
||||
MinerGasPrice *big.Int
|
||||
MinerRecommit time.Duration
|
||||
MinerNoverify bool
|
||||
|
||||
// Ethash options
|
||||
Ethash ethash.Config
|
||||
|
|
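For reference, the new miner fields introduced above carry these defaults, taken from the DefaultConfig hunk; the struct below is a local stand-in, not the real eth.Config, and MinerNoverify simply defaults to its zero value:

package main

import (
	"fmt"
	"math/big"
	"time"
)

// minerDefaults mirrors only the miner-related fields added in this diff.
type minerDefaults struct {
	GasFloor uint64        // MinerGasFloor: gas floor targeted for mined blocks
	GasCeil  uint64        // MinerGasCeil: gas ceiling targeted for mined blocks
	GasPrice *big.Int      // MinerGasPrice: minimum gas price for mined transactions
	Recommit time.Duration // MinerRecommit: interval at which the sealing work is recreated
	Noverify bool          // MinerNoverify: passed through to ethash (see CreateConsensusEngine above)
}

func main() {
	d := minerDefaults{
		GasFloor: 8000000,
		GasCeil:  8000000,
		GasPrice: big.NewInt(1000000000), // params.GWei
		Recommit: 3 * time.Second,
		Noverify: false,
	}
	fmt.Printf("%+v\n", d)
}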
2
vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go
generated
vendored
@@ -662,7 +662,7 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
for _, header := range request.Headers {
taskQueue.Push(header, -float32(header.Number.Uint64()))
}
// Add the peer to the expiry report along the the number of failed requests
// Add the peer to the expiry report along the number of failed requests
expiries[id] = len(request.Headers)
}
}
2
vendor/github.com/ethereum/go-ethereum/eth/fetcher/fetcher.go
generated
vendored
@@ -204,7 +204,7 @@ func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time
}
}

// Enqueue tries to fill gaps the the fetcher's future import queue.
// Enqueue tries to fill gaps the fetcher's future import queue.
func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
op := &inject{
origin: peer,
2
vendor/github.com/ethereum/go-ethereum/eth/gasprice/gasprice.go
generated
vendored
@@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)

var maxPrice = big.NewInt(500 * params.Shannon)
var maxPrice = big.NewInt(500 * params.GWei)

type Config struct {
Blocks int
24
vendor/github.com/ethereum/go-ethereum/eth/gen_config.go
generated
vendored
@@ -31,11 +31,13 @@ func (c Config) MarshalTOML() (interface{}, error) {
TrieCache int
TrieTimeout time.Duration
Etherbase common.Address `toml:",omitempty"`
MinerThreads int `toml:",omitempty"`
MinerNotify []string `toml:",omitempty"`
MinerExtraData hexutil.Bytes `toml:",omitempty"`
MinerGasFloor uint64
MinerGasCeil uint64
MinerGasPrice *big.Int
MinerRecommit time.Duration
MinerNoverify bool
Ethash ethash.Config
TxPool core.TxPoolConfig
GPO gasprice.Config
@@ -55,11 +57,13 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.TrieCache = c.TrieCache
enc.TrieTimeout = c.TrieTimeout
enc.Etherbase = c.Etherbase
enc.MinerThreads = c.MinerThreads
enc.MinerNotify = c.MinerNotify
enc.MinerExtraData = c.MinerExtraData
enc.MinerGasFloor = c.MinerGasFloor
enc.MinerGasCeil = c.MinerGasCeil
enc.MinerGasPrice = c.MinerGasPrice
enc.MinerRecommit = c.MinerRecommit
enc.MinerNoverify = c.MinerNoverify
enc.Ethash = c.Ethash
enc.TxPool = c.TxPool
enc.GPO = c.GPO
@@ -83,11 +87,13 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TrieCache *int
TrieTimeout *time.Duration
Etherbase *common.Address `toml:",omitempty"`
MinerThreads *int `toml:",omitempty"`
MinerNotify []string `toml:",omitempty"`
MinerExtraData *hexutil.Bytes `toml:",omitempty"`
MinerGasFloor *uint64
MinerGasCeil *uint64
MinerGasPrice *big.Int
MinerRecommit *time.Duration
MinerNoverify *bool
Ethash *ethash.Config
TxPool *core.TxPoolConfig
GPO *gasprice.Config
@@ -134,21 +140,27 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.Etherbase != nil {
c.Etherbase = *dec.Etherbase
}
if dec.MinerThreads != nil {
c.MinerThreads = *dec.MinerThreads
}
if dec.MinerNotify != nil {
c.MinerNotify = dec.MinerNotify
}
if dec.MinerExtraData != nil {
c.MinerExtraData = *dec.MinerExtraData
}
if dec.MinerGasFloor != nil {
c.MinerGasFloor = *dec.MinerGasFloor
}
if dec.MinerGasCeil != nil {
c.MinerGasCeil = *dec.MinerGasCeil
}
if dec.MinerGasPrice != nil {
c.MinerGasPrice = dec.MinerGasPrice
}
if dec.MinerRecommit != nil {
c.MinerRecommit = *dec.MinerRecommit
}
if dec.MinerNoverify != nil {
c.MinerNoverify = *dec.MinerNoverify
}
if dec.Ethash != nil {
c.Ethash = *dec.Ethash
}
2
vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go
generated
vendored
@@ -47,7 +47,7 @@ import (
)

const (
defaultGasPrice = 50 * params.Shannon
defaultGasPrice = params.GWei
)

// PublicEthereumAPI provides an API to access Ethereum related information.
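Shannon is geth's older name for gwei (1e9 wei), so this hunk lowers the default gas price from 50 gwei to 1 gwei. A tiny sketch of the denominations involved, using illustrative constants rather than the params package:

package main

import "fmt"

// Denominations in wei; GWei is what older geth code calls Shannon.
const (
	Wei  uint64 = 1
	GWei uint64 = 1e9
)

func main() {
	oldDefault := 50 * GWei // previous defaultGasPrice: 50 * params.Shannon
	newDefault := GWei      // new defaultGasPrice: params.GWei
	fmt.Println(oldDefault, newDefault) // 50000000000 1000000000
}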
2
vendor/github.com/ethereum/go-ethereum/les/api_backend.go
generated
vendored
@@ -192,7 +192,7 @@ func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
return 0, 0
}
sections, _, _ := b.eth.bloomIndexer.Sections()
return light.BloomTrieFrequency, sections
return params.BloomBitsBlocksClient, sections
}

func (b *LesApiBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
15
vendor/github.com/ethereum/go-ethereum/les/backend.go
generated
vendored
@@ -94,26 +94,27 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
lesCommons: lesCommons{
chainDb: chainDb,
config: config,
iConfig: light.DefaultClientIndexerConfig,
},
chainConfig: chainConfig,
eventMux: ctx.EventMux,
peers: peers,
reqDist: newRequestDistributor(peers, quitSync),
accountManager: ctx.AccountManager,
engine: eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, chainDb),
engine: eth.CreateConsensusEngine(ctx, chainConfig, &config.Ethash, nil, false, chainDb),
shutdownChan: make(chan bool),
networkId: config.NetworkId,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: eth.NewBloomIndexer(chainDb, light.BloomTrieFrequency, light.HelperTrieConfirmations),
bloomIndexer: eth.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
}

leth.relay = NewLesTxRelay(peers, leth.reqDist)
leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg)
leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)

leth.odr = NewLesOdr(chainDb, leth.retriever)
leth.chtIndexer = light.NewChtIndexer(chainDb, true, leth.odr)
leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, true, leth.odr)
leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever)
leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequencyClient, params.HelperTrieConfirmations)
leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency)
leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)

// Note: NewLightChain adds the trusted checkpoint so it needs an ODR with
@@ -134,7 +135,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
}

leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil {
if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, light.DefaultClientIndexerConfig, true, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil {
return nil, err
}
leth.ApiBackend = &LesApiBackend{leth, nil}
@@ -229,8 +230,8 @@ func (s *LightEthereum) Protocols() []p2p.Protocol {
// Start implements node.Service, starting all internal goroutines needed by the
// Ethereum protocol implementation.
func (s *LightEthereum) Start(srvr *p2p.Server) error {
s.startBloomHandlers()
log.Warn("Light client mode is an experimental feature")
s.startBloomHandlers(params.BloomBitsBlocksClient)
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.networkId)
// clients are searching for the first advertised protocol in the list
protocolVersion := AdvertiseProtocolVersions[0]
4
vendor/github.com/ethereum/go-ethereum/les/bloombits.go
generated
vendored
@@ -43,7 +43,7 @@ const (

// startBloomHandlers starts a batch of goroutines to accept bloom bit database
// retrievals from possibly a range of filters and serving the data to satisfy.
func (eth *LightEthereum) startBloomHandlers() {
func (eth *LightEthereum) startBloomHandlers(sectionSize uint64) {
for i := 0; i < bloomServiceThreads; i++ {
go func() {
for {
@@ -57,7 +57,7 @@ func (eth *LightEthereum) startBloomHandlers() {
compVectors, err := light.GetBloomBits(task.Context, eth.odr, task.Bit, task.Sections)
if err == nil {
for i := range task.Sections {
if blob, err := bitutil.DecompressBytes(compVectors[i], int(light.BloomTrieFrequency/8)); err == nil {
if blob, err := bitutil.DecompressBytes(compVectors[i], int(sectionSize/8)); err == nil {
task.Bitsets[i] = blob
} else {
task.Error = err
6
vendor/github.com/ethereum/go-ethereum/les/commons.go
generated
vendored
@@ -33,6 +33,7 @@ import (
// lesCommons contains fields needed by both server and client.
type lesCommons struct {
config *eth.Config
iConfig *light.IndexerConfig
chainDb ethdb.Database
protocolManager *ProtocolManager
chtIndexer, bloomTrieIndexer *core.ChainIndexer
@@ -81,7 +82,7 @@ func (c *lesCommons) nodeInfo() interface{} {

if !c.protocolManager.lightSync {
// convert to client section size if running in server mode
sections /= light.CHTFrequencyClient / light.CHTFrequencyServer
sections /= c.iConfig.PairChtSize / c.iConfig.ChtSize
}

if sections2 < sections {
@@ -94,7 +95,8 @@ func (c *lesCommons) nodeInfo() interface{} {
if c.protocolManager.lightSync {
chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead)
} else {
chtRoot = light.GetChtV2Root(c.chainDb, sectionIndex, sectionHead)
idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1
chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead)
}
cht = light.TrustedCheckpoint{
SectionIdx: sectionIndex,
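The nodeInfo changes above express the old CHTFrequencyClient/CHTFrequencyServer arithmetic through the indexer config: server CHT sections of 4096 blocks are paired with client sections of 32768 blocks, a ratio of 8. A standalone sketch of the two conversions, with illustrative helper names:

package main

import "fmt"

// clientSections converts a count of server-size CHT sections into client-size sections.
func clientSections(serverSections, chtSize, pairChtSize uint64) uint64 {
	return serverSections / (pairChtSize / chtSize)
}

// lastServerIndex maps a client-size section index onto the last server-size index it
// covers, mirroring idxV2 := (sectionIndex+1)*PairChtSize/ChtSize - 1 above.
func lastServerIndex(clientIndex, chtSize, pairChtSize uint64) uint64 {
	return (clientIndex+1)*pairChtSize/chtSize - 1
}

func main() {
	const chtSize, pairChtSize = 4096, 32768 // DefaultServerIndexerConfig values
	fmt.Println(clientSections(24, chtSize, pairChtSize)) // 3 client-size sections
	fmt.Println(lastServerIndex(2, chtSize, pairChtSize)) // server-size index 23
}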
13
vendor/github.com/ethereum/go-ethereum/les/handler.go
generated
vendored
@@ -94,6 +94,7 @@ type ProtocolManager struct {
txrelay *LesTxRelay
networkId uint64
chainConfig *params.ChainConfig
iConfig *light.IndexerConfig
blockchain BlockChain
chainDb ethdb.Database
odr *LesOdr
@@ -123,13 +124,14 @@ type ProtocolManager struct {

// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) {
func NewProtocolManager(chainConfig *params.ChainConfig, indexerConfig *light.IndexerConfig, lightSync bool, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
lightSync: lightSync,
eventMux: mux,
blockchain: blockchain,
chainConfig: chainConfig,
iConfig: indexerConfig,
chainDb: chainDb,
odr: odr,
networkId: networkId,
@@ -885,7 +887,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
trieDb := trie.NewDatabase(ethdb.NewTable(pm.chainDb, light.ChtTablePrefix))
for _, req := range req.Reqs {
if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*light.CHTFrequencyServer-1)
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*pm.iConfig.ChtSize-1)
if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
trie, err := trie.New(root, trieDb)
if err != nil {
@@ -1140,10 +1142,11 @@ func (pm *ProtocolManager) getAccount(statedb *state.StateDB, root, hash common.
func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
switch id {
case htCanonical:
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*light.CHTFrequencyClient-1)
return light.GetChtV2Root(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
idxV1 := (idx+1)*(pm.iConfig.PairChtSize/pm.iConfig.ChtSize) - 1
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idxV1+1)*pm.iConfig.ChtSize-1)
return light.GetChtRoot(pm.chainDb, idxV1, sectionHead), light.ChtTablePrefix
case htBloomBits:
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*light.BloomTrieFrequency-1)
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.BloomTrieSize-1)
return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
}
return common.Hash{}, ""
15
vendor/github.com/ethereum/go-ethereum/les/odr.go
generated
vendored
@@ -28,16 +28,18 @@ import (
// LesOdr implements light.OdrBackend
type LesOdr struct {
db ethdb.Database
indexerConfig *light.IndexerConfig
chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer
retriever *retrieveManager
stop chan struct{}
}

func NewLesOdr(db ethdb.Database, retriever *retrieveManager) *LesOdr {
func NewLesOdr(db ethdb.Database, config *light.IndexerConfig, retriever *retrieveManager) *LesOdr {
return &LesOdr{
db: db,
retriever: retriever,
stop: make(chan struct{}),
db: db,
indexerConfig: config,
retriever: retriever,
stop: make(chan struct{}),
}
}

@@ -73,6 +75,11 @@ func (odr *LesOdr) BloomIndexer() *core.ChainIndexer {
return odr.bloomIndexer
}

// IndexerConfig returns the indexer config.
func (odr *LesOdr) IndexerConfig() *light.IndexerConfig {
return odr.indexerConfig
}

const (
MsgBlockBodies = iota
MsgCode
20
vendor/github.com/ethereum/go-ethereum/les/odr_requests.go
generated
vendored
@@ -365,7 +365,7 @@ func (r *ChtRequest) CanSend(peer *peer) bool {
peer.lock.RLock()
defer peer.lock.RUnlock()

return peer.headInfo.Number >= light.HelperTrieConfirmations && r.ChtNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.CHTFrequencyClient
return peer.headInfo.Number >= r.Config.ChtConfirms && r.ChtNum <= (peer.headInfo.Number-r.Config.ChtConfirms)/r.Config.ChtSize
}

// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
@@ -379,7 +379,21 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
Key: encNum[:],
AuxReq: auxHeader,
}
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
switch peer.version {
case lpv1:
var reqsV1 ChtReq
if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
return fmt.Errorf("Request invalid in LES/1 mode")
}
blockNum := binary.BigEndian.Uint64(req.Key)
// convert HelperTrie request to old CHT request
reqsV1 = ChtReq{ChtNum: (req.TrieIdx + 1) * (r.Config.ChtSize / r.Config.PairChtSize), BlockNum: blockNum, FromLevel: req.FromLevel}
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []ChtReq{reqsV1})
case lpv2:
return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
default:
panic(nil)
}
}

// Valid processes an ODR request reply message from the LES network
@@ -484,7 +498,7 @@ func (r *BloomRequest) CanSend(peer *peer) bool {
if peer.version < lpv2 {
return false
}
return peer.headInfo.Number >= light.HelperTrieConfirmations && r.BloomTrieNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.BloomTrieFrequency
return peer.headInfo.Number >= r.Config.BloomTrieConfirms && r.BloomTrieNum <= (peer.headInfo.Number-r.Config.BloomTrieConfirms)/r.Config.BloomTrieSize
}

// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
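The CanSend rewrite above keeps the same gating logic, only sourced from r.Config: a CHT request may go to a peer whose head is past the confirmation window and whose confirmed chain fully covers the requested section. A standalone sketch of that check, assuming the client defaults of 32768-block sections and 2048 confirmations:

package main

import "fmt"

// canSendCht mirrors: head >= confirms && chtNum <= (head-confirms)/chtSize.
func canSendCht(peerHead, chtNum, confirms, chtSize uint64) bool {
	return peerHead >= confirms && chtNum <= (peerHead-confirms)/chtSize
}

func main() {
	fmt.Println(canSendCht(70000, 1, 2048, 32768)) // true:  (70000-2048)/32768 = 2 >= 1
	fmt.Println(canSendCht(34000, 1, 2048, 32768)) // false: (34000-2048)/32768 = 0 < 1
}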
30
vendor/github.com/ethereum/go-ethereum/les/peer.go
generated
vendored
@@ -19,7 +19,6 @@ package les

import (
"crypto/ecdsa"
"encoding/binary"
"errors"
"fmt"
"math/big"
@@ -36,9 +35,10 @@ import (
)

var (
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
errInvalidHelpTrieReq = errors.New("invalid help trie request")
)

const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
@@ -284,21 +284,21 @@ func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
}

// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
switch p.version {
case lpv1:
reqsV1 := make([]ChtReq, len(reqs))
for i, req := range reqs {
if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
return fmt.Errorf("Request invalid in LES/1 mode")
}
blockNum := binary.BigEndian.Uint64(req.Key)
// convert HelperTrie request to old CHT request
reqsV1[i] = ChtReq{ChtNum: (req.TrieIdx + 1) * (light.CHTFrequencyClient / light.CHTFrequencyServer), BlockNum: blockNum, FromLevel: req.FromLevel}
reqs, ok := data.([]ChtReq)
if !ok {
return errInvalidHelpTrieReq
}
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqsV1)
p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
case lpv2:
reqs, ok := data.([]HelperTrieReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
default:
panic(nil)
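RequestHelperTrieProofs now accepts an interface{} payload and asserts the concrete slice type per protocol version, returning errInvalidHelpTrieReq on a mismatch; the LES/1-to-CHT conversion moved into ChtRequest.Request above. A simplified standalone sketch of that dispatch; chtReq, helperTrieReq and the returned message names are stand-ins, not the real LES types:

package main

import (
	"errors"
	"fmt"
)

type chtReq struct{ ChtNum, BlockNum uint64 }     // LES/1-style request
type helperTrieReq struct{ Type, TrieIdx uint64 } // LES/2-style request

var errInvalidHelpTrieReq = errors.New("invalid help trie request")

// requestHelperTrieProofs picks the wire format from the protocol version and
// rejects payloads of the wrong concrete type.
func requestHelperTrieProofs(version int, data interface{}) (string, error) {
	switch version {
	case 1:
		reqs, ok := data.([]chtReq)
		if !ok {
			return "", errInvalidHelpTrieReq
		}
		return fmt.Sprintf("GetHeaderProofsMsg x%d", len(reqs)), nil
	case 2:
		reqs, ok := data.([]helperTrieReq)
		if !ok {
			return "", errInvalidHelpTrieReq
		}
		return fmt.Sprintf("GetHelperTrieProofsMsg x%d", len(reqs)), nil
	default:
		return "", errors.New("unknown protocol version")
	}
}

func main() {
	fmt.Println(requestHelperTrieProofs(2, []helperTrieReq{{}})) // GetHelperTrieProofsMsg x1 <nil>
	fmt.Println(requestHelperTrieProofs(1, []helperTrieReq{{}})) // type mismatch -> error
}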
14
vendor/github.com/ethereum/go-ethereum/les/server.go
generated
vendored
@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)

@@ -50,7 +51,7 @@ type LesServer struct {

func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
quitSync := make(chan struct{})
pm, err := NewProtocolManager(eth.BlockChain().Config(), false, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, nil, quitSync, new(sync.WaitGroup))
pm, err := NewProtocolManager(eth.BlockChain().Config(), light.DefaultServerIndexerConfig, false, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, nil, quitSync, new(sync.WaitGroup))
if err != nil {
return nil, err
}
@@ -64,8 +65,9 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
lesCommons: lesCommons{
config: config,
chainDb: eth.ChainDb(),
chtIndexer: light.NewChtIndexer(eth.ChainDb(), false, nil),
bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), false, nil),
iConfig: light.DefaultServerIndexerConfig,
chtIndexer: light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequencyServer, params.HelperTrieProcessConfirmations),
bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
protocolManager: pm,
},
quitSync: quitSync,
@@ -75,14 +77,14 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
logger := log.New()

chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
chtV2SectionCount := chtV1SectionCount / (light.CHTFrequencyClient / light.CHTFrequencyServer)
chtV2SectionCount := chtV1SectionCount / (params.CHTFrequencyClient / params.CHTFrequencyServer)
if chtV2SectionCount != 0 {
// convert to LES/2 section
chtLastSection := chtV2SectionCount - 1
// convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
chtLastSectionV1 := (chtLastSection+1)*(light.CHTFrequencyClient/light.CHTFrequencyServer) - 1
chtLastSectionV1 := (chtLastSection+1)*(params.CHTFrequencyClient/params.CHTFrequencyServer) - 1
chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
chtRoot := light.GetChtV2Root(pm.chainDb, chtLastSection, chtSectionHead)
chtRoot := light.GetChtRoot(pm.chainDb, chtLastSectionV1, chtSectionHead)
logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
}
bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
26
vendor/github.com/ethereum/go-ethereum/light/lightchain.go
generated
vendored
@@ -48,6 +48,7 @@ var (
// interface. It only does header validation during chain insertion.
type LightChain struct {
hc *core.HeaderChain
indexerConfig *IndexerConfig
chainDb ethdb.Database
odr OdrBackend
chainFeed event.Feed
@@ -81,13 +82,14 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
blockCache, _ := lru.New(blockCacheLimit)

bc := &LightChain{
chainDb: odr.Database(),
odr: odr,
quit: make(chan struct{}),
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
blockCache: blockCache,
engine: engine,
chainDb: odr.Database(),
indexerConfig: odr.IndexerConfig(),
odr: odr,
quit: make(chan struct{}),
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
blockCache: blockCache,
engine: engine,
}
var err error
bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.engine, bc.getProcInterrupt)
@@ -119,16 +121,16 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
func (self *LightChain) addTrustedCheckpoint(cp TrustedCheckpoint) {
if self.odr.ChtIndexer() != nil {
StoreChtRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.CHTRoot)
self.odr.ChtIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead)
self.odr.ChtIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
}
if self.odr.BloomTrieIndexer() != nil {
StoreBloomTrieRoot(self.chainDb, cp.SectionIdx, cp.SectionHead, cp.BloomRoot)
self.odr.BloomTrieIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead)
self.odr.BloomTrieIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
}
if self.odr.BloomIndexer() != nil {
self.odr.BloomIndexer().AddKnownSectionHead(cp.SectionIdx, cp.SectionHead)
self.odr.BloomIndexer().AddCheckpoint(cp.SectionIdx, cp.SectionHead)
}
log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.SectionIdx+1)*CHTFrequencyClient-1, "hash", cp.SectionHead)
log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.SectionIdx+1)*self.indexerConfig.ChtSize-1, "hash", cp.SectionHead)
}

func (self *LightChain) getProcInterrupt() bool {
@@ -472,7 +474,7 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
head := self.CurrentHeader().Number.Uint64()
sections, _, _ := self.odr.ChtIndexer().Sections()

latest := sections*CHTFrequencyClient - 1
latest := sections*self.indexerConfig.ChtSize - 1
if clique := self.hc.Config().Clique; clique != nil {
latest -= latest % clique.Epoch // epoch snapshot for clique
}
5
vendor/github.com/ethereum/go-ethereum/light/odr.go
generated
vendored
@@ -44,6 +44,7 @@ type OdrBackend interface {
BloomTrieIndexer() *core.ChainIndexer
BloomIndexer() *core.ChainIndexer
Retrieve(ctx context.Context, req OdrRequest) error
IndexerConfig() *IndexerConfig
}

// OdrRequest is an interface for retrieval requests
@@ -136,6 +137,7 @@ func (req *ReceiptsRequest) StoreResult(db ethdb.Database) {
// ChtRequest is the ODR request type for state/storage trie entries
type ChtRequest struct {
OdrRequest
Config *IndexerConfig
ChtNum, BlockNum uint64
ChtRoot common.Hash
Header *types.Header
@@ -155,6 +157,7 @@ func (req *ChtRequest) StoreResult(db ethdb.Database) {
// BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure
type BloomRequest struct {
OdrRequest
Config *IndexerConfig
BloomTrieNum uint64
BitIdx uint
SectionIdxList []uint64
@@ -166,7 +169,7 @@ type BloomRequest struct {
// StoreResult stores the retrieved data in local database
func (req *BloomRequest) StoreResult(db ethdb.Database) {
for i, sectionIdx := range req.SectionIdxList {
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*BloomTrieFrequency-1)
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*req.Config.BloomTrieSize-1)
// if we don't have the canonical hash stored for this section head number, we'll still store it under
// a key with a zero sectionHead. GetBloomBits will look there too if we still don't have the canonical
// hash. In the unlikely case we've retrieved the section head hash since then, we'll just retrieve the
18
vendor/github.com/ethereum/go-ethereum/light/odr_util.go
generated
vendored
@@ -53,16 +53,16 @@ func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*typ
for chtCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
chtCount--
if chtCount > 0 {
sectionHeadNum = chtCount*CHTFrequencyClient - 1
sectionHeadNum = chtCount*odr.IndexerConfig().ChtSize - 1
sectionHead = odr.ChtIndexer().SectionHead(chtCount - 1)
canonicalHash = rawdb.ReadCanonicalHash(db, sectionHeadNum)
}
}
}
if number >= chtCount*CHTFrequencyClient {
if number >= chtCount*odr.IndexerConfig().ChtSize {
return nil, ErrNoTrustedCht
}
r := &ChtRequest{ChtRoot: GetChtRoot(db, chtCount-1, sectionHead), ChtNum: chtCount - 1, BlockNum: number}
r := &ChtRequest{ChtRoot: GetChtRoot(db, chtCount-1, sectionHead), ChtNum: chtCount - 1, BlockNum: number, Config: odr.IndexerConfig()}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
}
@@ -175,9 +175,9 @@ func GetBlockLogs(ctx context.Context, odr OdrBackend, hash common.Hash, number

// GetBloomBits retrieves a batch of compressed bloomBits vectors belonging to the given bit index and section indexes
func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxList []uint64) ([][]byte, error) {
db := odr.Database()
result := make([][]byte, len(sectionIdxList))
var (
db = odr.Database()
result = make([][]byte, len(sectionIdxList))
reqList []uint64
reqIdx []int
)
@@ -193,7 +193,7 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
for bloomTrieCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
bloomTrieCount--
if bloomTrieCount > 0 {
sectionHeadNum = bloomTrieCount*BloomTrieFrequency - 1
sectionHeadNum = bloomTrieCount*odr.IndexerConfig().BloomTrieSize - 1
sectionHead = odr.BloomTrieIndexer().SectionHead(bloomTrieCount - 1)
canonicalHash = rawdb.ReadCanonicalHash(db, sectionHeadNum)
}
@@ -201,7 +201,7 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
}

for i, sectionIdx := range sectionIdxList {
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*BloomTrieFrequency-1)
sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*odr.IndexerConfig().BloomSize-1)
// if we don't have the canonical hash stored for this section head number, we'll still look for
// an entry with a zero sectionHead (we store it with zero section head too if we don't know it
// at the time of the retrieval)
@@ -209,6 +209,7 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
if err == nil {
result[i] = bloomBits
} else {
// TODO(rjl493456442) Convert sectionIndex to BloomTrie relative index
if sectionIdx >= bloomTrieCount {
return nil, ErrNoTrustedBloomTrie
}
@@ -220,7 +221,8 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bitIdx uint, sectionIdxLi
return result, nil
}

r := &BloomRequest{BloomTrieRoot: GetBloomTrieRoot(db, bloomTrieCount-1, sectionHead), BloomTrieNum: bloomTrieCount - 1, BitIdx: bitIdx, SectionIdxList: reqList}
r := &BloomRequest{BloomTrieRoot: GetBloomTrieRoot(db, bloomTrieCount-1, sectionHead), BloomTrieNum: bloomTrieCount - 1,
BitIdx: bitIdx, SectionIdxList: reqList, Config: odr.IndexerConfig()}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
} else {
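GetBloomBits above follows a collect-misses-then-backfill pattern: serve the sections already in the local database, batch the missing section indexes into one request (reqList/reqIdx), and copy the fetched vectors back into their original slots. A simplified standalone sketch of that pattern, with an illustrative fetch callback standing in for the ODR retrieval:

package main

import "fmt"

// getBitsets returns one byte slice per requested section, fetching only the
// sections missing from the local map, in a single batched call.
func getBitsets(sections []uint64, local map[uint64][]byte, fetch func([]uint64) [][]byte) [][]byte {
	result := make([][]byte, len(sections))
	var reqList []uint64 // missing section indexes
	var reqIdx []int     // result slot for each missing section
	for i, s := range sections {
		if b, ok := local[s]; ok {
			result[i] = b
			continue
		}
		reqList = append(reqList, s)
		reqIdx = append(reqIdx, i)
	}
	if len(reqList) > 0 {
		for i, b := range fetch(reqList) { // one batched, ODR-style retrieval
			result[reqIdx[i]] = b
		}
	}
	return result
}

func main() {
	local := map[uint64][]byte{0: {0x01}}
	fetch := func(missing []uint64) [][]byte {
		out := make([][]byte, len(missing))
		for i := range missing {
			out[i] = []byte{0xff}
		}
		return out
	}
	fmt.Println(getBitsets([]uint64{0, 1, 2}, local, fetch)) // [[1] [255] [255]]
}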
174
vendor/github.com/ethereum/go-ethereum/light/postprocess.go
generated
vendored
174
vendor/github.com/ethereum/go-ethereum/light/postprocess.go
generated
vendored
|
@ -36,20 +36,75 @@ import (
|
|||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
const (
|
||||
// CHTFrequencyClient is the block frequency for creating CHTs on the client side.
|
||||
CHTFrequencyClient = 32768
|
||||
// IndexerConfig includes a set of configs for chain indexers.
|
||||
type IndexerConfig struct {
|
||||
// The block frequency for creating CHTs.
|
||||
ChtSize uint64
|
||||
|
||||
// CHTFrequencyServer is the block frequency for creating CHTs on the server side.
|
||||
// Eventually this can be merged back with the client version, but that requires a
|
||||
// full database upgrade, so that should be left for a suitable moment.
|
||||
CHTFrequencyServer = 4096
|
||||
// A special auxiliary field represents client's chtsize for server config, otherwise represents server's chtsize.
|
||||
PairChtSize uint64
|
||||
|
||||
HelperTrieConfirmations = 2048 // number of confirmations before a server is expected to have the given HelperTrie available
|
||||
HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated
|
||||
// The number of confirmations needed to generate/accept a canonical hash help trie.
|
||||
ChtConfirms uint64
|
||||
|
||||
// The block frequency for creating new bloom bits.
|
||||
BloomSize uint64
|
||||
|
||||
// The number of confirmation needed before a bloom section is considered probably final and its rotated bits
|
||||
// are calculated.
|
||||
BloomConfirms uint64
|
||||
|
||||
// The block frequency for creating BloomTrie.
|
||||
BloomTrieSize uint64
|
||||
|
||||
// The number of confirmations needed to generate/accept a bloom trie.
|
||||
BloomTrieConfirms uint64
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
|
||||
DefaultServerIndexerConfig = &IndexerConfig{
|
||||
ChtSize: params.CHTFrequencyServer,
|
||||
PairChtSize: params.CHTFrequencyClient,
|
||||
ChtConfirms: params.HelperTrieProcessConfirmations,
|
||||
BloomSize: params.BloomBitsBlocks,
|
||||
BloomConfirms: params.BloomConfirms,
|
||||
BloomTrieSize: params.BloomTrieFrequency,
|
||||
BloomTrieConfirms: params.HelperTrieProcessConfirmations,
|
||||
}
|
||||
// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
|
||||
DefaultClientIndexerConfig = &IndexerConfig{
|
||||
ChtSize: params.CHTFrequencyClient,
|
||||
PairChtSize: params.CHTFrequencyServer,
|
||||
ChtConfirms: params.HelperTrieConfirmations,
|
||||
BloomSize: params.BloomBitsBlocksClient,
|
||||
BloomConfirms: params.HelperTrieConfirmations,
|
||||
BloomTrieSize: params.BloomTrieFrequency,
|
||||
BloomTrieConfirms: params.HelperTrieConfirmations,
|
||||
}
|
||||
// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
|
||||
TestServerIndexerConfig = &IndexerConfig{
|
||||
ChtSize: 256,
|
||||
PairChtSize: 2048,
|
||||
ChtConfirms: 16,
|
||||
BloomSize: 256,
|
||||
BloomConfirms: 16,
|
||||
BloomTrieSize: 2048,
|
||||
BloomTrieConfirms: 16,
|
||||
}
|
||||
// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
|
||||
TestClientIndexerConfig = &IndexerConfig{
|
||||
ChtSize: 2048,
|
||||
PairChtSize: 256,
|
||||
ChtConfirms: 128,
|
||||
BloomSize: 2048,
|
||||
BloomConfirms: 128,
|
||||
BloomTrieSize: 2048,
|
||||
BloomTrieConfirms: 128,
|
||||
}
|
||||
)
|
||||
|
||||
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
|
||||
// trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
|
||||
// the appropriate section index and head hash. It is used to start light syncing from this checkpoint
|
||||
// and avoid downloading the entire header chain while still being able to securely access old headers/logs.
|
||||
type TrustedCheckpoint struct {
|
||||
|
@ -84,9 +139,9 @@ var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{
|
|||
}
|
||||
|
||||
var (
|
||||
ErrNoTrustedCht = errors.New("No trusted canonical hash trie")
|
||||
ErrNoTrustedBloomTrie = errors.New("No trusted bloom trie")
|
||||
ErrNoHeader = errors.New("Header not found")
|
||||
ErrNoTrustedCht = errors.New("no trusted canonical hash trie")
|
||||
ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
|
||||
ErrNoHeader = errors.New("header not found")
|
||||
chtPrefix = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
|
||||
ChtTablePrefix = "cht-"
|
||||
)
|
||||
|
@ -97,8 +152,8 @@ type ChtNode struct {
|
|||
Td *big.Int
|
||||
}
|
||||
|
||||
// GetChtRoot reads the CHT root assoctiated to the given section from the database
|
||||
// Note that sectionIdx is specified according to LES/1 CHT section size
|
||||
// GetChtRoot reads the CHT root associated to the given section from the database
|
||||
// Note that sectionIdx is specified according to LES/1 CHT section size.
|
||||
func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
|
||||
var encNumber [8]byte
|
||||
binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
|
||||
|
@ -106,21 +161,15 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
|
|||
return common.BytesToHash(data)
|
||||
}
|
||||
|
||||
// GetChtV2Root reads the CHT root assoctiated to the given section from the database
|
||||
// Note that sectionIdx is specified according to LES/2 CHT section size
|
||||
func GetChtV2Root(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
|
||||
return GetChtRoot(db, (sectionIdx+1)*(CHTFrequencyClient/CHTFrequencyServer)-1, sectionHead)
|
||||
}
|
||||
|
||||
// StoreChtRoot writes the CHT root assoctiated to the given section into the database
|
||||
// Note that sectionIdx is specified according to LES/1 CHT section size
|
||||
// StoreChtRoot writes the CHT root associated to the given section into the database
|
||||
// Note that sectionIdx is specified according to LES/1 CHT section size.
|
||||
func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
|
||||
var encNumber [8]byte
|
||||
binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
|
||||
db.Put(append(append(chtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes())
|
||||
}
|
||||
|
||||
// ChtIndexerBackend implements core.ChainIndexerBackend
|
||||
// ChtIndexerBackend implements core.ChainIndexerBackend.
|
||||
type ChtIndexerBackend struct {
|
||||
diskdb, trieTable ethdb.Database
|
||||
odr OdrBackend
|
||||
|
@ -130,33 +179,24 @@ type ChtIndexerBackend struct {
|
|||
trie *trie.Trie
|
||||
}
|
||||
|
||||
// NewBloomTrieIndexer creates a BloomTrie chain indexer
|
||||
func NewChtIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer {
|
||||
var sectionSize, confirmReq uint64
|
||||
if clientMode {
|
||||
sectionSize = CHTFrequencyClient
|
||||
confirmReq = HelperTrieConfirmations
|
||||
} else {
|
||||
sectionSize = CHTFrequencyServer
|
||||
confirmReq = HelperTrieProcessConfirmations
|
||||
}
|
||||
idb := ethdb.NewTable(db, "chtIndex-")
|
||||
// NewChtIndexer creates a Cht chain indexer
|
||||
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *core.ChainIndexer {
|
||||
trieTable := ethdb.NewTable(db, ChtTablePrefix)
|
||||
backend := &ChtIndexerBackend{
|
||||
diskdb: db,
|
||||
odr: odr,
|
||||
trieTable: trieTable,
|
||||
triedb: trie.NewDatabase(trieTable),
|
||||
sectionSize: sectionSize,
|
||||
sectionSize: size,
|
||||
}
|
||||
return core.NewChainIndexer(db, idb, backend, sectionSize, confirmReq, time.Millisecond*100, "cht")
|
||||
return core.NewChainIndexer(db, ethdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
|
||||
}
|
||||
|
||||
// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
|
||||
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
|
||||
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
|
||||
batch := c.trieTable.NewBatch()
|
||||
r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1}
|
||||
r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
|
||||
for {
|
||||
err := c.odr.Retrieve(ctx, r)
|
||||
switch err {
|
||||
|
@ -221,18 +261,13 @@ func (c *ChtIndexerBackend) Commit() error {
|
|||
}
|
||||
c.triedb.Commit(root, false)
|
||||
|
||||
if ((c.section+1)*c.sectionSize)%CHTFrequencyClient == 0 {
|
||||
log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
|
||||
if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
|
||||
log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
|
||||
}
|
||||
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
BloomTrieFrequency = 32768
|
||||
ethBloomBitsSection = 4096
|
||||
)
|
||||
|
||||
var (
|
||||
bloomTriePrefix = []byte("bltRoot-") // bloomTriePrefix + bloomTrieNum (uint64 big endian) -> trie root hash
|
||||
BloomTrieTablePrefix = "blt-"
|
||||
|
@ -255,33 +290,31 @@ func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root
|
|||
|
||||
// BloomTrieIndexerBackend implements core.ChainIndexerBackend
|
||||
type BloomTrieIndexerBackend struct {
|
||||
diskdb, trieTable ethdb.Database
|
||||
odr OdrBackend
|
||||
triedb *trie.Database
|
||||
section, parentSectionSize, bloomTrieRatio uint64
|
||||
trie *trie.Trie
|
||||
sectionHeads []common.Hash
|
||||
diskdb, trieTable ethdb.Database
|
||||
triedb *trie.Database
|
||||
odr OdrBackend
|
||||
section uint64
|
||||
parentSize uint64
|
||||
size uint64
|
||||
bloomTrieRatio uint64
|
||||
trie *trie.Trie
|
||||
sectionHeads []common.Hash
|
||||
}
|
||||
|
||||
// NewBloomTrieIndexer creates a BloomTrie chain indexer
|
||||
func NewBloomTrieIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer {
|
||||
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64) *core.ChainIndexer {
|
||||
trieTable := ethdb.NewTable(db, BloomTrieTablePrefix)
|
||||
backend := &BloomTrieIndexerBackend{
|
||||
diskdb: db,
|
||||
odr: odr,
|
||||
trieTable: trieTable,
|
||||
triedb: trie.NewDatabase(trieTable),
|
||||
diskdb: db,
|
||||
odr: odr,
|
||||
trieTable: trieTable,
|
||||
triedb: trie.NewDatabase(trieTable),
|
||||
parentSize: parentSize,
|
||||
size: size,
|
||||
}
|
||||
idb := ethdb.NewTable(db, "bltIndex-")
|
||||
|
||||
if clientMode {
|
||||
backend.parentSectionSize = BloomTrieFrequency
|
||||
} else {
|
||||
backend.parentSectionSize = ethBloomBitsSection
|
||||
}
|
||||
backend.bloomTrieRatio = BloomTrieFrequency / backend.parentSectionSize
|
||||
backend.bloomTrieRatio = size / parentSize
|
||||
backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
|
||||
return core.NewChainIndexer(db, idb, backend, BloomTrieFrequency, 0, time.Millisecond*100, "bloomtrie")
|
||||
return core.NewChainIndexer(db, ethdb.NewTable(db, "bltIndex-"), backend, size, 0, time.Millisecond*100, "bloomtrie")
|
||||
}
|
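Likewise, the BloomTrie indexer now derives bloomTrieRatio from the caller-supplied sizes (size / parentSize) rather than from a clientMode switch. Continuing the sketch above, with the bloom-bits section sizes from params assumed as the inputs:

func newBloomTrieIndexers(db ethdb.Database, odr light.OdrBackend) (client, server *core.ChainIndexer) {
	// Sketch: ratio = size / parentSize, so the caller now picks the parent (bloom bits) section size.
	client = light.NewBloomTrieIndexer(db, odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency) // 32768/32768 -> ratio 1
	server = light.NewBloomTrieIndexer(db, nil, params.BloomBitsBlocks, params.BloomTrieFrequency)       // 32768/4096  -> ratio 8
	return client, server
}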
||||
|
||||
// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
|
||||
|
@ -296,7 +329,7 @@ func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section
|
|||
for i := 0; i < 20; i++ {
|
||||
go func() {
|
||||
for bitIndex := range indexCh {
|
||||
r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}}
|
||||
r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
|
||||
for {
|
||||
if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
|
||||
// if there are no peers to serve, retry later
|
||||
|
@ -351,9 +384,9 @@ func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, las
|
|||
|
||||
// Process implements core.ChainIndexerBackend
|
||||
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
|
||||
num := header.Number.Uint64() - b.section*BloomTrieFrequency
|
||||
if (num+1)%b.parentSectionSize == 0 {
|
||||
b.sectionHeads[num/b.parentSectionSize] = header.Hash()
|
||||
num := header.Number.Uint64() - b.section*b.size
|
||||
if (num+1)%b.parentSize == 0 {
|
||||
b.sectionHeads[num/b.parentSize] = header.Hash()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -372,7 +405,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSectionSize/8))
|
||||
decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
|
@ -397,6 +430,5 @@ func (b *BloomTrieIndexerBackend) Commit() error {
|
|||
sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
|
||||
log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
|
||||
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
4
vendor/github.com/ethereum/go-ethereum/miner/miner.go
generated
vendored
4
vendor/github.com/ethereum/go-ethereum/miner/miner.go
generated
vendored
|
@ -52,13 +52,13 @@ type Miner struct {
|
|||
shouldStart int32 // should start indicates whether we should start after sync
|
||||
}
|
||||
|
||||
func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration) *Miner {
|
||||
func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, recommit time.Duration, gasFloor, gasCeil uint64) *Miner {
|
||||
miner := &Miner{
|
||||
eth: eth,
|
||||
mux: mux,
|
||||
engine: engine,
|
||||
exitCh: make(chan struct{}),
|
||||
worker: newWorker(config, engine, eth, mux, recommit),
|
||||
worker: newWorker(config, engine, eth, mux, recommit, gasFloor, gasCeil),
|
||||
canStart: 1,
|
||||
}
|
||||
go miner.update()
|
||||
|
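miner.New now threads gasFloor and gasCeil down to the worker, which later feeds them into core.CalcGasLimit. A hedged sketch of a caller, reusing the 90%/110% bounds that the new stress tests below derive from the genesis gas limit (backend, config, mux and engine are assumed to exist; consensus, event, miner, params and time imports elided):

func newMiner(backend miner.Backend, cfg *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, genesisGasLimit uint64) *miner.Miner {
	gasFloor := genesisGasLimit * 9 / 10  // lower bound the worker targets for the next block's gas limit
	gasCeil := genesisGasLimit * 11 / 10  // upper bound the worker targets
	return miner.New(backend, cfg, mux, engine, 3*time.Second, gasFloor, gasCeil)
}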
|
219
vendor/github.com/ethereum/go-ethereum/miner/stress_clique.go
generated
vendored
Normal file
219
vendor/github.com/ethereum/go-ethereum/miner/stress_clique.go
generated
vendored
Normal file
|
@ -0,0 +1,219 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build none
|
||||
|
||||
// This file contains a miner stress test based on the Clique consensus engine.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
fdlimit.Raise(2048)
|
||||
|
||||
// Generate a batch of accounts to seal and fund with
|
||||
faucets := make([]*ecdsa.PrivateKey, 128)
|
||||
for i := 0; i < len(faucets); i++ {
|
||||
faucets[i], _ = crypto.GenerateKey()
|
||||
}
|
||||
sealers := make([]*ecdsa.PrivateKey, 4)
|
||||
for i := 0; i < len(sealers); i++ {
|
||||
sealers[i], _ = crypto.GenerateKey()
|
||||
}
|
||||
// Create a Clique network based off of the Rinkeby config
|
||||
genesis := makeGenesis(faucets, sealers)
|
||||
|
||||
var (
|
||||
nodes []*node.Node
|
||||
enodes []string
|
||||
)
|
||||
for _, sealer := range sealers {
|
||||
// Start the node and wait until it's up
|
||||
node, err := makeSealer(genesis, enodes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer node.Stop()
|
||||
|
||||
for node.Server().NodeInfo().Ports.Listener == 0 {
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
// Connect the node to all the previous ones
|
||||
for _, enode := range enodes {
|
||||
enode, err := discover.ParseNode(enode)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
node.Server().AddPeer(enode)
|
||||
}
|
||||
// Start tracking the node and its enode url
|
||||
nodes = append(nodes, node)
|
||||
|
||||
enode := fmt.Sprintf("enode://%s@127.0.0.1:%d", node.Server().NodeInfo().ID, node.Server().NodeInfo().Ports.Listener)
|
||||
enodes = append(enodes, enode)
|
||||
|
||||
// Inject the signer key and start sealing with it
|
||||
store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
signer, err := store.ImportECDSA(sealer, "")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := store.Unlock(signer, ""); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// Iterate over all the nodes and start signing with them
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
for _, node := range nodes {
|
||||
var ethereum *eth.Ethereum
|
||||
if err := node.Service(ðereum); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := ethereum.StartMining(1); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Start injecting transactions from the faucet like crazy
|
||||
nonces := make([]uint64, len(faucets))
|
||||
for {
|
||||
index := rand.Intn(len(faucets))
|
||||
|
||||
// Fetch the accessor for the relevant signer
|
||||
var ethereum *eth.Ethereum
|
||||
if err := nodes[index%len(nodes)].Service(ðereum); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Create a self transaction and inject into the pool
|
||||
tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000), nil), types.HomesteadSigner{}, faucets[index])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := ethereum.TxPool().AddLocal(tx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
nonces[index]++
|
||||
|
||||
// Wait if we're too saturated
|
||||
if pend, _ := ethereum.TxPool().Stats(); pend > 2048 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makeGenesis creates a custom Clique genesis block based on some pre-defined
|
||||
// signer and faucet accounts.
|
||||
func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core.Genesis {
|
||||
// Create a Clique network based off of the Rinkeby config
|
||||
genesis := core.DefaultRinkebyGenesisBlock()
|
||||
genesis.GasLimit = 25000000
|
||||
|
||||
genesis.Config.ChainID = big.NewInt(18)
|
||||
genesis.Config.Clique.Period = 1
|
||||
genesis.Config.EIP150Hash = common.Hash{}
|
||||
|
||||
genesis.Alloc = core.GenesisAlloc{}
|
||||
for _, faucet := range faucets {
|
||||
genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{
|
||||
Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil),
|
||||
}
|
||||
}
|
||||
// Sort the signers and embed into the extra-data section
|
||||
signers := make([]common.Address, len(sealers))
|
||||
for i, sealer := range sealers {
|
||||
signers[i] = crypto.PubkeyToAddress(sealer.PublicKey)
|
||||
}
|
||||
for i := 0; i < len(signers); i++ {
|
||||
for j := i + 1; j < len(signers); j++ {
|
||||
if bytes.Compare(signers[i][:], signers[j][:]) > 0 {
|
||||
signers[i], signers[j] = signers[j], signers[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
genesis.ExtraData = make([]byte, 32+len(signers)*common.AddressLength+65)
|
||||
for i, signer := range signers {
|
||||
copy(genesis.ExtraData[32+i*common.AddressLength:], signer[:])
|
||||
}
|
||||
// Return the genesis block for initialization
|
||||
return genesis
|
||||
}
|
||||
|
||||
func makeSealer(genesis *core.Genesis, nodes []string) (*node.Node, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := ioutil.TempDir("", "")
|
||||
|
||||
config := &node.Config{
|
||||
Name: "geth",
|
||||
Version: params.Version,
|
||||
DataDir: datadir,
|
||||
P2P: p2p.Config{
|
||||
ListenAddr: "0.0.0.0:0",
|
||||
NoDiscovery: true,
|
||||
MaxPeers: 25,
|
||||
},
|
||||
NoUSB: true,
|
||||
}
|
||||
// Start the node and configure a full Ethereum node on it
|
||||
stack, err := node.New(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return eth.New(ctx, ð.Config{
|
||||
Genesis: genesis,
|
||||
NetworkId: genesis.Config.ChainID.Uint64(),
|
||||
SyncMode: downloader.FullSync,
|
||||
DatabaseCache: 256,
|
||||
DatabaseHandles: 256,
|
||||
TxPool: core.DefaultTxPoolConfig,
|
||||
GPO: eth.DefaultConfig.GPO,
|
||||
MinerGasFloor: genesis.GasLimit * 9 / 10,
|
||||
MinerGasCeil: genesis.GasLimit * 11 / 10,
|
||||
MinerGasPrice: big.NewInt(1),
|
||||
MinerRecommit: time.Second,
|
||||
})
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Start the node and return if successful
|
||||
return stack, stack.Start()
|
||||
}
|
199
vendor/github.com/ethereum/go-ethereum/miner/stress_ethash.go
generated
vendored
Normal file
199
vendor/github.com/ethereum/go-ethereum/miner/stress_ethash.go
generated
vendored
Normal file
|
@ -0,0 +1,199 @@
|
|||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build none
|
||||
|
||||
// This file contains a miner stress test based on the Ethash consensus engine.
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
fdlimit.Raise(2048)
|
||||
|
||||
// Generate a batch of accounts to seal and fund with
|
||||
faucets := make([]*ecdsa.PrivateKey, 128)
|
||||
for i := 0; i < len(faucets); i++ {
|
||||
faucets[i], _ = crypto.GenerateKey()
|
||||
}
|
||||
// Pre-generate the ethash mining DAG so we don't race
|
||||
ethash.MakeDataset(1, filepath.Join(os.Getenv("HOME"), ".ethash"))
|
||||
|
||||
// Create an Ethash network based off of the Ropsten config
|
||||
genesis := makeGenesis(faucets)
|
||||
|
||||
var (
|
||||
nodes []*node.Node
|
||||
enodes []string
|
||||
)
|
||||
for i := 0; i < 4; i++ {
|
||||
// Start the node and wait until it's up
|
||||
node, err := makeMiner(genesis, enodes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer node.Stop()
|
||||
|
||||
for node.Server().NodeInfo().Ports.Listener == 0 {
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
// Connect the node to all the previous ones
|
||||
for _, enode := range enodes {
|
||||
enode, err := discover.ParseNode(enode)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
node.Server().AddPeer(enode)
|
||||
}
|
||||
// Start tracking the node and its enode url
|
||||
nodes = append(nodes, node)
|
||||
|
||||
enode := fmt.Sprintf("enode://%s@127.0.0.1:%d", node.Server().NodeInfo().ID, node.Server().NodeInfo().Ports.Listener)
|
||||
enodes = append(enodes, enode)
|
||||
|
||||
// Inject the signer key and start sealing with it
|
||||
store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
if _, err := store.NewAccount(""); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// Iterate over all the nodes and start signing with them
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
for _, node := range nodes {
|
||||
var ethereum *eth.Ethereum
|
||||
if err := node.Service(ðereum); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := ethereum.StartMining(1); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Start injecting transactions from the faucets like crazy
|
||||
nonces := make([]uint64, len(faucets))
|
||||
for {
|
||||
index := rand.Intn(len(faucets))
|
||||
|
||||
// Fetch the accessor for the relevant signer
|
||||
var ethereum *eth.Ethereum
|
||||
if err := nodes[index%len(nodes)].Service(ðereum); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Create a self transaction and inject into the pool
|
||||
tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), types.HomesteadSigner{}, faucets[index])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := ethereum.TxPool().AddLocal(tx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
nonces[index]++
|
||||
|
||||
// Wait if we're too saturated
|
||||
if pend, _ := ethereum.TxPool().Stats(); pend > 2048 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makeGenesis creates a custom Ethash genesis block based on some pre-defined
|
||||
// faucet accounts.
|
||||
func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
|
||||
genesis := core.DefaultTestnetGenesisBlock()
|
||||
genesis.Difficulty = params.MinimumDifficulty
|
||||
genesis.GasLimit = 25000000
|
||||
|
||||
genesis.Config.ChainID = big.NewInt(18)
|
||||
genesis.Config.EIP150Hash = common.Hash{}
|
||||
|
||||
genesis.Alloc = core.GenesisAlloc{}
|
||||
for _, faucet := range faucets {
|
||||
genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{
|
||||
Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil),
|
||||
}
|
||||
}
|
||||
return genesis
|
||||
}
|
||||
|
||||
func makeMiner(genesis *core.Genesis, nodes []string) (*node.Node, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := ioutil.TempDir("", "")
|
||||
|
||||
config := &node.Config{
|
||||
Name: "geth",
|
||||
Version: params.Version,
|
||||
DataDir: datadir,
|
||||
P2P: p2p.Config{
|
||||
ListenAddr: "0.0.0.0:0",
|
||||
NoDiscovery: true,
|
||||
MaxPeers: 25,
|
||||
},
|
||||
NoUSB: true,
|
||||
UseLightweightKDF: true,
|
||||
}
|
||||
// Start the node and configure a full Ethereum node on it
|
||||
stack, err := node.New(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
return eth.New(ctx, ð.Config{
|
||||
Genesis: genesis,
|
||||
NetworkId: genesis.Config.ChainID.Uint64(),
|
||||
SyncMode: downloader.FullSync,
|
||||
DatabaseCache: 256,
|
||||
DatabaseHandles: 256,
|
||||
TxPool: core.DefaultTxPoolConfig,
|
||||
GPO: eth.DefaultConfig.GPO,
|
||||
Ethash: eth.DefaultConfig.Ethash,
|
||||
MinerGasFloor: genesis.GasLimit * 9 / 10,
|
||||
MinerGasCeil: genesis.GasLimit * 11 / 10,
|
||||
MinerGasPrice: big.NewInt(1),
|
||||
MinerRecommit: time.Second,
|
||||
})
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Start the node and return if successful
|
||||
return stack, stack.Start()
|
||||
}
|
35
vendor/github.com/ethereum/go-ethereum/miner/unconfirmed.go
generated
vendored
35
vendor/github.com/ethereum/go-ethereum/miner/unconfirmed.go
generated
vendored
|
@ -25,11 +25,14 @@ import (
|
|||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// headerRetriever is used by the unconfirmed block set to verify whether a previously
|
||||
// chainRetriever is used by the unconfirmed block set to verify whether a previously
|
||||
// mined block is part of the canonical chain or not.
|
||||
type headerRetriever interface {
|
||||
type chainRetriever interface {
|
||||
// GetHeaderByNumber retrieves the canonical header associated with a block number.
|
||||
GetHeaderByNumber(number uint64) *types.Header
|
||||
|
||||
// GetBlockByNumber retrieves the canonical block associated with a block number.
|
||||
GetBlockByNumber(number uint64) *types.Block
|
||||
}
|
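Because the tracker now inspects uncle lists as well as headers, the interface gains GetBlockByNumber and is renamed to chainRetriever. The *core.BlockChain the worker passes in already satisfies both methods; a minimal sketch of a hypothetical in-memory stand-in (names invented here, and since the interface is unexported such a stand-in would live in the miner package's own tests):

// fakeChain is a hypothetical stand-in for *core.BlockChain used only to illustrate the interface.
type fakeChain struct {
	headers map[uint64]*types.Header
	blocks  map[uint64]*types.Block
}

func (c *fakeChain) GetHeaderByNumber(n uint64) *types.Header { return c.headers[n] }
func (c *fakeChain) GetBlockByNumber(n uint64) *types.Block   { return c.blocks[n] }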
||||
|
||||
// unconfirmedBlock is a small collection of metadata about a locally mined block
|
||||
|
@ -44,14 +47,14 @@ type unconfirmedBlock struct {
|
|||
// used by the miner to provide logs to the user when a previously mined block
|
||||
// has a high enough guarantee to not be reorged out of the canonical chain.
|
||||
type unconfirmedBlocks struct {
|
||||
chain headerRetriever // Blockchain to verify canonical status through
|
||||
depth uint // Depth after which to discard previous blocks
|
||||
blocks *ring.Ring // Block infos to allow canonical chain cross checks
|
||||
lock sync.RWMutex // Protects the fields from concurrent access
|
||||
chain chainRetriever // Blockchain to verify canonical status through
|
||||
depth uint // Depth after which to discard previous blocks
|
||||
blocks *ring.Ring // Block infos to allow canonical chain cross checks
|
||||
lock sync.RWMutex // Protects the fields from concurrent access
|
||||
}
|
||||
|
||||
// newUnconfirmedBlocks returns new data structure to track currently unconfirmed blocks.
|
||||
func newUnconfirmedBlocks(chain headerRetriever, depth uint) *unconfirmedBlocks {
|
||||
func newUnconfirmedBlocks(chain chainRetriever, depth uint) *unconfirmedBlocks {
|
||||
return &unconfirmedBlocks{
|
||||
chain: chain,
|
||||
depth: depth,
|
||||
|
@ -103,7 +106,23 @@ func (set *unconfirmedBlocks) Shift(height uint64) {
|
|||
case header.Hash() == next.hash:
|
||||
log.Info("🔗 block reached canonical chain", "number", next.index, "hash", next.hash)
|
||||
default:
|
||||
log.Info("⑂ block became a side fork", "number", next.index, "hash", next.hash)
|
||||
// Block is not canonical, check whether we have an uncle or a lost block
|
||||
included := false
|
||||
for number := next.index; !included && number < next.index+uint64(set.depth) && number <= height; number++ {
|
||||
if block := set.chain.GetBlockByNumber(number); block != nil {
|
||||
for _, uncle := range block.Uncles() {
|
||||
if uncle.Hash() == next.hash {
|
||||
included = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if included {
|
||||
log.Info("⑂ block became an uncle", "number", next.index, "hash", next.hash)
|
||||
} else {
|
||||
log.Info("😱 block lost", "number", next.index, "hash", next.hash)
|
||||
}
|
||||
}
|
||||
// Drop the block out of the ring
|
||||
if set.blocks.Value == set.blocks.Next().Value {
|
||||
|
|
199
vendor/github.com/ethereum/go-ethereum/miner/worker.go
generated
vendored
199
vendor/github.com/ethereum/go-ethereum/miner/worker.go
generated
vendored
|
@ -18,7 +18,7 @@ package miner
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"errors"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
@ -55,7 +55,7 @@ const (
|
|||
resubmitAdjustChanSize = 10
|
||||
|
||||
// miningLogAtDepth is the number of confirmations before logging successful mining.
|
||||
miningLogAtDepth = 5
|
||||
miningLogAtDepth = 7
|
||||
|
||||
// minRecommitInterval is the minimal time interval to recreate the mining block with
|
||||
// any newly arrived transactions.
|
||||
|
@ -72,6 +72,9 @@ const (
|
|||
// intervalAdjustBias is applied during the new resubmit interval calculation in favor of
|
||||
// increasing upper limit or decreasing lower limit so that the limit can be reachable.
|
||||
intervalAdjustBias = 200 * 1000.0 * 1000.0
|
||||
|
||||
// staleThreshold is the maximum depth of the acceptable stale block.
|
||||
staleThreshold = 7
|
||||
)
|
||||
|
||||
// environment is the worker's current environment and holds all of the current state information.
|
||||
|
@ -108,6 +111,7 @@ const (
|
|||
type newWorkReq struct {
|
||||
interrupt *int32
|
||||
noempty bool
|
||||
timestamp int64
|
||||
}
|
||||
|
||||
// intervalAdjust represents a resubmitting interval adjustment.
|
||||
|
@ -124,6 +128,9 @@ type worker struct {
|
|||
eth Backend
|
||||
chain *core.BlockChain
|
||||
|
||||
gasFloor uint64
|
||||
gasCeil uint64
|
||||
|
||||
// Subscriptions
|
||||
mux *event.TypeMux
|
||||
txsCh chan core.NewTxsEvent
|
||||
|
@ -136,7 +143,7 @@ type worker struct {
|
|||
// Channels
|
||||
newWorkCh chan *newWorkReq
|
||||
taskCh chan *task
|
||||
resultCh chan *task
|
||||
resultCh chan *types.Block
|
||||
startCh chan struct{}
|
||||
exitCh chan struct{}
|
||||
resubmitIntervalCh chan time.Duration
|
||||
|
@ -150,6 +157,9 @@ type worker struct {
|
|||
coinbase common.Address
|
||||
extra []byte
|
||||
|
||||
pendingMu sync.RWMutex
|
||||
pendingTasks map[common.Hash]*task
|
||||
|
||||
snapshotMu sync.RWMutex // The lock used to protect the block snapshot and state snapshot
|
||||
snapshotBlock *types.Block
|
||||
snapshotState *state.StateDB
|
||||
|
@ -165,21 +175,24 @@ type worker struct {
|
|||
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
|
||||
}
|
||||
|
||||
func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration) *worker {
|
||||
func newWorker(config *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, recommit time.Duration, gasFloor, gasCeil uint64) *worker {
|
||||
worker := &worker{
|
||||
config: config,
|
||||
engine: engine,
|
||||
eth: eth,
|
||||
mux: mux,
|
||||
chain: eth.BlockChain(),
|
||||
gasFloor: gasFloor,
|
||||
gasCeil: gasCeil,
|
||||
possibleUncles: make(map[common.Hash]*types.Block),
|
||||
unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
|
||||
pendingTasks: make(map[common.Hash]*task),
|
||||
txsCh: make(chan core.NewTxsEvent, txChanSize),
|
||||
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
|
||||
chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
|
||||
newWorkCh: make(chan *newWorkReq),
|
||||
taskCh: make(chan *task),
|
||||
resultCh: make(chan *task, resultQueueSize),
|
||||
resultCh: make(chan *types.Block, resultQueueSize),
|
||||
exitCh: make(chan struct{}),
|
||||
startCh: make(chan struct{}, 1),
|
||||
resubmitIntervalCh: make(chan time.Duration),
|
||||
|
@ -262,18 +275,10 @@ func (w *worker) isRunning() bool {
|
|||
return atomic.LoadInt32(&w.running) == 1
|
||||
}
|
||||
|
||||
// close terminates all background threads maintained by the worker and cleans up buffered channels.
|
||||
// close terminates all background threads maintained by the worker.
|
||||
// Note the worker does not support being closed multiple times.
|
||||
func (w *worker) close() {
|
||||
close(w.exitCh)
|
||||
// Clean up buffered channels
|
||||
for empty := false; !empty; {
|
||||
select {
|
||||
case <-w.resultCh:
|
||||
default:
|
||||
empty = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
|
||||
|
@ -281,6 +286,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
|
|||
var (
|
||||
interrupt *int32
|
||||
minRecommit = recommit // minimal resubmit interval specified by user.
|
||||
timestamp int64 // timestamp for each round of mining.
|
||||
)
|
||||
|
||||
timer := time.NewTimer(0)
|
||||
|
@ -292,7 +298,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
|
|||
atomic.StoreInt32(interrupt, s)
|
||||
}
|
||||
interrupt = new(int32)
|
||||
w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty}
|
||||
w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}
|
||||
timer.Reset(recommit)
|
||||
atomic.StoreInt32(&w.newTxs, 0)
|
||||
}
|
||||
|
@ -317,13 +323,27 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
|
|||
}
|
||||
recommit = time.Duration(int64(next))
|
||||
}
|
||||
// clearPending cleans the stale pending tasks.
|
||||
clearPending := func(number uint64) {
|
||||
w.pendingMu.Lock()
|
||||
for h, t := range w.pendingTasks {
|
||||
if t.block.NumberU64()+staleThreshold <= number {
|
||||
delete(w.pendingTasks, h)
|
||||
}
|
||||
}
|
||||
w.pendingMu.Unlock()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-w.startCh:
|
||||
clearPending(w.chain.CurrentBlock().NumberU64())
|
||||
timestamp = time.Now().Unix()
|
||||
commit(false, commitInterruptNewHead)
|
||||
|
||||
case <-w.chainHeadCh:
|
||||
case head := <-w.chainHeadCh:
|
||||
clearPending(head.Block.NumberU64())
|
||||
timestamp = time.Now().Unix()
|
||||
commit(false, commitInterruptNewHead)
|
||||
|
||||
case <-timer.C:
|
||||
|
@ -382,7 +402,7 @@ func (w *worker) mainLoop() {
|
|||
for {
|
||||
select {
|
||||
case req := <-w.newWorkCh:
|
||||
w.commitNewWork(req.interrupt, req.noempty)
|
||||
w.commitNewWork(req.interrupt, req.noempty, req.timestamp)
|
||||
|
||||
case ev := <-w.chainSideCh:
|
||||
if _, exist := w.possibleUncles[ev.Block.Hash()]; exist {
|
||||
|
@ -434,7 +454,7 @@ func (w *worker) mainLoop() {
|
|||
} else {
|
||||
// If we're mining, but nothing is being processed, wake on new transactions
|
||||
if w.config.Clique != nil && w.config.Clique.Period == 0 {
|
||||
w.commitNewWork(nil, false)
|
||||
w.commitNewWork(nil, false, time.Now().Unix())
|
||||
}
|
||||
}
|
||||
atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))
|
||||
|
@ -452,33 +472,6 @@ func (w *worker) mainLoop() {
|
|||
}
|
||||
}
|
||||
|
||||
// seal pushes a sealing task to consensus engine and submits the result.
|
||||
func (w *worker) seal(t *task, stop <-chan struct{}) {
|
||||
var (
|
||||
err error
|
||||
res *task
|
||||
)
|
||||
|
||||
if w.skipSealHook != nil && w.skipSealHook(t) {
|
||||
return
|
||||
}
|
||||
|
||||
if t.block, err = w.engine.Seal(w.chain, t.block, stop); t.block != nil {
|
||||
log.Info("Successfully sealed new block", "number", t.block.Number(), "hash", t.block.Hash(),
|
||||
"elapsed", common.PrettyDuration(time.Since(t.createdAt)))
|
||||
res = t
|
||||
} else {
|
||||
if err != nil {
|
||||
log.Warn("Block sealing failed", "err", err)
|
||||
}
|
||||
res = nil
|
||||
}
|
||||
select {
|
||||
case w.resultCh <- res:
|
||||
case <-w.exitCh:
|
||||
}
|
||||
}
|
||||
|
||||
// taskLoop is a standalone goroutine to fetch sealing task from the generator and
|
||||
// push them to consensus engine.
|
||||
func (w *worker) taskLoop() {
|
||||
|
@ -501,13 +494,24 @@ func (w *worker) taskLoop() {
|
|||
w.newTaskHook(task)
|
||||
}
|
||||
// Reject duplicate sealing work due to resubmitting.
|
||||
if task.block.HashNoNonce() == prev {
|
||||
sealHash := w.engine.SealHash(task.block.Header())
|
||||
if sealHash == prev {
|
||||
continue
|
||||
}
|
||||
// Interrupt previous sealing operation
|
||||
interrupt()
|
||||
stopCh = make(chan struct{})
|
||||
prev = task.block.HashNoNonce()
|
||||
go w.seal(task, stopCh)
|
||||
stopCh, prev = make(chan struct{}), sealHash
|
||||
|
||||
if w.skipSealHook != nil && w.skipSealHook(task) {
|
||||
continue
|
||||
}
|
||||
w.pendingMu.Lock()
|
||||
w.pendingTasks[w.engine.SealHash(task.block.Header())] = task
|
||||
w.pendingMu.Unlock()
|
||||
|
||||
if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
|
||||
log.Warn("Block sealing failed", "err", err)
|
||||
}
|
||||
case <-w.exitCh:
|
||||
interrupt()
|
||||
return
|
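The sealing contract changes here: Seal no longer hands back the sealed block, it delivers results asynchronously on the channel it is given, and SealHash(header) becomes the key used both to drop resubmitted duplicates and to look the task back up in pendingTasks. A rough sketch of the call pattern implied by the call site above (engine, chain and block assumed in scope; consensus, log and types imports elided):

func sealOnce(engine consensus.Engine, chain consensus.ChainReader, block *types.Block) {
	results := make(chan *types.Block, 10) // buffered, as the worker's resultCh is
	stop := make(chan struct{})
	defer close(stop)
	if err := engine.Seal(chain, block, results, stop); err != nil {
		log.Warn("Block sealing failed", "err", err)
		return
	}
	sealed := <-results // delivered asynchronously once a valid seal is found
	_ = sealed          // the worker's result loop matches it to its task via engine.SealHash(sealed.Header())
}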
||||
|
@ -520,38 +524,54 @@ func (w *worker) taskLoop() {
|
|||
func (w *worker) resultLoop() {
|
||||
for {
|
||||
select {
|
||||
case result := <-w.resultCh:
|
||||
case block := <-w.resultCh:
|
||||
// Short circuit when receiving empty result.
|
||||
if result == nil {
|
||||
if block == nil {
|
||||
continue
|
||||
}
|
||||
// Short circuit when receiving duplicate result caused by resubmitting.
|
||||
block := result.block
|
||||
if w.chain.HasBlock(block.Hash(), block.NumberU64()) {
|
||||
continue
|
||||
}
|
||||
// Update the block hash in all logs since it is now available and not when the
|
||||
// receipt/log of individual transactions were created.
|
||||
for _, r := range result.receipts {
|
||||
for _, l := range r.Logs {
|
||||
l.BlockHash = block.Hash()
|
||||
}
|
||||
var (
|
||||
sealhash = w.engine.SealHash(block.Header())
|
||||
hash = block.Hash()
|
||||
)
|
||||
w.pendingMu.RLock()
|
||||
task, exist := w.pendingTasks[sealhash]
|
||||
w.pendingMu.RUnlock()
|
||||
if !exist {
|
||||
log.Error("Block found but no relative pending task", "number", block.Number(), "sealhash", sealhash, "hash", hash)
|
||||
continue
|
||||
}
|
||||
for _, log := range result.state.Logs() {
|
||||
log.BlockHash = block.Hash()
|
||||
// Different block could share same sealhash, deep copy here to prevent write-write conflict.
|
||||
var (
|
||||
receipts = make([]*types.Receipt, len(task.receipts))
|
||||
logs []*types.Log
|
||||
)
|
||||
for i, receipt := range task.receipts {
|
||||
receipts[i] = new(types.Receipt)
|
||||
*receipts[i] = *receipt
|
||||
// Update the block hash in all logs since it is now available and not when the
|
||||
// receipt/log of individual transactions were created.
|
||||
for _, log := range receipt.Logs {
|
||||
log.BlockHash = hash
|
||||
}
|
||||
logs = append(logs, receipt.Logs...)
|
||||
}
|
||||
// Commit block and state to database.
|
||||
stat, err := w.chain.WriteBlockWithState(block, result.receipts, result.state)
|
||||
stat, err := w.chain.WriteBlockWithState(block, receipts, task.state)
|
||||
if err != nil {
|
||||
log.Error("Failed writing block to chain", "err", err)
|
||||
continue
|
||||
}
|
||||
log.Info("Successfully sealed new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
|
||||
"elapsed", common.PrettyDuration(time.Since(task.createdAt)))
|
||||
|
||||
// Broadcast the block and announce chain insertion event
|
||||
w.mux.Post(core.NewMinedBlockEvent{Block: block})
|
||||
var (
|
||||
events []interface{}
|
||||
logs = result.state.Logs()
|
||||
)
|
||||
|
||||
var events []interface{}
|
||||
switch stat {
|
||||
case core.CanonStatTy:
|
||||
events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
|
||||
|
@ -604,13 +624,16 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
|
|||
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
|
||||
hash := uncle.Hash()
|
||||
if env.uncles.Contains(hash) {
|
||||
return fmt.Errorf("uncle not unique")
|
||||
return errors.New("uncle not unique")
|
||||
}
|
||||
if env.header.ParentHash == uncle.ParentHash {
|
||||
return errors.New("uncle is sibling")
|
||||
}
|
||||
if !env.ancestors.Contains(uncle.ParentHash) {
|
||||
return fmt.Errorf("uncle's parent unknown (%x)", uncle.ParentHash[0:4])
|
||||
return errors.New("uncle's parent unknown")
|
||||
}
|
||||
if env.family.Contains(hash) {
|
||||
return fmt.Errorf("uncle already in family (%x)", hash)
|
||||
return errors.New("uncle already included")
|
||||
}
|
||||
env.uncles.Add(uncle.Hash())
|
||||
return nil
|
||||
|
@ -774,20 +797,19 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
|
|||
}
|
||||
|
||||
// commitNewWork generates several new sealing tasks based on the parent block.
|
||||
func (w *worker) commitNewWork(interrupt *int32, noempty bool) {
|
||||
func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
tstart := time.Now()
|
||||
parent := w.chain.CurrentBlock()
|
||||
|
||||
tstamp := tstart.Unix()
|
||||
if parent.Time().Cmp(new(big.Int).SetInt64(tstamp)) >= 0 {
|
||||
tstamp = parent.Time().Int64() + 1
|
||||
if parent.Time().Cmp(new(big.Int).SetInt64(timestamp)) >= 0 {
|
||||
timestamp = parent.Time().Int64() + 1
|
||||
}
|
||||
// this will ensure we're not going off too far in the future
|
||||
if now := time.Now().Unix(); tstamp > now+1 {
|
||||
wait := time.Duration(tstamp-now) * time.Second
|
||||
if now := time.Now().Unix(); timestamp > now+1 {
|
||||
wait := time.Duration(timestamp-now) * time.Second
|
||||
log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait))
|
||||
time.Sleep(wait)
|
||||
}
|
||||
|
@ -796,9 +818,9 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool) {
|
|||
header := &types.Header{
|
||||
ParentHash: parent.Hash(),
|
||||
Number: num.Add(num, common.Big1),
|
||||
GasLimit: core.CalcGasLimit(parent),
|
||||
GasLimit: core.CalcGasLimit(parent, w.gasFloor, w.gasCeil),
|
||||
Extra: w.extra,
|
||||
Time: big.NewInt(tstamp),
|
||||
Time: big.NewInt(timestamp),
|
||||
}
|
||||
// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
|
||||
if w.isRunning() {
|
||||
|
@ -836,29 +858,24 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool) {
|
|||
if w.config.DAOForkSupport && w.config.DAOForkBlock != nil && w.config.DAOForkBlock.Cmp(header.Number) == 0 {
|
||||
misc.ApplyDAOHardFork(env.state)
|
||||
}
|
||||
|
||||
// compute uncles for the new block.
|
||||
var (
|
||||
uncles []*types.Header
|
||||
badUncles []common.Hash
|
||||
)
|
||||
// Accumulate the uncles for the current block
|
||||
for hash, uncle := range w.possibleUncles {
|
||||
if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
|
||||
delete(w.possibleUncles, hash)
|
||||
}
|
||||
}
|
||||
uncles := make([]*types.Header, 0, 2)
|
||||
for hash, uncle := range w.possibleUncles {
|
||||
if len(uncles) == 2 {
|
||||
break
|
||||
}
|
||||
if err := w.commitUncle(env, uncle.Header()); err != nil {
|
||||
log.Trace("Bad uncle found and will be removed", "hash", hash)
|
||||
log.Trace(fmt.Sprint(uncle))
|
||||
|
||||
badUncles = append(badUncles, hash)
|
||||
log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
|
||||
} else {
|
||||
log.Debug("Committing new uncle to block", "hash", hash)
|
||||
uncles = append(uncles, uncle.Header())
|
||||
}
|
||||
}
|
||||
for _, hash := range badUncles {
|
||||
delete(w.possibleUncles, hash)
|
||||
}
|
||||
|
||||
if !noempty {
|
||||
// Create an empty block based on temporary copied state for sealing in advance without waiting block
|
||||
|
@ -928,8 +945,8 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st
|
|||
}
|
||||
feesEth := new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether)))
|
||||
|
||||
log.Info("Commit new mining work", "number", block.Number(), "uncles", len(uncles), "txs", w.current.tcount,
|
||||
"gas", block.GasUsed(), "fees", feesEth, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
|
||||
"uncles", len(uncles), "txs", w.current.tcount, "gas", block.GasUsed(), "fees", feesEth, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
|
||||
case <-w.exitCh:
|
||||
log.Info("Worker has exited")
|
||||
|
|
35
vendor/github.com/ethereum/go-ethereum/mobile/types.go
generated
vendored
35
vendor/github.com/ethereum/go-ethereum/mobile/types.go
generated
vendored
|
@ -168,25 +168,22 @@ func (b *Block) EncodeJSON() (string, error) {
|
|||
return string(data), err
|
||||
}
|
||||
|
||||
func (b *Block) GetParentHash() *Hash { return &Hash{b.block.ParentHash()} }
|
||||
func (b *Block) GetUncleHash() *Hash { return &Hash{b.block.UncleHash()} }
|
||||
func (b *Block) GetCoinbase() *Address { return &Address{b.block.Coinbase()} }
|
||||
func (b *Block) GetRoot() *Hash { return &Hash{b.block.Root()} }
|
||||
func (b *Block) GetTxHash() *Hash { return &Hash{b.block.TxHash()} }
|
||||
func (b *Block) GetReceiptHash() *Hash { return &Hash{b.block.ReceiptHash()} }
|
||||
func (b *Block) GetBloom() *Bloom { return &Bloom{b.block.Bloom()} }
|
||||
func (b *Block) GetDifficulty() *BigInt { return &BigInt{b.block.Difficulty()} }
|
||||
func (b *Block) GetNumber() int64 { return b.block.Number().Int64() }
|
||||
func (b *Block) GetGasLimit() int64 { return int64(b.block.GasLimit()) }
|
||||
func (b *Block) GetGasUsed() int64 { return int64(b.block.GasUsed()) }
|
||||
func (b *Block) GetTime() int64 { return b.block.Time().Int64() }
|
||||
func (b *Block) GetExtra() []byte { return b.block.Extra() }
|
||||
func (b *Block) GetMixDigest() *Hash { return &Hash{b.block.MixDigest()} }
|
||||
func (b *Block) GetNonce() int64 { return int64(b.block.Nonce()) }
|
||||
|
||||
func (b *Block) GetHash() *Hash { return &Hash{b.block.Hash()} }
|
||||
func (b *Block) GetHashNoNonce() *Hash { return &Hash{b.block.HashNoNonce()} }
|
||||
|
||||
func (b *Block) GetParentHash() *Hash { return &Hash{b.block.ParentHash()} }
|
||||
func (b *Block) GetUncleHash() *Hash { return &Hash{b.block.UncleHash()} }
|
||||
func (b *Block) GetCoinbase() *Address { return &Address{b.block.Coinbase()} }
|
||||
func (b *Block) GetRoot() *Hash { return &Hash{b.block.Root()} }
|
||||
func (b *Block) GetTxHash() *Hash { return &Hash{b.block.TxHash()} }
|
||||
func (b *Block) GetReceiptHash() *Hash { return &Hash{b.block.ReceiptHash()} }
|
||||
func (b *Block) GetBloom() *Bloom { return &Bloom{b.block.Bloom()} }
|
||||
func (b *Block) GetDifficulty() *BigInt { return &BigInt{b.block.Difficulty()} }
|
||||
func (b *Block) GetNumber() int64 { return b.block.Number().Int64() }
|
||||
func (b *Block) GetGasLimit() int64 { return int64(b.block.GasLimit()) }
|
||||
func (b *Block) GetGasUsed() int64 { return int64(b.block.GasUsed()) }
|
||||
func (b *Block) GetTime() int64 { return b.block.Time().Int64() }
|
||||
func (b *Block) GetExtra() []byte { return b.block.Extra() }
|
||||
func (b *Block) GetMixDigest() *Hash { return &Hash{b.block.MixDigest()} }
|
||||
func (b *Block) GetNonce() int64 { return int64(b.block.Nonce()) }
|
||||
func (b *Block) GetHash() *Hash { return &Hash{b.block.Hash()} }
|
||||
func (b *Block) GetHeader() *Header { return &Header{b.block.Header()} }
|
||||
func (b *Block) GetUncles() *Headers { return &Headers{b.block.Uncles()} }
|
||||
func (b *Block) GetTransactions() *Transactions { return &Transactions{b.block.Transactions()} }
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/p2p/dial.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/p2p/dial.go
generated
vendored
|
@ -146,7 +146,7 @@ func newDialState(static []*discover.Node, bootnodes []*discover.Node, ntab disc
|
|||
}
|
||||
|
||||
func (s *dialstate) addStatic(n *discover.Node) {
|
||||
// This overwites the task instead of updating an existing
|
||||
// This overwrites the task instead of updating an existing
|
||||
// entry, giving users the opportunity to force a resolve operation.
|
||||
s.static[n.ID] = &dialTask{flags: staticDialedConn, dest: n}
|
||||
}
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/p2p/server.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/p2p/server.go
generated
vendored
|
@ -77,7 +77,7 @@ type Config struct {
|
|||
// Disabling is useful for protocol debugging (manual topology).
|
||||
NoDiscovery bool
|
||||
|
||||
// DiscoveryV5 specifies whether the the new topic-discovery based V5 discovery
|
||||
// DiscoveryV5 specifies whether the new topic-discovery based V5 discovery
|
||||
// protocol should be started or not.
|
||||
DiscoveryV5 bool `toml:",omitempty"`
|
||||
|
||||
|
|
16
vendor/github.com/ethereum/go-ethereum/params/denomination.go
generated
vendored
16
vendor/github.com/ethereum/go-ethereum/params/denomination.go
generated
vendored
|
@ -17,18 +17,12 @@
|
|||
package params
|
||||
|
||||
// These are the multipliers for ether denominations.
|
||||
// Example: To get the wei value of an amount in 'douglas', use
|
||||
// Example: To get the wei value of an amount in 'gwei', use
|
||||
//
|
||||
// new(big.Int).Mul(value, big.NewInt(params.Douglas))
|
||||
// new(big.Int).Mul(value, big.NewInt(params.GWei))
|
||||
//
|
||||
const (
|
||||
Wei = 1
|
||||
Ada = 1e3
|
||||
Babbage = 1e6
|
||||
Shannon = 1e9
|
||||
Szabo = 1e12
|
||||
Finney = 1e15
|
||||
Ether = 1e18
|
||||
Einstein = 1e21
|
||||
Douglas = 1e42
|
||||
Wei = 1
|
||||
GWei = 1e9
|
||||
Ether = 1e18
|
||||
)
|
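Only Wei, GWei and Ether survive the cleanup; the exotic denominations (Ada, Babbage, Shannon, Szabo, Finney, Einstein, Douglas) are dropped. A one-line sketch of the conversion pattern the file's own doc comment describes, here for a hypothetical 5 gwei gas price (big and params imports assumed):

gasPriceWei := new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei)) // 5_000_000_000 wei
_ = gasPriceWei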
||||
|
|
32
vendor/github.com/ethereum/go-ethereum/params/network_params.go
generated
vendored
32
vendor/github.com/ethereum/go-ethereum/params/network_params.go
generated
vendored
|
@ -17,10 +17,38 @@
|
|||
package params
|
||||
|
||||
// These are network parameters that need to be constant between clients, but
|
||||
// aren't necesarilly consensus related.
|
||||
// aren't necessarily consensus related.
|
||||
|
||||
const (
|
||||
// BloomBitsBlocks is the number of blocks a single bloom bit section vector
|
||||
// contains.
|
||||
// contains on the server side.
|
||||
BloomBitsBlocks uint64 = 4096
|
||||
|
||||
// BloomBitsBlocksClient is the number of blocks a single bloom bit section vector
|
||||
// contains on the light client side
|
||||
BloomBitsBlocksClient uint64 = 32768
|
||||
|
||||
// BloomConfirms is the number of confirmation blocks before a bloom section is
|
||||
// considered probably final and its rotated bits are calculated.
|
||||
BloomConfirms = 256
|
||||
|
||||
// CHTFrequencyClient is the block frequency for creating CHTs on the client side.
|
||||
CHTFrequencyClient = 32768
|
||||
|
||||
// CHTFrequencyServer is the block frequency for creating CHTs on the server side.
|
||||
// Eventually this can be merged back with the client version, but that requires a
|
||||
// full database upgrade, so that should be left for a suitable moment.
|
||||
CHTFrequencyServer = 4096
|
||||
|
||||
// BloomTrieFrequency is the block frequency for creating BloomTrie on both
|
||||
// server/client sides.
|
||||
BloomTrieFrequency = 32768
|
||||
|
||||
// HelperTrieConfirmations is the number of confirmations before a client is expected
|
||||
// to have the given HelperTrie available.
|
||||
HelperTrieConfirmations = 2048
|
||||
|
||||
// HelperTrieProcessConfirmations is the number of confirmations before a HelperTrie
|
||||
// is generated
|
||||
HelperTrieProcessConfirmations = 256
|
||||
)
|
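These constants are what the reworked CHT and BloomTrie indexer constructors earlier in this diff expect to be fed. A tiny sketch of the relation the server-side bloom trie relies on:

ratio := params.BloomTrieFrequency / params.BloomBitsBlocks // 32768 / 4096 == 8 parent sections per bloom trie section
_ = ratio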
||||
|
|
4
vendor/github.com/ethereum/go-ethereum/params/protocol_params.go
generated
vendored
4
vendor/github.com/ethereum/go-ethereum/params/protocol_params.go
generated
vendored
|
@ -18,10 +18,6 @@ package params
|
|||
|
||||
import "math/big"
|
||||
|
||||
var (
|
||||
TargetGasLimit = GenesisGasLimit // The artificial target
|
||||
)
|
||||
|
||||
const (
|
||||
GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations.
|
||||
MinGasLimit uint64 = 5000 // Minimum the gas limit may ever be.
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/params/version.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/params/version.go
generated
vendored
|
@ -23,7 +23,7 @@ import (
|
|||
const (
|
||||
VersionMajor = 1 // Major version component of the current release
|
||||
VersionMinor = 8 // Minor version component of the current release
|
||||
VersionPatch = 14 // Patch version component of the current release
|
||||
VersionPatch = 15 // Patch version component of the current release
|
||||
VersionMeta = "stable" // Version metadata to append to the version string
|
||||
)
|
||||
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/signer/storage/aes_gcm_storage.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/signer/storage/aes_gcm_storage.go
generated
vendored
|
@ -36,7 +36,7 @@ type storedCredential struct {
|
|||
CipherText []byte `json:"c"`
|
||||
}
|
||||
|
||||
// AESEncryptedStorage is a storage type which is backed by a json-faile. The json-file contains
|
||||
// AESEncryptedStorage is a storage type which is backed by a json-file. The json-file contains
|
||||
// key-value mappings, where the keys are _not_ encrypted, only the values are.
|
||||
type AESEncryptedStorage struct {
|
||||
// File to read/write credentials
|
||||
|
|
4
vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
generated
vendored
4
vendor/github.com/ethereum/go-ethereum/swarm/api/api.go
generated
vendored
|
@ -111,7 +111,7 @@ func (e *NoResolverError) Error() string {
|
|||
}
|
||||
|
||||
// MultiResolver is used to resolve URL addresses based on their TLDs.
|
||||
// Each TLD can have multiple resolvers, and the resoluton from the
|
||||
// Each TLD can have multiple resolvers, and the resolution from the
|
||||
// first one in the sequence will be returned.
|
||||
type MultiResolver struct {
|
||||
resolvers map[string][]ResolveValidator
|
||||
|
@ -153,7 +153,7 @@ func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
|
|||
|
||||
// Resolve resolves address by choosing a Resolver by TLD.
|
||||
// If there are more default Resolvers, or for a specific TLD,
|
||||
// the Hash from the the first one which does not return error
|
||||
// the Hash from the first one which does not return error
|
||||
// will be returned.
|
||||
func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
|
||||
rs, err := m.getResolveValidator(addr)
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go
generated
vendored
|
@ -820,7 +820,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) {
|
|||
manifestAddr := uri.Address()
|
||||
|
||||
if manifestAddr == nil {
|
||||
manifestAddr, err = s.api.ResolveURI(r.Context(), uri, credentials)
|
||||
manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr)
|
||||
if err != nil {
|
||||
getFileFail.Inc(1)
|
||||
RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/swarm/network/README.md
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/swarm/network/README.md
generated
vendored
|
@ -133,7 +133,7 @@ As part of the deletion protocol then, hashes of insured chunks to be removed ar
|
|||
Downstream peer on the other hand needs to make sure that they can only be finger pointed about a chunk they did receive and store.
|
||||
For this the check of a state should be exhaustive. If historical syncing finishes on one state, all hashes before are covered, no
|
||||
surprises. In other words historical syncing this process is self verifying. With session syncing however, it is not enough to check going back covering the range from old offset to new. Continuity (i.e., that the new state is extension of the old) needs to be verified: after downstream peer reads the range into a buffer, it appends the buffer the last known state at the last known offset and verifies the resulting hash matches
|
||||
the latest state. Past intervals of historical syncing are checked via the the session root.
|
||||
the latest state. Past intervals of historical syncing are checked via the session root.
|
||||
Upstream peer signs the states, downstream peers can use as handover proofs.
|
||||
Downstream peers sign off on a state together with an initial offset.
|
||||
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals/intervals.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals/intervals.go
generated
vendored
|
@ -101,7 +101,7 @@ func (i *Intervals) add(start, end uint64) {
|
|||
}
|
||||
}
|
||||
|
||||
// Merge adds all the intervals from the the m Interval to current one.
|
||||
// Merge adds all the intervals from the m Interval to current one.
|
||||
func (i *Intervals) Merge(m *Intervals) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/swarm/version/version.go
generated
vendored
|
@ -23,7 +23,7 @@ import (
|
|||
const (
|
||||
VersionMajor = 0 // Major version component of the current release
|
||||
VersionMinor = 3 // Minor version component of the current release
|
||||
VersionPatch = 2 // Patch version component of the current release
|
||||
VersionPatch = 3 // Patch version component of the current release
|
||||
VersionMeta = "stable" // Version metadata to append to the version string
|
||||
)
|
||||
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/trie/sync.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/trie/sync.go
generated
vendored
|
@ -73,7 +73,7 @@ func newSyncMemBatch() *syncMemBatch {
|
|||
// and reconstructs the trie step by step until all is done.
|
||||
type Sync struct {
|
||||
database DatabaseReader // Persistent database to check for existing entries
|
||||
membatch *syncMemBatch // Memory buffer to avoid frequest database writes
|
||||
membatch *syncMemBatch // Memory buffer to avoid frequent database writes
|
||||
requests map[common.Hash]*request // Pending requests pertaining to a key hash
|
||||
queue *prque.Prque // Priority queue with the pending requests
|
||||
}
|
||||
|
|
4
vendor/github.com/ethereum/go-ethereum/whisper/whisperv5/whisper.go
generated
vendored
4
vendor/github.com/ethereum/go-ethereum/whisper/whisperv5/whisper.go
generated
vendored
|
@ -291,7 +291,7 @@ func (w *Whisper) AddKeyPair(key *ecdsa.PrivateKey) (string, error) {
|
|||
return id, nil
|
||||
}
|
||||
|
||||
// HasKeyPair checks if the the whisper node is configured with the private key
|
||||
// HasKeyPair checks if the whisper node is configured with the private key
|
||||
// of the specified public pair.
|
||||
func (w *Whisper) HasKeyPair(id string) bool {
|
||||
w.keyMu.RLock()
|
||||
|
@ -717,7 +717,7 @@ func (w *Whisper) expire() {
|
|||
w.stats.messagesCleared++
|
||||
w.stats.memoryCleared += sz
|
||||
w.stats.memoryUsed -= sz
|
||||
return true
|
||||
return false
|
||||
})
|
||||
w.expirations[expiry].Clear()
|
||||
delete(w.expirations, expiry)
|
||||
|
|
2
vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/whisper.go
generated
vendored
2
vendor/github.com/ethereum/go-ethereum/whisper/whisperv6/whisper.go
generated
vendored
|
@ -492,7 +492,7 @@ func (whisper *Whisper) DeleteKeyPairs() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// HasKeyPair checks if the the whisper node is configured with the private key
|
||||
// HasKeyPair checks if the whisper node is configured with the private key
|
||||
// of the specified public pair.
|
||||
func (whisper *Whisper) HasKeyPair(id string) bool {
|
||||
deterministicID, err := toDeterministicID(id, keyIDSize)
|
||||
|
|