diff --git a/Godeps/_workspace/src/github.com/aristanetworks/goarista/AUTHORS b/Godeps/_workspace/src/github.com/aristanetworks/goarista/AUTHORS new file mode 100644 index 000000000000..5bb93cb3fa6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aristanetworks/goarista/AUTHORS @@ -0,0 +1,25 @@ +All contributors are required to sign a "Contributor License Agreement" at + + +The following organizations and people have contributed code to this library. +(Please keep both lists sorted alphabetically.) + + +Arista Networks, Inc. + + +Benoit Sigoure +Fabrice Rabaute + + + +The list of individual contributors for code currently in HEAD can be obtained +at any time with the following script: + +find . -type f \ +| while read i; do \ + git blame -t $i 2>/dev/null; \ + done \ +| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \ +| awk '{a[$0]++; t++} END{for(n in a) print n}' \ +| sort diff --git a/Godeps/_workspace/src/github.com/aristanetworks/goarista/COPYING b/Godeps/_workspace/src/github.com/aristanetworks/goarista/COPYING new file mode 100644 index 000000000000..f433b1a53f5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aristanetworks/goarista/COPYING @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/aristanetworks/goarista/README.md b/Godeps/_workspace/src/github.com/aristanetworks/goarista/README.md new file mode 100644 index 000000000000..9ad95ba9aae2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aristanetworks/goarista/README.md @@ -0,0 +1,66 @@ +# Arista Go library [![Build Status](https://travis-ci.org/aristanetworks/goarista.svg?branch=master)](https://travis-ci.org/aristanetworks/goarista) [![codecov.io](http://codecov.io/github/aristanetworks/goarista/coverage.svg?branch=master)](http://codecov.io/github/aristanetworks/goarista?branch=master) [![GoDoc](https://godoc.org/github.com/aristanetworks/goarista?status.png)](https://godoc.org/github.com/aristanetworks/goarista) [![Go Report Card](https://goreportcard.com/badge/github.com/aristanetworks/goarista)](https://goreportcard.com/report/github.com/aristanetworks/goarista) + +## areflect + +Helper functions to work with the `reflect` package. Contains +`ForceExport()`, which bypasses the check in `reflect.Value` that +prevents accessing unexported attributes. + +## atime + +Provides access to a fast monotonic clock source, to fill in the gap in the +[Go standard library, which lacks one](https://github.com/golang/go/issues/12914). +Don't use `time.Now()` in code that needs to time things or otherwise assume +that time passes at a constant rate, instead use `atime.Nanotime()`. + +## cmd + +### occli + +Simple CLI client for the OpenConfig gRPC interface that prints the response +protobufs in text form or JSON. + +### ockafka + +Client for the OpenConfig gRPC interface that publishes updates to Kafka. + +### ocredis + +Client for the OpenConfig gRPC interface that publishes updates to Redis +using both [Redis' hashes](http://redis.io/topics/data-types-intro#hashes) +(one per container / entity / collection) and [Redis' Pub/Sub](http://redis.io/topics/pubsub) +mechanism, so that one can [subscribe](http://redis.io/commands/subscribe) to +incoming updates being applied on the hash maps. 
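A minimal usage sketch (not shipped with this library) of timing a section of code with `atime.NanoTime()`, as recommended in the `atime` section above; the `time.Sleep` stands in for the work being measured:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aristanetworks/goarista/atime"
)

func main() {
	start := atime.NanoTime()
	time.Sleep(10 * time.Millisecond) // placeholder for the work being timed
	elapsed := time.Duration(atime.NanoTime() - start)
	fmt.Println("work took", elapsed)
}
```

The `common/mclock` package added later in this diff wraps this same call behind its `AbsTime` type for use inside go-ethereum.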
+ +## dscp + +Provides `ListenTCPWithTOS()`, which is a replacement for `net.ListenTCP()` +that allows specifying the ToS (Type of Service), to specify DSCP / ECN / +class of service flags to use for incoming connections. + +## key + +Provides a common type used across various Arista projects, named `key.Key`, +which is used to work around the fact that Go can't let one +use a non-hashable type as a key to a `map`, and we sometimes need to use +a `map[string]interface{}` (or something containing one) as a key to maps. +As a result, we frequently use `map[key.Key]interface{}` instead of just +`map[interface{}]interface{}` when we need a generic key-value collection. + +## monitor + +A library to help expose monitoring metrics on top of the +[`expvar`](https://golang.org/pkg/expvar/) infrastructure. + +## netns + +`netns.Do(namespace, cb)` provides a handy mechanism to execute the given +callback `cb` in the given [network namespace](https://lwn.net/Articles/580893/). + +## test + +This is a [Go](http://golang.org/) library to help in writing unit tests. + +## Examples + +TBD diff --git a/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/issue15006.s b/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/issue15006.s new file mode 100644 index 000000000000..66109f4f3171 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/issue15006.s @@ -0,0 +1,6 @@ +// Copyright (C) 2016 Arista Networks, Inc. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// This file is intentionally empty. +// It's a workaround for https://github.com/golang/go/issues/15006 diff --git a/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/nanotime.go b/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/nanotime.go new file mode 100644 index 000000000000..1ede420485cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/nanotime.go @@ -0,0 +1,26 @@ +// Copyright (C) 2016 Arista Networks, Inc. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// Package atime provides a fast monotonic clock source. +package atime + +import "unsafe" + +// Make goimports import the unsafe package, which is required to be able +// to use //go:linkname +var _ = unsafe.Sizeof(0) + +//go:noescape +//go:linkname nanotime runtime.nanotime +func nanotime() int64 + +// NanoTime returns the current time in nanoseconds from a monotonic clock. +// The time returned is based on some arbitrary platform-specific point in the +// past. The time returned is guaranteed to increase monotonically at a +// constant rate, unlike time.Now() from the Go standard library, which may +// slow down, speed up, jump forward or backward, due to NTP activity or leap +// seconds. +func NanoTime() uint64 { + return uint64(nanotime()) +} diff --git a/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/nanotime_test.go b/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/nanotime_test.go new file mode 100644 index 000000000000..15d53421f4cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/aristanetworks/goarista/atime/nanotime_test.go @@ -0,0 +1,22 @@ +// Copyright (C) 2016 Arista Networks, Inc. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// Package atime provides a fast monotonic clock source. +package atime_test + +import ( + "testing" + + . 
"github.com/aristanetworks/goarista/atime" +) + +func TestNanoTime(t *testing.T) { + for i := 0; i < 100; i++ { + t1 := NanoTime() + t2 := NanoTime() + if t1 >= t2 { + t.Fatalf("t1=%d should have been strictly less than t2=%d", t1, t2) + } + } +} diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 321551ce053a..fe63dfcca665 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -128,7 +128,7 @@ func removeDB(ctx *cli.Context) error { fmt.Println("Removing chaindata...") start := time.Now() - os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "chaindata")) + os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), utils.ChainDbName(ctx))) fmt.Printf("Removed in %v\n", time.Since(start)) } else { @@ -153,7 +153,7 @@ func upgradeDB(ctx *cli.Context) error { utils.Fatalf("Unable to export chain for reimport %s", err) } chainDb.Close() - os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), "chaindata")) + os.RemoveAll(filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), utils.ChainDbName(ctx))) // Import the chain file. chain, chainDb = utils.MakeChain(ctx) diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 92d6f7f86ad6..c3becc844e89 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -70,10 +70,13 @@ func localConsole(ctx *cli.Context) error { defer node.Stop() // Attach to the newly started node and start the JavaScript console - client, err := node.Attach() - if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) - } + client := rpc.NewClientRestartWrapper(func() *rpc.Client { + client, err := node.Attach() + if err != nil { + utils.Fatalf("Failed to attach to the inproc geth: %v", err) + } + return client + }) config := console.Config{ DataDir: node.DataDir(), DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), @@ -102,10 +105,14 @@ func localConsole(ctx *cli.Context) error { // console to it. 
func remoteConsole(ctx *cli.Context) error { // Attach to a remotely running geth instance and start the JavaScript console - client, err := dialRPC(ctx.Args().First()) - if err != nil { - utils.Fatalf("Unable to attach to remote geth: %v", err) - } + client := rpc.NewClientRestartWrapper(func() *rpc.Client { + client, err := dialRPC(ctx.Args().First()) + if err != nil { + utils.Fatalf("Unable to attach to remote geth: %v", err) + } + return client + }) + config := console.Config{ DataDir: utils.MustMakeDataDir(ctx), DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), @@ -154,10 +161,14 @@ func ephemeralConsole(ctx *cli.Context) error { defer node.Stop() // Attach to the newly started node and start the JavaScript console - client, err := node.Attach() - if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) - } + client := rpc.NewClientRestartWrapper(func() *rpc.Client { + client, err := node.Attach() + if err != nil { + utils.Fatalf("Failed to attach to the inproc geth: %v", err) + } + return client + }) + config := console.Config{ DataDir: node.DataDir(), DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), diff --git a/cmd/geth/main.go b/cmd/geth/main.go index de679cccaddd..50d865bf4e2f 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -40,6 +40,7 @@ import ( "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/release" "github.com/ethereum/go-ethereum/rlp" @@ -155,6 +156,10 @@ participating. utils.BlockchainVersionFlag, utils.OlympicFlag, utils.FastSyncFlag, + utils.LightModeFlag, + utils.NoDefSrvFlag, + utils.LightServFlag, + utils.LightPeersFlag, utils.CacheFlag, utils.LightKDFFlag, utils.JSpathFlag, @@ -261,7 +266,7 @@ func initGenesis(ctx *cli.Context) error { utils.Fatalf("must supply path to genesis JSON file") } - chainDb, err := ethdb.NewLDBDatabase(filepath.Join(utils.MustMakeDataDir(ctx), "chaindata"), 0, 0) + chainDb, err := ethdb.NewLDBDatabase(filepath.Join(utils.MustMakeDataDir(ctx), utils.ChainDbName(ctx)), 0, 0) if err != nil { utils.Fatalf("could not open database: %v", err) } @@ -320,6 +325,30 @@ func startNode(ctx *cli.Context, stack *node.Node) { // Start up the node itself utils.StartNode(stack) + if ctx.GlobalBool(utils.LightModeFlag.Name) && !ctx.GlobalBool(utils.NoDefSrvFlag.Name) { + // add default light server; test phase only + addPeer := func(url string) { + node, err := discover.ParseNode(url) + if err == nil { + stack.Server().AddPeer(node) + } + } + + if ctx.GlobalBool(utils.OpposeDAOFork.Name) { + // Classic (Azure) + addPeer("enode://fc3d7b57e5d317946bf421411632ec98d5ffcbf94548cd7bc10088e4fef176670f8ec70280d301a9d0b22fe498203f62b323da15b3acc18b02a1fee2a06b7d3f@40.118.3.223:30305") + } else { + // MainNet (Azure) + addPeer("enode://feaf206a308a669a789be45f4dadcb351246051727f12415ad69e44f8080daf0569c10fe1d9944d245dd1f3e1c89cedda8ce03d7e3d5ed8975a35cad4b4f7ec1@40.118.3.223:30303") + // MainNet (John Gerryts @phonikg) + addPeer("enode://3cbd26f73513af0e789c55ea9efa6d259be2d5f6882bdb52740e21e01379287b652642a87207f1bc07c64aae3ab51ab566dede7588d6064022d40577fe59d5de@50.112.52.169:30300") + } + if ctx.GlobalBool(utils.TestNetFlag.Name) { + // TestNet (John Gerryts @phonikg) + addPeer("enode://7d00e8c27b2328e2008a9fc86e81afba22681fdac675b99805fa62cc29ee8a2a9d83f916f7661da6a6bd78155a430bb2bd7cec733ca9e700e236ec9c71d97e24@50.112.52.169:30301") + 
} + } + // Unlock any account specifically requested accman := stack.AccountManager() passwords := utils.MakePasswordList(ctx) diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index eb897d2b5a94..4e992546f16d 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -70,6 +70,10 @@ var AppHelpFlagGroups = []flagGroup{ utils.DevModeFlag, utils.IdentityFlag, utils.FastSyncFlag, + utils.LightModeFlag, + utils.NoDefSrvFlag, + utils.LightServFlag, + utils.LightPeersFlag, utils.LightKDFFlag, utils.CacheFlag, utils.BlockchainVersionFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 067faf3cecdb..6e27ef35751e 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -39,6 +39,8 @@ import ( "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/les" + "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/metrics" @@ -153,6 +155,24 @@ var ( Name: "fast", Usage: "Enable fast syncing through state downloads", } + LightModeFlag = cli.BoolFlag{ + Name: "light", + Usage: "Enable light client mode", + } + NoDefSrvFlag = cli.BoolFlag{ + Name: "nodefsrv", + Usage: "Don't add default LES server (only for test version)", + } + LightServFlag = cli.IntFlag{ + Name: "lightserv", + Usage: "Maximum percentage of time allowed for serving LES requests (0-90)", + Value: 20, + } + LightPeersFlag = cli.IntFlag{ + Name: "lightpeers", + Usage: "Maximum number of LES client peers", + Value: 10, + } LightKDFFlag = cli.BoolFlag{ Name: "lightkdf", Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", @@ -674,6 +694,10 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, relconf release.Conf Etherbase: MakeEtherbase(stack.AccountManager(), ctx), ChainConfig: MustMakeChainConfig(ctx), FastSync: ctx.GlobalBool(FastSyncFlag.Name), + LightMode: ctx.GlobalBool(LightModeFlag.Name), + NoDefSrv: ctx.GlobalBool(NoDefSrvFlag.Name), + LightServ: ctx.GlobalInt(LightServFlag.Name), + LightPeers: ctx.GlobalInt(LightPeersFlag.Name), BlockChainVersion: ctx.GlobalInt(BlockchainVersionFlag.Name), DatabaseCache: ctx.GlobalInt(CacheFlag.Name), DatabaseHandles: MakeDatabaseHandles(), @@ -709,6 +733,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, relconf release.Conf } ethConf.Genesis = core.TestNetGenesisBlock() state.StartingNonce = 1048576 // (2**20) + light.StartingNonce = 1048576 // (2**20) case ctx.GlobalBool(DevModeFlag.Name): ethConf.Genesis = core.OlympicGenesisBlock() @@ -718,10 +743,23 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, relconf release.Conf ethConf.PowTest = true } - if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { - return eth.New(ctx, ethConf) - }); err != nil { - Fatalf("Failed to register the Ethereum service: %v", err) + if ethConf.LightMode { + if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + return les.New(ctx, ethConf) + }); err != nil { + Fatalf("Failed to register the Ethereum light node service: %v", err) + } + } else { + if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { + fullNode, err := eth.New(ctx, ethConf) + if fullNode != nil { + ls, _ := les.NewLesServer(fullNode, ethConf) + fullNode.AddLesServer(ls) + } + return fullNode, err + }); err != nil { + Fatalf("Failed to register the Ethereum full node service: %v", err) + } } if err 
:= stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { return release.NewReleaseService(ctx, relconf) @@ -820,15 +858,24 @@ func MustMakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *core.ChainC return config } +func ChainDbName(ctx *cli.Context) string { + if ctx.GlobalBool(LightModeFlag.Name) { + return "lightchaindata" + } else { + return "chaindata" + } +} + // MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails. func MakeChainDatabase(ctx *cli.Context) ethdb.Database { var ( datadir = MustMakeDataDir(ctx) cache = ctx.GlobalInt(CacheFlag.Name) handles = MakeDatabaseHandles() + name = ChainDbName(ctx) ) - chainDb, err := ethdb.NewLDBDatabase(filepath.Join(datadir, "chaindata"), cache, handles) + chainDb, err := ethdb.NewLDBDatabase(filepath.Join(datadir, name), cache, handles) if err != nil { Fatalf("Could not open database: %v", err) } diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go new file mode 100644 index 000000000000..7604293774e8 --- /dev/null +++ b/common/mclock/mclock.go @@ -0,0 +1,30 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// package mclock is a wrapper for a monotonic clock source +package mclock + +import ( + "time" + + "github.com/aristanetworks/goarista/atime" +) + +type AbsTime time.Duration // absolute monotonic time + +func Now() AbsTime { + return AbsTime(atime.NanoTime()) +} diff --git a/console/bridge.go b/console/bridge.go index 22ed7192bb4a..3009eabc457e 100644 --- a/console/bridge.go +++ b/console/bridge.go @@ -26,18 +26,20 @@ import ( "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/rpc" "github.com/robertkrimen/otto" + "golang.org/x/net/context" ) // bridge is a collection of JavaScript utility methods to bride the .js runtime // environment and the Go RPC connection backing the remote method calls. type bridge struct { - client *rpc.Client // RPC client to execute Ethereum requests through + client *rpc.ClientRestartWrapper // RPC client to execute Ethereum requests through prompter UserPrompter // Input prompter to allow interactive user feedback printer io.Writer // Output writer to serialize any display strings to + ctx context.Context } // newBridge creates a new JavaScript wrapper around an RPC client. 
-func newBridge(client *rpc.Client, prompter UserPrompter, printer io.Writer) *bridge { +func newBridge(client *rpc.ClientRestartWrapper, prompter UserPrompter, printer io.Writer) *bridge { return &bridge{ client: client, prompter: prompter, @@ -45,6 +47,10 @@ func newBridge(client *rpc.Client, prompter UserPrompter, printer io.Writer) *br } } +func (b *bridge) setContext(ctx context.Context) { + b.ctx = ctx +} + // NewAccount is a wrapper around the personal.newAccount RPC method that uses a // non-echoing password prompt to aquire the passphrase and executes the original // RPC method (saved in jeth.newAccount) with it to actually execute the RPC call. @@ -222,7 +228,26 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) { resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`) resp.Set("id", req.Id) var result json.RawMessage - err = b.client.Call(&result, req.Method, req.Params...) + + client := b.client.Client() + errc := make(chan error, 1) + errc2 := make(chan error) + go func(){ + if b.ctx != nil { + select { + case <-b.ctx.Done(): + b.client.Restart() + errc2 <- b.ctx.Err() + case err := <-errc: + errc2 <- err + } + } else { + errc2 <- <-errc + } + }() + errc <- client.Call(&result, req.Method, req.Params...) + err = <-errc2 + switch err := err.(type) { case nil: if result == nil { diff --git a/console/console.go b/console/console.go index f224f0c2ebef..b0dff7e32b05 100644 --- a/console/console.go +++ b/console/console.go @@ -26,6 +26,7 @@ import ( "regexp" "sort" "strings" + "time" "github.com/ethereum/go-ethereum/internal/jsre" "github.com/ethereum/go-ethereum/internal/web3ext" @@ -33,6 +34,7 @@ import ( "github.com/mattn/go-colorable" "github.com/peterh/liner" "github.com/robertkrimen/otto" + "golang.org/x/net/context" ) var ( @@ -50,26 +52,27 @@ const DefaultPrompt = "> " // Config is te collection of configurations to fine tune the behavior of the // JavaScript console. type Config struct { - DataDir string // Data directory to store the console history at - DocRoot string // Filesystem path from where to load JavaScript files from - Client *rpc.Client // RPC client to execute Ethereum requests through - Prompt string // Input prompt prefix string (defaults to DefaultPrompt) - Prompter UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter) - Printer io.Writer // Output writer to serialize any display strings to (defaults to os.Stdout) - Preload []string // Absolute paths to JavaScript files to preload + DataDir string // Data directory to store the console history at + DocRoot string // Filesystem path from where to load JavaScript files from + Client *rpc.ClientRestartWrapper // RPC client to execute Ethereum requests through + Prompt string // Input prompt prefix string (defaults to DefaultPrompt) + Prompter UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter) + Printer io.Writer // Output writer to serialize any display strings to (defaults to os.Stdout) + Preload []string // Absolute paths to JavaScript files to preload } // Console is a JavaScript interpreted runtime environment. It is a fully fleged // JavaScript console attached to a running node via an external or in-process RPC // client. 
type Console struct { - client *rpc.Client // RPC client to execute Ethereum requests through - jsre *jsre.JSRE // JavaScript runtime environment running the interpreter - prompt string // Input prompt prefix string - prompter UserPrompter // Input prompter to allow interactive user feedback - histPath string // Absolute path to the console scrollback history - history []string // Scroll history maintained by the console - printer io.Writer // Output writer to serialize any display strings to + client *rpc.ClientRestartWrapper // RPC client to execute Ethereum requests through + jsre *jsre.JSRE // JavaScript runtime environment running the interpreter + prompt string // Input prompt prefix string + prompter UserPrompter // Input prompter to allow interactive user feedback + histPath string // Absolute path to the console scrollback history + history []string // Scroll history maintained by the console + printer io.Writer // Output writer to serialize any display strings to + setContext func(context.Context) } func New(config Config) (*Console, error) { @@ -103,6 +106,7 @@ func New(config Config) (*Console, error) { func (c *Console) init(preload []string) error { // Initialize the JavaScript <-> Go RPC bridge bridge := newBridge(c.client, c.prompter, c.printer) + c.setContext = bridge.setContext c.jsre.Set("jeth", struct{}{}) jethObj, _ := c.jsre.Get("jeth") @@ -127,7 +131,7 @@ func (c *Console) init(preload []string) error { return fmt.Errorf("web3 provider: %v", err) } // Load the supported APIs into the JavaScript runtime environment - apis, err := c.client.SupportedModules() + apis, err := c.client.Client().SupportedModules() if err != nil { return fmt.Errorf("api modules: %v", err) } @@ -253,7 +257,7 @@ func (c *Console) Welcome() { console.log(" datadir: " + admin.datadir); `) // List all the supported modules for the user to call - if apis, err := c.client.SupportedModules(); err == nil { + if apis, err := c.client.Client().SupportedModules(); err == nil { modules := make([]string, 0, len(apis)) for api, version := range apis { modules = append(modules, fmt.Sprintf("%s:%s", api, version)) @@ -347,7 +351,12 @@ func (c *Console) Interactive() { } } } + done := make(chan struct{}) + ctx, _ := context.WithTimeout(context.Background(), time.Second*40) + c.setContext(ctx) c.Evaluate(input) + c.setContext(nil) + close(done) input = "" } } diff --git a/core/blockchain.go b/core/blockchain.go index 888c98dce699..edb12b1f4f0f 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -618,6 +618,37 @@ func (self *BlockChain) Rollback(chain []common.Hash) { } } +// SetReceiptsData computes all the non-consensus fields of the receipts +func SetReceiptsData(block *types.Block, receipts types.Receipts) { + transactions, logIndex := block.Transactions(), uint(0) + + for j := 0; j < len(receipts); j++ { + // The transaction hash can be retrieved from the transaction itself + receipts[j].TxHash = transactions[j].Hash() + + // The contract address can be derived from the transaction itself + if MessageCreatesContract(transactions[j]) { + from, _ := transactions[j].From() + receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) + } + // The used gas can be calculated based on previous receipts + if j == 0 { + receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed) + } else { + receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed) + } + // The derived log fields can simply be set from the block and transaction + 
for k := 0; k < len(receipts[j].Logs); k++ { + receipts[j].Logs[k].BlockNumber = block.NumberU64() + receipts[j].Logs[k].BlockHash = block.Hash() + receipts[j].Logs[k].TxHash = receipts[j].TxHash + receipts[j].Logs[k].TxIndex = uint(j) + receipts[j].Logs[k].Index = logIndex + logIndex++ + } + } +} + // InsertReceiptChain attempts to complete an already existing header chain with // transaction and receipt data. func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { @@ -659,32 +690,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain continue } // Compute all the non-consensus fields of the receipts - transactions, logIndex := block.Transactions(), uint(0) - for j := 0; j < len(receipts); j++ { - // The transaction hash can be retrieved from the transaction itself - receipts[j].TxHash = transactions[j].Hash() - - // The contract address can be derived from the transaction itself - if MessageCreatesContract(transactions[j]) { - from, _ := transactions[j].From() - receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) - } - // The used gas can be calculated based on previous receipts - if j == 0 { - receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed) - } else { - receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed) - } - // The derived log fields can simply be set from the block and transaction - for k := 0; k < len(receipts[j].Logs); k++ { - receipts[j].Logs[k].BlockNumber = block.NumberU64() - receipts[j].Logs[k].BlockHash = block.Hash() - receipts[j].Logs[k].TxHash = receipts[j].TxHash - receipts[j].Logs[k].TxIndex = uint(j) - receipts[j].Logs[k].Index = logIndex - logIndex++ - } - } + SetReceiptsData(block, receipts) // Write all the data out into the database if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil { errs[index] = fmt.Errorf("failed to write block body: %v", err) diff --git a/core/database_util.go b/core/database_util.go index 5f9afe6ba6db..73fac20aade6 100644 --- a/core/database_util.go +++ b/core/database_util.go @@ -347,8 +347,13 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B if err != nil { return err } + return WriteBodyRLP(db, hash, number, data) +} + +// WriteBodyRLP writes a serialized body of a block into the database. +func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error { key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) - if err := db.Put(key, data); err != nil { + if err := db.Put(key, rlp); err != nil { glog.Fatalf("failed to store block body into database: %v", err) } glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4]) @@ -446,6 +451,16 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error { return nil } +// WriteReceipt stores a single transaction receipt into the database. +func WriteReceipt(db ethdb.Database, receipt *types.Receipt) error { + storageReceipt := (*types.ReceiptForStorage)(receipt) + data, err := rlp.EncodeToBytes(storageReceipt) + if err != nil { + return err + } + return db.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data) +} + // WriteReceipts stores a batch of transaction receipts into the database. 
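// A quick numeric illustration of the GasUsed derivation in core.SetReceiptsData
// above: receipts persist only cumulative gas, so per-transaction gas is the
// difference between consecutive cumulative values. With (made-up) cumulative
// figures of 21000, 63000 and 100000 for three transactions, the derived
// GasUsed values are 21000, 42000 and 37000, and the log Index field keeps
// counting across all transactions in the block. The WriteReceipts helper
// below then persists such a batch of receipts once the fields are populated.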
func WriteReceipts(db ethdb.Database, receipts types.Receipts) error { batch := db.NewBatch() @@ -614,3 +629,30 @@ func GetChainConfig(db ethdb.Database, hash common.Hash) (*ChainConfig, error) { return &config, nil } + +// FindCommonAncestor returns the last common ancestor of two block headers +func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header { + for a.GetNumberU64() > b.GetNumberU64() { + a = GetHeader(db, a.ParentHash, a.GetNumberU64()-1) + if a == nil { + return nil + } + } + for a.GetNumberU64() < b.GetNumberU64() { + b = GetHeader(db, b.ParentHash, b.GetNumberU64()-1) + if b == nil { + return nil + } + } + for a.Hash() != b.Hash() { + a = GetHeader(db, a.ParentHash, a.GetNumberU64()-1) + if a == nil { + return nil + } + b = GetHeader(db, b.ParentHash, b.GetNumberU64()-1) + if b == nil { + return nil + } + } + return a +} diff --git a/core/types/block.go b/core/types/block.go index 599359247843..12fcb6cdc52f 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -70,6 +70,15 @@ type Header struct { Nonce BlockNonce } +func (h *Header) GetNumber() *big.Int { return new(big.Int).Set(h.Number) } +func (h *Header) GetGasLimit() *big.Int { return new(big.Int).Set(h.GasLimit) } +func (h *Header) GetGasUsed() *big.Int { return new(big.Int).Set(h.GasUsed) } +func (h *Header) GetDifficulty() *big.Int { return new(big.Int).Set(h.Difficulty) } +func (h *Header) GetTime() *big.Int { return new(big.Int).Set(h.Time) } +func (h *Header) GetNumberU64() uint64 { return h.Number.Uint64() } +func (h *Header) GetNonce() uint64 { return binary.BigEndian.Uint64(h.Nonce[:]) } +func (h *Header) GetExtra() []byte { return common.CopyBytes(h.Extra) } + func (h *Header) Hash() common.Hash { return rlpHash(h) } diff --git a/eth/api_backend.go b/eth/api_backend.go index efcdb3361997..aa4c75e20441 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -44,17 +44,17 @@ func (b *EthApiBackend) SetHead(number uint64) { b.eth.blockchain.SetHead(number) } -func (b *EthApiBackend) HeaderByNumber(blockNr rpc.BlockNumber) *types.Header { +func (b *EthApiBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { // Pending block is only known by the miner if blockNr == rpc.PendingBlockNumber { block, _ := b.eth.miner.Pending() - return block.Header() + return block.Header(), nil } // Otherwise resolve and return the block if blockNr == rpc.LatestBlockNumber { - return b.eth.blockchain.CurrentBlock().Header() + return b.eth.blockchain.CurrentBlock().Header(), nil } - return b.eth.blockchain.GetHeaderByNumber(uint64(blockNr)) + return b.eth.blockchain.GetHeaderByNumber(uint64(blockNr)), nil } func (b *EthApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) { @@ -70,16 +70,16 @@ func (b *EthApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumb return b.eth.blockchain.GetBlockByNumber(uint64(blockNr)), nil } -func (b *EthApiBackend) StateAndHeaderByNumber(blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) { +func (b *EthApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) { // Pending state is only known by the miner if blockNr == rpc.PendingBlockNumber { block, state := b.eth.miner.Pending() return EthApiState{state}, block.Header(), nil } // Otherwise resolve the block number and return its state - header := b.HeaderByNumber(blockNr) - if header == nil { - return nil, nil, nil + header, err := b.HeaderByNumber(ctx, 
blockNr) + if header == nil || err != nil { + return nil, nil, err } stateDb, err := state.New(header.Root, b.eth.chainDb) return EthApiState{stateDb}, header, err diff --git a/eth/backend.go b/eth/backend.go index e1d123a02a14..38b4eb60ec74 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -66,9 +66,13 @@ var ( type Config struct { ChainConfig *core.ChainConfig // chain configuration - NetworkId int // Network ID to use for selecting peers to connect to - Genesis string // Genesis JSON to seed the chain database with - FastSync bool // Enables the state download based fast synchronisation algorithm + NetworkId int // Network ID to use for selecting peers to connect to + Genesis string // Genesis JSON to seed the chain database with + FastSync bool // Enables the state download based fast synchronisation algorithm + LightMode bool // Running in light client mode + NoDefSrv bool // No default LES server + LightServ int // Maximum percentage of time allowed for serving LES requests + LightPeers int // Maximum number of LES client peers BlockChainVersion int SkipBcVersionCheck bool // e.g. blockchain export @@ -101,6 +105,12 @@ type Config struct { TestGenesisState ethdb.Database // Genesis state to seed the database with (testing only!) } +type LesServer interface { + Start() + Stop() + Protocols() []p2p.Protocol +} + // Ethereum implements the Ethereum full node service. type Ethereum struct { chainConfig *core.ChainConfig @@ -112,6 +122,7 @@ type Ethereum struct { txMu sync.Mutex blockchain *core.BlockChain protocolManager *ProtocolManager + ls LesServer // DB interfaces chainDb ethdb.Database // Block chain database @@ -120,7 +131,7 @@ type Ethereum struct { httpclient *httpclient.HTTPClient accountManager *accounts.Manager - apiBackend *EthApiBackend + ApiBackend *EthApiBackend miner *miner.Miner Mining bool @@ -136,10 +147,14 @@ type Ethereum struct { netRPCService *ethapi.PublicNetAPI } +func (s *Ethereum) AddLesServer(ls LesServer) { + s.ls = ls +} + // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { - chainDb, err := createDB(ctx, config) + chainDb, err := CreateDB(ctx, config, "chaindata") if err != nil { return nil, err } @@ -234,14 +249,14 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { GpobaseCorrectionFactor: config.GpobaseCorrectionFactor, } gpo := gasprice.NewGasPriceOracle(eth.blockchain, chainDb, eth.eventMux, gpoParams) - eth.apiBackend = &EthApiBackend{eth, gpo} + eth.ApiBackend = &EthApiBackend{eth, gpo} return eth, nil } -// createDB creates the chain database. -func createDB(ctx *node.ServiceContext, config *Config) (ethdb.Database, error) { - db, err := ctx.OpenDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles) +// CreateDB creates the chain database. +func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Database, error) { + db, err := ctx.OpenDatabase(name, config.DatabaseCache, config.DatabaseHandles) if db, ok := db.(*ethdb.LDBDatabase); ok { db.Meter("eth/db/chaindata/") } @@ -289,7 +304,7 @@ func CreatePoW(config *Config) (*ethash.Ethash, error) { // APIs returns the collection of RPC services the ethereum package offers. // NOTE, some of these services probably need to be moved to somewhere else. 
func (s *Ethereum) APIs() []rpc.API { - return append(ethapi.GetAPIs(s.apiBackend, s.solcPath), []rpc.API{ + return append(ethapi.GetAPIs(s.ApiBackend, s.solcPath), []rpc.API{ { Namespace: "eth", Version: "1.0", @@ -313,7 +328,7 @@ func (s *Ethereum) APIs() []rpc.API { }, { Namespace: "eth", Version: "1.0", - Service: filters.NewPublicFilterAPI(s.chainDb, s.eventMux), + Service: filters.NewPublicFilterAPI(s.ApiBackend), Public: true, }, { Namespace: "admin", @@ -381,7 +396,11 @@ func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManage // Protocols implements node.Service, returning all the currently configured // network protocols to start. func (s *Ethereum) Protocols() []p2p.Protocol { - return s.protocolManager.SubProtocols + if s.ls == nil { + return s.protocolManager.SubProtocols + } else { + return append(s.protocolManager.SubProtocols, s.ls.Protocols()...) + } } // Start implements node.Service, starting all internal goroutines needed by the @@ -392,6 +411,9 @@ func (s *Ethereum) Start(srvr *p2p.Server) error { s.StartAutoDAG() } s.protocolManager.Start() + if s.ls != nil { + s.ls.Start() + } return nil } @@ -403,6 +425,9 @@ func (s *Ethereum) Stop() error { } s.blockchain.Stop() s.protocolManager.Stop() + if s.ls != nil { + s.ls.Stop() + } s.txPool.Stop() s.miner.Stop() s.eventMux.Stop() diff --git a/eth/bind.go b/eth/bind.go index bf7a7fb539df..26fc028605e6 100644 --- a/eth/bind.go +++ b/eth/bind.go @@ -42,11 +42,11 @@ type ContractBackend struct { // NewContractBackend creates a new native contract backend using an existing // Etheruem object. -func NewContractBackend(eth *Ethereum) *ContractBackend { +func NewContractBackend(apiBackend ethapi.Backend) *ContractBackend { return &ContractBackend{ - eapi: ethapi.NewPublicEthereumAPI(eth.apiBackend), - bcapi: ethapi.NewPublicBlockChainAPI(eth.apiBackend), - txapi: ethapi.NewPublicTransactionPoolAPI(eth.apiBackend), + eapi: ethapi.NewPublicEthereumAPI(apiBackend), + bcapi: ethapi.NewPublicBlockChainAPI(apiBackend), + txapi: ethapi.NewPublicTransactionPoolAPI(apiBackend), } } diff --git a/eth/db_upgrade.go b/eth/db_upgrade.go index 172bb0954a03..a0bce7f920e7 100644 --- a/eth/db_upgrade.go +++ b/eth/db_upgrade.go @@ -50,8 +50,6 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) { return nil // empty database, nothing to do } - glog.V(logger.Info).Infof("Upgrading chain database to use sequential keys") - stopChn := make(chan struct{}) stoppedChn := make(chan struct{}) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index c8f710450d54..d9a344da7e12 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -163,13 +163,13 @@ type Downloader struct { } // New creates a new downloader to fetch hashes and blocks from remote peers. 
-func New(stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn, +func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, hasHeader headerCheckFn, hasBlockAndState blockAndStateCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn, headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, commitHeadBlock headBlockCommitterFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, rollback chainRollbackFn, dropPeer peerDropFn) *Downloader { dl := &Downloader{ - mode: FullSync, + mode: mode, mux: mux, queue: newQueue(stateDb), peers: newPeerSet(), @@ -1161,10 +1161,23 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { for i, header := range rollback { hashes[i] = header.Hash() } - lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, d.headFastBlock().Number(), d.headBlock().Number() + lastHeader, lastFastBlock, lastBlock := d.headHeader().Number, common.Big0, common.Big0 + if d.headFastBlock != nil { + lastFastBlock = d.headFastBlock().Number() + } + if d.headBlock != nil { + lastBlock = d.headBlock().Number() + } d.rollback(hashes) + curFastBlock, curBlock := common.Big0, common.Big0 + if d.headFastBlock != nil { + curFastBlock = d.headFastBlock().Number() + } + if d.headBlock != nil { + curBlock = d.headBlock().Number() + } glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", - len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, d.headFastBlock().Number(), lastBlock, d.headBlock().Number()) + len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock) // If we're already past the pivot point, this could be an attack, thread carefully if rollback[len(rollback)-1].Number.Uint64() > pivot { @@ -1212,8 +1225,10 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { // L: Sync begins, and finds common ancestor at 11 // L: Request new headers up from 11 (R's TD was higher, it must have something) // R: Nothing to give - if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { - return errStallingPeer + if d.mode != LightSync { + if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { + return errStallingPeer + } } // If fast or light syncing, ensure promised headers are indeed delivered. 
This is // needed to detect scenarios where an attacker feeds a bad pivot and then bails out diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index a2efc746961e..384ae178e601 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -172,7 +172,7 @@ func newTester() *downloadTester { tester.stateDb, _ = ethdb.NewMemDatabase() tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00}) - tester.downloader = New(tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, + tester.downloader = New(FullSync, tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer) diff --git a/eth/filters/api.go b/eth/filters/api.go index 3bc2203481fa..4ba3fb1f2a17 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/rpc" ) @@ -52,6 +53,7 @@ type filter struct { // PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various // information related to the Ethereum protocol such als blocks, transactions and logs. type PublicFilterAPI struct { + apiBackend ethapi.Backend mux *event.TypeMux quit chan struct{} chainDb ethdb.Database @@ -61,12 +63,13 @@ type PublicFilterAPI struct { } // NewPublicFilterAPI returns a new PublicFilterAPI instance. -func NewPublicFilterAPI(chainDb ethdb.Database, mux *event.TypeMux) *PublicFilterAPI { +func NewPublicFilterAPI(apiBackend ethapi.Backend) *PublicFilterAPI { api := &PublicFilterAPI{ - mux: mux, - chainDb: chainDb, - events: NewEventSystem(mux), - filters: make(map[rpc.ID]*filter), + apiBackend: apiBackend, + mux: apiBackend.EventMux(), + chainDb: apiBackend.ChainDb(), + events: NewEventSystem(apiBackend.EventMux()), + filters: make(map[rpc.ID]*filter), } go api.timeoutLoop() @@ -314,7 +317,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) rpc.ID { // GetLogs returns logs matching the given argument that are stored within the state. // // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs -func (api *PublicFilterAPI) GetLogs(crit FilterCriteria) []Log { +func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]Log, error) { if crit.FromBlock == nil { crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64()) } @@ -322,13 +325,14 @@ func (api *PublicFilterAPI) GetLogs(crit FilterCriteria) []Log { crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64()) } - filter := New(api.chainDb) + filter := New(api.apiBackend) filter.SetBeginBlock(crit.FromBlock.Int64()) filter.SetEndBlock(crit.ToBlock.Int64()) filter.SetAddresses(crit.Addresses) filter.SetTopics(crit.Topics) - return returnLogs(filter.Find()) + logs, err := filter.Find(ctx) + return returnLogs(logs), err } // UninstallFilter removes the filter with the given filter id. @@ -352,22 +356,23 @@ func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool { // If the filter could not be found an empty array of logs is returned. 
// // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs -func (api *PublicFilterAPI) GetFilterLogs(id rpc.ID) []Log { +func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]Log, error) { api.filtersMu.Lock() f, found := api.filters[id] api.filtersMu.Unlock() if !found || f.typ != LogsSubscription { - return []Log{} + return []Log{}, nil } - filter := New(api.chainDb) + filter := New(api.apiBackend) filter.SetBeginBlock(f.crit.FromBlock.Int64()) filter.SetEndBlock(f.crit.ToBlock.Int64()) filter.SetAddresses(f.crit.Addresses) filter.SetTopics(f.crit.Topics) - return returnLogs(filter.Find()) + logs, err := filter.Find(ctx) + return returnLogs(logs), err } // GetFilterChanges returns the logs for the filter with the given id since @@ -536,3 +541,4 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { return nil } + diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 4226620dc4e8..1e6a6f6f965b 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -17,17 +17,22 @@ package filters import ( - "math" + // "math" "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" + // "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/rpc" + "golang.org/x/net/context" ) // Filter can be used to retrieve and filter logs type Filter struct { + apiBackend ethapi.Backend + created time.Time db ethdb.Database @@ -38,8 +43,11 @@ type Filter struct { // New creates a new filter which uses a bloom filter on blocks to figure out whether // a particular block is interesting or not. -func New(db ethdb.Database) *Filter { - return &Filter{db: db} +func New(apiBackend ethapi.Backend) *Filter { + return &Filter{ + apiBackend: apiBackend, + db: apiBackend.ChainDb(), + } } // SetBeginBlock sets the earliest block for filtering. 
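// The refactored filter API shown above (New, the Set* methods and Find) is
// driven entirely through the ethapi.Backend interface, so the same code path
// can serve both full nodes and the new light client. A minimal sketch of
// constructing and running one, written as if it lived inside the go-ethereum
// tree (internal/ethapi cannot be imported from outside it); the backend and
// address values are assumptions for the example:

package sketch

import (
	"golang.org/x/net/context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/filters"
	"github.com/ethereum/go-ethereum/internal/ethapi"
)

// headLogsFor returns the logs emitted by addr in the current head block;
// -1 for both bounds resolves to the latest block inside Find.
func headLogsFor(ctx context.Context, backend ethapi.Backend, addr common.Address) ([]filters.Log, error) {
	f := filters.New(backend)
	f.SetBeginBlock(-1)
	f.SetEndBlock(-1)
	f.SetAddresses([]common.Address{addr})
	f.SetTopics(nil)
	return f.Find(ctx)
}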
@@ -66,33 +74,30 @@ func (f *Filter) SetTopics(topics [][]common.Hash) { } // Run filters logs with the current parameters set -func (f *Filter) Find() []Log { - latestHash := core.GetHeadBlockHash(f.db) - latestBlock := core.GetBlock(f.db, latestHash, core.GetBlockNumber(f.db, latestHash)) - if latestBlock == nil { - return []Log{} - } +func (f *Filter) Find(ctx context.Context) ([]Log, error) { + head, _ := f.apiBackend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + headBlockNumber := head.Number.Uint64() var beginBlockNo uint64 = uint64(f.begin) if f.begin == -1 { - beginBlockNo = latestBlock.NumberU64() + beginBlockNo = headBlockNumber } - - endBlockNo := uint64(f.end) + var endBlockNo uint64 = uint64(f.end) if f.end == -1 { - endBlockNo = latestBlock.NumberU64() + endBlockNo = headBlockNumber } // if no addresses are present we can't make use of fast search which // uses the mipmap bloom filters to check for fast inclusion and uses // higher range probability in order to ensure at least a false positive - if len(f.addresses) == 0 { - return f.getLogs(beginBlockNo, endBlockNo) - } - return f.mipFind(beginBlockNo, endBlockNo, 0) + // if len(self.addresses) == 0 { + return f.getLogs(ctx, beginBlockNo, endBlockNo) + // } + // return self.mipFind(beginBlockNo, endBlockNo, 0) } -func (f *Filter) mipFind(start, end uint64, depth int) (logs []Log) { +/*func (self *Filter) mipFind(start, end uint64, depth int) (logs vm.Logs) { +>>>>>>> les: light client protocol and API level := core.MIPMapLevels[depth] // normalise numerator so we can work in level specific batches and // work with the proper range checks @@ -120,30 +125,24 @@ func (f *Filter) mipFind(start, end uint64, depth int) (logs []Log) { } return logs -} - -func (f *Filter) getLogs(start, end uint64) (logs []Log) { - var block *types.Block +}*/ +func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []Log, err error) { for i := start; i <= end; i++ { - hash := core.GetCanonicalHash(f.db, i) - if hash != (common.Hash{}) { - block = core.GetBlock(f.db, hash, i) - } else { // block not found - return logs - } - if block == nil { // block not found/written - return logs + header, err := f.apiBackend.HeaderByNumber(ctx, rpc.BlockNumber(i)) + if header == nil || err != nil { + return logs, err } // Use bloom filtering to see if this block is interesting given the // current parameters - if f.bloomFilter(block) { + if f.bloomFilter(header.Bloom) { // Get the logs of the block - var ( - receipts = core.GetBlockReceipts(f.db, block.Hash(), i) - unfiltered []Log - ) + receipts, err := f.apiBackend.GetReceipts(ctx, header.Hash()) + if err != nil { + return nil, err + } + var unfiltered []Log for _, receipt := range receipts { rl := make([]Log, len(receipt.Logs)) for i, l := range receipt.Logs { @@ -155,7 +154,7 @@ func (f *Filter) getLogs(start, end uint64) (logs []Log) { } } - return logs + return logs, nil } func includes(addresses []common.Address, a common.Address) bool { @@ -207,11 +206,11 @@ Logs: return ret } -func (f *Filter) bloomFilter(block *types.Block) bool { +func (f *Filter) bloomFilter(bloom types.Bloom) bool { if len(f.addresses) > 0 { var included bool for _, addr := range f.addresses { - if types.BloomLookup(block.Bloom(), addr) { + if types.BloomLookup(bloom, addr) { included = true break } @@ -225,7 +224,7 @@ func (f *Filter) bloomFilter(block *types.Block) bool { for _, sub := range f.topics { var included bool for _, topic := range sub { - if (topic == common.Hash{}) || types.BloomLookup(block.Bloom(), topic) 
{ + if (topic == common.Hash{}) || types.BloomLookup(bloom, topic) { included = true break } diff --git a/eth/gasprice/lightprice.go b/eth/gasprice/lightprice.go new file mode 100644 index 000000000000..ce075ff4ee9f --- /dev/null +++ b/eth/gasprice/lightprice.go @@ -0,0 +1,160 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package gasprice + +import ( + "math/big" + "sort" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/rpc" + "golang.org/x/net/context" +) + +const ( + LpoAvgCount = 5 + LpoMinCount = 3 + LpoMaxBlocks = 20 + LpoSelect = 50 + LpoDefaultPrice = 20000000000 +) + +// LightPriceOracle recommends gas prices based on the content of recent +// blocks. Suitable for both light and full clients. +type LightPriceOracle struct { + backend ethapi.Backend + lastHead common.Hash + lastPrice *big.Int + cacheLock sync.RWMutex + fetchLock sync.Mutex +} + +// NewLightPriceOracle returns a new oracle. +func NewLightPriceOracle(backend ethapi.Backend) *LightPriceOracle { + return &LightPriceOracle{ + backend: backend, + lastPrice: big.NewInt(LpoDefaultPrice), + } +} + +// SuggestPrice returns the recommended gas price. 
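+// It asks for the cheapest transaction of roughly the LpoAvgCount most recent
+// blocks, tolerating a limited number of empty blocks (LpoAvgCount-LpoMinCount)
+// and never looking back more than LpoMaxBlocks blocks. The collected per-block
+// minimum prices are sorted and the LpoSelect percentile (the median for
+// LpoSelect = 50) is returned; the result is cached per head hash.
+//
+// A minimal usage sketch (the backend variable is an assumption standing in for
+// any ethapi.Backend implementation; illustrative only, not part of this change):
+//
+//	gpo := gasprice.NewLightPriceOracle(backend)
+//	price, err := gpo.SuggestPrice(context.Background())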
+func (self *LightPriceOracle) SuggestPrice(ctx context.Context) (*big.Int, error) { + self.cacheLock.RLock() + lastHead := self.lastHead + lastPrice := self.lastPrice + self.cacheLock.RUnlock() + + head, _ := self.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + headHash := head.Hash() + if headHash == lastHead { + return lastPrice, nil + } + + self.fetchLock.Lock() + defer self.fetchLock.Unlock() + + // try checking the cache again, maybe the last fetch fetched what we need + self.cacheLock.RLock() + lastHead = self.lastHead + lastPrice = self.lastPrice + self.cacheLock.RUnlock() + if headHash == lastHead { + return lastPrice, nil + } + + blockNum := head.GetNumberU64() + chn := make(chan lpResult, LpoMaxBlocks) + sent := 0 + exp := 0 + var lps bigIntArray + for sent < LpoAvgCount && blockNum > 0 { + go self.getLowestPrice(ctx, blockNum, chn) + sent++ + exp++ + blockNum-- + } + maxEmpty := LpoAvgCount - LpoMinCount + for exp > 0 { + res := <-chn + if res.err != nil { + return nil, res.err + } + exp-- + if res.price != nil { + lps = append(lps, res.price) + } else { + if maxEmpty > 0 { + maxEmpty-- + } else { + if blockNum > 0 && sent < LpoMaxBlocks { + go self.getLowestPrice(ctx, blockNum, chn) + sent++ + exp++ + blockNum-- + } + } + } + } + price := lastPrice + if len(lps) > 0 { + sort.Sort(lps) + price = lps[(len(lps)-1)*LpoSelect/100] + } + + self.cacheLock.Lock() + self.lastHead = headHash + self.lastPrice = price + self.cacheLock.Unlock() + return price, nil +} + +type lpResult struct { + price *big.Int + err error +} + +// getLowestPrice calculates the lowest transaction gas price in a given block +// and sends it to the result channel. If the block is empty, price is nil. +func (self *LightPriceOracle) getLowestPrice(ctx context.Context, blockNum uint64, chn chan lpResult) { + block, err := self.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum)) + if block == nil { + chn <- lpResult{nil, err} + return + } + txs := block.Transactions() + if len(txs) == 0 { + chn <- lpResult{nil, nil} + return + } + // find smallest gasPrice + minPrice := txs[0].GasPrice() + for i := 1; i < len(txs); i++ { + price := txs[i].GasPrice() + if price.Cmp(minPrice) < 0 { + minPrice = price + } + } + chn <- lpResult{minPrice, nil} +} + +type bigIntArray []*big.Int + +func (s bigIntArray) Len() int { return len(s) } +func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 } +func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/eth/handler.go b/eth/handler.go index 886d89fd19ee..75140878862b 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -155,7 +155,7 @@ func NewProtocolManager(config *core.ChainConfig, fastSync bool, networkId int, return nil, errIncompatibleConfig } // Construct the different synchronisation mechanisms - manager.downloader = downloader.New(chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeaderByHash, + manager.downloader = downloader.New(downloader.FullSync, chaindb, manager.eventMux, blockchain.HasHeader, blockchain.HasBlockAndState, blockchain.GetHeaderByHash, blockchain.GetBlockByHash, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.FastSyncCommitHead, blockchain.GetTdByHash, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback, manager.removePeer) @@ -252,6 +252,10 @@ func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *p // handle is the callback invoked to manage the life 
cycle of an eth peer. When // this function terminates, the peer is disconnected. func (pm *ProtocolManager) handle(p *peer) error { + if pm.peers.Len() >= 20 { + return p2p.DiscTooManyPeers + } + glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) // Execute the Ethereum handshake diff --git a/ethdb/database.go b/ethdb/database.go index f93731cfea0d..a12ec4cebf77 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -39,13 +39,15 @@ var OpenFileLimit = 64 // cacheRatio specifies how the total allotted cache is distributed between the // various system databases. var cacheRatio = map[string]float64{ - "chaindata": 1.0, + "chaindata": 1.0, + "lightchaindata": 1.0, } // handleRatio specifies how the total allotted file descriptors is distributed // between the various system databases. var handleRatio = map[string]float64{ - "chaindata": 1.0, + "chaindata": 1.0, + "lightchaindata": 1.0, } type LDBDatabase struct { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index ca825ad89a73..0f4d11c0e44d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -317,14 +317,15 @@ func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI { // BlockNumber returns the block number of the chain head. func (s *PublicBlockChainAPI) BlockNumber() *big.Int { - return s.b.HeaderByNumber(rpc.LatestBlockNumber).Number + header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available + return header.Number } // GetBalance returns the amount of wei for the given address in the state of the // given block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta // block numbers are also allowed. func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*big.Int, error) { - state, _, err := s.b.StateAndHeaderByNumber(blockNr) + state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr) if state == nil || err != nil { return nil, err } @@ -409,7 +410,7 @@ func (s *PublicBlockChainAPI) GetUncleCountByBlockHash(ctx context.Context, bloc // GetCode returns the code stored at the given address in the state for the given block number. func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (string, error) { - state, _, err := s.b.StateAndHeaderByNumber(blockNr) + state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr) if state == nil || err != nil { return "", err } @@ -424,7 +425,7 @@ func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Addres // block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block // numbers are also allowed. func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNr rpc.BlockNumber) (string, error) { - state, _, err := s.b.StateAndHeaderByNumber(blockNr) + state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr) if state == nil || err != nil { return "0x", err } @@ -466,7 +467,7 @@ type CallArgs struct { } func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (string, *big.Int, error) { - state, header, err := s.b.StateAndHeaderByNumber(blockNr) + state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr) if state == nil || err != nil { return "0x", common.Big0, err } @@ -586,7 +587,7 @@ func FormatLogs(structLogs []vm.StructLog) []StructLogRes { // TraceCall executes a call and returns the amount of gas, created logs and optionally returned values. 
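+// Since the Backend state accessors now take a context (see the
+// internal/ethapi/backend.go change below), a light client resolves the required
+// headers and state trie nodes on demand while serving this call, and the caller
+// can bound or cancel that retrieval. A hedged sketch (ctx, args and api are
+// placeholder names, not part of this change):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	res, err := api.TraceCall(ctx, args, rpc.LatestBlockNumber)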
func (s *PublicBlockChainAPI) TraceCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (*ExecutionResult, error) { - state, header, err := s.b.StateAndHeaderByNumber(blockNr) + state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr) if state == nil || err != nil { return nil, err } @@ -825,7 +826,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByBlockHashAndIndex(ctx context // GetTransactionCount returns the number of transactions the given address has sent for the given block number func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*rpc.HexNumber, error) { - state, _, err := s.b.StateAndHeaderByNumber(blockNr) + state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr) if state == nil || err != nil { return nil, err } @@ -890,6 +891,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH // GetTransactionReceipt returns the transaction receipt for the given transaction hash. func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) { +//fmt.Println("API GetTransactionReceipt", txHash) receipt := core.GetReceipt(s.b.ChainDb(), txHash) if receipt == nil { glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex()) @@ -897,12 +899,14 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma } tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash) +//fmt.Println("getTransaction", err) if err != nil { glog.V(logger.Debug).Infof("%v\n", err) return nil, nil } txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash) +//fmt.Println("getTransactionBlockData", txBlock, blockIndex, index, err) if err != nil { glog.V(logger.Debug).Infof("%v\n", err) return nil, nil diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 791a06925600..8427b3e03e2c 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -44,9 +44,9 @@ type Backend interface { AccountManager() *accounts.Manager // BlockChain API SetHead(number uint64) - HeaderByNumber(blockNr rpc.BlockNumber) *types.Header + HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) - StateAndHeaderByNumber(blockNr rpc.BlockNumber) (State, *types.Header, error) + StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (State, *types.Header, error) GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetTd(blockHash common.Hash) *big.Int diff --git a/les/api_backend.go b/les/api_backend.go new file mode 100644 index 000000000000..f17e0cc911c4 --- /dev/null +++ b/les/api_backend.go @@ -0,0 +1,144 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package les + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/light" + rpc "github.com/ethereum/go-ethereum/rpc" + "golang.org/x/net/context" +) + +type LesApiBackend struct { + eth *LightEthereum + gpo *gasprice.LightPriceOracle +} + +func (b *LesApiBackend) SetHead(number uint64) { + b.eth.blockchain.SetHead(number) +} + +func (b *LesApiBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { + if blockNr == rpc.LatestBlockNumber || blockNr == rpc.PendingBlockNumber { + return b.eth.blockchain.CurrentHeader(), nil + } + + return b.eth.blockchain.GetHeaderByNumberOdr(ctx, uint64(blockNr)) +} + +func (b *LesApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) { + header, err := b.HeaderByNumber(ctx, blockNr) + if header == nil || err != nil { + return nil, err + } + return b.GetBlock(ctx, header.Hash()) +} + +func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (ethapi.State, *types.Header, error) { + header, err := b.HeaderByNumber(ctx, blockNr) + if header == nil || err != nil { + return nil, nil, err + } + return light.NewLightState(light.StateTrieID(header), b.eth.odr), header, nil +} + +func (b *LesApiBackend) GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error) { + return b.eth.blockchain.GetBlockByHash(ctx, blockHash) +} + +func (b *LesApiBackend) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) { + return light.GetBlockReceipts(ctx, b.eth.odr, blockHash, core.GetBlockNumber(b.eth.chainDb, blockHash)) +} + +func (b *LesApiBackend) GetTd(blockHash common.Hash) *big.Int { + return b.eth.blockchain.GetTdByHash(blockHash) +} + +func (b *LesApiBackend) GetVMEnv(ctx context.Context, msg core.Message, state ethapi.State, header *types.Header) (vm.Environment, func() error, error) { + stateDb := state.(*light.LightState).Copy() + addr, _ := msg.From() + from, err := stateDb.GetOrNewStateObject(ctx, addr) + if err != nil { + return nil, nil, err + } + from.SetBalance(common.MaxBig) + env := light.NewEnv(ctx, stateDb, b.eth.chainConfig, b.eth.blockchain, msg, header, b.eth.chainConfig.VmConfig) + return env, env.Error, nil +} + +func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { + return b.eth.txPool.Add(ctx, signedTx) +} + +func (b *LesApiBackend) RemoveTx(txHash common.Hash) { + b.eth.txPool.RemoveTx(txHash) +} + +func (b *LesApiBackend) GetPoolTransactions() types.Transactions { + return b.eth.txPool.GetTransactions() +} + +func (b *LesApiBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { + return b.eth.txPool.GetTransaction(txHash) +} + +func (b *LesApiBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) { + return b.eth.txPool.GetNonce(ctx, addr) +} + +func (b *LesApiBackend) Stats() (pending int, queued int) { + return b.eth.txPool.Stats(), 0 +} + +func (b 
*LesApiBackend) TxPoolContent() (map[common.Address]map[uint64][]*types.Transaction, map[common.Address]map[uint64][]*types.Transaction) { + return b.eth.txPool.Content() +} + +func (b *LesApiBackend) Downloader() *downloader.Downloader { + return b.eth.Downloader() +} + +func (b *LesApiBackend) ProtocolVersion() int { + return b.eth.LesVersion() + 10000 +} + +func (b *LesApiBackend) SuggestPrice(ctx context.Context) (*big.Int, error) { + return b.gpo.SuggestPrice(ctx) +} + +func (b *LesApiBackend) ChainDb() ethdb.Database { + return b.eth.chainDb +} + +func (b *LesApiBackend) EventMux() *event.TypeMux { + return b.eth.eventMux +} + +func (b *LesApiBackend) AccountManager() *accounts.Manager { + return b.eth.accountManager +} diff --git a/les/backend.go b/les/backend.go new file mode 100644 index 000000000000..bd091d43c89b --- /dev/null +++ b/les/backend.go @@ -0,0 +1,190 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package les implements the Light Ethereum Subprotocol. +package les + +import ( + "errors" + "fmt" + "time" + + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common/compiler" + "github.com/ethereum/go-ethereum/common/httpclient" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/filters" + "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + rpc "github.com/ethereum/go-ethereum/rpc" +) + +type LightEthereum struct { + odr *LesOdr + relay *LesTxRelay + chainConfig *core.ChainConfig + // Channel for shutting down the service + shutdownChan chan bool + // Handlers + txPool *light.TxPool + blockchain *light.LightChain + protocolManager *ProtocolManager + // DB interfaces + chainDb ethdb.Database // Block chain database + + ApiBackend *LesApiBackend + + eventMux *event.TypeMux + pow *ethash.Ethash + httpclient *httpclient.HTTPClient + accountManager *accounts.Manager + solcPath string + solc *compiler.Solidity + + NatSpec bool + PowTest bool + netVersionId int + netRPCService *ethapi.PublicNetAPI +} + +func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { + chainDb, err := eth.CreateDB(ctx, config, "lightchaindata") + if err != nil { + return nil, err + } + if err := eth.SetupGenesisBlock(&chainDb, config); err != nil { + return nil, err + } + pow, err := eth.CreatePoW(config) + if err != nil { + return nil, 
err + } + + odr := NewLesOdr(chainDb) + relay := NewLesTxRelay() + eth := &LightEthereum{ + odr: odr, + relay: relay, + chainDb: chainDb, + eventMux: ctx.EventMux, + accountManager: ctx.AccountManager, + pow: pow, + shutdownChan: make(chan bool), + httpclient: httpclient.New(config.DocRoot), + netVersionId: config.NetworkId, + NatSpec: config.NatSpec, + PowTest: config.PowTest, + solcPath: config.SolcPath, + } + + if config.ChainConfig == nil { + return nil, errors.New("missing chain config") + } + eth.chainConfig = config.ChainConfig + eth.chainConfig.VmConfig = vm.Config{ + EnableJit: config.EnableJit, + ForceJit: config.ForceJit, + } + eth.blockchain, err = light.NewLightChain(odr, eth.chainConfig, eth.pow, eth.eventMux) + if err != nil { + if err == core.ErrNoGenesis { + return nil, fmt.Errorf(`Genesis block not found. Please supply a genesis block with the "--genesis /path/to/file" argument`) + } + return nil, err + } + + eth.txPool = light.NewTxPool(eth.chainConfig, eth.eventMux, eth.blockchain, eth.relay) + if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.pow, eth.blockchain, nil, chainDb, odr, relay); err != nil { + return nil, err + } + + eth.ApiBackend = &LesApiBackend{eth, nil} + eth.ApiBackend.gpo = gasprice.NewLightPriceOracle(eth.ApiBackend) + return eth, nil +} + +// APIs returns the collection of RPC services the ethereum package offers. +// NOTE, some of these services probably need to be moved to somewhere else. +func (s *LightEthereum) APIs() []rpc.API { + return append(ethapi.GetAPIs(s.ApiBackend, s.solcPath), []rpc.API{ + { + Namespace: "eth", + Version: "1.0", + Service: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux), + Public: true, + }, { + Namespace: "eth", + Version: "1.0", + Service: filters.NewPublicFilterAPI(s.ApiBackend), + Public: true, + }, { + Namespace: "net", + Version: "1.0", + Service: s.netRPCService, + Public: true, + }, + }...) +} + +func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) { + s.blockchain.ResetWithGenesisBlock(gb) +} + +func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain } +func (s *LightEthereum) TxPool() *light.TxPool { return s.txPool } +func (s *LightEthereum) LesVersion() int { return int(s.protocolManager.SubProtocols[0].Version) } +func (s *LightEthereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader } + +// Protocols implements node.Service, returning all the currently configured +// network protocols to start. +func (s *LightEthereum) Protocols() []p2p.Protocol { + return s.protocolManager.SubProtocols +} + +// Start implements node.Service, starting all internal goroutines needed by the +// Ethereum protocol implementation. +func (s *LightEthereum) Start(srvr *p2p.Server) error { + s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId) + s.protocolManager.Start() + return nil +} + +// Stop implements node.Service, terminating all internal goroutines used by the +// Ethereum protocol. 
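+//
+// For context, a LightEthereum instance follows the usual node.Service life cycle:
+// it is built by New, exposed to devp2p via Protocols/Start, and torn down here.
+// A hedged registration sketch (stack and cfg are assumed variables, not code from
+// this change):
+//
+//	stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
+//		return les.New(ctx, cfg)
+//	})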
+func (s *LightEthereum) Stop() error { + s.odr.Stop() + s.blockchain.Stop() + s.protocolManager.Stop() + s.txPool.Stop() + + s.eventMux.Stop() + + time.Sleep(time.Millisecond * 200) + s.chainDb.Close() + close(s.shutdownChan) + + return nil +} diff --git a/les/fetcher.go b/les/fetcher.go new file mode 100644 index 000000000000..bf9f010fb62b --- /dev/null +++ b/les/fetcher.go @@ -0,0 +1,283 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package les implements the Light Ethereum Subprotocol. +package les + +import ( +//"fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" +) + +type lightFetcher struct { + pm *ProtocolManager + odr *LesOdr + chain BlockChain + + headAnnouncedMu sync.Mutex + headAnnouncedBy map[common.Hash][]*peer + currentTd *big.Int + deliverChn chan fetchResponse + reqMu sync.RWMutex + requested map[uint64]fetchRequest + timeoutChn chan uint64 + notifyChn chan bool // true if initiated from outside + syncing bool + syncDone chan struct{} +} + +type fetchRequest struct { + hash common.Hash + amount uint64 + peer *peer +} + +type fetchResponse struct { + reqID uint64 + headers []*types.Header +} + +func newLightFetcher(pm *ProtocolManager) *lightFetcher { + f := &lightFetcher{ + pm: pm, + chain: pm.blockchain, + odr: pm.odr, + headAnnouncedBy: make(map[common.Hash][]*peer), + deliverChn: make(chan fetchResponse, 100), + requested: make(map[uint64]fetchRequest), + timeoutChn: make(chan uint64), + notifyChn: make(chan bool, 100), + syncDone: make(chan struct{}), + currentTd: big.NewInt(0), + } + go f.syncLoop() + return f +} + +func (f *lightFetcher) notify(p *peer, head *newBlockHashData) { + var headHash common.Hash + if head == nil { + // initial notify + headHash = p.Head() + } else { + if !p.addNotify(head) { +//fmt.Println("addNotify fail") + f.pm.removePeer(p.id) + } + headHash = head.Hash + } + f.headAnnouncedMu.Lock() + f.headAnnouncedBy[headHash] = append(f.headAnnouncedBy[headHash], p) + f.headAnnouncedMu.Unlock() + f.notifyChn <- true +} + +func (f *lightFetcher) gotHeader(header *types.Header) { + f.headAnnouncedMu.Lock() + defer f.headAnnouncedMu.Unlock() + + hash := header.Hash() + peerList := f.headAnnouncedBy[hash] + if peerList == nil { + return + } + number := header.GetNumberU64() + td := core.GetTd(f.pm.chainDb, hash, number) + for _, peer := range peerList { + peer.lock.Lock() + ok := peer.gotHeader(hash, number, td) + peer.lock.Unlock() + if !ok { +//fmt.Println("gotHeader fail") + f.pm.removePeer(peer.id) + } + } + delete(f.headAnnouncedBy, hash) +} + +func (f *lightFetcher) nextRequest() (*peer, *newBlockHashData) { + var bestPeer *peer + bestTd := f.currentTd + for _, peer := range f.pm.peers.AllPeers() { + 
peer.lock.RLock() + if !peer.headInfo.requested && (peer.headInfo.Td.Cmp(bestTd) > 0 || + (bestPeer != nil && peer.headInfo.Td.Cmp(bestTd) == 0 && peer.headInfo.haveHeaders > bestPeer.headInfo.haveHeaders)) { + bestPeer = peer + bestTd = peer.headInfo.Td + } + peer.lock.RUnlock() + } + if bestPeer == nil { + return nil, nil + } + bestPeer.lock.Lock() + res := bestPeer.headInfo + res.requested = true + bestPeer.lock.Unlock() + return bestPeer, res +} + +func (f *lightFetcher) deliverHeaders(reqID uint64, headers []*types.Header) { + f.deliverChn <- fetchResponse{reqID: reqID, headers: headers} +} + +func (f *lightFetcher) requestedID(reqID uint64) bool { + f.reqMu.RLock() + _, ok := f.requested[reqID] + f.reqMu.RUnlock() + return ok +} + +func (f *lightFetcher) request(p *peer, block *newBlockHashData) { +//fmt.Println("request", block.Number, block.haveHeaders) + amount := block.Number-block.haveHeaders + if amount == 0 { + return + } + if amount > 100 { + f.syncing = true + go func() { +//fmt.Println("f.pm.synchronise(p)") + f.pm.synchronise(p) +//fmt.Println("sync done") + f.syncDone <- struct{}{} + }() + return + } + + reqID := f.odr.getNextReqID() + f.reqMu.Lock() + f.requested[reqID] = fetchRequest{hash: block.Hash, amount: amount, peer: p} + f.reqMu.Unlock() + cost := p.GetRequestCost(GetBlockHeadersMsg, int(amount)) + p.fcServer.SendRequest(reqID, cost) + go p.RequestHeadersByHash(reqID, cost, block.Hash, int(amount), 0, true) + go func() { + time.Sleep(hardRequestTimeout) + f.timeoutChn <- reqID + }() +} + +func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool { + if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash { + return false + } + headers := make([]*types.Header, req.amount) + for i, header := range resp.headers { + headers[int(req.amount)-1-i] = header + } + if _, err := f.chain.InsertHeaderChain(headers, 1); err != nil { + return false + } + for _, header := range headers { + td := core.GetTd(f.pm.chainDb, header.Hash(), header.GetNumberU64()) + if td == nil { + return false + } + if td.Cmp(f.currentTd) > 0 { + f.currentTd = td + } + f.gotHeader(header) + } + return true +} + +func (f *lightFetcher) checkSyncedHeaders() { +//fmt.Println("checkSyncedHeaders()") + for _, peer := range f.pm.peers.AllPeers() { + peer.lock.Lock() + h := peer.firstHeadInfo + remove := false +loop: + for h != nil { + if td := core.GetTd(f.pm.chainDb, h.Hash, h.Number); td != nil { +//fmt.Println(" found", h.Number) + ok := peer.gotHeader(h.Hash, h.Number, td) + if !ok { + remove = true + break loop + } + if td.Cmp(f.currentTd) > 0 { + f.currentTd = td + } + } + h = h.next + } + peer.lock.Unlock() + if remove { +//fmt.Println("checkSync fail") + f.pm.removePeer(peer.id) + } + } +} + +func (f *lightFetcher) syncLoop() { + f.pm.wg.Add(1) + defer f.pm.wg.Done() + + srtoNotify := false + for { + select { + case <-f.pm.quitSync: + return + case ext := <-f.notifyChn: +//fmt.Println("<-f.notifyChn", f.syncing, ext, srtoNotify) + s := srtoNotify + srtoNotify = false + if !f.syncing && !(ext && s) { + if p, r := f.nextRequest(); r != nil { + srtoNotify = true + go func() { + time.Sleep(softRequestTimeout) + f.notifyChn <- false + }() + f.request(p, r) + } + } + case reqID := <-f.timeoutChn: + f.reqMu.Lock() + req, ok := f.requested[reqID] + if ok { + delete(f.requested, reqID) + } + f.reqMu.Unlock() + if ok { +//fmt.Println("hard timeout") + f.pm.removePeer(req.peer.id) + } + case resp := <-f.deliverChn: +//fmt.Println("<-f.deliverChn", f.syncing) + 
f.reqMu.Lock() + req, ok := f.requested[resp.reqID] + delete(f.requested, resp.reqID) + f.reqMu.Unlock() + if !ok || !(f.syncing || f.processResponse(req, resp)) { +//fmt.Println("processResponse fail") + f.pm.removePeer(req.peer.id) + } + case <-f.syncDone: +//fmt.Println("<-f.syncDone", f.syncing) + f.checkSyncedHeaders() + f.syncing = false + } + } +} diff --git a/les/flowcontrol/control.go b/les/flowcontrol/control.go new file mode 100644 index 000000000000..1b569db0bc28 --- /dev/null +++ b/les/flowcontrol/control.go @@ -0,0 +1,172 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package flowcontrol implements a client side flow control mechanism +package flowcontrol + +import ( + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" +) + +const fcTimeConst = 1000000 + +type ServerParams struct { + BufLimit, MinRecharge uint64 +} + +type ClientNode struct { + params *ServerParams + bufValue uint64 + lastTime int64 + lock sync.Mutex + cm *ClientManager + cmNode *cmNode +} + +func NewClientNode(cm *ClientManager, params *ServerParams) *ClientNode { + node := &ClientNode{ + cm: cm, + params: params, + bufValue: params.BufLimit, + lastTime: getTime(), + } + node.cmNode = cm.addNode(node) + return node +} + +func (peer *ClientNode) Remove(cm *ClientManager) { + cm.removeNode(peer.cmNode) +} + +func (peer *ClientNode) recalcBV(time int64) { + dt := uint64(time - peer.lastTime) + if time < peer.lastTime { + dt = 0 + } + peer.bufValue += peer.params.MinRecharge * dt / fcTimeConst + if peer.bufValue > peer.params.BufLimit { + peer.bufValue = peer.params.BufLimit + } + peer.lastTime = time +} + +func (peer *ClientNode) AcceptRequest() (uint64, bool) { + peer.lock.Lock() + defer peer.lock.Unlock() + + time := getTime() + peer.recalcBV(time) + return peer.bufValue, peer.cm.accept(peer.cmNode, time) +} + +func (peer *ClientNode) RequestProcessed(cost uint64) (bv, realCost uint64) { + peer.lock.Lock() + defer peer.lock.Unlock() + + time := getTime() + peer.recalcBV(time) + peer.bufValue -= cost + peer.recalcBV(time) + rcValue, rcost := peer.cm.processed(peer.cmNode, time) + if rcValue < peer.params.BufLimit { + bv := peer.params.BufLimit - rcValue + if bv > peer.bufValue { + peer.bufValue = bv + } + } + return peer.bufValue, rcost +} + +type ServerNode struct { + bufEstimate uint64 + lastTime int64 + params *ServerParams + sumCost uint64 // sum of req costs sent to this server + pending map[uint64]uint64 // value = sumCost after sending the given req + lock sync.Mutex +} + +func NewServerNode(params *ServerParams) *ServerNode { + return &ServerNode{ + bufEstimate: params.BufLimit, + lastTime: getTime(), + params: params, + pending: make(map[uint64]uint64), + } +} + +func getTime() int64 { + return int64(mclock.Now()) +} + +func (peer *ServerNode) recalcBLE(time 
int64) { + dt := uint64(time - peer.lastTime) + if time < peer.lastTime { + dt = 0 + } + peer.bufEstimate += peer.params.MinRecharge * dt / fcTimeConst + if peer.bufEstimate > peer.params.BufLimit { + peer.bufEstimate = peer.params.BufLimit + } + peer.lastTime = time +} + +func (peer *ServerNode) canSend(maxCost uint64) uint64 { + if peer.bufEstimate >= maxCost { + return 0 + } + return (maxCost - peer.bufEstimate) * fcTimeConst / peer.params.MinRecharge +} + +func (peer *ServerNode) CanSend(maxCost uint64) uint64 { + peer.lock.Lock() + defer peer.lock.Unlock() + + return peer.canSend(maxCost) +} + +// blocks until request can be sent +func (peer *ServerNode) SendRequest(reqID, maxCost uint64) { + peer.lock.Lock() + defer peer.lock.Unlock() + + peer.recalcBLE(getTime()) + for peer.bufEstimate < maxCost { + time.Sleep(time.Duration(peer.canSend(maxCost))) + peer.recalcBLE(getTime()) + } + peer.bufEstimate -= maxCost + peer.sumCost += maxCost + if reqID >= 0 { + peer.pending[reqID] = peer.sumCost + } +} + +func (peer *ServerNode) GotReply(reqID, bv uint64) { + peer.lock.Lock() + defer peer.lock.Unlock() + + sc, ok := peer.pending[reqID] + if !ok { + return + } + delete(peer.pending, reqID) + peer.bufEstimate = bv - (peer.sumCost - sc) + peer.lastTime = getTime() +} diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go new file mode 100644 index 000000000000..c0469e7b66a6 --- /dev/null +++ b/les/flowcontrol/manager.go @@ -0,0 +1,223 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
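+
+// A hedged sketch of how the flow control pieces fit together; the constructor
+// arguments, cost figures and variable names are assumptions, while the call
+// pattern mirrors les/handler.go:
+//
+//	// Server side: one ClientManager overall, one ClientNode per connected client.
+//	cm := flowcontrol.NewClientManager(50, 10, 1000000)
+//	client := flowcontrol.NewClientNode(cm, params)
+//	if _, ok := client.AcceptRequest(); ok {
+//		// ... serve the request ...
+//		client.RequestProcessed(cost)
+//	}
+//
+//	// Client side: one ServerNode per server, mirroring the server's buffer estimate.
+//	srv := flowcontrol.NewServerNode(params)
+//	srv.SendRequest(reqID, maxCost) // blocks until the estimated buffer allows sending
+//	// ... and when the reply carrying the new buffer value bv arrives:
+//	srv.GotReply(reqID, bv)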
+ +// Package flowcontrol implements a client side flow control mechanism +package flowcontrol + +import ( + "sync" + "time" +) + +const rcConst = 1000000 + +type cmNode struct { + node *ClientNode + lastUpdate int64 + reqAccepted int64 + serving, recharging bool + rcWeight uint64 + rcValue, rcDelta int64 + finishRecharge, startValue int64 +} + +func (node *cmNode) update(time int64) { + dt := time - node.lastUpdate + node.rcValue += node.rcDelta * dt / rcConst + node.lastUpdate = time + if node.recharging && time >= node.finishRecharge { + node.recharging = false + node.rcDelta = 0 + node.rcValue = 0 + } +} + +func (node *cmNode) set(serving bool, simReqCnt, sumWeight uint64) { + if node.serving && !serving { + node.recharging = true + sumWeight += node.rcWeight + } + node.serving = serving + if node.recharging && serving { + node.recharging = false + sumWeight -= node.rcWeight + } + + node.rcDelta = 0 + if serving { + node.rcDelta = int64(rcConst / simReqCnt) + } + if node.recharging { + node.rcDelta = -int64(node.node.cm.rcRecharge * node.rcWeight / sumWeight) + node.finishRecharge = node.lastUpdate + node.rcValue*rcConst/(-node.rcDelta) + } +} + +type ClientManager struct { + lock sync.Mutex + nodes map[*cmNode]struct{} + simReqCnt, sumWeight, rcSumValue uint64 + maxSimReq, maxRcSum uint64 + rcRecharge uint64 + resumeQueue chan chan bool + time int64 +} + +func NewClientManager(rcTarget, maxSimReq, maxRcSum uint64) *ClientManager { + cm := &ClientManager{ + nodes: make(map[*cmNode]struct{}), + resumeQueue: make(chan chan bool), + rcRecharge: rcConst * rcConst / (100*rcConst/rcTarget - rcConst), + maxSimReq: maxSimReq, + maxRcSum: maxRcSum, + } + go cm.queueProc() + return cm +} + +func (self *ClientManager) Stop() { + self.lock.Lock() + defer self.lock.Unlock() + + // signal any waiting accept routines to return false + self.nodes = make(map[*cmNode]struct{}) + close(self.resumeQueue) +} + +func (self *ClientManager) addNode(cnode *ClientNode) *cmNode { + time := getTime() + node := &cmNode{ + node: cnode, + lastUpdate: time, + finishRecharge: time, + rcWeight: 1, + } + self.lock.Lock() + defer self.lock.Unlock() + + self.nodes[node] = struct{}{} + self.update(getTime()) + return node +} + +func (self *ClientManager) removeNode(node *cmNode) { + self.lock.Lock() + defer self.lock.Unlock() + + time := getTime() + self.stop(node, time) + delete(self.nodes, node) + self.update(time) +} + +// recalc sumWeight +func (self *ClientManager) updateNodes(time int64) (rce bool) { + var sumWeight, rcSum uint64 + for node, _ := range self.nodes { + rc := node.recharging + node.update(time) + if rc && !node.recharging { + rce = true + } + if node.recharging { + sumWeight += node.rcWeight + } + rcSum += uint64(node.rcValue) + } + self.sumWeight = sumWeight + self.rcSumValue = rcSum + return +} + +func (self *ClientManager) update(time int64) { + for { + firstTime := time + for node, _ := range self.nodes { + if node.recharging && node.finishRecharge < firstTime { + firstTime = node.finishRecharge + } + } + if self.updateNodes(firstTime) { + for node, _ := range self.nodes { + if node.recharging { + node.set(node.serving, self.simReqCnt, self.sumWeight) + } + } + } else { + self.time = time + return + } + } +} + +func (self *ClientManager) canStartReq() bool { + return self.simReqCnt < self.maxSimReq && self.rcSumValue < self.maxRcSum +} + +func (self *ClientManager) queueProc() { + for rc := range self.resumeQueue { + for { + time.Sleep(time.Millisecond * 10) + self.lock.Lock() + 
self.update(getTime()) + cs := self.canStartReq() + self.lock.Unlock() + if cs { + break + } + } + close(rc) + } +} + +func (self *ClientManager) accept(node *cmNode, time int64) bool { + self.lock.Lock() + defer self.lock.Unlock() + + self.update(time) + if !self.canStartReq() { + resume := make(chan bool) + self.lock.Unlock() + self.resumeQueue <- resume + <-resume + self.lock.Lock() + if _, ok := self.nodes[node]; !ok { + return false // reject if node has been removed or manager has been stopped + } + } + self.simReqCnt++ + node.set(true, self.simReqCnt, self.sumWeight) + node.startValue = node.rcValue + self.update(self.time) + return true +} + +func (self *ClientManager) stop(node *cmNode, time int64) { + if node.serving { + self.update(time) + self.simReqCnt-- + node.set(false, self.simReqCnt, self.sumWeight) + self.update(time) + } +} + +func (self *ClientManager) processed(node *cmNode, time int64) (rcValue, rcCost uint64) { + self.lock.Lock() + defer self.lock.Unlock() + + self.stop(node, time) + return uint64(node.rcValue), uint64(node.rcValue - node.startValue) +} diff --git a/les/handler.go b/les/handler.go new file mode 100644 index 000000000000..ef64e25fbad1 --- /dev/null +++ b/les/handler.go @@ -0,0 +1,853 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package les implements the Light Ethereum Subprotocol. +package les + +import ( + "encoding/binary" + "errors" + "fmt" + "math/big" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/pow" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +const ( + softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data. 
+ estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header + + ethVersion = 63 // equivalent eth version for the downloader + + MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request + MaxBodyFetch = 32 // Amount of block bodies to be fetched per retrieval request + MaxReceiptFetch = 128 // Amount of transaction receipts to allow fetching per request + MaxCodeFetch = 64 // Amount of contract codes to allow fetching per request + MaxProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request + MaxHeaderProofsFetch = 64 // Amount of header merkle proofs to be fetched per retrieval request + MaxTxSend = 64 // Amount of transactions to be sent per request + + disableClientRemovePeer = true +) + +// errIncompatibleConfig is returned if the requested protocols and configs are +// not compatible (low protocol version restrictions and high requirements). +var errIncompatibleConfig = errors.New("incompatible configuration") + +func errResp(code errCode, format string, v ...interface{}) error { + return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) +} + +type hashFetcherFn func(common.Hash) error + +type BlockChain interface { + HasHeader(hash common.Hash) bool + GetHeader(hash common.Hash, number uint64) *types.Header + GetHeaderByHash(hash common.Hash) *types.Header + CurrentHeader() *types.Header + GetTdByHash(hash common.Hash) *big.Int + InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) + Rollback(chain []common.Hash) + Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) + GetHeaderByNumber(number uint64) *types.Header + GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash + LastBlockHash() common.Hash + Genesis() *types.Block +} + +type txPool interface { + // AddTransactions should add the given transactions to the pool. + AddTransactions([]*types.Transaction) +} + +type ProtocolManager struct { + lightSync bool + txpool txPool + txrelay *LesTxRelay + networkId int + chainConfig *core.ChainConfig + blockchain BlockChain + chainDb ethdb.Database + odr *LesOdr + server *LesServer + + downloader *downloader.Downloader + fetcher *lightFetcher + peers *peerSet + + SubProtocols []p2p.Protocol + + eventMux *event.TypeMux + + // channels for fetcher, syncer, txsyncLoop + newPeerCh chan *peer + quitSync chan struct{} + noMorePeers chan struct{} + + syncMu sync.Mutex + syncing bool + syncDone chan struct{} + + // wait group is used for graceful shutdowns during downloading + // and processing + wg sync.WaitGroup +} + +// NewProtocolManager returns a new les sub protocol manager. The les sub protocol +// manages peers capable of communicating with the ethereum network via the light +// client protocol. 
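+// As wired up by les.New in les/backend.go (part of this change), a light client
+// passes lightSync=true, a nil txPool and the ODR and transaction relay helpers;
+// the variable names below are placeholders:
+//
+//	pm, err := NewProtocolManager(chainConfig, true, networkId, eventMux, pow,
+//		lightChain, nil, chainDb, odr, relay)
+//	if err == nil {
+//		pm.Start()
+//	}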
+func NewProtocolManager(chainConfig *core.ChainConfig, lightSync bool, networkId int, mux *event.TypeMux, pow pow.PoW, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay) (*ProtocolManager, error) { + // Create the protocol manager with the base fields + manager := &ProtocolManager{ + lightSync: lightSync, + eventMux: mux, + blockchain: blockchain, + chainConfig: chainConfig, + chainDb: chainDb, + networkId: networkId, + txpool: txpool, + txrelay: txrelay, + odr: odr, + peers: newPeerSet(), + newPeerCh: make(chan *peer), + quitSync: make(chan struct{}), + noMorePeers: make(chan struct{}), + } + // Initiate a sub-protocol for every implemented version we can handle + manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions)) + for i, version := range ProtocolVersions { + // Compatible, initialize the sub-protocol + version := version // Closure for the run + manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{ + Name: "les", + Version: version, + Length: ProtocolLengths[i], + Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { + peer := manager.newPeer(int(version), networkId, p, rw) + select { + case manager.newPeerCh <- peer: + manager.wg.Add(1) + defer manager.wg.Done() + return manager.handle(peer) + case <-manager.quitSync: + return p2p.DiscQuitting + } + }, + NodeInfo: func() interface{} { + return manager.NodeInfo() + }, + PeerInfo: func(id discover.NodeID) interface{} { + if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { + return p.Info() + } + return nil + }, + }) + } + if len(manager.SubProtocols) == 0 { + return nil, errIncompatibleConfig + } + + removePeer := manager.removePeer + if disableClientRemovePeer { + removePeer = func(id string) {} + } + + if lightSync { + glog.V(logger.Debug).Infof("LES: create downloader") + manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash, + nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash, + blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer) + manager.fetcher = newLightFetcher(manager) + } + + if odr != nil { + odr.removePeer = removePeer + } + + /*validator := func(block *types.Block, parent *types.Block) error { + return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) + } + heighter := func() uint64 { + return chainman.LastBlockNumberU64() + } + manager.fetcher = fetcher.New(chainman.GetBlockNoOdr, validator, nil, heighter, chainman.InsertChain, manager.removePeer) + */ + return manager, nil +} + +func (pm *ProtocolManager) removePeer(id string) { + // Short circuit if the peer was already removed + peer := pm.peers.Peer(id) + if peer == nil { + return + } + glog.V(logger.Debug).Infoln("Removing peer", id) + + // Unregister the peer from the downloader and Ethereum peer set + glog.V(logger.Debug).Infof("LES: unregister peer %v", id) + if pm.lightSync { + pm.downloader.UnregisterPeer(id) + pm.odr.UnregisterPeer(peer) + if pm.txrelay != nil { + pm.txrelay.removePeer(id) + } + } + if err := pm.peers.Unregister(id); err != nil { + glog.V(logger.Error).Infoln("Removal failed:", err) + } + // Hard disconnect at the networking layer + if peer != nil { + peer.Peer.Disconnect(p2p.DiscUselessPeer) + } +} + +func (pm *ProtocolManager) Start() { + if pm.lightSync { + // start sync handler + go pm.syncer() + } else { + go func() { + for range pm.newPeerCh { + } + }() + } +} + +func (pm *ProtocolManager) Stop() { + // 
Show a log message. During download / processing this could actually + // take between 5 and 10 seconds, therefore feedback is required. + glog.V(logger.Info).Infoln("Stopping light ethereum protocol handler...") + + // Quit the sync loop. + // After this send has completed, no new peers will be accepted. + pm.noMorePeers <- struct{}{} + + close(pm.quitSync) // quits syncer, fetcher + + // Disconnect existing sessions. + // This also closes the gate for any new registrations on the peer set. + // sessions which are already established but not added to pm.peers yet + // will exit when they try to register. + pm.peers.Close() + + // Wait for any remaining handler goroutines to finish + pm.wg.Wait() + + glog.V(logger.Info).Infoln("Light ethereum protocol handler stopped") +} + +func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { + return newPeer(pv, nv, p, newMeteredMsgWriter(rw)) +} + +// handle is the callback invoked to manage the life cycle of a les peer. When +// this function terminates, the peer is disconnected. +func (pm *ProtocolManager) handle(p *peer) error { + glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) + + // Execute the LES handshake + td, head, genesis := pm.blockchain.Status() + headNum := core.GetBlockNumber(pm.chainDb, head) + if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil { + glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err) + return err + } + if rw, ok := p.rw.(*meteredMsgReadWriter); ok { + rw.Init(p.version) + } + // Register the peer locally + glog.V(logger.Detail).Infof("%v: adding peer", p) + if err := pm.peers.Register(p); err != nil { + glog.V(logger.Error).Infof("%v: addition failed: %v", p, err) + return err + } + defer func() { + if pm.server != nil && pm.server.fcManager != nil && p.fcClient != nil { + p.fcClient.Remove(pm.server.fcManager) + } + pm.removePeer(p.id) + }() + + // Register the peer in the downloader. If the downloader considers it banned, we disconnect + glog.V(logger.Debug).Infof("LES: register peer %v", p.id) + if pm.lightSync { + requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error { + reqID := pm.odr.getNextReqID() + cost := p.GetRequestCost(GetBlockHeadersMsg, amount) + p.fcServer.SendRequest(reqID, cost) + return p.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse) + } + requestHeadersByNumber := func(origin uint64, amount int, skip int, reverse bool) error { + reqID := pm.odr.getNextReqID() + cost := p.GetRequestCost(GetBlockHeadersMsg, amount) + p.fcServer.SendRequest(reqID, cost) + return p.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse) + } + if err := pm.downloader.RegisterPeer(p.id, ethVersion, p.HeadAndTd, + requestHeadersByHash, requestHeadersByNumber, nil, nil, nil); err != nil { + return err + } + pm.odr.RegisterPeer(p) + if pm.txrelay != nil { + pm.txrelay.addPeer(p) + } + + pm.fetcher.notify(p, nil) + } + + stop := make(chan struct{}) + defer close(stop) + go func() { + // new block announce loop + for { + select { + case announce := <-p.newBlockHashChn: + p.SendNewBlockHash(announce) + //fmt.Println(" BROADCAST sent") + case <-stop: + return + } + } + }() + + // main loop. handle incoming messages. 
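+ // Every request/reply pair goes through the same flow control accounting; a
+ // simplified sketch of the pattern used by the switch cases in handleMsg below
+ // (cnt stands for the per-request item count):
+ //
+ //	if bv, ok := p.fcClient.AcceptRequest(); !ok || bv < costs.baseCost {
+ //		return errResp(ErrRequestRejected, "")
+ //	}
+ //	// ... decode and serve the request ...
+ //	bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + cnt*costs.reqCost)
+ //	pm.server.fcCostStats.update(msg.Code, cnt, rcost)
+ //	return p.SendBlockHeaders(req.ReqID, bv, headers) // reply echoes the new buffer value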
+ for { + if err := pm.handleMsg(p); err != nil { + glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err) + //fmt.Println("handleMsg err:", err) + return err + } + } + return nil +} + +var reqList = []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, GetCodeMsg, GetReceiptsMsg, GetProofsMsg, SendTxMsg, GetHeaderProofsMsg} + +// handleMsg is invoked whenever an inbound message is received from a remote +// peer. The remote connection is torn down upon returning any error. +func (pm *ProtocolManager) handleMsg(p *peer) error { + // Read the next message from the remote peer, and ensure it's fully consumed + msg, err := p.rw.ReadMsg() + if err != nil { + return err + } + + var costs *requestCosts + var reqCnt, maxReqs int + + //fmt.Println("MSG", msg.Code, msg.Size) + if rc, ok := p.fcCosts[msg.Code]; ok { // check if msg is a supported request type + costs = rc + if p.fcClient == nil { + return errResp(ErrRequestRejected, "") + } + bv, ok := p.fcClient.AcceptRequest() + if !ok || bv < costs.baseCost { + return errResp(ErrRequestRejected, "") + } + maxReqs = 10000 + if bv < pm.server.defParams.BufLimit { + d := bv - costs.baseCost + if d/10000 < costs.reqCost { + maxReqs = int(d / costs.reqCost) + } + } + } + + if msg.Size > ProtocolMaxMsgSize { + return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) + } + defer msg.Discard() + + var deliverMsg *Msg + + // Handle the message depending on its contents + switch msg.Code { + case StatusMsg: + glog.V(logger.Debug).Infof("LES: received StatusMsg from peer %v", p.id) + // Status messages should never arrive after the handshake + return errResp(ErrExtraStatusMsg, "uncontrolled status message") + + // Block header query, collect the requested headers and reply + case NewBlockHashMsg: + var req newBlockHashData + if err := msg.Decode(&req); err != nil { + return errResp(ErrDecode, "%v: %v", msg, err) + } + //fmt.Println("RECEIVED", req.Number, req.Hash, req.Td, req.ReorgDepth) + pm.fetcher.notify(p, &req) + + case GetBlockHeadersMsg: + glog.V(logger.Debug).Infof("LES: received GetBlockHeadersMsg from peer %v", p.id) + // Decode the complex header query + var req struct { + ReqID uint64 + Query getBlockHeadersData + } + if err := msg.Decode(&req); err != nil { + return errResp(ErrDecode, "%v: %v", msg, err) + } + + query := req.Query + if query.Amount > uint64(maxReqs) || query.Amount > MaxHeaderFetch { + return errResp(ErrRequestRejected, "") + } + + hashMode := query.Origin.Hash != (common.Hash{}) + + // Gather headers until the fetch or network limits is reached + var ( + bytes common.StorageSize + headers []*types.Header + unknown bool + ) + for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit { + // Retrieve the next header satisfying the query + var origin *types.Header + if hashMode { + origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash) + } else { + origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) + } + if origin == nil { + break + } + number := origin.Number.Uint64() + headers = append(headers, origin) + bytes += estHeaderRlpSize + + // Advance to the next header of the query + switch { + case query.Origin.Hash != (common.Hash{}) && query.Reverse: + // Hash based traversal towards the genesis block + for i := 0; i < int(query.Skip)+1; i++ { + if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil { + query.Origin.Hash = header.ParentHash + number-- + } else { + unknown = true + break + } + } + case query.Origin.Hash != (common.Hash{}) && 
!query.Reverse: + // Hash based traversal towards the leaf block + if header := pm.blockchain.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil { + if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { + query.Origin.Hash = header.Hash() + } else { + unknown = true + } + } else { + unknown = true + } + case query.Reverse: + // Number based traversal towards the genesis block + if query.Origin.Number >= query.Skip+1 { + query.Origin.Number -= (query.Skip + 1) + } else { + unknown = true + } + + case !query.Reverse: + // Number based traversal towards the leaf block + query.Origin.Number += (query.Skip + 1) + } + } + + bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + query.Amount*costs.reqCost) + pm.server.fcCostStats.update(msg.Code, query.Amount, rcost) + return p.SendBlockHeaders(req.ReqID, bv, headers) + + case BlockHeadersMsg: + if pm.downloader == nil { + return errResp(ErrUnexpectedResponse, "") + } + + glog.V(logger.Debug).Infof("LES: received BlockHeadersMsg from peer %v", p.id) + // A batch of headers arrived to one of our previous requests + var resp struct { + ReqID, BV uint64 + Headers []*types.Header + } + if err := msg.Decode(&resp); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + p.fcServer.GotReply(resp.ReqID, resp.BV) + if pm.fetcher.requestedID(resp.ReqID) { + pm.fetcher.deliverHeaders(resp.ReqID, resp.Headers) + } else { + err := pm.downloader.DeliverHeaders(p.id, resp.Headers) + if err != nil { + glog.V(logger.Debug).Infoln(err) + } + } + + case GetBlockBodiesMsg: + glog.V(logger.Debug).Infof("LES: received GetBlockBodiesMsg from peer %v", p.id) + // Decode the retrieval message + var req struct { + ReqID uint64 + Hashes []common.Hash + } + if err := msg.Decode(&req); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Gather blocks until the fetch or network limits is reached + var ( + bytes int + bodies []rlp.RawValue + ) + reqCnt = len(req.Hashes) + if reqCnt > maxReqs || reqCnt > MaxBodyFetch { + return errResp(ErrRequestRejected, "") + } + for _, hash := range req.Hashes { + if bytes >= softResponseLimit { + break + } + // Retrieve the requested block body, stopping if enough was found + if data := core.GetBodyRLP(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash)); len(data) != 0 { + bodies = append(bodies, data) + bytes += len(data) + } + } + bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost) + pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost) + return p.SendBlockBodiesRLP(req.ReqID, bv, bodies) + + case BlockBodiesMsg: + if pm.odr == nil { + return errResp(ErrUnexpectedResponse, "") + } + + glog.V(logger.Debug).Infof("LES: received BlockBodiesMsg from peer %v", p.id) + // A batch of block bodies arrived to one of our previous requests + var resp struct { + ReqID, BV uint64 + Data []*types.Body + } + if err := msg.Decode(&resp); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + p.fcServer.GotReply(resp.ReqID, resp.BV) + deliverMsg = &Msg{ + MsgType: MsgBlockBodies, + ReqID: resp.ReqID, + Obj: resp.Data, + } + + case GetCodeMsg: + glog.V(logger.Debug).Infof("LES: received GetCodeMsg from peer %v", p.id) + // Decode the retrieval message + var req struct { + ReqID uint64 + Reqs []CodeReq + } + if err := msg.Decode(&req); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Gather state data until the fetch or network limits is reached + var ( + 
bytes int + data [][]byte + ) + reqCnt = len(req.Reqs) + if reqCnt > maxReqs || reqCnt > MaxCodeFetch { + return errResp(ErrRequestRejected, "") + } + for _, req := range req.Reqs { + // Retrieve the requested state entry, stopping if enough was found + if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil { + if trie, _ := trie.New(header.Root, pm.chainDb); trie != nil { + sdata := trie.Get(req.AccKey) + if so, err := state.DecodeObject(common.Address{}, pm.chainDb, sdata); err == nil { + entry := so.Code() + if bytes+len(entry) >= softResponseLimit { + break + } + data = append(data, entry) + bytes += len(entry) + } + } + } + } + bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost) + pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost) + return p.SendCode(req.ReqID, bv, data) + + case CodeMsg: + if pm.odr == nil { + return errResp(ErrUnexpectedResponse, "") + } + + glog.V(logger.Debug).Infof("LES: received CodeMsg from peer %v", p.id) + // A batch of node state data arrived to one of our previous requests + var resp struct { + ReqID, BV uint64 + Data [][]byte + } + if err := msg.Decode(&resp); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + p.fcServer.GotReply(resp.ReqID, resp.BV) + deliverMsg = &Msg{ + MsgType: MsgCode, + ReqID: resp.ReqID, + Obj: resp.Data, + } + + case GetReceiptsMsg: + glog.V(logger.Debug).Infof("LES: received GetReceiptsMsg from peer %v", p.id) + // Decode the retrieval message + var req struct { + ReqID uint64 + Hashes []common.Hash + } + if err := msg.Decode(&req); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Gather state data until the fetch or network limits is reached + var ( + bytes int + receipts []rlp.RawValue + ) + reqCnt = len(req.Hashes) + if reqCnt > maxReqs || reqCnt > MaxReceiptFetch { + return errResp(ErrRequestRejected, "") + } + for _, hash := range req.Hashes { + if bytes >= softResponseLimit { + break + } + // Retrieve the requested block's receipts, skipping if unknown to us + results := core.GetBlockReceipts(pm.chainDb, hash, core.GetBlockNumber(pm.chainDb, hash)) + if results == nil { + if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { + continue + } + } + // If known, encode and queue for response packet + if encoded, err := rlp.EncodeToBytes(results); err != nil { + glog.V(logger.Error).Infof("failed to encode receipt: %v", err) + } else { + receipts = append(receipts, encoded) + bytes += len(encoded) + } + } + bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost) + pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost) + return p.SendReceiptsRLP(req.ReqID, bv, receipts) + + case ReceiptsMsg: + if pm.odr == nil { + return errResp(ErrUnexpectedResponse, "") + } + + glog.V(logger.Debug).Infof("LES: received ReceiptsMsg from peer %v", p.id) + // A batch of receipts arrived to one of our previous requests + var resp struct { + ReqID, BV uint64 + Receipts []types.Receipts + } + if err := msg.Decode(&resp); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + p.fcServer.GotReply(resp.ReqID, resp.BV) + deliverMsg = &Msg{ + MsgType: MsgReceipts, + ReqID: resp.ReqID, + Obj: resp.Receipts, + } + + case GetProofsMsg: + glog.V(logger.Debug).Infof("LES: received GetProofsMsg from peer %v", p.id) + // Decode the retrieval message + var req struct { + ReqID uint64 + Reqs []ProofReq + } + if 
err := msg.Decode(&req); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Gather state data until the fetch or network limits is reached + var ( + bytes int + proofs proofsData + ) + reqCnt = len(req.Reqs) + if reqCnt > maxReqs || reqCnt > MaxProofsFetch { + return errResp(ErrRequestRejected, "") + } + for _, req := range req.Reqs { + if bytes >= softResponseLimit { + break + } + // Retrieve the requested state entry, stopping if enough was found + if header := core.GetHeader(pm.chainDb, req.BHash, core.GetBlockNumber(pm.chainDb, req.BHash)); header != nil { + if tr, _ := trie.New(header.Root, pm.chainDb); tr != nil { + if len(req.AccKey) > 0 { + data := tr.Get(req.AccKey) + tr = nil + if so, err := state.DecodeObject(common.Address{}, pm.chainDb, data); err == nil { + tr, _ = trie.New(common.BytesToHash(so.Root()), pm.chainDb) + } + } + if tr != nil { + proof := tr.Prove(req.Key) + proofs = append(proofs, proof) + bytes += len(proof) + } + } + } + } + bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost) + pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost) + return p.SendProofs(req.ReqID, bv, proofs) + + case ProofsMsg: + if pm.odr == nil { + return errResp(ErrUnexpectedResponse, "") + } + + glog.V(logger.Debug).Infof("LES: received ProofsMsg from peer %v", p.id) + // A batch of merkle proofs arrived to one of our previous requests + var resp struct { + ReqID, BV uint64 + Data [][]rlp.RawValue + } + if err := msg.Decode(&resp); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + p.fcServer.GotReply(resp.ReqID, resp.BV) + deliverMsg = &Msg{ + MsgType: MsgProofs, + ReqID: resp.ReqID, + Obj: resp.Data, + } + + case GetHeaderProofsMsg: + glog.V(logger.Debug).Infof("LES: received GetHeaderProofsMsg from peer %v", p.id) + // Decode the retrieval message + var req struct { + ReqID uint64 + Reqs []ChtReq + } + if err := msg.Decode(&req); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + // Gather state data until the fetch or network limits is reached + var ( + bytes int + proofs []ChtResp + ) + reqCnt = len(req.Reqs) + if reqCnt > maxReqs || reqCnt > MaxHeaderProofsFetch { + return errResp(ErrRequestRejected, "") + } + for _, req := range req.Reqs { + if bytes >= softResponseLimit { + break + } + + if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil { + if root := getChtRoot(pm.chainDb, req.ChtNum); root != (common.Hash{}) { + if tr, _ := trie.New(root, pm.chainDb); tr != nil { + var encNumber [8]byte + binary.BigEndian.PutUint64(encNumber[:], req.BlockNum) + proof := tr.Prove(encNumber[:]) + proofs = append(proofs, ChtResp{Header: header, Proof: proof}) + bytes += len(proof) + estHeaderRlpSize + } + } + } + } + bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost) + pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost) + return p.SendHeaderProofs(req.ReqID, bv, proofs) + + case HeaderProofsMsg: + if pm.odr == nil { + return errResp(ErrUnexpectedResponse, "") + } + + glog.V(logger.Debug).Infof("LES: received HeaderProofsMsg from peer %v", p.id) + var resp struct { + ReqID, BV uint64 + Data []ChtResp + } + if err := msg.Decode(&resp); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + p.fcServer.GotReply(resp.ReqID, resp.BV) + deliverMsg = &Msg{ + MsgType: MsgHeaderProofs, + ReqID: resp.ReqID, + Obj: resp.Data, + } + + case SendTxMsg: + if pm.txpool == nil { + return errResp(ErrUnexpectedResponse, "") + } 
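// Unlike the retrieval requests handled above, SendTxMsg produces no reply
// message: the transactions are handed to the pool and only the flow-control
// cost accounting below is performed.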
+ // Transactions arrived, parse all of them and deliver to the pool + var txs []*types.Transaction + if err := msg.Decode(&txs); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + reqCnt = len(txs) + if reqCnt > maxReqs || reqCnt > MaxTxSend { + return errResp(ErrRequestRejected, "") + } + pm.txpool.AddTransactions(txs) + _, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost) + pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost) + + default: + glog.V(logger.Debug).Infof("LES: received unknown message with code %d from peer %v", msg.Code, p.id) + return errResp(ErrInvalidMsgCode, "%v", msg.Code) + } + + if deliverMsg != nil { + return pm.odr.Deliver(p, deliverMsg) + } + + return nil +} + +// NodeInfo retrieves some protocol metadata about the running host node. +func (self *ProtocolManager) NodeInfo() *eth.EthNodeInfo { + return ð.EthNodeInfo{ + Network: self.networkId, + Difficulty: self.blockchain.GetTdByHash(self.blockchain.LastBlockHash()), + Genesis: self.blockchain.Genesis().Hash(), + Head: self.blockchain.LastBlockHash(), + } +} diff --git a/les/handler_test.go b/les/handler_test.go new file mode 100644 index 000000000000..55466178cfd7 --- /dev/null +++ b/les/handler_test.go @@ -0,0 +1,322 @@ +package les + +import ( + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error { + type resp struct { + ReqID, BV uint64 + Data interface{} + } + return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data}) +} + +// Tests that block headers can be retrieved from a remote chain based on user queries. 
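// The header-retrieval test below (and every other retrieval test in this
// file) follows the same round trip: pre-pay the advertised request cost,
// send the query under a request id, then match the reply envelope with
// expectResponse. A condensed, illustrative sketch of one such round trip
// (exampleHeaderRoundTrip is not part of the test suite itself):
func exampleHeaderRoundTrip(t *testing.T, peer *testPeer, query *getBlockHeadersData, want []*types.Header) {
	reqID := uint64(1)
	cost := peer.GetRequestCost(GetBlockHeadersMsg, int(query.Amount))
	sendRequest(peer.app, GetBlockHeadersMsg, reqID, cost, query)
	if err := expectResponse(peer.app, BlockHeadersMsg, reqID, testBufLimit, want); err != nil {
		t.Errorf("headers mismatch: %v", err)
	}
}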
+func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) } + +func testGetBlockHeaders(t *testing.T, protocol int) { + pm, _, _ := newTestProtocolManagerMust(t, false, downloader.MaxHashFetch+15, nil) + bc := pm.blockchain.(*core.BlockChain) + peer, _ := newTestPeer(t, "peer", protocol, pm, true) + defer peer.close() + + // Create a "random" unknown hash for testing + var unknown common.Hash + for i, _ := range unknown { + unknown[i] = byte(i) + } + // Create a batch of tests for various scenarios + limit := uint64(MaxHeaderFetch) + tests := []struct { + query *getBlockHeadersData // The query to execute for header retrieval + expect []common.Hash // The hashes of the block whose headers are expected + }{ + // A single random block should be retrievable by hash and number too + { + &getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + []common.Hash{bc.GetBlockByNumber(limit / 2).Hash()}, + }, { + &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1}, + []common.Hash{bc.GetBlockByNumber(limit / 2).Hash()}, + }, + // Multiple headers should be retrievable in both directions + { + &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3}, + []common.Hash{ + bc.GetBlockByNumber(limit / 2).Hash(), + bc.GetBlockByNumber(limit/2 + 1).Hash(), + bc.GetBlockByNumber(limit/2 + 2).Hash(), + }, + }, { + &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + []common.Hash{ + bc.GetBlockByNumber(limit / 2).Hash(), + bc.GetBlockByNumber(limit/2 - 1).Hash(), + bc.GetBlockByNumber(limit/2 - 2).Hash(), + }, + }, + // Multiple headers with skip lists should be retrievable + { + &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + []common.Hash{ + bc.GetBlockByNumber(limit / 2).Hash(), + bc.GetBlockByNumber(limit/2 + 4).Hash(), + bc.GetBlockByNumber(limit/2 + 8).Hash(), + }, + }, { + &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + []common.Hash{ + bc.GetBlockByNumber(limit / 2).Hash(), + bc.GetBlockByNumber(limit/2 - 4).Hash(), + bc.GetBlockByNumber(limit/2 - 8).Hash(), + }, + }, + // The chain endpoints should be retrievable + { + &getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1}, + []common.Hash{bc.GetBlockByNumber(0).Hash()}, + }, { + &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1}, + []common.Hash{bc.CurrentBlock().Hash()}, + }, + // Ensure protocol limits are honored + { + &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, + bc.GetBlockHashesFromHash(bc.CurrentBlock().Hash(), limit), + }, + // Check that requesting more than available is handled gracefully + { + &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, + []common.Hash{ + bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(), + bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(), + }, + }, { + &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + []common.Hash{ + bc.GetBlockByNumber(4).Hash(), + bc.GetBlockByNumber(0).Hash(), + }, + }, + // Check that requesting more than available is handled gracefully, even if mid skip + { + &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, + []common.Hash{ + bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() 
- 4).Hash(), + bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(), + }, + }, { + &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + []common.Hash{ + bc.GetBlockByNumber(4).Hash(), + bc.GetBlockByNumber(1).Hash(), + }, + }, + // Check that non existing headers aren't returned + { + &getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1}, + []common.Hash{}, + }, { + &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1}, + []common.Hash{}, + }, + } + // Run each of the tests and verify the results against the chain + var reqID uint64 + for i, tt := range tests { + // Collect the headers to expect in the response + headers := []*types.Header{} + for _, hash := range tt.expect { + headers = append(headers, bc.GetHeaderByHash(hash)) + } + // Send the hash request and verify the response + reqID++ + cost := peer.GetRequestCost(GetBlockHeadersMsg, int(tt.query.Amount)) + sendRequest(peer.app, GetBlockHeadersMsg, reqID, cost, tt.query) + if err := expectResponse(peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil { + t.Errorf("test %d: headers mismatch: %v", i, err) + } + } +} + +// Tests that block contents can be retrieved from a remote chain based on their hashes. +func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) } + +func testGetBlockBodies(t *testing.T, protocol int) { + pm, _, _ := newTestProtocolManagerMust(t, false, downloader.MaxBlockFetch+15, nil) + bc := pm.blockchain.(*core.BlockChain) + peer, _ := newTestPeer(t, "peer", protocol, pm, true) + defer peer.close() + + // Create a batch of tests for various scenarios + limit := MaxBodyFetch + tests := []struct { + random int // Number of blocks to fetch randomly from the chain + explicit []common.Hash // Explicitly requested blocks + available []bool // Availability of explicitly requested blocks + expected int // Total number of existing blocks to expect + }{ + {1, nil, nil, 1}, // A single random block should be retrievable + {10, nil, nil, 10}, // Multiple random blocks should be retrievable + {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable + {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned + {0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable + {0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable + {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned + + // Existing and non-existing blocks interleaved should not cause problems + {0, []common.Hash{ + common.Hash{}, + bc.GetBlockByNumber(1).Hash(), + common.Hash{}, + bc.GetBlockByNumber(10).Hash(), + common.Hash{}, + bc.GetBlockByNumber(100).Hash(), + common.Hash{}, + }, []bool{false, true, false, true, false, true, false}, 3}, + } + // Run each of the tests and verify the results against the chain + var reqID uint64 + for i, tt := range tests { + // Collect the hashes to request, and the response to expect + hashes, seen := []common.Hash{}, make(map[int64]bool) + bodies := []*types.Body{} + + for j := 0; j < tt.random; j++ { + for { + num := rand.Int63n(int64(bc.CurrentBlock().NumberU64())) + if !seen[num] { + seen[num] = true + + block := bc.GetBlockByNumber(uint64(num)) + hashes = append(hashes, block.Hash()) + if len(bodies) < tt.expected { + bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: 
block.Uncles()}) + } + break + } + } + } + for j, hash := range tt.explicit { + hashes = append(hashes, hash) + if tt.available[j] && len(bodies) < tt.expected { + block := bc.GetBlockByHash(hash) + bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()}) + } + } + reqID++ + // Send the hash request and verify the response + cost := peer.GetRequestCost(GetBlockBodiesMsg, len(hashes)) + sendRequest(peer.app, GetBlockBodiesMsg, reqID, cost, hashes) + if err := expectResponse(peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil { + t.Errorf("test %d: bodies mismatch: %v", i, err) + } + } +} + +// Tests that the contract codes can be retrieved based on account addresses. +func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) } + +func testGetCode(t *testing.T, protocol int) { + // Assemble the test environment + pm, _, _ := newTestProtocolManagerMust(t, false, 4, testChainGen) + bc := pm.blockchain.(*core.BlockChain) + peer, _ := newTestPeer(t, "peer", protocol, pm, true) + defer peer.close() + + var codereqs []*CodeReq + var codes [][]byte + + for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ { + header := bc.GetHeaderByNumber(i) + req := &CodeReq{ + BHash: header.Hash(), + AccKey: crypto.Keccak256(testContractAddr[:]), + } + codereqs = append(codereqs, req) + if i >= testContractDeployed { + codes = append(codes, testContractCodeDeployed) + } + } + + cost := peer.GetRequestCost(GetCodeMsg, len(codereqs)) + sendRequest(peer.app, GetCodeMsg, 42, cost, codereqs) + if err := expectResponse(peer.app, CodeMsg, 42, testBufLimit, codes); err != nil { + t.Errorf("codes mismatch: %v", err) + } +} + +// Tests that the transaction receipts can be retrieved based on hashes. +func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) } + +func testGetReceipt(t *testing.T, protocol int) { + // Assemble the test environment + pm, db, _ := newTestProtocolManagerMust(t, false, 4, testChainGen) + bc := pm.blockchain.(*core.BlockChain) + peer, _ := newTestPeer(t, "peer", protocol, pm, true) + defer peer.close() + + // Collect the hashes to request, and the response to expect + hashes, receipts := []common.Hash{}, []types.Receipts{} + for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ { + block := bc.GetBlockByNumber(i) + + hashes = append(hashes, block.Hash()) + receipts = append(receipts, core.GetBlockReceipts(db, block.Hash(), block.NumberU64())) + } + // Send the hash request and verify the response + cost := peer.GetRequestCost(GetReceiptsMsg, len(hashes)) + sendRequest(peer.app, GetReceiptsMsg, 42, cost, hashes) + if err := expectResponse(peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil { + t.Errorf("receipts mismatch: %v", err) + } +} + +// Tests that trie merkle proofs can be retrieved. +func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) } + +func testGetProofs(t *testing.T, protocol int) { + // Assemble the test environment + pm, db, _ := newTestProtocolManagerMust(t, false, 4, testChainGen) + bc := pm.blockchain.(*core.BlockChain) + peer, _ := newTestPeer(t, "peer", protocol, pm, true) + defer peer.close() + + var proofreqs []ProofReq + var proofs [][]rlp.RawValue + + accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, common.Address{}} + for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ { + header := bc.GetHeaderByNumber(i) + root := header.Root + trie, _ := trie.NewSecure(root, db) + + for _, acc := range accounts { + req := ProofReq{ + BHash: header.Hash(), + Key: acc[:], + } + 
proofreqs = append(proofreqs, req) + + proof := trie.Prove(acc[:]) + proofs = append(proofs, proof) + } + } + // Send the proof request and verify the response + cost := peer.GetRequestCost(GetProofsMsg, len(proofreqs)) + sendRequest(peer.app, GetProofsMsg, 42, cost, proofreqs) + if err := expectResponse(peer.app, ProofsMsg, 42, testBufLimit, proofs); err != nil { + t.Errorf("proofs mismatch: %v", err) + } +} diff --git a/les/helper_test.go b/les/helper_test.go new file mode 100644 index 000000000000..9ca9dd77ef10 --- /dev/null +++ b/les/helper_test.go @@ -0,0 +1,316 @@ +// This file contains some shares testing functionality, common to multiple +// different files and modules being tested. + +package les + +import ( + "crypto/ecdsa" + "crypto/rand" + "math/big" + "sync" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/les/flowcontrol" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/params" +) + +var ( + testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) + testBankFunds = big.NewInt(1000000) + + acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) + + testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056") + testContractAddr common.Address + testContractCodeDeployed = testContractCode[16:] + testContractDeployed = uint64(2) + + testBufLimit = uint64(100) +) + +/* +contract test { + + uint256[100] data; + + function Put(uint256 addr, uint256 value) { + data[addr] = value; + } + + function Get(uint256 addr) constant returns (uint256 value) { + return data[addr]; + } +} +*/ + +func testChainGen(i int, block *core.BlockGen) { + switch i { + case 0: + // In block 1, the test bank sends account #1 some ether. + tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey) + block.AddTx(tx) + case 1: + // In block 2, the test bank sends some more ether to account #1. + // acc1Addr passes it on to account #2. + // acc1Addr creates a test contract. 
+ tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey) + nonce := block.TxNonce(acc1Addr) + tx2, _ := types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key) + nonce++ + tx3, _ := types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(200000), big.NewInt(0), testContractCode).SignECDSA(acc1Key) + testContractAddr = crypto.CreateAddress(acc1Addr, nonce) + block.AddTx(tx1) + block.AddTx(tx2) + block.AddTx(tx3) + case 2: + // Block 3 is empty but was mined by account #2. + block.SetCoinbase(acc2Addr) + block.SetExtra([]byte("yeehaw")) + data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001") + tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey) + block.AddTx(tx) + case 3: + // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). + b2 := block.PrevBlock(1).Header() + b2.Extra = []byte("foo") + block.AddUncle(b2) + b3 := block.PrevBlock(2).Header() + b3.Extra = []byte("foo") + block.AddUncle(b3) + data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002") + tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey) + block.AddTx(tx) + } +} + +func testRCL() RequestCostList { + cl := make(RequestCostList, len(reqList)) + for i, code := range reqList { + cl[i].MsgCode = code + cl[i].BaseCost = 0 + cl[i].ReqCost = 0 + } + return cl +} + +// newTestProtocolManager creates a new protocol manager for testing purposes, +// with the given number of blocks already known, and potential notification +// channels for different events. 
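// As a usage illustration for the constructor documented above (its full
// definition follows): a server-side manager is built without light sync on a
// freshly generated chain, while a client-side one enables light sync, in
// which case the block count and generator are ignored. exampleManagers is an
// illustrative sketch, not one of the original helpers.
func exampleManagers() (*ProtocolManager, *ProtocolManager, error) {
	server, _, _, err := newTestProtocolManager(false, 4, testChainGen)
	if err != nil {
		return nil, nil, err
	}
	client, _, _, err := newTestProtocolManager(true, 0, nil)
	if err != nil {
		return nil, nil, err
	}
	return server, client, nil
}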
+func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *core.BlockGen)) (*ProtocolManager, ethdb.Database, *LesOdr, error) { + var ( + evmux = new(event.TypeMux) + pow = new(core.FakePow) + db, _ = ethdb.NewMemDatabase() + genesis = core.WriteGenesisBlockForTesting(db, core.GenesisAccount{testBankAddress, testBankFunds}) + chainConfig = &core.ChainConfig{HomesteadBlock: big.NewInt(0)} // homestead set to 0 because of chain maker + odr *LesOdr + chain BlockChain + ) + + if lightSync { + odr = NewLesOdr(db) + chain, _ = light.NewLightChain(odr, chainConfig, pow, evmux) + } else { + blockchain, _ := core.NewBlockChain(db, chainConfig, pow, evmux) + gchain, _ := core.GenerateChain(genesis, db, blocks, generator) + if _, err := blockchain.InsertChain(gchain); err != nil { + panic(err) + } + chain = blockchain + } + + pm, err := NewProtocolManager(chainConfig, lightSync, NetworkId, evmux, pow, chain, nil, db, odr, nil) + if err != nil { + return nil, nil, nil, err + } + if !lightSync { + srv := &LesServer{protocolManager: pm} + pm.server = srv + + srv.defParams = &flowcontrol.ServerParams{ + BufLimit: testBufLimit, + MinRecharge: 1, + } + + srv.fcManager = flowcontrol.NewClientManager(50, 10, 1000000000) + srv.fcCostStats = newCostStats(nil) + srv.fcCostStats.baseCost = 0 + for _, entry := range srv.fcCostStats.avg { + entry.baseCost = 0 + entry.reqCost = 0 + } + } + pm.Start() + return pm, db, odr, nil +} + +// newTestProtocolManagerMust creates a new protocol manager for testing purposes, +// with the given number of blocks already known, and potential notification +// channels for different events. In case of an error, the constructor force- +// fails the test. +func newTestProtocolManagerMust(t *testing.T, lightSync bool, blocks int, generator func(int, *core.BlockGen)) (*ProtocolManager, ethdb.Database, *LesOdr) { + pm, db, odr, err := newTestProtocolManager(lightSync, blocks, generator) + if err != nil { + t.Fatalf("Failed to create protocol manager: %v", err) + } + return pm, db, odr +} + +// testTxPool is a fake, helper transaction pool for testing purposes +type testTxPool struct { + pool []*types.Transaction // Collection of all transactions + added chan<- []*types.Transaction // Notification channel for new transactions + + lock sync.RWMutex // Protects the transaction pool +} + +// AddTransactions appends a batch of transactions to the pool, and notifies any +// listeners if the addition channel is non nil +func (p *testTxPool) AddTransactions(txs []*types.Transaction) { + p.lock.Lock() + defer p.lock.Unlock() + + p.pool = append(p.pool, txs...) + if p.added != nil { + p.added <- txs + } +} + +// GetTransactions returns all the transactions known to the pool +func (p *testTxPool) GetTransactions() types.Transactions { + p.lock.RLock() + defer p.lock.RUnlock() + + txs := make([]*types.Transaction, len(p.pool)) + copy(txs, p.pool) + + return txs +} + +// newTestTransaction create a new dummy transaction. +func newTestTransaction(from *ecdsa.PrivateKey, nonce uint64, datasize int) *types.Transaction { + tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize)) + tx, _ = tx.SignECDSA(from) + + return tx +} + +// testPeer is a simulated peer to allow testing direct network calls. 
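// Two details are worth keeping in mind when reading the helpers below: the
// protocol side under test is attached to the "net" end of the message pipe,
// while the test itself reads and writes the "app" end, standing in for the
// remote peer; and when newTestPeer is called with shake set to true, the
// status exchange is performed before it returns, so a test can start sending
// LES requests immediately.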
+type testPeer struct { + net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging + app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side + *peer +} + +// newTestPeer creates a new peer registered at the given protocol manager. +func newTestPeer(t *testing.T, name string, version int, pm *ProtocolManager, shake bool) (*testPeer, <-chan error) { + // Create a message pipe to communicate through + app, net := p2p.MsgPipe() + + // Generate a random id and create the peer + var id discover.NodeID + rand.Read(id[:]) + + peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net) + + // Start the peer on a new thread + errc := make(chan error, 1) + go func() { + select { + case pm.newPeerCh <- peer: + errc <- pm.handle(peer) + case <-pm.quitSync: + errc <- p2p.DiscQuitting + } + }() + tp := &testPeer{ + app: app, + net: net, + peer: peer, + } + // Execute any implicitly requested handshakes and return + if shake { + td, head, genesis := pm.blockchain.Status() + tp.handshake(t, td, head, genesis) + } + return tp, errc +} + +func newTestPeerPair(name string, version int, pm, pm2 *ProtocolManager) (*peer, <-chan error, *peer, <-chan error) { + // Create a message pipe to communicate through + app, net := p2p.MsgPipe() + + // Generate a random id and create the peer + var id discover.NodeID + rand.Read(id[:]) + + peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net) + peer2 := pm2.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), app) + + // Start the peer on a new thread + errc := make(chan error, 1) + errc2 := make(chan error, 1) + go func() { + select { + case pm.newPeerCh <- peer: + errc <- pm.handle(peer) + case <-pm.quitSync: + errc <- p2p.DiscQuitting + } + }() + go func() { + select { + case pm2.newPeerCh <- peer2: + errc2 <- pm2.handle(peer2) + case <-pm2.quitSync: + errc2 <- p2p.DiscQuitting + } + }() + return peer, errc, peer2, errc2 +} + +// handshake simulates a trivial handshake that expects the same state from the +// remote side as we are simulating locally. +func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) { + var expList keyValueList + expList = expList.add("protocolVersion", uint64(p.version)) + expList = expList.add("networkId", uint64(NetworkId)) + expList = expList.add("td", td) + expList = expList.add("bestHash", head) + expList = expList.add("genesisHash", genesis) + sendList := make(keyValueList, len(expList)) + copy(sendList, expList) + expList = expList.add("serveHeaders", nil) + expList = expList.add("serveChainSince", uint64(0)) + expList = expList.add("serveStateSince", uint64(0)) + expList = expList.add("txRelay", nil) + expList = expList.add("flowControl/BL", testBufLimit) + expList = expList.add("flowControl/MRR", uint64(1)) + expList = expList.add("flowControl/MRC", testRCL()) + + if err := p2p.ExpectMsg(p.app, StatusMsg, expList); err != nil { + t.Fatalf("status recv: %v", err) + } + if err := p2p.Send(p.app, StatusMsg, sendList); err != nil { + t.Fatalf("status send: %v", err) + } +} + +// close terminates the local side of the peer, notifying the remote protocol +// manager of termination. +func (p *testPeer) close() { + p.app.Close() +} diff --git a/les/metrics.go b/les/metrics.go new file mode 100644 index 000000000000..88e6726e24d8 --- /dev/null +++ b/les/metrics.go @@ -0,0 +1,111 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package les + +import ( + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/p2p" +) + +var ( + /* propTxnInPacketsMeter = metrics.NewMeter("eth/prop/txns/in/packets") + propTxnInTrafficMeter = metrics.NewMeter("eth/prop/txns/in/traffic") + propTxnOutPacketsMeter = metrics.NewMeter("eth/prop/txns/out/packets") + propTxnOutTrafficMeter = metrics.NewMeter("eth/prop/txns/out/traffic") + propHashInPacketsMeter = metrics.NewMeter("eth/prop/hashes/in/packets") + propHashInTrafficMeter = metrics.NewMeter("eth/prop/hashes/in/traffic") + propHashOutPacketsMeter = metrics.NewMeter("eth/prop/hashes/out/packets") + propHashOutTrafficMeter = metrics.NewMeter("eth/prop/hashes/out/traffic") + propBlockInPacketsMeter = metrics.NewMeter("eth/prop/blocks/in/packets") + propBlockInTrafficMeter = metrics.NewMeter("eth/prop/blocks/in/traffic") + propBlockOutPacketsMeter = metrics.NewMeter("eth/prop/blocks/out/packets") + propBlockOutTrafficMeter = metrics.NewMeter("eth/prop/blocks/out/traffic") + reqHashInPacketsMeter = metrics.NewMeter("eth/req/hashes/in/packets") + reqHashInTrafficMeter = metrics.NewMeter("eth/req/hashes/in/traffic") + reqHashOutPacketsMeter = metrics.NewMeter("eth/req/hashes/out/packets") + reqHashOutTrafficMeter = metrics.NewMeter("eth/req/hashes/out/traffic") + reqBlockInPacketsMeter = metrics.NewMeter("eth/req/blocks/in/packets") + reqBlockInTrafficMeter = metrics.NewMeter("eth/req/blocks/in/traffic") + reqBlockOutPacketsMeter = metrics.NewMeter("eth/req/blocks/out/packets") + reqBlockOutTrafficMeter = metrics.NewMeter("eth/req/blocks/out/traffic") + reqHeaderInPacketsMeter = metrics.NewMeter("eth/req/headers/in/packets") + reqHeaderInTrafficMeter = metrics.NewMeter("eth/req/headers/in/traffic") + reqHeaderOutPacketsMeter = metrics.NewMeter("eth/req/headers/out/packets") + reqHeaderOutTrafficMeter = metrics.NewMeter("eth/req/headers/out/traffic") + reqBodyInPacketsMeter = metrics.NewMeter("eth/req/bodies/in/packets") + reqBodyInTrafficMeter = metrics.NewMeter("eth/req/bodies/in/traffic") + reqBodyOutPacketsMeter = metrics.NewMeter("eth/req/bodies/out/packets") + reqBodyOutTrafficMeter = metrics.NewMeter("eth/req/bodies/out/traffic") + reqStateInPacketsMeter = metrics.NewMeter("eth/req/states/in/packets") + reqStateInTrafficMeter = metrics.NewMeter("eth/req/states/in/traffic") + reqStateOutPacketsMeter = metrics.NewMeter("eth/req/states/out/packets") + reqStateOutTrafficMeter = metrics.NewMeter("eth/req/states/out/traffic") + reqReceiptInPacketsMeter = metrics.NewMeter("eth/req/receipts/in/packets") + reqReceiptInTrafficMeter = metrics.NewMeter("eth/req/receipts/in/traffic") + reqReceiptOutPacketsMeter = metrics.NewMeter("eth/req/receipts/out/packets") + reqReceiptOutTrafficMeter = metrics.NewMeter("eth/req/receipts/out/traffic")*/ + miscInPacketsMeter = 
metrics.NewMeter("les/misc/in/packets") + miscInTrafficMeter = metrics.NewMeter("les/misc/in/traffic") + miscOutPacketsMeter = metrics.NewMeter("les/misc/out/packets") + miscOutTrafficMeter = metrics.NewMeter("les/misc/out/traffic") +) + +// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of +// accumulating the above defined metrics based on the data stream contents. +type meteredMsgReadWriter struct { + p2p.MsgReadWriter // Wrapped message stream to meter + version int // Protocol version to select correct meters +} + +// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the +// metrics system is disabled, this fucntion returns the original object. +func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter { + if !metrics.Enabled { + return rw + } + return &meteredMsgReadWriter{MsgReadWriter: rw} +} + +// Init sets the protocol version used by the stream to know which meters to +// increment in case of overlapping message ids between protocol versions. +func (rw *meteredMsgReadWriter) Init(version int) { + rw.version = version +} + +func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) { + // Read the message and short circuit in case of an error + msg, err := rw.MsgReadWriter.ReadMsg() + if err != nil { + return msg, err + } + // Account for the data traffic + packets, traffic := miscInPacketsMeter, miscInTrafficMeter + packets.Mark(1) + traffic.Mark(int64(msg.Size)) + + return msg, err +} + +func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error { + // Account for the data traffic + packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter + packets.Mark(1) + traffic.Mark(int64(msg.Size)) + + // Send the packet to the p2p layer + return rw.MsgReadWriter.WriteMsg(msg) +} diff --git a/les/odr.go b/les/odr.go new file mode 100644 index 000000000000..2674ba6a19b9 --- /dev/null +++ b/les/odr.go @@ -0,0 +1,247 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . +package les + +import ( + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "golang.org/x/net/context" +) + +var ( + softRequestTimeout = time.Millisecond * 500 + hardRequestTimeout = time.Second * 10 + retryPeers = time.Second * 1 +) + +// peerDropFn is a callback type for dropping a peer detected as malicious. 
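// Timing policy for the retriever below: after softRequestTimeout a request is
// not abandoned, but the peer is charged with a soft timeout and another peer
// may be tried; after hardRequestTimeout without an answer the peer is dropped
// through the callback declared below. retryPeers is how long the retriever
// waits before rescanning the peer set when no suitable peer is available.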
+type peerDropFn func(id string) + +type LesOdr struct { + light.OdrBackend + db ethdb.Database + stop chan struct{} + removePeer peerDropFn + mlock, clock sync.Mutex + sentReqs map[uint64]*sentReq + peers *odrPeerSet + lastReqID uint64 +} + +func NewLesOdr(db ethdb.Database) *LesOdr { + return &LesOdr{ + db: db, + stop: make(chan struct{}), + peers: newOdrPeerSet(), + sentReqs: make(map[uint64]*sentReq), + } +} + +func (odr *LesOdr) Stop() { + close(odr.stop) +} + +func (odr *LesOdr) Database() ethdb.Database { + return odr.db +} + +// validatorFunc is a function that processes a message and returns true if +// it was a meaningful answer to a given request +type validatorFunc func(ethdb.Database, *Msg) bool + +// sentReq is a request waiting for an answer that satisfies its valFunc +type sentReq struct { + valFunc validatorFunc + sentTo map[*peer]chan struct{} + lock sync.RWMutex // protects acces to sentTo + answered chan struct{} // closed and set to nil when any peer answers it +} + +// RegisterPeer registers a new LES peer to the ODR capable peer set +func (self *LesOdr) RegisterPeer(p *peer) error { + return self.peers.register(p) +} + +// UnregisterPeer removes a peer from the ODR capable peer set +func (self *LesOdr) UnregisterPeer(p *peer) { + self.peers.unregister(p) +} + +const ( + MsgBlockBodies = iota + MsgCode + MsgReceipts + MsgProofs + MsgHeaderProofs +) + +// Msg encodes a LES message that delivers reply data for a request +type Msg struct { + MsgType int + ReqID uint64 + Obj interface{} +} + +// Deliver is called by the LES protocol manager to deliver ODR reply messages to waiting requests +func (self *LesOdr) Deliver(peer *peer, msg *Msg) error { + var delivered chan struct{} + self.mlock.Lock() + req, ok := self.sentReqs[msg.ReqID] + self.mlock.Unlock() + if ok { + req.lock.Lock() + delivered, ok = req.sentTo[peer] + req.lock.Unlock() + } + + if !ok { + return errResp(ErrUnexpectedResponse, "reqID = %v", msg.ReqID) + } + + if req.valFunc(self.db, msg) { + close(delivered) + req.lock.Lock() + if req.answered != nil { + close(req.answered) + req.answered = nil + } + req.lock.Unlock() + return nil + } + return errResp(ErrInvalidResponse, "reqID = %v", msg.ReqID) +} + +func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout chan struct{}, reqWg *sync.WaitGroup) { + stime := mclock.Now() + defer func() { + req.lock.Lock() + delete(req.sentTo, peer) + req.lock.Unlock() + reqWg.Done() + }() + + select { + case <-delivered: + servTime := uint64(mclock.Now() - stime) + self.peers.updateTimeout(peer, false) + self.peers.updateServTime(peer, servTime) + return + case <-time.After(softRequestTimeout): + close(timeout) + if self.peers.updateTimeout(peer, true) { + self.removePeer(peer.id) + } + case <-self.stop: + return + } + + select { + case <-delivered: + servTime := uint64(mclock.Now() - stime) + self.peers.updateServTime(peer, servTime) + return + case <-time.After(hardRequestTimeout): + self.removePeer(peer.id) + case <-self.stop: + return + } +} + +// networkRequest sends a request to known peers until an answer is received +// or the context is cancelled +func (self *LesOdr) networkRequest(ctx context.Context, lreq LesOdrRequest) error { + answered := make(chan struct{}) + req := &sentReq{ + valFunc: lreq.Valid, + sentTo: make(map[*peer]chan struct{}), + answered: answered, // reply delivered by any peer + } + reqID := self.getNextReqID() + self.mlock.Lock() + self.sentReqs[reqID] = req + self.mlock.Unlock() + + reqWg := new(sync.WaitGroup) + 
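// reqWg tracks one unit for this networkRequest call itself plus one for every
// peer the request is sent to; the goroutine started below deletes the entry
// from sentReqs only once all of them have finished, so a reply that arrives
// late (but before the peer's hard timeout) can still be matched by Deliver.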
reqWg.Add(1) + defer reqWg.Done() + go func() { + reqWg.Wait() + self.mlock.Lock() + delete(self.sentReqs, reqID) + self.mlock.Unlock() + }() + + exclude := make(map[*peer]struct{}) + for { + if peer := self.peers.bestPeer(lreq, exclude); peer == nil { + select { + case <-ctx.Done(): + return ctx.Err() + case <-req.answered: + return nil + case <-time.After(retryPeers): + } + } else { + exclude[peer] = struct{}{} + delivered := make(chan struct{}) + timeout := make(chan struct{}) + req.lock.Lock() + req.sentTo[peer] = delivered + req.lock.Unlock() + reqWg.Add(1) + cost := lreq.GetCost(peer) + peer.fcServer.SendRequest(reqID, cost) + go self.requestPeer(req, peer, delivered, timeout, reqWg) + lreq.Request(reqID, peer) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-answered: + return nil + case <-timeout: + } + } + } +} + +// Retrieve tries to fetch an object from the local db, then from the LES network. +// If the network retrieval was successful, it stores the object in local db. +func (self *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err error) { + lreq := LesRequest(req) + err = self.networkRequest(ctx, lreq) + if err == nil { + // retrieved from network, store in db + req.StoreResult(self.db) + } else { + glog.V(logger.Debug).Infof("networkRequest err = %v", err) + } + return +} + +func (self *LesOdr) getNextReqID() uint64 { + self.clock.Lock() + defer self.clock.Unlock() + + self.lastReqID++ + return self.lastReqID +} diff --git a/les/odr_peerset.go b/les/odr_peerset.go new file mode 100644 index 000000000000..0323ce27fcf9 --- /dev/null +++ b/les/odr_peerset.go @@ -0,0 +1,119 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . +package les + +import ( + "sync" +) + +const dropTimeoutRatio = 20 + +type odrPeerInfo struct { + reqTimeSum, reqTimeCnt, reqCnt, timeoutCnt uint64 +} + +// odrPeerSet represents the collection of active peer participating in the block +// download procedure. +type odrPeerSet struct { + peers map[*peer]*odrPeerInfo + lock sync.RWMutex +} + +// newPeerSet creates a new peer set top track the active download sources. +func newOdrPeerSet() *odrPeerSet { + return &odrPeerSet{ + peers: make(map[*peer]*odrPeerInfo), + } +} + +// Register injects a new peer into the working set, or returns an error if the +// peer is already known. +func (ps *odrPeerSet) register(p *peer) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + if _, ok := ps.peers[p]; ok { + return errAlreadyRegistered + } + ps.peers[p] = &odrPeerInfo{} + return nil +} + +// Unregister removes a remote peer from the active set, disabling any further +// actions to/from that particular entity. 
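// Besides explicit unregistration, peers also lose eligibility through the
// statistics kept above: updateTimeout below tolerates a peer's first soft
// timeout but reports it as droppable once soft timeouts reach one in
// dropTimeoutRatio of its requests, while bestPeer always picks the candidate
// with the lowest sum of flow-control waiting time and average past service
// time.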
+func (ps *odrPeerSet) unregister(p *peer) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + if _, ok := ps.peers[p]; !ok { + return errNotRegistered + } + delete(ps.peers, p) + return nil +} + +func (ps *odrPeerSet) peerPriority(p *peer, info *odrPeerInfo, req LesOdrRequest) uint64 { + tm := p.fcServer.CanSend(req.GetCost(p)) + if info.reqTimeCnt > 0 { + tm += info.reqTimeSum / info.reqTimeCnt + } + return tm +} + +func (ps *odrPeerSet) bestPeer(req LesOdrRequest, exclude map[*peer]struct{}) *peer { + var best *peer + var bpv uint64 + ps.lock.Lock() + defer ps.lock.Unlock() + + for p, info := range ps.peers { + if _, ok := exclude[p]; !ok { + pv := ps.peerPriority(p, info, req) + if best == nil || pv < bpv { + best = p + bpv = pv + } + } + } + return best +} + +func (ps *odrPeerSet) updateTimeout(p *peer, timeout bool) (drop bool) { + ps.lock.Lock() + defer ps.lock.Unlock() + + if info, ok := ps.peers[p]; ok { + info.reqCnt++ + if timeout { + // check ratio before increase to allow an extra timeout + if info.timeoutCnt*dropTimeoutRatio >= info.reqCnt { + return true + } + info.timeoutCnt++ + } + } + return false +} + +func (ps *odrPeerSet) updateServTime(p *peer, servTime uint64) { + ps.lock.Lock() + defer ps.lock.Unlock() + + if info, ok := ps.peers[p]; ok { + info.reqTimeSum += servTime + info.reqTimeCnt++ + } +} diff --git a/les/odr_requests.go b/les/odr_requests.go new file mode 100644 index 000000000000..bf03469778a3 --- /dev/null +++ b/les/odr_requests.go @@ -0,0 +1,325 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package light implements on-demand retrieval capable state and chain objects +// for the Ethereum Light Client. 
+package les + +import ( + "bytes" + "encoding/binary" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +type LesOdrRequest interface { + GetCost(*peer) uint64 + Request(uint64, *peer) error + Valid(ethdb.Database, *Msg) bool // if true, keeps the retrieved object +} + +func LesRequest(req light.OdrRequest) LesOdrRequest { + switch r := req.(type) { + case *light.BlockRequest: + return (*BlockRequest)(r) + case *light.ReceiptsRequest: + return (*ReceiptsRequest)(r) + case *light.TrieRequest: + return (*TrieRequest)(r) + case *light.CodeRequest: + return (*CodeRequest)(r) + case *light.ChtRequest: + return (*ChtRequest)(r) + default: + return nil + } +} + +// BlockRequest is the ODR request type for block bodies +type BlockRequest light.BlockRequest + +// GetCost returns the cost of the given ODR request according to the serving +// peer's cost table (implementation of LesOdrRequest) +func (self *BlockRequest) GetCost(peer *peer) uint64 { + return peer.GetRequestCost(GetBlockBodiesMsg, 1) +} + +// Request sends an ODR request to the LES network (implementation of LesOdrRequest) +func (self *BlockRequest) Request(reqID uint64, peer *peer) error { + glog.V(logger.Debug).Infof("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id) + return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash}) +} + +// Valid processes an ODR request reply message from the LES network +// returns true and stores results in memory if the message was a valid reply +// to the request (implementation of LesOdrRequest) +func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool { + glog.V(logger.Debug).Infof("ODR: validating body of block %08x", self.Hash[:4]) + if msg.MsgType != MsgBlockBodies { + glog.V(logger.Debug).Infof("ODR: invalid message type") + return false + } + bodies := msg.Obj.([]*types.Body) + if len(bodies) != 1 { + glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(bodies)) + return false + } + body := bodies[0] + header := core.GetHeader(db, self.Hash, self.Number) + if header == nil { + glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4]) + return false + } + txHash := types.DeriveSha(types.Transactions(body.Transactions)) + if header.TxHash != txHash { + glog.V(logger.Debug).Infof("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4]) + return false + } + uncleHash := types.CalcUncleHash(body.Uncles) + if header.UncleHash != uncleHash { + glog.V(logger.Debug).Infof("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4]) + return false + } + data, err := rlp.EncodeToBytes(body) + if err != nil { + glog.V(logger.Debug).Infof("ODR: body RLP encode error: %v", err) + return false + } + self.Rlp = data + glog.V(logger.Debug).Infof("ODR: validation successful") + return true +} + +// ReceiptsRequest is the ODR request type for block receipts by block hash +type ReceiptsRequest light.ReceiptsRequest + +// GetCost returns the cost of the given ODR request according to the serving +// peer's cost table (implementation of LesOdrRequest) +func (self 
*ReceiptsRequest) GetCost(peer *peer) uint64 { + return peer.GetRequestCost(GetReceiptsMsg, 1) +} + +// Request sends an ODR request to the LES network (implementation of LesOdrRequest) +func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error { + glog.V(logger.Debug).Infof("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id) + return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash}) +} + +// Valid processes an ODR request reply message from the LES network +// returns true and stores results in memory if the message was a valid reply +// to the request (implementation of LesOdrRequest) +func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool { + glog.V(logger.Debug).Infof("ODR: validating receipts for block %08x", self.Hash[:4]) + if msg.MsgType != MsgReceipts { + glog.V(logger.Debug).Infof("ODR: invalid message type") + return false + } + receipts := msg.Obj.([]types.Receipts) + if len(receipts) != 1 { + glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(receipts)) + return false + } + hash := types.DeriveSha(receipts[0]) + header := core.GetHeader(db, self.Hash, self.Number) + if header == nil { + glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4]) + return false + } + if !bytes.Equal(header.ReceiptHash[:], hash[:]) { + glog.V(logger.Debug).Infof("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4]) + return false + } + self.Receipts = receipts[0] + glog.V(logger.Debug).Infof("ODR: validation successful") + return true +} + +type ProofReq struct { + BHash common.Hash + AccKey, Key []byte + FromLevel uint +} + +// ODR request type for state/storage trie entries, see LesOdrRequest interface +type TrieRequest light.TrieRequest + +// GetCost returns the cost of the given ODR request according to the serving +// peer's cost table (implementation of LesOdrRequest) +func (self *TrieRequest) GetCost(peer *peer) uint64 { + return peer.GetRequestCost(GetProofsMsg, 1) +} + +// Request sends an ODR request to the LES network (implementation of LesOdrRequest) +func (self *TrieRequest) Request(reqID uint64, peer *peer) error { + glog.V(logger.Debug).Infof("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id) + req := &ProofReq{ + BHash: self.Id.BlockHash, + AccKey: self.Id.AccKey, + Key: self.Key, + } + return peer.RequestProofs(reqID, self.GetCost(peer), []*ProofReq{req}) +} + +// Valid processes an ODR request reply message from the LES network +// returns true and stores results in memory if the message was a valid reply +// to the request (implementation of LesOdrRequest) +func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool { + glog.V(logger.Debug).Infof("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4]) + + if msg.MsgType != MsgProofs { + glog.V(logger.Debug).Infof("ODR: invalid message type") + return false + } + proofs := msg.Obj.([][]rlp.RawValue) + if len(proofs) != 1 { + glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs)) + return false + } + _, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0]) + if err != nil { + glog.V(logger.Debug).Infof("ODR: merkle proof verification error: %v", err) + return false + } + self.Proof = proofs[0] + glog.V(logger.Debug).Infof("ODR: validation successful") + return true +} + +type CodeReq struct { + BHash common.Hash + AccKey []byte +} + +// ODR request type for node 
data (used for retrieving contract code), see LesOdrRequest interface +type CodeRequest light.CodeRequest + +// GetCost returns the cost of the given ODR request according to the serving +// peer's cost table (implementation of LesOdrRequest) +func (self *CodeRequest) GetCost(peer *peer) uint64 { + return peer.GetRequestCost(GetCodeMsg, 1) +} + +// Request sends an ODR request to the LES network (implementation of LesOdrRequest) +func (self *CodeRequest) Request(reqID uint64, peer *peer) error { + glog.V(logger.Debug).Infof("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id) + req := &CodeReq{ + BHash: self.Id.BlockHash, + AccKey: self.Id.AccKey, + } + return peer.RequestCode(reqID, self.GetCost(peer), []*CodeReq{req}) +} + +// Valid processes an ODR request reply message from the LES network +// returns true and stores results in memory if the message was a valid reply +// to the request (implementation of LesOdrRequest) +func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool { + glog.V(logger.Debug).Infof("ODR: validating node data for hash %08x", self.Hash[:4]) + if msg.MsgType != MsgCode { + glog.V(logger.Debug).Infof("ODR: invalid message type") + return false + } + reply := msg.Obj.([][]byte) + if len(reply) != 1 { + glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(reply)) + return false + } + data := reply[0] + hash := crypto.Sha3Hash(data) + if !bytes.Equal(self.Hash[:], hash[:]) { + glog.V(logger.Debug).Infof("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4]) + return false + } + self.Data = data + glog.V(logger.Debug).Infof("ODR: validation successful") + return true +} + +type ChtReq struct { + ChtNum, BlockNum, FromLevel uint64 +} + +type ChtResp struct { + Header *types.Header + Proof []rlp.RawValue +} + +// ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface +type ChtRequest light.ChtRequest + +// GetCost returns the cost of the given ODR request according to the serving +// peer's cost table (implementation of LesOdrRequest) +func (self *ChtRequest) GetCost(peer *peer) uint64 { + return peer.GetRequestCost(GetHeaderProofsMsg, 1) +} + +// Request sends an ODR request to the LES network (implementation of LesOdrRequest) +func (self *ChtRequest) Request(reqID uint64, peer *peer) error { + glog.V(logger.Debug).Infof("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id) + req := &ChtReq{ + ChtNum: self.ChtNum, + BlockNum: self.BlockNum, + } + return peer.RequestHeaderProofs(reqID, self.GetCost(peer), []*ChtReq{req}) +} + +// Valid processes an ODR request reply message from the LES network +// returns true and stores results in memory if the message was a valid reply +// to the request (implementation of LesOdrRequest) +func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool { + glog.V(logger.Debug).Infof("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum) + + if msg.MsgType != MsgHeaderProofs { + glog.V(logger.Debug).Infof("ODR: invalid message type") + return false + } + proofs := msg.Obj.([]ChtResp) + if len(proofs) != 1 { + glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs)) + return false + } + proof := proofs[0] + var encNumber [8]byte + binary.BigEndian.PutUint64(encNumber[:], self.BlockNum) + value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof) + if err != nil { + glog.V(logger.Debug).Infof("ODR: CHT merkle proof verification error: %v", 
err) + return false + } + var node light.ChtNode + if err := rlp.DecodeBytes(value, &node); err != nil { + glog.V(logger.Debug).Infof("ODR: error decoding CHT node: %v", err) + return false + } + if node.Hash != proof.Header.Hash() { + glog.V(logger.Debug).Infof("ODR: CHT header hash does not match") + return false + } + + self.Proof = proof.Proof + self.Header = proof.Header + self.Td = node.Td + glog.V(logger.Debug).Infof("ODR: validation successful") + return true +} diff --git a/les/odr_test.go b/les/odr_test.go new file mode 100644 index 000000000000..08baa9171ae9 --- /dev/null +++ b/les/odr_test.go @@ -0,0 +1,223 @@ +package les + +import ( + "bytes" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/net/context" +) + +type odrTestFn func(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte + +func TestOdrGetBlockLes1(t *testing.T) { testOdr(t, 1, 1, odrGetBlock) } + +func odrGetBlock(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { + var block *types.Block + if bc != nil { + block = bc.GetBlockByHash(bhash) + } else { + block, _ = lc.GetBlockByHash(ctx, bhash) + } + if block == nil { + return nil + } + rlp, _ := rlp.EncodeToBytes(block) + return rlp +} + +func TestOdrGetReceiptsLes1(t *testing.T) { testOdr(t, 1, 1, odrGetReceipts) } + +func odrGetReceipts(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { + var receipts types.Receipts + if bc != nil { + receipts = core.GetBlockReceipts(db, bhash, core.GetBlockNumber(db, bhash)) + } else { + receipts, _ = light.GetBlockReceipts(ctx, lc.Odr(), bhash, core.GetBlockNumber(db, bhash)) + } + if receipts == nil { + return nil + } + rlp, _ := rlp.EncodeToBytes(receipts) + return rlp +} + +func TestOdrAccountsLes1(t *testing.T) { testOdr(t, 1, 1, odrAccounts) } + +func odrAccounts(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { + dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678") + acc := []common.Address{testBankAddress, acc1Addr, acc2Addr, dummyAddr} + + trie.ClearGlobalCache() + + var res []byte + for _, addr := range acc { + if bc != nil { + header := bc.GetHeaderByHash(bhash) + st, err := state.New(header.Root, db) + if err == nil { + bal := st.GetBalance(addr) + rlp, _ := rlp.EncodeToBytes(bal) + res = append(res, rlp...) + } + } else { + header := lc.GetHeaderByHash(bhash) + st := light.NewLightState(light.StateTrieID(header), lc.Odr()) + bal, err := st.GetBalance(ctx, addr) + if err == nil { + rlp, _ := rlp.EncodeToBytes(bal) + res = append(res, rlp...) + } + } + } + + return res +} + +func TestOdrContractCallLes1(t *testing.T) { testOdr(t, 1, 2, odrContractCall) } + +// fullcallmsg is the message type used for call transations. 
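For reference, the CHT entry verified in ChtRequest.Valid above is keyed by the 8-byte big-endian block number, and its value RLP-decodes into a light.ChtNode holding the canonical header hash and total difficulty. A minimal, self-contained sketch of that key/value shape (the hash and difficulty values are placeholders, not real chain data):

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// CHT entries are keyed by the 8-byte big-endian block number,
	// exactly as ChtRequest.Valid builds encNumber before VerifyProof.
	var key [8]byte
	binary.BigEndian.PutUint64(key[:], 1234567)

	// The value stored under that key is an RLP-encoded ChtNode:
	// canonical header hash plus total difficulty at that block.
	node := light.ChtNode{
		Hash: common.HexToHash("0xdeadbeef"), // placeholder hash, illustration only
		Td:   big.NewInt(1000000),            // placeholder total difficulty
	}
	enc, err := rlp.EncodeToBytes(node)
	if err != nil {
		panic(err)
	}

	var decoded light.ChtNode
	if err := rlp.DecodeBytes(enc, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("key=%x hash=%x... td=%v\n", key, decoded.Hash[:4], decoded.Td)
}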
+type fullcallmsg struct { + from *state.StateObject + to *common.Address + gas, gasPrice *big.Int + value *big.Int + data []byte +} + +// accessor boilerplate to implement core.Message +func (m fullcallmsg) From() (common.Address, error) { return m.from.Address(), nil } +func (m fullcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil } +func (m fullcallmsg) Nonce() uint64 { return m.from.Nonce() } +func (m fullcallmsg) To() *common.Address { return m.to } +func (m fullcallmsg) GasPrice() *big.Int { return m.gasPrice } +func (m fullcallmsg) Gas() *big.Int { return m.gas } +func (m fullcallmsg) Value() *big.Int { return m.value } +func (m fullcallmsg) Data() []byte { return m.data } + +// callmsg is the message type used for call transations. +type lightcallmsg struct { + from *light.StateObject + to *common.Address + gas, gasPrice *big.Int + value *big.Int + data []byte +} + +// accessor boilerplate to implement core.Message +func (m lightcallmsg) From() (common.Address, error) { return m.from.Address(), nil } +func (m lightcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil } +func (m lightcallmsg) Nonce() uint64 { return m.from.Nonce() } +func (m lightcallmsg) To() *common.Address { return m.to } +func (m lightcallmsg) GasPrice() *big.Int { return m.gasPrice } +func (m lightcallmsg) Gas() *big.Int { return m.gas } +func (m lightcallmsg) Value() *big.Int { return m.value } +func (m lightcallmsg) Data() []byte { return m.data } + +func odrContractCall(ctx context.Context, db ethdb.Database, config *core.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { + data := common.Hex2Bytes("60CD26850000000000000000000000000000000000000000000000000000000000000000") + + var res []byte + for i := 0; i < 3; i++ { + data[35] = byte(i) + if bc != nil { + header := bc.GetHeaderByHash(bhash) + statedb, err := state.New(header.Root, db) + if err == nil { + from := statedb.GetOrNewStateObject(testBankAddress) + from.SetBalance(common.MaxBig) + + msg := fullcallmsg{ + from: from, + gas: big.NewInt(100000), + gasPrice: big.NewInt(0), + value: big.NewInt(0), + data: data, + to: &testContractAddr, + } + + vmenv := core.NewEnv(statedb, config, bc, msg, header, config.VmConfig) + gp := new(core.GasPool).AddGas(common.MaxBig) + ret, _, _ := core.ApplyMessage(vmenv, msg, gp) + res = append(res, ret...) + } + } else { + header := lc.GetHeaderByHash(bhash) + state := light.NewLightState(light.StateTrieID(header), lc.Odr()) + from, err := state.GetOrNewStateObject(ctx, testBankAddress) + if err == nil { + from.SetBalance(common.MaxBig) + + msg := lightcallmsg{ + from: from, + gas: big.NewInt(100000), + gasPrice: big.NewInt(0), + value: big.NewInt(0), + data: data, + to: &testContractAddr, + } + + vmenv := light.NewEnv(ctx, state, config, lc, msg, header, config.VmConfig) + gp := new(core.GasPool).AddGas(common.MaxBig) + ret, _, _ := core.ApplyMessage(vmenv, msg, gp) + if vmenv.Error() == nil { + res = append(res, ret...) 
+ } + } + } + } + return res +} + +func testOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) { + // Assemble the test environment + pm, db, odr := newTestProtocolManagerMust(t, false, 4, testChainGen) + lpm, ldb, odr := newTestProtocolManagerMust(t, true, 0, nil) + _, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm) + select { + case <-time.After(time.Millisecond * 100): + case err := <-err1: + t.Fatalf("peer 1 handshake error: %v", err) + case err := <-err2: + t.Fatalf("peer 1 handshake error: %v", err) + } + + lpm.synchronise(lpeer) + + test := func(expFail uint64) { + for i := uint64(0); i <= pm.blockchain.CurrentHeader().GetNumberU64(); i++ { + bhash := core.GetCanonicalHash(db, i) + b1 := fn(light.NoOdr, db, pm.chainConfig, pm.blockchain.(*core.BlockChain), nil, bhash) + ctx, _ := context.WithTimeout(context.Background(), 200*time.Millisecond) + b2 := fn(ctx, ldb, lpm.chainConfig, nil, lpm.blockchain.(*light.LightChain), bhash) + eq := bytes.Equal(b1, b2) + exp := i < expFail + if exp && !eq { + t.Errorf("odr mismatch") + } + if !exp && eq { + t.Errorf("unexpected odr match") + } + } + } + + // temporarily remove peer to test odr fails + odr.UnregisterPeer(lpeer) + // expect retrievals to fail (except genesis block) without a les peer + test(expFail) + odr.RegisterPeer(lpeer) + // expect all retrievals to pass + test(5) + odr.UnregisterPeer(lpeer) + // still expect all retrievals to pass, now data should be cached locally + test(5) +} diff --git a/les/peer.go b/les/peer.go new file mode 100644 index 000000000000..a191eeeffe72 --- /dev/null +++ b/les/peer.go @@ -0,0 +1,583 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package les implements the Light Ethereum Subprotocol. 
+package les + +import ( + "errors" + "fmt" + "math/big" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/les/flowcontrol" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" +) + +var ( + errClosed = errors.New("peer set is closed") + errAlreadyRegistered = errors.New("peer is already registered") + errNotRegistered = errors.New("peer is not registered") +) + +const maxHeadInfoLen = 20 + +type peer struct { + *p2p.Peer + + rw p2p.MsgReadWriter + + version int // Protocol version negotiated + network int // Network ID being on + + id string + + firstHeadInfo, headInfo *newBlockHashData + headInfoLen int + lock sync.RWMutex + + newBlockHashChn chan newBlockHashData + + fcClient *flowcontrol.ClientNode // nil if the peer is server only + fcServer *flowcontrol.ServerNode // nil if the peer is client only + fcServerParams *flowcontrol.ServerParams + fcCosts requestCostTable +} + +func newPeer(version, network int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { + id := p.ID() + + return &peer{ + Peer: p, + rw: rw, + version: version, + network: network, + id: fmt.Sprintf("%x", id[:8]), + newBlockHashChn: make(chan newBlockHashData, 20), + } +} + +// Info gathers and returns a collection of metadata known about a peer. +func (p *peer) Info() *eth.PeerInfo { + return ð.PeerInfo{ + Version: p.version, + Difficulty: p.Td(), + Head: fmt.Sprintf("%x", p.Head()), + } +} + +// Head retrieves a copy of the current head (most recent) hash of the peer. +func (p *peer) Head() (hash common.Hash) { + p.lock.RLock() + defer p.lock.RUnlock() + + copy(hash[:], p.headInfo.Hash[:]) + return hash +} + +func (p *peer) HeadAndTd() (hash common.Hash, td *big.Int) { + p.lock.RLock() + defer p.lock.RUnlock() + + copy(hash[:], p.headInfo.Hash[:]) + return hash, p.headInfo.Td +} + +func (p *peer) headBlockInfo() blockInfo { + p.lock.RLock() + defer p.lock.RUnlock() + + return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td} +} + +func (p *peer) addNotify(announce *newBlockHashData) bool { + p.lock.Lock() + defer p.lock.Unlock() + + if announce.Td.Cmp(p.headInfo.Td) < 1 { + return false + } + if p.headInfoLen >= maxHeadInfoLen { + //return false + p.firstHeadInfo = p.firstHeadInfo.next + p.headInfoLen-- + } + hh := p.headInfo.Number - announce.ReorgDepth + if p.headInfo.haveHeaders < hh { + hh = p.headInfo.haveHeaders + } + announce.haveHeaders = hh + p.headInfo.next = announce + p.headInfo = announce + p.headInfoLen++ + return true +} + +func (p *peer) gotHeader(hash common.Hash, number uint64, td *big.Int) bool { + h := p.firstHeadInfo + ptr := 0 + for h != nil { + if h.Hash == hash { + if h.Number != number || h.Td.Cmp(td) != 0 { + return false + } + h.headKnown = true + h.haveHeaders = h.Number + p.firstHeadInfo = h + p.headInfoLen -= ptr + last := h + h = h.next + // propagate haveHeaders through the chain + for h != nil { + hh := last.Number - h.ReorgDepth + if last.haveHeaders < hh { + hh = last.haveHeaders + } + if hh > h.haveHeaders { + h.haveHeaders = hh + } else { + return true + } + last = h + h = h.next + } + return true + } + h = h.next + ptr++ + } + return true +} + +// Td retrieves the current total difficulty of a peer. 
+func (p *peer) Td() *big.Int { + p.lock.RLock() + defer p.lock.RUnlock() + + return new(big.Int).Set(p.headInfo.Td) +} + +func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error { + type req struct { + ReqID uint64 + Data interface{} + } + return p2p.Send(w, msgcode, req{reqID, data}) +} + +func sendResponse(w p2p.MsgWriter, msgcode, reqID, bv uint64, data interface{}) error { + type resp struct { + ReqID, BV uint64 + Data interface{} + } + return p2p.Send(w, msgcode, resp{reqID, bv, data}) +} + +func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 { + cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount) + if cost > p.fcServerParams.BufLimit { + cost = p.fcServerParams.BufLimit + } + return cost +} + +// SendNewBlockHash announces the availability of a number of blocks through +// a hash notification. +func (p *peer) SendNewBlockHash(request newBlockHashData) error { + return p2p.Send(p.rw, NewBlockHashMsg, request) +} + +// SendBlockHeaders sends a batch of block headers to the remote peer. +func (p *peer) SendBlockHeaders(reqID, bv uint64, headers []*types.Header) error { + return sendResponse(p.rw, BlockHeadersMsg, reqID, bv, headers) +} + +// SendBlockBodiesRLP sends a batch of block contents to the remote peer from +// an already RLP encoded format. +func (p *peer) SendBlockBodiesRLP(reqID, bv uint64, bodies []rlp.RawValue) error { + return sendResponse(p.rw, BlockBodiesMsg, reqID, bv, bodies) +} + +// SendCodeRLP sends a batch of arbitrary internal data, corresponding to the +// hashes requested. +func (p *peer) SendCode(reqID, bv uint64, data [][]byte) error { + return sendResponse(p.rw, CodeMsg, reqID, bv, data) +} + +// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the +// ones requested from an already RLP encoded format. +func (p *peer) SendReceiptsRLP(reqID, bv uint64, receipts []rlp.RawValue) error { + return sendResponse(p.rw, ReceiptsMsg, reqID, bv, receipts) +} + +// SendProofs sends a batch of merkle proofs, corresponding to the ones requested. +func (p *peer) SendProofs(reqID, bv uint64, proofs proofsData) error { + return sendResponse(p.rw, ProofsMsg, reqID, bv, proofs) +} + +// SendHeaderProofs sends a batch of header proofs, corresponding to the ones requested. +func (p *peer) SendHeaderProofs(reqID, bv uint64, proofs []ChtResp) error { + return sendResponse(p.rw, HeaderProofsMsg, reqID, bv, proofs) +} + +// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the +// specified header query, based on the hash of an origin block. +func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error { + glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse) + return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) +} + +// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the +// specified header query, based on the number of an origin block. 
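Every LES request above is wrapped with a request ID, every reply additionally carries the server's buffer value (BV), and request costs are computed as a base cost plus a per-item cost, capped at the advertised buffer limit. A rough, self-contained illustration of the wire shapes and the cost cap (the struct names and cost numbers are invented for the example):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// Wire shapes matching the anonymous structs used by sendRequest/sendResponse.
type lesRequest struct {
	ReqID uint64
	Data  interface{}
}

type lesResponse struct {
	ReqID, BV uint64
	Data      interface{}
}

// cappedCost mirrors GetRequestCost: base + per-item cost, never above the
// server's advertised buffer limit.
func cappedCost(baseCost, reqCost uint64, amount int, bufLimit uint64) uint64 {
	cost := baseCost + reqCost*uint64(amount)
	if cost > bufLimit {
		cost = bufLimit
	}
	return cost
}

func main() {
	// Invented cost table entry: 150k base, 30k per requested item.
	fmt.Println(cappedCost(150000, 30000, 4, 300000000))      // 270000
	fmt.Println(cappedCost(150000, 30000, 100000, 300000000)) // capped at 300000000

	enc, _ := rlp.EncodeToBytes(lesRequest{ReqID: 1, Data: []uint64{42}})
	fmt.Printf("encoded request: %x\n", enc)
}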
+func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error { + glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse) + return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) +} + +// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes +// specified. +func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error { + glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes)) + return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes) +} + +// RequestCode fetches a batch of arbitrary data from a node's known state +// data, corresponding to the specified hashes. +func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error { + glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(reqs)) + return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs) +} + +// RequestReceipts fetches a batch of transaction receipts from a remote node. +func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error { + glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes)) + return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes) +} + +// RequestProofs fetches a batch of merkle proofs from a remote node. +func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error { + glog.V(logger.Debug).Infof("%v fetching %v proofs", p, len(reqs)) + return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs) +} + +// RequestHeaderProofs fetches a batch of header merkle proofs from a remote node. +func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error { + glog.V(logger.Debug).Infof("%v fetching %v header proofs", p, len(reqs)) + return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs) +} + +func (p *peer) SendTxs(cost uint64, txs types.Transactions) error { + glog.V(logger.Debug).Infof("%v relaying %v txs", p, len(txs)) + p.fcServer.SendRequest(0, cost) + return p2p.Send(p.rw, SendTxMsg, txs) +} + +type keyValueEntry struct { + Key string + Value rlp.RawValue +} +type keyValueList []keyValueEntry +type keyValueMap map[string]rlp.RawValue + +func (l keyValueList) add(key string, val interface{}) keyValueList { + var entry keyValueEntry + entry.Key = key + if val == nil { + val = uint64(0) + } + enc, err := rlp.EncodeToBytes(val) + if err == nil { + entry.Value = enc + } + return append(l, entry) +} + +func (l keyValueList) decode() keyValueMap { + m := make(keyValueMap) + for _, entry := range l { + m[entry.Key] = entry.Value + } + return m +} + +func (m keyValueMap) get(key string, val interface{}) error { + enc, ok := m[key] + if !ok { + return errResp(ErrHandshakeMissingKey, "%s", key) + } + if val == nil { + return nil + } + return rlp.DecodeBytes(enc, val) +} + +func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) { + // Send out own handshake in a new thread + errc := make(chan error, 1) + go func() { + errc <- p2p.Send(p.rw, StatusMsg, sendList) + }() + // In the mean time retrieve the remote status message + msg, err := p.rw.ReadMsg() + if err != nil { + return nil, err + } + if msg.Code != StatusMsg { + return nil, errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) + } + if msg.Size > ProtocolMaxMsgSize { + return nil, errResp(ErrMsgTooLarge, "%v > %v", msg.Size, 
ProtocolMaxMsgSize) + } + // Decode the handshake + var recvList keyValueList + if err := msg.Decode(&recvList); err != nil { + return nil, errResp(ErrDecode, "msg %v: %v", msg, err) + } + if err := <-errc; err != nil { + return nil, err + } + return recvList, nil +} + +// Handshake executes the les protocol handshake, negotiating version number, +// network IDs, difficulties, head and genesis blocks. +func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error { + p.lock.Lock() + defer p.lock.Unlock() + + var send keyValueList + send = send.add("protocolVersion", uint64(p.version)) + send = send.add("networkId", uint64(p.network)) + send = send.add("headTd", td) + send = send.add("headHash", head) + send = send.add("headNum", headNum) + send = send.add("genesisHash", genesis) + if server != nil { + send = send.add("serveHeaders", nil) + send = send.add("serveChainSince", uint64(0)) + send = send.add("serveStateSince", uint64(0)) + send = send.add("txRelay", nil) + send = send.add("flowControl/BL", server.defParams.BufLimit) + send = send.add("flowControl/MRR", server.defParams.MinRecharge) + list := server.fcCostStats.getCurrentList() + send = send.add("flowControl/MRC", list) + p.fcCosts = list.decode() + } + recvList, err := p.sendReceiveHandshake(send) + if err != nil { + return err + } + recv := recvList.decode() + + var rGenesis, rHash common.Hash + var rVersion, rNetwork, rNum uint64 + var rTd *big.Int + + if err := recv.get("protocolVersion", &rVersion); err != nil { + return err + } + if err := recv.get("networkId", &rNetwork); err != nil { + return err + } + if err := recv.get("headTd", &rTd); err != nil { + return err + } + if err := recv.get("headHash", &rHash); err != nil { + return err + } + if err := recv.get("headNum", &rNum); err != nil { + return err + } + if err := recv.get("genesisHash", &rGenesis); err != nil { + return err + } + + if rGenesis != genesis { + return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis, genesis) + } + if int(rNetwork) != p.network { + return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network) + } + if int(rVersion) != p.version { + return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version) + } + if server != nil { + if recv.get("serveStateSince", nil) == nil { + return errResp(ErrUselessPeer, "wanted client, got server") + } + p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams) + } else { + if recv.get("serveChainSince", nil) != nil { + return errResp(ErrUselessPeer, "peer cannot serve chain") + } + if recv.get("serveStateSince", nil) != nil { + return errResp(ErrUselessPeer, "peer cannot serve state") + } + if recv.get("txRelay", nil) != nil { + return errResp(ErrUselessPeer, "peer cannot relay transactions") + } + params := &flowcontrol.ServerParams{} + if err := recv.get("flowControl/BL", &params.BufLimit); err != nil { + return err + } + if err := recv.get("flowControl/MRR", &params.MinRecharge); err != nil { + return err + } + var MRC RequestCostList + if err := recv.get("flowControl/MRC", &MRC); err != nil { + return err + } + p.fcServerParams = params + p.fcServer = flowcontrol.NewServerNode(params) + p.fcCosts = MRC.decode() + } + + p.firstHeadInfo = &newBlockHashData{Td: rTd, Hash: rHash, Number: rNum} + p.headInfo = p.firstHeadInfo + p.headInfoLen = 1 + return nil +} + +// String implements fmt.Stringer.
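The handshake exchanged above is an open-ended list of (key, RLP value) pairs rather than a fixed struct, so either side can skip keys it does not understand. A minimal sketch of that encoding, using local stand-ins for the unexported keyValueEntry/keyValueList types:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// Local stand-ins mirroring the unexported handshake types in les/peer.go.
type kvEntry struct {
	Key   string
	Value rlp.RawValue
}
type kvList []kvEntry

func add(l kvList, key string, val interface{}) kvList {
	enc, err := rlp.EncodeToBytes(val)
	if err != nil {
		panic(err)
	}
	return append(l, kvEntry{Key: key, Value: enc})
}

func main() {
	var send kvList
	send = add(send, "protocolVersion", uint64(1))
	send = add(send, "networkId", uint64(1))
	send = add(send, "flowControl/BL", uint64(300000000)) // invented buffer limit

	// The whole list travels as a single RLP value inside the Status message.
	enc, _ := rlp.EncodeToBytes(send)

	var recv kvList
	if err := rlp.DecodeBytes(enc, &recv); err != nil {
		panic(err)
	}
	var bl uint64
	for _, e := range recv {
		if e.Key == "flowControl/BL" {
			rlp.DecodeBytes(e.Value, &bl)
		}
	}
	fmt.Println("decoded flowControl/BL:", bl)
}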
+func (p *peer) String() string { + return fmt.Sprintf("Peer %s [%s]", p.id, + fmt.Sprintf("les/%d", p.version), + ) +} + +// peerSet represents the collection of active peers currently participating in +// the Light Ethereum sub-protocol. +type peerSet struct { + peers map[string]*peer + lock sync.RWMutex + closed bool +} + +// newPeerSet creates a new peer set to track the active participants. +func newPeerSet() *peerSet { + return &peerSet{ + peers: make(map[string]*peer), + } +} + +// Register injects a new peer into the working set, or returns an error if the +// peer is already known. +func (ps *peerSet) Register(p *peer) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + if ps.closed { + return errClosed + } + if _, ok := ps.peers[p.id]; ok { + return errAlreadyRegistered + } + ps.peers[p.id] = p + return nil +} + +// Unregister removes a remote peer from the active set, disabling any further +// actions to/from that particular entity. +func (ps *peerSet) Unregister(id string) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + if _, ok := ps.peers[id]; !ok { + return errNotRegistered + } + delete(ps.peers, id) + return nil +} + +// AllPeerIDs returns a list of all registered peer IDs +func (ps *peerSet) AllPeerIDs() []string { + ps.lock.RLock() + defer ps.lock.RUnlock() + + res := make([]string, len(ps.peers)) + idx := 0 + for id, _ := range ps.peers { + res[idx] = id + idx++ + } + return res +} + +// Peer retrieves the registered peer with the given id. +func (ps *peerSet) Peer(id string) *peer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + return ps.peers[id] +} + +// Len returns if the current number of peers in the set. +func (ps *peerSet) Len() int { + ps.lock.RLock() + defer ps.lock.RUnlock() + + return len(ps.peers) +} + +// BestPeer retrieves the known peer with the currently highest total difficulty. +func (ps *peerSet) BestPeer() *peer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + var ( + bestPeer *peer + bestTd *big.Int + ) + for _, p := range ps.peers { + if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 { + bestPeer, bestTd = p, td + } + } + return bestPeer +} + +// AllPeers returns all peers in a list +func (ps *peerSet) AllPeers() []*peer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + list := make([]*peer, len(ps.peers)) + i := 0 + for _, peer := range ps.peers { + list[i] = peer + i++ + } + return list +} + +// Close disconnects all peers. +// No new peers can be registered after Close has returned. +func (ps *peerSet) Close() { + ps.lock.Lock() + defer ps.lock.Unlock() + + for _, p := range ps.peers { + p.Disconnect(p2p.DiscQuitting) + } + ps.closed = true +} diff --git a/les/protocol.go b/les/protocol.go new file mode 100644 index 000000000000..0ef2d8c06c1c --- /dev/null +++ b/les/protocol.go @@ -0,0 +1,208 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package les implements the Light Ethereum Subprotocol. +package les + +import ( + "fmt" + "io" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" +) + +// Constants to match up protocol versions and messages +const ( + lpv1 = 1 +) + +// Supported versions of the les protocol (first is primary). +var ProtocolVersions = []uint{lpv1} + +// Number of implemented message corresponding to different protocol versions. +var ProtocolLengths = []uint64{15} + +const ( + NetworkId = 1 + ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message +) + +// les protocol message codes +const ( + // Protocol messages belonging to LPV1 + StatusMsg = 0x00 + NewBlockHashMsg = 0x01 + GetBlockHeadersMsg = 0x02 + BlockHeadersMsg = 0x03 + GetBlockBodiesMsg = 0x04 + BlockBodiesMsg = 0x05 + GetReceiptsMsg = 0x06 + ReceiptsMsg = 0x07 + GetProofsMsg = 0x08 + ProofsMsg = 0x09 + GetCodeMsg = 0x0a + CodeMsg = 0x0b + SendTxMsg = 0x0c + GetHeaderProofsMsg = 0x0d + HeaderProofsMsg = 0x0e +) + +type errCode int + +const ( + ErrMsgTooLarge = iota + ErrDecode + ErrInvalidMsgCode + ErrProtocolVersionMismatch + ErrNetworkIdMismatch + ErrGenesisBlockMismatch + ErrNoStatusMsg + ErrExtraStatusMsg + ErrSuspendedPeer + ErrUselessPeer + ErrRequestRejected + ErrUnexpectedResponse + ErrInvalidResponse + ErrTooManyTimeouts + ErrHandshakeMissingKey +) + +func (e errCode) String() string { + return errorToString[int(e)] +} + +// XXX change once legacy code is out +var errorToString = map[int]string{ + ErrMsgTooLarge: "Message too long", + ErrDecode: "Invalid message", + ErrInvalidMsgCode: "Invalid message code", + ErrProtocolVersionMismatch: "Protocol version mismatch", + ErrNetworkIdMismatch: "NetworkId mismatch", + ErrGenesisBlockMismatch: "Genesis block mismatch", + ErrNoStatusMsg: "No status message", + ErrExtraStatusMsg: "Extra status message", + ErrSuspendedPeer: "Suspended peer", + ErrRequestRejected: "Request rejected", + ErrUnexpectedResponse: "Unexpected response", + ErrInvalidResponse: "Invalid response", + ErrTooManyTimeouts: "Too many request timeouts", + ErrHandshakeMissingKey: "Key missing from handshake message", +} + +type chainManager interface { + GetBlockHashesFromHash(hash common.Hash, amount uint64) (hashes []common.Hash) + GetBlock(hash common.Hash) (block *types.Block) + Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) +} + +// statusData is the network packet for the status message. +type statusData struct { + ProtocolVersion uint32 + NetworkId uint32 + TD *big.Int + CurrentBlock common.Hash + GenesisBlock common.Hash + History uint64 + BL, MRR uint64 + MRC RequestCostList +} + +// newBlockHashData is the network packet for the block announcements. 
+type newBlockHashData struct{ + Hash common.Hash // Hash of one particular block being announced + Number uint64 // Number of one particular block being announced + Td *big.Int // Total difficulty of one particular block being announced + ReorgDepth uint64 + + haveHeaders uint64 // we have the headers of the remote peer's chain up to this number + headKnown bool + requested bool + next *newBlockHashData +} +type blockInfo struct { + Hash common.Hash // Hash of one particular block being announced + Number uint64 // Number of one particular block being announced + Td *big.Int // Total difficulty of one particular block being announced +} + +// getBlockHashesData is the network packet for the hash based hash retrieval. +type getBlockHashesData struct { + Hash common.Hash + Amount uint64 +} + +// getBlockHeadersData represents a block header query. +type getBlockHeadersData struct { + Origin hashOrNumber // Block from which to retrieve headers + Amount uint64 // Maximum number of headers to retrieve + Skip uint64 // Blocks to skip between consecutive headers + Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) +} + +// hashOrNumber is a combined field for specifying an origin block. +type hashOrNumber struct { + Hash common.Hash // Block hash from which to retrieve headers (excludes Number) + Number uint64 // Block hash from which to retrieve headers (excludes Hash) +} + +// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the +// two contained union fields. +func (hn *hashOrNumber) EncodeRLP(w io.Writer) error { + if hn.Hash == (common.Hash{}) { + return rlp.Encode(w, hn.Number) + } + if hn.Number != 0 { + return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number) + } + return rlp.Encode(w, hn.Hash) +} + +// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents +// into either a block hash or a block number. +func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error { + _, size, _ := s.Kind() + origin, err := s.Raw() + if err == nil { + switch { + case size == 32: + err = rlp.DecodeBytes(origin, &hn.Hash) + case size <= 8: + err = rlp.DecodeBytes(origin, &hn.Number) + default: + err = fmt.Errorf("invalid input size %d for origin", size) + } + } + return err +} + +// newBlockData is the network packet for the block propagation message. +type newBlockData struct { + Block *types.Block + TD *big.Int +} + +// blockBodiesData is the network packet for block content distribution. +type blockBodiesData []*types.Body + +// CodeData is the network response packet for a node data retrieval. 
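Because a header query origin is either a 32-byte hash or a small integer, the hashOrNumber union above is disambiguated purely by the size of the RLP payload: 32 bytes means a hash, up to 8 bytes means a block number. A quick self-contained look at the two encodings (the hash value is a placeholder):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A block number encodes as a short RLP string of at most 8 content bytes...
	numEnc, _ := rlp.EncodeToBytes(uint64(4000000))
	// ...while a hash always encodes as a 32-byte string, which is what
	// DecodeRLP dispatches on via the size reported by s.Kind().
	hashEnc, _ := rlp.EncodeToBytes(common.HexToHash("0x1234")) // placeholder hash

	fmt.Printf("number: % x (payload %d bytes)\n", numEnc, len(numEnc)-1)
	fmt.Printf("hash:   % x... (payload %d bytes)\n", hashEnc[:5], len(hashEnc)-1)
}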
+type CodeData []struct { + Value []byte +} + +type proofsData [][]rlp.RawValue diff --git a/les/request_test.go b/les/request_test.go new file mode 100644 index 000000000000..df02afb32daa --- /dev/null +++ b/les/request_test.go @@ -0,0 +1,94 @@ +package les + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/light" + "golang.org/x/net/context" +) + +var testBankSecureTrieKey = secAddr(testBankAddress) + +func secAddr(addr common.Address) []byte { + return crypto.Keccak256(addr[:]) +} + +type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest + +func TestBlockAccessLes1(t *testing.T) { testAccess(t, 1, tfBlockAccess) } + +func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest { + return &light.BlockRequest{Hash: bhash, Number: number} +} + +func TestReceiptsAccessLes1(t *testing.T) { testAccess(t, 1, tfReceiptsAccess) } + +func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest { + return &light.ReceiptsRequest{Hash: bhash, Number: number} +} + +func TestTrieEntryAccessLes1(t *testing.T) { testAccess(t, 1, tfTrieEntryAccess) } + +func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest { + return &light.TrieRequest{Id: light.StateTrieID(core.GetHeader(db, bhash, core.GetBlockNumber(db, bhash))), Key: testBankSecureTrieKey} +} + +func TestCodeAccessLes1(t *testing.T) { testAccess(t, 1, tfCodeAccess) } + +func tfCodeAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest { + header := core.GetHeader(db, bhash, core.GetBlockNumber(db, bhash)) + if header.GetNumberU64() < testContractDeployed { + return nil + } + sti := light.StateTrieID(header) + ci := light.StorageTrieID(sti, testContractAddr, common.Hash{}) + return &light.CodeRequest{Id: ci, Hash: crypto.Keccak256Hash(testContractCodeDeployed)} +} + +func testAccess(t *testing.T, protocol int, fn accessTestFn) { + // Assemble the test environment + pm, db, _ := newTestProtocolManagerMust(t, false, 4, testChainGen) + lpm, ldb, odr := newTestProtocolManagerMust(t, true, 0, nil) + _, err1, lpeer, err2 := newTestPeerPair("peer", protocol, pm, lpm) + select { + case <-time.After(time.Millisecond * 100): + case err := <-err1: + t.Fatalf("peer 1 handshake error: %v", err) + case err := <-err2: + t.Fatalf("peer 1 handshake error: %v", err) + } + + lpm.synchronise(lpeer) + + test := func(expFail uint64) { + for i := uint64(0); i <= pm.blockchain.CurrentHeader().GetNumberU64(); i++ { + bhash := core.GetCanonicalHash(db, i) + if req := fn(ldb, bhash, i); req != nil { + ctx, _ := context.WithTimeout(context.Background(), 200*time.Millisecond) + err := odr.Retrieve(ctx, req) + got := err == nil + exp := i < expFail + if exp && !got { + t.Errorf("object retrieval failed") + } + if !exp && got { + t.Errorf("unexpected object retrieval success") + } + } + } + } + + // temporarily remove peer to test odr fails + odr.UnregisterPeer(lpeer) + // expect retrievals to fail (except genesis block) without a les peer + test(0) + odr.RegisterPeer(lpeer) + // expect all retrievals to pass + test(5) + odr.UnregisterPeer(lpeer) +} diff --git a/les/server.go b/les/server.go new file mode 100644 index 000000000000..f751b6a91228 --- /dev/null +++ b/les/server.go @@ -0,0 +1,401 @@ +// Copyright 2015 The go-ethereum Authors +// 
This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package les implements the Light Ethereum Subprotocol. +package les + +import ( + "encoding/binary" + "fmt" + "math" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/les/flowcontrol" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +type LesServer struct { + protocolManager *ProtocolManager + fcManager *flowcontrol.ClientManager // nil if our node is client only + fcCostStats *requestCostStats + defParams *flowcontrol.ServerParams +} + +func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) { + pm, err := NewProtocolManager(config.ChainConfig, false, config.NetworkId, eth.EventMux(), eth.Pow(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil) + if err != nil { + return nil, err + } + pm.blockLoop() + + srv := &LesServer{protocolManager: pm} + pm.server = srv + + srv.defParams = &flowcontrol.ServerParams{ + BufLimit: 300000000, + MinRecharge: 50000, + } + srv.fcManager = flowcontrol.NewClientManager(uint64(config.LightServ), 10, 1000000000) + srv.fcCostStats = newCostStats(eth.ChainDb()) + return srv, nil +} + +func (s *LesServer) Protocols() []p2p.Protocol { + return s.protocolManager.SubProtocols +} + +func (s *LesServer) Start() { + s.protocolManager.Start() +} + +func (s *LesServer) Stop() { + s.fcCostStats.store() + s.fcManager.Stop() + go func() { + <-s.protocolManager.noMorePeers + }() + s.protocolManager.Stop() +} + +type requestCosts struct { + baseCost, reqCost uint64 +} + +type requestCostTable map[uint64]*requestCosts + +type RequestCostList []struct { + MsgCode, BaseCost, ReqCost uint64 +} + +func (list RequestCostList) decode() requestCostTable { + table := make(requestCostTable) + for _, e := range list { + table[e.MsgCode] = &requestCosts{ + baseCost: e.BaseCost, + reqCost: e.ReqCost, + } + } + return table +} + +func (table requestCostTable) encode() RequestCostList { + list := make(RequestCostList, len(table)) + for idx, code := range reqList { + list[idx].MsgCode = code + list[idx].BaseCost = table[code].baseCost + list[idx].ReqCost = table[code].reqCost + } + return list +} + +type linReg struct { + sumX, sumY, sumXX, sumXY float64 + cnt uint64 +} + +const linRegMaxCnt = 100000 + +func (l *linReg) add(x, y float64) { + if l.cnt >= linRegMaxCnt { + sub := float64(l.cnt+1-linRegMaxCnt) / linRegMaxCnt + l.sumX -= l.sumX * sub + l.sumY -= l.sumY * sub + l.sumXX -= l.sumXX * sub + l.sumXY -= l.sumXY * sub + l.cnt = linRegMaxCnt - 1 + } + l.cnt++ + l.sumX += x + l.sumY += y + l.sumXX += x * x + 
l.sumXY += x * y +} + +func (l *linReg) calc() (b, m float64) { + if l.cnt == 0 { + return 0, 0 + } + cnt := float64(l.cnt) + d := cnt*l.sumXX - l.sumX*l.sumX + if d < 0.001 { + return l.sumY / cnt, 0 + } + m = (cnt*l.sumXY - l.sumX*l.sumY) / d + b = (l.sumY / cnt) - (m * l.sumX / cnt) + return b, m +} + +func (l *linReg) toBytes() []byte { + var arr [40]byte + binary.BigEndian.PutUint64(arr[0:8], math.Float64bits(l.sumX)) + binary.BigEndian.PutUint64(arr[8:16], math.Float64bits(l.sumY)) + binary.BigEndian.PutUint64(arr[16:24], math.Float64bits(l.sumXX)) + binary.BigEndian.PutUint64(arr[24:32], math.Float64bits(l.sumXY)) + binary.BigEndian.PutUint64(arr[32:40], l.cnt) + return arr[:] +} + +func linRegFromBytes(data []byte) *linReg { + if len(data) != 40 { + return nil + } + l := &linReg{} + l.sumX = math.Float64frombits(binary.BigEndian.Uint64(data[0:8])) + l.sumY = math.Float64frombits(binary.BigEndian.Uint64(data[8:16])) + l.sumXX = math.Float64frombits(binary.BigEndian.Uint64(data[16:24])) + l.sumXY = math.Float64frombits(binary.BigEndian.Uint64(data[24:32])) + l.cnt = binary.BigEndian.Uint64(data[32:40]) + return l +} + +type requestCostStats struct { + lock sync.RWMutex + db ethdb.Database + stats map[uint64]*linReg +} + +type requestCostStatsRlp []struct { + MsgCode uint64 + Data []byte +} + +var rcStatsKey = []byte("_requestCostStats") + +func newCostStats(db ethdb.Database) *requestCostStats { + stats := make(map[uint64]*linReg) + for _, code := range reqList { + stats[code] = &linReg{cnt: 100} + } + + if db != nil { + data, err := db.Get(rcStatsKey) + var statsRlp requestCostStatsRlp + if err == nil { + err = rlp.DecodeBytes(data, &statsRlp) + } + if err == nil { + for _, r := range statsRlp { + if stats[r.MsgCode] != nil { + if l := linRegFromBytes(r.Data); l != nil { + stats[r.MsgCode] = l + } + } + } + } + } + + return &requestCostStats{ + db: db, + stats: stats, + } +} + +func (s *requestCostStats) store() { + s.lock.Lock() + defer s.lock.Unlock() + + statsRlp := make(requestCostStatsRlp, len(reqList)) + for i, code := range reqList { + statsRlp[i].MsgCode = code + statsRlp[i].Data = s.stats[code].toBytes() + } + + if data, err := rlp.EncodeToBytes(statsRlp); err == nil { + s.db.Put(rcStatsKey, data) + } +} + +func (s *requestCostStats) getCurrentList() RequestCostList { + s.lock.Lock() + defer s.lock.Unlock() + + list := make(RequestCostList, len(reqList)) + //fmt.Println("RequestCostList") + for idx, code := range reqList { + b, m := s.stats[code].calc() + //fmt.Println(code, s.stats[code].cnt, b/1000000, m/1000000) + if m < 0 { + b += m + m = 0 + } + if b < 0 { + b = 0 + } + + list[idx].MsgCode = code + list[idx].BaseCost = uint64(b * 2) + list[idx].ReqCost = uint64(m * 2) + } + return list +} + +func (s *requestCostStats) update(msgCode, reqCnt, cost uint64) { + s.lock.Lock() + defer s.lock.Unlock() + + c, ok := s.stats[msgCode] + if !ok || reqCnt == 0 { + return + } + c.add(float64(reqCnt), float64(cost)) +} + +func (pm *ProtocolManager) blockLoop() { + pm.wg.Add(1) + sub := pm.eventMux.Subscribe(core.ChainHeadEvent{}) + newCht := make(chan struct{}, 10) + newCht <- struct{}{} + go func() { + var mu sync.Mutex + var lastHead *types.Header + lastBroadcastTd := common.Big0 + for { + select { + case ev := <-sub.Chan(): + peers := pm.peers.AllPeers() + if len(peers) > 0 { + header := ev.Data.(core.ChainHeadEvent).Block.Header() + hash := header.Hash() + number := header.GetNumberU64() + td := core.GetTd(pm.chainDb, hash, number) + if td != nil && td.Cmp(lastBroadcastTd) > 0 { 
+ var reorg uint64 + if lastHead != nil { + reorg = lastHead.GetNumberU64() - core.FindCommonAncestor(pm.chainDb, header, lastHead).GetNumberU64() + } + lastHead = header + lastBroadcastTd = td + //fmt.Println("BROADCAST", number, hash, td, reorg) + announce := newBlockHashData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg} + for _, p := range peers { + select { + case p.newBlockHashChn <- announce: + default: + pm.removePeer(p.id) + } + } + } + } + newCht <- struct{}{} + case <-newCht: + go func() { + mu.Lock() + more := makeCht(pm.chainDb) + mu.Unlock() + if more { + time.Sleep(time.Millisecond * 10) + newCht <- struct{}{} + } + }() + case <-pm.quitSync: + sub.Unsubscribe() + pm.wg.Done() + return + } + } + }() +} + +var ( + lastChtKey = []byte("LastChtNumber") // chtNum (uint64 big endian) + chtPrefix = []byte("cht") // chtPrefix + chtNum (uint64 big endian) -> trie root hash + chtConfirmations = light.ChtFrequency / 2 +) + +func getChtRoot(db ethdb.Database, num uint64) common.Hash { + var encNumber [8]byte + binary.BigEndian.PutUint64(encNumber[:], num) + data, _ := db.Get(append(chtPrefix, encNumber[:]...)) + return common.BytesToHash(data) +} + +func storeChtRoot(db ethdb.Database, num uint64, root common.Hash) { + var encNumber [8]byte + binary.BigEndian.PutUint64(encNumber[:], num) + db.Put(append(chtPrefix, encNumber[:]...), root[:]) +} + +func makeCht(db ethdb.Database) bool { + headHash := core.GetHeadBlockHash(db) + headNum := core.GetBlockNumber(db, headHash) + + var newChtNum uint64 + if headNum > chtConfirmations { + newChtNum = (headNum - chtConfirmations) / light.ChtFrequency + } + + var lastChtNum uint64 + data, _ := db.Get(lastChtKey) + if len(data) == 8 { + lastChtNum = binary.BigEndian.Uint64(data[:]) + } + if newChtNum <= lastChtNum { + return false + } + + var t *trie.Trie + if lastChtNum > 0 { + var err error + t, err = trie.New(getChtRoot(db, lastChtNum), db) + if err != nil { + lastChtNum = 0 + } + } + if lastChtNum == 0 { + t, _ = trie.New(common.Hash{}, db) + } + + for num := lastChtNum * light.ChtFrequency; num < (lastChtNum+1)*light.ChtFrequency; num++ { + hash := core.GetCanonicalHash(db, num) + if hash == (common.Hash{}) { + panic("Canonical hash not found") + } + td := core.GetTd(db, hash, num) + if td == nil { + panic("TD not found") + } + var encNumber [8]byte + binary.BigEndian.PutUint64(encNumber[:], num) + var node light.ChtNode + node.Hash = hash + node.Td = td + data, _ := rlp.EncodeToBytes(node) + t.Update(encNumber[:], data) + } + + root, err := t.Commit() + if err != nil { + lastChtNum = 0 + } else { + lastChtNum++ + fmt.Printf("CHT %d %064x\n", lastChtNum, root) + storeChtRoot(db, lastChtNum, root) + var data [8]byte + binary.BigEndian.PutUint64(data[:], lastChtNum) + db.Put(lastChtKey, data[:]) + } + + return newChtNum > lastChtNum +} diff --git a/les/sync.go b/les/sync.go new file mode 100644 index 000000000000..f92f8ce04bbe --- /dev/null +++ b/les/sync.go @@ -0,0 +1,84 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package les + +import ( + "time" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/light" + "golang.org/x/net/context" +) + +const ( + //forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available + minDesiredPeerCount = 5 // Amount of peers desired to start syncing +) + +// syncer is responsible for periodically synchronising with the network, both +// downloading hashes and blocks as well as handling the announcement handler. +func (pm *ProtocolManager) syncer() { + // Start and ensure cleanup of sync mechanisms + //pm.fetcher.Start() + //defer pm.fetcher.Stop() + defer pm.downloader.Terminate() + + // Wait for different events to fire synchronisation operations + //forceSync := time.Tick(forceSyncCycle) + for { + select { + case <-pm.newPeerCh: +/* // Make sure we have peers to select from, then sync + if pm.peers.Len() < minDesiredPeerCount { + break + } + go pm.synchronise(pm.peers.BestPeer()) +*/ + /*case <-forceSync: + // Force a sync even if not enough peers are present + go pm.synchronise(pm.peers.BestPeer()) + */ + case <-pm.noMorePeers: + return + } + } +} + +func (pm *ProtocolManager) needToSync(peerHead blockInfo) bool { + head := pm.blockchain.CurrentHeader() + currentTd := core.GetTd(pm.chainDb, head.Hash(), head.Number.Uint64()) + return currentTd != nil && peerHead.Td.Cmp(currentTd) > 0 +} + +// synchronise tries to sync up our local block chain with a remote peer. +func (pm *ProtocolManager) synchronise(peer *peer) { + // Short circuit if no peers are available + if peer == nil { + return + } + + // Make sure the peer's TD is higher than our own. + if !pm.needToSync(peer.headBlockInfo()) { + return + } + + ctx, _ := context.WithTimeout(context.Background(), time.Second*5) + pm.blockchain.(*light.LightChain).SyncCht(ctx) + + pm.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync) +} diff --git a/les/txrelay.go b/les/txrelay.go new file mode 100644 index 000000000000..366e69a62c12 --- /dev/null +++ b/les/txrelay.go @@ -0,0 +1,156 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
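The CHT produced by makeCht above grows in fixed-size sections of light.ChtFrequency blocks and is only extended once a section is buried under chtConfirmations blocks, which is also what SyncCht relies on when the light chain syncs. A self-contained sketch of that section arithmetic (the 4096/2048 figures are assumed values for illustration, mirroring chtConfirmations = ChtFrequency/2):

package main

import "fmt"

// completedSections mirrors the calculation at the top of makeCht: the newest
// CHT section that can be generated, given the current head number, the
// section size and the required confirmation depth.
func completedSections(headNum, frequency, confirmations uint64) uint64 {
	if headNum <= confirmations {
		return 0
	}
	return (headNum - confirmations) / frequency
}

func main() {
	// Assumed parameters: 4096-block sections, half a section of confirmations.
	const frequency, confirmations = 4096, 2048

	fmt.Println(completedSections(10000, frequency, confirmations))   // 1
	fmt.Println(completedSections(2048, frequency, confirmations))    // 0: nothing buried deep enough yet
	fmt.Println(completedSections(1000000, frequency, confirmations)) // 243
}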
+package les + +import ( + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +type ltrInfo struct { + tx *types.Transaction + sentTo map[*peer]struct{} +} + +type LesTxRelay struct { + txSent map[common.Hash]*ltrInfo + txPending map[common.Hash]struct{} + ps *peerSet + peerList []*peer + peerStartPos int + lock sync.RWMutex +} + +func NewLesTxRelay() *LesTxRelay { + return &LesTxRelay{ + txSent: make(map[common.Hash]*ltrInfo), + txPending: make(map[common.Hash]struct{}), + ps: newPeerSet(), + } +} + +func (self *LesTxRelay) addPeer(p *peer) { + self.lock.Lock() + defer self.lock.Unlock() + + self.ps.Register(p) + self.peerList = self.ps.AllPeers() +} + +func (self *LesTxRelay) removePeer(id string) { + self.lock.Lock() + defer self.lock.Unlock() + + self.ps.Unregister(id) + self.peerList = self.ps.AllPeers() +} + +// send sends a list of transactions to at most a given number of peers at +// once, never resending any particular transaction to the same peer twice +func (self *LesTxRelay) send(txs types.Transactions, count int) { + sendTo := make(map[*peer]types.Transactions) + + self.peerStartPos++ // rotate the starting position of the peer list + if self.peerStartPos >= len(self.peerList) { + self.peerStartPos = 0 + } + + for _, tx := range txs { + hash := tx.Hash() + ltr, ok := self.txSent[hash] + if !ok { + ltr = <rInfo{ + tx: tx, + sentTo: make(map[*peer]struct{}), + } + self.txSent[hash] = ltr + self.txPending[hash] = struct{}{} + } + + if len(self.peerList) > 0 { + cnt := count + pos := self.peerStartPos + for { + peer := self.peerList[pos] + if _, ok := ltr.sentTo[peer]; !ok { + sendTo[peer] = append(sendTo[peer], tx) + ltr.sentTo[peer] = struct{}{} + cnt-- + } + if cnt == 0 { + break // sent it to the desired number of peers + } + pos++ + if pos == len(self.peerList) { + pos = 0 + } + if pos == self.peerStartPos { + break // tried all available peers + } + } + } + } + + for peer, list := range sendTo { + cost := peer.GetRequestCost(SendTxMsg, len(list)) + go func() { + peer.fcServer.SendRequest(0, cost) + peer.SendTxs(cost, list) + }() + } +} + +func (self *LesTxRelay) Send(txs types.Transactions) { + self.lock.Lock() + defer self.lock.Unlock() + + self.send(txs, 3) +} + +func (self *LesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) { + self.lock.Lock() + defer self.lock.Unlock() + + for _, hash := range mined { + delete(self.txPending, hash) + } + + for _, hash := range rollback { + self.txPending[hash] = struct{}{} + } + + if len(self.txPending) > 0 { + txs := make(types.Transactions, len(self.txPending)) + i := 0 + for hash, _ := range self.txPending { + txs[i] = self.txSent[hash].tx + i++ + } + self.send(txs, 1) + } +} + +func (self *LesTxRelay) Discard(hashes []common.Hash) { + self.lock.Lock() + defer self.lock.Unlock() + + for _, hash := range hashes { + delete(self.txSent, hash) + delete(self.txPending, hash) + } +} diff --git a/light/lightchain.go b/light/lightchain.go new file mode 100644 index 000000000000..8e45d54d308d --- /dev/null +++ b/light/lightchain.go @@ -0,0 +1,505 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . +package light + +import ( + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/pow" + "github.com/ethereum/go-ethereum/rlp" + "github.com/hashicorp/golang-lru" + "golang.org/x/net/context" +) + +var ( + bodyCacheLimit = 256 + blockCacheLimit = 256 +) + +// LightChain represents a canonical chain that by default only handles block +// headers, downloading block bodies and receipts on demand through an ODR +// interface. It only does header validation during chain insertion. +type LightChain struct { + hc *core.HeaderChain + chainDb ethdb.Database + odr OdrBackend + eventMux *event.TypeMux + genesisBlock *types.Block + + mu sync.RWMutex + chainmu sync.RWMutex + procmu sync.RWMutex + + bodyCache *lru.Cache // Cache for the most recent block bodies + bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format + blockCache *lru.Cache // Cache for the most recent entire blocks + + quit chan struct{} + running int32 // running must be called automically + // procInterrupt must be atomically called + procInterrupt int32 // interrupt signaler for block processing + wg sync.WaitGroup + + pow pow.PoW + validator core.HeaderValidator +} + +// NewLightChain returns a fully initialised light chain using information +// available in the database. It initialises the default Ethereum header +// validator. 
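The three LRU caches above (bodies, RLP-encoded bodies, whole blocks) come from the hashicorp/golang-lru package this file already imports; a minimal usage sketch of that cache API with a placeholder key:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/hashicorp/golang-lru"
)

func main() {
	// Same construction as bodyCache/blockCache in LightChain: a fixed-size
	// LRU keyed by block hash.
	cache, err := lru.New(256)
	if err != nil {
		panic(err)
	}

	hash := common.HexToHash("0xabcdef") // placeholder block hash
	cache.Add(hash, []byte("rlp-encoded body goes here"))

	if v, ok := cache.Get(hash); ok {
		fmt.Printf("cache hit: %d bytes\n", len(v.([]byte)))
	}
}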
+func NewLightChain(odr OdrBackend, config *core.ChainConfig, pow pow.PoW, mux *event.TypeMux) (*LightChain, error) { + bodyCache, _ := lru.New(bodyCacheLimit) + bodyRLPCache, _ := lru.New(bodyCacheLimit) + blockCache, _ := lru.New(blockCacheLimit) + + bc := &LightChain{ + chainDb: odr.Database(), + odr: odr, + eventMux: mux, + quit: make(chan struct{}), + bodyCache: bodyCache, + bodyRLPCache: bodyRLPCache, + blockCache: blockCache, + pow: pow, + } + + var err error + bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.Validator, bc.getProcInterrupt) + bc.SetValidator(core.NewHeaderValidator(config, bc.hc, pow)) + if err != nil { + return nil, err + } + + bc.genesisBlock, _ = bc.GetBlockByNumber(NoOdr, 0) + if bc.genesisBlock == nil { + bc.genesisBlock, err = core.WriteDefaultGenesisBlock(odr.Database()) + if err != nil { + return nil, err + } + glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") + } + + if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) { + // add trusted CHT + if config.DAOForkSupport { + WriteTrustedCht(bc.chainDb, TrustedCht{ + Number: 523, + Root: common.HexToHash("c9c3203ca7e58bf0ceaae50ed00c7ae234e7b3cc054aee086a1d0873f843df4e"), + }) + } else { + WriteTrustedCht(bc.chainDb, TrustedCht{ + Number: 523, + Root: common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"), + }) + } + glog.V(logger.Info).Infoln("Added trusted CHT for mainnet") + } else { + if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) { + // add trusted CHT for testnet + WriteTrustedCht(bc.chainDb, TrustedCht{ + Number: 319, + Root: common.HexToHash("43b679ff9b4918b0b19e6256f20e35877365ec3e20b38e3b2a02cef5606176dc"), + }) + glog.V(logger.Info).Infoln("Added trusted CHT for testnet") + } else { + DeleteTrustedCht(bc.chainDb) + } + } + + if err := bc.loadLastState(); err != nil { + return nil, err + } + // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain + for hash, _ := range core.BadHashes { + if header := bc.GetHeaderByHash(hash); header != nil { + glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]) + bc.SetHead(header.Number.Uint64() - 1) + glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation") + } + } + return bc, nil +} + +func (self *LightChain) getProcInterrupt() bool { + return atomic.LoadInt32(&self.procInterrupt) == 1 +} + +// Odr returns the ODR backend of the chain +func (self *LightChain) Odr() OdrBackend { + return self.odr +} + +// loadLastState loads the last known chain state from the database. This method +// assumes that the chain manager mutex is held. 
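getProcInterrupt above, together with the running and procInterrupt fields, is the usual stop-once pattern: Stop (further down) flips running with a compare-and-swap so it only takes effect once, then raises procInterrupt for long-running loops to poll. A tiny self-contained illustration of the same pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

type service struct {
	running       int32 // set to 1 exactly once via CAS when stopping
	procInterrupt int32 // polled atomically by worker loops
}

func (s *service) interrupted() bool {
	return atomic.LoadInt32(&s.procInterrupt) == 1
}

func (s *service) stop() bool {
	// Only the first caller wins; later stop calls become no-ops,
	// mirroring the CompareAndSwapInt32 guard in LightChain.Stop.
	if !atomic.CompareAndSwapInt32(&s.running, 0, 1) {
		return false
	}
	atomic.StoreInt32(&s.procInterrupt, 1)
	return true
}

func main() {
	s := &service{}
	fmt.Println(s.interrupted()) // false
	fmt.Println(s.stop())        // true: first stop takes effect
	fmt.Println(s.stop())        // false: already stopped
	fmt.Println(s.interrupted()) // true
}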
+func (self *LightChain) loadLastState() error { + if head := core.GetHeadHeaderHash(self.chainDb); head == (common.Hash{}) { + // Corrupt or empty database, init from scratch + self.Reset() + } else { + if header := self.GetHeaderByHash(head); header != nil { + self.hc.SetCurrentHeader(header) + } + } + + // Issue a status log and return + header := self.hc.CurrentHeader() + headerTd := self.GetTd(header.Hash(), header.Number.Uint64()) + glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd) + + return nil +} + +// SetHead rewinds the local chain to a new head. Everything above the new +// head will be deleted and the new one set. +func (bc *LightChain) SetHead(head uint64) { + bc.mu.Lock() + defer bc.mu.Unlock() + + bc.hc.SetHead(head, nil) + bc.loadLastState() +} + +// GasLimit returns the gas limit of the current HEAD block. +func (self *LightChain) GasLimit() *big.Int { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.hc.CurrentHeader().GasLimit +} + +// LastBlockHash return the hash of the HEAD block. +func (self *LightChain) LastBlockHash() common.Hash { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.hc.CurrentHeader().Hash() +} + +// Status returns status information about the current chain such as the HEAD Td, +// the HEAD hash and the hash of the genesis block. +func (self *LightChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) { + self.mu.RLock() + defer self.mu.RUnlock() + + header := self.hc.CurrentHeader() + hash := header.Hash() + return self.GetTd(hash, header.Number.Uint64()), hash, self.genesisBlock.Hash() +} + +// SetValidator sets the validator which is used to validate incoming headers. +func (self *LightChain) SetValidator(validator core.HeaderValidator) { + self.procmu.Lock() + defer self.procmu.Unlock() + self.validator = validator +} + +// Validator returns the current header validator. +func (self *LightChain) Validator() core.HeaderValidator { + self.procmu.RLock() + defer self.procmu.RUnlock() + return self.validator +} + +// State returns a new mutable state based on the current HEAD block. +func (self *LightChain) State() *LightState { + return NewLightState(StateTrieID(self.hc.CurrentHeader()), self.odr) +} + +// Reset purges the entire blockchain, restoring it to its genesis state. +func (bc *LightChain) Reset() { + bc.ResetWithGenesisBlock(bc.genesisBlock) +} + +// ResetWithGenesisBlock purges the entire blockchain, restoring it to the +// specified genesis state. +func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) { + // Dump the entire block chain and purge the caches + bc.SetHead(0) + + bc.mu.Lock() + defer bc.mu.Unlock() + + // Prepare the genesis block and reinitialise the chain + if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { + glog.Fatalf("failed to write genesis block TD: %v", err) + } + if err := core.WriteBlock(bc.chainDb, genesis); err != nil { + glog.Fatalf("failed to write genesis block: %v", err) + } + bc.genesisBlock = genesis + bc.hc.SetGenesis(bc.genesisBlock.Header()) + bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) +} + +// Accessors + +// Genesis returns the genesis block +func (bc *LightChain) Genesis() *types.Block { + return bc.genesisBlock +} + +// GetBody retrieves a block body (transactions and uncles) from the database +// or ODR service by hash, caching it if found. 
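+//
+// Retrievals that miss the local database go through the ODR backend, so the
+// call should be bounded by the caller's context, for example (sketch):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	body, err := lc.GetBody(ctx, blockHash)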
+func (self *LightChain) GetBody(ctx context.Context, hash common.Hash) (*types.Body, error) { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyCache.Get(hash); ok { + body := cached.(*types.Body) + return body, nil + } + body, err := GetBody(ctx, self.odr, hash, self.hc.GetBlockNumber(hash)) + if err != nil { + return nil, err + } + // Cache the found body for next time and return + self.bodyCache.Add(hash, body) + return body, nil +} + +// GetBodyRLP retrieves a block body in RLP encoding from the database or +// ODR service by hash, caching it if found. +func (self *LightChain) GetBodyRLP(ctx context.Context, hash common.Hash) (rlp.RawValue, error) { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyRLPCache.Get(hash); ok { + return cached.(rlp.RawValue), nil + } + body, err := GetBodyRLP(ctx, self.odr, hash, self.hc.GetBlockNumber(hash)) + if err != nil { + return nil, err + } + // Cache the found body for next time and return + self.bodyRLPCache.Add(hash, body) + return body, nil +} + +// HasBlock checks if a block is fully present in the database or not, caching +// it if present. +func (bc *LightChain) HasBlock(hash common.Hash) bool { + blk, _ := bc.GetBlockByHash(NoOdr, hash) + return blk != nil +} + +// GetBlock retrieves a block from the database or ODR service by hash and number, +// caching it if found. +func (self *LightChain) GetBlock(ctx context.Context, hash common.Hash, number uint64) (*types.Block, error) { + // Short circuit if the block's already in the cache, retrieve otherwise + if block, ok := self.blockCache.Get(hash); ok { + return block.(*types.Block), nil + } + block, err := GetBlock(ctx, self.odr, hash, number) + if err != nil { + return nil, err + } + // Cache the found block for next time and return + self.blockCache.Add(block.Hash(), block) + return block, nil +} + +// GetBlockByHash retrieves a block from the database or ODR service by hash, +// caching it if found. +func (self *LightChain) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return self.GetBlock(ctx, hash, self.hc.GetBlockNumber(hash)) +} + +// GetBlockByNumber retrieves a block from the database or ODR service by +// number, caching it (associated with its hash) if found. +func (self *LightChain) GetBlockByNumber(ctx context.Context, number uint64) (*types.Block, error) { + hash, err := GetCanonicalHash(ctx, self.odr, number) + if hash == (common.Hash{}) || err != nil { + return nil, err + } + return self.GetBlock(ctx, hash, number) +} + +// Stop stops the blockchain service. If any imports are currently in progress +// it will abort them using the procInterrupt. +func (bc *LightChain) Stop() { + if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { + return + } + close(bc.quit) + atomic.StoreInt32(&bc.procInterrupt, 1) + + bc.wg.Wait() + + glog.V(logger.Info).Infoln("Chain manager stopped") +} + +// Rollback is designed to remove a chain of links from the database that aren't +// certain enough to be valid. +func (self *LightChain) Rollback(chain []common.Hash) { + self.mu.Lock() + defer self.mu.Unlock() + + for i := len(chain) - 1; i >= 0; i-- { + hash := chain[i] + + if head := self.hc.CurrentHeader(); head.Hash() == hash { + self.hc.SetCurrentHeader(self.GetHeader(head.ParentHash, head.Number.Uint64()-1)) + } + } +} + +// postChainEvents iterates over the events generated by a chain insertion and +// posts them into the event mux. 
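+//
+// For a header that became the new canonical head the flow is roughly
+// (sketch):
+//
+//	events := []interface{}{core.ChainEvent{Block: block, Hash: block.Hash()}}
+//	self.postChainEvents(events) // posts the ChainEvent and, for the new head, a ChainHeadEvent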
+func (self *LightChain) postChainEvents(events []interface{}) {
+	for _, event := range events {
+		if event, ok := event.(core.ChainEvent); ok {
+			if self.LastBlockHash() == event.Hash {
+				self.eventMux.Post(core.ChainHeadEvent{Block: event.Block})
+			}
+		}
+		// Fire the insertion events individually too
+		self.eventMux.Post(event)
+	}
+}
+
+// InsertHeaderChain attempts to insert the given header chain into the local
+// chain, possibly creating a reorg. If an error is returned, it will return
+// the index number of the failing header as well as an error describing what
+// went wrong.
+//
+// The checkFreq parameter can be used to fine tune whether nonce verification
+// should be done or not. The reason behind the optional check is because some
+// of the header retrieval mechanisms already need to verify nonces, as well as
+// because nonces can be verified sparsely, not needing to check each.
+//
+// In the case of a light chain, InsertHeaderChain also creates and posts light
+// chain events when necessary.
+func (self *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+	// Make sure only one thread manipulates the chain at once
+	self.chainmu.Lock()
+	defer self.chainmu.Unlock()
+
+	self.wg.Add(1)
+	defer self.wg.Done()
+
+	var events []interface{}
+	whFunc := func(header *types.Header) error {
+		self.mu.Lock()
+		defer self.mu.Unlock()
+
+		status, err := self.hc.WriteHeader(header)
+
+		switch status {
+		case core.CanonStatTy:
+			if glog.V(logger.Debug) {
+				glog.Infof("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
+			}
+			events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})
+
+		case core.SideStatTy:
+			if glog.V(logger.Detail) {
+				glog.Infof("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
+			}
+			events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})
+
+		case core.SplitStatTy:
+			events = append(events, core.ChainSplitEvent{Block: types.NewBlockWithHeader(header)})
+		}
+
+		return err
+	}
+	i, err := self.hc.InsertHeaderChain(chain, checkFreq, whFunc)
+	go self.postChainEvents(events)
+	return i, err
+}
+
+// CurrentHeader retrieves the current head header of the canonical chain. The
+// header is retrieved from the HeaderChain's internal cache.
+func (self *LightChain) CurrentHeader() *types.Header {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	return self.hc.CurrentHeader()
+}
+
+// GetTd retrieves a block's total difficulty in the canonical chain from the
+// database by hash and number, caching it if found.
+func (self *LightChain) GetTd(hash common.Hash, number uint64) *big.Int {
+	return self.hc.GetTd(hash, number)
+}
+
+// GetTdByHash retrieves a block's total difficulty in the canonical chain from
+// the database by hash, caching it if found.
+func (self *LightChain) GetTdByHash(hash common.Hash) *big.Int {
+	return self.hc.GetTdByHash(hash)
+}
+
+// GetHeader retrieves a block header from the database by hash and number,
+// caching it if found.
+func (self *LightChain) GetHeader(hash common.Hash, number uint64) *types.Header {
+	return self.hc.GetHeader(hash, number)
+}
+
+// GetHeaderByHash retrieves a block header from the database by hash, caching
+// it if found.
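+//
+// For example (sketch):
+//
+//	header := lc.GetHeaderByHash(hash) // nil if the header is not stored locally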
+func (self *LightChain) GetHeaderByHash(hash common.Hash) *types.Header { + return self.hc.GetHeaderByHash(hash) +} + +// HasHeader checks if a block header is present in the database or not, caching +// it if present. +func (bc *LightChain) HasHeader(hash common.Hash) bool { + return bc.hc.HasHeader(hash) +} + +// GetBlockHashesFromHash retrieves a number of block hashes starting at a given +// hash, fetching towards the genesis block. +func (self *LightChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { + return self.hc.GetBlockHashesFromHash(hash, max) +} + +// GetHeaderByNumber retrieves a block header from the database by number, +// caching it (associated with its hash) if found. +func (self *LightChain) GetHeaderByNumber(number uint64) *types.Header { + return self.hc.GetHeaderByNumber(number) +} + +// GetHeaderByNumberOdr retrieves a block header from the database or network +// by number, caching it (associated with its hash) if found. +func (self *LightChain) GetHeaderByNumberOdr(ctx context.Context, number uint64) (*types.Header, error) { + if header := self.hc.GetHeaderByNumber(number); header != nil { + return header, nil + } + return GetHeaderByNumber(ctx, self.odr, number) +} + +func (self *LightChain) SyncCht(ctx context.Context) bool { + headNum := self.CurrentHeader().Number.Uint64() + cht := GetTrustedCht(self.chainDb) + if headNum+1 < cht.Number*ChtFrequency { + num := cht.Number*ChtFrequency - 1 + header, err := GetHeaderByNumber(ctx, self.odr, num) + if header != nil && err == nil { + self.mu.Lock() + if self.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() { + self.hc.SetCurrentHeader(header) + } + self.mu.Unlock() + return true + } + } + return false +} diff --git a/light/lightchain_test.go b/light/lightchain_test.go new file mode 100644 index 000000000000..07294f068476 --- /dev/null +++ b/light/lightchain_test.go @@ -0,0 +1,403 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package light + +import ( + "fmt" + "math/big" + "runtime" + "testing" + + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/pow" + "github.com/hashicorp/golang-lru" + "golang.org/x/net/context" +) + +// So we can deterministically seed different blockchains +var ( + canonicalSeed = 1 + forkSeed = 2 +) + +// makeHeaderChain creates a deterministic chain of headers rooted at parent. 
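+//
+// Typical use in these tests (see newCanonical below):
+//
+//	headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
+//	_, err := blockchain.InsertHeaderChain(headers, 1)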
+func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {
+	blocks, _ := core.GenerateChain(types.NewBlockWithHeader(parent), db, n, func(i int, b *core.BlockGen) {
+		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
+	})
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	return headers
+}
+
+func testChainConfig() *core.ChainConfig {
+	return &core.ChainConfig{HomesteadBlock: big.NewInt(0)}
+}
+
+// newCanonical creates a chain database and injects a deterministic canonical
+// header chain of length n into it.
+func newCanonical(n int) (ethdb.Database, *LightChain, error) {
+	// Create the new chain database
+	db, _ := ethdb.NewMemDatabase()
+	evmux := &event.TypeMux{}
+
+	// Initialize a fresh chain with only a genesis block
+	genesis, _ := core.WriteTestNetGenesisBlock(db)
+
+	blockchain, _ := NewLightChain(&dummyOdr{db: db}, testChainConfig(), core.FakePow{}, evmux)
+	// Create and inject the requested chain
+	if n == 0 {
+		return db, blockchain, nil
+	}
+	// Header-only chain requested
+	headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
+	_, err := blockchain.InsertHeaderChain(headers, 1)
+	return db, blockchain, err
+}
+
+func init() {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+}
+
+func thePow() pow.PoW {
+	pow, _ := ethash.NewForTesting()
+	return pow
+}
+
+func theLightChain(db ethdb.Database, t *testing.T) *LightChain {
+	var eventMux event.TypeMux
+	core.WriteTestNetGenesisBlock(db)
+	LightChain, err := NewLightChain(&dummyOdr{db: db}, testChainConfig(), thePow(), &eventMux)
+	if err != nil {
+		t.Error("failed creating LightChain:", err)
+		t.FailNow()
+		return nil
+	}
+
+	return LightChain
+}
+
+// Test fork of length N starting from block i
+func testFork(t *testing.T, LightChain *LightChain, i, n int, comparator func(td1, td2 *big.Int)) {
+	// Copy old chain up to #i into a new db
+	db, LightChain2, err := newCanonical(i)
+	if err != nil {
+		t.Fatal("could not make new canonical in testFork", err)
+	}
+	// Assert the chains have the same header/block at #i
+	var hash1, hash2 common.Hash
+	hash1 = LightChain.GetHeaderByNumber(uint64(i)).Hash()
+	hash2 = LightChain2.GetHeaderByNumber(uint64(i)).Hash()
+	if hash1 != hash2 {
+		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
+	}
+	// Extend the newly created chain
+	var (
+		headerChainB []*types.Header
+	)
+	headerChainB = makeHeaderChain(LightChain2.CurrentHeader(), n, db, forkSeed)
+	if _, err := LightChain2.InsertHeaderChain(headerChainB, 1); err != nil {
+		t.Fatalf("failed to insert forking chain: %v", err)
+	}
+	// Sanity check that the forked chain can be imported into the original
+	var tdPre, tdPost *big.Int
+
+	tdPre = LightChain.GetTdByHash(LightChain.CurrentHeader().Hash())
+	if err := testHeaderChainImport(headerChainB, LightChain); err != nil {
+		t.Fatalf("failed to import forked header chain: %v", err)
+	}
+	tdPost = LightChain.GetTdByHash(headerChainB[len(headerChainB)-1].Hash())
+	// Compare the total difficulties of the chains
+	comparator(tdPre, tdPost)
+}
+
+func printChain(bc *LightChain) {
+	for i := bc.CurrentHeader().GetNumberU64(); i > 0; i-- {
+		b := bc.GetHeaderByNumber(uint64(i))
+		fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty)
+	}
+}
+
+// testHeaderChainImport tries to process a chain of headers, writing them into
+// the database if successful.
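+//
+// For example (sketch, mirroring TestBrokenHeaderChain):
+//
+//	chain := makeHeaderChain(lc.CurrentHeader(), 5, db, forkSeed)
+//	err := testHeaderChainImport(chain, lc)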
+func testHeaderChainImport(chain []*types.Header, LightChain *LightChain) error { + for _, header := range chain { + // Try and validate the header + if err := LightChain.Validator().ValidateHeader(header, LightChain.GetHeaderByHash(header.ParentHash), false); err != nil { + return err + } + // Manually insert the header into the database, but don't reorganize (allows subsequent testing) + LightChain.mu.Lock() + core.WriteTd(LightChain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, LightChain.GetTdByHash(header.ParentHash))) + core.WriteHeader(LightChain.chainDb, header) + LightChain.mu.Unlock() + } + return nil +} + +// Tests that given a starting canonical chain of a given size, it can be extended +// with various length chains. +func TestExtendCanonicalHeaders(t *testing.T) { + length := 5 + + // Make first chain starting from genesis + _, processor, err := newCanonical(length) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + // Define the difficulty comparator + better := func(td1, td2 *big.Int) { + if td2.Cmp(td1) <= 0 { + t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1) + } + } + // Start fork from current height + testFork(t, processor, length, 1, better) + testFork(t, processor, length, 2, better) + testFork(t, processor, length, 5, better) + testFork(t, processor, length, 10, better) +} + +// Tests that given a starting canonical chain of a given size, creating shorter +// forks do not take canonical ownership. +func TestShorterForkHeaders(t *testing.T) { + length := 10 + + // Make first chain starting from genesis + _, processor, err := newCanonical(length) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + // Define the difficulty comparator + worse := func(td1, td2 *big.Int) { + if td2.Cmp(td1) >= 0 { + t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1) + } + } + // Sum of numbers must be less than `length` for this to be a shorter fork + testFork(t, processor, 0, 3, worse) + testFork(t, processor, 0, 7, worse) + testFork(t, processor, 1, 1, worse) + testFork(t, processor, 1, 7, worse) + testFork(t, processor, 5, 3, worse) + testFork(t, processor, 5, 4, worse) +} + +// Tests that given a starting canonical chain of a given size, creating longer +// forks do take canonical ownership. +func TestLongerForkHeaders(t *testing.T) { + length := 10 + + // Make first chain starting from genesis + _, processor, err := newCanonical(length) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + // Define the difficulty comparator + better := func(td1, td2 *big.Int) { + if td2.Cmp(td1) <= 0 { + t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1) + } + } + // Sum of numbers must be greater than `length` for this to be a longer fork + testFork(t, processor, 0, 11, better) + testFork(t, processor, 0, 15, better) + testFork(t, processor, 1, 10, better) + testFork(t, processor, 1, 12, better) + testFork(t, processor, 5, 6, better) + testFork(t, processor, 5, 8, better) +} + +// Tests that given a starting canonical chain of a given size, creating equal +// forks do take canonical ownership. 
+func TestEqualForkHeaders(t *testing.T) { + length := 10 + + // Make first chain starting from genesis + _, processor, err := newCanonical(length) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + // Define the difficulty comparator + equal := func(td1, td2 *big.Int) { + if td2.Cmp(td1) != 0 { + t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1) + } + } + // Sum of numbers must be equal to `length` for this to be an equal fork + testFork(t, processor, 0, 10, equal) + testFork(t, processor, 1, 9, equal) + testFork(t, processor, 2, 8, equal) + testFork(t, processor, 5, 5, equal) + testFork(t, processor, 6, 4, equal) + testFork(t, processor, 9, 1, equal) +} + +// Tests that chains missing links do not get accepted by the processor. +func TestBrokenHeaderChain(t *testing.T) { + // Make chain starting from genesis + db, LightChain, err := newCanonical(10) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + // Create a forked chain, and try to insert with a missing link + chain := makeHeaderChain(LightChain.CurrentHeader(), 5, db, forkSeed)[1:] + if err := testHeaderChainImport(chain, LightChain); err == nil { + t.Errorf("broken header chain not reported") + } +} + +type bproc struct{} + +func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil } + +func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header { + var chain []*types.Header + for i, difficulty := range d { + header := &types.Header{ + Coinbase: common.Address{seed}, + Number: big.NewInt(int64(i + 1)), + Difficulty: big.NewInt(int64(difficulty)), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + } + if i == 0 { + header.ParentHash = genesis.Hash() + } else { + header.ParentHash = chain[i-1].Hash() + } + chain = append(chain, types.CopyHeader(header)) + } + return chain +} + +type dummyOdr struct { + OdrBackend + db ethdb.Database +} + +func (odr *dummyOdr) Database() ethdb.Database { + return odr.db +} + +func (odr *dummyOdr) Retrieve(ctx context.Context, req OdrRequest) error { + return nil +} + +func chm(genesis *types.Block, db ethdb.Database) *LightChain { + odr := &dummyOdr{db: db} + var eventMux event.TypeMux + bc := &LightChain{odr: odr, chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: core.FakePow{}} + bc.hc, _ = core.NewHeaderChain(db, testChainConfig(), bc.Validator, bc.getProcInterrupt) + bc.bodyCache, _ = lru.New(100) + bc.bodyRLPCache, _ = lru.New(100) + bc.blockCache, _ = lru.New(100) + bc.SetValidator(bproc{}) + bc.ResetWithGenesisBlock(genesis) + + return bc +} + +// Tests that reorganizing a long difficult chain after a short easy one +// overwrites the canonical numbers and links in the database. +func TestReorgLongHeaders(t *testing.T) { + testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10) +} + +// Tests that reorganizing a short difficult chain after a long easy one +// overwrites the canonical numbers and links in the database. 
+func TestReorgShortHeaders(t *testing.T) {
+	testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11)
+}
+
+func testReorg(t *testing.T, first, second []int, td int64) {
+	// Create a pristine block chain
+	db, _ := ethdb.NewMemDatabase()
+	genesis, _ := core.WriteTestNetGenesisBlock(db)
+	bc := chm(genesis, db)
+
+	// Insert an easy and a difficult chain afterwards
+	bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), 1)
+	bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), 1)
+	// Check that the chain is valid number and link wise
+	prev := bc.CurrentHeader()
+	for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
+		if prev.ParentHash != header.Hash() {
+			t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
+		}
+	}
+	// Make sure the chain total difficulty is the correct one
+	want := new(big.Int).Add(genesis.Difficulty(), big.NewInt(td))
+	if have := bc.GetTdByHash(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
+		t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
+	}
+}
+
+// Tests that the insertion functions detect banned hashes.
+func TestBadHeaderHashes(t *testing.T) {
+	// Create a pristine block chain
+	db, _ := ethdb.NewMemDatabase()
+	genesis, _ := core.WriteTestNetGenesisBlock(db)
+	bc := chm(genesis, db)
+
+	// Create a chain, ban a hash and try to import
+	var err error
+	headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10)
+	core.BadHashes[headers[2].Hash()] = true
+	_, err = bc.InsertHeaderChain(headers, 1)
+	if !core.IsBadHashError(err) {
+		t.Errorf("error mismatch: want: BadHashError, have: %v", err)
+	}
+}
+
+// Tests that bad hashes are detected on boot, and the chain rolled back to a
+// good state prior to the bad hash.
+func TestReorgBadHeaderHashes(t *testing.T) { + // Create a pristine block chain + db, _ := ethdb.NewMemDatabase() + genesis, _ := core.WriteTestNetGenesisBlock(db) + bc := chm(genesis, db) + + // Create a chain, import and ban aferwards + headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 3, 4}, 10) + + if _, err := bc.InsertHeaderChain(headers, 1); err != nil { + t.Fatalf("failed to import headers: %v", err) + } + if bc.CurrentHeader().Hash() != headers[3].Hash() { + t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash()) + } + core.BadHashes[headers[3].Hash()] = true + defer func() { delete(core.BadHashes, headers[3].Hash()) }() + // Create a new chain manager and check it rolled back the state + ncm, err := NewLightChain(&dummyOdr{db: db}, testChainConfig(), core.FakePow{}, new(event.TypeMux)) + if err != nil { + t.Fatalf("failed to create new chain manager: %v", err) + } + if ncm.CurrentHeader().Hash() != headers[2].Hash() { + t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash()) + } +} diff --git a/light/odr.go b/light/odr.go index 4c69040ef869..679569bf903a 100644 --- a/light/odr.go +++ b/light/odr.go @@ -19,14 +19,22 @@ package light import ( + "math/big" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "golang.org/x/net/context" ) -// OdrBackend is an interface to a backend service that handles odr retrievals +// NoOdr is the default context passed to an ODR capable function when the ODR +// service is not required. +var NoOdr = context.Background() + +// OdrBackend is an interface to a backend service that handles ODR retrievals type OdrBackend interface { Database() ethdb.Database Retrieve(ctx context.Context, req OdrRequest) error @@ -37,17 +45,44 @@ type OdrRequest interface { StoreResult(db ethdb.Database) } +// TrieID identifies a state or account storage trie +type TrieID struct { + BlockHash, Root common.Hash + AccKey []byte +} + +// StateTrieID returns a TrieID for a state trie belonging to a certain block +// header. +func StateTrieID(header *types.Header) *TrieID { + return &TrieID{ + BlockHash: header.Hash(), + AccKey: nil, + Root: header.Root, + } +} + +// StorageTrieID returns a TrieID for a contract storage trie at a given account +// of a given state trie. It also requires the root hash of the trie for +// checking Merkle proofs. 
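+//
+// For example, the storage trie of a contract account is identified relative
+// to the state trie of a block header (sketch, mirroring DecodeObject):
+//
+//	stateID := StateTrieID(header)
+//	storageID := StorageTrieID(stateID, contractAddr, storageRoot)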
+func StorageTrieID(state *TrieID, addr common.Address, root common.Hash) *TrieID { + return &TrieID{ + BlockHash: state.BlockHash, + AccKey: crypto.Keccak256(addr[:]), + Root: root, + } +} + // TrieRequest is the ODR request type for state/storage trie entries type TrieRequest struct { OdrRequest - root common.Hash - key []byte - proof []rlp.RawValue + Id *TrieID + Key []byte + Proof []rlp.RawValue } // StoreResult stores the retrieved data in local database func (req *TrieRequest) StoreResult(db ethdb.Database) { - storeProof(db, req.proof) + storeProof(db, req.Proof) } // storeProof stores the new trie nodes obtained from a merkle proof in the database @@ -61,38 +96,61 @@ func storeProof(db ethdb.Database, proof []rlp.RawValue) { } } -// NodeDataRequest is the ODR request type for node data (used for retrieving contract code) -type NodeDataRequest struct { +// CodeRequest is the ODR request type for retrieving contract code +type CodeRequest struct { OdrRequest - hash common.Hash - data []byte + Id *TrieID + Hash common.Hash + Data []byte } -// GetData returns the retrieved node data after a successful request -func (req *NodeDataRequest) GetData() []byte { - return req.data +// StoreResult stores the retrieved data in local database +func (req *CodeRequest) StoreResult(db ethdb.Database) { + db.Put(req.Hash[:], req.Data) +} + +// BlockRequest is the ODR request type for retrieving block bodies +type BlockRequest struct { + OdrRequest + Hash common.Hash + Number uint64 + Rlp []byte } // StoreResult stores the retrieved data in local database -func (req *NodeDataRequest) StoreResult(db ethdb.Database) { - db.Put(req.hash[:], req.GetData()) +func (req *BlockRequest) StoreResult(db ethdb.Database) { + core.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp) } -var sha3_nil = crypto.Keccak256Hash(nil) +// ReceiptsRequest is the ODR request type for retrieving block bodies +type ReceiptsRequest struct { + OdrRequest + Hash common.Hash + Number uint64 + Receipts types.Receipts +} -// retrieveNodeData tries to retrieve node data with the given hash from the network -func retrieveNodeData(ctx context.Context, odr OdrBackend, hash common.Hash) ([]byte, error) { - if hash == sha3_nil { - return nil, nil - } - res, _ := odr.Database().Get(hash[:]) - if res != nil { - return res, nil - } - r := &NodeDataRequest{hash: hash} - if err := odr.Retrieve(ctx, r); err != nil { - return nil, err - } else { - return r.GetData(), nil - } +// StoreResult stores the retrieved data in local database +func (req *ReceiptsRequest) StoreResult(db ethdb.Database) { + core.WriteBlockReceipts(db, req.Hash, req.Number, req.Receipts) +} + +// TrieRequest is the ODR request type for state/storage trie entries +type ChtRequest struct { + OdrRequest + ChtNum, BlockNum uint64 + ChtRoot common.Hash + Header *types.Header + Td *big.Int + Proof []rlp.RawValue +} + +// StoreResult stores the retrieved data in local database +func (req *ChtRequest) StoreResult(db ethdb.Database) { + // if there is a canonical hash, there is a header too + core.WriteHeader(db, req.Header) + hash, num := req.Header.Hash(), req.Header.Number.Uint64() + core.WriteTd(db, hash, num, req.Td) + core.WriteCanonicalHash(db, hash, num) + //storeProof(db, req.Proof) } diff --git a/light/odr_test.go b/light/odr_test.go new file mode 100644 index 000000000000..2add51281d13 --- /dev/null +++ b/light/odr_test.go @@ -0,0 +1,324 @@ +package light + +import ( + "bytes" + "errors" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/net/context" +) + +var ( + testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) + testBankFunds = big.NewInt(100000000) + + acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) + + testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056") + testContractAddr common.Address +) + +type testOdr struct { + OdrBackend + sdb, ldb ethdb.Database + disable bool +} + +func (odr *testOdr) Database() ethdb.Database { + return odr.ldb +} + +var ErrOdrDisabled = errors.New("ODR disabled") + +func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { + if odr.disable { + return ErrOdrDisabled + } + switch req := req.(type) { + case *BlockRequest: + req.Rlp = core.GetBodyRLP(odr.sdb, req.Hash, core.GetBlockNumber(odr.sdb, req.Hash)) + case *ReceiptsRequest: + req.Receipts = core.GetBlockReceipts(odr.sdb, req.Hash, core.GetBlockNumber(odr.sdb, req.Hash)) + case *TrieRequest: + t, _ := trie.New(req.Id.Root, odr.sdb) + req.Proof = t.Prove(req.Key) + trie.ClearGlobalCache() + case *CodeRequest: + req.Data, _ = odr.sdb.Get(req.Hash[:]) + } + req.StoreResult(odr.ldb) + return nil +} + +type odrTestFn func(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte + +func TestOdrGetBlockLes1(t *testing.T) { testChainOdr(t, 1, 1, odrGetBlock) } + +func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte { + var block *types.Block + if bc != nil { + block = bc.GetBlockByHash(bhash) + } else { + block, _ = lc.GetBlockByHash(ctx, bhash) + } + if block == nil { + return nil + } + rlp, _ := rlp.EncodeToBytes(block) + return rlp +} + +func TestOdrGetReceiptsLes1(t *testing.T) { testChainOdr(t, 1, 1, odrGetReceipts) } + +func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte { + var receipts types.Receipts + if bc != nil { + receipts = core.GetBlockReceipts(db, bhash, core.GetBlockNumber(db, bhash)) + } else { + receipts, _ = GetBlockReceipts(ctx, lc.Odr(), bhash, core.GetBlockNumber(db, bhash)) + } + if receipts == nil { + return nil + } + rlp, _ := rlp.EncodeToBytes(receipts) + return rlp +} + +func TestOdrAccountsLes1(t *testing.T) { testChainOdr(t, 1, 1, odrAccounts) } + +func odrAccounts(ctx context.Context, db 
ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte { + dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678") + acc := []common.Address{testBankAddress, acc1Addr, acc2Addr, dummyAddr} + + trie.ClearGlobalCache() + + var res []byte + for _, addr := range acc { + if bc != nil { + header := bc.GetHeaderByHash(bhash) + st, err := state.New(header.Root, db) + if err == nil { + bal := st.GetBalance(addr) + rlp, _ := rlp.EncodeToBytes(bal) + res = append(res, rlp...) + } + } else { + header := lc.GetHeaderByHash(bhash) + st := NewLightState(StateTrieID(header), lc.Odr()) + bal, err := st.GetBalance(ctx, addr) + if err == nil { + rlp, _ := rlp.EncodeToBytes(bal) + res = append(res, rlp...) + } + } + } + + return res +} + +func TestOdrContractCallLes1(t *testing.T) { testChainOdr(t, 1, 2, odrContractCall) } + +// fullcallmsg is the message type used for call transations. +type fullcallmsg struct { + from *state.StateObject + to *common.Address + gas, gasPrice *big.Int + value *big.Int + data []byte +} + +// accessor boilerplate to implement core.Message +func (m fullcallmsg) From() (common.Address, error) { return m.from.Address(), nil } +func (m fullcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil } +func (m fullcallmsg) Nonce() uint64 { return m.from.Nonce() } +func (m fullcallmsg) To() *common.Address { return m.to } +func (m fullcallmsg) GasPrice() *big.Int { return m.gasPrice } +func (m fullcallmsg) Gas() *big.Int { return m.gas } +func (m fullcallmsg) Value() *big.Int { return m.value } +func (m fullcallmsg) Data() []byte { return m.data } + +// callmsg is the message type used for call transations. +type lightcallmsg struct { + from *StateObject + to *common.Address + gas, gasPrice *big.Int + value *big.Int + data []byte +} + +// accessor boilerplate to implement core.Message +func (m lightcallmsg) From() (common.Address, error) { return m.from.Address(), nil } +func (m lightcallmsg) FromFrontier() (common.Address, error) { return m.from.Address(), nil } +func (m lightcallmsg) Nonce() uint64 { return m.from.Nonce() } +func (m lightcallmsg) To() *common.Address { return m.to } +func (m lightcallmsg) GasPrice() *big.Int { return m.gasPrice } +func (m lightcallmsg) Gas() *big.Int { return m.gas } +func (m lightcallmsg) Value() *big.Int { return m.value } +func (m lightcallmsg) Data() []byte { return m.data } + +func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) []byte { + data := common.Hex2Bytes("60CD26850000000000000000000000000000000000000000000000000000000000000000") + + var res []byte + for i := 0; i < 3; i++ { + data[35] = byte(i) + if bc != nil { + header := bc.GetHeaderByHash(bhash) + statedb, err := state.New(header.Root, db) + if err == nil { + from := statedb.GetOrNewStateObject(testBankAddress) + from.SetBalance(common.MaxBig) + + msg := fullcallmsg{ + from: from, + gas: big.NewInt(100000), + gasPrice: big.NewInt(0), + value: big.NewInt(0), + data: data, + to: &testContractAddr, + } + + vmenv := core.NewEnv(statedb, testChainConfig(), bc, msg, header, vm.Config{}) + gp := new(core.GasPool).AddGas(common.MaxBig) + ret, _, _ := core.ApplyMessage(vmenv, msg, gp) + res = append(res, ret...) 
+ } + } else { + header := lc.GetHeaderByHash(bhash) + state := NewLightState(StateTrieID(header), lc.Odr()) + from, err := state.GetOrNewStateObject(ctx, testBankAddress) + if err == nil { + from.SetBalance(common.MaxBig) + + msg := lightcallmsg{ + from: from, + gas: big.NewInt(100000), + gasPrice: big.NewInt(0), + value: big.NewInt(0), + data: data, + to: &testContractAddr, + } + + vmenv := NewEnv(ctx, state, testChainConfig(), lc, msg, header, vm.Config{}) + gp := new(core.GasPool).AddGas(common.MaxBig) + ret, _, _ := core.ApplyMessage(vmenv, msg, gp) + if vmenv.Error() == nil { + res = append(res, ret...) + } + } + } + } + return res +} + +func testChainGen(i int, block *core.BlockGen) { + switch i { + case 0: + // In block 1, the test bank sends account #1 some ether. + tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey) + block.AddTx(tx) + case 1: + // In block 2, the test bank sends some more ether to account #1. + // acc1Addr passes it on to account #2. + // acc1Addr creates a test contract. + tx1, _ := types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testBankKey) + nonce := block.TxNonce(acc1Addr) + tx2, _ := types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(acc1Key) + nonce++ + tx3, _ := types.NewContractCreation(nonce, big.NewInt(0), big.NewInt(1000000), big.NewInt(0), testContractCode).SignECDSA(acc1Key) + testContractAddr = crypto.CreateAddress(acc1Addr, nonce) + block.AddTx(tx1) + block.AddTx(tx2) + block.AddTx(tx3) + case 2: + // Block 3 is empty but was mined by account #2. + block.SetCoinbase(acc2Addr) + block.SetExtra([]byte("yeehaw")) + data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001") + tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey) + block.AddTx(tx) + case 3: + // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). 
+ b2 := block.PrevBlock(1).Header() + b2.Extra = []byte("foo") + block.AddUncle(b2) + b3 := block.PrevBlock(2).Header() + b3.Extra = []byte("foo") + block.AddUncle(b3) + data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002") + tx, _ := types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), big.NewInt(100000), nil, data).SignECDSA(testBankKey) + block.AddTx(tx) + } +} + +func testChainOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) { + var ( + evmux = new(event.TypeMux) + pow = new(core.FakePow) + sdb, _ = ethdb.NewMemDatabase() + ldb, _ = ethdb.NewMemDatabase() + genesis = core.WriteGenesisBlockForTesting(sdb, core.GenesisAccount{testBankAddress, testBankFunds}) + ) + core.WriteGenesisBlockForTesting(ldb, core.GenesisAccount{testBankAddress, testBankFunds}) + // Assemble the test environment + blockchain, _ := core.NewBlockChain(sdb, testChainConfig(), pow, evmux) + gchain, _ := core.GenerateChain(genesis, sdb, 4, testChainGen) + if _, err := blockchain.InsertChain(gchain); err != nil { + panic(err) + } + + odr := &testOdr{sdb: sdb, ldb: ldb} + lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux) + lightchain.SetValidator(bproc{}) + headers := make([]*types.Header, len(gchain)) + for i, block := range gchain { + headers[i] = block.Header() + } + if _, err := lightchain.InsertHeaderChain(headers, 1); err != nil { + panic(err) + } + + test := func(expFail uint64) { + for i := uint64(0); i <= blockchain.CurrentHeader().GetNumberU64(); i++ { + bhash := core.GetCanonicalHash(sdb, i) + b1 := fn(NoOdr, sdb, blockchain, nil, bhash) + ctx, _ := context.WithTimeout(context.Background(), 200*time.Millisecond) + b2 := fn(ctx, ldb, nil, lightchain, bhash) + eq := bytes.Equal(b1, b2) + exp := i < expFail + if exp && !eq { + t.Errorf("odr mismatch") + } + if !exp && eq { + t.Errorf("unexpected odr match") + } + } + } + + odr.disable = true + // expect retrievals to fail (except genesis block) without a les peer + test(expFail) + odr.disable = false + // expect all retrievals to pass + test(5) + odr.disable = true + // still expect all retrievals to pass, now data should be cached locally + test(5) +} diff --git a/light/odr_util.go b/light/odr_util.go new file mode 100644 index 000000000000..458f8233b9bf --- /dev/null +++ b/light/odr_util.go @@ -0,0 +1,185 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+package light + +import ( + "bytes" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/net/context" +) + +var sha3_nil = crypto.Keccak256Hash(nil) + +var ( + ErrNoTrustedCht = errors.New("No trusted canonical hash trie") + ErrNoHeader = errors.New("Header not found") + + ChtFrequency = uint64(4096) + trustedChtKey = []byte("TrustedCHT") +) + +type ChtNode struct { + Hash common.Hash + Td *big.Int +} + +type TrustedCht struct { + Number uint64 + Root common.Hash +} + +func GetTrustedCht(db ethdb.Database) TrustedCht { + data, _ := db.Get(trustedChtKey) + var res TrustedCht + if err := rlp.DecodeBytes(data, &res); err != nil { + return TrustedCht{0, common.Hash{}} + } + return res +} + +func WriteTrustedCht(db ethdb.Database, cht TrustedCht) { + data, _ := rlp.EncodeToBytes(cht) + db.Put(trustedChtKey, data) +} + +func DeleteTrustedCht(db ethdb.Database) { + db.Delete(trustedChtKey) +} + +func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*types.Header, error) { + db := odr.Database() + hash := core.GetCanonicalHash(db, number) + if (hash != common.Hash{}) { + // if there is a canonical hash, there is a header too + header := core.GetHeader(db, hash, number) + if header == nil { + panic("Canonical hash present but header not found") + } + return header, nil + } + + cht := GetTrustedCht(db) + if number >= cht.Number*ChtFrequency { + return nil, ErrNoTrustedCht + } + + r := &ChtRequest{ChtRoot: cht.Root, ChtNum: cht.Number, BlockNum: number} + if err := odr.Retrieve(ctx, r); err != nil { + return nil, err + } else { + return r.Header, nil + } +} + +func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) { + hash := core.GetCanonicalHash(odr.Database(), number) + if (hash != common.Hash{}) { + return hash, nil + } + header, err := GetHeaderByNumber(ctx, odr, number) + if header != nil { + return header.Hash(), nil + } + return common.Hash{}, err +} + +// retrieveContractCode tries to retrieve the contract code of the given account +// with the given hash from the network (id points to the storage trie belonging +// to the same account) +func retrieveContractCode(ctx context.Context, odr OdrBackend, id *TrieID, hash common.Hash) ([]byte, error) { + if hash == sha3_nil { + return nil, nil + } + res, _ := odr.Database().Get(hash[:]) + if res != nil { + return res, nil + } + r := &CodeRequest{Id: id, Hash: hash} + if err := odr.Retrieve(ctx, r); err != nil { + return nil, err + } else { + return r.Data, nil + } +} + +// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. +func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (rlp.RawValue, error) { + if data := core.GetBodyRLP(odr.Database(), hash, number); data != nil { + return data, nil + } + r := &BlockRequest{Hash: hash, Number: number} + if err := odr.Retrieve(ctx, r); err != nil { + return nil, err + } else { + return r.Rlp, nil + } +} + +// GetBody retrieves the block body (transactons, uncles) corresponding to the +// hash. 
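+//
+// Like the other ODR helpers it falls back to a network retrieval through the
+// given backend, bounded by the caller's context (sketch, with odr an
+// OdrBackend):
+//
+//	body, err := GetBody(ctx, odr, hash, number)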
+func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Body, error) { + data, err := GetBodyRLP(ctx, odr, hash, number) + if err != nil { + return nil, err + } + body := new(types.Body) + if err := rlp.Decode(bytes.NewReader(data), body); err != nil { + glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err) + return nil, err + } + return body, nil +} + +// GetBlock retrieves an entire block corresponding to the hash, assembling it +// back from the stored header and body. +func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Block, error) { + // Retrieve the block header and body contents + header := core.GetHeader(odr.Database(), hash, number) + if header == nil { + return nil, ErrNoHeader + } + body, err := GetBody(ctx, odr, hash, number) + if err != nil { + return nil, err + } + // Reassemble the block and return + return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles), nil +} + +// GetBlockReceipts retrieves the receipts generated by the transactions included +// in a block given by its hash. +func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) { + receipts := core.GetBlockReceipts(odr.Database(), hash, number) + if receipts != nil { + return receipts, nil + } + r := &ReceiptsRequest{Hash: hash, Number: number} + if err := odr.Retrieve(ctx, r); err != nil { + return nil, err + } else { + return r.Receipts, nil + } +} diff --git a/light/state.go b/light/state.go index e18f9cdc5d40..08bb8e5baca6 100644 --- a/light/state.go +++ b/light/state.go @@ -33,10 +33,11 @@ var StartingNonce uint64 // state, retrieving unknown parts on-demand from the ODR backend. Changes are // never stored in the local database, only in the memory objects. type LightState struct { - odr OdrBackend - trie *LightTrie - + odr OdrBackend + trie *LightTrie + id *TrieID stateObjects map[string]*StateObject + refund *big.Int } // NewLightState creates a new LightState with the specified root. @@ -44,15 +45,25 @@ type LightState struct { // root is non-existent. In that case, ODR retrieval will always be unsuccessful // and every operation will return with an error or wait for the context to be // cancelled. 
-func NewLightState(root common.Hash, odr OdrBackend) *LightState { - tr := NewLightTrie(root, odr, true) +func NewLightState(id *TrieID, odr OdrBackend) *LightState { + var tr *LightTrie + if id != nil { + tr = NewLightTrie(id, odr, true) + } return &LightState{ odr: odr, trie: tr, + id: id, stateObjects: make(map[string]*StateObject), + refund: new(big.Int), } } +// AddRefund adds an amount to the refund value collected during a vm execution +func (self *LightState) AddRefund(gas *big.Int) { + self.refund.Add(self.refund, gas) +} + // HasAccount returns true if an account exists at the given address func (self *LightState) HasAccount(ctx context.Context, addr common.Address) (bool, error) { so, err := self.GetStateObject(ctx, addr) @@ -194,7 +205,7 @@ func (self *LightState) GetStateObject(ctx context.Context, addr common.Address) return nil, nil } - stateObject, err = DecodeObject(ctx, addr, self.odr, []byte(data)) + stateObject, err = DecodeObject(ctx, self.id, addr, self.odr, []byte(data)) if err != nil { return nil, err } @@ -258,12 +269,14 @@ func (self *LightState) CreateStateObject(ctx context.Context, addr common.Addre // Copy creates a copy of the state func (self *LightState) Copy() *LightState { // ignore error - we assume state-to-be-copied always exists - state := NewLightState(common.Hash{}, self.odr) + state := NewLightState(nil, self.odr) state.trie = self.trie + state.id = self.id for k, stateObject := range self.stateObjects { state.stateObjects[k] = stateObject.Copy() } + state.refund.Set(self.refund) return state } @@ -272,4 +285,10 @@ func (self *LightState) Copy() *LightState { func (self *LightState) Set(state *LightState) { self.trie = state.trie self.stateObjects = state.stateObjects + self.refund = state.refund +} + +// GetRefund returns the refund value collected during a vm execution +func (self *LightState) GetRefund() *big.Int { + return self.refund } diff --git a/light/state_object.go b/light/state_object.go index 030653c77072..0eb7d68b2cf7 100644 --- a/light/state_object.go +++ b/light/state_object.go @@ -40,7 +40,7 @@ func (self Code) String() string { } // Storage is a memory map cache of a contract storage -type Storage map[string]common.Hash +type Storage map[common.Hash]common.Hash // String returns a string representation of the storage cache func (self Storage) String() (str string) { @@ -102,7 +102,7 @@ func NewStateObject(address common.Address, odr OdrBackend) *StateObject { codeHash: emptyCodeHash, storage: make(Storage), } - object.trie = NewLightTrie(common.Hash{}, odr, true) + object.trie = NewLightTrie(&TrieID{}, odr, true) return object } @@ -135,8 +135,7 @@ func (self *StateObject) Storage() Storage { // GetState returns the storage value at the given address from either the cache // or the trie func (self *StateObject) GetState(ctx context.Context, key common.Hash) (common.Hash, error) { - strkey := key.Str() - value, exists := self.storage[strkey] + value, exists := self.storage[key] if !exists { var err error value, err = self.getAddr(ctx, key) @@ -144,7 +143,7 @@ func (self *StateObject) GetState(ctx context.Context, key common.Hash) (common. return common.Hash{}, err } if (value != common.Hash{}) { - self.storage[strkey] = value + self.storage[key] = value } } @@ -153,7 +152,7 @@ func (self *StateObject) GetState(ctx context.Context, key common.Hash) (common. 
// SetState sets the storage value at the given address func (self *StateObject) SetState(k, value common.Hash) { - self.storage[k.Str()] = value + self.storage[k] = value self.dirty = true } @@ -181,6 +180,9 @@ func (c *StateObject) SetBalance(amount *big.Int) { c.dirty = true } +// ReturnGas returns the gas back to the origin. Used by the Virtual machine or Closures +func (c *StateObject) ReturnGas(gas, price *big.Int) {} + // Copy creates a copy of the state object func (self *StateObject) Copy() *StateObject { stateObject := NewStateObject(self.Address(), self.odr) @@ -235,6 +237,23 @@ func (self *StateObject) Nonce() uint64 { return self.nonce } +// ForEachStorage calls a callback function for every key/value pair found +// in the local storage cache. Note that unlike core/state.StateObject, +// light.StateObject only returns cached values and doesn't download the +// entire storage tree. +func (self *StateObject) ForEachStorage(cb func(key, value common.Hash) bool) { + for h, v := range self.storage { + cb(h, v) + } +} + +// Never called, but must be present to allow StateObject to be used +// as a vm.Account interface that also satisfies the vm.ContractRef +// interface. Interfaces are awesome. +func (self *StateObject) Value() *big.Int { + panic("Value on StateObject should never be called") +} + // Encoding type extStateObject struct { @@ -245,7 +264,7 @@ type extStateObject struct { } // DecodeObject decodes an RLP-encoded state object. -func DecodeObject(ctx context.Context, address common.Address, odr OdrBackend, data []byte) (*StateObject, error) { +func DecodeObject(ctx context.Context, stateID *TrieID, address common.Address, odr OdrBackend, data []byte) (*StateObject, error) { var ( obj = &StateObject{address: address, odr: odr, storage: make(Storage)} ext extStateObject @@ -254,9 +273,10 @@ func DecodeObject(ctx context.Context, address common.Address, odr OdrBackend, d if err = rlp.DecodeBytes(data, &ext); err != nil { return nil, err } - obj.trie = NewLightTrie(ext.Root, odr, true) + trieID := StorageTrieID(stateID, address, ext.Root) + obj.trie = NewLightTrie(trieID, odr, true) if !bytes.Equal(ext.CodeHash, emptyCodeHash) { - if obj.code, err = retrieveNodeData(ctx, obj.odr, common.BytesToHash(ext.CodeHash)); err != nil { + if obj.code, err = retrieveContractCode(ctx, obj.odr, trieID, common.BytesToHash(ext.CodeHash)); err != nil { return nil, fmt.Errorf("can't find code for hash %x: %v", ext.CodeHash, err) } } diff --git a/light/state_test.go b/light/state_test.go index 2c2e6daea1b7..0aa44726c4df 100644 --- a/light/state_test.go +++ b/light/state_test.go @@ -22,34 +22,14 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" "golang.org/x/net/context" ) -type testOdr struct { - OdrBackend - sdb, ldb ethdb.Database -} - -func (odr *testOdr) Database() ethdb.Database { - return odr.ldb -} - -func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { - switch req := req.(type) { - case *TrieRequest: - t, _ := trie.New(req.root, odr.sdb) - req.proof = t.Prove(req.key) - trie.ClearGlobalCache() - case *NodeDataRequest: - req.data, _ = odr.sdb.Get(req.hash[:]) - } - req.StoreResult(odr.ldb) - return nil -} - func makeTestState() (common.Hash, ethdb.Database) { sdb, _ := ethdb.NewMemDatabase() st, _ := state.New(common.Hash{}, sdb) @@ -71,9 
+51,11 @@ func makeTestState() (common.Hash, ethdb.Database) { func TestLightStateOdr(t *testing.T) { root, sdb := makeTestState() + header := &types.Header{Root: root, Number: big.NewInt(0)} + core.WriteHeader(sdb, header) ldb, _ := ethdb.NewMemDatabase() odr := &testOdr{sdb: sdb, ldb: ldb} - ls := NewLightState(root, odr) + ls := NewLightState(StateTrieID(header), odr) ctx := context.Background() trie.ClearGlobalCache() @@ -156,9 +138,11 @@ func TestLightStateOdr(t *testing.T) { func TestLightStateSetCopy(t *testing.T) { root, sdb := makeTestState() + header := &types.Header{Root: root, Number: big.NewInt(0)} + core.WriteHeader(sdb, header) ldb, _ := ethdb.NewMemDatabase() odr := &testOdr{sdb: sdb, ldb: ldb} - ls := NewLightState(root, odr) + ls := NewLightState(StateTrieID(header), odr) ctx := context.Background() trie.ClearGlobalCache() @@ -233,9 +217,11 @@ func TestLightStateSetCopy(t *testing.T) { func TestLightStateDelete(t *testing.T) { root, sdb := makeTestState() + header := &types.Header{Root: root, Number: big.NewInt(0)} + core.WriteHeader(sdb, header) ldb, _ := ethdb.NewMemDatabase() odr := &testOdr{sdb: sdb, ldb: ldb} - ls := NewLightState(root, odr) + ls := NewLightState(StateTrieID(header), odr) ctx := context.Background() trie.ClearGlobalCache() diff --git a/light/trie.go b/light/trie.go index e9c96ea48edf..6de6b48494b4 100644 --- a/light/trie.go +++ b/light/trie.go @@ -17,7 +17,6 @@ package light import ( - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" "golang.org/x/net/context" @@ -25,28 +24,28 @@ import ( // LightTrie is an ODR-capable wrapper around trie.SecureTrie type LightTrie struct { - trie *trie.SecureTrie - originalRoot common.Hash - odr OdrBackend - db ethdb.Database + trie *trie.SecureTrie + id *TrieID + odr OdrBackend + db ethdb.Database } // NewLightTrie creates a new LightTrie instance. It doesn't instantly try to // access the db or network and retrieve the root node, it only initializes its // encapsulated SecureTrie at the first actual operation. 
-func NewLightTrie(root common.Hash, odr OdrBackend, useFakeMap bool) *LightTrie { +func NewLightTrie(id *TrieID, odr OdrBackend, useFakeMap bool) *LightTrie { return &LightTrie{ // SecureTrie is initialized before first request - originalRoot: root, - odr: odr, - db: odr.Database(), + id: id, + odr: odr, + db: odr.Database(), } } // retrieveKey retrieves a single key, returns true and stores nodes in local // database if successful func (t *LightTrie) retrieveKey(ctx context.Context, key []byte) bool { - r := &TrieRequest{root: t.originalRoot, key: key} + r := &TrieRequest{Id: t.id, Key: key} return t.odr.Retrieve(ctx, r) == nil } @@ -79,7 +78,7 @@ func (t *LightTrie) do(ctx context.Context, fallbackKey []byte, fn func() error) func (t *LightTrie) Get(ctx context.Context, key []byte) (res []byte, err error) { err = t.do(ctx, key, func() (err error) { if t.trie == nil { - t.trie, err = trie.NewSecure(t.originalRoot, t.db) + t.trie, err = trie.NewSecure(t.id.Root, t.db) } if err == nil { res, err = t.trie.TryGet(key) @@ -98,7 +97,7 @@ func (t *LightTrie) Get(ctx context.Context, key []byte) (res []byte, err error) func (t *LightTrie) Update(ctx context.Context, key, value []byte) (err error) { err = t.do(ctx, key, func() (err error) { if t.trie == nil { - t.trie, err = trie.NewSecure(t.originalRoot, t.db) + t.trie, err = trie.NewSecure(t.id.Root, t.db) } if err == nil { err = t.trie.TryUpdate(key, value) @@ -112,7 +111,7 @@ func (t *LightTrie) Update(ctx context.Context, key, value []byte) (err error) { func (t *LightTrie) Delete(ctx context.Context, key []byte) (err error) { err = t.do(ctx, key, func() (err error) { if t.trie == nil { - t.trie, err = trie.NewSecure(t.originalRoot, t.db) + t.trie, err = trie.NewSecure(t.id.Root, t.db) } if err == nil { err = t.trie.TryDelete(key) diff --git a/light/txpool.go b/light/txpool.go new file mode 100644 index 000000000000..e94b2da95ecd --- /dev/null +++ b/light/txpool.go @@ -0,0 +1,557 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package light + +import ( + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/net/context" +) + +// txPermanent is the number of mined blocks after a mined transaction is +// considered permanent and no rollback is expected +var txPermanent = uint64(500) + +// TxPool implements the transaction pool for light clients, which keeps track +// of the status of locally created transactions, detecting if they are included +// in a block (mined) or rolled back. 
There are no queued transactions since we +// always receive all locally signed transactions in the same order as they are +// created. +type TxPool struct { + config *core.ChainConfig + quit chan bool + eventMux *event.TypeMux + events event.Subscription + mu sync.RWMutex + chain *LightChain + odr OdrBackend + chainDb ethdb.Database + relay TxRelayBackend + head common.Hash + nonce map[common.Address]uint64 // "pending" nonce + pending map[common.Hash]*types.Transaction // pending transactions by tx hash + mined map[common.Hash][]*types.Transaction // mined transactions by block hash + clearIdx uint64 // earliest block nr that can contain mined tx info + + homestead bool +} + +// TxRelayBackend provides an interface to the mechanism that forwards transacions +// to the ETH network. The implementations of the functions should be non-blocking. +// +// Send instructs backend to forward new transactions +// NewHead notifies backend about a new head after processed by the tx pool, +// including mined and rolled back transactions since the last event +// Discard notifies backend about transactions that should be discarded either +// because they have been replaced by a re-send or because they have been mined +// long ago and no rollback is expected +type TxRelayBackend interface { + Send(txs types.Transactions) + NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) + Discard(hashes []common.Hash) +} + +// NewTxPool creates a new light transaction pool +func NewTxPool(config *core.ChainConfig, eventMux *event.TypeMux, chain *LightChain, relay TxRelayBackend) *TxPool { + pool := &TxPool{ + config: config, + nonce: make(map[common.Address]uint64), + pending: make(map[common.Hash]*types.Transaction), + mined: make(map[common.Hash][]*types.Transaction), + quit: make(chan bool), + eventMux: eventMux, + events: eventMux.Subscribe(core.ChainHeadEvent{}), + chain: chain, + relay: relay, + odr: chain.Odr(), + chainDb: chain.Odr().Database(), + head: chain.CurrentHeader().Hash(), + clearIdx: chain.CurrentHeader().GetNumberU64(), + } + go pool.eventLoop() + + return pool +} + +// currentState returns the light state of the current head header +func (pool *TxPool) currentState() *LightState { + return NewLightState(StateTrieID(pool.chain.CurrentHeader()), pool.odr) +} + +// GetNonce returns the "pending" nonce of a given address. It always queries +// the nonce belonging to the latest header too in order to detect if another +// client using the same key sent a transaction. 
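// Sketch only: how a wallet-style caller might use the pool's pending nonce to
// build and submit the next transaction. The key, recipient, value and gas
// price are placeholders; the crypto/ecdsa, math/big and params imports are
// assumed, mirroring the pattern used in txpool_test.go below.
func exampleSendWithPendingNonce(ctx context.Context, pool *TxPool, key *ecdsa.PrivateKey, from, to common.Address) error {
    nonce, err := pool.GetNonce(ctx, from) // max(locally pending nonce, on-chain nonce)
    if err != nil {
        return err
    }
    tx, err := types.NewTransaction(nonce, to, big.NewInt(1000), params.TxGas, big.NewInt(20000000000), nil).SignECDSA(key)
    if err != nil {
        return err
    }
    return pool.Add(ctx, tx) // validates, relays and starts tracking the tx
}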
+func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) { + nonce, err := pool.currentState().GetNonce(ctx, addr) + if err != nil { + return 0, err + } + sn, ok := pool.nonce[addr] + if ok && sn > nonce { + nonce = sn + } + if !ok || sn < nonce { + pool.nonce[addr] = nonce + } + return nonce, nil +} + +type txBlockData struct { + BlockHash common.Hash + BlockIndex uint64 + Index uint64 +} + +// storeTxBlockData stores the block position of a mined tx in the local db +func (pool *TxPool) storeTxBlockData(txh common.Hash, tbd txBlockData) { + //fmt.Println("storeTxBlockData", txh, tbd) + data, _ := rlp.EncodeToBytes(tbd) + pool.chainDb.Put(append(txh[:], byte(1)), data) +} + +// removeTxBlockData removes the stored block position of a rolled back tx +func (pool *TxPool) removeTxBlockData(txh common.Hash) { + //fmt.Println("removeTxBlockData", txh) + pool.chainDb.Delete(append(txh[:], byte(1))) +} + +// txStateChanges stores the recent changes between pending/mined states of +// transactions. True means mined, false means rolled back, no entry means no change +type txStateChanges map[common.Hash]bool + +// setState sets the status of a tx to either recently mined or recently rolled back +func (txc txStateChanges) setState(txHash common.Hash, mined bool) { + val, ent := txc[txHash] + if ent && (val != mined) { + delete(txc, txHash) + } else { + txc[txHash] = mined + } +} + +// getLists creates lists of mined and rolled back tx hashes +func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) { + for hash, val := range txc { + if val { + mined = append(mined, hash) + } else { + rollback = append(rollback, hash) + } + } + return +} + +// checkMinedTxs checks newly added blocks for the currently pending transactions +// and marks them as mined if necessary. It also stores block position in the db +// and adds them to the received txStateChanges map. +func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, idx uint64, txc txStateChanges) error { + //fmt.Println("checkMinedTxs") + if len(pool.pending) == 0 { + return nil + } + //fmt.Println("len(pool) =", len(pool.pending)) + + block, err := GetBlock(ctx, pool.odr, hash, idx) + var receipts types.Receipts + if err != nil { + //fmt.Println(err) + return err + } + //fmt.Println("len(block.Transactions()) =", len(block.Transactions())) + + list := pool.mined[hash] + for i, tx := range block.Transactions() { + txHash := tx.Hash() + //fmt.Println(" txHash:", txHash) + if tx, ok := pool.pending[txHash]; ok { + //fmt.Println("TX FOUND") + if receipts == nil { + receipts, err = GetBlockReceipts(ctx, pool.odr, hash, idx) + if err != nil { + return err + } + if len(receipts) != len(block.Transactions()) { + panic(nil) // should never happen if hashes did match + } + core.SetReceiptsData(block, receipts) + } + //fmt.Println("WriteReceipt", receipts[i].TxHash) + core.WriteReceipt(pool.chainDb, receipts[i]) + pool.storeTxBlockData(txHash, txBlockData{hash, idx, uint64(i)}) + delete(pool.pending, txHash) + list = append(list, tx) + txc.setState(txHash, true) + } + } + if list != nil { + pool.mined[hash] = list + } + return nil +} + +// rollbackTxs marks the transactions contained in recently rolled back blocks +// as rolled back. It also removes block position info from the db and adds them +// to the received txStateChanges map. 
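// Sketch of the txStateChanges bookkeeping used by checkMinedTxs and
// rollbackTxs (illustration only): a hash that is marked mined and then rolled
// back within the same head update cancels out and is reported in neither list.
func exampleTxStateChanges() {
    txc := make(txStateChanges)
    a, b := common.Hash{1}, common.Hash{2}
    txc.setState(a, true)  // a newly mined
    txc.setState(b, true)  // b newly mined
    txc.setState(b, false) // b rolled back again: its entry is dropped
    mined, rollback := txc.getLists()
    fmt.Println(len(mined), len(rollback)) // prints "1 0"
}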
+func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) { + if list, ok := pool.mined[hash]; ok { + for _, tx := range list { + txHash := tx.Hash() + pool.removeTxBlockData(txHash) + pool.pending[txHash] = tx + txc.setState(txHash, false) + } + delete(pool.mined, hash) + } +} + +// setNewHead sets a new head header, processing (and rolling back if necessary) +// the blocks since the last known head and returns a txStateChanges map containing +// the recently mined and rolled back transaction hashes. If an error (context +// timeout) occurs during checking new blocks, it leaves the locally known head +// at the latest checked block and still returns a valid txStateChanges, making it +// possible to continue checking the missing blocks at the next chain head event +func (pool *TxPool) setNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) { + txc := make(txStateChanges) + oldh := pool.chain.GetHeaderByHash(pool.head) + newh := newHeader + // find common ancestor, create list of rolled back and new block hashes + var oldHashes, newHashes []common.Hash + for oldh.Hash() != newh.Hash() { + if oldh.GetNumberU64() >= newh.GetNumberU64() { + oldHashes = append(oldHashes, oldh.Hash()) + oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1) + } + if oldh.GetNumberU64() < newh.GetNumberU64() { + newHashes = append(newHashes, newh.Hash()) + newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1) + if newh == nil { + // happens when CHT syncing, nothing to do + newh = oldh + } + } + } + if oldh.GetNumberU64() < pool.clearIdx { + pool.clearIdx = oldh.GetNumberU64() + } + // roll back old blocks + for _, hash := range oldHashes { + pool.rollbackTxs(hash, txc) + } + pool.head = oldh.Hash() + // check mined txs of new blocks (array is in reversed order) + for i := len(newHashes) - 1; i >= 0; i-- { + hash := newHashes[i] + if err := pool.checkMinedTxs(ctx, hash, newHeader.GetNumberU64()-uint64(i), txc); err != nil { + return txc, err + } + pool.head = hash + } + + // clear old mined tx entries of old blocks + if idx := newHeader.GetNumberU64(); idx > pool.clearIdx+txPermanent { + idx2 := idx - txPermanent + for i := pool.clearIdx; i < idx2; i++ { + hash := core.GetCanonicalHash(pool.chainDb, i) + if list, ok := pool.mined[hash]; ok { + hashes := make([]common.Hash, len(list)) + for i, tx := range list { + hashes[i] = tx.Hash() + } + pool.relay.Discard(hashes) + delete(pool.mined, hash) + } + } + pool.clearIdx = idx2 + } + + return txc, nil +} + +// blockCheckTimeout is the time limit for checking new blocks for mined +// transactions. Checking resumes at the next chain head event if timed out. 
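// Sketch: a do-nothing TxRelayBackend (illustration only, similar in spirit to
// the testTxRelay in txpool_test.go below). A real backend would forward the
// transactions to the network; all methods are expected to be non-blocking.
type nopRelay struct{}

func (nopRelay) Send(txs types.Transactions)                                           {}
func (nopRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {}
func (nopRelay) Discard(hashes []common.Hash)                                          {}

// A pool could then be wired up as, for example:
//	pool := NewTxPool(chainConfig, eventMux, lightchain, nopRelay{})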
+const blockCheckTimeout = time.Second * 3 + +// eventLoop processes chain head events and also notifies the tx relay backend +// about the new head hash and tx state changes +func (pool *TxPool) eventLoop() { + for ev := range pool.events.Chan() { + switch ev.Data.(type) { + case core.ChainHeadEvent: + pool.mu.Lock() + ctx, _ := context.WithTimeout(context.Background(), blockCheckTimeout) + head := pool.chain.CurrentHeader() + txc, _ := pool.setNewHead(ctx, head) + m, r := txc.getLists() + pool.relay.NewHead(pool.head, m, r) + pool.homestead = pool.config.IsHomestead(head.Number) + pool.mu.Unlock() + } + } +} + +// Stop stops the light transaction pool +func (pool *TxPool) Stop() { + close(pool.quit) + pool.events.Unsubscribe() + glog.V(logger.Info).Infoln("Transaction pool stopped") +} + +// Stats returns the number of currently pending (locally created) transactions +func (pool *TxPool) Stats() (pending int) { + pool.mu.RLock() + defer pool.mu.RUnlock() + + pending = len(pool.pending) + return +} + +// validateTx checks whether a transaction is valid according to the consensus rules. +func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error { + // Validate sender + var ( + from common.Address + err error + ) + + // Validate the transaction sender and it's sig. Throw + // if the from fields is invalid. + if from, err = tx.From(); err != nil { + return core.ErrInvalidSender + } + + // Make sure the account exist. Non existent accounts + // haven't got funds and well therefor never pass. + currentState := pool.currentState() + if h, err := currentState.HasAccount(ctx, from); err == nil { + if !h { + return core.ErrNonExistentAccount + } + } else { + return err + } + + // Last but not least check for nonce errors + if n, err := currentState.GetNonce(ctx, from); err == nil { + if n > tx.Nonce() { + return core.ErrNonce + } + } else { + return err + } + + // Check the transaction doesn't exceed the current + // block limit gas. + header := pool.chain.GetHeaderByHash(pool.head) + if header.GasLimit.Cmp(tx.Gas()) < 0 { + return core.ErrGasLimit + } + + // Transactions can't be negative. This may never happen + // using RLP decoded transactions but may occur if you create + // a transaction using the RPC for example. + if tx.Value().Cmp(common.Big0) < 0 { + return core.ErrNegativeValue + } + + // Transactor should have enough funds to cover the costs + // cost == V + GP * GL + if b, err := currentState.GetBalance(ctx, from); err == nil { + if b.Cmp(tx.Cost()) < 0 { + return core.ErrInsufficientFunds + } + } else { + return err + } + + // Should supply enough intrinsic gas + if tx.Gas().Cmp(core.IntrinsicGas(tx.Data(), core.MessageCreatesContract(tx), pool.homestead)) < 0 { + return core.ErrIntrinsicGas + } + + return nil +} + +// add validates a new transaction and sets its state pending if processable. +// It also updates the locally stored nonce if necessary. +func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error { + hash := tx.Hash() + + if self.pending[hash] != nil { + return fmt.Errorf("Known transaction (%x)", hash[:4]) + } + err := self.validateTx(ctx, tx) + if err != nil { + return err + } + + if _, ok := self.pending[hash]; !ok { + self.pending[hash] = tx + + nonce := tx.Nonce() + 1 + addr, _ := tx.From() + if nonce > self.nonce[addr] { + self.nonce[addr] = nonce + } + + // Notify the subscribers. 
This event is posted in a goroutine + // because it's possible that somewhere during the post "Remove transaction" + // gets called which will then wait for the global tx pool lock and deadlock. + go self.eventMux.Post(core.TxPreEvent{tx}) + } + + if glog.V(logger.Debug) { + var toname string + if to := tx.To(); to != nil { + toname = common.Bytes2Hex(to[:4]) + } else { + toname = "[NEW_CONTRACT]" + } + // we can ignore the error here because From is + // verified in ValidateTransaction. + f, _ := tx.From() + from := common.Bytes2Hex(f[:4]) + glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, hash) + } + + return nil +} + +// Add adds a transaction to the pool if valid and passes it to the tx relay +// backend +func (self *TxPool) Add(ctx context.Context, tx *types.Transaction) error { + self.mu.Lock() + defer self.mu.Unlock() + + data, err := rlp.EncodeToBytes(tx) + if err != nil { + return err + } + + if err := self.add(ctx, tx); err != nil { + return err + } + //fmt.Println("Send", tx.Hash()) + self.relay.Send(types.Transactions{tx}) + + self.chainDb.Put(tx.Hash().Bytes(), data) + return nil +} + +// AddTransactions adds all valid transactions to the pool and passes them to +// the tx relay backend +func (self *TxPool) AddTransactions(ctx context.Context, txs []*types.Transaction) { + self.mu.Lock() + defer self.mu.Unlock() + var sendTx types.Transactions + + for _, tx := range txs { + if err := self.add(ctx, tx); err != nil { + glog.V(logger.Debug).Infoln("tx error:", err) + } else { + sendTx = append(sendTx, tx) + h := tx.Hash() + glog.V(logger.Debug).Infof("tx %x\n", h[:4]) + } + } + + if len(sendTx) > 0 { + self.relay.Send(sendTx) + } +} + +// GetTransaction returns a transaction if it is contained in the pool +// and nil otherwise. +func (tp *TxPool) GetTransaction(hash common.Hash) *types.Transaction { + // check the txs first + if tx, ok := tp.pending[hash]; ok { + return tx + } + return nil +} + +// GetTransactions returns all currently processable transactions. +// The returned slice may be modified by the caller. +func (self *TxPool) GetTransactions() (txs types.Transactions) { + self.mu.RLock() + defer self.mu.RUnlock() + + txs = make(types.Transactions, len(self.pending)) + i := 0 + for _, tx := range self.pending { + txs[i] = tx + i++ + } + return txs +} + +// Content retrieves the data content of the transaction pool, returning all the +// pending as well as queued transactions, grouped by account and nonce. +func (self *TxPool) Content() (map[common.Address]map[uint64][]*types.Transaction, map[common.Address]map[uint64][]*types.Transaction) { + self.mu.RLock() + defer self.mu.RUnlock() + + // Retrieve all the pending transactions and sort by account and by nonce + pending := make(map[common.Address]map[uint64][]*types.Transaction) + for _, tx := range self.pending { + account, _ := tx.From() + + owned, ok := pending[account] + if !ok { + owned = make(map[uint64][]*types.Transaction) + pending[account] = owned + } + owned[tx.Nonce()] = append(owned[tx.Nonce()], tx) + } + // There are no queued transactions in a light pool, just return an empty map + queued := make(map[common.Address]map[uint64][]*types.Transaction) + return pending, queued +} + +// RemoveTransactions removes all given transactions from the pool. 
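// Illustration only: inspecting the light pool. Because the light pool never
// queues transactions, the second map returned by Content is always empty; the
// helper name is made up for the example.
func exampleInspectPool(pool *TxPool) {
    pending, queued := pool.Content()
    fmt.Printf("pending txs: %d, accounts with pending txs: %d, queued accounts: %d\n",
        pool.Stats(), len(pending), len(queued)) // queued is always 0
}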
+func (self *TxPool) RemoveTransactions(txs types.Transactions) { + self.mu.Lock() + defer self.mu.Unlock() + var hashes []common.Hash + for _, tx := range txs { + //self.RemoveTx(tx.Hash()) + hash := tx.Hash() + delete(self.pending, hash) + self.chainDb.Delete(hash[:]) + hashes = append(hashes, hash) + } + self.relay.Discard(hashes) +} + +// RemoveTx removes the transaction with the given hash from the pool. +func (pool *TxPool) RemoveTx(hash common.Hash) { + pool.mu.Lock() + defer pool.mu.Unlock() + // delete from pending pool + delete(pool.pending, hash) + pool.chainDb.Delete(hash[:]) + pool.relay.Discard([]common.Hash{hash}) +} diff --git a/light/txpool_test.go b/light/txpool_test.go new file mode 100644 index 000000000000..2775a52c96f9 --- /dev/null +++ b/light/txpool_test.go @@ -0,0 +1,140 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package light + +import ( + "math" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" + "golang.org/x/net/context" +) + +type testTxRelay struct { + send, nhMined, nhRollback, discard int +} + +func (self *testTxRelay) Send(txs types.Transactions) { + self.send = len(txs) +} + +func (self *testTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) { + self.nhMined = len(mined) + self.nhRollback = len(rollback) +} + +func (self *testTxRelay) Discard(hashes []common.Hash) { + self.discard = len(hashes) +} + +const poolTestTxs = 1000 +const poolTestBlocks = 100 + +// test tx 0..n-1 +var testTx [poolTestTxs]*types.Transaction + +// txs sent before block i +func sentTx(i int) int { + return int(math.Pow(float64(i)/float64(poolTestBlocks), 0.9) * poolTestTxs) +} + +// txs included in block i or before that (minedTx(i) <= sentTx(i)) +func minedTx(i int) int { + return int(math.Pow(float64(i)/float64(poolTestBlocks), 1.1) * poolTestTxs) +} + +func txPoolTestChainGen(i int, block *core.BlockGen) { + s := minedTx(i) + e := minedTx(i + 1) + for i := s; i < e; i++ { + block.AddTx(testTx[i]) + } +} + +func TestTxPool(t *testing.T) { + for i, _ := range testTx { + testTx[i], _ = types.NewTransaction(uint64(i), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil).SignECDSA(testBankKey) + } + + var ( + evmux = new(event.TypeMux) + pow = new(core.FakePow) + sdb, _ = ethdb.NewMemDatabase() + ldb, _ = ethdb.NewMemDatabase() + genesis = core.WriteGenesisBlockForTesting(sdb, core.GenesisAccount{testBankAddress, testBankFunds}) + ) + core.WriteGenesisBlockForTesting(ldb, core.GenesisAccount{testBankAddress, testBankFunds}) + // Assemble the test environment + blockchain, _ := 
core.NewBlockChain(sdb, testChainConfig(), pow, evmux) + gchain, _ := core.GenerateChain(genesis, sdb, poolTestBlocks, txPoolTestChainGen) + if _, err := blockchain.InsertChain(gchain); err != nil { + panic(err) + } + + odr := &testOdr{sdb: sdb, ldb: ldb} + relay := &testTxRelay{} + lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux) + lightchain.SetValidator(bproc{}) + txPermanent = 50 + pool := NewTxPool(testChainConfig(), evmux, lightchain, relay) + + for ii, block := range gchain { + i := ii + 1 + ctx, _ := context.WithTimeout(context.Background(), 200*time.Millisecond) + s := sentTx(i - 1) + e := sentTx(i) + for i := s; i < e; i++ { + relay.send = 0 + pool.Add(ctx, testTx[i]) + got := relay.send + exp := 1 + if got != exp { + t.Errorf("relay.Send expected len = %d, got %d", exp, got) + } + } + + relay.nhMined = 0 + relay.nhRollback = 0 + relay.discard = 0 + if _, err := lightchain.InsertHeaderChain([]*types.Header{block.Header()}, 1); err != nil { + panic(err) + } + time.Sleep(time.Millisecond * 30) + + got := relay.nhMined + exp := minedTx(i) - minedTx(i-1) + if got != exp { + t.Errorf("relay.NewHead expected len(mined) = %d, got %d", exp, got) + } + + got = relay.discard + exp = 0 + if i > int(txPermanent)+1 { + exp = minedTx(i-int(txPermanent)-1) - minedTx(i-int(txPermanent)-2) + } + if got != exp { + t.Errorf("relay.Discard expected len = %d, got %d", exp, got) + } + } +} diff --git a/light/vm_env.go b/light/vm_env.go new file mode 100644 index 000000000000..d3c55eb8257e --- /dev/null +++ b/light/vm_env.go @@ -0,0 +1,261 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package light + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "golang.org/x/net/context" +) + +// VMEnv is the light client version of the vm execution environment. +// Unlike other structures, VMEnv holds a context that is applied by state +// retrieval requests through the entire execution. If any state operation +// returns an error, the execution fails. 
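// Sketch, not part of the patch: because state reads fall back to zero values
// once a retrieval error has been recorded, callers should check Error() after
// applying a message through this environment (e.g. via core.ApplyMessage) and
// treat a non-nil value, typically a context timeout, as "result unavailable"
// rather than as a consensus-level failure. The fmt import is assumed here.
func exampleCheckEnvError(env *VMEnv) error {
    if err := env.Error(); err != nil {
        return fmt.Errorf("light state retrieval failed: %v", err)
    }
    return nil
}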
+type VMEnv struct { + vm.Environment + ctx context.Context + chainConfig *core.ChainConfig + evm *vm.EVM + state *VMState + header *types.Header + msg core.Message + depth int + chain *LightChain + typ vm.Type + // structured logging + logs []vm.StructLog + err error +} + +// NewEnv creates a new execution environment based on an ODR capable light state +func NewEnv(ctx context.Context, state *LightState, chainConfig *core.ChainConfig, chain *LightChain, msg core.Message, header *types.Header, cfg vm.Config) *VMEnv { + env := &VMEnv{ + chainConfig: chainConfig, + chain: chain, + header: header, + msg: msg, + typ: vm.StdVmTy, + } + env.state = &VMState{ctx: ctx, state: state, env: env} + + // if no log collector is present set self as the collector + if cfg.Logger.Collector == nil { + cfg.Logger.Collector = env + } + + env.evm = vm.New(env, cfg) + return env +} + +func (self *VMEnv) RuleSet() vm.RuleSet { return self.chainConfig } +func (self *VMEnv) Vm() vm.Vm { return self.evm } +func (self *VMEnv) Origin() common.Address { f, _ := self.msg.From(); return f } +func (self *VMEnv) BlockNumber() *big.Int { return self.header.Number } +func (self *VMEnv) Coinbase() common.Address { return self.header.Coinbase } +func (self *VMEnv) Time() *big.Int { return self.header.Time } +func (self *VMEnv) Difficulty() *big.Int { return self.header.Difficulty } +func (self *VMEnv) GasLimit() *big.Int { return self.header.GasLimit } +func (self *VMEnv) Value() *big.Int { return self.msg.Value() } +func (self *VMEnv) Db() vm.Database { return self.state } +func (self *VMEnv) Depth() int { return self.depth } +func (self *VMEnv) SetDepth(i int) { self.depth = i } +func (self *VMEnv) VmType() vm.Type { return self.typ } +func (self *VMEnv) SetVmType(t vm.Type) { self.typ = t } +func (self *VMEnv) GetHash(n uint64) common.Hash { + for header := self.chain.GetHeader(self.header.ParentHash, self.header.Number.Uint64()-1); header != nil; header = self.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) { + if header.GetNumberU64() == n { + return header.Hash() + } + } + + return common.Hash{} +} + +func (self *VMEnv) AddLog(log *vm.Log) { + //self.state.AddLog(log) +} +func (self *VMEnv) CanTransfer(from common.Address, balance *big.Int) bool { + return self.state.GetBalance(from).Cmp(balance) >= 0 +} + +func (self *VMEnv) MakeSnapshot() vm.Database { + return &VMState{ctx: self.ctx, state: self.state.state.Copy(), env: self} +} + +func (self *VMEnv) SetSnapshot(copy vm.Database) { + self.state.state.Set(copy.(*VMState).state) +} + +func (self *VMEnv) Transfer(from, to vm.Account, amount *big.Int) { + core.Transfer(from, to, amount) +} + +func (self *VMEnv) Call(me vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) { + return core.Call(self, me, addr, data, gas, price, value) +} +func (self *VMEnv) CallCode(me vm.ContractRef, addr common.Address, data []byte, gas, price, value *big.Int) ([]byte, error) { + return core.CallCode(self, me, addr, data, gas, price, value) +} + +func (self *VMEnv) Create(me vm.ContractRef, data []byte, gas, price, value *big.Int) ([]byte, common.Address, error) { + return core.Create(self, me, data, gas, price, value) +} + +func (self *VMEnv) StructLogs() []vm.StructLog { + return self.logs +} + +func (self *VMEnv) AddStructLog(log vm.StructLog) { + self.logs = append(self.logs, log) +} + +// Error returns the error (if any) that happened during execution. 
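// Sketch of the snapshot/revert contract that MakeSnapshot and SetSnapshot
// (defined above) provide, roughly how core's call helpers are expected to
// drive these hooks; run stands in for an arbitrary call. Illustration only.
func exampleSnapshotRevert(env *VMEnv, run func() error) {
    snap := env.MakeSnapshot()
    if err := run(); err != nil {
        env.SetSnapshot(snap) // roll the VMState back to the pre-call copy
    }
}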
+func (self *VMEnv) Error() error { + return self.err +} + +// VMState is a wrapper for the light state that holds the actual context and +// passes it to any state operation that requires it. +type VMState struct { + vm.Database + ctx context.Context + state *LightState + env *VMEnv +} + +// errHandler handles and stores any state error that happens during execution. +func (s *VMState) errHandler(err error) { + if err != nil && s.env.err == nil { + s.env.err = err + } +} + +// GetAccount returns the account object of the given account or nil if the +// account does not exist +func (s *VMState) GetAccount(addr common.Address) vm.Account { + so, err := s.state.GetStateObject(s.ctx, addr) + s.errHandler(err) + if err != nil { + // return a dummy state object to avoid panics + so = s.state.newStateObject(addr) + } + return so +} + +// CreateAccount creates creates a new account object and takes ownership. +func (s *VMState) CreateAccount(addr common.Address) vm.Account { + so, err := s.state.CreateStateObject(s.ctx, addr) + s.errHandler(err) + if err != nil { + // return a dummy state object to avoid panics + so = s.state.newStateObject(addr) + } + return so +} + +// AddBalance adds the given amount to the balance of the specified account +func (s *VMState) AddBalance(addr common.Address, amount *big.Int) { + err := s.state.AddBalance(s.ctx, addr, amount) + s.errHandler(err) +} + +// GetBalance retrieves the balance from the given address or 0 if the account does +// not exist +func (s *VMState) GetBalance(addr common.Address) *big.Int { + res, err := s.state.GetBalance(s.ctx, addr) + s.errHandler(err) + return res +} + +// GetNonce returns the nonce at the given address or 0 if the account does +// not exist +func (s *VMState) GetNonce(addr common.Address) uint64 { + res, err := s.state.GetNonce(s.ctx, addr) + s.errHandler(err) + return res +} + +// SetNonce sets the nonce of the specified account +func (s *VMState) SetNonce(addr common.Address, nonce uint64) { + err := s.state.SetNonce(s.ctx, addr, nonce) + s.errHandler(err) +} + +// GetCode returns the contract code at the given address or nil if the account +// does not exist +func (s *VMState) GetCode(addr common.Address) []byte { + res, err := s.state.GetCode(s.ctx, addr) + s.errHandler(err) + return res +} + +// SetCode sets the contract code at the specified account +func (s *VMState) SetCode(addr common.Address, code []byte) { + err := s.state.SetCode(s.ctx, addr, code) + s.errHandler(err) +} + +// AddRefund adds an amount to the refund value collected during a vm execution +func (s *VMState) AddRefund(gas *big.Int) { + s.state.AddRefund(gas) +} + +// GetRefund returns the refund value collected during a vm execution +func (s *VMState) GetRefund() *big.Int { + return s.state.GetRefund() +} + +// GetState returns the contract storage value at storage address b from the +// contract address a or common.Hash{} if the account does not exist +func (s *VMState) GetState(a common.Address, b common.Hash) common.Hash { + res, err := s.state.GetState(s.ctx, a, b) + s.errHandler(err) + return res +} + +// SetState sets the storage value at storage address key of the account addr +func (s *VMState) SetState(addr common.Address, key common.Hash, value common.Hash) { + err := s.state.SetState(s.ctx, addr, key, value) + s.errHandler(err) +} + +// Delete marks an account to be removed and clears its balance +func (s *VMState) Delete(addr common.Address) bool { + res, err := s.state.Delete(s.ctx, addr) + s.errHandler(err) + return res +} + +// Exist 
returns true if an account exists at the given address +func (s *VMState) Exist(addr common.Address) bool { + res, err := s.state.HasAccount(s.ctx, addr) + s.errHandler(err) + return res +} + +// IsDeleted returns true if the given account has been marked for deletion +// or false if the account does not exist +func (s *VMState) IsDeleted(addr common.Address) bool { + res, err := s.state.IsDeleted(s.ctx, addr) + s.errHandler(err) + return res +} diff --git a/release/release.go b/release/release.go index 21b5e2ce21cf..7c358aecee78 100644 --- a/release/release.go +++ b/release/release.go @@ -25,6 +25,8 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/node" @@ -58,12 +60,20 @@ type ReleaseService struct { // releases and notify the user of such. func NewReleaseService(ctx *node.ServiceContext, config Config) (node.Service, error) { // Retrieve the Ethereum service dependency to access the blockchain + var apiBackend ethapi.Backend var ethereum *eth.Ethereum - if err := ctx.Service(ðereum); err != nil { - return nil, err + if err := ctx.Service(ðereum); err == nil { + apiBackend = ethereum.ApiBackend + } else { + var ethereum *les.LightEthereum + if err := ctx.Service(ðereum); err == nil { + apiBackend = ethereum.ApiBackend + } else { + return nil, err + } } // Construct the release service - contract, err := NewReleaseOracle(config.Oracle, eth.NewContractBackend(ethereum)) + contract, err := NewReleaseOracle(config.Oracle, eth.NewContractBackend(apiBackend)) if err != nil { return nil, err } diff --git a/rpc/server.go b/rpc/server.go index 996c6370046e..f7dddded750a 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -337,7 +337,13 @@ func (s *Server) exec(ctx context.Context, codec ServerCodec, req *serverRequest if req.err != nil { response = codec.CreateErrorResponse(&req.id, req.err) } else { +/*fmt.Println() +fmt.Println("SREQ") +fmt.Println(*req)*/ response, callback = s.handle(ctx, codec, req) +/*fmt.Println("RESP") +fmt.Println(response) +fmt.Println()*/ } if err := codec.Write(response); err != nil { @@ -361,9 +367,15 @@ func (s *Server) execBatch(ctx context.Context, codec ServerCodec, requests []*s responses[i] = codec.CreateErrorResponse(&req.id, req.err) } else { var callback func() +/*fmt.Println() +fmt.Println("SREQ batch") +fmt.Println(*req)*/ if responses[i], callback = s.handle(ctx, codec, req); callback != nil { callbacks = append(callbacks, callback) } +/*fmt.Println("RESP") +fmt.Println(responses[i]) +fmt.Println()*/ } } @@ -391,6 +403,10 @@ func (s *Server) readRequest(codec ServerCodec) ([]*serverRequest, bool, Error) // verify requests for i, r := range reqs { +/*fmt.Println() +fmt.Println(time.Now()) +fmt.Println("REQ") +fmt.Println(r)*/ var ok bool var svc *service diff --git a/rpc/types.go b/rpc/types.go index 89c5b5bc9b85..e313bcda0855 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -272,3 +272,31 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { func (bn BlockNumber) Int64() int64 { return (int64)(bn) } + +type ClientRestartWrapper struct { + client *Client + newClientFn func() *Client + mu sync.RWMutex +} + +func NewClientRestartWrapper(newClientFn func() *Client) *ClientRestartWrapper { + return &ClientRestartWrapper{ + client: newClientFn(), + 
+		newClientFn: newClientFn,
+	}
+}
+
+func (rw *ClientRestartWrapper) Client() *Client {
+	rw.mu.RLock()
+	defer rw.mu.RUnlock()
+
+	return rw.client
+}
+
+func (rw *ClientRestartWrapper) Restart() {
+	rw.mu.Lock()
+	defer rw.mu.Unlock()
+
+	rw.client.Close()
+	rw.client = rw.newClientFn()
+}
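// Illustrative sketch only, assumed to live in package rpc: how a consumer
// might use the restart wrapper. newClient stands in for whatever constructs a
// *Client (for example an in-process attach function).
func exampleUseRestartWrapper(newClient func() *Client) {
    rw := NewClientRestartWrapper(newClient)

    // Always fetch the current client through the wrapper instead of caching
    // the *Client, so that a Restart is picked up by subsequent calls.
    c := rw.Client()
    _ = c

    // After a fatal transport error the underlying client can be rebuilt:
    rw.Restart()
}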