diff --git a/cmd/config.go b/cmd/config.go index 77ce94606..fe04cbac7 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -94,7 +94,8 @@ type Config struct { func GetStringSliceWorkaround(flagName string) []string { value := viper.GetString(flagName) if value == "" || value == " " { - return []string{} + values := viper.GetStringSlice(flagName) + return values } return strings.Split(value, ",") } @@ -147,7 +148,6 @@ func LoadConfig() *Config { // Peers config.ConnectIPs = GetStringSliceWorkaround("connect-ips") - glog.V(2).Infof("Connect IPs read in: %v", config.ConnectIPs) config.AddIPs = GetStringSliceWorkaround("add-ips") config.AddSeeds = GetStringSliceWorkaround("add-seeds") config.TargetOutboundPeers = viper.GetUint32("target-outbound-peers") @@ -266,6 +266,7 @@ func (config *Config) Print() { glog.Infof("MaxSyncBlockHeight: %v", config.MaxSyncBlockHeight) } + glog.Infof("Connect IPs: %s", config.ConnectIPs) if len(config.ConnectIPs) > 0 { glog.Infof("Connect IPs: %s", config.ConnectIPs) } diff --git a/cmd/node.go b/cmd/node.go index 52bf98b70..b1501978b 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -18,7 +18,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/deso-protocol/core/lib" "github.com/deso-protocol/core/migrate" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/go-pg/pg/v10" "github.com/golang/glog" migrations "github.com/robinjoseph08/go-pg-migrations/v3" diff --git a/go.mod b/go.mod index 01cae11fd..8ff891d0f 100644 --- a/go.mod +++ b/go.mod @@ -14,12 +14,11 @@ require ( github.com/bxcodec/faker v2.0.1+incompatible github.com/cloudflare/circl v1.5.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/decred/dcrd/container/lru v1.0.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/deso-protocol/go-deadlock v1.0.1 github.com/deso-protocol/go-merkle-tree v1.0.0 github.com/deso-protocol/uint256 v1.3.2 - github.com/dgraph-io/badger/v4 v4.3.1 + github.com/dgraph-io/badger/v3 
v3.2103.5 github.com/emirpasic/gods v1.18.1 github.com/ethereum/go-ethereum v1.14.11 github.com/fatih/color v1.17.0 @@ -27,6 +26,7 @@ require ( github.com/go-pg/pg/v10 v10.13.0 github.com/golang/glog v1.2.2 github.com/google/uuid v1.6.0 + github.com/hashicorp/golang-lru/v2 v2.0.3 github.com/mitchellh/go-homedir v1.1.0 github.com/oleiade/lane v1.0.1 github.com/onflow/crypto v0.25.2 @@ -36,7 +36,7 @@ require ( github.com/shibukawa/configdir v0.0.0-20170330084843-e180dbdc8da0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.19.0 + github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.9.0 github.com/tyler-smith/go-bip39 v1.1.0 github.com/unrolled/secure v1.16.0 @@ -69,11 +69,12 @@ require ( github.com/andygrunwald/go-jira v1.16.0 // indirect github.com/btcsuite/btclog v0.0.0-20241017175713-3428138b75c7 // indirect github.com/bwesterb/go-ristretto v1.2.3 // indirect + github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect - github.com/dgraph-io/ristretto v1.0.0 // indirect + github.com/dgraph-io/ristretto v0.2.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect github.com/ebitengine/purego v0.8.0 // indirect @@ -85,6 +86,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect @@ -98,7 +100,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect 
github.com/jinzhu/inflection v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.17.1 // indirect github.com/kyokomi/emoji/v2 v2.2.13 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/go.sum b/go.sum index 95bcc7be4..c0af42e8b 100644 --- a/go.sum +++ b/go.sum @@ -33,9 +33,12 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/andygrunwald/go-jira v1.16.0 h1:PU7C7Fkk5L96JvPc6vDVIrd99vdPnYudHu4ju2c2ikQ= github.com/andygrunwald/go-jira v1.16.0/go.mod h1:UQH4IBVxIYWbgagc0LF/k9FRs9xjIiQ8hIcC6HfLwFU= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/brianvoe/gofakeit v3.18.0+incompatible h1:wDOmHc9DLG4nRjUVVaxA+CEglKOW72Y5+4WNxUIkjM8= github.com/brianvoe/gofakeit v3.18.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= @@ -72,6 +75,8 @@ github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7N github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= github.com/bxcodec/faker v2.0.1+incompatible/go.mod 
h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -79,8 +84,12 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -91,8 +100,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/container/lru v1.0.0 h1:7foQymtbu18aQWYiY9RnNIeE+kvpiN+fiBQ3+viyJjI= -github.com/decred/dcrd/container/lru v1.0.0/go.mod h1:vlPwj0l+IzAHhQSsbgQnJgO5Cte78+yI065V+Mc5PRQ= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= @@ -106,10 +113,11 @@ github.com/deso-protocol/go-merkle-tree v1.0.0 h1:9zkI5dQsITYy77s4kbTGPQmZnhQ+Ls github.com/deso-protocol/go-merkle-tree v1.0.0/go.mod h1:V/vbg/maaNv6G7zf9VVs645nLFx/jsO2L/awFB/S/ZU= github.com/deso-protocol/uint256 v1.3.2 h1:nHwqfdCKgWimWLJbiN/9DV95qDJ5lZcf8n5cAHbdG6o= github.com/deso-protocol/uint256 v1.3.2/go.mod h1:Wq2bibbApz3TsiL+VPUnzr+UkhG4eBeQ0DpbQcjQYcA= -github.com/dgraph-io/badger/v4 v4.3.1 h1:7r5wKqmoRpGgSxqa0S/nGdpOpvvzuREGPLSua73C8tw= -github.com/dgraph-io/badger/v4 v4.3.1/go.mod h1:oObz97DImXpd6O/Dt8BqdKLLTDmEmarAimo72VV5whQ= -github.com/dgraph-io/ristretto v1.0.0 h1:SYG07bONKMlFDUYu5pEu3DGAh8c2OFNzKm6G9J4Si84= -github.com/dgraph-io/ristretto v1.0.0/go.mod h1:jTi2FiYEhQ1NsMmA7DeBykizjOuY88NhKBkepyu1jPc= +github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= +github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= +github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= 
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -156,12 +164,14 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -174,9 +184,11 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -186,6 +198,7 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -207,6 +220,9 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.7 
h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE= +github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= @@ -218,6 +234,7 @@ github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -230,8 +247,9 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress 
v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -243,6 +261,7 @@ github.com/kyokomi/emoji/v2 v2.2.13 h1:GhTfQa67venUUvmleTNFnb+bi7S3aocF7ZCXU9fSO github.com/kyokomi/emoji/v2 v2.2.13/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -261,6 +280,7 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -287,6 +307,7 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= @@ -307,6 +328,7 @@ github.com/robinjoseph08/go-pg-migrations/v3 v3.1.0 h1:EjexnDlSIZoK/gMfQmKIqB7tY github.com/robinjoseph08/go-pg-migrations/v3 v3.1.0/go.mod h1:9yEG60N97UVFGD/UKQUXoGVZh/t8KXx3JxEpxhKFlKY= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= @@ -326,18 +348,25 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -345,6 +374,7 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -369,6 +399,7 @@ github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df h1:Y2l28Jr3 github.com/tsuyoshiwada/go-gitcmd v0.0.0-20180205145712-5f1f5f9475df/go.mod h1:pnyouUty/nBr/zm3GYwTIt+qFTLWbdjeLjZmJdzJOu8= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/unrolled/secure v1.16.0 h1:XgdAsS/Zl50ZfZPRJK6WpicFttfrsFYFd0+ONDBJubU= github.com/unrolled/secure v1.16.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= @@ -383,12 +414,14 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xordataexchange/crypt 
v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -400,6 +433,7 @@ go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -445,6 +479,7 @@ golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -455,9 +490,11 @@ golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -477,6 +514,7 @@ golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -527,15 +565,17 @@ gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= 
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -553,6 +593,7 @@ gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 h1:zSY6DDsFRMQDNQYKWCv/AEwJXoPpDf1FfMyw7 gopkg.in/DataDog/dd-trace-go.v1 v1.69.0/go.mod h1:U9AOeBHNAL95JXcd/SPf4a7O5GNeF/yD13sJtli/yaU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/integration_testing/tools.go b/integration_testing/tools.go index 26e322335..94617bd18 100644 --- a/integration_testing/tools.go +++ b/integration_testing/tools.go @@ -14,7 +14,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/deso-protocol/core/cmd" "github.com/deso-protocol/core/lib" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" "github.com/stretchr/testify/require" diff --git a/lib/block_producer.go 
b/lib/block_producer.go index a789c1815..234644ed8 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -367,8 +367,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) blockRet.Header.TransactionMerkleRoot = merkleRoot // Compute the next difficulty target given the current tip. - diffTarget, err := CalcNextDifficultyTarget( - lastNode, CurrentHeaderVersion, desoBlockProducer.params) + diffTarget, err := desoBlockProducer.chain.CalcNextDifficultyTarget( + lastNode, CurrentHeaderVersion) if err != nil { return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: Problem computing next difficulty: ") } diff --git a/lib/block_view.go b/lib/block_view.go index b981a82c2..3a8eacb12 100644 --- a/lib/block_view.go +++ b/lib/block_view.go @@ -18,7 +18,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -5114,6 +5114,7 @@ func (bav *UtxoView) GetSpendableDeSoBalanceNanosForPublicKey(pkBytes []byte, // but we do have the header. As a result, this condition always evaluates to false and thus // we only process the block reward for the previous block instead of all immature block rewards // as defined by the params. + // NOTE: we are not using .GetParent here as it changes the meaning of this code. 
if blockNode.Parent != nil { nextBlockHash = blockNode.Parent.Hash } else { diff --git a/lib/block_view_atomic_txns_test.go b/lib/block_view_atomic_txns_test.go index ead9a7049..53d71c7e1 100644 --- a/lib/block_view_atomic_txns_test.go +++ b/lib/block_view_atomic_txns_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index 99352cf84..c12b8af0a 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -14,7 +14,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/wire" merkletree "github.com/deso-protocol/go-merkle-tree" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_creator_coin_test.go b/lib/block_view_creator_coin_test.go index 7e4a3a05b..e25af8ac9 100644 --- a/lib/block_view_creator_coin_test.go +++ b/lib/block_view_creator_coin_test.go @@ -5,7 +5,7 @@ import ( "strconv" "testing" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_dao_coin_limit_order_test.go b/lib/block_view_dao_coin_limit_order_test.go index 98034b5e0..07d214725 100644 --- a/lib/block_view_dao_coin_limit_order_test.go +++ b/lib/block_view_dao_coin_limit_order_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_dao_coin_test.go b/lib/block_view_dao_coin_test.go index aa64707bd..b006c3d7d 100644 --- a/lib/block_view_dao_coin_test.go +++ b/lib/block_view_dao_coin_test.go @@ 
-6,7 +6,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_derived_key_test.go b/lib/block_view_derived_key_test.go index bc59dd3f9..e9d079786 100644 --- a/lib/block_view_derived_key_test.go +++ b/lib/block_view_derived_key_test.go @@ -13,7 +13,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_flush.go b/lib/block_view_flush.go index 52a210636..e65341b4f 100644 --- a/lib/block_view_flush.go +++ b/lib/block_view_flush.go @@ -5,7 +5,7 @@ import ( "reflect" "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/block_view_follow_test.go b/lib/block_view_follow_test.go index ef3823f63..fedb91a03 100644 --- a/lib/block_view_follow_test.go +++ b/lib/block_view_follow_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_like_test.go b/lib/block_view_like_test.go index a757c7f70..01b715475 100644 --- a/lib/block_view_like_test.go +++ b/lib/block_view_like_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_lockups.go b/lib/block_view_lockups.go index c2c3e34ea..3a4a8d532 100644 --- a/lib/block_view_lockups.go +++ b/lib/block_view_lockups.go @@ -8,7 +8,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" 
"github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index 6d04aac60..48f435363 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -7,7 +7,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2462,7 +2462,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blk2.Header.Height)) // Update the tip - testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + testMeta.chain.blockIndex.tip = testMeta.chain.blockIndex.tip.Parent // Validate the state update utxoView = NewUtxoView( @@ -2517,7 +2517,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blk1.Header.Height)) // Update the tip - testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + testMeta.chain.blockIndex.setTip(testMeta.chain.blockIndex.tip.Parent) // Verify we return back to the initial state utxoView = NewUtxoView( diff --git a/lib/block_view_message_test.go b/lib/block_view_message_test.go index a47c3d85c..dbe204f99 100644 --- a/lib/block_view_message_test.go +++ b/lib/block_view_message_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_nft_test.go b/lib/block_view_nft_test.go index 2f4cfd64c..ba00d8ece 100644 --- a/lib/block_view_nft_test.go +++ b/lib/block_view_nft_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - 
"github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_post.go b/lib/block_view_post.go index e80d20daf..a6b9e6d30 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -13,7 +13,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/gernest/mention" "github.com/golang/glog" "github.com/pkg/errors" diff --git a/lib/block_view_post_test.go b/lib/block_view_post_test.go index 264df7d69..e84eaa1c9 100644 --- a/lib/block_view_post_test.go +++ b/lib/block_view_post_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/lib/block_view_profile_test.go b/lib/block_view_profile_test.go index c59e37cd5..01101d005 100644 --- a/lib/block_view_profile_test.go +++ b/lib/block_view_profile_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 10ad6ab7f..b83d5d7ac 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -7,7 +7,7 @@ import ( "sort" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 50a3a0c6c..8aecfb24f 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -13,10 +13,10 @@ import ( "github.com/deso-protocol/core/bls" "github.com/btcsuite/btcd/btcec/v2" - "github.com/decred/dcrd/container/lru" - "github.com/dgraph-io/badger/v4" + 
"github.com/dgraph-io/badger/v3" embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -702,8 +702,8 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te require.NoError(err) // sanity-check that the last block hash is the same as the last header hash. require.Equal(true, bytes.Equal( - tm.chain.bestChain[len(tm.chain.bestChain)-1].Hash.ToBytes(), - tm.chain.bestHeaderChain[len(tm.chain.bestHeaderChain)-1].Hash.ToBytes())) + tm.chain.blockIndex.GetTip().Hash.ToBytes(), + tm.chain.blockIndex.GetHeaderTip().Hash.ToBytes())) // Last block shouldn't be nil, and the number of expectedTxns should be the same as in the testVectorBlock + 1, // because of the additional block reward. require.NotNil(lastBlock) @@ -791,15 +791,14 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te // TODO: if ever needed we can call tm.chain.eventManager.blockDisconnected() here. // Update the block and header metadata chains. - tm.chain.bestChain = tm.chain.bestChain[:len(tm.chain.bestChain)-1] - tm.chain.bestHeaderChain = tm.chain.bestHeaderChain[:len(tm.chain.bestHeaderChain)-1] - delete(tm.chain.bestChainMap, *lastBlockHash) - delete(tm.chain.bestHeaderChainMap, *lastBlockHash) + tm.chain.blockIndex.setTip(tm.chain.BlockTip().GetParent(tm.chain.blockIndex)) + tm.chain.blockIndex.setHeaderTip(tm.chain.HeaderTip().GetParent(tm.chain.blockIndex)) // We don't pass the chain's snapshot above to prevent certain concurrency issues. As a // result, we need to reset the snapshot's db cache to get rid of stale data. 
if tm.chain.snapshot != nil { - tm.chain.snapshot.DatabaseCache = *lru.NewMap[string, []byte](DatabaseCacheSize) + tm.chain.snapshot.DatabaseCache, err = lru.New[string, []byte](int(DatabaseCacheSize)) + require.NoError(err) } // Note that unlike connecting test vectors, when disconnecting, we don't need to verify db entries. diff --git a/lib/block_view_validator.go b/lib/block_view_validator.go index 8cc00a14b..3b2560654 100644 --- a/lib/block_view_validator.go +++ b/lib/block_view_validator.go @@ -15,7 +15,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/blockchain.go b/lib/blockchain.go index 706e1d946..9884edac2 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -17,7 +17,7 @@ import ( "sync" "time" - "github.com/decred/dcrd/container/lru" + "github.com/hashicorp/golang-lru/v2" "github.com/deso-protocol/core/collections" @@ -30,7 +30,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" merkletree "github.com/deso-protocol/go-merkle-tree" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -59,7 +59,7 @@ const ( // have room for multiple forks each an entire history's length with this value). If // each node takes up 100 bytes of space this amounts to around 500MB, which also seems // like a reasonable size. - MaxBlockIndexNodes = 5000000 + MaxBlockIndexNodes = 50000000 // TODO: trim this down somehow... ) type BlockStatus uint32 @@ -123,7 +123,12 @@ func (nn *BlockNode) IsValidateFailed() bool { // IsCommitted returns true if a BlockNode has passed all validations, and it has been committed to // the Blockchain according to the Fast HotStuff commit rule. 
func (nn *BlockNode) IsCommitted() bool { - return nn.Status&StatusBlockCommitted != 0 || !blockNodeProofOfStakeCutoverMigrationTriggered(nn.Height) + //return nn.Status&StatusBlockCommitted != 0 || !blockNodeProofOfStakeCutoverMigrationTriggered(nn.Height) + return nn.Status&StatusBlockCommitted != 0 +} + +func (nn *BlockNode) ClearCommittedStatus() { + nn.Status &= BlockStatus(^uint32(StatusBlockCommitted)) } // IsFullyProcessed determines if the BlockStatus corresponds to a fully processed and stored block. @@ -236,6 +241,20 @@ func (nn *BlockNode) GetEncoderType() EncoderType { return EncoderTypeBlockNode } +func (nn *BlockNode) GetParent(blockIndex *BlockIndex) *BlockNode { + if nn.Parent != nil { + return nn.Parent + } + // If we don't have a parent, try to get it from the block index. + parentNode, exists := blockIndex.GetBlockNodeByHashAndHeight(nn.Header.PrevBlockHash, uint64(nn.Height-1)) + if !exists { + return nil + } + + nn.Parent = parentNode + return parentNode +} + // Append DeSo Encoder Metadata bytes to BlockNode bytes. func AddEncoderMetadataToBlockNodeBytes(blockNodeBytes []byte, blockHeight uint64) []byte { var blockData []byte @@ -371,6 +390,8 @@ func (nn *BlockNode) String() string { var parentHash *BlockHash if nn.Parent != nil { parentHash = nn.Parent.Hash + } else { + parentHash = nn.Header.PrevBlockHash } tstamp := uint32(0) if nn.Header != nil { @@ -404,14 +425,25 @@ func NewBlockNode( } } -func (nn *BlockNode) Ancestor(height uint32) *BlockNode { +func (nn *BlockNode) Ancestor(height uint32, blockIndex *BlockIndex) *BlockNode { if height > nn.Height { return nil } node := nn + // NOTE: using .Parent here is okay b/c it explicitly set it + // if we don't already have it when we fetch the parent from + // the block index. for ; node != nil && node.Height != height; node = node.Parent { // Keep iterating node until the condition no longer holds. 
+ if node.Parent == nil { + var exists bool + node.Parent, exists = blockIndex.GetBlockNodeByHashAndHeight( + node.Header.PrevBlockHash, uint64(node.Height-1)) + if !exists { + return nil + } + } } return node @@ -422,24 +454,24 @@ func (nn *BlockNode) Ancestor(height uint32) *BlockNode { // height minus provided distance. // // This function is safe for concurrent access. -func (nn *BlockNode) RelativeAncestor(distance uint32) *BlockNode { - return nn.Ancestor(nn.Height - distance) +func (nn *BlockNode) RelativeAncestor(distance uint32, blockIndex *BlockIndex) *BlockNode { + return nn.Ancestor(nn.Height-distance, blockIndex) } // CalcNextDifficultyTarget computes the difficulty target expected of the // next block. -func CalcNextDifficultyTarget( - lastNode *BlockNode, version uint32, params *DeSoParams) (*BlockHash, error) { +func (bc *Blockchain) CalcNextDifficultyTarget( + lastNode *BlockNode, version uint32) (*BlockHash, error) { // Compute the blocks in each difficulty cycle. - blocksPerRetarget := uint32(params.TimeBetweenDifficultyRetargets / params.TimeBetweenBlocks) + blocksPerRetarget := uint32(bc.params.TimeBetweenDifficultyRetargets / bc.params.TimeBetweenBlocks) // We effectively skip the first difficulty retarget by returning the default // difficulty value for the first cycle. Not doing this (or something like it) // would cause the genesis block's timestamp, which could be off by several days // to significantly skew the first cycle in a way that is mostly annoying for // testing but also suboptimal for the mainnet. - minDiffBytes, err := hex.DecodeString(params.MinDifficultyTargetHex) + minDiffBytes, err := hex.DecodeString(bc.params.MinDifficultyTargetHex) if err != nil { return nil, errors.Wrapf(err, "CalcNextDifficultyTarget: Problem computing min difficulty") } @@ -460,19 +492,27 @@ func CalcNextDifficultyTarget( } // If we get here it means we reached a difficulty retarget point. 
- targetSecs := int64(params.TimeBetweenDifficultyRetargets / time.Second) - minRetargetTimeSecs := targetSecs / params.MaxDifficultyRetargetFactor - maxRetargetTimeSecs := targetSecs * params.MaxDifficultyRetargetFactor + targetSecs := int64(bc.params.TimeBetweenDifficultyRetargets / time.Second) + minRetargetTimeSecs := targetSecs / bc.params.MaxDifficultyRetargetFactor + maxRetargetTimeSecs := targetSecs * bc.params.MaxDifficultyRetargetFactor firstNodeHeight := lastNode.Height - blocksPerRetarget - firstNode := lastNode.Ancestor(firstNodeHeight) - if firstNode == nil { + // TODO: we need to write the migration to only have committed blocks from PoW. + // This code is dead for PoS. + // TODO: do we need to do something if we need to get this from the header chain? + firstNode, exists, err := bc.GetBlockFromBestChainByHeight(uint64(firstNodeHeight), true) + if err != nil { + return nil, errors.Wrapf(err, "CalcNextDifficultyTarget: Problem getting block at "+ + "beginning of retarget interval at height %d during retarget from height %d", + firstNodeHeight, lastNode.Height) + } + if firstNode == nil || !exists { return nil, fmt.Errorf("CalcNextDifficultyTarget: Problem getting block at "+ "beginning of retarget interval at height %d during retarget from height %d", firstNodeHeight, lastNode.Height) } - actualTimeDiffSecs := int64(lastNode.Header.GetTstampSecs() - firstNode.Header.GetTstampSecs()) + actualTimeDiffSecs := lastNode.Header.GetTstampSecs() - firstNode.Header.GetTstampSecs() clippedTimeDiffSecs := actualTimeDiffSecs if actualTimeDiffSecs < minRetargetTimeSecs { clippedTimeDiffSecs = minRetargetTimeSecs @@ -527,6 +567,144 @@ type CheckpointBlockInfoAndError struct { Error error } +type BlockIndex struct { + db *badger.DB + snapshot *Snapshot + blockIndexByHash *lru.Cache[BlockHash, *BlockNode] + blockIndexByHeight *lru.Cache[uint64, []*BlockNode] + tip *BlockNode + headerTip *BlockNode +} + +func NewBlockIndex(db *badger.DB, snapshot *Snapshot, tipNode 
*BlockNode) *BlockIndex { + blockIndexByHash, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) // TODO: parameterize this? + blockIndexByHeight, _ := lru.New[uint64, []*BlockNode](MaxBlockIndexNodes) // TODO: parameterize this? + return &BlockIndex{ + db: db, + snapshot: snapshot, + blockIndexByHash: blockIndexByHash, + blockIndexByHeight: blockIndexByHeight, + tip: tipNode, + } +} + +func (bi *BlockIndex) setBlockIndexFromMap(input map[BlockHash]*BlockNode) { + newHashToBlockNodeMap, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) + newHeightToBlockNodeMap, _ := lru.New[uint64, []*BlockNode](MaxBlockIndexNodes) + bi.blockIndexByHash = newHashToBlockNodeMap + bi.blockIndexByHeight = newHeightToBlockNodeMap + for _, val := range input { + bi.addNewBlockNodeToBlockIndex(val) + // This function is always used for tests. + // We assume that the tip is just the highest block in the block index. + if bi.tip == nil { + bi.tip = val + } else if val.Height > bi.tip.Height { + bi.tip = val + } + } +} + +func (bi *BlockIndex) setHeaderTip(tip *BlockNode) { + // Just to be safe, we also add it to the block index. + bi.addNewBlockNodeToBlockIndex(tip) + bi.headerTip = tip +} + +func (bi *BlockIndex) setTip(tip *BlockNode) { + // Just to be safe, we also add it to the block index. + bi.addNewBlockNodeToBlockIndex(tip) + bi.tip = tip +} + +func (bi *BlockIndex) addNewBlockNodeToBlockIndex(blockNode *BlockNode) { + bi.blockIndexByHash.Add(*blockNode.Hash, blockNode) + blocksAtHeight, exists := bi.blockIndexByHeight.Get(uint64(blockNode.Height)) + if !exists { + blocksAtHeight = []*BlockNode{} + } else { + // Make sure we don't add the same block node twice. 
+ for ii, blockAtHeight := range blocksAtHeight { + if blockAtHeight.Hash.IsEqual(blockNode.Hash) { + blocksAtHeight[ii] = blockNode + break + } + } + } + bi.blockIndexByHeight.Add(uint64(blockNode.Height), append(blocksAtHeight, blockNode)) +} + +func (bi *BlockIndex) GetBlockNodeByHashOnly(blockHash *BlockHash) (*BlockNode, bool, error) { + val, exists := bi.blockIndexByHash.Get(*blockHash) + if exists { + return val, true, nil + } + height, err := GetHeightForHash(bi.db, bi.snapshot, blockHash) + if err != nil { + if errors.Is(err, badger.ErrKeyNotFound) { + return nil, false, nil + } + return nil, false, errors.Wrapf(err, "GetBlockNodeByHashOnly: Problem getting height for hash") + } + blockNode := GetHeightHashToNodeInfo(bi.db, bi.snapshot, uint32(height), blockHash, false) + if blockNode == nil { + return nil, false, nil + } + bi.addNewBlockNodeToBlockIndex(blockNode) + return blockNode, true, nil +} + +func (bi *BlockIndex) GetBlockNodeByHashAndHeight(blockHash *BlockHash, height uint64) (*BlockNode, bool) { + val, exists := bi.blockIndexByHash.Get(*blockHash) + if exists { + return val, true + } + if height > math.MaxUint32 { + glog.Fatalf("GetBlockNodeByHashAndHeight: Height %d is greater than math.MaxUint32", height) + } + bn := GetHeightHashToNodeInfo(bi.db, bi.snapshot, uint32(height), blockHash, false) + if bn == nil { + return nil, false + } + bi.addNewBlockNodeToBlockIndex(bn) + return bn, true +} + +func (bi *BlockIndex) GetBlockNodesByHeight(height uint64) []*BlockNode { + if height > math.MaxUint32 { + glog.Fatalf("GetBlockNodesByHeight: Height %d is greater than math.MaxUint32", height) + } + //if height > bi.maxHeightSeen { + // return []*BlockNode{} + //} + blockNodesAtHeight, exists := bi.blockIndexByHeight.Get(height) + if exists { + return blockNodesAtHeight + } + // TODO: cache current height to exit early? 
+ prefixKey := _heightHashToNodePrefixByHeight(uint32(height), false) + _, valsFound := EnumerateKeysForPrefix(bi.db, prefixKey, false) + blockNodes := []*BlockNode{} + for _, val := range valsFound { + blockNode, err := DeserializeBlockNode(val) + if err != nil { + glog.Errorf("GetBlockNodesByHeight: Problem deserializing block node: %v", err) + continue + } + bi.addNewBlockNodeToBlockIndex(blockNode) + blockNodes = append(blockNodes, blockNode) + } + return blockNodes +} + +func (bi *BlockIndex) GetTip() *BlockNode { + return bi.tip +} + +func (bi *BlockIndex) GetHeaderTip() *BlockNode { + return bi.headerTip +} + type Blockchain struct { db *badger.DB postgres *Postgres @@ -553,20 +731,8 @@ type Blockchain struct { ChainLock deadlock.RWMutex // These should only be accessed after acquiring the ChainLock. - // - // An in-memory index of the "tree" of blocks we are currently aware of. - // This index includes forks and side-chains. - blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode] - // blockIndexByHeight is an in-memory map of block height to block nodes. This is - // used to quickly find the safe blocks from which the chain can be extended for PoS - blockIndexByHeight map[uint64]map[BlockHash]*BlockNode - // An in-memory slice of the blocks on the main chain only. The end of - // this slice is the best known tip that we have at any given time. - bestChain []*BlockNode - bestChainMap map[BlockHash]*BlockNode - - bestHeaderChain []*BlockNode - bestHeaderChainMap map[BlockHash]*BlockNode + blockIndex *BlockIndex + lowestBlockNotStored uint64 // We keep track of orphan blocks with the following data structures. Orphans // are not written to disk and are only cached in memory. 
Moreover we only keep @@ -577,7 +743,7 @@ type Blockchain struct { blockView *UtxoView // cache block view for each block - blockViewCache lru.Map[BlockHash, *BlockViewAndUtxoOps] + blockViewCache *lru.Cache[BlockHash, *BlockViewAndUtxoOps] // snapshot cache snapshotCache *SnapshotCache @@ -705,80 +871,67 @@ func getCheckpointBlockInfoFromProviderHelper(provider string) *CheckpointBlockI } func (bc *Blockchain) addNewBlockNodeToBlockIndex(blockNode *BlockNode) { - bc.blockIndexByHash.Set(*blockNode.Hash, blockNode) - if _, exists := bc.blockIndexByHeight[uint64(blockNode.Height)]; !exists { - bc.blockIndexByHeight[uint64(blockNode.Height)] = make(map[BlockHash]*BlockNode) - } - bc.blockIndexByHeight[uint64(blockNode.Height)][*blockNode.Hash] = blockNode + bc.blockIndex.addNewBlockNodeToBlockIndex(blockNode) } func (bc *Blockchain) CopyBlockIndexes() ( - _blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode], - _blockIndexByHeight map[uint64]map[BlockHash]*BlockNode, + _blockIndexByHash *lru.Cache[BlockHash, *BlockNode], ) { - newBlockIndexByHash := collections.NewConcurrentMap[BlockHash, *BlockNode]() - newBlockIndexByHeight := make(map[uint64]map[BlockHash]*BlockNode) - bc.blockIndexByHash.Iterate(func(kk BlockHash, vv *BlockNode) { - newBlockIndexByHash.Set(kk, vv) - blockHeight := uint64(vv.Height) - if _, exists := newBlockIndexByHeight[blockHeight]; !exists { - newBlockIndexByHeight[blockHeight] = make(map[BlockHash]*BlockNode) - } - newBlockIndexByHeight[blockHeight][kk] = vv - }) - return newBlockIndexByHash, newBlockIndexByHeight + newBlockIndexByHash, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) + for _, key := range bc.blockIndex.blockIndexByHash.Keys() { + val, _ := bc.blockIndex.blockIndexByHash.Get(key) + newBlockIndexByHash.Add(key, val) + } + return newBlockIndexByHash } -func (bc *Blockchain) constructBlockIndexByHeight() map[uint64]map[BlockHash]*BlockNode { - newBlockIndex := make(map[uint64]map[BlockHash]*BlockNode) - 
bc.blockIndexByHash.Iterate(func(_ BlockHash, blockNode *BlockNode) { - blockHeight := uint64(blockNode.Height) - if _, exists := newBlockIndex[blockHeight]; !exists { - newBlockIndex[blockHeight] = make(map[BlockHash]*BlockNode) - } - newBlockIndex[blockHeight][*blockNode.Hash] = blockNode - }) - return newBlockIndex +func (bc *Blockchain) GetBlockIndex() *BlockIndex { + return bc.blockIndex } +// TODO: read through to DB. func (bc *Blockchain) getAllBlockNodesIndexedAtHeight(blockHeight uint64) []*BlockNode { - return collections.MapValues(bc.blockIndexByHeight[blockHeight]) + return bc.blockIndex.GetBlockNodesByHeight(blockHeight) } func (bc *Blockchain) hasBlockNodesIndexedAtHeight(blockHeight uint64) bool { - blocksAtHeight, hasNestedMapAtHeight := bc.blockIndexByHeight[blockHeight] - if !hasNestedMapAtHeight { - return false - } - return len(blocksAtHeight) > 0 -} - -func (bc *Blockchain) CopyBestChain() ([]*BlockNode, map[BlockHash]*BlockNode) { - newBestChain := []*BlockNode{} - newBestChainMap := make(map[BlockHash]*BlockNode) - newBestChain = append(newBestChain, bc.bestChain...) - for kk, vv := range bc.bestChainMap { - newBestChainMap[kk] = vv - } - - return newBestChain, newBestChainMap -} - -func (bc *Blockchain) CopyBestHeaderChain() ([]*BlockNode, map[BlockHash]*BlockNode) { - newBestChain := []*BlockNode{} - newBestChainMap := make(map[BlockHash]*BlockNode) - newBestChain = append(newBestChain, bc.bestHeaderChain...) - for kk, vv := range bc.bestHeaderChainMap { - newBestChainMap[kk] = vv - } - - return newBestChain, newBestChainMap + blockNodes := bc.blockIndex.GetBlockNodesByHeight(blockHeight) + return len(blockNodes) > 0 } // IsFullyStored determines if there are block nodes that haven't been fully stored or processed in the best block chain. 
func (bc *Blockchain) IsFullyStored() bool { - if bc.ChainState() == SyncStateFullyCurrent { - for _, blockNode := range bc.bestChain { + // TODO: figure out how to iterate over best chain w/o having entire thing in memory. + chainState := bc.ChainState() + if chainState == SyncStateFullyCurrent || (chainState == SyncStateNeedBlocksss && + bc.headerTip().Height-bc.blockTip().Height < 10) { + // Get a sampling of blocks from the best chain and check if they are fully stored. + // We only need to check a few blocks to determine if the chain is fully stored. + blockTipHeight := uint64(bc.BlockTip().Height) + increment := blockTipHeight / 20 + if increment == 0 { + increment = 1 + } + blockHeights := []uint64{} + for ii := uint64(0); ii < blockTipHeight; ii += increment { + blockHeights = append(blockHeights, ii) + } + if blockTipHeight > 100 { + for ii := blockTipHeight - 20; ii < blockTipHeight; ii++ { + blockHeights = append(blockHeights, ii) + } + } + blockHeights = append(blockHeights, blockTipHeight) + blockHeightSet := NewSet(blockHeights) + for _, blockHeight := range blockHeightSet.ToSlice() { + blockNode, exists, err := bc.GetBlockFromBestChainByHeight(blockHeight, false) + if err != nil { + glog.Errorf("IsFullyStored: Problem getting block at height %d: %v", blockHeight, err) + return false + } + if !exists { + return false + } if !blockNode.Status.IsFullyProcessed() { return false } @@ -840,56 +993,56 @@ func (bc *Blockchain) _initChain() error { // to previous blocks we've read in and error if they don't. This works because // reading blocks in height order as we do here ensures that we'll always // add a block's parents, if they exist, before adding the block itself. 
+	//var err error
+	//if bc.postgres != nil {
+	//	bc.blockIndexByHash, err = bc.postgres.GetBlockIndex()
+	//} else {
+	//	bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/, bc.params)
+	//}
+	//if err != nil {
+	//	return errors.Wrapf(err, "_initChain: Problem reading block index from db")
+	//}
+	//bc.blockIndexByHeight = bc.constructBlockIndexByHeight()
+
+	// For postgres, we still load the entire block index into memory. This is because the postgres path has not yet been migrated to the lazy-loading block index (NOTE(review): assumption — confirm).
 	var err error
+	var tipNode *BlockNode
 	if bc.postgres != nil {
-		bc.blockIndexByHash, err = bc.postgres.GetBlockIndex()
-	} else {
-		bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/, bc.params)
-	}
-	if err != nil {
-		return errors.Wrapf(err, "_initChain: Problem reading block index from db")
-	}
-	bc.blockIndexByHeight = bc.constructBlockIndexByHeight()
-
-	// At this point the blockIndexByHash should contain a full node tree with all
-	// nodes pointing to valid parent nodes.
-	{
-		// Find the tip node with the best node hash.
-		tipNode, exists := bc.blockIndexByHash.Get(*bestBlockHash)
+		bc.blockIndex.blockIndexByHash, err = bc.postgres.GetBlockIndex()
+		var exists bool
+		tipNode, exists = bc.blockIndex.blockIndexByHash.Get(*bestBlockHash)
 		if !exists {
-			return fmt.Errorf("_initChain(block): Best hash (%#v) not found in block index", bestBlockHash)
+			return fmt.Errorf("_initChain: Best hash (%#v) not found in block index", bestBlockHash)
 		}
-
-		// Walk back from the best node to the genesis block and store them all
-		// in bestChain.
-		bc.bestChain, err = GetBestChain(tipNode)
-		if err != nil {
-			return errors.Wrapf(err, "_initChain(block): Problem reading best chain from db")
-		}
-		for _, bestChainNode := range bc.bestChain {
-			bc.bestChainMap[*bestChainNode.Hash] = bestChainNode
+	} else {
+		var tipNodeExists bool
+		// For badger, we only need the tip block to get started.
+		// Weird hack required for the genesis block.
+ if bestBlockHash.IsEqual(GenesisBlockHash) { + tipNode, tipNodeExists = bc.blockIndex.GetBlockNodeByHashAndHeight(bestBlockHash, 0) + } else { + tipNode, tipNodeExists, err = bc.blockIndex.GetBlockNodeByHashOnly(bestBlockHash) + if err != nil { + return errors.Wrapf(err, "_initChain: Problem reading best block from db") + } + if !tipNodeExists { + return fmt.Errorf("_initChain: Best hash (%#v) not found in block index", bestBlockHash) + } + // Walk back the last 24 hours of blocks. + currBlockCounter := 1 + for currBlockCounter < 3600*24 && tipNode.Header.PrevBlockHash != nil { + bc.blockIndex.GetBlockNodeByHashAndHeight(tipNode.Header.PrevBlockHash, tipNode.Header.Height-1) + currBlockCounter++ + } } - } - - // TODO: This code is a bit repetitive but this seemed clearer than factoring it out. - { - // Find the tip node with the best node hash. - tipNode, exists := bc.blockIndexByHash.Get(*bestHeaderHash) - if !exists { - return fmt.Errorf("_initChain(header): Best hash (%#v) not found in block index", bestHeaderHash) + if err = bc.blockIndex.LoadBlockIndexFromHeight(tipNode.Height, bc.params); err != nil { + return errors.Wrapf(err, "_initChain: Problem loading block index from db") } - // Walk back from the best node to the genesis block and store them all - // in bestChain. - bc.bestHeaderChain, err = GetBestChain(tipNode) - if err != nil { - return errors.Wrapf(err, "_initChain(header): Problem reading best chain from db") - } - for _, bestHeaderChainNode := range bc.bestHeaderChain { - bc.bestHeaderChainMap[*bestHeaderChainNode.Hash] = bestHeaderChainNode - } + // We start by simply setting the chain tip and header tip to the tip node. + bc.blockIndex.setTip(tipNode) + bc.blockIndex.setHeaderTip(tipNode) } - bc.isInitialized = true return nil @@ -931,20 +1084,12 @@ func (bc *Blockchain) _applyUncommittedBlocksToBestChain() error { } // Add the uncommitted blocks to the in-memory data structures. 
- if _, _, _, err := bc.tryApplyNewTip(uncommittedTipBlockNode, 0, lineageFromCommittedTip); err != nil { + if _, _, _, err = bc.tryApplyNewTip(uncommittedTipBlockNode, 0, lineageFromCommittedTip); err != nil { return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") } - ////////////////////////// Update the bestHeaderChain in-memory data structures ////////////////////////// - currentHeaderTip := bc.headerTip() - _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentHeaderTip, uncommittedTipBlockNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, - bc.bestHeaderChainMap, - blocksToDetach, - blocksToAttach, - ) - + bc.blockIndex.setTip(uncommittedTipBlockNode) + bc.blockIndex.setHeaderTip(uncommittedTipBlockNode) return nil } @@ -966,6 +1111,10 @@ func NewBlockchain( archivalMode bool, checkpointSyncingProviders []string, ) (*Blockchain, error) { + if err := RunBlockIndexMigrationOnce(db, params); err != nil { + return nil, errors.Wrapf(err, "NewBlockchain: Problem running block index migration") + } + trustedBlockProducerPublicKeys := make(map[PkMapKey]bool) for _, keyStr := range trustedBlockProducerPublicKeyStrs { pkBytes, _, err := Base58CheckDecode(keyStr) @@ -978,7 +1127,7 @@ func NewBlockchain( timer := &Timer{} timer.Initialize() - + blockViewCache, _ := lru.New[BlockHash, *BlockViewAndUtxoOps](100) // TODO: parameterize bc := &Blockchain{ db: db, postgres: postgres, @@ -991,13 +1140,8 @@ func NewBlockchain( eventManager: eventManager, archivalMode: archivalMode, - blockIndexByHash: collections.NewConcurrentMap[BlockHash, *BlockNode](), - blockIndexByHeight: make(map[uint64]map[BlockHash]*BlockNode), - bestChainMap: make(map[BlockHash]*BlockNode), - - bestHeaderChainMap: make(map[BlockHash]*BlockNode), - - blockViewCache: *lru.NewMap[BlockHash, *BlockViewAndUtxoOps](100), // TODO: parameterize + blockIndex: NewBlockIndex(db, snapshot, nil), // TODO: replace with actual tip. 
+ blockViewCache: blockViewCache, snapshotCache: NewSnapshotCache(), checkpointSyncingProviders: checkpointSyncingProviders, @@ -1063,15 +1207,16 @@ func fastLog2Floor(n uint32) uint8 { // functions. // // This function MUST be called with the chain state lock held (for reads). -func locateInventory(locator []*BlockHash, stopHash *BlockHash, maxEntries uint32, - blockIndex *collections.ConcurrentMap[BlockHash, *BlockNode], bestChainList []*BlockNode, - bestChainMap map[BlockHash]*BlockNode) (*BlockNode, uint32) { +// TODO: this function needs a whole bunch of work. +func (bc *Blockchain) locateInventory(locator []*BlockHash, stopHash *BlockHash, maxEntries uint32) (*BlockNode, uint32) { // There are no block locators so a specific block is being requested // as identified by the stop hash. - stopNode, stopNodeExists := blockIndex.Get(*stopHash) + stopNode, stopNodeExists, stopNodeError := bc.GetBlockFromBestChainByHash(stopHash, true) if len(locator) == 0 { - if !stopNodeExists { + if stopNodeError != nil || !stopNodeExists || stopNode == nil { + // TODO: what should we really do here? + glog.Errorf("locateInventory: Block %v is not known", stopHash) // No blocks with the stop hash were found so there is // nothing to do. return nil, 0 @@ -1082,10 +1227,19 @@ func locateInventory(locator []*BlockHash, stopHash *BlockHash, maxEntries uint3 // Find the most recent locator block hash in the main chain. In the // case none of the hashes in the locator are in the main chain, fall // back to the genesis block. 
- startNode := bestChainList[0] + startNode, startNodeExists, err := bc.GetBlockFromBestChainByHeight(0, true) + if err != nil { + glog.Errorf("locateInventory: Problem getting block by height: %v", err) + return nil, 0 + } + if !startNodeExists { + glog.Errorf("locateInventory: Genesis block not found") + return nil, 0 + } for _, hash := range locator { - node, bestChainContainsNode := bestChainMap[*hash] - if bestChainContainsNode { + // TODO: replace w/ read-through cache call. + node := bc.GetBlockNodeWithHash(hash) + if node != nil { startNode = node break } @@ -1095,17 +1249,25 @@ func locateInventory(locator []*BlockHash, stopHash *BlockHash, maxEntries uint3 // is no next block it means the most recently known block is the tip of // the best chain, so there is nothing more to do. nextNodeHeight := uint32(startNode.Header.Height) + 1 - if uint32(len(bestChainList)) <= nextNodeHeight { + startNode, startNodeExists, err = bc.GetBlockFromBestChainByHeight(uint64(nextNodeHeight), true) + if err != nil { + glog.Errorf("locateInventory: Problem getting block by height: %v", err) + return nil, 0 + } + if !startNodeExists { return nil, 0 } - startNode = bestChainList[nextNodeHeight] // Calculate how many entries are needed. 
- tip := bestChainList[len(bestChainList)-1] - total := uint32((tip.Header.Height - startNode.Header.Height) + 1) - if stopNodeExists && stopNode.Header.Height >= startNode.Header.Height { + total := (bc.blockIndex.GetTip().Height - startNode.Height) + 1 + if stopNodeError != nil && stopNodeExists && stopNode != nil && + stopNode.Header.Height >= startNode.Header.Height { - _, bestChainContainsStopNode := bestChainMap[*stopNode.Hash] + _, bestChainContainsStopNode, err := bc.blockIndex.GetBlockNodeByHashOnly(stopNode.Hash) + if err != nil { + glog.Errorf("locateInventory: Problem getting block by hash: %v", err) + return nil, 0 + } if bestChainContainsStopNode { total = uint32((stopNode.Header.Height - startNode.Header.Height) + 1) } @@ -1124,15 +1286,12 @@ func locateInventory(locator []*BlockHash, stopHash *BlockHash, maxEntries uint3 // See the comment on the exported function for more details on special cases. // // This function MUST be called with the ChainLock held (for reads). -func locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32, - blockIndex *collections.ConcurrentMap[BlockHash, *BlockNode], bestChainList []*BlockNode, - bestChainMap map[BlockHash]*BlockNode) []*MsgDeSoHeader { +func (bc *Blockchain) locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32) []*MsgDeSoHeader { // Find the node after the first known block in the locator and the // total number of nodes after it needed while respecting the stop hash // and max entries. 
- node, total := locateInventory(locator, stopHash, maxHeaders, - blockIndex, bestChainList, bestChainMap) + node, total := bc.locateInventory(locator, stopHash, maxHeaders) if total == 0 { return nil } @@ -1147,7 +1306,15 @@ func locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32, if uint32(len(headers)) == total { break } - node = bestChainList[node.Header.Height+1] + var nodeExists bool + node, nodeExists, err = bc.GetBlockFromBestChainByHeight(node.Header.Height+1, true) + if err != nil { + glog.Errorf("locateHeaders: Problem getting block by height: %v", err) + break + } + if !nodeExists { + break + } } return headers } @@ -1175,8 +1342,7 @@ func (bc *Blockchain) LocateBestBlockChainHeaders( // TODO: Shouldn't we hold a ChainLock here? I think it's fine though because the place // where it's currently called is single-threaded via a channel in server.go. Going to // avoid messing with it for now. - headers := locateHeaders(locator, stopHash, maxHeaders, - bc.blockIndexByHash, bc.bestChain, bc.bestChainMap) + headers := bc.locateHeaders(locator, stopHash, maxHeaders) return headers } @@ -1238,10 +1404,24 @@ func (bc *Blockchain) LatestLocator(tip *BlockNode) []*BlockHash { // ancestors must be too, so use a much faster O(1) lookup in // that case. Otherwise, fall back to walking backwards through // the nodes of the other chain to the correct ancestor. 
- if _, exists := bc.bestHeaderChainMap[*tip.Hash]; exists { - tip = bc.bestHeaderChain[height] + _, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(tip.Hash) + if err != nil { + glog.Errorf("LatestLocator: Problem getting block by hash: %v", err) + exists = false + } + if exists { + var innerExists bool + tip, innerExists, err = bc.GetBlockFromBestChainByHeight(uint64(height), true) + if err != nil { + glog.Errorf("LatestLocator: Problem getting block by height: %v", err) + break + } + if !innerExists { + glog.Errorf("LatestLocator: Block %v not found in best header chain", height) + break + } } else { - tip = tip.Ancestor(uint32(height)) + tip = tip.Ancestor(uint32(height), bc.blockIndex) } // Once 11 entries have been included, start doubling the @@ -1255,8 +1435,11 @@ func (bc *Blockchain) LatestLocator(tip *BlockNode) []*BlockHash { } func (bc *Blockchain) HeaderLocatorWithNodeHash(blockHash *BlockHash) ([]*BlockHash, error) { - node, exists := bc.blockIndexByHash.Get(*blockHash) - if !exists { + node, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(blockHash) + if err != nil { + return nil, fmt.Errorf("Blockchain.HeaderLocatorWithNodeHash: Problem getting node for hash %v: %v", blockHash, err) + } + if !exists || node == nil { return nil, fmt.Errorf("Blockchain.HeaderLocatorWithNodeHash: Node for hash %v is not in our blockIndexByHash", blockHash) } @@ -1285,7 +1468,11 @@ func (bc *Blockchain) GetBlockNodesToFetch( // If the tip of the best block chain is in the main header chain, make that // the start point for our fetch. 
- headerNodeStart, blockTipExistsInBestHeaderChain := bc.bestHeaderChainMap[*bestBlockTip.Hash] + headerNodeStart, blockTipExistsInBestHeaderChain, err := bc.GetBlockFromBestChainByHeight(uint64(bestBlockTip.Height), true) + if err != nil { + glog.Errorf("GetBlockToFetch: Problem getting block by height: %v", err) + return nil + } if !blockTipExistsInBestHeaderChain { // If the hash of the tip of the best blockchain is not in the best header chain, then // this is a case where the header chain has forked off from the best block @@ -1296,7 +1483,7 @@ func (bc *Blockchain) GetBlockNodesToFetch( // StatusBlockProcessed so this loop is guaranteed to terminate successfully. headerNodeStart = bc.headerTip() for headerNodeStart != nil && (headerNodeStart.Status&StatusBlockProcessed) == 0 { - headerNodeStart = headerNodeStart.Parent + headerNodeStart = headerNodeStart.GetParent(bc.blockIndex) } if headerNodeStart == nil { @@ -1304,7 +1491,17 @@ func (bc *Blockchain) GetBlockNodesToFetch( // an error and set it to the genesis block. 
glog.Errorf("GetBlockToFetch: headerNode was nil after iterating " + "backward through best header chain; using genesis block") - headerNodeStart = bc.bestHeaderChain[0] + var err error + var genesisBlockExists bool + headerNodeStart, genesisBlockExists, err = bc.GetBlockFromBestChainByHeight(0, true) + if err != nil { + glog.Errorf("GetBlockToFetch: Problem getting genesis block: %v", err) + return nil + } + if !genesisBlockExists { + glog.Errorf("GetBlockToFetch: Genesis block not found") + return nil + } } } @@ -1314,14 +1511,22 @@ func (bc *Blockchain) GetBlockNodesToFetch( currentHeight := headerNodeStart.Height + 1 blockNodesToFetch := []*BlockNode{} heightLimit := maxHeight - if heightLimit >= uint32(len(bc.bestHeaderChain)) { - heightLimit = uint32(len(bc.bestHeaderChain) - 1) + if heightLimit >= bc.blockIndex.GetHeaderTip().Height { + heightLimit = bc.blockIndex.GetHeaderTip().Height - 1 } for currentHeight <= heightLimit && len(blockNodesToFetch) < numBlocks { // Get the current hash and increment the height. 
- currentNode := bc.bestHeaderChain[currentHeight] + currentNode, currentNodeExists, err := bc.GetBlockFromBestChainByHeight(uint64(currentHeight), true) + if err != nil { + glog.Errorf("GetBlockToFetch: Problem getting block by height: %v", err) + return nil + } + if !currentNodeExists { + glog.Errorf("GetBlockToFetch: Block at height %d not found", currentHeight) + return nil + } currentHeight++ if _, exists := blocksToIgnore[*currentNode.Hash]; exists { @@ -1335,55 +1540,33 @@ func (bc *Blockchain) GetBlockNodesToFetch( return blockNodesToFetch } -func (bc *Blockchain) HasHeader(headerHash *BlockHash) bool { - _, exists := bc.blockIndexByHash.Get(*headerHash) - return exists +func (bc *Blockchain) HasHeader(headerHash *BlockHash) (bool, error) { + _, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(headerHash) + return exists, errors.Wrap(err, "Blockchain.HasHeader: ") } -func (bc *Blockchain) HeaderAtHeight(blockHeight uint32) *BlockNode { - if blockHeight >= uint32(len(bc.bestHeaderChain)) { - return nil - } - - return bc.bestHeaderChain[blockHeight] -} - -func (bc *Blockchain) HasBlock(blockHash *BlockHash) bool { - node, nodeExists := bc.blockIndexByHash.Get(*blockHash) - if !nodeExists { - glog.V(2).Infof("Blockchain.HasBlock: Node with hash %v does not exist in node index", blockHash) +func (bc *Blockchain) HasHeaderByHashAndHeight(headerHash *BlockHash, height uint64) bool { + if height > uint64(bc.headerTip().Height) { return false } - - if (node.Status & StatusBlockProcessed) == 0 { - glog.V(2).Infof("Blockchain.HasBlock: Node %v does not have StatusBlockProcessed so we don't have the block", node) - return false - } - - // Node exists with StatusBlockProcess set means we have it. 
- return true + _, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, height) + return exists } -func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) bool { - bc.ChainLock.RLock() - defer bc.ChainLock.RUnlock() - - _, exists := bc.blockIndexByHash.Get(*blockHash) - return exists +// TODO: delete me? +func (bc *Blockchain) HeaderAtHeight(blockHeight uint32) (*BlockNode, bool, error) { + if blockHeight >= bc.blockIndex.GetHeaderTip().Height { + return nil, false, nil + } + return bc.GetBlockFromBestChainByHeight(uint64(blockHeight), true) } -// This needs to hold a lock on the blockchain because it read from an in-memory map that is -// not thread-safe. -func (bc *Blockchain) GetBlockHeaderFromIndex(blockHash *BlockHash) *MsgDeSoHeader { +func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) (bool, error) { bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - block, blockExists := bc.blockIndexByHash.Get(*blockHash) - if !blockExists { - return nil - } - - return block.Header + _, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(blockHash) + return exists, errors.Wrap(err, "Blockchain.HasBlockInBlockIndex: ") } // Don't need a lock because blocks don't get removed from the db after they're added @@ -1397,14 +1580,14 @@ func (bc *Blockchain) GetBlock(blockHash *BlockHash) *MsgDeSoBlock { return blk } -func (bc *Blockchain) GetBlockAtHeight(height uint32) *MsgDeSoBlock { - numBlocks := uint32(len(bc.bestChain)) - - if height >= numBlocks { - return nil +func (bc *Blockchain) GetBlockAtHeight(height uint32, isHeaderChain bool) (*MsgDeSoBlock, error) { + bn, bnExists, err := bc.GetBlockFromBestChainByHeight(uint64(height), isHeaderChain) + if !bnExists || err != nil { + glog.Errorf("Blockchain.GetBlockAtHeight: Problem getting block by height: %v", err) + return nil, err } - return bc.GetBlock(bc.bestChain[height].Hash) + return bc.GetBlock(bn.Hash), nil } // GetBlockNodeWithHash looks for a block node in the bestChain list that 
matches the hash. @@ -1412,7 +1595,11 @@ func (bc *Blockchain) GetBlockNodeWithHash(hash *BlockHash) *BlockNode { if hash == nil { return nil } - return bc.bestChainMap[*hash] + bn, bnExists, err := bc.blockIndex.GetBlockNodeByHashOnly(hash) + if !bnExists || err != nil { + return nil + } + return bn } // isTipMaxed compares the tip height to the MaxSyncBlockHeight height. @@ -1435,14 +1622,16 @@ func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { return tip.Height >= bc.MaxSyncBlockHeight } - minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex) - // Not current if the cumulative work is below the threshold. - if bc.params.IsPoWBlockHeight(uint64(tip.Height)) && tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { - //glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+ - //"CumWork (%v) is less than minChainWorkBytes (%v)", - //tip.CumWork, BytesToBigint(minChainWorkBytes)) - return false + if bc.params.IsPoWBlockHeight(uint64(tip.Height)) { + minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex) + + if tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 { + //glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+ + //"CumWork (%v) is less than minChainWorkBytes (%v)", + //tip.CumWork, BytesToBigint(minChainWorkBytes)) + return false + } } // Not current if the tip has a timestamp older than the maximum @@ -1510,16 +1699,6 @@ func (ss SyncState) String() string { // // This function MUST be called with the ChainLock held (for reads). func (bc *Blockchain) chainState() SyncState { - // If the header is not current, then we're in the SyncStateSyncingHeaders. - headerTip := bc.headerTip() - if headerTip == nil { - return SyncStateSyncingHeaders - } - - if !bc.isTipCurrent(headerTip) { - return SyncStateSyncingHeaders - } - // If the header tip is current and the block tip is far in the past, then we're in the SyncStateSyncingSnapshot state. 
if bc.syncingState { return SyncStateSyncingSnapshot @@ -1541,12 +1720,6 @@ func (bc *Blockchain) chainState() SyncState { return SyncStateSyncingBlocks } - // If the header tip is current and the block tip is current but the block - // tip is not equal to the header tip then we're in SyncStateNeedBlocks. - if *blockTip.Hash != *headerTip.Hash { - return SyncStateNeedBlocksss - } - // If none of the checks above returned it means we're current. return SyncStateFullyCurrent } @@ -1575,7 +1748,28 @@ func (bc *Blockchain) checkArchivalMode() bool { } firstSnapshotHeight := bc.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight - for _, blockNode := range bc.bestChain { + _ = firstSnapshotHeight + // @diamondhands - can we spot check just a few blocks such as firstSnapshotHeight - 1, + // firstSnapshotHeight / 2 - 1, and firstSnapshotHeight / 4 - 1 to see if they are stored? + // We take a sampling of blocks to determine if we've downloaded all the blocks up to the first snapshot height. + blockHeights := []uint64{} + increment := firstSnapshotHeight / 10 + for ii := uint64(0); ii < firstSnapshotHeight; ii += increment { + blockHeights = append(blockHeights, ii) + } + for ii := firstSnapshotHeight - 10; ii < firstSnapshotHeight; ii++ { + blockHeights = append(blockHeights, ii) + } + blockHeights = append(blockHeights, firstSnapshotHeight) + for _, height := range blockHeights { + blockNode, exists, err := bc.GetBlockFromBestChainByHeight(height, false) + if err != nil { + glog.Errorf("checkArchivalMode: Problem getting block by height: %v", err) + return false + } + if !exists { + return false + } if uint64(blockNode.Height) > firstSnapshotHeight { return false } @@ -1625,13 +1819,7 @@ func (bc *Blockchain) isHyperSyncCondition() bool { // main chain for blocks, which is why separate functions are required for // each of them. 
func (bc *Blockchain) headerTip() *BlockNode { - if len(bc.bestHeaderChain) == 0 { - return nil - } - - // Note this should always work because we should have the genesis block - // in here. - return bc.bestHeaderChain[len(bc.bestHeaderChain)-1] + return bc.blockIndex.GetHeaderTip() } func (bc *Blockchain) HeaderTip() *BlockNode { @@ -1663,39 +1851,121 @@ func (bc *Blockchain) Snapshot() *Snapshot { // invalidate and chop off the headers corresponding to those blocks and // their ancestors so the two generally stay in sync. func (bc *Blockchain) blockTip() *BlockNode { - var tip *BlockNode - - if len(bc.bestChain) == 0 { - return nil - } - - tip = bc.bestChain[len(bc.bestChain)-1] - - return tip + return bc.blockIndex.GetTip() } func (bc *Blockchain) BlockTip() *BlockNode { return bc.blockTip() } +// TODO: this won't work for now. Need to figure out how to handle this. func (bc *Blockchain) BestChain() []*BlockNode { - return bc.bestChain + panic("BestChain not supported.") +} + +func (bc *Blockchain) GetBlockFromBestChainByHash(blockHash *BlockHash, useHeaderChain bool) (*BlockNode, bool, error) { + bn, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(blockHash) + if err != nil { + return nil, false, err + } + if !exists { + return nil, false, nil + } + if bn.IsCommitted() { + return bn, true, nil // TODO: what do we do about header chain? they're not committed so we're going to + // have to get a bunch of parents in order to be sure it is part of the best header chain. I guess we could + // have a map, but kinda defeats the purpose of this refactor. + } + // TODO: is this legit? It seems like it's fair game... 
+ if bc.isSyncing() && useHeaderChain && bn.IsHeaderValidated() { + return bn, true, nil + } + blockTip := bc.BlockTip() + if useHeaderChain { + blockTip = bc.HeaderTip() + } + if blockTip == nil { + return nil, false, fmt.Errorf("GetBlockFromBestChainByHash: Block tip not found: use header chain: %v", useHeaderChain) + } + committedTip, exists := bc.GetCommittedTip() + if !exists { + return nil, false, errors.New("GetBlockFromBestChainByHash: Committed tip not found") + } + if uint64(bn.Height) > uint64(blockTip.Height) || uint64(bn.Height) < uint64(committedTip.Height) { + return nil, false, nil + } + currNode := &BlockNode{} + *currNode = *blockTip + for currNode != nil && currNode.Height >= bn.Height { + if currNode.Height == bn.Height { + if currNode.Hash.IsEqual(blockHash) { + return currNode, true, nil + } + return nil, false, nil + } + currNode = currNode.GetParent(bc.blockIndex) + } + return nil, false, nil } +func (bc *Blockchain) GetBlockFromBestChainByHeight(height uint64, useHeaderChain bool) (*BlockNode, bool, error) { + if !useHeaderChain { + committedTip, exists := bc.GetCommittedTip() + if !exists { + return nil, false, nil + } + if height >= uint64(committedTip.Height) { + // For this, we can just loop back from the tip block. + currentNode := bc.blockIndex.GetTip() + if useHeaderChain { + currentNode = bc.blockIndex.GetHeaderTip() + } + for currentNode != nil { + if uint64(currentNode.Height) == height { + return currentNode, true, nil + } + if currentNode.Height < committedTip.Height { + break + } + currentNode = currentNode.GetParent(bc.blockIndex) + } + return nil, false, nil + } + } + blockNodes := bc.blockIndex.GetBlockNodesByHeight(height) + if len(blockNodes) == 0 { + return nil, false, nil + } + for _, blockNode := range blockNodes { + if !useHeaderChain && blockNode.IsCommitted() { + return blockNode, true, nil + } + // TODO: this is crude and incorrect. 
+ if useHeaderChain && blockNode.IsHeaderValidated() { + return blockNode, true, nil + } + } + return nil, false, nil +} + +// TODO: need to figure out how to handle this for exchange api tests. func (bc *Blockchain) SetBestChain(bestChain []*BlockNode) { - bc.bestChain = bestChain + for _, blockNode := range bestChain { + bc.blockIndex.addNewBlockNodeToBlockIndex(blockNode) + if bc.blockIndex.GetTip() == nil { + bc.blockIndex.setTip(blockNode) + } else if bc.blockIndex.GetTip().Height < blockNode.Height { + bc.blockIndex.setTip(blockNode) + } + } } -func (bc *Blockchain) SetBestChainMap( - bestChain []*BlockNode, - bestChainMap map[BlockHash]*BlockNode, - blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode], - blockIndexByHeight map[uint64]map[BlockHash]*BlockNode, +func (bc *Blockchain) setBestChainMap( + blockIndexByHash *lru.Cache[BlockHash, *BlockNode], + tipNode *BlockNode, ) { - bc.bestChain = bestChain - bc.bestChainMap = bestChainMap - bc.blockIndexByHash = blockIndexByHash - bc.blockIndexByHeight = blockIndexByHeight + bc.blockIndex.blockIndexByHash = blockIndexByHash + bc.blockIndex.setTip(tipNode) } func (bc *Blockchain) _validateOrphanBlockPoW(desoBlock *MsgDeSoBlock) error { @@ -1840,26 +2110,35 @@ func (bc *Blockchain) MarkBlockInvalid(node *BlockNode, errOccurred RuleError) { //} } -func _FindCommonAncestor(node1 *BlockNode, node2 *BlockNode) *BlockNode { +// Note: we make some assumptions that we only care about ancestors in the best chain. +func (bc *Blockchain) _FindCommonAncestor(node1 *BlockNode, node2 *BlockNode) *BlockNode { if node1 == nil || node2 == nil { // If either node is nil then there can't be a common ancestor. return nil } - // Get the two nodes to be at the same height. + // If both nodes are at a height greater than the committed tip, then we know that + // we have valid parent pointers and can use the Ancestor function to get use to the right place. 
if node1.Height > node2.Height { - node1 = node1.Ancestor(node2.Height) - } else if node1.Height < node2.Height { - node2 = node2.Ancestor(node1.Height) + node1 = node1.Ancestor(node2.Height, bc.blockIndex) + } else if node2.Height > node1.Height { + node2 = node2.Ancestor(node1.Height, bc.blockIndex) } // Iterate the nodes backward until they're either the same or we // reach the end of the lists. We only need to check node1 for nil // since they're the same height and we are iterating both back // in tandem. - for node1 != nil && !node1.Hash.IsEqual(node2.Hash) { - node1 = node1.Parent - node2 = node2.Parent + var exists bool + for !node1.Hash.IsEqual(node2.Hash) { + node1, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(node1.Header.PrevBlockHash, uint64(node1.Height-1)) + if !exists { + return nil + } + node2, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(node2.Header.PrevBlockHash, uint64(node2.Height-1)) + if !exists { + return nil + } } // By now either node1 == node2 and we found the common ancestor or @@ -1939,16 +2218,29 @@ func CheckTransactionSanity(txn *MsgDeSoTxn, blockHeight uint32, params *DeSoPar return nil } -func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockNode, _detachNodes []*BlockNode, _attachNodes []*BlockNode) { +func (bc *Blockchain) GetReorgBlocks(tip *BlockNode, newNode *BlockNode) ( + _commonAncestor *BlockNode, _detachNodes []*BlockNode, _attachNodes []*BlockNode) { + // TODO: finding common ancestors is very expensive for txindex when txindex is very far + // behind. Currently, it requires loading the entire chain into memory. // Find the common ancestor of this block and the main header chain. 
- commonAncestor := _FindCommonAncestor(tip, newNode) + commonAncestor := bc._FindCommonAncestor(tip, newNode) + + if commonAncestor == nil { + glog.Fatalf("No common ancestor found between tip and new node: tip hash (%v), newNode hash (%v)", tip.Hash, newNode.Hash) + return + } // Log a warning if the reorg is going to be a big one. - numBlocks := tip.Height - commonAncestor.Height - if numBlocks > 10 { - glog.Warningf("GetReorgBlocks: Proceeding with reorg of (%d) blocks from "+ - "block (%v) at height (%d) to block (%v) at height of (%d)", - numBlocks, tip, tip.Height, newNode, newNode.Height) + if tip != nil { + numBlocks := tip.Height - commonAncestor.Height + if numBlocks > 10 { + glog.Warningf("GetReorgBlocks: Proceeding with reorg of (%d) blocks from "+ + "block (%v) at height (%d) to block (%v) at height of (%d)", + numBlocks, tip, tip.Height, newNode, newNode.Height) + } + } else { + glog.Fatal("GetReorgBlocks: Tip is nil") + return } // Get the blocks to detach. Start at the tip and work backwards to the @@ -1958,8 +2250,15 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN // detachBlocks will have the current tip as its first element and parents // of the tip thereafter. detachBlocks := []*BlockNode{} - for currentBlock := tip; *currentBlock.Hash != *commonAncestor.Hash; currentBlock = currentBlock.Parent { + currentBlock := &BlockNode{} + *currentBlock = *tip + for currentBlock != nil && *currentBlock.Hash != *commonAncestor.Hash { detachBlocks = append(detachBlocks, currentBlock) + var exists bool + currentBlock, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(currentBlock.Header.PrevBlockHash, uint64(currentBlock.Height-1)) + if !exists { + glog.Fatalf("GetReorgBlocks: Failed to find parent of block. Parent hash %v", currentBlock.Header.PrevBlockHash) + } } // Get the blocks to attach. 
Start at the new node and work backwards to @@ -1970,8 +2269,16 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN // attachNodes will have the new node as its first element and work back to // the node right after the common ancestor as its last element. attachBlocks := []*BlockNode{} - for currentBlock := newNode; *currentBlock.Hash != *commonAncestor.Hash; currentBlock = currentBlock.Parent { + currentBlock = &BlockNode{} + *currentBlock = *newNode + for *currentBlock.Hash != *commonAncestor.Hash { attachBlocks = append(attachBlocks, currentBlock) + var exists bool + currentBlock, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(currentBlock.Header.PrevBlockHash, uint64(currentBlock.Height-1)) + if !exists { + // TODO: what should we do here? + glog.Fatal("GetReorgBlocks: Failed to find parent of block") + } } // Reverse attachBlocks so that the node right after the common ancestor // will be the first element and the node at the end of the list will be @@ -1983,14 +2290,14 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN return commonAncestor, detachBlocks, attachBlocks } -func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockHash]*BlockNode, detachBlocks []*BlockNode, attachBlocks []*BlockNode) ( - chainList []*BlockNode, chainMap map[BlockHash]*BlockNode) { +func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap *lru.Cache[BlockHash, *BlockNode], detachBlocks []*BlockNode, attachBlocks []*BlockNode) ( + chainList []*BlockNode, chainMap *lru.Cache[BlockHash, *BlockNode]) { // Remove the nodes we detached from the end of the best chain node list. 
tipIndex := len(mainChainList) - 1 for blockOffset := 0; blockOffset < len(detachBlocks); blockOffset++ { blockIndex := tipIndex - blockOffset - delete(mainChainMap, *mainChainList[blockIndex].Hash) + mainChainMap.Remove(*mainChainList[blockIndex].Hash) } mainChainList = mainChainList[:len(mainChainList)-len(detachBlocks)] @@ -1999,7 +2306,7 @@ func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockH // first, with the new tip at the end. for _, attachNode := range attachBlocks { mainChainList = append(mainChainList, attachNode) - mainChainMap[*attachNode.Hash] = attachNode + mainChainMap.Add(*attachNode.Hash, attachNode) } return mainChainList, mainChainMap @@ -2024,7 +2331,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // index. If it does, then return an error. We should generally // expect that processHeaderPoW will only be called on headers we // haven't seen before. - _, nodeExists := bc.blockIndexByHash.Get(*headerHash) + _, nodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, blockHeader.Height) if nodeExists { return false, false, HeaderErrorDuplicateHeader } @@ -2049,7 +2356,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B if blockHeader.PrevBlockHash == nil { return false, false, HeaderErrorNilPrevHash } - parentNode, parentNodeExists := bc.blockIndexByHash.Get(*blockHeader.PrevBlockHash) + parentNode, parentNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHeader.PrevBlockHash, blockHeader.Height-1) if !parentNodeExists { // This block is an orphan if its parent doesn't exist and we don't // process unconnectedTxns. @@ -2111,8 +2418,8 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // the parent block. Note that if the parent block is in the block index // then it has necessarily had its difficulty validated, and so using it to // do this check makes sense. 
- diffTarget, err := CalcNextDifficultyTarget( - parentNode, blockHeader.Version, bc.params) + diffTarget, err := bc.CalcNextDifficultyTarget( + parentNode, blockHeader.Version) if err != nil { return false, false, errors.Wrapf(err, "ProcessBlock: Problem computing difficulty "+ @@ -2166,9 +2473,8 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B if bc.isSyncing() { bc.addNewBlockNodeToBlockIndex(newNode) } else { - newBlockIndexByHash, newBlockIndexByHeight := bc.CopyBlockIndexes() - bc.blockIndexByHash = newBlockIndexByHash - bc.blockIndexByHeight = newBlockIndexByHeight + newBlockIndexByHash := bc.CopyBlockIndexes() + bc.blockIndex.blockIndexByHash = newBlockIndexByHash bc.addNewBlockNodeToBlockIndex(newNode) } @@ -2179,13 +2485,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B headerTip := bc.headerTip() if headerTip.CumWork.Cmp(newNode.CumWork) < 0 { isMainChain = true - - _, detachBlocks, attachBlocks := GetReorgBlocks(headerTip, newNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, bc.bestHeaderChainMap, detachBlocks, attachBlocks) - - // Note that we don't store the best header hash here and so this is an - // in-memory-only adjustment. See the comment above on preventing attacks. + bc.blockIndex.setHeaderTip(newNode) } return isMainChain, false, nil @@ -2204,7 +2504,7 @@ func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *Bloc // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. // Otherwise, fall back to the PoW logic. 
if bc.params.IsPoSBlockHeight(blockHeader.Height) { - return bc.processHeaderPoS(blockHeader, verifySignatures) + return bc.processHeaderPoS(blockHeader, headerHash, verifySignatures) } return bc.processHeaderPoW(blockHeader, headerHash) @@ -2311,7 +2611,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bc.timer.Start("Blockchain.ProcessBlock: BlockNode") // See if a node for the block exists in our node index. - nodeToValidate, nodeExists := bc.blockIndexByHash.Get(*blockHash) + // TODO: validate that current height - 1 > 0 + nodeToValidate, nodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeader.Height) // If no node exists for this block at all, then process the header // first before we do anything. This should create a node and set // the header validation status for it. @@ -2332,7 +2633,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Reset the pointers after having presumably added the header to the // block index. - nodeToValidate, nodeExists = bc.blockIndexByHash.Get(*blockHash) + // TODO: validate that current height - 1 > 0 + nodeToValidate, nodeExists = bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeader.Height) } // At this point if the node still doesn't exist or if the header's validation // failed then we should return an error for the block. Note that at this point @@ -2351,7 +2653,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // In this case go ahead and return early. If its parents are truly legitimate then we // should re-request it and its parents from a node and reprocess it // once it is no longer an orphan. 
- parentNode, parentNodeExists := bc.blockIndexByHash.Get(*blockHeader.PrevBlockHash) + // TODO: validate that current height - 1 > 0 + parentNode, parentNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHeader.PrevBlockHash, blockHeader.Height-1) if !parentNodeExists || (parentNode.Status&StatusBlockProcessed) == 0 { return false, true, nil } @@ -2602,6 +2905,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // update our data structures to actually make this connection. Do this // in a transaction so that it is atomic. if bc.postgres != nil { + if !nodeToValidate.IsCommitted() { + nodeToValidate.Status |= StatusBlockCommitted + bc.blockIndex.addNewBlockNodeToBlockIndex(nodeToValidate) + } + if err = bc.postgres.UpsertBlockAndTransactions(nodeToValidate, desoBlock); err != nil { return false, false, errors.Wrapf(err, "ProcessBlock: Problem upserting block and transactions") } @@ -2623,6 +2931,10 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures err = bc.db.Update(func(txn *badger.Txn) error { // This will update the node's status. bc.timer.Start("Blockchain.ProcessBlock: Transactions Db height & hash") + if !nodeToValidate.IsCommitted() { + nodeToValidate.Status |= StatusBlockCommitted + bc.blockIndex.addNewBlockNodeToBlockIndex(nodeToValidate) + } if innerErr := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, nodeToValidate, false /*bitcoinNodes*/, bc.eventManager); innerErr != nil { return errors.Wrapf( innerErr, "ProcessBlock: Problem calling PutHeightHashToNodeInfo after validation") @@ -2669,8 +2981,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Now that we've set the best chain in the db, update our in-memory data // structure to reflect this. Do a quick check first to make sure it's consistent. 
- lastIndex := len(bc.bestChain) - 1 - bestChainHash := bc.bestChain[lastIndex].Hash + bestChainTip := bc.blockIndex.GetTip() + if bestChainTip == nil { + return false, false, fmt.Errorf("ProcessBlock: Best chain tip is nil") + } + bestChainHash := bestChainTip.Hash if !bestChainHash.IsEqual(nodeToValidate.Header.PrevBlockHash) { return false, false, fmt.Errorf("ProcessBlock: Last block in bestChain "+ @@ -2680,15 +2995,13 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // If we're syncing there's no risk of concurrency issues. Otherwise, we // need to make a copy in order to be safe. - if bc.isSyncing() { - bc.bestChain = append(bc.bestChain, nodeToValidate) - bc.bestChainMap[*nodeToValidate.Hash] = nodeToValidate - } else { - newBestChain, newBestChainMap := bc.CopyBestChain() - newBestChain = append(newBestChain, nodeToValidate) - newBestChainMap[*nodeToValidate.Hash] = nodeToValidate - bc.bestChain, bc.bestChainMap = newBestChain, newBestChainMap - } + // We no longer need to worry about whether we're syncing or not. Just + // set the tip. + //if bc.isSyncing() { + // bc.blockIndex.setTip(nodeToValidate) + //} else { + bc.blockIndex.setTip(nodeToValidate) + //} // This node is on the main chain so set this variable. isMainChain = true @@ -2744,7 +3057,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Find the common ancestor of this block and the main chain. // TODO: Reorgs with postgres? - commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(currentTip, nodeToValidate) + commonAncestor, detachBlocks, attachBlocks := bc.GetReorgBlocks(currentTip, nodeToValidate) // Log a warning if the reorg is going to be a big one. numBlocks := currentTip.Height - commonAncestor.Height if numBlocks > 10 { @@ -2843,7 +3156,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // If the parent node has been marked as invalid then mark this node as // invalid as well.
- if (attachNode.Parent.Status & StatusBlockValidateFailed) != 0 { + if (attachNode.GetParent(bc.blockIndex).Status & StatusBlockValidateFailed) != 0 { bc.MarkBlockInvalid(attachNode, RuleErrorPreviousBlockInvalid) continue } @@ -2912,6 +3225,14 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures if err := PutBestHashWithTxn(txn, bc.snapshot, newTipNode.Hash, ChainTypeDeSoBlock, bc.eventManager); err != nil { return err } + if !newTipNode.IsCommitted() { + newTipNode.Status |= StatusBlockCommitted + // update the block index to be safe. + bc.addNewBlockNodeToBlockIndex(newTipNode) + if err := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, newTipNode, false, bc.eventManager); err != nil { + return err + } + } for _, detachNode := range detachBlocks { // Delete the utxo operations for the blocks we're detaching since we don't need @@ -2920,6 +3241,16 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures return errors.Wrapf(err, "ProcessBlock: Problem deleting utxo operations for block") } + // We also need to revert the committed state if applicable. + if detachNode.IsCommitted() { + detachNode.ClearCommittedStatus() + // update the block index to be safe. + bc.addNewBlockNodeToBlockIndex(detachNode) + if err = PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, detachNode, false, bc.eventManager); err != nil { + return errors.Wrapf(err, "ProcessBlock: Problem putting height hash to node info for detach node that is not committed.") + } + } + // Note we could be even more aggressive here by deleting the nodes and // corresponding blocks from the db here (i.e. not storing any side chain // data on the db). 
But this seems like a minor optimization that comes at @@ -2932,6 +3263,15 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures if err := PutUtxoOperationsForBlockWithTxn(txn, bc.snapshot, blockHeight, attachNode.Hash, utxoOpsForAttachBlocks[ii], bc.eventManager); err != nil { return errors.Wrapf(err, "ProcessBlock: Problem putting utxo operations for block") } + + if !attachNode.IsCommitted() { + attachNode.Status |= StatusBlockCommitted + // update the block index to be safe. + bc.addNewBlockNodeToBlockIndex(attachNode) + if err = PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, attachNode, false, bc.eventManager); err != nil { + return errors.Wrapf(err, "ProcessBlock: Problem putting height hash to node info for detach node that is not committed.") + } + } } // Write the modified utxo set to the view. @@ -2948,10 +3288,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Now the db has been updated, update our in-memory best chain. Note that there // is no need to update the node index because it was updated as we went along. - newBestChain, newBestChainMap := bc.CopyBestChain() - newBestChain, newBestChainMap = updateBestChainInMemory( - newBestChain, newBestChainMap, detachBlocks, attachBlocks) - bc.bestChain, bc.bestChainMap = newBestChain, newBestChainMap + bc.blockIndex.setTip(newTipNode) + bc.blockIndex.setHeaderTip(newTipNode) // If we made it here then this block is on the main chain. 
isMainChain = true diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index d18eb6091..6ce522f7c 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -19,7 +19,7 @@ import ( chainlib "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -1222,6 +1222,8 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1229,6 +1231,8 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1236,11 +1240,11 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1248,8 +1252,9 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { // Blocks generating every 1 second, which is 2x too fast. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1273,11 +1278,11 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 30; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1285,8 +1290,9 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { // Blocks generating every 4 second, which is 2x too slow. TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1318,6 +1324,8 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1325,6 +1333,8 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1332,11 +1342,11 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + 
NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1344,8 +1354,9 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { // Blocks generating every 1 second, which is 2x too fast. TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1369,11 +1380,11 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 30; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1381,8 +1392,9 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { // Blocks generating every 8 second, which is >2x too slow. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, - StatusNone, + StatusHeaderValidated, )) + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1414,6 +1426,8 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1421,6 +1435,8 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1428,11 +1444,11 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1440,9 +1456,9 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { // Blocks generating all at once. 
TstampNanoSecs: SecondsToNanoSeconds(0), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1469,12 +1485,15 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, TimeBetweenBlocks: 2 * time.Second, MaxDifficultyRetargetFactor: 3, } + bc.params = fakeParams nodes := []*BlockNode{} diffsAsInts := []int64{} @@ -1483,11 +1502,11 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1495,9 +1514,9 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { // Blocks generating every 2 second, which is under the limit. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 2)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1524,6 +1543,8 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1531,6 +1552,8 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1538,11 +1561,11 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1550,9 +1573,9 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { // Blocks generating every 1 second, which is 2x too fast. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1575,11 +1598,11 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 34; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, nil, @@ -1587,9 +1610,9 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { // Blocks generating every 3 seconds, which is slow but under the limit. TstampNanoSecs: SecondsToNanoSeconds(int64(ii) * 3), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 3245b0e0c..858b803de 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -10,8 +10,8 @@ import ( "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" - "github.com/decred/dcrd/container/lru" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" ) // connection_manager.go contains most of the logic for creating and managing @@ -53,7 +53,7 @@ type ConnectionManager struct { // TODO: seems like we don't use this. // Keep track of the nonces we've sent in our version messages so // we can prevent connections to ourselves. - sentNonces lru.Set[any] + sentNonces *lru.Cache[uint64, struct{}] // This section defines the data structures for storing all the // peers we're aware of. 
@@ -126,13 +126,14 @@ func NewConnectionManager( ValidateHyperSyncFlags(_hyperSync, _syncType) + sentNoncesCache, _ := lru.New[uint64, struct{}](1000) return &ConnectionManager{ srv: _srv, params: _params, listeners: _listeners, // We keep track of the last N nonces we've sent in order to detect // self connections. - sentNonces: *lru.NewSet[any](1000), + sentNonces: sentNoncesCache, //newestBlock: _newestBlock, // Initialize the peer data structures. diff --git a/lib/constants.go b/lib/constants.go index fe23b8674..56348de8c 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1956,5 +1956,7 @@ const DefaultTestnetCheckpointProvider = "https://test.deso.org" const RoutePathGetCommittedTipBlockInfo = "/api/v0/get-committed-tip-block-info" -// Constants that was removed from newer version of Btcec +const BlockIndexMigrationFileName = "block_index_migration.txt" + +// BtcecPubKeyBytesLenUncompressed is a constant that was removed from newer version of Btcec const BtcecPubKeyBytesLenUncompressed = 65 diff --git a/lib/db_adapter.go b/lib/db_adapter.go index 8cff5d757..4eea4d0da 100644 --- a/lib/db_adapter.go +++ b/lib/db_adapter.go @@ -4,7 +4,7 @@ import ( "bytes" "sort" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/db_utils.go b/lib/db_utils.go index 68f63ce61..63f78fcbf 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -24,7 +24,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -604,7 +604,11 @@ type DBPrefixes struct { // When reading and writing data to this prefixes, please acquire the snapshotDbMutex in the snapshot. PrefixHypersyncSnapshotDBPrefix []byte `prefix_id:"[97]"` - // NEXT_TAG: 98 + // PrefixHashToHeight is used to store the height of a block given its hash. 
+ // This helps us map a block hash to its height so we can look up the full info + // in PrefixHeightHashToNodeInfo. + PrefixHashToHeight []byte `prefix_id:"[98]"` + // NEXT_TAG: 99 } // DecodeStateKey decodes a state key into a DeSoEncoder type. This is useful for encoders which don't have a stored @@ -1138,7 +1142,7 @@ func DBSetWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, value []byte, eve return errors.Wrapf(err, "DBSetWithTxn: Problem preparing ancestral record") } // Now save the newest record to cache. - snap.DatabaseCache.Put(keyString, value) + snap.DatabaseCache.Add(keyString, value) if !snap.disableChecksum { // We have to remove the previous value from the state checksum. @@ -1237,7 +1241,7 @@ func DBDeleteWithTxn(txn *badger.Txn, snap *Snapshot, key []byte, eventManager * return errors.Wrapf(err, "DBDeleteWithTxn: Problem preparing ancestral record") } // Now delete the past record from the cache. - snap.DatabaseCache.Delete(keyString) + snap.DatabaseCache.Remove(keyString) // We have to remove the previous value from the state checksum. // Because checksum is commutative, we can safely remove the past value here. if !snap.disableChecksum { @@ -2806,7 +2810,7 @@ func DBGetAccessGroupExistenceByAccessGroupIdWithTxn(txn *badger.Txn, snap *Snap // Lookup the snapshot cache and check if we've already stored a value there. if isState { - if exists := snap.DatabaseCache.Exists(keyString); exists { + if exists := snap.DatabaseCache.Contains(keyString); exists { return true, nil } } @@ -5188,6 +5192,13 @@ func _heightHashToNodeIndexPrefix(bitcoinNodes bool) []byte { return prefix } +func _heightHashToNodePrefixByHeight(height uint32, bitcoinNodes bool) []byte { + prefix := _heightHashToNodeIndexPrefix(bitcoinNodes) + heightBytes := make([]byte, 4) + binary.BigEndian.PutUint32(heightBytes[:], height) + return append(prefix, heightBytes[:]...) 
+} + func _heightHashToNodeIndexKey(height uint32, hash *BlockHash, bitcoinNodes bool) []byte { prefix := _heightHashToNodeIndexPrefix(bitcoinNodes) @@ -5199,6 +5210,12 @@ func _heightHashToNodeIndexKey(height uint32, hash *BlockHash, bitcoinNodes bool return key } +func _hashToHeightIndexKey(hash *BlockHash) []byte { + key := append([]byte{}, Prefixes.PrefixHashToHeight...) + key = append(key, hash[:]...) + return key +} + func GetHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, height uint32, hash *BlockHash, bitcoinNodes bool) *BlockNode { @@ -5238,9 +5255,44 @@ func PutHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, if err := DBSetWithTxn(txn, snap, key, serializedNode, eventManager); err != nil { return err } + + hashToHeightKey := _hashToHeightIndexKey(node.Hash) + if err = DBSetWithTxn(txn, snap, hashToHeightKey, UintToBuf(uint64(node.Height)), eventManager); err != nil { + return err + } + return nil } +func PutHashToHeightBatch(handle *badger.DB, snap *Snapshot, hashToHeight map[BlockHash]uint32, eventManager *EventManager) error { + return handle.Update(func(txn *badger.Txn) error { + for hash, height := range hashToHeight { + key := _hashToHeightIndexKey(&hash) + if err := DBSetWithTxn(txn, snap, key, UintToBuf(uint64(height)), eventManager); err != nil { + return errors.Wrap(err, "PutHashToHeightBatch: Problem setting hash to height") + } + } + return nil + }) +} + +func GetHeightForHash(db *badger.DB, snap *Snapshot, hash *BlockHash) (uint64, error) { + var height uint64 + err := db.View(func(txn *badger.Txn) error { + key := _hashToHeightIndexKey(hash) + heightBytes, err := DBGetWithTxn(txn, snap, key) + if err != nil { + return err + } + height, _ = Uvarint(heightBytes) + return nil + }) + if err != nil { + return 0, err + } + return height, nil +} + func PutHeightHashToNodeInfoBatch(handle *badger.DB, snap *Snapshot, nodes []*BlockNode, bitcoinNodes bool, eventManager *EventManager) error { @@ -5308,7 +5360,7 @@ func 
InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, diffTarget, BytesToBigint(ExpectedWorkForBlockHash(diffTarget)[:]), // CumWork genesisBlock.Header, // Header - StatusHeaderValidated|StatusBlockProcessed|StatusBlockStored|StatusBlockValidated, // Status + StatusHeaderValidated|StatusBlockProcessed|StatusBlockStored|StatusBlockValidated|StatusBlockCommitted, // Status ) // Set the fields in the db to reflect the current state of our chain. @@ -5544,9 +5596,170 @@ func GetBlockIndex(handle *badger.DB, bitcoinNodes bool, params *DeSoParams) ( return blockIndex, nil } +func (bi *BlockIndex) LoadBlockIndexFromHeight(height uint32, params *DeSoParams) error { + prefix := _heightHashToNodePrefixByHeight(height, false) + + return bi.db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + nodeIterator := txn.NewIterator(opts) + defer nodeIterator.Close() + for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() { + var blockNode *BlockNode + + // Don't bother checking the key. We assume that the key lines up + // with what we've stored in the value in terms of (height, block hash). + item := nodeIterator.Item() + err := item.Value(func(blockNodeBytes []byte) error { + // Deserialize the block node. + var err error + // TODO: There is room for optimization here by pre-allocating a + // contiguous list of block nodes and then populating that list + // rather than having each blockNode be a stand-alone allocation. + blockNode, err = DeserializeBlockNode(blockNodeBytes) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + + // If we got here it means we read a blockNode successfully. Store it + // into our node index. + bi.addNewBlockNodeToBlockIndex(blockNode) + + // Find the parent of this block, which should already have been read + // in and connect it. Skip the genesis block, which has height 0. 
Also + // skip the block if its PrevBlockHash is empty, which will be true for + // the BitcoinStartBlockNode. + // + // TODO: There is room for optimization here by keeping a reference to + // the last node we've iterated over and checking if that node is the + // parent. Doing this would avoid an expensive hashmap check to get + // the parent by its block hash. + if blockNode.Height == 0 || (*blockNode.Header.PrevBlockHash == BlockHash{}) { + continue + } + if parent, ok := bi.GetBlockNodeByHashAndHeight(blockNode.Header.PrevBlockHash, uint64(blockNode.Height)-1); ok { + // We found the parent node so connect it. + blockNode.Parent = parent + } else { + // If we're syncing a DeSo node and we hit a PoS block, we expect there to + // be orphan blocks in the block index. In this case, we don't throw an error. + if params.IsPoSBlockHeight(uint64(blockNode.Height)) { + continue + } + // In this case we didn't find the parent so error. There shouldn't + // be any unconnectedTxns in our block index. + return fmt.Errorf("LoadBlockIndexFromHeight: Could not find parent for blockNode: %+v", blockNode) + } + } + return nil + }) +} + +func RunBlockIndexMigration(handle *badger.DB, snapshot *Snapshot, eventManager *EventManager, params *DeSoParams) error { + return handle.Update(func(txn *badger.Txn) error { + prefix := _heightHashToNodeIndexPrefix(false) + opts := badger.DefaultIteratorOptions + opts.Prefix = prefix + // We don't need values for this migration. + opts.PrefetchValues = false + nodeIterator := txn.NewIterator(opts) + defer nodeIterator.Close() + hashToHeightMap := make(map[BlockHash]uint32) + // Just in case we need it, get the height of the best hash. + bestHash := DbGetBestHash(handle, snapshot, ChainTypeDeSoBlock) + var bestHashHeight uint32 + for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() { + item := nodeIterator.Item().Key() + + // Parse the key to get the height and hash.
+ height := binary.BigEndian.Uint32(item[1:5]) + hash := BlockHash{} + copy(hash[:], item[5:]) + hashToHeightMap[hash] = height + if bestHash != nil && bestHash.IsEqual(&hash) { + bestHashHeight = height + } + if len(hashToHeightMap) < 10000 { + continue + } + innerErr := PutHashToHeightBatch(handle, snapshot, hashToHeightMap, eventManager) + if innerErr != nil { + return errors.Wrap(innerErr, "RunBlockIndexMigration: Problem putting hash to height") + } + hashToHeightMap = make(map[BlockHash]uint32) + } + if len(hashToHeightMap) > 0 { + innerErr := PutHashToHeightBatch(handle, snapshot, hashToHeightMap, eventManager) + if innerErr != nil { + return errors.Wrap(innerErr, "RunBlockIndexMigration: Problem putting hash to height") + } + } + // If we don't have a best hash, then we certainly haven't hit the first pos block height. + if bestHash == nil { + return nil + } + // TODO: get best chain up to PoS Cutover height and set all blocks in that chain to committed. + firstPoSBlockHeight := params.GetFirstPoSBlockHeight() + // Look up blocks at cutover height. + prefixKey := _heightHashToNodePrefixByHeight(uint32(firstPoSBlockHeight), false) + _, valsFound, err := _enumerateKeysForPrefixWithTxn(txn, prefixKey, false) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem enumerating keys for prefix") + } + if len(valsFound) > 1 { + return fmt.Errorf("RunBlockIndexMigration: More than one block found at PoS cutover height") + } + var blockNode *BlockNode + // In this case, we need to pull the best hash from the DB and iterate backwards.
+ if len(valsFound) == 0 { + blockNode = GetHeightHashToNodeInfoWithTxn(txn, snapshot, bestHashHeight, bestHash, false) + if blockNode == nil { + return fmt.Errorf("RunBlockIndexMigration: block with Best hash (%v) and height (%v) not found", bestHash, bestHashHeight) + } + } else { + blockNode, err = DeserializeBlockNode(valsFound[0]) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem deserializing block node for pos cutover") + } + } + var blockNodeBatch []*BlockNode + for blockNode != nil { + if !blockNode.IsCommitted() { + blockNode.Status |= StatusBlockCommitted + } + // TODO: make sure I don't need a copy. + blockNodeBatch = append(blockNodeBatch, blockNode) + parentBlockNode := GetHeightHashToNodeInfoWithTxn(txn, snapshot, blockNode.Height-1, blockNode.Header.PrevBlockHash, false /*bitcoinNodes*/) + if blockNode.Height > 0 && parentBlockNode == nil { + return errors.New("RunBlockIndexMigration: Parent block node not found") + } + blockNode = parentBlockNode + if len(blockNodeBatch) < 10000 { + continue + } + if err = PutHeightHashToNodeInfoBatch(handle, snapshot, blockNodeBatch, false /*bitcoinNodes*/, eventManager); err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem putting block node batch") + } + blockNodeBatch = nil + } + err = PutHeightHashToNodeInfoBatch(handle, snapshot, blockNodeBatch, false /*bitcoinNodes*/, eventManager) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem putting block node batch") + } + return nil + }) +} + +// TODO: refactor to actually get the whole best chain if that's +// what someone wants. It'll take a while, but whatever. func GetBestChain(tipNode *BlockNode) ([]*BlockNode, error) { reversedBestChain := []*BlockNode{} - for tipNode != nil { + maxBestChainInitLength := 3600 * 100 // Cache up to 100 hours of blocks.
+ for tipNode != nil && len(reversedBestChain) < maxBestChainInitLength { if (tipNode.Status&StatusBlockValidated) == 0 && (tipNode.Status&StatusBitcoinHeaderValidated) == 0 { diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index 82a452df6..6a6a0f208 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec/v2" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index 247092d1e..7e9dceddf 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -18,7 +18,7 @@ import ( "time" "github.com/btcsuite/btcd/btcutil" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/gernest/mention" "github.com/btcsuite/btcd/btcec/v2" diff --git a/lib/miner.go b/lib/miner.go index 67ed8976a..d912f5858 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -208,6 +208,7 @@ func (desoMiner *DeSoMiner) MineAndProcessSingleBlock(threadIndex uint32, mempoo desoMiner.BlockProducer.chain.blockTip().Header) scs := spew.ConfigState{DisableMethods: true, Indent: " ", DisablePointerAddresses: true} glog.V(1).Infof(scs.Sdump(blockToMine)) + // Sanitize the block for the comparison we're about to do. We need to do // this because the comparison function below will think they're different // if one has nil and one has an empty list. 
Annoying, but this solves the diff --git a/lib/network.go b/lib/network.go index 90eb50f09..4cc49866f 100644 --- a/lib/network.go +++ b/lib/network.go @@ -1155,6 +1155,9 @@ func (msg *MsgDeSoBlockBundle) String() string { type MsgDeSoGetBlocks struct { HashList []*BlockHash + + StartHeight uint64 + NumBlocks uint64 } func (msg *MsgDeSoGetBlocks) GetMsgType() MsgType { diff --git a/lib/network_manager.go b/lib/network_manager.go index 0bfcf101a..a77974800 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -11,11 +11,11 @@ import ( "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" - "github.com/decred/dcrd/container/lru" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -69,7 +69,7 @@ type NetworkManager struct { NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] // Cache of nonces used during handshake. - usedNonces lru.Set[uint64] + usedNonces *lru.Cache[uint64, struct{}] // The address manager keeps track of peer addresses we're aware of. 
When // we need to connect to a new outbound peer, it chooses one of the addresses @@ -121,7 +121,7 @@ func NewNetworkManager( minTxFeeRateNanosPerKB uint64, nodeServices ServiceFlag, ) *NetworkManager { - + usedNoncesCache, _ := lru.New[uint64, struct{}](1000) return &NetworkManager{ params: params, srv: srv, @@ -136,7 +136,7 @@ func NewNetworkManager( ValidatorOutboundIndex: collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](), NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), NonValidatorInboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](), - usedNonces: *lru.NewSet[uint64](1000), + usedNonces: usedNoncesCache, connectIps: connectIps, persistentIpToRemoteNodeIdsMap: collections.NewConcurrentMap[string, RemoteNodeId](), activeValidatorsMap: collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator](), @@ -261,7 +261,7 @@ func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessag // If we've seen this nonce before then return an error since this is a connection from ourselves. msgNonce := verMsg.Nonce if nm.usedNonces.Contains(msgNonce) { - nm.usedNonces.Delete(msgNonce) + nm.usedNonces.Remove(msgNonce) glog.Errorf("NetworkManager.handleVersionMessage: Disconnecting RemoteNode with id: (%v) "+ "nonce collision, nonce (%v)", origin.ID, msgNonce) nm.Disconnect(rn, "nonce collision") @@ -277,7 +277,7 @@ func (nm *NetworkManager) _handleVersionMessage(origin *Peer, desoMsg DeSoMessag return } - nm.usedNonces.Put(responseNonce) + nm.usedNonces.Add(responseNonce, struct{}{}) } // _handleVerackMessage is called when a new verack message is received. 
@@ -1248,7 +1248,7 @@ func (nm *NetworkManager) InitiateHandshake(rn *RemoteNode) { glog.Errorf("NetworkManager.InitiateHandshake: Error initiating handshake: %v", err) nm.Disconnect(rn, fmt.Sprintf("error initiating handshake: %v", err)) } - nm.usedNonces.Put(nonce) + nm.usedNonces.Add(nonce, struct{}{}) } // handleHandshakeComplete is called on a completed handshake with a RemoteNodes. diff --git a/lib/notifier.go b/lib/notifier.go index b6c88c37d..a42ffb3ab 100644 --- a/lib/notifier.go +++ b/lib/notifier.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/gernest/mention" "github.com/go-pg/pg/v10" diff --git a/lib/peer.go b/lib/peer.go index b4d91a3e6..96dfd68ad 100644 --- a/lib/peer.go +++ b/lib/peer.go @@ -9,10 +9,9 @@ import ( "sync/atomic" "time" - "github.com/decred/dcrd/container/lru" - "github.com/btcsuite/btcd/wire" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -111,7 +110,7 @@ type Peer struct { // Inventory stuff. // The inventory that we know the peer already has. - knownInventory lru.Set[InvVect] + knownInventory *lru.Cache[InvVect, struct{}] // Whether the peer is ready to receive INV messages. For a peer that // still needs a mempool download, this is false. @@ -292,7 +291,7 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) { for _, invVect := range msg.InvList { // No matter what, add the inv to the peer's known inventory. - pp.knownInventory.Put(*invVect) + pp.knownInventory.Add(*invVect, struct{}{}) // If this is a hash we are currently processing, no need to do anything. // This check serves to fill the gap between the time when we've decided @@ -330,7 +329,14 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) { } else if invVect.Type == InvTypeBlock { // For blocks, we check that the hash isn't known to us either in our // main header chain or in side chains. 
- if pp.srv.blockchain.HasHeader(¤tHash) { + // I think this check still works even though we don't explicitly sync headers because + // processing a block implicitly processes its header. + exists, err := pp.srv.blockchain.HasHeader(¤tHash) + if exists { + continue + } + if err != nil { + glog.Errorf("Server._handleInv: Error checking if block exists: %v", err) continue } @@ -339,7 +345,7 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) { // If we made it here, it means the inventory was added to one of the // lists so mark it as processed on the Server. - pp.srv.inventoryBeingProcessed.Put(*invVect) + pp.srv.inventoryBeingProcessed.Add(*invVect, struct{}{}) } // If there were any transactions we don't yet have, request them using @@ -362,21 +368,17 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) { } // If the peer has sent us any block hashes that are new to us then send - // a GetHeaders message to her to get back in sync with her. The flow + // a GetBlocks message to her to get back in sync with her. The flow // for this is generally: // - Receive an inv message from a peer for a block we don't have. - // - Send them a GetHeaders message with our most up-to-date block locator. - // - Receive back from them all the headers they're aware of that can be + // - Send them a GetBlocks message with our most up-to-date block as our StartHeight. + // - Receive back from them all the blocks they're aware of that can be // accepted into our chain. - // - We will then request from them all of the block data for the new headers - // we have if they affect our main chain. - // - When the blocks come in, we process them by adding them to the chain - // one-by-one. + // - We process them by adding them to the chain one-by-one. 
if len(blockHashList) > 0 { - locator := pp.srv.blockchain.LatestHeaderLocator() - pp.AddDeSoMessage(&MsgDeSoGetHeaders{ - StopHash: &BlockHash{}, - BlockLocator: locator, + pp.AddDeSoMessage(&MsgDeSoGetBlocks{ + StartHeight: uint64(pp.srv.blockchain.BlockTip().Height + 1), + NumBlocks: 0, }, false /*inbound*/) } } @@ -398,6 +400,26 @@ func (pp *Peer) HandleInv(msg *MsgDeSoInv) { } func (pp *Peer) HandleGetBlocks(msg *MsgDeSoGetBlocks) { + // Note: Genesis block has index zero, and we will never request it from a peer so + // we can safely interpret StartHeight = 0 as "unset". + if msg.StartHeight != 0 { + numBlocks := msg.NumBlocks + if numBlocks == 0 || numBlocks > MaxBlocksInFlightPoS { + numBlocks = MaxBlocksInFlightPoS + } + + allBlocks := MsgDeSoBlockBundle{} + + // FIXME: Look up all the blocks from StartHeight to StartHeight+numBlocks and send + // them back to the peer in a MsgDeSoBlockBundle + + pp.AddDeSoMessage(&allBlocks, false) + + return + } + // If we get here, then we have StartHeight = 0, meaning we fall back to the + // HashList in the message. + // Nothing to do if the request is empty. 
if len(msg.HashList) == 0 { glog.V(1).Infof("Server._handleGetBlocks: Received empty GetBlocks "+ @@ -639,7 +661,7 @@ func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAdd _cmgr *ConnectionManager, _srv *Server, _syncType NodeSyncType, peerDisconnectedChan chan *Peer) *Peer { - + knownInventory, _ := lru.New[InvVect, struct{}](maxKnownInventory) pp := Peer{ ID: _id, cmgr: _cmgr, @@ -652,7 +674,7 @@ func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAdd outputQueueChan: make(chan DeSoMessage), peerDisconnectedChan: peerDisconnectedChan, quit: make(chan interface{}), - knownInventory: *lru.NewSet[InvVect](maxKnownInventory), + knownInventory: knownInventory, blocksToSend: make(map[BlockHash]bool), stallTimeoutSeconds: _stallTimeoutSeconds, minTxFeeRateNanosPerKB: _minFeeRateNanosPerKB, @@ -978,7 +1000,7 @@ out: // Add the new inventory to the peer's knownInventory. for _, invVect := range invMsg.InvList { - pp.knownInventory.Put(*invVect) + pp.knownInventory.Add(*invVect, struct{}{}) } } diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go index 0ddae6d25..4b610f017 100644 --- a/lib/pos_blockchain.go +++ b/lib/pos_blockchain.go @@ -9,7 +9,7 @@ import ( "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -38,7 +38,7 @@ import ( // StatusHeaderValidated or StatusHeaderValidateFailed. // 5. Exit early if the's view is less than the current header chain's tip. // 6. Reorg the best header chain if the header's view is higher than the current tip. 
-func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures bool) ( +func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) ( _isMainChain bool, _isOrphan bool, _err error, ) { if !bc.params.IsPoSBlockHeight(header.Height) { @@ -48,14 +48,17 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures b ) } - headerHash, err := header.Hash() - if err != nil { - return false, false, errors.Wrapf(err, "processHeaderPoS: Problem hashing header") - } - // If the incoming header is already part of the best header chain, then we can exit early. // The header is not part of a fork, and is already an ancestor of the current header chain tip. - if _, isInBestHeaderChain := bc.bestHeaderChainMap[*headerHash]; isInBestHeaderChain { + // Here we explicitly check the bestHeaderChain.ChainMap to make sure the in-memory struct is properly + // updated. This is necessary because the block index may have been updated with the header but the + // bestHeaderChain.ChainMap may not have been updated yet. + // TODO: make sure this is ok or do we need to explicitly check the block index's cache? + _, isInBestHeaderChain, err := bc.GetBlockFromBestChainByHash(headerHash, true) + if err != nil { + return false, false, errors.Wrapf(err, "processHeaderPoS: Problem getting block from best chain by hash: ") + } + if isInBestHeaderChain { return true, false, nil } @@ -72,9 +75,12 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures b return false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") } - // Now that we know we have a valid header, we check the block index for it any orphan children for it - // and heal the parent pointers for all of them. - bc.healPointersForOrphanChildren(blockNode) + // Don't worry about healing orphan children when we're syncing. 
+ //if !bc.isSyncing() { + // // Now that we know we have a valid header, we check the block index for it any orphan children for it + // // and heal the parent pointers for all of them. + // bc.healPointersForOrphanChildren(blockNode) + //} // Exit early if the header is an orphan. if isOrphan { @@ -88,15 +94,17 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures b return false, false, nil } + bc.blockIndex.setHeaderTip(blockNode) + // The header is not an orphan and has a higher view than the current tip. We reorg the header chain // and apply the incoming header as the new tip. - _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentTip, blockNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, - bc.bestHeaderChainMap, - blocksToDetach, - blocksToAttach, - ) + //_, blocksToDetach, blocksToAttach := bc.GetReorgBlocks(currentTip, blockNode) + //bc.bestHeaderChain.Chain, bc.bestHeaderChain.ChainMap = updateBestChainInMemory( + // bc.bestHeaderChain.Chain, + // bc.bestHeaderChain.ChainMap, + // blocksToDetach, + // blocksToAttach, + //) // Success. The header is at the tip of the best header chain. return true, false, nil @@ -108,37 +116,37 @@ func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures b // later on, we not only need to store the parent in the block index but also need to update the // pointer from the orphan block's BlockNode to the parent. We do that dynamically here as we // process headers. -func (bc *Blockchain) healPointersForOrphanChildren(blockNode *BlockNode) { - // Fetch all potential children of this blockNode from the block index. - blockNodesAtNextHeight, exists := bc.blockIndexByHeight[blockNode.Header.Height+1] - if !exists { - // No children of this blockNode exist in the block index. Exit early. - return - } - - // Iterate through all block nodes at the next block height and update their parent pointers. 
- for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { - // Check if it's a child of the parent block node. - if !blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) { - continue - } - - // Check if it has its parent pointer set. If it does, then we exit early. - if blockNodeAtNextHeight.Parent != nil { - continue - } - - // If the parent block node is not set, then we set it to the parent block node. - blockNodeAtNextHeight.Parent = blockNode - } -} +//func (bc *Blockchain) healPointersForOrphanChildren(blockNode *BlockNode) { +// // Fetch all potential children of this blockNode from the block index. +// blockNodesAtNextHeight := bc.blockIndex.GetBlockNodesByHeight(blockNode.Header.Height + 1) +// exists := len(blockNodesAtNextHeight) > 0 +// if !exists { +// // No children of this blockNode exist in the block index. Exit early. +// return +// } +// +// // Iterate through all block nodes at the next block height and update their parent pointers. +// for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { +// // Check if it's a child of the parent block node. +// if !blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) { +// continue +// } +// +// // Check if it has its parent pointer set. If it does, then we exit early. +// if blockNodeAtNextHeight.Parent != nil { +// continue +// } +// +// // If the parent block node is not set, then we set it to the parent block node. +// blockNodeAtNextHeight.Parent = blockNode +// } +//} func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) ( _headerBlockNode *BlockNode, _isOrphan bool, _err error, ) { // Look up the header in the block index to check if it has already been validated and indexed. 
- blockNode, blockNodeExists := bc.blockIndexByHash.Get(*headerHash) - + blockNode, blockNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, header.Height) // ------------------------------------ Base Cases ----------------------------------- // // The header is already validated. Exit early. @@ -157,7 +165,8 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas } // The header is an orphan. No need to store it in the block index. Exit early. - parentBlockNode, parentBlockNodeExists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + // TODO: validate that height - 1 > 0 + parentBlockNode, parentBlockNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !parentBlockNodeExists { return nil, true, nil } @@ -191,14 +200,14 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas // is also not valid. if parentBlockNode.IsHeaderValidateFailed() { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), ) } // Verify that the header is properly formed. 
if err := bc.isValidBlockHeaderPoS(header); err != nil { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Header failed validations"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Header failed validations"), ) } @@ -210,13 +219,13 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas } if !isValidRandomSeedSignature { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), ) } } // Store it as HeaderValidated now that it has passed all validations. - blockNode, err = bc.storeValidatedHeaderInBlockIndex(header) + blockNode, err = bc.storeValidatedHeaderInBlockIndex(header, headerHash) if err != nil { return nil, false, errors.Wrapf(err, "validateAndIndexHeaderPoS: Problem adding header to block index: ") } @@ -268,14 +277,15 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } // If we can't hash the block, we can never store in the block index and we should throw it out immediately. - if _, err := block.Hash(); err != nil { + blockHash, err := block.Hash() + if err != nil { return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") } // In hypersync archival mode, we may receive blocks that have already been processed and committed during state // synchronization. However, we may want to store these blocks in the db for archival purposes. We check if the // block we're dealing with is an archival block. If it is, we store it and return early. 
- if success, err := bc.checkAndStoreArchivalBlock(block); err != nil { + if success, err := bc.checkAndStoreArchivalBlock(block, blockHash); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem checking and storing archival block") } else if success { return true, false, nil, nil @@ -310,7 +320,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have // been validated. - parentUtxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash) + parentUtxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash, block.Header.Height-1) if err != nil { // This should never happen. If the parent is validated and extends from the tip, then we should // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. @@ -320,7 +330,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView // First, we perform a validation of the leader and the QC to prevent spam. // If the block fails this check, we throw it away. - passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, blockHash, parentUtxoView, verifySignatures) if err != nil { // If we hit an error, we can't store it since we're not sure if it passed the spam prevention check. return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating leader and QC") @@ -331,7 +341,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } // Validate the block and store it in the block index. The block is guaranteed to not be an orphan. 
- blockNode, err := bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures) + blockNode, err := bc.validateAndIndexBlockPoS(block, blockHash, parentUtxoView, verifySignatures) if err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating block: ") @@ -354,7 +364,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // header and applying it to the header chain will result in the two chains being out of // sync. The header chain is less critical and mutations to it are reversible. So we attempt // to mutate it first before attempting to mutate the block chain. - if _, _, err = bc.processHeaderPoS(block.Header, verifySignatures); err != nil { + if _, _, err = bc.processHeaderPoS(block.Header, blockHash, verifySignatures); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem processing header") } @@ -401,7 +411,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // Now that we've processed this block, we check for any blocks that were previously // stored as orphans, which are children of this block. We can process them now. - blockNodesAtNextHeight := bc.blockIndexByHeight[uint64(blockNode.Height)+1] + blockNodesAtNextHeight := bc.blockIndex.GetBlockNodesByHeight(uint64(blockNode.Height) + 1) for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { if blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) && blockNodeAtNextHeight.IsStored() && @@ -544,32 +554,30 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // As a spam-prevention measure, we just throw away this block and don't store it. 
return nil } + + blockHash, err := block.Header.Hash() if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting snapshot global params") + return errors.Wrap(err, "processOrphanBlockPoS: Problem hashing block") } + // All blocks should pass the basic integrity validations, which ensure the block // is not malformed. If the block is malformed, we should store it as ValidateFailed. if err = bc.isProperlyFormedBlockPoS(block); err != nil { - if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block); innerErr != nil { + if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block, blockHash); innerErr != nil { return errors.Wrapf(innerErr, "processOrphanBlockPoS: Problem adding validate failed block to block index: %v", err) } return nil } // Add to blockIndexByHash with status STORED only as we are not sure if it's valid yet. - _, err = bc.storeBlockInBlockIndex(block) + _, err = bc.storeBlockInBlockIndex(block, blockHash) return errors.Wrap(err, "processBlockPoS: Problem adding block to block index: ") } // checkAndStoreArchivalBlock is a helper function that takes in a block and checks if it's an archival block. // If it is, it stores the block in the db and returns true. If it's not, it returns false, or false and an error. -func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success bool, _err error) { - // First, get the block hash and lookup the block index. - blockHash, err := block.Hash() - if err != nil { - return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem hashing block") - } - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) +func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock, blockHash *BlockHash) (_success bool, _err error) { + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, block.Header.Height) // If the blockNode doesn't exist, or the block is not committed, or it's already stored, then we're not dealing // with an archival block. 
Archival blocks must have an existing blockNode, be committed, and not be stored. if !exists || !blockNode.IsCommitted() || blockNode.IsStored() { @@ -578,8 +586,7 @@ func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success // If we get to this point, we're dealing with an archival block, so we'll attempt to store it. // This means, this block node is already marked as COMMITTED and VALIDATED, and we just need to store it. - _, err = bc.storeBlockInBlockIndex(block) - if err != nil { + if _, err := bc.storeBlockInBlockIndex(block, blockHash); err != nil { return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem storing block in block index") } return true, nil @@ -587,9 +594,9 @@ func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success // storeValidateFailedBlockWithWrappedError is a helper function that takes in a block and an error and // stores the block in the block index with status VALIDATE_FAILED. It returns the resulting BlockNode. 
-func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, outerErr error) ( +func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, hash *BlockHash, outerErr error) ( *BlockNode, error) { - blockNode, innerErr := bc.storeValidateFailedBlockInBlockIndex(block) + blockNode, innerErr := bc.storeValidateFailedBlockInBlockIndex(block, hash) if innerErr != nil { return nil, errors.Wrapf(innerErr, "storeValidateFailedBlockWithWrappedError: Problem adding validate failed block to block index: %v", @@ -600,6 +607,7 @@ func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlo func (bc *Blockchain) validateLeaderAndQC( block *MsgDeSoBlock, + blockHash *BlockHash, parentUtxoView *UtxoView, verifySignatures bool, ) (_passedSpamPreventionCheck bool, _err error) { @@ -621,7 +629,7 @@ func (bc *Blockchain) validateLeaderAndQC( "validateLeaderAndQC: Problem getting snapshot epoch number for epoch #%d", currentEpochEntry.EpochNumber) } - isValidPartialSig, err := parentUtxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) + isValidPartialSig, err := parentUtxoView.hasValidProposerPartialSignaturePoS(block, blockHash, snapshotAtEpochNumber) if err != nil { return false, errors.Wrap(err, "validateLeaderAndQC: Problem validating proposer partial sig") @@ -679,15 +687,16 @@ func (bc *Blockchain) validateLeaderAndQC( // return the new BlockNode. // - Error case: Something goes wrong that doesn't result in the block being marked VALIDATE or VALIDATE_FAILED. In // this case, we will add the block to the block index with status STORED and return the BlockNode. 
-func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoView *UtxoView, verifySignatures bool) ( - *BlockNode, error) { - blockHash, err := block.Header.Hash() - if err != nil { - return nil, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem hashing block %v", block) - } +func (bc *Blockchain) validateAndIndexBlockPoS( + block *MsgDeSoBlock, + blockHash *BlockHash, + parentUtxoView *UtxoView, + verifySignatures bool, +) (*BlockNode, error) { // Base case - Check if the block is validated or validate failed. If so, we can return early. - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) + // TODO: validate height doesn't overflow uint32 + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, block.Header.Height) if exists && (blockNode.IsValidateFailed() || blockNode.IsValidated()) { return blockNode, nil } @@ -708,7 +717,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // Run the validation for the parent and update the block index with the parent's status. We first // check if the parent has a cached status. If so, we use the cached status. Otherwise, we run // the full validation algorithm on it, then index it and use the result. - parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash, verifySignatures) + parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash, block.Header.Height-1, verifySignatures) if err != nil { return blockNode, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating previously indexed block: ") } @@ -717,13 +726,13 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // this block as ValidateFailed. If the parent is not ValidateFailed, we ONLY store the block and move on. // We don't want to store it as ValidateFailed because we don't know if it's actually invalid. 
if parentBlockNode.IsValidateFailed() { - return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("parent block is ValidateFailed")) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, errors.New("parent block is ValidateFailed")) } // If the parent block still has a Stored status, it means that we weren't able to validate it // despite trying. The current block will also be stored as a Stored block. if !parentBlockNode.IsValidated() { - return bc.storeBlockInBlockIndex(block) + return bc.storeBlockInBlockIndex(block, blockHash) } // Validate the block's random seed signature @@ -731,14 +740,14 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block.Header) if err != nil { var innerErr error - blockNode, innerErr = bc.storeBlockInBlockIndex(block) + blockNode, innerErr = bc.storeBlockInBlockIndex(block, blockHash) if innerErr != nil { return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) } return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem validating random seed signature") } if !isValidRandomSeedSignature { - return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("invalid random seed signature")) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, errors.New("invalid random seed signature")) } } @@ -746,15 +755,15 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi serializedBlock, err := block.ToBytes(false) if err != nil { return bc.storeValidateFailedBlockWithWrappedError( - block, errors.Wrap(err, "validateAndIndexBlockPoS: Problem serializing block")) + block, blockHash, errors.Wrap(err, "validateAndIndexBlockPoS: Problem serializing block")) } if uint64(len(serializedBlock)) > parentUtxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS { - return 
bc.storeValidateFailedBlockWithWrappedError(block, RuleErrorBlockTooBig) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, RuleErrorBlockTooBig) } // Check if the block is properly formed and passes all basic validations. if err = bc.isValidBlockPoS(block); err != nil { - return bc.storeValidateFailedBlockWithWrappedError(block, err) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, err) } // Connect this block to the parent block's UtxoView. @@ -765,7 +774,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // If we fail to connect the block, then it means the block is invalid. We should store it as ValidateFailed. if _, err = parentUtxoView.ConnectBlock(block, txHashes, verifySignatures, nil, block.Header.Height); err != nil { // If it doesn't connect, we want to mark it as ValidateFailed. - return bc.storeValidateFailedBlockWithWrappedError(block, err) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, err) } // If the block is too far in the future, we leave it as STORED and return early. @@ -774,11 +783,11 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem checking block timestamp") } if failsTimestampDriftCheck { - return bc.storeBlockInBlockIndex(block) + return bc.storeBlockInBlockIndex(block, blockHash) } // We can now add this block to the block index since we have performed all basic validations. - blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + blockNode, err = bc.storeValidatedBlockInBlockIndex(block, blockHash) if err != nil { return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem adding block to block index: ") } @@ -789,10 +798,11 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // cached block, and runs the validateAndIndexBlockPoS algorithm on it. It returns the resulting BlockNode. 
func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( blockHash *BlockHash, + blockHeight uint64, verifySignatures bool, ) (*BlockNode, error) { // Check if the block is already in the block index. If so, we check its current status first. - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeight) if !exists { // We should never really hit this if the block has already been cached in the block index first. // We check here anyway to be safe. @@ -814,7 +824,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( return nil, errors.Wrapf(err, "validatePreviouslyIndexedBlockPoS: Problem fetching block from DB") } // Build utxoView for the block's parent. - parentUtxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash) + parentUtxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash, block.Header.Height-1) if err != nil { // This should never happen. If the parent is validated and extends from the tip, then we should // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. @@ -823,7 +833,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView // If the block isn't validated or validate failed, we need to run the anti-spam checks on it. - passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, blockHash, parentUtxoView, verifySignatures) if err != nil { // If we hit an error, that means there was an intermittent issue when trying to // validate the QC or the leader. 
@@ -832,7 +842,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( if !passedSpamPreventionCheck { // If the QC or Leader check failed, we'll never accept this block, but we've already stored it, // so we need to mark it as ValidateFailed. - blockNode, err = bc.storeValidateFailedBlockInBlockIndex(block) + blockNode, err = bc.storeValidateFailedBlockInBlockIndex(block, blockHash) if err != nil { return nil, errors.Wrap(err, "validatePreviouslyIndexedBlockPoS: Problem adding validate failed block to block index") @@ -841,7 +851,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS( } // We run the full validation algorithm on the block. - return bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures) + return bc.validateAndIndexBlockPoS(block, blockHash, parentUtxoView, verifySignatures) } // isValidBlockPoS performs all basic block integrity checks. Any error @@ -890,7 +900,7 @@ func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader) error { // greater than its parent's timestamp. func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(header *MsgDeSoHeader) error { // Validate that the timestamp is not less than its parent. - parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1050,7 +1060,7 @@ func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { return RuleErrorPoSBlockBeforeCutoverHeight } // Validate that the block height is exactly one greater than its parent. 
- parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1065,7 +1075,7 @@ func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { // hasValidBlockViewPoS validates the view for a given block header func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { // Validate that the view is greater than the latest uncommitted block. - parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1094,7 +1104,7 @@ func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(header *MsgDeSoHeader) (bool, error) { // Validate that the leader proposed a valid random seed signature. - parentBlock, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlock, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. 
@@ -1115,8 +1125,20 @@ func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(header *MsgDeSoHead return isVerified, nil } -func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, snapshotAtEpochNumber uint64) ( - bool, error) { +func (bav *UtxoView) hasValidProposerPartialSignaturePoS( + block *MsgDeSoBlock, + blockHash *BlockHash, + snapshotAtEpochNumber uint64, +) (bool, error) { + // If we aren't provided a hash, we can just compute it on the fly. + // It's more efficient for us not to recompute the hash though, so we only do it if we have to. + if blockHash == nil { + var err error + blockHash, err = block.Hash() + if err != nil { + return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem hashing block") + } + } votingPublicKey := block.Header.ProposerVotingPublicKey proposerPartialSig := block.Header.ProposerVotePartialSignature // If the proposer partial sig is nil, we can't validate it. That's an error. @@ -1140,11 +1162,6 @@ func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, sn if !snapshotBlockProposerValidatorEntry.VotingPublicKey.Eq(votingPublicKey) { return false, nil } - // Get the block's hash - blockHash, err := block.Header.Hash() - if err != nil { - return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem hashing block") - } // Now that we have the snapshot validator entry and validated that the // voting public key from this block's header matches the snapshotted // voting public key, we can validate the partial sig. 
@@ -1314,16 +1331,18 @@ func (bc *Blockchain) getStoredLineageFromCommittedTip(header *MsgDeSoHeader) ( _missingBlockHashes []*BlockHash, _err error, ) { - highestCommittedBlock, idx := bc.GetCommittedTip() - if idx == -1 || highestCommittedBlock == nil { + highestCommittedBlock, exists := bc.GetCommittedTip() + if !exists || highestCommittedBlock == nil { return nil, nil, errors.New("getStoredLineageFromCommittedTip: No committed blocks found") } currentHash := header.PrevBlockHash.NewBlockHash() + currentHeight := header.Height - 1 ancestors := []*BlockNode{} prevHeight := header.Height prevView := header.GetView() for { - currentBlock, exists := bc.blockIndexByHash.Get(*currentHash) + // TODO: is currentHeight correct here? + currentBlock, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(currentHash, currentHeight) if !exists { return nil, []*BlockHash{currentHash}, RuleErrorMissingAncestorBlock } @@ -1361,20 +1380,26 @@ func (bc *Blockchain) getStoredLineageFromCommittedTip(header *MsgDeSoHeader) ( // getOrCreateBlockNodeFromBlockIndex returns the block node from the block index if it exists. // Otherwise, it creates a new block node and adds it to the blockIndexByHash and blockIndexByHeight. -func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - hash, err := block.Header.Hash() - if err != nil { - return nil, errors.Wrapf(err, "getOrCreateBlockNodeFromBlockIndex: Problem hashing block %v", block) +func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + // If we aren't provided a hash, we can just compute it on the fly. + // It's more efficient for us not to recompute the hash though, so we only do it if we have to. 
+ if hash == nil { + var err error + hash, err = block.Hash() + if err != nil { + return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem hashing block") + } } - blockNode, _ := bc.blockIndexByHash.Get(*hash) - prevBlockNode, _ := bc.blockIndexByHash.Get(*block.Header.PrevBlockHash) + blockNode, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(hash, block.Header.Height) if blockNode != nil { // If the block node already exists, we should set its parent if it doesn't have one already. if blockNode.Parent == nil { + prevBlockNode, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(block.Header.PrevBlockHash, block.Header.Height-1) blockNode.Parent = prevBlockNode } return blockNode, nil } + prevBlockNode, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(block.Header.PrevBlockHash, block.Header.Height-1) newBlockNode := NewBlockNode(prevBlockNode, hash, uint32(block.Header.Height), nil, nil, block.Header, StatusNone) bc.addNewBlockNodeToBlockIndex(newBlockNode) return newBlockNode, nil @@ -1382,8 +1407,8 @@ func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (* // storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to // StatusBlockStored. 
It also writes the block to the block index in badger -func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) +func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidatedHeaderInBlockIndex: Problem getting or creating block node") } @@ -1401,8 +1426,8 @@ func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (* return blockNode, nil } -func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(header *MsgDeSoHeader, wrapperError error) error { - if _, innerErr := bc.storeValidateFailedHeaderInBlockIndex(header); innerErr != nil { +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(header *MsgDeSoHeader, hash *BlockHash, wrapperError error) error { + if _, innerErr := bc.storeValidateFailedHeaderInBlockIndex(header, hash); innerErr != nil { return errors.Wrapf(innerErr, "%v", wrapperError) } return wrapperError @@ -1410,8 +1435,8 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(head // storeValidateFailedHeaderInBlockIndex stores the header in the block index only and sets its status to // StatusHeaderValidateFailed. It does not write the header to the DB. 
-func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidateFailedHeaderInBlockIndex: Problem getting or creating block node") } @@ -1432,8 +1457,8 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeade // storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to // StatusBlockStored. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. -func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem getting or creating block node") } @@ -1453,8 +1478,8 @@ func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, e // status to StatusBlockValidated. If it does not have the status StatusBlockStored already, we add that as we // will store the block in the DB after updating its status. It also writes the block to the block index in // badger by calling upsertBlockAndBlockNodeToDB. 
-func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidatedBlockInBlockIndex: Problem getting or creating block node") } @@ -1483,8 +1508,8 @@ func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*Blo // status to StatusBlockValidateFailed. If it does not have the status StatusBlockStored already, we add that as we // will store the block in the DB after updating its status. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. -func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidateFailedBlockInBlockIndex: Problem getting or creating block node") } @@ -1634,8 +1659,7 @@ func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool // addTipBlockToBestChain adds the block as the new tip of the best chain. func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { - bc.bestChain = append(bc.bestChain, blockNode) - bc.bestChainMap[*blockNode.Hash] = blockNode + bc.blockIndex.setTip(blockNode) } // removeTipBlockFromBestChain removes the current tip from the best chain. It @@ -1644,9 +1668,9 @@ func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { // the bestChain slice and bestChainMap map. 
func (bc *Blockchain) removeTipBlockFromBestChain() *BlockNode { // Remove the last block from the best chain. - lastBlock := bc.bestChain[len(bc.bestChain)-1] - delete(bc.bestChainMap, *lastBlock.Hash) - bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] + lastBlock := bc.blockIndex.GetTip() + // Uhhh what happens if we don't have the parent set up!? + bc.blockIndex.setTip(lastBlock.GetParent(bc.blockIndex)) return lastBlock } @@ -1657,26 +1681,27 @@ func (bc *Blockchain) runCommitRuleOnBestChain(verifySignatures bool) error { currentBlock := bc.BlockTip() // If we can commit the grandparent, commit it. // Otherwise, we can't commit it and return nil. - blockToCommit, canCommit := bc.canCommitGrandparent(currentBlock) + blockNodeToCommit, canCommit := bc.canCommitGrandparent(currentBlock) if !canCommit { return nil } // Find all uncommitted ancestors of block to commit - _, idx := bc.GetCommittedTip() - if idx == -1 { + committedTip, exists := bc.GetCommittedTip() + if !exists { // This is an edge case we'll never hit in practice since all the PoW blocks // are committed. 
return errors.New("runCommitRuleOnBestChain: No committed blocks found") } uncommittedAncestors := []*BlockNode{} - for ii := idx + 1; ii < len(bc.bestChain); ii++ { - uncommittedAncestors = append(uncommittedAncestors, bc.bestChain[ii]) - if bc.bestChain[ii].Hash.IsEqual(blockToCommit) { - break - } + currentNode := &BlockNode{} + *currentNode = *blockNodeToCommit + for currentNode.Height > committedTip.Height { + uncommittedAncestors = append(uncommittedAncestors, currentNode) + currentNode = currentNode.GetParent(bc.blockIndex) } + uncommittedAncestors = collections.Reverse(uncommittedAncestors) for ii := 0; ii < len(uncommittedAncestors); ii++ { - if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash, verifySignatures); err != nil { + if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash, uint64(uncommittedAncestors[ii].Height), verifySignatures); err != nil { return errors.Wrapf(err, "runCommitRuleOnBestChain: Problem committing block %v", uncommittedAncestors[ii].Hash.String()) } @@ -1689,18 +1714,28 @@ func (bc *Blockchain) runCommitRuleOnBestChain(verifySignatures bool) error { // between the grandparent and parent of the new block, meaning the grandparent and parent // are proposed in consecutive views, and the "parent" is an ancestor of the incoming block // (not necessarily consecutive views). Additionally, the grandparent must not already be committed. -func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparentBlockHash *BlockHash, _canCommit bool, +func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) ( + _grandparentBlockNode *BlockNode, + _canCommit bool, ) { // TODO: Is it sufficient that the current block's header points to the parent // or does it need to have something to do with the QC? 
- parent := bc.bestChainMap[*currentBlock.Header.PrevBlockHash] - grandParent := bc.bestChainMap[*parent.Header.PrevBlockHash] + parent, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(currentBlock.Header.PrevBlockHash, uint64(currentBlock.Height-1)) + if !exists { + glog.Errorf("canCommitGrandparent: Parent block %v not found in best chain map", currentBlock.Header.PrevBlockHash.String()) + return nil, false + } + grandParent, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(parent.Header.PrevBlockHash, uint64(parent.Height-1)) + if !exists { + glog.Errorf("canCommitGrandparent: Grandparent block %v not found in best chain map", parent.Header.PrevBlockHash.String()) + return nil, false + } if grandParent.IsCommitted() { return nil, false } if grandParent.Header.ProposedInView+1 == parent.Header.ProposedInView { // Then we can run the commit rule up to the grandparent! - return grandParent.Hash, true + return grandParent, true } return nil, false } @@ -1708,9 +1743,9 @@ func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparen // commitBlockPoS commits the block with the given hash. Specifically, this updates the // BlockStatus to include StatusBlockCommitted and flushes the view after connecting the block // to the DB and updates relevant badger indexes with info about the block. -func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool) error { +func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, blockHeight uint64, verifySignatures bool) error { // block must be in the best chain. we grab the block node from there. 
- blockNode, exists := bc.bestChainMap[*blockHash] + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeight) if !exists { return errors.Errorf("commitBlockPoS: Block %v not found in best chain map", blockHash.String()) } @@ -1720,7 +1755,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool return errors.Errorf("commitBlockPoS: Block %v is already committed", blockHash.String()) } // Connect a view up to block we are committing. - utxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*blockHash) + utxoViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*blockHash, uint64(blockNode.Height)) if err != nil { return errors.Wrapf(err, "commitBlockPoS: Problem initializing UtxoView: ") } @@ -1839,7 +1874,10 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er } bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - tipBlock, exists := bc.bestChainMap[*tipHash] + tipBlock, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(tipHash) + if err != nil { + return nil, errors.Wrapf(err, "GetUncommittedBlocks: Problem getting block %v", tipHash.String()) + } if !exists { return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in best chain map", tipHash.String()) } @@ -1855,9 +1893,9 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er if currentParentHash == nil { return nil, errors.Errorf("GetUncommittedBlocks: Block %v has nil PrevBlockHash", currentBlock.Hash) } - currentBlock, _ = bc.blockIndexByHash.Get(*currentParentHash) + currentBlock, _ = bc.blockIndex.GetBlockNodeByHashAndHeight(currentParentHash, currentBlock.Header.Height-1) if currentBlock == nil { - return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in block index", currentBlock.Hash) + return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in block index", currentParentHash) } } return collections.Reverse(uncommittedBlockNodes), nil @@ 
-1892,18 +1930,17 @@ func (viewAndUtxoOps *BlockViewAndUtxoOps) Copy() *BlockViewAndUtxoOps { // GetUncommittedTipView builds a UtxoView to the uncommitted tip. func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - blockViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*bc.BlockTip().Hash) + blockTip := bc.BlockTip() + blockViewAndUtxoOps, err := bc.getUtxoViewAndUtxoOpsAtBlockHash(*blockTip.Hash, uint64(blockTip.Height)) if err != nil { return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem getting UtxoView at block hash") } return blockViewAndUtxoOps.UtxoView, nil } -func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockViewAndUtxoOps, error, bool) { - if viewAndUtxoOpsAtHash, exists := bc.blockViewCache.Get(blockHash); exists { - return viewAndUtxoOpsAtHash, nil, true - } - return nil, nil, false +func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockViewAndUtxoOps, bool) { + viewAndUtxoOpsAtHash, exists := bc.blockViewCache.Get(blockHash) + return viewAndUtxoOpsAtHash, exists } // getUtxoViewAndUtxoOpsAtBlockHash builds a UtxoView to the block provided and returns a BlockViewAndUtxoOps @@ -1912,18 +1949,19 @@ func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockV // all uncommitted ancestors of this block. Then it checks the block view cache to see if we have already // computed this view. If not, connecting the uncommitted ancestor blocks and saving to the cache. The // returned UtxoOps and FullBlock should NOT be modified. -func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( +func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash, blockHeight uint64) ( *BlockViewAndUtxoOps, error) { // Always fetch the lineage from the committed tip to the block provided first to // ensure that a valid UtxoView is returned. 
uncommittedAncestors := []*BlockNode{} - currentBlock, _ := bc.blockIndexByHash.Get(blockHash) + currentBlock, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(&blockHash, blockHeight) if currentBlock == nil { return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", blockHash) } - highestCommittedBlock, _ := bc.GetCommittedTip() - if highestCommittedBlock == nil { + highestCommittedBlock, exists := bc.GetCommittedTip() + glog.Infof("Highest committed block: %v", highestCommittedBlock) + if !exists || highestCommittedBlock == nil { return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: No committed blocks found") } // If the provided block is committed, we need to make sure it's the committed tip. @@ -1940,7 +1978,7 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( if currentParentHash == nil { return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: Block %v has nil PrevBlockHash", currentBlock.Hash) } - currentBlock, _ = bc.blockIndexByHash.Get(*currentParentHash) + currentBlock, _ = bc.blockIndex.GetBlockNodeByHashAndHeight(currentParentHash, currentBlock.Header.Height-1) if currentBlock == nil { return nil, errors.Errorf("getUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", currentParentHash) } @@ -1948,15 +1986,8 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( return nil, errors.Errorf( "getUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") } - if currentBlock.IsCommitted() && !currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { - return nil, errors.Errorf( - "getUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") - } - } - viewAndUtxoOpsAtHash, err, exists := bc.getCachedBlockViewAndUtxoOps(blockHash) - if err != nil { - return nil, errors.Wrapf(err, "getUtxoViewAndUtxoOpsAtBlockHash: Problem getting cached BlockViewAndUtxoOps") } + 
viewAndUtxoOpsAtHash, exists := bc.getCachedBlockViewAndUtxoOps(blockHash) if exists { viewAndUtxoOpsCopy := viewAndUtxoOpsAtHash.Copy() return viewAndUtxoOpsCopy, nil @@ -1970,6 +2001,8 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( var utxoOps [][]*UtxoOperation var fullBlock *MsgDeSoBlock for ii := len(uncommittedAncestors) - 1; ii >= 0; ii-- { + glog.Infof("Connecting block %v", uncommittedAncestors[ii]) + var err error // We need to get these blocks from badger fullBlock, err = GetBlock(uncommittedAncestors[ii].Hash, bc.db, bc.snapshot) if err != nil { @@ -1990,7 +2023,7 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( utxoView.TipHash = &blockHash // Save a copy of the UtxoView to the cache. copiedView := utxoView.CopyUtxoView() - bc.blockViewCache.Put(blockHash, &BlockViewAndUtxoOps{ + bc.blockViewCache.Add(blockHash, &BlockViewAndUtxoOps{ UtxoView: copiedView, UtxoOps: utxoOps, Block: fullBlock, @@ -2003,13 +2036,16 @@ func (bc *Blockchain) getUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( } // GetCommittedTip returns the highest committed block and its index in the best chain. -func (bc *Blockchain) GetCommittedTip() (*BlockNode, int) { - for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { - if bc.bestChain[ii].IsCommitted() { - return bc.bestChain[ii], ii +func (bc *Blockchain) GetCommittedTip() (*BlockNode, bool) { + // iterate backwards from the tip node + currentNode := bc.blockIndex.GetTip() + for currentNode != nil { + if currentNode.IsCommitted() { + return currentNode, true } + currentNode = currentNode.GetParent(bc.blockIndex) } - return nil, -1 + return nil, false } // GetSafeBlocks returns all headers of blocks from which the chain can safely extend. @@ -2034,9 +2070,9 @@ func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { func (bc *Blockchain) getSafeBlockNodes() ([]*BlockNode, error) { // First get committed tip. 
- committedTip, idx := bc.GetCommittedTip() - if idx == -1 || committedTip == nil { - return nil, errors.New("getSafeBlockNodes: No committed blocks found") + committedTip, exists := bc.GetCommittedTip() + if !exists || committedTip == nil { + return []*BlockNode{}, nil } // Now get all blocks from the committed tip to the best chain tip. safeBlocks := []*BlockNode{committedTip} @@ -2103,8 +2139,8 @@ func (bc *Blockchain) GetProofOfStakeGenesisQuorumCertificate() (*QuorumCertific func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { // Fetch the block node for the cutover block - blockNodes, blockNodesExist := bc.blockIndexByHeight[bc.params.GetFinalPoWBlockHeight()] - if !blockNodesExist { + blockNodes := bc.blockIndex.GetBlockNodesByHeight(bc.params.GetFinalPoWBlockHeight()) + if len(blockNodes) == 0 { return nil, errors.Errorf("Error fetching cutover block nodes before height %d", bc.params.GetFinalPoWBlockHeight()) } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 786e37e53..5c32df53a 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2,6 +2,7 @@ package lib import ( "bytes" + "encoding/hex" "fmt" "math" "math/rand" @@ -251,8 +252,8 @@ func TestHasValidBlockHeight(t *testing.T) { ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.bestChain = []*BlockNode{genesisBlock} - bc.blockIndexByHash.Set(*genesisBlock.Hash, genesisBlock) + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{*genesisBlock.Hash: genesisBlock}) + bc.blockIndex.blockIndexByHash.Add(*genesisBlock.Hash, genesisBlock) // Create a block with a valid header. 
randomPayload := RandomBytes(256) randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) @@ -301,7 +302,8 @@ func TestHasValidBlockHeight(t *testing.T) { require.Equal(t, err, RuleErrorInvalidPoSBlockHeight) block.Header.Height = 2 - bc.blockIndexByHash = collections.NewConcurrentMap[BlockHash, *BlockNode]() + // TODO: make sure setting to genesis block works. + bc.blockIndex = NewBlockIndex(bc.db, bc.snapshot, genesisBlock) err = bc.hasValidBlockHeightPoS(block.Header) require.Equal(t, err, RuleErrorMissingParentBlock) } @@ -317,19 +319,19 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { t.Cleanup(resetGlobalDeSoParams) hash1 := NewBlockHash(RandomBytes(32)) hash2 := NewBlockHash(RandomBytes(32)) - genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{ + genesisNode := NewBlockNode(nil, hash1, 0, nil, nil, &MsgDeSoHeader{ Version: 2, - Height: 1, + Height: 0, ProposedInView: 1, }, StatusBlockStored|StatusBlockValidated) - block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{ + block2 := NewBlockNode(genesisNode, hash2, 1, nil, nil, &MsgDeSoHeader{ Version: 2, - Height: 2, + Height: 1, ProposedInView: 2, ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ *hash1: genesisNode, *hash2: block2, }) @@ -372,22 +374,21 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { }, }, } - blockNode, err := bc.storeBlockInBlockIndex(block) - require.NoError(t, err) newHash, err := block.Hash() require.NoError(t, err) + blockNode, err := bc.storeBlockInBlockIndex(block, newHash) + require.NoError(t, err) // Check the block index by hash - blockNodeFromIndex, exists := bc.blockIndexByHash.Get(*newHash) + blockNodeFromIndex, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(newHash, uint64(blockNode.Height)) require.True(t, exists) 
require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) require.Equal(t, blockNodeFromIndex.Height, uint32(2)) require.True(t, blockNodeFromIndex.IsStored()) require.False(t, blockNodeFromIndex.IsValidated()) // Check the block index by height - byHeightBlockNodes, exists := bc.blockIndexByHeight[2] - require.True(t, exists) + byHeightBlockNodes := bc.blockIndex.GetBlockNodesByHeight(2) require.Len(t, byHeightBlockNodes, 1) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash)) require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) // Check the DB for the block @@ -399,20 +400,19 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { require.NoError(t, err) require.True(t, bytes.Equal(uncommittedBytes, origBlockBytes)) // Okay now we update the status of the block to include validated. - blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + blockNode, err = bc.storeValidatedBlockInBlockIndex(block, newHash) require.NoError(t, err) - blockNodeFromIndex, exists = bc.blockIndexByHash.Get(*newHash) + blockNodeFromIndex, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(newHash, uncommittedBlock.Header.Height) require.True(t, exists) require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) require.Equal(t, blockNodeFromIndex.Height, uint32(2)) require.True(t, blockNodeFromIndex.IsStored()) require.True(t, blockNodeFromIndex.IsValidated()) // Check the block index by height. 
- byHeightBlockNodes, exists = bc.blockIndexByHeight[2] - require.True(t, exists) + byHeightBlockNodes = bc.blockIndex.GetBlockNodesByHeight(2) require.Len(t, byHeightBlockNodes, 1) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) - require.True(t, byHeightBlockNodes[*newHash].IsValidated()) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].IsValidated()) require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) @@ -425,28 +425,28 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { require.False(t, updatedBlockHash.IsEqual(newHash)) // Okay now put this new block in there. - blockNode, err = bc.storeBlockInBlockIndex(block) + blockNode, err = bc.storeBlockInBlockIndex(block, updatedBlockHash) require.NoError(t, err) // Make sure the blockIndexByHash is correct. - updatedBlockNode, exists := bc.blockIndexByHash.Get(*updatedBlockHash) + updatedBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(updatedBlockHash, uint64(blockNode.Height)) require.True(t, exists) require.True(t, updatedBlockNode.Hash.IsEqual(updatedBlockHash)) require.Equal(t, updatedBlockNode.Height, uint32(2)) require.True(t, updatedBlockNode.IsStored()) require.False(t, updatedBlockNode.IsValidated()) // Make sure the blockIndexByHeight is correct - byHeightBlockNodes, exists = bc.blockIndexByHeight[2] + byHeightBlockNodes = bc.blockIndex.GetBlockNodesByHeight(2) require.True(t, exists) require.Len(t, byHeightBlockNodes, 2) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) - require.True(t, byHeightBlockNodes[*updatedBlockHash].Hash.IsEqual(updatedBlockHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash) || byHeightBlockNodes[1].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(updatedBlockHash) || byHeightBlockNodes[1].Hash.IsEqual(updatedBlockHash)) require.True(t, 
bc.hasBlockNodesIndexedAtHeight(2)) require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 2) // If we're missing a field in the header, we should get an error // as we can't compute the hash. block.Header.ProposerVotingPublicKey = nil - _, err = bc.storeBlockInBlockIndex(block) + _, err = bc.storeBlockInBlockIndex(block, nil) require.Error(t, err) } @@ -473,11 +473,7 @@ func TestHasValidBlockViewPoS(t *testing.T) { ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.bestChain = []*BlockNode{ - genesisNode, - block2, - } - bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ *hash1: genesisNode, *hash2: block2, }) @@ -809,8 +805,7 @@ func TestGetLineageFromCommittedTip(t *testing.T) { Height: 1, ProposedInView: 1, }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) - bc.bestChain = []*BlockNode{genesisNode} - bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ *hash1: genesisNode, }) block := &MsgDeSoBlock{ @@ -846,8 +841,8 @@ func TestGetLineageFromCommittedTip(t *testing.T) { ProposedInView: 2, PrevBlockHash: hash1, }, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted) - bc.bestChain = append(bc.bestChain, block2) - bc.blockIndexByHash.Set(*hash2, block2) + bc.blockIndex.setTip(block2) + bc.blockIndex.blockIndexByHash.Add(*hash2, block2) ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header) require.Error(t, err) require.Equal(t, err, RuleErrorDoesNotExtendCommittedTip) @@ -1237,21 +1232,27 @@ func TestShouldReorg(t *testing.T) { hash1 := NewBlockHash(RandomBytes(32)) hash2 := NewBlockHash(RandomBytes(32)) hash3 := NewBlockHash(RandomBytes(32)) - bc.bestChain = []*BlockNode{ + chain := []*BlockNode{ { Hash: hash1, Status: StatusBlockStored | StatusBlockValidated | 
StatusBlockCommitted, + Height: 0, }, { Hash: hash3, Status: StatusBlockStored | StatusBlockValidated, + Height: 1, }, } + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ + *hash1: chain[0], + *hash3: chain[1], + }) newBlock := &BlockNode{ Header: &MsgDeSoHeader{ ProposedInView: 2, - PrevBlockHash: bc.bestChain[1].Hash, + PrevBlockHash: chain[1].Hash, }, } @@ -1275,7 +1276,7 @@ // 1. Simple reorg. Just replacing the uncommitted tip. // 2. Create a longer chain and reorg to it. // 3. Make sure no reorg when current view is greater than block's view -// 4. Super happy path of simply extending current uncommitted tip. +// 4. Super happy path of simply extending current uncommitted tip. func TestTryApplyNewTip(t *testing.T) { setBalanceModelBlockHeights(t) bc, _, _ := NewTestBlockchain(t) @@ -1319,9 +1320,9 @@ bc.addTipBlockToBestChain(bn1) bc.addTipBlockToBestChain(bn2) bc.addTipBlockToBestChain(bn3) - bc.blockIndexByHash.Set(*hash1, bn1) - bc.blockIndexByHash.Set(*hash2, bn2) - bc.blockIndexByHash.Set(*hash3, bn3) + bc.blockIndex.blockIndexByHash.Add(*hash1, bn1) + bc.blockIndex.blockIndexByHash.Add(*hash2, bn2) + bc.blockIndex.blockIndexByHash.Add(*hash3, bn3) // Simple reorg. Just replacing the uncommitted tip. newBlock := &MsgDeSoBlock{ @@ -1337,44 +1338,47 @@ ancestors, _, err := bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) checkBestChainForHash := func(hash *BlockHash) bool { - return collections.Any(bc.bestChain, func(bn *BlockNode) bool { - return bn.Hash.IsEqual(hash) - }) + _, exists, err := bc.GetBlockFromBestChainByHash(hash, false) + require.NoError(t, err) + return exists } // Try to apply newBlock as tip. This should succeed.
newBlockNode := &BlockNode{ Header: newBlock.Header, Hash: newBlockHash, + Height: 4, } appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err := bc.tryApplyNewTip(newBlockNode, 9, ancestors) require.NoError(t, err) require.True(t, appliedNewTip) // hash 3 should no longer be in the best chain or best chain map - _, hash3ExistsInBestChainMap := bc.bestChainMap[*hash3] + _, hash3ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHash(hash3, false) + require.NoError(t, err) require.False(t, hash3ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash3)) require.Len(t, connectedBlockHashes, 1) require.Len(t, disconnectedBlockHashes, 1) // newBlock should be in the best chain and the best chain map and should be the tip. - _, newBlockExistsInBestChainMap := bc.bestChainMap[*newBlockHash] + _, newBlockExistsInBestChainMap, err := bc.GetBlockFromBestChainByHash(newBlockHash, false) + require.NoError(t, err) require.True(t, newBlockExistsInBestChainMap) require.True(t, checkBestChainForHash(newBlockHash)) require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) // Make sure block 2 and block 1 are still in the best chain. - _, hash2ExistsInBestChainMap := bc.bestChainMap[*hash2] + _, hash2ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHash(hash2, false) + require.NoError(t, err) require.True(t, hash2ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash2)) - _, hash1ExistsInBestChainMap := bc.bestChainMap[*hash1] + _, hash1ExistsInBestChainMap := bc.blockIndex.blockIndexByHash.Get(*hash1) require.True(t, hash1ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash1)) // Remove newBlock from the best chain and block index to reset the state. 
- bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] - delete(bc.bestChainMap, *newBlockHash) + bc.blockIndex.setTip(bc.blockIndex.GetTip().GetParent(bc.blockIndex)) // Add block 3 back bc.addTipBlockToBestChain(bn3) @@ -1403,14 +1407,16 @@ func TestTryApplyNewTip(t *testing.T) { Height: 6, }, } - bc.blockIndexByHash.Set(*hash4, bn4) - bc.blockIndexByHash.Set(*hash5, bn5) + bc.blockIndex.blockIndexByHash.Add(*hash4, bn4) + bc.blockIndex.blockIndexByHash.Add(*hash5, bn5) // Set new block's parent to hash5 newBlockNode.Header.PrevBlockHash = hash5 newBlockNode.Header.ProposedInView = 7 newBlockNode.Header.Height = 7 newBlockNode.Height = 7 + // Clear parent out for safety. + newBlockNode.Parent = nil require.NoError(t, err) ancestors, _, err = bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) @@ -1422,19 +1428,23 @@ func TestTryApplyNewTip(t *testing.T) { // newBlockHash should be tip. require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) // hash 3 should no longer be in the best chain or best chain map - _, hash3ExistsInBestChainMap = bc.bestChainMap[*hash3] + _, hash3ExistsInBestChainMap, err = bc.GetBlockFromBestChainByHash(hash3, false) + require.NoError(t, err) require.False(t, hash3ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash3)) // hash 2 should no longer be in the best chain or best chain map - _, hash2ExistsInBestChainMap = bc.bestChainMap[*hash2] + _, hash2ExistsInBestChainMap, err = bc.GetBlockFromBestChainByHash(hash2, false) + require.NoError(t, err) require.False(t, hash2ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash2)) // hash 4 should be in the best chain and the best chain map - _, hash4ExistsInBestChainMap := bc.bestChainMap[*hash4] + _, hash4ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHash(hash4, false) + require.NoError(t, err) require.True(t, hash4ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash4)) // hash 5 should be in the best chain and the best 
chain map - _, hash5ExistsInBestChainMap := bc.bestChainMap[*hash5] + _, hash5ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHash(hash5, false) + require.NoError(t, err) require.True(t, hash5ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash5)) @@ -1442,11 +1452,12 @@ func TestTryApplyNewTip(t *testing.T) { require.Len(t, connectedBlockHashes, 3) require.Len(t, disconnectedBlockHashes, 2) - // Reset the state of the best chain. - delete(bc.bestChainMap, *hash4) - delete(bc.bestChainMap, *hash5) - delete(bc.bestChainMap, *newBlockHash) - bc.bestChain = bc.bestChain[:len(bc.bestChain)-3] + // Reset the state of the best chain - parent of newBlock + //bc.bestChain.ChainMap.Remove(*hash4) + //bc.bestChain.ChainMap.Remove(*hash5) + //bc.bestChain.ChainMap.Remove(*newBlockHash) + //bc.bestChain.Chain = bc.bestChain.Chain[:len(bc.bestChain.Chain)-3] + bc.blockIndex.setTip(newBlockNode.GetParent(bc.blockIndex)) // Add block 2 and 3 back. bc.addTipBlockToBestChain(bn2) @@ -1516,8 +1527,8 @@ func TestCanCommitGrandparent(t *testing.T) { PrevBlockHash: hash1, }, } - bc.bestChainMap[*hash1] = bn1 - bc.bestChainMap[*hash2] = bn2 + bc.blockIndex.addNewBlockNodeToBlockIndex(bn1) + bc.blockIndex.addNewBlockNodeToBlockIndex(bn2) // define incoming block hash3 := NewBlockHash(RandomBytes(32)) @@ -1536,7 +1547,7 @@ func TestCanCommitGrandparent(t *testing.T) { // (meaning they are in consecutive views). So we should be able // to commit bn1. grandparentHash, canCommit := bc.canCommitGrandparent(bn3) - require.True(t, hash1.IsEqual(grandparentHash)) + require.True(t, hash1.IsEqual(grandparentHash.Hash)) require.True(t, canCommit) // Update bn1 to be committed. We no longer can run the commit since bn1 is already @@ -1611,7 +1622,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Add one more block to the best chain, but have the view be further in the future. // this should trigger a commit on block 2. 
- blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 14, 20, 429) + blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 15, 20, 429) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1623,7 +1634,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Okay so add block 5 to the best chain. This should NOT trigger a commit on block 3 // as block 4 is not a direct child of block 3 based on its view. - blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 15, 21, 654) + blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 16, 21, 654) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1634,9 +1645,9 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, blockHash4, blockHash5}, blockHash2) // If we now add a block that is a descendent of block 5, we should be able to commit - // blocks 3 and 4 as block 4 and 5 possess a direct parent child relationship and + // blocks 3 and 4 as block 4 and 5 possess a direct parent child relationship, and // we have a descendent of block 5. - blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 16, 22, 912) + blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 17, 22, 912) require.NoError(t, err) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1659,7 +1670,8 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } for _, committedHash := range committedBlocks { // Okay so let's make sure the block is committed. 
- blockNode, exists := testMeta.chain.bestChainMap[*committedHash] + blockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHash(committedHash, false) + require.NoError(testMeta.t, err) require.True(testMeta.t, exists) require.True(testMeta.t, blockNode.IsCommitted()) @@ -1684,7 +1696,8 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } for _, uncommittedBlockHash := range uncommittedBlocks { // Okay so let's make sure the block is uncommitted. - blockNode, exists := testMeta.chain.bestChainMap[*uncommittedBlockHash] + blockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHash(uncommittedBlockHash, false) + require.NoError(testMeta.t, err) require.True(testMeta.t, exists) require.False(testMeta.t, blockNode.IsCommitted()) // TODO: Verify DB results?? Kinda silly to make sure everything is missing. @@ -1839,7 +1852,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { futureBlockHash, err = futureBlock.Hash() require.NoError(t, err) - futureBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*futureBlockHash) + futureBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(futureBlockHash, futureBlock.Header.Height) require.True(t, exists) require.False(t, futureBlockNode.IsCommitted()) require.True(t, futureBlockNode.IsStored()) @@ -1848,10 +1861,12 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { } var timeoutBlockHash *BlockHash + var timeoutBlockHeight uint64 { // Okay let's timeout view 15 var timeoutBlock *MsgDeSoBlock timeoutBlock = _generateRealBlock(testMeta, 15, 16, 381, blockHash3, true) + timeoutBlockHeight = timeoutBlock.Header.Height success, _, _, err := testMeta.chain.ProcessBlockPoS(timeoutBlock, 15, true) fmt.Println(err) require.True(t, success) @@ -1874,12 +1889,18 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { // Timeout block will no longer be in best chain, and will still be in an uncommitted state in the block index 
_verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, reorgBlockHash}, blockHash2) _verifyRandomSeedHashHelper(testMeta, reorgBlock) - _, exists := testMeta.chain.bestChainMap[*timeoutBlockHash] + _, exists, err := testMeta.chain.GetBlockFromBestChainByHash(timeoutBlockHash, false) + require.NoError(t, err) require.False(t, exists) - timeoutBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*timeoutBlockHash) + timeoutBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(timeoutBlockHash, timeoutBlockHeight) require.True(t, exists) require.False(t, timeoutBlockNode.IsCommitted()) + + // The reorg block hash should be in the block index now. + reorgBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(reorgBlockHash, reorgBlock.Header.Height) + require.True(t, exists) + require.True(t, reorgBlockNode.IsStored()) } var dummyParentBlockHash, orphanBlockHash *BlockHash { @@ -1905,7 +1926,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.Len(t, missingBlockHashes, 1) require.True(t, missingBlockHashes[0].IsEqual(dummyParentBlockHash)) require.NoError(t, err) - orphanBlockInIndex, orphanBlockExists := testMeta.chain.blockIndexByHash.Get(*orphanBlockHash) + orphanBlockInIndex, orphanBlockExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(orphanBlockHash, orphanBlock.Header.Height) require.True(t, orphanBlockExists) require.NotNil(t, orphanBlockInIndex) require.True(t, orphanBlockInIndex.IsStored()) @@ -1918,7 +1939,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.Len(t, missingBlockHashes, 0) require.NoError(t, err) - orphanBlockInIndex, orphanBlockExists = testMeta.chain.blockIndexByHash.Get(*orphanBlockHash) + orphanBlockInIndex, orphanBlockExists = testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(orphanBlockHash, orphanBlock.Header.Height) require.True(t, orphanBlockExists) require.NotNil(t, orphanBlockInIndex) 
require.True(t, orphanBlockInIndex.IsStored()) @@ -1944,7 +1965,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.True(t, missingBlockHashes[0].IsEqual(randomHash)) require.NoError(t, err) - malformedOrphanBlockInIndex, malformedOrphanBlockExists := testMeta.chain.blockIndexByHash.Get(*malformedOrphanBlockHash) + malformedOrphanBlockInIndex, malformedOrphanBlockExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(malformedOrphanBlockHash, malformedOrphanBlock.Header.Height) require.True(t, malformedOrphanBlockExists) require.True(t, malformedOrphanBlockInIndex.IsValidateFailed()) require.True(t, malformedOrphanBlockInIndex.IsStored()) @@ -1982,11 +2003,12 @@ func TestGetSafeBlocks(t *testing.T) { testMeta := NewTestPoSBlockchainWithValidators(t) committedHash := testMeta.chain.BlockTip().Hash var block1 *MsgDeSoBlock + fmt.Println("HEX STUFF: ", hex.EncodeToString(committedHash[:])) block1 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight), uint64(testMeta.savedHeight), 1723, committedHash, false) block1Hash, err := block1.Hash() require.NoError(t, err) // Add block 1 w/ stored and validated - bn1, err := testMeta.chain.storeValidatedBlockInBlockIndex(block1) + bn1, err := testMeta.chain.storeValidatedBlockInBlockIndex(block1, nil) require.NoError(t, err) require.True(t, bn1.Hash.IsEqual(block1Hash)) // Create block 2 w/ block 1 as parent and add it to the block index w/ stored & validated @@ -1994,13 +2016,13 @@ func TestGetSafeBlocks(t *testing.T) { block2 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+1), uint64(testMeta.savedHeight+1), 1293, block1Hash, false) block2Hash, err := block2.Hash() require.NoError(t, err) - bn2, err := testMeta.chain.storeValidatedBlockInBlockIndex(block2) + bn2, err := testMeta.chain.storeValidatedBlockInBlockIndex(block2, nil) require.NoError(t, err) require.True(t, bn2.Hash.IsEqual(block2Hash)) // Add block 3 only as stored and validated var block3 *MsgDeSoBlock block3 = 
_generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+2), 1372, block2Hash, false) - bn3, err := testMeta.chain.storeValidatedBlockInBlockIndex(block3) + bn3, err := testMeta.chain.storeValidatedBlockInBlockIndex(block3, nil) require.NoError(t, err) block3Hash, err := block3.Hash() require.NoError(t, err) @@ -2008,7 +2030,7 @@ func TestGetSafeBlocks(t *testing.T) { // Add block 3' only as stored var block3Prime *MsgDeSoBlock block3Prime = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+3), 137175, block2Hash, false) - bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime) + bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime, nil) require.NoError(t, err) block3PrimeHash, err := block3Prime.Hash() require.NoError(t, err) @@ -2019,7 +2041,7 @@ func TestGetSafeBlocks(t *testing.T) { block5.Header.Height = uint64(testMeta.savedHeight + 5) block5Hash, err := block5.Hash() require.NoError(t, err) - _, err = testMeta.chain.storeValidatedBlockInBlockIndex(block5) + _, err = testMeta.chain.storeValidatedBlockInBlockIndex(block5, nil) require.NoError(t, err) // Okay let's get the safe blocks. safeBlocks, err := testMeta.chain.GetSafeBlocks() @@ -2040,7 +2062,7 @@ func TestGetSafeBlocks(t *testing.T) { require.False(t, _checkSafeBlocksForBlockHash(block5Hash, safeBlocks)) // Update block 3 prime to be validated and it should now be a safe block. - bn3Prime, err = testMeta.chain.storeValidatedBlockInBlockIndex(block3Prime) + bn3Prime, err = testMeta.chain.storeValidatedBlockInBlockIndex(block3Prime, nil) require.NoError(t, err) require.True(t, bn3Prime.IsValidated()) safeBlocks, err = testMeta.chain.GetSafeBlocks() @@ -2067,7 +2089,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. 
blockHash, err := realBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2088,7 +2110,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.True(t, blockNode.IsValidateFailed()) @@ -2115,7 +2137,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.False(t, exists) } @@ -2167,7 +2189,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.False(t, exists) } { @@ -2185,7 +2207,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. 
blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2210,7 +2232,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.False(t, exists) } { @@ -2261,7 +2283,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.False(t, exists) } { @@ -2290,7 +2312,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // The block shouldn't be in the block index. blockHash, err := twoEpochsInFutureBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, twoEpochsInFutureBlock.Header.Height) require.False(t, exists) } { @@ -2307,7 +2329,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // The block should be in the block index. 
blockHash, err := prevEpochBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, prevEpochBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2323,7 +2345,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { utxoView := _newUtxoView(testMeta) snapshotEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() require.NoError(t, err) - isValid, err := utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err := utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.True(t, isValid) @@ -2331,7 +2353,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { realVotingPublicKey := realBlock.Header.ProposerVotingPublicKey { realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) // Reset the proposer voting public key @@ -2343,7 +2365,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { incorrectPayload := consensus.GetVoteSignaturePayload(13, testMeta.chain.BlockTip().Hash) realBlock.Header.ProposerVotePartialSignature, err = testMeta.blsPubKeyToBLSKeyMap[realBlock.Header.ProposerVotingPublicKey.ToString()].Sign(incorrectPayload[:]) - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) } @@ -2356,7 +2378,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { 
correctPayload := consensus.GetVoteSignaturePayload(12, realBlockHash) wrongPrivateKey := _generateRandomBLSPrivateKey(t) realBlock.Header.ProposerVotePartialSignature, err = wrongPrivateKey.Sign(correctPayload[:]) - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) } @@ -2375,7 +2397,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { require.NoError(t, err) realBlockHash, err := realBlock.Hash() require.NoError(t, err) - realBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*realBlockHash) + realBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(realBlockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, realBlockNode.IsStored()) require.False(t, realBlockNode.IsValidateFailed()) @@ -2461,13 +2483,13 @@ func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, vi } // TODO: Get real seed signature. 
- prevBlock, exists := testMeta.chain.blockIndexByHash.Get(*prevBlockHash) + prevBlock, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(prevBlockHash, blockHeight-1) require.True(testMeta.t, exists) // Always update the testMeta latestBlockView - latestBlockViewAndUtxoOps, err := testMeta.chain.getUtxoViewAndUtxoOpsAtBlockHash(*prevBlockHash) + latestBlockViewAndUtxoOps, err := testMeta.chain.getUtxoViewAndUtxoOpsAtBlockHash(*prevBlockHash, blockHeight-1) require.NoError(testMeta.t, err) latestBlockView := latestBlockViewAndUtxoOps.UtxoView - latestBlockNode, latestBlockNodeExists := testMeta.chain.blockIndexByHash.Get(*prevBlockHash) + latestBlockNode, latestBlockNodeExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(prevBlockHash, blockHeight-1) require.True(testMeta.t, latestBlockNodeExists) latestBlockHeight := latestBlockNode.Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) @@ -2521,10 +2543,10 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se require.NoError(testMeta.t, err) // Add block to block index. - blockNode, err := testMeta.chain.storeBlockInBlockIndex(msgDesoBlock) + blockNode, err := testMeta.chain.storeBlockInBlockIndex(msgDesoBlock, nil) require.NoError(testMeta.t, err) require.True(testMeta.t, blockNode.IsStored()) - _, exists := testMeta.chain.blockIndexByHash.Get(*newBlockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(newBlockHash, msgDesoBlock.Header.Height) require.True(testMeta.t, exists) // Remove the transactions from this block from the mempool. // This prevents nonce reuse issues when trying to make failing blocks. @@ -2537,17 +2559,20 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se // _generateBlockAndAddToBestChain generates a BlockTemplate by calling _generateRealBlock and then adds it to the // best chain. Finally it updates the PosMempool's latest block view. 
func _generateBlockAndAddToBestChain(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64) *MsgDeSoBlock { - blockTemplate := _generateRealBlock(testMeta, blockHeight, view, seed, testMeta.chain.BlockTip().Hash, false) + prevBlockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHeight(blockHeight-1, false) + require.NoError(testMeta.t, err) + require.True(testMeta.t, exists) + blockTemplate := _generateRealBlock(testMeta, blockHeight, view, seed, prevBlockNode.Hash, false) var msgDesoBlock *MsgDeSoBlock msgDesoBlock = blockTemplate newBlockHash, err := msgDesoBlock.Hash() require.NoError(testMeta.t, err) // Add block to block index. - blockNode, err := testMeta.chain.storeValidatedBlockInBlockIndex(msgDesoBlock) + blockNode, err := testMeta.chain.storeValidatedBlockInBlockIndex(msgDesoBlock, nil) require.NoError(testMeta.t, err) require.True(testMeta.t, blockNode.IsStored()) require.True(testMeta.t, blockNode.IsValidated()) - newBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*newBlockHash) + newBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(newBlockHash, msgDesoBlock.Header.Height) require.True(testMeta.t, exists) testMeta.chain.addTipBlockToBestChain(newBlockNode) // Update the latest block view @@ -2678,7 +2703,7 @@ func _getFullRealBlockTemplate( // Get leader voting private key. leaderVotingPrivateKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] // Get hash of last block - chainTip, _ := testMeta.chain.blockIndexByHash.Get(*blockTemplate.Header.PrevBlockHash) + chainTip, _ := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockTemplate.Header.PrevBlockHash, blockTemplate.Header.Height-1) chainTipHash := chainTip.Hash // Get the vote signature payload // Hack to get view numbers working properly w/ PoW blocks. 
@@ -2819,6 +2844,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { for ii := 0; ii < 10; ii++ { _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) require.NoError(t, err) + fmt.Println("CHAIN TIP: ", chain.BlockTip().Hash) } m0PubBytes, _, _ := Base58CheckDecode(m0Pub) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index d29421a13..a33983240 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -250,7 +250,10 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( // Fetch the parent block parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) - parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash.Get(*parentBlockHash) + parentBlock, parentBlockExists, err := fc.blockchain.blockIndex.GetBlockNodeByHashOnly(parentBlockHash) + if err != nil { + return errors.Errorf("Error fetching parent block: %v", parentBlockHash) + } if !parentBlockExists { return errors.Errorf("Error fetching parent block: %v", parentBlockHash) } @@ -487,7 +490,8 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo tipBlockHash := BlockHashFromConsensusInterface(event.TipBlockHash) // Fetch the HighQC from the Blockchain struct - tipBlockNode, tipBlockExists := fc.blockchain.blockIndexByHash.Get(*tipBlockHash) + // TODO: validate that TipHeight is a uint32 + tipBlockNode, tipBlockExists := fc.blockchain.blockIndex.GetBlockNodeByHashAndHeight(tipBlockHash, event.TipBlockHeight) if !tipBlockExists { return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error fetching tip block: %v", tipBlockHash) } @@ -558,13 +562,17 @@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa // If we don't have the highQC's block on hand, then we need to request it from the peer. We do // that first before storing the timeout message locally in the FastHotStuffEventLoop. This // prevents spamming of timeout messages by peers. 
- if !fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) { - err := errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) + hasBlockInBlockIndex, err := fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) + if err != nil { + return nil, errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error fetching block: ") + } + if !hasBlockInBlockIndex { + err = errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) return []*BlockHash{msg.HighQC.BlockHash}, err } // Process the timeout message locally in the FastHotStuffEventLoop - if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { + if err = fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view. glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: %v", err) @@ -693,7 +701,7 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error hashing tip block: %v", err) } - utxoViewAndUtxoOps, err := fc.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*tipBlockHash) + utxoViewAndUtxoOps, err := fc.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*tipBlockHash, tipBlock.Height) if err != nil { return nil, errors.Errorf("Error fetching UtxoView for tip block: %v", err) } @@ -733,13 +741,16 @@ func (fc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) // Fetch the parent block - parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash.Get(*parentBlockHash) + parentBlock, parentBlockExists, err := fc.blockchain.blockIndex.GetBlockNodeByHashOnly(parentBlockHash) + if err != nil { + return nil, errors.Errorf("Error fetching parent block: %v", 
parentBlockHash) + } if !parentBlockExists { return nil, errors.Errorf("Error fetching parent block: %v", parentBlockHash) } // Build a UtxoView at the parent block - parentUtxoViewAndUtxoOps, err := fc.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*parentBlockHash) + parentUtxoViewAndUtxoOps, err := fc.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*parentBlockHash, uint64(parentBlock.Height)) if err != nil { // This should never happen as long as the parent block is a descendant of the committed tip. return nil, errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 130aa901c..8010e5aeb 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -102,6 +102,11 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { currentView := blockHeader.ValidatorsVoteQC.GetView() + 1 nextView := currentView + 1 + blockIndex := NewBlockIndex(nil, nil, nil) + blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ + *blockHash: {Header: blockHeader, Height: uint32(blockHeader.Height), Hash: blockHash}, + }) + // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ lock: sync.RWMutex{}, @@ -111,11 +116,9 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { }, params: &DeSoTestnetParams, blockchain: &Blockchain{ - ChainLock: deadlock.RWMutex{}, - blockIndexByHash: collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ - *blockHash: {Header: blockHeader}, - }), - params: &DeSoTestnetParams, + ChainLock: deadlock.RWMutex{}, + blockIndex: blockIndex, + params: &DeSoTestnetParams, }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ OnIsInitialized: alwaysReturnTrue, diff --git a/lib/pos_epoch.go b/lib/pos_epoch.go index ef4e3c78d..fed62d90e 100644 --- a/lib/pos_epoch.go +++ b/lib/pos_epoch.go @@ -3,7 +3,7 @@ package lib import ( "bytes" - "github.com/dgraph-io/badger/v4" + 
"github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 3aff010a2..5592553f0 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -8,10 +8,9 @@ import ( "sync/atomic" "time" - "github.com/decred/dcrd/container/lru" - - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -183,11 +182,11 @@ type PosMempool struct { // recentBlockTxnCache is an LRU KV cache used to track the transaction that have been included in blocks. // This cache is used to power logic that waits for a transaction to either be validated in the mempool // or be included in a block. - recentBlockTxnCache lru.Set[BlockHash] + recentBlockTxnCache *lru.Cache[BlockHash, struct{}] // recentRejectedTxnCache is a cache to store the txns that were recently rejected so that we can return better // errors for them. - recentRejectedTxnCache lru.Map[BlockHash, error] + recentRejectedTxnCache *lru.Cache[BlockHash, error] } func NewPosMempool() *PosMempool { @@ -234,8 +233,8 @@ func (mp *PosMempool) Init( mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis - mp.recentBlockTxnCache = *lru.NewSet[BlockHash](100000) // cache 100K latest txns from blocks. - mp.recentRejectedTxnCache = *lru.NewMap[BlockHash, error](100000) // cache 100K rejected txns. + mp.recentBlockTxnCache, _ = lru.New[BlockHash, struct{}](100000) // cache 100K latest txns from blocks. + mp.recentRejectedTxnCache, _ = lru.New[BlockHash, error](100000) // cache 100K rejected txns. // Recreate and initialize the transaction register and the nonce tracker. 
mp.txnRegister = NewTransactionRegister() @@ -486,11 +485,11 @@ func (mp *PosMempool) AddTransaction(txn *MsgDeSoTxn, txnTimestamp time.Time) er } func (mp *PosMempool) addTxnHashToRecentBlockCache(txnHash BlockHash) { - mp.recentBlockTxnCache.Put(txnHash) + mp.recentBlockTxnCache.Add(txnHash, struct{}{}) } func (mp *PosMempool) deleteTxnHashFromRecentBlockCache(txnHash BlockHash) { - mp.recentBlockTxnCache.Delete(txnHash) + mp.recentBlockTxnCache.Remove(txnHash) } func (mp *PosMempool) isTxnHashInRecentBlockCache(txnHash BlockHash) bool { return mp.recentBlockTxnCache.Contains(txnHash) @@ -847,7 +846,7 @@ func (mp *PosMempool) validateTransactions() error { // Mark the txn as invalid and add an error to the cache so we can return it to the user if they // try to resubmit it. txn.SetValidated(false) - mp.recentRejectedTxnCache.Put(*txn.Hash, err) + mp.recentRejectedTxnCache.Add(*txn.Hash, err) // Try to remove the transaction with a lock. mp.removeTransaction(txn, true) diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go index ceeface31..05a1ef617 100644 --- a/lib/pos_mempool_persister.go +++ b/lib/pos_mempool_persister.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_mempool_persister_test.go b/lib/pos_mempool_persister_test.go index 73887a01f..6cd1ef25d 100644 --- a/lib/pos_mempool_persister_test.go +++ b/lib/pos_mempool_persister_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/require" ) diff --git a/lib/pos_mempool_test.go b/lib/pos_mempool_test.go index 8dced4464..d04883919 100644 --- a/lib/pos_mempool_test.go +++ b/lib/pos_mempool_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" ) diff --git a/lib/pos_random_seed.go b/lib/pos_random_seed.go index 9c739c41d..6777de6e6 100644 --- a/lib/pos_random_seed.go +++ b/lib/pos_random_seed.go @@ -7,7 +7,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/pos_snapshot_cache.go b/lib/pos_snapshot_cache.go index f320e99b0..ec495633c 100644 --- a/lib/pos_snapshot_cache.go +++ b/lib/pos_snapshot_cache.go @@ -3,7 +3,7 @@ package lib import ( "sync" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/pkg/errors" ) diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index 7788d170c..b727c384a 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -9,7 +9,7 @@ import ( "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" ) diff --git a/lib/postgres.go b/lib/postgres.go index 38872b002..9c9ba432f 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -6,13 +6,13 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/deso-protocol/core/collections" + "github.com/hashicorp/golang-lru/v2" "net/url" "regexp" "strings" "github.com/deso-protocol/uint256" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" "github.com/go-pg/pg/v10" "github.com/go-pg/pg/v10/orm" "github.com/golang/glog" @@ -1298,6 +1298,8 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { // The genesis block has a nil parent if blockNode.Parent != nil { block.ParentHash = blockNode.Parent.Hash + } else if !blockNode.Header.PrevBlockHash.IsEqual(GenesisBlockHash) { + block.ParentHash = blockNode.Header.PrevBlockHash } _, err := tx.Model(block).WherePK().OnConflict("(hash) DO 
UPDATE").Insert() @@ -1305,16 +1307,16 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { } // GetBlockIndex gets all the PGBlocks and creates a map of BlockHash to BlockNode as needed by blockchain.go -func (postgres *Postgres) GetBlockIndex() (*collections.ConcurrentMap[BlockHash, *BlockNode], error) { +func (postgres *Postgres) GetBlockIndex() (*lru.Cache[BlockHash, *BlockNode], error) { var blocks []PGBlock err := postgres.db.Model(&blocks).Select() if err != nil { return nil, err } - blockMap := collections.NewConcurrentMap[BlockHash, *BlockNode]() + blockMap, _ := lru.New[BlockHash, *BlockNode](MaxBlockIndexNodes) for _, block := range blocks { - blockMap.Set(*block.Hash, &BlockNode{ + blockMap.Add(*block.Hash, &BlockNode{ Hash: block.Hash, Height: uint32(block.Height), DifficultyTarget: block.DifficultyTarget, @@ -1333,17 +1335,18 @@ func (postgres *Postgres) GetBlockIndex() (*collections.ConcurrentMap[BlockHash, } // Setup parent pointers - blockMap.Iterate(func(key BlockHash, blockNode *BlockNode) { + for _, key := range blockMap.Keys() { + blockNode, _ := blockMap.Get(key) // Genesis block has nil parent parentHash := blockNode.Header.PrevBlockHash if parentHash != nil { parent, exists := blockMap.Get(*parentHash) - if !exists { + if !exists && blockNode.Height > 0 { glog.Fatal("Parent block not found in block map") } blockNode.Parent = parent } - }) + } return blockMap, nil } diff --git a/lib/server.go b/lib/server.go index 0069540cb..7ddbfd2c3 100644 --- a/lib/server.go +++ b/lib/server.go @@ -4,27 +4,25 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/deso-protocol/go-deadlock" "net" + "path/filepath" "reflect" "runtime" "strings" "sync/atomic" "time" - "github.com/btcsuite/btcd/wire" - "github.com/deso-protocol/core/collections" - "github.com/deso-protocol/core/consensus" - - "github.com/decred/dcrd/container/lru" - "github.com/DataDog/datadog-go/v5/statsd" - "github.com/btcsuite/btcd/addrmgr" chainlib 
"github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v4" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" + "github.com/deso-protocol/go-deadlock" + "github.com/dgraph-io/badger/v3" "github.com/golang/glog" + "github.com/hashicorp/golang-lru/v2" "github.com/pkg/errors" ) @@ -90,7 +88,8 @@ type Server struct { // adding it to this map and checking this map before replying will make it // so that we only send a reply to the first peer that sent us the inv, which // is more efficient. - inventoryBeingProcessed lru.Set[InvVect] + inventoryBeingProcessed *lru.Cache[InvVect, struct{}] + // hasRequestedSync indicates whether we've bootstrapped our mempool // by requesting all mempool transactions from a // peer. It's initially false @@ -228,7 +227,7 @@ func (srv *Server) _removeRequest(hash *BlockHash) { Type: InvTypeTx, Hash: *hash, } - srv.inventoryBeingProcessed.Delete(*invVect) + srv.inventoryBeingProcessed.Remove(*invVect) } // dataLock must be acquired for writing before calling this function. 
@@ -363,6 +362,24 @@ func ValidateHyperSyncFlags(isHypersync bool, syncType NodeSyncType) { } } +func RunBlockIndexMigrationOnce(db *badger.DB, params *DeSoParams) error { + blockIndexMigrationFileName := filepath.Join(db.Opts().Dir, BlockIndexMigrationFileName) + glog.V(0).Info("FileName: ", blockIndexMigrationFileName) + hasRunMigration, err := ReadBoolFromFile(blockIndexMigrationFileName) + if err == nil && hasRunMigration { + glog.V(0).Info("Block index migration has already been run") + return nil + } + glog.V(0).Info("Running block index migration") + if err = RunBlockIndexMigration(db, nil, nil, params); err != nil { + return errors.Wrapf(err, "Problem running block index migration") + } + if err = SaveBoolToFile(blockIndexMigrationFileName, true); err != nil { + return errors.Wrapf(err, "Problem saving block index migration file") + } + return nil +} + // NewServer initializes all of the internal data structures. Right now this basically // looks as follows: // - ConnectionManager starts and keeps track of peers. @@ -687,7 +704,8 @@ func NewServer( srv.blockProducer = _blockProducer srv.incomingMessages = _incomingMessages // Make this hold a multiple of what we hold for individual peers. - srv.inventoryBeingProcessed = *lru.NewSet[InvVect](maxKnownInventory) + srv.inventoryBeingProcessed, _ = lru.New[InvVect, struct{}](maxKnownInventory) + srv.requestTimeoutSeconds = 10 srv.statsdClient = statsd @@ -737,6 +755,16 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { glog.V(1).Infof("Server._handleGetHeadersMessage: called with locator: (%v), "+ "stopHash: (%v) from Peer %v", msg.BlockLocator, msg.StopHash, pp) + // FIXME: We can eliminate the call to LocateBestBlockChainHeaders and do a much + // simpler "shortcut" version that doesn't require complicated tree-traversal bs. + // The shortcut would be to just return all headers starting from msg.BlockLocator[0] + // up to msg.StopHash or maxHeadersPerMsg, whichever comes first. 
This would allow + // other nodes to sync from us and *keep* in sync with us, while allowing us to delete + // ALL of the complicated logic around locators and the best header chain. This all works + // because msg.BlockLocator[0] is the requesting-node's tip hash. The rest of the + // hashes, and all of the locator bs, are only needed to resolve forks, which can't + // happen with PoS anymore. + // Find the most recent known block in the best block chain based // on the block locator and fetch all of the headers after it until either // MaxHeadersPerMsg have been fetched or the provided stop @@ -862,7 +890,16 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { } // Go through the block nodes in the blockchain and download the blocks if they're not stored. - for _, blockNode := range srv.blockchain.bestChain { + for ii := uint32(srv.blockchain.lowestBlockNotStored); ii <= srv.blockchain.blockTip().Height; ii++ { + blockNode, exists, err := srv.blockchain.GetBlockFromBestChainByHeight(uint64(ii), false) + if err != nil { + glog.Errorf("GetBlocksToStore: Error getting block from best chain by height: %v", err) + return + } + if !exists { + glog.Errorf("GetBlocksToStore: Block at height %v not found in best chain", ii) + return + } // We find the first block that's not stored and get ready to download blocks starting from this block onwards. if blockNode.Status&StatusBlockStored == 0 { maxBlocksInFlight := MaxBlocksInFlight @@ -872,41 +909,45 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { maxBlocksInFlight = MaxBlocksInFlightPoS } + srv.blockchain.lowestBlockNotStored = uint64(blockNode.Height) numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) - currentHeight := int(blockNode.Height) + currentHeight := uint64(blockNode.Height) blockNodesToFetch := []*BlockNode{} // In case there are blocks at tip that are already stored (which shouldn't really happen), we'll not download them. 
- var heightLimit int - for heightLimit = len(srv.blockchain.bestChain) - 1; heightLimit >= 0; heightLimit-- { - if !srv.blockchain.bestChain[heightLimit].Status.IsFullyProcessed() { + // We filter those out in the loop below by checking IsFullyProcessed. + // Find the blocks that we should download. + for len(blockNodesToFetch) < numBlocksToFetch { + if currentHeight > uint64(srv.blockchain.blockTip().Height) { break } - } - - // Find the blocks that we should download. - for currentHeight <= heightLimit && - len(blockNodesToFetch) < numBlocksToFetch { - // Get the current hash and increment the height. Genesis has height 0, so currentHeight corresponds to // the array index. - currentNode := srv.blockchain.bestChain[currentHeight] + currentNode, currNodeExists, err := srv.blockchain.GetBlockFromBestChainByHeight(currentHeight, false) + if err != nil { + glog.Errorf("GetBlocksToStore: Error getting block from best chain by height: %v", err) + return + } + if !currNodeExists { + glog.Errorf("GetBlocksToStore: Block at height %v not found in best chain", currentHeight) + return + } currentHeight++ + // If this node is already fully processed, then we don't need to download it. + if currentNode.Status.IsFullyProcessed() { + break + } // If we've already requested this block then we don't request it again. 
- if _, exists := pp.requestedBlocks[*currentNode.Hash]; exists { + if _, exists = pp.requestedBlocks[*currentNode.Hash]; exists { continue } blockNodesToFetch = append(blockNodesToFetch, currentNode) } - var hashList []*BlockHash - for _, node := range blockNodesToFetch { - hashList = append(hashList, node.Hash) - pp.requestedBlocks[*node.Hash] = true - } pp.AddDeSoMessage(&MsgDeSoGetBlocks{ - HashList: hashList, + StartHeight: srv.blockchain.lowestBlockNotStored + 1, + NumBlocks: 0, }, false) glog.V(1).Infof("GetBlocksToStore: Downloading blocks to store for header %v from peer %v", @@ -922,30 +963,16 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { // GetBlocks computes what blocks we need to fetch and asks for them from the // corresponding peer. It is typically called after we have exited // SyncStateSyncingHeaders. -func (srv *Server) RequestBlocksUpToHeight(pp *Peer, maxHeight int) { - numBlocksToFetch := srv.getMaxBlocksInFlight(pp) - len(pp.requestedBlocks) - blockNodesToFetch := srv.blockchain.GetBlockNodesToFetch( - numBlocksToFetch, maxHeight, pp.requestedBlocks, - ) - if len(blockNodesToFetch) == 0 { - // This can happen if, for example, we're already requesting the maximum - // number of blocks we can. Just return in this case. - return - } - - // If we're here then we have some blocks to fetch so fetch them. 
- hashList := []*BlockHash{} - for _, node := range blockNodesToFetch { - hashList = append(hashList, node.Hash) - pp.requestedBlocks[*node.Hash] = true - } - - pp.AddDeSoMessage(&MsgDeSoGetBlocks{HashList: hashList}, false) +func (srv *Server) RequestMaxBlocks(pp *Peer) { + blockTip := srv.blockchain.BlockTip() + pp.AddDeSoMessage(&MsgDeSoGetBlocks{ + StartHeight: uint64(blockTip.Height + 1), + NumBlocks: 0, + }, false) - glog.V(1).Infof("GetBlocks: Downloading %d blocks from header %v to header %v from peer %v", - len(blockNodesToFetch), - blockNodesToFetch[0].Header, - blockNodesToFetch[len(blockNodesToFetch)-1].Header, + glog.V(1).Infof("GetBlocks: Downloading blocks from height %v and hash %v from peer %v", + blockTip.Height, + blockTip.Hash.String(), pp, ) } @@ -1015,10 +1042,16 @@ func (srv *Server) shouldVerifySignatures(header *MsgDeSoHeader, isHeaderChain b } var hasSeenCheckpointBlockHash bool var checkpointBlockNode *BlockNode + var err error if isHeaderChain { - checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] + checkpointBlockNode, hasSeenCheckpointBlockHash, err = srv.blockchain.GetBlockFromBestChainByHash( + checkpointBlockInfo.Hash, true) } else { - checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + checkpointBlockNode, hasSeenCheckpointBlockHash, err = srv.blockchain.GetBlockFromBestChainByHash( + checkpointBlockInfo.Hash, false) + } + if err != nil { + glog.Fatalf("shouldVerifySignatures: Problem getting checkpoint block node from best chain: %v", err) } // If we haven't seen the checkpoint block hash yet, we skip signature verification. 
if !hasSeenCheckpointBlockHash { @@ -1046,11 +1079,11 @@ func (srv *Server) getCheckpointSyncingStatus(isHeaders bool) string { if checkpointBlockInfo == nil { return "" } - hasSeenCheckPointBlockHash := false - if isHeaders { - _, hasSeenCheckPointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] - } else { - _, hasSeenCheckPointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + _, hasSeenCheckPointBlockHash, err := srv.blockchain.GetBlockFromBestChainByHash( + checkpointBlockInfo.Hash, isHeaders) + + if err != nil { + glog.Fatalf("getCheckpointSyncingStatus: Problem getting checkpoint block node from best chain: %v", err) } if !hasSeenCheckPointBlockHash { return fmt.Sprintf("", checkpointBlockInfo.String()) @@ -1087,7 +1120,8 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // have this issue. Hitting duplicates after we're done syncing is // fine and can happen in certain cases. headerHash, _ := headerReceived.Hash() - if srv.blockchain.HasHeader(headerHash) { + hasHeader := srv.blockchain.HasHeaderByHashAndHeight(headerHash, headerReceived.Height) + if hasHeader { if srv.blockchain.isSyncing() { glog.Warningf("Server._handleHeaderBundle: Duplicate header %v received from peer %v "+ @@ -1176,6 +1210,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // syncing state either through hyper sync or block sync. First let's check if the peer // supports hypersync and if our block tip is old enough so that it makes sense to sync state. + // FIXME: This hypersync logic needs to run in _startSync somehow that we don't sync headers if NodeCanHypersyncState(srv.cmgr.SyncType) && srv.blockchain.isHyperSyncCondition() { // If hypersync conditions are satisfied, we will be syncing state. 
This assignment results // in srv.blockchain.chainState() to be equal to SyncStateSyncingSnapshot @@ -1186,6 +1221,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // snapshot height. currentHeaderTipHeight := uint64(srv.blockchain.headerTip().Height) + // FIXME: This hypersync logic needs to run in _startSync somehow that we don't sync headers if srv.blockchain.chainState() == SyncStateSyncingSnapshot { glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* state starting at "+ "height %v from peer %v", srv.blockchain.headerTip().Header.Height, pp) @@ -1229,11 +1265,21 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // expected height at which the snapshot should be taking place. We do this to make sure that the // snapshot we receive from the peer is up-to-date. // TODO: error handle if the hash doesn't exist for some reason. + expectedSnapshotHeightBlock, expectedSnapshotHeightblockExists, err := + srv.blockchain.GetBlockFromBestChainByHeight(expectedSnapshotHeight, true) + if err != nil { + glog.Errorf("Server._handleHeaderBundle: Problem getting expected snapshot height block, error (%v)", err) + return + } + if !expectedSnapshotHeightblockExists || expectedSnapshotHeightBlock == nil { + glog.Errorf("Server._handleHeaderBundle: Expected snapshot height block doesn't exist.") + return + } srv.HyperSyncProgress.SnapshotMetadata = &SnapshotEpochMetadata{ SnapshotBlockHeight: expectedSnapshotHeight, FirstSnapshotBlockHeight: expectedSnapshotHeight, CurrentEpochChecksumBytes: []byte{}, - CurrentEpochBlockHash: srv.blockchain.bestHeaderChain[expectedSnapshotHeight].Hash, + CurrentEpochBlockHash: expectedSnapshotHeightBlock.Hash, } srv.HyperSyncProgress.PrefixProgress = []*SyncPrefixProgress{} srv.HyperSyncProgress.Completed = false @@ -1293,9 +1339,9 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* blocks starting at 
"+ "height %d out of %d from peer %v", blockTip.Header.Height+1, msg.TipHeight, pp) - maxHeight := -1 + //maxHeight := -1 srv.blockchain.updateCheckpointBlockInfo() - srv.RequestBlocksUpToHeight(pp, maxHeight) + srv.RequestMaxBlocks(pp) return } @@ -1310,7 +1356,8 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // we're either not aware of or that we don't think is the best chain. // Doing things this way makes it so that when we request blocks we // are 100% positive the peer has them. - if !srv.blockchain.HasHeader(msg.TipHash) { + hasHeader := srv.blockchain.HasHeaderByHashAndHeight(msg.TipHash, uint64(msg.TipHeight)) + if !hasHeader { glog.V(1).Infof("Server._handleHeaderBundle: Peer's tip is not in our "+ "blockchain so not requesting anything else from them. Our block "+ "tip %v, their tip %v:%d, peer: %v", @@ -1326,7 +1373,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { glog.V(1).Infof("Server._handleHeaderBundle: *Downloading* blocks starting at "+ "block tip %v out of %d from peer %v", blockTip.Header, msg.TipHeight, pp) - srv.RequestBlocksUpToHeight(pp, int(msg.TipHeight)) + srv.RequestMaxBlocks(pp) return } @@ -1365,6 +1412,9 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* headers for blocks starting at "+ "header tip %v out of %d from peer %v", headerTip.Header, msg.TipHeight, pp) + // TODO: this may be wrong? + glog.V(0).Infof("Server._handleHeaderBundle: Num Headers in header chain: (header tip height: %v) ", + srv.blockchain.blockIndex.GetHeaderTip()) } func (srv *Server) _handleGetBlocks(pp *Peer, msg *MsgDeSoGetBlocks) { @@ -1636,10 +1686,18 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { srv.snapshot.PrintChecksum("Finished hyper sync. 
Checksum is:") glog.Infof(CLog(Magenta, fmt.Sprintf("Metadata checksum: (%v)", srv.HyperSyncProgress.SnapshotMetadata.CurrentEpochChecksumBytes))) - - glog.Infof(CLog(Yellow, fmt.Sprintf("Best header chain %v best block chain %v", - srv.blockchain.bestHeaderChain[msg.SnapshotMetadata.SnapshotBlockHeight], srv.blockchain.bestChain))) - + blockNode, exists, err := srv.blockchain.GetBlockFromBestChainByHeight(msg.SnapshotMetadata.SnapshotBlockHeight, true) + if err != nil { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, error (%v)", err) + return + } + if !exists { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, block node does not exist: (%v)", msg.SnapshotMetadata.SnapshotBlockHeight) + //return + } else { + glog.Infof(CLog(Yellow, fmt.Sprintf("Best header chain %v best block chain %v", + blockNode, srv.blockchain.blockIndex.GetTip()))) + } // Verify that the state checksum matches the one in HyperSyncProgress snapshot metadata. // If the checksums don't match, it means that we've been interacting with a peer that was misbehaving. checksumBytes, err := srv.snapshot.Checksum.ToBytes() @@ -1681,14 +1739,21 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { // being too large and possibly causing an error in badger. var blockNodeBatch []*BlockNode for ii := uint64(1); ii <= srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight; ii++ { - currentNode := srv.blockchain.bestHeaderChain[ii] + currentNode, currentNodeExists, err := srv.blockchain.GetBlockFromBestChainByHeight(ii, true) + if err != nil { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, error: (%v)", err) + break + } + if !currentNodeExists { + glog.Errorf("Server._handleSnapshot: Problem getting block node by height, block node does not exist") + break + } // Do not set the StatusBlockStored flag, because we still need to download the past blocks. 
currentNode.Status |= StatusBlockProcessed currentNode.Status |= StatusBlockValidated currentNode.Status |= StatusBlockCommitted srv.blockchain.addNewBlockNodeToBlockIndex(currentNode) - srv.blockchain.bestChainMap[*currentNode.Hash] = currentNode - srv.blockchain.bestChain = append(srv.blockchain.bestChain, currentNode) + srv.blockchain.blockIndex.setTip(currentNode) blockNodeBatch = append(blockNodeBatch, currentNode) if len(blockNodeBatch) < 10000 { continue @@ -1713,7 +1778,7 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { } // We also reset the in-memory snapshot cache, because it is populated with stale records after // we've initialized the chain with seed transactions. - srv.snapshot.DatabaseCache = *lru.NewMap[string, []byte](DatabaseCacheSize) + srv.snapshot.DatabaseCache, _ = lru.New[string, []byte](int(DatabaseCacheSize)) // If we got here then we finished the snapshot sync so set appropriate flags. srv.blockchain.syncingState = false @@ -1752,8 +1817,7 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) { return } - headerTip := srv.blockchain.headerTip() - srv.RequestBlocksUpToHeight(pp, int(headerTip.Height)) + srv.RequestMaxBlocks(pp) } func (srv *Server) _startSync() { @@ -1819,10 +1883,12 @@ func (srv *Server) _startSync() { // Send a GetHeaders message to the Peer to start the headers sync. // Note that we include an empty BlockHash as the stopHash to indicate we want as // many headers as the Peer can give us. 
- locator := srv.blockchain.LatestHeaderLocator() - bestPeer.AddDeSoMessage(&MsgDeSoGetHeaders{ - StopHash: &BlockHash{}, - BlockLocator: locator, + //locator := srv.blockchain.LatestHeaderLocator() + bestPeer.AddDeSoMessage(&MsgDeSoGetBlocks{ + //StopHash: &BlockHash{}, + //BlockLocator: locator, + StartHeight: uint64(bestHeight + 1), + NumBlocks: 0, }, false) glog.V(1).Infof("Server._startSync: Downloading headers for blocks starting at "+ "header tip height %v from peer %v", bestHeight, bestPeer) @@ -1999,8 +2065,9 @@ func (srv *Server) _relayTransactions() { // Add the transaction to the peer's known inventory. We do // it here when we enqueue the message to the peers outgoing - // message queue so that we don't have remember to do it later. - pp.knownInventory.Put(*invVect) + // message queue so that we don't have to remember to do it later. + pp.knownInventory.Add(*invVect, struct{}{}) + invMsg.InvList = append(invMsg.InvList, invVect) } if len(invMsg.InvList) > 0 { @@ -2237,7 +2304,7 @@ func (srv *Server) _logAndDisconnectPeer(pp *Peer, blockMsg *MsgDeSoBlock, suffi // isLastBlock indicates that this is the last block in the list of blocks we received back // via a MsgDeSoBlockBundle message. When we receive a single block, isLastBlock will automatically // be true, which will give it its old single-block behavior. -func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { +func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock) { srv.timer.Start("Server._handleBlock: General") // Pull out the header for easy access. @@ -2264,14 +2331,6 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { return } - // Unless we're running a PoS validator, we should not expect to see a block that we did not request. If - // we see such a block, then we log an error and disconnect from the peer. 
- _, isRequestedBlock := pp.requestedBlocks[*blockHash] - if srv.fastHotStuffConsensus == nil && !isRequestedBlock { - srv._logAndDisconnectPeer(pp, blk, "Getting a block that we haven't requested before") - return - } - // Delete the block from the requested blocks map. We do this whether the block was requested or not. delete(pp.requestedBlocks, *blockHash) @@ -2341,11 +2400,9 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { // see an error with a block from a peer. if err != nil { if strings.Contains(err.Error(), "RuleErrorDuplicateBlock") { - // Just warn on duplicate blocks but don't disconnect the peer. - // TODO: This assuages a bug similar to the one referenced in the duplicate - // headers comment above but in the future we should probably try and figure - // out a way to be more strict about things. - glog.Warningf("Got duplicate block %v from peer %v", blk, pp) + // Ignore duplicate blocks. They can happen because of how we've changed the + // way we request blocks from the peer. + return } else if strings.Contains(err.Error(), RuleErrorFailedSpamPreventionsCheck.Error()) { // If the block fails the spam prevention check, then it must be signed by the // bad block proposer signature or it has a bad QC. In either case, we should @@ -2364,13 +2421,8 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { srv.timer.Print("Server._handleBlock: General") srv.timer.Print("Server._handleBlock: Process Block") - // If we're not at the last block yet, then we're done. The rest of this code is only - // relevant after we've connected the last block, and it generally involves fetching - // more data from our peer. - if !isLastBlock { - return - } - + // TODO: Is it OK to do this in the middle of a block bundle? I think it's fine, and possibly more + // correct this way. if isOrphan { // It's possible to receive an orphan block from the peer for a variety of reasons. 
If we // see an orphan block, we do one of two things: @@ -2398,85 +2450,12 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) { return } - - // We shouldn't be receiving blocks while syncing headers, but we can end up here - // if it took longer than MaxTipAge to sync blocks to this point. We'll revert to - // syncing headers and then resume syncing blocks once we're current again. - if srv.blockchain.chainState() == SyncStateSyncingHeaders { - glog.Warningf("Server._handleBlock: Received block while syncing headers: %v", blk) - glog.Infof("Requesting headers: %v", pp) - - locator := srv.blockchain.LatestHeaderLocator() - pp.AddDeSoMessage(&MsgDeSoGetHeaders{ - StopHash: &BlockHash{}, - BlockLocator: locator, - }, false) - glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* headers for blocks starting at "+ - "header tip %v from peer %v", - srv.blockchain.HeaderTip(), pp) - return - } - - if srv.blockchain.chainState() == SyncStateSyncingHistoricalBlocks { - srv.GetBlocksToStore(pp) - if srv.blockchain.downloadingHistoricalBlocks { - return - } - } - - // If we're syncing blocks, call GetBlocks and try to get as many blocks - // from our peer as we can. This allows the initial block download to be - // more incremental since every time we're able to accept a block (or - // group of blocks) we indicate this to our peer so they can send us more. - if srv.blockchain.chainState() == SyncStateSyncingBlocks { - // Setting maxHeight = -1 gets us as many blocks as we can get from our - // peer, which is OK because we can assume the peer has all of them when - // we're syncing. - maxHeight := -1 - srv.RequestBlocksUpToHeight(pp, maxHeight) - return - } - - if srv.blockchain.chainState() == SyncStateNeedBlocksss { - // If we don't have any blocks to wait for anymore, hit the peer with - // a GetHeaders request to see if there are any more headers we should - // be aware of. 
This will generally happen in two cases: - // - With our sync peer after we’re almost at the end of syncing blocks. - // In this case, calling GetHeaders once the requestedblocks is almost - // gone will result in us getting all of the remaining blocks right up - // to the tip and then stopping, which is exactly what we want. - // - With a peer that sent us an inv. In this case, the peer could have - // more blocks for us or it could not. Either way, it’s good to check - // and worst case the peer will return an empty header bundle that will - // result in us not sending anything back because there won’t be any new - // blocks to request. - locator := srv.blockchain.LatestHeaderLocator() - pp.AddDeSoMessage(&MsgDeSoGetHeaders{ - StopHash: &BlockHash{}, - BlockLocator: locator, - }, false) - return - } - - // If we get here, it means we're in SyncStateFullyCurrent, which is great. - // In this case we shoot a MEMPOOL message over to the peer to bootstrap the mempool. - srv._tryRequestMempoolFromPeer(pp) - - // Exit early if the chain isn't SyncStateFullyCurrent. - if srv.blockchain.chainState() != SyncStateFullyCurrent { - return - } - - // If the chain is current, then try to transition to the FastHotStuff consensus. - srv.tryTransitionToFastHotStuffConsensus() } func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { if len(bundle.Blocks) == 0 { - glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Received EMPTY block bundle "+ - "at header height ( %v ) from Peer %v. Disconnecting peer since this should never happen.", - srv.blockchain.headerTip().Height, pp))) - pp.Disconnect("Received empty block bundle.") + // We will receive an empty block bundle if we are fully in-sync with the peer. + // Return early in this case so that we don't request more blocks from the peer. return } glog.Infof(CLog(Cyan, fmt.Sprintf("Server._handleBlockBundle: Received blocks ( %v->%v / %v ) from Peer %v. 
"+ @@ -2499,7 +2478,7 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { // _handleBlock is a legacy function that doesn't support erroring out. It's not a big deal // though as we'll just connect all the blocks after the failed one and those blocks will also // gracefully fail. - srv._handleBlock(pp, blk, ii == len(bundle.Blocks)-1 /*isLastBlock*/) + srv._handleBlock(pp, blk) numLogBlocks := 100 if srv.params.IsPoSBlockHeight(blk.Header.Height) || srv.params.NetworkType == NetworkType_TESTNET { @@ -2521,6 +2500,39 @@ func (srv *Server) _handleBlockBundle(pp *Peer, bundle *MsgDeSoBlockBundle) { } } } + + if srv.blockchain.chainState() == SyncStateSyncingHistoricalBlocks { + srv.GetBlocksToStore(pp) + if srv.blockchain.downloadingHistoricalBlocks { + return + } + } + + // We need to keep requesting blocks until the peer has given us everything that it has. + // This condition is implicitly met when we receive a request for blocks that has fewer + // than the maximum possible number of blocks in it. + if len(bundle.Blocks) >= MaxBlocksInFlightPoS { + srv.RequestMaxBlocks(pp) + return + } + + // If we get here, it means we're finally in the steady-state and we're done syncing. + + // Try to request the mempool from the peer if we haven't already. This function is OK + // to call every time we get a block bundle, even in the steady-state, because it will + // just exit after the first time we've requested the mempool. + srv._tryRequestMempoolFromPeer(pp) + + // If the chain is current, then try to transition to the FastHotStuff consensus. + // This function is safe to call every time we get a block bundle, even in the steady-state, + // because it will just exit after the first time we've transitioned to FastHotStuff. + srv.tryTransitionToFastHotStuffConsensus() + + // Even though we're in the steady-state here, it generally doesn't hurt to follow up with a + // request for any remaining blocks from the peer. 
In the steady-state, this will return + // zero blocks, and we will exit the _handleBlockBundle function early at the top without + // requesting any more. + srv.RequestMaxBlocks(pp) } func (srv *Server) _handleInv(peer *Peer, msg *MsgDeSoInv) { @@ -2850,7 +2862,7 @@ func (srv *Server) _handlePeerMessages(serverMessage *ServerMessage) { srv._handleGetBlocks(serverMessage.Peer, msg) case *MsgDeSoBlock: // isLastBlock is always true when we get a legacy single-block message. - srv._handleBlock(serverMessage.Peer, msg, true) + srv._handleBlock(serverMessage.Peer, msg) case *MsgDeSoGetSnapshot: srv._handleGetSnapshot(serverMessage.Peer, msg) case *MsgDeSoSnapshotData: diff --git a/lib/snapshot.go b/lib/snapshot.go index 781141b91..2d8ffb895 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -5,7 +5,6 @@ import ( "context" "encoding/hex" "fmt" - "github.com/deso-protocol/go-deadlock" "math" "reflect" "runtime" @@ -14,11 +13,13 @@ import ( "time" "github.com/cloudflare/circl/group" - "github.com/decred/dcrd/container/lru" - "github.com/dgraph-io/badger/v4" + + "github.com/deso-protocol/go-deadlock" + "github.com/dgraph-io/badger/v3" "github.com/fatih/color" "github.com/golang/glog" "github.com/google/uuid" + "github.com/hashicorp/golang-lru/v2" "github.com/oleiade/lane" "github.com/pkg/errors" "golang.org/x/sync/semaphore" @@ -313,7 +314,7 @@ type Snapshot struct { // DatabaseCache is used to store most recent DB records that we've read/written. // This is a low-level optimization for ancestral records that // saves us read time when we're writing to the DB during UtxoView flush. - DatabaseCache lru.Map[string, []byte] + DatabaseCache *lru.Cache[string, []byte] // AncestralFlushCounter is used to offset ancestral records flush to occur only after x blocks. AncestralFlushCounter uint64 @@ -483,11 +484,14 @@ func NewSnapshot( "This may lead to unexpected behavior.") } + databaseCache, _ := lru.New[string, []byte](int(DatabaseCacheSize)) + // Set the snapshot. 
snap := &Snapshot{ - mainDb: mainDb, - SnapshotDbMutex: &snapshotDbMutex, - DatabaseCache: *lru.NewMap[string, []byte](DatabaseCacheSize), + mainDb: mainDb, + SnapshotDbMutex: &snapshotDbMutex, + DatabaseCache: databaseCache, + AncestralFlushCounter: uint64(0), snapshotBlockHeightPeriod: snapshotBlockHeightPeriod, OperationChannel: operationChannel, @@ -1399,7 +1403,7 @@ type StateChecksum struct { ctx context.Context // hashToCurveCache is a cache of computed hashToCurve mappings - hashToCurveCache lru.Map[string, group.Element] + hashToCurveCache *lru.Cache[string, group.Element] // When we want to add a database record to the state checksum, we will first have to // map the record to the Ristretto255 curve using the hash_to_curve. We will then add the @@ -1427,7 +1431,7 @@ func (sc *StateChecksum) Initialize(mainDb *badger.DB, snapshotDbMutex *sync.Mut sc.maxWorkers = int64(runtime.GOMAXPROCS(0)) // Set the hashToCurveCache - sc.hashToCurveCache = *lru.NewMap[string, group.Element](HashToCurveCache) + sc.hashToCurveCache, _ = lru.New[string, group.Element](int(HashToCurveCache)) // Set the worker pool semaphore and context. sc.semaphore = semaphore.NewWeighted(sc.maxWorkers) @@ -1498,7 +1502,7 @@ func (sc *StateChecksum) HashToCurve(bytes []byte) group.Element { // Compute the hash_to_curve primitive, mapping the bytes to an elliptic curve point. 
hashElement = sc.curve.HashToElement(bytes, sc.dst) // Also add to the hashToCurveCache - sc.hashToCurveCache.Put(bytesStr, hashElement) + sc.hashToCurveCache.Add(bytesStr, hashElement) } return hashElement diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go index 9c5108ceb..358d86294 100644 --- a/lib/state_change_syncer.go +++ b/lib/state_change_syncer.go @@ -777,7 +777,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser return true, nil } - blockHeight := uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height) + blockHeight := uint64(server.blockchain.blockIndex.GetTip().Height) stateChangeSyncer.MempoolFlushId = originalCommittedFlushId @@ -804,7 +804,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser mempoolUtxoView.Snapshot = nil server.blockchain.ChainLock.RLock() - mempoolUtxoView.TipHash = server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Hash + mempoolUtxoView.TipHash = server.blockchain.blockIndex.GetTip().Hash server.blockchain.ChainLock.RUnlock() // A new transaction is created so that we can simulate writes to the db without actually writing to the db. @@ -815,7 +815,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser defer txn.Discard() glog.V(2).Infof("Time since mempool sync start: %v", time.Since(startTime)) startTime = time.Now() - err = mempoolUtxoView.FlushToDbWithTxn(txn, uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height)) + err = mempoolUtxoView.FlushToDbWithTxn(txn, uint64(server.blockchain.blockIndex.GetTip().Height)) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: originalCommittedFlushId, @@ -847,7 +847,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser // TODO: Have Z look at if we need to do some caching in the uncommitted blocks logic. 
// First connect the uncommitted blocks to the mempool view. for _, uncommittedBlock := range uncommittedBlocks { - utxoViewAndOpsAtBlockHash, err := server.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash) + utxoViewAndOpsAtBlockHash, err := server.blockchain.getUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash, uint64(uncommittedBlock.Height)) if err != nil { mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{ FlushId: originalCommittedFlushId, diff --git a/lib/txindex.go b/lib/txindex.go index ead79c53f..29bc0336c 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/dgraph-io/badger/v4" + "github.com/dgraph-io/badger/v3" chainlib "github.com/btcsuite/btcd/blockchain" "github.com/golang/glog" @@ -150,8 +150,8 @@ func NewTXIndex(coreChain *Blockchain, params *DeSoParams, dataDirectory string) } func (txi *TXIndex) FinishedSyncing() bool { - committedTip, idx := txi.CoreChain.GetCommittedTip() - if idx == -1 { + committedTip, exists := txi.CoreChain.GetCommittedTip() + if !exists { return false } return txi.TXIndexChain.BlockTip().Height == committedTip.Height @@ -171,7 +171,8 @@ func (txi *TXIndex) Start() { txi.updateWaitGroup.Done() return default: - if txi.CoreChain.ChainState() == SyncStateFullyCurrent { + chainState := txi.CoreChain.ChainState() + if chainState == SyncStateFullyCurrent || (chainState == SyncStateNeedBlocksss && txi.CoreChain.headerTip().Height-txi.CoreChain.blockTip().Height < 10) { if !txi.CoreChain.IsFullyStored() { glog.V(1).Infof("TXIndex: Waiting, blockchain is not fully stored") break @@ -207,8 +208,7 @@ func (txi *TXIndex) Stop() { // GetTxindexUpdateBlockNodes ... 
func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( - _txindexTipNode *BlockNode, _blockTipNode *BlockNode, _commonAncestor *BlockNode, - _detachBlocks []*BlockNode, _attachBlocks []*BlockNode) { + _txindexTipNode *BlockNode, _blockTipNode *BlockNode, _commonAncestor *BlockNode) { // Get the current txindex tip. txindexTipHash := txi.TXIndexChain.BlockTip() @@ -218,33 +218,18 @@ func (txi *TXIndex) GetTxindexUpdateBlockNodes() ( // case. glog.Error("Error: TXIndexChain had nil tip; this should never " + "happen and it means the transaction index is broken.") - return nil, nil, nil, nil, nil + return nil, nil, nil } // If the tip of the txindex is no longer stored in the block index, it // means the txindex hit a fork that we are no longer keeping track of. // The only thing we can really do in this case is rebuild the entire index // from scratch. To do that, we return all the blocks in the index to detach // and all the blocks in the real chain to attach. - txindexTipNode, _ := txi.TXIndexChain.blockIndexByHash.Get(*txindexTipHash.Hash) + txindexTipNode, _ := txi.TXIndexChain.blockIndex.GetBlockNodeByHashAndHeight(txindexTipHash.Hash, uint64(txindexTipHash.Height)) // Get the committed tip. committedTip, _ := txi.CoreChain.GetCommittedTip() - if txindexTipNode == nil { - glog.Info("GetTxindexUpdateBlockNodes: Txindex tip was not found; building txindex starting at genesis block") - - newTxIndexBestChain, _ := txi.TXIndexChain.CopyBestChain() - newBlockchainBestChain, _ := txi.CoreChain.CopyBestChain() - - return txindexTipNode, committedTip, nil, newTxIndexBestChain, newBlockchainBestChain - } - - derefedTxindexTipNode := *txindexTipNode - - // At this point, we know our txindex tip is in our block index so - // there must be a common ancestor between the tip and the block tip. 
- commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(&derefedTxindexTipNode, committedTip) - - return txindexTipNode, committedTip, commonAncestor, detachBlocks, attachBlocks + return txindexTipNode, committedTip, txindexTipNode } // Update syncs the transaction index with the blockchain. @@ -264,7 +249,7 @@ func (txi *TXIndex) Update() error { // done with the rest of the function. txi.TXIndexLock.Lock() defer txi.TXIndexLock.Unlock() - txindexTipNode, blockTipNode, commonAncestor, detachBlocks, attachBlocks := txi.GetTxindexUpdateBlockNodes() + txindexTipNode, blockTipNode, commonAncestor := txi.GetTxindexUpdateBlockNodes() // Note that the blockchain's ChainLock does not need to be held at this // point because we're just reading blocks from the db, which never get @@ -293,97 +278,103 @@ func (txi *TXIndex) Update() error { // For each of the blocks we're removing, delete the transactions from // the transaction index. - for _, blockToDetach := range detachBlocks { - if txi.killed { - glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while detaching blocks")) - break - } - // Go through each txn in the block and delete its mappings from our - // txindex. 
- glog.V(1).Infof("Update: Detaching block (height: %d, hash: %v)", - blockToDetach.Height, blockToDetach.Hash) - blockMsg, err := GetBlock(blockToDetach.Hash, txi.TXIndexChain.DB(), nil) - if err != nil { - return fmt.Errorf("Update: Problem fetching detach block "+ - "with hash %v: %v", blockToDetach.Hash, err) - } - blockHeight := uint64(txi.CoreChain.blockTip().Height) - err = txi.TXIndexChain.DB().Update(func(dbTxn *badger.Txn) error { - for _, txn := range blockMsg.Txns { - if err := DbDeleteTxindexTransactionMappingsWithTxn(dbTxn, nil, - blockHeight, txn, txi.Params, txi.CoreChain.eventManager, true); err != nil { - - return fmt.Errorf("Update: Problem deleting "+ - "transaction mappings for transaction %v: %v", txn.Hash(), err) - } - } - return nil - }) - if err != nil { - return err - } - - // Now that all the transactions have been deleted from our txindex, - // it's safe to disconnect the block from our txindex chain. - utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) - utxoOps, err := GetUtxoOperationsForBlock( - txi.TXIndexChain.DB(), nil, blockToDetach.Hash) - if err != nil { - return fmt.Errorf( - "Update: Error getting UtxoOps for block %v: %v", blockToDetach, err) - } - // Compute the hashes for all the transactions. - txHashes, err := ComputeTransactionHashes(blockMsg.Txns) - if err != nil { - return fmt.Errorf( - "Update: Error computing tx hashes for block %v: %v", - blockToDetach, err) - } - if err := utxoView.DisconnectBlock(blockMsg, txHashes, utxoOps, blockHeight); err != nil { - return fmt.Errorf("Update: Error detaching block "+ - "%v from UtxoView: %v", blockToDetach, err) - } - if err := utxoView.FlushToDb(blockHeight); err != nil { - return fmt.Errorf("Update: Error flushing view to db for block "+ - "%v: %v", blockToDetach, err) - } - // We have to flush a couple of extra things that the view doesn't flush... 
- if err := PutBestHash(txi.TXIndexChain.DB(), nil, utxoView.TipHash, ChainTypeDeSoBlock, txi.CoreChain.eventManager); err != nil { - return fmt.Errorf("Update: Error putting best hash for block "+ - "%v: %v", blockToDetach, err) - } - err = txi.TXIndexChain.DB().Update(func(txn *badger.Txn) error { - if err := DeleteUtxoOperationsForBlockWithTxn(txn, nil, blockToDetach.Hash, txi.TXIndexChain.eventManager, true); err != nil { - return fmt.Errorf("Update: Error deleting UtxoOperations 1 for block %v, %v", blockToDetach.Hash, err) - } - if err := txn.Delete(BlockHashToBlockKey(blockToDetach.Hash)); err != nil { - return fmt.Errorf("Update: Error deleting UtxoOperations 2 for block %v %v", blockToDetach.Hash, err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("Update: Error updating badgger: %v", err) - } - // Delete this block from the chain db so we don't get duplicate block errors. - - // Remove this block from our bestChain data structures. - newBlockIndexByHash, newBlockIndexByHeight := txi.TXIndexChain.CopyBlockIndexes() - newBestChain, newBestChainMap := txi.TXIndexChain.CopyBestChain() - newBestChain = newBestChain[:len(newBestChain)-1] - delete(newBestChainMap, *(blockToDetach.Hash)) - newBlockIndexByHash.Remove(*(blockToDetach.Hash)) - - txi.TXIndexChain.SetBestChainMap(newBestChain, newBestChainMap, newBlockIndexByHash, newBlockIndexByHeight) - - // At this point the entries for the block should have been removed - // from both our Txindex chain and our transaction index mappings. - } + // TODO: delete - we're simplifying the txindex logic to only use committed state. + //for _, blockToDetach := range detachBlocks { + // if txi.killed { + // glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while detaching blocks")) + // break + // } + // // Go through each txn in the block and delete its mappings from our + // // txindex. 
+ // glog.V(1).Infof("Update: Detaching block (height: %d, hash: %v)", + // blockToDetach.Height, blockToDetach.Hash) + // blockMsg, err := GetBlock(blockToDetach.Hash, txi.TXIndexChain.DB(), nil) + // if err != nil { + // return fmt.Errorf("Update: Problem fetching detach block "+ + // "with hash %v: %v", blockToDetach.Hash, err) + // } + // blockHeight := uint64(txi.CoreChain.blockTip().Height) + // err = txi.TXIndexChain.DB().Update(func(dbTxn *badger.Txn) error { + // for _, txn := range blockMsg.Txns { + // if err := DbDeleteTxindexTransactionMappingsWithTxn(dbTxn, nil, + // blockHeight, txn, txi.Params, txi.CoreChain.eventManager, true); err != nil { + // + // return fmt.Errorf("Update: Problem deleting "+ + // "transaction mappings for transaction %v: %v", txn.Hash(), err) + // } + // } + // return nil + // }) + // if err != nil { + // return err + // } + // + // // Now that all the transactions have been deleted from our txindex, + // // it's safe to disconnect the block from our txindex chain. + // utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) + // utxoOps, err := GetUtxoOperationsForBlock( + // txi.TXIndexChain.DB(), nil, blockToDetach.Hash) + // if err != nil { + // return fmt.Errorf( + // "Update: Error getting UtxoOps for block %v: %v", blockToDetach, err) + // } + // // Compute the hashes for all the transactions. 
+ // txHashes, err := ComputeTransactionHashes(blockMsg.Txns) + // if err != nil { + // return fmt.Errorf( + // "Update: Error computing tx hashes for block %v: %v", + // blockToDetach, err) + // } + // if err := utxoView.DisconnectBlock(blockMsg, txHashes, utxoOps, blockHeight); err != nil { + // return fmt.Errorf("Update: Error detaching block "+ + // "%v from UtxoView: %v", blockToDetach, err) + // } + // if err := utxoView.FlushToDb(blockHeight); err != nil { + // return fmt.Errorf("Update: Error flushing view to db for block "+ + // "%v: %v", blockToDetach, err) + // } + // // We have to flush a couple of extra things that the view doesn't flush... + // if err := PutBestHash(txi.TXIndexChain.DB(), nil, utxoView.TipHash, ChainTypeDeSoBlock, txi.CoreChain.eventManager); err != nil { + // return fmt.Errorf("Update: Error putting best hash for block "+ + // "%v: %v", blockToDetach, err) + // } + // err = txi.TXIndexChain.DB().Update(func(txn *badger.Txn) error { + // if err := DeleteUtxoOperationsForBlockWithTxn(txn, nil, blockToDetach.Hash, txi.TXIndexChain.eventManager, true); err != nil { + // return fmt.Errorf("Update: Error deleting UtxoOperations 1 for block %v, %v", blockToDetach.Hash, err) + // } + // if err := txn.Delete(BlockHashToBlockKey(blockToDetach.Hash)); err != nil { + // return fmt.Errorf("Update: Error deleting UtxoOperations 2 for block %v %v", blockToDetach.Hash, err) + // } + // return nil + // }) + // + // if err != nil { + // return fmt.Errorf("Update: Error updating badgger: %v", err) + // } + // // Delete this block from the chain db so we don't get duplicate block errors. + // + // // Remove this block from our bestChain data structures. 
+ // newBlockIndex := txi.TXIndexChain.CopyBlockIndexes() + // newTip := blockToDetach.GetParent(txi.TXIndexChain.blockIndex) + // if newTip == nil { + // return fmt.Errorf("Update: Error getting parent of block %v", blockToDetach) + // } + // + // txi.TXIndexChain.SetBestChainMap(newBlockIndex, newTip) + // + // // At this point the entries for the block should have been removed + // // from both our Txindex chain and our transaction index mappings. + //} // For each of the blocks we're adding, process them on our txindex chain // and add their mappings to our txn index. Compute any metadata that might // be useful. - for _, blockToAttach := range attachBlocks { + // Get the next block after the current txindex tip hash. we know we've already processed the txindex tip hash. + blockToAttach, exists, err := txi.CoreChain.GetBlockFromBestChainByHeight(uint64(txindexTipNode.Height+1), false) + if !exists || err != nil { + return fmt.Errorf("Update: Problem getting block at height %d: %v", txindexTipNode.Height+1, err) + } + for !blockToAttach.Hash.IsEqual(blockTipNode.Hash) { if txi.killed { glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while attaching blocks")) break @@ -408,7 +399,7 @@ func (txi *TXIndex) Update() error { utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager) if blockToAttach.Header.PrevBlockHash != nil && !utxoView.TipHash.IsEqual(blockToAttach.Header.PrevBlockHash) { var utxoViewAndUtxoOps *BlockViewAndUtxoOps - utxoViewAndUtxoOps, err = txi.TXIndexChain.getUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash) + utxoViewAndUtxoOps, err = txi.TXIndexChain.getUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash, blockToAttach.Header.Height-1) if err != nil { return fmt.Errorf("Update: Problem getting UtxoView at block hash %v: %v", blockToAttach.Header.PrevBlockHash, err) @@ -453,6 +444,11 @@ func (txi *TXIndex) Update() error { return fmt.Errorf("Update: Problem attaching block 
%v: %v", blockToAttach, err) } + var exists bool + blockToAttach, exists, err = txi.CoreChain.GetBlockFromBestChainByHeight(uint64(blockToAttach.Height+1), false) + if !exists || err != nil { + return fmt.Errorf("Update: Problem getting block at height %d: %v", blockToAttach.Height+1, err) + } } glog.Infof("Update: Txindex update complete. New tip: (height: %d, hash: %v)", diff --git a/lib/types.go b/lib/types.go index dd7bf0518..5c486c99d 100644 --- a/lib/types.go +++ b/lib/types.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "reflect" "sort" "github.com/deso-protocol/uint256" @@ -238,7 +237,7 @@ func (bh *BlockHash) IsEqual(target *BlockHash) bool { return false } - return reflect.DeepEqual(bh[:], target[:]) + return bytes.Equal(bh[:], target[:]) } func (bh *BlockHash) NewBlockHash() *BlockHash {