diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e44082b..a1ce348 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,8 +12,8 @@ jobs: name: "Check" runs-on: "ubuntu-latest" steps: - - uses: "actions/checkout@v3" - - uses: "arduino/setup-protoc@v2" + - uses: "actions/checkout@v4" + - uses: "arduino/setup-protoc@v3" - uses: "dtolnay/rust-toolchain@stable" - run: "cargo check" @@ -21,8 +21,8 @@ jobs: name: "Test Suite" runs-on: "ubuntu-latest" steps: - - uses: "actions/checkout@v3" - - uses: "arduino/setup-protoc@v2" + - uses: "actions/checkout@v4" + - uses: "arduino/setup-protoc@v3" - uses: "dtolnay/rust-toolchain@stable" - run: "cargo test" @@ -30,8 +30,8 @@ jobs: name: "Rustfmt" runs-on: "ubuntu-latest" steps: - - uses: "actions/checkout@v3" - - uses: "arduino/setup-protoc@v2" + - uses: "actions/checkout@v4" + - uses: "arduino/setup-protoc@v3" - uses: "dtolnay/rust-toolchain@nightly" with: components: "rustfmt" @@ -41,9 +41,9 @@ jobs: name: "Clippy" runs-on: "ubuntu-latest" steps: - - uses: "actions/checkout@v3" - - uses: "arduino/setup-protoc@v2" + - uses: "actions/checkout@v4" + - uses: "arduino/setup-protoc@v3" - uses: "dtolnay/rust-toolchain@nightly" with: components: "clippy" - - run: "cargo clippy -- -D warnings" \ No newline at end of file + - run: "cargo clippy -- -D warnings" diff --git a/.gitignore b/.gitignore index 580b082..1391daa 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,5 @@ expanded.rs *.db *.db-* -.idea/ \ No newline at end of file +.idea/ +state.json \ No newline at end of file diff --git a/.sqlx/query-08364ef2659aff5ef337da7ea574ff0ddd2f713d7c5d4131932b357cc14b078b.json b/.sqlx/query-08364ef2659aff5ef337da7ea574ff0ddd2f713d7c5d4131932b357cc14b078b.json deleted file mode 100644 index f4a2a23..0000000 --- a/.sqlx/query-08364ef2659aff5ef337da7ea574ff0ddd2f713d7c5d4131932b357cc14b078b.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "SQLite", - "query": "SELECT key_type, metadata FROM signers WHERE key = ?;", - "describe": { - "columns": [ - { - "name": "key_type", - "ordinal": 0, - "type_info": "Int64" - }, - { - "name": "metadata", - "ordinal": 1, - "type_info": "Text" - } - ], - "parameters": { - "Right": 1 - }, - "nullable": [ - false, - false - ] - }, - "hash": "08364ef2659aff5ef337da7ea574ff0ddd2f713d7c5d4131932b357cc14b078b" -} diff --git a/.sqlx/query-25ab59595acb0863eabb99cb2043204d497869fe55b83402b10c0710270c6f93.json b/.sqlx/query-25ab59595acb0863eabb99cb2043204d497869fe55b83402b10c0710270c6f93.json deleted file mode 100644 index 7164c24..0000000 --- a/.sqlx/query-25ab59595acb0863eabb99cb2043204d497869fe55b83402b10c0710270c6f93.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "INSERT INTO storage_allocations (\n id,\n rented_at,\n expires_at,\n chain_event_id,\n fid,\n units,\n payer\n) \nVALUES (?, ?, ?, ?, ?, ?, ?);", - "describe": { - "columns": [], - "parameters": { - "Right": 7 - }, - "nullable": [] - }, - "hash": "25ab59595acb0863eabb99cb2043204d497869fe55b83402b10c0710270c6f93" -} diff --git a/.sqlx/query-5aa05c7d36be3c45f8e9c7630b4d5839789951dc5c3d86e2f3defd9a0e7a03aa.json b/.sqlx/query-5aa05c7d36be3c45f8e9c7630b4d5839789951dc5c3d86e2f3defd9a0e7a03aa.json deleted file mode 100644 index 63c7bb3..0000000 --- a/.sqlx/query-5aa05c7d36be3c45f8e9c7630b4d5839789951dc5c3d86e2f3defd9a0e7a03aa.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE fids SET custody_address = ? 
WHERE fid = ?;", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - "hash": "5aa05c7d36be3c45f8e9c7630b4d5839789951dc5c3d86e2f3defd9a0e7a03aa" -} diff --git a/.sqlx/query-6553a8bc645fff15cdbc57aae2de151b9a09b6b14ed10d30e9660f55d48be619.json b/.sqlx/query-6553a8bc645fff15cdbc57aae2de151b9a09b6b14ed10d30e9660f55d48be619.json deleted file mode 100644 index 5de934e..0000000 --- a/.sqlx/query-6553a8bc645fff15cdbc57aae2de151b9a09b6b14ed10d30e9660f55d48be619.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE fids SET recovery_address = ? WHERE fid = ?;", - "describe": { - "columns": [], - "parameters": { - "Right": 2 - }, - "nullable": [] - }, - "hash": "6553a8bc645fff15cdbc57aae2de151b9a09b6b14ed10d30e9660f55d48be619" -} diff --git a/.sqlx/query-712408ec60b640e910cde3fa35d145b4df80f5af1a262b2a68b6f9dc24858574.json b/.sqlx/query-712408ec60b640e910cde3fa35d145b4df80f5af1a262b2a68b6f9dc24858574.json deleted file mode 100644 index a3f152e..0000000 --- a/.sqlx/query-712408ec60b640e910cde3fa35d145b4df80f5af1a262b2a68b6f9dc24858574.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE signers \nSET remove_chain_event_id = ? \nWHERE key = ? AND key_type = ?;", - "describe": { - "columns": [], - "parameters": { - "Right": 3 - }, - "nullable": [] - }, - "hash": "712408ec60b640e910cde3fa35d145b4df80f5af1a262b2a68b6f9dc24858574" -} diff --git a/.sqlx/query-846ee38a52ecdaa0f20ccf0518979f1174279c4cd2634e2bb142b3e53c371905.json b/.sqlx/query-846ee38a52ecdaa0f20ccf0518979f1174279c4cd2634e2bb142b3e53c371905.json deleted file mode 100644 index 4437a88..0000000 --- a/.sqlx/query-846ee38a52ecdaa0f20ccf0518979f1174279c4cd2634e2bb142b3e53c371905.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "SQLite", - "query": "SELECT block_number\nFROM chain_events\nORDER BY block_number\nDESC LIMIT 1;", - "describe": { - "columns": [ - { - "name": "block_number", - "ordinal": 0, - "type_info": "Int64" - } - ], - "parameters": { - "Right": 0 - }, - "nullable": [ - false - ] - }, - "hash": "846ee38a52ecdaa0f20ccf0518979f1174279c4cd2634e2bb142b3e53c371905" -} diff --git a/.sqlx/query-a35a2cd0ac05b787cba63c0663c3d7bfcae0760ea9d4dd383917045327a20d39.json b/.sqlx/query-a35a2cd0ac05b787cba63c0663c3d7bfcae0760ea9d4dd383917045327a20d39.json deleted file mode 100644 index 315e73b..0000000 --- a/.sqlx/query-a35a2cd0ac05b787cba63c0663c3d7bfcae0760ea9d4dd383917045327a20d39.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "INSERT INTO fids (\n fid,\n registered_at,\n chain_event_id,\n custody_address,\n recovery_address\n) \nVALUES (?, ?, ?, ?, ?)\nON CONFLICT (fid) DO NOTHING;\n", - "describe": { - "columns": [], - "parameters": { - "Right": 5 - }, - "nullable": [] - }, - "hash": "a35a2cd0ac05b787cba63c0663c3d7bfcae0760ea9d4dd383917045327a20d39" -} diff --git a/.sqlx/query-a97217a47e60ef00e1f16cfef7ae6a769f03ab903b90d57d16bc9f00b46886f9.json b/.sqlx/query-a97217a47e60ef00e1f16cfef7ae6a769f03ab903b90d57d16bc9f00b46886f9.json deleted file mode 100644 index d1f3f96..0000000 --- a/.sqlx/query-a97217a47e60ef00e1f16cfef7ae6a769f03ab903b90d57d16bc9f00b46886f9.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "INSERT INTO signers (\n id,\n added_at,\n removed_at,\n fid,\n requester_fid,\n add_chain_event_id,\n remove_chain_event_id,\n key_type,\n metadata_type,\n key,\n metadata\n) \nVALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);", - "describe": { - "columns": [], - "parameters": { - "Right": 11 - }, - 
"nullable": [] - }, - "hash": "a97217a47e60ef00e1f16cfef7ae6a769f03ab903b90d57d16bc9f00b46886f9" -} diff --git a/.sqlx/query-efd903ad0bd7871fc42ad9fe7bfc3eb15061f8b8ff74e9641545d8445562b2bd.json b/.sqlx/query-efd903ad0bd7871fc42ad9fe7bfc3eb15061f8b8ff74e9641545d8445562b2bd.json deleted file mode 100644 index a9526d8..0000000 --- a/.sqlx/query-efd903ad0bd7871fc42ad9fe7bfc3eb15061f8b8ff74e9641545d8445562b2bd.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "INSERT INTO chain_events (\n id,\n block_timestamp,\n fid,\n chain_id,\n block_number,\n transaction_index,\n log_index,\n type,\n block_hash,\n transaction_hash,\n body,\n raw\n) \nVALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);", - "describe": { - "columns": [], - "parameters": { - "Right": 12 - }, - "nullable": [] - }, - "hash": "efd903ad0bd7871fc42ad9fe7bfc3eb15061f8b8ff74e9641545d8445562b2bd" -} diff --git a/Cargo.lock b/Cargo.lock index 5d31e0c..8a5502e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -240,7 +240,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -250,7 +250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -871,7 +871,7 @@ dependencies = [ "js-sys", "num-traits", "wasm-bindgen", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -992,6 +992,19 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "unicode-width", + "windows-sys 0.52.0", +] + [[package]] name = "const-hex" version = "1.10.0" @@ -1335,7 +1348,7 @@ dependencies = [ "libc", "option-ext", "redox_users", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1460,6 +1473,12 @@ dependencies = [ "log", ] +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + [[package]] name = "encoding_rs" version = "0.8.33" @@ -1537,7 +1556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1548,7 +1567,7 @@ checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ "cfg-if", "home", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2374,7 +2393,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2634,6 +2653,19 @@ dependencies = [ "hashbrown 0.14.2", ] +[[package]] +name = "indicatif" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" +dependencies = [ + "console", + "instant", + "number_prefix", + "portable-atomic", + "unicode-width", +] + [[package]] name 
= "inlinable_string" version = "0.1.15" @@ -2666,7 +2698,7 @@ checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2677,7 +2709,7 @@ checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2 0.5.5", "widestring", - "windows-sys", + "windows-sys 0.48.0", "winreg", ] @@ -2695,7 +2727,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", "rustix 0.38.24", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2852,6 +2884,7 @@ dependencies = [ "libp2p-quic", "libp2p-swarm", "libp2p-tcp", + "libp2p-tls", "libp2p-upnp", "multiaddr", "pin-project", @@ -3358,7 +3391,7 @@ checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3610,6 +3643,12 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "number_prefix" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" + [[package]] name = "object" version = "0.32.1" @@ -3723,7 +3762,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -3978,7 +4017,7 @@ dependencies = [ "libc", "log", "pin-project-lite", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4004,6 +4043,12 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + [[package]] name = "powerfmt" version = "0.2.0" @@ -4302,7 +4347,7 @@ dependencies = [ "libc", "socket2 0.5.5", "tracing", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4532,7 +4577,7 @@ dependencies = [ "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4687,7 +4732,7 @@ dependencies = [ "io-lifetimes", "libc", "linux-raw-sys 0.3.8", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4700,7 +4745,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.11", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5071,7 +5116,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5516,6 +5561,7 @@ dependencies = [ "futures", "futures-util", "hex", + "indicatif", "log", "prost", "serde", @@ -5597,7 +5643,7 @@ dependencies = [ "fastrand 2.0.1", "redox_syscall", "rustix 0.38.24", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5709,7 +5755,7 @@ dependencies = [ "signal-hook-registry", "socket2 0.5.5", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -6118,6 +6164,12 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +[[package]] +name = "unicode-width" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" + [[package]] name = "unicode-xid" version = "0.2.4" @@ -6412,7 +6464,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ "windows-core", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -6421,7 +6473,7 @@ version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -6430,7 +6482,16 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.4", ] [[package]] @@ -6439,13 +6500,28 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -6454,42 +6530,84 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" 
+version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" + [[package]] name = "winnow" version = "0.5.19" @@ -6506,7 +6624,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f2dfd91..13da311 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ libp2p = { version = "0.52.0", features = [ "ed25519", "identify", "ping", + "tls", ] } prost = "0.11.9" serde = { version = "1.0.183", features = ["derive"] } @@ -60,7 +61,7 @@ alloy-primitives = { version = "0.5.3" } futures = { version = "0.3.17" } figment = { version = "0.10.12", features = ["toml", "env"] } dotenv = { version = "0.15.0" } - +indicatif = { version = "0.17.8" } [profile.release] codegen-units = 1 diff --git a/Config.toml b/Config.toml index 541fa8c..32e9864 100644 --- a/Config.toml +++ b/Config.toml @@ -9,3 +9,4 @@ storage_registry_address = "0x00000000fcce7f938e7ae6d3c335bd6a1a7c593d" abi_dir = "./lib/eth/abis" indexer_interval = 60 # This value is in seconds bootstrap_addrs = ["/ip4/107.21.22.118/tcp/2282"] +sync_block_range_size = 250 diff --git a/lib/cli/src/lib.rs b/lib/cli/src/lib.rs index 770ef32..cf43fcb 100644 --- a/lib/cli/src/lib.rs +++ b/lib/cli/src/lib.rs @@ -31,7 +31,7 @@ pub struct Cli { #[derive(Subcommand, Debug)] pub enum Commands { - Start(StartCommand), + Start(Box), #[command(name = "identity", about = "Create and verify Peer IDs")] Identity(IdentityArgs), Status(StatusCommand), diff --git a/lib/cli/src/start.rs b/lib/cli/src/start.rs index 9fe0ee4..875cfa0 100644 --- a/lib/cli/src/start.rs +++ b/lib/cli/src/start.rs @@ -1,8 +1,6 @@ use clap::Parser; use std::path::PathBuf; -const PEER_ID_FILENAME: &str = "id.protobuf"; -const DEFAULT_PEER_ID_DIR: &str = "./.hub"; const DEFAULT_PEER_ID_LOCATION: &str = "./.hub/default_id.protobuf"; const DEFAULT_CHUNK_SIZE: u64 = 10000; const DEFAULT_FNAME_SERVER_URL: &str = "https://fnames.farcaster.xyz"; @@ -16,7 +14,9 @@ pub struct TeleportOptions { )] pub network: Option, - #[arg(short, long, help = "Path to the PeerId 
file.")] + #[arg(short, long, + help = format!("The URL for the FName registry server (default: {})", DEFAULT_PEER_ID_LOCATION) + )] pub id: Option, #[arg(long, help = "The FID of the Hub operator")] @@ -247,7 +247,7 @@ pub struct DebuggingOptions { #[arg( long, - help = "The number of blocks to batch when syncing historical events from Farcaster contracts. (default: DEFAULT_CHUNK_SIZE)" + help = format!("The number of blocks to batch when syncing historical events from Farcaster contracts. (default: {})", DEFAULT_CHUNK_SIZE) )] pub chunk_size: Option, diff --git a/lib/common/src/lib.rs b/lib/common/src/lib.rs index 3940788..eed44cc 100644 --- a/lib/common/src/lib.rs +++ b/lib/common/src/lib.rs @@ -5,5 +5,6 @@ pub mod errors; pub mod peer_id; pub mod protobufs; pub mod signers; +pub mod state; pub mod time; pub mod username_proof; diff --git a/lib/common/src/state.rs b/lib/common/src/state.rs new file mode 100644 index 0000000..56fec94 --- /dev/null +++ b/lib/common/src/state.rs @@ -0,0 +1,35 @@ +use std::{fs::File, io::BufReader, path::Path}; + +use serde::{Deserialize, Serialize}; + +const STATE_FILENAME: &str = "state.json"; + +#[derive(Serialize, Deserialize)] +pub struct PersistentState { + pub last_synced_block: u64, +} + +impl PersistentState { + pub fn load() -> Self { + let path = Path::new(STATE_FILENAME); + if path.exists() && path.is_file() { + let file = File::open(path).unwrap(); + serde_json::from_reader(BufReader::new(file)).unwrap() + } else { + Self::default() + } + } + + pub fn store(&self) { + let file = File::create(STATE_FILENAME).unwrap(); + serde_json::to_writer(file, self).unwrap(); + } +} + +impl Default for PersistentState { + fn default() -> Self { + Self { + last_synced_block: 0, + } + } +} diff --git a/lib/common/src/time.rs b/lib/common/src/time.rs index 65bfd2f..59f45ee 100644 --- a/lib/common/src/time.rs +++ b/lib/common/src/time.rs @@ -1,6 +1,7 @@ use super::errors::*; const FARCASTER_EPOCH: i64 = 1609459200000; +const FARCASTER_EPOCH_IN_SECONDS: u32 = 1609459200; pub fn get_farcaster_time() -> Result { to_farcaster_time( @@ -32,6 +33,25 @@ pub fn to_farcaster_time(time: i64) -> Result { Ok(seconds_since_epoch.try_into().unwrap()) } +pub fn block_timestamp_to_farcaster_time(timestamp: u32) -> Result { + if timestamp < FARCASTER_EPOCH_IN_SECONDS { + return Err(HubError::BadRequest( + BadRequestType::InvalidParam, + "timestamp must be after Farcaster epoch (01/01/2021".to_string(), + )); + } + + let seconds_since_epoch = timestamp - FARCASTER_EPOCH_IN_SECONDS; + if seconds_since_epoch as i64 > 2i64.pow(32) - 1 { + return Err(HubError::BadRequest( + BadRequestType::InvalidParam, + "timestamp too far in future".to_string(), + )); + } + + Ok(seconds_since_epoch.try_into().unwrap()) +} + pub fn from_farcaster_time(time: u32) -> Result { Ok(time as i64 * 1000 + FARCASTER_EPOCH) } diff --git a/lib/eth/Cargo.toml b/lib/eth/Cargo.toml index 17bfeae..e4eef36 100644 --- a/lib/eth/Cargo.toml +++ b/lib/eth/Cargo.toml @@ -27,6 +27,7 @@ alloy-dyn-abi = { workspace = true } alloy-primitives = { workspace = true } log = { workspace = true } futures = { workspace = true } +indicatif = { workspace = true } [build-dependencies] ethers = { workspace = true } diff --git a/lib/eth/abis/KeyRegistry.json b/lib/eth/abis/KeyRegistry.json index ec3ce2b..8191939 100644 --- a/lib/eth/abis/KeyRegistry.json +++ b/lib/eth/abis/KeyRegistry.json @@ -6,10 +6,20 @@ "name": "_idRegistry", "type": "address" }, + { + "internalType": "address", + "name": "_migrator", + "type": "address" + }, { 
"internalType": "address", "name": "_initialOwner", "type": "address" + }, + { + "internalType": "uint256", + "name": "_maxKeysPerFid", + "type": "uint256" } ], "stateMutability": "nonpayable", @@ -20,6 +30,16 @@ "name": "AlreadyMigrated", "type": "error" }, + { + "inputs": [], + "name": "ExceedsMaximum", + "type": "error" + }, + { + "inputs": [], + "name": "GatewayFrozen", + "type": "error" + }, { "inputs": [ { @@ -38,12 +58,12 @@ }, { "inputs": [], - "name": "InvalidAddress", + "name": "InvalidKeyType", "type": "error" }, { "inputs": [], - "name": "InvalidKeyType", + "name": "InvalidMaxKeys", "type": "error" }, { @@ -73,17 +93,17 @@ }, { "inputs": [], - "name": "OnlyTrustedCaller", + "name": "OnlyGuardian", "type": "error" }, { "inputs": [], - "name": "Registrable", + "name": "OnlyMigrator", "type": "error" }, { "inputs": [], - "name": "Seedable", + "name": "PermissionRevoked", "type": "error" }, { @@ -166,6 +186,19 @@ "name": "Add", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "guardian", + "type": "address" + } + ], + "name": "Add", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -194,13 +227,20 @@ { "anonymous": false, "inputs": [], - "name": "DisableTrustedOnly", + "name": "EIP712DomainChanged", "type": "event" }, { "anonymous": false, - "inputs": [], - "name": "EIP712DomainChanged", + "inputs": [ + { + "indexed": false, + "internalType": "address", + "name": "keyGateway", + "type": "address" + } + ], + "name": "FreezeKeyGateway", "type": "event" }, { @@ -209,7 +249,7 @@ { "indexed": true, "internalType": "uint256", - "name": "keysMigratedAt", + "name": "migratedAt", "type": "uint256" } ], @@ -292,6 +332,19 @@ "name": "Remove", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "guardian", + "type": "address" + } + ], + "name": "Remove", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -315,25 +368,57 @@ "anonymous": false, "inputs": [ { - "indexed": true, + "indexed": false, "internalType": "address", - "name": "oldCaller", + "name": "oldKeyGateway", "type": "address" }, { - "indexed": true, + "indexed": false, "internalType": "address", - "name": "newCaller", + "name": "newKeyGateway", "type": "address" + } + ], + "name": "SetKeyGateway", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "oldMax", + "type": "uint256" }, + { + "indexed": false, + "internalType": "uint256", + "name": "newMax", + "type": "uint256" + } + ], + "name": "SetMaxKeysPerFid", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ { "indexed": false, "internalType": "address", - "name": "owner", + "name": "oldMigrator", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "newMigrator", "type": "address" } ], - "name": "SetTrustedCaller", + "name": "SetMigrator", "type": "event" }, { @@ -380,19 +465,6 @@ "name": "Unpaused", "type": "event" }, - { - "inputs": [], - "name": "ADD_TYPEHASH", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [], "name": "REMOVE_TYPEHASH", @@ -428,6 +500,11 @@ }, { "inputs": [ + { + "internalType": "address", + "name": "fidOwner", + "type": "address" + }, { "internalType": "uint32", "name": "keyType", @@ -458,41 +535,11 @@ "inputs": [ { "internalType": "address", - "name": "fidOwner", + 
"name": "guardian", "type": "address" - }, - { - "internalType": "uint32", - "name": "keyType", - "type": "uint32" - }, - { - "internalType": "bytes", - "name": "key", - "type": "bytes" - }, - { - "internalType": "uint8", - "name": "metadataType", - "type": "uint8" - }, - { - "internalType": "bytes", - "name": "metadata", - "type": "bytes" - }, - { - "internalType": "uint256", - "name": "deadline", - "type": "uint256" - }, - { - "internalType": "bytes", - "name": "sig", - "type": "bytes" } ], - "name": "addFor", + "name": "addGuardian", "outputs": [], "stateMutability": "nonpayable", "type": "function" @@ -559,13 +606,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [], - "name": "disableTrustedOnly", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "domainSeparatorV4", @@ -622,6 +662,26 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "freezeKeyGateway", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "gatewayFrozen", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "gracePeriod", @@ -635,6 +695,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "guardian", + "type": "address" + } + ], + "name": "guardians", + "outputs": [ + { + "internalType": "bool", + "name": "isGuardian", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -680,6 +759,35 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "fid", + "type": "uint256" + }, + { + "internalType": "enum IKeyRegistry.KeyState", + "name": "state", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "keyAt", + "outputs": [ + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -716,6 +824,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "keyGateway", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -745,14 +866,77 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "fid", + "type": "uint256" + }, + { + "internalType": "enum IKeyRegistry.KeyState", + "name": "state", + "type": "uint8" + } + ], + "name": "keysOf", + "outputs": [ + { + "internalType": "bytes[]", + "name": "", + "type": "bytes[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "fid", + "type": "uint256" + }, + { + "internalType": "enum IKeyRegistry.KeyState", + "name": "state", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "startIdx", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "batchSize", + "type": "uint256" + } + ], + "name": "keysOf", + "outputs": [ + { + "internalType": "bytes[]", + "name": "page", + "type": "bytes[]" + }, + { + "internalType": "uint256", + "name": "nextIdx", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], - "name": "keysMigratedAt", + "name": "maxKeysPerFid", "outputs": [ { - 
"internalType": "uint40", + "internalType": "uint256", "name": "", - "type": "uint40" + "type": "uint256" } ], "stateMutability": "view", @@ -760,11 +944,37 @@ }, { "inputs": [], - "name": "migrateKeys", + "name": "migrate", "outputs": [], "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "migratedAt", + "outputs": [ + { + "internalType": "uint40", + "name": "", + "type": "uint40" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "migrator", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -871,6 +1081,19 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "guardian", + "type": "address" + } + ], + "name": "removeGuardian", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [], "name": "renounceOwnership", @@ -895,11 +1118,11 @@ "inputs": [ { "internalType": "address", - "name": "_trustedCaller", + "name": "_keyGateway", "type": "address" } ], - "name": "setTrustedCaller", + "name": "setKeyGateway", "outputs": [], "stateMutability": "nonpayable", "type": "function" @@ -907,22 +1130,12 @@ { "inputs": [ { - "internalType": "uint32", - "name": "keyType", - "type": "uint32" - }, - { - "internalType": "uint8", - "name": "metadataType", - "type": "uint8" - }, - { - "internalType": "contract IMetadataValidator", - "name": "validator", - "type": "address" + "internalType": "uint256", + "name": "_maxKeysPerFid", + "type": "uint256" } ], - "name": "setValidator", + "name": "setMaxKeysPerFid", "outputs": [], "stateMutability": "nonpayable", "type": "function" @@ -931,72 +1144,73 @@ "inputs": [ { "internalType": "address", - "name": "newOwner", + "name": "_migrator", "type": "address" } ], - "name": "transferOwnership", + "name": "setMigrator", "outputs": [], "stateMutability": "nonpayable", "type": "function" }, { "inputs": [ - { - "internalType": "address", - "name": "fidOwner", - "type": "address" - }, { "internalType": "uint32", "name": "keyType", "type": "uint32" }, - { - "internalType": "bytes", - "name": "key", - "type": "bytes" - }, { "internalType": "uint8", "name": "metadataType", "type": "uint8" }, { - "internalType": "bytes", - "name": "metadata", - "type": "bytes" + "internalType": "contract IMetadataValidator", + "name": "validator", + "type": "address" } ], - "name": "trustedAdd", + "name": "setValidator", "outputs": [], "stateMutability": "nonpayable", "type": "function" }, { - "inputs": [], - "name": "trustedCaller", + "inputs": [ + { + "internalType": "uint256", + "name": "fid", + "type": "uint256" + }, + { + "internalType": "enum IKeyRegistry.KeyState", + "name": "state", + "type": "uint8" + } + ], + "name": "totalKeys", "outputs": [ { - "internalType": "address", + "internalType": "uint256", "name": "", - "type": "address" + "type": "uint256" } ], "stateMutability": "view", "type": "function" }, { - "inputs": [], - "name": "trustedOnly", - "outputs": [ + "inputs": [ { - "internalType": "uint256", - "name": "", - "type": "uint256" + "internalType": "address", + "name": "newOwner", + "type": "address" } ], - "stateMutability": "view", + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", "type": "function" }, { @@ -1006,6 +1220,19 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "useNonce", + "outputs": [ + { + 
"internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { diff --git a/lib/eth/src/id_registry.rs b/lib/eth/src/id_registry.rs index 9071911..9c6f09d 100644 --- a/lib/eth/src/id_registry.rs +++ b/lib/eth/src/id_registry.rs @@ -4,13 +4,14 @@ use ethers::{ providers::{JsonRpcClient, Provider}, types::{Address, Filter, Log, U256}, }; +use log::{error, warn}; use sqlx::Acquire; use std::error::Error; use std::sync::Arc; use teleport_common::protobufs::generated::{ on_chain_event, IdRegisterEventBody, IdRegisterEventType, OnChainEvent, OnChainEventType, }; -use teleport_storage::db::{self}; +use teleport_storage::db::{self, ChainEventRow, FidRecoveryUpdate, FidRow, FidTransfer}; use teleport_storage::Store; #[derive(Debug, Clone, EthEvent)] @@ -77,7 +78,7 @@ impl Contract { }) } - pub async fn get_register_logs( + pub async fn get_id_registry_logs( &self, start_block: u64, end_block: u64, @@ -86,131 +87,190 @@ impl Contract { .address(self.inner.address()) .from_block(start_block) .to_block(end_block) - .topic0(get_signature_topic(REGISTER_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; + .topic0(vec![ + get_signature_topic(REGISTER_SIGNATURE), + get_signature_topic(TRANSFER_SIGNATURE), + get_signature_topic(RECOVERY_SIGNATURE), + get_signature_topic(CHANGE_RECOVERY_ADDRESS_SIGNATURE), + ]); - Ok(logs) - } - - async fn process_register_log( - &self, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result<(db::FidRow, db::ChainEventRow), Box> { - let parsed_log: Register = parse_log(log.clone())?; - - let body = IdRegisterEventBody { - to: parsed_log.to.to_fixed_bytes().to_vec(), - event_type: IdRegisterEventType::Register as i32, - from: vec![], - recovery_address: parsed_log.recovery.as_bytes().to_vec(), - }; + let all_logs = get_logs(self.provider.clone(), &filter).await?; - let onchain_event = OnChainEvent { - r#type: OnChainEventType::EventTypeIdRegister as i32, - chain_id, - block_number: log.block_number.unwrap().as_u32(), - block_hash: log.block_hash.unwrap().to_fixed_bytes().to_vec(), - block_timestamp: timestamp as u64, - transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), - log_index: log.log_index.unwrap().as_u32(), - fid: parsed_log.id.as_u64(), - body: Some(on_chain_event::Body::IdRegisterEventBody(body)), - tx_index: log.transaction_index.unwrap().as_u32(), - version: 2, - }; - - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); - let fid_row = db::FidRow { - fid: parsed_log.id.as_u64() as i64, - registered_at: timestamp.into(), - chain_event_id: event_row.id.clone(), - custody_address: parsed_log.to.to_fixed_bytes(), - recovery_address: parsed_log.recovery.to_fixed_bytes(), - }; - - Ok((fid_row, event_row)) - } - - pub async fn persist_register_log( - &self, - store: &Store, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result<(), Box> { - let (fid_row, event_row) = self.process_register_log(log, chain_id, timestamp).await?; - - // These must happen 1 at a time to satisfy foreign key constraints - event_row.insert(store).await?; - fid_row.insert(&store).await?; - - Ok(()) + Ok(all_logs) } - pub async fn persist_many_register_logs( + pub async fn process_id_registry_logs( &self, store: &Store, logs: Vec, + timestamps: Vec, chain_id: u32, - timestamps: &[u32], ) -> Result<(), Box> { - let mut fid_rows = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamps.iter()) { 
- let (fid_row, event_row) = self.process_register_log(log, chain_id, *timestamp).await?; - fid_rows.push(fid_row); - event_rows.push(event_row); + let mut chain_events: Vec = vec![]; + let mut fids: Vec = vec![]; + let mut fid_transfers: Vec = vec![]; + let mut fid_recovery_updated: Vec = vec![]; + + for (i, log) in logs.iter().enumerate() { + if log.block_hash.is_none() + || log.block_number.is_none() + || log.transaction_hash.is_none() + || log.transaction_index.is_none() + || log.log_index.is_none() + || log.removed.is_some_and(|removed| removed) + { + continue; + } + let timestamp = timestamps[i]; + + if log.topics[0] == get_signature_topic(REGISTER_SIGNATURE) { + let (chain_events_row, fid_row) = + match self.process_register_log(log, timestamp, chain_id) { + Ok((chain_events_row, fid_row)) => (chain_events_row, fid_row), + Err(e) => { + warn!("Failed to process Register log: {:?}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + fids.push(fid_row); + } else if log.topics[0] == get_signature_topic(TRANSFER_SIGNATURE) { + let (chain_events_row, fid_transfer_row) = + match self.process_transfer_log(log, timestamp, chain_id) { + Ok((chain_events_row, fid_transfer_row)) => { + (chain_events_row, fid_transfer_row) + } + Err(e) => { + warn!("Failed to process Transfer log: {:?}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + fid_transfers.push(fid_transfer_row); + } else if log.topics[0] == get_signature_topic(CHANGE_RECOVERY_ADDRESS_SIGNATURE) { + let (chain_events_row, fid_recovery_update_row) = + match self.process_change_recovery_address_log(log, timestamp, chain_id) { + Ok((chain_events_row, fid_recovery_update_row)) => { + (chain_events_row, fid_recovery_update_row) + } + Err(e) => { + warn!("Failed to process ChangeRecoveryAddress log: {:?}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + fid_recovery_updated.push(fid_recovery_update_row); + } else if log.topics[0] == get_signature_topic(RECOVERY_SIGNATURE) { + let (chain_events_row, fid_transfer_row) = + match self.process_recovery_log(log, timestamp, chain_id) { + Ok((chain_events_row, fid_transfer_row)) => { + (chain_events_row, fid_transfer_row) + } + Err(e) => { + warn!("Failed to process Recover log: {:?}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + fid_transfers.push(fid_transfer_row); + } } let mut connection = store.conn.acquire().await?; let mut transaction = connection.begin().await?; - let queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; + let event_queries = db::ChainEventRow::generate_bulk_insert_queries(&chain_events)?; + for event_query_str in event_queries { + let event_query = sqlx::query(&event_query_str); + let event_query_result = event_query.execute(&mut *transaction).await; + + match event_query_result { + Ok(_) => {} + Err(e) => { + error!( + "Failed to insert chain event row: {} {}", + e, &event_query_str + ); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } + } + + let fids_queries = db::FidRow::generate_bulk_insert_queries(&fids)?; + for fid_query_str in fids_queries { + let fid_query = sqlx::query(&fid_query_str); + let fid_query_result = fid_query.execute(&mut *transaction).await; + match fid_query_result { + Ok(_) => {} + Err(e) => { + error!("Failed to insert fid row: {} {}", e, &fid_query_str); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } } - let 
queries = db::FidRow::generate_bulk_insert_queries(&fid_rows)?; - for query in queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; + let transfer_queries = db::FidRow::generate_bulk_transfer_queries(&fid_transfers)?; + for transfer_query_str in transfer_queries { + let transfer_query = sqlx::query(&transfer_query_str); + let transfer_query_result = transfer_query.execute(&mut *transaction).await; + match transfer_query_result { + Ok(_) => {} + Err(e) => { + error!( + "Failed to conduct fid transfer: {} {}", + e, &transfer_query_str + ); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } } + + let recovery_queries = + db::FidRow::generate_bulk_update_recovery_address_queries(&fid_recovery_updated)?; + for recovery_query_str in recovery_queries { + let recovery_query = sqlx::query(&recovery_query_str); + let recovery_query_result = recovery_query.execute(&mut *transaction).await; + match recovery_query_result { + Ok(_) => {} + Err(e) => { + error!( + "Failed to update fid recovery address: {} {}", + e, &recovery_query_str + ); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } + } + transaction.commit().await?; Ok(()) } - pub async fn get_transfer_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(TRANSFER_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - Ok(logs) - } - - pub async fn process_transfer_log( + fn process_register_log( &self, log: &Log, - chain_id: u32, timestamp: u32, - ) -> Result<(db::FidTransfer, db::ChainEventRow), Box> { - let parsed_log: Transfer = parse_log(log.clone())?; + chain_id: u32, + ) -> Result<(ChainEventRow, FidRow), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse Register event args: {:?}", e).into()), + }; - let body = IdRegisterEventBody { - to: parsed_log.to.to_fixed_bytes().to_vec(), - from: parsed_log.from.to_fixed_bytes().to_vec(), - event_type: IdRegisterEventType::Transfer as i32, - recovery_address: vec![], + let id_register_event_body = IdRegisterEventBody { + event_type: IdRegisterEventType::Register as i32, + to: parsed_log.to.as_bytes().to_vec(), + from: vec![], + recovery_address: parsed_log.recovery.as_bytes().to_vec(), }; let onchain_event = OnChainEvent { @@ -221,95 +281,39 @@ impl Contract { block_timestamp: timestamp as u64, transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), log_index: log.log_index.unwrap().as_u32(), - fid: parsed_log.id.as_u64(), - body: Some(on_chain_event::Body::IdRegisterEventBody(body)), + fid: parsed_log.id.try_into()?, tx_index: log.transaction_index.unwrap().as_u32(), version: 2, + body: Some(on_chain_event::Body::IdRegisterEventBody( + id_register_event_body, + )), }; - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); - - let fid_transfer = db::FidTransfer { - fid: parsed_log.id.as_u32() as u32, + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let fid_row = db::FidRow { + fid: parsed_log.id.try_into()?, + registered_at: timestamp as i64 * 1000, // timestamp is in seconds + transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), + log_index: log.log_index.unwrap().as_u32(), custody_address: parsed_log.to.to_fixed_bytes(), + recovery_address: 
parsed_log.recovery.to_fixed_bytes(), }; - Ok((fid_transfer, event_row)) + Ok((chain_events_row, fid_row)) } - pub async fn persist_transfer_log( + fn process_transfer_log( &self, - store: &Store, log: &Log, - chain_id: u32, timestamp: u32, - ) -> Result<(), Box> { - let (fid_transfer, event_row) = self.process_transfer_log(log, chain_id, timestamp).await?; - - event_row.insert(&store).await?; - db::FidRow::transfer(&store, &fid_transfer).await?; - - Ok(()) - } - - pub async fn persist_many_transfer_logs( - &self, - store: &Store, - logs: Vec, - chain_id: u32, - timestamps: &[u32], - ) -> Result<(), Box> { - let mut fid_transfers = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamps.iter()) { - let (fid_transfer, event_row) = - self.process_transfer_log(log, chain_id, *timestamp).await?; - fid_transfers.push(fid_transfer); - event_rows.push(event_row); - } - - let mut conn = store.conn.acquire().await?; - let mut transaction = conn.begin().await?; - - let insert_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in insert_queries { - sqlx::query(&query).execute(&mut *transaction).await?; - } - - let transfer_queries = db::FidRow::generate_bulk_transfer_queries(&fid_transfers)?; - for query in transfer_queries { - sqlx::query(&query).execute(&mut *transaction).await?; - } - - transaction.commit().await?; - - Ok(()) - } - - pub async fn get_recovery_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(RECOVERY_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - Ok(logs) - } - - pub async fn process_recovery_log( - &self, - log: &Log, chain_id: u32, - timestamp: u32, - ) -> Result<(db::FidTransfer, db::ChainEventRow), Box> { - let parsed_log: Recover = parse_log(log.clone())?; + ) -> Result<(ChainEventRow, FidTransfer), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse Transfer event args: {:?}", e).into()), + }; - let body = IdRegisterEventBody { + let id_register_event_body = IdRegisterEventBody { to: parsed_log.to.to_fixed_bytes().to_vec(), from: parsed_log.from.to_fixed_bytes().to_vec(), event_type: IdRegisterEventType::Transfer as i32, @@ -324,104 +328,42 @@ impl Contract { block_timestamp: timestamp as u64, transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), log_index: log.log_index.unwrap().as_u32(), - fid: parsed_log.id.as_u64(), - body: Some(on_chain_event::Body::IdRegisterEventBody(body)), + fid: parsed_log.id.try_into()?, + body: Some(on_chain_event::Body::IdRegisterEventBody( + id_register_event_body, + )), tx_index: log.transaction_index.unwrap().as_u32(), version: 2, }; - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let fid_transfer_row = db::FidTransfer { + fid: parsed_log.id.try_into()?, + custody_address: parsed_log.to.to_fixed_bytes(), + }; - // A recovery's delta is identical to a transfer - Ok(( - db::FidTransfer { - fid: parsed_log.id.as_u32(), - custody_address: parsed_log.to.to_fixed_bytes(), - }, - event_row, - )) + Ok((chain_events_row, fid_transfer_row)) } - pub async fn persist_recovery_log( + fn process_change_recovery_address_log( &self, - store: &Store, log: &Log, - chain_id: 
u32, timestamp: u32, - ) -> Result<(), Box> { - let (recovery_update, event_row) = - self.process_recovery_log(log, chain_id, timestamp).await?; - - event_row.insert(&store).await?; - db::FidRow::transfer(&store, &recovery_update).await?; - - Ok(()) - } - - pub async fn persist_many_recovery_logs( - &self, - store: &Store, - logs: Vec, chain_id: u32, - timestamps: &[u32], - ) -> Result<(), Box> { - let mut recoveries = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamps.iter()) { - let (recovery_update, event_row) = - self.process_recovery_log(log, chain_id, *timestamp).await?; - recoveries.push(recovery_update); - event_rows.push(event_row); - } - - let mut connection = store.conn.acquire().await?; - let mut transaction = connection.begin().await?; - - let insert_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in insert_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - - let transfer_queries = db::FidRow::generate_bulk_transfer_queries(&recoveries)?; - for query in transfer_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - - transaction.commit().await?; - - Ok(()) - } - - pub async fn get_change_recovery_address_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(CHANGE_RECOVERY_ADDRESS_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - - Ok(logs) - } - - pub async fn process_change_recovery_address_log( - &self, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result<(db::FidRecoveryUpdate, db::ChainEventRow), Box> { - let parsed_log: ChangeRecoveryAddress = parse_log(log.clone())?; + ) -> Result<(ChainEventRow, FidRecoveryUpdate), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => { + return Err( + format!("Failed to parse ChangeRecoveryAddress event args: {:?}", e).into(), + ) + } + }; - let body = IdRegisterEventBody { + let id_register_event_body = IdRegisterEventBody { to: vec![], from: vec![], - event_type: IdRegisterEventType::Transfer as i32, + event_type: IdRegisterEventType::ChangeRecovery as i32, recovery_address: parsed_log.recovery.as_bytes().to_vec(), }; @@ -433,205 +375,63 @@ impl Contract { block_timestamp: timestamp as u64, transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), log_index: log.log_index.unwrap().as_u32(), - fid: parsed_log.id.as_u64(), - body: Some(on_chain_event::Body::IdRegisterEventBody(body)), + fid: parsed_log.id.try_into()?, + body: Some(on_chain_event::Body::IdRegisterEventBody( + id_register_event_body, + )), tx_index: log.transaction_index.unwrap().as_u32(), version: 2, }; - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let fid_recovery_update_row = db::FidRecoveryUpdate { + fid: parsed_log.id.try_into()?, + recovery_address: parsed_log.recovery.to_fixed_bytes(), + }; - Ok(( - db::FidRecoveryUpdate { - fid: parsed_log.id.as_u32(), - recovery_address: parsed_log.recovery.to_fixed_bytes(), - }, - event_row, - )) + Ok((chain_events_row, fid_recovery_update_row)) } - pub async fn persist_change_recovery_address_log( + fn process_recovery_log( &self, - store: &Store, log: &Log, - chain_id: u32, timestamp: u32, 
- ) -> Result<(), Box> { - let (fid_recovery_update, event_row) = self - .process_change_recovery_address_log(log, chain_id, timestamp) - .await?; - - db::FidRow::update_recovery_address(&store, &fid_recovery_update).await?; - event_row.insert(&store).await?; - - Ok(()) - } - - pub async fn persist_many_change_recovery_address_logs( - &self, - store: &Store, - logs: Vec, chain_id: u32, - timestamps: &[u32], - ) -> Result<(), Box> { - let mut recovery_address_updates = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamps.iter()) { - let (update, event_row) = self - .process_change_recovery_address_log(log, chain_id, *timestamp) - .await?; - recovery_address_updates.push(update); - event_rows.push(event_row); - } - - let mut conn = store.conn.acquire().await?; - let mut transaction = conn.begin().await?; - - let insert_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in insert_queries { - sqlx::query(&query).execute(&mut *transaction).await?; - } - - let update_queries = - db::FidRow::generate_bulk_update_recovery_address_queries(&recovery_address_updates)?; - for query in update_queries { - sqlx::query(&query).execute(&mut *transaction).await?; - } - - transaction.commit().await?; - - Ok(()) - } -} + ) -> Result<(ChainEventRow, FidTransfer), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse Recover event args: {:?}", e).into()), + }; -#[cfg(test)] -mod tests { - use super::*; - use ethers::core::types::{Bytes, Log, H160, U64}; - use ethers::types::H256; - use hex::FromHex; - use sqlx::Row; - use std::path::Path; - use std::str::FromStr; - - async fn setup_db() -> Store { - let store = Store::new("sqlite::memory:".to_string()).await; - let migrator = sqlx::migrate::Migrator::new(Path::new("../storage/migrations")) - .await - .unwrap(); - migrator.run(&store.conn).await.unwrap(); - store - } + let id_register_event_body = IdRegisterEventBody { + to: parsed_log.to.to_fixed_bytes().to_vec(), + from: parsed_log.from.to_fixed_bytes().to_vec(), + event_type: IdRegisterEventType::Transfer as i32, + recovery_address: vec![], + }; - fn mock_log() -> Log { - Log { - address: H160::from_str("0x00000000fc6c5f01fc30151999387bb99a9f489b").unwrap(), - topics: vec![ - H256::from_str( - "0xf2e19a901b0748d8b08e428d0468896a039ac751ec4fec49b44b7b9c28097e45", - ) - .unwrap(), - H256::from_str( - "0x00000000000000000000000074551863ebff52d6e3d6657dd1d2337bdb60521b", - ) - .unwrap(), - H256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000d55", - ) - .unwrap(), - ], - data: Bytes::from_hex( - "0x00000000000000000000000000000000fcb080a4d6c39a9354da9eb9bc104cd7", - ) - .unwrap(), - block_hash: Some( - H256::from_str( - "0x81340703f2d3064dc4ce507b1491e25efdd32e048827f68819e12727c9924d5d", - ) - .unwrap(), - ), - block_number: Some(U64::from(111894017)), - transaction_hash: Some( - H256::from_str( - "0xd6b5e15c489e27cdeecbbd8801d62b6f7a0ff05609bc89dd3ab1083c9e3a2d1a", - ) - .unwrap(), - ), - transaction_index: Some(U64::from(7)), - log_index: Some(U256::from(208)), - transaction_log_index: None, - log_type: None, - removed: Some(false), - } - } + let onchain_event = OnChainEvent { + r#type: OnChainEventType::EventTypeIdRegister as i32, + chain_id, + block_number: log.block_number.unwrap().as_u32(), + block_hash: log.block_hash.unwrap().to_fixed_bytes().to_vec(), + block_timestamp: timestamp as u64, + transaction_hash: 
log.transaction_hash.unwrap().as_bytes().to_vec(), + log_index: log.log_index.unwrap().as_u32(), + fid: parsed_log.id.try_into()?, + body: Some(on_chain_event::Body::IdRegisterEventBody( + id_register_event_body, + )), + tx_index: log.transaction_index.unwrap().as_u32(), + version: 2, + }; - #[tokio::test] - async fn test_get_register_logs() { - let store = setup_db().await; - let (provider, mock) = Provider::mocked(); - mock.push::, Vec>(vec![mock_log()]) - .expect("pushing mock log"); + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let fid_transfer_row = db::FidTransfer { + fid: parsed_log.id.try_into()?, + custody_address: parsed_log.to.to_fixed_bytes(), + }; - let id_registry = Contract::new( - provider, - "0x00000000fc6c5f01fc30151999387bb99a9f489b".to_string(), - "./abis/IdRegistry.json".to_string(), - ) - .unwrap(); - - let logs = id_registry.get_register_logs(0, 100000000).await.unwrap(); - id_registry - .persist_register_log(&store, &logs[0], 10u32, 0u32) - .await - .unwrap(); - - let mut conn = store.conn.acquire().await.unwrap(); - let chain_event_rows = sqlx::query("select * from chain_events") - .fetch_all(&mut *conn) - .await - .unwrap(); - assert_eq!(chain_event_rows.len(), 1); - assert_eq!(chain_event_rows[0].get::("fid"), 3413); - assert_eq!(chain_event_rows[0].get::("type"), 3); - assert_eq!(chain_event_rows[0].get::("chain_id"), 10); - assert_eq!(chain_event_rows[0].get::("transaction_index"), 7); - assert_eq!(chain_event_rows[0].get::("block_number"), 111894017); - assert_eq!( - hex::encode(chain_event_rows[0].get::, _>("block_hash")), - "81340703f2d3064dc4ce507b1491e25efdd32e048827f68819e12727c9924d5d" - ); - assert_eq!( - hex::encode(chain_event_rows[0].get::, _>("transaction_hash")), - "d6b5e15c489e27cdeecbbd8801d62b6f7a0ff05609bc89dd3ab1083c9e3a2d1a" - ); - assert_eq!( - hex::encode(chain_event_rows[0].get::, _>("body")), - "0a1474551863ebff52d6e3d6657dd1d2337bdb60521b1001221400000000fcb080a4d6c39a9354da9eb9bc104cd7" - ); - assert_eq!( - hex::encode(chain_event_rows[0].get::, _>("raw")), - "00000000000000000000000000000000fcb080a4d6c39a9354da9eb9bc104cd7" - ); - - let fid_rows = sqlx::query("select * from fids") - .fetch_all(&mut *conn) - .await - .unwrap(); - - assert_eq!(fid_rows.len(), 1); - assert_eq!(fid_rows[0].get::("fid"), 3413); - assert_eq!( - fid_rows[0].get::("chain_event_id"), - chain_event_rows[0].get::("id") - ); - assert_eq!( - hex::encode(fid_rows[0].get::, _>("custody_address")), - "74551863ebff52d6e3d6657dd1d2337bdb60521b" - ); - assert_eq!( - hex::encode(fid_rows[0].get::, _>("recovery_address")), - "00000000fcb080a4d6c39a9354da9eb9bc104cd7" - ); + Ok((chain_events_row, fid_transfer_row)) } } diff --git a/lib/eth/src/indexer.rs b/lib/eth/src/indexer.rs index 1664a81..611c6b1 100644 --- a/lib/eth/src/indexer.rs +++ b/lib/eth/src/indexer.rs @@ -2,37 +2,34 @@ use crate::id_registry; use crate::key_registry; use crate::storage_registry; use crate::utils::get_block_timestamp; + use ethers::{ providers::{JsonRpcClient, Middleware, Provider}, types::{BlockNumber, Log, H256}, }; -use std::collections::HashMap; -use std::error::Error; -use teleport_storage::{db, Store}; +use indicatif::{ProgressBar, ProgressStyle}; +use log::{debug, error}; +use std::{ + cmp::min, + collections::HashMap, + error::Error, + sync::{Arc, Mutex}, +}; +use teleport_common::state::PersistentState; +use teleport_storage::Store; use tokio; -// todo: Is this right? 
IdRegistry seems to be deployed at 108869029u64 -// const FARCASTER_START_BLOCK: u64 = 108864739u64; -const FARCASTER_START_BLOCK: u64 = 111816370u64; -// Number of blocks to read at a time (max 2000) -const BLOCK_INTERVAL: u64 = 2000; +const FARCASTER_START_BLOCK: u64 = 108864739u64; struct CollectedLogs { - register: Vec, - transfer: Vec, - recovery: Vec, - change_recovery_address: Vec, - add: Vec, - remove: Vec, - admin_reset: Vec, - migrated: Vec, - rent: Vec, - set_max_units: Vec, - deprecation_timestamp: Vec, + id_registry: Vec, + key_registry: Vec, + storage_registry: Vec, } pub struct Indexer { store: Store, + state: Arc>, provider: Provider, chain_id: u32, id_registry: id_registry::Contract, @@ -44,6 +41,7 @@ pub struct Indexer { impl Indexer { pub fn new( store: Store, + state: Arc>, provider: Provider, chain_id: u32, id_reg_address: String, @@ -69,6 +67,7 @@ impl Indexer { Ok(Indexer { store, + state, provider, id_registry, key_registry, @@ -79,14 +78,12 @@ impl Indexer { } pub async fn get_start_block(&self) -> u64 { - let max_block_num = db::ChainEventRow::max_block_number(&self.store) - .await - .unwrap_or(FARCASTER_START_BLOCK as i64); + let last_synced_block = self.state.lock().unwrap().last_synced_block; - if max_block_num == 0 { + if last_synced_block == 0 { FARCASTER_START_BLOCK } else { - max_block_num as u64 + 1 + last_synced_block + 1 } } @@ -95,69 +92,28 @@ impl Indexer { Ok(latest_block.number.unwrap().as_u64()) } - async fn collect_logs( + async fn get_all_logs( &self, start_block: u64, end_block: u64, ) -> Result> { - let register_future = self.id_registry.get_register_logs(start_block, end_block); - let transfer_future = self.id_registry.get_transfer_logs(start_block, end_block); - let recovery_future = self.id_registry.get_recovery_logs(start_block, end_block); - let change_recovery_address_future = self + let id_registry_logs = self .id_registry - .get_change_recovery_address_logs(start_block, end_block); - let add_future = self.key_registry.get_add_logs(start_block, end_block); - let remove_future = self.key_registry.get_remove_logs(start_block, end_block); - let admin_reset_future = self + .get_id_registry_logs(start_block, end_block); + let key_registry_logs = self .key_registry - .get_admin_reset_logs(start_block, end_block); - let migrated_future = self.key_registry.get_migrated_logs(start_block, end_block); - let rent_future = self.storage_registry.get_rent_logs(start_block, end_block); - let set_max_units_future = self - .storage_registry - .get_set_max_units_logs(start_block, end_block); - let deprecation_timestamp_future = self + .get_key_registry_logs(start_block, end_block); + let storage_registry_logs = self .storage_registry - .get_deprecation_timestamp_logs(start_block, end_block); - - let ( - register_logs, - transfer_logs, - recovery_logs, - change_recovery_address_logs, - add_logs, - remove_logs, - admin_reset_logs, - migrated_logs, - rent_logs, - set_max_units_logs, - deprecation_timestamp_logs, - ) = tokio::try_join!( - register_future, - transfer_future, - recovery_future, - change_recovery_address_future, - add_future, - remove_future, - admin_reset_future, - migrated_future, - rent_future, - set_max_units_future, - deprecation_timestamp_future - )?; + .get_storage_registry_logs(start_block, end_block); + + let (id_registry_logs, key_registry_logs, storage_registry_logs) = + tokio::try_join!(id_registry_logs, key_registry_logs, storage_registry_logs)?; Ok(CollectedLogs { - register: register_logs, - transfer: transfer_logs, - recovery: 
recovery_logs, - change_recovery_address: change_recovery_address_logs, - add: add_logs, - remove: remove_logs, - admin_reset: admin_reset_logs, - migrated: migrated_logs, - rent: rent_logs, - set_max_units: set_max_units_logs, - deprecation_timestamp: deprecation_timestamp_logs, + id_registry: id_registry_logs, + key_registry: key_registry_logs, + storage_registry: storage_registry_logs, }) } @@ -182,166 +138,11 @@ impl Indexer { (events, timestamps) } - async fn sync_register_logs(&mut self, register_logs: Vec) -> Result<(), Box> { - let (register_logs, timestamps) = self.fetch_event_timestamps(register_logs).await; - - self.id_registry - .persist_many_register_logs(&self.store, register_logs, self.chain_id, ×tamps) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_transfer_logs(&mut self, transfer_logs: Vec) -> Result<(), Box> { - let (transfer_logs, timestamps) = self.fetch_event_timestamps(transfer_logs).await; - - self.id_registry - .persist_many_transfer_logs(&self.store, transfer_logs, self.chain_id, ×tamps) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_recovery_logs(&mut self, recovery_logs: Vec) -> Result<(), Box> { - let (recovery_logs, timestamps) = self.fetch_event_timestamps(recovery_logs).await; - - self.id_registry - .persist_many_recovery_logs(&self.store, recovery_logs, self.chain_id, ×tamps) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_change_recovery_address_logs( - &mut self, - change_recovery_address_logs: Vec, - ) -> Result<(), Box> { - let (change_recovery_address_logs, timestamps) = self - .fetch_event_timestamps(change_recovery_address_logs) - .await; - - self.id_registry - .persist_many_change_recovery_address_logs( - &self.store, - change_recovery_address_logs, - self.chain_id, - ×tamps, - ) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_add_logs(&mut self, add_logs: Vec) -> Result<(), Box> { - let (add_logs, timestamps) = self.fetch_event_timestamps(add_logs).await; - - self.key_registry - .persist_many_add_logs(&self.store, add_logs, self.chain_id, ×tamps) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_remove_logs(&mut self, remove_logs: Vec) -> Result<(), Box> { - let (remove_logs, timestamps) = self.fetch_event_timestamps(remove_logs).await; - - self.key_registry - .persist_many_remove_logs(&self.store, remove_logs, self.chain_id, ×tamps) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_admin_reset_logs( - &mut self, - admin_reset_logs: Vec, - ) -> Result<(), Box> { - let (admin_reset_logs, timestamps) = self.fetch_event_timestamps(admin_reset_logs).await; - - self.key_registry - .persist_many_admin_reset_logs( - &self.store, - admin_reset_logs, - self.chain_id, - ×tamps, - ) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_migrated_logs(&mut self, migrated_logs: Vec) -> Result<(), Box> { - let (migrated_logs, timestamps) = self.fetch_event_timestamps(migrated_logs).await; - - self.key_registry - .persist_many_migrated_logs(&self.store, migrated_logs, self.chain_id, ×tamps) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_rent_logs(&mut self, rent_logs: Vec) -> Result<(), Box> { - let (rent_logs, timestamps) = self.fetch_event_timestamps(rent_logs).await; - - self.storage_registry - .persist_many_rent_logs(&self.store, rent_logs, self.chain_id, ×tamps) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_set_max_units_logs( - &mut self, - set_max_units_logs: Vec, - ) -> Result<(), Box> { - let (set_max_units_logs, timestamps) = - self.fetch_event_timestamps(set_max_units_logs).await; - - 
self.storage_registry - .persist_many_set_max_units_logs( - &self.store, - set_max_units_logs, - self.chain_id, - ×tamps, - ) - .await - .unwrap(); - - Ok(()) - } - - async fn sync_deprecation_timestamp_logs( - &mut self, - deprecation_logs: Vec, - ) -> Result<(), Box> { - let (deprecation_logs, timestamps) = self.fetch_event_timestamps(deprecation_logs).await; - - self.storage_registry - .persist_many_deprecation_timestamp_logs( - &self.store, - deprecation_logs, - self.chain_id, - ×tamps, - ) - .await - .unwrap(); - - Ok(()) - } - pub async fn subscribe( &mut self, start_block: u64, interval_in_secs: u64, + sync_block_range_size: u64, ) -> Result<(), Box> { let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(interval_in_secs)); @@ -349,91 +150,131 @@ impl Indexer { loop { interval.tick().await; let latest_block = self.get_latest_block().await?; - self.sync(current_block, latest_block).await.unwrap(); + self.sync(current_block, latest_block, sync_block_range_size) + .await + .unwrap(); current_block = latest_block + 1; } } - pub async fn sync(&mut self, start_block: u64, end_block: u64) -> Result<(), Box> { + pub async fn sync( + &mut self, + start_block: u64, + end_block: u64, + sync_block_range_size: u64, + ) -> Result<(), Box> { let mut current_block = start_block; - let start_time = std::time::Instant::now(); + let pb = ProgressBar::new(end_block - FARCASTER_START_BLOCK); + pb.set_style( + ProgressStyle::with_template( + "Syncing Blocks: [{elapsed_precise}] [{percent_precise}%] [{wide_bar:.cyan/blue}] {msg} (ETA: {eta_precise})", + ) + .unwrap() + .progress_chars("#>-"), + ); - // fetch logs in range [current_block_num, current_block_num + 2000] while current_block <= end_block { - let percent_complete = (current_block - FARCASTER_START_BLOCK) as f64 - / (end_block - FARCASTER_START_BLOCK) as f64; - - let bar_width = 20i32; - let progress = (percent_complete * bar_width as f64).round() as i32; - let bar: String = "=".repeat(progress.min(bar_width - 1) as usize) - + ">" - + &" ".repeat((bar_width - progress - 1).max(0) as usize); - - let elapsed_time = start_time.elapsed().as_secs(); - let rate_of_progress = (current_block - start_block) as f64 / elapsed_time as f64; // blocks per second - let total_blocks = (end_block - start_block) as f64; - let estimated_total_time = total_blocks / rate_of_progress; // total estimated time in seconds - let time_remaining = estimated_total_time - elapsed_time as f64; // remaining time in seconds - - log::info!( - "Syncing [{}] {:.2}% | ~{:.2} seconds remaining", - bar, - percent_complete * 100.0, - time_remaining - ); + pb.set_position(current_block - FARCASTER_START_BLOCK); + pb.set_message(format!("{}/{}", current_block, end_block)); let start = current_block; - let end = current_block + BLOCK_INTERVAL; - - let collected_logs = self.collect_logs(start, end).await?; - - // id registry logs - if collected_logs.register.len() > 0 { - self.sync_register_logs(collected_logs.register).await?; - } - if collected_logs.transfer.len() > 0 { - self.sync_transfer_logs(collected_logs.transfer).await?; - } - if collected_logs.recovery.len() > 0 { - self.sync_recovery_logs(collected_logs.recovery).await?; - } - if collected_logs.change_recovery_address.len() > 0 { - self.sync_change_recovery_address_logs(collected_logs.change_recovery_address) - .await?; + let end = current_block + min(sync_block_range_size, end_block - current_block); + + let collected_logs = self.get_all_logs(start, end).await?; + + if collected_logs.storage_registry.len() 
> 0 { + debug!( + "Found {} logs from the storage registry for current block range", + collected_logs.storage_registry.len() + ); + + let (storage_registry_logs, timestamps) = self + .fetch_event_timestamps(collected_logs.storage_registry) + .await; + + let result = self + .storage_registry + .process_storage_registry_logs( + &self.store, + storage_registry_logs, + timestamps, + self.chain_id, + ) + .await; + + match result { + Ok(_) => {} + Err(e) => { + error!("Error processing storage registry logs: {}", e); + } + } } - // key registry logs - if collected_logs.add.len() > 0 { - self.sync_add_logs(collected_logs.add).await?; - } - if collected_logs.remove.len() > 0 { - self.sync_remove_logs(collected_logs.remove).await?; - } - if collected_logs.admin_reset.len() > 0 { - self.sync_admin_reset_logs(collected_logs.admin_reset) - .await?; - } - if collected_logs.migrated.len() > 0 { - self.sync_migrated_logs(collected_logs.migrated).await?; + if collected_logs.id_registry.len() > 0 { + debug!( + "Found {} logs from the id registry for current block range", + collected_logs.id_registry.len() + ); + + let (id_registry_logs, timestamps) = self + .fetch_event_timestamps(collected_logs.id_registry) + .await; + + let result = self + .id_registry + .process_id_registry_logs( + &self.store, + id_registry_logs, + timestamps, + self.chain_id, + ) + .await; + + match result { + Ok(_) => {} + Err(e) => { + error!("Error processing id registry logs: {}", e); + } + } } - // storage registry logs - if collected_logs.rent.len() > 0 { - self.sync_rent_logs(collected_logs.rent).await?; - } - if collected_logs.set_max_units.len() > 0 { - self.sync_set_max_units_logs(collected_logs.set_max_units) - .await?; - } - if collected_logs.deprecation_timestamp.len() > 0 { - self.sync_deprecation_timestamp_logs(collected_logs.deprecation_timestamp) - .await?; + if collected_logs.key_registry.len() > 0 { + debug!( + "Found {} logs from the key registry for current block range", + collected_logs.key_registry.len() + ); + + let (key_registry_logs, timestamps) = self + .fetch_event_timestamps(collected_logs.key_registry) + .await; + + let result = self + .key_registry + .process_key_registry_logs( + &self.store, + key_registry_logs, + timestamps, + self.chain_id, + ) + .await; + + match result { + Ok(_) => {} + Err(e) => { + error!("Error processing key registry logs: {}", e); + } + } } self.block_timestamp_cache.clear(); + + self.state.lock().unwrap().last_synced_block = end; + self.state.lock().unwrap().store(); + current_block = end + 1; } + pb.finish_with_message("Synced!"); Ok(()) } } diff --git a/lib/eth/src/key_registry.rs b/lib/eth/src/key_registry.rs index 7c4ec29..42d777e 100644 --- a/lib/eth/src/key_registry.rs +++ b/lib/eth/src/key_registry.rs @@ -5,7 +5,7 @@ use ethers::{ providers::{JsonRpcClient, Provider}, types::{Address, Bytes, Filter, Log, H256, U256}, }; -use log; +use log::{self, error, info, warn}; use serde::{Deserialize, Serialize}; use serde_json::{self}; use sqlx::Acquire; @@ -15,10 +15,13 @@ use teleport_common::protobufs::generated::{ on_chain_event, OnChainEvent, OnChainEventType, SignerEventBody, SignerEventType, SignerMigratedEventBody, }; -use teleport_storage::db::{self}; +use teleport_storage::db::{self, ChainEventRow, SignerRemoved, SignerRow}; use teleport_storage::Store; +// Overriding the `signature` is required here due to a bug in ethers-rs that happens if we mention the `key` parameter as `Bytes` +// Since we have to use `H256` for `key`, the calculated signature doesn't match the 
actual signature for this event #[derive(Debug, Clone, EthEvent)] +#[ethevent(signature = "0x7d285df41058466977811345cd453c0c52e8d841ffaabc74fc050f277ad4de02")] #[allow(non_snake_case)] struct Add { #[ethevent(indexed)] @@ -26,12 +29,38 @@ struct Add { #[ethevent(indexed)] pub keyType: u32, #[ethevent(indexed)] - pub key: Bytes, + pub key: H256, pub keyBytes: Bytes, pub metadataType: u8, pub metadata: Bytes, } +// Overriding the `signature` is required here due to a bug in ethers-rs that happens if we mention the `key` parameter as `Bytes` +// Since we have to use `H256` for `key`, the calculated signature doesn't match the actual signature for this event +#[derive(Debug, Clone, EthEvent)] +#[ethevent(signature = "0x09e77066e0155f46785be12f6938a6b2e4be4381e59058129ce15f355cb96958")] +#[allow(non_snake_case)] +struct Remove { + #[ethevent(indexed)] + pub fid: U256, + #[ethevent(indexed)] + pub key: H256, + pub keyBytes: Bytes, +} + +// Overriding the `signature` is required here due to a bug in ethers-rs that happens if we mention the `key` parameter as `Bytes` +// Since we have to use `H256` for `key`, the calculated signature doesn't match the actual signature for this event +#[derive(Debug, Clone, EthEvent)] +#[ethevent(signature = "0x1ecc1009ebad5d2fb61239462f4f9f6f152662defe1845fc87f07d96bd1c60b4")] +#[allow(non_snake_case)] +struct AdminReset { + #[ethevent(indexed)] + pub fid: U256, + #[ethevent(indexed)] + pub key: H256, + pub keyBytes: Bytes, +} + #[derive(Debug, Clone, EthEvent)] #[allow(non_snake_case)] struct Migrated { @@ -46,7 +75,7 @@ pub struct Contract { } #[derive(Debug, Clone, Serialize, Deserialize)] -struct SignerRequestMetadata { +pub struct SignerRequestMetadata { pub request_fid: u64, pub request_signer: Vec, pub signature: Vec, @@ -74,7 +103,7 @@ impl Contract { }) } - pub async fn get_add_logs( + pub async fn get_key_registry_logs( &self, start_block: u64, end_block: u64, @@ -83,77 +112,169 @@ impl Contract { .address(self.inner.address()) .from_block(start_block) .to_block(end_block) - .topic0(get_signature_topic(ADD_SIGNER_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; + .topic0(vec![ + get_signature_topic(ADD_SIGNER_SIGNATURE), + get_signature_topic(REMOVE_SIGNER_SIGNATURE), + get_signature_topic(ADMIN_RESET_SIGNATURE), + get_signature_topic(MIGRATED_SIGNATURE), + ]); + + let all_logs = get_logs(self.provider.clone(), &filter).await?; - Ok(logs) + Ok(all_logs) } - fn decode_metadata(log: &Log) -> SignerRequestMetadata { - let metadata_abi = DynSolType::CustomStruct { - name: "metadata".to_string(), - prop_names: vec![ - "requestFid".to_string(), - "requestSigner".to_string(), - "signature".to_string(), - "deadline".to_string(), - ], - tuple: vec![ - DynSolType::Uint(256), - DynSolType::Address, - DynSolType::Bytes, - DynSolType::Uint(256), - ], - }; - let decoded = metadata_abi.abi_decode(&log.data[192..]).unwrap(); - let decoded_struct = decoded.as_custom_struct().unwrap(); - let values = decoded_struct.2; + pub async fn process_key_registry_logs( + &self, + store: &Store, + logs: Vec, + timestamps: Vec, + chain_id: u32, + ) -> Result<(), Box> { + let mut chain_events: Vec = vec![]; + let mut signers: Vec = vec![]; + let mut signer_removed_updates: Vec = vec![]; + + for (i, log) in logs.iter().enumerate() { + if log.block_hash.is_none() + || log.block_number.is_none() + || log.transaction_hash.is_none() + || log.transaction_index.is_none() + || log.log_index.is_none() + || log.removed.is_some_and(|removed| removed) + { + continue; + 
} - // extract fields from decoded struct - let (requester_fid, _) = values[0].as_uint().unwrap(); - let request_signer = values[1].as_address().unwrap(); - let signature = values[2].as_bytes().unwrap(); - let (deadline, _) = values[3].as_uint().unwrap(); + let timestamp = timestamps[i]; + + if log.topics[0] == get_signature_topic(ADD_SIGNER_SIGNATURE) { + let (chain_events_row, signer_row) = + match self.process_add_log(log, timestamp, chain_id) { + Ok((chain_events_row, signer_row)) => (chain_events_row, signer_row), + Err(e) => { + warn!("Failed to process Add log: {}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + signers.push(signer_row); + } else if log.topics[0] == get_signature_topic(REMOVE_SIGNER_SIGNATURE) { + let (chain_events_row, signer_removed_update) = + match self.process_remove_log(log, timestamp, chain_id) { + Ok((chain_events_row, signer_removed_update)) => { + (chain_events_row, signer_removed_update) + } + Err(e) => { + warn!("Failed to process Remove log: {}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + signer_removed_updates.push(signer_removed_update); + } else if log.topics[0] == get_signature_topic(ADMIN_RESET_SIGNATURE) { + let (chain_events_row, signer_removed_update) = + match self.process_admin_reset_log(log, timestamp, chain_id) { + Ok((chain_events_row, signer_removed_update)) => { + (chain_events_row, signer_removed_update) + } + Err(e) => { + warn!("Failed to process AdminReset log: {}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + signer_removed_updates.push(signer_removed_update); + } else if log.topics[0] == get_signature_topic(MIGRATED_SIGNATURE) { + let chain_events_row = match self.process_migrated_log(log, timestamp, chain_id) { + Ok(chain_events_row) => chain_events_row, + Err(e) => { + warn!("Failed to process Migrated log: {}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + } + } - // parse requester_fid as u64 - let requester_fid_int = requester_fid.to_string().parse::().unwrap(); + let mut connection = store.conn.acquire().await?; + let mut transaction = connection.begin().await?; - // parse deadline as u64 - let deadline_int = deadline.to_string().parse::().unwrap(); + let event_queries = db::ChainEventRow::generate_bulk_insert_queries(&chain_events)?; + for event_query_str in event_queries { + let event_query = sqlx::query(&event_query_str); + let event_query_result = event_query.execute(&mut *transaction).await; + match event_query_result { + Ok(_) => {} + Err(e) => { + error!( + "Failed to insert chain event row: {} {}", + e, &event_query_str + ); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } + } - SignerRequestMetadata { - request_fid: requester_fid_int, - request_signer: request_signer.to_vec(), - signature: signature.to_vec(), - deadline: deadline_int, + let signer_queries = db::SignerRow::generate_bulk_insert_queries(&signers)?; + for signer_query_str in signer_queries { + let signer_query = sqlx::query(&signer_query_str); + let signer_query_result = signer_query.execute(&mut *transaction).await; + match signer_query_result { + Ok(_) => {} + Err(e) => { + error!("Failed to insert signer row: {} {}", e, &signer_query_str); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } + } + + let signer_removed_queries = + db::SignerRow::generate_bulk_remove_signer_queries(&signer_removed_updates)?; + for signer_removed_query_str in signer_removed_queries { + let signer_removed_query = 
sqlx::query(&signer_removed_query_str); + let signer_removed_query_result = signer_removed_query.execute(&mut *transaction).await; + match signer_removed_query_result { + Ok(_) => {} + Err(e) => { + error!( + "Failed to mark signer row as removed: {} {}", + e, &signer_removed_query_str + ); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } } + + transaction.commit().await?; + + Ok(()) } - pub async fn process_add_log( + fn process_add_log( &self, log: &Log, - chain_id: u32, timestamp: u32, - ) -> Result<(db::SignerRow, db::ChainEventRow), Box> { - let fid = U256::from_big_endian(log.topics[1].as_bytes()).as_u64(); - let key_type = U256::from_big_endian(log.topics[2].as_bytes()).as_u32(); - - let key = H256::from_slice(&log.data[128..160]); // 160 - let key_bytes = key.as_bytes(); - - // validate that keyBytes is an EdDSA pub key and keyType == 1 - assert_eq!(key_bytes.len(), 32, "key is not 32 bytes long"); + chain_id: u32, + ) -> Result<(ChainEventRow, SignerRow), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse Add event args: {:?}", e).into()), + }; - let metadata_type = log.data[190]; // 190 - let signer_request = Contract::::decode_metadata(&log); - let metadata_json = serde_json::to_string(&signer_request).unwrap(); - let metadata = metadata_json.to_string().as_bytes().to_vec(); - let body = SignerEventBody { - key: key_bytes.to_vec(), - key_type, + let signer_event_body = SignerEventBody { event_type: SignerEventType::Add as i32, - metadata, - metadata_type: metadata_type as u32, + key: parsed_log.keyBytes.to_vec(), + key_type: parsed_log.keyType, + metadata: parsed_log.metadata.to_vec(), + metadata_type: parsed_log.metadataType as u32, }; let onchain_event = OnChainEvent { @@ -164,133 +285,47 @@ impl Contract { block_timestamp: timestamp as u64, transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), log_index: log.log_index.unwrap().as_u32(), - fid, - body: Some(on_chain_event::Body::SignerEventBody(body.clone())), + fid: parsed_log.fid.try_into()?, tx_index: log.transaction_index.unwrap().as_u32(), version: 2, + body: Some(on_chain_event::Body::SignerEventBody( + signer_event_body.clone(), + )), }; - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); + let signer_request = Contract::::decode_metadata(log); + let metadata_json = serde_json::to_string(&signer_request).unwrap(); - // prepare signer for db - let signer = db::SignerRow::new( - fid, + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let signer_row = db::SignerRow::new( + &signer_event_body, + &onchain_event, signer_request.request_fid, - event_row.id.clone(), - None, - key_type as i64, - metadata_type as i64, - key_bytes.to_vec(), - metadata_json.to_string(), + metadata_json, ); - Ok((signer, event_row)) + Ok((chain_events_row, signer_row)) } - pub async fn persist_add_log( + fn process_remove_log( &self, - store: &Store, log: &Log, - chain_id: u32, timestamp: u32, - ) -> Result<(), Box> { - let (signer_row, event_row) = self.process_add_log(log, chain_id, timestamp).await?; - event_row.insert(&store).await?; - let result = signer_row.insert(&store).await; - if let Err(sqlx::error::Error::Database(e)) = &result { - if e.is_unique_violation() { - log::warn!("signer already exists, skipping"); - } else { - result?; - } - } - - Ok(()) - } - - pub async fn persist_many_add_logs( - &self, - store: &Store, - logs: Vec, - chain_id: u32, - timestamp: 
&[u32], - ) -> Result<(), Box> { - let mut signer_rows = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamp.iter()) { - let (signer_row, event_row) = self.process_add_log(log, chain_id, *timestamp).await?; - signer_rows.push(signer_row); - event_rows.push(event_row); - } - - let mut connection = store.conn.acquire().await?; - let mut transaction = connection.begin().await?; - - let event_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in event_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - - let signer_queries = db::SignerRow::generate_bulk_insert_queries(&signer_rows)?; - for query in signer_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - - transaction.commit().await?; - - Ok(()) - } - - pub async fn get_remove_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(REMOVE_SIGNER_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - - Ok(logs) - } - - pub async fn process_remove_log( - &self, - store: &Store, - log: &Log, chain_id: u32, - timestamp: u32, - ) -> Result<(Vec, db::ChainEventRow), Box> { - let fid = U256::from_big_endian(log.topics[1].as_bytes()); - let key_hash = Address::from(log.topics[2]); - log::info!( - "got Remove log for key hash: {:? } in tx: {:?}", - key_hash, - log.transaction_hash - ); - - // last 32 bytes of data is the keyBytes - let key_bytes = log.data.chunks(32).last().unwrap(); - - // get signer from db - let (key_type, metadata) = db::SignerRow::get_by_key(&store, key_bytes.to_vec()).await?; - let body = SignerEventBody { - key: key_bytes.to_vec(), - key_type: key_type as u32, - event_type: SignerEventType::Remove.into(), - metadata: metadata.into_bytes(), - metadata_type: 1u32, + ) -> Result<(ChainEventRow, SignerRemoved), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse Remove event args: {:?}", e).into()), }; - // validate that keyType == 1 - assert_eq!(key_type, 1, "key type is not 1"); + let signer_event_body = SignerEventBody { + event_type: SignerEventType::Remove as i32, + key: parsed_log.keyBytes.to_vec(), + key_type: 0, + metadata: vec![], + metadata_type: 0, + }; - // store event in db let onchain_event = OnChainEvent { r#type: OnChainEventType::EventTypeSigner as i32, chain_id, @@ -299,116 +334,41 @@ impl Contract { block_timestamp: timestamp as u64, transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), log_index: log.log_index.unwrap().as_u32(), - fid: fid.as_u64(), - body: Some(on_chain_event::Body::SignerEventBody(body)), + fid: parsed_log.fid.try_into()?, tx_index: log.transaction_index.unwrap().as_u32(), version: 2, + body: Some(on_chain_event::Body::SignerEventBody(signer_event_body)), }; - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); - Ok((key_bytes.to_vec(), event_row)) - } - - /// Hubs listen for this, validate that keyType == 1 and keyBytes exists in db. - /// keyBytes is marked as removed, messages signed by keyBytes with `fid` are invalid (todo). 
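
// A minimal sketch of the bulk "mark signer removed" update that the doc comment above
// implies under the new schema in this change (signers keyed by fid/key, with
// removed_at, remove_transaction_hash and remove_log_index columns). The exact SQL that
// generate_bulk_remove_signer_queries emits is an assumption here, not taken from this diff.
fn remove_signer_update_sql(
    fid: u64,
    key_hex: &str,
    remove_tx_hash_hex: &str,
    remove_log_index: u32,
    removed_at: u64,
) -> String {
    format!(
        "UPDATE signers \
         SET removed_at = {removed_at}, \
             remove_transaction_hash = X'{remove_tx_hash_hex}', \
             remove_log_index = {remove_log_index} \
         WHERE fid = {fid} AND key = X'{key_hex}';"
    )
}
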
- pub async fn persist_remove_log( - &self, - store: &Store, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result<(), Box> { - let (key_bytes, event_row) = self - .process_remove_log(store, log, chain_id, timestamp) - .await?; - - event_row.insert(store).await?; - db::SignerRow::update_remove_event(&store, key_bytes.to_vec(), event_row.id).await?; - - Ok(()) - } - - pub async fn persist_many_remove_logs( - &self, - store: &Store, - logs: Vec, - chain_id: u32, - timestamp: &[u32], - ) -> Result<(), Box> { - let mut updates: Vec<(Vec, String)> = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamp.iter()) { - let (key_bytes, event_row) = self - .process_remove_log(store, log, chain_id, *timestamp) - .await?; - updates.push((key_bytes, event_row.id.clone())); - event_rows.push(event_row); - } - - let mut connection = store.conn.acquire().await?; - let mut transaction = connection.begin().await?; - - let insert_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in insert_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - - let update_queries = db::SignerRow::generate_bulk_remove_update_queries(&updates)?; - for query in update_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - - transaction.commit().await?; - - Ok(()) - } - - pub async fn get_admin_reset_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(ADMIN_RESET_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let signer_removed = db::SignerRemoved { + fid: parsed_log.fid.try_into()?, + key: parsed_log.keyBytes.to_vec(), + remove_transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), + remove_log_index: log.log_index.unwrap().as_u32(), + removed_at: onchain_event.block_timestamp * 1000, + }; - Ok(logs) + Ok((chain_events_row, signer_removed)) } - pub async fn process_admin_reset_log( + fn process_admin_reset_log( &self, - store: &Store, log: &Log, - chain_id: u32, timestamp: u32, - ) -> Result<(Vec, db::ChainEventRow), Box> { - let fid = U256::from_big_endian(log.topics[1].as_bytes()); - let key_hash = Address::from(log.topics[2]); - log::info!( - "got Admin Reset log for key hash: {:? 
} in tx: {:?}", - key_hash, - log.transaction_hash - ); - - // last 32 bytes of data is the keyBytes - let key_bytes = log.data.chunks(32).last().unwrap(); - - // get signer from db - let (key_type, metadata) = db::SignerRow::get_by_key(&store, key_bytes.to_vec()).await?; - assert_eq!(key_type, 1, "key type is not 1"); + chain_id: u32, + ) -> Result<(ChainEventRow, SignerRemoved), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse AdminReset event args: {:?}", e).into()), + }; - let body = SignerEventBody { - key: key_bytes.to_vec(), - key_type: key_type as u32, - event_type: SignerEventType::AdminReset.into(), - metadata: metadata.into_bytes(), - metadata_type: 1u32, + let signer_event_body = SignerEventBody { + event_type: SignerEventType::AdminReset as i32, + key: parsed_log.keyBytes.to_vec(), + key_type: 0, + metadata: vec![], + metadata_type: 0, }; let onchain_event = OnChainEvent { @@ -419,103 +379,35 @@ impl Contract { block_timestamp: timestamp as u64, transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), log_index: log.log_index.unwrap().as_u32(), - fid: fid.as_u64(), - body: Some(on_chain_event::Body::SignerEventBody(body)), + fid: parsed_log.fid.try_into()?, tx_index: log.transaction_index.unwrap().as_u32(), version: 2, + body: Some(on_chain_event::Body::SignerEventBody(signer_event_body)), }; - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let signer_removed = db::SignerRemoved { + fid: parsed_log.fid.try_into()?, + key: parsed_log.keyBytes.to_vec(), + remove_transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), + remove_log_index: log.log_index.unwrap().as_u32(), + removed_at: onchain_event.block_timestamp * 1000, + }; - Ok((key_bytes.to_vec(), event_row)) + Ok((chain_events_row, signer_removed)) } - // validate that keyType == 1 and that keyBytes exists in db. - // these keyBytes is no longer tracked, messages signed by keyBytes with `fid` are invalid, - // dropped immediately and not accepted (todo) - pub async fn persist_admin_reset_log( + fn process_migrated_log( &self, - store: &Store, log: &Log, - chain_id: u32, timestamp: u32, - ) -> Result<(), Box> { - let (_, event_row) = self - .process_admin_reset_log(store, log, chain_id, timestamp) - .await?; - event_row.insert(&store).await?; - - // TODO: invalidate keyBytes and messages signed by these keyBytes - - Ok(()) - } - - // validate that keyType == 1 and that keyBytes exists in db. 
- // these keyBytes is no longer tracked, messages signed by keyBytes with `fid` are invalid, - // dropped immediately and not accepted (todo) - pub async fn persist_many_admin_reset_logs( - &self, - store: &Store, - logs: Vec, chain_id: u32, - timestamp: &[u32], - ) -> Result<(), Box> { - let mut updates: Vec<(Vec, String)> = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamp.iter()) { - let (key_bytes, event_row) = self - .process_admin_reset_log(store, log, chain_id, *timestamp) - .await?; - updates.push((key_bytes, event_row.id.clone())); - event_rows.push(event_row); - } - - let mut connection = store.conn.acquire().await?; - let mut transaction = connection.begin().await?; - - let event_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in event_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - - // TODO: invalidate keyBytes and messages signed by these keyBytes - - transaction.commit().await?; - - Ok(()) - } - - pub async fn get_migrated_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(MIGRATED_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - - Ok(logs) - } - - pub async fn process_migrated_log( - &self, - _store: &Store, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result> { - let parsed_log: Migrated = parse_log(log.clone()).unwrap(); - let body = SignerMigratedEventBody { - migrated_at: parsed_log.keysMigratedAt.as_u64() as u32, + ) -> Result> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse Migrated event args: {:?}", e).into()), }; - log::info!("got Migrated log in tx: {:?}", log.transaction_hash); - let onchain_event = OnChainEvent { r#type: OnChainEventType::EventTypeSignerMigrated as i32, chain_id, @@ -524,74 +416,58 @@ impl Contract { block_timestamp: timestamp as u64, transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), log_index: log.log_index.unwrap().as_u32(), - fid: 0, - body: Some(on_chain_event::Body::SignerMigratedEventBody(body)), + fid: 0u64, tx_index: log.transaction_index.unwrap().as_u32(), version: 2, + body: Some(on_chain_event::Body::SignerMigratedEventBody( + SignerMigratedEventBody { + migrated_at: parsed_log.keysMigratedAt.as_u32(), + }, + )), }; - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); - Ok(event_row) + Ok(chain_events_row) } - pub async fn persist_migrated_log( - &self, - store: &Store, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result<(), Box> { - let event_row = self - .process_migrated_log(store, log, chain_id, timestamp) - .await?; - - event_row.insert(&store).await?; - - /* - TODO - 1. Stop accepting Farcaster Signer messages with a timestamp >= keysMigratedAt. - 2. After the grace period (24 hours), stop accepting all Farcaster Signer messages. - 3. Drop any messages created by off-chain Farcaster Signers whose pub key was not emitted as an Add event. 
- */ - - Ok(()) - } + fn decode_metadata(log: &Log) -> SignerRequestMetadata { + let metadata_abi = DynSolType::CustomStruct { + name: "metadata".to_string(), + prop_names: vec![ + "requestFid".to_string(), + "requestSigner".to_string(), + "signature".to_string(), + "deadline".to_string(), + ], + tuple: vec![ + DynSolType::Uint(256), + DynSolType::Address, + DynSolType::Bytes, + DynSolType::Uint(256), + ], + }; + let decoded = metadata_abi.abi_decode(&log.data[192..]).unwrap(); + let decoded_struct = decoded.as_custom_struct().unwrap(); + let values = decoded_struct.2; - pub async fn persist_many_migrated_logs( - &self, - store: &Store, - logs: Vec, - chain_id: u32, - timestamp: &[u32], - ) -> Result<(), Box> { - let mut event_rows = Vec::new(); + // extract fields from decoded struct + let (requester_fid, _) = values[0].as_uint().unwrap(); + let request_signer = values[1].as_address().unwrap(); + let signature = values[2].as_bytes().unwrap(); + let (deadline, _) = values[3].as_uint().unwrap(); - for (log, timestamp) in logs.iter().zip(timestamp.iter()) { - let event_row = self - .process_migrated_log(store, log, chain_id, *timestamp) - .await?; - event_rows.push(event_row); - } + // parse requester_fid as u64 + let requester_fid_int = requester_fid.to_string().parse::().unwrap(); - let mut connection = store.conn.acquire().await?; - let mut transaction = connection.begin().await?; + // parse deadline as u64 + let deadline_int = deadline.to_string().parse::().unwrap(); - let event_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in event_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; + SignerRequestMetadata { + request_fid: requester_fid_int, + request_signer: request_signer.to_vec(), + signature: signature.to_vec(), + deadline: deadline_int, } - - /* - TODO - 1. Stop accepting Farcaster Signer messages with a timestamp >= keysMigratedAt. - 2. After the grace period (24 hours), stop accepting all Farcaster Signer messages. - 3. Drop any messages created by off-chain Farcaster Signers whose pub key was not emitted as an Add event. 
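
// A minimal sketch of the acceptance rule described in the TODO above, assuming
// `keys_migrated_at`, `message_timestamp` and `now` share the same epoch and are in
// seconds; the function name and exact boundary conditions are assumptions for
// illustration only.
const SIGNER_MIGRATION_GRACE_PERIOD_SECS: u64 = 24 * 60 * 60;

fn accept_offchain_signer_message(message_timestamp: u64, keys_migrated_at: u64, now: u64) -> bool {
    // 1. Reject signer messages stamped at or after the migration point.
    if message_timestamp >= keys_migrated_at {
        return false;
    }
    // 2. After the 24-hour grace period, reject all off-chain signer messages.
    now < keys_migrated_at + SIGNER_MIGRATION_GRACE_PERIOD_SECS
}
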
- */ - - transaction.commit().await?; - - Ok(()) } } diff --git a/lib/eth/src/storage_registry.rs b/lib/eth/src/storage_registry.rs index b16a794..ee5b9fa 100644 --- a/lib/eth/src/storage_registry.rs +++ b/lib/eth/src/storage_registry.rs @@ -4,18 +4,19 @@ use ethers::{ providers::{JsonRpcClient, Provider}, types::{Address, Filter, Log, U256}, }; +use log::{error, info, warn}; use sqlx::Acquire; -use std::error::Error; use std::sync::Arc; -use teleport_common::protobufs::generated::{ - on_chain_event, OnChainEvent, OnChainEventType, StorageRentEventBody, +use std::{error::Error, iter::zip}; +use teleport_common::{ + protobufs::generated::{on_chain_event, OnChainEvent, OnChainEventType, StorageRentEventBody}, + time::block_timestamp_to_farcaster_time, }; -use teleport_storage::db::{self}; +use teleport_storage::db::{self, ChainEventRow, StorageAllocationRow}; use teleport_storage::Store; pub const RENT_SIGNATURE: &str = "Rent(address,uint256,uint256)"; -pub const SET_MAX_UNITS_SIGNATURE: &str = "SetMaxUnits(uint256,uint256)"; -pub const SET_DEPRECATION_TIMESTAMP_SIGNATURE: &str = "SetDeprecationTimestamp(uint256,uint256)"; +const RENT_EXPIRY_IN_SECONDS: u32 = 365 * 24 * 60 * 60; // One year #[derive(Debug, Clone, EthEvent)] struct Rent { @@ -26,20 +27,6 @@ struct Rent { pub units: U256, } -#[derive(Debug, Clone, EthEvent)] -#[allow(non_snake_case)] -struct SetMaxUnits { - pub oldMax: U256, - pub newMax: U256, -} - -#[derive(Debug, Clone, EthEvent)] -#[allow(non_snake_case)] -struct SetDeprecationTimestamp { - pub oldTimestamp: U256, - pub newTimestamp: U256, -} - #[derive(Debug, Clone)] pub struct Contract { provider: Provider, @@ -62,7 +49,7 @@ impl Contract { }) } - pub async fn get_rent_logs( + pub async fn get_storage_registry_logs( &self, start_block: u64, end_block: u64, @@ -72,97 +59,87 @@ impl Contract { .from_block(start_block) .to_block(end_block) .topic0(get_signature_topic(RENT_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - - Ok(logs) - } - pub async fn process_rent_log( - &self, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result<(db::StorageAllocationRow, db::ChainEventRow), Box> { - let parsed_log: Rent = parse_log(log.clone()).unwrap(); - let units = parsed_log.units.as_u32(); - let expiry = parsed_log.units.as_u32() + 395 * 24 * 60 * 60; - let fid = parsed_log.fid.as_u64(); - let payer = parsed_log.payer.as_bytes().to_vec(); - - let body = StorageRentEventBody { - payer: parsed_log.payer.as_bytes().to_vec(), - units, - expiry, - }; + let all_logs = get_logs(self.provider.clone(), &filter).await?; - let onchain_event = OnChainEvent { - r#type: OnChainEventType::EventTypeStorageRent as i32, - chain_id, - block_number: log.block_number.unwrap().as_u32(), - block_hash: log.block_hash.unwrap().to_fixed_bytes().to_vec(), - block_timestamp: timestamp as u64, - transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), - log_index: log.log_index.unwrap().as_u32(), - fid, - body: Some(on_chain_event::Body::StorageRentEventBody(body)), - tx_index: log.transaction_index.unwrap().as_u32(), - version: 2, - }; - - let event_row = db::ChainEventRow::new(onchain_event, log.data.to_vec()); - let storage_allocation = - db::StorageAllocationRow::new(0, expiry, event_row.id.clone(), fid, units, payer); - - Ok((storage_allocation, event_row)) + Ok(all_logs) } - pub async fn persist_rent_log( - &self, - store: &Store, - log: &Log, - chain_id: u32, - timestamp: u32, - ) -> Result<(), Box> { - let (storage_allocation, event_row) = - 
self.process_rent_log(log, chain_id, timestamp).await?; - - event_row.insert(&store).await?; - storage_allocation.insert(&store).await?; - - Ok(()) - } - - pub async fn persist_many_rent_logs( + pub async fn process_storage_registry_logs( &self, store: &Store, logs: Vec, + timestamps: Vec, chain_id: u32, - timestamps: &[u32], ) -> Result<(), Box> { - let mut storage_allocations = Vec::new(); - let mut event_rows = Vec::new(); - - for (log, timestamp) in logs.iter().zip(timestamps.iter()) { - let (storage_allocation, event_row) = - self.process_rent_log(log, chain_id, *timestamp).await?; - storage_allocations.push(storage_allocation); - event_rows.push(event_row); + let mut chain_events: Vec = vec![]; + let mut storage_allocations: Vec = vec![]; + + for (i, log) in logs.iter().enumerate() { + if log.block_hash.is_none() + || log.block_number.is_none() + || log.transaction_hash.is_none() + || log.transaction_index.is_none() + || log.log_index.is_none() + || log.removed.is_some_and(|removed| removed) + { + continue; + } + + let timestamp = timestamps[i]; + + if log.topics[0] == get_signature_topic(RENT_SIGNATURE) { + let (chain_events_row, storage_allocations_row) = + match self.process_rent_log(log, timestamp, chain_id) { + Ok((chain_events_row, storage_allocations_row)) => { + (chain_events_row, storage_allocations_row) + } + Err(e) => { + warn!("Failed to process Rent log: {:?}", e); + continue; + } + }; + + chain_events.push(chain_events_row); + storage_allocations.push(storage_allocations_row); + } } let mut connection = store.conn.acquire().await?; let mut transaction = connection.begin().await?; - let event_queries = db::ChainEventRow::generate_bulk_insert_queries(&event_rows)?; - for query in event_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; - } - + let event_queries = db::ChainEventRow::generate_bulk_insert_queries(&chain_events)?; let allocation_queries = db::StorageAllocationRow::generate_bulk_insert_queries(&storage_allocations)?; - for query in allocation_queries { - let query = sqlx::query(&query); - query.execute(&mut *transaction).await?; + + for (event_query_str, allocation_query_str) in zip(event_queries, allocation_queries) { + let event_query = sqlx::query(&event_query_str); + let event_query_result = event_query.execute(&mut *transaction).await; + match event_query_result { + Ok(_) => {} + Err(e) => { + error!( + "Failed to insert chain event row: {} {}", + e, &event_query_str + ); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } + + let allocation_query = sqlx::query(&allocation_query_str); + let allocation_query_result = allocation_query.execute(&mut *transaction).await; + match allocation_query_result { + Ok(_) => {} + Err(e) => { + error!( + "Failed to insert storage allocation row: {}\n {}\n {}", + e, &event_query_str, &allocation_query_str + ); + transaction.rollback().await?; + return Err(Box::new(e)); + } + } } transaction.commit().await?; @@ -170,117 +147,60 @@ impl Contract { Ok(()) } - pub async fn get_set_max_units_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(SET_MAX_UNITS_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - - Ok(logs) - } - - pub async fn process_set_max_units_log( - &self, - log: &Log, - _chain_id: u32, - _timestamp: u32, - ) -> Result<(), Box> { - let parsed_log: SetMaxUnits = 
parse_log(log.clone()).unwrap(); - let _old_max = parsed_log.oldMax.as_u32(); - let _new_max = parsed_log.newMax.as_u32(); - - // TODO: Return proper rows to be stored - - Ok(()) - } - - pub async fn persist_set_max_units_log( + fn process_rent_log( &self, - _store: &Store, log: &Log, - chain_id: u32, timestamp: u32, - ) -> Result<(), Box> { - self.process_set_max_units_log(log, chain_id, timestamp); - - Ok(()) - } - - pub async fn persist_many_set_max_units_logs( - &self, - _store: &Store, - logs: Vec, chain_id: u32, - timestamps: &[u32], - ) -> Result<(), Box> { - for (log, timestamp) in logs.iter().zip(timestamps.iter()) { - self.process_set_max_units_log(log, chain_id, *timestamp) - .await?; - } - - // TODO: Store resulting rows in the database - - Ok(()) - } - - pub async fn get_deprecation_timestamp_logs( - &self, - start_block: u64, - end_block: u64, - ) -> Result, Box> { - let filter = Filter::new() - .address(self.inner.address()) - .from_block(start_block) - .to_block(end_block) - .topic0(get_signature_topic(SET_DEPRECATION_TIMESTAMP_SIGNATURE)); - let logs = get_logs(self.provider.clone(), &filter).await?; - - Ok(logs) - } - - async fn process_deprecation_timestamp_log(&self, log: &Log) -> Result<(), Box> { - let parsed_log: SetDeprecationTimestamp = parse_log(log.clone()).unwrap(); - let _old_timestamp = parsed_log.oldTimestamp.as_u32(); - let _new_timestamp = parsed_log.newTimestamp.as_u32(); - - // TODO: Return proper rows to be stored - - Ok(()) - } - - pub async fn persist_deprecation_timestamp_log( - &self, - _store: &Store, - log: &Log, - _chain_id: u32, - _timestamp: i64, - ) -> Result<(), Box> { - self.process_deprecation_timestamp_log(log).await?; - - // TODO: store deprecation timestamp in db + ) -> Result<(ChainEventRow, StorageAllocationRow), Box> { + let parsed_log = match parse_log::(log.clone()) { + Ok(parsed_log) => parsed_log, + Err(e) => return Err(format!("Failed to parse Rent event args: {:?}", e).into()), + }; - Ok(()) - } + let timestamp_as_fc_time = match block_timestamp_to_farcaster_time(timestamp) { + Ok(timestamp_as_fc_time) => timestamp_as_fc_time, + Err(e) => { + return Err( + format!("Failed to parse block timestamp: {:?} {:?}", timestamp, e).into(), + ) + } + }; - pub async fn persist_many_deprecation_timestamp_logs( - &self, - _store: &Store, - logs: Vec, - _chain_id: u32, - _timestamps: &[u32], - ) -> Result<(), Box> { - for log in logs { - self.process_deprecation_timestamp_log(&log).await?; - } + let expiry = timestamp_as_fc_time + RENT_EXPIRY_IN_SECONDS; + let storage_rent_event_body = StorageRentEventBody { + payer: parsed_log.payer.as_bytes().to_vec(), + units: parsed_log.units.as_u32(), + expiry, + }; - // TODO: store deprecation timestamps in db + let onchain_event = OnChainEvent { + r#type: OnChainEventType::EventTypeStorageRent as i32, + chain_id, + block_number: log.block_number.unwrap().as_u32(), + block_hash: log.block_hash.unwrap().to_fixed_bytes().to_vec(), + block_timestamp: timestamp as u64, + transaction_hash: log.transaction_hash.unwrap().as_bytes().to_vec(), + log_index: log.log_index.unwrap().as_u32(), + fid: parsed_log.fid.try_into()?, + tx_index: log.transaction_index.unwrap().as_u32(), + version: 2, + body: Some(on_chain_event::Body::StorageRentEventBody( + storage_rent_event_body, + )), + }; - Ok(()) + let chain_events_row = db::ChainEventRow::new(&onchain_event, log.data.to_vec()); + let storage_allocations_row = db::StorageAllocationRow::new( + timestamp_as_fc_time.into(), + expiry, + 
log.transaction_hash.unwrap().as_bytes().to_vec(), + log.log_index.unwrap().as_u32(), + parsed_log.fid.try_into()?, + parsed_log.units.as_u32(), + parsed_log.payer.as_bytes().to_vec(), + ); + + Ok((chain_events_row, storage_allocations_row)) } } diff --git a/lib/hub/src/hub.rs b/lib/hub/src/hub.rs index 411fe9a..1712479 100644 --- a/lib/hub/src/hub.rs +++ b/lib/hub/src/hub.rs @@ -3,6 +3,7 @@ use libp2p::{futures::channel::mpsc, Multiaddr, PeerId}; use teleport_common::protobufs::generated::*; use teleport_storage::Store; +#[allow(unused)] enum HubSubmitSource { Gossip, RPC, @@ -12,18 +13,21 @@ enum HubSubmitSource { FNameRegistry, } +#[allow(unused)] #[derive(Debug, Clone)] pub struct TestUser { fid: u64, mnemonic: String, } +#[allow(unused)] #[derive(Debug, Clone)] pub struct AddrInfo { pub id: PeerId, pub addrs: Vec, } +#[allow(unused)] #[derive(Debug, Clone)] pub struct HubOptions { pub network: FarcasterNetwork, @@ -75,6 +79,7 @@ pub struct HubOptions { pub hub_operator_fid: Option, } +#[allow(unused)] pub struct Hub { options: HubOptions, gossip_node: GossipNode, diff --git a/lib/hub/src/main.rs b/lib/hub/src/main.rs index 8772f6e..10846d9 100644 --- a/lib/hub/src/main.rs +++ b/lib/hub/src/main.rs @@ -5,22 +5,25 @@ pub mod sync; pub mod validation; use dotenv::dotenv; -use ethers::prelude::{Http, Provider}; +use ethers::{prelude::Provider, providers::Http}; use figment::{ providers::{Env, Format, Toml}, Figment, }; use libp2p::PeerId; use libp2p::{identity::ed25519, Multiaddr}; -use log; +use log::{self}; use p2p::gossip_node::NodeOptions; use prost::Message; use serde::Deserialize; -use std::fs::{self, canonicalize}; use std::path::Path; use std::path::PathBuf; use std::process::exit; use std::str::FromStr; +use std::{ + fs::{self, canonicalize}, + sync::{Arc, Mutex}, +}; use teleport_common::peer_id::{create_ed25519_peer_id, write_peer_id}; use teleport_common::protobufs::generated::hub_service_server::HubServiceServer; use teleport_common::protobufs::generated::{FarcasterNetwork, PeerIdProto}; @@ -45,6 +48,7 @@ struct Config { abi_dir: String, indexer_interval: u64, bootstrap_addrs: Vec, + sync_block_range_size: u64, } #[tokio::main] @@ -61,6 +65,9 @@ async fn main() { .extract() .expect("configuration error"); + // Load Persistent State + let state = Arc::new(Mutex::new(teleport_common::state::PersistentState::load())); + // run database migrations let store = teleport_storage::Store::new(config.db_path).await; @@ -74,6 +81,7 @@ async fn main() { let mut indexer = Indexer::new( store.clone(), + state.clone(), provider, config.chain_id, config.id_registry_address, @@ -89,12 +97,20 @@ async fn main() { let latest_block_num = indexer.get_latest_block().await.unwrap(); let start_block_num = indexer.get_start_block().await; indexer - .sync(start_block_num, latest_block_num) + .sync( + start_block_num, + latest_block_num, + config.sync_block_range_size, + ) .await .unwrap(); // Subscribe to new events asynchronously - let subscribe_task = indexer.subscribe(latest_block_num + 1, config.indexer_interval); + let subscribe_task = indexer.subscribe( + latest_block_num + 1, + config.indexer_interval, + config.sync_block_range_size, + ); let secret_key_hex = config.farcaster_priv_key; let mut secret_key_bytes = hex::decode(secret_key_hex).expect("Invalid hex string"); @@ -148,6 +164,7 @@ async fn main() { subscribe_task.await.unwrap(); } +#[allow(unused)] fn start(args: teleport_cli::start::StartCommand) { log::info!("Teleport Starting..."); diff --git a/lib/hub/src/p2p/event_loop.rs 
b/lib/hub/src/p2p/event_loop.rs index d9cc3eb..072b7f9 100644 --- a/lib/hub/src/p2p/event_loop.rs +++ b/lib/hub/src/p2p/event_loop.rs @@ -11,6 +11,7 @@ use libp2p::swarm::derive_prelude::Either; use libp2p::swarm::SwarmEvent; use libp2p::{futures::channel::mpsc, Swarm}; use libp2p::{Multiaddr, PeerId}; +use log::error; use prost::Message; use teleport_common::errors::{BadRequestType, HubError, UnavailableType}; use teleport_common::protobufs::{self, generated}; @@ -384,24 +385,32 @@ impl EventLoop { let res = self.gossip_message(message); if let Err(err) = res { - println!("Failed to gossip: {:?}", err); + error!("Failed to gossip: {:?}", err); } } Command::GossipContactInfo { contact_info } => { - self.gossip_contact_info(contact_info); + let res = self.gossip_contact_info(contact_info); + + if let Err(err) = res { + error!("Failed to gossip contact info: {:?}", err) + } } Command::DialMultiAddr { addr } => { let res = self.dial_multi_addr(addr); if let Err(err) = res { - println!("Failed to dial: {:?}", err); + error!("Failed to dial: {:?}", err); } } Command::GetState { sender } => { let mut state = self.state.clone(); state.external_addrs = self.swarm.external_addresses().map(|a| a.clone()).collect(); - sender.send(state); + let res = sender.send(state); + + if let Err(err) = res { + error!("Failed to send state: {:?}", err); + } } } } @@ -414,7 +423,11 @@ impl EventLoop { let bootstrap_addrs = self.state.bootstrap_addrs.clone(); for addr in bootstrap_addrs { - self.dial_multi_addr(addr); + let res = self.dial_multi_addr(addr); + + if let Err(err) = res { + error!("Failed to dial bootstrap addr: {:?}", err); + } } } diff --git a/lib/hub/src/p2p/gossip_node.rs b/lib/hub/src/p2p/gossip_node.rs index 95a9a1d..423b291 100644 --- a/lib/hub/src/p2p/gossip_node.rs +++ b/lib/hub/src/p2p/gossip_node.rs @@ -3,10 +3,7 @@ use std::{net::TcpListener, str::FromStr}; use crate::hub::AddrInfo; use libp2p::{ core::upgrade, - futures::{ - channel::{mpsc, oneshot}, - prelude::*, - }, + futures::channel::{mpsc, oneshot}, gossipsub::{self, Message as GossipSubMessage, MessageAuthenticity, MessageId}, identify, identity, noise, ping, swarm::SwarmBuilder, diff --git a/lib/hub/src/validation.rs b/lib/hub/src/validation.rs index 10f4a72..b83b65d 100644 --- a/lib/hub/src/validation.rs +++ b/lib/hub/src/validation.rs @@ -72,20 +72,14 @@ impl<'a> Validator<'a> { &self.message.hash, &pub_key, ) { - Ok(_) => { - return Ok(()); - } - Err(err) => { - return Err(HubError::Unknown(err.to_string())); - } + Ok(_) => Ok(()), + Err(err) => Err(HubError::Unknown(err.to_string())), } } - _ => { - return Err(HubError::Unknown(format!( - "Unknown signature scheme: {:?}", - self.message.signature_scheme - ))); - } + _ => Err(HubError::Unknown(format!( + "Unknown signature scheme: {:?}", + self.message.signature_scheme + ))), } } diff --git a/lib/rpc/src/admin_server.rs b/lib/rpc/src/admin_server.rs index 77cd5ba..b28e9c3 100644 --- a/lib/rpc/src/admin_server.rs +++ b/lib/rpc/src/admin_server.rs @@ -6,21 +6,18 @@ pub struct AdminServer {} #[tonic::async_trait] impl AdminService for AdminServer { - async fn rebuild_sync_trie( - &self, - request: tonic::Request, - ) -> Result, Status> { + async fn rebuild_sync_trie(&self, _request: Request) -> Result, Status> { todo!() } async fn delete_all_messages_from_db( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn submit_on_chain_event( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } diff --git 
a/lib/rpc/src/server.rs b/lib/rpc/src/server.rs index 90f8ddb..7a73c17 100644 --- a/lib/rpc/src/server.rs +++ b/lib/rpc/src/server.rs @@ -7,246 +7,249 @@ pub struct HubServer {} #[tonic::async_trait] impl HubService for HubServer { - async fn submit_message(&self, request: Request) -> Result, Status> { + async fn submit_message( + &self, + _request: Request, + ) -> Result, Status> { todo!() } type SubscribeStream = ReceiverStream>; async fn subscribe( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn validate_message( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_event( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } - async fn get_cast(&self, request: Request) -> Result, Status> { + async fn get_cast(&self, _request: Request) -> Result, Status> { todo!() } async fn get_casts_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_casts_by_parent( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_casts_by_mention( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } /// Reactions async fn get_reaction( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_reactions_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_reactions_by_cast( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_reactions_by_target( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } /// User Data async fn get_user_data( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_user_data_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_on_chain_events( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_id_registry_on_chain_event( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_id_registry_on_chain_event_by_address( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_current_storage_limits_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } /// Username Proof async fn get_username_proof( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_user_name_proofs_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } /// Verifications async fn get_verification( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_verifications_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } /// Signer async fn get_on_chain_signer( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_on_chain_signers_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_fids( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } /// Links - async fn get_link(&self, request: Request) -> Result, Status> { + async fn get_link(&self, _request: Request) -> Result, Status> { todo!() } async fn get_links_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_links_by_target( &self, - request: Request, + 
_request: Request, ) -> Result, Status> { todo!() } /// Bulk Methods async fn get_all_cast_messages_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_all_reaction_messages_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_all_verification_messages_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_all_user_data_messages_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_all_link_messages_by_fid( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } /// Sync Methods async fn get_info( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_sync_status( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_all_sync_ids_by_prefix( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_all_messages_by_sync_ids( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_sync_metadata_by_prefix( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } async fn get_sync_snapshot_by_prefix( &self, - request: Request, + _request: Request, ) -> Result, Status> { todo!() } diff --git a/lib/storage/migrations/20231118180138_farcaster.sql b/lib/storage/migrations/20231118180138_farcaster.sql index 10d72d0..27e1bdc 100644 --- a/lib/storage/migrations/20231118180138_farcaster.sql +++ b/lib/storage/migrations/20231118180138_farcaster.sql @@ -1,8 +1,7 @@ -- Add migration script here --- chain events -CREATE TABLE chain_events ( - id TEXT PRIMARY KEY, +CREATE TABLE IF NOT EXISTS chain_events ( created_at DATETIME NOT NULL DEFAULT (datetime('now')), block_timestamp DATETIME NOT NULL, fid INTEGER NOT NULL, @@ -14,38 +13,42 @@ CREATE TABLE chain_events ( block_hash BLOB NOT NULL, transaction_hash BLOB NOT NULL, body TEXT NOT NULL, - raw BLOB NOT NULL + raw BLOB NOT NULL, + PRIMARY KEY (transaction_hash, log_index) ); -CREATE INDEX chain_events_fid_index ON chain_events(fid); -CREATE INDEX chain_events_block_hash_index ON chain_events(block_hash); -CREATE INDEX chain_events_block_timestamp_index ON chain_events(block_timestamp); -CREATE INDEX chain_events_transaction_hash_index ON chain_events(transaction_hash); +CREATE INDEX IF NOT EXISTS chain_events_fid_index ON chain_events(fid); +CREATE INDEX IF NOT EXISTS chain_events_block_hash_index ON chain_events(block_hash); +CREATE INDEX IF NOT EXISTS chain_events_block_timestamp_index ON chain_events(block_timestamp); ---- FID -CREATE TABLE fids ( +CREATE TABLE IF NOT EXISTS fids ( fid INTEGER NOT NULL, created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, registered_at TEXT NOT NULL, - chain_event_id TEXT NOT NULL, -- UUIDs are stored as TEXT in SQLite + transaction_hash BLOB NOT NULL, + log_index SMALLINT NOT NULL, custody_address BLOB NOT NULL, recovery_address BLOB NOT NULL, PRIMARY KEY (fid), - FOREIGN KEY (chain_event_id) REFERENCES chain_events(id) ON DELETE CASCADE + UNIQUE (transaction_hash, log_index), + FOREIGN KEY (transaction_hash, log_index) REFERENCES chain_events(transaction_hash, log_index) ON DELETE CASCADE ); --- Signers -CREATE TABLE signers ( +CREATE TABLE IF NOT EXISTS signers ( id TEXT PRIMARY KEY, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL 
DEFAULT CURRENT_TIMESTAMP, - added_at TEXT NOT NULL, - removed_at TEXT, + added_at DATETIME NOT NULL, + removed_at DATETIME, fid INTEGER NOT NULL, requester_fid INTEGER NOT NULL, - add_chain_event_id TEXT NOT NULL, -- UUID as TEXT - remove_chain_event_id TEXT, + add_transaction_hash BLOB NOT NULL, + add_log_index SMALLINT NOT NULL, + remove_transaction_hash BLOB, + remove_log_index SMALLINT, key_type SMALLINT NOT NULL, metadata_type SMALLINT NOT NULL, key BLOB NOT NULL, @@ -53,15 +56,36 @@ CREATE TABLE signers ( UNIQUE (fid, key), FOREIGN KEY (fid) REFERENCES fids(fid) ON DELETE CASCADE, FOREIGN KEY (requester_fid) REFERENCES fids(fid) ON DELETE CASCADE, - FOREIGN KEY (add_chain_event_id) REFERENCES chain_events(id) ON DELETE CASCADE, - FOREIGN KEY (remove_chain_event_id) REFERENCES chain_events(id) ON DELETE CASCADE + UNIQUE (add_transaction_hash, add_log_index), + UNIQUE (remove_transaction_hash, remove_log_index), + FOREIGN KEY (add_transaction_hash, add_log_index) REFERENCES chain_events(transaction_hash, log_index) ON DELETE CASCADE, + FOREIGN KEY (remove_transaction_hash, remove_log_index) REFERENCES chain_events(transaction_hash, log_index) ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS signers_fid_index ON signers(fid); +CREATE INDEX IF NOT EXISTS signers_requester_fid_index ON signers(requester_fid); + +---- storage allocations +CREATE TABLE IF NOT EXISTS storage_allocations ( + id TEXT, -- UUID as TEXT + created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, + rented_at TEXT NOT NULL, + expires_at TEXT NOT NULL, + transaction_hash BLOB NOT NULL, + log_index SMALLINT NOT NULL, + fid INTEGER NOT NULL, + units INTEGER NOT NULL, + payer BLOB NOT NULL, + PRIMARY KEY (id), + UNIQUE (transaction_hash, log_index), + FOREIGN KEY (transaction_hash, log_index) REFERENCES chain_events(transaction_hash, log_index) ON DELETE CASCADE ); -CREATE INDEX signers_fid_index ON signers(fid); -CREATE INDEX signers_requester_fid_index ON signers(requester_fid); +CREATE INDEX IF NOT EXISTS storage_allocations_fid_expires_at_index ON storage_allocations(fid, expires_at); --- username proofs -CREATE TABLE username_proofs ( +CREATE TABLE IF NOT EXISTS username_proofs ( id TEXT PRIMARY KEY, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -75,10 +99,10 @@ CREATE TABLE username_proofs ( FOREIGN KEY (fid) REFERENCES fids(fid) ON DELETE CASCADE ); -CREATE UNIQUE INDEX username_proofs_username_timestamp_unique ON username_proofs (username, timestamp); +CREATE UNIQUE INDEX IF NOT EXISTS username_proofs_username_timestamp_unique ON username_proofs (username, timestamp); --- fnames -CREATE TABLE fnames ( +CREATE TABLE IF NOT EXISTS fnames ( id TEXT PRIMARY KEY, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -90,11 +114,11 @@ CREATE TABLE fnames ( FOREIGN KEY (fid) REFERENCES fids(fid) ON DELETE CASCADE ); -CREATE UNIQUE INDEX fnames_fid_unique ON fnames (fid); -CREATE UNIQUE INDEX fnames_username_unique ON fnames (username); +CREATE UNIQUE INDEX IF NOT EXISTS fnames_fid_unique ON fnames (fid); +CREATE UNIQUE INDEX IF NOT EXISTS fnames_username_unique ON fnames (username); --- messages -CREATE TABLE messages ( +CREATE TABLE IF NOT EXISTS messages ( id TEXT, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -116,12 +140,13 @@ CREATE TABLE 
messages ( FOREIGN KEY (fid, signer) REFERENCES signers(fid, key) ON DELETE CASCADE ); -CREATE INDEX messages_timestamp_index ON messages(timestamp); -CREATE INDEX messages_fid_index ON messages(fid); -CREATE INDEX messages_signer_index ON messages(signer); +CREATE INDEX IF NOT EXISTS messages_timestamp_index ON messages(timestamp); +CREATE INDEX IF NOT EXISTS messages_fid_index ON messages(fid); +CREATE INDEX IF NOT EXISTS messages_signer_index ON messages(signer); +CREATE UNIQUE INDEX IF NOT EXISTS messages_hash_unique ON messages(hash); --- casts -CREATE TABLE casts ( +CREATE TABLE IF NOT EXISTS casts ( id TEXT, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -143,16 +168,16 @@ CREATE TABLE casts ( FOREIGN KEY (hash) REFERENCES messages(hash) ON DELETE CASCADE ); -CREATE UNIQUE INDEX casts_hash_unique ON casts (hash); +CREATE UNIQUE INDEX IF NOT EXISTS casts_hash_unique ON casts (hash); -CREATE INDEX casts_timestamp_index ON casts(timestamp); -CREATE INDEX casts_parent_hash_index ON casts(parent_hash); -CREATE INDEX casts_root_parent_hash_index ON casts(root_parent_hash); -CREATE INDEX casts_parent_url_index ON casts(parent_url); -CREATE INDEX casts_root_parent_url_index ON casts(root_parent_url); +CREATE INDEX IF NOT EXISTS casts_timestamp_index ON casts(timestamp); +CREATE INDEX IF NOT EXISTS casts_parent_hash_index ON casts(parent_hash); +CREATE INDEX IF NOT EXISTS casts_root_parent_hash_index ON casts(root_parent_hash); +CREATE INDEX IF NOT EXISTS casts_parent_url_index ON casts(parent_url); +CREATE INDEX IF NOT EXISTS casts_root_parent_url_index ON casts(root_parent_url); --- reactions -CREATE TABLE reactions ( +CREATE TABLE IF NOT EXISTS reactions ( id TEXT, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -170,14 +195,14 @@ CREATE TABLE reactions ( FOREIGN KEY (target_cast_hash) REFERENCES casts(hash) ON DELETE CASCADE ); -CREATE UNIQUE INDEX reactions_hash_unique ON reactions (hash); +CREATE UNIQUE INDEX IF NOT EXISTS reactions_hash_unique ON reactions (hash); -CREATE INDEX reactions_fid_timestamp_index ON reactions(fid, timestamp); -CREATE INDEX reactions_target_cast_hash_index ON reactions(target_cast_hash); -CREATE INDEX reactions_target_url_index ON reactions(target_url); +CREATE INDEX IF NOT EXISTS reactions_fid_timestamp_index ON reactions(fid, timestamp); +CREATE INDEX IF NOT EXISTS reactions_target_cast_hash_index ON reactions(target_cast_hash); +CREATE INDEX IF NOT EXISTS reactions_target_url_index ON reactions(target_url); --- links -CREATE TABLE links ( +CREATE TABLE IF NOT EXISTS links ( id TEXT, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -193,11 +218,11 @@ CREATE TABLE links ( FOREIGN KEY (target_fid) REFERENCES fids(fid) ON DELETE CASCADE ); -CREATE UNIQUE INDEX links_hash_unique ON links (hash); -CREATE UNIQUE INDEX links_fid_target_fid_type_unique ON links (fid, target_fid, type); +CREATE UNIQUE INDEX IF NOT EXISTS links_hash_unique ON links (hash); +CREATE UNIQUE INDEX IF NOT EXISTS links_fid_target_fid_type_unique ON links (fid, target_fid, type); --- verifications -CREATE TABLE verifications ( +CREATE TABLE IF NOT EXISTS verifications ( id TEXT, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -213,11 +238,11 @@ CREATE TABLE verifications ( FOREIGN KEY (hash) 
REFERENCES messages(hash) ON DELETE CASCADE ); -CREATE UNIQUE INDEX verifications_signer_address_fid_unique ON verifications (signer_address, fid); -CREATE INDEX verifications_fid_timestamp_index ON verifications (fid, timestamp); +CREATE UNIQUE INDEX IF NOT EXISTS verifications_signer_address_fid_unique ON verifications (signer_address, fid); +CREATE INDEX IF NOT EXISTS verifications_fid_timestamp_index ON verifications (fid, timestamp); --- user data -CREATE TABLE user_data ( +CREATE TABLE IF NOT EXISTS user_data ( id TEXT, -- UUID as TEXT created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -232,21 +257,5 @@ CREATE TABLE user_data ( FOREIGN KEY (hash) REFERENCES messages(hash) ON DELETE CASCADE ); -CREATE UNIQUE INDEX user_data_fid_type_unique ON user_data (fid, type); - ----- storage allocations -CREATE TABLE storage_allocations ( - id TEXT, -- UUID as TEXT - created_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, - rented_at TEXT NOT NULL, - expires_at TEXT NOT NULL, - chain_event_id TEXT NOT NULL, - fid INTEGER NOT NULL, - units INTEGER NOT NULL, - payer BLOB NOT NULL, - PRIMARY KEY (id), - FOREIGN KEY (chain_event_id) REFERENCES chain_events(id) ON DELETE CASCADE -); +CREATE UNIQUE INDEX IF NOT EXISTS user_data_fid_type_unique ON user_data (fid, type); -CREATE INDEX storage_allocations_fid_expires_at_index ON storage_allocations(fid, expires_at); diff --git a/lib/storage/src/db.rs b/lib/storage/src/db.rs index 78a2598..9a93fca 100644 --- a/lib/storage/src/db.rs +++ b/lib/storage/src/db.rs @@ -1,13 +1,11 @@ use prost::Message; -use sqlx::query::Query; -use sqlx::sqlite::SqliteArguments; -use sqlx::{Execute, Executor, QueryBuilder, Sqlite, Transaction}; use teleport_common::protobufs::generated::on_chain_event::Body::*; -use teleport_common::protobufs::generated::OnChainEvent; +use teleport_common::protobufs::generated::{OnChainEvent, SignerEventBody}; use uuid::Uuid; +const MAX_ROWS_PER_BATCH: usize = 50; + pub struct ChainEventRow { - pub id: String, pub block_timestamp: u64, pub fid: u64, pub chain_id: u32, @@ -22,10 +20,8 @@ pub struct ChainEventRow { } impl ChainEventRow { - pub fn new(onchain_event: OnChainEvent, raw_event: Vec) -> Self { - let id = Uuid::new_v4().to_string(); - - let serialized_body = match onchain_event.body { + pub fn new(onchain_event: &OnChainEvent, raw_event: Vec) -> Self { + let serialized_body = match &onchain_event.body { Some(body) => match body { SignerEventBody(event_body) => event_body.encode_to_vec(), SignerMigratedEventBody(event_body) => event_body.encode_to_vec(), @@ -36,7 +32,6 @@ impl ChainEventRow { }; Self { - id, block_timestamp: onchain_event.block_timestamp, fid: onchain_event.fid, chain_id: onchain_event.chain_id, @@ -44,60 +39,26 @@ impl ChainEventRow { transaction_index: onchain_event.tx_index, log_index: onchain_event.log_index, r#type: onchain_event.r#type, - block_hash: onchain_event.block_hash, - transaction_hash: onchain_event.transaction_hash, + block_hash: onchain_event.block_hash.clone(), + transaction_hash: onchain_event.transaction_hash.clone(), body: serialized_body, raw: raw_event, } } - pub async fn insert(&self, store: &crate::Store) -> Result { - let mut conn = store.conn.acquire().await.unwrap(); - let id = self.id.clone(); - let block_timestamp = self.block_timestamp as i64; - let fid = self.fid as i64; - let block_hash = self.block_hash.clone(); - let transaction_hash = self.transaction_hash.clone(); - let body = 
self.body.clone(); - let raw = self.raw.clone(); - sqlx::query_file!( - "src/queries/insert_chain_event.sql", - id, - block_timestamp, - fid, - self.chain_id, - self.block_number, - self.transaction_index, - self.log_index, - self.r#type, - block_hash, - transaction_hash, - body, - raw - ) - .execute(&mut *conn) - .await?; - - Ok(id) - } - pub fn generate_bulk_insert_queries( rows: &[ChainEventRow], ) -> Result, sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_row = 13; // Number of fields in ChainEventRow - let max_rows_per_batch = MAX_PARAMS / params_per_row; - let mut query_strings = Vec::new(); - for chunk in rows.chunks(max_rows_per_batch) { + for chunk in rows.chunks(MAX_ROWS_PER_BATCH) { let mut params = Vec::new(); - let sql = "INSERT INTO chain_events (id, block_timestamp, fid, chain_id, block_number, transaction_index, log_index, type, block_hash, transaction_hash, body, raw) VALUES "; + let sql = "INSERT INTO chain_events (block_timestamp, fid, chain_id, block_number, transaction_index, log_index, type, block_hash, transaction_hash, body, raw) VALUES"; + let conflict_sql = "ON CONFLICT (transaction_hash, log_index) DO NOTHING"; for row in chunk { let values = format!( - "('{}', {}, {}, {}, {}, {}, {}, {}, {:?}, {:?}, {:?}, {:?})", - row.id, + "({}, {}, {}, {}, {}, {}, {}, '{}', '{}', '{}', '{}')", row.block_timestamp as i64, row.fid as i64, row.chain_id as i32, @@ -113,419 +74,193 @@ impl ChainEventRow { params.push(values); } - let query_string = format!("{}{}", sql, params.join(", ")); + let query_string = format!("{} {} {}", sql, params.join(", "), conflict_sql); query_strings.push(query_string); } Ok(query_strings) } - - pub async fn bulk_insert( - store: &crate::Store, - rows: &[ChainEventRow], - ) -> Result<(), sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_row = 13; // Number of fields in ChainEventRow - let max_rows_per_batch = MAX_PARAMS / params_per_row; - - for chunk in rows.chunks(max_rows_per_batch) { - let mut query_builder = QueryBuilder::new( - "INSERT INTO chain_events (id, block_timestamp, fid, chain_id, block_number, transaction_index, log_index, type, block_hash, transaction_hash, body, raw) ", - ); - - query_builder.push_values(chunk.iter(), |mut b, row| { - b.push_bind(&row.id) - .push_bind(row.block_timestamp as i64) - .push_bind(row.fid as i64) - .push_bind(row.chain_id as i32) - .push_bind(row.block_number as i32) - .push_bind(row.transaction_index as i32) - .push_bind(row.log_index as i32) - .push_bind(row.r#type as i32) - .push_bind(&row.block_hash) - .push_bind(&row.transaction_hash) - .push_bind(&row.body) - .push_bind(&row.raw); - }); - - let query = query_builder.build(); - - let mut conn = store.conn.acquire().await.unwrap(); - query.execute(&mut *conn).await?; - } - - Ok(()) - } - - pub async fn max_block_number(store: &crate::Store) -> Result { - let mut conn = store.conn.acquire().await.unwrap(); - let row = sqlx::query_file!("src/queries/max_block_number.sql") - .fetch_one(&mut *conn) - .await?; - - Ok(row.block_number) - } } pub struct FidRow { - pub fid: i64, + pub fid: u64, pub registered_at: i64, - pub chain_event_id: String, + pub transaction_hash: Vec, + pub log_index: u32, pub custody_address: [u8; 20], pub recovery_address: [u8; 20], } pub struct FidTransfer { - pub fid: u32, + pub fid: u64, pub custody_address: [u8; 20], } pub struct FidRecoveryUpdate { - pub fid: u32, + pub fid: u64, pub recovery_address: [u8; 20], } impl FidRow { - pub async fn insert(&self, store: &crate::Store) -> Result<(), 
sqlx::Error> { - let mut conn = store.conn.acquire().await.unwrap(); - let recovery_address = self.recovery_address.clone(); - let recovery_address = recovery_address.as_slice(); - let chain_event_id = self.chain_event_id.clone(); - let custody_address = self.custody_address.clone(); - let custody_address = custody_address.as_slice(); - sqlx::query_file!( - "src/queries/insert_fid.sql", - self.fid, - self.registered_at, - chain_event_id, - custody_address, - recovery_address - ) - .execute(&mut *conn) - .await?; - Ok(()) - } - - pub async fn update_recovery_address( - store: &crate::Store, - update: &FidRecoveryUpdate, - ) -> Result<(), sqlx::Error> { - let mut conn = store.conn.acquire().await.unwrap(); - let fid = update.fid as i64; - let to = update.recovery_address.as_slice(); - sqlx::query_file!("src/queries/update_recovery_address.sql", to, fid) - .execute(&mut *conn) - .await?; - Ok(()) - } - - pub async fn transfer(store: &crate::Store, update: &FidTransfer) -> Result<(), sqlx::Error> { - let mut conn = store.conn.acquire().await.unwrap(); - let fid = update.fid as i64; - let to = update.custody_address.as_slice(); - sqlx::query_file!("src/queries/update_custody_address.sql", to, fid) - .execute(&mut *conn) - .await?; - Ok(()) - } - pub fn generate_bulk_insert_queries(rows: &[FidRow]) -> Result, sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_row = 5; // Number of fields in FidRow - let max_rows_per_batch = MAX_PARAMS / params_per_row; - let mut query_strings = Vec::new(); - for chunk in rows.chunks(max_rows_per_batch) { + for chunk in rows.chunks(MAX_ROWS_PER_BATCH) { let mut params = Vec::new(); - let sql = "INSERT INTO fids (fid, registered_at, chain_event_id, custody_address, recovery_address) VALUES "; + let sql = "INSERT INTO fids (fid, registered_at, transaction_hash, log_index, custody_address, recovery_address) VALUES"; + let conflict_sql = "ON CONFLICT (fid) DO NOTHING"; for row in chunk { let values = format!( - "({}, {}, '{}', {:?}, {:?})", + "({}, {}, '{}', {}, '{}', '{}')", row.fid, row.registered_at, - row.chain_event_id, + hex::encode(&row.transaction_hash), + row.log_index, hex::encode(&row.custody_address), hex::encode(&row.recovery_address) ); params.push(values); } - let query_string = format!("{}{}", sql, params.join(", ")); + let query_string = format!("{} {} {}", sql, params.join(", "), conflict_sql); query_strings.push(query_string); } Ok(query_strings) } - pub async fn bulk_insert(store: &crate::Store, rows: &[FidRow]) -> Result<(), sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_row = 5; // TODO: derive this from number of fields in FidRow rather than a hardcoded size - let max_rows_per_batch = MAX_PARAMS / params_per_row; - - for chunk in rows.chunks(max_rows_per_batch) { - let mut query_builder = QueryBuilder::new( - "INSERT INTO fids (fid, registered_at, chain_event_id, custody_address, recovery_address) ", - ); - query_builder.push_values(chunk.iter(), |mut b, row| { - b.push_bind(row.fid as u32) - .push_bind(row.registered_at as u32) - .push_bind(&row.chain_event_id) - .push_bind(row.custody_address.as_slice()) - .push_bind(row.recovery_address.as_slice()); - }); - - query_builder.push(" ON CONFLICT (fid) DO NOTHING"); // There appear to be duplicate register events... 
- - let query = query_builder.build(); - - let mut conn = store.conn.acquire().await.unwrap(); - query.execute(&mut *conn).await?; - } - - Ok(()) - } - pub fn generate_bulk_transfer_queries( transfers: &[FidTransfer], ) -> Result, sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_transfer = 2; // Each transfer requires two parameters (fid and custody_address) - let max_transfers_per_batch = MAX_PARAMS / params_per_transfer; let mut query_strings = Vec::new(); - for chunk in transfers.chunks(max_transfers_per_batch) { - let mut sql = String::from("UPDATE fids SET custody_address = CASE fid "); - let mut params: Vec = Vec::new(); + let sql = String::from("UPDATE fids SET custody_address = CASE "); + + for chunk in transfers.chunks(MAX_ROWS_PER_BATCH) { + let mut cases: Vec = vec![]; + let mut where_in: Vec = vec![]; for transfer in chunk { - sql.push_str(&format!( - " WHEN {} THEN '{}' ", + cases.push(format!( + "WHEN fid = {} THEN '{}'", transfer.fid, hex::encode(&transfer.custody_address) )); - params.push(transfer.fid.to_string()); - } - - sql.push_str(" END WHERE fid IN ("); - sql.push_str(¶ms.join(", ")); - sql.push_str(")"); - - query_strings.push(sql); - } - - Ok(query_strings) - } - - pub async fn bulk_transfer( - store: &crate::Store, - transfers: &[FidTransfer], - ) -> Result<(), sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_transfer = 2; // Each transfer requires two parameters (fid and custody_address) - let max_transfers_per_batch = MAX_PARAMS / params_per_transfer; - - for chunk in transfers.chunks(max_transfers_per_batch) { - let mut sql = String::from("UPDATE fids SET custody_address = CASE fid "); - let mut params: Vec<(i64, Vec)> = Vec::new(); - - for transfer in chunk { - sql.push_str(&format!(" WHEN ? THEN ? ")); - params.push(( - transfer.fid as i64, - transfer.custody_address.clone().to_vec(), - )); - } - sql.push_str(" END WHERE fid IN ("); - sql.push_str(&"?,".repeat(chunk.len()).trim_end_matches(',')); - sql.push_str(")"); - - let mut query = sqlx::query(&sql); - - for (fid, custody_address) in ¶ms { - query = query.bind(*fid).bind(custody_address); - } - - for transfer in chunk { - query = query.bind(transfer.fid as i64); + where_in.push(format!("'{}'", transfer.fid)); } - query - .execute(&mut *store.conn.acquire().await.unwrap()) - .await?; + let query_string = + sql.clone() + &cases.join(" ") + " END WHERE fid IN (" + &where_in.join(", ") + ")"; + query_strings.push(query_string); } - Ok(()) + Ok(query_strings) } pub fn generate_bulk_update_recovery_address_queries( updates: &[FidRecoveryUpdate], ) -> Result, sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_update = 2; // Each update requires two parameters (fid and recovery_address) - let max_updates_per_batch = MAX_PARAMS / params_per_update; let mut query_strings: Vec = Vec::new(); + let sql = String::from("UPDATE fids SET recovery_address = CASE "); - for chunk in updates.chunks(max_updates_per_batch) { - let mut sql = String::from("UPDATE fids SET recovery_address = CASE fid "); - let mut params: Vec = Vec::new(); + for chunk in updates.chunks(MAX_ROWS_PER_BATCH) { + let mut cases: Vec = vec![]; + let mut where_in: Vec = vec![]; for update in chunk { - sql.push_str(&format!(" WHEN {} THEN ? 
", update.fid)); - params.push(format!( - "'{}'", - hex::encode(update.recovery_address.clone()) + cases.push(format!( + "WHEN fid = {} THEN '{}'", + update.fid, + hex::encode(&update.recovery_address) )); - } - - sql.push_str(" END WHERE fid IN ("); - sql.push_str(¶ms.iter().map(|_| "?").collect::>().join(",")); - sql.push_str(")"); - query_strings.push(sql); - } - - Ok(query_strings) - } - - pub async fn bulk_update_recovery_address( - store: &crate::Store, - updates: &[FidRecoveryUpdate], - ) -> Result<(), sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_update = 2; // Each update requires two parameters (fid and recovery_address) - let max_updates_per_batch = MAX_PARAMS / params_per_update; - - for chunk in updates.chunks(max_updates_per_batch) { - let mut sql = String::from("UPDATE fids SET recovery_address = CASE fid "); - let mut params: Vec<(i64, Vec)> = Vec::new(); - - for update in chunk { - sql.push_str(&format!(" WHEN ? THEN ? ")); - params.push((update.fid as i64, update.recovery_address.clone().to_vec())); + where_in.push(update.fid.to_string()); } - sql.push_str(" END WHERE fid IN ("); - sql.push_str(&"?,".repeat(chunk.len()).trim_end_matches(',')); - sql.push_str(")"); + let query_string = + sql.clone() + &cases.join(" ") + " END WHERE fid IN (" + &where_in.join(", ") + ")"; - let mut query = sqlx::query(&sql); - - for (fid, recovery_address) in ¶ms { - query = query.bind(*fid).bind(recovery_address); - } - - for update in chunk { - query = query.bind(update.fid as i64); - } - - query - .execute(&mut *store.conn.acquire().await.unwrap()) - .await?; + query_strings.push(query_string); } - Ok(()) + Ok(query_strings) } } #[derive(Debug)] pub struct SignerRow { pub id: String, - pub added_at: String, - pub removed_at: Option, + pub added_at: u64, + pub removed_at: Option, pub fid: u64, pub requester_fid: u64, - pub add_chain_event_id: String, - pub remove_chain_event_id: Option, + pub add_transaction_hash: Vec, + pub add_log_index: u32, + pub remove_transaction_hash: Option>, + pub remove_log_index: Option, pub key_type: i64, pub metadata_type: i64, pub key: Vec, pub metadata: String, } +pub struct SignerRemoved { + pub fid: u64, + pub key: Vec, + pub remove_transaction_hash: Vec, + pub remove_log_index: u32, + pub removed_at: u64, +} + impl SignerRow { pub fn new( - fid: u64, + signer_event_body: &SignerEventBody, + onchain_event: &OnChainEvent, requester_fid: u64, - add_chain_event_id: String, - remove_chain_event_id: Option, - key_type: i64, - metadata_type: i64, - key: Vec, metadata: String, ) -> Self { let id = Uuid::new_v4().to_string(); - let added_at = "0".to_string(); + + let added_at = onchain_event.block_timestamp * 1000; // block_timestamp is in seconds let removed_at = None; + Self { id, added_at, removed_at, - fid, + fid: onchain_event.fid, requester_fid, - add_chain_event_id, - remove_chain_event_id, - key_type, - metadata_type, - key, + add_transaction_hash: onchain_event.transaction_hash.clone(), + add_log_index: onchain_event.log_index, + remove_transaction_hash: None, + remove_log_index: None, + key_type: signer_event_body.key_type.into(), + metadata_type: signer_event_body.metadata_type.into(), + key: signer_event_body.key.clone(), metadata, } } - pub async fn insert(&self, store: &crate::Store) -> Result<(), sqlx::Error> { - let mut conn = store.conn.acquire().await.unwrap(); - let fid = self.fid as i64; - let requester_fid = self.requester_fid as i64; - let add_chain_event_id = self.add_chain_event_id.clone(); - let remove_chain_event_id = 
self.remove_chain_event_id.clone(); - let metadata = self.metadata.clone(); - let key = self.key.clone(); - sqlx::query_file!( - "src/queries/insert_signer.sql", - self.id, - self.added_at, - self.removed_at, - fid, - requester_fid, - add_chain_event_id, - remove_chain_event_id, - self.key_type, - self.metadata_type, - key, - metadata - ) - .execute(&mut *conn) - .await?; - Ok(()) - } - - pub fn generate_bulk_insert_queries(rows: &[SignerRow]) -> Result, sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_row = 11; // Number of fields in SignerRow - let max_rows_per_batch = MAX_PARAMS / params_per_row; - + pub fn generate_bulk_insert_queries(rows: &[Self]) -> Result, sqlx::Error> { let mut query_strings = Vec::new(); - for chunk in rows.chunks(max_rows_per_batch) { + let sql_prefix = "INSERT INTO signers (id, added_at, fid, requester_fid, add_transaction_hash, add_log_index, key_type, metadata_type, key, metadata) VALUES"; + let conflict_sql = "ON CONFLICT (fid, key) DO UPDATE SET remove_transaction_hash = NULL, remove_log_index = NULL"; + + for chunk in rows.chunks(MAX_ROWS_PER_BATCH) { let mut values_list = Vec::new(); - let sql_prefix = "INSERT INTO signers (id, added_at, removed_at, fid, requester_fid, add_chain_event_id, remove_chain_event_id, key_type, metadata_type, key, metadata) VALUES "; for row in chunk { let values = format!( - "('{}', '{}', {}, {}, {}, '{}', {}, {}, {}, {:?}, '{}')", + "('{}', {}, {}, {}, '{}', {}, {}, {}, '{}', '{}')", row.id, row.added_at, - row.removed_at - .as_ref() - .map_or("NULL".to_string(), |v| format!("'{}'", v)), row.fid, row.requester_fid, - row.add_chain_event_id, - row.remove_chain_event_id - .as_ref() - .map_or("NULL".to_string(), |v| format!("'{}'", v)), + hex::encode(&row.add_transaction_hash), + row.add_log_index, row.key_type, row.metadata_type, hex::encode(&row.key), @@ -534,157 +269,33 @@ impl SignerRow { values_list.push(values); } - let query = format!("{}{}", sql_prefix, values_list.join(", ")); + let query = format!("{} {} {}", sql_prefix, values_list.join(", "), conflict_sql); query_strings.push(query); } Ok(query_strings) } - pub async fn bulk_insert(store: &crate::Store, rows: &[SignerRow]) -> Result<(), sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_row = 11; - let max_rows_per_batch = MAX_PARAMS / params_per_row; - - for chunk in rows.chunks(max_rows_per_batch) { - let mut query_builder = QueryBuilder::new( - "INSERT INTO signers (id, added_at, removed_at, fid, requester_fid, add_chain_event_id, remove_chain_event_id, key_type, metadata_type, key, metadata) ", - ); - - query_builder.push_values(chunk.iter(), |mut b, row| { - b.push_bind(&row.id) - .push_bind(row.added_at.clone()) - .push_bind(row.removed_at.clone()) - .push_bind(row.fid as i64) - .push_bind(row.requester_fid as i64) - .push_bind(&row.add_chain_event_id) - .push_bind(&row.remove_chain_event_id) - .push_bind(row.key_type as i64) - .push_bind(row.metadata_type as i64) - .push_bind(&row.key) - .push_bind(&row.metadata); - }); - - query_builder.push(" ON CONFLICT DO NOTHING"); - - let query = query_builder.build(); - - let mut conn = store.conn.acquire().await.unwrap(); - query.execute(&mut *conn).await?; - } - - Ok(()) - } - - pub async fn update_remove_event( - store: &crate::Store, - key: Vec, - remove_chain_event_id: String, - ) -> Result { - let mut conn = store.conn.acquire().await.unwrap(); - let result = sqlx::query_file!( - "src/queries/update_remove_chain_event.sql", - remove_chain_event_id, - key, - 1i16 - ) - .execute(&mut 
*conn) - .await?; - - Ok(result.rows_affected()) - } - - pub fn generate_bulk_remove_update_queries( - updates: &[(Vec, String)], + pub fn generate_bulk_remove_signer_queries( + updates: &[SignerRemoved], ) -> Result, sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_update = 2; // Each update requires two parameters (key and remove_chain_event_id) - let max_updates_per_batch = MAX_PARAMS / params_per_update; - let mut query_strings = Vec::new(); - for chunk in updates.chunks(max_updates_per_batch) { - let mut sql = String::from("UPDATE signers SET remove_chain_event_id = CASE key "); - let mut params: Vec = Vec::new(); - - for (key, remove_chain_event_id) in chunk { - sql.push_str("WHEN ? THEN ? "); - params.push(hex::encode(key)); - params.push(remove_chain_event_id.clone()); - } - - sql.push_str("END WHERE key IN ("); - sql.push_str(&"?,".repeat(chunk.len()).trim_end_matches(',')); - sql.push_str(")"); - - let mut query_params = Vec::new(); - for (key, _) in chunk { - query_params.push(hex::encode(key)); - } - - let query_string = sql + ¶ms.join(", ") + &query_params.join(", "); - query_strings.push(query_string); + for update in updates { + let query_str = format!("UPDATE signers SET remove_transaction_hash = '{}', remove_log_index = {}, removed_at = {} WHERE key = '{}' AND fid = {}", hex::encode(&update.remove_transaction_hash), update.remove_log_index, update.removed_at, hex::encode(&update.key), update.fid); + query_strings.push(query_str); } Ok(query_strings) } - - pub async fn bulk_remove_update( - store: &crate::Store, - updates: &[(Vec, String)], // Tuple of (key, remove_chain_event_id) - ) -> Result<(), sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_update = 2; // Each update requires two parameters (key and remove_chain_event_id) - let max_updates_per_batch = MAX_PARAMS / params_per_update; - - for chunk in updates.chunks(max_updates_per_batch) { - let mut sql = String::from("UPDATE signers SET remove_chain_event_id = CASE key "); - let mut params: Vec<(Vec, String)> = Vec::new(); - - for (key, remove_chain_event_id) in chunk { - sql.push_str(&format!("WHEN ? THEN ? 
")); - params.push((key.clone(), remove_chain_event_id.clone())); - } - - sql.push_str("END WHERE key IN ("); - sql.push_str(&"?,".repeat(chunk.len()).trim_end_matches(',')); - sql.push_str(")"); - - let mut query = sqlx::query(&sql); - - for (key, remove_chain_event_id) in ¶ms { - query = query.bind(key).bind(remove_chain_event_id); - } - - for (key, _) in chunk { - query = query.bind(key); - } - - let mut conn = store.conn.acquire().await.unwrap(); - query.execute(&mut *conn).await?; - } - - Ok(()) - } - - pub async fn get_by_key( - store: &crate::Store, - key: Vec, - ) -> Result<(i64, String), sqlx::Error> { - let mut conn = store.conn.acquire().await.unwrap(); - let record = sqlx::query_file!("src/queries/signer_metadata_by_key.sql", key) - .fetch_one(&mut *conn) - .await?; - - Ok((record.key_type, record.metadata)) - } } pub struct StorageAllocationRow { pub id: String, pub rented_at: i64, pub expires_at: u32, - pub chain_event_id: String, + pub transaction_hash: Vec, + pub log_index: u32, pub fid: u64, pub units: u32, pub payer: Vec, @@ -694,7 +305,8 @@ impl StorageAllocationRow { pub fn new( rented_at: i64, expires_at: u32, - chain_event_id: String, + transaction_hash: Vec, + log_index: u32, fid: u64, units: u32, payer: Vec, @@ -704,50 +316,27 @@ impl StorageAllocationRow { id, rented_at, expires_at, - chain_event_id, + transaction_hash, + log_index, fid, units, payer, } } - pub async fn insert(&self, store: &crate::Store) -> Result<(), sqlx::Error> { - let mut conn = store.conn.acquire().await.unwrap(); - let payer = self.payer.clone(); - let payer = payer.as_slice(); - let fid = self.fid as i64; - let units = self.units as i64; - let chain_event_id = self.chain_event_id.clone(); - sqlx::query_file!( - "src/queries/insert_storage_allocation.sql", - self.id, - self.rented_at, - self.expires_at, - chain_event_id, - fid, - units, - payer - ) - .execute(&mut *conn) - .await?; - Ok(()) - } - pub fn generate_bulk_insert_queries(rows: &[Self]) -> Result, sqlx::Error> { let mut queries = Vec::new(); - const MAX_PARAMS: usize = 999; - let params_per_row = 7; // Number of fields in StorageAllocationRow - let max_rows_per_batch = MAX_PARAMS / params_per_row; - for chunk in rows.chunks(max_rows_per_batch) { + for chunk in rows.chunks(MAX_ROWS_PER_BATCH) { let mut values = Vec::new(); for row in chunk { let value = format!( - "('{}', {}, {}, '{}', {}, {}, {:?})", + "('{}', {}, {}, '{}', {}, {}, {}, '{}')", row.id, row.rented_at, row.expires_at, - row.chain_event_id, + hex::encode(&row.transaction_hash), + row.log_index, row.fid, row.units, hex::encode(&row.payer) @@ -755,44 +344,13 @@ impl StorageAllocationRow { values.push(value); } let query = format!( - "INSERT INTO storage_allocations (id, rented_at, expires_at, chain_event_id, fid, units, payer) VALUES {}", - values.join(", ") + "INSERT INTO storage_allocations (id, rented_at, expires_at, transaction_hash, log_index, fid, units, payer) VALUES {} {}", + values.join(", "), + "ON CONFLICT (transaction_hash, log_index) DO NOTHING", ); queries.push(query); } Ok(queries) } - - pub async fn bulk_insert( - store: &crate::Store, - rows: &[StorageAllocationRow], - ) -> Result<(), sqlx::Error> { - const MAX_PARAMS: usize = 999; - let params_per_row = 7; // Number of fields in StorageAllocationRow - let max_rows_per_batch = MAX_PARAMS / params_per_row; - - for chunk in rows.chunks(max_rows_per_batch) { - let mut query_builder = QueryBuilder::new( - "INSERT INTO storage_allocations (id, rented_at, expires_at, chain_event_id, fid, units, payer) ", - ); 
- - query_builder.push_values(chunk.iter(), |mut b, row| { - b.push_bind(&row.id) - .push_bind(row.rented_at as i64) - .push_bind(row.expires_at as i32) - .push_bind(&row.chain_event_id) - .push_bind(row.fid as i64) - .push_bind(row.units as i32) - .push_bind(&row.payer); - }); - - let query = query_builder.build(); - - let mut conn = store.conn.acquire().await.unwrap(); - query.execute(&mut *conn).await?; - } - - Ok(()) - } } diff --git a/lib/storage/src/queries/insert_chain_event.sql b/lib/storage/src/queries/insert_chain_event.sql deleted file mode 100644 index 2b9dc96..0000000 --- a/lib/storage/src/queries/insert_chain_event.sql +++ /dev/null @@ -1,15 +0,0 @@ -INSERT INTO chain_events ( - id, - block_timestamp, - fid, - chain_id, - block_number, - transaction_index, - log_index, - type, - block_hash, - transaction_hash, - body, - raw -) -VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); \ No newline at end of file diff --git a/lib/storage/src/queries/insert_fid.sql b/lib/storage/src/queries/insert_fid.sql deleted file mode 100644 index 71c1d75..0000000 --- a/lib/storage/src/queries/insert_fid.sql +++ /dev/null @@ -1,9 +0,0 @@ -INSERT INTO fids ( - fid, - registered_at, - chain_event_id, - custody_address, - recovery_address -) -VALUES (?, ?, ?, ?, ?) -ON CONFLICT (fid) DO NOTHING; diff --git a/lib/storage/src/queries/insert_signer.sql b/lib/storage/src/queries/insert_signer.sql deleted file mode 100644 index 094e301..0000000 --- a/lib/storage/src/queries/insert_signer.sql +++ /dev/null @@ -1,14 +0,0 @@ -INSERT INTO signers ( - id, - added_at, - removed_at, - fid, - requester_fid, - add_chain_event_id, - remove_chain_event_id, - key_type, - metadata_type, - key, - metadata -) -VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); \ No newline at end of file diff --git a/lib/storage/src/queries/insert_storage_allocation.sql b/lib/storage/src/queries/insert_storage_allocation.sql deleted file mode 100644 index 4844e7a..0000000 --- a/lib/storage/src/queries/insert_storage_allocation.sql +++ /dev/null @@ -1,10 +0,0 @@ -INSERT INTO storage_allocations ( - id, - rented_at, - expires_at, - chain_event_id, - fid, - units, - payer -) -VALUES (?, ?, ?, ?, ?, ?, ?); \ No newline at end of file diff --git a/lib/storage/src/queries/max_block_number.sql b/lib/storage/src/queries/max_block_number.sql deleted file mode 100644 index 7f17840..0000000 --- a/lib/storage/src/queries/max_block_number.sql +++ /dev/null @@ -1,4 +0,0 @@ -SELECT block_number -FROM chain_events -ORDER BY block_number -DESC LIMIT 1; \ No newline at end of file diff --git a/lib/storage/src/queries/signer_metadata_by_key.sql b/lib/storage/src/queries/signer_metadata_by_key.sql deleted file mode 100644 index 819af65..0000000 --- a/lib/storage/src/queries/signer_metadata_by_key.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT key_type, metadata FROM signers WHERE key = ?; \ No newline at end of file diff --git a/lib/storage/src/queries/update_custody_address.sql b/lib/storage/src/queries/update_custody_address.sql deleted file mode 100644 index 562aca2..0000000 --- a/lib/storage/src/queries/update_custody_address.sql +++ /dev/null @@ -1 +0,0 @@ -UPDATE fids SET custody_address = ? WHERE fid = ?; \ No newline at end of file diff --git a/lib/storage/src/queries/update_recovery_address.sql b/lib/storage/src/queries/update_recovery_address.sql deleted file mode 100644 index 7c84df6..0000000 --- a/lib/storage/src/queries/update_recovery_address.sql +++ /dev/null @@ -1 +0,0 @@ -UPDATE fids SET recovery_address = ? 
WHERE fid = ?; \ No newline at end of file diff --git a/lib/storage/src/queries/update_remove_chain_event.sql b/lib/storage/src/queries/update_remove_chain_event.sql deleted file mode 100644 index b56a167..0000000 --- a/lib/storage/src/queries/update_remove_chain_event.sql +++ /dev/null @@ -1,3 +0,0 @@ -UPDATE signers -SET remove_chain_event_id = ? -WHERE key = ? AND key_type = ?; \ No newline at end of file
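
Note on the println! → log::error! swap in event_loop.rs: the `log` crate is only a facade, so these `error!` calls emit nothing unless some logger implementation is installed at startup. A minimal, self-contained sketch — assuming `env_logger` purely for illustration, not necessarily the backend this repo wires up:

use log::error;

fn main() {
    // Any `log` backend works; env_logger is used here only as an example
    // (requires the `log` and `env_logger` crates).
    env_logger::init();

    // Mirrors the new error paths in EventLoop::handle_command: failures are
    // logged instead of being printed to stdout or silently dropped.
    if let Err(err) = std::fs::read("does-not-exist") {
        error!("Failed to dial: {:?}", err);
    }
}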
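
The migration drops the UUID `id` column on chain_events in favour of the natural key (transaction_hash, log_index), and the generated bulk INSERTs now end in ON CONFLICT ... DO NOTHING, so replaying already-indexed events is a no-op. An illustrative statement against the new schema (all values are placeholders):

INSERT INTO chain_events (block_timestamp, fid, chain_id, block_number, transaction_index,
                          log_index, type, block_hash, transaction_hash, body, raw)
VALUES (1700000000, 1, 10, 4200000, 0, 0, 1, 'beef', 'cafe', '{}', '')
ON CONFLICT (transaction_hash, log_index) DO NOTHING;
-- Running the same statement twice leaves exactly one row behind.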
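
signers is keyed the same way: add_transaction_hash/add_log_index replace add_chain_event_id, added_at is now a millisecond timestamp derived from block_timestamp * 1000, and the bulk insert upserts on (fid, key) so that re-adding a previously removed key clears its removal markers. Illustrative statement with placeholder values:

INSERT INTO signers (id, added_at, fid, requester_fid, add_transaction_hash, add_log_index,
                     key_type, metadata_type, key, metadata)
VALUES ('00000000-0000-0000-0000-000000000001', 1700000000000, 1, 1, 'cafe', 0, 1, 1, 'abcd', '')
ON CONFLICT (fid, key) DO UPDATE SET remove_transaction_hash = NULL, remove_log_index = NULL;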
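
generate_bulk_transfer_queries and generate_bulk_update_recovery_address_queries now emit one UPDATE ... CASE statement per chunk of MAX_ROWS_PER_BATCH rows instead of binding parameters row by row. With illustrative fids and hex-encoded 20-byte addresses, a generated transfer statement has this shape (line breaks added for readability):

UPDATE fids SET custody_address = CASE
    WHEN fid = 10 THEN '00aa00aa00aa00aa00aa00aa00aa00aa00aa00aa'
    WHEN fid = 11 THEN '11bb11bb11bb11bb11bb11bb11bb11bb11bb11bb'
END WHERE fid IN ('10', '11');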
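
With the bind-based bulk_insert / bulk_transfer helpers removed, callers are left with plain SQL strings. A minimal sketch of executing one batch of generated statements atomically — assuming Store.conn is a sqlx::SqlitePool, as the removed helpers suggested; the helper name is hypothetical:

use sqlx::SqlitePool;

// Hypothetical caller-side helper: `queries` would come from
// ChainEventRow::generate_bulk_insert_queries (or the FidRow/SignerRow variants).
async fn run_generated_queries(pool: &SqlitePool, queries: &[String]) -> Result<(), sqlx::Error> {
    let mut tx = pool.begin().await?;
    for q in queries {
        sqlx::query(q).execute(&mut *tx).await?;
    }
    tx.commit().await?;
    Ok(())
}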